hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acf54140fcb4235c93da553623f68c97e9a078c6 | 751 | py | Python | setup.py | Geneticerror/swagger-coverage-py | 1e2d4f58d895bd296fe870c6b2437a77e0a8c697 | [
"MIT"
] | null | null | null | setup.py | Geneticerror/swagger-coverage-py | 1e2d4f58d895bd296fe870c6b2437a77e0a8c697 | [
"MIT"
] | null | null | null | setup.py | Geneticerror/swagger-coverage-py | 1e2d4f58d895bd296fe870c6b2437a77e0a8c697 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
with open("README.rst") as fh:
long_description = fh.read()
setup(
name="swagger-coverage-py",
version="2.1.1",
author="Jamal Zeinalov",
author_email="jamal.zeynalov@gmail.com",
description='Python adapter for "swagger-coverage" tool',
long_description=long_description,
url="https://github.com/JamalZeynalov/swagger-coverage-py",
packages=find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=[
"requests>=2.25.1",
"Faker>=6.0.0",
],
python_requires=">=3.6",
include_package_data=True,
)
| 27.814815 | 63 | 0.645806 |
acf54173f5a2fd5befb0cd4b39b648eec2d374f4 | 105 | py | Python | nopt/problems/__init__.py | Logismos/nopt | 033147d18288d7ff4339013844e656029294d1dd | [
"MIT"
] | null | null | null | nopt/problems/__init__.py | Logismos/nopt | 033147d18288d7ff4339013844e656029294d1dd | [
"MIT"
] | null | null | null | nopt/problems/__init__.py | Logismos/nopt | 033147d18288d7ff4339013844e656029294d1dd | [
"MIT"
] | null | null | null | from nopt.problems.inverted_pendulum import InvertedPendulum
from nopt.problems.cartpole import CartPole
| 35 | 60 | 0.885714 |
acf541a9b70fb03d9c9958b7eca511ab73110bcf | 1,803 | py | Python | Lib/turtledemo/round_dance.py | hbbtstar/cpython | ce6ab1eb7483f66347188305bbbb7e3388794ad8 | [
"PSF-2.0"
] | null | null | null | Lib/turtledemo/round_dance.py | hbbtstar/cpython | ce6ab1eb7483f66347188305bbbb7e3388794ad8 | [
"PSF-2.0"
] | null | null | null | Lib/turtledemo/round_dance.py | hbbtstar/cpython | ce6ab1eb7483f66347188305bbbb7e3388794ad8 | [
"PSF-2.0"
] | null | null | null | """ turtle-example-suite:
tdemo_round_dance.py
(Needs version 1.1 of the turtle module that
comes with Python 3.1)
Dancing turtles have a compound shape
consisting of a series of triangles of
decreasing size.
Turtles march along a circle while rotating
pairwise in opposite direction, with one
exception. Does that breaking of symmetry
enhance the attractiveness of the example?
Press any key to stop the animation.
Technically: demonstrates use of compound
shapes, transformation of shapes as well as
cloning turtles. The animation is
controlled through update().
"""
from turtle import *
def stop():
global running
running = False
def main():
global running
clearscreen()
bgcolor("gray10")
tracer(False)
shape("triangle")
f = 0.793402
phi = 9.064678
s = 5
c = 1
# create compound shape
sh = Shape("compound")
for i in range(10):
shapesize(s)
p =get_shapepoly()
s *= f
c *= f
tilt(-phi)
sh.addcomponent(p, (c, 0.25, 1-c), "black")
register_shape("multitri", sh)
# create dancers
shapesize(1)
shape("multitri")
pu()
setpos(0, -200)
dancers = []
for i in range(180):
fd(7)
tilt(-4)
lt(2)
update()
if i % 12 == 0:
dancers.append(clone())
home()
# dance
running = True
onkeypress(stop)
listen()
cs = 1
while running:
ta = -4
for dancer in dancers:
dancer.fd(7)
dancer.lt(2)
dancer.tilt(ta)
ta = -4 if ta > 0 else 2
if cs < 180:
right(4)
shapesize(cs)
cs *= 1.005
update()
return "DONE!"
if __name__=='__main__':
print main()
mainloop()
| 20.724138 | 51 | 0.577371 |
acf541af1bb3ebd0a182c3839d64f9ce9a19e679 | 903 | py | Python | ParadoxTrading/EngineExt/Futures/__init__.py | yutiansut/ParadoxTrading | b915d1491663443bedbb048017abeed3f7dcd4e2 | [
"MIT"
] | 2 | 2018-01-25T08:33:59.000Z | 2018-05-14T13:59:54.000Z | ParadoxTrading/EngineExt/Futures/__init__.py | yutiansut/ParadoxTrading | b915d1491663443bedbb048017abeed3f7dcd4e2 | [
"MIT"
] | null | null | null | ParadoxTrading/EngineExt/Futures/__init__.py | yutiansut/ParadoxTrading | b915d1491663443bedbb048017abeed3f7dcd4e2 | [
"MIT"
] | null | null | null | from .Arbitrage import ArbitrageEqualFundSimplePortfolio, \
ArbitrageEqualFundVolatilityPortfolio, ArbitrageStrategy
from .BacktestEngine import BacktestEngine
from .BacktestMarketSupply import BacktestMarketSupply
from .BarBacktestExecution import BarBacktestExecution
from .BarPortfolio import BarPortfolio
from .InterDayBacktestExecution import InterDayBacktestExecution
from .InterDayOnlineEngine import InterDayOnlineEngine
from .InterDayOnlineExecution import InterDayOnlineExecution
from .InterDayOnlineMarketSupply import InterDayOnlineMarketSupply
from .InterDayPortfolio import InterDayPortfolio
from .TickBacktestExecution import TickBacktestExecution
from .TickPortfolio import TickPortfolio
from .Trend import CTAEqualFundPortfolio, CTAEqualRiskATRPortfolio, \
CTAEqualRiskRatePortfolio, CTAEqualRiskVolatilityPortfolio, \
CTAStatusType, CTAStrategy, CTAEqualRiskGARCHPortfolio
| 53.117647 | 69 | 0.890365 |
acf542dfc8ba84c50fb43c28c5cd717b8d14d936 | 16,507 | py | Python | tests/core/test_variation_decider.py | MDAkramSiddiqui/vwo-python-sdk | ebd194931a42111fdfdfa16a7888dce1754d345c | [
"Apache-2.0"
] | null | null | null | tests/core/test_variation_decider.py | MDAkramSiddiqui/vwo-python-sdk | ebd194931a42111fdfdfa16a7888dce1754d345c | [
"Apache-2.0"
] | null | null | null | tests/core/test_variation_decider.py | MDAkramSiddiqui/vwo-python-sdk | ebd194931a42111fdfdfa16a7888dce1754d345c | [
"Apache-2.0"
] | null | null | null | # Copyright 2019-2021 Wingify Software Pvt. Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import random
import copy
from ..data.settings_files import SETTINGS_FILES
from vwo.core.variation_decider import VariationDecider
from vwo.helpers import campaign_util
from vwo import UserStorage
class ClientUserStorage:
def __init__(self):
self.storage = {}
def get(self, user_id, campaign_key):
return self.storage.get((user_id, campaign_key))
def set(self, user_data):
self.storage[(user_data.get("userId"), user_data.get("campaignKey"))] = user_data
class VariationDeciderTest(unittest.TestCase):
def setUp(self):
self.user_id = str(random.random())
self.settings_file = SETTINGS_FILES["DUMMY_SETTINGS_FILE"]
self.dummy_campaign = self.settings_file.get("campaigns")[0]
self.campaign_key = self.dummy_campaign.get("key")
campaign_util.set_variation_allocation(self.dummy_campaign)
self.variation_decider = VariationDecider()
self.user_storage = ClientUserStorage()
self.variation_decider_with_us = VariationDecider(self.user_storage)
def test_init_with_valid_user_storage(self):
class US:
def set(self, user_data):
pass
def get(self, user_id, campaign_key):
pass
variation_decider = VariationDecider(US())
self.assertIsInstance(variation_decider.user_storage, US)
def test_init_with_our_user_storage(self):
variation_decider = VariationDecider(UserStorage())
self.assertIsInstance(variation_decider.user_storage, UserStorage)
def test_get_with_user_storage_(self):
client_db = {}
class US(UserStorage):
def get(self, user_id, _campaign_key):
return client_db.get(user_id)
def set(self, user_data):
client_db[user_data["userId"]] = user_data
variation_decider = VariationDecider(US())
# First let variation_decider compute variation, and store
user_id = "Sarah"
variation = variation_decider.get_variation(user_id, self.dummy_campaign)
self.assertEqual(variation.get("id"), "1")
self.assertEqual(variation.get("name"), "Control")
# Now check whether the variation_decider is able to retrieve
# variation for user_storage, no campaign is required
# for this.
variation = variation_decider.get_variation(user_id, self.dummy_campaign)
self.assertEqual(variation.get("id"), "1")
self.assertEqual(variation.get("name"), "Control")
def test_get_with_broken_set_in_user_storage(self):
client_db = {}
class US(UserStorage):
def get(self, user_id, campaign_key):
return client_db.get(user_id)
def set(self):
pass
variation_decider = VariationDecider(US())
user_id = "Sarah"
variation = variation_decider.get_variation(user_id, self.dummy_campaign)
self.assertEqual(variation.get("id"), "1")
self.assertEqual(variation.get("name"), "Control")
def test_get_with_broken_get_in_user_storage(self):
client_db = {}
class US(UserStorage):
def get(self):
# def get(self, user_id): pass works, check later to rectify
pass
def set(self, user_data):
client_db[user_data["userId"]] = user_data
variation_decider = VariationDecider(US())
user_id = "Sarah"
variation = variation_decider.get_variation(user_id, self.dummy_campaign)
self.assertEqual(variation.get("id"), "1")
self.assertEqual(variation.get("name"), "Control")
variation = variation_decider.get_variation(user_id, self.dummy_campaign)
self.assertEqual(variation.get("id"), "1")
self.assertEqual(variation.get("name"), "Control")
def test__get_user_storage_data_no_get(self):
variation_decider = VariationDecider()
variation = variation_decider._get_user_storage_data("Sarah", "AB_BA")
self.assertEquals(variation, False)
def test_get_variation_from_user_storage_return_variation(self):
client_storage = ClientUserStorage()
variation_decider = VariationDecider(user_storage=client_storage)
campaign = self.settings_file["campaigns"][0]
variation = campaign["variations"][0]
set_status = variation_decider._set_user_storage_data(
variation_decider._create_user_storage_data("Sarah", campaign.get("key"), variation.get("name"))
)
self.assertIs(set_status, True)
user_storage_data = variation_decider._get_user_storage_data("Sarah", campaign.get("key"))
result_variation = variation_decider.get_variation_from_user_storage("Sarah", campaign, user_storage_data)
self.assertEquals(result_variation.get("name"), variation.get("name"))
def test_find_targeted_variation_returns_None(self):
variation_decider = VariationDecider()
settings_file = SETTINGS_FILES.get("FT_100_W_33_33_33_WS_WW")
campaign = settings_file["campaigns"][0]
false_variation_targeting_variables = {"chrome": "true", "safari": "false", "browser": "firefox 106.69"}
result_variation = variation_decider.find_targeted_variation(
"Sarah", campaign, false_variation_targeting_variables
)
self.assertIsNone(result_variation)
def test_find_targeted_variation_returns_control(self):
variation_decider = VariationDecider()
settings_file = SETTINGS_FILES.get("FT_100_W_33_33_33_WS_WW")
campaign = settings_file["campaigns"][0]
true_variation_targeting_variables = {"chrome": "false", "safari": "true", "browser": "chrome 107.107"}
result_variation = variation_decider.find_targeted_variation(
"Sarah", campaign, true_variation_targeting_variables
)
self.assertEquals("Control", result_variation.get("name"))
def test_evaluate_pre_segmentation_fails(self):
variation_decider = VariationDecider()
settings_file = SETTINGS_FILES.get("FT_100_W_33_33_33_WS_WW")
campaign = settings_file["campaigns"][0]
false_custom_variables = {
"contains_vwo": "legends say that vwo is the best",
"regex_for_no_zeros": 1223123,
"regex_for_all_letters": "dsfASF",
"regex_for_small_letters": "sadfksjdf",
"regex_real_number": 12321.2242,
"regex_for_zeros": 0,
"is_equal_to": "!equal_to_variable",
"contains": "contains_variable",
"regex_for_capital_letters": "SADFLSDLF",
"is_not_equal_to": "is_not_equal_to_variable",
"this_is_regex": "this is regex",
"starts_with": "starts_with_variable",
}
status = variation_decider.evaluate_pre_segmentation("Sarah", campaign, false_custom_variables)
self.assertEquals(status, False)
def test_evaluate_pre_segmentation_passes(self):
variation_decider = VariationDecider()
settings_file = SETTINGS_FILES.get("FT_100_W_33_33_33_WS_WW")
campaign = settings_file["campaigns"][0]
true_custom_variables = {
"contains_vwo": "legends say that vwo is the best",
"regex_for_no_zeros": 1223123,
"regex_for_all_letters": "dsfASF",
"regex_for_small_letters": "sadfksjdf",
"regex_real_number": 12321.2242,
"regex_for_zeros": 0,
"is_equal_to": "equal_to_variable",
"contains": "contains_variable",
"regex_for_capital_letters": "SADFLSDLF",
"is_not_equal_to": "is_not_equal_to_variable",
"this_is_regex": "this is regex",
"starts_with": "starts_with_variable",
}
status = variation_decider.evaluate_pre_segmentation("Sarah", campaign, true_custom_variables)
self.assertEquals(status, True)
def test_get_white_listed_variations_list_returns_empty_list_all_fails(self,):
variation_decider = VariationDecider()
settings_file = SETTINGS_FILES.get("FT_100_W_33_33_33_WS_WW")
campaign = settings_file["campaigns"][0]
false_variation_targeting_variables = {"chrome": "true", "safari": "false", "browser": "firefox 106.69"}
variation_list = variation_decider._get_white_listed_variations_list(
"Sarah", campaign, false_variation_targeting_variables
)
self.assertFalse(variation_list)
def test_get_white_listed_variations_list_returns_empty_list_control_empty_segments(self,):
variation_decider = VariationDecider()
settings_file = SETTINGS_FILES.get("FT_100_W_33_33_33_WS_WW")
campaign = copy.deepcopy(settings_file["campaigns"][0])
campaign["variations"][0]["segments"] = {}
false_variation_targeting_variables = {"chrome": "true", "safari": "false", "browser": "firefox 106.69"}
variation_list = variation_decider._get_white_listed_variations_list(
"Sarah", campaign, false_variation_targeting_variables
)
self.assertFalse(variation_list)
def test_get_white_listed_variations_list_returns_variation_1_list_variation_1_whitelisting_pass(self,):
variation_decider = VariationDecider()
settings_file = SETTINGS_FILES.get("FT_100_W_33_33_33_WS_WW")
campaign = copy.deepcopy(settings_file["campaigns"][0])
campaign["variations"][1]["segments"] = {"or": [{"custom_variable": {"browser": "wildcard(firefox*)"}}]}
false_variation_targeting_variables = {"chrome": "true", "safari": "false", "browser": "firefox 106.69"}
variation_list = variation_decider._get_white_listed_variations_list(
"Sarah", campaign, false_variation_targeting_variables
)
self.assertTrue(variation_list)
self.assertEquals(variation_list[0].get("name"), "Variation-1")
def test_get_white_listed_variations_list_returns_all_variation_list_whitelisting_passes_for_all(self,):
variation_decider = VariationDecider()
settings_file = SETTINGS_FILES.get("FT_100_W_33_33_33_WS_WW")
campaign = copy.deepcopy(settings_file["campaigns"][0])
true_variation_targeting_variables = {"chrome": "false", "safari": "true", "browser": "chrome 107.107"}
variation_list = variation_decider._get_white_listed_variations_list(
"Sarah", campaign, true_variation_targeting_variables
)
self.assertTrue(variation_list)
self.assertEquals(variation_list[0].get("name"), "Control")
self.assertEquals(variation_list[1].get("name"), "Variation-1")
self.assertEquals(variation_list[2].get("name"), "Variation-2")
def test_is_user_part_of_campaign_true(self):
variation_decider = VariationDecider()
settings_file = SETTINGS_FILES.get("FT_100_W_33_33_33_WS_WW")
campaign = copy.deepcopy(settings_file["campaigns"][0])
status = variation_decider.is_user_part_of_campaign("Sarah", campaign)
self.assertTrue(status)
def test_is_user_part_of_campaign_false(self):
variation_decider = VariationDecider()
settings_file = SETTINGS_FILES.get("FT_100_W_33_33_33_WS_WW")
campaign = copy.deepcopy(settings_file["campaigns"][0])
campaign["percentTraffic"] = 1
status = variation_decider.is_user_part_of_campaign("Sarah", campaign)
self.assertFalse(status)
def test_set_user_storage_data_return_true(self):
client_storage = ClientUserStorage()
variation_decider = VariationDecider(user_storage=client_storage)
campaign = self.settings_file["campaigns"][0]
variation = campaign["variations"][0]
set_status = variation_decider._set_user_storage_data(
variation_decider._create_user_storage_data("Sarah", campaign.get("key"), variation.get("name"))
)
self.assertIs(set_status, True)
def test_get_user_storage_data_true(self):
client_storage = ClientUserStorage()
variation_decider = VariationDecider(user_storage=client_storage)
user_storage_data = {"userId": "Sarah", "campaignKey": "FEATURE_TEST_1", "variationName": "DESIGN_4"}
client_storage.set(user_storage_data)
result_user_storage_data = variation_decider._get_user_storage_data("Sarah", "FEATURE_TEST_1")
self.assertDictEqual(result_user_storage_data, user_storage_data)
def test_get_user_storage_data_false_different_campaign(self):
client_storage = ClientUserStorage()
variation_decider = VariationDecider(user_storage=client_storage)
user_storage_data = {"userId": "Sarah", "campaignKey": "FEATURE_TEST_2", "variationName": "DESIGN_4"}
client_storage.set(user_storage_data)
result_user_storage_data = variation_decider._get_user_storage_data("Sarah", "FEATURE_TEST_1")
self.assertIsNone(result_user_storage_data)
def test_get_variation_from_user_storage_returns_none_as_garbage_variation_name(self,):
client_storage = ClientUserStorage()
variation_decider = VariationDecider(user_storage=client_storage)
settings_file = SETTINGS_FILES.get("FT_100_W_33_33_33_WS_WW")
campaign = settings_file["campaigns"][0]
user_storage_data = {"userId": "Sarah", "campaignKey": "FEATURE_TEST_2", "variationName": "None"}
client_storage.set(user_storage_data)
user_storage_data = variation_decider._get_user_storage_data("Sarah", campaign.get("key"))
result_variation = variation_decider.get_variation_from_user_storage("Sarah", campaign, user_storage_data)
self.assertIsNone(result_variation)
def test_get_variation_from_user_storage_returns_control(self):
client_storage = ClientUserStorage()
variation_decider = VariationDecider(user_storage=client_storage)
settings_file = SETTINGS_FILES.get("FT_100_W_33_33_33_WS_WW")
campaign = settings_file["campaigns"][0]
user_storage_data = {
"userId": "Sarah",
"campaignKey": campaign["key"],
"variationName": campaign["variations"][0]["name"],
}
client_storage.set(user_storage_data)
user_storage_data = variation_decider._get_user_storage_data("Sarah", campaign.get("key"))
result_variation = variation_decider.get_variation_from_user_storage("Sarah", campaign, user_storage_data)
self.assertEquals(result_variation["name"], campaign["variations"][0]["name"])
def test_set_get_user_storage_data(self):
user_storage = ClientUserStorage()
variation_decider = VariationDecider(user_storage)
variation_decider._set_user_storage_data(
variation_decider._create_user_storage_data("user_id", "campaign_key", "variation_name")
)
self.assertEquals(
user_storage.storage.get(("user_id", "campaign_key")),
variation_decider._get_user_storage_data("user_id", "campaign_key"),
)
def test_set_user_storage_data(self):
user_storage = ClientUserStorage()
variation_decider = VariationDecider(user_storage)
variation_decider._set_user_storage_data(
variation_decider._create_user_storage_data("user_id", "campaign_key_1", "variation_name_1")
)
variation_decider._set_user_storage_data(
variation_decider._create_user_storage_data("user_id", "campaign_key_2", "variation_name_2")
)
self.assertEquals(
user_storage.storage.get(("user_id", "campaign_key_1")),
variation_decider._get_user_storage_data("user_id", "campaign_key_1"),
)
self.assertEquals(
user_storage.storage.get(("user_id", "campaign_key_2")),
variation_decider._get_user_storage_data("user_id", "campaign_key_2"),
)
| 47.708092 | 114 | 0.696856 |
acf542fee42e86abc879c500fbafe993f01e5f1e | 1,235 | py | Python | colcon_hardware_acceleration/verb/acceleration.py | methylDragon/colcon-acceleration | 934fcad145d0934413aea93287b4d8d3fd792c51 | [
"Apache-2.0"
] | null | null | null | colcon_hardware_acceleration/verb/acceleration.py | methylDragon/colcon-acceleration | 934fcad145d0934413aea93287b4d8d3fd792c51 | [
"Apache-2.0"
] | 3 | 2022-03-29T11:14:18.000Z | 2022-03-30T09:58:30.000Z | colcon_hardware_acceleration/verb/acceleration.py | ros-acceleration/colcon-hardware-acceleration | dd3369d2821f90c8c5be37229167e5bb676fe70c | [
"Apache-2.0"
] | null | null | null | # Copyright 2022 Víctor Mayoral-Vilches
# Licensed under the Apache License, Version 2.0
from colcon_core.command import add_subparsers
from colcon_core.plugin_system import satisfies_version
from colcon_core.verb import VerbExtensionPoint
from colcon_hardware_acceleration.subverb import get_subverb_extensions
class AccelerationVerb(VerbExtensionPoint):
"""Manage hardware acceleration capabilities."""
def __init__(self): # noqa: D107
super().__init__()
satisfies_version(VerbExtensionPoint.EXTENSION_POINT_VERSION, "^1.0")
self._subparser = None
def add_arguments(self, *, parser): # noqa: D102
# remember the subparser to print usage in case no subverb is passed
self._subparser = parser
# get subverb extensions and let them add their arguments
subverb_extensions = get_subverb_extensions()
add_subparsers(
parser, "colcon acceleration",
subverb_extensions, attribute="subverb_name"
)
def main(self, *, context): # noqa: D102
# error: no subverb provided
if context.args.subverb_name is None:
print(self._subparser.format_usage())
return "Error: No subverb provided"
| 36.323529 | 77 | 0.709312 |
acf544340a5c9c20791e2d8b2d95f5eecbeed94f | 2,655 | py | Python | credentials_test.py | EidAbdullahi/Password-Locker | 0c58264477697a892ba868f901eb0ed83bc34a20 | [
"Info-ZIP"
] | null | null | null | credentials_test.py | EidAbdullahi/Password-Locker | 0c58264477697a892ba868f901eb0ed83bc34a20 | [
"Info-ZIP"
] | null | null | null | credentials_test.py | EidAbdullahi/Password-Locker | 0c58264477697a892ba868f901eb0ed83bc34a20 | [
"Info-ZIP"
] | null | null | null | import unittest
from credential import Credential
class TestCredential(unittest.TestCase):
def setUp(self):
self.new_credential = Credential("Eid","Eid","12345a","eidabdullahi10@gmail.com") # create Account object
def test_init(self):
self.assertEqual(self.new_credential.credential_name,"Eid")
self.assertEqual(self.new_credential.user_name,"Eid")
self.assertEqual(self.new_credential.password,"12345a")
self.assertEqual(self.new_credential.email,"eidabdullahi10@gmail.com")
def test_save_credential(self):
'''
test to save the credentials
'''
self.new_credential.save_credential() # saving the new account
self.assertEqual(len(Credential.credential_list),1)
def tearDown(self):
'''
test to clean up the credential list in the credential.py
'''
Credential.credential_list = []
def test_save_multiple_credential(self):
'''
to save multiple credentials
'''
self.new_credential.save_credential()
test_credential = Credential("Test","user","0717062455","test@user.com")
test_credential.save_credential()
self.assertEqual(len(Credential.credential_list),2)
def test_delete_credential(self):
'''
to delete credentials
'''
self.new_credential.save_credential()
test_credential = Credential("Test","user","0717062455","test@user.com")
test_credential.save_credential()
self.new_credential.delete_credential()
self.assertEqual(len(Credential.credential_list),1)
def test_find_credential_by_credential_name(self):
'''
to find credential by credential name
'''
self.new_credential.save_credential()
test_credential = Credential("Test","user","0717062455","test@user.com")
test_credential.save_credential()
found_credential = Credential.find_by_name("Test")
self.assertEqual(found_credential.email,test_credential.email)
def test_credential_exists(self):
'''
to check whether the credential exists
'''
self.new_credential.save_credential()
test_credential = Credential("Test","user","0717062455","test@user.com") # new account
test_credential.save_credential()
credential_exists = Credential.credential_exist("0712345678")
self.assertTrue(credential_exists)
if __name__ == '__main__':
unittest.main()
| 30.517241 | 113 | 0.635405 |
acf5460de775554739a45eee8255f7a603c96d93 | 4,546 | py | Python | tests/p2p/test_ecies.py | zixuanzh/py-evm | de05e73036c663e85083316bc503549044792892 | [
"MIT"
] | 137 | 2017-03-17T11:37:51.000Z | 2022-03-07T07:51:28.000Z | tests/p2p/test_ecies.py | zixuanzh/py-evm | de05e73036c663e85083316bc503549044792892 | [
"MIT"
] | 102 | 2017-04-07T10:43:03.000Z | 2018-11-11T18:01:56.000Z | tests/p2p/test_ecies.py | zixuanzh/py-evm | de05e73036c663e85083316bc503549044792892 | [
"MIT"
] | 39 | 2017-03-17T11:38:52.000Z | 2021-02-18T23:05:17.000Z | import pytest
from eth_utils import (
decode_hex,
encode_hex,
)
from eth_keys import keys
from p2p import ecies
# (pvikey_hex, pubkey_hex, expected_ecdh) tuples with known-good values, to ensure our
# ECC backends are compatible with other clients'.
# Copied from
# https://github.com/ethereum/cpp-ethereum/blob/3c49a0/test/unittests/libp2p/rlpx.cpp#L427
# and
# https://github.com/ethereum/go-ethereum/blob/5c9346/crypto/ecies/ecies_test.go#L456
STATIC_ECDH_VALUES = [
("0x332143e9629eedff7d142d741f896258f5a1bfab54dab2121d3ec5000093d74b",
"0xf0d2b97981bd0d415a843b5dfe8ab77a30300daab3658c578f2340308a2da1a07f0821367332598b6aa4e180a41e92f4ebbae3518da847f0b1c0bbfe20bcf4e1", # noqa: E501
"0xee1418607c2fcfb57fda40380e885a707f49000a5dda056d828b7d9bd1f29a08",
),
("0x7ebbc6a8358bc76dd73ebc557056702c8cfc34e5cfcd90eb83af0347575fd2ad",
"0x83ede0f19c3c98649265956a4193677b14c338a22de2086a08d84e4446fe37e4e233478259ec90dbeef52f4f6c890f8c38660ec7b61b9d439b8a6d1c323dc025", # noqa: E501
"0x167ccc13ac5e8a26b131c3446030c60fbfac6aa8e31149d0869f93626a4cdf62",
),
]
def test_encrypt_decrypt():
msg = b'test yeah'
privkey = ecies.generate_privkey()
ciphertext = ecies.encrypt(msg, privkey.public_key)
decrypted = ecies.decrypt(ciphertext, privkey)
assert decrypted == msg
privkey2 = ecies.generate_privkey()
with pytest.raises(ecies.DecryptionError):
decrypted = ecies.decrypt(ciphertext, privkey2)
def test_decrypt_known_good_handshake():
# Data taken from https://gist.github.com/fjl/3a78780d17c755d22df2
privkey = keys.PrivateKey(
decode_hex("c45f950382d542169ea207959ee0220ec1491755abe405cd7498d6b16adb6df8"))
auth_ciphertext = decode_hex(
"04a0274c5951e32132e7f088c9bdfdc76c9d91f0dc6078e848f8e3361193dbdc43b94351ea3d89e4ff33ddcefbc80070498824857f499656c4f79bbd97b6c51a514251d69fd1785ef8764bd1d262a883f780964cce6a14ff206daf1206aa073a2d35ce2697ebf3514225bef186631b2fd2316a4b7bcdefec8d75a1025ba2c5404a34e7795e1dd4bc01c6113ece07b0df13b69d3ba654a36e35e69ff9d482d88d2f0228e7d96fe11dccbb465a1831c7d4ad3a026924b182fc2bdfe016a6944312021da5cc459713b13b86a686cf34d6fe6615020e4acf26bf0d5b7579ba813e7723eb95b3cef9942f01a58bd61baee7c9bdd438956b426a4ffe238e61746a8c93d5e10680617c82e48d706ac4953f5e1c4c4f7d013c87d34a06626f498f34576dc017fdd3d581e83cfd26cf125b6d2bda1f1d56") # noqa: E501
auth_plaintext = decode_hex(
"884c36f7ae6b406637c1f61b2f57e1d2cab813d24c6559aaf843c3f48962f32f46662c066d39669b7b2e3ba14781477417600e7728399278b1b5d801a519aa570034fdb5419558137e0d44cd13d319afe5629eeccb47fd9dfe55cc6089426e46cc762dd8a0636e07a54b31169eba0c7a20a1ac1ef68596f1f283b5c676bae4064abfcce24799d09f67e392632d3ffdc12e3d6430dcb0ea19c318343ffa7aae74d4cd26fecb93657d1cd9e9eaf4f8be720b56dd1d39f190c4e1c6b7ec66f077bb1100") # noqa: E501
decrypted = ecies.decrypt(auth_ciphertext, privkey)
assert auth_plaintext == decrypted
@pytest.mark.parametrize("privkey_hex, pubkey_hex, ecdh_expected", STATIC_ECDH_VALUES)
def test_ecdh(privkey_hex, pubkey_hex, ecdh_expected):
privkey = keys.PrivateKey(decode_hex(privkey_hex))
pubkey = keys.PublicKey(decode_hex(pubkey_hex))
assert ecdh_expected == encode_hex(ecies.ecdh_agree(privkey, pubkey))
# FIXME: Document those values; this test was lifted from pydevp2p:
# https://github.com/ethereum/pydevp2p/blob/e1ef07a782b9369d18a8441c3b9bcf12456e0608/devp2p/tests/test_ecies.py#L31
def test_hmac_sha256():
k_mac = decode_hex("0x07a4b6dfa06369a570f2dcba2f11a18f")
indata = decode_hex("0x4dcb92ed4fc67fe86832")
hmac_expected = decode_hex("0xc90b62b1a673b47df8e395e671a68bfa68070d6e2ef039598bb829398b89b9a9")
hmac = ecies.hmac_sha256(k_mac, indata)
assert hmac_expected == hmac
# message tag generated by geth
tag_secret = decode_hex("0xaf6623e52208c596e17c72cea6f1cb09")
tag_input = decode_hex("0x3461282bcedace970df2")
tag_expected = decode_hex("0xb3ce623bce08d5793677ba9441b22bb34d3e8a7de964206d26589df3e8eb5183")
hmac = ecies.hmac_sha256(tag_secret, tag_input)
assert hmac == tag_expected
# FIXME: Document those values; this test was lifted from pydevp2p:
# https://github.com/ethereum/pydevp2p/blob/e1ef07a782b9369d18a8441c3b9bcf12456e0608/devp2p/tests/test_ecies.py#L46
def test_kdf():
input_ = decode_hex("0x961c065873443014e0371f1ed656c586c6730bf927415757f389d92acf8268df")
expected_key = decode_hex("0x4050c52e6d9c08755e5a818ac66fabe478b825b1836fd5efc4d44e40d04dabcc")
key = ecies.kdf(input_)
assert key == expected_key
| 52.860465 | 639 | 0.83634 |
acf54690a8c5d747f2ee07d8565d63db6af7ddce | 11,754 | py | Python | clientside/ai4u/ai4u/ml/a3c/preprocessing.py | gilcoder/AI4U | 17aaa5a150dd7d35258ecf8f53202e8daad0d4ff | [
"MIT"
] | 20 | 2020-07-21T08:29:56.000Z | 2022-03-06T13:25:43.000Z | clientside/ai4u/ai4u/ml/a3c/preprocessing.py | gilcoder/AI4U | 17aaa5a150dd7d35258ecf8f53202e8daad0d4ff | [
"MIT"
] | 6 | 2020-07-16T01:01:30.000Z | 2021-11-22T01:19:11.000Z | clientside/ai4u/ai4u/ml/a3c/preprocessing.py | gilcoder/AI4U | 17aaa5a150dd7d35258ecf8f53202e8daad0d4ff | [
"MIT"
] | 2 | 2020-07-27T13:08:39.000Z | 2020-11-06T18:08:06.000Z | from collections import deque
import cv2
import numpy as np
from gym import spaces
from gym.core import Wrapper, ObservationWrapper, RewardWrapper
from gym.spaces import Box
from ai4u.utils import image_decode
"""
Observation preprocessing and environment tweaks.
Section 8 ("Experimental Setup") of the paper says:
"The Atari experiments used the same input preprocessing as (Mnih et al., 2015)
and an action repeat of 4."
'Mnih et al., 2015' is 'Human-level control through deep reinforcement learning'.
The relevant parts of that paper's Methods section are summarised below.
# Observation preprocessing:
'Preprocessing':
- "First, to encode a single frame we take the maximum value for each pixel colour value over the
frame being encoded and the previous frame. This was necessary to remove flickering that is
present in games where some objects appear only in even frames while other objects appear only
in odd frames, an artefact caused by the limited number of sprites Atari 2600 can display at
once."
- "Second, we then extract the Y channel, also known as luminance, from the RGB frame and rescale
it to 84 x 84."
- "The function phi from algorithm 1 described below applies this preprocessing to the m most
recent frames and stacks them to produce the input to the Q-function, in which m = 4, although
the algorithm is robust to different values of m (for example, 3 or 5)."
'Training details':
- "Following previous approaches to playing Atari 2600 games, we also use a simple frame-skipping
technique. More precisely, the agent sees and selects actions on every kth frame instead of
every frame, and its last action is repeated on skipped frames. Because running the emulator
forward for one step requires much less computation than having the agent select an action,
this technique allows the agent to play roughly k times more games without significantly
increasing the runtime. We use k = 4 for all games."
There's some ambiguity about what order to apply these steps in. I think the right order should be:
1. Max over subsequent frames
So - observation 0: max. over frames 0 and 1
observation 1: max. over frames 1 and 2
etc.
2. Extract luminance and scale
3. Skip frames
So - observation 0: max. over frames 0 and 1
observation 1: max. over frames 4 and 5
etc.
4. Stack frames
So - frame stack 0: max. over frames 0 and 1
max. over frames 4 and 5
max. over frames 8 and 9
max. over frames 12 and 13
frame stack 1: max. over frames 4 and 5
max. over frames 8 and 9
max. over frames 12 and 13
max. over frames 16 and 17
The main ambiguity is whether frame skipping or frame stacking should be done first.
Above we've assumed frame skipping should be done first. If we did frame stacking first, we would
only look at every 4th frame stack, giving:
- Frame stack 0: max. over frames 0 and 1
max. over frames 1 and 2
max. over frames 2 and 3
max. over frames 3 and 4
- Frame stack 4: max. over frames 4 and 5
max. over frames 5 and 6
max. over frames 6 and 7
max. over frames 7 and 8
Note that there's a big difference: frame skip then frame stack gives the agent much less temporal
scope than frame stack then frame skip. In the former, the agent has access to 12 frames' worth of
observations, whereas in the latter, only 4 frames' worth.
## Environment tweaks
'Training details':
- "As the scale of scores varies greatly from game to game, we clipped all positive rewards at 1 and
all negative rewards at -1, leaving 0 rewards unchanged."
- "For games where there is a life counter, the Atari 2600 emulator also sends the number of lives
left in thegame, which is then used to mark the end of an episode during training."
'Evaluation procedure':
- "The trained agents were evaluated by playing each game 30 times for up to 5 min each time with
different initial random conditions ('no-op'; see Extended Data Table 1)."
Extended Data Table 1 lists "no-op max" as 30 (set in params.py).
We implement all these steps using a modular set of wrappers, heavily inspired by Baselines'
atari_wrappers.py (https://git.io/vhWWG).
"""
def get_noop_action_index(env):
action_meanings = env.unwrapped.get_action_meanings()
try:
noop_action_index = action_meanings.index('NOOP')
return noop_action_index
except ValueError:
raise Exception("Unsure about environment's no-op action")
class MaxWrapper(Wrapper):
"""
Take maximum pixel values over pairs of frames.
"""
def __init__(self, env):
Wrapper.__init__(self, env)
self.frame_pairs = deque(maxlen=2)
def reset(self):
obs = self.env.reset()
self.frame_pairs.append(obs)
# The first frame returned should be the maximum of frames 0 and 1.
# We get frame 0 from env.reset(). For frame 1, we take a no-op action.
noop_action_index = get_noop_action_index(self.env)
obs, _, done, _ = self.env.step(noop_action_index)
if done:
raise Exception("Environment signalled done during initial frame "
"maxing")
self.frame_pairs.append(obs)
return np.max(self.frame_pairs, axis=0)
def step(self, action):
obs, reward, done, info = self.env.step(action)
self.frame_pairs.append(obs)
obs_maxed = np.max(self.frame_pairs, axis=0)
return obs_maxed, reward, done, info
class ExtractImageWrapper(Wrapper):
"""
Convert observations from bytearray to image
"""
def reset(self):
state = self.env.reset()
self.state = image_decode(state)
def step(self, action):
obs, reward, done, info = self.env.step(action)
return image_decode(obs), reward, done, info
class ExtractLuminanceAndScaleWrapper(ObservationWrapper):
"""
Convert observations from colour to grayscale, then scale to 84 x 84
"""
def __init__(self, env):
ObservationWrapper.__init__(self, env)
# Important so that gym's play.py picks up the right resolution
self.observation_space = spaces.Box(low=0, high=255, shape=(84, 84), dtype=np.uint8)
def observation(self, obs):
obs = cv2.cvtColor(obs, cv2.COLOR_RGB2GRAY)
# Bilinear interpolation
obs = cv2.resize(obs, (84, 84), interpolation=cv2.INTER_LINEAR)
return obs
class FrameStackWrapper(Wrapper):
"""
Stack the most recent 4 frames together.
"""
def __init__(self, env):
Wrapper.__init__(self, env)
self.frame_stack = deque(maxlen=4)
low = np.tile(env.observation_space.low[..., np.newaxis], 4)
high = np.tile(env.observation_space.high[..., np.newaxis], 4)
dtype = env.observation_space.dtype
self.observation_space = Box(low=low, high=high, dtype=dtype)
def _get_obs(self):
obs = np.array(self.frame_stack)
# Switch from (4, 84, 84) to (84, 84, 4), so that we have the right order for inputting
# directly into the convnet with the default channels_last
obs = np.moveaxis(obs, 0, -1)
return obs
def reset(self):
obs = self.env.reset()
self.frame_stack.append(obs)
# The first observation returned should be a stack of observations 0 through 3. We get
# observation 0 from env.reset(). For the rest, we take no-op actions.
noop_action_index = get_noop_action_index(self.env)
for _ in range(3):
obs, _, done, _ = self.env.step(noop_action_index)
if done:
raise Exception("Environment signalled done during initial "
"frame stack")
self.frame_stack.append(obs)
return self._get_obs()
def step(self, action):
obs, reward, done, info = self.env.step(action)
self.frame_stack.append(obs)
return self._get_obs(), reward, done, info
class FrameSkipWrapper(Wrapper):
def __init__(self, env, k=4):
Wrapper.__init__(self, env)
self.k = k
"""
Repeat the chosen action for k frames, only returning the last frame.
"""
def reset(self):
return self.env.reset()
def step(self, action):
reward_sum = 0
for _ in range(self.k):
obs, reward, done, info = self.env.step(action)
reward_sum += reward
if done:
break
return obs, reward_sum, done, info
class RandomStartWrapper(Wrapper):
"""
Start each episode with a random number of no-ops.
"""
def __init__(self, env, max_n_noops):
Wrapper.__init__(self, env)
self.max_n_noops = max_n_noops
def step(self, action):
return self.env.step(action)
def reset(self):
obs = self.env.reset()
n_noops = np.random.randint(low=0, high=self.max_n_noops + 1)
noop_action_index = get_noop_action_index(self.env)
for _ in range(n_noops):
obs, _, done, _ = self.env.step(noop_action_index)
if done:
obs = self.env.reset()
return obs
class NormalizeObservationsWrapper(ObservationWrapper):
"""
Normalize observations to range [0, 1].
"""
def __init__(self, env):
ObservationWrapper.__init__(self, env)
self.observation_space = spaces.Box(low=0.0, high=1.0, shape=env.observation_space.shape,
dtype=np.float32)
def observation(self, obs):
return obs / 255.0
class ClipRewardsWrapper(RewardWrapper):
"""
Clip rewards to range [-1, +1].
"""
def reward(self, reward):
return np.clip(reward, -1, +1)
class EndEpisodeOnLifeLossWrapper(Wrapper):
"""
Send 'episode done' when life lost. (Baselines' atari_wrappers.py claims that this helps with
value estimation. I guess it makes it clear that only actions since the last loss of life
contributed significantly to any rewards in the present.)
"""
def __init__(self, env):
Wrapper.__init__(self, env)
self.done_because_life_lost = False
self.reset_obs = None
def step(self, action):
lives_before = self.env.unwrapped.ale.lives()
obs, reward, done, info = self.env.step(action)
lives_after = self.env.unwrapped.ale.lives()
if done:
self.done_because_life_lost = False
elif lives_after < lives_before:
self.done_because_life_lost = True
self.reset_obs = obs
done = True
return obs, reward, done, info
def reset(self):
assert self.done_because_life_lost is not None
# If we sent the 'episode done' signal after a loss of a life, then we'll probably get a
# reset signal next. But we shouldn't actually reset! We should just keep on playing until
# the /real/ end-of-episode.
if self.done_because_life_lost:
self.done_because_life_lost = None
return self.reset_obs
else:
return self.env.reset()
def generic_preprocess(env, max_n_noops, clip_rewards=True):
return env
| 36.846395 | 101 | 0.639357 |
acf547114f0b0ef3f1a08641227a0046711d1670 | 2,333 | py | Python | eregs_core/management/commands/import_reg.py | cfpb/eregs-2.0 | 820bd56fa5265ee25f88fe8dce4c7aa092e5d6a3 | [
"CC0-1.0"
] | 1 | 2019-02-25T21:43:36.000Z | 2019-02-25T21:43:36.000Z | eregs_core/management/commands/import_reg.py | cfpb/eregs-2.0 | 820bd56fa5265ee25f88fe8dce4c7aa092e5d6a3 | [
"CC0-1.0"
] | 46 | 2017-03-16T21:58:49.000Z | 2017-09-05T16:21:29.000Z | eregs_core/management/commands/import_reg.py | cfpb/eregs-2.0 | 820bd56fa5265ee25f88fe8dce4c7aa092e5d6a3 | [
"CC0-1.0"
] | 7 | 2017-06-07T14:56:46.000Z | 2021-02-20T10:50:48.000Z | import os
import glob
import time
from django.core.management import call_command
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = 'Import the specified regulation into the database.'
def add_arguments(self, parser):
parser.add_argument('-r', '--regml-root', help='RegML root',
required=True)
parser.add_argument('part_number', nargs='+')
def handle(self, *args, **options):
part_number = options['part_number']
regml_root = options['regml_root']
if not os.path.isdir(regml_root):
raise CommandError('{} is not a directory'.format(regml_root))
for part_number in options['part_number']:
self.handle_part(regml_root, part_number)
def handle_part(self, regml_root, part_number):
reg_path = os.path.join(regml_root, 'regulation', part_number)
files = glob.glob(reg_path + '/*.xml')
if not files:
raise ValueError('no files in {}'.format(reg_path))
print 'RegML root in {}'.format(regml_root)
print 'Importing the regulation texts for {}'.format(part_number)
start_time = time.clock()
for filename in files:
doc = os.path.split(filename)[1]
print 'Importing {}'.format(doc)
call_command('import_xml', filename)
diff_path = os.path.join(regml_root, 'diff', part_number)
print 'Importing diffs between every pair of notices in {}'.format(part_number)
files = glob.glob(diff_path + '/*.xml')
for diff_file in files:
split_version = os.path.split(diff_file)[-1].replace('.xml', '').split(':')
if len(split_version) != 4:
print('File named incorrectly! Cannot infer versions!\n Make sure that your file ' \
'is named <left_doc_number>:<left_effective_date>:<right_doc_number>:<right_effective_date>')
exit(0)
left_version = ':'.join(split_version[0:2])
right_version = ':'.join(split_version[2:])
print 'Importing diff between {} and {}'.format(left_version, right_version)
call_command('import_diff', diff_file)
end_time = time.clock()
print 'Import took {} seconds'.format(end_time - start_time)
| 40.224138 | 115 | 0.626232 |
acf5473002f3770d623ca0a54e67acd433247a1f | 10,212 | py | Python | Documentation/sphinx/automarkup.py | jainsakshi2395/linux | 7ccb860232bb83fb60cd6bcf5aaf0c008d903acb | [
"Linux-OpenIB"
] | 5 | 2020-07-08T01:35:16.000Z | 2021-04-12T16:35:29.000Z | kernel/Documentation/sphinx/automarkup.py | SFIP/SFIP | e428a425d2d0e287f23d49f3dd583617ebd2e4a3 | [
"Zlib"
] | 1 | 2021-01-27T01:29:47.000Z | 2021-01-27T01:29:47.000Z | kernel/Documentation/sphinx/automarkup.py | SFIP/SFIP | e428a425d2d0e287f23d49f3dd583617ebd2e4a3 | [
"Zlib"
] | null | null | null | # SPDX-License-Identifier: GPL-2.0
# Copyright 2019 Jonathan Corbet <corbet@lwn.net>
#
# Apply kernel-specific tweaks after the initial document processing
# has been done.
#
from docutils import nodes
import sphinx
from sphinx import addnodes
if sphinx.version_info[0] < 2 or \
sphinx.version_info[0] == 2 and sphinx.version_info[1] < 1:
from sphinx.environment import NoUri
else:
from sphinx.errors import NoUri
import re
from itertools import chain
#
# Python 2 lacks re.ASCII...
#
try:
ascii_p3 = re.ASCII
except AttributeError:
ascii_p3 = 0
#
# Regex nastiness. Of course.
# Try to identify "function()" that's not already marked up some
# other way. Sphinx doesn't like a lot of stuff right after a
# :c:func: block (i.e. ":c:func:`mmap()`s" flakes out), so the last
# bit tries to restrict matches to things that won't create trouble.
#
RE_function = re.compile(r'\b(([a-zA-Z_]\w+)\(\))', flags=ascii_p3)
#
# Sphinx 2 uses the same :c:type role for struct, union, enum and typedef
#
RE_generic_type = re.compile(r'\b(struct|union|enum|typedef)\s+([a-zA-Z_]\w+)',
flags=ascii_p3)
#
# Sphinx 3 uses a different C role for each one of struct, union, enum and
# typedef
#
RE_struct = re.compile(r'\b(struct)\s+([a-zA-Z_]\w+)', flags=ascii_p3)
RE_union = re.compile(r'\b(union)\s+([a-zA-Z_]\w+)', flags=ascii_p3)
RE_enum = re.compile(r'\b(enum)\s+([a-zA-Z_]\w+)', flags=ascii_p3)
RE_typedef = re.compile(r'\b(typedef)\s+([a-zA-Z_]\w+)', flags=ascii_p3)
#
# Detects a reference to a documentation page of the form Documentation/... with
# an optional extension
#
RE_doc = re.compile(r'(\bDocumentation/)?((\.\./)*[\w\-/]+)\.(rst|txt)')
RE_namespace = re.compile(r'^\s*..\s*c:namespace::\s*(\S+)\s*$')
#
# Reserved C words that we should skip when cross-referencing
#
Skipnames = [ 'for', 'if', 'register', 'sizeof', 'struct', 'unsigned' ]
#
# Many places in the docs refer to common system calls. It is
# pointless to try to cross-reference them and, as has been known
# to happen, somebody defining a function by these names can lead
# to the creation of incorrect and confusing cross references. So
# just don't even try with these names.
#
Skipfuncs = [ 'open', 'close', 'read', 'write', 'fcntl', 'mmap',
'select', 'poll', 'fork', 'execve', 'clone', 'ioctl',
'socket' ]
c_namespace = ''
def markup_refs(docname, app, node):
t = node.astext()
done = 0
repl = [ ]
#
# Associate each regex with the function that will markup its matches
#
markup_func_sphinx2 = {RE_doc: markup_doc_ref,
RE_function: markup_c_ref,
RE_generic_type: markup_c_ref}
markup_func_sphinx3 = {RE_doc: markup_doc_ref,
RE_function: markup_func_ref_sphinx3,
RE_struct: markup_c_ref,
RE_union: markup_c_ref,
RE_enum: markup_c_ref,
RE_typedef: markup_c_ref}
if sphinx.version_info[0] >= 3:
markup_func = markup_func_sphinx3
else:
markup_func = markup_func_sphinx2
match_iterators = [regex.finditer(t) for regex in markup_func]
#
# Sort all references by the starting position in text
#
sorted_matches = sorted(chain(*match_iterators), key=lambda m: m.start())
for m in sorted_matches:
#
# Include any text prior to match as a normal text node.
#
if m.start() > done:
repl.append(nodes.Text(t[done:m.start()]))
#
# Call the function associated with the regex that matched this text and
# append its return to the text
#
repl.append(markup_func[m.re](docname, app, m))
done = m.end()
if done < len(t):
repl.append(nodes.Text(t[done:]))
return repl
#
# In sphinx3 we can cross-reference to C macro and function, each one with its
# own C role, but both match the same regex, so we try both.
#
def markup_func_ref_sphinx3(docname, app, match):
class_str = ['c-func', 'c-macro']
reftype_str = ['function', 'macro']
cdom = app.env.domains['c']
#
# Go through the dance of getting an xref out of the C domain
#
base_target = match.group(2)
target_text = nodes.Text(match.group(0))
xref = None
possible_targets = [base_target]
# Check if this document has a namespace, and if so, try
# cross-referencing inside it first.
if c_namespace:
possible_targets.insert(0, c_namespace + "." + base_target)
if base_target not in Skipnames:
for target in possible_targets:
if target not in Skipfuncs:
for class_s, reftype_s in zip(class_str, reftype_str):
lit_text = nodes.literal(classes=['xref', 'c', class_s])
lit_text += target_text
pxref = addnodes.pending_xref('', refdomain = 'c',
reftype = reftype_s,
reftarget = target, modname = None,
classname = None)
#
# XXX The Latex builder will throw NoUri exceptions here,
# work around that by ignoring them.
#
try:
xref = cdom.resolve_xref(app.env, docname, app.builder,
reftype_s, target, pxref,
lit_text)
except NoUri:
xref = None
if xref:
return xref
return target_text
def markup_c_ref(docname, app, match):
class_str = {# Sphinx 2 only
RE_function: 'c-func',
RE_generic_type: 'c-type',
# Sphinx 3+ only
RE_struct: 'c-struct',
RE_union: 'c-union',
RE_enum: 'c-enum',
RE_typedef: 'c-type',
}
reftype_str = {# Sphinx 2 only
RE_function: 'function',
RE_generic_type: 'type',
# Sphinx 3+ only
RE_struct: 'struct',
RE_union: 'union',
RE_enum: 'enum',
RE_typedef: 'type',
}
cdom = app.env.domains['c']
#
# Go through the dance of getting an xref out of the C domain
#
base_target = match.group(2)
target_text = nodes.Text(match.group(0))
xref = None
possible_targets = [base_target]
# Check if this document has a namespace, and if so, try
# cross-referencing inside it first.
if c_namespace:
possible_targets.insert(0, c_namespace + "." + base_target)
if base_target not in Skipnames:
for target in possible_targets:
if not (match.re == RE_function and target in Skipfuncs):
lit_text = nodes.literal(classes=['xref', 'c', class_str[match.re]])
lit_text += target_text
pxref = addnodes.pending_xref('', refdomain = 'c',
reftype = reftype_str[match.re],
reftarget = target, modname = None,
classname = None)
#
# XXX The Latex builder will throw NoUri exceptions here,
# work around that by ignoring them.
#
try:
xref = cdom.resolve_xref(app.env, docname, app.builder,
reftype_str[match.re], target, pxref,
lit_text)
except NoUri:
xref = None
if xref:
return xref
return target_text
#
# Try to replace a documentation reference of the form Documentation/... with a
# cross reference to that page
#
def markup_doc_ref(docname, app, match):
stddom = app.env.domains['std']
#
# Go through the dance of getting an xref out of the std domain
#
absolute = match.group(1)
target = match.group(2)
if absolute:
target = "/" + target
xref = None
pxref = addnodes.pending_xref('', refdomain = 'std', reftype = 'doc',
reftarget = target, modname = None,
classname = None, refexplicit = False)
#
# XXX The Latex builder will throw NoUri exceptions here,
# work around that by ignoring them.
#
try:
xref = stddom.resolve_xref(app.env, docname, app.builder, 'doc',
target, pxref, None)
except NoUri:
xref = None
#
# Return the xref if we got it; otherwise just return the plain text.
#
if xref:
return xref
else:
return nodes.Text(match.group(0))
def get_c_namespace(app, docname):
source = app.env.doc2path(docname)
with open(source) as f:
for l in f:
match = RE_namespace.search(l)
if match:
return match.group(1)
return ''
def auto_markup(app, doctree, name):
global c_namespace
c_namespace = get_c_namespace(app, name)
#
# This loop could eventually be improved on. Someday maybe we
# want a proper tree traversal with a lot of awareness of which
# kinds of nodes to prune. But this works well for now.
#
# The nodes.literal test catches ``literal text``, its purpose is to
# avoid adding cross-references to functions that have been explicitly
# marked with cc:func:.
#
for para in doctree.traverse(nodes.paragraph):
for node in para.traverse(nodes.Text):
if not isinstance(node.parent, nodes.literal):
node.parent.replace(node, markup_refs(name, app, node))
def setup(app):
app.connect('doctree-resolved', auto_markup)
return {
'parallel_read_safe': True,
'parallel_write_safe': True,
}
| 34.734694 | 85 | 0.565022 |
acf547460a21142a3f54722c9eeeaf8e1c33e7a2 | 388 | py | Python | lecture/Lab_01-fastapi/app/utils.py | Spoons-tutorial/Server | ad699c4d3caf1bab4f3bfbb62888fc794684162a | [
"MIT"
] | 1 | 2022-01-29T05:56:32.000Z | 2022-01-29T05:56:32.000Z | lecture/Lab_01-fastapi/app/utils.py | Spoons-tutorial/Server | ad699c4d3caf1bab4f3bfbb62888fc794684162a | [
"MIT"
] | null | null | null | lecture/Lab_01-fastapi/app/utils.py | Spoons-tutorial/Server | ad699c4d3caf1bab4f3bfbb62888fc794684162a | [
"MIT"
] | 1 | 2022-03-20T05:01:21.000Z | 2022-03-20T05:01:21.000Z | import joblib
from sklearn.ensemble import RandomForestClassifier
def load_rf_clf(model_path: str) -> RandomForestClassifier:
"""경로를 읽어 RandomForestClassifier모델을 반환합니다.
Args:
model_path (str): 모델이 위치한 경로
Returns:
RandomForestClassifier: joblib.load를 통해 읽어온 RandomForestClassifier모델
"""
model = joblib.load(model_path)
return model | 25.866667 | 77 | 0.703608 |
acf5479a0efdaeb78ad5aa7afe91380a8e976a40 | 3,833 | py | Python | towhee/models/layers/multi_head_attention.py | krishnakatyal/towhee | c5e043aa1509cf46644ca6b53f691d6ed2647212 | [
"Apache-2.0"
] | null | null | null | towhee/models/layers/multi_head_attention.py | krishnakatyal/towhee | c5e043aa1509cf46644ca6b53f691d6ed2647212 | [
"Apache-2.0"
] | null | null | null | towhee/models/layers/multi_head_attention.py | krishnakatyal/towhee | c5e043aa1509cf46644ca6b53f691d6ed2647212 | [
"Apache-2.0"
] | null | null | null | # Original pytorch implementation by:
# 'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale'
# - https://arxiv.org/abs/2010.11929
# 'How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers'
# - https://arxiv.org/abs/2106.10270
#
# Built on top of codes from / Copyright 2020, Ross Wightman & Facebook, Inc. and its affiliates.
# Modifications & additions by / Copyright 2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torch import nn
class MultiHeadAttention(nn.Module):
"""
Multi-head attention layer.
Args:
dim (`int`):
number of features
num_heads (`int=8`):
number of heads
qkv_bias (`bool=False`):
if add bias to qkv layer
qk_scale (`float=None`):
number to scale qk
attn_drop_ratio (`float=0.`):
drop rate of attention layer
proj_drop_ratio (`float=0.`):
drop rate of projection layer
with_qkv (`bool=True`):
if use qkv layer
Example:
>>> import torch
>>> from towhee.models.layers.multi_head_attention import MultiHeadAttention
>>>
>>> test_shape = (1, 196+1, 768) # shape of output from patch_embed
>>> input_x = torch.rand(test_shape)
>>> model = MultiHeadAttention(dim=test_shape[2])
>>> out = model.forward(input_x)
>>> print(out.shape)
torch.Size([1, 197, 768])
"""
def __init__(self,
dim,
num_heads=8,
qkv_bias=False,
qk_scale=None,
attn_drop_ratio=0.,
proj_drop_ratio=0.,
with_qkv=True):
super().__init__()
self.num_heads = num_heads
self.head_dim = dim // num_heads
self.scale = qk_scale or self.head_dim ** -0.5
self.with_qkv = with_qkv
if self.with_qkv:
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop_ratio)
self.attn_drop = nn.Dropout(attn_drop_ratio)
def forward(self, x):
batch_size, new_num_patch, dim = x.shape
if self.with_qkv:
qkv = self.qkv(x).reshape(
batch_size,
new_num_patch,
3,
self.num_heads,
self.head_dim,
).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
else:
qkv = x.reshape(batch_size, new_num_patch, self.num_heads, dim // self.num_heads).permute(0, 2, 1, 3)
q, k, v = qkv, qkv, qkv
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(batch_size, new_num_patch, dim)
if self.with_qkv:
x = self.proj(x)
x = self.proj_drop(x)
return x
# if __name__ == '__main__':
# import torch
#
# test_shape = (1, 196+1, 768)
# input_x = torch.rand(test_shape) # shape of output from patch_embed
# model = MultiHeadAttention(dim=test_shape[2])
# out = model.forward(input_x)
#
# assert(out.shape == (1, 197, 768))
| 34.845455 | 113 | 0.589095 |
acf547cfb29b50c134c0e07643b64b0c2a8f04df | 21,663 | py | Python | rate_limit/rl_wallet.py | nondejus/wallets | 44c643bbc4145ce010788bc44562cfaa62f4e46f | [
"Apache-2.0"
] | 2 | 2021-07-11T11:39:21.000Z | 2021-12-21T18:56:35.000Z | rate_limit/rl_wallet.py | nondejus/wallets | 44c643bbc4145ce010788bc44562cfaa62f4e46f | [
"Apache-2.0"
] | null | null | null | rate_limit/rl_wallet.py | nondejus/wallets | 44c643bbc4145ce010788bc44562cfaa62f4e46f | [
"Apache-2.0"
] | 1 | 2021-12-21T18:54:29.000Z | 2021-12-21T18:54:29.000Z | from chiasim.atoms import hexbytes
from standard_wallet.wallet import *
import clvm
from chiasim.hashable import Program, ProgramHash, CoinSolution, SpendBundle, BLSSignature
from binascii import hexlify
from chiasim.hashable.Coin import Coin
from chiasim.hashable.CoinSolution import CoinSolutionList
from clvm_tools import binutils
from chiasim.wallet.BLSPrivateKey import BLSPrivateKey
from chiasim.validation.Conditions import ConditionOpcode
from chiasim.puzzles.p2_delegated_puzzle import puzzle_for_pk
import math
# RLWallet is subclass of Wallet
class RLWallet(Wallet):
def __init__(self):
self.aggregation_coins = set()
self.rl_parent = None
self.rl_coin = None
self.interval = 0
self.limit = 0
self.rl_origin = None
self.pubkey_orig = None
self.current_rl_balance = 0
self.rl_index = 0
self.tip_index = 0
self.all_rl_additions = {}
self.all_rl_deletions = {}
self.rl_clawback_pk = None
self.clawback_limit = 0
self.clawback_interval = 0
self.clawback_origin = None
self.clawback_pk = None
self.clawback_puzzlehash = None
self.rl_receiver_pk = None
self.latest_clawback_coin = None
super().__init__()
return
def set_origin(self, origin):
#In tests Coin object is passed, in runnable it's a dictionary
if isinstance(origin, Coin):
self.rl_origin = origin.name()
else:
self.rl_origin = origin["name"]
self.rl_parent = origin
def rl_available_balance(self):
if self.rl_coin is None:
return 0
unlocked = int(((self.tip_index - self.rl_index) / self.interval)) * self.limit
total_amount = self.rl_coin.amount
available_amount = min(unlocked, total_amount)
return available_amount
def notify(self, additions, deletions, index):
super().notify(additions, deletions)
self.tip_index = index
self.rl_notify(additions, deletions, index)
spend_bundle_list = self.ac_notify(additions)
return spend_bundle_list
    def rl_notify(self, additions, deletions, index):
        """Track the rate-limited coin's lifecycle across a block's coins.

        Additions: dedupe against `all_rl_additions`, capture clawback coins,
        and adopt any coin whose puzzle hash matches one of our RL puzzles as
        the new `rl_coin`. Deletions: dedupe against `all_rl_deletions` and
        debit the RL balance when our RL coin is spent.

        Args:
            additions: coins created in this block.
            deletions: coins spent in this block.
            index: block height, recorded as `rl_index` when the RL coin moves.
        """
        for coin in additions:
            # Skip coins we have already processed in an earlier notify.
            if coin.name() in self.all_rl_additions:
                continue
            # A coin paying to the clawback puzzle is remembered separately
            # and deliberately NOT recorded in all_rl_additions.
            if coin.puzzle_hash == self.clawback_puzzlehash:
                self.latest_clawback_coin = coin
                continue
            self.all_rl_additions[coin.name()] = coin
            if self.can_generate_rl_puzzle_hash(coin.puzzle_hash):
                self.current_rl_balance += coin.amount
                if self.rl_coin:
                    # Chain continues: the previous RL coin becomes the parent.
                    self.rl_parent = self.rl_coin
                else:
                    # First RL coin we see: its parent is the chain origin.
                    self.rl_origin = coin.parent_coin_info
                self.rl_coin = coin
                self.rl_index = index
        for coin in deletions:
            # Once there is no RL coin left, later deletions in this block are
            # not recorded either — NOTE(review): remaining deletions skip the
            # all_rl_deletions dedupe cache; confirm this is intentional.
            if self.rl_coin is None:
                break
            if coin.name() in self.all_rl_deletions:
                continue
            self.all_rl_deletions[coin.name()] = coin
            if coin.puzzle_hash == self.rl_coin.puzzle_hash:
                self.current_rl_balance -= coin.amount
                if self.current_rl_balance == 0:
                    # RL coin fully spent: drop the tracked lineage.
                    self.rl_coin = None
                    self.rl_origin = None
                    self.rl_parent = None
                    #TODO clean/reset all state so that new rl coin can be received again
def ac_notify(self, additions):
if self.rl_coin is None:
return # prevent unnecessary searching
spend_bundle_list = []
for coin in additions:
if ProgramHash(self.rl_make_aggregation_puzzle(self.rl_coin.puzzle_hash)) == coin.puzzle_hash:
self.aggregation_coins.add(coin)
spend_bundle = self.rl_generate_signed_aggregation_transaction()
spend_bundle_list.append(spend_bundle)
if spend_bundle_list:
return spend_bundle_list
else:
return None
def can_generate_rl_puzzle_hash(self, hash):
if self.rl_origin is None:
return None
if self.rl_clawback_pk is None:
return None
return any(map(lambda child: hash == ProgramHash(self.rl_puzzle_for_pk(
self.extended_secret_key.public_child(child).get_public_key().serialize(), self.limit, self.interval,
self.rl_origin, self.rl_clawback_pk)),
reversed(range(self.next_address))))
    # Solution to this puzzle must be in format:
    # (1 my_parent_id, my_puzzlehash, my_amount, outgoing_puzzle_hash, outgoing_amount, min_block_time, parent_parent_id, parent_amount)
    # RATE LIMIT LOGIC:
    # M - chia_per_interval
    # N - interval_blocks
    # V - amount being spent
    # MIN_BLOCK_AGE = V / (M / N)
    # if not (min_block_age * M >= V * N) do X (raise)
    # ASSERT_COIN_BLOCK_AGE_EXCEEDS min_block_age
    def rl_puzzle_for_pk(self, pubkey, rate_amount, interval_time, origin_id, clawback_pk):
        """Assemble the rate-limited coin puzzle as a CLVM Program.

        The puzzle dispatches on the first solution argument:
        mode 3 -> clawback path (signature check against `clawback_pk` only);
        mode 1 -> rate-limited spend (lineage + block-age checks);
        otherwise -> mode two, consolidation with an aggregation coin.

        Args:
            pubkey: serialized owner public key embedded in the AGG_SIG check.
            rate_amount: mojos unlocked per interval (M).
            interval_time: interval length in blocks (N).
            origin_id: id of the origin coin anchoring the RL chain.
            clawback_pk: public key allowed to claw the coin back.

        Returns:
            Program | None: the assembled puzzle, or None when no origin id
            is supplied.
        """
        hex_pk = hexbytes(pubkey)
        opcode_aggsig = hexlify(ConditionOpcode.AGG_SIG).decode('ascii')
        opcode_coin_block_age = hexlify(ConditionOpcode.ASSERT_BLOCK_AGE_EXCEEDS).decode('ascii')
        opcode_create = hexlify(ConditionOpcode.CREATE_COIN).decode('ascii')
        opcode_myid = hexlify(ConditionOpcode.ASSERT_MY_COIN_ID).decode('ascii')
        if (not origin_id):
            return None
        # Recompute this coin's parent id from (parent_parent_id, puzzlehash,
        # parent_amount) supplied in the solution.
        TEMPLATE_MY_PARENT_ID = "(sha256 (f (r (r (r (r (r (r (a)))))))) (f (r (a))) (uint64 (f (r (r (r (r (r (r (r (a)))))))))))"
        # Lineage check: parent must either be another RL coin (same puzzle)
        # or the declared origin coin.
        TEMPLATE_SINGLETON_RL = f"((c (i (i (= {TEMPLATE_MY_PARENT_ID} (f (a))) (q 1) (= (f (a)) (q 0x{origin_id}))) (q (c (q 1) (q ()))) (q (x (q \"Parent doesnt satisfy RL conditions\")))) (a)))"
        # Rate check: require min_block_time * rate_amount >= out_amount *
        # interval_time, then assert the coin's block age exceeds it.
        # NOTE(review): in the `>` branch the operands appear split across
        # parens — "(* (f ...)))) (q {interval_time})" — confirm this CLVM
        # is well-formed before reuse.
        TEMPLATE_BLOCK_AGE = f"((c (i (i (= (* (f (r (r (r (r (r (a))))))) (q {rate_amount})) (* (f (r (r (r (r (a)))))) (q {interval_time}))) (q 1) (q (> (* (f (r (r (r (r (r (a))))))) (q {rate_amount})) (* (f (r (r (r (r (a))))))) (q {interval_time})))) (q (c (q 0x{opcode_coin_block_age}) (c (f (r (r (r (r (r (a))))))) (q ())))) (q (x (q \"wrong min block time\")))) (a) ))"
        # Bind the spend to this exact coin id (parent, puzzlehash, amount).
        TEMPLATE_MY_ID = f"(c (q 0x{opcode_myid}) (c (sha256 (f (a)) (f (r (a))) (uint64 (f (r (r (a)))))) (q ())))"
        # Change back to ourselves: my_amount - out_amount.
        CREATE_CHANGE = f"(c (q 0x{opcode_create}) (c (f (r (a))) (c (- (f (r (r (a)))) (f (r (r (r (r (a))))))) (q ()))))"
        # The outgoing payment coin.
        CREATE_NEW_COIN = f"(c (q 0x{opcode_create}) (c (f (r (r (r (a))))) (c (f (r (r (r (r (a)))))) (q ()))))"
        RATE_LIMIT_PUZZLE = f"(c {TEMPLATE_SINGLETON_RL} (c {TEMPLATE_BLOCK_AGE} (c {CREATE_CHANGE} (c {TEMPLATE_MY_ID} (c {CREATE_NEW_COIN} (q ()))))))"
        # Mode-two variants read their arguments from different solution
        # positions (consolidation with an aggregation coin).
        TEMPLATE_MY_PARENT_ID_2 = "(sha256 (f (r (r (r (r (r (r (r (r (a)))))))))) (f (r (a))) (uint64 (f (r (r (r (r (r (r (r (a)))))))))))"
        TEMPLATE_SINGLETON_RL_2 = f"((c (i (i (= {TEMPLATE_MY_PARENT_ID_2} (f (r (r (r (r (r (a)))))))) (q 1) (= (f (r (r (r (r (r (a))))))) (q 0x{origin_id}))) (q (c (q 1) (q ()))) (q (x (q \"Parent doesnt satisfy RL conditions\")))) (a)))"
        # New RL coin worth my_amount + consolidating coin's amount.
        CREATE_CONSOLIDATED = f"(c (q 0x{opcode_create}) (c (f (r (a))) (c (+ (f (r (r (r (r (a)))))) (f (r (r (r (r (r (r (a))))))))) (q ()))))"
        MODE_TWO_ME_STRING = f"(c (q 0x{opcode_myid}) (c (sha256 (f (r (r (r (r (r (a))))))) (f (r (a))) (uint64 (f (r (r (r (r (r (r (a)))))))))) (q ())))"
        # Zero-value lock coin proving the consolidating coin was consumed.
        CREATE_LOCK = f"(c (q 0x{opcode_create}) (c (sha256 (wrap (c (q 7) (c (c (q 5) (c (c (q 1) (c (sha256 (f (r (r (a)))) (f (r (r (r (a))))) (uint64 (f (r (r (r (r (a)))))))) (q ()))) (c (q (q ())) (q ())))) (q ()))))) (c (uint64 (q 0)) (q ()))))"
        MODE_TWO = f"(c {TEMPLATE_SINGLETON_RL_2} (c {MODE_TWO_ME_STRING} (c {CREATE_LOCK} (c {CREATE_CONSOLIDATED} (q ())))))"
        # Every non-clawback spend must be signed over the whole solution.
        AGGSIG_ENTIRE_SOLUTION = f"(c (q 0x{opcode_aggsig}) (c (q 0x{hex_pk}) (c (sha256 (wrap (a))) (q ()))))"
        WHOLE_PUZZLE = f"(c {AGGSIG_ENTIRE_SOLUTION} ((c (i (= (f (a)) (q 1)) (q ((c (q {RATE_LIMIT_PUZZLE}) (r (a))))) (q {MODE_TWO})) (a))) (q ()))"
        # Clawback path: only requires a signature from the clawback key.
        CLAWBACK = f"(c (c (q 0x{opcode_aggsig}) (c (q 0x{clawback_pk}) (c (sha256 (wrap (a))) (q ())))) (r (a)))"
        WHOLE_PUZZLE_WITH_CLAWBACK = f"((c (i (= (f (a)) (q 3)) (q {CLAWBACK}) (q {WHOLE_PUZZLE})) (a)))"
        return Program(binutils.assemble(WHOLE_PUZZLE_WITH_CLAWBACK))
def rl_make_aggregation_puzzle(self, wallet_puzzle):
"""Build the puzzle locking funds for later aggregation into the RL coin at *wallet_puzzle*."""
# If Wallet A wants to send further funds to Wallet B then they can lock them up using this code
# Solution will be (my_id wallet_coin_primary_input wallet_coin_amount)
opcode_myid = hexlify(ConditionOpcode.ASSERT_MY_COIN_ID).decode('ascii')
opcode_consumed = hexlify(ConditionOpcode.ASSERT_COIN_CONSUMED).decode('ascii')
# Condition asserting that the spent coin's id equals the first solution argument.
me_is_my_id = f"(c (q 0x{opcode_myid}) (c (f (a)) (q ())))"
# lock_puzzle is the hash of '(r (c (q "merge in ID") (q ())))'
lock_puzzle = "(sha256 (wrap (c (q 7) (c (c (q 5) (c (c (q 1) (c (f (a)) (q ()))) (c (q (q ())) (q ())))) (q ())))))"
# Wallet coin id recomputed from (parent, wallet_puzzle, amount) given in the solution.
parent_coin_id = f"(sha256 (f (r (a))) (q 0x{wallet_puzzle}) (uint64 (f (r (r (a))))))"
# Condition requiring that the zero-value lock coin is consumed in the same block.
input_of_lock = f"(c (q 0x{opcode_consumed}) (c (sha256 {parent_coin_id} {lock_puzzle} (uint64 (q 0))) (q ())))"
puz = f"(c {me_is_my_id} (c {input_of_lock} (q ())))"
return Program(binutils.assemble(puz))
# Solution is (1 my_parent_id, my_puzzlehash, my_amount, outgoing_puzzle_hash, outgoing_amount, min_block_time, parent_parent_id, parent_amount)
# min block time = Math.ceil((new_amount * self.interval) / self.limit)
def solution_for_rl(self, my_parent_id, my_puzzlehash, my_amount, out_puzzlehash, out_amount, my_parent_parent_id,
parent_amount):
"""Assemble the mode-1 (ordinary rate-limited spend) solution for the RL puzzle."""
# Smallest number of blocks that keeps out_amount within the configured rate limit.
min_block_count = math.ceil((out_amount * self.interval) / self.limit)
solution = f"(1 0x{my_parent_id} 0x{my_puzzlehash} {my_amount} 0x{out_puzzlehash} {out_amount} {min_block_count} 0x{my_parent_parent_id} {parent_amount})"
return Program(binutils.assemble(solution))
def rl_make_solution_mode_2(self, my_puzzle_hash, consolidating_primary_input, consolidating_coin_puzzle_hash,
outgoing_amount, my_primary_input, incoming_amount, parent_amount, my_parent_parent_id):
"""Assemble the mode-2 (aggregation/consolidation) solution for the RL puzzle."""
# Hex-encode the byte arguments that are spliced into the solution string below.
my_puzzle_hash = hexlify(my_puzzle_hash).decode('ascii')
consolidating_primary_input = hexlify(consolidating_primary_input).decode('ascii')
consolidating_coin_puzzle_hash = hexlify(consolidating_coin_puzzle_hash).decode('ascii')
primary_input = hexlify(my_primary_input).decode('ascii')
# NOTE(review): my_parent_parent_id is interpolated without hexlify — presumably
# callers pass it already hex-encoded; verify against call sites.
sol = f"(2 0x{my_puzzle_hash} 0x{consolidating_primary_input} 0x{consolidating_coin_puzzle_hash} {outgoing_amount} 0x{primary_input} {incoming_amount} {parent_amount} 0x{my_parent_parent_id})"
return Program(binutils.assemble(sol))
def make_clawback_solution(self, puzzlehash, amount):
"""Assemble the mode-3 (clawback) solution: create a coin of *amount* at *puzzlehash*."""
opcode_create = hexlify(ConditionOpcode.CREATE_COIN).decode('ascii')
solution = f"(3 (0x{opcode_create} 0x{puzzlehash} {amount}))"
return Program(binutils.assemble(solution))
def rl_make_aggregation_solution(self, myid, wallet_coin_primary_input, wallet_coin_amount):
"""Assemble the solution for the aggregation puzzle (see rl_make_aggregation_puzzle)."""
# Despite its name, opcode_myid holds the hex of the aggregated coin's id, not an opcode.
opcode_myid = hexlify(myid).decode('ascii')
primary_input = hexlify(wallet_coin_primary_input).decode('ascii')
sol = f"(0x{opcode_myid} 0x{primary_input} {wallet_coin_amount})"
return Program(binutils.assemble(sol))
def get_keys(self, hash):
    """Return the (public key, secret key) pair whose RL puzzle hashes to *hash*.

    Tries the parent-class lookup first, then scans derived child keys from
    newest to oldest. Returns None implicitly when nothing matches.
    Note: the parameter name shadows the builtin ``hash`` but is kept for
    keyword-call compatibility.
    """
    found = super().get_keys(hash)
    if found is not None:
        return found
    for index in reversed(range(self.next_address)):
        pubkey = self.extended_secret_key.public_child(index).get_public_key()
        candidate = ProgramHash(
            self.rl_puzzle_for_pk(pubkey.serialize(), self.limit, self.interval, self.rl_origin, self.rl_clawback_pk))
        if hash == candidate:
            return pubkey, self.extended_secret_key.private_child(index).get_private_key()
def get_keys_pk(self, clawback_pubkey):
    """Return the (public key, secret key) child pair whose serialized public key
    equals *clawback_pubkey*; scans newest child first, returns None if absent."""
    for index in reversed(range(self.next_address)):
        candidate = self.extended_secret_key.public_child(index).get_public_key()
        if hexbytes(candidate.serialize()) == clawback_pubkey:
            return candidate, self.extended_secret_key.private_child(index).get_private_key()
# This is for spending from received RL coin, not creating a new RL coin
def rl_generate_unsigned_transaction(self, to_puzzlehash, amount):
    """Build the unsigned mode-1 spend of the current rate-limited coin.

    Returns a single-element list of (puzzle, CoinSolution) pairs suitable
    for rl_sign_transaction.
    """
    coin = self.rl_coin
    puzzle_hash = coin.puzzle_hash
    pubkey, secretkey = self.get_keys(puzzle_hash)
    puzzle = self.rl_puzzle_for_pk(pubkey.serialize(), self.limit, self.interval, self.rl_origin, self.rl_clawback_pk)
    # The parent may be a Coin object or a plain dict; read its fields either way.
    if isinstance(self.rl_parent, Coin):
        parent_parent_id = self.rl_parent.parent_coin_info
        parent_amount = self.rl_parent.amount
    else:
        parent_parent_id = self.rl_parent["parent_coin_info"]
        parent_amount = self.rl_parent["amount"]
    solution = self.solution_for_rl(coin.parent_coin_info, puzzle_hash, coin.amount, to_puzzlehash, amount,
                                    parent_parent_id, parent_amount)
    return [(puzzle, CoinSolution(coin, solution))]
def rl_generate_signed_transaction(self, amount, to_puzzle_hash):
    """Create and sign a mode-1 spend; returns None when *amount* exceeds the RL coin value."""
    if amount > self.rl_coin.amount:
        return None
    return self.rl_sign_transaction(self.rl_generate_unsigned_transaction(to_puzzle_hash, amount))
def rl_sign_transaction(self, spends: (Program, [CoinSolution])):
    """Sign each spend with the key matching its coin's puzzle hash and bundle them.

    Returns a SpendBundle combining every CoinSolution with one aggregated
    BLS signature.
    """
    sigs = []
    for _puzzle, coin_solution in spends:
        _pubkey, secret = self.get_keys(coin_solution.coin.puzzle_hash)
        sigs.append(BLSPrivateKey(secret).sign(ProgramHash(Program(coin_solution.solution))))
    aggsig = BLSSignature.aggregate(sigs)
    solution_list = CoinSolutionList(
        [CoinSolution(cs.coin, clvm.to_sexp_f([puz, cs.solution]))
         for (puz, cs) in spends])
    return SpendBundle(solution_list, aggsig)
def generate_unsigned_clawback_transaction(self):
    """Build the unsigned mode-3 (clawback) spend of the latest clawback coin,
    sending its full amount to a fresh puzzle hash of this wallet."""
    coin = self.latest_clawback_coin
    puzzle = self.rl_puzzle_for_pk(self.rl_receiver_pk, self.clawback_limit, self.clawback_interval, self.clawback_origin, self.clawback_pk)
    solution = self.make_clawback_solution(self.get_new_puzzlehash(), coin.amount)
    return [(puzzle, CoinSolution(coin, solution))]
def sign_clawback_transaction(self, spends: (Program, [CoinSolution]), clawback_pubkey):
    """Sign every spend with the clawback key and bundle them.

    Returns a SpendBundle pairing each (puzzle, CoinSolution) with one
    aggregated BLS signature produced by the key matching *clawback_pubkey*.
    """
    # The clawback key pair is independent of the individual spend, so resolve
    # it once instead of re-scanning all child keys on every loop iteration.
    pubkey, secretkey = self.get_keys_pk(clawback_pubkey)
    signing_key = BLSPrivateKey(secretkey)
    sigs = [signing_key.sign(ProgramHash(Program(solution.solution)))
            for _puzzle, solution in spends]
    aggsig = BLSSignature.aggregate(sigs)
    solution_list = CoinSolutionList(
        [CoinSolution(coin_solution.coin, clvm.to_sexp_f([puzzle, coin_solution.solution])) for
         (puzzle, coin_solution) in spends])
    return SpendBundle(solution_list, aggsig)
def clawback_rl_coin(self):
    """Generate and sign a clawback spend using this wallet's clawback key."""
    unsigned = self.generate_unsigned_clawback_transaction()
    if unsigned is None:
        return None
    return self.sign_clawback_transaction(unsigned, self.clawback_pk)
# This is for using the AC locked coin and aggregating it into wallet - must happen in same block as RL Mode 2
def rl_generate_signed_aggregation_transaction(self):
    """Spend one aggregation ("AC locked") coin into the RL coin.

    Builds three coin solutions: the RL coin spent in mode 2, the
    consolidating coin spent with the aggregation puzzle, and the
    zero-value lock coin. Returns a SpendBundle, or None when there is
    nothing to aggregate.
    """
    # Fixed: the guard used to be `self.aggregation_coins is False`, which is
    # never true for a set object, so an empty set fell through to pop() and
    # raised KeyError. Test emptiness by truthiness as the comment intended.
    if not self.aggregation_coins:
        return
    consolidating_coin = self.aggregation_coins.pop()
    pubkey, secretkey = self.get_keys(self.rl_coin.puzzle_hash)
    # Spend wallet coin
    puzzle = self.rl_puzzle_for_pk(pubkey.serialize(), self.limit, self.interval, self.rl_origin, self.rl_clawback_pk)
    # The parent may be a Coin object or a plain dict; read its fields either way.
    if isinstance(self.rl_parent, Coin):
        parent_amount = self.rl_parent.amount
        parent_parent_id = self.rl_parent.parent_coin_info
    else:
        parent_amount = self.rl_parent["amount"]
        parent_parent_id = self.rl_parent["parent_coin_info"]
    solution = self.rl_make_solution_mode_2(self.rl_coin.puzzle_hash, consolidating_coin.parent_coin_info,
                                            consolidating_coin.puzzle_hash, consolidating_coin.amount,
                                            self.rl_coin.parent_coin_info, self.rl_coin.amount,
                                            parent_amount, parent_parent_id)
    signature = BLSPrivateKey(secretkey).sign(ProgramHash(solution))
    list_of_coinsolutions = [CoinSolution(self.rl_coin, clvm.to_sexp_f([puzzle, solution]))]
    # Spend consolidating coin
    puzzle = self.rl_make_aggregation_puzzle(self.rl_coin.puzzle_hash)
    solution = self.rl_make_aggregation_solution(consolidating_coin.name(),
                                                 self.rl_coin.parent_coin_info,
                                                 self.rl_coin.amount)
    list_of_coinsolutions.append(CoinSolution(consolidating_coin, clvm.to_sexp_f([puzzle, solution])))
    # Spend lock
    puzstring = "(r (c (q 0x" + hexlify(consolidating_coin.name()).decode('ascii') + ") (q ())))"
    puzzle = Program(binutils.assemble(puzstring))
    solution = Program(binutils.assemble("()"))
    list_of_coinsolutions.append(CoinSolution(Coin(self.rl_coin, ProgramHash(puzzle), 0),
                                              clvm.to_sexp_f([puzzle, solution])))
    aggsig = BLSSignature.aggregate([signature])
    return SpendBundle(CoinSolutionList(list_of_coinsolutions), aggsig)
def get_puzzle_for_pk(self, pubkey):
    """Return the puzzle produced by puzzle_for_pk for *pubkey*."""
    return puzzle_for_pk(pubkey)
def get_puzzlehash_for_pk(self, pubkey):
    """Return the ProgramHash of the puzzle for *pubkey*."""
    return ProgramHash(self.get_puzzle_for_pk(pubkey))
def rl_get_aggregation_puzzlehash(self, wallet_puzzle):
    """Return the hash of the aggregation puzzle bound to *wallet_puzzle*."""
    aggregation_puzzle = self.rl_make_aggregation_puzzle(wallet_puzzle)
    return ProgramHash(aggregation_puzzle)
# We need to select origin primary input
def select_coins(self, amount, origin_name=None):
    """Pick a set of temporary UTXOs totalling at least *amount*.

    When *origin_name* is given, any coin whose name matches is always
    included (without removing it from the pool). Returns None when
    self.temp_balance cannot cover *amount*; otherwise a set of coins
    whose amounts sum to >= amount. Zero-value coins are skipped.
    """
    if amount > self.temp_balance:
        return None
    used_utxos = set()
    if origin_name is not None:
        for coin in self.temp_utxos.copy():
            if str(coin.name()) == str(origin_name):
                used_utxos.add(coin)
    while sum(coin.amount for coin in used_utxos) < amount:
        tmp = self.temp_utxos.pop()
        # Value comparison (`!= 0`) instead of the original `is not 0`,
        # which relied on CPython's small-int identity caching and raises
        # a SyntaxWarning on modern Python.
        if tmp.amount != 0:
            used_utxos.add(tmp)
    return used_utxos
def generate_unsigned_transaction_with_origin(self, amount, newpuzzlehash, origin_name):
"""Build unsigned spends sending *amount* to *newpuzzlehash*, paying from the coin named *origin_name*.

The origin coin creates the payment (plus change) outputs; every other
selected coin only asserts its own consumption. Returns None when the
temporary balance is too low. Side effects: adds the change coin to
self.temp_utxos and decrements self.temp_balance.
"""
if self.temp_balance < amount:
return None # TODO: Should we throw a proper error here, or just return None?
utxos = self.select_coins(amount, origin_name)
spends = []
spend_value = sum([coin.amount for coin in utxos])
change = spend_value - amount
for coin in utxos:
puzzle_hash = coin.puzzle_hash
pubkey, secretkey = self.get_keys(puzzle_hash)
puzzle = self.puzzle_for_pk(pubkey.serialize())
# Only the origin coin carries the primary outputs.
if str(origin_name) == str(coin.name()):
primaries = [{'puzzlehash': newpuzzlehash, 'amount': amount}]
if change > 0:
changepuzzlehash = self.get_new_puzzlehash()
primaries.append(
{'puzzlehash': changepuzzlehash, 'amount': change})
# add change coin into temp_utxo set
self.temp_utxos.add(Coin(coin, changepuzzlehash, change))
solution = make_solution(primaries=primaries)
else:
solution = make_solution(consumed=[coin.name()])
spends.append((puzzle, CoinSolution(coin, solution)))
self.temp_balance -= amount
return spends
def generate_signed_transaction_with_origin(self, amount, newpuzzlehash, origin_name):
    """Build and sign a spend of *amount* to *newpuzzlehash* paid from the origin coin."""
    unsigned = self.generate_unsigned_transaction_with_origin(amount, newpuzzlehash, origin_name)
    if unsigned is None:
        return None  # TODO: Should we throw a proper error here, or just return None?
    return self.sign_transaction(unsigned)
"""
Copyright 2018 Chia Network Inc
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
""" | 53.488889 | 378 | 0.631168 |
acf548c2a4d6840bbbf930968cd7b622d3c64aa8 | 152 | py | Python | test.py | sebastiankeshuqi/Apply-Classification-Tree-to-Red-Wine-Quality | f1e6f575d0f351bc5b3a5171bd2afbe3b8725bf1 | [
"MIT"
] | 1 | 2021-06-14T07:02:02.000Z | 2021-06-14T07:02:02.000Z | test.py | sebastiankeshuqi/Apply-Classification-Tree-to-Red-Wine-Quality | f1e6f575d0f351bc5b3a5171bd2afbe3b8725bf1 | [
"MIT"
] | null | null | null | test.py | sebastiankeshuqi/Apply-Classification-Tree-to-Red-Wine-Quality | f1e6f575d0f351bc5b3a5171bd2afbe3b8725bf1 | [
"MIT"
] | null | null | null | from math import exp,factorial
# P(X <= 6) for X ~ Binomial(n=100, p=0.05), accumulated term by term.
res = sum(
    factorial(100)/factorial(i)/factorial(100-i)*(0.05**i)*(0.95**(100-i))
    for i in range(7)
)
print(1-res) | 30.4 | 81 | 0.657895 |
acf549633e34b5d709ed0b601988bd271d62d664 | 5,500 | py | Python | openaerostruct/tests/test_aero_analysis.py | carlosferpereira/OpenAeroStruct | 35e1ff8aac5c67e40b1925829cfbc203ba1b2f2d | [
"Apache-2.0"
] | null | null | null | openaerostruct/tests/test_aero_analysis.py | carlosferpereira/OpenAeroStruct | 35e1ff8aac5c67e40b1925829cfbc203ba1b2f2d | [
"Apache-2.0"
] | null | null | null | openaerostruct/tests/test_aero_analysis.py | carlosferpereira/OpenAeroStruct | 35e1ff8aac5c67e40b1925829cfbc203ba1b2f2d | [
"Apache-2.0"
] | 1 | 2021-04-09T16:45:27.000Z | 2021-04-09T16:45:27.000Z | from openmdao.utils.assert_utils import assert_rel_error
import unittest
import numpy as np
from openaerostruct.geometry.utils import generate_mesh
from openaerostruct.geometry.geometry_group import Geometry
from openaerostruct.aerodynamics.aero_groups import AeroPoint
import openmdao.api as om
class Test(unittest.TestCase):
def test(self):
"""Regression test: build a one-point CRM wing aerodynamic model, run it, and check CD/CL/CM."""
# Create a dictionary to store options about the surface
mesh_dict = {'num_y' : 7,
'num_x' : 3,
'wing_type' : 'CRM',
'symmetry' : True,
'num_twist_cp' : 5}
mesh, twist_cp = generate_mesh(mesh_dict)
surf_dict = {
# Wing definition
'name' : 'wing', # name of the surface
'symmetry' : True, # if true, model one half of wing
# reflected across the plane y = 0
'S_ref_type' : 'wetted', # how we compute the wing area,
# can be 'wetted' or 'projected'
'fem_model_type' : 'tube',
'twist_cp' : twist_cp,
'mesh' : mesh,
# Aerodynamic performance of the lifting surface at
# an angle of attack of 0 (alpha=0).
# These CL0 and CD0 values are added to the CL and CD
# obtained from aerodynamic analysis of the surface to get
# the total CL and CD.
# These CL0 and CD0 values do not vary wrt alpha.
'CL0' : 0.0, # CL of the surface at alpha=0
'CD0' : 0.015, # CD of the surface at alpha=0
# Airfoil properties for viscous drag calculation
'k_lam' : 0.05, # percentage of chord with laminar
# flow, used for viscous drag
't_over_c_cp' : np.array([0.15]), # thickness over chord ratio (NACA0015)
'c_max_t' : .303, # chordwise location of maximum (NACA0015)
# thickness
'with_viscous' : True, # if true, compute viscous drag
'with_wave' : False, # if true, compute wave drag
}
surfaces = [surf_dict]
# Create the problem and the model group
prob = om.Problem()
indep_var_comp = om.IndepVarComp()
indep_var_comp.add_output('v', val=248.136, units='m/s')
indep_var_comp.add_output('alpha', val=5., units='deg')
indep_var_comp.add_output('Mach_number', val=0.84)
indep_var_comp.add_output('re', val=1.e6, units='1/m')
indep_var_comp.add_output('rho', val=0.38, units='kg/m**3')
indep_var_comp.add_output('cg', val=np.zeros((3)), units='m')
prob.model.add_subsystem('prob_vars',
indep_var_comp,
promotes=['*'])
# Loop over each surface in the surfaces list
for surface in surfaces:
geom_group = Geometry(surface=surface)
# Add tmp_group to the problem as the name of the surface.
# Note that is a group and performance group for each
# individual surface.
prob.model.add_subsystem(surface['name'], geom_group)
# Loop through and add a certain number of aero points
for i in range(1):
# Create the aero point group and add it to the model
aero_group = AeroPoint(surfaces=surfaces)
point_name = 'aero_point_{}'.format(i)
prob.model.add_subsystem(point_name, aero_group)
# Connect flow properties to the analysis point
prob.model.connect('v', point_name + '.v')
prob.model.connect('alpha', point_name + '.alpha')
prob.model.connect('Mach_number', point_name + '.Mach_number')
prob.model.connect('re', point_name + '.re')
prob.model.connect('rho', point_name + '.rho')
prob.model.connect('cg', point_name + '.cg')
# Connect the parameters within the model for each aero point
for surface in surfaces:
name = surface['name']
# Connect the mesh from the geometry component to the analysis point
prob.model.connect(name + '.mesh', point_name + '.' + name + '.def_mesh')
# Perform the connections with the modified names within the
# 'aero_states' group.
prob.model.connect(name + '.mesh', point_name + '.aero_states.' + name + '_def_mesh')
prob.model.connect(name + '.t_over_c', point_name + '.' + name + '_perf.' + 't_over_c')
# Record all variables and their derivatives to an SQLite database.
recorder = om.SqliteRecorder("aero_analysis.db")
prob.driver.add_recorder(recorder)
prob.driver.recording_options['record_derivatives'] = True
prob.driver.recording_options['includes'] = ['*']
# Set up the problem
prob.setup()
# om.view_model(prob)
prob.run_driver()
# Compare the computed coefficients against previously recorded reference values.
assert_rel_error(self, prob['aero_point_0.wing_perf.CD'][0], 0.038041969673747206, 1e-6)
assert_rel_error(self, prob['aero_point_0.wing_perf.CL'][0], 0.5112640267782032, 1e-6)
assert_rel_error(self, prob['aero_point_0.CM'][1], -1.735548800386354, 1e-6)
if __name__ == '__main__':
unittest.main()
| 41.353383 | 103 | 0.555818 |
acf549851e9d1a35744a381c806d55d66492ae4f | 321 | py | Python | progressbar.py | jacobdshimer/Bro-Log-Utility-Script | f25bef1b6526ad9428c9d790a860710890c4702b | [
"MIT"
] | null | null | null | progressbar.py | jacobdshimer/Bro-Log-Utility-Script | f25bef1b6526ad9428c9d790a860710890c4702b | [
"MIT"
] | null | null | null | progressbar.py | jacobdshimer/Bro-Log-Utility-Script | f25bef1b6526ad9428c9d790a860710890c4702b | [
"MIT"
] | null | null | null | import sys
def progressbar(count, total, status=''):
    """Draw a single-line text progress bar on stdout.

    The trailing carriage return makes successive calls overwrite the
    same terminal line.
    """
    width = 40
    done = int(round(width * count / float(total)))
    percents = round(100.0 * count / float(total), 1)
    bar = '=' * done + '-' * (width - done)
    sys.stdout.write('[%s] %s%s ...%s\r' % (bar, percents, '%', status))
    sys.stdout.flush()
| 32.1 | 69 | 0.626168 |
acf54bd8f13cdd329f22e9011690cefd9879acd8 | 7,884 | py | Python | data_managers/data_manager_snpsift_dbnsfp/data_manager/data_manager_snpsift_dbnsfp.py | hexylena/tools-iuc | 811337eaab815f54f0fd93a3dd23a1153993ea2a | [
"MIT"
] | null | null | null | data_managers/data_manager_snpsift_dbnsfp/data_manager/data_manager_snpsift_dbnsfp.py | hexylena/tools-iuc | 811337eaab815f54f0fd93a3dd23a1153993ea2a | [
"MIT"
] | null | null | null | data_managers/data_manager_snpsift_dbnsfp/data_manager/data_manager_snpsift_dbnsfp.py | hexylena/tools-iuc | 811337eaab815f54f0fd93a3dd23a1153993ea2a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import gzip
import json
import optparse
import os
import os.path
import re
import shutil
import sys
import urllib
import zipfile
from pysam import ctabix
"""
# Install dbNSFP databases
# from DbNsfp site
# Download dbNSFP database
$ wget ftp://dbnsfp:dbnsfp@dbnsfp.softgenetics.com/dbNSFPv2.4.zip
# Uncompress
$ unzip dbNSFP2.4.zip
# Create a single file version
$ (head -n 1 dbNSFP2.4_variant.chr1 ; cat dbNSFP2.4_variant.chr* | grep -v "^#") > dbNSFP2.4.txt
# Compress using block-gzip algorithm
bgzip dbNSFP2.4.txt
# Create tabix index
tabix -s 1 -b 2 -e 2 dbNSFP2.4.txt.gz
data_table:
<table name="snpsift_dbnsfps" comment_char="#">
<columns>key, build, name, value, annotations</columns>
<file path="tool-data/snpsift_dbnsfps.loc" />
</table>
#id build description path annotations
#GRCh37_dbNSFP2.4 GRCh37 GRCh37 dbNSFP2.4 /depot/snpeff/dbNSFP2.4.gz SIFT_pred,Uniprot_acc
#GRCh38_dbNSFP2.7 GRCh38 GRCh38 dbNSFP2.7 /depot/snpeff/dbNSFP2.7.gz SIFT_pred,Uniprot_acc
"""
data_table = 'snpsift_dbnsfps'
softgenetics_url = 'ftp://dbnsfp:dbnsfp@dbnsfp.softgenetics.com/'
dbNSFP_file_pat = '(dbNSFP(.*)_variant|dbscSNV(.*)).chr(.*)'
tokenize = re.compile(r'(\d+)|(\D+)').findall
dbNSFP_name_pat = 'dbNSFP(v|_light)?(\d*).*?'
def stop_err(msg):
    """Write *msg* to stderr (no newline added) and terminate with exit status 1."""
    sys.stderr.write(msg)
    sys.exit(1)
def get_nsfp_genome_version(name):
    """Infer the reference genome build from a dbNSFP/dbscSNV archive name.

    dbscSNV archives and dbNSFP v2.x map to hg19, dbNSFP v3.x to hg38,
    older dbNSFP versions to hg18; unrecognized names default to hg19.
    """
    match = re.match(r'(dbscSNV|dbNSFP(v|_light)?)(\d*).*?', name)
    if match is None:
        return 'hg19'
    base, _mid, major = match.groups()
    if base == 'dbscSNV':
        return 'hg19'
    if major == '3':
        return 'hg38'
    if major == '2':
        return 'hg19'
    return 'hg18'
def get_annotations(gzip_path):
"""Return a comma-separated list of annotation column names from a gzipped dbNSFP file.

Reads only the first 10 kB, takes the first (header) line, and keeps
every column after the first four. Calls stop_err (which exits) on any
failure, so the None initial value is only returned if stop_err changes.
"""
annotations = None
fh = None
try:
fh = gzip.open(gzip_path, 'r')
# A 10 kB prefix is enough to contain the header line.
buf = fh.read(10000)
lines = buf.splitlines()
headers = lines[0].split('\t')
annotations = ','.join([x.strip() for x in headers[4:]])
except Exception as e:
stop_err('Error Reading annotations %s : %s' % (gzip_path, e))
finally:
if fh:
fh.close()
return annotations
def tabix_file(input_fname, output_fname):
"""Block-gzip *input_fname* into *output_fname* and build a tabix index on it."""
print >> sys.stdout, "tabix_file: %s -> %s" % (input_fname, output_fname)
ctabix.tabix_compress(input_fname, output_fname, force=True)
# Column indices are 0-based: sequence name in TSV column 1, position in column 2.
ctabix.tabix_index(output_fname, seq_col=0, start_col=1, end_col=1)
def natural_sortkey(string):
    """Sort key for natural ordering: digit runs compare numerically, other runs as text."""
    return tuple(alpha if not num else int(num) for num, alpha in tokenize(string))
def download_dbnsfp_database(url, output_file):
"""Fetch a dbNSFP/dbscSNV zip from *url* and merge its per-chromosome members into one TSV.

Only the header line of the first member is kept. Members whose
positions (tab column 2) are not non-decreasing are split into sorted
runs, which are then k-way merged by position. Returns the TSV path
(*output_file* when given, otherwise 'dbnsfp_tsv').
"""
dbnsfp_tsv = None
file_path = 'downloaded_file'
urllib.urlretrieve(url, file_path)
with zipfile.ZipFile(file_path, 'r') as my_zip:
dbnsfp_tsv = output_file if output_file else 'dbnsfp_tsv'
wtr = open(dbnsfp_tsv, 'w')
allfiles = [info.filename for info in my_zip.infolist()]
files = [f for f in allfiles if re.match(dbNSFP_file_pat, f)]
# Natural sort so e.g. chr2 sorts before chr10.
files = sorted(files, key=natural_sortkey)
for j, file in enumerate(files):
# Split this member into runs of non-decreasing positions.
tempfiles = []
tempfiles.append(file + "_%d" % len(tempfiles))
tfh = open(tempfiles[-1], 'w')
lastpos = None
fh = my_zip.open(file, 'rU')
for i, line in enumerate(fh):
if i == 0:
# Write the header only once, taken from the first member.
if j == 0:
wtr.write(line)
continue
else:
pos = int(line.split('\t')[1])
if lastpos and pos < lastpos:
# Position went backwards: close this run and start a new one.
tfh.close()
tempfiles.append(file + "_%d" % len(tempfiles))
tfh = open(tempfiles[-1], 'w')
print >> sys.stderr, "%s [%d] pos: %d < %d" % (file, i, pos, lastpos)
lastpos = pos
tfh.write(line)
tfh.close()
if len(tempfiles) == 1:
with open(tempfiles[0], 'r') as tfh:
wtr.writelines(tfh.readlines())
else:
# k-way merge of the sorted runs, always emitting the smallest position.
tfha = [open(temp, 'r') for temp in tempfiles]
lines = [tfh.readline() for tfh in tfha]
curpos = [int(line.split('\t')[1]) for line in lines]
while len(tfha) > 0:
k = curpos.index(min(curpos))
wtr.write(lines[k])
line = tfha[k].readline()
if line:
lines[k] = line
curpos[k] = int(line.split('\t')[1])
else:
tfha[k].close()
del tfha[k]
del lines[k]
del curpos[k]
return dbnsfp_tsv
def main():
"""Galaxy data-manager entry point: obtain a dbNSFP database, bgzip+tabix it, and register it in the snpsift_dbnsfps data table."""
# Parse Command Line
parser = optparse.OptionParser()
parser.add_option('-g', '--dbkey', dest='dbkey', action='store', type="string", default=None, help='dbkey genome version')
parser.add_option('-n', '--db_name', dest='db_name', action='store', type="string", default=None, help='A name for a history snpsiftdbnsfp dataset')
parser.add_option('-s', '--softgenetics', dest='softgenetics', action='store', type="string", default=None, help='A name for softgenetics dbNSFP file')
parser.add_option('-H', '--snpsiftdbnsfp', dest='snpsiftdbnsfp', action='store', type="string", default=None, help='A history snpsiftdbnsfp dataset')
parser.add_option('-T', '--dbnsfp_tabular', dest='dbnsfp_tabular', action='store', type="string", default=None, help='A history dbnsfp_tabular dataset')
(options, args) = parser.parse_args()
filename = args[0]
params = json.loads(open(filename).read())
target_directory = params['output_data'][0]['extra_files_path']
if not os.path.exists(target_directory):
os.mkdir(target_directory)
data_manager_dict = {}
genome_version = options.dbkey if options.dbkey else 'unknown'
dbnsfp_tsv = None
db_name = None
bzip_path = None
# Three mutually exclusive sources: download from softgenetics, a history
# tabular dataset, or an already bgzipped+indexed history dataset.
if options.softgenetics:
dbnsfp_url = softgenetics_url + options.softgenetics
db_name = options.db_name if options.db_name else re.sub('\.zip$', '', options.softgenetics)
genome_version = get_nsfp_genome_version(options.softgenetics)
tsv = db_name + '.tsv'
dbnsfp_tsv = download_dbnsfp_database(dbnsfp_url, tsv)
elif options.dbnsfp_tabular:
db_name = options.db_name
dbnsfp_tsv = options.dbnsfp_tabular
elif options.snpsiftdbnsfp:
(dirpath, bgzip_name) = os.path.split(options.snpsiftdbnsfp)
idxpath = options.snpsiftdbnsfp + '.tbi'
shutil.copy(options.snpsiftdbnsfp, target_directory)
shutil.copy(idxpath, target_directory)
bzip_path = os.path.join(target_directory, bgzip_name)
db_name = re.sub('(.txt)?.gz$', '', bgzip_name)
else:
stop_err('Either --softgenetics or --dbnsfp_tabular required')
# A fresh TSV still needs to be compressed and indexed.
if dbnsfp_tsv:
bgzip_name = '%s.txt.gz' % db_name
bzip_path = os.path.join(target_directory, bgzip_name)
tabix_file(dbnsfp_tsv, bzip_path)
annotations = get_annotations(bzip_path)
# Create the SnpSift dbNSFP Reference Data
data_table_entry = dict(key='%s_%s' % (genome_version, db_name), build=genome_version, name='%s %s' % (genome_version, db_name), value=bgzip_name, annotations=annotations)
data_manager_dict['data_tables'] = data_manager_dict.get('data_tables', {})
data_manager_dict['data_tables'][data_table] = data_manager_dict['data_tables'].get(data_table, [])
data_manager_dict['data_tables'][data_table].append(data_table_entry)
# save info to json file
open(filename, 'wb').write(json.dumps(data_manager_dict))
if __name__ == "__main__":
main()
| 39.029703 | 175 | 0.612253 |
acf54c44bba50780168055c3611bb1f1f0d9da34 | 7,253 | py | Python | HLTrigger/Configuration/python/customizeHLTforCMSSW.py | kondratyevd/cmssw | d4c52b72613dfff99cce543408b8e90ea165972c | [
"Apache-2.0"
] | null | null | null | HLTrigger/Configuration/python/customizeHLTforCMSSW.py | kondratyevd/cmssw | d4c52b72613dfff99cce543408b8e90ea165972c | [
"Apache-2.0"
] | null | null | null | HLTrigger/Configuration/python/customizeHLTforCMSSW.py | kondratyevd/cmssw | d4c52b72613dfff99cce543408b8e90ea165972c | [
"Apache-2.0"
] | null | null | null | import FWCore.ParameterSet.Config as cms
# modifiers
from Configuration.ProcessModifiers.gpu_cff import gpu
# helper fuctions
from HLTrigger.Configuration.common import *
# add one customisation function per PR
# - put the PR number into the name of the function
# - add a short comment
# for example:
# CCCTF tuning
# def customiseFor12718(process):
# for pset in process._Process__psets.values():
# if hasattr(pset,'ComponentType'):
# if (pset.ComponentType == 'CkfBaseTrajectoryFilter'):
# if not hasattr(pset,'minGoodStripCharge'):
# pset.minGoodStripCharge = cms.PSet(refToPSet_ = cms.string('HLTSiStripClusterChargeCutNone'))
# return process
# Eta Extended Electrons
def customiseFor35309(process):
"""Add high-eta trajectory-filter defaults to every CkfBaseTrajectoryFilter PSet that lacks them."""
for pset in process._Process__psets.values():
if hasattr(pset,'ComponentType'):
if (pset.ComponentType == 'CkfBaseTrajectoryFilter'):
# Only fill in the parameters when they are not already configured.
if not hasattr(pset, 'highEtaSwitch'):
pset.highEtaSwitch = cms.double(5.0)
if not hasattr(pset, 'minHitsAtHighEta'):
pset.minHitsAtHighEta = cms.int32(5)
return process
def customiseHCALFor2018Input(process):
"""Customise the HLT to run on Run 2 data/MC using the old readout for the HCAL barrel"""
for producer in producers_by_type(process, "HBHEPhase1Reconstructor"):
# switch on the QI8 processing for 2018 HCAL barrel
producer.processQIE8 = True
# adapt CaloTowers threshold for 2018 HCAL barrel with only one depth
for producer in producers_by_type(process, "CaloTowersCreator"):
producer.HBThreshold1 = 0.7
producer.HBThreshold2 = 0.7
producer.HBThreshold = 0.7
# adapt Particle Flow threshold for 2018 HCAL barrel with only one depth
from RecoParticleFlow.PFClusterProducer.particleFlowClusterHBHE_cfi import _thresholdsHB, _thresholdsHEphase1, _seedingThresholdsHB
# Per-detector log-weight denominators matching the 2018 single-depth barrel.
logWeightDenominatorHCAL2018 = cms.VPSet(
cms.PSet(
depths = cms.vint32(1, 2, 3, 4),
detector = cms.string('HCAL_BARREL1'),
logWeightDenominator = _thresholdsHB
),
cms.PSet(
depths = cms.vint32(1, 2, 3, 4, 5, 6, 7),
detector = cms.string('HCAL_ENDCAP'),
logWeightDenominator = _thresholdsHEphase1
)
)
for producer in producers_by_type(process, "PFRecHitProducer"):
if producer.producers[0].name.value() == 'PFHBHERecHitCreator':
producer.producers[0].qualityTests[0].cuts[0].threshold = _thresholdsHB
for producer in producers_by_type(process, "PFClusterProducer"):
if producer.seedFinder.thresholdsByDetector[0].detector.value() == 'HCAL_BARREL1':
producer.seedFinder.thresholdsByDetector[0].seedingThreshold = _seedingThresholdsHB
producer.initialClusteringStep.thresholdsByDetector[0].gatheringThreshold = _thresholdsHB
producer.pfClusterBuilder.recHitEnergyNorms[0].recHitEnergyNorm = _thresholdsHB
producer.pfClusterBuilder.positionCalc.logWeightDenominatorByDetector = logWeightDenominatorHCAL2018
producer.pfClusterBuilder.allCellsPositionCalc.logWeightDenominatorByDetector = logWeightDenominatorHCAL2018
for producer in producers_by_type(process, "PFMultiDepthClusterProducer"):
producer.pfClusterBuilder.allCellsPositionCalc.logWeightDenominatorByDetector = logWeightDenominatorHCAL2018
# done
return process
def customiseFor2017DtUnpacking(process):
"""Adapt the HLT to run the legacy DT unpacking
for pre2018 data/MC workflows as the default"""
# Replace the DT digi unpacker only when the path actually uses it.
if hasattr(process,'hltMuonDTDigis'):
process.hltMuonDTDigis = cms.EDProducer( "DTUnpackingModule",
useStandardFEDid = cms.bool( True ),
maxFEDid = cms.untracked.int32( 779 ),
inputLabel = cms.InputTag( "rawDataCollector" ),
minFEDid = cms.untracked.int32( 770 ),
dataType = cms.string( "DDU" ),
readOutParameters = cms.PSet(
localDAQ = cms.untracked.bool( False ),
debug = cms.untracked.bool( False ),
rosParameters = cms.PSet(
localDAQ = cms.untracked.bool( False ),
debug = cms.untracked.bool( False ),
writeSC = cms.untracked.bool( True ),
readDDUIDfromDDU = cms.untracked.bool( True ),
readingDDU = cms.untracked.bool( True ),
performDataIntegrityMonitor = cms.untracked.bool( False )
),
performDataIntegrityMonitor = cms.untracked.bool( False )
),
dqmOnly = cms.bool( False )
)
return process
def customisePixelGainForRun2Input(process):
"""Customise the HLT to run on Run 2 data/MC using the old definition of the pixel calibrations
Up to 11.0.x, the pixel calibarations were fully specified in the configuration:
VCaltoElectronGain = 47
VCaltoElectronGain_L1 = 50
VCaltoElectronOffset = -60
VCaltoElectronOffset_L1 = -670
Starting with 11.1.x, the calibrations for Run 3 were moved to the conditions, leaving in the configuration only:
VCaltoElectronGain = 1
VCaltoElectronGain_L1 = 1
VCaltoElectronOffset = 0
VCaltoElectronOffset_L1 = 0
Since the conditions for Run 2 have not been updated to the new scheme, the HLT configuration needs to be reverted.
"""
# revert the Pixel parameters to be compatible with the Run 2 conditions
for producer in producers_by_type(process, "SiPixelClusterProducer"):
producer.VCaltoElectronGain = 47
producer.VCaltoElectronGain_L1 = 50
producer.VCaltoElectronOffset = -60
producer.VCaltoElectronOffset_L1 = -670
return process
def customiseFor2018Input(process):
    """Customise the HLT to run on Run 2 data/MC"""
    # Pixel gains first, then the 2018 HCAL barrel readout, as before.
    for customisation in (customisePixelGainForRun2Input, customiseHCALFor2018Input):
        process = customisation(process)
    return process
def customiseFor35315(process):
    """Update the HLT configuration for the changes in #35315"""
    for module in filters_by_type(process, "HLTHcalCalibTypeFilter"):
        # Drop the obsolete FilterSummary parameter when present.
        try:
            delattr(module, "FilterSummary")
        except AttributeError:
            pass
    return process
# MultipleScatteringParametrisationMakerESProducer
def customiseFor35269(process):
"""Load the multipleScatteringParametrisationMaker ES producer config (PR #35269)."""
process.load("RecoTracker.TkMSParametrization.multipleScatteringParametrisationMakerESProducer_cfi")
return process
# CMSSW version specific customizations
def customizeHLTforCMSSW(process, menuType="GRun"):
"""Apply all release-specific customisations to *process*.

*menuType* is currently unused by the body.
"""
# if the gpu modifier is enabled, make the Pixel, ECAL and HCAL reconstruction offloadable to a GPU
from HLTrigger.Configuration.customizeHLTforPatatrack import customizeHLTforPatatrack
gpu.makeProcessModifier(customizeHLTforPatatrack).apply(process)
# add call to action function in proper order: newest last!
# process = customiseFor12718(process)
process = customiseFor35309(process)
process = customiseFor35315(process)
process = customiseFor35269(process)
return process
| 41.445714 | 135 | 0.687853 |
acf54d0b19bbf49be33497e58552501d9f56933d | 1,850 | py | Python | share/qt/extract_strings_qt.py | cmkcoin/cmkcore | 5c2a3222ef901d1c6d9315177ba79e3f5094f2a6 | [
"MIT"
] | 1 | 2019-10-23T05:58:59.000Z | 2019-10-23T05:58:59.000Z | share/qt/extract_strings_qt.py | cmkcoin/cmkcore | 5c2a3222ef901d1c6d9315177ba79e3f5094f2a6 | [
"MIT"
] | null | null | null | share/qt/extract_strings_qt.py | cmkcoin/cmkcore | 5c2a3222ef901d1c6d9315177ba79e3f5094f2a6 | [
"MIT"
] | null | null | null | #!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
import operator
import os
import sys
OUT_CPP="qt/dashstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out)
f = open(OUT_CPP, 'w')
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *dash_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("cmk-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
| 23.417722 | 78 | 0.595676 |
acf54d945c8d1b5719a904831f5d58c0eb440136 | 1,460 | py | Python | data/cloud/tpot/fold8/pipeline.py | luisferreira97/autoautoml | 501d2de8b2153748b57e5c8cb247058c587ce29c | [
"MIT"
] | 7 | 2020-05-15T23:10:26.000Z | 2022-01-21T10:36:50.000Z | data/cloud/tpot/fold8/pipeline.py | luisferreira97/autoautoml | 501d2de8b2153748b57e5c8cb247058c587ce29c | [
"MIT"
] | 13 | 2020-11-13T18:47:51.000Z | 2021-07-24T10:04:57.000Z | data/cloud/tpot/fold8/pipeline.py | luisferreira97/autoautoml | 501d2de8b2153748b57e5c8cb247058c587ce29c | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.linear_model import ElasticNetCV
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline, make_union
from tpot.builtins import StackingEstimator
from tpot.export_utils import set_param_recursive
# NOTE: Make sure that the outcome column is labeled 'target' in the data file
tpot_data = pd.read_csv("PATH/TO/DATA/FILE",
sep="COLUMN_SEPARATOR", dtype=np.float64)
features = tpot_data.drop("target", axis=1)
training_features, testing_features, training_target, testing_target = train_test_split(
features, tpot_data["target"], random_state=42
)
# Average CV score on the training set was: -0.226360341661062
exported_pipeline = make_pipeline(
StackingEstimator(
estimator=GradientBoostingRegressor(
alpha=0.99,
learning_rate=0.01,
loss="huber",
max_depth=3,
max_features=0.5,
min_samples_leaf=3,
min_samples_split=2,
n_estimators=100,
subsample=0.9500000000000001,
)
),
ElasticNetCV(l1_ratio=0.25, tol=0.0001),
)
# Fix random state for all the steps in exported pipeline
set_param_recursive(exported_pipeline.steps, "random_state", 42)
exported_pipeline.fit(training_features, training_target)
results = exported_pipeline.predict(testing_features)
| 36.5 | 88 | 0.732192 |
acf54e3e01b9c32efbd753967cd94adeb61ce4aa | 806 | py | Python | remove_omex.py | sys-bio/temp-biomodels | 596eebb590d72e74419773f4e9b829a62d7fff9a | [
"CC0-1.0"
] | null | null | null | remove_omex.py | sys-bio/temp-biomodels | 596eebb590d72e74419773f4e9b829a62d7fff9a | [
"CC0-1.0"
] | 5 | 2022-03-30T21:33:45.000Z | 2022-03-31T20:08:15.000Z | remove_omex.py | sys-bio/temp-biomodels | 596eebb590d72e74419773f4e9b829a62d7fff9a | [
"CC0-1.0"
] | null | null | null | import zipfile
import os
KEEP = ["BIOMD0000000585"]
def run(id, omex_filenames, working_dir):
""" Remove OMEX files
* Remove OMEX files that are simple duplicates of the files already in the directory.
Args:
id (:obj:`str`): id of BioModels entry
omex_filenames(:obj:'str'): list of omex files to check.
working_dir (:obj:`str`): directory of entries to change (e.g., ``final``, ``original``)
Returns:
:obj:`list` of :obj:`str`: list of removed files
"""
removed = []
for omex in omex_filenames:
if id in KEEP:
zf = zipfile.ZipFile(omex, "r")
zf.extractall(path=os.path.dirname(omex))
else:
os.remove(os.path.join(working_dir, omex))
removed.append(omex)
return removed
| 26 | 96 | 0.605459 |
acf551ef6911245fffe15e0e4d6950252eed0c3e | 22,496 | py | Python | test/functional/feature_rbf.py | azsxcv11060/bitcoin | 086dae9878680608e189b9aac2a605254d3b4395 | [
"MIT"
] | 13 | 2019-03-21T03:55:52.000Z | 2022-01-06T17:49:16.000Z | test/functional/feature_rbf.py | azsxcv11060/bitcoin | 086dae9878680608e189b9aac2a605254d3b4395 | [
"MIT"
] | 3 | 2020-05-12T23:01:27.000Z | 2020-05-17T05:14:25.000Z | test/functional/feature_rbf.py | azsxcv11060/bitcoin | 086dae9878680608e189b9aac2a605254d3b4395 | [
"MIT"
] | 5 | 2019-12-17T23:52:00.000Z | 2021-06-13T20:39:56.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the RBF code."""
from decimal import Decimal
from test_framework.messages import COIN, COutPoint, CTransaction, CTxIn, CTxOut
from test_framework.script import CScript, OP_DROP
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, satoshi_round
from test_framework.script_util import DUMMY_P2WPKH_SCRIPT, DUMMY_2_P2WPKH_SCRIPT
MAX_REPLACEMENT_LIMIT = 100
def txToHex(tx):
return tx.serialize().hex()
def make_utxo(node, amount, confirmed=True, scriptPubKey=DUMMY_P2WPKH_SCRIPT):
"""Create a txout with a given amount and scriptPubKey
Mines coins as needed.
confirmed - txouts created will be confirmed in the blockchain;
unconfirmed otherwise.
"""
fee = 1*COIN
while node.getbalance() < satoshi_round((amount + fee)/COIN):
node.generate(100)
new_addr = node.getnewaddress()
txid = node.sendtoaddress(new_addr, satoshi_round((amount+fee)/COIN))
tx1 = node.getrawtransaction(txid, 1)
txid = int(txid, 16)
i = None
for i, txout in enumerate(tx1['vout']):
if txout['scriptPubKey']['addresses'] == [new_addr]:
break
assert i is not None
tx2 = CTransaction()
tx2.vin = [CTxIn(COutPoint(txid, i))]
tx2.vout = [CTxOut(amount, scriptPubKey)]
tx2.rehash()
signed_tx = node.signrawtransactionwithwallet(txToHex(tx2))
txid = node.sendrawtransaction(signed_tx['hex'], 0)
# If requested, ensure txouts are confirmed.
if confirmed:
mempool_size = len(node.getrawmempool())
while mempool_size > 0:
node.generate(1)
new_size = len(node.getrawmempool())
# Error out if we have something stuck in the mempool, as this
# would likely be a bug.
assert new_size < mempool_size
mempool_size = new_size
return COutPoint(int(txid, 16), 0)
class ReplaceByFeeTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [
[
"-acceptnonstdtxn=1",
"-maxorphantx=1000",
"-limitancestorcount=50",
"-limitancestorsize=101",
"-limitdescendantcount=200",
"-limitdescendantsize=101",
],
]
self.supports_cli = False
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
# Leave IBD
self.nodes[0].generate(1)
make_utxo(self.nodes[0], 1*COIN)
# Ensure nodes are synced
self.sync_all()
self.log.info("Running test simple doublespend...")
self.test_simple_doublespend()
self.log.info("Running test doublespend chain...")
self.test_doublespend_chain()
self.log.info("Running test doublespend tree...")
self.test_doublespend_tree()
self.log.info("Running test replacement feeperkb...")
self.test_replacement_feeperkb()
self.log.info("Running test spends of conflicting outputs...")
self.test_spends_of_conflicting_outputs()
self.log.info("Running test new unconfirmed inputs...")
self.test_new_unconfirmed_inputs()
self.log.info("Running test too many replacements...")
self.test_too_many_replacements()
self.log.info("Running test opt-in...")
self.test_opt_in()
self.log.info("Running test RPC...")
self.test_rpc()
self.log.info("Running test prioritised transactions...")
self.test_prioritised_transactions()
self.log.info("Passed")
def test_simple_doublespend(self):
"""Simple doublespend"""
tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
# make_utxo may have generated a bunch of blocks, so we need to sync
# before we can spend the coins generated, or else the resulting
# transactions might not be accepted by our peers.
self.sync_all()
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1a.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, 0)
self.sync_all()
# Should fail because we haven't changed the fee
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(1 * COIN, DUMMY_2_P2WPKH_SCRIPT)]
tx1b_hex = txToHex(tx1b)
# This will raise an exception due to insufficient fee
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, 0)
# Extra 0.1 BTC fee
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(int(0.9 * COIN), DUMMY_P2WPKH_SCRIPT)]
tx1b_hex = txToHex(tx1b)
# Works when enabled
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, 0)
mempool = self.nodes[0].getrawmempool()
assert tx1a_txid not in mempool
assert tx1b_txid in mempool
assert_equal(tx1b_hex, self.nodes[0].getrawtransaction(tx1b_txid))
def test_doublespend_chain(self):
"""Doublespend of a long chain"""
initial_nValue = 50*COIN
tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
prevout = tx0_outpoint
remaining_value = initial_nValue
chain_txids = []
while remaining_value > 10*COIN:
remaining_value -= 1*COIN
tx = CTransaction()
tx.vin = [CTxIn(prevout, nSequence=0)]
tx.vout = [CTxOut(remaining_value, CScript([1, OP_DROP] * 15 + [1]))]
tx_hex = txToHex(tx)
txid = self.nodes[0].sendrawtransaction(tx_hex, 0)
chain_txids.append(txid)
prevout = COutPoint(int(txid, 16), 0)
# Whether the double-spend is allowed is evaluated by including all
# child fees - 40 BTC - so this attempt is rejected.
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - 30 * COIN, DUMMY_P2WPKH_SCRIPT)]
dbl_tx_hex = txToHex(dbl_tx)
# This will raise an exception due to insufficient fee
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, dbl_tx_hex, 0)
# Accepted with sufficient fee
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)]
dbl_tx_hex = txToHex(dbl_tx)
self.nodes[0].sendrawtransaction(dbl_tx_hex, 0)
mempool = self.nodes[0].getrawmempool()
for doublespent_txid in chain_txids:
assert doublespent_txid not in mempool
def test_doublespend_tree(self):
"""Doublespend of a big tree of transactions"""
initial_nValue = 50*COIN
tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
def branch(prevout, initial_value, max_txs, tree_width=5, fee=0.0001*COIN, _total_txs=None):
if _total_txs is None:
_total_txs = [0]
if _total_txs[0] >= max_txs:
return
txout_value = (initial_value - fee) // tree_width
if txout_value < fee:
return
vout = [CTxOut(txout_value, CScript([i+1]))
for i in range(tree_width)]
tx = CTransaction()
tx.vin = [CTxIn(prevout, nSequence=0)]
tx.vout = vout
tx_hex = txToHex(tx)
assert len(tx.serialize()) < 100000
txid = self.nodes[0].sendrawtransaction(tx_hex, 0)
yield tx
_total_txs[0] += 1
txid = int(txid, 16)
for i, txout in enumerate(tx.vout):
for x in branch(COutPoint(txid, i), txout_value,
max_txs,
tree_width=tree_width, fee=fee,
_total_txs=_total_txs):
yield x
fee = int(0.0001*COIN)
n = MAX_REPLACEMENT_LIMIT
tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee))
assert_equal(len(tree_txs), n)
# Attempt double-spend, will fail because too little fee paid
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - fee * n, DUMMY_P2WPKH_SCRIPT)]
dbl_tx_hex = txToHex(dbl_tx)
# This will raise an exception due to insufficient fee
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, dbl_tx_hex, 0)
# 1 BTC fee is enough
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - fee * n - 1 * COIN, DUMMY_P2WPKH_SCRIPT)]
dbl_tx_hex = txToHex(dbl_tx)
self.nodes[0].sendrawtransaction(dbl_tx_hex, 0)
mempool = self.nodes[0].getrawmempool()
for tx in tree_txs:
tx.rehash()
assert tx.hash not in mempool
# Try again, but with more total transactions than the "max txs
# double-spent at once" anti-DoS limit.
for n in (MAX_REPLACEMENT_LIMIT+1, MAX_REPLACEMENT_LIMIT*2):
fee = int(0.0001*COIN)
tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee))
assert_equal(len(tree_txs), n)
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - 2 * fee * n, DUMMY_P2WPKH_SCRIPT)]
dbl_tx_hex = txToHex(dbl_tx)
# This will raise an exception
assert_raises_rpc_error(-26, "too many potential replacements", self.nodes[0].sendrawtransaction, dbl_tx_hex, 0)
for tx in tree_txs:
tx.rehash()
self.nodes[0].getrawtransaction(tx.hash)
def test_replacement_feeperkb(self):
"""Replacement requires fee-per-KB to be higher"""
tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1a.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)]
tx1a_hex = txToHex(tx1a)
self.nodes[0].sendrawtransaction(tx1a_hex, 0)
# Higher fee, but the fee per KB is much lower, so the replacement is
# rejected.
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(int(0.001*COIN), CScript([b'a'*999000]))]
tx1b_hex = txToHex(tx1b)
# This will raise an exception due to insufficient fee
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, 0)
def test_spends_of_conflicting_outputs(self):
"""Replacements that spend conflicting tx outputs are rejected"""
utxo1 = make_utxo(self.nodes[0], int(1.2*COIN))
utxo2 = make_utxo(self.nodes[0], 3*COIN)
tx1a = CTransaction()
tx1a.vin = [CTxIn(utxo1, nSequence=0)]
tx1a.vout = [CTxOut(int(1.1 * COIN), DUMMY_P2WPKH_SCRIPT)]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, 0)
tx1a_txid = int(tx1a_txid, 16)
# Direct spend an output of the transaction we're replacing.
tx2 = CTransaction()
tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0)]
tx2.vin.append(CTxIn(COutPoint(tx1a_txid, 0), nSequence=0))
tx2.vout = tx1a.vout
tx2_hex = txToHex(tx2)
# This will raise an exception
assert_raises_rpc_error(-26, "bad-txns-spends-conflicting-tx", self.nodes[0].sendrawtransaction, tx2_hex, 0)
# Spend tx1a's output to test the indirect case.
tx1b = CTransaction()
tx1b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)]
tx1b.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)]
tx1b_hex = txToHex(tx1b)
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, 0)
tx1b_txid = int(tx1b_txid, 16)
tx2 = CTransaction()
tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0),
CTxIn(COutPoint(tx1b_txid, 0))]
tx2.vout = tx1a.vout
tx2_hex = txToHex(tx2)
# This will raise an exception
assert_raises_rpc_error(-26, "bad-txns-spends-conflicting-tx", self.nodes[0].sendrawtransaction, tx2_hex, 0)
def test_new_unconfirmed_inputs(self):
"""Replacements that add new unconfirmed inputs are rejected"""
confirmed_utxo = make_utxo(self.nodes[0], int(1.1*COIN))
unconfirmed_utxo = make_utxo(self.nodes[0], int(0.1*COIN), False)
tx1 = CTransaction()
tx1.vin = [CTxIn(confirmed_utxo)]
tx1.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)]
tx1_hex = txToHex(tx1)
self.nodes[0].sendrawtransaction(tx1_hex, 0)
tx2 = CTransaction()
tx2.vin = [CTxIn(confirmed_utxo), CTxIn(unconfirmed_utxo)]
tx2.vout = tx1.vout
tx2_hex = txToHex(tx2)
# This will raise an exception
assert_raises_rpc_error(-26, "replacement-adds-unconfirmed", self.nodes[0].sendrawtransaction, tx2_hex, 0)
def test_too_many_replacements(self):
"""Replacements that evict too many transactions are rejected"""
# Try directly replacing more than MAX_REPLACEMENT_LIMIT
# transactions
# Start by creating a single transaction with many outputs
initial_nValue = 10*COIN
utxo = make_utxo(self.nodes[0], initial_nValue)
fee = int(0.0001*COIN)
split_value = int((initial_nValue-fee)/(MAX_REPLACEMENT_LIMIT+1))
outputs = []
for i in range(MAX_REPLACEMENT_LIMIT+1):
outputs.append(CTxOut(split_value, CScript([1])))
splitting_tx = CTransaction()
splitting_tx.vin = [CTxIn(utxo, nSequence=0)]
splitting_tx.vout = outputs
splitting_tx_hex = txToHex(splitting_tx)
txid = self.nodes[0].sendrawtransaction(splitting_tx_hex, 0)
txid = int(txid, 16)
# Now spend each of those outputs individually
for i in range(MAX_REPLACEMENT_LIMIT+1):
tx_i = CTransaction()
tx_i.vin = [CTxIn(COutPoint(txid, i), nSequence=0)]
tx_i.vout = [CTxOut(split_value - fee, DUMMY_P2WPKH_SCRIPT)]
tx_i_hex = txToHex(tx_i)
self.nodes[0].sendrawtransaction(tx_i_hex, 0)
# Now create doublespend of the whole lot; should fail.
# Need a big enough fee to cover all spending transactions and have
# a higher fee rate
double_spend_value = (split_value-100*fee)*(MAX_REPLACEMENT_LIMIT+1)
inputs = []
for i in range(MAX_REPLACEMENT_LIMIT+1):
inputs.append(CTxIn(COutPoint(txid, i), nSequence=0))
double_tx = CTransaction()
double_tx.vin = inputs
double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))]
double_tx_hex = txToHex(double_tx)
# This will raise an exception
assert_raises_rpc_error(-26, "too many potential replacements", self.nodes[0].sendrawtransaction, double_tx_hex, 0)
# If we remove an input, it should pass
double_tx = CTransaction()
double_tx.vin = inputs[0:-1]
double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))]
double_tx_hex = txToHex(double_tx)
self.nodes[0].sendrawtransaction(double_tx_hex, 0)
def test_opt_in(self):
"""Replacing should only work if orig tx opted in"""
tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
# Create a non-opting in transaction
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0xffffffff)]
tx1a.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, 0)
# This transaction isn't shown as replaceable
assert_equal(self.nodes[0].getmempoolentry(tx1a_txid)['bip125-replaceable'], False)
# Shouldn't be able to double-spend
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(int(0.9 * COIN), DUMMY_P2WPKH_SCRIPT)]
tx1b_hex = txToHex(tx1b)
# This will raise an exception
assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[0].sendrawtransaction, tx1b_hex, 0)
tx1_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
# Create a different non-opting in transaction
tx2a = CTransaction()
tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0xfffffffe)]
tx2a.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)]
tx2a_hex = txToHex(tx2a)
tx2a_txid = self.nodes[0].sendrawtransaction(tx2a_hex, 0)
# Still shouldn't be able to double-spend
tx2b = CTransaction()
tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)]
tx2b.vout = [CTxOut(int(0.9 * COIN), DUMMY_P2WPKH_SCRIPT)]
tx2b_hex = txToHex(tx2b)
# This will raise an exception
assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[0].sendrawtransaction, tx2b_hex, 0)
# Now create a new transaction that spends from tx1a and tx2a
# opt-in on one of the inputs
# Transaction should be replaceable on either input
tx1a_txid = int(tx1a_txid, 16)
tx2a_txid = int(tx2a_txid, 16)
tx3a = CTransaction()
tx3a.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0xffffffff),
CTxIn(COutPoint(tx2a_txid, 0), nSequence=0xfffffffd)]
tx3a.vout = [CTxOut(int(0.9*COIN), CScript([b'c'])), CTxOut(int(0.9*COIN), CScript([b'd']))]
tx3a_hex = txToHex(tx3a)
tx3a_txid = self.nodes[0].sendrawtransaction(tx3a_hex, 0)
# This transaction is shown as replaceable
assert_equal(self.nodes[0].getmempoolentry(tx3a_txid)['bip125-replaceable'], True)
tx3b = CTransaction()
tx3b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)]
tx3b.vout = [CTxOut(int(0.5 * COIN), DUMMY_P2WPKH_SCRIPT)]
tx3b_hex = txToHex(tx3b)
tx3c = CTransaction()
tx3c.vin = [CTxIn(COutPoint(tx2a_txid, 0), nSequence=0)]
tx3c.vout = [CTxOut(int(0.5 * COIN), DUMMY_P2WPKH_SCRIPT)]
tx3c_hex = txToHex(tx3c)
self.nodes[0].sendrawtransaction(tx3b_hex, 0)
# If tx3b was accepted, tx3c won't look like a replacement,
# but make sure it is accepted anyway
self.nodes[0].sendrawtransaction(tx3c_hex, 0)
def test_prioritised_transactions(self):
# Ensure that fee deltas used via prioritisetransaction are
# correctly used by replacement logic
# 1. Check that feeperkb uses modified fees
tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1a.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, 0)
# Higher fee, but the actual fee per KB is much lower.
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(int(0.001*COIN), CScript([b'a'*740000]))]
tx1b_hex = txToHex(tx1b)
# Verify tx1b cannot replace tx1a.
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, 0)
# Use prioritisetransaction to set tx1a's fee to 0.
self.nodes[0].prioritisetransaction(txid=tx1a_txid, fee_delta=int(-0.1*COIN))
# Now tx1b should be able to replace tx1a
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, 0)
assert tx1b_txid in self.nodes[0].getrawmempool()
# 2. Check that absolute fee checks use modified fee.
tx1_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
tx2a = CTransaction()
tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0)]
tx2a.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)]
tx2a_hex = txToHex(tx2a)
self.nodes[0].sendrawtransaction(tx2a_hex, 0)
# Lower fee, but we'll prioritise it
tx2b = CTransaction()
tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)]
tx2b.vout = [CTxOut(int(1.01 * COIN), DUMMY_P2WPKH_SCRIPT)]
tx2b.rehash()
tx2b_hex = txToHex(tx2b)
# Verify tx2b cannot replace tx2a.
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx2b_hex, 0)
# Now prioritise tx2b to have a higher modified fee
self.nodes[0].prioritisetransaction(txid=tx2b.hash, fee_delta=int(0.1*COIN))
# tx2b should now be accepted
tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, 0)
assert tx2b_txid in self.nodes[0].getrawmempool()
def test_rpc(self):
us0 = self.nodes[0].listunspent()[0]
ins = [us0]
outs = {self.nodes[0].getnewaddress() : Decimal(1.0000000)}
rawtx0 = self.nodes[0].createrawtransaction(ins, outs, 0, True)
rawtx1 = self.nodes[0].createrawtransaction(ins, outs, 0, False)
json0 = self.nodes[0].decoderawtransaction(rawtx0)
json1 = self.nodes[0].decoderawtransaction(rawtx1)
assert_equal(json0["vin"][0]["sequence"], 4294967293)
assert_equal(json1["vin"][0]["sequence"], 4294967295)
rawtx2 = self.nodes[0].createrawtransaction([], outs)
frawtx2a = self.nodes[0].fundrawtransaction(rawtx2, {"replaceable": True})
frawtx2b = self.nodes[0].fundrawtransaction(rawtx2, {"replaceable": False})
json0 = self.nodes[0].decoderawtransaction(frawtx2a['hex'])
json1 = self.nodes[0].decoderawtransaction(frawtx2b['hex'])
assert_equal(json0["vin"][0]["sequence"], 4294967293)
assert_equal(json1["vin"][0]["sequence"], 4294967294)
if __name__ == '__main__':
ReplaceByFeeTest().main()
| 39.260035 | 124 | 0.634868 |
acf552d7e363c63cc6b7a6c04b0c67a070ad60ec | 980 | py | Python | text_to_json.py | osmomysl/Lesson_7 | 2365d7103580ecfea6f4633ca42196ea767c89c2 | [
"MIT"
] | null | null | null | text_to_json.py | osmomysl/Lesson_7 | 2365d7103580ecfea6f4633ca42196ea767c89c2 | [
"MIT"
] | null | null | null | text_to_json.py | osmomysl/Lesson_7 | 2365d7103580ecfea6f4633ca42196ea767c89c2 | [
"MIT"
] | null | null | null | import io
import json
import time
from datetime import datetime
start_time = time.time()
car_parameters = []
with io.open('car_data', encoding='utf-8') as f:
for i, line in enumerate(f):
if i == 0:
keys = line.replace("\n", "").split(', ')
else:
values = line.replace("\n", "").split(', ')
dict_cars = dict(zip(keys, values))
car_parameters.append(dict_cars)
print(car_parameters)
# parameters_json = json.dumps(car_parameters, ensure_ascii=False)
# print(type(parameters_json), parameters_json)
file_date = datetime.now().strftime("%Y-%m-%d_%H%M%S")
with open('car_parameters_{}.txt'.format(file_date), 'w') as json_file:
json.dump(car_parameters, json_file, ensure_ascii=False)
# ensure_ascii=False - чтобы кириллица читалась, иначе в json будут записаны коды Unicode (это тоже норм)
print('done')
print('it took {} seconds'.format(time.time() - start_time)) # время, затраченное на генерацию отчета
| 35 | 105 | 0.676531 |
acf55488f1036ae9c7a04d4220e2d5dfb543eaba | 2,427 | py | Python | sample_random_trajectory.py | khkim1/mgail | 287e2f89d04a4a8fc1f88e5c98ad560a9c8cf41f | [
"MIT"
] | 1 | 2020-07-01T16:45:20.000Z | 2020-07-01T16:45:20.000Z | sample_random_trajectory.py | khkim1/mgail | 287e2f89d04a4a8fc1f88e5c98ad560a9c8cf41f | [
"MIT"
] | null | null | null | sample_random_trajectory.py | khkim1/mgail | 287e2f89d04a4a8fc1f88e5c98ad560a9c8cf41f | [
"MIT"
] | null | null | null | import argparse
import gym
import numpy as np
from network_models.policy_net import Policy_net
import tensorflow as tf
import math
# noinspection PyTypeChecker
def open_file_and_save(file_path, data):
"""
:param file_path: type==string
:param data:
"""
try:
with open(file_path, 'ab') as f_handle:
np.savetxt(f_handle, data, fmt='%s')
except FileNotFoundError:
with open(file_path, 'wb') as f_handle:
np.savetxt(f_handle, data, fmt='%s')
def argparser():
parser = argparse.ArgumentParser()
parser.add_argument('--iteration', default=10, type=int)
return parser.parse_args()
def main(args):
env = gym.make('CartPole-v0')
env.env.theta_threshold_radians = 350 * math.pi / 360
env.seed(0)
ob_space = env.observation_space
num_repeat = 4
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
obs = env.reset()
for iteration in range(args.iteration): # episode
observations = []
actions = []
reset_idx = []
run_steps = 0
while True:
run_steps += 1
# prepare to feed placeholder Policy.obs
obs = np.stack([obs]).astype(dtype=np.float32)
#act = np.random.randint(2)
act = 0
observations.append(obs)
actions.append(act)
for i in range(num_repeat):
next_obs, reward, done, info = env.step(act)
if done:
break
if run_steps == 1:
reset_idx += [0]
else:
reset_idx += [1]
if done:
print(run_steps)
obs = env.reset()
break
else:
obs = next_obs
observations = np.reshape(observations, newshape=[-1] + list(ob_space.shape))
actions = np.array(actions).astype(dtype=np.int32)
reset_idx = np.array(reset_idx).astype(dtype=np.int32)
open_file_and_save('rtrajectory/observations.csv', observations)
open_file_and_save('rtrajectory/actions.csv', actions)
open_file_and_save('rtrajectory/reset_idx.csv', reset_idx)
if __name__ == '__main__':
args = argparser()
main(args)
| 27.896552 | 89 | 0.54965 |
acf554a1b416b90743c1dd96cb07cb7a445fd989 | 2,194 | py | Python | sprokit/tests/bindings/python/modules/sprokit/schedulers/pythonpath_test_scheduler.py | aaron-bray/kwiver | be55ec5a511ba5b9f2af94b268ac5f9a657accc4 | [
"BSD-3-Clause"
] | 1 | 2017-07-31T07:07:32.000Z | 2017-07-31T07:07:32.000Z | sprokit/tests/bindings/python/modules/sprokit/schedulers/pythonpath_test_scheduler.py | aaron-bray/kwiver | be55ec5a511ba5b9f2af94b268ac5f9a657accc4 | [
"BSD-3-Clause"
] | 3 | 2021-03-19T15:39:43.000Z | 2021-09-08T02:47:15.000Z | sprokit/tests/bindings/python/modules/sprokit/schedulers/pythonpath_test_scheduler.py | acidburn0zzz/kwiver | 6e4205f1c46df04759c57c040f01cc804b27e00d | [
"BSD-3-Clause"
] | null | null | null | #ckwg +28
# Copyright 2012 by Kitware, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither name of Kitware, Inc. nor the names of any contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from sprokit.pipeline import config
from sprokit.pipeline import pipeline
from sprokit.pipeline import scheduler
class TestPythonScheduler(scheduler.PythonScheduler):
def __init__(self, conf, pipe):
scheduler.PythonScheduler.__init__(self, conf, pipe)
def __sprokit_register__():
from sprokit.pipeline import scheduler_factory
module_name = 'python:test.pythonpath.test'
if scheduler_factory.is_scheduler_module_loaded(module_name):
return
scheduler_factory.add_scheduler('pythonpath_test_scheduler', 'A test scheduler.', TestPythonScheduler)
scheduler_factory.mark_scheduler_module_as_loaded(module_name)
| 42.192308 | 106 | 0.783045 |
acf554bb8d5079daac331eef0fc673da30ee84f6 | 10,467 | py | Python | python/pymei/Modules/header.py | Feasinde/libmei | 08760d17d17f41f8962ee952b970316ae65c4605 | [
"MIT"
] | 38 | 2015-04-30T06:53:38.000Z | 2022-01-14T22:18:18.000Z | python/pymei/Modules/header.py | Feasinde/libmei | 08760d17d17f41f8962ee952b970316ae65c4605 | [
"MIT"
] | 35 | 2015-01-12T17:43:01.000Z | 2021-12-15T20:21:16.000Z | python/pymei/Modules/header.py | Feasinde/libmei | 08760d17d17f41f8962ee952b970316ae65c4605 | [
"MIT"
] | 16 | 2015-05-09T19:50:43.000Z | 2021-12-26T10:03:36.000Z |
"""
Copyright (c) 2011-2013 Andrew Hankinson, Alastair Porter, and Others
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from pymei import MeiElement
class accessRestrict_(MeiElement):
    """Represents the MEI <accessRestrict> element."""

    def __init__(self):
        # Single inheritance: super() resolves directly to MeiElement.
        super(accessRestrict_, self).__init__("accessRestrict")
# <accessRestrict>
class altId_(MeiElement):
    """Represents the MEI <altId> element."""

    def __init__(self):
        # Single inheritance: super() resolves directly to MeiElement.
        super(altId_, self).__init__("altId")
# <altId>
class appInfo_(MeiElement):
    """Represents the MEI <appInfo> element."""

    def __init__(self):
        # Single inheritance: super() resolves directly to MeiElement.
        super(appInfo_, self).__init__("appInfo")
# <appInfo>
class application_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "application")
# <application>
class audience_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "audience")
# <audience>
class availability_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "availability")
# <availability>
class byline_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "byline")
# <byline>
class captureMode_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "captureMode")
# <captureMode>
class carrierForm_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "carrierForm")
# <carrierForm>
class change_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "change")
# <change>
class changeDesc_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "changeDesc")
# <changeDesc>
class classCode_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "classCode")
# <classCode>
class classification_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "classification")
# <classification>
class condition_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "condition")
# <condition>
class contentItem_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "contentItem")
# <contentItem>
class contents_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "contents")
# <contents>
class context_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "context")
# <context>
class correction_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "correction")
# <correction>
class dimensions_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "dimensions")
# <dimensions>
class editionStmt_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "editionStmt")
# <editionStmt>
class editorialDecl_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "editorialDecl")
# <editorialDecl>
class encodingDesc_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "encodingDesc")
# <encodingDesc>
class exhibHist_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "exhibHist")
# <exhibHist>
class extMeta_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "extMeta")
# <extMeta>
class fileChar_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "fileChar")
# <fileChar>
class fileDesc_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "fileDesc")
# <fileDesc>
class fingerprint_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "fingerprint")
# <fingerprint>
class hand_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "hand")
# <hand>
class handList_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "handList")
# <handList>
class history_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "history")
# <history>
class incipCode_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "incipCode")
# <incipCode>
class incipText_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "incipText")
# <incipText>
class inscription_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "inscription")
# <inscription>
class interpretation_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "interpretation")
# <interpretation>
class key_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "key")
# <key>
class langUsage_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "langUsage")
# <langUsage>
class language_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "language")
# <language>
class meiHead_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "meiHead")
# <meiHead>
class mensuration_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "mensuration")
# <mensuration>
class meter_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "meter")
# <meter>
class normalization_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "normalization")
# <normalization>
class notesStmt_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "notesStmt")
# <notesStmt>
class otherChar_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "otherChar")
# <otherChar>
class perfDuration_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "perfDuration")
# <perfDuration>
class perfMedium_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "perfMedium")
# <perfMedium>
class perfRes_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "perfRes")
# <perfRes>
class perfResList_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "perfResList")
# <perfResList>
class physDesc_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "physDesc")
# <physDesc>
class physMedium_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "physMedium")
# <physMedium>
class plateNum_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "plateNum")
# <plateNum>
class playingSpeed_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "playingSpeed")
# <playingSpeed>
class price_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "price")
# <price>
class projectDesc_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "projectDesc")
# <projectDesc>
class provenance_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "provenance")
# <provenance>
class pubStmt_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "pubStmt")
# <pubStmt>
class revisionDesc_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "revisionDesc")
# <revisionDesc>
class samplingDecl_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "samplingDecl")
# <samplingDecl>
class scoreFormat_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "scoreFormat")
# <scoreFormat>
class segmentation_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "segmentation")
# <segmentation>
class seriesStmt_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "seriesStmt")
# <seriesStmt>
class soundChan_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "soundChan")
# <soundChan>
class source_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "source")
# <source>
class sourceDesc_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "sourceDesc")
# <sourceDesc>
class specRepro_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "specRepro")
# <specRepro>
class stdVals_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "stdVals")
# <stdVals>
class sysReq_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "sysReq")
# <sysReq>
class term_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "term")
# <term>
class termList_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "termList")
# <termList>
class titleStmt_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "titleStmt")
# <titleStmt>
class trackConfig_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "trackConfig")
# <trackConfig>
class treatHist_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "treatHist")
# <treatHist>
class treatSched_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "treatSched")
# <treatSched>
class unpub_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "unpub")
# <unpub>
class useRestrict_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "useRestrict")
# <useRestrict>
class watermark_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "watermark")
# <watermark>
class work_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "work")
# <work>
class workDesc_(MeiElement):
def __init__(self):
MeiElement.__init__(self, "workDesc")
# <workDesc>
| 25.343826 | 74 | 0.676316 |
acf555b61403ed97bd4f65caf44a3aa86e88b055 | 335 | py | Python | bilbyweb/migrations/0004_remove_job_submission_time.py | ASVO-TAO/SS18B-PLasky | a3c13b05f894fb8cebd5be381c170b8a78adb81a | [
"MIT"
] | null | null | null | bilbyweb/migrations/0004_remove_job_submission_time.py | ASVO-TAO/SS18B-PLasky | a3c13b05f894fb8cebd5be381c170b8a78adb81a | [
"MIT"
] | null | null | null | bilbyweb/migrations/0004_remove_job_submission_time.py | ASVO-TAO/SS18B-PLasky | a3c13b05f894fb8cebd5be381c170b8a78adb81a | [
"MIT"
] | null | null | null | # Generated by Django 2.0.7 on 2018-10-10 08:29
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('bilbyweb', '0003_auto_20181010_1140'),
]
operations = [
migrations.RemoveField(
model_name='job',
name='submission_time',
),
]
| 18.611111 | 48 | 0.6 |
acf557028f07c10c2cc15ef3876b73d0b94a6bec | 17,186 | py | Python | pybind/slxos/v16r_1_00b/brocade_mpls_rpc/show_mpls_bypass_bypass_lsp_extensive/output/bypass_lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_admin_groups/lsp_admin_group/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | null | null | null | pybind/slxos/v16r_1_00b/brocade_mpls_rpc/show_mpls_bypass_bypass_lsp_extensive/output/bypass_lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_admin_groups/lsp_admin_group/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | null | null | null | pybind/slxos/v16r_1_00b/brocade_mpls_rpc/show_mpls_bypass_bypass_lsp_extensive/output/bypass_lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_admin_groups/lsp_admin_group/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | 1 | 2021-11-05T22:15:42.000Z | 2021-11-05T22:15:42.000Z |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import lsp_admin_group_exclude_any
import lsp_admin_group_include_any
import lsp_admin_group_include_all
class lsp_admin_group(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-mpls - based on the path /brocade_mpls_rpc/show-mpls-bypass-bypass-lsp-extensive/output/bypass-lsp/show-mpls-lsp-extensive-info/show-mpls-lsp-sec-path-info/sec-path/lsp-sec-path-config-admin-groups/lsp-admin-group. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__lsp_admin_group_exclude_any','__lsp_admin_group_include_any','__lsp_admin_group_include_all',)
_yang_name = 'lsp-admin-group'
_rest_name = ''
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__lsp_admin_group_exclude_any = YANGDynClass(base=YANGListType("lsp_admin_group_exclude_any_group_id",lsp_admin_group_exclude_any.lsp_admin_group_exclude_any, yang_name="lsp-admin-group-exclude-any", rest_name="lsp-admin-group-exclude-any", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='lsp-admin-group-exclude-any-group-id', extensions=None), is_container='list', yang_name="lsp-admin-group-exclude-any", rest_name="lsp-admin-group-exclude-any", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='list', is_config=True)
self.__lsp_admin_group_include_any = YANGDynClass(base=YANGListType("lsp_admin_group_include_any_group_id",lsp_admin_group_include_any.lsp_admin_group_include_any, yang_name="lsp-admin-group-include-any", rest_name="lsp-admin-group-include-any", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='lsp-admin-group-include-any-group-id', extensions=None), is_container='list', yang_name="lsp-admin-group-include-any", rest_name="lsp-admin-group-include-any", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='list', is_config=True)
self.__lsp_admin_group_include_all = YANGDynClass(base=YANGListType("lsp_admin_group_include_all_group_id",lsp_admin_group_include_all.lsp_admin_group_include_all, yang_name="lsp-admin-group-include-all", rest_name="lsp-admin-group-include-all", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='lsp-admin-group-include-all-group-id', extensions=None), is_container='list', yang_name="lsp-admin-group-include-all", rest_name="lsp-admin-group-include-all", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='list', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'brocade_mpls_rpc', u'show-mpls-bypass-bypass-lsp-extensive', u'output', u'bypass-lsp', u'show-mpls-lsp-extensive-info', u'show-mpls-lsp-sec-path-info', u'sec-path', u'lsp-sec-path-config-admin-groups', u'lsp-admin-group']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'show-mpls-bypass-bypass-lsp-extensive', u'output', u'bypass-lsp', u'sec-path', u'lsp-sec-path-config-admin-groups']
def _get_lsp_admin_group_exclude_any(self):
"""
Getter method for lsp_admin_group_exclude_any, mapped from YANG variable /brocade_mpls_rpc/show_mpls_bypass_bypass_lsp_extensive/output/bypass_lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_admin_groups/lsp_admin_group/lsp_admin_group_exclude_any (list)
"""
return self.__lsp_admin_group_exclude_any
def _set_lsp_admin_group_exclude_any(self, v, load=False):
"""
Setter method for lsp_admin_group_exclude_any, mapped from YANG variable /brocade_mpls_rpc/show_mpls_bypass_bypass_lsp_extensive/output/bypass_lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_admin_groups/lsp_admin_group/lsp_admin_group_exclude_any (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_admin_group_exclude_any is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_admin_group_exclude_any() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("lsp_admin_group_exclude_any_group_id",lsp_admin_group_exclude_any.lsp_admin_group_exclude_any, yang_name="lsp-admin-group-exclude-any", rest_name="lsp-admin-group-exclude-any", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='lsp-admin-group-exclude-any-group-id', extensions=None), is_container='list', yang_name="lsp-admin-group-exclude-any", rest_name="lsp-admin-group-exclude-any", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_admin_group_exclude_any must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("lsp_admin_group_exclude_any_group_id",lsp_admin_group_exclude_any.lsp_admin_group_exclude_any, yang_name="lsp-admin-group-exclude-any", rest_name="lsp-admin-group-exclude-any", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='lsp-admin-group-exclude-any-group-id', extensions=None), is_container='list', yang_name="lsp-admin-group-exclude-any", rest_name="lsp-admin-group-exclude-any", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='list', is_config=True)""",
})
self.__lsp_admin_group_exclude_any = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_admin_group_exclude_any(self):
self.__lsp_admin_group_exclude_any = YANGDynClass(base=YANGListType("lsp_admin_group_exclude_any_group_id",lsp_admin_group_exclude_any.lsp_admin_group_exclude_any, yang_name="lsp-admin-group-exclude-any", rest_name="lsp-admin-group-exclude-any", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='lsp-admin-group-exclude-any-group-id', extensions=None), is_container='list', yang_name="lsp-admin-group-exclude-any", rest_name="lsp-admin-group-exclude-any", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='list', is_config=True)
def _get_lsp_admin_group_include_any(self):
"""
Getter method for lsp_admin_group_include_any, mapped from YANG variable /brocade_mpls_rpc/show_mpls_bypass_bypass_lsp_extensive/output/bypass_lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_admin_groups/lsp_admin_group/lsp_admin_group_include_any (list)
"""
return self.__lsp_admin_group_include_any
def _set_lsp_admin_group_include_any(self, v, load=False):
"""
Setter method for lsp_admin_group_include_any, mapped from YANG variable /brocade_mpls_rpc/show_mpls_bypass_bypass_lsp_extensive/output/bypass_lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_admin_groups/lsp_admin_group/lsp_admin_group_include_any (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_admin_group_include_any is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_admin_group_include_any() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("lsp_admin_group_include_any_group_id",lsp_admin_group_include_any.lsp_admin_group_include_any, yang_name="lsp-admin-group-include-any", rest_name="lsp-admin-group-include-any", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='lsp-admin-group-include-any-group-id', extensions=None), is_container='list', yang_name="lsp-admin-group-include-any", rest_name="lsp-admin-group-include-any", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_admin_group_include_any must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("lsp_admin_group_include_any_group_id",lsp_admin_group_include_any.lsp_admin_group_include_any, yang_name="lsp-admin-group-include-any", rest_name="lsp-admin-group-include-any", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='lsp-admin-group-include-any-group-id', extensions=None), is_container='list', yang_name="lsp-admin-group-include-any", rest_name="lsp-admin-group-include-any", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='list', is_config=True)""",
})
self.__lsp_admin_group_include_any = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_admin_group_include_any(self):
self.__lsp_admin_group_include_any = YANGDynClass(base=YANGListType("lsp_admin_group_include_any_group_id",lsp_admin_group_include_any.lsp_admin_group_include_any, yang_name="lsp-admin-group-include-any", rest_name="lsp-admin-group-include-any", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='lsp-admin-group-include-any-group-id', extensions=None), is_container='list', yang_name="lsp-admin-group-include-any", rest_name="lsp-admin-group-include-any", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='list', is_config=True)
def _get_lsp_admin_group_include_all(self):
"""
Getter method for lsp_admin_group_include_all, mapped from YANG variable /brocade_mpls_rpc/show_mpls_bypass_bypass_lsp_extensive/output/bypass_lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_admin_groups/lsp_admin_group/lsp_admin_group_include_all (list)
"""
return self.__lsp_admin_group_include_all
def _set_lsp_admin_group_include_all(self, v, load=False):
"""
Setter method for lsp_admin_group_include_all, mapped from YANG variable /brocade_mpls_rpc/show_mpls_bypass_bypass_lsp_extensive/output/bypass_lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path/lsp_sec_path_config_admin_groups/lsp_admin_group/lsp_admin_group_include_all (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_admin_group_include_all is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_admin_group_include_all() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("lsp_admin_group_include_all_group_id",lsp_admin_group_include_all.lsp_admin_group_include_all, yang_name="lsp-admin-group-include-all", rest_name="lsp-admin-group-include-all", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='lsp-admin-group-include-all-group-id', extensions=None), is_container='list', yang_name="lsp-admin-group-include-all", rest_name="lsp-admin-group-include-all", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_admin_group_include_all must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("lsp_admin_group_include_all_group_id",lsp_admin_group_include_all.lsp_admin_group_include_all, yang_name="lsp-admin-group-include-all", rest_name="lsp-admin-group-include-all", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='lsp-admin-group-include-all-group-id', extensions=None), is_container='list', yang_name="lsp-admin-group-include-all", rest_name="lsp-admin-group-include-all", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='list', is_config=True)""",
})
self.__lsp_admin_group_include_all = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_admin_group_include_all(self):
self.__lsp_admin_group_include_all = YANGDynClass(base=YANGListType("lsp_admin_group_include_all_group_id",lsp_admin_group_include_all.lsp_admin_group_include_all, yang_name="lsp-admin-group-include-all", rest_name="lsp-admin-group-include-all", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='lsp-admin-group-include-all-group-id', extensions=None), is_container='list', yang_name="lsp-admin-group-include-all", rest_name="lsp-admin-group-include-all", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='list', is_config=True)
lsp_admin_group_exclude_any = __builtin__.property(_get_lsp_admin_group_exclude_any, _set_lsp_admin_group_exclude_any)
lsp_admin_group_include_any = __builtin__.property(_get_lsp_admin_group_include_any, _set_lsp_admin_group_include_any)
lsp_admin_group_include_all = __builtin__.property(_get_lsp_admin_group_include_all, _set_lsp_admin_group_include_all)
_pyangbind_elements = {'lsp_admin_group_exclude_any': lsp_admin_group_exclude_any, 'lsp_admin_group_include_any': lsp_admin_group_include_any, 'lsp_admin_group_include_all': lsp_admin_group_include_all, }
| 87.683673 | 730 | 0.786338 |
acf5577438fcb64bb2b93a0560cb7f28a1bf87df | 1,844 | py | Python | preprocess_corpora/alignment/align.py | time-in-translation/preprocess-corpora | 20366cb159204616b9940693f6e5f002fb1b0485 | [
"MIT"
] | null | null | null | preprocess_corpora/alignment/align.py | time-in-translation/preprocess-corpora | 20366cb159204616b9940693f6e5f002fb1b0485 | [
"MIT"
] | 20 | 2019-10-01T12:31:21.000Z | 2022-03-31T04:18:19.000Z | preprocess_corpora/alignment/align.py | time-in-translation/preprocess-corpora | 20366cb159204616b9940693f6e5f002fb1b0485 | [
"MIT"
] | null | null | null | import glob
import itertools
import os
import shutil
import subprocess
import click
from ..core.constants import LANGUAGES, VARIETIES
from .merge_alignments import merge
UPLUG_ALIGN = 'uplug align/hun -src {src} -trg {trg} -s {sl} -t {tl}'
def check_variety(language):
result = language
if language in VARIETIES:
result = VARIETIES.get(language)
return result
@click.command()
@click.argument('working_dir', type=click.Path(exists=True))
@click.argument('languages', nargs=-1, type=click.Choice(LANGUAGES))
def sentence_align(working_dir, languages):
"""
Applies sentence alignment (using hunalign) to a corpus
"""
os.chdir(working_dir)
if len(languages) < 2:
raise click.ClickException('Please supply at least two languages')
comb_ls = itertools.combinations(sorted(languages), 2)
for sl, tl in comb_ls:
for src in glob.glob(os.path.join(sl, '*.xml')):
src_base = os.path.splitext(os.path.basename(src))[0]
trg = os.path.join(tl, '{}.xml'.format(src_base))
sl_align = check_variety(sl)
tl_align = check_variety(tl)
command = UPLUG_ALIGN.format(src=src, trg=trg, sl=sl_align, tl=tl_align)
out_file = '{sl}-{tl}-{base}.xml'.format(sl=sl, tl=tl, base=src_base)
with open(out_file, 'w') as out:
subprocess.call(command, stdout=out, stderr=open(os.devnull, 'w'), shell=True)
alignments = glob.glob('{sl}-{tl}-*.xml'.format(sl=sl, tl=tl))
merged_file = '{sl}-{tl}.xml'.format(sl=sl, tl=tl)
merge(alignments, merged_file, delete_files_in=True)
# Remove artefacts created by hunalign
shutil.rmtree(os.path.join(working_dir, 'data'))
os.remove(os.path.join(working_dir, 'translate.txt'))
if __name__ == "__main__":
sentence_align()
| 30.733333 | 94 | 0.658351 |
acf5580daa29dad64802a21326253c2235629eea | 919 | py | Python | rx/core/operators/maxby.py | daliclass/RxPY | d3ff1b72963fd08341807986d49480351015165e | [
"MIT"
] | null | null | null | rx/core/operators/maxby.py | daliclass/RxPY | d3ff1b72963fd08341807986d49480351015165e | [
"MIT"
] | null | null | null | rx/core/operators/maxby.py | daliclass/RxPY | d3ff1b72963fd08341807986d49480351015165e | [
"MIT"
] | null | null | null | from typing import Callable, Optional
from rx.core import Observable
from rx.core.typing import Mapper, Comparer
from rx.internal.basic import default_sub_comparer
from .minby import extrema_by
def _max_by(key_mapper: Mapper,
comparer: Optional[Comparer] = None
) -> Callable[[Observable], Observable]:
comparer = comparer or default_sub_comparer
def max_by(source: Observable) -> Observable:
"""Partially applied max_by operator.
Returns the elements in an observable sequence with the maximum
key value.
Examples:
>>> res = max_by(source)
Args:
Source: The source observable sequence to.
Returns:
An observable sequence containing a list of zero or more
elements that have a maximum key value.
"""
return extrema_by(source, key_mapper, comparer)
return max_by
| 27.848485 | 71 | 0.664853 |
acf5581cd7d5da80c1e5d8c09c03d7348abff5d6 | 685 | py | Python | cruddy/exceptions.py | PastNullInfinity/cruddy | 4bb6cbfa0fbff6aba22f95c21cffe411301893a7 | [
"Apache-2.0"
] | 13 | 2016-11-17T18:49:20.000Z | 2021-11-18T14:53:39.000Z | cruddy/exceptions.py | PastNullInfinity/cruddy | 4bb6cbfa0fbff6aba22f95c21cffe411301893a7 | [
"Apache-2.0"
] | 17 | 2016-01-06T07:04:17.000Z | 2016-02-06T20:25:59.000Z | cruddy/exceptions.py | PastNullInfinity/cruddy | 4bb6cbfa0fbff6aba22f95c21cffe411301893a7 | [
"Apache-2.0"
] | 4 | 2016-01-06T06:40:53.000Z | 2016-08-19T11:40:04.000Z | # Copyright (c) 2016 CloudNative, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class CruddyKeySchemaError(Exception):
pass
class CruddyKeyNameError(Exception):
pass
| 28.541667 | 74 | 0.756204 |
acf5581f8cfd0118750f74913b7db3a217f35052 | 6,854 | py | Python | malcolm/modules/pmac/parts/pmactrajectorypart.py | thomascobb/pymalcolm | 801f8fe6217c0c028b5edc87fa0aef9d60b91d9d | [
"Apache-2.0"
] | null | null | null | malcolm/modules/pmac/parts/pmactrajectorypart.py | thomascobb/pymalcolm | 801f8fe6217c0c028b5edc87fa0aef9d60b91d9d | [
"Apache-2.0"
] | null | null | null | malcolm/modules/pmac/parts/pmactrajectorypart.py | thomascobb/pymalcolm | 801f8fe6217c0c028b5edc87fa0aef9d60b91d9d | [
"Apache-2.0"
] | null | null | null | from typing import Union
import numpy as np
from annotypes import Anno, Array, add_call_types
from malcolm.core import (
DEFAULT_TIMEOUT,
Display,
IncompatibleError,
NumberMeta,
PartRegistrar,
Widget,
)
from malcolm.modules import builtin
from ..util import CS_AXIS_NAMES
# recommended trajectory program number and lowest allowed program number
TRAJECTORY_PROGRAM_NUM = 3
FALLBACK_TRAJ_PROGRAM_NUM = 2
# The maximum number of points in a single trajectory scan
MAX_NUM_POINTS = 4000000
with Anno("The Asyn Port name of the Co-ordinate system port we want to scan"):
ACSPort = str
with Anno("The relative time points to scan in microseconds"):
ATimeArray = Union[Array[np.int32]]
with Anno("The velocity mode of each point"):
AVelocityMode = Union[Array[np.int32]]
with Anno("Which user program to run for each point"):
AUserPrograms = Union[Array[np.int32]]
with Anno("The position the axis should be at for each point in the scan"):
ADemandTrajectory = Union[Array[np.float64]]
# Pull re-used annotypes into our namespace in case we are subclassed
APartName = builtin.parts.APartName
AMri = builtin.parts.AMri
def _zeros_or_right_length(array, num_points):
if array is None:
array = np.zeros(num_points, np.int32)
else:
assert (
len(array) == num_points
), f"Array {array} should be {num_points} points long"
return array
# We will set these attributes on the child block, so don't save them
@builtin.util.no_save(
    "numPoints",
    "enableCallbacks",
    "computeStatistics",
    "timeArray",
    "cs",
    "velocityMode",
    "userPrograms",
    "pointsToBuild",
)
@builtin.util.no_save(f"use{x}" for x in CS_AXIS_NAMES)
@builtin.util.no_save(f"positions{x}" for x in CS_AXIS_NAMES)
class PmacTrajectoryPart(builtin.parts.ChildPart):
    """Part that writes, executes and aborts PMAC trajectory-scan profiles
    on the child trajectory-scan block identified by ``mri``.
    """
    def __init__(
        self,
        name: APartName,
        mri: AMri,
    ) -> None:
        super().__init__(name, mri, initial_visibility=True)
        # The total number of points we have written
        self.total_points = 0
        # Read-only meter attribute mirroring the child's pointsScanned value
        self.points_scanned = NumberMeta(
            "int32", "The number of points scanned", tags=[Widget.METER.tag()]
        ).create_attribute_model(0)
    def setup(self, registrar: PartRegistrar) -> None:
        """Register this part's methods and attributes with Malcolm."""
        super().setup(registrar)
        # Add methods
        registrar.add_method_model(
            self.write_profile, "writeProfile", needs_context=True
        )
        registrar.add_method_model(
            self.execute_profile, "executeProfile", needs_context=True
        )
        registrar.add_method_model(
            self.abort_profile, "abortProfile", needs_context=True
        )
        # Add Attributes
        registrar.add_attribute_model("pointsScanned", self.points_scanned)
    # Serialized, so use camelCase
    # noinspection PyPep8Naming
    @add_call_types
    def write_profile(
        self,
        context: builtin.hooks.AContext,
        timeArray: ATimeArray,
        csPort: ACSPort = None,
        velocityMode: AVelocityMode = None,
        userPrograms: AUserPrograms = None,
        a: ADemandTrajectory = None,
        b: ADemandTrajectory = None,
        c: ADemandTrajectory = None,
        u: ADemandTrajectory = None,
        v: ADemandTrajectory = None,
        w: ADemandTrajectory = None,
        x: ADemandTrajectory = None,
        y: ADemandTrajectory = None,
        z: ADemandTrajectory = None,
    ) -> None:
        """Build (when csPort is given) or append a profile of time/position
        arrays on the child block, then call its build/append action.
        """
        child = context.block_view(self.mri)
        # make sure a matching trajectory program is installed on the pmac
        if child.trajectoryProgVersion.value != TRAJECTORY_PROGRAM_NUM:
            if child.trajectoryProgVersion.value >= FALLBACK_TRAJ_PROGRAM_NUM:
                self.log.warning(
                    f"pmac trajectory program is version {FALLBACK_TRAJ_PROGRAM_NUM}"
                    f" version {TRAJECTORY_PROGRAM_NUM} is recommended"
                )
            else:
                raise (
                    IncompatibleError(
                        f"pmac trajectory program {child.trajectoryProgVersion.value} "
                        f"detected. Malcolm requires {TRAJECTORY_PROGRAM_NUM}"
                    )
                )
        # The axes taking part in the scan
        use_axes = []
        for axis in CS_AXIS_NAMES:
            # locals() lookup deliberately maps the CS axis name to this
            # method's matching keyword argument (a..z)
            if locals()[axis.lower()] is not None:
                use_axes.append(axis)
        if csPort is not None:
            # This is a build
            action = child.buildProfile
            self.total_points = 0
            child.numPoints.put_value(MAX_NUM_POINTS)
            try:
                child.cs.put_value(csPort)
            except ValueError as e:
                raise ValueError(
                    f"Cannot set CS to {csPort}, did you use a compound_motor_block "
                    f"for a raw motor?\n{e}"
                )
            # Tell the trajectory scans which of the arrays to use
            arrays = {f"use{axis}": axis in use_axes for axis in CS_AXIS_NAMES}
            child.put_attribute_values(arrays)
        else:
            # This is an append
            action = child.appendProfile
        # Fill in the arrays
        num_points = len(timeArray)
        attribute_values = dict(
            timeArray=timeArray,
            pointsToBuild=num_points,
            velocityMode=_zeros_or_right_length(velocityMode, num_points),
            userPrograms=_zeros_or_right_length(userPrograms, num_points),
        )
        for axis in use_axes:
            demand = locals()[axis.lower()]
            attribute_values[f"positions{axis}"] = demand
        child.put_attribute_values(attribute_values)
        # Write the profile
        action()
        # Record how many points we have now written in total
        self.total_points += num_points
    def set_scan_length(self, value):
        # Subscribed to the child's pointsBuilt; sets the meter display limit
        self.points_scanned.meta.set_display(Display(limitHigh=value))
    @add_call_types
    def execute_profile(self, context: builtin.hooks.AContext) -> None:
        """Run the stored profile, mirroring progress into pointsScanned."""
        child = context.block_view(self.mri)
        fs1 = context.subscribe(
            [self.mri, "pointsScanned", "value"], self.points_scanned.set_value
        )
        fs2 = context.subscribe(
            [self.mri, "pointsBuilt", "value"], self.set_scan_length
        )
        try:
            child.executeProfile()
            # Now wait for up to 2*min_delta time to make sure any
            # update_completed_steps come in
            child.when_value_matches(
                "pointsScanned", self.total_points, timeout=DEFAULT_TIMEOUT
            )
        finally:
            context.unsubscribe(fs1)
            context.unsubscribe(fs2)
    @add_call_types
    def abort_profile(self, context: builtin.hooks.AContext) -> None:
        """Abort any running trajectory scan on the child block."""
        child = context.block_view(self.mri)
        child.abortProfile()
| 34.791878 | 87 | 0.628684 |
acf5585cf1925c2e316fa567ce56bbcefde4885b | 5,056 | py | Python | chemex/experiments/cpmg_15n_tr.py | gbouvignies/ChemEx | b1748f1bdc623a1d078de47dffe8cae2515d3411 | [
"BSD-3-Clause"
] | 10 | 2018-09-20T00:33:59.000Z | 2021-08-08T10:21:27.000Z | chemex/experiments/cpmg_15n_tr.py | gbouvignies/ChemEx | b1748f1bdc623a1d078de47dffe8cae2515d3411 | [
"BSD-3-Clause"
] | 28 | 2018-09-17T12:01:58.000Z | 2022-03-23T13:40:48.000Z | chemex/experiments/cpmg_15n_tr.py | gbouvignies/ChemEx | b1748f1bdc623a1d078de47dffe8cae2515d3411 | [
"BSD-3-Clause"
] | 6 | 2018-09-17T13:50:13.000Z | 2021-02-23T07:39:10.000Z | """
15N–1HN TROSY CPMG
==================
Analyzes 15N constant-time TROSY CPMG relaxation dispersion experiments for
measurement of ΔD NH in protein systems undergoing millisecond-time-scale
exchange dynamics. Resulting magnetization intensity after the CPMG block is
calculated using the (6n)×(6n), two-spin matrix, where n is the number of
states::
{ Nx(a), Ny(a), Nz(a), 2HzNx(a), 2HzNy(a), 2HzNz(a),
Nx(b), Ny(b), Nz(b), 2HzNx(b), 2HzNy(b), 2HzNz(b), ... }
References
----------
Vallurupalli, Hansen, Stollar, Meirovitch and Kay. PNAS (2007) 104:18473-18477
Note
----
A sample configuration file for this module is available using the command::
$ chemex config cpmg_15n_tr
"""
import functools as ft
import numpy as np
import numpy.linalg as nl
import chemex.experiments.helper as ceh
import chemex.helper as ch
import chemex.nmr.liouvillian as cnl
# JSON schema used by read() to validate the "experiment" section of the
# user-supplied configuration (defaults are filled in by the validator)
_SCHEMA = {
    "type": "object",
    "properties": {
        "experiment": {
            "type": "object",
            "properties": {
                "time_t2": {"type": "number"},
                "carrier": {"type": "number"},
                "pw90": {"type": "number"},
                "time_equil": {"type": "number", "default": 0.0},
                "taub": {"type": "number", "default": 2.68e-3},
                "antitrosy": {"type": "boolean", "default": False},
                # Single lowercase letter naming the state to observe
                "observed_state": {
                    "type": "string",
                    "pattern": "[a-z]",
                    "default": "a",
                },
            },
            "required": ["time_t2", "carrier", "pw90"],
        }
    },
}
def read(config):
    """Load the experiment described by *config*.

    Validates *config* against _SCHEMA, attaches the spin basis and the
    parameters to fit, then delegates to the generic experiment loader
    with PulseSeq as the pulse-sequence class.
    """
    ch.validate(config, _SCHEMA)
    config["basis"] = cnl.Basis(type="ixyzsz", spin_system="nh")
    config["fit"] = _fit_this(config)
    return ceh.load_experiment(config=config, pulse_seq_cls=PulseSeq)
def _fit_this(config):
this = {
"rates": ["r2_i_{observed_state}"],
"model_free": ["tauc_{observed_state}"],
}
if config["experiment"]["antitrosy"]:
this["rates"].append("etaxy_i_{observed_state}")
this["model_free"].append("s2_{observed_state}")
return this
class PulseSeq:
    """Simulate the 15N TROSY constant-time CPMG pulse sequence.

    ``propagator`` supplies all spin-physics operators (pulses, delays,
    detection); this class only assembles them in sequence order.
    """
    def __init__(self, config, propagator):
        self.prop = propagator
        settings = config["experiment"]
        self.time_t2 = settings["time_t2"]
        self.time_eq = settings["time_equil"]
        self.prop.carrier_i = settings["carrier"]
        self.pw90 = settings["pw90"]
        # taub corrected for the finite length of the 90-degree pulses
        self.taub = settings["taub"] - 2.0 * self.pw90 - 2.0 * self.pw90 / np.pi
        self.t_neg = -2.0 * self.pw90 / np.pi
        self.prop.b1_i = 1 / (4.0 * self.pw90)
        self.antitrosy = settings["antitrosy"]
        self.prop.detection = self._get_detection(settings["observed_state"])
    # NOTE(review): lru_cache on an instance method keeps `self` alive via
    # the cache for the interpreter's lifetime; acceptable if PulseSeq
    # objects live for the whole run — confirm.
    @ft.lru_cache(maxsize=10000)
    def calculate(self, ncycs, params_local):
        """Return simulated intensities, one per entry of *ncycs*."""
        self.prop.update(params_local)
        # Calculation of the propagators corresponding to all the delays
        tau_cps, all_delays = self._get_delays(ncycs)
        delays = dict(zip(all_delays, self.prop.delays(all_delays)))
        d_neg = delays[self.t_neg]
        d_eq = delays[self.time_eq]
        d_taub = delays[self.taub]
        d_cp = {ncyc: delays[delay] for ncyc, delay in tau_cps.items()}
        # Calculation of the propagators corresponding to all the pulses
        p90 = self.prop.p90_i
        p180 = self.prop.p180_i
        p180_sx = self.prop.perfect180_s[0]
        # Getting the starting magnetization
        start = self.prop.get_start_magnetization("2izsz")
        # Calculating the p-element
        if self.antitrosy:
            palmer0 = (
                p180_sx @ d_taub @ p90[2] @ p90[1] @ p180_sx @ p90[1] @ p90[2] @ d_taub
            )
        else:
            palmer0 = (
                p180_sx @ d_taub @ p90[1] @ p90[0] @ p180_sx @ p90[0] @ p90[1] @ d_taub
            )
        palmer = np.mean(p90[[0, 2]] @ palmer0 @ p90[[1, 3]], axis=0)
        # Calculating the cpmg trains
        part1 = p90[0] @ start
        part2 = d_eq @ p90[1]
        intst = {0: self.prop.detect(part2 @ palmer0 @ part1)}
        for ncyc in set(ncycs) - {0}:
            echo = d_cp[ncyc] @ p180[[1, 0]] @ d_cp[ncyc]
            cpmg1, cpmg2 = d_neg @ nl.matrix_power(echo, ncyc) @ d_neg
            intst[ncyc] = self.prop.detect(part2 @ cpmg2 @ palmer @ cpmg1 @ part1)
        # Return profile
        return np.array([intst[ncyc] for ncyc in ncycs])
    @ft.lru_cache()
    def _get_delays(self, ncycs):
        # Map each non-zero ncyc to its tau_cp duration, and return the flat
        # list of every delay duration the propagator must provide
        ncycs_ = np.asarray(ncycs)
        ncycs_ = ncycs_[ncycs_ > 0]
        tau_cps = dict(zip(ncycs_, self.time_t2 / (4.0 * ncycs_) - self.pw90))
        delays = [self.t_neg, self.taub, self.time_eq, *tau_cps.values()]
        return tau_cps, delays
    def _get_detection(self, state):
        # The detected operator combination depends on whether the
        # anti-TROSY component is selected
        if self.antitrosy:
            return f"[2izsz_{state}] + [iz_{state}]"
        return f"[2izsz_{state}] - [iz_{state}]"
    def ncycs_to_nu_cpmgs(self, ncycs):
        """Convert numbers of CPMG cycles into CPMG frequencies (1/time_t2 units)."""
        ncycs_ = np.asarray(ncycs)
        ncycs_ = ncycs_[ncycs_ > 0]
        return ncycs_ / self.time_t2
| 32.831169 | 87 | 0.579707 |
acf5591949feb09d8d613efde004ef804a833756 | 1,970 | py | Python | app.py | MarekSuchanek/dummy-currency-api | b8463e127059e35e94acf05a2f381f52ea1f38b3 | [
"MIT"
] | null | null | null | app.py | MarekSuchanek/dummy-currency-api | b8463e127059e35e94acf05a2f381f52ea1f38b3 | [
"MIT"
] | null | null | null | app.py | MarekSuchanek/dummy-currency-api | b8463e127059e35e94acf05a2f381f52ea1f38b3 | [
"MIT"
] | null | null | null | import datetime
import flask
import flask_cors
import hashlib
import random
app = flask.Flask(__name__)
api_token = "token " + hashlib.sha1(b'MI-AFP').hexdigest()
base_rates = {
"USD": {"EUR": 0.834900163, "CZK": 21.2743325,
"GBP": 0.735600257, "CNY": 6.36780438},
"EUR": {"USD": 1.197748, "CZK": 25.4812892,
"GBP": 0.881063737, "CNY": 7.62702496},
"CZK": {"USD": 0.047005, "EUR": 0.0392444821,
"GBP": 0.0345768901, "CNY": 0.299318645},
"GBP": {"USD": 1.359434, "EUR": 1.13499167,
"CZK": 28.921051, "CNY": 8.65660978},
"CNY": {"USD": 0.15704, "EUR": 0.131112722,
"CZK": 3.34092118, "GBP": 0.115518664},
}
@app.route('/')
def index():
    # Render the static landing page template
    return flask.render_template('index.html')
@app.route('/currencies')
@flask_cors.cross_origin()
def currencies():
    """List the currencies supported by this dummy API.

    Requires the demo token in the Authorization header; responds 401
    otherwise.
    """
    # Debug prints removed: they echoed the client's Authorization header
    # and the expected API token to stdout on every request.
    if flask.request.headers.get('Authorization', '') != api_token:
        flask.abort(401)
    return flask.jsonify(
        [
            {"code": "USD", "name": "U.S. Dollar", "sign": "$"},
            {"code": "EUR", "name": "Euro", "sign": "€"},
            {"code": "CZK", "name": "Czech Koruna", "sign": "Kč"},
            {"code": "GBP", "name": "Pound Sterling", "sign": "£"},
            {"code": "CNY", "name": "Chinese Yuan Renminbi", "sign": "¥"}
        ]
    )
def recalc_rates(currency):
    """Return the base rates for *currency*, each jittered by up to +/-1%."""
    jittered = {}
    for code, rate in base_rates[currency].items():
        jittered[code] = rate + rate * (random.random() - 0.5) / 50
    return jittered
@app.route('/exchange-rates/<currency>')
@flask_cors.cross_origin()
def rates(currency):
    """Return jittered exchange rates for *currency*.

    Responds 401 without the demo token, 400 for an unknown currency.
    """
    # Use `!=` directly (was `not ... == api_token`), matching the check
    # in the /currencies handler.
    if flask.request.headers.get('Authorization', '') != api_token:
        flask.abort(401)
    if currency not in base_rates:
        flask.abort(400)
    return flask.jsonify(
        {
            "base": currency,
            "timestamp": str(datetime.datetime.utcnow()),
            "rates": recalc_rates(currency)
        }
    )
| 28.970588 | 73 | 0.554822 |
acf559afb36cd4b2a6f83556adc3d094e51e1cf9 | 4,545 | py | Python | Aulas/Aula 5.py | vmonteiro6/lead-machine-learning | 577e6bbd52bc0a68415294993fa210f47bc36ed7 | [
"MIT"
] | null | null | null | Aulas/Aula 5.py | vmonteiro6/lead-machine-learning | 577e6bbd52bc0a68415294993fa210f47bc36ed7 | [
"MIT"
] | null | null | null | Aulas/Aula 5.py | vmonteiro6/lead-machine-learning | 577e6bbd52bc0a68415294993fa210f47bc36ed7 | [
"MIT"
] | null | null | null | #Aula 6.1 - SVM (Support Vector Machines)
#Implementando o SVM de classificação
from numpy.core.numeric import cross
from numpy.lib.npyio import load
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
X, y = load_breast_cancer(return_X_y = True)
X_train, X_test, y_train, y_test = train_test_split(X, y)
svm = SVC(kernel = 'linear', C = 1.0)
svm.fit(X_train, y_train)
svm.score(X_test, y_test)
#Implementando o SVM com kerneis
from sklearn.datasets import load_iris
X, y = load_iris(return_X_y = True)
X_train, X_test, y_train, y_test = train_test_split(X, y)
svm = SVC(kernel = 'linear')
svm.fit(X_train, y_train)
svm.score(X_test, y_test)
svm_kernel = SVC(kernel = 'poly', degree = 3)
svm_kernel.fit(X_train, y_train)
svm_kernel.score(X_test, y_test)
svm_rbf = SVC(kernel = 'rbf')
svm_rbf.fit(X_train, y_train)
svm_rbf.score(X_test, y_test)
#Fórum da Aula 6.1
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
wine_quality_data_frame = pd.read_csv('winequality-red.csv')
wine_quality_data_frame.head()
wine_quality_data_frame.isnull().sum()
wine_quality_data_frame['quality'].replace({3 : 0, 4 : 0, 5 : 0, 6 : 0, 7 : 1, 8 : 1}, inplace = True)
X = wine_quality_data_frame.drop('quality', axis = 1)
y = wine_quality_data_frame['quality']
mm = MinMaxScaler()
X_train, X_test, y_train, y_test = train_test_split(X, y)
X_train = mm.fit_transform(X_train)
X_test = mm.fit_transform(X_test)
svm_linear_c1 = SVC(kernel = 'linear', C = 1.0)
svm_linear_c1.fit(X_train, y_train)
svm_linear_c1.score(X_test, y_test)
#Aula 6.2 - Avaliação de Modelos de Classificação e Regressão
#Modelos Dummy
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.datasets import load_iris, load_boston
X, y = load_iris(return_X_y = True)
dc = DummyClassifier(strategy = 'stratified')
dc.fit(X, y)
dc.score(X, y)
X, y = load_boston(return_X_y = True)
dr = DummyRegressor(strategy = 'mean')
dr.fit(X, y)
dr.score(X, y)
#Matriz de confusão, recall e precisão
from sklearn.datasets import load_breast_cancer
from sklearn.metrics import confusion_matrix, classification_report
X, y = load_breast_cancer(return_X_y = True)
dc = DummyClassifier(strategy = 'stratified')
dc.fit(X, y)
confusion_matrix(y, dc.predict(X))
print(classification_report(y, dc.predict(X)))
#Validação cruzada
from sklearn.model_selection import cross_val_score
X, y = load_iris(return_X_y = True)
dc = DummyClassifier(strategy = 'stratified')
cross_val_score(dc, X, y, cv = 5)
cross_val_score(dc, X, y, cv = 5).mean()
#Aula 6.3 - Random Forest e Gradient Boosted Decision Trees
#Introdução ao Random Forest
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.model_selection import train_test_split
X, y = load_breast_cancer(return_X_y = True)
X_train, X_test, y_train, y_test = train_test_split(X, y)
rf = RandomForestClassifier(n_estimators = 100)
rf.fit(X_train, y_train)
rf.score(X_test, y_test)
from sklearn.datasets import load_boston
X, y = load_boston(return_X_y = True)
X_train, X_test, y_train, y_test = train_test_split(X, y)
rf = RandomForestRegressor(n_estimators = 100)
rf.fit(X_train, y_train)
rf.score(X_test, y_test)
#Introdução ao Gradient Boosted Decision Trees
from sklearn.ensemble import GradientBoostingClassifier, GradientBoostingRegressor
X, y = load_breast_cancer(return_X_y = True)
X_train, X_test, y_train, y_test = train_test_split(X, y)
gb = GradientBoostingClassifier(n_estimators = 100, max_depth = 3)
gb.fit(X_train, y_train)
gb.score(X_test, y_test)
X, y = load_boston(return_X_y = True)
X_train, X_test, y_train, y_test = train_test_split(X, y)
gb = GradientBoostingRegressor(n_estimators = 100, max_depth = 3)
gb.fit(X_train, y_train)
gb.score(X_test, y_test)
#Lesson 6.4 - PCA (Principal Component Analysis)
#PCA implementation
# Fix: the final line had dataset-extraction garbage ("| 37.875 | 103 |...")
# fused onto it, which made the script a syntax error; comments translated
# to English.
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn.datasets import load_boston
X, y = load_boston(return_X_y = True)
X.shape
pca = PCA(n_components = 10)
X_pca = pca.fit_transform(X)
X_pca.shape
plt.plot(X_pca[:, 0], X_pca[:, 1]), plt.show()
plt.scatter(X_pca[:, 0], X_pca[:, 1]), plt.show()
X_train, X_test, y_train, y_test = train_test_split(X_pca, y)
lr = LinearRegression()
lr.fit(X_train, y_train)
lr.score(X_test, y_test)
acf55a7c0fa214925ce968ef3f57725ed83b89c1 | 13,152 | py | Python | cinder/tests/unit/api/v3/test_backups.py | alexisries/openstack-cinder | 7cc6e45c5ddb8bf771bdb01b867628e41761ae11 | [
"Apache-2.0"
] | 2 | 2019-05-24T14:13:50.000Z | 2019-05-24T14:21:13.000Z | cinder/tests/unit/api/v3/test_backups.py | vexata/cinder | 7b84c0842b685de7ee012acec40fb4064edde5e9 | [
"Apache-2.0"
] | 28 | 2017-08-17T14:46:05.000Z | 2022-03-29T12:42:12.000Z | cinder/tests/unit/api/v3/test_backups.py | vexata/cinder | 7b84c0842b685de7ee012acec40fb4064edde5e9 | [
"Apache-2.0"
] | 3 | 2017-04-27T16:11:40.000Z | 2020-02-12T21:27:00.000Z | # Copyright (c) 2016 Intel, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The backups V3 api."""
import copy
import ddt
import mock
from oslo_serialization import jsonutils
from oslo_utils import strutils
import webob
from cinder.api import microversions as mv
from cinder.api.openstack import api_version_request as api_version
from cinder.api.v3 import backups
from cinder.api.views import backups as backup_view
import cinder.backup
from cinder import context
from cinder import exception
from cinder.objects import fields
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import utils as test_utils
@ddt.ddt
class BackupsControllerAPITestCase(test.TestCase):
    """Test cases for backups API."""
    def setUp(self):
        super(BackupsControllerAPITestCase, self).setUp()
        self.backup_api = cinder.backup.API()
        # Admin context used for fixture creation and privileged requests
        self.ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID,
                                           auth_token=True,
                                           is_admin=True)
        self.controller = backups.BackupsController()
        # Non-admin context for user-scoped requests
        self.user_context = context.RequestContext(
            fake.USER_ID, fake.PROJECT_ID, auth_token=True)
    def _fake_update_request(self, backup_id, version=mv.BACKUP_UPDATE):
        # Build an admin update request pinned to the given microversion
        req = fakes.HTTPRequest.blank('/v3/%s/backups/%s/update' %
                                      (fake.PROJECT_ID, backup_id))
        req.environ['cinder.context'].is_admin = True
        req.headers['Content-Type'] = 'application/json'
        req.headers['OpenStack-API-Version'] = 'volume ' + version
        req.api_version_request = api_version.APIVersionRequest(version)
        return req
    def test_update_wrong_version(self):
        # Update must be rejected below the BACKUP_UPDATE microversion
        req = self._fake_update_request(
            fake.BACKUP_ID, version=mv.get_prior_version(mv.BACKUP_UPDATE))
        body = {"backup": {"name": "Updated Test Name", }}
        self.assertRaises(exception.VersionNotFoundForAPIMethod,
                          self.controller.update, req, fake.BACKUP_ID,
                          body)
    def test_backup_update_with_no_body(self):
        # omit body from the request
        req = self._fake_update_request(fake.BACKUP_ID)
        self.assertRaises(exception.ValidationError,
                          self.controller.update,
                          req, fake.BACKUP_ID, body=None)
    def test_backup_update_with_unsupported_field(self):
        # "id" is not an updatable field, so validation must fail
        req = self._fake_update_request(fake.BACKUP_ID)
        body = {"backup": {"id": fake.BACKUP2_ID,
                           "description": "", }}
        self.assertRaises(exception.ValidationError,
                          self.controller.update,
                          req, fake.BACKUP_ID, body=body)
    def test_backup_update_with_backup_not_found(self):
        req = self._fake_update_request(fake.BACKUP_ID)
        updates = {
            "name": "Updated Test Name",
            "description": "Updated Test description.",
        }
        body = {"backup": updates}
        self.assertRaises(exception.NotFound,
                          self.controller.update,
                          req, fake.BACKUP_ID, body=body)
    def _create_multiple_backups_with_different_project(self):
        # Two backups in PROJECT_ID, one in PROJECT2_ID
        test_utils.create_backup(
            context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True))
        test_utils.create_backup(
            context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True))
        test_utils.create_backup(
            context.RequestContext(fake.USER_ID, fake.PROJECT2_ID, True))
    @ddt.data('backups', 'backups/detail')
    def test_list_backup_with_count_param_version_not_matched(self, action):
        # 'count' must not appear below the SUPPORT_COUNT_INFO microversion
        self._create_multiple_backups_with_different_project()
        is_detail = True if 'detail' in action else False
        req = fakes.HTTPRequest.blank("/v3/%s?with_count=True" % action)
        req.headers = mv.get_mv_header(
            mv.get_prior_version(mv.SUPPORT_COUNT_INFO))
        req.api_version_request = mv.get_api_version(
            mv.get_prior_version(mv.SUPPORT_COUNT_INFO))
        ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True)
        req.environ['cinder.context'] = ctxt
        res_dict = self.controller._get_backups(req, is_detail=is_detail)
        self.assertNotIn('count', res_dict)
    @ddt.data({'method': 'backups',
               'display_param': 'True'},
              {'method': 'backups',
               'display_param': 'False'},
              {'method': 'backups',
               'display_param': '1'},
              {'method': 'backups/detail',
               'display_param': 'True'},
              {'method': 'backups/detail',
               'display_param': 'False'},
              {'method': 'backups/detail',
               'display_param': '1'}
              )
    @ddt.unpack
    def test_list_backups_with_count_param(self, method, display_param):
        self._create_multiple_backups_with_different_project()
        is_detail = True if 'detail' in method else False
        show_count = strutils.bool_from_string(display_param, strict=True)
        # Request with 'with_count' and 'limit'
        req = fakes.HTTPRequest.blank(
            "/v3/%s?with_count=%s&limit=1" % (method, display_param))
        req.headers = mv.get_mv_header(mv.SUPPORT_COUNT_INFO)
        req.api_version_request = mv.get_api_version(mv.SUPPORT_COUNT_INFO)
        ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, False)
        req.environ['cinder.context'] = ctxt
        res_dict = self.controller._get_backups(req, is_detail=is_detail)
        self.assertEqual(1, len(res_dict['backups']))
        if show_count:
            self.assertEqual(2, res_dict['count'])
        else:
            self.assertNotIn('count', res_dict)
        # Request with 'with_count'
        req = fakes.HTTPRequest.blank(
            "/v3/%s?with_count=%s" % (method, display_param))
        req.headers = mv.get_mv_header(mv.SUPPORT_COUNT_INFO)
        req.api_version_request = mv.get_api_version(mv.SUPPORT_COUNT_INFO)
        ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, False)
        req.environ['cinder.context'] = ctxt
        res_dict = self.controller._get_backups(req, is_detail=is_detail)
        self.assertEqual(2, len(res_dict['backups']))
        if show_count:
            self.assertEqual(2, res_dict['count'])
        else:
            self.assertNotIn('count', res_dict)
        # Request with admin context and 'all_tenants'
        req = fakes.HTTPRequest.blank(
            "/v3/%s?with_count=%s&all_tenants=1" % (method, display_param))
        req.headers = mv.get_mv_header(mv.SUPPORT_COUNT_INFO)
        req.api_version_request = mv.get_api_version(mv.SUPPORT_COUNT_INFO)
        ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True)
        req.environ['cinder.context'] = ctxt
        res_dict = self.controller._get_backups(req, is_detail=is_detail)
        self.assertEqual(3, len(res_dict['backups']))
        if show_count:
            self.assertEqual(3, res_dict['count'])
        else:
            self.assertNotIn('count', res_dict)
    @ddt.data(mv.get_prior_version(mv.RESOURCE_FILTER),
              mv.RESOURCE_FILTER,
              mv.LIKE_FILTER)
    @mock.patch('cinder.api.common.reject_invalid_filters')
    def test_backup_list_with_general_filter(self, version, mock_update):
        url = '/v3/%s/backups' % fake.PROJECT_ID
        req = fakes.HTTPRequest.blank(url,
                                      version=version,
                                      use_admin_context=False)
        self.controller.index(req)
        if version != mv.get_prior_version(mv.RESOURCE_FILTER):
            support_like = True if version == mv.LIKE_FILTER else False
            mock_update.assert_called_once_with(req.environ['cinder.context'],
                                                mock.ANY, 'backup',
                                                support_like)
    @ddt.data(mv.get_prior_version(mv.BACKUP_SORT_NAME),
              mv.BACKUP_SORT_NAME)
    def test_backup_list_with_name(self, version):
        # sort_key=name is only accepted from BACKUP_SORT_NAME onwards
        backup1 = test_utils.create_backup(
            self.ctxt, display_name='b_test_name',
            status=fields.BackupStatus.AVAILABLE)
        backup2 = test_utils.create_backup(
            self.ctxt, display_name='a_test_name',
            status=fields.BackupStatus.AVAILABLE)
        url = '/v3/%s/backups?sort_key=name' % fake.PROJECT_ID
        req = fakes.HTTPRequest.blank(url, version=version)
        if version == mv.get_prior_version(mv.BACKUP_SORT_NAME):
            self.assertRaises(exception.InvalidInput,
                              self.controller.index,
                              req)
        else:
            expect = backup_view.ViewBuilder().summary_list(req,
                                                            [backup1, backup2])
            result = self.controller.index(req)
            self.assertEqual(expect, result)
    def test_backup_update(self):
        backup = test_utils.create_backup(
            self.ctxt,
            status=fields.BackupStatus.AVAILABLE)
        req = self._fake_update_request(fake.BACKUP_ID)
        new_name = "updated_test_name"
        new_description = "Updated Test description."
        updates = {
            "name": new_name,
            "description": new_description,
        }
        body = {"backup": updates}
        self.controller.update(req,
                               backup.id,
                               body=body)
        backup.refresh()
        self.assertEqual(new_name, backup.display_name)
        self.assertEqual(new_description,
                         backup.display_description)
    @ddt.data({"backup": {"description": " sample description",
                          "name": " test name"}},
              {"backup": {"description": "sample description ",
                          "name": "test "}},
              {"backup": {"description": " sample description ",
                          "name": " test "}})
    def test_backup_update_name_description_with_leading_trailing_spaces(
            self, body):
        # Name/description must come back stripped of surrounding whitespace
        backup = test_utils.create_backup(
            self.ctxt,
            status=fields.BackupStatus.AVAILABLE)
        req = self._fake_update_request(fake.BACKUP_ID)
        expected_body = copy.deepcopy(body)
        self.controller.update(req,
                               backup.id,
                               body=body)
        backup.refresh()
        # backup update call doesn't return 'description' in response so get
        # the updated backup to assert name and description
        req = webob.Request.blank('/v2/%s/backups/%s' % (
            fake.PROJECT_ID, backup.id))
        req.method = 'GET'
        req.headers['Content-Type'] = 'application/json'
        res = req.get_response(fakes.wsgi_app(
            fake_auth_context=self.user_context))
        res_dict = jsonutils.loads(res.body)
        self.assertEqual(expected_body['backup']['name'].strip(),
                         res_dict['backup']['name'])
        self.assertEqual(expected_body['backup']['description'].strip(),
                         res_dict['backup']['description'])
    @ddt.data(mv.get_prior_version(mv.BACKUP_METADATA),
              mv.BACKUP_METADATA)
    def test_backup_show_with_metadata(self, version):
        backup = test_utils.create_backup(
            self.ctxt, display_name='test_backup_metadata',
            status=fields.BackupStatus.AVAILABLE)
        # show backup metadata
        url = '/v3/%s/backups/%s' % (fake.PROJECT_ID, backup.id)
        req = fakes.HTTPRequest.blank(url, version=version)
        backup_get = self.controller.show(req, backup.id)['backup']
        if version == mv.get_prior_version(mv.BACKUP_METADATA):
            self.assertNotIn('metadata', backup_get)
        else:
            self.assertIn('metadata', backup_get)
    def test_backup_update_with_null_validate(self):
        # A null name is accepted and leaves the backup available
        backup = test_utils.create_backup(
            self.ctxt,
            status=fields.BackupStatus.AVAILABLE)
        req = self._fake_update_request(fake.BACKUP_ID)
        updates = {
            "name": None,
        }
        body = {"backup": updates}
        self.controller.update(req,
                               backup.id,
                               body=body)
        backup.refresh()
        self.assertEqual(fields.BackupStatus.AVAILABLE, backup.status)
| 43.263158 | 79 | 0.614583 |
acf55a8db43cd1e4dd23b5ae7d555355ba6a0768 | 1,273 | py | Python | tests/ext/test_scss.py | Descent098/hyde | 7f415402cc3e007a746eb2b5bc102281fdb415bd | [
"MIT"
] | 804 | 2015-01-03T22:52:38.000Z | 2022-02-19T08:47:54.000Z | tests/ext/test_scss.py | Descent098/hyde | 7f415402cc3e007a746eb2b5bc102281fdb415bd | [
"MIT"
] | 86 | 2015-01-16T16:56:43.000Z | 2021-10-05T05:25:25.000Z | tests/ext/test_scss.py | Descent098/hyde | 7f415402cc3e007a746eb2b5bc102281fdb415bd | [
"MIT"
] | 161 | 2015-01-06T18:52:17.000Z | 2022-02-04T21:21:54.000Z | # -*- coding: utf-8 -*-
"""
Use nose
`$ pip install nose`
`$ nosetests`
"""
from hyde.generator import Generator
from hyde.site import Site
from fswrap import File, Folder
from util import assert_no_diff
# Folder holding the .scss fixtures next to this test module
SCSS_SOURCE = File(__file__).parent.child_folder('scss')
# Scratch folder the test site is generated into
TEST_SITE = File(__file__).parent.parent.child_folder('_test')
class TestSassyCSS(object):
    """Generating a site with the SassyCSS plugin compiles .scss to the expected CSS."""
    def setUp(self):
        # Build a scratch site from the test_jinja fixture plus the scss
        # sources; remove any pre-built site.css so it must be regenerated
        TEST_SITE.make()
        TEST_SITE.parent.child_folder(
            'sites/test_jinja').copy_contents_to(TEST_SITE)
        SCSS_SOURCE.copy_contents_to(TEST_SITE.child('content/media/css'))
        File(TEST_SITE.child('content/media/css/site.css')).delete()
    def tearDown(self):
        TEST_SITE.delete()
    def test_scss(self):
        # Generate site.scss through the plugin and diff the output
        # against the checked-in expected CSS
        s = Site(TEST_SITE)
        s.config.mode = 'prod'
        s.config.plugins = ['hyde.ext.plugins.css.SassyCSSPlugin']
        source = TEST_SITE.child('content/media/css/site.scss')
        target = File(
            Folder(s.config.deploy_root_path).child('media/css/site.css'))
        gen = Generator(s)
        gen.generate_resource_at_path(source)
        assert target.exists
        text = target.read_all()
        expected_text = File(SCSS_SOURCE.child('expected-site.css')).read_all()
        assert_no_diff(expected_text, text)
| 28.931818 | 79 | 0.669285 |
acf55ca48ebabdf0c3429838201ec8763bcf52c0 | 468 | py | Python | Python/Reduce function/Reduce_function.py | sachinprabhu007/HackerRank-Solutions | f42d3c1e989b288e42b4674a926d007aa22940a1 | [
"MIT"
] | null | null | null | Python/Reduce function/Reduce_function.py | sachinprabhu007/HackerRank-Solutions | f42d3c1e989b288e42b4674a926d007aa22940a1 | [
"MIT"
] | 1 | 2019-01-16T12:13:29.000Z | 2019-01-16T14:57:57.000Z | Python/Reduce function/Reduce_function.py | sachinprabhu007/HackerRank-Solutions | f42d3c1e989b288e42b4674a926d007aa22940a1 | [
"MIT"
] | null | null | null | from fractions import Fraction
from functools import reduce
import operator
def product(fracs):
    """Multiply the Fractions in *fracs* together.

    Returns the (numerator, denominator) of the reduced product.
    An empty input yields the multiplicative identity, (1, 1).
    """
    # operator.mul instead of a lambda (operator is already imported);
    # Fraction(1) initializer keeps reduce() safe on an empty list.
    t = reduce(operator.mul, fracs, Fraction(1))
    return t.numerator, t.denominator
fracs = []
for _ in range(int(input())):
fracs.append(Fraction(*map(int, input().split())))
result = product(fracs)
print(*result) | 31.2 | 88 | 0.660256 |
acf55d06d2c26cbebba4580c1e7a280d98ab3055 | 2,965 | py | Python | MACD/huobi_Python-1.0.3/huobi/impl/utils/inputchecker.py | yangdemin/dingpan | 0b68c7f9b497c7becab6ec3e7a2b21b5c03a1dd9 | [
"MIT"
] | 1 | 2020-12-28T07:04:45.000Z | 2020-12-28T07:04:45.000Z | MACD/huobi_Python-1.0.3/huobi/impl/utils/inputchecker.py | yangdemin/dingpan | 0b68c7f9b497c7becab6ec3e7a2b21b5c03a1dd9 | [
"MIT"
] | 1 | 2020-12-05T11:41:35.000Z | 2020-12-05T11:41:35.000Z | MACD/huobi_Python-1.0.3/huobi/impl/utils/inputchecker.py | yangdemin/dingpan | 0b68c7f9b497c7becab6ec3e7a2b21b5c03a1dd9 | [
"MIT"
] | 1 | 2022-03-27T10:36:04.000Z | 2022-03-27T10:36:04.000Z | import re
import time
from huobi.exception.huobiapiexception import HuobiApiException
reg_ex = "[ _`~!@#$%^&*()+=|{}':;',\\[\\].<>/?~!@#¥%……&*()——+|{}【】‘;:”“’。,、?]|\n|\t"
def check_symbol(symbol):
    """Validate that *symbol* is a string containing no forbidden characters.

    Raises HuobiApiException(INPUT_ERROR) on failure.
    """
    if not isinstance(symbol, str):
        raise HuobiApiException(HuobiApiException.INPUT_ERROR, "[Input] symbol must be string")
    # Bug fix: re.search, not re.match — re.match only anchors at the start
    # of the string, so a forbidden character anywhere later in the symbol
    # slipped through validation.
    if re.search(reg_ex, symbol):
        raise HuobiApiException(HuobiApiException.INPUT_ERROR, "[Input] " + symbol + " is invalid symbol")
def check_symbol_list(symbols):
    """Validate that *symbols* is a list and every entry is a valid symbol.

    Raises HuobiApiException if *symbols* is not a list or any entry
    fails check_symbol.
    """
    if not isinstance(symbols, list):
        raise HuobiApiException(HuobiApiException.INPUT_ERROR, "[Input] symbols in subscription is not a list")
    for symbol in symbols:
        check_symbol(symbol)
def check_currency(currency):
    """Validate that *currency* is a string containing no forbidden characters.

    Raises HuobiApiException(INPUT_ERROR) on failure.
    """
    if not isinstance(currency, str):
        raise HuobiApiException(HuobiApiException.INPUT_ERROR, "[Input] currency must be string")
    # Bug fix: re.search, not re.match — a forbidden character anywhere in
    # the currency code must be rejected, not only at position 0.
    if re.search(reg_ex, currency) is not None:
        raise HuobiApiException(HuobiApiException.INPUT_ERROR, "[Input] " + currency + " is invalid currency")
def check_range(value, min_value, max_value, name):
    """Check that *value* is within [min_value, max_value], or is None.

    Raises HuobiApiException(INPUT_ERROR) when out of bounds; the error
    message text is unchanged.
    """
    if value is None:
        return
    # Chained comparison replaces `min_value > value or value > max_value`
    if not min_value <= value <= max_value:
        raise HuobiApiException(
            HuobiApiException.INPUT_ERROR,
            f"[Input] {name} is out of bound. {value} is not in "
            f"[{min_value},{max_value}]")
def check_should_not_none(value, name):
    """Raise HuobiApiException(INPUT_ERROR) when *value* is None."""
    if value is not None:
        return
    raise HuobiApiException(
        HuobiApiException.INPUT_ERROR, "[Input] " + name + " should not be null")
def check_should_none(value, name):
    """Raise HuobiApiException(INPUT_ERROR) when *value* is not None."""
    if value is None:
        return
    raise HuobiApiException(
        HuobiApiException.INPUT_ERROR, "[Input] " + name + " should be null")
def check_list(list_value, min_value, max_value, name):
    """Validate that the list length lies within [min_value, max_value]; None is skipped."""
    if list_value is None:
        return
    size = len(list_value)
    if size > max_value:
        raise HuobiApiException(HuobiApiException.INPUT_ERROR,
                                "[Input] " + name + " is out of bound, the max size is " + str(max_value))
    if size < min_value:
        raise HuobiApiException(HuobiApiException.INPUT_ERROR,
                                "[Input] " + name + " should contain " + str(min_value) + " item(s) at least")
def greater_or_equal(value, base, name):
    """Raise when *value* is below *base*; None values are skipped.

    Raises HuobiApiException when value < base.
    """
    if value is not None and value < base:
        # str(base) — base is typically numeric; concatenating it directly to
        # the message string raised TypeError instead of the intended exception.
        raise HuobiApiException(HuobiApiException.INPUT_ERROR,
                                "[Input] " + name + " should be greater than " + str(base))
def format_date(value, name):
    """Validate a 'YYYY-MM-DD' date string and return it normalized.

    Returns None when value is None. Raises HuobiApiException when value is
    not a string or cannot be parsed as a date.
    """
    if value is None:
        return None
    if not isinstance(value, str):
        raise HuobiApiException(HuobiApiException.INPUT_ERROR, "[Input] " + name + " must be string")
    try:
        new_time = time.strptime(value, "%Y-%m-%d")
        return time.strftime("%Y-%m-%d", new_time)
    except ValueError:
        # Narrowed from a bare `except:`; strptime signals bad input with
        # ValueError. Message also fixed ("is not invalid" double negative).
        raise HuobiApiException(HuobiApiException.INPUT_ERROR, "[Input] " + name + " is not a valid date format")
| 39.533333 | 113 | 0.636762 |
acf55d1934294f19a191ab3a5b38f72e59be38b1 | 1,829 | py | Python | serverctl_deployd/models/deployments.py | delta/serverctl_daemon | 3999539da01715affc8a3471d860294184756e6f | [
"MIT"
] | 2 | 2021-09-18T15:30:33.000Z | 2021-12-23T01:50:19.000Z | serverctl_deployd/models/deployments.py | delta/serverctl_daemon | 3999539da01715affc8a3471d860294184756e6f | [
"MIT"
] | 54 | 2021-09-18T12:22:38.000Z | 2022-03-30T13:25:17.000Z | serverctl_deployd/models/deployments.py | delta/serverctl_deployd | 3999539da01715affc8a3471d860294184756e6f | [
"MIT"
] | null | null | null | """
Models related to the deployments feature
"""
from enum import Enum
from typing import Dict, Optional
from pydantic import BaseModel
from pydantic.fields import Field
class DBType(str, Enum):
    """Enumeration of the database engines a deployment may declare."""
    # The str mixin makes members compare/serialize as their plain string value.
    MYSQL = "mysql"
    MONGODB = "mongodb"
class DBConfig(BaseModel):
    """Connection settings for a single database service (all fields required)."""
    # Which database engine this entry refers to.
    dbtype: DBType = Field(
        ..., title="Type of the database service"
    )
    # Credentials used when connecting to the service.
    username: str = Field(
        ..., title="Username for connecting to database service"
    )
    password: str = Field(
        ..., title="Password for connecting to the database service"
    )
class UpdateDBConfig(BaseModel):
    """Partial-update variant of DBConfig: every field optional so callers may omit unchanged values."""
    dbtype: Optional[DBType] = Field(
        None, title="Type of the database service"
    )
    username: Optional[str] = Field(
        None, title="Username for connecting to database service"
    )
    password: Optional[str] = Field(
        None, title="Password for connecting to the database service"
    )
class Deployment(BaseModel):
    """A docker-compose based deployment definition."""
    name: str = Field(
        ..., title="Name of the deployment"
    )
    # Raw file contents (not paths) — presumably written to disk by the caller; confirm.
    compose_file: str = Field(
        ..., title="Content of the docker-compose file"
    )
    env_file: Optional[str] = Field(
        None, title="Content of the .env file"
    )
    # Keyed by service name — presumably matching compose service names; verify against callers.
    databases: Optional[Dict[str, DBConfig]] = Field(
        None, title="List of database services"
    )
class UpdateDeployment(BaseModel):
    """Partial-update payload for a deployment; every field is optional."""
    # Annotated Optional[str] for consistency with the other nullable fields:
    # a plain `str` annotation with a None default rejects an explicit null
    # value during pydantic validation.
    compose_file: Optional[str] = Field(
        None, title="Content of the docker-compose file"
    )
    env_file: Optional[str] = Field(
        None, title="Content of the .env file"
    )
    databases: Optional[Dict[str, UpdateDBConfig]] = Field(
        None, title="List of database services"
    )
| 25.760563 | 69 | 0.6386 |
acf55de4ae8dcce3ff6862f5f55c7999ade07e1b | 1,430 | py | Python | tapis_cli/commands/taccapis/v2/apim/token_refresh.py | bpachev/tapis-cli | c3128fb5b63ef74e06b737bbd95ef28fb24f0d32 | [
"BSD-3-Clause"
] | 8 | 2020-10-18T22:48:23.000Z | 2022-01-10T09:16:14.000Z | tapis_cli/commands/taccapis/v2/apim/token_refresh.py | bpachev/tapis-cli | c3128fb5b63ef74e06b737bbd95ef28fb24f0d32 | [
"BSD-3-Clause"
] | 238 | 2019-09-04T14:37:54.000Z | 2020-04-15T16:24:24.000Z | tapis_cli/commands/taccapis/v2/apim/token_refresh.py | bpachev/tapis-cli | c3128fb5b63ef74e06b737bbd95ef28fb24f0d32 | [
"BSD-3-Clause"
] | 5 | 2019-09-20T04:23:49.000Z | 2020-01-16T17:45:14.000Z | from agavepy.agave import AgaveError
from requests.exceptions import HTTPError
from tapis_cli.display import Verbosity
from tapis_cli import et
from . import API_NAME, SERVICE_VERSION
from .models import Token
from .formatters import TokenFormatOne
__all__ = ['TokenRefresh']
class TokenRefresh(TokenFormatOne):
    """Refresh the current Tapis access token and display its fields.

    Output columns come from the Token model headers.
    """
    VERBOSITY = Verbosity.BRIEF
    EXTRA_VERBOSITY = Verbosity.RECORD_VERBOSE
    def get_parser(self, prog_name):
        # TODO - accept refresh token
        parser = super(TokenRefresh, self).get_parser(prog_name)
        return parser
    def take_action(self, parsed_args):
        parsed_args = super(TokenRefresh, self).preprocess_args(parsed_args)
        self.requests_client.setup(API_NAME, SERVICE_VERSION)
        self.update_payload(parsed_args)
        headers = super(TokenRefresh, self).render_headers(Token, parsed_args)
        try:
            result = self.tapis_client.refresh()
        except HTTPError as h:
            # A 400 from the OAuth server means the refresh could not be
            # performed, so point the user at creating a fresh token.
            if str(h).startswith('400'):
                raise AgaveError(
                    'Failed to refresh token. Try "tapis sessions token create"'
                )
            else:
                raise AgaveError(str(h))
        # NOTE(review): the refresh() return value bound above is discarded;
        # the displayed values are re-read from token_info — confirm intended.
        result = list()
        for h in headers:
            result.append(self.tapis_client.token.token_info.get(h))
        et.phone_home()
        return (tuple(headers), tuple(result))
| 31.777778 | 80 | 0.663636 |
acf55e548415cd0d9cbb31ec39c04cf4f82091f5 | 1,764 | py | Python | db/models.py | ehsanbarkhordar/balebot-twitter-follow-unfollow | c5ea76f57563181f2f1618fe8c3044ea7a9894f8 | [
"MIT"
] | null | null | null | db/models.py | ehsanbarkhordar/balebot-twitter-follow-unfollow | c5ea76f57563181f2f1618fe8c3044ea7a9894f8 | [
"MIT"
] | 5 | 2021-03-18T23:23:51.000Z | 2022-03-11T23:43:35.000Z | db/models.py | ehsanbarkhordar/balebot-twitter-follow-unfollow | c5ea76f57563181f2f1618fe8c3044ea7a9894f8 | [
"MIT"
] | null | null | null | from datetime import datetime
from sqlalchemy import Column, BigInteger, ForeignKey, Boolean, DateTime, String
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Account(Base):
    """ORM row linking a messaging-platform user to an authorized Twitter account."""
    __tablename__ = 'accounts'
    # Messaging-platform peer identifier and access hash for reaching the user.
    peer_id = Column(String, nullable=False)
    access_hash = Column(String, nullable=False)
    # Twitter OAuth credentials obtained during authorization.
    oauth_token = Column(String, nullable=False)
    oauth_token_secret = Column(String, nullable=False)
    # Twitter numeric user id (primary key) and handle.
    user_id = Column(BigInteger, nullable=False, primary_key=True)
    screen_name = Column(String, nullable=False)
    # Sync marker; defaults to False and is intentionally not set in __init__.
    is_sync = Column(Boolean, default=False)
    def __init__(self, peer_id, access_hash, oauth_token, oauth_token_secret, user_id, screen_name):
        self.peer_id = peer_id
        self.access_hash = access_hash
        self.oauth_token = oauth_token
        self.oauth_token_secret = oauth_token_secret
        self.user_id = user_id
        self.screen_name = screen_name
    def __repr__(self):
        # Debug-friendly dump of all mapped attributes.
        return "%s(%r)" % (self.__class__, self.__dict__)
class Friend(Base):
    """ORM row recording a follow relationship created on behalf of an Account."""
    __tablename__ = 'friends'
    # Composite primary key: the followed Twitter user plus the owning account.
    user_id = Column(BigInteger, primary_key=True)
    account_user_id = Column(BigInteger, ForeignKey('accounts.user_id'), primary_key=True)
    follow_datetime = Column(DateTime)
    # Left NULL until an unfollow happens; not populated by __init__.
    unfollow_datetime = Column(DateTime)
    unfollow_permission = Column(Boolean, default=False)
    follow_back = Column(Boolean, default=False)
    def __init__(self, user_id, account_user_id, follow_datetime, unfollow_permission=False):
        self.user_id = user_id
        self.account_user_id = account_user_id
        self.follow_datetime = follow_datetime
        self.unfollow_permission = unfollow_permission
    def __repr__(self):
        # Debug-friendly dump of all mapped attributes.
        return "%s(%r)" % (self.__class__, self.__dict__)
| 36.75 | 100 | 0.727891 |
acf55e93159a3b5ef148a855ae54eda84b3d5af7 | 3,273 | py | Python | utils.py | ramnasidharta/midena | d13d462a0d8449a50bd786ec655f46525f4b5b87 | [
"MIT"
] | null | null | null | utils.py | ramnasidharta/midena | d13d462a0d8449a50bd786ec655f46525f4b5b87 | [
"MIT"
] | 14 | 2019-04-08T15:20:33.000Z | 2019-06-22T22:44:37.000Z | utils.py | ramnasidharta/midena | d13d462a0d8449a50bd786ec655f46525f4b5b87 | [
"MIT"
] | null | null | null | import os.path
from model.RegularGrammar import RegularGrammar
from model.FiniteAutomata import FiniteAutomata
divisorStr = "--------------------------------------------------"
def printError(msg):
    """Print *msg* to stdout with a uniform [ERROR] prefix."""
    print("[ERROR] " + str(msg))
def startTestMsg(msg):
    """Print *msg* framed above and below by the module-level divider line."""
    for line in (divisorStr, msg, divisorStr):
        print(line)
def remove_flag(flags, flag):
    """Return *flags* with *flag* XOR-ed out when any of its bits are set.

    Note: XOR semantics are deliberately preserved — bits of *flag* that were
    clear in *flags* get toggled on, exactly as the original behaved.
    """
    return flags ^ flag if flags & flag else flags
def is_into(element, listOfLists: list) -> bool:
    """Return True if *element* is contained in any of the inner lists.

    Bug fix: the original tested `element in listOfLists` (the outer list),
    ignoring the loop variable entirely, so inner-list membership was never
    actually checked.
    """
    return any(element in inner for inner in listOfLists)
def regular_grammar_to_automata(grammar):
    """Build a FiniteAutomata equivalent to the given regular grammar.

    Each production A -> aB becomes a transition table[A][a] including B;
    a terminal production A -> a transitions to the accepting state "X".
    """
    # NOTE(review): `name` is computed but never passed to FiniteAutomata — confirm whether it should be.
    name = grammar.name + "FiniteAutomata"
    initial = grammar_symbol_form(grammar.root)
    table = {}
    sigma = set()
    acceptingState = "X"
    for prod in grammar.productions:
        # prod is (head, [bodies]); each body is "a" or "aB".
        state = prod[0]
        table[state] = {}
        for beta in prod[1]:
            symbol = beta[0]
            sigma.add(symbol)
            if symbol not in table[state]:
                table[state][symbol] = set()
            if len(beta) == 1:
                # Terminal-only production: jump to the synthetic accepting state.
                table[state][symbol].add(acceptingState)
            else:
                table[state][symbol].add(beta[1])
    # The accepting state has no outgoing transitions.
    table[acceptingState] = {}
    for t in sigma:
        table[acceptingState][t] = []
    # Normalize transition sets to lists for the FiniteAutomata constructor.
    for state in table.keys():
        for symbol in table[state]:
            table[state][symbol] = list(table[state][symbol])
    return FiniteAutomata(list(sigma), table, initial, [acceptingState])
def finite_automata_to_grammar(automata):
    """Build a RegularGrammar equivalent to the given finite automaton.

    Each transition state --a--> next becomes a production ALPHA -> aNEXT;
    transitions into accepting states additionally yield ALPHA -> a.
    """
    name = automata.name + "Grammar"
    root = grammar_symbol_form(automata.initial)
    sigma = set(automata.sigma)
    faStates = automata.states()
    grammarStates = list(map(lambda s: grammar_symbol_form(s), faStates))
    symbols = set(grammarStates)
    productions = []
    for state in faStates:
        stateTransictions = automata.table[state]
        alpha = grammar_symbol_form(state)
        beta = []
        for symbol in automata.sigma:
            if symbol in stateTransictions.keys():
                # Entries may encode several targets as "A, B"; flatten first.
                nextStates =\
                    next_states_separated(stateTransictions, symbol)
                beta += list(map(
                    lambda ns: symbol + grammar_symbol_form(ns),\
                    nextStates)
                )
                # Add the terminal production when an accepting state is reachable.
                add_terminal_symbol(
                    symbol, beta, nextStates, automata.accepting)
        productions.append((alpha, beta))
    return RegularGrammar(symbols, sigma, productions, root, name)
def add_terminal_symbol(symbol, beta, nextStates, accepting):
    """Append *symbol* to *beta* (in place) when any next state is accepting.

    Idiom fix: `any()` short-circuits instead of materializing a filtered list.
    """
    if any(state in accepting for state in nextStates):
        beta.append(symbol)
def grammar_symbol_form(state):
    """Uppercase *state* and separate its characters with underscores (e.g. 'q0' -> 'Q_0')."""
    return "_".join(ch for ch in state.upper())
def next_states_separated(transitions, symbol):
    """Flatten the targets for *symbol*; entries may encode several states as 'A, B'."""
    flattened = []
    for entry in transitions[symbol]:
        flattened.extend(entry.split(", "))
    return flattened
def promptFile(msg, optional = False):
    """Prompt the user for a filename until a valid answer is given.

    Re-prompts whenever findFile raises FileNotFoundError.
    """
    while True:
        try:
            fname = input(msg)
            # NOTE(review): validation runs only when optional is True and the
            # answer is non-empty; a non-optional answer is returned unchecked.
            # This looks inverted — confirm intended behavior with callers.
            if optional and fname != "":
                findFile(fname)
            return fname
        except FileNotFoundError:
            printError("File not found!")
def findFile(path):
    """Return True if *path* is an existing regular file; raise FileNotFoundError otherwise."""
    if os.path.isfile(path):
        return True
    raise FileNotFoundError()
| 28.215517 | 73 | 0.594867 |
acf55fef795e26a2a1d23ed007c54105e651a986 | 3,949 | py | Python | ahorcado.py | MrInternauta/Python-Juego-ahorcado | 6f163dffd9bd36aef81f179c60f088a67f1762dd | [
"MIT"
] | null | null | null | ahorcado.py | MrInternauta/Python-Juego-ahorcado | 6f163dffd9bd36aef81f179c60f088a67f1762dd | [
"MIT"
] | null | null | null | ahorcado.py | MrInternauta/Python-Juego-ahorcado | 6f163dffd9bd36aef81f179c60f088a67f1762dd | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# 0
# /|\
# |
# /\
IMAGES = ['''
+---+
| |
|
|
|
|
=========''', '''
+---+
| |
O |
|
|
|
=========''', '''
+---+
| |
O |
| |
|
|
=========''', '''
+---+
| |
O |
/| |
|
|
=========''', '''
+---+
| |
O |
/|\ |
|
|
=========''', '''
+---+
| |
O |
/|\ |
| |
|
=========''', '''
+---+
| |
O |
/|\ |
| |
/ |
=========''', '''
+---+
| |
O |
/|\ |
| |
/ \ |
=========''', '''
''']
#Main del programa
def main():
    """Entry point: greet the player, read the secret word and start guessing."""
    name = get_name()
    to_welcome(name)
    word = get_word()
    spaces_letters(word)
    try_get_word(word)
#Obtiene el nombre
def get_name():
    """Prompt for and return the player's name (raw_input: this script targets Python 2)."""
    return str(raw_input('Ingresa tu nombre: '))
#Mensaje de Bienvenida
def to_welcome(name):
    """Print the welcome banner followed by the player's name."""
    banner = '================================'
    print(banner)
    print('Bienvendo al juego del Ahorcado')
    print(banner)
    print('{}'.format(name))
def get_word():
return str(raw_input('Ingresa palabra a adivinar: ')).upper()
#Genera un espacio de las palabras
def spaces_letters(word):
    """Print one '|___| ' placeholder per letter of *word*, preceded by a blank line."""
    placeholders = '|___| ' * len(word)
    print('\n')
    print(placeholders)
#Pide la letra y verifica si es correcta
def try_get_word(word):
    """Main guessing loop: read letters until the word is revealed or 7 misses occur.

    Revealed letters are stored in array_space as one-element lists replacing
    the '|___| ' placeholders; win() compares that structure against the word.
    """
    word_array = list()
    # NOTE(review): word_array_true and letter_true are assigned but never
    # used anywhere in this function — candidates for removal.
    word_array_true = list()
    complete = False
    # Count of wrong guesses; also indexes the gallows drawings in IMAGES.
    intents = 0
    array_space = ['|___| '] * len(word)
    letter_true = list()
    print(IMAGES[intents])
    while complete == False:
        if intents < 7:
            # Read the next guess, uppercased to match the secret word.
            word_array.append(str(raw_input('Ingresa una letra: ')).upper())
            letter_false = word.find(str(word_array[len(word_array)-1]))
            if(letter_false == -1):
                intents += 1
                print(IMAGES[intents])
                print('...:::Letra incorrecta:::...\n')
            else:
                print(IMAGES[intents])
                # All guesses so far, re-scanned against every word position.
                array_word_correct = ''.join(word_array)
                for i in range(0, len(word), 1):
                    for j in range(0, len(array_word_correct), 1):
                        if word[i] == array_word_correct[j]:
                            #//Detrerminar si gane
                            # NOTE(review): win() is checked before array_space
                            # is updated for position i — confirm the win is
                            # detected on the correct turn.
                            result = win(array_space, word)
                            if( result == True):
                                print('----------------------------')
                                print('...::: G A N A S T E :::...')
                                print('----------------------------')
                                print(IMAGES[intents])
                                complete = True
                            else:
                                array_space[i] = list(word[i])
                                print('\n...:::Letra correcta:::...\n')
                                print(array_space)
        else:
            print('...:::Has perdido:::...\nLa palabra correcta era {}.'.format(word))
            complete = True
    try_now()
#gane
def win(array_space, word):
    """Return True when every slot of *array_space* has been revealed.

    try_get_word stores each revealed character as a one-element list, so the
    fully revealed board equals [list(ch) for ch in word]. The previous
    if/else returning True/False is collapsed into a direct comparison.
    """
    return [list(ch) for ch in word] == array_space
#Funcion para volver a intertalo
def try_now():
    """Ask whether to play again; answering '1' starts a new round with a new word."""
    res = str(raw_input('\n¿Quieres intentarlo nuevamente?\n1 = si:\ncualquier cosa = no:\n'))
    if res == '1':
        word = get_word()
        spaces_letters(word)
        try_get_word(word)
#Indica donde incia el programa
if __name__ == '__main__':
    # Run the game only when executed directly, not when imported.
    main()
| 22.565714 | 95 | 0.382629 |
acf5609c56bbda2a2e580ea8bb4af50bfe09e893 | 4,023 | py | Python | setup.py | denizhanpak/dopamine | 1603cf5bac05de1db7f0d958d4a99d185c2ad758 | [
"Apache-2.0"
] | null | null | null | setup.py | denizhanpak/dopamine | 1603cf5bac05de1db7f0d958d4a99d185c2ad758 | [
"Apache-2.0"
] | null | null | null | setup.py | denizhanpak/dopamine | 1603cf5bac05de1db7f0d958d4a99d185c2ad758 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2018 The Dopamine Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2018 The Dopamine Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup script for Dopamine.
This script will install Dopamine as a Python module.
See: https://github.com/google/dopamine
"""
import codecs
from os import path
from setuptools import find_packages
from setuptools import setup
# Package metadata and setup() invocation for the dopamine_rl distribution.
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file.
with codecs.open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
# NOTE(review): install pins no tensorflow version while tests require
# >= 1.9.0 — confirm the asymmetry is intended.
install_requires = ['gin-config >= 0.1.1', 'absl-py >= 0.2.2',
                    'tensorflow', 'opencv-python >= 3.4.1.15',
                    'gym >= 0.10.5']
tests_require = ['gin-config >= 0.1.1', 'absl-py >= 0.2.2',
                 'tensorflow >= 1.9.0', 'opencv-python >= 3.4.1.15',
                 'gym >= 0.10.5', 'mock >= 1.0.0']
dopamine_description = (
    'Dopamine: A framework for flexible Reinforcement Learning research')
setup(
    name='dopamine_rl',
    version='1.0.3',
    include_package_data=True,
    packages=find_packages(exclude=['docs']),  # Required
    package_data={'testdata': ['testdata/*.gin']},
    install_requires=install_requires,
    tests_require=tests_require,
    description=dopamine_description,
    long_description=long_description,
    url='https://github.com/google/dopamine',  # Optional
    author='The Dopamine Team',  # Optional
    author_email='opensource@google.com',
    classifiers=[  # Optional
        'Development Status :: 4 - Beta',
        # Indicate who your project is intended for
        'Intended Audience :: Developers',
        'Intended Audience :: Education',
        'Intended Audience :: Science/Research',
        # Pick your license as you wish
        'License :: OSI Approved :: Apache Software License',
        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Topic :: Scientific/Engineering',
        'Topic :: Scientific/Engineering :: Mathematics',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'Topic :: Software Development',
        'Topic :: Software Development :: Libraries',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    project_urls={  # Optional
        'Documentation': 'https://github.com/google/dopamine',
        'Bug Reports': 'https://github.com/google/dopamine/issues',
        'Source': 'https://github.com/google/dopamine',
    },
    license='Apache 2.0',
    keywords='dopamine reinforcement-learning python machine learning'
)
| 37.598131 | 77 | 0.675118 |
acf56233ed2ff0a9574e659b12e14a4752dec0f1 | 5,911 | py | Python | tests/sentry/tasks/test_store.py | Ali-Tahir/sentry | aa7b306c5ea671ac002a3524982563679557cb31 | [
"BSD-3-Clause"
] | null | null | null | tests/sentry/tasks/test_store.py | Ali-Tahir/sentry | aa7b306c5ea671ac002a3524982563679557cb31 | [
"BSD-3-Clause"
] | null | null | null | tests/sentry/tasks/test_store.py | Ali-Tahir/sentry | aa7b306c5ea671ac002a3524982563679557cb31 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
import mock
import uuid
from time import time
from sentry import quotas, tsdb
from sentry.event_manager import EventManager, HashDiscarded
from sentry.plugins.base.v2 import Plugin2
from sentry.tasks.store import preprocess_event, process_event, save_event
from sentry.testutils import PluginTestCase
from sentry.utils.dates import to_datetime
class BasicPreprocessorPlugin(Plugin2):
    """Test fixture plugin: returns platform-dependent event preprocessors."""

    def get_event_preprocessors(self, data):
        def remove_extra(data):
            del data["extra"]
            return data

        def put_on_hold(data):
            data["unprocessed"] = True
            return data

        platform = data.get("platform")
        if platform == "mattlang":
            return [remove_extra, lambda x: None]
        if platform == "noop":
            return [lambda data: None]
        if platform == "holdmeclose":
            return [put_on_hold]
        return []

    def is_enabled(self, project=None):
        return True
class StoreTasksTest(PluginTestCase):
    """Tests for the preprocess/process/save event pipeline tasks.

    Note: with stacked mock.patch decorators the bottom-most patch maps to
    the first mock argument.
    """
    plugin = BasicPreprocessorPlugin
    @mock.patch("sentry.tasks.store.save_event")
    @mock.patch("sentry.tasks.store.process_event")
    def test_move_to_process_event(self, mock_process_event, mock_save_event):
        # "mattlang" has registered preprocessors, so the event must be
        # routed through process_event rather than saved directly.
        project = self.create_project()
        data = {
            "project": project.id,
            "platform": "mattlang",
            "logentry": {"formatted": "test"},
            "extra": {"foo": "bar"},
        }
        preprocess_event(data=data)
        assert mock_process_event.delay.call_count == 1
        assert mock_save_event.delay.call_count == 0
    @mock.patch("sentry.tasks.store.save_event")
    @mock.patch("sentry.tasks.store.process_event")
    def test_move_to_save_event(self, mock_process_event, mock_save_event):
        # No preprocessors match, so the event should skip straight to save.
        project = self.create_project()
        data = {
            "project": project.id,
            "platform": "NOTMATTLANG",
            "logentry": {"formatted": "test"},
            "extra": {"foo": "bar"},
        }
        preprocess_event(data=data)
        assert mock_process_event.delay.call_count == 0
        assert mock_save_event.delay.call_count == 1
    @mock.patch("sentry.tasks.store.save_event")
    @mock.patch("sentry.tasks.store.default_cache")
    def test_process_event_mutate_and_save(self, mock_default_cache, mock_save_event):
        # A preprocessor mutates the event ("extra" removed), so the cache
        # entry must be rewritten before save_event is scheduled.
        project = self.create_project()
        data = {
            "project": project.id,
            "platform": "mattlang",
            "logentry": {"formatted": "test"},
            "extra": {"foo": "bar"},
        }
        mock_default_cache.get.return_value = data
        process_event(cache_key="e:1", start_time=1)
        # The event mutated, so make sure we save it back
        (_, (key, event, duration), _), = mock_default_cache.set.mock_calls
        assert key == "e:1"
        assert "extra" not in event
        assert duration == 3600
        mock_save_event.delay.assert_called_once_with(
            cache_key="e:1", data=None, start_time=1, event_id=None, project_id=project.id
        )
    @mock.patch("sentry.tasks.store.save_event")
    @mock.patch("sentry.tasks.store.default_cache")
    def test_process_event_no_mutate_and_save(self, mock_default_cache, mock_save_event):
        # The "noop" preprocessor returns None, so the cache stays untouched.
        project = self.create_project()
        data = {
            "project": project.id,
            "platform": "noop",
            "logentry": {"formatted": "test"},
            "extra": {"foo": "bar"},
        }
        mock_default_cache.get.return_value = data
        process_event(cache_key="e:1", start_time=1)
        # The event did not mutate, so we shouldn't reset it in cache
        assert mock_default_cache.set.call_count == 0
        mock_save_event.delay.assert_called_once_with(
            cache_key="e:1", data=None, start_time=1, event_id=None, project_id=project.id
        )
    @mock.patch("sentry.tasks.store.save_event")
    @mock.patch("sentry.tasks.store.default_cache")
    def test_process_event_unprocessed(self, mock_default_cache, mock_save_event):
        # The "holdmeclose" preprocessor flags the event as unprocessed; the
        # flagged payload must be written back to cache and still saved.
        project = self.create_project()
        data = {
            "project": project.id,
            "platform": "holdmeclose",
            "logentry": {"formatted": "test"},
            "extra": {"foo": "bar"},
        }
        mock_default_cache.get.return_value = data
        process_event(cache_key="e:1", start_time=1)
        (_, (key, event, duration), _), = mock_default_cache.set.mock_calls
        assert key == "e:1"
        assert event["unprocessed"] is True
        assert duration == 3600
        mock_save_event.delay.assert_called_once_with(
            cache_key="e:1", data=None, start_time=1, event_id=None, project_id=project.id
        )
    @mock.patch.object(tsdb, "incr_multi")
    @mock.patch.object(quotas, "refund")
    def test_hash_discarded_raised(self, mock_refund, mock_incr):
        # When EventManager.save raises HashDiscarded, the event counts as
        # received + blacklisted + discarded in the tsdb rollups.
        project = self.create_project()
        data = {
            "project": project.id,
            "platform": "NOTMATTLANG",
            "logentry": {"formatted": "test"},
            "event_id": uuid.uuid4().hex,
            "extra": {"foo": "bar"},
        }
        now = time()
        mock_save = mock.Mock()
        mock_save.side_effect = HashDiscarded
        with mock.patch.object(EventManager, "save", mock_save):
            save_event(data=data, start_time=now)
            mock_incr.assert_called_with(
                [
                    (tsdb.models.project_total_received, project.id),
                    (tsdb.models.organization_total_received, project.organization.id),
                    (tsdb.models.project_total_blacklisted, project.id),
                    (tsdb.models.organization_total_blacklisted, project.organization_id),
                    (tsdb.models.project_total_received_discarded, project.id),
                ],
                timestamp=to_datetime(now),
            )
| 32.838889 | 90 | 0.614786 |
acf5634353a2c1903170eb7a643c628c44e74750 | 631 | py | Python | 400-500Q/454.py | rampup01/Leetcode | 8450a95a966ef83b24ffe6450f06ce8de92b3efb | [
"MIT"
] | 990 | 2018-06-05T11:49:22.000Z | 2022-03-31T08:59:17.000Z | 400-500Q/454.py | rampup01/Leetcode | 8450a95a966ef83b24ffe6450f06ce8de92b3efb | [
"MIT"
] | 1 | 2021-11-01T01:29:38.000Z | 2021-11-01T01:29:38.000Z | 400-500Q/454.py | rampup01/Leetcode | 8450a95a966ef83b24ffe6450f06ce8de92b3efb | [
"MIT"
] | 482 | 2018-06-12T22:16:53.000Z | 2022-03-29T00:23:29.000Z | class Solution(object):
def fourSumCount(self, A, B, C, D):
"""
:type A: List[int]
:type B: List[int]
:type C: List[int]
:type D: List[int]
:rtype: int
"""
hashTable ={}
for a in A:
for b in B:
if a+b in hashTable:
hashTable[a+b] += 1
else:
hashTable[a+b] = 1
result = 0
for c in C:
for d in D:
if -(c+d) in hashTable:
result += hashTable[-(c+d)]
return result
| 25.24 | 47 | 0.362916 |
acf56371d3243f4be45dfe2f68cd521799ad3bf1 | 1,649 | py | Python | PyGame/pygame3/ex9/myGameEngine.py | hoppfull/Legacy-Python | 43f465bfdb76c91f2ac16aabb0783fdf5f459adb | [
"MIT"
] | null | null | null | PyGame/pygame3/ex9/myGameEngine.py | hoppfull/Legacy-Python | 43f465bfdb76c91f2ac16aabb0783fdf5f459adb | [
"MIT"
] | null | null | null | PyGame/pygame3/ex9/myGameEngine.py | hoppfull/Legacy-Python | 43f465bfdb76c91f2ac16aabb0783fdf5f459adb | [
"MIT"
] | null | null | null | import pygame as pg
from pygame.locals import KEYUP, K_ESCAPE, QUIT, MOUSEBUTTONUP, MOUSEMOTION
class GameEngine:
    """Minimal pygame loop: input -> update -> draw at a fixed frame rate.

    Subclasses override mouseUp/mouseMotion/update/draw.
    """
    def __init__(self, size = (640, 480), fps = 1):
        # size: window dimensions in pixels; fps: target frames per second.
        pg.init()
        self.size, self.fps = size, fps
        self.screen = pg.display.set_mode(self.size)
        self.running = True
    def mainLoop(self):
        # Classic game loop; runs until an event handler clears self.running.
        # NOTE(review): a fresh Clock is constructed every frame — confirm
        # the frame limiting behaves as intended versus one shared Clock.
        while(self.running):
            self.inputEvents()
            self.update()
            self.draw()
            pg.display.flip()
            pg.time.Clock().tick(self.fps)
    def inputEvents(self):
        # Drain the pygame event queue and dispatch to handlers.
        for events in pg.event.get():
            if(events.type == QUIT): #trigger if user press the x on the top right corner of the program window
                self.running = False
            elif(events.type == KEYUP and events.key == K_ESCAPE): #trigger if user press escape
                self.running = False
            elif(events.type == MOUSEBUTTONUP): #trigger if user press any mousebutton and if user move the mouse
                self.mouseUp(events.button, events.pos)
            elif(events.type == MOUSEMOTION): #trigger if user press and hold any mousebutton while and if the user move the mouse
                self.mouseMotion(events.buttons, events.pos, events.rel)
    def mouseUp(self, button, pos):
        # Hook for subclasses: called on mouse button release.
        pass
    def mouseMotion(self, buttons, pos, rel):
        # Hook for subclasses: called on mouse movement.
        pass
    def update(self):
        # Hook for subclasses: per-frame game state update.
        pass
    def draw(self):
        # Hook for subclasses: per-frame rendering to self.screen.
        pass
##2D-Vector mathematics:
def get_magnitude(vector):
    """Return the Euclidean length of a 2D vector."""
    return (vector[0] ** 2 + vector[1] ** 2) ** 0.5
def get_unitvector(vector):
    """Return *vector* normalized to unit length; a zero vector yields (0, 1)."""
    # Magnitude computation inlined from get_magnitude.
    mag = (vector[0] ** 2 + vector[1] ** 2) ** 0.5
    if mag == 0:
        return (0, 1)
    return (vector[0] / mag, vector[1] / mag)
def get_force(strength, direction, distance=1):
    """Scale *direction* by strength/distance; zero distance yields no force."""
    if distance == 0:
        return (0, 0)
    return (direction[0] * strength / distance, direction[1] * strength / distance)
acf563ef207f982a4661b5bf6b5cc12a752cba77 | 402 | py | Python | biosamples_v4/exceptions.py | ebi-ait/python_biosamples-v4_lib | ce7e5a98d5352003b1f0a320a47fcab83d566566 | [
"Apache-2.0"
] | 2 | 2020-10-14T10:30:14.000Z | 2020-10-21T16:22:56.000Z | biosamples_v4/exceptions.py | ebi-ait/python_biosamples-v4_lib | ce7e5a98d5352003b1f0a320a47fcab83d566566 | [
"Apache-2.0"
] | 1 | 2020-06-29T14:05:39.000Z | 2020-06-29T14:05:39.000Z | biosamples_v4/exceptions.py | ebi-ait/python_biosamples-v4_lib | ce7e5a98d5352003b1f0a320a47fcab83d566566 | [
"Apache-2.0"
] | null | null | null | class JWTMissingException(Exception):
pass
class CursorNotFoundException(KeyError):
pass
class LinkNotFoundException(KeyError):
pass
class ConvertionException(Exception):
pass
class SampleConvertionException(ConvertionException):
pass
class AttributeConvertionException(ConvertionException):
pass
class RelationshipConvertionException(ConvertionException):
pass
| 14.888889 | 59 | 0.79602 |
acf56444f6ecd4fac7751582807e6f3c003a83a6 | 4,457 | py | Python | Old_Model/resnet18.py | ZouJiu1/Mask_face_recognitionZ | bac006718627d869b8ffaaa2c0605a300efd35e8 | [
"MIT"
] | 17 | 2020-12-29T06:00:35.000Z | 2022-02-24T13:38:41.000Z | Old_Model/resnet18.py | LY-Road/Mask-Face-Recognition | f256a38084073718628b99a09622f5c830a232e7 | [
"MIT"
] | 2 | 2021-01-02T13:12:51.000Z | 2021-07-03T04:37:18.000Z | Old_Model/resnet18.py | LY-Road/Mask-Face-Recognition | f256a38084073718628b99a09622f5c830a232e7 | [
"MIT"
] | 12 | 2021-01-06T08:38:04.000Z | 2022-01-27T15:44:21.000Z | import torch
import torch.nn as nn
import torchvision.models as models
class Resnet18Center(nn.Module):
    """ResNet-18 for FaceNet training with center loss + cross entropy loss.

    Args:
        num_classes (int): Number of classes in the training dataset,
            required for the cross entropy loss head.
        embedding_dimension (int): Dimension of the embedding produced by the
            model. Defaults to 128.
        pretrained (bool): If True, start from ImageNet-pretrained weights
            (downloaded from the PyTorch model zoo). Defaults to False.
    """
    def __init__(self, num_classes, embedding_dimension=128, pretrained=False):
        super(Resnet18Center, self).__init__()
        self.model = models.resnet18(pretrained=pretrained)
        input_features_fc_layer = self.model.fc.in_features
        # Output embedding
        self.model.fc = nn.Linear(input_features_fc_layer, embedding_dimension)
        # Output logits for cross entropy loss
        self.model.classifier = nn.Linear(embedding_dimension, num_classes)
    def l2_norm(self, input):
        """Perform l2 normalization operation on an input vector.

        code copied from liorshk's repository: https://github.com/liorshk/facenet_pytorch/blob/master/model.py
        NOTE(review): duplicated verbatim in Resnet18Triplet — consider a shared helper.
        """
        input_size = input.size()
        buffer = torch.pow(input, 2)
        # 1e-10 guards against division by zero for all-zero rows.
        normp = torch.sum(buffer, 1).add_(1e-10)
        norm = torch.sqrt(normp)
        _output = torch.div(input, norm.view(-1, 1).expand_as(input))
        output = _output.view(input_size)
        return output
    def forward(self, images):
        """Return the l2-normalized embedding scaled by alpha."""
        embedding = self.model(images)
        embedding = self.l2_norm(embedding)
        # Multiply by alpha = 10 as suggested in https://arxiv.org/pdf/1703.09507.pdf
        # Equation 9: number of classes in VGGFace2 dataset = 9131
        # lower bound on alpha = 5, multiply alpha by 2; alpha = 10
        alpha = 10
        embedding = embedding * alpha
        return embedding
    def forward_training(self, images):
        """Forward pass during training to output both the l2-normed embedding vector and logits
        for cross entropy loss and center loss.
        """
        embedding = self.forward(images)
        logits = self.model.classifier(embedding)
        return embedding, logits
class Resnet18Triplet(nn.Module):
    """ResNet-18 for FaceNet training with triplet loss.

    Args:
        embedding_dimension (int): Dimension of the embedding produced by the
            model. Defaults to 128.
        pretrained (bool): If True, start from ImageNet-pretrained weights
            (downloaded from the PyTorch model zoo). Defaults to False.
    """
    def __init__(self, embedding_dimension=128, pretrained=False):
        super(Resnet18Triplet, self).__init__()
        self.model = models.resnet18(pretrained=pretrained)
        input_features_fc_layer = self.model.fc.in_features
        # Output embedding
        self.model.fc = nn.Linear(input_features_fc_layer, embedding_dimension)
    def l2_norm(self, input):
        """Perform l2 normalization operation on an input vector.

        code copied from liorshk's repository: https://github.com/liorshk/facenet_pytorch/blob/master/model.py
        NOTE(review): duplicated verbatim in Resnet18Center — consider a shared helper.
        """
        input_size = input.size()
        buffer = torch.pow(input, 2)
        # 1e-10 guards against division by zero for all-zero rows.
        normp = torch.sum(buffer, 1).add_(1e-10)
        norm = torch.sqrt(normp)
        _output = torch.div(input, norm.view(-1, 1).expand_as(input))
        output = _output.view(input_size)
        return output
    def forward(self, images):
        """Return the l2-normalized embedding scaled by alpha."""
        embedding = self.model(images)
        embedding = self.l2_norm(embedding)
        # Multiply by alpha = 10 as suggested in https://arxiv.org/pdf/1703.09507.pdf
        # Equation 9: number of classes in VGGFace2 dataset = 9131
        # lower bound on alpha = 5, multiply alpha by 2; alpha = 10
        alpha = 10
        embedding = embedding * alpha
        return embedding
acf5655f8a9fbcf0f719272160ec767f457f9847 | 81 | py | Python | backup.py | johnjones4/Doomsday-Machine-2 | 63e9aedc1b10c556ec19c7987071f48ffd4e8524 | [
"MIT"
] | 42 | 2017-07-20T10:57:13.000Z | 2020-03-14T09:14:06.000Z | backup.py | johnjones4/Doomsday-Machine-2 | 63e9aedc1b10c556ec19c7987071f48ffd4e8524 | [
"MIT"
] | 1 | 2020-01-22T05:39:19.000Z | 2020-01-22T05:39:19.000Z | backup.py | johnjones4/Doomsday-Machine | 63e9aedc1b10c556ec19c7987071f48ffd4e8524 | [
"MIT"
] | 2 | 2018-09-16T14:11:32.000Z | 2019-10-06T01:32:07.000Z | from doomsdaymachine.backup import start
if __name__ == '__main__':
start()
| 16.2 | 40 | 0.728395 |
acf5656f3c0b0fa52ba446be07a8dcfbb214c089 | 2,006 | py | Python | mmsr/models/archs/contras_extractor_arch.py | yumingj/C2-Matching | fa171ca6707c6f16a5d04194ce866ea70bb21d2b | [
"Apache-2.0"
] | 111 | 2021-05-31T17:15:41.000Z | 2022-03-30T03:06:31.000Z | mmsr/models/archs/contras_extractor_arch.py | yumingj/C2-Matching | fa171ca6707c6f16a5d04194ce866ea70bb21d2b | [
"Apache-2.0"
] | 20 | 2021-06-02T03:03:25.000Z | 2022-03-28T03:49:08.000Z | mmsr/models/archs/contras_extractor_arch.py | yumingj/C2-Matching | fa171ca6707c6f16a5d04194ce866ea70bb21d2b | [
"Apache-2.0"
] | 21 | 2021-06-04T05:49:41.000Z | 2022-03-18T12:45:24.000Z | from collections import OrderedDict
import torch
import torch.nn as nn
import torchvision.models.vgg as vgg
class ContrasExtractorLayer(nn.Module):
    """VGG16 front-end (through conv3_1) used as a contrastive feature extractor.

    Inputs are expected in the [0, 1] range; they are normalized with the
    ImageNet statistics before being passed through the truncated VGG16.
    """

    def __init__(self):
        super(ContrasExtractorLayer, self).__init__()
        layer_names = [
            'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1', 'conv2_1',
            'relu2_1', 'conv2_2', 'relu2_2', 'pool2', 'conv3_1', 'relu3_1',
            'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'pool3', 'conv4_1',
            'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3', 'relu4_3', 'pool4',
            'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3',
            'pool5'
        ]
        # Keep every pretrained layer up to and including conv3_1.
        last = layer_names.index('conv3_1')
        pretrained = getattr(vgg, 'vgg16')(pretrained=True).features[:last + 1]
        self.model = nn.Sequential(OrderedDict(zip(layer_names, pretrained)))
        # ImageNet mean/std for images with range [0, 1].
        self.register_buffer(
            'mean', torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1))
        self.register_buffer(
            'std', torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1))

    def forward(self, batch):
        normalized = (batch - self.mean) / self.std
        return self.model(normalized)
class ContrasExtractorSep(nn.Module):
    """Two independent ContrasExtractorLayer branches, one per input image."""

    def __init__(self):
        super(ContrasExtractorSep, self).__init__()
        # Separate (non-shared) extractors for the two images.
        self.feature_extraction_image1 = ContrasExtractorLayer()
        self.feature_extraction_image2 = ContrasExtractorLayer()

    def forward(self, image1, image2):
        return {
            'dense_features1': self.feature_extraction_image1(image1),
            'dense_features2': self.feature_extraction_image2(image2)
        }
acf5662b46fe9d40ec03a5ab1b047a4f73755315 | 3,763 | py | Python | mathlab/module_04.py | EnzoTonti/Lectures | 42670674ad6191e8c7eb95234b8b27e7ab40959c | [
"BSD-3-Clause"
] | 1 | 2016-04-02T15:04:18.000Z | 2016-04-02T15:04:18.000Z | mathlab/module_04.py | beginWithMatlab/forStudents | 42670674ad6191e8c7eb95234b8b27e7ab40959c | [
"BSD-3-Clause"
] | null | null | null | mathlab/module_04.py | beginWithMatlab/forStudents | 42670674ad6191e8c7eb95234b8b27e7ab40959c | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ####################################################################################
# Copyright (c) 2016, Francesco De Carlo #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions are met: #
# #
# * Redistributions of source code must retain the above copyright notice, this #
# list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright notice, #
# this list of conditions and the following disclaimer in the documentation #
# and/or other materials provided with the distribution. #
# #
# * Neither the name of project nor the names of its #
# contributors may be used to endorse or promote products derived from #
# this software without specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" #
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE #
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE #
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE #
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL #
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER #
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, #
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE #
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #
# ####################################################################################
"""
Module for describing .....
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy
__authors__ = "First Name Last Name"
__copyright__ = "Copyright (c) 2016, Affiliation"
__version__ = "0.1.0"
__docformat__ = "restructuredtext en"
__all__ = ['function_03',
'function_04']
def function_03(parameter_01, parameter_02, parameter_03):
    """
    Return the sum of the three parameters.

    Parameters
    ----------
    parameter_01 : type
        First addend.
    parameter_02 : type
        Second addend.
    parameter_03 : type
        Third addend.

    Returns
    -------
    return_01
        Sum of the three parameters.
    """
    return parameter_01 + parameter_02 + parameter_03
def function_04(parameter_01, parameter_02, parameter_03):
    """
    Return the sum of the three parameters.

    Parameters
    ----------
    parameter_01 : type
        First addend.
    parameter_02 : type
        Second addend.
    parameter_03 : type
        Third addend.

    Returns
    -------
    return_01
        Sum of the three parameters.
    """
    return parameter_01 + parameter_02 + parameter_03
| 38.010101 | 86 | 0.52219 |
acf566fbf04ef5578320eaebd09dba8ce796ed80 | 311 | py | Python | package/tests/data/postprocessing_examples/input/test_code2.py | MrKriss/stonemason | d78becc9168c2566b31b48c9a951e2823bc98362 | [
"MIT"
] | 2 | 2017-11-13T17:40:52.000Z | 2021-05-08T15:58:28.000Z | package/tests/data/postprocessing_examples/input/test_code2.py | MrKriss/masonry | d78becc9168c2566b31b48c9a951e2823bc98362 | [
"MIT"
] | 3 | 2017-09-03T22:58:37.000Z | 2017-09-12T21:45:27.000Z | package/tests/data/postprocessing_examples/input/test_code2.py | MrKriss/stonemason | d78becc9168c2566b31b48c9a951e2823bc98362 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Im a doc sting!!!
"""
import sys
import os
from fileinput import FileInput
# Here are some data science libraries
import pandas as pd
import numpy as np
MY_CONSTANT = 12124
if __name__ == '__main__':
print(function1(5, 6))
print(function2(5, 6))
| 14.136364 | 38 | 0.675241 |
acf5672c76bb5b44bb89937083f8fef1210f94b8 | 8,075 | py | Python | scripts/match_ampsim.py | mkarim2017/insarzd | e7d05f836e7ca044166e38bad549629ed00d71f1 | [
"ECL-2.0",
"Apache-2.0"
] | 28 | 2019-10-04T01:18:29.000Z | 2022-02-15T11:18:18.000Z | scripts/match_ampsim.py | mkarim2017/insarzd | e7d05f836e7ca044166e38bad549629ed00d71f1 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | scripts/match_ampsim.py | mkarim2017/insarzd | e7d05f836e7ca044166e38bad549629ed00d71f1 | [
"ECL-2.0",
"Apache-2.0"
] | 11 | 2019-10-04T08:36:54.000Z | 2021-06-21T08:47:28.000Z | #!/usr/bin/env python3
#Cunren Liang, 5-MAY-2015
#JPL/Caltech
import os
import sys
import shutil
import argparse
import pickle
import isce
import isceobj
from isceobj.Location.Offset import OffsetField,Offset
from mroipac.ampcor.Ampcor import Ampcor
from crlpac import getWidth
from crlpac import getLength
from crlpac import runCmd
from crlpac import cullOffset
from crlpac import getOffset
def cmdLineParse():
    '''
    Parse the command line for the radar-vs-simulation matching script.
    '''
    parser = argparse.ArgumentParser(
        description='matching between radar and simulation')
    parser.add_argument('-m', '--amp', dest='amp', type=str, required=True,
            help = 'amplitude image')
    parser.add_argument('-s', '--sim', dest='sim', type=str, required=True,
            help = 'simulated radar image')
    parser.add_argument('-f', '--aff', dest='aff', type=str, required=True,
            help = '(output) affine transformation')
    parser.add_argument('-r', '--rlks', dest='rlks', type=int, default=1,
            help = 'range looks')
    parser.add_argument('-a', '--alks', dest='alks', type=int, default=1,
            help = 'azimuth looks')
    # With no arguments at all, show the usage text and abort instead of
    # letting argparse fail on the missing required options.
    if len(sys.argv) <= 1:
        print('')
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
if __name__ == '__main__':
    # NOTE(review): indentation was reconstructed from a whitespace-stripped
    # dump -- verify block nesting against the original file.
    inps = cmdLineParse()

    # Sizes of the input amplitude and simulated images, and after multilooking.
    ampWidth = getWidth(inps.amp + '.xml')
    ampLength = getLength(inps.amp + '.xml')
    lookAmpWidth = int(ampWidth/inps.rlks)
    lookAmpLength = int(ampLength/inps.alks)
    simWidth = getWidth(inps.sim + '.xml')
    simLength = getLength(inps.sim + '.xml')
    lookSimWidth = int(simWidth/inps.rlks)
    lookSimLength = int(simLength/inps.alks)

    #prepare parameters for ampcor
    lookAmp = 'float_{}rlks_{}alks.amp'.format(inps.rlks, inps.alks)
    lookSim = 'float_{}rlks_{}alks.sim'.format(inps.rlks, inps.alks)
    offsetFile = 'ampsim.off'
    numAzimuth = 30 #number of matches in azimuth
    numRange = 30 #number of matches in range
    skipLines = int(lookAmpLength / numAzimuth)
    skipSamples = int(lookAmpWidth / numRange)
    fftWidth = 64 #matching window width
    fftLength = 64 #matching window length
    searchMaxWidth = 60
    searchMaxLength = 60
    s1 = 0
    s2 = 1

    #take looks and prepare data here!!!!!!!!!
    #do matching at matchdir
    matchdir = 'match_ampsim'
    os.mkdir(matchdir)
    os.chdir(matchdir)

    # Multilook the amplitude image, convert to magnitude, drop temporaries.
    tmpAmp = 'tmp'
    cmd = '$INSAR_ZERODOP_SCR/look.py -i {} -o {} -r {} -a {}'.format('../' + inps.amp, tmpAmp, inps.rlks, inps.alks)
    runCmd(cmd)
    cmd = "imageMath.py -e='sqrt(a_0*a_0+a_1*a_1)' --a={} -o {} -t float".format(tmpAmp, lookAmp)
    runCmd(cmd)
    os.remove(tmpAmp)
    os.remove(tmpAmp + '.xml')
    os.remove(tmpAmp + '.vrt')

    if inps.rlks != 1 and inps.alks != 1:
        cmd = 'looks.py -i {} -o {} -r {} -a {}'.format('../' + inps.sim, lookSim, inps.rlks, inps.alks)
        runCmd(cmd)
    else:
        # No multilooking needed: copy the simulation and fix up its metadata.
        cmd = "cp {} {}".format('../' + inps.sim, lookSim)
        runCmd(cmd)
        cmd = "cp {} {}".format('../' + inps.sim + '.xml', lookSim + '.xml')
        runCmd(cmd)
        cmd = "cp {} {}".format('../' + inps.sim + '.vrt', lookSim + '.vrt')
        runCmd(cmd)
        # NOTE(review): assumed the header rewrite belongs to this else branch
        # (re-pointing the copied .xml/.vrt at the new file) -- confirm.
        img = isceobj.createImage()
        img.load(lookSim + '.xml')
        img.filename = lookSim
        img.extraFilename = lookSim + '.vrt'
        img.renderHdr()

    #############################################################################################################
    # Line in match_ampsim_roipac.py in replaced by the following. Other part remains the same.
    #set amp image
    objAmp = isceobj.createImage()
    objAmp.setFilename(lookAmp)
    objAmp.setWidth(lookAmpWidth)
    objAmp.setLength(lookAmpLength)
    objAmp.dataType='FLOAT'
    objAmp.setAccessMode('read')
    objAmp.createImage()

    #set sim image
    objSim = isceobj.createImage()
    objSim.setFilename(lookSim)
    objSim.setWidth(lookSimWidth)
    objSim.setLength(lookSimLength)
    objSim.dataType='FLOAT'
    objSim.setAccessMode('read')
    objSim.createImage()

    objAmpcor = Ampcor(name='insarapp_intsim_ampcor')
    objAmpcor.configure()

    #DATA TYPE
    objAmpcor.setImageDataType1('real')
    objAmpcor.setImageDataType2('real')

    #INPUT/OUTPUT FILES
    objAmpcor.setMasterSlcImage(objAmp)
    objAmpcor.setSlaveSlcImage(objSim)

    #MATCH REGION
    ########################################
    #avoid the complain of Ampcor.py
    xMargin = 2 * searchMaxWidth + fftWidth
    yMargin = 2 * searchMaxLength + fftLength
    #make it smaller
    xMargin = fftWidth / 2 + 5
    yMargin = fftLength / 2 + 5
    firstSampleAcross = xMargin
    firstSampleDown = yMargin
    lastSampleAcross = lookAmpWidth - xMargin
    lastSampleDown = lookAmpLength - yMargin
    ########################################
    objAmpcor.setFirstSampleAcross(firstSampleAcross)
    objAmpcor.setLastSampleAcross(lastSampleAcross)
    objAmpcor.setNumberLocationAcross(numRange)
    objAmpcor.setFirstSampleDown(firstSampleDown)
    objAmpcor.setLastSampleDown(lastSampleDown)
    objAmpcor.setNumberLocationDown(numAzimuth)

    #MATCH PARAMETERS
    objAmpcor.setWindowSizeWidth(fftWidth)
    objAmpcor.setWindowSizeHeight(fftLength)
    objAmpcor.setSearchWindowSizeWidth(searchMaxWidth)
    objAmpcor.setSearchWindowSizeHeight(searchMaxLength)
    objAmpcor.setAcrossLooks(1)
    objAmpcor.setDownLooks(1)
    objAmpcor.setOversamplingFactor(64)
    objAmpcor.setZoomWindowSize(16)
    objAmpcor.setAcrossGrossOffset(0)
    objAmpcor.setDownGrossOffset(0)
    #1. The following not set
    #Matching Scale for Sample/Line Directions (-) = 1. 1.
    #should add the following in Ampcor.py?
    #if not set, in this case, Ampcor.py'value is also 1. 1.
    #objAmpcor.setScaleFactorX(1.)
    #objAmpcor.setScaleFactorY(1.)

    #MATCH THRESHOLDS AND DEBUG DATA
    #2. The following not set
    #in roi_pac the value is set to 0 1
    #in isce the value is set to 0.001 1000.0
    #SNR and Covariance Thresholds (-) = {s1} {s2}
    #should add the following in Ampcor?
    #THIS SHOULD BE THE ONLY THING THAT IS DIFFERENT FROM THAT OF ROI_PAC
    #objAmpcor.setThresholdSNR(0)
    #objAmpcor.setThresholdCov(1)
    objAmpcor.setDebugFlag(False)
    objAmpcor.setDisplayFlag(False)

    #in summary, only two things not set which are indicated by 'The following not set' above.

    #run ampcor
    objAmpcor.ampcor()

    #get offsets
    offsets = objAmpcor.getOffsetField()
    # Serialize the offset field in the fixed-width text format fitoff expects.
    offsetsPlain = ''
    for offsetx in offsets:
        offsetsPlainx = "{}".format(offsetx)
        offsetsPlainx = offsetsPlainx.split()
        offsetsPlain = offsetsPlain + "{:8d} {:10.3f} {:8d} {:12.3f} {:11.5f} {:11.6f} {:11.6f} {:11.6f}\n".format(
            int(offsetsPlainx[0]),
            float(offsetsPlainx[1]),
            int(offsetsPlainx[2]),
            float(offsetsPlainx[3]),
            float(offsetsPlainx[4]),
            float(offsetsPlainx[5]),
            float(offsetsPlainx[6]),
            float(offsetsPlainx[7])
            )
    with open(offsetFile, 'w') as f:
        f.write(offsetsPlain)

    #############################################################################################################
    cullOffsetFile = 'ampsim_cull.off'
    dumpFile = inps.aff
    #run fitoff here
    cmd = '$INSAR_ZERODOP_BIN/fitoff {} {} 1.5 .5 50 > {}'.format(offsetFile, cullOffsetFile, dumpFile)
    print("{}".format(cmd))
    runCmd(cmd)

    #check number of matching points left
    with open(cullOffsetFile, 'r') as ff:
        numCullOffsets = sum(1 for linex in ff)
    ff.close  # NOTE(review): no-op attribute access (missing parens) and redundant under 'with'
    if numCullOffsets < 50:
        raise Exception('Too few points left after culling, {} left'.format(numCullOffsets))

    #cp it to the parent directory
    shutil.copy(dumpFile, '../' + dumpFile)
    #############################################################################################################
    #./match_ampsim.py -m 141018-141123.amp -s 141018-141123.sim -f ampsim_16rlks_16alks.aff -r 16 -a 16
| 32.560484 | 117 | 0.616223 |
acf567ea7bd024724ddad53e3984a9da92d2d451 | 8,246 | py | Python | annodb/management/commands/lib/GeneReview.py | always-waiting/Django-Xromate | 1fb5b4bbdfac9549622c5714971095325f201a96 | [
"MIT"
] | null | null | null | annodb/management/commands/lib/GeneReview.py | always-waiting/Django-Xromate | 1fb5b4bbdfac9549622c5714971095325f201a96 | [
"MIT"
] | null | null | null | annodb/management/commands/lib/GeneReview.py | always-waiting/Django-Xromate | 1fb5b4bbdfac9549622c5714971095325f201a96 | [
"MIT"
] | null | null | null | # encoding: utf-8
import os
import annodb.models as dbmodels
from mongoengine import register_connection
from mongoengine.context_managers import switch_db
import annodb.lib.parser as parser
import re
import urllib3
from threading import Thread
from Queue import Queue
from bs4 import BeautifulSoup
urllib3.disable_warnings()
def importdb(cmdobj, **opt):
    """
    Import the GeneReviews database.
    Only the accession, gene_symbol and description fields are imported.
    """
    # NOTE(review): Python 2 code (print statements); indentation reconstructed
    # from a whitespace-stripped dump -- verify nesting, especially whether the
    # save loop belongs inside the switch_db context.
    register_connection("cmd-import", opt['db'], opt['host'])
    url = "ftp.ncbi.nih.gov"
    path = "pub/GeneReviews/"
    filename = "GRshortname_NBKid_genesymbol_dzname.txt"
    GRparser = parser.ParseGeneReview(url, path, filename, opt['debug'], opt['nthread'])
    itervalue = iter(GRparser)
    if opt['test']:
        # Dry run: parse only, do not touch the database.
        pass
    else:
        if opt['debug']: print "删除数据库原有信息"
        with switch_db(dbmodels.GeneReview, "cmd-import") as GeneReview:
            # Wipe the collection, then re-insert every parsed record.
            GeneReview.objects.delete()
            for one in itervalue:
                gene = GeneReview(**one)
                gene.save()
def download_html(cmdobj, **opt):
    """
    Download the required HTML pages from NCBI.
    """
    print "开发"
    dhtml = GeneReviewDownloadHTML(debug = opt['debug'], path = opt['outdir'], num = opt['nthread'])
    # Fetch every GeneReviews chapter, then post-process the saved HTML.
    dhtml.download_entry()
    dhtml.trim_html()
class GeneReviewDownloadHTML(object):
    """Download GeneReviews chapters from NCBI and trim the saved HTML.

    NOTE(review): Python 2 code (print statements, ``except Exception,e``);
    indentation reconstructed from a whitespace-stripped dump -- verify
    nesting against the original file.
    """

    def __init__(self, path = "./genereviewhtml", num = 10, debug=False):
        # Book index URL and base URL for individual chapters.
        self.urlbook = "https://www.ncbi.nlm.nih.gov/books/NBK1116/";
        self.urlbase = "http://www.ncbi.nlm.nih.gov/books/n/gene"
        self.http = urllib3.PoolManager()
        self.debug = debug
        self.path = path
        self.queue = Queue()
        self.nthread = num
        self.thread = []
        # Worker threads for trim_html(); daemonized so they never block exit.
        for i in range(self.nthread):
            worker = worker = Thread(target=self._handle_file)
            worker.setDaemon(True)
            self.thread.append(worker)
        self._get_items_url()

    def _get_items_url(self):
        # Fetch the table of contents and build self.items_url, the list of
        # chapter URLs. Retries up to 10 times on network/HTTP failure.
        count = 0;
        while True:
            if self.debug: print "Try to generate itmes url times %s" % count
            try:
                tx = self.http.request("GET", self.urlbook)
            except Exception,e:
                if count< 10:
                    count += 1
                    if self.debug:
                        print "[Error] for get items url\n%s, 再次尝试" % e
                else:
                    raise Exception("[Error] 多次尝试失败,退出")
                continue
            if tx.status == 200:
                if self.debug: print "Download from %s done!" % self.urlbook
                soup = BeautifulSoup(tx.data,'lxml')
                items = soup.select("ul[id=toc_tllNBK1116_toc_del1p36]")[0].select('li[class=half_rhythm]')
                geneitems = [ one['id'] for one in items]
                # Drop the four trailing non-chapter entries of the TOC.
                geneitems.pop();geneitems.pop();geneitems.pop();geneitems.pop()
                items_url = []
                for one in geneitems:
                    # TOC ids look like "NBK1116_<short>" or "NBK1116_toc_<short>".
                    get = re.search("NBK1116_(?:toc_)?(.+)$",one,flags=re.IGNORECASE)
                    if get:
                        url_get = "%s/%s" % (self.urlbase,get.group(1))
                        items_url.append(url_get)
                    else:
                        if self.debug: print "Fail to get %s url" % one
                self.items_url = items_url
                if self.debug: print "Generate items_url done!"
                break
            else:
                if count < 10:
                    count += 1
                    if self.debug:
                        print "[Error] for connect %s of status %s\n再次尝试" % (self.urlbook,tx.status)
                    continue
                else:
                    raise Exception("[Error] 多次尝试失败,退出")

    def download_entry(self):
        # Download every chapter URL to self.path, retrying each up to 10 times.
        # Ensure the target directory exists.
        if not os.path.exists(self.path):
            os.makedirs(self.path)
        if self.debug:
            #fun = [self.items_url[0]]
            fun = self.items_url
        else:
            fun = self.items_url
        for url in fun:
            count = 0
            while True:
                if self.debug: print "Begin Downloading %s - times: %s" % (url,count)
                try:
                    tx = self.http.request("GET", url)
                except Exception,e:
                    if count < 10:
                        count += 1
                        if self.debug:
                            print "[Error] for http request of %s,再次尝试" % url
                        continue
                    else:
                        if self.debug: print "多次尝试,退出"
                        break
                if tx.status == 200:
                    with open("%s/%s" % (self.path, os.path.basename(url)), "w") as f:
                        f.write(tx.data)
                    if self.debug: print "Download %s done!" % url
                    break
                else:
                    if count < 10:
                        count += 1
                        if self.debug:
                            print "[Error] for get entry of %s,再次尝试" % url
                        continue
                    else:
                        if self.debug: print "多次尝试,退出"
                        break

    def trim_html(self):
        # Queue every downloaded file and let the worker threads trim them.
        if not os.path.exists(self.path):
            raise Exception("%s not exists. Please run download_entry first" % self.path)
        if self.debug:
            print "Trim html"
        files= os.listdir(self.path)
        for filename in files:
            # NOTE(review): isdir() is checked against the bare name, not
            # os.path.join(self.path, filename) -- relative-cwd dependent.
            if not os.path.isdir(filename):
                self.queue.put(filename)
            else:
                if self.debug: print "%s is not file" % filename
        for worker in self.thread:
            worker.start()
        for worker in self.thread:
            worker.join()
        self.queue.join()

    def _handle_file(self):
        # Worker loop: strip chrome/scripts from each HTML file, absolutize
        # links and images, and re-save it under its NBK id.
        while True:
            if self.queue.qsize() == 0:
                if self.debug: print "队列为空"
                break
            filename = self.queue.get()
            if self.debug: print "Trim file %s" % filename
            with open("%s/%s" % (self.path,filename)) as f:
                soup = BeautifulSoup(f.read(), 'lxml')
            # NOTE(review): map() is lazy on Python 3; these extract() calls
            # only execute under Python 2 -- confirm target interpreter.
            map(lambda x: x.extract(), soup.select('div[class="post-content"]'))
            map(lambda x: x.extract(), soup.select('div[class="pre-content"]'))
            map(lambda x: x.extract(), soup.select('div[class=top]'))
            map(lambda x: x.extract(), soup.select('div[id=rightcolumn]'))
            map(lambda x: x.extract(), soup.select('div[id=footer]'))
            map(lambda x: x.extract(), soup.findAll('meta'))
            map(lambda x: x.extract(), soup.findAll('script'))
            map(lambda x: x.extract(), soup.findAll('link'))
            # Re-attach the NCBI stylesheets so the trimmed page still renders.
            head = soup.find('head')
            head_add_tag1 = soup.new_tag('link', href="//static.pubmed.gov/portal/portal3rc.fcgi/4098875/css/3852956/3985586/3808861/3734262/3974050/3917732/251717/4098876/14534/45193/4113719/3849091/3984811/3751656/4033350/3840896/3577051/3852958/4008682/3881636/3579733/4062871/12930/3964959/3854974/36029/4052581/9685/3549676/3609192/3609193/3609213/3395586.css", rel="stylesheet", type="text/css")
            head_add_tag2 = soup.new_tag('link', href="//static.pubmed.gov/portal/portal3rc.fcgi/4098875/css/3411343/3882866.css", media="print", rel="stylesheet", type="text/css")
            head.append(head_add_tag1)
            head.append(head_add_tag2)
            # The page's NBK accession, taken from the first h1 id.
            nbk = soup.select('.meta-content.fm-sec')[0].select("h1")[0]['id'].replace("_","")
            if self.debug: print "NBK: %s" % nbk
            for img in soup.findAll("img"):
                img['src'] = "http://www.ncbi.nlm.nih.gov%s" % img['src']
                if img.has_attr("src-large"):
                    img['src-large'] = "http://www.ncbi.nlm.nih.gov%s" % img['src-large']
            for a in soup.findAll('a'):
                if not a['href'].startswith("#"):
                    a['href'] = "http://www.ncbi.nlm.nih.gov%s" % a['href']
            with open("%s/%s.html" % (self.path,nbk),"w") as f:
                f.write(str(soup))
            os.remove("%s/%s" % (self.path,filename))
            self.queue.task_done()
| 42.287179 | 405 | 0.517099 |
acf56a03a1baef99979ae069e63dc5de58047ca9 | 152 | py | Python | vina_wrapper/vina_errors.py | bioexcel/virtualscreening | e973958e012e38f99b0c8ed2b798c5e5a7f72b22 | [
"Apache-2.0"
] | 3 | 2020-02-17T11:11:08.000Z | 2021-12-03T18:54:47.000Z | vina_wrapper/vina_errors.py | bioexcel/virtualscreening | e973958e012e38f99b0c8ed2b798c5e5a7f72b22 | [
"Apache-2.0"
] | 1 | 2019-12-05T15:32:50.000Z | 2019-12-10T16:13:08.000Z | vina_wrapper/vina_errors.py | bioexcel/virtualscreening | e973958e012e38f99b0c8ed2b798c5e5a7f72b22 | [
"Apache-2.0"
] | 2 | 2019-09-26T20:21:14.000Z | 2021-07-10T04:37:31.000Z | """Custom AutoDockVina wrapper errors"""
class AutoDockVinaError(Exception):
    """Base class for all custom AutoDock Vina wrapper errors."""
    pass
class MGLToolsConfigurationError(AutoDockVinaError):
    """Raised when the MGLTools installation is missing or misconfigured."""
    pass
| 15.2 | 52 | 0.776316 |
acf56a6609740644ee703b760f8d48371228b799 | 2,886 | py | Python | tests/feature_tests/test_max_feature.py | smogork/TAiO_ImageClassification | 14b2f6e707475b45e936a8ddd4345309aaef78f2 | [
"MIT"
] | null | null | null | tests/feature_tests/test_max_feature.py | smogork/TAiO_ImageClassification | 14b2f6e707475b45e936a8ddd4345309aaef78f2 | [
"MIT"
] | 2 | 2021-10-12T17:45:49.000Z | 2021-12-21T19:23:30.000Z | tests/feature_tests/test_max_feature.py | smogork/TAiO_ImageClassification | 14b2f6e707475b45e936a8ddd4345309aaef78f2 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
"""
Modul zawiera testy klasy MaxFeature
"""
import unittest
from bitmap.bitmap_grayscale import BitmapGrayscale
from feature.simple_features.max_feature import MaxFeature
from tests.bitmap_generator import BitmapGenerator
class TestMaxFeature(unittest.TestCase):
    """
    Test suite for the MaxFeature class.
    """

    def setUp(self):
        self.feature = MaxFeature()

    def count_feature(self, bitmap: BitmapGrayscale) -> float:
        """
        Prepare the feature for the given bitmap and compute its value.
        :param bitmap: bitmap for which the feature is computed
        :return: the computed feature value
        """
        self.feature.prepare(bitmap)
        return self.feature.calculate()

    def test_reorder_calculate_prepare(self):
        """
        Checks that calling prepare and calculate in the wrong order raises.
        Expects a RuntimeError to be raised.
        """
        with self.assertRaises(RuntimeError):
            self.feature.calculate()

    def test_non_white_plain(self):
        """
        Provides a bitmap filled with a single non-white colour.
        Expects the result to be (number of cells - 1).
        """
        color = 0.5
        size = 3
        bitmap = BitmapGenerator.plain_color(size, size, color)
        res = self.count_feature(bitmap)
        self.assertIs(size * size - 1, res)

    def test_white_plain(self):
        """
        Provides a bitmap filled only with white.
        Expects -1 as the marker for an all-white bitmap.
        """
        size = 3
        bitmap = BitmapGenerator.plain_white(size, size)
        res = self.count_feature(bitmap)
        self.assertIs(-1, res)

    def test_black_dot(self):
        """
        Provides an all-white bitmap except for a single black pixel.
        Expects the index of that pixel.
        """
        size = 5
        x = 4
        y = 1
        bitmap = BitmapGenerator.plain_white(size, size)
        bitmap.set_cell_value(x, y, BitmapGrayscale.Black)# black dot
        res = self.count_feature(bitmap)
        self.assertIs(res, size * x + y)

    def test_black_dot_multiple_times(self):
        """
        Provides an all-white bitmap except for a single black pixel.
        Expects the index of that pixel.
        The computation is repeated 3 times.
        """
        size = 5
        x = 4
        y = 1
        bitmap = BitmapGenerator.plain_white(size, size)
        bitmap.set_cell_value(x, y, BitmapGrayscale.Black)# black dot
        res1 = self.count_feature(bitmap)
        res2 = self.count_feature(bitmap)
        res3 = self.count_feature(bitmap)
        self.assertIs(res1, size * x + y)
        self.assertIs(res2, size * x + y)
        self.assertIs(res3, size * x + y)
| 27.75 | 100 | 0.626819 |
acf56a766f88f7bc045b9e28dedc8e7c5b19bed2 | 349 | py | Python | NoRoads_WebApp/runserver.py | atr2600/NoRoads_WebApp | 73a3190f872e272253b96b739459696178510935 | [
"MIT"
] | null | null | null | NoRoads_WebApp/runserver.py | atr2600/NoRoads_WebApp | 73a3190f872e272253b96b739459696178510935 | [
"MIT"
] | null | null | null | NoRoads_WebApp/runserver.py | atr2600/NoRoads_WebApp | 73a3190f872e272253b96b739459696178510935 | [
"MIT"
] | null | null | null | """
This script runs the NoRoads_WebApp application using a development server.
"""
from os import environ
from NoRoads_WebApp import app
if __name__ == '__main__':
    # Development server entry point: host/port come from the environment.
    HOST = environ.get('SERVER_HOST', 'localhost')
    try:
        PORT = int(environ.get('SERVER_PORT', '5555'))
    except ValueError:
        # Fall back to the default port when SERVER_PORT is not an integer.
        PORT = 5555
    app.run(HOST, PORT)
| 23.266667 | 75 | 0.676218 |
acf56acdab86dd65ff366827727df3f80304a9c2 | 2,503 | py | Python | rcnn/core/callback.py | chengdazhi/Flow-Guided-Feature-Aggregation | 63cdffb3d4837d87fb590530b1d16dd59ccb292b | [
"MIT"
] | 1 | 2019-06-19T09:23:41.000Z | 2019-06-19T09:23:41.000Z | rcnn/core/callback.py | chengdazhi/Flow-Guided-Feature-Aggregation | 63cdffb3d4837d87fb590530b1d16dd59ccb292b | [
"MIT"
] | null | null | null | rcnn/core/callback.py | chengdazhi/Flow-Guided-Feature-Aggregation | 63cdffb3d4837d87fb590530b1d16dd59ccb292b | [
"MIT"
] | null | null | null | # Based on:
# MX-RCNN
# Copyright (c) 2016 by Contributors
# Licence under The Apache 2.0 License
# https://github.com/ijkguo/mx-rcnn/
# --------------------------------------------------------
import time
import logging
import mxnet as mx
class Speedometer(object):
    """Training callback that periodically logs and prints batch throughput."""

    def __init__(self, batch_size, frequent=50):
        self.batch_size = batch_size
        self.frequent = frequent
        self.init = False
        self.tic = 0
        self.last_count = 0

    def __call__(self, param):
        """Callback to Show speed."""
        batch = param.nbatch
        if self.last_count > batch:
            # Batch counter restarted (new epoch): re-arm the timer.
            self.init = False
        self.last_count = batch

        if not self.init:
            self.init = True
            self.tic = time.time()
            return

        if batch % self.frequent != 0:
            return

        speed = self.frequent * self.batch_size / (time.time() - self.tic)
        if param.eval_metric is not None:
            names, values = param.eval_metric.get()
            msg = "Epoch[%d] Batch [%d]\tSpeed: %.2f samples/sec\tTrain-" % (param.epoch, batch, speed)
            for metric_name, metric_value in zip(names, values):
                msg += "%s=%f,\t" % (metric_name, metric_value)
        else:
            msg = "Iter[%d] Batch [%d]\tSpeed: %.2f samples/sec" % (param.epoch, batch, speed)
        logging.info(msg)
        print(msg)
        self.tic = time.time()
class PhillyProgressCallback(object):
    """Callback that prints overall training progress as a percentage."""

    def __init__(self, total_iter, frequent=50):
        self.total_iter = total_iter
        self.frequent = frequent
        self.cur_iter = 0

    def __call__(self, param):
        # Report every `frequent` iterations (including iteration 0).
        if self.cur_iter % self.frequent == 0:
            percent = 100.0 * self.cur_iter / self.total_iter
            print('\nPROGRESS: {:.2f}%\n'.format(percent))
        self.cur_iter += 1
def do_checkpoint(prefix, means, stds):
    """Return an epoch-end callback that saves a checkpoint with the bbox
    regression (means, stds) normalization folded into test-time params."""
    def _callback(iter_no, sym, arg, aux):
        weight = arg['bbox_pred_weight']
        bias = arg['bbox_pred_bias']
        repeat = bias.shape[0] / means.shape[0]
        # NOTE(review): '/' yields a float on Python 3; mx.nd.repeat
        # presumably needs an int repeat count -- confirm target version.
        arg['bbox_pred_weight_test'] = weight * mx.nd.repeat(mx.nd.array(stds), repeats=repeat).reshape((bias.shape[0], 1, 1, 1))
        arg['bbox_pred_bias_test'] = arg['bbox_pred_bias'] * mx.nd.repeat(mx.nd.array(stds), repeats=repeat) + mx.nd.repeat(mx.nd.array(means), repeats=repeat)
        mx.model.save_checkpoint(prefix, iter_no + 1, sym, arg, aux)
        # Remove the temporary test-time params so training state is untouched.
        arg.pop('bbox_pred_weight_test')
        arg.pop('bbox_pred_bias_test')
    return _callback
acf56b4f3b29e41256eb43b7456801d9fcf8b066 | 1,214 | py | Python | features/environment.py | Devops-2020-Products/products | 3d02bbba94e3ea1134d64cba42de759a4636f852 | [
"Apache-2.0"
] | 1 | 2021-05-16T15:29:45.000Z | 2021-05-16T15:29:45.000Z | features/environment.py | Devops-2020-Products/products | 3d02bbba94e3ea1134d64cba42de759a4636f852 | [
"Apache-2.0"
] | 120 | 2020-10-02T23:42:42.000Z | 2020-12-09T08:02:01.000Z | features/environment.py | Devops-2020-Products/products | 3d02bbba94e3ea1134d64cba42de759a4636f852 | [
"Apache-2.0"
] | 12 | 2020-09-27T21:49:30.000Z | 2021-07-05T22:26:52.000Z | """
Environment for Behave Testing
"""
from os import getenv
from selenium import webdriver
WAIT_SECONDS = int(getenv('WAIT_SECONDS', '60'))
BASE_URL = getenv('BASE_URL', 'http://localhost:5000')
def before_all(context):
    """ Executed once before all tests """
    options = webdriver.ChromeOptions()
    # Chrome flags needed to run headless inside constrained CI containers.
    for flag in ("start-maximized",          # open Browser in maximized mode
                 "disable-infobars",         # disabling infobars
                 "--disable-extensions",     # disabling extensions
                 "--disable-gpu",            # applicable to windows os only
                 "--disable-dev-shm-usage",  # overcome limited resource problems
                 "--no-sandbox",             # Bypass OS security model
                 "--headless"):
        options.add_argument(flag)
    context.driver = webdriver.Chrome(options=options)
    context.driver.implicitly_wait(WAIT_SECONDS)  # seconds
    # context.driver.set_window_size(1200, 600)
    context.base_url = BASE_URL
    # -- SET LOG LEVEL: behave --logging-level=ERROR ...
    # on behave command-line or in "behave.ini"
    context.config.setup_logging()
def after_all(context):
    """ Executed after all tests: close the browser and end the WebDriver session. """
    context.driver.quit()
acf56b6c87ca2f0e4c97b1c6ff7b269504d8bf4f | 29 | py | Python | main.py | carlm125/test | 1dfd0816adcd2fe78ad05dc50a78637fc8bd61b6 | [
"MIT"
] | null | null | null | main.py | carlm125/test | 1dfd0816adcd2fe78ad05dc50a78637fc8bd61b6 | [
"MIT"
] | null | null | null | main.py | carlm125/test | 1dfd0816adcd2fe78ad05dc50a78637fc8bd61b6 | [
"MIT"
] | null | null | null | import login_gui
| 2.071429 | 16 | 0.517241 |
acf56c361d4f52d998528258d6219544caa98d25 | 1,604 | py | Python | nebula/cli/aws.py | nptato/nebula | 74791df20c73add31a29ecdabcde71e2dd2a27b7 | [
"CC-BY-2.0"
] | 31 | 2018-04-17T04:14:19.000Z | 2021-10-04T16:03:38.000Z | nebula/cli/aws.py | nptato/nebula | 74791df20c73add31a29ecdabcde71e2dd2a27b7 | [
"CC-BY-2.0"
] | 7 | 2019-06-26T18:57:21.000Z | 2021-03-16T07:10:09.000Z | nebula/cli/aws.py | nptato/nebula | 74791df20c73add31a29ecdabcde71e2dd2a27b7 | [
"CC-BY-2.0"
] | 8 | 2018-04-17T04:14:25.000Z | 2022-02-12T05:48:18.000Z | import boto3
import os, sys
from datetime import datetime, timedelta
import subprocess
import click
from flask import Flask, session, redirect, url_for, escape, request, render_template, flash, send_from_directory
from nebula import app
from nebula.services.cache import cache
from nebula.services import aws
from time import time
@app.cli.command()
def list_instance_types():
    """Print the instance types reported by the aws service layer."""
    print(aws.get_instance_types())
@app.cli.command()
def shutdown_expired_instances():
    """Shut down expired instances via the aws service layer."""
    aws.shutdown_expired_instances()
@app.cli.command()
@click.argument('profile_id')
def get_profile_ami(profile_id):
    """Print the AMI associated with the given profile id."""
    print(aws.get_ami_from_profile(profile_id))
@app.cli.command()
@click.argument('settings_path')
@click.argument('secret_name')
def config_to_aws_sm(settings_path, secret_name):
    """Upload the settings file at settings_path to AWS Secrets Manager
    under secret_name. Exits with status 1 if the file does not exist."""
    if not os.path.isfile(settings_path):
        print('Unable to find settings file at %s' % (settings_path))
        sys.exit(1)
    with open(settings_path, 'r') as stream:
        config_string = stream.read()
    set_secret(secret_name, config_string)
def set_secret(secret_name, secret_string):
    """Create the named secret in AWS Secrets Manager, or update its value
    if it already exists (create-then-put fallback)."""
    client = boto3.client(
        service_name='secretsmanager',
    )
    try:
        response = client.create_secret(
            Name=secret_name,
            Description='Nebula Configuration File',
            SecretString=secret_string,
            Tags=[
                {'Key': 'nebula', 'Value': 'true'}
            ]
        )
    except client.exceptions.ResourceExistsException:
        # Secret already exists: store a new version of its value instead.
        response = client.put_secret_value(
            SecretId=secret_name,
            SecretString=secret_string,
        )
| 26.733333 | 113 | 0.692643 |
acf56cf458f496d1ac7ca829f5d416c1b7a79c95 | 848 | py | Python | app/user/views.py | matthew-tozzi47/recipe-app-api | 5f8ceb87b2d9bf2fb690ab7247bb2439bdb8647f | [
"MIT"
] | null | null | null | app/user/views.py | matthew-tozzi47/recipe-app-api | 5f8ceb87b2d9bf2fb690ab7247bb2439bdb8647f | [
"MIT"
] | null | null | null | app/user/views.py | matthew-tozzi47/recipe-app-api | 5f8ceb87b2d9bf2fb690ab7247bb2439bdb8647f | [
"MIT"
] | null | null | null | from rest_framework import generics, authentication, permissions
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from user.serializers import UserSerializer, AuthTokenSerializer
class CreateUserView(generics.CreateAPIView):
    """Create a new user in the system"""
    serializer_class = UserSerializer
class CreateTokenView(ObtainAuthToken):
    """Create a new auth token for the user"""
    serializer_class = AuthTokenSerializer
    renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
class ManageUserView(generics.RetrieveUpdateAPIView):
    """Manage the authenticated user"""
    serializer_class = UserSerializer
    authentication_classes = (authentication.TokenAuthentication,)
    permission_classes = (permissions.IsAuthenticated,)

    def get_object(self):
        # Always operate on the requesting user, so no pk is needed in the URL.
        return self.request.user
| 31.407407 | 66 | 0.798349 |
acf56de977c7b6e30ed8332618a89b64c42675c9 | 6,914 | py | Python | other/old/client.py | georgymh/decentralized-ml | 117f998657a0a22c5c865814551082a40da00596 | [
"Apache-2.0"
] | 11 | 2018-09-04T04:36:33.000Z | 2021-07-14T13:11:35.000Z | other/old/client.py | georgymh/decentralized-ml | 117f998657a0a22c5c865814551082a40da00596 | [
"Apache-2.0"
] | 30 | 2018-07-12T16:44:21.000Z | 2019-01-23T23:36:59.000Z | other/old/client.py | georgymh/decentralized-ml | 117f998657a0a22c5c865814551082a40da00596 | [
"Apache-2.0"
] | 4 | 2018-12-18T17:11:41.000Z | 2021-07-14T13:11:42.000Z | import logging
import pickle
import shutil
import random
import numpy as np
import tensorflow as tf
import keras
from models.perceptron import Perceptron
from models.cnn import CNN
from models.lstm import LSTMModel
from models.keras_perceptron import KerasPerceptron
# Configure root logging once at import time; every client log line is
# prefixed with "[Client]" plus timestamp and level.
logging.basicConfig(level=logging.DEBUG,
    format='[Client] %(asctime)s %(levelname)s %(message)s')
class TensorflowClient(object):
    """Federated-learning client that trains and evaluates tf.estimator
    models on a local data shard identified by ``iden``.

    Checkpoints live under ./checkpoints-<iden>/<model_type>/ and are removed
    once the trained weights have been extracted.
    """

    def __init__(self, iden, X, y):
        self.iden = iden
        self.X = X
        self.y = y
        # TODO: Should be randomized.
        # Fixed 80/20 train/validation split, used when omega is the
        # validation accuracy ("val_acc" averaging).
        cut_off = int(X.shape[0] * 0.8)
        self.X_train = X[:cut_off]
        self.y_train = y[:cut_off]
        self.X_test = X[cut_off:]
        self.y_test = y[cut_off:]

    def setup_model(self, model_type):
        """Instantiate the local model wrapper for ``model_type``.

        Raises:
            ValueError: for unknown model types.
        """
        self.model_type = model_type
        if model_type == "perceptron":
            self.model = Perceptron()
        elif model_type == "cnn-mnist":
            self.model = CNN()
        elif model_type == "cnn-cifar10":
            self.model = CNN()
        else:
            raise ValueError("Model {0} not supported.".format(model_type))

    def train(self, weights, config):
        """Train locally starting from ``weights``.

        Returns (new_weights, omega) where omega is either the local data
        size or the local validation accuracy, depending on
        config["averaging_type"].
        """
        logging.info('Training just started.')
        # BUG FIX: identity comparison (was ``weights != None``).
        assert weights is not None, 'weights must not be None.'
        assert config["averaging_type"] in ["data_size", "val_acc"]
        if config["averaging_type"] == "data_size":
            X, y = self.X, self.y
        elif config["averaging_type"] == "val_acc":
            X, y = self.X_train, self.y_train
        # batch_size == -1 means full-batch training.
        batch_size = X.shape[0] \
            if config["batch_size"] == -1 else config["batch_size"]
        epochs = config["epochs"]
        learning_rate = config["learning_rate"]
        params = {'new_weights': weights, 'learning_rate': learning_rate}
        classifier = tf.estimator.Estimator(
            model_fn=self.model.get_model,
            model_dir=self.get_checkpoints_folder(),
            params=params
        )
        train_input_fn = tf.estimator.inputs.numpy_input_fn(
            x={"x": X},
            y=y,
            batch_size=batch_size,
            num_epochs=epochs,
            shuffle=True
        )
        classifier.train(input_fn=train_input_fn)
        logging.info('Training complete.')
        new_weights = self.model.get_weights(self.get_latest_checkpoint())
        if config["averaging_type"] == "data_size":
            omega = X.shape[0]
        elif config["averaging_type"] == "val_acc":
            # Evaluate the freshly trained weights on the held-out 20% split.
            eval_classifier = tf.estimator.Estimator(
                model_fn=self.model.get_model,
                model_dir=self.get_checkpoints_folder(),
                params={'new_weights': new_weights}
            )
            eval_input_fn = tf.estimator.inputs.numpy_input_fn(
                x={"x": self.X_test},
                y=self.y_test,
                num_epochs=1,
                shuffle=False
            )
            eval_results = eval_classifier.evaluate(input_fn=eval_input_fn)
            omega = eval_results["accuracy"]
        # Checkpoints are only needed to extract weights; reclaim disk space.
        shutil.rmtree("./checkpoints-{0}/".format(self.iden))
        return new_weights, omega

    def validate(self, t, weights, config):
        """Load ``weights`` into a fresh model and evaluate on the full
        local dataset; returns the tf.estimator evaluation results dict."""
        # check if this is needed
        model_type = config["model_type"]
        self.setup_model(model_type)
        classifier = tf.estimator.Estimator(
            model_fn=self.model.get_model,
            model_dir=self.get_checkpoints_folder(),
            params={'new_weights': weights, 'learning_rate': 0.0}
        )
        # One step at learning-rate 0 so the estimator materializes a
        # checkpoint we can load the supplied weights into.
        train_input_fn = tf.estimator.inputs.numpy_input_fn(
            x={"x": self.X},
            y=self.y,
            batch_size=1,
            num_epochs=None,
            shuffle=False
        )
        classifier.train(
            input_fn=train_input_fn,
            steps=1
        )
        # (Removed an unused ``metagraph_file`` local left over from an
        # earlier implementation.)
        self.model.load_weights(weights, self.get_latest_checkpoint(),
            self.get_checkpoints_folder())
        logging.info('Main model updated.')
        self.setup_model(model_type)
        classifier = tf.estimator.Estimator(
            model_fn=self.model.get_model,
            model_dir=self.get_checkpoints_folder(),
            params={'new_weights': weights}
        )
        eval_input_fn = tf.estimator.inputs.numpy_input_fn(
            x={"x": self.X},
            y=self.y,
            num_epochs=1,
            shuffle=False
        )
        eval_results = classifier.evaluate(input_fn=eval_input_fn)
        logging.info("[Round {0}] Validation results: {1}".format(t, eval_results))
        return eval_results

    def get_initial_weights(self, model_type):
        """Build the model graph once and return its freshly initialized
        trainable variables as a {tensor_name: ndarray} dict."""
        tf.reset_default_graph()
        if model_type == "perceptron":
            m = Perceptron()
            inputs = tf.placeholder(tf.float32, shape=(None, 28*28))
            _ = m.get_model(features={"x": inputs}, labels=None, mode='predict', params=None)
        elif model_type == 'cnn-mnist':
            m = CNN()
            inputs = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
            _ = m.get_model(features={"x": inputs}, labels=None, mode='predict', params=None)
        elif model_type == 'cnn-cifar10':
            m = CNN()
            inputs = tf.placeholder(tf.float32, shape=(None, 32, 32, 3))
            _ = m.get_model(features={"x": inputs}, labels=None, mode='predict', params=None)
        else:
            # BUG FIX: the original used "Model {model_type} ...".format(model_type),
            # which raises KeyError instead of the intended ValueError.
            raise ValueError("Model {0} not supported.".format(model_type))
        with tf.Session().as_default() as sess:
            sess.run(tf.global_variables_initializer())
            collection = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
            weights = {tensor.name: sess.run(tensor) for tensor in collection}
        tf.reset_default_graph()
        return weights

    def get_checkpoints_folder(self):
        return "./checkpoints-{0}/{1}/".format(self.iden, self.model_type)

    def get_latest_checkpoint(self):
        return tf.train.latest_checkpoint(self.get_checkpoints_folder())
class KerasClient(object):
    """Keras-based federated-learning client; counterpart to
    TensorflowClient for model types implemented with Keras (e.g. LSTM)."""

    def __init__(self, iden, X, y):
        self.iden = iden  # unique client identifier
        self.X = X        # local features
        self.y = y        # local labels

    def setup_model(self, model_type):
        """Instantiate the Keras model wrapper for ``model_type``.

        Raises:
            ValueError: for unknown model types.
        """
        self.model_type = model_type
        if model_type == "lstm":
            self.model = LSTMModel()
        else:
            raise ValueError("Model {0} not supported.".format(model_type))

    def train(self, weights, config):
        """Train the local model starting from ``weights``.

        Returns (new_weights, omega) where omega is the local data size
        used for federated weight averaging.
        """
        logging.info('Training just started.')
        assert weights is not None, 'weights must not be None.'
        self.model.set_weights(weights)
        # BUG FIX: the original referenced an undefined local name ``X``
        # here and in the return statement; the client's own shard is
        # ``self.X``.
        self.model.train(self.X)
        logging.info('Training complete.')
        new_weights = self.model.get_weights()
        return new_weights, len(self.X)

    def validate(self, t, weights, config):
        # TODO: Need to implement Keras validation.
        pass

    def get_initial_weights(self):
        return self.model.get_initial_weights()
| 35.639175 | 93 | 0.595748 |
acf56f0624ed303c83267bb1da5342e88fea6718 | 1,041 | py | Python | app/logger.py | TomoProg/karuta-bot | a7f86f4dfd68fb3c9f191408fe9e5ddeeca408cf | [
"MIT"
] | null | null | null | app/logger.py | TomoProg/karuta-bot | a7f86f4dfd68fb3c9f191408fe9e5ddeeca408cf | [
"MIT"
] | 4 | 2017-09-11T15:44:42.000Z | 2020-07-20T15:55:12.000Z | app/logger.py | TomoProg/karuta-bot | a7f86f4dfd68fb3c9f191408fe9e5ddeeca408cf | [
"MIT"
] | null | null | null | #-*- coding:utf-8 -*-
import datetime
import inspect
class Logger():
    """
    Content:
        Simple file logger: every write() appends one line to the configured
        log file, prefixed with a timestamp and the caller's file/line.
    """
    def __init__(self, filepath):
        """
        Content:
            Constructor
        Param:
            1. filepath: path of the log file to append to
        """
        self.__filepath = filepath
    def write(self, msg):
        """
        Content:
            Write method: appends "<timestamp> <caller file> <caller line> <msg>".
        Param:
            1. msg: message to output
        """
        # Inspect the caller's stack frame to record where this call came from
        stack_frame = inspect.stack()[1]
        frame = stack_frame[0]
        info = inspect.getframeinfo(frame)
        # Build the log line
        linetxt = ""
        linetxt += datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S ")
        linetxt += (info.filename + " ")
        linetxt += (str(info.lineno) + " ")
        linetxt += msg
        linetxt += "\n"
        # Append the line to the log file
        with open(self.__filepath, "a") as f:
            f.write(linetxt)
if __name__ == "__main__":
    # Smoke test: append one sample line to ./abc.txt.
    logger = Logger("abc.txt")
    logger.write("sample msg")
| 21.6875 | 73 | 0.463016 |
acf56f467bd3685fe22afa594f5649e641383ffb | 1,549 | py | Python | parse.py | RenolY2/scg-modeldump | 1069eaaac93f283e8d76b754da57f699d86a14fb | [
"MIT"
] | 3 | 2020-09-20T12:03:11.000Z | 2022-01-09T00:51:13.000Z | parse.py | RenolY2/scg-modeldump | 1069eaaac93f283e8d76b754da57f699d86a14fb | [
"MIT"
] | null | null | null | parse.py | RenolY2/scg-modeldump | 1069eaaac93f283e8d76b754da57f699d86a14fb | [
"MIT"
def read_next_notcomment(f):
    """Return the next non-empty, non-comment line from *f* (stripped),
    or None at end of file.

    Lines that are blank after stripping, or start with ';' or '//', are
    skipped.  The original implementation recursed once per skipped line,
    which can exceed the recursion limit on files with long comment runs;
    this version iterates instead (same observable behavior).
    """
    while True:
        line = f.readline()
        if not line:
            return None  # EOF
        line = line.strip()
        if line and not line.startswith(";") and not line.startswith("//"):
            return line
def read_material(f):
    # Parse one material block of the form  <name> { <param> [value] ... }.
    # Returns (name, params) on success or (None, None) at end of file.
    name = read_next_notcomment(f)
    # NOTE(review): debug print left in; remove once parsing is trusted.
    print(name)
    if name is None:
        return None, None
    openBracket = read_next_notcomment(f)
    assert openBracket == "{"
    line = read_next_notcomment(f)
    params = {}
    # Depth of nested '{' blocks inside the material body; the closing '}'
    # of the material itself is only honored at depth 0.
    indent_level = 0
    while line != "}" or (line == "}" and indent_level > 0):
        if line == "{":
            indent_level += 1
        elif line == "}":
            indent_level -= 1
        else:
            # Each payload line is "<param>" or "<param> <value...>"; the
            # value keeps any internal whitespace (maxsplit=1).
            data = line.split(maxsplit=1)
            if len(data) > 0:
                if len(data) == 1:
                    param = data[0]
                    val = None
                else:
                    param, val = data
                params[param] = val
        line = read_next_notcomment(f)
        if line is None:
            break
    return name, params
def read_material_file(f):
    """Parse every material definition in *f* into a name -> params dict."""
    materials = {}
    while True:
        name, params = read_material(f)
        if name is None:
            return materials
        materials[name] = params
if __name__ == "__main__":
    import json
    # Parse the material file and dump the result as pretty-printed JSON.
    with open("3dMaterials.nsa", "r") as f:
        with open("3dmaterials.json", "w") as g:
json.dump(read_material_file(f), g, indent=4) | 26.706897 | 69 | 0.509361 |
acf56fc9d30e2f052190cc71d8fdd20a3444c60a | 924 | py | Python | python3-47/python3-47.py | t7y/python3 | ea3755cb5f41f60587bda379feea9e5f4b8c111b | [
"Apache-2.0"
] | 25 | 2020-10-29T21:07:00.000Z | 2022-03-22T08:20:34.000Z | python3-47/python3-47.py | t7y/python3 | ea3755cb5f41f60587bda379feea9e5f4b8c111b | [
"Apache-2.0"
] | null | null | null | python3-47/python3-47.py | t7y/python3 | ea3755cb5f41f60587bda379feea9e5f4b8c111b | [
"Apache-2.0"
] | 14 | 2020-12-03T00:42:11.000Z | 2022-03-21T18:03:49.000Z | #Daemon threads
#Quitting when we quit the app
#Imports
import logging
import threading
from threading import Thread, Timer
import time
#Test functions
def test():
    # Worker body: log a heartbeat once per second for 60 seconds, tagged
    # with the current thread's name.
    threadname = threading.current_thread().name
    logging.info(f'Starting: {threadname}')
    for x in range(60):
        logging.info(f'Working: {threadname}')
        time.sleep(1)
    logging.info(f'Finished: {threadname}')
def stop():
    # Timer callback: terminate the whole process; daemon threads die with it.
    logging.info('Exiting the application')
    exit(0)
#Main function
def main():
    """Configure logging, schedule a 3-second shutdown, launch a daemon worker."""
    logging.basicConfig(format='%(levelname)s - %(asctime)s.%(msecs)03d: %(message)s',datefmt='%H:%M:%S', level=logging.DEBUG)
    logging.info('Main thread Started')
    # Schedule stop() to terminate the whole application after 3 seconds.
    shutdown_timer = Timer(3, stop)
    shutdown_timer.start()
    # Daemon worker: it is killed automatically when the interpreter exits.
    worker = Thread(target=test, daemon=True)
    worker.start()
    logging.info('Main thread Finished')
# Run the daemon-thread demo when executed as a script.
if __name__ == "__main__":
main() | 22.536585 | 126 | 0.668831 |
acf5706478326928cc42fe75c523fcf112ed7116 | 699 | py | Python | 2022-04-11 Homework.py | ryankwondev/hanahs_programming | a44bca6eee5eda6d8cfbf3c231c3eea70823f8cf | [
"MIT"
] | null | null | null | 2022-04-11 Homework.py | ryankwondev/hanahs_programming | a44bca6eee5eda6d8cfbf3c231c3eea70823f8cf | [
"MIT"
] | null | null | null | 2022-04-11 Homework.py | ryankwondev/hanahs_programming | a44bca6eee5eda6d8cfbf3c231c3eea70823f8cf | [
"MIT"
] | null | null | null | # 권동한_LAB3-2
# LAB 3-2: report whether the entered day is a weekend
# (prompts and outputs are intentionally in Korean).
weekend = ['토', '일']
w = input('무슨 요일? ')
if w in weekend:
    print("주말")
else:
    print("평일")
# 권동한_HW1  (HW1: evaluate a piecewise function f(x))
x = float(input("x값을 입력하세요 : "))
if x <= 0:
    sol = x ** 3 - 9 * x + 2
else:
    sol = 7 * x + 2
print("f(x) = ", sol)
# 권동한_HW2  (HW2: print the smallest of three integers)
n1 = int(input("첫 번째 숫자를 입력하세요 : "))
n2 = int(input("두 번째 숫자를 입력하세요 : "))
n3 = int(input("세 번째 숫자를 입력하세요 : "))
# NOTE(review): ``min`` shadows the builtin of the same name, and the
# initial ``min = 0`` is immediately overwritten (no effect).
min = 0
min = n1
if n2 < min:
    min = n2
if n3 < min:
    min = n3
print("가장 작은 숫자는", min, "입니다.")
# 권동한_HW3  (HW3: map a number 1-4 to a season name)
season = int(input("값을 입력하세요 : "))
if season == 1:
    print("봄 입니다.")
elif season == 2:
    print("여름 입니다.")
elif season == 3:
    print("가을 입니다.")
elif season == 4:
    print("겨을 입니다.")
else:
    print("계절")
| 12.944444 | 36 | 0.51073 |
acf570902e0e07bddfe0c624d4dab4b2bf8b4240 | 503 | py | Python | PyQuM/ver(0.0)/QuApp/app.py | takehuge/PYQUM | bfc9d9b1c2f4246c7aac3a371baaf587c99f8069 | [
"MIT"
] | null | null | null | PyQuM/ver(0.0)/QuApp/app.py | takehuge/PYQUM | bfc9d9b1c2f4246c7aac3a371baaf587c99f8069 | [
"MIT"
] | null | null | null | PyQuM/ver(0.0)/QuApp/app.py | takehuge/PYQUM | bfc9d9b1c2f4246c7aac3a371baaf587c99f8069 | [
"MIT"
] | null | null | null | from flask import Flask
from QuApp.views.index import bp as index_bp
from QuApp.views.characterizations import bp as characterization_bp
from QuApp.views.experiments import bp as experiments_bp
from QuApp.views.calibration import bp as calib_bp
from QuApp.views.schemes import bp as schemes_bp
# Create the Flask app and attach one blueprint per feature area
# (routes are defined in the QuApp.views.* modules).
app = Flask(__name__)
app.register_blueprint(index_bp)
app.register_blueprint(characterization_bp)
app.register_blueprint(experiments_bp)
app.register_blueprint(calib_bp)
app.register_blueprint(schemes_bp)
| 31.4375 | 67 | 0.854871 |
acf570d6ec999e3336a4c6e74ae115fb85bb77d1 | 532 | py | Python | weather/models/last_day.py | bomzheg/Weather | cd0e300c581f61f23ef5d927f36e6a117c6fc208 | [
"MIT"
] | 1 | 2020-10-25T12:54:03.000Z | 2020-10-25T12:54:03.000Z | weather/models/last_day.py | bomzheg/WeatherDiary | cd0e300c581f61f23ef5d927f36e6a117c6fc208 | [
"MIT"
] | null | null | null | weather/models/last_day.py | bomzheg/WeatherDiary | cd0e300c581f61f23ef5d927f36e6a117c6fc208 | [
"MIT"
] | null | null | null | import datetime
from sqlite3 import Connection
# Latest recorded date in the weather table (dates stored as ordinals).
SQL_LAST_DAY = """
SELECT
    date
FROM weather
ORDER BY date DESC
LIMIT 1
"""


def get_last_day(conn: Connection) -> datetime.date:
    """Return the most recent date recorded in the ``weather`` table.

    The ``date`` column holds proleptic-Gregorian ordinals as produced by
    ``datetime.date.toordinal()``.

    Raises:
        ValueError: if the ``weather`` table is empty (the original code
            crashed with an opaque TypeError on ``None[0]`` in that case).
    """
    cur = conn.cursor()
    try:
        cur.execute(SQL_LAST_DAY)
        row = cur.fetchone()
    finally:
        # The original leaked the cursor when execute() raised.
        cur.close()
    if row is None:
        raise ValueError("weather table contains no rows")
    return datetime.date.fromordinal(row[0])
if __name__ == "__main__":
    # Ad-hoc manual check against a local copy of the weather-diary database.
    import sqlite3
    with sqlite3.connect(r"c:\Users\Public\Python\WeatherDiary.db") as connect:
        d = get_last_day(connect)
    print(d)
| 19.703704 | 79 | 0.667293 |
acf570f8b847859ae237e84abf01706ffd8072fa | 1,996 | py | Python | kiosk/forms.py | AndiBr/ffksk | ff4bc4ad26d4571eaa1a6ff815b2e6a876f8ba99 | [
"MIT"
] | null | null | null | kiosk/forms.py | AndiBr/ffksk | ff4bc4ad26d4571eaa1a6ff815b2e6a876f8ba99 | [
"MIT"
] | 14 | 2018-09-12T06:59:55.000Z | 2020-02-26T07:17:48.000Z | kiosk/forms.py | AndiBr/ffksk | ff4bc4ad26d4571eaa1a6ff815b2e6a876f8ba99 | [
"MIT"
] | null | null | null | from django import forms
from django.core.validators import MinValueValidator
from . import models
class Kontakt_Nachricht_Form(forms.ModelForm):
    """ModelForm for contact messages (Kontakt_Nachricht); the answered flag
    ('beantwortet') is managed elsewhere and therefore excluded."""
    class Meta:
        model = models.Kontakt_Nachricht
        exclude = ('beantwortet',)
        widgets= {
            # Render the subject ('betreff') as a single-row textarea.
            'betreff': forms.Textarea(attrs={'rows':1,})
        }
class TransaktionenForm(forms.Form):
    """Form for a money transfer ('Transaktion') between two users."""
    # Sender/receiver ids are filled in by the view and shown read-only.
    idFrom = forms.IntegerField(
        widget=forms.TextInput(attrs={'readonly':'true'})
    )
    idTo = forms.IntegerField(
        widget=forms.TextInput(attrs={'readonly':'true'})
    )
    # Amount ('Betrag') must be non-negative.
    betrag = forms.DecimalField(decimal_places=2, max_digits=10, validators=[MinValueValidator(0)])
    kommentar = forms.CharField(widget=forms.Textarea, max_length=500)
class EinzahlungenForm(forms.Form):
    """Form for a deposit ('Einzahlung') or withdrawal ('Auszahlung')."""
    CHOICES = [('Einzahlung','Einzahlung'),('Auszahlung','Auszahlung')]
    # Target user id is filled in by the view and shown read-only.
    idUser = forms.IntegerField(
        widget=forms.TextInput(attrs={'readonly':'true'})
    )
    typ = forms.ChoiceField(choices=CHOICES, widget=forms.RadioSelect())
    # Amount ('Betrag') must be non-negative; comment is optional here.
    betrag = forms.DecimalField(decimal_places=2, max_digits=10, validators=[MinValueValidator(0)])
    kommentar = forms.CharField(widget=forms.Textarea, max_length=500,required=False)
class RueckbuchungForm(forms.Form):
    """Form for refunding ('Rueckbuchung') part of a purchase; all purchase
    details are read-only, only the quantity to return is editable."""
    kaeufer_id = forms.IntegerField(
        widget=forms.TextInput(attrs={'readonly':'true'})
    )
    produkt_id = forms.IntegerField(
        widget=forms.TextInput(attrs={'readonly':'true'})
    )
    produkt_name = forms.CharField(
        widget=forms.TextInput(attrs={'readonly':'true'})
    )
    anzahl_gekauft = forms.IntegerField(
        widget=forms.TextInput(attrs={'readonly':'true'})
    )
    anzahl_zurueck = forms.IntegerField(required=False, validators=[MinValueValidator(0)])
    # Clean "anzahl_zurueck" so that no values have to be filled in and they are set to default with zero
    def clean_anzahl_zurueck(self):
        # Default an omitted quantity to 0 and reject returning more items
        # than were bought (error message is user-facing German).
        data = self.cleaned_data['anzahl_zurueck']
        anzahl_gekauft = self.cleaned_data['anzahl_gekauft']
        if not data:
            data = 0
        if data > anzahl_gekauft:
            raise forms.ValidationError('Der Wert muss kleiner gleich '+str(anzahl_gekauft)+' sein.')
        return data
| 29.791045 | 102 | 0.749499 |
acf571b6be6e728c024b885d03a2a2f92287ffe5 | 137 | py | Python | __init__.py | Cicadadenis/999 | f1de12723c89d77fc4e020ba9343289665330776 | [
"MIT"
] | null | null | null | __init__.py | Cicadadenis/999 | f1de12723c89d77fc4e020ba9343289665330776 | [
"MIT"
] | null | null | null | __init__.py | Cicadadenis/999 | f1de12723c89d77fc4e020ba9343289665330776 | [
"MIT"
] | null | null | null | git init
git add .
git commit -m 'first relise'
heroku create 'cicada'
git remote -v
git push heroku main
heroku ps:scale worker=1
| 9.785714 | 28 | 0.722628 |
acf571f66231085d16bc408ec990f0b79e887572 | 294 | py | Python | dumbo_contact_point_estimation_demo/setup.py | fevb/dumbo_apps | b177dc88fdf489d635416d4a4ce77fdd11c162ee | [
"BSD-3-Clause"
] | null | null | null | dumbo_contact_point_estimation_demo/setup.py | fevb/dumbo_apps | b177dc88fdf489d635416d4a4ce77fdd11c162ee | [
"BSD-3-Clause"
] | null | null | null | dumbo_contact_point_estimation_demo/setup.py | fevb/dumbo_apps | b177dc88fdf489d635416d4a4ce77fdd11c162ee | [
"BSD-3-Clause"
] | 1 | 2021-10-06T07:55:43.000Z | 2021-10-06T07:55:43.000Z | ## ! DO NOT MANUALLY INVOKE THIS setup.py, USE CATKIN INSTEAD
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
# fetch values from package.xml
# Merge the package.xml metadata with the install targets and run setup.
d = generate_distutils_setup(
    scripts=['scripts/dumbo_contact_point_estimation_demo']
)
setup(**d)
| 24.5 | 61 | 0.79932 |
acf5726b9dc57dc7034603ec45b6301fe1ea4e40 | 6,105 | py | Python | utils/google_utils.py | Andrew3927/Fire-Smoke-Detection | 4bddbc0e43159483dbd27f222b90a23d1f19fb7f | [
"MIT"
] | 1 | 2022-03-04T15:36:49.000Z | 2022-03-04T15:36:49.000Z | utils/google_utils.py | Andrew3927/Fire-Smoke-Detection | 4bddbc0e43159483dbd27f222b90a23d1f19fb7f | [
"MIT"
] | null | null | null | utils/google_utils.py | Andrew3927/Fire-Smoke-Detection | 4bddbc0e43159483dbd27f222b90a23d1f19fb7f | [
"MIT"
] | 3 | 2022-03-04T03:38:54.000Z | 2022-03-11T02:58:06.000Z | # Google utils: https://cloud.google.com/storage/docs/reference/libraries
import os
import platform
import subprocess
import time
import urllib
from pathlib import Path
import requests
import torch
def gsutil_getsize(url=''):
    """Return the size in bytes that ``gsutil du`` reports for *url*,
    or 0 when the command produces no output."""
    # gs://bucket/file size https://cloud.google.com/storage/docs/gsutil/commands/du
    s = subprocess.check_output(f'gsutil du {url}', shell=True).decode('utf-8')
    # SECURITY FIX: parse the leading byte count with int() instead of the
    # original eval(), which would execute arbitrary gsutil output as Python.
    # NOTE(review): ``url`` is still interpolated into a shell=True command;
    # never pass untrusted input.
    return int(s.split(' ')[0]) if len(s) else 0  # bytes
def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''):
    """Download *url* to *file*, falling back to curl against *url2* (or
    *url* again) on failure, and delete any result that is missing or
    smaller than *min_bytes* so partial downloads never survive."""
    # Attempts to download file from url or url2, checks and removes incomplete downloads < min_bytes
    file = Path(file)
    assert_msg = f"Downloaded file '{file}' does not exist or size is < min_bytes={min_bytes}"
    try:  # url1
        print(f'Downloading {url} to {file}...')
        torch.hub.download_url_to_file(url, str(file))
        assert file.exists() and file.stat().st_size > min_bytes, assert_msg  # check
    except Exception as e:  # url2
        file.unlink(missing_ok=True)  # remove partial downloads
        print(f'ERROR: {e}\nRe-attempting {url2 or url} to {file}...')
        # Fallback: shell out to curl with retry (-C - resumes partial files).
        os.system(f"curl -L '{url2 or url}' -o '{file}' --retry 3 -C -")  # curl download, retry and resume on fail
    finally:
        # Whatever happened above, never leave a truncated file behind.
        if not file.exists() or file.stat().st_size < min_bytes:  # check
            file.unlink(missing_ok=True)  # remove partial downloads
            print(f"ERROR: {assert_msg}\n{error_msg}")
        print('')
def attempt_download(file, repo='ultralytics/yolov5'):  # from utils.google_utils import *; attempt_download()
    """Resolve *file* to a local path, downloading it first if necessary:
    direct http(s) URLs are fetched as-is, otherwise the bare name is looked
    up among the GitHub release assets of *repo*."""
    # Attempt file download if does not exist
    file = Path(str(file).strip().replace("'", ''))
    if not file.exists():
        # URL specified
        name = Path(urllib.parse.unquote(str(file))).name  # decode '%2F' to '/' etc.
        if str(file).startswith(('http:/', 'https:/')):  # download
            url = str(file).replace(':/', '://')  # Pathlib turns :// -> :/
            name = name.split('?')[0]  # parse authentication https://url.com/file.txt?auth...
            safe_download(file=name, url=url, min_bytes=1E5)
            return name
        # GitHub assets
        file.parent.mkdir(parents=True, exist_ok=True)  # make parent dir (if required)
        try:
            response = requests.get(f'https://api.github.com/repos/{repo}/releases/v5.0').json()  # github api
            assets = [x['name'] for x in response['assets']]  # release assets, i.e. ['yolov5s.pt', 'yolov5m.pt', ...]
            tag = response['tag_name']  # i.e. 'v5.0'
        except:  # fallback plan
            # Offline fallback: hard-coded asset list plus the newest local
            # git tag.  NOTE(review): bare except also hides KeyboardInterrupt.
            assets = ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt',
                      'yolov5s6.pt', 'yolov5m6.pt', 'yolov5l6.pt', 'yolov5x6.pt']
            try:
                tag = subprocess.check_output('git tag', shell=True, stderr=subprocess.STDOUT).decode().split()[-1]
            except:
                tag = 'v5.0'  # current release
        if name in assets:
            safe_download(file,
                          url=f'https://github.com/{repo}/releases/download/{tag}/{name}',
                          # url2=f'https://storage.googleapis.com/{repo}/ckpt/{name}', # backup url (optional)
                          min_bytes=1E5,
                          error_msg=f'{file} missing, try downloading from https://github.com/{repo}/releases/')
    return str(file)
def gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', file='tmp.zip'):
    """Download a Google Drive file by *id* to *file* via curl, handling the
    large-file confirmation cookie; unzips (and deletes) .zip results.
    Returns curl's exit status (0 on success).

    NOTE(review): parameter ``id`` shadows the builtin of the same name.
    """
    # Downloads a file from Google Drive. from yolov5.utils.google_utils import *; gdrive_download()
    t = time.time()
    file = Path(file)
    cookie = Path('cookie')  # gdrive cookie
    print(f'Downloading https://drive.google.com/uc?export=download&id={id} as {file}... ', end='')
    file.unlink(missing_ok=True)  # remove existing file
    cookie.unlink(missing_ok=True)  # remove existing cookie
    # Attempt file download
    out = "NUL" if platform.system() == "Windows" else "/dev/null"
    os.system(f'curl -c ./cookie -s -L "drive.google.com/uc?export=download&id={id}" > {out}')
    if os.path.exists('cookie'):  # large file
        s = f'curl -Lb ./cookie "drive.google.com/uc?export=download&confirm={get_token()}&id={id}" -o {file}'
    else:  # small file
        s = f'curl -s -L -o {file} "drive.google.com/uc?export=download&id={id}"'
    r = os.system(s)  # execute, capture return
    cookie.unlink(missing_ok=True)  # remove existing cookie
    # Error check
    if r != 0:
        file.unlink(missing_ok=True)  # remove partial
        print('Download error ')  # raise Exception('Download error')
        return r
    # Unzip if archive
    if file.suffix == '.zip':
        print('unzipping... ', end='')
        os.system(f'unzip -q {file}')  # unzip
        file.unlink()  # remove zip to free space
    print(f'Done ({time.time() - t:.1f}s)')
    return r
def get_token(cookie="./cookie"):
    """Return the Google Drive download-confirmation token from the given
    curl cookie file, or '' when no line mentions 'download'."""
    with open(cookie) as cookie_file:
        hits = (entry.split()[-1] for entry in cookie_file if "download" in entry)
        return next(hits, "")
# def upload_blob(bucket_name, source_file_name, destination_blob_name):
# # Uploads a file to a bucket
# # https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python
#
# storage_client = storage.Client()
# bucket = storage_client.get_bucket(bucket_name)
# blob = bucket.blob(destination_blob_name)
#
# blob.upload_from_filename(source_file_name)
#
# print('File {} uploaded to {}.'.format(
# source_file_name,
# destination_blob_name))
#
#
# def download_blob(bucket_name, source_blob_name, destination_file_name):
# # Uploads a blob from a bucket
# storage_client = storage.Client()
# bucket = storage_client.get_bucket(bucket_name)
# blob = bucket.blob(source_blob_name)
#
# blob.download_to_filename(destination_file_name)
#
# print('Blob {} downloaded to {}.'.format(
# source_blob_name,
# destination_file_name))
| 42.395833 | 119 | 0.607043 |
acf572fe9204c2f2d53195152bd26eeb216591a2 | 411 | py | Python | register_app/migrations/0010_alter_grades_grade.py | arcziwal/e-school-register | 2ef821a5d129d75b0fb599900770fe3d90d727bb | [
"MIT"
] | null | null | null | register_app/migrations/0010_alter_grades_grade.py | arcziwal/e-school-register | 2ef821a5d129d75b0fb599900770fe3d90d727bb | [
"MIT"
] | null | null | null | register_app/migrations/0010_alter_grades_grade.py | arcziwal/e-school-register | 2ef821a5d129d75b0fb599900770fe3d90d727bb | [
"MIT"
] | null | null | null | # Generated by Django 4.0.3 on 2022-04-23 01:40
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: alter Grades.grade to a CharField capped
    at 2 characters."""

    dependencies = [
        ('register_app', '0009_alter_schoolclass_name_of_class_and_more'),
    ]

    operations = [
        migrations.AlterField(
            model_name='grades',
            name='grade',
            field=models.CharField(max_length=2),
        ),
    ]
| 21.631579 | 74 | 0.622871 |
acf5730ef6ea15a3ac608c80eb5fed76e8004266 | 602 | py | Python | docker/aws/sessions.py | uk-gov-mirror/nationalarchives.tdr-jenkins | 1bcbee009d4384a777247039d44b2790eba34caa | [
"MIT"
] | null | null | null | docker/aws/sessions.py | uk-gov-mirror/nationalarchives.tdr-jenkins | 1bcbee009d4384a777247039d44b2790eba34caa | [
"MIT"
] | 34 | 2020-02-03T14:20:42.000Z | 2022-01-26T09:22:09.000Z | docker/aws/sessions.py | uk-gov-mirror/nationalarchives.tdr-jenkins | 1bcbee009d4384a777247039d44b2790eba34caa | [
"MIT"
] | 1 | 2021-04-11T07:11:53.000Z | 2021-04-11T07:11:53.000Z | import boto3
from boto3 import Session
def get_session(account_number, role_name):
    """Assume the given IAM role in the target account via STS and return a
    boto3 Session built from the temporary credentials."""
    sts = boto3.client('sts')
    assumed = sts.assume_role(
        RoleArn='arn:aws:iam::' + account_number + ':role/' + role_name,
        RoleSessionName='session',
    )
    credentials = assumed['Credentials']
    return Session(
        aws_access_key_id=credentials['AccessKeyId'],
        aws_secret_access_key=credentials['SecretAccessKey'],
        aws_session_token=credentials['SessionToken'],
    )
acf57358bd09fa919ba9e0017d01ebc380be3182 | 885 | py | Python | modules/sendrequest.py | Ahmed-Entersoft/Astra | aa43c80b912fbdd6c225fb8543140a427836e843 | [
"Apache-2.0"
] | null | null | null | modules/sendrequest.py | Ahmed-Entersoft/Astra | aa43c80b912fbdd6c225fb8543140a427836e843 | [
"Apache-2.0"
] | null | null | null | modules/sendrequest.py | Ahmed-Entersoft/Astra | aa43c80b912fbdd6c225fb8543140a427836e843 | [
"Apache-2.0"
] | null | null | null | import utils.logs as logs
try:
    import requests
    # All scanner requests use verify=False, so pre-emptively silence
    # urllib3's InsecureRequestWarning noise.
    requests.packages.urllib3.disable_warnings()
except ImportError:
    # BUG FIX: narrowed from a bare ``except:``, which also swallowed
    # SystemExit, KeyboardInterrupt and genuine programming errors.
    print("[-]Failed to import requests module")
def api_request(url,method,headers,body=None):
    # Dispatch one HTTP request for the scanner.  NOTE the deliberate
    # asymmetry: POST sends ``body`` as JSON (json=), PUT sends it raw/form
    # (data=), GET/OPTIONS ignore ``body``.  TLS verification is off and
    # redirects are not followed so auth behavior can be observed directly.
    try:
        if method.upper() == "GET":
            auth_request = requests.get(url,headers=headers, allow_redirects=False,verify=False)
        elif method.upper() == "POST":
            auth_request = requests.post(url,headers=headers,json=body, allow_redirects=False,verify=False)
        elif method.upper() == "PUT":
            auth_request = requests.put(url,headers=headers,data=body, allow_redirects=False,verify=False)
        elif method.upper() == "OPTIONS":
            auth_request = requests.options(url,headers=headers, verify=False)
        # NOTE(review): an unsupported method leaves auth_request unbound,
        # so this return raises UnboundLocalError, which is caught below and
        # the function falls through to return None -- callers must handle
        # a None response.
        return auth_request
    except Exception as e:
        logs.logging.error("Exception from sendrequest %s",e)
| 36.875 | 107 | 0.672316 |
acf57362b5641de7c28172d80ee3424dddf7f88b | 1,533 | py | Python | scripts/createheader.py | RandomcodeDev/purpl-engine | 956587ec6720385926aebd9bc68944f56f98ac43 | [
"Zlib"
] | null | null | null | scripts/createheader.py | RandomcodeDev/purpl-engine | 956587ec6720385926aebd9bc68944f56f98ac43 | [
"Zlib"
] | 1 | 2020-09-15T22:41:18.000Z | 2020-09-24T02:10:11.000Z | scripts/createheader.py | RandomcodeDev/purpl-engine | 956587ec6720385926aebd9bc68944f56f98ac43 | [
"Zlib"
] | null | null | null | import os
import sys
import pathlib
# Target header file name: first CLI argument, else prompt until non-empty.
try:
    name = str(sys.argv[1])
except IndexError:
    name = input('File name: ')
    while not name:
        name = input('File name: ')
        if name:
            break
        print('No file name given.')
# Optional working directory: second CLI argument, else ask (blank keeps cwd).
try:
    os.chdir(sys.argv[2])
except IndexError:
    chdir = input('Change to alternate directory first [current directory]: ')
    if chdir:
        os.chdir(chdir)
# Optional namespace to open in the header: third CLI argument, else ask.
try:
    namespace = str(sys.argv[3])
except IndexError:
    namespace = input('Namespace to create [none]: ')
# Check if the file is there and, if not empty, prompt whether to overwrite it.
if pathlib.Path(name).exists() and os.stat(name).st_size:
    # BUG FIX: the original compared against ('y' or 'Y' or 'yes' or 'Yes'),
    # which short-circuits to just 'y', so every answer except a literal "y"
    # was treated as "no".  Test membership in a tuple of accepted answers.
    if input('File exists and is not empty, overwrite it? [no] ') in ('y', 'Y', 'yes', 'Yes'):
        print('Overwriting file.')
    else:
        print('Not overwriting file.')
        exit()
with open(name, 'wb+') as file:
    # Generate the include guard symbol for the header, then write everything to the file
    # (path separators and dots become underscores, e.g. include/foo.h -> FOO_H).
    symbol = name.replace('include/', '').replace('include\\', '').replace('.',
        '_').replace('/', '_').replace('\\', '_').upper()
    contents = bytes(
        F"#pragma once\n\n#ifndef {symbol}\n#define {symbol} 1\n\n", encoding='utf8')
    if namespace:
        # Open an empty namespace block for the caller to fill in later.
        contents += bytes(F"namespace {namespace} ", encoding='utf8')
        contents += bytes("{\n\n}\n\n", encoding='utf-8')
    contents += bytes(F"#endif /* !{symbol} */\n", encoding='utf8')
    file.write(contents)
| 31.9375 | 100 | 0.592303 |
acf5740c4f816894c7fa0ebd6249ac36efc3c8bf | 2,988 | py | Python | ribbonbuilder/translate.py | davethecipo/pymolecular-ribbon-builder | dd9093df4c963cc7c101be4a37cb3e914f893fb1 | [
"MIT"
] | null | null | null | ribbonbuilder/translate.py | davethecipo/pymolecular-ribbon-builder | dd9093df4c963cc7c101be4a37cb3e914f893fb1 | [
"MIT"
] | null | null | null | ribbonbuilder/translate.py | davethecipo/pymolecular-ribbon-builder | dd9093df4c963cc7c101be4a37cb3e914f893fb1 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
import itertools
import numpy as np
import openbabel
class MolBuilder(object):
    """Build a 2-D molecular ribbon by tiling monomer atoms over an n x m
    lattice spanned by translation vectors a1 and a2, with optional distinct
    atom sets (plus positional offsets) along each of the four edges."""

    def __init__(self, monomer, polymer, a1, a2, n, m, base):
        self.monomer = monomer  # source molecule the atom indexes refer to
        self.polymer = polymer  # destination molecule receiving translated copies
        self.a1 = a1  # lattice vector stepped n times
        self.a2 = a2  # lattice vector stepped m times
        self.n = n
        self.m = m
        self.top_border = self._default_border()
        self.bottom_border = self._default_border()
        self.left_border = self._default_border()
        self.right_border = self._default_border()
        self.base = base  # atom indexes replicated at every lattice point

    @staticmethod
    def _default_border():
        """Empty border: no atoms and a zero (3, 1) offset vector."""
        # IDIOM FIX: np.zeros replaces the original
        # np.ndarray(shape=(3, 1), dtype=float, buffer=np.array([0, 0, 0])),
        # which only produced zeros by reinterpreting int64 zero bytes as
        # float64; the values are identical but the intent is now explicit.
        return {'atoms': [],
                'offset': np.zeros((3, 1), dtype=float)}

    def create_bulk(self):
        """Replicate the base atoms at every (n, m) lattice point."""
        for step_a, step_b in itertools.product(range(self.n), range(self.m)):
            tot_translation = step_a * self.a1 + step_b * self.a2
            self._iter_atoms(self.base, tot_translation)

    def create_left_border(self):
        """Replicate left-border atoms along the a2 edge at a1 step 0."""
        for step_m in range(self.m):
            translation = step_m * self.a2 - self.left_border['offset']
            self._iter_atoms(self.left_border['atoms'], translation)

    def create_right_border(self):
        """Replicate right-border atoms along the a2 edge at the last a1 step."""
        for step_m in range(self.m):
            # since the range function goes from 0 to m-1, I need m-1
            translation = (self.n - 1) * self.a1 + step_m * self.a2 - \
                self.right_border['offset']
            self._iter_atoms(self.right_border['atoms'], translation)

    def create_top_border(self):
        """Replicate top-border atoms along the a1 edge at the last a2 step."""
        for step_n in range(self.n):
            translation = step_n * self.a1 + (self.m - 1) * self.a2 - \
                self.top_border['offset']
            self._iter_atoms(self.top_border['atoms'], translation)

    def create_bottom_border(self):
        """Replicate bottom-border atoms along the a1 edge at a2 step 0."""
        for step_n in range(self.n):
            translation = step_n * self.a1 - self.bottom_border['offset']
            self._iter_atoms(self.bottom_border['atoms'], translation)

    def create_all(self):
        """Build the bulk plus all four borders."""
        self.create_bulk()
        self.create_top_border()
        self.create_bottom_border()
        self.create_left_border()
        self.create_right_border()

    def _iter_atoms(self, atom_indexes, translation):
        """Apply translation to all atoms corresponding to atom_indexes.

        Populates self.polymer with the atoms corresponding to atom_indexes
        after applying the translation.
        """
        for index in atom_indexes:
            atom = self.monomer.GetAtom(index)
            initial_pos = np.array([atom.GetX(), atom.GetY(), atom.GetZ()])
            final_pos = initial_pos + translation
            new_atom = openbabel.OBAtom()
            new_atom.SetAtomicNum(atom.GetAtomicNum())
            new_atom.SetVector(final_pos[0], final_pos[1], final_pos[2])
            self.polymer.AddAtom(new_atom)
| 36.888889 | 78 | 0.585341 |
acf5742968d40a128c38a5a76745551e10716cc0 | 42 | py | Python | arghandle/__init__.py | svaisakh/magnet | bff6748803ac8efd081f0ddbdca8b1743c674a14 | [
"MIT"
] | 343 | 2018-09-03T09:59:36.000Z | 2022-02-08T11:32:34.000Z | arghandle/__init__.py | svaisakh/magnet | bff6748803ac8efd081f0ddbdca8b1743c674a14 | [
"MIT"
] | 7 | 2018-09-04T07:03:11.000Z | 2019-03-21T07:17:14.000Z | arghandle/__init__.py | MagNet-DL/magnet | bff6748803ac8efd081f0ddbdca8b1743c674a14 | [
"MIT"
] | 23 | 2018-09-03T19:12:04.000Z | 2021-02-20T09:23:30.000Z | from arghandle.core import arghandle, args | 42 | 42 | 0.857143 |
acf574ed1980b659cabbd3bd8b9c49e314615a08 | 431 | py | Python | tweet/migrations/0002_auto_20201201_2128.py | marcornett/twitter-clone | bcee2828311ffeb598a5740edcd576d562135981 | [
"MIT"
] | null | null | null | tweet/migrations/0002_auto_20201201_2128.py | marcornett/twitter-clone | bcee2828311ffeb598a5740edcd576d562135981 | [
"MIT"
] | null | null | null | tweet/migrations/0002_auto_20201201_2128.py | marcornett/twitter-clone | bcee2828311ffeb598a5740edcd576d562135981 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.3 on 2020-12-01 21:28
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated migration: give Tweet.date_created a callable default."""

    dependencies = [
        ('tweet', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='tweet',
            name='date_created',
            # Passing the callable (not timezone.now()) means the timestamp is
            # evaluated per-row at save time rather than once at migration time.
            field=models.DateTimeField(default=django.utils.timezone.now),
        ),
    ]
| 21.55 | 74 | 0.62413 |
acf576f382601b0fc0a676b746ad2b6a7c7e2e11 | 1,389 | py | Python | app/migrations/0003_add_google_view__user_pk__sale_campaign.py | CrowdClick/CrowdLink | beb4b7822b787d53b104138d4b2612cf3a6713d1 | [
"BSD-3-Clause"
] | 2 | 2021-12-16T19:43:57.000Z | 2021-12-18T08:15:39.000Z | app/migrations/0003_add_google_view__user_pk__sale_campaign.py | CrowdClick/CrowdLink | beb4b7822b787d53b104138d4b2612cf3a6713d1 | [
"BSD-3-Clause"
] | 5 | 2020-06-25T20:44:25.000Z | 2021-09-22T19:01:54.000Z | app/migrations/0003_add_google_view__user_pk__sale_campaign.py | CrowdClick/CrowdLink | beb4b7822b787d53b104138d4b2612cf3a6713d1 | [
"BSD-3-Clause"
] | null | null | null | # Generated by Django 3.0.6 on 2020-05-24 14:46
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration for the sale/click campaign models.

    Adds Google Analytics view id and user public key to SaleCampaign,
    enforces a 42-char minimum on user public keys (Ethereum address
    length: '0x' + 40 hex chars), and makes links unique per
    (user_public_key, campaign) pair.
    """

    dependencies = [
        ('app', '0002_add_minlength_validator_for_user_pk'),
    ]

    operations = [
        migrations.AddField(
            model_name='salecampaign',
            name='google_view_id',
            # One-off default backfills existing rows; preserve_default=False
            # drops it afterwards so new rows must supply a value.
            field=models.CharField(default='ga:141873340', max_length=15, verbose_name='Google View ID'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='salecampaign',
            name='user_public_key',
            field=models.CharField(default='0x0', max_length=42, validators=[django.core.validators.MinLengthValidator(42)], verbose_name='User public key'),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='clickcampaign',
            name='user_public_key',
            field=models.CharField(max_length=42, validators=[django.core.validators.MinLengthValidator(42)], verbose_name='User public key'),
        ),
        migrations.AlterUniqueTogether(
            name='clicklink',
            unique_together={('user_public_key', 'campaign')},
        ),
        migrations.AlterUniqueTogether(
            name='salelink',
            unique_together={('user_public_key', 'campaign')},
        ),
    ]
| 34.725 | 157 | 0.62635 |
acf577d9f4fe0e20240e1d956de13034cea69a47 | 3,892 | py | Python | tests/formatters/firefox.py | berggren/plaso | 2658c80c5076f97a9a27272e73997bde8c39e875 | [
"Apache-2.0"
] | 27 | 2019-04-05T12:01:49.000Z | 2022-02-08T02:26:25.000Z | tests/formatters/firefox.py | berggren/plaso | 2658c80c5076f97a9a27272e73997bde8c39e875 | [
"Apache-2.0"
] | null | null | null | tests/formatters/firefox.py | berggren/plaso | 2658c80c5076f97a9a27272e73997bde8c39e875 | [
"Apache-2.0"
] | 8 | 2019-11-28T08:06:34.000Z | 2020-08-29T13:53:30.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Firefox history event formatter."""
from __future__ import unicode_literals
import unittest
from plaso.formatters import firefox
from tests.formatters import test_lib
class FirefoxBookmarkAnnotationFormatterTest(test_lib.EventFormatterTestCase):
  """Tests for the Firefox bookmark annotation event formatter."""

  def testInitialization(self):
    """Tests the initialization."""
    formatter = firefox.FirefoxBookmarkAnnotationFormatter()
    self.assertIsNotNone(formatter)

  def testGetFormatStringAttributeNames(self):
    """Tests the GetFormatStringAttributeNames function."""
    formatter = firefox.FirefoxBookmarkAnnotationFormatter()
    expected_names = ['content', 'title', 'url']
    self._TestGetFormatStringAttributeNames(formatter, expected_names)


# TODO: add test for GetMessages.
# TODO: add test for GetSources.
class FirefoxBookmarkFolderFormatterTest(test_lib.EventFormatterTestCase):
  """Tests for the Firefox bookmark folder event formatter."""

  def testInitialization(self):
    """Tests the initialization."""
    formatter = firefox.FirefoxBookmarkFolderFormatter()
    self.assertIsNotNone(formatter)

  def testGetFormatStringAttributeNames(self):
    """Tests the GetFormatStringAttributeNames function."""
    formatter = firefox.FirefoxBookmarkFolderFormatter()
    expected_names = ['title']
    self._TestGetFormatStringAttributeNames(formatter, expected_names)


# TODO: add test for GetMessages.
# TODO: add test for GetSources.
class FirefoxBookmarkFormatterTest(test_lib.EventFormatterTestCase):
  """Tests for the Firefox URL bookmark event formatter."""

  def testInitialization(self):
    """Tests the initialization."""
    formatter = firefox.FirefoxBookmarkFormatter()
    self.assertIsNotNone(formatter)

  def testGetFormatStringAttributeNames(self):
    """Tests the GetFormatStringAttributeNames function."""
    formatter = firefox.FirefoxBookmarkFormatter()
    expected_names = [
        'type', 'title', 'url', 'places_title', 'visit_count']
    self._TestGetFormatStringAttributeNames(formatter, expected_names)


# TODO: add test for GetMessages.
# TODO: add test for GetSources.
class FirefoxPageVisitFormatterTest(test_lib.EventFormatterTestCase):
  """Tests for the Firefox page visited event formatter."""

  def testInitialization(self):
    """Tests the initialization."""
    formatter = firefox.FirefoxPageVisitFormatter()
    self.assertIsNotNone(formatter)

  def testGetFormatStringAttributeNames(self):
    """Tests the GetFormatStringAttributeNames function."""
    formatter = firefox.FirefoxPageVisitFormatter()
    expected_names = [
        'url', 'title', 'visit_count', 'host', 'extra_string']
    self._TestGetFormatStringAttributeNames(formatter, expected_names)


# TODO: add test for GetMessages.
# TODO: add test for GetSources.
class FirefoxDowloadFormatterTest(test_lib.EventFormatterTestCase):
  """Tests for the Firefox download event formatter.

  Note: "Dowload" mirrors the spelling of the formatter class under test.
  """

  def testInitialization(self):
    """Tests the initialization."""
    formatter = firefox.FirefoxDowloadFormatter()
    self.assertIsNotNone(formatter)

  def testGetFormatStringAttributeNames(self):
    """Tests the GetFormatStringAttributeNames function."""
    formatter = firefox.FirefoxDowloadFormatter()
    expected_names = [
        'url', 'full_path', 'received_bytes', 'total_bytes']
    self._TestGetFormatStringAttributeNames(formatter, expected_names)


# TODO: add test for GetMessages.
# TODO: add test for GetSources.
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
| 31.387097 | 78 | 0.761048 |
acf577f2ab5079702aa5d1c61d0162aefdb149db | 63 | py | Python | kot/core/__init__.py | dragon-hex/kot-two-project | 170594ffbac61a967ecf553b4cd0f30e572619ec | [
"MIT"
] | null | null | null | kot/core/__init__.py | dragon-hex/kot-two-project | 170594ffbac61a967ecf553b4cd0f30e572619ec | [
"MIT"
] | 8 | 2021-09-23T22:46:12.000Z | 2021-11-06T12:53:46.000Z | kot/core/__init__.py | dragon-hex/kot-two-project | 170594ffbac61a967ecf553b4cd0f30e572619ec | [
"MIT"
] | null | null | null | from . import provider
from . import system
from . import cview | 21 | 22 | 0.777778 |
acf5786827d019ca15d18da6e452a508c0dce00f | 17,634 | py | Python | models/progress.py | teachteamgithub/coursebuilder | 178c0ff3cd28858079488c3c2a0bc22a5baf58e3 | [
"Apache-2.0"
] | 13 | 2015-02-07T12:43:40.000Z | 2020-02-10T18:30:36.000Z | models/progress.py | esacosta/u-mooc | 8d9a1427c988121e12dd6e2f7c8835f0e561c507 | [
"Apache-2.0"
] | null | null | null | models/progress.py | esacosta/u-mooc | 8d9a1427c988121e12dd6e2f7c8835f0e561c507 | [
"Apache-2.0"
] | 8 | 2015-08-29T03:10:16.000Z | 2019-09-13T22:59:03.000Z | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Student progress trackers."""
__author__ = 'Sean Lip (sll@google.com)'
import datetime
import os
from tools import verify
from models import StudentPropertyEntity
import transforms
class UnitLessonCompletionTracker(object):
    """Tracks student completion for a unit/lesson-based linear course.

    Progress is persisted as a JSON dict (keyed by the dotted event keys
    described below) inside a single StudentPropertyEntity per student.
    """

    # Name under which the progress JSON is stored in StudentPropertyEntity.
    PROPERTY_KEY = 'linear-course-completion'

    # Here are representative examples of the keys for the various entities
    # used in this class:
    #   Unit 1: u.1
    #   Unit 1, Lesson 1: u.1.l.1
    #   Unit 1, Lesson 1, Video 1: u.1.l.1.v.1
    #   Unit 1, Lesson 1, Activity 2: u.1.l.1.a.2
    #   Unit 1, Lesson 1, Activity 2, Block 4: u.1.l.1.a.2.b.4
    #   Assessment 'Pre': s.Pre
    # At the moment, we do not divide assessments into blocks.
    #
    # IMPORTANT NOTE: The values of the keys mean different things depending on
    # whether the entity is a composite entity or not.
    # If it is a composite entity (unit, lesson, activity), then the value is
    #   - 0 if none of its sub-entities has been completed
    #   - 1 if some, but not all, of its sub-entities have been completed
    #   - 2 if all its sub-entities have been completed.
    # If it is not a composite entity (video, block, assessment), then the
    # value is just the number of times the event has been triggered.

    # Constants for recording the state of composite entities.
    # TODO(sll): Change these to enums.
    NOT_STARTED_STATE = 0
    IN_PROGRESS_STATE = 1
    COMPLETED_STATE = 2

    # Single-letter codes used to build the dotted progress keys above.
    EVENT_CODE_MAPPING = {
        'unit': 'u',
        'lesson': 'l',
        'video': 'v',
        'activity': 'a',
        'block': 'b',
        'assessment': 's',
    }

    def __init__(self, course):
        # The course whose structure (units/lessons/activities) is tracked.
        self._course = course

    def _get_course(self):
        return self._course

    def get_activity_as_python(self, unit_id, lesson_id):
        """Gets the corresponding activity as a Python object."""
        root_name = 'activity'
        course = self._get_course()
        activity_text = course.app_context.fs.get(
            os.path.join(course.app_context.get_home(),
                         course.get_activity_filename(unit_id, lesson_id)))
        # Activities are authored in JavaScript-like syntax; convert to Python
        # source, then evaluate it inside the verifier's restricted scope.
        content, noverify_text = verify.convert_javascript_to_python(
            activity_text, root_name)
        activity = verify.evaluate_python_expression_from_text(
            content, root_name, verify.Activity().scope, noverify_text)
        return activity

    # The _get_*_key methods build the dotted progress keys documented at the
    # top of this class.
    def _get_unit_key(self, unit_id):
        return '%s.%s' % (self.EVENT_CODE_MAPPING['unit'], unit_id)

    def _get_lesson_key(self, unit_id, lesson_id):
        return '%s.%s.%s.%s' % (
            self.EVENT_CODE_MAPPING['unit'], unit_id,
            self.EVENT_CODE_MAPPING['lesson'], lesson_id
        )

    def _get_video_key(self, unit_id, lesson_id, video_id):
        return '%s.%s.%s.%s.%s.%s' % (
            self.EVENT_CODE_MAPPING['unit'], unit_id,
            self.EVENT_CODE_MAPPING['lesson'], lesson_id,
            self.EVENT_CODE_MAPPING['video'], video_id
        )

    def _get_activity_key(self, unit_id, lesson_id, activity_id):
        return '%s.%s.%s.%s.%s.%s' % (
            self.EVENT_CODE_MAPPING['unit'], unit_id,
            self.EVENT_CODE_MAPPING['lesson'], lesson_id,
            self.EVENT_CODE_MAPPING['activity'], activity_id
        )

    def _get_block_key(self, unit_id, lesson_id, activity_id, block_id):
        return '%s.%s.%s.%s.%s.%s.%s.%s' % (
            self.EVENT_CODE_MAPPING['unit'], unit_id,
            self.EVENT_CODE_MAPPING['lesson'], lesson_id,
            self.EVENT_CODE_MAPPING['activity'], activity_id,
            self.EVENT_CODE_MAPPING['block'], block_id
        )

    def _get_assessment_key(self, assessment_id):
        return '%s.%s' % (self.EVENT_CODE_MAPPING['assessment'], assessment_id)

    def get_valid_block_ids(self, unit_id, lesson_id):
        """Returns a list of block ids representing interactive activities."""
        valid_block_ids = []
        # Get the activity corresponding to this unit/lesson combination.
        activity = self.get_activity_as_python(unit_id, lesson_id)
        for block_id in range(len(activity['activity'])):
            block = activity['activity'][block_id]
            # Only dict-valued blocks are interactive; plain entries
            # (e.g. strings) do not count towards completion.
            if isinstance(block, dict):
                valid_block_ids.append(block_id)
        return valid_block_ids

    def _update_unit(self, progress, event_key):
        """Updates a unit's progress if all its lessons have been completed."""
        split_event_key = event_key.split('.')
        assert len(split_event_key) == 2
        unit_id = split_event_key[1]

        if self._get_entity_value(progress, event_key) == self.COMPLETED_STATE:
            return

        # Record that at least one lesson in this unit has been completed.
        self._set_entity_value(progress, event_key, self.IN_PROGRESS_STATE)

        # Check if all lessons in this unit have been completed.
        lessons = self._get_course().get_lessons(unit_id)
        for lesson in lessons:
            # Skip lessons that do not have activities associated with them.
            if not lesson.activity:
                continue
            if not (self.get_lesson_status(
                    progress,
                    unit_id, lesson.lesson_id) == self.COMPLETED_STATE):
                return

        # Record that all lessons in this unit have been completed.
        self._set_entity_value(progress, event_key, self.COMPLETED_STATE)

    def _update_lesson(self, progress, event_key):
        """Updates a lesson's progress if its activities have been completed."""
        split_event_key = event_key.split('.')
        assert len(split_event_key) == 4
        unit_id = split_event_key[1]
        lesson_id = split_event_key[3]

        if self._get_entity_value(progress, event_key) == self.COMPLETED_STATE:
            return

        # Record that at least one activity in this lesson has been completed.
        self._set_entity_value(progress, event_key, self.IN_PROGRESS_STATE)

        lessons = self._get_course().get_lessons(unit_id)
        for lesson in lessons:
            if str(lesson.lesson_id) == lesson_id and lesson:
                if not (self.get_activity_status(
                        progress, unit_id, lesson_id) == self.COMPLETED_STATE):
                    return

        # Record that all activities in this lesson have been completed.
        self._set_entity_value(progress, event_key, self.COMPLETED_STATE)

    def _update_activity(self, progress, event_key):
        """Updates activity's progress when all interactive blocks are done."""
        split_event_key = event_key.split('.')
        assert len(split_event_key) == 6
        unit_id = split_event_key[1]
        lesson_id = split_event_key[3]

        if self._get_entity_value(progress, event_key) == self.COMPLETED_STATE:
            return

        # Record that at least one block in this activity has been completed.
        self._set_entity_value(progress, event_key, self.IN_PROGRESS_STATE)

        valid_block_ids = self.get_valid_block_ids(unit_id, lesson_id)
        for block_id in valid_block_ids:
            if not self.is_block_completed(
                    progress, unit_id, lesson_id, block_id):
                return

        # Record that all blocks in this activity have been completed.
        self._set_entity_value(progress, event_key, self.COMPLETED_STATE)

    # Maps a composite entity name to the method that re-derives its state.
    # Note: these are plain function references (unbound), so callers must
    # pass `self` explicitly -- see _update_event.
    UPDATER_MAPPING = {
        'activity': _update_activity,
        'lesson': _update_lesson,
        'unit': _update_unit
    }

    # Dependencies for recording derived events. The key is the current
    # event, and the value is a tuple, each element of which contains:
    # - the dependent entity to be updated
    # - the transformation to apply to the id of the current event to get the
    #   id for the new event (dropping the last two dotted components walks
    #   one level up: block -> activity -> lesson -> unit)
    DERIVED_EVENTS = {
        'block': (
            {
                'entity': 'activity',
                'generate_new_id': (lambda s: '.'.join(s.split('.')[:-2])),
            },
        ),
        'activity': (
            {
                'entity': 'lesson',
                'generate_new_id': (lambda s: '.'.join(s.split('.')[:-2])),
            },
        ),
        'lesson': (
            {
                'entity': 'unit',
                'generate_new_id': (lambda s: '.'.join(s.split('.')[:-2])),
            },
        ),
    }

    def put_video_completed(self, student, unit_id, lesson_id):
        """Records that the given student has completed a video."""
        if not self._get_course().is_valid_unit_lesson_id(unit_id, lesson_id):
            return
        self._put_event(
            student, 'video', self._get_video_key(unit_id, lesson_id, 0))

    def put_activity_completed(self, student, unit_id, lesson_id):
        """Records that the given student has completed an activity."""
        if not self._get_course().is_valid_unit_lesson_id(unit_id, lesson_id):
            return
        self._put_event(
            student, 'activity', self._get_activity_key(unit_id, lesson_id, 0))

    def put_block_completed(self, student, unit_id, lesson_id, block_id):
        """Records that the given student has completed an activity block."""
        if not self._get_course().is_valid_unit_lesson_id(unit_id, lesson_id):
            return
        if not block_id in self.get_valid_block_ids(unit_id, lesson_id):
            return
        self._put_event(
            student,
            'block',
            self._get_block_key(unit_id, lesson_id, 0, block_id)
        )

    def put_assessment_completed(self, student, assessment_id):
        """Records that the given student has completed the given assessment."""
        if not self._get_course().is_valid_assessment_id(assessment_id):
            return
        self._put_event(
            student, 'assessment', self._get_assessment_key(assessment_id))

    def put_activity_accessed(self, student, unit_id, lesson_id):
        """Records that the given student has accessed this activity."""
        # This method currently exists because we need to mark activities
        # without interactive blocks as 'completed' when they are accessed.
        if not self.get_valid_block_ids(unit_id, lesson_id):
            self.put_activity_completed(student, unit_id, lesson_id)

    def _put_event(self, student, event_entity, event_key):
        """Starts a cascade of updates in response to an event taking place."""
        if event_entity not in self.EVENT_CODE_MAPPING:
            return
        progress = self.get_or_create_progress(student)
        self._update_event(
            student, progress, event_entity, event_key, direct_update=True)
        # Persist the whole cascade in a single datastore write.
        progress.updated_on = datetime.datetime.now()
        progress.put()

    def _update_event(self, student, progress, event_entity, event_key,
                      direct_update=False):
        """Updates statistics for the given event, and for derived events.

        Args:
            student: the student
            progress: the StudentProgressEntity for the student
            event_entity: the name of the affected entity (unit, video, etc.)
            event_key: the key for the recorded event
            direct_update: True if this event is being updated explicitly;
                False if it is being auto-updated.
        """
        if direct_update or event_entity not in self.UPDATER_MAPPING:
            if event_entity in self.UPDATER_MAPPING:
                # This is a derived event, so directly mark it as completed.
                self._set_entity_value(
                    progress, event_key, self.COMPLETED_STATE)
            else:
                # This is not a derived event, so increment its counter by one.
                self._inc(progress, event_key)
        else:
            # Unbound updater functions: pass self explicitly.
            self.UPDATER_MAPPING[event_entity](self, progress, event_key)

        # Propagate the change up the hierarchy (block -> activity -> lesson
        # -> unit) via recursive calls.
        if event_entity in self.DERIVED_EVENTS:
            for derived_event in self.DERIVED_EVENTS[event_entity]:
                self._update_event(
                    student=student,
                    progress=progress,
                    event_entity=derived_event['entity'],
                    event_key=derived_event['generate_new_id'](event_key),
                )

    # Status getters return the stored value for the entity's key, or None if
    # no event has ever been recorded for it.
    def get_unit_status(self, progress, unit_id):
        return self._get_entity_value(progress, self._get_unit_key(unit_id))

    def get_lesson_status(self, progress, unit_id, lesson_id):
        return self._get_entity_value(
            progress, self._get_lesson_key(unit_id, lesson_id))

    def get_video_status(self, progress, unit_id, lesson_id):
        return self._get_entity_value(
            progress, self._get_video_key(unit_id, lesson_id, 0))

    def get_activity_status(self, progress, unit_id, lesson_id):
        return self._get_entity_value(
            progress, self._get_activity_key(unit_id, lesson_id, 0))

    def get_block_status(self, progress, unit_id, lesson_id, block_id):
        return self._get_entity_value(
            progress, self._get_block_key(unit_id, lesson_id, 0, block_id))

    def get_assessment_status(self, progress, assessment_id):
        return self._get_entity_value(
            progress, self._get_assessment_key(assessment_id))

    def is_video_completed(self, progress, unit_id, lesson_id):
        value = self._get_entity_value(
            progress, self._get_video_key(unit_id, lesson_id, 0))
        return value is not None and value > 0

    def is_block_completed(self, progress, unit_id, lesson_id, block_id):
        value = self._get_entity_value(
            progress, self._get_block_key(unit_id, lesson_id, 0, block_id))
        return value is not None and value > 0

    def is_assessment_completed(self, progress, assessment_id):
        value = self._get_entity_value(
            progress, self._get_assessment_key(assessment_id))
        return value is not None and value > 0

    @classmethod
    def get_or_create_progress(cls, student):
        """Loads the student's progress record, creating it if absent."""
        progress = StudentPropertyEntity.get(student, cls.PROPERTY_KEY)
        if not progress:
            progress = StudentPropertyEntity.create(
                student=student, property_name=cls.PROPERTY_KEY)
            progress.put()
        return progress

    def get_unit_progress(self, student):
        """Returns a dict with the states of each unit."""
        units = self._get_course().get_units()
        progress = self.get_or_create_progress(student)

        result = {}
        for unit in units:
            # Type 'A' units are assessments; type 'U' units are regular units.
            if unit.type == 'A':
                result[unit.unit_id] = self.is_assessment_completed(
                    progress, unit.unit_id)
            elif unit.type == 'U':
                value = self.get_unit_status(progress, unit.unit_id)
                if value is None:
                    value = 0
                result[unit.unit_id] = value

        return result

    def get_lesson_progress(self, student, unit_id):
        """Returns a dict saying which lessons in this unit are completed."""
        lessons = self._get_course().get_lessons(unit_id)
        progress = self.get_or_create_progress(student)

        result = {}
        for lesson in lessons:
            value = self.get_lesson_status(progress, unit_id, lesson.lesson_id)
            if value is None:
                value = 0
            result[lesson.lesson_id] = value
        return result

    def _get_entity_value(self, progress, event_key):
        # progress.value holds the JSON-encoded progress dict (may be empty).
        if not progress.value:
            return None
        return transforms.loads(progress.value).get(event_key)

    def _set_entity_value(self, student_property, key, value):
        """Sets the integer value of a student property.

        Note: this method does not commit the change. The calling method should
        call put() on the StudentPropertyEntity.

        Args:
            student_property: the StudentPropertyEntity
            key: the student property whose value should be incremented
            value: the value to increment this property by
        """
        try:
            progress_dict = transforms.loads(student_property.value)
        except (AttributeError, TypeError):
            # No (or malformed) stored value yet -- start from scratch.
            progress_dict = {}

        progress_dict[key] = value
        student_property.value = transforms.dumps(progress_dict)

    def _inc(self, student_property, key, value=1):
        """Increments the integer value of a student property.

        Note: this method does not commit the change. The calling method should
        call put() on the StudentPropertyEntity.

        Args:
            student_property: the StudentPropertyEntity
            key: the student property whose value should be incremented
            value: the value to increment this property by
        """
        try:
            progress_dict = transforms.loads(student_property.value)
        except (AttributeError, TypeError):
            # No (or malformed) stored value yet -- start from scratch.
            progress_dict = {}

        if key not in progress_dict:
            progress_dict[key] = 0

        progress_dict[key] += value
        student_property.value = transforms.dumps(progress_dict)
| 39.538117 | 80 | 0.639333 |
acf578a089efd14b961e863f58fc81168ced1e6b | 2,802 | py | Python | tests/rule_based_profiler/plugins/my_custom_semantic_type_column_domain_builder.py | romalee/great_expectations | c2c5df42f878612d25aa76ee3e6d4e3852de797e | [
"Apache-2.0"
] | null | null | null | tests/rule_based_profiler/plugins/my_custom_semantic_type_column_domain_builder.py | romalee/great_expectations | c2c5df42f878612d25aa76ee3e6d4e3852de797e | [
"Apache-2.0"
] | null | null | null | tests/rule_based_profiler/plugins/my_custom_semantic_type_column_domain_builder.py | romalee/great_expectations | c2c5df42f878612d25aa76ee3e6d4e3852de797e | [
"Apache-2.0"
] | null | null | null | from typing import List, Optional, Union
import great_expectations.exceptions as ge_exceptions
from great_expectations.execution_engine.execution_engine import MetricDomainTypes
from great_expectations.rule_based_profiler.domain_builder.domain import Domain
from great_expectations.rule_based_profiler.domain_builder.domain_builder import (
DomainBuilder,
)
from great_expectations.rule_based_profiler.domain_builder.inferred_semantic_domain_type import (
SemanticDomainTypes,
)
from great_expectations.validator.validator import MetricConfiguration, Validator
class MyCustomSemanticTypeColumnDomainBuilder(DomainBuilder):
    """Custom DomainBuilder that defines and filters for "user_id" semantic
    type fields.
    """

    def __init__(
        self,
        semantic_types: Optional[
            Union[str, SemanticDomainTypes, List[Union[str, SemanticDomainTypes]]]
        ] = None,
        column_name_suffixes: Optional[List[str]] = None,
    ):
        # Default to the "user_id" semantic type when none is given.
        self._semantic_types = (
            ["user_id"] if semantic_types is None else semantic_types
        )
        # Default to matching column names that end in "_id".
        self._column_name_suffixes = (
            ["_id"] if column_name_suffixes is None else column_name_suffixes
        )

    def _get_domains(
        self,
        *,
        validator: Optional[Validator] = None,
        batch_ids: Optional[List[str]] = None,
    ) -> List[Domain]:
        """
        Find the semantic column type for each column and return all domains matching the specified type or types.
        """
        if validator is None:
            raise ge_exceptions.ProfilerExecutionError(
                message=f"{self.__class__.__name__} requires a reference to an instance of the Validator class."
            )

        table_column_names: List[str] = validator.get_metric(
            metric=MetricConfiguration(
                metric_name="table.columns",
                metric_domain_kwargs={},
                metric_value_kwargs=None,
                metric_dependencies=None,
            )
        )

        # Keep only columns whose names carry one of the configured suffixes,
        # and emit one COLUMN-typed domain per match.
        suffixes = tuple(self._column_name_suffixes)
        return [
            Domain(
                domain_type=MetricDomainTypes.COLUMN,
                domain_kwargs={
                    "column": column_name,
                    "batch_id": validator.active_batch_id,
                },
            )
            for column_name in table_column_names
            if column_name.endswith(suffixes)
        ]
| 33.759036 | 114 | 0.629907 |
acf578c8cf55103d7556edfa8f040d7320cc1af5 | 2,907 | py | Python | testinfra/modules/docker.py | hfuss/testinfra | 6251e29376679cc5bedc92baae2c82b4f3fcfd48 | [
"Apache-2.0"
] | 1 | 2021-08-09T21:12:16.000Z | 2021-08-09T21:12:16.000Z | testinfra/modules/docker.py | hfuss/testinfra | 6251e29376679cc5bedc92baae2c82b4f3fcfd48 | [
"Apache-2.0"
] | 1 | 2019-03-23T14:17:22.000Z | 2019-03-23T14:17:22.000Z | testinfra/modules/docker.py | hfuss/testinfra | 6251e29376679cc5bedc92baae2c82b4f3fcfd48 | [
"Apache-2.0"
] | 1 | 2020-09-22T14:51:06.000Z | 2020-09-22T14:51:06.000Z | # coding: utf-8
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from testinfra.modules.base import Module
class Docker(Module):
    """Test docker containers running on system.

    Example:

    >>> nginx = host.docker("app_nginx")
    >>> nginx.is_running
    True
    >>> nginx.id
    '7e67dc7495ca8f451d346b775890bdc0fb561ecdc97b68fb59ff2f77b509a8fe'
    >>> nginx.name
    'app_nginx'
    """

    def __init__(self, name):
        self._name = name
        # Keep the explicit two-argument form for Python 2 compatibility.
        super(Docker, self).__init__()

    def inspect(self):
        """Return the first entry of `docker inspect` output as a dict."""
        raw = self.check_output("docker inspect %s", self._name)
        return json.loads(raw)[0]

    @property
    def is_running(self):
        return self.inspect()['State']['Running']

    @property
    def id(self):
        return self.inspect()["Id"]

    @property
    def name(self):
        # Docker reports names with a leading "/"; strip it.
        return self.inspect()["Name"][1:]

    @classmethod
    def get_containers(cls, **filters):
        """Return a list of containers

        By default return list of all containers, including non-running
        containers.

        Filtering can be done using filters keys defined in
        https://docs.docker.com/engine/reference/commandline/ps/#filtering

        Multiple filters for a given key is handled by giving a list of string
        as value.

        >>> host.docker.get_containers()
        [<docker nginx>, <docker redis>, <docker app>]
        >>> host.docker.get_containers(status="running")
        [<docker app>]
        >>> host.docker.get_containers(name="nginx")
        [<docker nginx>]
        >>> host.docker.get_containers(name=["nginx", "redis"])
        [<docker nginx>, <docker redis>]
        """
        command = "docker ps --all --quiet --format '{{.Names}}'"
        arguments = []
        for key, value in filters.items():
            # Accept either a scalar or a list/tuple of values per filter key.
            values = value if isinstance(value, (list, tuple)) else [value]
            for item in values:
                command += " --filter %s=%s"
                arguments.extend([key, item])
        return [
            cls(container_id)
            for container_id in cls(None).check_output(
                command, *arguments).splitlines()
        ]

    def __repr__(self):
        return "<docker %s>" % self._name
acf5790ebf77d78bb51f99b72aa3ce4011cee976 | 574 | py | Python | smart-chef-api/src/smartchef/models/recipe.py | Flusinerd/SoftSol | 9eea560853b6414daa4348a70bae6fabd3289c88 | [
"MIT"
] | 2 | 2022-03-29T09:27:12.000Z | 2022-03-29T09:47:40.000Z | smart-chef-api/src/smartchef/models/recipe.py | Flusinerd/SoftSol | 9eea560853b6414daa4348a70bae6fabd3289c88 | [
"MIT"
] | 14 | 2022-03-31T08:30:11.000Z | 2022-03-31T09:01:47.000Z | smart-chef-api/src/smartchef/models/recipe.py | Flusinerd/SoftSol | 9eea560853b6414daa4348a70bae6fabd3289c88 | [
"MIT"
] | null | null | null | from django.db import models
from . import Resource
class Recipe(Resource):
    """
    A recipe for a product.

    Fields: display name, optional description/instruction text, an optional
    source URL and image, a star rating, and a protected category link.
    """

    name: str = models.CharField(max_length=512)
    description: str = models.TextField(max_length=1000, null=True)
    instruction: str = models.TextField(max_length=10000, null=True)
    url: str = models.URLField(max_length=512, null=True)
    # BUG FIX: this was previously `image: models.ImageField(...)` -- a bare
    # annotation with no assignment, so Django never registered the field and
    # no `image` column existed. Assigning it creates the field as intended.
    image = models.ImageField(upload_to="recipe_images", null=True)
    stars = models.IntegerField(default=0)
    # PROTECT prevents deleting a category that still has recipes.
    category = models.ForeignKey(
        "RecipeCategory", on_delete=models.PROTECT, null=False)
| 33.764706 | 68 | 0.712544 |
acf5798594348f4c023ce29bfd074bbedc6d9475 | 224 | py | Python | Basic Question Bank/a065/a065.py | michael21910/zerojudge | 6f4375413619ad2f5e2bb492d6d6f2024921e0d7 | [
"MIT"
] | 2 | 2021-09-20T05:59:18.000Z | 2021-12-04T05:22:22.000Z | Basic Question Bank/a065/a065.py | michael21910/zerojudge | 6f4375413619ad2f5e2bb492d6d6f2024921e0d7 | [
"MIT"
] | null | null | null | Basic Question Bank/a065/a065.py | michael21910/zerojudge | 6f4375413619ad2f5e2bb492d6d6f2024921e0d7 | [
"MIT"
try:
    # Each input line is a 7-character code; print the six absolute
    # differences between the ordinals of adjacent characters.
    while True:
        code = input()
        print("".join(
            str(abs(ord(code[i]) - ord(code[i - 1]))) for i in range(1, 7)))
except EOFError:
    # End of input: stop reading.
    pass
acf57a2e1598f3d3fca782056f0323f3a3de5f5d | 3,020 | py | Python | alexnet_cifar10.py | shubhe25p/ApproxAlex-on-Keras | 81dbd10f1bbace3cfd5b5b1b2ecc17bdbbbe9024 | [
"MIT"
] | 1 | 2021-04-11T20:33:14.000Z | 2021-04-11T20:33:14.000Z | alexnet_cifar10.py | shubhe25p/ApproxAlex-on-Keras | 81dbd10f1bbace3cfd5b5b1b2ecc17bdbbbe9024 | [
"MIT"
] | null | null | null | alexnet_cifar10.py | shubhe25p/ApproxAlex-on-Keras | 81dbd10f1bbace3cfd5b5b1b2ecc17bdbbbe9024 | [
"MIT"
] | null | null | null |
from tensorflow import keras
from keras.layers import *
from keras.models import Model
from keras.preprocessing import image
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras.applications.imagenet_utils import preprocess_input
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
from keras.utils import plot_model
from keras.initializers import glorot_uniform
from keras.utils import to_categorical
import tensorflow as tf
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
# Load CIFAR-10 (50k train / 10k test, 32x32x3 uint8 images, 10 classes).
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
# Scale pixel intensities from [0, 255] to [0, 1].
X_train = x_train/255
X_test = x_test/255
# One-hot encode the integer class labels.
Y_train = to_categorical(y_train)
Y_test = to_categorical(y_test)
def approx_alex(input_shape):
    """Build an AlexNet-like CNN for 10-class classification.

    Args:
        input_shape: shape of a single input image, e.g. (32, 32, 3).

    Returns:
        An uncompiled Keras Model named 'approx_alex'.
    """
    inputs = Input(input_shape)

    # Stage 0: pad to 38x38, 7x7 conv -> 32 maps, BN, ReLU, 2x2 max-pool.
    net = ZeroPadding2D((3, 3))(inputs)
    net = Conv2D(32, (7, 7), strides=(1, 1), name='conv0',
                 kernel_initializer=glorot_uniform(seed=0))(net)
    net = BatchNormalization(axis=3, name='bn0')(net)
    net = Activation('relu')(net)
    net = MaxPooling2D((2, 2), strides=(2, 2), name='max-pool0')(net)

    # Stage 1: 5x5 conv -> 64 maps, BN, ReLU, overlapping max-pool.
    net = Conv2D(64, (5, 5), strides=(1, 1), name='conv1',
                 kernel_initializer=glorot_uniform(seed=0))(net)
    net = BatchNormalization(axis=3, name='bn1')(net)
    net = Activation('relu')(net)
    net = MaxPooling2D((2, 2), strides=(1, 1), name='max-pool1')(net)

    # Stages 2-4: three 3x3 conv/BN/ReLU blocks with growing filter counts.
    for stage, filters in ((2, 96), (3, 128), (4, 256)):
        net = Conv2D(filters, (3, 3), strides=(1, 1), name='conv%d' % stage,
                     kernel_initializer=glorot_uniform(seed=0))(net)
        net = BatchNormalization(axis=3, name='bn%d' % stage)(net)
        net = Activation('relu')(net)
    net = MaxPooling2D((2, 2), strides=(1, 1), name='max-pool2')(net)

    # Classifier head: two 2048-unit ReLU layers, then 10-way softmax.
    net = Flatten()(net)
    net = Dense(2048, activation='relu', name='fc1',
                kernel_initializer=glorot_uniform(seed=0))(net)
    net = Dense(2048, activation='relu', name='fc2',
                kernel_initializer=glorot_uniform(seed=0))(net)
    net = Dense(10, activation='softmax', name='fc3',
                kernel_initializer=glorot_uniform(seed=0))(net)

    return Model(inputs=inputs, outputs=net, name='approx_alex')
# Build the network for the CIFAR-10 image shape and train it.
model = approx_alex(X_train.shape[1:])
model.summary()
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# Earlier training configurations kept for reference:
#model.fit(X_train, Y_train, epochs=32, batch_size=64)
#history=model.fit(X_train, Y_train, epochs=30, batch_size=32)
h2=model.fit(X_train, Y_train, epochs=40, batch_size=32)
# Plot the training-accuracy curve over epochs.
plt.plot(h2.history['accuracy'])
plt.title('Model Acc')
plt.ylabel('acc')
plt.xlabel('epoch')
plt.show()
# Abandoned keras-tuner hyperparameter search, kept for reference:
#import kerastuner as kt
#
#tuner = kt.Hyperband(
#    model,
#    objective='accuracy',
#    max_epochs=30,
#    hyperband_iterations=2)
# Evaluate on the held-out test set and report loss/accuracy.
preds=model.evaluate(X_test, Y_test)
print()
print("Loss="+ str(preds[0]))
print("Test Accuracy=" + str(preds[1]))
| 27.454545 | 96 | 0.716225 |
acf57a609a5a21cce4f8d9afb638bd121f48555a | 687 | py | Python | MF_GC_network_model/biophysical_model/create_folders.py | delvendahl/GluA4_cerebellum_eLife | 1d1c19853e18f8fbb3307eeecc64096e53d74820 | [
"MIT"
] | null | null | null | MF_GC_network_model/biophysical_model/create_folders.py | delvendahl/GluA4_cerebellum_eLife | 1d1c19853e18f8fbb3307eeecc64096e53d74820 | [
"MIT"
] | null | null | null | MF_GC_network_model/biophysical_model/create_folders.py | delvendahl/GluA4_cerebellum_eLife | 1d1c19853e18f8fbb3307eeecc64096e53d74820 | [
"MIT"
] | null | null | null | # create folders to store temporary simulation data and results
import os
def create_folder(name):
    """Create directory *name* if it does not exist.

    Returns True when the directory was newly created, False when the path
    already existed. Uses EAFP (try/except) instead of the original
    exists()-then-mkdir check, which could race with a concurrent creator
    between the check and the mkdir call.
    """
    try:
        os.mkdir(name)
    except FileExistsError:
        # Path already present (directory or file) -- same False as before.
        return False
    return True
# Top-level directories: scratch space for simulation data and the results tree.
for base_dir in ('./tempdata', './results'):
    create_folder(base_dir)

# One results directory per correlation level, for both model variants.
correlations = [0, 5, 10, 15, 20, 25, 30]
for c in correlations:
    # original model
    create_folder('./results/orig_data_r{}'.format(c))
    # KO model
    create_folder('./results/ko_data_r{}'.format(c))
acf57a6d05ff3549e1ea982c0ad1dac96587682a | 3,287 | py | Python | lib/jnpr/healthbot/swagger/models/hb_graphs_query_where.py | Juniper/healthbot-py-client | 49f0884b5d01ac8430aa7ed4c9acb4e7a2b717a6 | [
"Apache-2.0"
] | 10 | 2019-10-23T12:54:37.000Z | 2022-02-07T19:24:30.000Z | docs/jnpr_healthbot_swagger/swagger_client/models/hb_graphs_query_where.py | Juniper/healthbot-py-client | 49f0884b5d01ac8430aa7ed4c9acb4e7a2b717a6 | [
"Apache-2.0"
] | 5 | 2019-09-30T04:29:25.000Z | 2022-02-16T12:21:06.000Z | docs/jnpr_healthbot_swagger/swagger_client/models/hb_graphs_query_where.py | Juniper/healthbot-py-client | 49f0884b5d01ac8430aa7ed4c9acb4e7a2b717a6 | [
"Apache-2.0"
] | 4 | 2019-09-30T01:17:48.000Z | 2020-08-25T07:27:54.000Z | # coding: utf-8
"""
Paragon Insights APIs
API interface for PI application # noqa: E501
OpenAPI spec version: 4.0.0
Contact: healthbot-feedback@juniper.net
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class HbGraphsQueryWhere(object):
    """Swagger model holding the optional ``where_clause`` of a graph query.

    NOTE: originally emitted by the swagger code generator; the public
    surface (``swagger_types``, ``attribute_map``, ``to_dict``, ...) follows
    the generator's model contract.
    """

    # attribute name -> declared swagger type
    swagger_types = {
        'where_clause': 'HbGraphsQueryWhereWhereClause'
    }

    # attribute name -> JSON key used on the wire
    attribute_map = {
        'where_clause': 'where_clause'
    }

    def __init__(self, where_clause=None):
        """Create the model; ``where_clause`` stays unset when None."""
        self._where_clause = None
        self.discriminator = None
        if where_clause is not None:
            self.where_clause = where_clause

    @property
    def where_clause(self):
        """The where_clause of this HbGraphsQueryWhere.

        :rtype: HbGraphsQueryWhereWhereClause
        """
        return self._where_clause

    @where_clause.setter
    def where_clause(self, where_clause):
        """Set the where_clause of this HbGraphsQueryWhere."""
        self._where_clause = where_clause

    @staticmethod
    def _to_plain(value):
        """Collapse one attribute value into plain dict/list primitives."""
        if isinstance(value, list):
            return [item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value]
        if hasattr(value, "to_dict"):
            return value.to_dict()
        if isinstance(value, dict):
            return {key: (val.to_dict() if hasattr(val, "to_dict") else val)
                    for key, val in value.items()}
        return value

    def to_dict(self):
        """Return the model properties as a dict."""
        result = {name: self._to_plain(getattr(self, name))
                  for name in self.swagger_types}
        # Generated guard: never taken here, as this model does not subclass dict.
        if issubclass(HbGraphsQueryWhere, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Models compare equal when their attribute dicts match."""
        return (isinstance(other, HbGraphsQueryWhere)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return not self == other
| 28.336207 | 87 | 0.583815 |
acf57a96a997821c0523d01e735e1333900e4981 | 135 | py | Python | random_customer/__init__.py | sonigovi/random_customer | 96b702ac2b5ab406c19ad5c5cff028ae4f0a6f60 | [
"MIT"
] | null | null | null | random_customer/__init__.py | sonigovi/random_customer | 96b702ac2b5ab406c19ad5c5cff028ae4f0a6f60 | [
"MIT"
] | null | null | null | random_customer/__init__.py | sonigovi/random_customer | 96b702ac2b5ab406c19ad5c5cff028ae4f0a6f60 | [
"MIT"
] | null | null | null | """Top-level package for random_customer."""
# Package metadata for the ``random_customer`` distribution.
__author__ = """Govind Soni"""
__email__ = 'govind.soni@gmail.com'
__version__ = '0.1.0'  # package version string
| 22.5 | 44 | 0.696296 |
acf57c17eb0ed21d8fef38fdcea682bb4070ff54 | 1,740 | py | Python | training/src/tests/tests/python/roll.py | steelONIONknight/bolt | 9bd3d08f2abb14435ca3ad0179889e48fa7e9b47 | [
"MIT"
] | null | null | null | training/src/tests/tests/python/roll.py | steelONIONknight/bolt | 9bd3d08f2abb14435ca3ad0179889e48fa7e9b47 | [
"MIT"
] | null | null | null | training/src/tests/tests/python/roll.py | steelONIONknight/bolt | 9bd3d08f2abb14435ca3ad0179889e48fa7e9b47 | [
"MIT"
] | null | null | null | # Copyright (C) 2022. Huawei Technologies Co., Ltd. All rights reserved.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import torch

torch.manual_seed(41)

# 2x1x4x3 sample tensor reused for every roll demonstration below.
_DATA = [
    [[[1, 2, 3], [4, 5, 6], [7, 8, 9.0], [10, 11, 12]]],
    [[[13, 14, 15], [16, 17, 18], [19, 20, 21.0], [22, 23, 24]]],
]

a = torch.tensor(_DATA, requires_grad=True)
print("Input: ", a)

# Roll by an increasing shift along each of the four dimensions in turn.
shifts = [1, 2, 3, 4]
for i, shift in enumerate(shifts):
    # Fresh leaf tensor each round so gradients do not accumulate.
    a = torch.tensor(_DATA, requires_grad=True)
    b = torch.roll(a, shift, i)
    print("Result:", b)
    b.sum().backward()
    print("Gradient for input:", a.grad)
| 44.615385 | 148 | 0.661494 |
acf57d56248fce7a8549972200288218477bae53 | 7,737 | py | Python | tinysqlbuilder/sql.py | koichirock/tinysqlbuilder | 91f4d96942ff5ef908fff3efe5cf337cd35ba886 | [
"MIT"
] | null | null | null | tinysqlbuilder/sql.py | koichirock/tinysqlbuilder | 91f4d96942ff5ef908fff3efe5cf337cd35ba886 | [
"MIT"
] | null | null | null | tinysqlbuilder/sql.py | koichirock/tinysqlbuilder | 91f4d96942ff5ef908fff3efe5cf337cd35ba886 | [
"MIT"
] | null | null | null | from typing import List, Optional, TypeVar, Union
# Names exported by ``from tinysqlbuilder.sql import *``.
# Fixed: the list was previously bound to ``all``, which shadowed the
# builtin ``all()`` and had no effect on star-imports; ``__all__`` is the
# name Python honours.  ``Function``, ``union`` and ``union_all`` are
# public as well, so they are listed to keep star-imports backward
# compatible (previously everything was exported).
__all__ = [
    "Query",
    "Condition",
    "Function",
    "Join",
    "and_",
    "or_",
    "not_",
    "eq",
    "not_eq",
    "gt",
    "lt",
    "ge",
    "le",
    "between",
    "like",
    "in_",
    "inner_join",
    "left_outer_join",
    "right_outer_join",
    "full_outer_join",
    "union",
    "union_all",
]
class Function:
    """Marks a value as a raw SQL expression to be emitted verbatim.

    Values wrapped in ``Function`` (e.g. ``CAST(x AS INT)``) bypass the
    quoting applied to ordinary strings.
    """

    def __init__(self, stmt: str) -> None:
        # Raw SQL fragment, emitted unchanged.
        self._sql = stmt

    def build_function(self) -> str:
        """Convert to SQL."""
        return self._sql
class Condition:
    """Abstract SQL condition; subclasses render themselves to SQL."""

    def build_condition(self) -> str:
        """Render this condition as an SQL fragment (subclass responsibility)."""
        raise NotImplementedError
class _AndCondition(Condition):
    """Conjunction of child conditions joined with AND."""

    def __init__(self, *condition: Union[str, Condition]) -> None:
        self.condition = condition

    def build_condition(self) -> str:
        rendered = (_build_condition(child, inner=True) for child in self.condition)
        return " AND ".join(rendered)
class _OrCondition(Condition):
    """Disjunction of child conditions joined with OR."""

    def __init__(self, *condition: Union[str, Condition]) -> None:
        self.condition = condition

    def build_condition(self) -> str:
        rendered = (_build_condition(child, inner=True) for child in self.condition)
        return " OR ".join(rendered)
class _NotCondition(Condition):
    """Negation (NOT) of a single condition."""

    def __init__(self, condition: Union[str, Condition]) -> None:
        self.condition = condition

    def build_condition(self) -> str:
        inner_sql = _build_condition(self.condition, inner=True)
        return f"NOT {inner_sql}"
def _build_condition(condition: Union[str, Condition], inner: bool = False) -> str:
    """Render *condition* to SQL; parenthesize composite conditions when *inner*."""
    if isinstance(condition, str):
        return condition
    sql = condition.build_condition()
    return f"({sql})" if inner else sql
def and_(*conditions: Union[str, Condition]) -> Condition:
    """Combine all *conditions* with the SQL AND operator."""
    return _AndCondition(*conditions)


def or_(*conditions: Union[str, Condition]) -> Condition:
    """Combine all *conditions* with the SQL OR operator."""
    return _OrCondition(*conditions)


def not_(condition: Union[str, Condition]) -> Condition:
    """Negate *condition* with the SQL NOT operator."""
    return _NotCondition(condition)
OpT = TypeVar("OpT")


def enclose_in_single_quote_when_value_is_str(value: OpT) -> str:
    """Render *value* as an SQL literal.

    - ``str``: single-quoted, with embedded single quotes doubled per the
      SQL standard (the original emitted them raw, producing broken SQL and
      a naive injection vector).
    - ``Function``: emitted verbatim via ``build_function()`` (e.g. CAST).
    - ``bool``: rendered as ``1`` / ``0`` (checked before the generic
      fallback because ``bool`` is an ``int`` subclass).
    - anything else: ``str(value)``.
    """
    if isinstance(value, str):
        # Double embedded single quotes -- standard SQL string escaping.
        escaped = value.replace("'", "''")
        return f"'{escaped}'"
    if isinstance(value, Function):
        return value.build_function()
    if isinstance(value, bool):
        return "1" if value else "0"
    return str(value)
def eq(column: str, value: OpT) -> str:
    """Render ``column = value``."""
    rhs = enclose_in_single_quote_when_value_is_str(value)
    return f"{column} = {rhs}"


def not_eq(column: str, value: OpT) -> str:
    """Render ``column != value``."""
    rhs = enclose_in_single_quote_when_value_is_str(value)
    return f"{column} != {rhs}"


def gt(column: str, value: OpT) -> str:
    """Render ``column > value``."""
    rhs = enclose_in_single_quote_when_value_is_str(value)
    return f"{column} > {rhs}"


def lt(column: str, value: OpT) -> str:
    """Render ``column < value``."""
    rhs = enclose_in_single_quote_when_value_is_str(value)
    return f"{column} < {rhs}"


def ge(column: str, value: OpT) -> str:
    """Render ``column >= value``."""
    rhs = enclose_in_single_quote_when_value_is_str(value)
    return f"{column} >= {rhs}"


def le(column: str, value: OpT) -> str:
    """Render ``column <= value``."""
    rhs = enclose_in_single_quote_when_value_is_str(value)
    return f"{column} <= {rhs}"


def between(column: str, value1: OpT, value2: OpT) -> str:
    """Render ``column BETWEEN value1 AND value2``."""
    low = enclose_in_single_quote_when_value_is_str(value1)
    high = enclose_in_single_quote_when_value_is_str(value2)
    return f"{column} BETWEEN {low} AND {high}"


def like(column: str, value: OpT) -> str:
    """Render ``column LIKE value``."""
    rhs = enclose_in_single_quote_when_value_is_str(value)
    return f"{column} LIKE {rhs}"


def in_(column: str, values: List[OpT]) -> str:
    """Render ``column IN (v1, v2, ...)``."""
    rendered = ", ".join(
        enclose_in_single_quote_when_value_is_str(value) for value in values
    )
    return f"{column} IN ({rendered})"
class Join:
    """Abstract JOIN clause; subclasses render themselves to SQL."""

    def build_join(self) -> str:
        """Render this join as an SQL fragment (subclass responsibility)."""
        raise NotImplementedError
class _InnerJoin(Join):
    """Plain (inner) JOIN clause."""

    def __init__(self, table: Union[str, "Query"], condition: Union[str, Condition]) -> None:
        self.table = table
        self.condition = condition

    def build_join(self) -> str:
        target = _build_joined_table(self.table)
        predicate = _build_condition(self.condition)
        return f" JOIN {target} ON {predicate}"
class _LeftOuterJoin(Join):
    """LEFT OUTER JOIN clause."""

    def __init__(self, table: Union[str, "Query"], condition: Union[str, Condition]) -> None:
        self.table = table
        self.condition = condition

    def build_join(self) -> str:
        target = _build_joined_table(self.table)
        predicate = _build_condition(self.condition)
        return f" LEFT OUTER JOIN {target} ON {predicate}"
class _RightOuterJoin(Join):
    """RIGHT OUTER JOIN clause."""

    def __init__(self, table: Union[str, "Query"], condition: Union[str, Condition]) -> None:
        self.table = table
        self.condition = condition

    def build_join(self) -> str:
        target = _build_joined_table(self.table)
        predicate = _build_condition(self.condition)
        return f" RIGHT OUTER JOIN {target} ON {predicate}"
class _FullOuterJoin(Join):
    """FULL OUTER JOIN clause."""

    def __init__(self, table: Union[str, "Query"], condition: Union[str, Condition]) -> None:
        self.table = table
        self.condition = condition

    def build_join(self) -> str:
        target = _build_joined_table(self.table)
        predicate = _build_condition(self.condition)
        return f" FULL OUTER JOIN {target} ON {predicate}"
def inner_join(table: Union[str, "Query"], condition: Union[str, Condition]) -> Join:
    """Create an INNER JOIN against *table* on *condition*."""
    return _InnerJoin(table, condition)


def left_outer_join(table: Union[str, "Query"], condition: Union[str, Condition]) -> Join:
    """Create a LEFT OUTER JOIN against *table* on *condition*."""
    return _LeftOuterJoin(table, condition)


def right_outer_join(table: Union[str, "Query"], condition: Union[str, Condition]) -> Join:
    """Create a RIGHT OUTER JOIN against *table* on *condition*."""
    return _RightOuterJoin(table, condition)


def full_outer_join(table: Union[str, "Query"], condition: Union[str, Condition]) -> Join:
    """Create a FULL OUTER JOIN against *table* on *condition*."""
    return _FullOuterJoin(table, condition)
def _build_joined_table(table: Union[str, "Query"]) -> str:
"""Build joined table"""
if isinstance(table, str):
return table
return table.subquery()
class Query:
    """SQL SELECT statement under construction.

    Callers populate ``columns``, ``joins``, ``condition`` and ``alias``
    after construction; ``to_sql`` renders the statement and ``subquery``
    renders it parenthesized (with the optional alias) for embedding.
    """

    def __init__(self, table: str):
        self.table = table
        # Annotations are quoted so typing subscripts are not re-evaluated
        # on every instantiation (complex-target annotations run at runtime).
        # Columns for the SELECT list.
        self.columns: "List[str]" = []
        # JOIN clauses, rendered in insertion order.
        self.joins: "List[Join]" = []
        # Optional WHERE condition (raw SQL string or Condition tree).
        self.condition: "Optional[Union[str, Condition]]" = None
        # Optional alias appended when rendered via subquery().
        self.alias: "Optional[str]" = None

    def __str__(self) -> str:
        return self.to_sql()

    def to_sql(self) -> str:
        """Build query string."""
        # Was f"SELECT {f', '.join(...)}" -- the inner f-string had no
        # placeholders (ruff F541); a plain ', ' literal is identical.
        query = f"SELECT {', '.join(self.columns)}"
        query += f" FROM {self.table}"
        # Empty self.joins simply skips the loop; the former `if self.joins:`
        # guard was redundant.
        for join in self.joins:
            query += join.build_join()
        if self.condition:
            query += f" WHERE {_build_condition(self.condition)}"
        return query

    def subquery(self) -> str:
        """Build the parenthesized subquery string, plus alias when set."""
        query = f"({self.to_sql()})"
        if self.alias:
            query += f" AS {self.alias}"
        return query
def union(*queries: Query) -> str:
    """Build UNION query joining every SELECT with `` UNION ``."""
    return " UNION ".join(q.to_sql() for q in queries)


def union_all(*queries: Query) -> str:
    """Build UNION ALL query joining every SELECT with `` UNION ALL ``."""
    return " UNION ALL ".join(q.to_sql() for q in queries)
| 27.931408 | 138 | 0.64379 |
acf57da00fabcd0dd04e036ba68db65deaee69d6 | 1,123 | py | Python | var/spack/repos/builtin/packages/r-readbitmap/package.py | kkauder/spack | 6ae8d5c380c1f42094b05d38be26b03650aafb39 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2,360 | 2017-11-06T08:47:01.000Z | 2022-03-31T14:45:33.000Z | var/spack/repos/builtin/packages/r-readbitmap/package.py | kkauder/spack | 6ae8d5c380c1f42094b05d38be26b03650aafb39 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 13,838 | 2017-11-04T07:49:45.000Z | 2022-03-31T23:38:39.000Z | var/spack/repos/builtin/packages/r-readbitmap/package.py | kkauder/spack | 6ae8d5c380c1f42094b05d38be26b03650aafb39 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1,793 | 2017-11-04T07:45:50.000Z | 2022-03-30T14:31:53.000Z | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RReadbitmap(RPackage):
"""Identifies and reads Windows BMP, JPEG, PNG, and TIFF format bitmap
images. Identification defaults to the use of the magic number embedded in
the file rather than the file extension. Reading of JPEG and PNG image
depends on libjpg and libpng libraries. See file INSTALL for details if
necessary."""
homepage = "https://github.com/jefferis/readbitmap"
url = "https://cloud.r-project.org/src/contrib/readbitmap_0.1.5.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/readbitmap"
version('0.1.5', sha256='737d7d585eb33de2c200da64d16781e3c9522400fe2af352e1460c6a402a0291')
depends_on('r-bmp', type=('build', 'run'))
depends_on('r-jpeg', type=('build', 'run'))
depends_on('r-png', type=('build', 'run'))
depends_on('r-tiff', type=('build', 'run'))
depends_on('jpeg')
depends_on('libpng')
| 38.724138 | 95 | 0.711487 |
acf57e44a6de97f10bea619f3dff89ec78a35b02 | 220 | py | Python | contrib/wallettools/walletchangepass.py | yrsanaoki/litecoin | e86872a9d15b2258c5375f3a8accd3d4bf94c12c | [
"MIT"
] | 1 | 2021-11-29T21:28:16.000Z | 2021-11-29T21:28:16.000Z | contrib/wallettools/walletchangepass.py | yrsanaoki/poohcoin | e86872a9d15b2258c5375f3a8accd3d4bf94c12c | [
"MIT"
] | null | null | null | contrib/wallettools/walletchangepass.py | yrsanaoki/poohcoin | e86872a9d15b2258c5375f3a8accd3d4bf94c12c | [
"MIT"
] | null | null | null | from jsonrpc import ServiceProxy
# Connect to the local wallet daemon's JSON-RPC endpoint.
# NOTE(review): Python 2 script -- ``raw_input`` was renamed ``input`` in
# Python 3; keep running it under Python 2 or port accordingly.
access = ServiceProxy("http://127.0.0.1:9502")
# Prompt for the current passphrase and its replacement.
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
# RPC call that changes the wallet passphrase from pwd to pwd2.
access.walletpassphrasechange(pwd, pwd2)
| 36.666667 | 49 | 0.768182 |
acf57eaafcbe64bb1c02e60b79ef675f25ff17b7 | 1,812 | py | Python | tests.py | GooeeIOT/drf-ujson-renderer | 8d076e6c9b74f814bcac1618c873e911f783b291 | [
"MIT"
] | 117 | 2015-02-17T16:06:19.000Z | 2021-08-31T16:15:09.000Z | tests.py | GooeeIOT/drf-ujson-renderer | 8d076e6c9b74f814bcac1618c873e911f783b291 | [
"MIT"
] | 12 | 2015-02-04T11:04:47.000Z | 2021-05-19T23:00:16.000Z | tests.py | GooeeIOT/drf-ujson-renderer | 8d076e6c9b74f814bcac1618c873e911f783b291 | [
"MIT"
] | 28 | 2015-02-04T07:52:36.000Z | 2021-08-31T16:15:09.000Z | from unittest import TestCase
from io import BytesIO
from django.conf import settings
import ujson
# Configure Django with empty settings before the drf_ujson imports below --
# presumably they touch django.conf at import time; TODO confirm.
settings.configure()
from drf_ujson.renderers import UJSONRenderer
from drf_ujson.parsers import UJSONParser
class UJSONRendererTests(TestCase):
    """Round-trip tests for UJSONRenderer."""

    def setUp(self):
        self.renderer = UJSONRenderer()
        self.data = {
            'a': [1, 2, 3],
            'b': True,
            'c': 1.23,
            'd': 'test',
            'e': {'foo': 'bar'},
        }

    def test_basic_data_structures_rendered_correctly(self):
        # Rendering then decoding must reproduce the original structure.
        round_tripped = ujson.loads(self.renderer.render(self.data))
        self.assertEqual(round_tripped, self.data)

    def test_renderer_works_correctly_when_media_type_and_context_provided(self):
        # Same round trip, but exercising the full render() signature.
        rendered = self.renderer.render(
            data=self.data,
            media_type='application/json',
            renderer_context={},
        )
        self.assertEqual(ujson.loads(rendered), self.data)
class UJSONParserTests(TestCase):
    """Round-trip tests for UJSONParser."""

    def setUp(self):
        self.parser = UJSONParser()
        self.data = {
            'a': [1, 2, 3],
            'b': True,
            'c': 1.23,
            'd': 'test',
            'e': {'foo': 'bar'},
        }

    def _encoded_stream(self):
        # Serialize the fixture and wrap it in a byte stream, as DRF would.
        return BytesIO(ujson.dumps(self.data).encode('utf-8'))

    def test_basic_data_structures_parsed_correctly(self):
        self.assertEqual(self.parser.parse(self._encoded_stream()), self.data)

    def test_parser_works_correctly_when_media_type_and_context_provided(self):
        parsed = self.parser.parse(
            stream=self._encoded_stream(),
            media_type='application/json',
            parser_context={},
        )
        self.assertEqual(parsed, self.data)
| 25.885714 | 81 | 0.59989 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.