content
stringlengths 0
1.05M
| origin
stringclasses 2
values | type
stringclasses 2
values |
|---|---|---|
from django.conf import settings
from django.conf.urls import patterns, url, include
from views import login, logout, connect

# URL routes for the Facebook login/logout/connect views.
# NOTE(review): uses the legacy `patterns()` API (removed in Django 1.10) and
# an implicit-relative `views` import (Python 2 style) — confirm the target
# Django/Python versions before modernizing.
urlpatterns = patterns('',
    url(r'^login/$', login,
        {'template_name': 'registration/login.html'}, name='fb_login'),
    url(r'^logout/$', logout,
        {'template_name': 'registration/logout.html'}, name='fb_logout'),
    url(r'^connect/$', connect, {}, name='fb_connect'),
)

# Mount django-registration's default URLs only when that app is installed.
if 'registration' in settings.INSTALLED_APPS:
    urlpatterns += patterns('',
        url(r'', include('registration.backends.default.urls')),
    )
|
nilq/baby-python
|
python
|
from django.contrib import admin
from .models import Category, Product, LaptopsCategory, SmartPhonesCategory
from django import forms
from django.forms import ValidationError
from PIL import Image
# Image upload validation form (translated from Russian: "Image setup")
class AdminForm(forms.ModelForm):
    """Admin model form that enforces a minimum resolution on the uploaded image."""

    # Minimum accepted image size as (height, width); see clean_image().
    MIN_RESOLUTION = (400, 400)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Show the size requirement next to the image field in the admin UI
        # (user-facing Russian text: "Upload a picture no smaller than HxW").
        self.fields['image'].help_text = 'Загружайте картинку не меньше {}x{}'.format(*self.MIN_RESOLUTION)

    def clean_image(self):
        """Validate that the uploaded image is at least MIN_RESOLUTION pixels.

        Raises ValidationError (Russian message: "the uploaded image does not
        meet the minimum requirements") when the image is too small.
        """
        image = self.cleaned_data['image']
        img = Image.open(image)
        min_height, min_width = self.MIN_RESOLUTION
        if img.height < min_height or img.width < min_width:
            raise ValidationError('Загруженное изображение не соответствует минимальным требованиям')
        return image
# Category-choice filtering in the admin panel (translated from Russian)
class LaptopAdmin(admin.ModelAdmin):
    """Admin for laptop products; restricts the category dropdown to laptop categories."""
    form = AdminForm

    def formfield_for_dbfield(self, db_field, request, **kwargs):
        # Limit the 'category' choices to categories whose name contains
        # 'ноутбуки' ("laptops"); every other field keeps default behavior.
        if db_field.name == 'category':
            return forms.ModelChoiceField(Category.objects.filter(name__icontains='ноутбуки'))
        return super().formfield_for_dbfield(db_field, request, **kwargs)
class SmartPhoneAdmin(admin.ModelAdmin):
    """Admin for smartphone products; restricts the category dropdown to smartphone categories."""
    form = AdminForm

    def formfield_for_dbfield(self, db_field, request, **kwargs):
        # Limit the 'category' choices to categories whose name contains
        # 'смартфоны' ("smartphones"); every other field keeps default behavior.
        if db_field.name == 'category':
            return forms.ModelChoiceField(Category.objects.filter(name__icontains='смартфоны'))
        return super().formfield_for_dbfield(db_field, request, **kwargs)
# Register the catalog models; the category-specific admins above filter the
# category choices for their respective product proxies.
admin.site.register(Category)
admin.site.register(Product)
admin.site.register(LaptopsCategory, LaptopAdmin)
admin.site.register(SmartPhonesCategory, SmartPhoneAdmin)
|
nilq/baby-python
|
python
|
'''
Created on Nov 13, 2017
@author: khoi.ngo
'''
# /usr/bin/env python3.6
import sys
import asyncio
import json
import os.path
import logging.handlers
# import shutil
import time
import random
from indy import signus, wallet, pool, ledger
from indy.error import IndyError
import abc
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from utils.utils import *
from utils.constant import Colors, Constant, Roles
from utils.report import TestReport
from utils.common import Common
# -----------------------------------------------------------------------------------------
# This will run acceptance tests that will validate the add/remove roles functionality.
# -----------------------------------------------------------------------------------------
class MyVars:
    """ Needed some global variables. """
    # Wall-clock start time; set in test() and used for the report duration.
    begin_time = 0
    # Handles returned by Common.prepare_pool_and_wallet() in step 1.
    pool_handle = 0
    # Need the path to the pool transaction file location
    pool_genesis_txn_file = os.path.expanduser('~') + os.sep + "Git/indy-testcase/khoi"
    # domain_transactions_sandbox_genesis = Constant.domain_transactions_sandbox_genesis
    # domain_transactions_sandbox_genesis_bak = Constant.domain_transactions_sandbox_genesis + str(random.randrange(10, 1000, 2))
    wallet_handle = 0
    test_report = TestReport("Test_scenario_07_Add_Node")
    # Randomized names so repeated runs do not collide with stale pools/wallets.
    pool_name = generate_random_string("test_pool", size=32)
    wallet_name = generate_random_string("test_wallet", size=32)
    # When True, pause for operator input after DID creation.
    debug = False
    # Per-step pass/fail flags, summarized by final_results().
    test_results = {'Step3': False, 'Step4': False, 'Step5': False, 'Step6': False, 'Step7': False, 'Step8': False, 'Step9': False}
# Module-level logger; only errors are emitted to keep console output readable.
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.ERROR)
def test_prep():
    """Remove leftover .indy pool and wallet files before the scenario starts."""
    banner = "\nPrecondition. Clean up pools and wallets\n"
    print(Colors.HEADER + banner + Colors.ENDC)
    Common.clean_up_pool_and_wallet_folder(MyVars.pool_name, MyVars.wallet_name)
async def test_scenario_07_add_node():
    """Scenario 07: only stewards may add validator nodes, and only one each.

    Opens the pool/wallet, derives DIDs for a trustee, two stewards, a trust
    anchor and an identity owner, then submits NODE requests per role and
    records each step's pass/fail in MyVars.test_results.
    """
    logger.info("Test Scenario 07 -> started")
    # Seeds used to deterministically derive the DIDs in step 2.
    seed_default_trustee = Constant.seed_default_trustee
    seed_steward_node5 = generate_random_string(prefix="StewardNode5", size=32)
    seed_steward_node6 = generate_random_string(prefix="StewardNode6", size=32)
    seed_trust_anchor = generate_random_string(prefix="TrustAnchor", size=32)
    seed_identity_owner = generate_random_string(prefix="IdentityOwner", size=32)
    # Target keys (base58) of the two candidate validator nodes.
    base_58_node_5 = "4Tn3wZMNCvhSTXPcLinQDnHyj56DTLQtL61ki4jo2Loc"
    base_58_node_6 = "6G9QhQa3HWjRKeRmEvEkLbWWf2t7cw6KLtafzi494G4G"
    # seed_tgb = generate_random_string(prefix="TGB", size=32)
    # NODE request payloads for the two nodes.
    data_node5={'client_port': 9702, 'client_ip': '10.20.30.205', 'alias': 'Node5', 'node_ip': '10.20.30.205',
                'node_port': 9701, 'services': ['VALIDATOR']}
    data_node6={'client_port': 9702, 'client_ip': '10.20.30.206', 'alias': 'Node6', 'node_ip': '10.20.30.206',
                'node_port': 9701, 'services': ['VALIDATOR']}
    # 1. Create and open pool Ledger ---------------------------------------------------------
    print(Colors.HEADER + "\n\t1. Create and open pool Ledger\n" + Colors.ENDC)
    try:
        MyVars.pool_handle, MyVars.wallet_handle = await Common.prepare_pool_and_wallet(MyVars.pool_name, MyVars.wallet_name, MyVars.pool_genesis_txn_file)
    except IndyError as E:
        MyVars.test_report.set_test_failed()
        MyVars.test_report.set_step_status(1, "Create and open pool Ledger", str(E))
        print(Colors.FAIL + str(E) + Colors.ENDC)
        return None
    # 2. Create DIDs ----------------------------------------------------
    print(Colors.HEADER + "\n\t2. Create DID's\n" + Colors.ENDC)
    # NOTE(review): failures here are only printed, not recorded as a failed
    # step; later steps would then hit undefined DID variables.
    try:
        (default_trustee_did, default_trustee_verkey, default_trustee_pk) = await signus.create_and_store_my_did(
            MyVars.wallet_handle, json.dumps({"seed": seed_default_trustee}))
        (steward_node_5_did, steward_node_5_verkey, steward_node_5_pk) = await signus.create_and_store_my_did(
            MyVars.wallet_handle, json.dumps({"seed": seed_steward_node5}))
        (steward_node_6_did, steward_node_6_verkey, steward_node_6_pk) = await signus.create_and_store_my_did(
            MyVars.wallet_handle, json.dumps({"seed": seed_steward_node6}))
        (trust_anchor_did, trust_anchor_verkey, trust_anchor_pk) = await signus.create_and_store_my_did(
            MyVars.wallet_handle, json.dumps({"seed": seed_trust_anchor}))
        (identity_owner_did, identity_owner_verkey, identity_owner_pk) = await signus.create_and_store_my_did(
            MyVars.wallet_handle, json.dumps({"seed": seed_identity_owner}))
    except IndyError as E:
        print(Colors.FAIL + str(E) + Colors.ENDC)
    if MyVars.debug:
        input(Colors.WARNING + "\n\nDID's created..." + Colors.ENDC)
    # 3. Trustee create a steward5
    print(Colors.HEADER + "\n\t3. Trustee create a steward5, steward6, trust anchor, identity owner\n" + Colors.ENDC)
    parts3={'3': False, '3a': False, '3b': False, '3c': False}
    try:
        await Common.build_and_send_nym_request(MyVars.pool_handle, MyVars.wallet_handle, default_trustee_did,
                                                steward_node_5_did, steward_node_5_verkey, None, Roles.STEWARD)
        parts3['3'] = True
    except IndyError as E:
        print(Colors.FAIL + str(E) + Colors.ENDC)
        return None
    # 3a. Trustee create a steward6
    try:
        await Common.build_and_send_nym_request(MyVars.pool_handle, MyVars.wallet_handle, default_trustee_did,
                                                steward_node_6_did, steward_node_6_verkey, None, Roles.STEWARD)
        parts3['3a'] = True
    except IndyError as E:
        print(Colors.FAIL + str(E) + Colors.ENDC)
        return None
    # 3b. Trustee create a trustanchor
    try:
        await Common.build_and_send_nym_request(MyVars.pool_handle, MyVars.wallet_handle, default_trustee_did,
                                                trust_anchor_did, trust_anchor_verkey, None, Roles.TRUST_ANCHOR)
        parts3['3b'] = True
    except IndyError as E:
        print(Colors.FAIL + str(E) + Colors.ENDC)
        return None
    # 3c. Trustee create a identityowner
    try:
        await Common.build_and_send_nym_request(MyVars.pool_handle, MyVars.wallet_handle, default_trustee_did,
                                                identity_owner_did, identity_owner_verkey, None, Roles.NONE)
        parts3['3c'] = True
    except IndyError as E:
        print(Colors.FAIL + str(E) + Colors.ENDC)
        return None
    # If any of the results are not true, then fail the test
    if not all(value is True for value in parts3.values()):
        print(Colors.FAIL + "\n\tOne of the commands in step 3 failed" + Colors.ENDC)
    else:
        # Pass the test
        MyVars.test_results['Step3'] = True
    await asyncio.sleep(0)
    # 4. Verify that a Trustee cannot add a validator node
    # (error_code 304 from the ledger means the request was rejected).
    print(Colors.HEADER + "\n\t4. Verify that a Trustee cannot add a validator node\n" + Colors.ENDC)
    node_req4 = await ledger.build_node_request(default_trustee_did, base_58_node_5, json.dumps(data_node5))
    try:
        await ledger.sign_and_submit_request(MyVars.pool_handle, MyVars.wallet_handle, default_trustee_did, node_req4)
    except IndyError as E:
        if E.error_code == 304:
            MyVars.test_results['Step4'] = True
            print(Colors.OKGREEN + ("::PASS::Validated that a Trustee cannot add a validator node\n" + Colors.ENDC))
        else:
            print(str(E))
    # 5. Verify that a Trust Anchor cannot add a validator node
    print(Colors.HEADER + "\n\t5. Verify that a Trust Anchor cannot add a validator node\n" + Colors.ENDC)
    node_req5 = await ledger.build_node_request(trust_anchor_did, base_58_node_5, json.dumps(data_node5))
    try:
        await ledger.sign_and_submit_request(MyVars.pool_handle, MyVars.wallet_handle, trust_anchor_did, node_req5)
    except IndyError as E:
        print("\nError: %s\n" % str(E.error_code))
        if E.error_code == 304:
            MyVars.test_results['Step5'] = True
            print(Colors.OKGREEN + ("::PASS::Validated that a Trust Anchor cannot add a validator node\n" + Colors.ENDC))
        else:
            print(str(E))
    # 6. Verify that a Identity Owner cannot add a validator node
    print(Colors.HEADER + "\n\t6. Verify that a Identity Owner cannot add a validator node\n" + Colors.ENDC)
    node_req6 = await ledger.build_node_request(identity_owner_did, base_58_node_5, json.dumps(data_node5))
    try:
        await ledger.sign_and_submit_request(MyVars.pool_handle, MyVars.wallet_handle, identity_owner_did, node_req6)
    except IndyError as E:
        if E.error_code == 304:
            MyVars.test_results['Step6'] = True
            print(Colors.OKGREEN + ("::PASS::Validated that a Identity Owner cannot add a validator node\n" + Colors.ENDC))
        else:
            print(str(E))
    # 7. Verify that a Steward5 can add a validator node
    print(Colors.HEADER + "\n\t7. Verify that a Steward_node_5 can add a validator node\n" + Colors.ENDC)
    node_req7 = await ledger.build_node_request(steward_node_5_did, base_58_node_5, json.dumps(data_node5))
    try:
        await ledger.sign_and_submit_request(MyVars.pool_handle, MyVars.wallet_handle, steward_node_5_did, node_req7)
        MyVars.test_results['Step7'] = True
        print(Colors.OKGREEN + ("::PASS::Validated that a Steward_node_5 can add a validator node\n" + Colors.ENDC))
    except IndyError as E:
        print(str(E))
    # 8. Verify that a steward can only add one node by trying to add another one.
    print(Colors.HEADER + "\n\t8. Verify that a Steward_node_5 can only add one node by trying to add another one\n" + Colors.ENDC)
    node_req8 = await ledger.build_node_request(steward_node_5_did, base_58_node_6, json.dumps(data_node6))
    try:
        await ledger.sign_and_submit_request(MyVars.pool_handle, MyVars.wallet_handle, steward_node_5_did, node_req8)
    except IndyError as E:
        if E.error_code == 304:
            MyVars.test_results['Step8'] = True
            print(Colors.OKGREEN + ("::PASS::Validated that a Steward_node_5 can only add one node by trying to add another one\n" + Colors.ENDC))
        else:
            print(str(E))
    # 9. Verify that a Steward_node_6 can add a validator node.
    print(Colors.HEADER + "\n\t9. Verify that a Steward_node_6 can add a validator node\n" + Colors.ENDC)
    node_req9 = await ledger.build_node_request(steward_node_6_did, base_58_node_6, json.dumps(data_node6))
    try:
        await ledger.sign_and_submit_request(MyVars.pool_handle, MyVars.wallet_handle, steward_node_6_did, node_req9)
        MyVars.test_results['Step9'] = True
        print(Colors.OKGREEN + ("::PASS::Validated that a Steward_node_6 can add a validator node\n" + Colors.ENDC))
    except IndyError as E:
        print(str(E))
    # ==================================================================================================================
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! End of test, run cleanup !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    # ==================================================================================================================
    # 10. Close wallet and pool ------------------------------------------------------------------------------
    print(Colors.HEADER + "\n\t==Clean up==\n\t10. Close and delete the wallet and the pool ledger...\n" + Colors.ENDC)
    try:
        await Common.clean_up_pool_and_wallet(MyVars.pool_name, MyVars.pool_handle, MyVars.wallet_name, MyVars.wallet_handle)
    except IndyError as E:
        print(Colors.FAIL + str(E) + Colors.ENDC)
    await asyncio.sleep(0)
    logger.info("Test Scenario 07 -> completed")
def final_results():
    """Print a per-step pass/fail summary and write the test report to disk."""
    all_passed = all(outcome is True for outcome in MyVars.test_results.values())
    if all_passed:
        print(Colors.OKGREEN + "\n\tAll the tests passed...\n" + Colors.ENDC)
    else:
        # List every step with a colored verdict.
        for step, outcome in MyVars.test_results.items():
            if outcome:
                verdict = Colors.OKGREEN + 'Passed' + Colors.ENDC
            else:
                verdict = Colors.FAIL + 'Failed' + Colors.ENDC
            print('%s: ' % str(step) + verdict)
    MyVars.test_report.set_duration(time.time() - MyVars.begin_time)
    MyVars.test_report.write_result_to_file()
def test():
    """Entry point: clean up, run scenario 07 on the asyncio loop, report results."""
    MyVars.begin_time = time.time()
    # Run the cleanup first...
    test_prep()
    # Create the loop instance using asyncio
    loop = asyncio.get_event_loop()
    loop.run_until_complete(test_scenario_07_add_node())
    loop.close()
    print("\n\nResults\n+" + 40 * "=" + "+")
    final_results()
test()
|
nilq/baby-python
|
python
|
# Copyright 2017 Division of Medical Image Computing, German Cancer Research Center (DKFZ)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
def range_normalization(data, rnge=(0, 1), per_channel=True):
    """Linearly rescale ``data`` into the interval ``rnge``, in place.

    Args:
        data: float array of shape (batch, channel, ...). Modified in place.
        rnge: (low, high) target interval.
        per_channel: if True, normalize each (sample, channel) independently;
            otherwise normalize each sample as a whole.

    Returns:
        The same array, rescaled so each normalized patch spans ``rnge``.

    Bug fix: a constant patch (max == min) previously divided by zero and
    filled the patch with NaN/Inf; it now maps to ``rnge[0]``.
    """
    for b in range(data.shape[0]):
        if per_channel:
            for c in range(data.shape[1]):
                mn = data[b, c].min()
                mx = data[b, c].max()
                # Guard against a constant patch: (mx - mn) == 0 would
                # otherwise produce NaN/Inf in the division below.
                denom = (mx - mn) if mx > mn else 1
                data[b, c] -= mn
                data[b, c] /= denom
                data[b, c] *= (rnge[1] - rnge[0])
                data[b, c] += rnge[0]
        else:
            mn = data[b].min()
            mx = data[b].max()
            denom = (mx - mn) if mx > mn else 1
            data[b] -= mn
            data[b] /= denom
            data[b] *= (rnge[1] - rnge[0])
            data[b] += rnge[0]
    return data
def zero_mean_unit_variance_normalization(data, per_channel=True, epsilon=1e-7):
    """Standardize ``data`` in place to zero mean and unit variance.

    When ``per_channel`` is True each (sample, channel) slice is standardized
    on its own statistics; otherwise each sample is standardized as a whole.
    ``epsilon`` keeps the division stable for near-constant slices.
    """
    def _standardize(patch):
        # (x - mean) / (std + eps) on the whole slice at once.
        return (patch - patch.mean()) / (patch.std() + epsilon)

    for sample in range(data.shape[0]):
        if per_channel:
            for channel in range(data.shape[1]):
                data[sample, channel] = _standardize(data[sample, channel])
        else:
            data[sample] = _standardize(data[sample])
    return data
def mean_std_normalization(data, mean, std, per_channel=True):
    """Normalize ``data`` to (data - mean) / std, in place.

    Args:
        data: numpy array of shape (batch, channel, ...) or a list/tuple of
            per-sample numpy arrays.
        mean, std: scalar, or per-channel sequence of length data_shape[1]
            when ``per_channel`` is True.
        per_channel: apply per-channel statistics when True.

    Returns:
        The normalized container (same object, modified in place).

    Raises:
        TypeError: if data is neither a numpy array nor a list/tuple.

    Fixes vs. the original: int scalars are accepted for mean/std (previously
    only ``float`` instances were broadcast), and BOTH mean and std sequence
    lengths are validated (the old elif chain checked at most one).
    """
    if isinstance(data, np.ndarray):
        data_shape = tuple(list(data.shape))
    elif isinstance(data, (list, tuple)):
        assert len(data) > 0 and isinstance(data[0], np.ndarray)
        data_shape = [len(data)] + list(data[0].shape)
    else:
        raise TypeError("Data has to be either a numpy array or a list")
    if per_channel:
        # Broadcast scalar statistics to one value per channel.
        if isinstance(mean, (int, float)):
            mean = [mean] * data_shape[1]
        if isinstance(std, (int, float)):
            std = [std] * data_shape[1]
        if isinstance(mean, (tuple, list, np.ndarray)):
            assert len(mean) == data_shape[1]
        if isinstance(std, (tuple, list, np.ndarray)):
            assert len(std) == data_shape[1]
    for b in range(data_shape[0]):
        if per_channel:
            for c in range(data_shape[1]):
                data[b][c] = (data[b][c] - mean[c]) / std[c]
        else:
            data[b] = (data[b] - mean) / std
    return data
def cut_off_outliers(data, percentile_lower=0.2, percentile_upper=99.8, per_channel=False):
    """Clamp values outside the given percentiles, in place.

    Percentiles are computed per sample, or per (sample, channel) slice when
    ``per_channel`` is True.
    """
    def _clip_inplace(patch):
        lower = np.percentile(patch, percentile_lower)
        upper = np.percentile(patch, percentile_upper)
        patch[patch < lower] = lower
        patch[patch > upper] = upper

    for sample in range(len(data)):
        if per_channel:
            for channel in range(data.shape[1]):
                _clip_inplace(data[sample, channel])
        else:
            _clip_inplace(data[sample])
    return data
|
nilq/baby-python
|
python
|
from os import read
import sys
import argparse
import numpy as np
import shlex
import subprocess
import numpy as np
from operator import itemgetter
def init_chrom_list():
    """Return the human chromosome names: '1'..'22' followed by 'X' and 'Y'."""
    return [str(number) for number in range(1, 23)] + ['X', 'Y']
def read_file(vcf_path):
    """Read a whitespace-delimited text file into a list of token lists."""
    with open(vcf_path, 'r') as handle:
        # str.split() with no argument already ignores surrounding whitespace,
        # so each line becomes its list of tokens.
        return [line.strip().split() for line in handle]
def parse_bed(path):
    """Parse a .bed file into 24 per-chromosome lists of inclusive position ranges."""
    rows = read_file(path)
    names = init_chrom_list()
    comp_range = [[] for _ in range(24)]
    for idx in range(24):
        prefix = 'chr' + names[idx]
        for row in rows:
            if row[0] == prefix:
                # BED end coordinate is made inclusive via the +1.
                comp_range[idx].append(range(int(row[1]), int(row[2]) + 1))
    return comp_range
def parse_vcf(vcf_path, bed_path = ''):
    """Parse a phased SV VCF into a list of per-SV dicts.

    Each dict has keys: chr, pos, id, hp (normalized genotype like '1|0'),
    ps (phase-set identifier), len, and type ('INS' or 'DEL').
    SVs shorter than 50 bp, with genotype '0|0', or (when ``bed_path`` is
    given) outside the .bed regions are dropped.

    NOTE(review): when the INFO field has no SVLEN and REF/ALT are the same
    length, neither 'len' nor 'type' is set and the final filter raises
    KeyError — presumably such records never occur in SV callsets; confirm.
    """
    if bed_path != '':
        comp_range = parse_bed(bed_path)
    chrom_list = init_chrom_list()
    sv_set = read_file(vcf_path)
    # Drop header lines (starting with '#').
    sv_set = [s for s in sv_set if s[0][0] != '#']
    info = []
    for s in sv_set:
        # Keep only records on chr1..chr22, chrX, chrY.
        if s[0][3:] not in chrom_list:
            continue
        info.append(dict())
        t = len(info) - 1
        info[t]['chr'] = s[0]
        info[t]['pos'] = int(s[1])
        info[t]['id'] = s[2] + s[0] + s[1]
        # Genotype normalization: '.' alleles become '0', and unphased '/'
        # separators become '|'.
        info[t]['hp'] = s[-1][:3]
        if info[t]['hp'][0] == '.':
            info[t]['hp'] = '0' + info[t]['hp'][1:]
        if info[t]['hp'][2] == '.':
            info[t]['hp'] = info[t]['hp'][:2] + '0'
        if info[t]['hp'][1] == '/':
            info[t]['hp'] = info[t]['hp'][0] + '|' + info[t]['hp'][2]
            info[t]['ps'] = s[0]
        else:
            info[t]['ps'] = s[0] + '_' + s[-1][4:] # baseinfo ps will be only chrN_
        sv_info = s[7].split(';')
        if 'SVLEN' in s[7]:
            # Length and type come from the INFO field; any DUP is treated as INS.
            info[t]['len'] = abs(int([s for s in sv_info if 'SVLEN' in s][0][6:]))
            info[t]['type'] = s[4][1:-1] if s[4] in ['<INS>', '<DEL>', '<DUP:TANDEM>', '<DUP:INT>'] else [s for s in sv_info if 'SVTYPE' in s][0][7:]
            if 'DUP' in info[t]['type']:
                info[t]['type'] = 'INS'
        else:
            # No SVLEN: infer length/type from the REF/ALT length difference.
            l = len(s[3]) - len(s[4])
            if l > 0:
                info[t]['len'] = l
                info[t]['type'] = 'DEL'
            if l< 0:
                info[t]['len'] = -l
                info[t]['type'] = 'INS'
        # Confinement to the .bed regions (always kept when no bed given).
        if bed_path == '':
            flag = True
        else:
            flag = False
            for bed_rg in comp_range[chrom_list.index(s[0][3:])]:
                if int(s[1]) in bed_rg:
                    flag = True
                    break
        if not flag or info[t]['len'] < 50 or info[t]['hp'] == '0|0':
            info.pop()
    return info
def evaluation(baseinfo, callinfo, threshold_tp_range, ratio):
    """Score a callset against a truthset for calling, genotyping and phasing.

    Args:
        baseinfo: truthset records from parse_vcf().
        callinfo: callset records from parse_vcf().
        threshold_tp_range: max position distance for a true-positive match.
        ratio: minimum length-ratio similarity between matched SVs.

    Returns:
        (avg_sv_num, p, r, f1, p_gt, r_gt, f1_gt, p_hp, r_hp, f1_hp).

    NOTE(review): divides by len(callinfo)/len(baseinfo) and by (p + r);
    empty inputs or zero matches raise ZeroDivisionError.
    """
    chrom_list = init_chrom_list()
    call_tp, call_tp_gt, call_tp_hp, base_tp, base_tp_gt, base_tp_hp = set(), set(), set(), set(), set(), set()
    # Average number of SVs per phase set in the callset.
    avg_sv_num = len(callinfo) / len(set([s['ps'] for s in callinfo]))
    for ch in range(24):
        # Truth SVs on this chromosome, sorted by position per type for searchsorted.
        base_ch_type = dict()
        for svtype in ['INS', 'DEL']:
            base_ch_type[svtype] = sorted([s for s in baseinfo if s['chr'] == 'chr' + chrom_list[ch] and s['type'] == svtype], key = itemgetter('pos'))
        call_ch = [s for s in callinfo if s['chr'] == 'chr' + chrom_list[ch]]
        ps_set = set([s['ps'] for s in call_ch])
        for ps in ps_set:
            call_ch_ps = [s for s in call_ch if s['ps'] == ps]
            # tmp1: haplotypes match as-is; tmp2: haplotypes match when flipped.
            tmp1_call_tp_hp, tmp1_base_tp_hp, tmp2_call_tp_hp, tmp2_base_tp_hp = set(), set(), set(), set()
            for svtype in ['INS', 'DEL']:
                call = [s for s in call_ch_ps if s['type'] == svtype]
                base = base_ch_type[svtype]
                if not call:
                    continue
                # Nearest truth SV (by position) for each call, via binary search.
                idx_list = np.searchsorted([s['pos'] for s in base], [s['pos'] for s in call])
                for call_idx in range(len(idx_list)):
                    if idx_list[call_idx] == len(base):
                        base_idx = idx_list[call_idx] - 1
                    elif idx_list[call_idx] > 0 and abs(call[call_idx]['pos'] - base[idx_list[call_idx]]['pos']) > \
                            abs(call[call_idx]['pos'] - base[idx_list[call_idx] - 1]['pos']):
                        base_idx = idx_list[call_idx] - 1
                    else:
                        base_idx = idx_list[call_idx]
                    # True positive: close enough in position and similar in length.
                    if abs(call[call_idx]['pos'] - base[base_idx]['pos']) <= threshold_tp_range and \
                            (call[call_idx]['len'] / base[base_idx]['len'] >= ratio or base[base_idx]['len'] / call[call_idx]['len'] >= ratio):
                        call_tp.add(call[call_idx]['id'])
                        base_tp.add(base[base_idx]['id'])
                        # Genotype agreement: both het (either phase) or both hom.
                        if call[call_idx]['hp'] in ['1|0', '0|1'] and base[base_idx]['hp'] in ['1|0', '0|1'] or \
                                call[call_idx]['hp'] == base[base_idx]['hp'] == '1|1':
                            call_tp_gt.add(call[call_idx]['id'])
                            base_tp_gt.add(base[base_idx]['id'])
                        if call[call_idx]['hp'] == base[base_idx]['hp']:
                            tmp1_call_tp_hp.add(call[call_idx]['id'])
                            tmp1_base_tp_hp.add(base[base_idx]['id'])
                        if call[call_idx]['hp'] == base[base_idx]['hp'] == '1|1' or \
                                call[call_idx]['hp'] == '0|1' and base[base_idx]['hp'] == '1|0' or \
                                call[call_idx]['hp'] == '1|0' and base[base_idx]['hp'] == '0|1':
                            tmp2_call_tp_hp.add(call[call_idx]['id'])
                            tmp2_base_tp_hp.add(base[base_idx]['id'])
            # Phase sets are orientation-free: keep whichever haplotype
            # assignment (as-is vs flipped) matches more SVs.
            if len(tmp1_call_tp_hp) + len(tmp1_base_tp_hp) > len(tmp2_call_tp_hp) + len(tmp2_base_tp_hp):
                call_tp_hp = call_tp_hp.union(tmp1_call_tp_hp)
                base_tp_hp = base_tp_hp.union(tmp1_base_tp_hp)
            else:
                call_tp_hp = call_tp_hp.union(tmp2_call_tp_hp)
                base_tp_hp = base_tp_hp.union(tmp2_base_tp_hp)
    p = len(call_tp) / len(callinfo)
    r = len(base_tp) / len(baseinfo)
    f1 = 2 * p * r / (p + r)
    p_gt = len(call_tp_gt) / len(callinfo)
    r_gt = len(base_tp_gt) / len(baseinfo)
    f1_gt = 2 * p_gt * r_gt / (p_gt + r_gt)
    p_hp = len(call_tp_hp) / len(callinfo)
    r_hp = len(base_tp_hp) / len(baseinfo)
    f1_hp = 2 * p_hp * r_hp / (p_hp + r_hp)
    return avg_sv_num, p, r, f1, p_gt, r_gt, f1_gt, p_hp, r_hp, f1_hp
def parse_args(argv):
    """Parse command-line options for the SV benchmark.

    Args:
        argv: list of argument strings (e.g. ``sys.argv[1:]``).

    Returns:
        argparse.Namespace with callset, truthset, refdist, pctsim,
        bed_file and skip_phasing.
    """
    parser = argparse.ArgumentParser(description = 'evaluate SV calling, genotyping and phasing performance')
    parser.add_argument('callset', type = str,
                        help = 'phased SV callset in .vcf format')
    parser.add_argument('truthset', type = str,
                        help = 'phased SV truthset in .vcf format')
    parser.add_argument('-r', '--refdist', type = int, default = 1000,
                        help = 'maximum distance comparison calls must be within from base call')
    parser.add_argument('-p', '--pctsim', type = float, default = 0,
                        help = 'edit distance ratio between the REF/ALT haplotype sequences of base and comparison call')
    parser.add_argument('-b', '--bed_file', type = str,
                        help = 'optional .bed file to confine benchmark regions')
    parser.add_argument('--skip_phasing', action = 'store_true',
                        help = 'only benchmark on SV calling and genotyping [%(default)s]')
    # Bug fix: previously called parser.parse_args() with no arguments, which
    # reads sys.argv directly and silently ignores the `argv` passed by main().
    args = parser.parse_args(argv)
    return args
def main(argv):
    """Run the benchmark and print precision/recall/F1 for calling, genotyping and phasing."""
    args = parse_args(argv)
    if not args.bed_file:
        avg_sv_num, p, r, f1, p_gt, r_gt, f1_gt, p_hp, r_hp, f1_hp = evaluation(parse_vcf(args.truthset), parse_vcf(args.callset), args.refdist, args.pctsim)
    else:
        avg_sv_num, p, r, f1, p_gt, r_gt, f1_gt, p_hp, r_hp, f1_hp = evaluation(parse_vcf(args.truthset, args.bed_file), parse_vcf(args.callset, args.bed_file), args.refdist, args.pctsim)
    # Phase-set statistics are only meaningful when phasing is benchmarked.
    if not args.skip_phasing:
        print('Average SV number per phase set is', avg_sv_num)
    print('The precision, recall and F1 score of SV calling are', p, r, f1)
    print('The precision, recall and F1 score of SV genotyping are', p_gt, r_gt, f1_gt)
    if not args.skip_phasing:
        print('The precision, recall and F1 score of SV phasing are', p_hp, r_hp, f1_hp)
# Script entry point: pass the CLI arguments (without the program name).
if __name__ == '__main__':
    main(sys.argv[1:])
|
nilq/baby-python
|
python
|
from itertools import takewhile
import os
import setuptools

# Long description for PyPI: the README up to (but excluding) the line that
# starts with 'Installation', skipping the first 15 lines — presumably the
# header/badges; verify against README.rst.
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as f:
    long_description = str.join('', takewhile(lambda l: not l.startswith('Installation'), f.readlines()[15:]))

setuptools.setup(
    name = 'OverloadingFixed',
    version = '1.11',
    author="L. Pham-Trong",
    author_email="spam@lucasanss.xyz",
    description="Function overloading for Python 3",
    long_description=long_description,
    long_description_content_type="text/x-rst",
    url="https://github.com/toto112358/overloading.py",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.0',
)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Copyright 2014 splinter authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import os
import unittest
from splinter import Browser
from .base import BaseBrowserTests
from .fake_webapp import app, EXAMPLE_APP
from .is_element_present_nojs import IsElementPresentNoJSTest
class FlaskClientDriverTest(
    BaseBrowserTests, IsElementPresentNoJSTest, unittest.TestCase
):
    """Exercise the splinter 'flask' test-client driver against the fake web app.

    The flask driver has no JavaScript engine, so most interactive operations
    (type, clear, frames, mouse events) are expected to raise NotImplementedError.
    """

    @classmethod
    def setUpClass(cls):
        # One shared browser per class; short wait_time keeps polling fast.
        cls.browser = Browser("flask", app=app, wait_time=0.1)

    def setUp(self):
        self.browser.visit(EXAMPLE_APP)

    @classmethod
    def tearDownClass(self):
        # NOTE(review): the first parameter of a classmethod is conventionally
        # named `cls`; it still receives the class object here.
        self.browser.quit()

    def test_should_support_with_statement(self):
        with Browser("flask", app=app) as internet:
            self.assertIsNotNone(internet)

    def test_attach_file(self):
        "should provide a way to change file field value"
        file_path = os.path.join(
            os.path.abspath(os.path.dirname(__file__)), "mockfile.txt"
        )
        self.browser.attach_file("file", file_path)
        self.browser.find_by_name("upload").click()
        html = self.browser.html
        self.assertIn("text/plain", html)
        self.assertIn(open(file_path, "rb").read().decode("utf-8"), html)

    def test_serialize_select_mutiple(self):
        "should serialize a select with multiple values into a list"
        self.browser.select("pets", ["cat", "dog"])
        form = self.browser.find_by_name("send")._get_parent_form()
        data = self.browser.serialize(form)
        self.assertListEqual(data["pets"], ["cat", "dog"])

    def test_forward_to_none_page(self):
        "should not fail when trying to forward to none"
        browser = Browser("flask", app=app)
        browser.visit(EXAMPLE_APP)
        browser.forward()
        self.assertEqual(EXAMPLE_APP, browser.url)
        browser.quit()

    def test_can_clear_password_field_content(self):
        "flask should not be able to clear"
        with self.assertRaises(NotImplementedError):
            self.browser.find_by_name("password").first.clear()

    def test_can_clear_tel_field_content(self):
        "flask should not be able to clear"
        with self.assertRaises(NotImplementedError):
            self.browser.find_by_name("telephone").first.clear()

    def test_can_clear_text_field_content(self):
        "flask should not be able to clear"
        with self.assertRaises(NotImplementedError):
            self.browser.find_by_name("query").first.clear()

    def test_cant_switch_to_frame(self):
        "flask should not be able to switch to frames"
        with self.assertRaises(NotImplementedError) as cm:
            self.browser.get_iframe("frame_123")
            self.fail()
        e = cm.exception
        self.assertEqual("flask doesn't support frames.", e.args[0])

    def test_simple_type(self):
        """
        flask won't support type method
        because it doesn't interact with JavaScript
        """
        with self.assertRaises(NotImplementedError):
            self.browser.type("query", "with type method")

    def test_simple_type_on_element(self):
        """
        flask won't support type method
        because it doesn't interact with JavaScript
        """
        with self.assertRaises(NotImplementedError):
            self.browser.find_by_name("query").type("with type method")

    def test_slowly_typing(self):
        """
        flask won't support type method
        because it doesn't interact with JavaScript
        """
        with self.assertRaises(NotImplementedError):
            self.browser.type("query", "with type method", slowly=True)

    def test_slowly_typing_on_element(self):
        """
        flask won't support type method
        on element because it doesn't interact with JavaScript
        """
        with self.assertRaises(NotImplementedError):
            query = self.browser.find_by_name("query")
            query.type("with type method", slowly=True)

    def test_cant_mouseover(self):
        "flask should not be able to put the mouse over the element"
        with self.assertRaises(NotImplementedError):
            self.browser.find_by_css("#visible").mouse_over()

    def test_cant_mouseout(self):
        "flask should not be able to mouse out of an element"
        with self.assertRaises(NotImplementedError):
            self.browser.find_by_css("#visible").mouse_out()

    def test_links_with_nested_tags_xpath(self):
        links = self.browser.find_by_xpath('//a/span[text()="first bar"]/..')
        self.assertEqual(
            len(links),
            1,
            'Found not exactly one link with a span with text "BAR ONE". %s'
            % (map(lambda item: item.outer_html, links)),
        )

    def test_finding_all_links_by_non_ascii_text(self):
        "should find links by non ascii text"
        non_ascii_encodings = {
            "pangram_pl": u"Jeżu klątw, spłódź Finom część gry hańb!",
            "pangram_ja": u"天 地 星 空",
            "pangram_ru": u"В чащах юга жил бы цитрус? Да, но фальшивый экземпляр!",
            "pangram_eo": u"Laŭ Ludoviko Zamenhof bongustas freŝa ĉeĥa manĝaĵo kun spicoj.",
        }
        for key, text in non_ascii_encodings.items():
            link = self.browser.find_link_by_text(text)
            self.assertEqual(key, link["id"])
class FlaskClientDriverTestWithCustomHeaders(unittest.TestCase):
    """Verify that custom headers passed to Browser() reach the flask app."""

    @classmethod
    def setUpClass(cls):
        # The /headers endpoint of the fake app echoes received headers back.
        custom_headers = {
            "X-Splinter-Customheaders-1": "Hello",
            "X-Splinter-Customheaders-2": "Bye",
        }
        cls.browser = Browser("flask", app=app, custom_headers=custom_headers)

    def test_create_a_flask_client_with_custom_headers(self):
        self.browser.visit(EXAMPLE_APP + "headers")
        self.assertTrue(
            self.browser.is_text_present("X-Splinter-Customheaders-1: Hello")
        )
        self.assertTrue(self.browser.is_text_present("X-Splinter-Customheaders-2: Bye"))

    @classmethod
    def tearDownClass(cls):
        cls.browser.quit()
|
nilq/baby-python
|
python
|
import pandas as pd
from ggplot import *
import sys
def data_clean():
    """Load the parameter CSV named in sys.argv[1] and stack it into long format.

    The CSV has paired state-0 (oviparity) and state-1 (viviparity) parameter
    columns plus a likelihood column. Returns a DataFrame with columns
    Lambda, Mu, Q, Likelihood, Parity containing both states stacked.
    """
    # header=0: the drop() calls below address columns by NAME, so the first
    # row must be read as the header. The old `header=False` relied on legacy
    # pandas behavior and is rejected by modern pandas.
    dat = pd.read_csv(sys.argv[1], header=0)
    dat1 = dat
    # Viviparity frame keeps the state-0 columns; oviparity keeps state-1.
    dat = dat.drop(['q1', 'lambda1', 'mu1'], axis=1)
    dat1 = dat1.drop(['q0', 'lambda0', 'mu0'], axis=1)
    dat['Parity'] = 'Viviparity'
    dat1['Parity'] = 'Oviparity'
    dat.columns = ['Lambda', 'Mu', 'Q', 'Likelihood', 'Parity']
    dat1.columns = ['Lambda', 'Mu', 'Q', 'Likelihood', 'Parity']
    # DataFrame.append() was removed in pandas 2.0; concat produces the same
    # stacked frame.
    joined_mat = pd.concat([dat, dat1])
    return joined_mat
def plot_params():
    """Density-plot each numeric parameter by parity, with MLE reference lines.

    Saves one PNG per column via ggsave(). NOTE(review): the mypb_* dicts are
    defined but never used — presumably kept as alternative reference values.
    """
    joined_mat = data_clean()
    mypb_mle_dict_o = {'Lambda':3.126341e-02, 'Mu':8.043702e-06, 'Q':6.693207e-04, 'Likelihood':-18088.72 }
    mypb_mle_dict_v = {'Lambda':3.554580e-02, 'Mu':9.342949e-07, 'Q':2.932465e-03, 'Likelihood':-18088.72}
    # MLE point estimates drawn as vertical lines (salmon = oviparity,
    # teal = viviparity).
    pb_mle_dict_o = {'Lambda':0.0628, 'Mu':0.0000, 'Q':0.0007, 'Likelihood':-16735.45 }
    pb_mle_dict_v = {'Lambda':0.0848, 'Mu':0.0270, 'Q':0.0059, 'Likelihood':-16735.45}
    columns =list(joined_mat.columns)
    # The last column is the non-numeric Parity label; drop it from the plot list.
    columns.pop()
    for column in columns:
        joined_mat[column] = joined_mat[column].apply(float)
        plot = ggplot(joined_mat, aes(x=column, fill='Parity')) + geom_density(alpha=.25) + geom_vline(x=pb_mle_dict_o[column], color='salmon', size = 5) + geom_vline(x=pb_mle_dict_v[column], color='teal', size = 5)
        ggsave(plot, column, 'png')
plot_params()
|
nilq/baby-python
|
python
|
# Python program to print an array in reverse order
array = [10, 20, 30, 40, 50]
print("Array in reverse order: ")
# Walk the list from the last element to the first
for element in reversed(array):
    print(element)
|
nilq/baby-python
|
python
|
# pylint: disable=missing-function-docstring, missing-module-docstring/
def ones():
    """Print the constant 22 (deliberately shadowed by `from numpy import ones` below)."""
    print(22)
# Shadows the local ones() defined above: from here on, ones is numpy.ones.
from numpy import ones
g = ones(6)
print(g)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import sys, os
import pprint
""" Hardlink UCI History oral histories files for loading into Nuxeo """
# Source (raw files) and destination (new path) roots for the hardlinking run.
raw_dir = u"/apps/content/raw_files/UCI/UCIHistory/OralHistories/ContentFiles/"
new_path_dir = u"/apps/content/new_path/UCI/UCIHistory/OralHistories/"
pp = pprint.PrettyPrinter()
def main(argv=None):
    """Walk the raw oral-history directories and hardlink .mp3/.pdf files into new_path_dir."""
    # os.walk yields (root, dirs, files) tuples; taking [0] keeps only the
    # top level, so obj_nums is the list of object-number subdirectories.
    obj_nums = [dirs for root, dirs, files in os.walk(raw_dir)][0]
    for obj_num in obj_nums:
        obj_dir = os.path.join(raw_dir, obj_num)
        pp.pprint(obj_dir)
        files = [files for root, dirs, files in os.walk(obj_dir)][0]
        for file in files:
            # Only audio and transcript files are linked.
            if file.endswith('.mp3') or file.endswith('.pdf'):
                raw_path = os.path.join(obj_dir, file)
                new_path = os.path.join(new_path_dir, obj_num, file)
                print "link", raw_path, new_path, '\n'
                link_file(raw_path, new_path)
def link_file(fullpath_from, fullpath_to):
    """Hardlink fullpath_from to fullpath_to, creating parent directories as needed."""
    print "link {} {}".format(fullpath_from, fullpath_to)
    _mkdir(os.path.dirname(fullpath_to))
    os.link(fullpath_from, fullpath_to)
# http://code.activestate.com/recipes/82465-a-friendly-mkdir/
def _mkdir(newdir):
"""works the way a good mkdir should :)
- already exists, silently complete
- regular file in the way, raise an exception
- parent directory(ies) does not exist, make them as well
"""
if os.path.isdir(newdir):
pass
elif os.path.isfile(newdir):
raise OSError("a file with the same name as the desired " \
"dir, '%s', already exists." % newdir)
else:
head, tail = os.path.split(newdir)
if head and not os.path.isdir(head):
_mkdir(head)
#print "_mkdir %s" % repr(newdir)
if tail:
os.mkdir(newdir)
if __name__ == "__main__":
sys.exit(main())
|
nilq/baby-python
|
python
|
import argparse
import yaml
import logging
import numpy as np
import glob
from astropy.coordinates import SkyCoord, Angle
from astropy import units as u
from astropy.convolution import Tophat2DKernel, Gaussian2DKernel
from os import path
from copy import deepcopy
from fermiAnalysis.batchfarm import utils
from simCRpropa.cascmaps import CascMap, stack_results_lso
def pl2_to_pl(src_dict, scale=1000.):
    """Convert the integral flux of a PowerLaw2 spectrum to the
    differential prefactor of an equivalent PowerLaw evaluated at ``scale``.
    """
    spectral_pars = src_dict['spectral_pars']
    gamma = spectral_pars['Index']['value']
    e_lo = spectral_pars['LowerLimit']['value']
    e_hi = spectral_pars['UpperLimit']['value']
    flux = spectral_pars['Integral']['value']
    # invert the analytic integral of (E / scale)**-gamma over [e_lo, e_hi]
    prefactor = flux * (1. - gamma)
    prefactor /= (e_hi ** (1. - gamma) - e_lo ** (1. - gamma))
    return prefactor * scale ** -gamma
def convert(data):
    """Recursively decode ASCII ``bytes`` to ``str`` inside nested containers.

    dicts, tuples and lists are rebuilt with all contained keys/values
    converted; any other type is returned unchanged.
    """
    if isinstance(data, bytes):
        return data.decode('ascii')
    if isinstance(data, dict):
        return {convert(key): convert(value) for key, value in data.items()}
    if isinstance(data, tuple):
        # bug fix: in Python 3 ``map`` is a lazy iterator, so the original
        # ``return map(convert, data)`` did not return a tuple -- materialize it
        return tuple(convert(item) for item in data)
    if isinstance(data, list):
        # generalization: lists were previously passed through unconverted
        return [convert(item) for item in data]
    return data
class GenerateFitsTemplates(object):
def __init__(self, **kwargs):
"""
Initialize the class
"""
self._config = deepcopy(kwargs)
self.__dict__.update(self._config)
self.emcasc = self.Simulation['emcasc']
for i, k in enumerate(['B', 'maxTurbScale']):
if isinstance(self.Bfield[k], list):
x = deepcopy(self.Bfield[k])
self.Bfield[k] = x[0]
elif isinstance(self.Bfield[k], float):
x = [self.Bfield[k]]
else:
raise ValueError("{0:s} type not understood: {1}".format(
type(k, self.Bfield[k])))
if not i:
self._bList = x
else:
self._turbScaleList = x
for i, k in enumerate(['th_jet', 'z']):
if isinstance(self.Source[k], list):
x = deepcopy(self.Source[k])
self.Source[k] = x[0]
elif isinstance(self.Source[k], float):
x = [self.Source[k]]
else:
raise ValueError("{0:s} type not understood: {1}".format(
type(k, self.Source[k])))
if not i:
self._th_jetList= x
else:
self._zList = x
    def setOutput(self, idB=0, idL=0, it=0, iz=0):
        """Set the output file and directory from the scan-parameter indices.

        Parameters
        ----------
        idB: int
            index into the B-field amplitude list (``_bList``)
        idL: int
            index into the turbulence max-scale list (``_turbScaleList``)
        it: int
            index into the jet opening-angle list (``_th_jetList``)
        iz: int
            index into the redshift list (``_zList``)

        Raises
        ------
        ValueError
            for unsupported source morphologies or B-field types
        """
        self.OutName = 'combined.hdf5'
        self.Source['th_jet'] = self._th_jetList[it]
        self.Source['z'] = self._zList[iz]
        # append options to file path
        self.FileIO['outdir'] = path.join(self.FileIO['basedir'],
                                          'z{0[z]:.3f}'.format(self.Source))
        # morphology-dependent subdirectory ('cone' is the default)
        if self.Source.get('source_morphology', 'cone') == 'cone':
            self.FileIO['outdir'] = path.join(self.FileIO['outdir'],
                                              'th_jet{0[th_jet]}/'.format(self.Source))
        elif self.Source.get('source_morphology', 'cone') == 'iso':
            self.FileIO['outdir'] = path.join(self.FileIO['outdir'],
                                              'iso/')
        elif self.Source.get('source_morphology', 'cone') == 'dir':
            self.FileIO['outdir'] = path.join(self.FileIO['outdir'],
                                              'dir/')
        else:
            raise ValueError("Chosen source morphology not supported.")
        self.FileIO['outdir'] = path.join(self.FileIO['outdir'],
                                          'th_obs{0[obsAngle]}/'.format(self.Observer))
        self.FileIO['outdir'] = path.join(self.FileIO['outdir'],
                                          'spec{0[useSpectrum]:n}/'.format(self.Source))
        # select the working B-field values for this scan point
        self.Bfield['B'] = self._bList[idB]
        self.Bfield['maxTurbScale'] = self._turbScaleList[idL]
        if self.Bfield['type'] == 'turbulence':
            self.FileIO['outdir'] = path.join(self.FileIO['outdir'],
                                              'Bturb{0[B]:.2e}/q{0[turbIndex]:.2f}/scale{0[maxTurbScale]:.2f}/'.format(self.Bfield))
        elif self.Bfield['type'] =='cell':
            self.FileIO['outdir'] = path.join(self.FileIO['outdir'],
                                              'Bcell{0[B]:.2e}/scale{0[maxTurbScale]:.2f}/'.format(self.Bfield))
        else:
            raise ValueError("Bfield type must be either 'cell' or 'turbulence' not {0[type]}".format(self.Bfield))
        self.outputfile = str(path.join(self.FileIO['outdir'],self.OutName))
        logging.info("outdir: {0[outdir]:s}".format(self.FileIO))
        logging.info("outfile: {0:s}".format(self.outputfile))
    def generate_fits_templates(self,
                                fermipy_files,
                                select_z=None,
                                tmax=1e7,
                                theta_obs=0.,
                                #cov=2.,
                                cov=0.75,
                                #index_step=0.5,
                                index_step=0.075,
                                Ecut_TeV=np.arange(1.,13., 2.),
                                rotation=0.,
                                ts_thr=25.,
                                redshifts=None,
                                use_closest_z=True,
                                dry=False,
                                make_plots=False,
                                n_ebins_add=3,
                                select_src=None,
                                select_bfield=None,
                                overwrite=False):
        """
        Generate IGMF fits templates for sources analyzed with fermipy
        Parameters
        ----------
        fermipy_files: list
            list of npy files that are the result of an ROI fermipy fit
        select_z: float or None
            if not none, only generate templates for this redshift
        select_bfield: float or None
            if not none, only generate templates for this magnetic field
        theta_obs: float
            Angle between jet axis and line of sight in degrees
        tmax: float
            Maximum allowed delay time of cascade photons in years.
        cov: float
            determines the range of the spectral index loop
            through index + cov * error
        index_step: float
            increment for spectral index loop,
            cov_loop = np.arange(-cov, cov + index_step, index_step)
        Ecut_TeV: array-like
            assumed exponential cutoff energies in TeV
        rotation: float
            Angle in degrees by which cascade template is rotated
        ts_thr: float
            Only consider sources for which fermipy analysis gave ts value larger than this
            value
        use_closest_z: bool
            if True, use template with redshift closest to source redshift,
            otherwise, redshift has to match exactly
        redshifts: array-like
            list with source redshifts. If not given, it's extracted from the fermipy
            files. This only works if the sources were fitted with EBL absorption
        dry: bool
            only perform template generation if this is False.
            Useful for debugging.
        make_plots: bool
            if True, save a diagnostic sky map and spectrum plot for one
            spectral-parameter combination per template file
        select_src: str or None
            if not None, only generate templates for this source
        overwrite: bool
            Overwrite existing templates.
        n_ebins_add: int
            number of energy bins that will be added at low and high energy end
        """
        if make_plots:
            from myplot.spectrum import SEDPlotter
            import matplotlib.pyplot as plt
        # the assumed intrinsic spectrum:
        # power law with exponential cut off
        inj_spec = lambda E, **p : p['Prefactor'] * (E / p['Scale']) ** (-p['Index']) * \
            np.exp(-E / p['Cutoff'])
        # the steps for the index: Index + (Index_err) * cov_scale
        cov_scale = np.arange(-cov, cov + index_step, index_step)
        logging.info("Ecut_TeV: {0}".format(Ecut_TeV))
        logging.info("cov_scale: {0}".format(cov_scale))
        logging.info("{0:n} x {1:n} = {2:n} files will be generated for each source and B field config".format(
            Ecut_TeV.shape[0], cov_scale.shape[0], Ecut_TeV.shape[0] * cov_scale.shape[0]))
        for ifile, f in enumerate(fermipy_files):
            if not path.exists(f):
                logging.warning("{0:s} not found".format(f))
                continue
            # fermipy ROI result; bytes keys/values are decoded via convert()
            d = np.load(f, allow_pickle=True, encoding="latin1").flat[0]
            src = d['config']['selection']['target']
            src_dict = convert(d['sources'])[src]
            sed_file = f.rstrip(".npy") + "_" + src.lower().replace(' ','_') + "_sed.npy"
            if path.exists(sed_file):
                sed = np.load(sed_file, allow_pickle=True, encoding='latin1').flat[0]
            else:
                sed = None
            logging.info(" ===== {0:s} = {1:s} ===== ".format(src, src_dict['assoc']['ASSOC1']))
            if select_src is not None:
                if not src == select_src:
                    continue
            if src_dict['ts'] < ts_thr:
                logging.warning("Source TS = {0:.2f} < thr."
                                " No templates will be generated".format(src_dict['ts']))
                continue
            c = SkyCoord(ra=src_dict['ra'], dec=src_dict['dec'], unit='deg', frame='icrs')
            # energies are stored as log10(MeV); +6 converts to log10(eV)
            log_energy_edges_eV = d['roi']['log_energies'] + 6.
            # prepend and append energy bins
            d_log_e = np.diff(log_energy_edges_eV)[0]
            log_e_low = log_energy_edges_eV[0] - np.arange(1, n_ebins_add + 1, 1)[::-1] * d_log_e
            log_e_high = log_energy_edges_eV[-1] + np.arange(1, n_ebins_add + 1, 1) * d_log_e
            energy_edges_eV = 10.**np.concatenate([log_e_low, log_energy_edges_eV, log_e_high])
            width = d['config']['binning']['roiwidth']
            binsz = d['config']['binning']['binsz']
            # get source redshift either from best fit including EBL
            # or from a user-provided list
            if redshifts is None:
                try:
                    z = src_dict['spectral_pars']['redshift']['value']
                except KeyError:
                    logging.warning("redshift not in spectral pars dict and no redshift information given!")
                    raise
            else:
                z = redshifts[ifile]
            # get the index of the file corresponding to this redshift
            if use_closest_z:
                iz = np.argmin(np.abs(z - np.array(self._zList)))
                if np.abs(z - self._zList[iz]) > 0.005:
                    logging.info("Source redshift = {0:.3f}, nearest template redshift {1:.3f},"
                                 " difference larger than 0.005, continuing".format(z, self._zList[iz]))
                    continue
            else:
                iz = self._zList.index(np.round(z, 3))
            if select_z is not None and not self._zList[iz] == select_z:
                continue
            logging.info("Source redshift = {0:.3f}, using template redshift {1:.3f}".format(z, self._zList[iz]))
            logging.info("Starting loop over B field parameters")
            for ib, b in enumerate(self._bList):
                if select_bfield is not None:
                    if not select_bfield == b:
                        continue
                for il, l in enumerate(self._turbScaleList):
                    for it, t in enumerate(self._th_jetList):
                        self.setOutput(idB=ib, idL=il, it=it, iz=iz)
                        if not path.exists(self.outputfile):
                            logging.warning("{0:s} not found. Simulations not finished yet?".format(self.outputfile))
                            continue
                        # generate new output file for chosen theta obs angle
                        fname = self.outputfile.replace('.hdf5', '_th_obs{0:.1f}.hdf5'.format(theta_obs))
                        if not path.isfile(fname) or overwrite:
                            data, config = stack_results_lso(infile=self.outputfile,
                                                             outfile=fname,
                                                             theta_obs=theta_obs,
                                                             use_cosmo=False,
                                                             )
                        # set up cascade map
                        if not dry:
                            casc = CascMap.gen_from_hd5f(fname,
                                                         skycoord=c,
                                                         width=width,
                                                         binsz=binsz,
                                                         ebins=energy_edges_eV * u.eV,
                                                         id_detection=22,
                                                         smooth_kwargs={'kernel': Gaussian2DKernel, 'threshold': 1, 'steps': 50}
                                                         )
                        # set the maximum delay time
                        logging.info("Applying time cut {0:.1e}".format(tmax))
                        if not dry:
                            casc.tmax = tmax * u.yr
                        # loop through spectral index and cut off energy,
                        # set the weights, and export fits file
                        if "PowerLaw2" in src_dict['SpectrumType']:
                            # convert the PL2 integral flux to a PL prefactor
                            scale = 1e9 * u.eV
                            prefactor = pl2_to_pl(src_dict, scale.to('MeV').value) * u.Unit("MeV-1 s-1 cm-2")
                        else:
                            prefactor = src_dict['spectral_pars']['Prefactor'] * u.Unit("MeV-1 s-1 cm-2")
                            scale = src_dict['spectral_pars']['Scale'] * u.MeV.to('eV')
                        pars = {'Prefactor': prefactor, 'Scale': scale}
                        logging.info("Starting loop over spectral parameters")
                        # apply a rotation
                        if not dry:
                            casc.rotation(Angle(rotation * u.deg))
                        #err = 0.1 * src_dict['spectral_pars']['Index']['value'] if np.isnan(src_dict['spectral_pars']['Index']['error']) \
                            #else src_dict['spectral_pars']['Index']['error']
                        for i, ecut in enumerate(Ecut_TeV):
                            for j, cs in enumerate(cov_scale):
                                #pars['Index'] = src_dict['spectral_pars']['Index']['value'] + cs * err
                                pars['Index'] = src_dict['spectral_pars']['Index']['value'] + cs
                                pars['Cutoff'] = (ecut * u.TeV).to('eV')
                                if np.any(np.isnan([v.value if isinstance(v, u.Quantity) else v for v in pars.values()])):
                                    raise ValueError("Parameters contain nans!")
                                suffix = "em_casc_{5:s}_tmax_{0:.0e}_theta_obs_{1:s}_rotation_{2:s}" \
                                         "_index_{3:s}_Ecut_{4:s}".format(tmax, str(theta_obs).replace('.', 'p'),
                                                                          str(rotation).replace('.', 'p'),
                                                                          "{0:.3f}".format(pars["Index"]).replace('.', 'p'),
                                                                          str(ecut).replace('.', 'p'),
                                                                          src.lower().replace(' ',''))
                                filename = path.join(path.dirname(self.outputfile), suffix + '.fits')
                                if path.exists(filename) and not overwrite:
                                    logging.info("{0:s} exists and overwrite is set to False. Continuing".format(filename))
                                    continue
                                # set the weights
                                if not dry:
                                    casc.apply_spectral_weights(lambda E: inj_spec(E, **pars), smooth=True)
                                # plot the skymap and spectrum
                                # for one set of assumed spectral parameters
                                # NOTE(review): '/' is float division in Python 3, so for an odd
                                # len(cov_scale) this comparison never matches -- possibly '//' intended
                                if make_plots and j == len(cov_scale) / 2 and i == len(Ecut_TeV) - 1:
                                #if make_plots:
                                    # skymap
                                    fig, ax, cax = casc.casc_obs.sum_over_axes(['energy']).plot(
                                        add_cbar=True, stretch='log', cmap='cubehelix_r')
                                    ax.tick_params(direction='out')
                                    fig.savefig(path.join(path.dirname(self.outputfile), suffix + '_skymap.png'), dpi=150)
                                    plt.close("all")
                                    # spectrum
                                    fig, ax = casc.plot_spectrum(energy_unit='MeV', E2dNdE_unit='MeV cm-2 s-1')
                                    cen = casc.casc.geom.get_axis_by_name('energy_true').center
                                    ax.loglog(cen.to('MeV'),
                                              (inj_spec(cen, **pars).to(casc.casc_obs.quantity.unit * u.sr) * cen ** 2. / (1. + z)).to('MeV cm-2 s-1'),
                                              label=r'injected $\gamma$-ray spectrum'
                                              )
                                    vy = ax.get_ylim()
                                    vx = ax.get_xlim()
                                    if sed is not None:
                                        SEDPlotter.plot_sed(sed, ax=ax)
                                    vy2 = ax.get_ylim()
                                    vx2 = ax.get_xlim()
                                    ax.set_ylim(vy[1] / 1e4, np.max([vy[1], vy2[1]]))
                                    ax.set_xlim(vx[0], vx[1])
                                    ax.legend(loc=1, fontsize='xx-small')
                                    ax.grid()
                                    fig.savefig(path.join(path.dirname(self.outputfile), suffix + '_spec.png'), dpi=150)
                                    plt.close("all")
                                # export to fits file
                                logging.info("writing fits template to {0:s}".format(filename))
                                extra_dict = {"spectral_parameters" :{k: v if isinstance(v, float) else v.to_string() for k, v in pars.items()},
                                              "sim_config": casc.config if not dry else {}
                                              }
                                if not dry:
                                    casc.export_casc_obs_to_fits(filename, extra_header_dict=extra_dict)
                        # free the (large) cascade map before the next template file
                        if not dry:
                            del casc
    def plot_templates(self,
                       fermipy_files,
                       select_z=None,
                       tmax=1e7,
                       theta_obs=0.,
                       select_b_field=None,
                       cov=2.,
                       index_step=0.5,
                       Ecut_TeV=np.arange(1.,17., 2.),
                       rotation=0.,
                       ts_thr=25.,
                       redshifts=None,
                       use_closest_z=True,
                       n_ebins_add=3,
                       cmap="cividis",
                       select_src=None,
                       overwrite=False):
        """
        Plot the IGMF templates
        Parameters
        ----------
        fermipy_files: list
            list of npy files that are the result of an ROI fermipy fit
        select_z: float or None
            if not none, only generate templates for this redshift
        select_b_field: float or None
            if not None, only plot templates for this magnetic field
        theta_obs: float
            Angle between jet axis and line of sight in degrees
        tmax: float
            Maximum allowed delay time of cascade photons in years.
        cov: float
            determines the range of the spectral index loop
            through index + cov * error
        index_step: float
            increment for spectral index loop,
            cov_loop = np.arange(-cov, cov + index_step, index_step)
        Ecut_TeV: array-like
            assumed exponential cutoff energies in TeV
        rotation: float
            Angle in degrees by which cascade template is rotated
        ts_thr: float
            Only consider sources for which fermipy analysis gave ts value larger than this
            value
        use_closest_z: bool
            if True, use template with redshift closest to source redshift,
            otherwise, redshift has to match exactly
        redshifts: array-like
            list with source redshifts. If not given, it's extracted from the fermipy
            files. This only works if the sources were fitted with EBL absorption
        cmap: str
            matplotlib colormap name used for the sky map and spectra
        select_src: str or None
            if not None, only plot templates for this source
        overwrite: bool
            Overwrite existing templates.
        n_ebins_add: int
            number of energy bins that will be added at low and high energy end
        """
        from myplot.spectrum import SEDPlotter
        import matplotlib.pyplot as plt
        # the assumed intrinsic spectrum:
        # power law with exponential cut off
        inj_spec = lambda E, **p : p['Prefactor'] * (E / p['Scale']) ** (-p['Index']) * \
            np.exp(-E / p['Cutoff'])
        # the steps for the index: Index + (Index_err) * cov_scale
        cov_scale = np.arange(-cov, cov + index_step, index_step)
        cp = plt.cm.get_cmap(cmap)
        for ifile, f in enumerate(fermipy_files):
            if not path.exists(f):
                logging.warning("{0:s} not found".format(f))
                continue
            d = np.load(f, allow_pickle=True, encoding="latin1").flat[0]
            src = d['config']['selection']['target']
            src_dict = convert(d['sources'])[src]
            sed_file = f.rstrip(".npy") + "_" + src.lower().replace(' ','_') + "_sed.npy"
            if path.exists(sed_file):
                sed = np.load(sed_file, allow_pickle=True, encoding='latin1').flat[0]
            else:
                sed = None
            assoc = src_dict['assoc']['ASSOC1']
            logging.info(" ===== {0:s} = {1:s} ===== ".format(src, assoc))
            if select_src is not None:
                if not src == select_src:
                    continue
            if src_dict['ts'] < ts_thr:
                logging.warning("Source TS = {0:.2f} < thr."
                                " No templates will be generated".format(src_dict['ts']))
                continue
            c = SkyCoord(ra=src_dict['ra'], dec=src_dict['dec'], unit='deg', frame='icrs')
            # energies are stored as log10(MeV); +6 converts to log10(eV)
            log_energy_edges_eV = d['roi']['log_energies'] + 6.
            # prepend and append energy bins
            d_log_e = np.diff(log_energy_edges_eV)[0]
            log_e_low = log_energy_edges_eV[0] - np.arange(1, n_ebins_add + 1, 1)[::-1] * d_log_e
            log_e_high = log_energy_edges_eV[-1] + np.arange(1, n_ebins_add + 1, 1) * d_log_e
            energy_edges_eV = 10.**np.concatenate([log_e_low, log_energy_edges_eV, log_e_high])
            width = d['config']['binning']['roiwidth']
            binsz = d['config']['binning']['binsz']
            # get source redshift either from best fit including EBL
            # or from a user-provided list
            if redshifts is None:
                try:
                    z = src_dict['spectral_pars']['redshift']['value']
                except KeyError:
                    logging.warning("redshift not in spectral pars dict and no redshift information given!")
                    raise
            else:
                z = redshifts[ifile]
            # get the index of the file corresponding to this redshift
            if use_closest_z:
                iz = np.argmin(np.abs(z - np.array(self._zList)))
                if np.abs(z - self._zList[iz]) > 0.005:
                    logging.info("Source redshift = {0:.3f}, nearest template redshift {1:.3f},"
                                 " difference larger than 0.005, continuing".format(z, self._zList[iz]))
                    continue
            else:
                iz = self._zList.index(np.round(z, 3))
            if select_z is not None and not self._zList[iz] == select_z:
                continue
            logging.info("Source redshift = {0:.3f}, using template redshift {1:.3f}".format(z, self._zList[iz]))
            logging.info("Starting loop over B field parameters")
            # iplot counts drawn parameter combinations; nplots sizes the color scale
            iplot = 0
            nplots = len(cov_scale) + len(Ecut_TeV)
            if select_b_field is None:
                nplots += len(self._bList)
            for ib, b in enumerate(self._bList):
                if select_b_field is not None and not b == select_b_field:
                    continue
                for il, l in enumerate(self._turbScaleList):
                    for it, t in enumerate(self._th_jetList):
                        self.setOutput(idB=ib, idL=il, it=it, iz=iz)
                        if not path.exists(self.outputfile):
                            logging.warning("{0:s} not found. Simulations not finished yet?".format(self.outputfile))
                            continue
                        # generate new output file for chosen theta obs angle
                        fname = self.outputfile.replace('.hdf5', '_th_obs{0:.1f}.hdf5'.format(theta_obs))
                        if not path.isfile(fname) or overwrite:
                            data, config = stack_results_lso(infile=self.outputfile,
                                                             outfile=fname,
                                                             theta_obs=theta_obs,
                                                             use_cosmo=False,
                                                             )
                        # set up cascade map
                        casc = CascMap.gen_from_hd5f(fname,
                                                     skycoord=c,
                                                     width=width,
                                                     binsz=binsz,
                                                     ebins=energy_edges_eV * u.eV,
                                                     id_detection=22,
                                                     smooth_kwargs={'kernel': Gaussian2DKernel, 'threshold': 4, 'steps': 50}
                                                     )
                        # set the maximum delay time
                        logging.info("Applying time cut {0:.1e}".format(tmax))
                        casc.tmax = tmax * u.yr
                        # loop through spectral index and cut off energy,
                        # set the weights, and export fits file
                        if "PowerLaw2" in src_dict['SpectrumType']:
                            # convert the PL2 integral flux to a PL prefactor
                            scale = 1e9 * u.eV
                            prefactor = pl2_to_pl(src_dict, scale.to('MeV').value) * u.Unit("MeV-1 s-1 cm-2")
                        else:
                            prefactor = src_dict['spectral_pars']['Prefactor'] * u.Unit("MeV-1 s-1 cm-2")
                            scale = src_dict['spectral_pars']['Scale'] * u.MeV.to('eV')
                        pars = {'Prefactor': prefactor, 'Scale': scale}
                        logging.info("Starting loop over spectral parameters")
                        logging.info("Ecut_TeV: {0}".format(Ecut_TeV))
                        logging.info("cov_scale: {0}".format(cov_scale))
                        # apply a rotation
                        casc.rotation(Angle(rotation * u.deg))
                        # fall back to a 10% index error if the fit error is nan
                        err = 0.1 * src_dict['spectral_pars']['Index']['value'] if np.isnan(src_dict['spectral_pars']['Index']['error']) \
                            else src_dict['spectral_pars']['Index']['error']
                        for i, ecut in enumerate(Ecut_TeV):
                            for j, cs in enumerate(cov_scale):
                                pars['Index'] = src_dict['spectral_pars']['Index']['value'] + cs * err
                                pars['Cutoff'] = (ecut * u.TeV).to('eV')
                                suffix = "em_casc_{5:s}_tmax_{0:.0e}_theta_obs_{1:s}_rotation_{2:s}" \
                                         "_index_{3:s}_Ecut_{4:s}".format(tmax, str(theta_obs).replace('.', 'p'),
                                                                          str(rotation).replace('.', 'p'),
                                                                          "{0:.3f}".format(pars["Index"]).replace('.', 'p'),
                                                                          str(ecut).replace('.', 'p'),
                                                                          src.lower().replace(' ',''))
                                # set the weights
                                casc.apply_spectral_weights(lambda E: inj_spec(E, **pars), smooth=True)
                                # skymap, only plot once
                                if not iplot:
                                    fig_sky, ax_sky, cax = casc.casc_obs.sum_over_axes(['energy']).plot(
                                        add_cbar=True, stretch='log', cmap=cmap)
                                    ax_sky.tick_params(direction='out')
                                    title = r"{0:s}, $t_\mathrm{{max}}$ = {1:.1e}, $\theta_\mathrm{{obs}} = {2:.1f}^\circ$, $\phi = {3:.1f}^\circ$".format(
                                        assoc, tmax, theta_obs, rotation)
                                    fig_sky.suptitle(title)
                                    ax_sky.grid(color="0.7", ls=":")
                                    fig_sky.savefig(path.join(path.dirname(self.outputfile), suffix + '_skymap.png'), dpi=150)
                                label = "$\Gamma = {0:.2f}, E_\mathrm{{cut}} = {1:.2f}$ TeV".format(pars["Index"], ecut)
                                label_casc = "$B = {0:.2f}$".format(b)
                                # TODO: pre calculate number of lines to use full color scale
                                # TODO: customize lables
                                # TODO use steps
                                # TODO check transparency so that observed spectrum is still visiblie
                                # TODO include IACT data points
                                # spectrum
                                col = cp(iplot / float(nplots))
                                ds = "steps-pre"
                                lw = 1.5
                                zorder=-2
                                if not iplot:
                                    fig_spec, ax_spec = casc.plot_spectrum(energy_unit='MeV',
                                                                           E2dNdE_unit='MeV cm-2 s-1',
                                                                           kwargs_casc=dict(label=label_casc, color=col, drawstyle=ds, lw=lw, marker='', ls='-', zorder=zorder),
                                                                           kwargs_prim=dict(plot=True, label='', color=col, lw=lw, marker='', ls='-', zorder=zorder),
                                                                           kwargs_tot=dict(plot=False, label='', color=col, drawstyle=ds, lw=lw),
                                                                           )
                                else:
                                    casc.plot_spectrum(energy_unit='MeV',
                                                       E2dNdE_unit='MeV cm-2 s-1',
                                                       ax=ax_spec,
                                                       fig=fig_spec,
                                                       kwargs_casc=dict(label=label_casc, color=col, drawstyle=ds, lw=lw, marker='', ls='-', zorder=zorder),
                                                       kwargs_prim=dict(plot=True, label='', color=col, lw=lw, marker='', ls='-', zorder=zorder),
                                                       kwargs_tot=dict(plot=False, label='', color=col, drawstyle=ds, lw=lw),
                                                       )
                                cen = casc.casc.geom.get_axis_by_name('energy_true').center
                                if (len(cov_scale) > 1 or len(Ecut_TeV) > 1) or iplot == 0:
                                    ax_spec.loglog(cen.to('MeV'),
                                                   (inj_spec(cen, **pars).to(casc.casc_obs.quantity.unit * u.sr) * cen ** 2. / (1. + z)).to('MeV cm-2 s-1'),
                                                   label=label,
                                                   color=col,
                                                   lw=lw
                                                   )
                                vy = ax_spec.get_ylim()
                                vx = ax_spec.get_xlim()
                                iplot += 1
                    # overlay the observed SED and finalize the figure
                    if sed is not None:
                        SEDPlotter.plot_sed(sed, ax=ax_spec)
                    vy2 = ax_spec.get_ylim()
                    vx2 = ax_spec.get_xlim()
                    ax_spec.set_ylim(vy[1] / 1e4, np.max([vy[1], vy2[1]]))
                    ax_spec.set_xlim(vx[0], vx[1])
                    ax_spec.legend(loc=1, fontsize='xx-small')
                    ax_spec.grid()
                    fig_spec.savefig(path.join(path.dirname(self.outputfile), suffix + '_spec.png'), dpi=150)
def main(**kwargs):
    """Parse the command line, load the YAML config and build the generator.

    Returns the (GenerateFitsTemplates, parsed_args) pair; template
    generation itself is left to the caller.
    NOTE(review): ``kwargs`` is accepted but never used.
    """
    usage = "usage: %(prog)s"
    description = "Run the analysis"
    parser = argparse.ArgumentParser(usage=usage,description=description)
    parser.add_argument('-c', '--conf', required=True)
    parser.add_argument('-f', '--files', required=True, nargs="+")
    parser.add_argument('--tmax', default=1e7, help='max AGN duty cycle in years', type=float)
    parser.add_argument('--theta-obs', default=0., help='Angle between AGN jet axis and line of sight', type=float)
    args = parser.parse_args()
    utils.init_logging('INFO', color=True)
    with open(args.conf) as f:
        config = yaml.safe_load(f)
    generator = GenerateFitsTemplates(**config)
    return generator, args
if __name__ == '__main__':
    # build the generator from the CLI arguments and configuration file
    gen, args = main()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/option--Y.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
import sys
import TestSCons
# Platform-dependent executable suffix for built programs.
if sys.platform == 'win32':
    _exe = '.exe'
else:
    _exe = ''
test = TestSCons.TestSCons()
test.subdir('repository', 'work1')
repository = test.workpath('repository')
repository_foo_c = test.workpath('repository', 'foo.c')
work1_foo = test.workpath('work1', 'foo' + _exe)
work1_foo_c = test.workpath('work1', 'foo.c')
# The repository holds the SConstruct and all sources; work1 starts empty.
test.write(['repository', 'SConstruct'], r"""
env = Environment()
env.Program(target= 'foo', source = Split('aaa.c bbb.c foo.c'))
""")
test.write(['repository', 'aaa.c'], r"""
#include <stdio.h>
void
aaa(void)
{
        printf("repository/aaa.c\n");
}
""")
test.write(['repository', 'bbb.c'], r"""
#include <stdio.h>
void
bbb(void)
{
        printf("repository/bbb.c\n");
}
""")
test.write(['repository', 'foo.c'], r"""
#include <stdio.h>
#include <stdlib.h>
extern void aaa(void);
extern void bbb(void);
int
main(int argc, char *argv[])
{
        argv[argc++] = "--";
        aaa();
        bbb();
        printf("repository/foo.c\n");
        exit (0);
}
""")
opts = '-Y ' + repository
# Make the entire repository non-writable, so we'll detect
# if we try to write into it accidentally.
test.writable('repository', 0)
# With no local sources, everything must come from the repository.
test.run(chdir = 'work1', options = opts, arguments = '.')
test.run(program = work1_foo, stdout = """repository/aaa.c
repository/bbb.c
repository/foo.c
""")
test.up_to_date(chdir = 'work1', options = opts, arguments = '.')
#
# A local copy of bbb.c must override the repository version.
test.write(['work1', 'bbb.c'], r"""
#include <stdio.h>
#include <stdlib.h>
void
bbb(void)
{
        printf("work1/bbb.c\n");
}
""")
test.run(chdir = 'work1', options = opts, arguments = '.')
test.run(program = work1_foo, stdout = """repository/aaa.c
work1/bbb.c
repository/foo.c
""")
test.up_to_date(chdir = 'work1', options = opts, arguments = '.')
#
test.write(['work1', 'aaa.c'], r"""
#include <stdio.h>
#include <stdlib.h>
void
aaa(void)
{
        printf("work1/aaa.c\n");
}
""")
test.write(['work1', 'foo.c'], r"""
#include <stdio.h>
#include <stdlib.h>
extern void aaa(void);
extern void bbb(void);
int
main(int argc, char *argv[])
{
        argv[argc++] = "--";
        aaa();
        bbb();
        printf("work1/foo.c\n");
        exit (0);
}
""")
test.run(chdir = 'work1', options = opts, arguments = '.')
test.run(program = work1_foo, stdout = """work1/aaa.c
work1/bbb.c
work1/foo.c
""")
test.up_to_date(chdir = 'work1', options = opts, arguments = '.')
#
# Deleting local files must fall back to the repository versions.
test.unlink(['work1', 'bbb.c'])
test.unlink(['work1', 'foo.c'])
test.run(chdir = 'work1', options = opts, arguments = '.')
test.run(program = work1_foo, stdout = """work1/aaa.c
repository/bbb.c
repository/foo.c
""")
test.up_to_date(chdir = 'work1', options = opts, arguments = '.')
#
# Second scenario: two stacked repositories, r.NEW searched before r.OLD.
test.subdir('r.NEW', 'r.OLD', 'work2')
workpath_r_NEW = test.workpath('r.NEW')
workpath_r_OLD = test.workpath('r.OLD')
work2_foo = test.workpath('work2', 'foo' + _exe)
SConstruct = """
env = Environment()
env.Program(target = 'foo', source = 'foo.c')
"""
test.write(['r.OLD', 'SConstruct'], SConstruct)
test.write(['r.NEW', 'SConstruct'], SConstruct)
test.write(['r.OLD', 'foo.c'], r"""
#include <stdio.h>
#include <stdlib.h>
int
main(int argc, char *argv[])
{
        argv[argc++] = "--";
        printf("r.OLD/foo.c\n");
        exit (0);
}
""")
opts = '-Y %s -Y %s' % (workpath_r_NEW, workpath_r_OLD)
# Make the repositories non-writable, so we'll detect
# if we try to write into them accidentally.
test.writable('r.OLD', 0)
test.writable('r.NEW', 0)
test.run(chdir = 'work2', options = opts, arguments = '.')
test.run(program = work2_foo, stdout = "r.OLD/foo.c\n")
test.up_to_date(chdir = 'work2', options = opts, arguments = '.')
#
# Adding foo.c to r.NEW must shadow the r.OLD version.
test.writable('r.NEW', 1)
test.write(['r.NEW', 'foo.c'], r"""
#include <stdio.h>
#include <stdlib.h>
int
main(int argc, char *argv[])
{
        argv[argc++] = "--";
        printf("r.NEW/foo.c\n");
        exit (0);
}
""")
test.writable('r.NEW', 0)
test.run(chdir = 'work2', options = opts, arguments = '.')
test.run(program = work2_foo, stdout = "r.NEW/foo.c\n")
test.up_to_date(chdir = 'work2', options = opts, arguments = '.')
#
# A local foo.c in work2 shadows both repositories.
test.write(['work2', 'foo.c'], r"""
#include <stdio.h>
#include <stdlib.h>
int
main(int argc, char *argv[])
{
        argv[argc++] = "--";
        printf("work2/foo.c\n");
        exit (0);
}
""")
test.run(chdir = 'work2', options = opts, arguments = '.')
test.run(program = work2_foo, stdout = "work2/foo.c\n")
test.up_to_date(chdir = 'work2', options = opts, arguments = '.')
#
# Repository changes must not dirty the build while a local copy exists.
test.writable('r.OLD', 1)
test.writable('r.NEW', 1)
test.write(['r.OLD', 'foo.c'], r"""
#include <stdio.h>
#include <stdlib.h>
int
main(int argc, char *argv[])
{
        argv[argc++] = "--";
        printf("r.OLD/foo.c 2\n");
        exit (0);
}
""")
test.write(['r.NEW', 'foo.c'], r"""
#include <stdio.h>
#include <stdlib.h>
int
main(int argc, char *argv[])
{
        argv[argc++] = "--";
        printf("r.NEW/foo.c 2\n");
        exit (0);
}
""")
test.writable('r.OLD', 0)
test.writable('r.NEW', 0)
test.up_to_date(chdir = 'work2', options = opts, arguments = '.')
#
test.unlink(['work2', 'foo.c'])
test.run(chdir = 'work2', options = opts, arguments = '.')
test.run(program = work2_foo, stdout = "r.NEW/foo.c 2\n")
test.up_to_date(chdir = 'work2', options = opts, arguments = '.')
#
# Removing r.NEW's copy falls back to r.OLD again.
test.writable('r.NEW', 1)
test.unlink(['r.NEW', 'foo.c'])
test.writable('r.NEW', 0)
test.run(chdir = 'work2', options = opts, arguments = '.')
test.run(program = work2_foo, stdout = "r.OLD/foo.c 2\n")
test.up_to_date(chdir = 'work2', options = opts, arguments = '.')
#
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
nilq/baby-python
|
python
|
from toykoin.daemon.messages import _verify_headers, add_headers
import pytest
def test_headers():
    """Headers produced by add_headers must pass _verify_headers."""
    _verify_headers(add_headers("version", b""))
    _verify_headers(add_headers("version", b"\x01"))
    # 12-character command names are accepted; 13 characters are not
    _verify_headers(add_headers("a" * 12, b"\x01"))
    with pytest.raises(Exception, match="Wrong payload length"):
        _verify_headers(add_headers("a" * 13, b"\x01"))
def test_invalid_headers():
    """An all-zero header must fail the checksum verification."""
    err_msg = "Wrong checksum, the message might have been tampered"
    with pytest.raises(Exception, match=err_msg):
        _verify_headers(b"\x00" * 20)
def test_invalid_length():
    """Extra trailing bytes must be rejected as a payload-length mismatch."""
    err_msg = "Wrong payload length"
    with pytest.raises(Exception, match=err_msg):
        _verify_headers(add_headers("a", b"") + b"\x01")
|
nilq/baby-python
|
python
|
# URL routing for the deliberately-vulnerable OWASP exercises app.
# NOTE(review): patterns() with string view names is the pre-Django-1.8
# style, so this file targets an old Django release; keep that in mind
# before modernizing.
from django.conf.urls import patterns, include, url
from django.views.generic.base import TemplateView
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
# Static, template-only pages.
urlpatterns = patterns('',
    url(r'^$', TemplateView.as_view(template_name='index.html'), name="home"),
    url(r'^about$',
        TemplateView.as_view(template_name='about.html'), name="about"),
    url(r'^conclusion$',
        TemplateView.as_view(template_name='conclusion.html'),
        name="conclusion"),
)
# One URL group per OWASP Top-10 exercise; string view names resolve
# inside badguys.vulnerable.views.
urlpatterns += patterns('badguys.vulnerable.views',
    # Exercise 01 - Injection Attacks
    url(r'^injection$',
        TemplateView.as_view(template_name='vulnerable/injection/index.html'),
        name="injection"),
    url(r'^injection/sql$', 'sql', name="injection-sql"),
    url(r'^injection/file-access$', 'file_access',
        name="injection-file-access"),
    url(r'^user-pic$', 'user_pic', name='user-pic'),
    url(r'^injection/code-execution$', 'code_execution',
        name="injection-code-execution"),
    # Exercise 02 - Broken Authentication & Session Management
    url(r'^broken-auth-and-session-management$',
        TemplateView.as_view(template_name='vulnerable/broken_auth/index.html'),
        name='broken-auth'),
    # Exercise 03 - XSS Attacks
    url(r'^cross-site-scripting$',
        TemplateView.as_view(template_name='vulnerable/xss/index.html'),
        name="xss"),
    url(r'^cross-site-scripting/path-matching/(?P<path>.+)$', 'xss_path',
        name="xss-path"),
    url(r'^cross-site-scripting/form-field$', 'xss_form',
        name="xss-form"),
    url(r'^cross-site-scripting/query-params$', 'xss_query',
        name="xss-query"),
    # Exercise 04 - Insecure Direct Object References
    url(r'^direct-object-references$',
        TemplateView.as_view(template_name='vulnerable/direct_object_references/index.html'),
        name="direct-object-references"),
    url(r'^direct-object-references/users/(?P<userid>\d+)$', 'dor_user_profile',
        name='direct-object-references-profile'),
    # Exercise 05 - Security Misconfiguration
    url(r'^misconfiguration$',
        TemplateView.as_view(template_name='vulnerable/misconfig/index.html'),
        name='misconfig'),
    url(r'^misconfiguration/boom$', 'boom', name='misconfig-boom'),
    # Exercise 06 - Sensitive Data Exposure
    url(r'^data-exposure$',
        TemplateView.as_view(template_name='vulnerable/exposure/index.html'),
        name='exposure'),
    url(r'^data-exposure/login$', 'exposure_login',
        name='exposure-login'),
    # Exercise 07 - Missing Function-Level Access Control
    url(r'^missing-access-control$',
        TemplateView.as_view(template_name='vulnerable/access_control/index.html'),
        name='access-control'),
    url(r'^missing-access-control/happy-page$', 'missing_access_control',
        name='access-control-missing'),
    # Exercise 08 - CSRF Attacks
    url(r'^csrf$',
        TemplateView.as_view(template_name='vulnerable/csrf/index.html'),
        name='csrf'),
    url(r'^csrf/image$', 'csrf_image',
        name='csrf-image'),
    url(r'^csrf/third-party-site$',
        TemplateView.as_view(template_name='vulnerable/csrf/third_party.html'),
        name='csrf-third-party'),
    url(r'^csrf/gift-card$',
        TemplateView.as_view(template_name='vulnerable/csrf/gift_card.html'),
        name='csrf-gift-card'),
    # Exercise 09 - Using Known Vulnerable Components
    url(r'^vulnerable-components$',
        TemplateView.as_view(template_name='vulnerable/components/index.html'),
        name='components'),
    # Exercise 10 - Unvalidated Redirects & Forwards
    url(r'^redirects-and-forwards$',
        TemplateView.as_view(template_name='vulnerable/redirects/index.html'),
        name='redirects'),
    url(r'^redirects-and-forwards/redirects$',
        TemplateView.as_view(template_name='vulnerable/redirects/redirects.html'),
        name='redirects-redirects'),
    url(r'^redirects-and-forwards/redirect$', 'unvalidated_redirect',
        name='redirects-redirect'),
    url(r'^redirects-and-forwards/forwards$',
        TemplateView.as_view(template_name='vulnerable/redirects/forwards.html'),
        name='redirects-forwards'),
    url(r'^redirects-and-forwards/forward$', 'unvalidated_forward', name='redirects-forward')
    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    # url(r'^admin/', include(admin.site.urls)),
)
# Serve static assets in development.
urlpatterns += staticfiles_urlpatterns()
|
nilq/baby-python
|
python
|
# Flask application bootstrap: build the app object, load configuration,
# and create the shared SQLAlchemy handle used by the rest of the package.
import os
from flask import Flask, render_template, url_for, request, redirect
from flask_sqlalchemy import SQLAlchemy
# NOTE(review): `os` and several of the flask names imported above are not
# used in this module; they may be consumed via `from app import *`
# elsewhere -- confirm before removing.
app = Flask(__name__)
app.config.from_object('config.DevelopmentConfig')
# Expose SECRET_KEY on the app object for session signing.
app.secret_key = app.config.get('SECRET_KEY')
db = SQLAlchemy(app)
# Imported last so `routes` can import `app`/`db` without a circular import.
from . import routes
|
nilq/baby-python
|
python
|
import pytest
from dredis.commands import REDIS_COMMANDS
# got the list from a real redis server using the following code:
"""
import pprint
import redis
r = redis.StrictRedis()
commands = r.execute_command('COMMAND')
pprint.pprint({c[0]: int(c[1]) for c in commands})
"""
# Arity follows the Redis COMMAND convention: a positive value is the exact
# number of arguments (command name included); a negative value -N means
# "at least N arguments".
EXPECTED_ARITY = {
    'append': 3,
    'asking': 1,
    'auth': 2,
    'bgrewriteaof': 1,
    'bgsave': -1,
    'bitcount': -2,
    'bitfield': -2,
    'bitop': -4,
    'bitpos': -3,
    'blpop': -3,
    'brpop': -3,
    'brpoplpush': 4,
    'client': -2,
    'cluster': -2,
    'command': 0,
    'config': -2,
    'dbsize': 1,
    'debug': -1,
    'decr': 2,
    'decrby': 3,
    'del': -2,
    'discard': 1,
    'dump': 2,
    'echo': 2,
    'eval': -3,
    'evalsha': -3,
    'exec': 1,
    'exists': -2,
    'expire': 3,
    'expireat': 3,
    'flushall': -1,
    'flushdb': -1,
    'geoadd': -5,
    'geodist': -4,
    'geohash': -2,
    'geopos': -2,
    'georadius': -6,
    'georadius_ro': -6,
    'georadiusbymember': -5,
    'georadiusbymember_ro': -5,
    'get': 2,
    'getbit': 3,
    'getrange': 4,
    'getset': 3,
    'hdel': -3,
    'hexists': 3,
    'hget': 3,
    'hgetall': 2,
    'hincrby': 4,
    'hincrbyfloat': 4,
    'hkeys': 2,
    'hlen': 2,
    'hmget': -3,
    'hmset': -4,
    'host:': -1,
    'hscan': -3,
    'hset': -4,
    'hsetnx': 4,
    'hstrlen': 3,
    'hvals': 2,
    'incr': 2,
    'incrby': 3,
    'incrbyfloat': 3,
    'info': -1,
    'keys': 2,
    'lastsave': 1,
    'latency': -2,
    'lindex': 3,
    'linsert': 5,
    'llen': 2,
    'lpop': 2,
    'lpush': -3,
    'lpushx': -3,
    'lrange': 4,
    'lrem': 4,
    'lset': 4,
    'ltrim': 4,
    'memory': -2,
    'mget': -2,
    'migrate': -6,
    'module': -2,
    'monitor': 1,
    'move': 3,
    'mset': -3,
    'msetnx': -3,
    'multi': 1,
    'object': -2,
    'persist': 2,
    'pexpire': 3,
    'pexpireat': 3,
    'pfadd': -2,
    'pfcount': -2,
    'pfdebug': -3,
    'pfmerge': -2,
    'pfselftest': 1,
    'ping': -1,
    'post': -1,
    'psetex': 4,
    'psubscribe': -2,
    'psync': 3,
    'pttl': 2,
    'publish': 3,
    'pubsub': -2,
    'punsubscribe': -1,
    'randomkey': 1,
    'readonly': 1,
    'readwrite': 1,
    'rename': 3,
    'renamenx': 3,
    'replconf': -1,
    'restore': -4,
    'restore-asking': -4,
    'role': 1,
    'rpop': 2,
    'rpoplpush': 3,
    'rpush': -3,
    'rpushx': -3,
    'sadd': -3,
    'save': 1,
    'scan': -2,
    'scard': 2,
    'script': -2,
    'sdiff': -2,
    'sdiffstore': -3,
    'select': 2,
    'set': -3,
    'setbit': 4,
    'setex': 4,
    'setnx': 3,
    'setrange': 4,
    'shutdown': -1,
    'sinter': -2,
    'sinterstore': -3,
    'sismember': 3,
    'slaveof': 3,
    'slowlog': -2,
    'smembers': 2,
    'smove': 4,
    'sort': -2,
    'spop': -2,
    'srandmember': -2,
    'srem': -3,
    'sscan': -3,
    'strlen': 2,
    'subscribe': -2,
    'substr': 4,
    'sunion': -2,
    'sunionstore': -3,
    'swapdb': 3,
    'sync': 1,
    'time': 1,
    'touch': -2,
    'ttl': 2,
    'type': 2,
    'unlink': -2,
    'unsubscribe': -1,
    'unwatch': 1,
    'wait': 3,
    'watch': -2,
    'zadd': -4,
    'zcard': 2,
    'zcount': 4,
    'zincrby': 4,
    'zinterstore': -4,
    'zlexcount': 4,
    'zrange': -4,
    'zrangebylex': -4,
    'zrangebyscore': -4,
    'zrank': 3,
    'zrem': -3,
    'zremrangebylex': 4,
    'zremrangebyrank': 4,
    'zremrangebyscore': 4,
    'zrevrange': -4,
    'zrevrangebylex': -4,
    'zrevrangebyscore': -4,
    'zrevrank': 3,
    'zscan': -3,
    'zscore': 3,
    'zunionstore': -4,
}
@pytest.mark.parametrize('command, func', REDIS_COMMANDS.items())
def test_arity(command, func):
    """Every implemented command must declare the same arity as real Redis."""
    expected = EXPECTED_ARITY[command.lower()]
    assert func.arity == expected
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'threading_design.ui'
#
# Created: Thu Aug 6 13:47:18 2015
# by: PyQt4 UI code generator 4.10.4
# Standard library
import math
import os  # used by MyMplCanvas.export (os.getcwd / os.path.abspath)

# Third-party: matplotlib and Qt bindings
from matplotlib import rcParams
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
import matplotlib.image as image
from PyQt5 import QtCore, QtGui
from PyQt5.QtCore import Qt, QTimer, QCoreApplication
from PyQt5.QtWidgets import (QWidget, QTreeView, QMessageBox, QHBoxLayout,
                             QFileDialog, QLabel, QSlider, QCheckBox,
                             QLineEdit, QVBoxLayout, QApplication, QPushButton,
                             QTableWidget, QTableWidgetItem, QSizePolicy,
                             QGridLayout, QGroupBox, QMainWindow, QAction,
                             QHeaderView, QComboBox, QProgressBar)
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    # PyQt5 removed QString (strings are already unicode), so fall back to
    # an identity function.
    def _fromUtf8(s):
        return s
"""
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
"""
# Let matplotlib auto-adjust subplot parameters so labels are not clipped.
rcParams.update({'figure.autolayout': True})
class MyMplCanvas(FigureCanvas):
    """Matplotlib canvas embedded as a Qt widget.

    Ultimately, this is a QWidget (as well as a FigureCanvasAgg, etc.).
    The first mouse press arms PDF export; subsequent presses save the
    figure to ExportedGraph.pdf and show a confirmation dialog.
    """

    def __init__(self, parent=None, width=5, height=4, dpi=100):
        self.fig = Figure(figsize=(width, height), dpi=dpi)
        self.axes = self.fig.add_subplot(111)
        FigureCanvas.__init__(self, self.fig)
        self.setParent(parent)
        FigureCanvas.setSizePolicy(self,
                                   QSizePolicy.Expanding,
                                   QSizePolicy.Expanding)
        FigureCanvas.updateGeometry(self)
        # Tracks whether the export handler has been connected yet.
        self._export_connected = False
        FigureCanvas.mpl_connect(self, 'button_press_event', self.double_click)

    def export(self, event):
        """Save the current figure to a PDF and tell the user where it went."""
        filename = "ExportedGraph.pdf"
        self.fig.savefig(filename)
        msg = QMessageBox()
        msg.setIcon(QMessageBox.Information)
        msg.setText("Saved a copy of the graphics window to {}".format(filename))
        msg.setWindowTitle("Saved PDF File")
        # Bug fix: `os` was used here without ever being imported, so this
        # method raised NameError at runtime; `import os` is now at file top.
        msg.setDetailedText("The full path of the file is \n{}".format(os.path.abspath(os.getcwd())))
        msg.setStandardButtons(QMessageBox.Ok)
        msg.setWindowModality(Qt.ApplicationModal)
        msg.exec_()
        print("Exported PDF file")

    def double_click(self, event):
        # Bug fix: the old code re-connected self.export on EVERY press,
        # stacking handlers so one click produced multiple exports.
        # Connect it only once.
        if not self._export_connected:
            self._export_connected = True
            FigureCanvas.mpl_connect(self, 'button_press_event', self.export)
class MyDynamicMplCanvas(MyMplCanvas):
    """A canvas that updates itself frequently with a new plot."""
    def __init__(self, *args, **kwargs):
        MyMplCanvas.__init__(self, *args, **kwargs)
        self.axes.set_xlabel("X")
        self.axes.set_ylabel("Y")
        self.axes.set_title('Truss')
    def plotTruss(self,nodeList,beamList):
        """Redraw the truss: roller images for constraints, arrows for applied
        and reaction forces, dots/labels for nodes and lines for beams.

        Nodelist Format: [X,Y,Fix X, Fix Y, Rx, Ry, Applied Force, Force Angle]
        beamList rows are [fromNode, toNode] indices into nodeList
        (-1 marks an empty/unused row).
        """
        self.axes.cla() #Clear axes
        # Plot roller symbol for constraints
        rollerSize = 0.1
        rollerForX = image.imread('image/RollerH.png')
        rollerForY = image.imread('image/RollerV.png')
        # NOTE(review): constraintLocation appears unused -- confirm before removing.
        constraintLocation = [5,0]
        off = 0.05          # text label offset from the node position
        arrowLen = 0.5      # length of applied-force arrows
        for i in range(0,len(nodeList)):
            if nodeList[i][2] == True: # X is constrained
                self.axes.imshow(rollerForX, extent=(nodeList[i][0]-2*rollerSize, nodeList[i][0], nodeList[i][1] - rollerSize, nodeList[i][1] + rollerSize), zorder=2)
            if nodeList[i][3] == True:  # Y is constrained
                self.axes.imshow(rollerForY, extent=(nodeList[i][0]-rollerSize, nodeList[i][0] + rollerSize,
                    nodeList[i][1] - 2*rollerSize, nodeList[i][1]), zorder=-1)
            # Plot arrows for applied forces
            if nodeList[i][6] != 0:
                dx = arrowLen*math.cos(math.radians(nodeList[i][7]))
                dy = arrowLen*math.sin(math.radians(nodeList[i][7]))
                self.axes.arrow(nodeList[i][0], nodeList[i][1], dx, dy,color='r',zorder=3,shape='full',head_width=0.075, head_length=0.15)
            # Plot nodes (with 1-based labels)
            self.axes.plot([nodeList[i][0]],[nodeList[i][1]],'ko')
            self.axes.text(nodeList[i][0]+off,nodeList[i][1]+off, '%i'%(i+1), fontsize=10)
            # Plot Reaction Forces
            if nodeList[i][4] == True: # X is constrained
                dx = -arrowLen/1.5
                dy = 0
                self.axes.arrow(nodeList[i][0]-dx, nodeList[i][1]-dy, dx, dy,color='g',
                    length_includes_head = True,zorder=3,shape='full',head_width=0.075, head_length=0.15)
            if nodeList[i][5] == True:  # Y reaction
                dx = 0
                dy = arrowLen/1.5
                self.axes.arrow(nodeList[i][0]-dx, nodeList[i][1]-dy, dx, dy,color='g',
                    length_includes_head = True,zorder=3,shape='full',head_width=0.075, head_length=0.15)
        # Plot mechanism bars (beams), labelled at their midpoints
        for i in range(0,len(beamList)):
            fromNode = beamList[i][0]
            toNode = beamList[i][1]
            if (fromNode != -1 and toNode != -1):
                self.axes.plot([nodeList[fromNode][0],nodeList[toNode][0]],[nodeList[fromNode][1],nodeList[toNode][1]],'k')
                midX = (nodeList[fromNode][0]+nodeList[toNode][0])/2
                midY = (nodeList[fromNode][1] + nodeList[toNode][1])/2
                self.axes.text(midX+off,midY+off, '%i'%(i+1), fontsize=10)
        #self.axes.set_xlabel(data_label)
        #self.axes.set_ylabel("Estimated Prob. Density Funct.")
        #self.axes.set_title(title)
        #self.axes.legend(shadow=True)
        # Equal aspect ratio plus margins so arrows/rollers stay visible.
        self.axes.axis('equal')
        self.axes.margins(0.2, 0.2)
        self.draw()
        #print("Finished Drawing Normalized Histogram.")
class Ui_MainWindow(object):
    """Builds the truss-optimization main window UI.

    NOTE(review): setupUi calls self.setWindowTitle / self.menuBar /
    self.show etc., which are QMainWindow methods, not methods of this
    plain-object class -- it looks like this class is intended to be mixed
    into a QMainWindow subclass.  Confirm how it is instantiated.
    """
    def setupUi(self, MainWindow):
        #Builds GUI
        # Four input tables:
        # one with initial node coordinates (3xn: Node,X,Y)
        # one with node connectivity (3xbeams: Element, From Node, To Node)
        # one with reactions locations (3xreactions: Reaction, Node, Direction)
        # and one with external loading (3xForces: On Node, Force, Angle)
        # Dynamic plot updates with triangles for reactions, lines for beams and filled circles for nodes, and arrow for applied forces
        # Checks: all nodes have at least one member connectivity
        #
        # Objective function: Sum(Area[i]*length[i])
        # subject to:
        #    max(stresses) < maxStress
        #    Any locaton constraints, such as: (generated checklist?)
        #       Node[1][0] = 1
        MainWindow.setObjectName(_fromUtf8("MainWindow"))
        MainWindow.resize(1000, 800)
        self.centralwidget = QWidget(MainWindow)
        self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
        ### Controls box ###
        controlsBox = QGroupBox("Controls")
        controlsBoxLayout = QGridLayout()
        # Start Button
        self.startButton = QPushButton('Start',self)
        controlsBoxLayout.addWidget(self.startButton,0,0)
        # Stop Button (disabled until an optimization run starts)
        self.stopButton = QPushButton('Stop',self)
        self.stopButton.setEnabled(False)
        controlsBoxLayout.addWidget(self.stopButton,0,1)
        # Damping Label and slider (slider value 1..1000 maps to 0.001..1.0)
        self.dampingLabel = QLabel("Damping = 0.1",self)
        controlsBoxLayout.addWidget(self.dampingLabel,1,0)
        self.dampingSlider = QSlider(Qt.Horizontal)
        self.dampingSlider.setMinimum(1)
        self.dampingSlider.setMaximum(1000)
        self.dampingSlider.setValue(100)
        controlsBoxLayout.addWidget(self.dampingSlider,1,1)
        # Cross section selection dropdown menu
        # Max Iterations text box
        self.maxIterationsLabel = QLabel("Maximum Iterations",self)
        controlsBoxLayout.addWidget(self.maxIterationsLabel,2,0)
        self.maxIterationsTextBox = QLineEdit(self)
        self.maxIterationsTextBox.setText('100')
        controlsBoxLayout.addWidget(self.maxIterationsTextBox,2,1)
        # Max stress text box
        self.maxStressControlLabel = QLabel("Max Allowable Stress",self)
        controlsBoxLayout.addWidget(self.maxStressControlLabel,3,0)
        self.maxStressTextBox = QLineEdit(self)
        self.maxStressTextBox.setText('10')
        controlsBoxLayout.addWidget(self.maxStressTextBox,3,1)
        # Density optional text box
        self.densityLabel = QLabel("Density",self)
        controlsBoxLayout.addWidget(self.densityLabel,4,0)
        self.densityTextBox = QLineEdit(self)
        self.densityTextBox.setText('1')
        controlsBoxLayout.addWidget(self.densityTextBox,4,1)
        # Beam cross-section shape selector
        self.crossSectionLabel = QLabel("Cross Section",self)
        controlsBoxLayout.addWidget(self.crossSectionLabel,5,0)
        self.crossSectionBox = QComboBox(self)
        self.crossSectionBox.addItem("Rectangular - Equal Thickness")
        self.crossSectionBox.addItem("Rectangular")
        self.crossSectionBox.addItem("Rectangular - Hollow")
        self.crossSectionBox.addItem("Square")
        self.crossSectionBox.addItem("Square - Hollow")
        self.crossSectionBox.addItem("Round")
        self.crossSectionBox.addItem("Round - Hollow")
        self.crossSectionBox.activated[str].connect(self.crossSectionChanged)
        controlsBoxLayout.addWidget(self.crossSectionBox,5,1)
        controlsBox.setLayout(controlsBoxLayout)
        ### Input Tables Box ###
        inputBox = QGroupBox('Input')
        inputBoxLayout = QGridLayout()
        # Node Table
        self.nodeTableLabel = QLabel("Enter Node Positions",self)
        self.nodeTableLabel.setAlignment(Qt.AlignCenter)
        inputBoxLayout.addWidget(self.nodeTableLabel,0,0,1,2)
        self.nodesTable = QTableWidget()
        self.nodesTable.setColumnCount(6)
        self.nodesTable.setRowCount(1) # Make 1 longer than number of elements for manual addition of elements
        self.nodesTable.setHorizontalHeaderLabels(['X','Y','Fix X','Fix Y','Reaction X','Reaction Y'])
        nodeHeader = self.nodesTable.horizontalHeader()
        nodeHeader.setSectionResizeMode(0, QHeaderView.Stretch)
        nodeHeader.setSectionResizeMode(1, QHeaderView.Stretch)
        nodeHeader.setSectionResizeMode(2, QHeaderView.ResizeToContents)
        nodeHeader.setSectionResizeMode(3, QHeaderView.ResizeToContents)
        nodeHeader.setSectionResizeMode(4, QHeaderView.ResizeToContents)
        nodeHeader.setSectionResizeMode(5, QHeaderView.ResizeToContents)
        inputBoxLayout.addWidget(self.nodesTable,1,0,1,2)
        # Beam Table
        self.beamTableLabel = QLabel("Enter Beam Connections",self)
        self.beamTableLabel.setAlignment(Qt.AlignCenter)
        inputBoxLayout.addWidget(self.beamTableLabel,2,0)
        self.beamTable = QTableWidget()
        self.beamTable.setColumnCount(2)
        self.beamTable.setRowCount(1) # Make 1 longer than number of elements for manual addition of elements
        self.beamTable.setHorizontalHeaderLabels(['From Node','To Node'])
        beamHeader = self.beamTable.horizontalHeader()
        beamHeader.setSectionResizeMode(0, QHeaderView.Stretch)
        beamHeader.setSectionResizeMode(1, QHeaderView.Stretch)
        inputBoxLayout.addWidget(self.beamTable,3,0)
        # External Force Table
        self.forceTableLabel = QLabel("Enter Applied Forces",self)
        self.forceTableLabel.setAlignment(Qt.AlignCenter)
        inputBoxLayout.addWidget(self.forceTableLabel,2,1)
        self.forceTable = QTableWidget()
        self.forceTable.setColumnCount(3)
        self.forceTable.setRowCount(1) # Make 1 longer than number of elements for manual addition of elements
        self.forceTable.setHorizontalHeaderLabels(['Node','Force','Angle'])
        forceTableHeader = self.forceTable.horizontalHeader()
        forceTableHeader.setSectionResizeMode(0, QHeaderView.ResizeToContents)
        forceTableHeader.setSectionResizeMode(1, QHeaderView.Stretch)
        forceTableHeader.setSectionResizeMode(2, QHeaderView.Stretch)
        inputBoxLayout.addWidget(self.forceTable,3,1)
        inputBox.setLayout(inputBoxLayout)
        # Plot: live-updating truss canvas
        self.graph_canvas = MyDynamicMplCanvas(self.centralwidget, width=5, height=4, dpi=120)
        ### Results Tables Box ###
        resultsBox = QGroupBox("Results")
        resultsBoxLayout = QGridLayout()
        self.resultsBarLabel = QLabel("Optimization Progress: ",self)
        resultsBoxLayout.addWidget(self.resultsBarLabel,0,0)
        self.resultsBar = QProgressBar(self)
        resultsBoxLayout.addWidget(self.resultsBar,0,1)
        # Node Table
        self.nodeResultsTableLabel = QLabel("Optimized Node Positions",self)
        self.nodeResultsTableLabel.setAlignment(Qt.AlignCenter)
        resultsBoxLayout.addWidget(self.nodeResultsTableLabel,1,0)
        self.nodesResultsTable = QTableWidget()
        self.nodesResultsTable.setColumnCount(3)
        self.nodesResultsTable.setRowCount(1) # Make 1 longer than number of elements for manual addition of elements
        self.nodesResultsTable.setHorizontalHeaderLabels(['Node','X','Y'])
        nodeResultsHeader = self.nodesResultsTable.horizontalHeader()
        nodeResultsHeader.setSectionResizeMode(0, QHeaderView.ResizeToContents)
        nodeResultsHeader.setSectionResizeMode(1, QHeaderView.Stretch)
        nodeResultsHeader.setSectionResizeMode(2, QHeaderView.Stretch)
        resultsBoxLayout.addWidget(self.nodesResultsTable,2,0)
        # Beam Table
        self.beamResultsTableLabel = QLabel("Optimized Beam Properties",self)
        self.beamResultsTableLabel.setAlignment(Qt.AlignCenter)
        resultsBoxLayout.addWidget(self.beamResultsTableLabel,1,1)
        self.beamResultsTable = QTableWidget()
        self.beamResultsTable.setColumnCount(4)
        self.beamResultsTable.setRowCount(1) # Make 1 longer than number of elements for manual addition of elements
        self.beamResultsTable.setHorizontalHeaderLabels(['Length','OD', 'ID', 'Stress'])
        beamResultsHeader = self.beamResultsTable.horizontalHeader()
        beamResultsHeader.setSectionResizeMode(0, QHeaderView.Stretch)
        beamResultsHeader.setSectionResizeMode(1, QHeaderView.Stretch)
        beamResultsHeader.setSectionResizeMode(2, QHeaderView.Stretch)
        beamResultsHeader.setSectionResizeMode(3, QHeaderView.Stretch)
        resultsBoxLayout.addWidget(self.beamResultsTable,2,1)
        resultsBox.setLayout(resultsBoxLayout)
        #Now we can set all the previously defined boxes into the main window
        master_layout = QGridLayout()
        master_layout.addWidget(inputBox,0,0)
        master_layout.addWidget(resultsBox,1,1)
        master_layout.addWidget(controlsBox,1,0)
        master_layout.addWidget(self.graph_canvas,0,1)
        #master_layout.addWidget(distribution_box,1,1)
        #self.centralwidget.addWidget(master_layout)
        self.centralwidget.setLayout(master_layout)
        self.setWindowTitle('Four Bar Linkage Optimization')
        self.activateWindow()
        self.raise_()
        self.show()
        MainWindow.setCentralWidget(self.centralwidget)
        # File menu: open / save input / save optimized / exit.
        # NOTE(review): load_data, saveInputData, saveOptimizedData and
        # crossSectionChanged are not defined in this class -- presumably
        # provided by the class this is mixed into; confirm.
        menuBar = self.menuBar()
        file_menu = menuBar.addMenu('&File')
        open_file = QAction('&Open', self)
        open_file.setShortcut('Ctrl+O')
        open_file.setStatusTip('Load Truss Design')
        open_file.triggered.connect(self.load_data)
        file_menu.addAction(open_file)
        saveInput_file = QAction('&Save Input Design',self)
        saveInput_file.setStatusTip('Save Optimized Design')
        saveInput_file.triggered.connect(self.saveInputData)
        file_menu.addAction(saveInput_file)
        saveOptimized_file = QAction('&Save Optimized Design',self)
        saveOptimized_file.setShortcut('Ctrl+S')
        saveOptimized_file.setStatusTip('Save Optimized Design')
        saveOptimized_file.triggered.connect(self.saveOptimizedData)
        file_menu.addAction(saveOptimized_file)
        exit_action = QAction('&Exit', self)
        exit_action.setShortcut('Ctrl+Q')
        exit_action.setStatusTip('Exit application')
        exit_action.triggered.connect(self.close) #This is built in
        file_menu.addAction(exit_action)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
|
nilq/baby-python
|
python
|
# global
import torch
from typing import Union, Optional, Tuple, List
def argsort(x: torch.Tensor,
            axis: int = -1,
            descending: bool = False,
            stable: bool = True)\
        -> torch.Tensor:
    """Return the indices that would sort ``x`` along ``axis``.

    Parameters
    ----------
    x : torch.Tensor
        Input tensor.
    axis : int
        Dimension to sort along (default: last).
    descending : bool
        Sort in decreasing order when True.
    stable : bool
        Keep the relative order of equal elements when True.

    Returns
    -------
    torch.Tensor
        Tensor of sorting indices, same shape as ``x``.
    """
    # Bug fix: ``stable`` was accepted but never forwarded to torch, so the
    # documented stable ordering of ties silently did not happen.
    return torch.argsort(x, dim=axis, descending=descending, stable=stable)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# Compare the curated charge of each ModelSEED compound against the charge
# implied by its InChI structure and write the differences to a report file.
import os, sys
temp=list();
header=1;
# NOTE(review): `temp`, `header` and the `os` import appear unused in this
# script -- confirm before removing.
sys.path.append('../../Libs/Python')
from BiochemPy import Reactions, Compounds, InChIs
CompoundsHelper = Compounds()
Compounds_Dict = CompoundsHelper.loadCompounds()
Structures_Dict = CompoundsHelper.loadStructures(["InChI"],["ModelSEED"])
diff_file = open("Compound_Charge_Differences.txt", 'w')
for cpd in sorted(Compounds_Dict.keys()):
    # Skip compounds with no structure record, or with no InChI structure.
    if(cpd not in Structures_Dict):
        diff_file.write("Zero structures for "+cpd+"\n")
        continue
    if('InChI' not in Structures_Dict[cpd]):
        diff_file.write("No InChI structure for "+cpd+"\n")
        continue
    current_charge = int(Compounds_Dict[cpd]['charge'])
    #Parse out InChI formula and layers
    (inchi_formula,inchi_layers) = InChIs.parse(Structures_Dict[cpd]['InChI'])
    # Charge implied by the InChI 'q' (charge) and 'p' (proton) layers.
    inchi_charge = InChIs.charge(inchi_layers['q'],inchi_layers['p'])
    if(inchi_charge != current_charge):
        #Proton-specific (i.e. minor difference)
        if(inchi_layers['q'] == ""):
            diff_file.write("Proton difference for "+cpd+": "+str(current_charge)+" / "+str(inchi_charge)+"\n")
        else:
            diff_file.write("Charge difference for "+cpd+": "+str(current_charge)+" / "+str(inchi_charge)+"\n")
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-02-10 19:34
# Initial migration: creates the GreenSheet, Profile and Project tables and
# the GreenSheet <-> Project many-to-many link.
# NOTE(review): several choice labels contain typos ('Temp (six months' is
# missing its closing parenthesis; 'Seperation' should be 'Separation').
# They are recorded migration state, so fix them in the model plus a
# follow-up migration rather than by editing this generated file.
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='GreenSheet',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('full_name', models.CharField(max_length=255)),
                ('position', models.CharField(max_length=255)),
                ('supervisor_name', models.CharField(max_length=255)),
                ('ee_num', models.CharField(max_length=255)),
                ('aep_status_type', models.IntegerField(choices=[(1, 'Regular'), (2, 'On Call'), (3, 'Temp (six months'), (4, 'Work Study'), (5, 'Intern')])),
                ('aep_period_type', models.IntegerField(choices=[(1, 'Monthly'), (2, 'Hourly')])),
                ('aep_hire_type', models.IntegerField(choices=[(1, 'New Hire'), (2, 'Rehire')])),
                ('aep_amount_time', models.FloatField()),
                ('aep_pay_rate', models.FloatField()),
                ('aep_start_date', models.DateField()),
                ('prc_payroll_change_type', models.IntegerField(choices=[(1, 'Increase'), (2, 'Promotion'), (3, 'Change of Status'), (4, 'Change of FTE / # Hours'), (5, 'Change of Supervisor')])),
                ('prc_change_from', models.CharField(max_length=255)),
                ('prc_change_to', models.CharField(max_length=255)),
                ('prc_effective_date', models.DateField()),
                ('prc_comments', models.CharField(max_length=1024)),
                ('es_effective_date', models.DateField()),
                ('es_voluntary_type', models.IntegerField(choices=[(1, 'Voluntary Seperation'), (2, 'Involuntary Seperation')])),
                ('es_from_type', models.IntegerField(choices=[(1, 'Voluntary Seperation'), (2, 'Involuntary Seperation')])),
                ('general_comments', models.CharField(max_length=1024)),
                ('approved', models.BooleanField(default=False)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
        migrations.CreateModel(
            name='Project',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('code', models.CharField(max_length=255)),
                ('phase', models.CharField(max_length=255)),
            ],
        ),
        migrations.AddField(
            model_name='greensheet',
            name='projects_to_charge',
            field=models.ManyToManyField(to='myforms.Project'),
        ),
    ]
|
nilq/baby-python
|
python
|
import shutil
import os
from dataclasses import dataclass
from typing import Generator
import pytest
import fabsite
from test import path
@dataclass
class Pages:
    """Bundle of file paths created by the `pages` fixture."""
    # Temporary blog directory that holds all of the files below.
    blog_path: str
    # Post whose filename has no date prefix ("foo.txt").
    undated_path: str
    # Post named with a date prefix ("2018-01-01-foo.txt").
    dated_path: str
    # Post carrying "<!-- key: value -->" header comments.
    normal_post_path: str
    # Markdown post (".md" extension, should be rendered).
    md_post_path: str
    # Markdown-style text in a ".txt" file (should NOT be rendered).
    no_md_post_path: str
@pytest.fixture
def pages() -> Generator[Pages, None, None]:
    """Create a temporary blog directory with one file per content case.

    Yields a Pages bundle of paths; the whole directory is removed again
    after the test completes.
    """
    blog_path = path.temppath("blog")
    undated_path = os.path.join(blog_path, "foo.txt")
    dated_path = os.path.join(blog_path, "2018-01-01-foo.txt")
    normal_post_path = os.path.join(blog_path, "baz.txt")
    md_post_path = os.path.join(blog_path, "qux.md")
    no_md_post_path = os.path.join(blog_path, "qux.txt")
    os.makedirs(blog_path)
    with open(undated_path, "w") as f:
        f.write("hello world")
    with open(dated_path, "w") as f:
        f.write("hello world")
    # Headers are embedded as HTML comments before the body.
    with open(normal_post_path, "w") as f:
        f.write("<!-- a: 1 -->\n<!-- b: 2 -->\nFoo")
    with open(md_post_path, "w") as f:
        f.write("*Foo*")
    with open(no_md_post_path, "w") as f:
        f.write("*Foo*")
    yield Pages(
        blog_path,
        undated_path,
        dated_path,
        normal_post_path,
        md_post_path,
        no_md_post_path,
    )
    # Teardown: remove the temporary blog directory.
    shutil.rmtree(blog_path)
class TestContent:
    """Tests for fabsite.read_content (body, date, slug, headers, Markdown)."""
    def test_content_content(self, pages):
        """File body is returned unchanged for plain text posts."""
        content = fabsite.read_content(pages.undated_path)
        assert content["content"] == "hello world"
    def test_content_date(self, pages):
        """Date is parsed from a YYYY-MM-DD filename prefix."""
        content = fabsite.read_content(pages.dated_path)
        assert content["date"] == "2018-01-01"
    def test_content_date_missing(self, pages):
        """Posts without a date prefix fall back to the epoch date."""
        content = fabsite.read_content(pages.undated_path)
        assert content["date"] == "1970-01-01"
    def test_content_slug_dated(self, pages):
        """The date prefix is stripped from the slug."""
        content = fabsite.read_content(pages.dated_path)
        assert content["slug"] == "foo"
    def test_content_slug_undated(self, pages):
        content = fabsite.read_content(pages.undated_path)
        assert content["slug"] == "foo"
    def test_content_headers(self, pages):
        """HTML-comment headers are extracted and removed from the body."""
        content = fabsite.read_content(pages.normal_post_path)
        assert content["a"] == "1"
        assert content["b"] == "2"
        assert content["content"] == "Foo"
    def test_markdown_rendering(self, pages):
        """.md files are rendered to HTML."""
        content = fabsite.read_content(pages.md_post_path)
        assert content["content"] == "<p><em>Foo</em></p>\n"
    @pytest.mark.skip(reason="escape unittest so we can use fixtures")
    def test_markdown_import_error(self, pages, caplog):
        content = fabsite.read_content(pages.md_post_path)
        assert content["content"] == "*Foo*"
        err = f"WARNING: Cannot render Markdown in {pages.md_post_path}: Error forced by text"
        assert err in caplog.text
    def test_no_markdown_rendering(self, pages):
        """.txt files are never rendered, even if they contain Markdown."""
        content = fabsite.read_content(pages.no_md_post_path)
        assert content["content"] == "*Foo*"
    @pytest.mark.skip(reason="escape unittest so we can use fixtures")
    def test_no_markdown_import_error(self, pages, caplog):
        content = fabsite.read_content(pages.no_md_post_path)
        assert content["content"] == "*Foo*"
        assert caplog.text is None
|
nilq/baby-python
|
python
|
"""
Configuration utils.
Author: Henrik Thostrup Jensen <htj@ndgf.org>
Copyright: Nordic Data Grid Facility (2009, 2010)
"""
import ConfigParser
import re
from sgas.ext.python import ConfigDict
# Configuration defaults, used when the config file omits the option.
DEFAULT_AUTHZ_FILE = '/etc/sgas.authz'
DEFAULT_HOSTNAME_CHECK_DEPTH = '2'
# Option names within the [server] block of the configuration file.
SERVER_BLOCK = 'server'
DB = 'db'
AUTHZ_FILE = 'authzfile'
HOSTNAME_CHECK_DEPTH = 'check_depth'
# the following are no longer used, but are used to issue warnings
HOSTKEY = 'hostkey'
HOSTCERT = 'hostcert'
CERTDIR = 'certdir'
REVERSE_PROXY = 'reverse_proxy'
HOSTNAME_CHECK_WHITELIST = 'check_whitelist'
# Option names for plugin sections.
PLUGINS = 'plugins'
PLUGIN_CLASS = 'class'
PLUGIN_PACKAGE = 'package'
PLUGIN_TYPE = 'type'
class ConfigurationError(Exception):
    """Raised when the SGAS configuration file is invalid or unreadable."""
    pass
def readConfig(filename):
    """Parse the SGAS configuration file and return the config parser.

    Server-block defaults are pre-populated, and the file is read through
    MultiLineFileReader so backslash-continued lines and ``<<<`` blocks
    are joined into single logical lines before parsing.
    """
    # the dict_type option isn't supported until 2.5
    try:
        cfg = ConfigParser.SafeConfigParser(dict_type=ConfigDict)
    except TypeError:
        cfg = ConfigParser.SafeConfigParser()
    # add defaults
    cfg.add_section(SERVER_BLOCK)
    cfg.set(SERVER_BLOCK, AUTHZ_FILE, DEFAULT_AUTHZ_FILE)
    cfg.set(SERVER_BLOCK, HOSTNAME_CHECK_DEPTH, DEFAULT_HOSTNAME_CHECK_DEPTH)
    fp = open(filename)
    proxy_fp = MultiLineFileReader(fp)
    # read cfg file
    cfg.readfp(proxy_fp)
    return cfg
class MultiLineFileReader:
    """File-object proxy presenting logical lines to ConfigParser.

    Two extensions over a plain readline():
    * lines ending with a backslash are joined with the following line(s);
    * an option of the form ``name=<<<`` starts a block that continues
      until a line beginning with ``<<<`` (useful for large queries).

    readline() is the only method called by ConfigParser, so it is the
    only one implemented.  Raises ConfigurationError if a ``<<<`` block
    is not terminated before end of file.
    """

    def __init__(self, fp):
        self._fp = fp

    def readline(self):
        line = self._fp.readline()
        # --- "<<<" block: collect lines until the closing "<<<" marker ---
        if line.rstrip().endswith('=<<<') and not line.lstrip().startswith("#"):
            line = re.sub(r'<<<$', r'', line.rstrip())
            while True:
                raw = self._fp.readline()
                # Bug fix: file.readline() signals EOF with '' (never None),
                # so the previous `cl == None` check could not fire and a
                # missing "<<<" terminator caused an infinite loop.
                if raw == '':
                    raise ConfigurationError("ReadError: Reached end of file but found no <<<")
                cl = raw.rstrip()
                if cl.startswith("<<<"):
                    break
                line += cl + " "
            return line.rstrip()
        # --- backslash continuation: join with the next line(s) ---
        while line.endswith('\\\n') or line.endswith('\\ \n'):
            if line.endswith('\\\n'):
                i = -2
            if line.endswith('\\ \n'):
                i = -3
            newline = self._fp.readline()
            # Drop leading spaces of the continuation line.
            while newline.startswith(' '):
                newline = newline[1:]
            line = line[:i] + newline
        return line
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
'''utility module for package rankorder.'''
import numpy as np
def rankdata(a, method):
    '''Assign zero-based ranks to the values of `a`.

    Ranks increase with increasing value; np.nan receives the highest
    rank.  Ties are resolved according to `method`.

    Parameters
    ----------
    a : array-like
        values to be ranked
    method : {'ordinal', 'random'}
        'ordinal': every value gets a distinct rank; tied values keep
        their order of appearance in the array.
        'random': like 'ordinal', but tied values are ranked in random
        order.
    '''
    # implementation is inspired by scipy.stats.rankdata
    if method not in ('ordinal', 'random'):
        raise ValueError('unknown method "{}"'.format(method))
    values = np.asarray(a)
    if method == 'random':
        # Shuffle first so equal values end up in random relative order,
        # then rank exactly as for 'ordinal'.
        shuffle = np.random.permutation(values.size)
        values = values[shuffle]
    # A stable sort keeps the (possibly shuffled) order of tied values.
    order = np.argsort(values, kind='mergesort')
    # The rank array is the inverse of the sorting permutation.
    ranks = np.empty(order.size, dtype=np.intp)
    ranks[order] = np.arange(order.size, dtype=np.intp)
    if method == 'random':
        # Undo the shuffle so ranks line up with the original positions.
        ranks = ranks[np.argsort(shuffle, kind='mergesort')]
    return ranks
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# imaparchiver/__main__.py
#
# imaparchiver package start
#
# This file is part of imaparchiver.
# See the LICENSE file for the software license.
# (C) Copyright 2015-2019, Oliver Maurhart, dyle71@gmail.com
# ------------------------------------------------------------
"""This is the imaparchiver package start script."""
import sys
from . import command_line
def main() -> None:
    """Run the imap-archiver command line; exit with status 1 on any error."""
    try:
        command_line.cli(prog_name='imap-archiver')
    except Exception as err:
        message = '{}\n'.format(err)
        sys.stderr.write(message)
        sys.exit(1)


if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
from __future__ import print_function
import cProfile
import pstats
import argparse
from examples.pybullet.utils.pybullet_tools.kuka_primitives import BodyPose, BodyConf, Command, get_grasp_gen, \
get_stable_gen, get_ik_fn, get_free_motion_gen, \
get_holding_motion_gen, get_movable_collision_test
from examples.pybullet.utils.pybullet_tools.utils import WorldSaver, connect, dump_world, get_pose, set_pose, Pose, \
Point, set_default_camera, stable_z, \
BLOCK_URDF, get_configuration, SINK_URDF, STOVE_URDF, load_model, is_placement, get_body_name, \
disconnect, DRAKE_IIWA_URDF, get_bodies, user_input, HideOutput
from pddlstream.algorithms.focused import solve_focused
from pddlstream.language.generator import from_gen_fn, from_fn, empty_gen
from pddlstream.language.synthesizer import StreamSynthesizer
from pddlstream.utils import print_solution, read, INF, get_file_path, find_unique
import random
# Toggle the StreamSynthesizer-based motion planning path (see main()).
USE_SYNTHESIZERS = False
def get_fixed(robot, movable):
    """Return all simulated bodies that are neither the robot nor movable."""
    movable_set = set(movable)
    return [body for body in get_bodies()
            if body != robot and body not in movable_set]
def place_movable(certified):
    """Place bodies at the poses certified as collision-free.

    Scans `certified` for ('not', ('trajcollision', _, body, pose)) facts,
    moves each body to its pose, and returns the list of placed bodies.
    """
    placed = []
    for literal in certified:
        if literal[0] != 'not':
            continue
        fact = literal[1]
        if fact[0] != 'trajcollision':
            continue
        _, body, pose = fact[1:]
        set_pose(body, pose.pose)
        placed.append(body)
    return placed
def get_free_motion_synth(robot, movable=[], teleport=False):
    """Build a synthesizer function for collision-free free-space motions."""
    fixed = get_fixed(robot, movable)
    def fn(outputs, certified):
        assert len(outputs) == 1
        # recover the start/end configurations from the certified fact
        freemotion_fact = find_unique(lambda f: f[0] == 'freemotion', certified)
        q0, _, q1 = freemotion_fact[1:]
        obstacles = fixed + place_movable(certified)
        plan_motion = get_free_motion_gen(robot, obstacles, teleport)
        return plan_motion(q0, q1)
    return fn
def get_holding_motion_synth(robot, movable=[], teleport=False):
    """Build a synthesizer function for motions while holding an object."""
    fixed = get_fixed(robot, movable)
    def fn(outputs, certified):
        assert len(outputs) == 1
        # recover configurations, held object, and grasp from the fact
        holding_fact = find_unique(lambda f: f[0] == 'holdingmotion', certified)
        q0, _, q1, o, g = holding_fact[1:]
        obstacles = fixed + place_movable(certified)
        plan_motion = get_holding_motion_gen(robot, obstacles, teleport)
        return plan_motion(q0, q1, o, g)
    return fn
#######################################################
def pddlstream_from_problem(robot, movable=[], teleport=False, movable_collisions=False, grasp_name='side'):
    """Assemble the PDDLStream problem tuple for the two-bowl scooping task.

    Returns (domain_pddl, constant_map, stream_pddl, stream_map, init, goal).
    `movable` is expected to be ordered as produced by load_world():
    [tub_straw, tub_vanilla, bowl1, bowl2, wash, vanilla_scoop1,
     vanilla_scoop2, straw_scoop1, straw_scoop2].
    """
    #assert (not are_colliding(tree, kin_cache))
    domain_pddl = read(get_file_path(__file__, 'domain2.pddl'))
    stream_pddl = read(get_file_path(__file__, 'stream2.pddl'))
    constant_map = {}
    print('Robot:', robot)
    conf = BodyConf(robot, get_configuration(robot))
    # initial symbolic state: robot at its current configuration, hand empty
    init = [('CanMove',),
            ('Conf', conf),
            ('AtConf', conf),
            ('HandEmpty',),
            ('Cleaned',)]
    fixed = get_fixed(robot, movable)
    # unpack the movable bodies by their fixed positions in the list
    # movable_bodies = [tub_straw, tub_vanilla, bowl1, bowl2, bowl3, wash, scoop_vanilla1, scoop_vanilla2, scoop_vanilla3, scoop_straw1, scoop_straw2, scoop_straw3]
    tub_straw = movable[0]
    tub_vanilla = movable[1]
    bowl1 = movable[2]
    bowl2 = movable[3]
    wash = movable[4]
    vanilla_scoop1 = movable[5]
    vanilla_scoop2 = movable[6]
    straw_scoop1 = movable[7]
    straw_scoop2 = movable[8]
    print('Movable:', movable)
    print('Fixed:', fixed)
    # declare each movable body graspable at its current pose, and record
    # any existing support relations between movables
    for body in movable:
        pose = BodyPose(body, get_pose(body))
        init += [('Graspable', body),
                 ('Pose', body, pose),
                 ('AtPose', body, pose)]
        for surface in movable:
            if body != surface:
                # init += [('Stackable', body, surface)]
                if is_placement(body, surface):
                    init += [('Supported', body, pose, surface)]
    # typed facts identifying the bowls, scoops and wash tub
    init += [('isEmpty1',)]
    init += [('isEmpty2',)]
    init += [('Bowl1', bowl1)]
    init += [('Bowl2', bowl2)]
    init += [('VanillaScoop', vanilla_scoop1)]
    init += [('VanillaScoop', vanilla_scoop2)]
    init += [('StrawScoop', straw_scoop1)]
    init += [('StrawScoop', straw_scoop2)]
    init += [('Wash', wash)]
    ss = [straw_scoop1, straw_scoop2]
    vs = [vanilla_scoop1, vanilla_scoop2]
    # every scoop may be stacked on either bowl ...
    for a in ss:
        init += [('Stackable', a, bowl1)]
        init += [('Stackable', a, bowl2)]
    for a in vs:
        init += [('Stackable', a, bowl1)]
        init += [('Stackable', a, bowl2)]
    # ... and scoops of different flavors may be stacked on each other
    for a in ss:
        for b in vs:
            init += [('Stackable', a, b)]
            init += [('Stackable', b, a)]
    # randomize which physical scoop ends up in which bowl
    random.shuffle(ss)
    random.shuffle(vs)
    # goal: each bowl holds a strawberry scoop with a vanilla scoop on top,
    # and the robot returns to its initial configuration
    goal = ('and',
            ('AtConf', conf),
            ('First1', ss[0], bowl1),
            ('Second1', vs[0], ss[0]),
            ('First2', ss[1], bowl2),
            ('Second2', vs[1], ss[1]),
            # ('First3', vs[2], bowl3),
            # ('Second3', vs[2], ss[2]),
            # ('Second', vanilla_scoop, straw_scoop),
            )
    stream_map = {
        'sample-pose': from_gen_fn(get_stable_gen(fixed)),
        'sample-grasp': from_gen_fn(get_grasp_gen(robot, grasp_name)),
        'inverse-kinematics': from_fn(get_ik_fn(robot, fixed, teleport)),
        'plan-free-motion': from_fn(get_free_motion_gen(robot, fixed, teleport)),
        'plan-holding-motion': from_fn(get_holding_motion_gen(robot, fixed, teleport)),
        'TrajCollision': get_movable_collision_test(),
    }
    if USE_SYNTHESIZERS:
        # motion planning is delegated to the synthesizers in main()
        stream_map.update({
            'plan-free-motion': empty_gen(),
            'plan-holding-motion': empty_gen(),
        })
    return domain_pddl, constant_map, stream_pddl, stream_map, init, goal
#######################################################
def load_world():
    """Load the robot, tubs, bowls and scoops, and place them in the scene.

    Returns (robot, body_names, movable_bodies) where body_names maps a body
    id to a human-readable name.
    """
    # TODO: store internal world info here to be reloaded
    with HideOutput():
        robot = load_model(DRAKE_IIWA_URDF)
        floor = load_model('models/short_floor.urdf')
        tub_straw = load_model('models/tub_straw.urdf', fixed_base=False )
        tub_vanilla = load_model('models/tub_vanilla.urdf', fixed_base=False )
        wash = load_model('models/tub_wash.urdf', fixed_base=False)
        bowl1 = load_model('models/bowl.urdf', fixed_base=False)
        bowl2 = load_model('models/bowl.urdf', fixed_base=False)
        scoop_vanilla1 = load_model('models/vanilla_scoop.urdf', fixed_base=False)
        scoop_straw1 = load_model('models/straw_scoop.urdf', fixed_base=False)
        scoop_vanilla2 = load_model('models/vanilla_scoop.urdf', fixed_base=False)
        scoop_straw2 = load_model('models/straw_scoop.urdf', fixed_base=False)
    body_names = {
        tub_straw: 'tub_straw',
        tub_vanilla: 'tub_vanilla',
        scoop_vanilla1: 'scoop_vanilla1',
        scoop_vanilla2: 'scoop_vanilla2',
        scoop_straw1: 'scoop_straw1',
        scoop_straw2: 'scoop_straw2',
        bowl1: 'bowl1',
        bowl2: 'bowl2',
        wash: 'wash',
    }
    # NOTE: order matters — pddlstream_from_problem() unpacks this list by index
    movable_bodies = [tub_straw, tub_vanilla, bowl1, bowl2, wash, scoop_vanilla1, scoop_vanilla2, scoop_straw1, scoop_straw2]
    # scoops start resting inside their flavor tubs; bowls on the far side
    set_pose(tub_straw, Pose(Point(x=0.5, y=-0.5, z=-0.1)))
    set_pose(tub_vanilla, Pose(Point(x=+0.5, y=+0.0, z=-0.1)))
    set_pose(scoop_straw1, Pose(Point(x=0.5, y=-0.5, z=stable_z(scoop_straw1, tub_straw))))
    set_pose(scoop_vanilla1, Pose(Point(x=+0.5, y=+0.0, z=stable_z(scoop_vanilla1, tub_vanilla))))
    set_pose(scoop_straw2, Pose(Point(x=0.65, y=-0.5, z=stable_z(scoop_straw2, tub_straw))))
    set_pose(scoop_vanilla2, Pose(Point(x=+0.65, y=+0.0, z=stable_z(scoop_vanilla2, tub_vanilla))))
    set_pose(wash, Pose(Point(x=-0.5, y=+0.0, z=-0.1)))
    set_pose(bowl1, Pose(Point(x=-0.4, y=+0.5, z=0.0)))
    set_pose(bowl2, Pose(Point(x=-0.0, y=+0.5, z=0.0)))
    set_default_camera()
    return robot, body_names, movable_bodies
def postprocess_plan(plan):
    """Flatten a symbolic plan into a single executable Command.

    Dump actions replay their trajectory in reverse; motion/scoop actions
    contribute their paths forward. The last argument of each action is
    assumed to carry the trajectory (as produced by the streams).
    """
    paths = []
    for name, args in plan:
        # consistency: both dump actions had identical, duplicated branches
        if name in ('dump_first', 'dump_second'):
            paths += args[-1].reverse().body_paths
        elif name in ['move', 'move_free', 'move_holding', 'scoop_vanilla', 'scoop_straw']:
            paths += args[-1].body_paths
    return Command(paths)
#######################################################
def main(viewer=False, display=True, simulate=False, teleport=False):
    """Solve the scooping task with PDDLStream and optionally replay the plan.

    Connects to pybullet, builds and solves the problem with the focused
    algorithm under a profiler, then replays or simulates the result.
    """
    # TODO: fix argparse & FastDownward
    #parser = argparse.ArgumentParser()  # Automatically includes help
    #parser.add_argument('-viewer', action='store_true', help='enable viewer.')
    #parser.add_argument('-display', action='store_true', help='enable viewer.')
    #args = parser.parse_args()
    # TODO: getopt
    connect(use_gui=viewer)
    robot, names, movable = load_world()
    saved_world = WorldSaver()
    #dump_world()
    pddlstream_problem = pddlstream_from_problem(robot, movable=movable,
                                                 teleport=teleport, movable_collisions=True)
    _, _, _, stream_map, init, goal = pddlstream_problem
    synthesizers = [
        StreamSynthesizer('safe-free-motion', {'plan-free-motion': 1, 'trajcollision': 0},
                          from_fn(get_free_motion_synth(robot, movable, teleport))),
        StreamSynthesizer('safe-holding-motion', {'plan-holding-motion': 1, 'trajcollision': 0},
                          from_fn(get_holding_motion_synth(robot, movable, teleport))),
    ] if USE_SYNTHESIZERS else []
    print('Init:', init)
    print('Goal:', goal)
    print('Streams:', stream_map.keys())
    # BUG FIX: previously printed stream_map.keys() a second time here;
    # report the synthesizer list as intended.
    print('Synthesizers:', synthesizers)
    print(names)
    pr = cProfile.Profile()
    pr.enable()
    solution = solve_focused(pddlstream_problem, synthesizers=synthesizers, max_cost=INF, verbose=False)
    print_solution(solution)
    plan, cost, evaluations = solution
    pr.disable()
    # pstats.Stats(pr).sort_stats('tottime').print_stats(10)
    if plan is None:
        return
    if (not display) or (plan is None):
        disconnect()
        return
    if not viewer:  # TODO: how to reenable the viewer
        # reconnect with the GUI so the replay is visible
        disconnect()
        connect(use_gui=True)
        load_world()
    else:
        saved_world.restore()
    command = postprocess_plan(plan)
    # user_input('Execute?')
    if simulate:
        command.control()
    else:
        #command.step()
        command.refine(num_steps=10).execute(time_step=0.001)
        #wait_for_interrupt()
    user_input('Finish?')
    disconnect()
# Script entry point.
if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
"""Calculate collision matrix of direct solution of LBTE."""
# Copyright (C) 2020 Atsushi Togo
# All rights reserved.
#
# This file is part of phono3py.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the phonopy project nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import numpy as np
from phonopy.units import Kb, THzToEv
from phono3py.phonon3.imag_self_energy import ImagSelfEnergy
from phono3py.phonon3.interaction import Interaction
class CollisionMatrix(ImagSelfEnergy):
    """Collision matrix of direct solution of LBTE for one grid point.

    Main diagonal part (imag-self-energy) and
    the other part are separately stored.

    """

    def __init__(
        self,
        interaction: Interaction,
        rotations_cartesian=None,
        num_ir_grid_points=None,
        rot_grid_points=None,
        is_reducible_collision_matrix=False,
        log_level=0,
        lang="C",
    ):
        """Init method."""
        # Attribute declarations; most values are filled by the base class
        # and by set_grid_point()/run().
        self._pp: Interaction
        self._is_collision_matrix: bool
        self._sigma = None
        self._frequency_points = None
        self._temperature = None
        self._grid_point = None
        self._lang = None
        self._imag_self_energy = None
        self._collision_matrix = None
        self._pp_strength = None
        self._frequencies = None
        self._triplets_at_q = None
        self._triplets_map_at_q = None
        self._weights_at_q = None
        self._band_indices = None
        self._unit_conversion = None
        self._cutoff_frequency = None
        self._g = None
        self._unit_conversion = None
        self._log_level = log_level
        super().__init__(interaction, lang=lang)

        self._is_reducible_collision_matrix = is_reducible_collision_matrix
        self._is_collision_matrix = True

        if not self._is_reducible_collision_matrix:
            # ir-reduced mode: map rotated BZ grid points to the generalized
            # regular grid and keep the Cartesian rotations for symmetrization.
            self._num_ir_grid_points = num_ir_grid_points
            self._rot_grid_points = np.array(
                self._pp.bz_grid.bzg2grg[rot_grid_points], dtype="int_", order="C"
            )
            self._rotations_cartesian = rotations_cartesian

    def run(self):
        """Calculate collision matrix at a grid point."""
        if self._pp_strength is None:
            self.run_interaction()
        num_band0 = self._pp_strength.shape[1]
        num_band = self._pp_strength.shape[2]
        self._imag_self_energy = np.zeros(num_band0, dtype="double")
        # Allocate the collision matrix; the ir-reduced variant carries extra
        # Cartesian axes for the rotation sum.
        if self._is_reducible_collision_matrix:
            num_mesh_points = np.prod(self._pp.mesh_numbers)
            self._collision_matrix = np.zeros(
                (num_band0, num_mesh_points, num_band), dtype="double"
            )
        else:
            self._collision_matrix = np.zeros(
                (num_band0, 3, self._num_ir_grid_points, num_band, 3), dtype="double"
            )
        self._run_with_band_indices()
        self._run_collision_matrix()

    def get_collision_matrix(self):
        """Return collision matrix at a grid point."""
        return self._collision_matrix

    def set_grid_point(self, grid_point=None):
        """Set a grid point and prepare for collision matrix calculation."""
        if grid_point is None:
            self._grid_point = None
        else:
            self._pp.set_grid_point(grid_point, store_triplets_map=True)
            self._pp_strength = None
            (
                self._triplets_at_q,
                self._weights_at_q,
                self._triplets_map_at_q,
                self._ir_map_at_q,
            ) = self._pp.get_triplets_at_q()
            self._grid_point = grid_point
            self._frequencies, self._eigenvectors, _ = self._pp.get_phonons()

    def _run_collision_matrix(self):
        # Dispatch to the C or python implementation; temperature must be
        # positive for the occupation factors to be defined.
        if self._temperature > 0:
            if self._lang == "C":
                if self._is_reducible_collision_matrix:
                    self._run_c_reducible_collision_matrix()
                else:
                    self._run_c_collision_matrix()
            else:
                if self._is_reducible_collision_matrix:
                    self._run_py_reducible_collision_matrix()
                else:
                    self._run_py_collision_matrix()

    def _run_c_collision_matrix(self):
        # C implementation of the ir-reduced collision matrix.
        import phono3py._phono3py as phono3c

        phono3c.collision_matrix(
            self._collision_matrix,
            self._pp_strength,
            self._frequencies,
            self._g,
            self._triplets_at_q,
            self._triplets_map_at_q,
            self._ir_map_at_q,
            self._rot_grid_points,  # in GRGrid
            self._rotations_cartesian,
            self._temperature,
            self._unit_conversion,
            self._cutoff_frequency,
        )

    def _run_c_reducible_collision_matrix(self):
        # C implementation of the full (reducible) collision matrix.
        import phono3py._phono3py as phono3c

        phono3c.reducible_collision_matrix(
            self._collision_matrix,
            self._pp_strength,
            self._frequencies,
            self._g,
            self._triplets_at_q,
            self._triplets_map_at_q,
            self._ir_map_at_q,
            self._temperature,
            self._unit_conversion,
            self._cutoff_frequency,
        )

    def _run_py_collision_matrix(self):
        r"""Sum over rotations, and q-points and bands for third phonons.

        \Omega' = \sum_R' R' \Omega_{kp,R'k'p'}

        pp_strength.shape = (num_triplets, num_band0, num_band, num_band)

        """
        num_band0 = self._pp_strength.shape[1]
        num_band = self._pp_strength.shape[2]
        gp2tp, tp2s, swapped = self._get_gp2tp_map()
        for i in range(self._num_ir_grid_points):
            r_gps = self._rot_grid_points[i]
            for r, r_gp in zip(self._rotations_cartesian, r_gps):
                inv_sinh = self._get_inv_sinh(tp2s[r_gp])
                ti = gp2tp[r_gp]
                for j, k in np.ndindex((num_band0, num_band)):
                    # When the triplet was stored with q1 and q2 swapped, the
                    # band axes of the stored strength must be swapped too.
                    if swapped[r_gp]:
                        collision = (
                            self._pp_strength[ti, j, :, k]
                            * inv_sinh
                            * self._g[2, ti, j, :, k]
                        ).sum()
                    else:
                        collision = (
                            self._pp_strength[ti, j, k]
                            * inv_sinh
                            * self._g[2, ti, j, k]
                        ).sum()
                    collision *= self._unit_conversion
                    self._collision_matrix[j, :, i, k, :] += collision * r

    def _run_py_reducible_collision_matrix(self):
        r"""Sum over q-points and bands of third phonons.

        This corresponds to the second term of right hand side of
        \Omega_{q0p0, q1p1} in Chaput's paper.

        pp_strength.shape = (num_triplets, num_band0, num_band, num_band)

        """
        num_mesh_points = np.prod(self._pp.mesh_numbers)
        num_band0 = self._pp_strength.shape[1]
        num_band = self._pp_strength.shape[2]
        gp2tp, tp2s, swapped = self._get_gp2tp_map()
        for gp1 in range(num_mesh_points):
            inv_sinh = self._get_inv_sinh(tp2s[gp1])
            ti = gp2tp[gp1]
            for j, k in np.ndindex((num_band0, num_band)):
                if swapped[gp1]:
                    collision = (
                        self._pp_strength[ti, j, :, k]
                        * inv_sinh
                        * self._g[2, ti, j, :, k]
                    ).sum()
                else:
                    collision = (
                        self._pp_strength[ti, j, k] * inv_sinh * self._g[2, ti, j, k]
                    ).sum()
                collision *= self._unit_conversion
                self._collision_matrix[j, gp1, k] += collision

    def _get_gp2tp_map(self):
        """Return mapping table from grid point index to triplet index.

        triplets_map_at_q contains index mapping of q1 in (q0, q1, q2) to
        independent q1 under q0+q1+q2=G with a fixed q0.

        Note
        ----
        map_q[gp1] <= gp1.:
            Symmetry relation of grid points with a stabilizer q0.
        map_triplets[gp1] <= gp1 :
            map_q[gp1] == gp1 : map_q[gp2] if map_q[gp2] < gp1 otherwise gp1.
            map_q[gp1] != gp1 : map_triplets[map_q[gp1]]
        As a rule
            1. map_triplets[gp1] == gp1 : [gp0, gp1, gp2]
            2. map_triplets[gp1] != gp1 : [gp0, map_q[gp2], gp1'],
               map_triplets[gp1] == map_q[gp2]

        """
        map_triplets = self._triplets_map_at_q
        map_q = self._ir_map_at_q
        gp2tp = -np.ones(len(map_triplets), dtype="int_")
        tp2s = -np.ones(len(map_triplets), dtype="int_")
        swapped = np.zeros(len(map_triplets), dtype="bytes")
        num_tps = 0

        bzg2grg = self._pp.bz_grid.bzg2grg

        for gp1, tp_gp1 in enumerate(map_triplets):
            if map_q[gp1] == gp1:
                if gp1 == tp_gp1:
                    # gp1 is itself an independent triplet representative.
                    gp2tp[gp1] = num_tps
                    tp2s[gp1] = self._triplets_at_q[num_tps][2]
                    assert bzg2grg[self._triplets_at_q[num_tps][1]] == gp1
                    num_tps += 1
                else:  # q1 <--> q2 swap if swappable.
                    gp2tp[gp1] = gp2tp[tp_gp1]
                    tp2s[gp1] = self._triplets_at_q[gp2tp[gp1]][1]
                    swapped[gp1] = 1
                    assert map_q[bzg2grg[self._triplets_at_q[gp2tp[gp1]][2]]] == gp1
            else:  # q1 is not in ir-q1s.
                gp2tp[gp1] = gp2tp[map_q[gp1]]
                tp2s[gp1] = tp2s[map_q[gp1]]
                swapped[gp1] = swapped[map_q[gp1]]

            # Alternative implementation of tp2s
            # grg2bzg = self._pp.bz_grid.grg2bzg
            # addresses = self._pp.bz_grid.addresses
            # q0 = addresses[self._triplets_at_q[0][0]]
            # q1 = addresses[grg2bzg[gp1]]
            # q2 = -q0 - q1
            # gp2 = get_grid_point_from_address(q2, self._pp.bz_grid.D_diag)
            # tp2s[gp1] = self._pp.bz_grid.grg2bzg[gp2]

        return gp2tp, tp2s, swapped

    def _get_inv_sinh(self, gp):
        """Return sinh term for bands at a q-point."""
        freqs = self._frequencies[gp]
        # Frequencies at or below the cutoff are excluded (their inverse
        # sinh is forced to 0 through the sentinel -1.0 below).
        sinh = np.where(
            freqs > self._cutoff_frequency,
            np.sinh(freqs * THzToEv / (2 * Kb * self._temperature)),
            -1.0,
        )
        inv_sinh = np.where(sinh > 0, 1.0 / sinh, 0)
        return inv_sinh
|
nilq/baby-python
|
python
|
import argparse
import dataclasses
from pathlib import Path
from typing import Dict, List, Optional
from omegaconf import DictConfig, OmegaConf as oc
from .. import settings, logger
@dataclasses.dataclass
class Paths:
    """Bundle of dataset, dump and evaluation paths used by the eval tools."""
    query_images: Path
    reference_images: Path
    reference_sfm: Path
    query_list: Path
    dataset: Optional[Path] = None
    dumps: Optional[Path] = None
    retrieval_pairs: Optional[Path] = None
    results: Optional[Path] = None
    global_descriptors: Optional[Path] = None
    hloc_logs: Optional[Path] = None
    log_path: Optional[Path] = None
    ground_truth: Optional[Path] = None

    def interpolate(self, **kwargs) -> 'Paths':
        """Return a copy with '{key}' placeholders replaced by kwargs values."""
        resolved = {}
        for field in dataclasses.fields(self):
            value = getattr(self, field.name)
            if value is not None:
                text = str(value)
                for key, replacement in kwargs.items():
                    text = text.replace('{%s}' % key, str(replacement))
                value = Path(text)
            resolved[field.name] = value
        return self.__class__(**resolved)

    def asdict(self) -> Dict[str, Path]:
        """Return the paths as a plain dictionary."""
        return dataclasses.asdict(self)

    @classmethod
    def fields(cls) -> List[str]:
        """List the declared field names, in declaration order."""
        return [field.name for field in dataclasses.fields(cls)]

    def add_prefixes(self, dataset: Path, dumps: Path,
                     eval_dir: Optional[Path] = Path('.')) -> 'Paths':
        """Prefix every relative path with the root directory it lives under."""
        dataset_relative = {'query_images', 'reference_images', 'ground_truth'}
        prefixed = {}
        for name in self.fields():
            value = getattr(self, name)
            if value is None:
                continue
            if name in {'dataset', 'dumps'}:
                prefixed[name] = value
            elif name in dataset_relative:
                prefixed[name] = dataset / value
            elif name == 'results':
                prefixed[name] = eval_dir / value
            else:  # everything else is part of the hloc dumps
                prefixed[name] = dumps / value
        prefixed['dataset'] = dataset
        prefixed['dumps'] = dumps
        return self.__class__(**prefixed)
def create_argparser(dataset: str) -> argparse.ArgumentParser:
    """Build the shared command-line parser for a given dataset name."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    add = parser.add_argument
    add('--results', type=Path)
    add('--reference_sfm', type=Path)
    add('--retrieval', type=Path)
    add('--global_descriptors', type=Path)
    add('--hloc_logs', type=Path)
    add('--dataset', type=Path, default=settings.DATA_PATH / dataset)
    add('--dumps', type=Path, default=settings.LOC_PATH / dataset)
    add('--eval_dir', type=Path, default=settings.EVAL_PATH)
    add('--from_poses', action='store_true')
    add('--inlier_ranking', action='store_true')
    add('--skip', type=int)
    add('--verbose', action='store_true')
    # trailing free-form overrides, consumed by OmegaConf in parse_conf()
    add('dotlist', nargs='*')
    return parser
def parse_paths(args, default_paths: Paths) -> Paths:
    """Merge CLI path overrides over the prefixed defaults into a Paths."""
    defaults = default_paths.add_prefixes(
        args.dataset, args.dumps, args.eval_dir)
    selected = {}
    for name in Paths.fields():
        # a CLI value wins; otherwise fall back to the default, if any
        value = getattr(args, name, None)
        if value is None:
            value = getattr(defaults, name, None)
        if value is not None:
            selected[name] = value
    return Paths(**selected)
def parse_conf(args, default_confs: Dict) -> DictConfig:
    """Pick the default config by mode and overlay CLI dotlist overrides."""
    mode = 'from_poses' if args.from_poses else 'from_retrieval'
    conf = oc.merge(oc.create(default_confs[mode]), oc.from_cli(args.dotlist))
    logger.info('Parsed configuration:\n%s', oc.to_yaml(conf))
    return conf
|
nilq/baby-python
|
python
|
from urllib.request import urlopen as ureq
from bs4 import BeautifulSoup
import requests
import os, os.path, csv
my_url = 'https://www.newegg.com/Laptops-Notebooks/Category/ID-223?Tid=17489'

# loading connection/grabbing page
xClient = ureq(my_url)
p_html = xClient.read()
xClient.close()  # release the connection once the page has been read

# html parsing
page_soup = BeautifulSoup(p_html, "html.parser")

# grabs each laptop
containers = page_soup.findAll("div", {"class":"item-container"})

filename = "laptops.csv"
# BUG FIX: the header was built as a tuple and the literal string "headers"
# was written to the file; write the actual CSV header row instead.
headers = "brand, product_name, shipping\n"
f = open(filename, "w")
f.write(headers)

# for loop that extracts titles of laptops
for container in containers:
    brand = container.div.div.a.img["title"]
    title_container = container.findAll("a", {"class":"item-title"})
    product_name = title_container[0].text
    shipping_container = container.findAll("li", {"class" : "price-ship"})
    shipping = shipping_container[0].text.strip()
    print("brand: " + brand)
    print("product_name: " + product_name)
    print("shipping: " + shipping)
    # commas inside the product name would break the CSV columns
    f.write(brand + ", " + product_name.replace(",", "|") + ", " + shipping + "\n")

f.close()  # BUG FIX: the file was never closed/flushed
|
nilq/baby-python
|
python
|
# by amounra : http://www.aumhaa.com
from __future__ import with_statement
import contextlib
from _Framework.SubjectSlot import SubjectEvent
from _Framework.Signal import Signal
from _Framework.NotifyingControlElement import NotifyingControlElement
from _Framework.Util import in_range
from _Framework.Debug import debug_print
from _Framework.Disconnectable import Disconnectable
from _Framework.InputControlElement import InputSignal
from MonoDeviceComponent import MonoDeviceComponent
from ModDevices import *
# Encoder message index -> parameter name exchanged with the m4l patch.
wheel_parameter = {0: 'value', 1: 'mode', 2:'green', 3:'white', 4:'custom'}

# Scrolling banner frames: one entry per animation column; each entry lists
# [row, color] pairs to light for that column (consumed by MonoClient._banner).
LOGO = [[], [], [], [], [], [], [], [],
    [[1, 1], [2, 1], [3, 1], [4, 1]],
    [[0, 1]],
    [[1, 1], [2, 1]],
    [[1, 1], [2, 1], [3, 1]],
    [[0, 1]],
    [[1, 1], [2, 1], [3, 1], [4, 1]],
    [[2, 1], [3, 1], [4, 1]],
    [],
    [[2, 2], [3, 2]],
    [[1, 2], [4, 2]],
    [[0, 2], [4, 2]],
    [[0, 2], [3, 2], [4, 2]],
    [[1, 2], [2, 2], [3, 2]],
    [],
    [[1, 3], [2, 3], [3, 3], [4, 3]],
    [[0, 3], [1, 3]],
    [[1, 3], [2, 3]],
    [[2, 3], [3, 3]],
    [[0, 3], [1, 3], [2, 3], [3, 3], [4, 3]],
    [],
    [[2, 4], [3, 4]],
    [[1, 4], [4, 4]],
    [[0, 4], [4, 4]],
    [[0, 4], [3, 4], [4, 4]],
    [[1, 4], [2, 4], [3, 4]],
    [],
    [[1, 5], [2, 5], [3, 5], [4, 5]],
    [[0, 5]],
    [[1, 5], [2, 5]],
    [[1, 5], [2, 5], [3, 5]],
    [[0, 5]],
    [[1, 5], [2, 5], [3, 5], [4, 5]],
    [[2, 5], [3, 5], [4, 5]],
    [],
    [[2, 6],[3, 6]],
    [[1, 6], [4, 6]],
    [[0, 6], [4, 6]],
    [[0, 6], [3, 6], [4, 6]],
    [[1, 6], [2, 6], [3, 6]],
    [],
    [[0, 1], [1, 1], [2, 1], [3, 1], [4, 1]],
    [[0, 1], [4, 1]],
    [[0, 1], [4, 1]],
    [[1, 1], [2, 1], [3, 1], [4, 1]],
    [[2, 1], [3, 1], [4, 1]],
    [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], []]
def unpack_values(values):
    """Decode a '^'-delimited value string into a list of ints."""
    return list(map(int, str(values).split('^')))
class MonoClient(NotifyingControlElement):
__module__ = __name__
__doc__ = ' Class representing a single mod in a Monomodular hosted environment '
__subject_events__ = (SubjectEvent(name='value', signal=InputSignal, override=True),)
_input_signal_listener_count = 0
    def __init__(self, script, number, *a, **k):
        """Create client slot `number` hosted by control surface `script`."""
        super(MonoClient, self).__init__(script, number, *a, **k)
        self._host = script
        self._is_monolink = False
        self._active_host = []
        self._number = number
        self._channel = 0
        self._connected = False
        self._enabled = True
        self.device = None
        self._device_parent = None
        self._device_component = None
        self._swing = 0
        self._mute = 0
        self._autoselect_enabled = 0
        self._offset = [0, 0]
        self._color_maps = []
        self._report_offset = False
        self._local_ring_control = 1
        self._c_local_ring_control = 1
        self._controls = [{},{}]
        # allocate the cached control-state tables for both layouts
        self._create_grid()
        self._create_keys()
        self._create_wheels()
        self._create_c_grid()
        self._create_c_keys()
        self._create_c_wheels()
        self._create_c_knobs()
        self._absolute_mode = 1
        self._c_absolute_mode = 1
        self._parameters = []
        self._mod_dial = None
        self._mod_vol = 127
        self._mod_color = 0
        self._device_component = MonoDeviceComponent(self, MOD_BANK_DICT, MOD_TYPES)
        self._banner_state = 0
        self._monomodular = 0
def is_active(self):
return (len(self._active_host) > 0)
    def set_enabled(self, val):
        """Enable/disable forwarding to the m4l patch; nonzero enables."""
        self._enabled = val!=0
    def _banner(self):
        """Scroll the LOGO frames across the grid while no mod is connected."""
        if not self.is_connected() and len(self._active_host)>0:
            if self._banner_state < 54:
                self.receive_grid_all(0)
                # draw 16 consecutive LOGO columns starting at the state offset
                for index in range(16):
                    for y in range(len(LOGO[self._banner_state + index])):
                        self.receive_grid(index, LOGO[self._banner_state + index][y][0], LOGO[self._banner_state + index][y][1])
                self._banner_state += 1
                # schedule the next animation frame on the host's task timer
                self._host.schedule_message(1, self._banner)
            else:
                self._banner_state = 0
    def script_wants_forwarding(self):
        """Always accept forwarded MIDI from the framework."""
        return True
    def is_connected(self):
        """Return True when a mod device is attached to this client."""
        return self._connected
    def disconnect(self):
        """Detach device listeners and reset modes at surface shutdown."""
        #self._device_component.disconnect()
        self._active_host = []
        if self._device_parent != None:
            if self._device_parent.devices_has_listener(self._device_listener):
                self._device_parent.remove_devices_listener(self._device_listener)
        super(MonoClient, self).disconnect()
        # restore default ring-control/absolute modes for both layouts
        self._enabled = True
        self._c_local_ring_control = 1
        self._local_ring_control = 1
        self._c_absolute_mode = 1
        self._absolute_mode = 1
    def reset(self):
        """No-op placeholder."""
        pass
    def _connect_to(self, device):
        """Attach a Live device to this client and watch its parent for removal."""
        #self._host.log_message('client ' + str(self._number) + ' connect_to' + str(device.name))
        self._connected = True
        self.device = device
        # move the devices listener from the previous parent to the new one
        if self._device_parent != None:
            if self._device_parent.devices_has_listener(self._device_listener):
                self._device_parent.remove_devices_listener(self._device_listener)
        self._device_parent = device.canonical_parent
        if not self._device_parent.devices_has_listener(self._device_listener):
            self._device_parent.add_devices_listener(self._device_listener)
        #self._mute = 0
        #self._send('toggle_mute', self._mute)
        for host in self._active_host:
            host.update()
        # let every host script know about the new connection, if it cares
        for host in self._host._hosts:
            if hasattr(host, '_notify_new_connection'):
                host._notify_new_connection(device)
    def _disconnect_client(self, reconnect = False):
        """Reset all cached state when the mod device goes away.

        When `reconnect` is True, the m4l patch is asked to re-establish
        its connection afterwards.
        """
        #self._host.log_message('disconnect client ' + str(self._number))
        # rebuild the cached control-state tables for both layouts
        self._create_grid()
        self._create_keys()
        self._create_wheels()
        self._create_c_grid()
        self._create_c_keys()
        self._create_c_wheels()
        self._create_c_knobs()
        self.set_local_ring_control(1)
        self.set_absolute_mode(1)
        self.set_c_local_ring_control(1)
        self.set_c_absolute_mode(1)
        self._mod_vol = 127
        self._mod_color = 0
        self._monomodular = 0
        self._swing = 0
        self._report_offset = False
        if self._device_parent != None:
            if self._device_parent.devices_has_listener(self._device_listener):
                self._device_parent.remove_devices_listener(self._device_listener)
        if reconnect == True:
            self._send('reconnect')
        if not self._device_component is None:
            self._device_component.disconnect_client()
        self._connected = False
        self.device = None
        for host in self._active_host:
            host.update()
            if not host.is_enabled() and 'display_mod_colors' in dir(host):
                host.display_mod_colors()
def _device_listener(self):
#self._host.log_message('device_listener' + str(self.device))
if self.device == None:
self._disconnect_client()
    def linked_device(self):
        """Return the attached Live device (None when disconnected)."""
        # NOTE(review): this method is redefined identically further down the
        # class; the later duplicate shadows this one harmlessly.
        return self.device
"""initiation methods"""
def _create_grid(self):
self._grid = [None for index in range(16)]
for column in range(16):
self._grid[column] = [None for index in range(16)]
for row in range(16):
self._grid[column][row] = 0
def _create_keys(self):
self._key = [None for index in range(8)]
for index in range(8):
self._key[index] = 0
def _create_wheels(self):
self._wheel = [[] for index in range(9)]
for column in range(9):
self._wheel[column] = [[] for index in range(5)]
for row in range(5):
self._wheel[column][row] = {'log': 0, 'value': 0, 'mode':0, 'white': 0, 'green': 0, 'custom':'00000000', 'pn':' ', 'pv': '0'}
"""send methods (to m4l from host)"""
    def _send(self, args1 = None, args2 = None, args3 = None, args4 = None):
        """Forward a message tuple to the m4l patch while enabled."""
        if self._enabled is True:
            self.notify_value(args1, args2, args3, args4)
    def _send_key(self, index, value):
        """Push a single key state to the m4l patch."""
        self._send('key', index, value)
    def _send_grid(self, column, row, value):
        """Push a single grid-cell state to the m4l patch."""
        self._send('grid', column, row, value)
    def _send_offset(self, x, y):
        """Store the grid offset; report it to m4l when reporting is enabled."""
        self._offset = [x, y]
        if(self._report_offset is True):
            self._send('offset', x, y)
"""receive methods (from m4l)"""
    def receive_key(self, index, value=0):
        """Cache a key value from m4l and mirror it to every active host."""
        if self._key[index] != value:
            self._key[index] = value
            for host in self._active_host:
                host._send_key(index, value)
    def receive_grid(self, column, row, value=0):
        """Cache a grid-cell value from m4l and mirror it to active hosts."""
        if self._grid[column][row] != value:
            self._grid[column][row] = value
            for host in self._active_host:
                host._send_grid(column, row, value)
    def receive_grid_row(self, row, value=0):
        """Set an entire grid row and mirror it to every active host."""
        g_len = len(self._grid)
        for column in xrange(g_len):
            self._grid[column][row] = value
        for host in self._active_host:
            for column in xrange(g_len):
                host._send_grid(column, row, value)
    def receive_grid_column(self, column, value=0):
        """Set an entire grid column and mirror it to every active host."""
        g_len = len(self._grid[column])
        for row in xrange(g_len):
            self._grid[column][row] = value
        for host in self._active_host:
            for row in xrange(g_len):
                host._send_grid(column, row, value)
def receive_grid_all(self, value=0):
for column in xrange(len(self._grid)):
for row in xrange(len(self._grid[column])):
self._grid[column][row] = value
#if self.is_active():
for host in self._active_host:
#for column in range(len(self._grid)):
#for row in range(len(self._grid[column])):
host._send_grid(column, row, value)
    def receive_mask_key(self, num, value=-1):
        """Send a masked key value; value == -1 restores the cached state."""
        #if self.is_active():
        if value > -1:
            for host in self._active_host:
                host._send_key(num, value)
        else:
            for host in self._active_host:
                host._send_key(num, int(self._key[num]))
    def receive_mask_grid(self, column, row, value=-1):
        """Send a masked cell value; value == -1 restores the cached cell."""
        if value > -1:
            for host in self._active_host:
                host._send_grid(column, row, value)
        else:
            for host in self._active_host:
                host._send_grid(column, row, int(self._grid[column][row]))
    def receive_mask_column(self, column, value=-1):
        """Send a masked column; value == -1 restores the cached column."""
        if value > -1:
            for host in self._active_host:
                for index in xrange(16):
                    host._send_grid(column, index, value)
        else:
            for host in self._active_host:
                for index in xrange(16):
                    host._send_grid(column, index, self._grid[column][index])
    def receive_mask_row(self, row, value=-1):
        """Send a masked row; value == -1 restores the cached row."""
        hosts = self._active_host
        if value > -1:
            for index in xrange(16):
                for host in hosts:
                    host._send_grid(index, row, value)
        else:
            for host in self._active_host:
                for index in xrange(16):
                    host._send_grid(index, row, self._grid[index][row])
def receive_mask_all(self, value=-1):
if value > -1:
for host in self._active_host:
for column in xrange(16):
for row in xrange(16):
host._send_grid(column, row, value)
else:
for host in self._active_host:
for column in xrange(16):
for row in xrange(16):
host._send_grid(column, row, self._grid[index][row])
    def receive_hotline(self, client, func = None, arguments = None):
        """Relay a 'hotline' call to one client by index, or to all 16."""
        #self._host.log_message(str(client) + ' ' + str(func) + ' ' + str(arguments))
        if(client == 'all') and (func != None):
            for index in xrange(16):
                self._host._client[index]._send('hotline', func, arguments)
        elif(client in xrange(16)) and (func != None):
            self._host._client[client]._send('hotline', func, arguments)
def receive_autoselect_enabled(self, val=0):
    # Remote setter for the auto-select flag (non-zero enables).
    self._autoselect_enabled = val
def receive_swing(self, swing=0):
    # Cache the swing amount and echo it back over the link.
    self._swing = swing
    self._send('swing', swing)
def report_swing(self, swing=0):
    # Report-only variant: forwards a swing value without touching the cache.
    self._send('report_swing', swing)
def toggle_mute(self):
    # Flip mute between 0 and 1 (abs(x-1) assumes _mute is 0 or 1) and
    # notify the remote end of the new state.
    self._mute = abs(self._mute-1)
    self._send('toggle_mute', self._mute)
def set_mute(self, val=0):
    # Silent setter: updates the cached mute state without sending a message.
    self._mute = val
def receive_channel(self, channel=0):
    # Accept a MIDI channel only when it is a valid value 0..15.
    if channel in range(16):
        self._channel = channel
def autoselect_enabled(self):
    """Return True when device auto-selection is switched on.

    FIX: the original signature was `def autoselect_enabled(self=0)` --
    the `=0` default was a typo and is unusable for a bound method call;
    removing it does not affect any caller using normal method syntax.
    """
    return self._autoselect_enabled > 0
def _autoselect(self):
    """If auto-select is on and a device is linked, appoint that device
    on every active host."""
    if self.autoselect_enabled():
        if self.device != None:
            for host in self._active_host:
                host.set_appointed_device(self.device)
def _set_channel(self, channel):
    # Push the channel to the remote end, then cache it locally.
    self._send('channel', channel)
    self._channel = channel
def set_report_offset(self, val=0):
    # Enable/disable offset reporting (only val == 1 enables), then resend
    # the current offset so the remote end picks up the new mode at once.
    self._report_offset = (val == 1)
    self._send_offset(self._offset[0], self._offset[1])
def set_monomodular(self, val=0):
    # Plain cache setter for the monomodular flag.
    self._monomodular = val
def set_color_map(self, color_type, color_map):
    """Install a color map for this client on every matching host script.

    color_map is a '*'-separated string of ints. Any host whose script
    color type matches *color_type* gets the parsed map stored under this
    client's number; hosts currently displaying this client are refreshed.
    """
    for host in self._host._hosts:
        #self._host.log_message(str(host._host_name) + str(host_name))
        if str(host._script._color_type) == str(color_type):
            #new_map = [color_map[i] for i in range(len(color_map))]
            #self._host.log_message('mapping ' + str(host_name) + ' to ' + str(self._number))
            new_map = color_map.split('*')
            for index in xrange(len(new_map)):
                new_map[index] = int(new_map[index])
            #self._host.log_message(str(host_name) + str(new_map))
            host._color_maps[self._number] = new_map
            if host._active_client is self:
                host._select_client(self._number)
            #self._host.log_message(str(host_name) + ' ' + str(color_map.split('*')))
def linked_device(self):
    # Accessor for the device currently linked to this client (may be None).
    return self.device
"""CNTRL:R specific methods"""
def _create_c_grid(self):
self._c_grid = [None for index in range(4)]
for column in range(4):
self._c_grid[column] = [None for index in range(4)]
for row in range(4):
self._c_grid[column][row] = 0
def _create_c_keys(self):
self._c_key = [None for index in range(32)]
for index in range(32):
self._c_key[index] = 0
def _create_c_knobs(self):
self._knob = [None for index in range(24)]
for index in range(24):
self._knob[index] = 0
def _create_c_wheels(self):
self._c_wheel = [[] for index in range(4)]
for column in range(4):
self._c_wheel[column] = [[] for index in range(3)]
for row in range(3):
self._c_wheel[column][row] = {'log': 0, 'value': 0, 'mode':0, 'white': 0, 'green': 0, 'custom':'00000000', 'pn':' ', 'pv': '0'}
def _send_c_knob(self, index, value=0):
    # Forward a CNTRL:R knob value over the link.
    self._send('c_knob', index, value)
def _send_c_key(self, index, value=0):
    # Forward a CNTRL:R key state over the link.
    self._send('c_key', index, value)
def _send_c_grid(self, column, row, value=0):
    # Forward a CNTRL:R grid cell state over the link.
    self._send('c_grid', column, row, value)
def _send_c_dial(self, column, row, value=0):
    # NOTE(review): this definition is shadowed by an identical duplicate
    # (without the default value) later in the class; the later def wins,
    # so this one is effectively dead code.
    self._send('c_dial', column, row, value)
def _send_c_dial_button(self, column, row, value=0):
    # Row 0 is skipped; rows are re-based by -1 on the wire.
    # NOTE(review): shadowed by a later duplicate definition; the later
    # def wins, so this one is effectively dead code.
    if row > 0:
        self._send('c_dial_button', column, row-1, value)
def receive_c_key(self, index, value=0):
    """Update the cached CNTRL:R key state and, only when it actually
    changed, forward the new value to every active host."""
    if self._c_key[index] == value:
        return
    self._c_key[index] = value
    for host in self._active_host:
        host._send_c_key(index, value)
def receive_c_grid(self, column, row, value=0):
    """Update the cached CNTRL:R grid cell and, only when it actually
    changed, forward the new value to every active host."""
    if self._c_grid[column][row] == value:
        return
    self._c_grid[column][row] = value
    for host in self._active_host:
        host._send_c_grid(column, row, value)
def receive_c_grid_row(self, row, value=0):
    """Set an entire cached CNTRL:R grid row to *value* and forward the
    row to every active host."""
    g_len = len(self._c_grid)
    for column in xrange(g_len):
        self._c_grid[column][row] = value
    for host in self._active_host:
        for column in xrange(g_len):
            host._send_c_grid(column, row, value)
def receive_c_grid_column(self, column, value=0):
    """Set an entire cached CNTRL:R grid column to *value* and forward
    the column to every active host."""
    g_len = len(self._c_grid[0])
    for row in xrange(g_len):
        self._c_grid[column][row] = value
    for host in self._active_host:
        for row in xrange(g_len):
            host._send_c_grid(column, row, value)
def receive_c_grid_all(self, value=0):
    """Set every cached CNTRL:R grid cell to *value* and forward the full
    grid to every active host."""
    g_len = len(self._c_grid)
    g_ht = len(self._c_grid[0])
    for column in xrange(g_len):
        for row in xrange(g_ht):
            self._c_grid[column][row] = value
    for host in self._active_host:
        for column in xrange(g_len):
            for row in xrange(g_ht):
                host._send_c_grid(column, row, value)
def receive_mask_c_key(self, num, value=-1):
    """Overlay CNTRL:R key *num* on all active hosts; value of -1
    restores the cached key state. The cache is never modified."""
    if value > -1:
        for host in self._active_host:
            host._send_c_key(num, value)
    else:
        for host in self._active_host:
            host._send_c_key(num, int(self._c_key[num]))
def receive_mask_c_grid(self, column, row, value=-1):
    """Overlay a single CNTRL:R grid cell on all active hosts; value of
    -1 restores the cached cell state."""
    if value > -1:
        for host in self._active_host:
            host._send_c_grid(column, row, value)
    else:
        for host in self._active_host:
            host._send_c_grid(column, row, int(self._c_grid[column][row]))
def receive_mask_c_column(self, column, value=-1):
    """Overlay a full 4-cell CNTRL:R grid column on all active hosts;
    value of -1 restores the cached cell states."""
    if value > -1:
        for host in self._active_host:
            for index in xrange(4):
                host._send_c_grid(column, index, value)
    else:
        for host in self._active_host:
            for index in xrange(4):
                host._send_c_grid(column, index, self._c_grid[column][index])
def receive_mask_c_row(self, row, value=-1):
    """Overlay a full 4-cell CNTRL:R grid row on all active hosts; value
    of -1 restores the cached cell states."""
    if value > -1:
        for host in self._active_host:
            for index in xrange(4):
                host._send_c_grid(index, row, value)
    else:
        for host in self._active_host:
            for index in xrange(4):
                host._send_c_grid(index, row, self._c_grid[index][row])
def receive_mask_c_all(self, value=-1):
    """Overlay the entire 4x4 CNTRL:R grid on all active hosts.

    value > -1 sends the mask value for every cell; -1 restores the
    cached cell states. The cache (self._c_grid) is never modified.

    BUG FIX: the restore branch read self._c_grid[index][row]; `index`
    is not defined in this method (stale copy/paste from the row/column
    variants), so the restore used the wrong cells or raised NameError.
    """
    if value > -1:
        for host in self._active_host:
            for column in range(4):
                for row in range(4):
                    host._send_c_grid(column, row, value)
    else:
        for host in self._active_host:
            for column in range(4):
                for row in range(4):
                    host._send_c_grid(column, row, self._c_grid[column][row])
def receive_c_wheel(self, number, parameter, value):
    """Update one field of a CNTRL:R encoder's state dict and forward it.

    *number* is a flat index over a 4-column layout: column = number % 4,
    row = number // 4. 'white' updates are only forwarded for rows > 0 --
    presumably the top row's white ring is host-managed; confirm against
    the host scripts (receive_wheel uses row > -1 instead).
    """
    column = number%4
    row = int(number/4)
    if self._c_wheel[column]:
        if self._c_wheel[column][row]:
            wheel = self._c_wheel[column][row]
            wheel[parameter] = value
            if parameter!='white':
                for host in self._active_host:
                    host._send_c_wheel(column, row, wheel, parameter)
            elif row > 0:
                for host in self._active_host:
                    host._send_c_wheel(column, row, wheel, parameter)
def _send_c_dial(self, column, row, value):
    # Duplicate definition: overrides the earlier _send_c_dial (which had
    # a default for value). The later def wins at class creation time.
    self._send('c_dial', column, row, value)
def _send_c_dial_button(self, column, row, value):
    # Duplicate definition: overrides the earlier _send_c_dial_button
    # (which had a default for value). Row 0 is skipped; rows re-based by -1.
    if row > 0:
        self._send('c_dial_button', column, row-1, value)
def set_c_absolute_mode(self, val=1):
    """Cache the CNTRL:R absolute-encoder mode and, while this client is
    enabled, forward it to every active host that supports it."""
    #self._host.log_message('client set absolute mode ' + str(val))
    self._c_absolute_mode = val
    if self._enabled:
        for host in self._active_host:
            if 'set_c_absolute_mode' in dir(host):
                host.set_c_absolute_mode(self._c_absolute_mode)
def set_c_local_ring_control(self, val = 0):
    """Cache the CNTRL:R local LED-ring control mode and, while enabled,
    forward it to every active host that supports it."""
    self._c_local_ring_control = val
    if self._enabled:
        for host in self._active_host:
            if 'set_c_local_ring_control' in dir(host):
                host.set_c_local_ring_control(self._c_local_ring_control)
def receive_mod_color(self, val=0):
    """Store this client's mod color and ask capable hosts to refresh
    their mod-color display.

    NOTE(review): val == 1 is deliberately ignored -- presumably a
    reserved value; confirm against the host scripts.
    """
    #self._host.log_message('mod color' + str(val))
    if val != 1:
        self._mod_color = val
    for host in self._active_host:
        if '_display_mod_colors' in dir(host):
            host._display_mod_colors()
def _mod_dial_parameter(self):
param = None
if not self.device == None:
for parameter in self.device.parameters:
if (parameter.original_name == 'moddial'):
param = parameter
break
return param
def send_midi(self, Type, num, val):
    # Delegate raw MIDI output to the owning host.
    self._host.send_midi(Type, num, val)
"""Codec specific methods"""
def _send_dial(self, column, row, value=0):
    # Forward a Codec dial value over the link.
    self._send('dial', column, row, value)
def _send_dial_button(self, column, row, value=0):
if column < 8 and row < 4:
self._send('dial_button', column, row, value)
elif row is 4:
self._send('column_button', column, value)
else:
self._send('row_button', row, value)
def receive_wheel(self, number, parameter, value):
    """Update one field of a Codec encoder's state dict and forward it.

    *number* is a flat index over a 9-column layout: column = number % 9,
    row = number // 9.

    NOTE(review): the 'white' branch guard `row > -1` is always true for
    valid rows, so 'white' is forwarded unconditionally -- likely meant
    to mirror receive_c_wheel's `row > 0`; confirm before changing.
    """
    column = number%9
    row = int(number/9)
    if self._wheel[column]:
        if self._wheel[column][row]:
            self._wheel[column][row][parameter] = value
            #if self.is_active():
            if parameter!='white':
                for host in self._active_host:
                    host._send_wheel(column, row, self._wheel[column][row], parameter)
            elif row > -1:
                for host in self._active_host:
                    host._send_wheel(column, row, self._wheel[column][row], parameter)
def set_local_ring_control(self, val = 1):
    """Cache the Codec local LED-ring control mode and, while enabled,
    forward it to every active host that supports it."""
    #self._host.log_message('client set local ring ' + str(val))
    self._local_ring_control = val
    if self._enabled:
        for host in self._active_host:
            if 'set_local_ring_control' in dir(host):
                host.set_local_ring_control(self._local_ring_control)
def set_absolute_mode(self, val = 1):
    """Cache the Codec absolute-encoder mode and, while enabled, forward
    it to every active host that supports it."""
    #self._host.log_message('client set absolute mode ' + str(val))
    self._absolute_mode = val
    if self._enabled:
        for host in self._active_host:
            if 'set_absolute_mode' in dir(host):
                host.set_absolute_mode(self._absolute_mode)
"""MonoDevice integration"""
def receive_device(self, command, args0 = None, args1 = None, args2 = None):
    """Dispatch a MonoDevice call by name onto the device component.

    Only attribute names that actually exist on the component are
    invoked; unknown commands are silently ignored.
    """
    if command in dir(self._device_component):
        getattr(self._device_component, command)(args0, args1, args2)
|
nilq/baby-python
|
python
|
# -*- encoding: utf-8
import sys
import pytest
import lswifi
# WirelessNetworkBss
class TestElements:
    """Unit tests for lswifi's WirelessNetworkBss element helpers."""

    def test_parse_rates(self):
        """parse_rates normalizes a rates string: sorts numerically and
        upper-cases the '(b)' basic-rate markers to '(B)'."""
        test1 = lswifi.elements.OutObject(
            value="1(b) 2(b) 5.5(b) 11(b) 6(b) 9 12(b) 18 24(b) 36 48 54"
        )
        test2 = lswifi.elements.OutObject(
            value="1(b) 2(b) 5.5(b) 11(b) 18 24 36 54 6 9 12 48"
        )
        test3 = lswifi.elements.OutObject(value="6(b) 9 12(b) 18 24(b) 36 48 54")
        assert (
            lswifi.elements.WirelessNetworkBss.parse_rates(test1)
            == "1(B) 2(B) 5.5(B) 6(B) 9 11(B) 12(B) 18 24(B) 36 48 54"
        )
        assert (
            lswifi.elements.WirelessNetworkBss.parse_rates(test2)
            == "1(B) 2(B) 5.5(B) 6 9 11(B) 12 18 24 36 48 54"
        )
        assert (
            lswifi.elements.WirelessNetworkBss.parse_rates(test3)
            == "6(B) 9 12(B) 18 24(B) 36 48 54"
        )

    def test_convert_timestamp_to_uptime(self):
        """convert_timestamp_to_uptime renders a microsecond BSS timestamp
        as a 'DDd H:MM:SS' uptime string (days zero-padded to 2)."""
        assert (
            lswifi.elements.WirelessNetworkBss.convert_timestamp_to_uptime(
                13667420576596
            )
            == "158d 4:30:20"
        )
        assert (
            lswifi.elements.WirelessNetworkBss.convert_timestamp_to_uptime(179295494144)
            == "02d 1:48:15"
        )
        assert (
            lswifi.elements.WirelessNetworkBss.convert_timestamp_to_uptime(285837076)
            == "00d 0:04:45"
        )
|
nilq/baby-python
|
python
|
import kaggle
import pathlib
import shutil
# You need to have ~/.kaggle/kaggle.json in your device.
competition_name = 'tgs-salt-identification-challenge'
out_path = pathlib.Path('Dataset')
def download(train: bool = True) -> None:
    """Download and extract one split of the TGS salt competition data.

    Requires Kaggle API credentials in ~/.kaggle/kaggle.json. The zip is
    fetched into Dataset/.temp_storage, any previous extraction of the
    split is removed, and the archive is unpacked to Dataset/<split>.

    :param train: True for the 'train' split, False for 'test'.
    """
    fn = 'train' if train else 'test'
    print(f'[INFO] Downloading {fn} data.')
    kaggle.api.competition_download_file(competition_name, fn + '.zip', path=out_path / '.temp_storage',
                                         force=True, quiet=True)
    # Drop any stale extraction before unpacking the fresh archive.
    shutil.rmtree(out_path / fn, ignore_errors=True)
    print(f'[INFO] Extracting {fn} data.')
    shutil.unpack_archive(str(out_path / '.temp_storage' / fn) + '.zip', out_path / fn)
    shutil.rmtree(out_path / '.temp_storage', ignore_errors=True)
    print()
# Script entry point: fetch both splits.
if __name__ == '__main__':
    download(train=True)
    download(train=False)
    print('Done')
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paasta_tools.autoscaling.pause_service_autoscaler import (
delete_service_autoscale_pause_time,
)
from paasta_tools.autoscaling.pause_service_autoscaler import (
get_service_autoscale_pause_time,
)
from paasta_tools.autoscaling.pause_service_autoscaler import (
update_service_autoscale_pause_time,
)
from paasta_tools.utils import _log_audit
from paasta_tools.utils import paasta_print
MAX_PAUSE_DURATION = 320
def add_subparser(subparsers):
    """Register the 'paasta pause_service_autoscaler' subcommand.

    Adds cluster selection plus mutually-informal modes: pause for a
    duration (default), --info to query, --resume to unpause, and
    --force to exceed MAX_PAUSE_DURATION.
    """
    status_parser = subparsers.add_parser(
        "pause_service_autoscaler",
        help="Pause the service autoscaler for an entire cluster",
        description=(
            "'paasta pause_service_autoscaler is used to pause the paasta service autoscaler "
            "for an entire paasta cluster. "
        ),
    )
    status_parser.add_argument(
        "-c",
        "--cluster",
        dest="cluster",
        help="which cluster to pause autoscaling in. ie. norcal-prod",
    )
    status_parser.add_argument(
        "-d",
        "--pause-duration",
        default=120,
        dest="duration",
        type=int,
        help="How long to pause the autoscaler for, defaults to %(default)s minutes",
    )
    status_parser.add_argument(
        "-f",
        "--force",
        help="Force pause for longer than max duration",
        action="store_true",
        dest="force",
        default=False,
    )
    status_parser.add_argument(
        "-i",
        "--info",
        help="Print when the autoscaler is paused until",
        action="store_true",
        dest="info",
        default=False,
    )
    status_parser.add_argument(
        "-r",
        "--resume",
        help="Resume autoscaling (unpause) in a cluster",
        action="store_true",
        dest="resume",
        default=False,
    )
    status_parser.set_defaults(command=paasta_pause_service_autoscaler)
def paasta_pause_service_autoscaler(args):
    """With a given cluster and duration, pauses the paasta service autoscaler
    in that cluster for duration minutes.

    Returns the underlying API return code, or 3 when the requested
    duration exceeds MAX_PAUSE_DURATION and --force was not given.
    NOTE(review): the duration guard also rejects --info/--resume
    invocations that happen to pass a large -d; confirm that is intended.
    """
    if args.duration > MAX_PAUSE_DURATION:
        if not args.force:
            paasta_print(
                "Specified duration: {d} longer than max: {m}".format(
                    d=args.duration, m=MAX_PAUSE_DURATION
                )
            )
            paasta_print("If you are really sure, run again with --force")
            return 3
    if args.info:
        return_code = get_service_autoscale_pause_time(args.cluster)
    elif args.resume:
        return_code = delete_service_autoscale_pause_time(args.cluster)
        _log_audit(action="resume-service-autoscaler", cluster=args.cluster)
    else:
        minutes = args.duration
        return_code = update_service_autoscale_pause_time(args.cluster, minutes)
        _log_audit(
            action="pause-service-autoscaler",
            action_details={"duration": minutes},
            cluster=args.cluster,
        )
    return return_code
|
nilq/baby-python
|
python
|
from discord.ext import commands
from ytdl.source import YTDLSource
from asyncio import sleep
class Kakatua(commands.Cog):
    """Discord music-bot cog: queues YouTube URLs and streams them to the
    caller's voice channel via YTDLSource."""

    def __init__(self, bot: commands.Bot):
        self.bot = bot
        self.playlist = []   # pending track-info dicts (FIFO)
        self.player = None   # currently playing audio source

    @commands.command()
    async def check(self, ctx: commands.Context, *, text):
        # Liveness check: echo the given text back.
        await ctx.send(text)

    @commands.command()
    async def play(self, ctx: commands.Context, *, url):
        # Queue everything extracted from the URL, then start playback.
        self.playlist.extend(await YTDLSource.extract_info(url, loop=self.bot.loop, stream=True))
        await self.__play_next(ctx)

    @commands.command()
    async def stop(self, ctx: commands.Context):
        # Clear the queue and leave the voice channel.
        self.playlist.clear()
        if ctx.voice_client.is_connected():
            await ctx.voice_client.disconnect()

    @commands.command()
    async def np(self, ctx: commands.Context):
        # Short alias for now_playing.
        # NOTE(review): raises AttributeError if nothing has played yet
        # (self.player is None) -- confirm desired behavior.
        await self.__display_current_playing(ctx)

    @commands.command()
    async def now_playing(self, ctx: commands.Context):
        await self.__display_current_playing(ctx)

    @commands.command()
    async def queue(self, ctx: commands.Context):
        await self.__display_playlist(ctx)

    @commands.command()
    async def next(self, ctx: commands.Context):
        # Stopping the current source lets the __play_next wait-loop advance.
        if ctx.voice_client.is_playing():
            ctx.voice_client.stop()

    @commands.command()
    async def pause(self, ctx: commands.Context):
        if ctx.voice_client.is_playing():
            ctx.voice_client.pause()

    @commands.command()
    async def resume(self, ctx: commands.Context):
        if ctx.voice_client.is_paused():
            ctx.voice_client.resume()
            await self.__play_next(ctx)

    @play.before_invoke
    async def ensure_voice(self, ctx: commands.Context):
        """Join the author's voice channel before playing, or abort the
        command when the author is not in one."""
        if ctx.voice_client is None:
            if ctx.author.voice:
                await ctx.author.voice.channel.connect()
            else:
                await ctx.send('You are not connected to a voice channel.')
                raise commands.CommandError('Author not connected to a voice channel.')
        elif ctx.voice_client.is_playing():
            ctx.voice_client.stop()

    async def __play_next(self, ctx: commands.Context):
        """Pop and play the next queued track; poll once a second until it
        finishes, then recurse onto the next track or disconnect when the
        queue is empty."""
        if len(self.playlist) > 0:
            async with ctx.typing():
                data = self.playlist.pop(0)
                self.player = await YTDLSource.play_url(data, stream=True)
                ctx.voice_client.play(self.player, after=lambda e: print(f'Player error: {e}') if e else None)
            await self.__display_current_playing(ctx)
            while ctx.voice_client is not None and ctx.voice_client.is_playing():
                await sleep(1)
            if not ctx.voice_client.is_paused():
                await self.__play_next(ctx)
        elif ctx.voice_client is not None:
            await ctx.voice_client.disconnect()

    async def __display_current_playing(self, ctx: commands.Context):
        await ctx.send(f'Now playing: {self.player.title}')

    async def __display_playlist(self, ctx: commands.Context):
        await ctx.send('\n'.join(map(lambda item: item['title'], self.playlist)))
|
nilq/baby-python
|
python
|
from __future__ import division
from mmcv import Config
from mmcv.runner import obj_from_dict
from mmdet import datasets, __version__
from mmdet.apis import (train_detector, get_root_logger)
from mmdet.models import build_detector
import os
import os.path as osp
import getpass
import torch
"""
Author:Yuan Yuan
Date:2019/02/11
Description: This script is used to train detectors with config files.
"""
def main():
    """Train each detector listed in `configs` sequentially with mmdet.

    For every config file: builds the model, loads the training dataset,
    records the work dir in a per-user temp file, and runs train_detector
    with validation enabled.
    """
    configs = \
        [
            # '../../configs/cvc09/faster_rcnn_r50_c4_cvc.py',
            # '../../configs/cvc09/faster_rcnn_r50_fpn_cvc.py',
            # '../../configs/cvc09/faster_rcnn_v16_c5_cvc.py',
            # '../../configs/cvc09/faster_rcnn_v16_fpn_cvc.py',
            # '../../configs/caltech/faster_rcnn_r50_fpn_caltech.py',
            # '../../configs/caltech/faster_rcnn_r50_c4_caltech.py'
            # '../../configs/kaist/faster_rcnn_r50_c4_rgb_kaist.py',
            # '../../configs/kaist/faster_rcnn_r50_fpn_rgb_kaist.py',
            # '../../configs/kaist/faster_rcnn_r50_c4_thermal_kaist.py',
            # '../../configs/kaist/faster_rcnn_r50_fpn_thermal_kaist.py',
            # '../../configs/kaist/faster_rcnn_v16_c5_rgb_kaist.py',
            # '../../configs/kaist/faster_rcnn_v16_fpn_rgb_kaist.py',
            # '../../configs/kaist/faster_rcnn_v16_c5_thermal_kaist.py',
            # '../../configs/kaist/faster_rcnn_v16_fpn_thermal_kaist.py',
            # '../../configs/kaist/mul_faster_rcnn_v16_fpn_cat_kaist.py',
            #
            # '../../configs/kaist/mul_faster_rcnn_r50_c4_add_kaist.py',
            # '../../configs/kaist/mul_faster_rcnn_r50_fpn_add_kaist.py',
            # '../../configs/kaist/mul_faster_rcnn_v16_c5_add_kaist.py',
            # '../../configs/kaist/mul_faster_rcnn_v16_fpn_add_kaist.py',
            # '../../configs/kaist-cross/cross_mul_faster_rcnn_r50_fpn_cat_kaist.py',
            # '../../configs/kaist-cross/cross_mul_faster_rcnn_v16_fpn_cat_kaist.py'
            # '../../configs/kaist/cross_faster_rcnn_v16_c5_cross_kaist.py',
            # '../../configs/kaist/cross_faster_rcnn_v16_fpn_cross_kaist.py',
            '../../configs/kaist/cross_faster_rcnn_r50_c4_cross_kaist.py',
            '../../configs/kaist/cross_faster_rcnn_r50_fpn_cross_kaist.py',
            # '../../configs/kaist/mul_faster_rcnn_v16_pre_fpn_add_kaist.py',
            # '../../configs/kaist/mul_faster_rcnn_v16_pre_fpn_cat_kaist.py',
            #
            # '../../configs/kaist/mul_faster_rcnn_r50_pre_fpn_add_kaist.py',
            # '../../configs/kaist/mul_faster_rcnn_r50_pre_fpn_cat_kaist.py',
        ]
    for config in configs:
        # load dataset
        cfg = Config.fromfile(config)
        cfg.gpus = 1
        # NOTE(review): os.mkdir fails if the parent directory does not
        # exist -- os.makedirs would be more robust; confirm layout.
        if not os.path.exists(cfg.work_dir):
            os.mkdir(cfg.work_dir)
        if cfg.checkpoint_config is not None:
            # save mmdet version in checkpoints as meta data
            cfg.checkpoint_config.meta = dict(
                mmdet_version=__version__, config=cfg.text)
        # Record the absolute work dir in a per-user temp file (consumed by
        # external tooling).
        username = getpass.getuser()
        temp_file = '/media/' + username + '/Data/DoubleCircle/temp/temp.txt'
        fo = open(temp_file, 'w+')
        str_write = cfg.work_dir.replace('../..',
                                         ('/media/'+username+'/Data/DoubleCircle/project/mmdetection/mmdetection'))
        fo.write(str_write)
        fo.close()
        distributed = False
        # init logger before other steps
        logger = get_root_logger(cfg.log_level)
        logger.info('Distributed training: {}'.format(distributed))
        # build model
        model = build_detector(
            cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
        # create datasets used for train and validation
        train_dataset = obj_from_dict(cfg.data.train, datasets)
        # train a detector
        train_detector(
            model,
            train_dataset,
            cfg,
            distributed=distributed,
            validate=True,
            logger=logger)
# Script entry point.
if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
import csv
import time
from gensim.models.doc2vec import Doc2Vec
from chatterbot import ChatBot
# Input artifacts: the training CSV (for canned responses) and the trained
# doc2vec model on disk; punctuation stripped during tokenization.
data_input_filename = 'training_data.csv'
doc2vec_filename = 'doc2vecmodel'
punctuation = ['.',',',';','!','?','(',')']

# Load every response (column 1) of the training data. Row order must match
# the doc2vec document tags ('input<N>') so index i maps to its response.
data_input_file = open(data_input_filename, 'r', encoding='UTF-8', newline='')
csv_reader = csv.reader(data_input_file, delimiter=',', quotechar='"')
outputs = []
for line in csv_reader:
    outputs.append(line[1])
data_input_file.close()

doc2vecmodel = Doc2Vec.load(doc2vec_filename)

# Reference bot for side-by-side comparison with the doc2vec retriever.
chatterbot = ChatBot('chatterbot',
                     storage_adapter=
                     # automatically loads data from SQLite
                     # database with the default name
                     'chatterbot.storage.SQLStorageAdapter',
                     preprocessors=[
                         'chatterbot.preprocessors.clean_whitespace',
                         'chatterbot.preprocessors.unescape_html'],
                     logic_adapters=[
                         {'import_path':
                          'chatterbot.logic.BestMatch',
                          'statement_comparison_function':
                          'chatterbot.comparisons.levenshtein_distance',
                          'response_selection_method':
                          'chatterbot.response_selection.get_most_frequent_response'
                          }],
                     # stop user interactions from training the bot
                     read_only=True
                     )
# Interactive REPL: print doc2vecbot's and chatterbot's answers (with
# timings) for each user input; 'q'/'quit' exits.
while True:
    print()
    new_input = input('you: ')
    if new_input.lower() == 'q' or new_input.lower() == 'quit':
        break
    if new_input == '':  # empty input
        continue
    chatterbot_input = new_input
    # Lowercase, split, and strip punctuation from tokens longer than two
    # characters (short tokens are assumed to be emoji and left intact).
    doc2vec_input = new_input.lower().split()
    for i in range(len(doc2vec_input)):
        if len(doc2vec_input[i]) > 2:  # not emoji
            for c in punctuation:
                doc2vec_input[i] = doc2vec_input[i].replace(c, '')
    start = time.perf_counter()  # FIX: time.clock() was removed in Python 3.8
    # BUG FIX: infer from the prepared token list; the original passed the
    # raw string, which gensim treats as a sequence of characters.
    vect = doc2vecmodel.infer_vector(doc2vec_input)
    similars = doc2vecmodel.docvecs.most_similar([vect], topn=len(doc2vecmodel.docvecs))
    for (i, similarity) in similars:
        if 'input' in i:
            i = int(i.replace('input', ''))
            # corresponding response
            print('doc2vecbot: "' + outputs[i] + '"')
            break
    end = time.perf_counter()
    print('doc2vecbot answered in ' + str(round(end-start, 1)) + 's')
    start = time.perf_counter()
    response = str(chatterbot.get_response(chatterbot_input))
    print('chatterbot: "' + response + '"')
    end = time.perf_counter()
    print('chatterbot answered in ' + str(round(end-start, 1)) + 's')
|
nilq/baby-python
|
python
|
from setuptools import setup, find_packages
# Core runtime dependencies.
install_requires = [
    "setuptools>=41.0.0",
    "numpy>=1.16.0",
    "joblib",
    "scipy"
]
# Optional extras: pick the TensorFlow build matching your hardware,
# e.g. `pip install tf2gan[tf_gpu]`.
extras_require = {
    "tf": ["tensorflow==2.0.0"],
    "tf_gpu": ["tensorflow-gpu==2.0.0"]
}

setup(
    name="tf2gan",
    version="0.0.0",
    description="Generative Adversarial Networks using TensorFlow2.x",
    url="https://github.com/keiohta/tf2gan",
    author="Kei Ohta",
    author_email="dev.ohtakei@gmail.com",
    license="MIT",
    packages=find_packages("."),
    install_requires=install_requires,
    extras_require=extras_require)
|
nilq/baby-python
|
python
|
# Re-export the images API's URL patterns as this module's Django urlpatterns.
from images import api
urlpatterns = api.urlpatterns
|
nilq/baby-python
|
python
|
# coding=utf8
# Author: TomHeaven, hanlin_tan@nudt.edu.cn, 2017.08.19
from __future__ import print_function
from tensorflow.contrib.layers import conv2d, avg_pool2d
import tensorflow as tf
import numpy as np
from data_v3 import DatabaseCreator
import time
import tqdm
import cv2
import re
import os
import argparse
import h5py
# options
DEBUG = False
class Estimator:
    """
    Train and test a TF1 residual CNN that estimates the Gaussian noise
    level of an image patch.

    The network outputs a per-pixel noise map; its mean serves as the
    scalar noise estimate. `denoise_bayer` (name kept for interface
    compatibility) runs tiled inference over a whole image.
    """

    def __init__(self, batchSize=32, depth=8, feature_dim=8, device='/gpu:0', xshape=[128, 128, 3], yshape=[128, 128, 3], lr=1e-4):
        # NOTE: the list defaults are shared between instances but are
        # never mutated here, so this is safe in practice.
        self.batchSize = batchSize
        self.depth = depth              # total conv depth (multiple of 4)
        self.feature_dim = feature_dim  # conv width (channels)
        self.device = device
        self.xshape = xshape
        self.yshape = yshape
        self.lr = lr                    # Adam learning rate

    def init_weights(self, shape, name):
        # Random-normal variable helper (not used by build_model).
        return tf.Variable(tf.random_normal(shape, stddev=0.01), name=name)

    def residual_block(self, h, width, kernel_size, depth):
        """Stack *depth* conv layers and add the block input back."""
        h_in = h
        for i in range(depth):
            h = conv2d(h, width, kernel_size)
        return h_in + h

    def build_model(self, bTrain):
        """Build the graph.

        bTrain=True: corrupts the input with in-graph Gaussian noise of
        the fed level and returns (y_conv, train_op, cost, x, noise_level);
        the loss mixes a per-pixel term with a scalar-mean term (lmd).
        bTrain=False: returns (y_conv, x) for inference on noisy input.
        """
        assert len(self.xshape) == 3
        lmd = 0.25  # weight of the per-pixel loss vs the scalar loss
        # place holders
        x = tf.placeholder('float', [self.batchSize, self.xshape[0], self.xshape[1], self.xshape[2]], 'x')
        if bTrain:
            noise_level = tf.placeholder('float', shape=(1), name='noise')
            noise = tf.fill([self.batchSize, self.xshape[0], self.xshape[1], 1], noise_level[0])
            gaussian_noise = tf.random_normal(shape=tf.shape(x), stddev=noise_level[0], dtype=tf.float32)
            h = x + gaussian_noise
        else:
            h = x
        # data flow: num_block residual blocks of block_depth convs each.
        block_depth = 4
        # FIX: integer division -- on Python 3 `/` would yield a float and
        # break range(); `//` is identical on Python 2 for ints.
        num_block = self.depth // block_depth
        for d in range(0, num_block):
            h = conv2d(h, self.feature_dim, [3, 3])
            h = self.residual_block(h, self.feature_dim, [3, 3], block_depth)
        h = conv2d(h, 1, [3, 3])  # project to a single noise-map channel
        y_conv = h
        scalar_en = tf.reduce_mean(h)  # scalar noise estimate
        # loss function
        if bTrain:
            cost_mat = tf.reduce_sum(tf.square(tf.subtract(noise, y_conv))) / self.batchSize
            cost_scalar = tf.square(tf.subtract(scalar_en, noise_level[0]))
            cost = lmd * cost_mat + (1 - lmd) * cost_scalar
            train_op = tf.train.AdamOptimizer(self.lr).minimize(cost)
            return y_conv, train_op, cost, x, noise_level
        else:
            return y_conv, x

    def train(self, saveDir, trY, valY, minNoiseLevel, maxNoiseLevel, maxEpoch=1000, part=0):
        """Train on clean patches trY (values in [0, 1]); noise is
        synthesized in-graph at a random level per batch in
        [minNoiseLevel, maxNoiseLevel].

        Resumes from checkpoints/logs in saveDir when present; valY is
        used for the logged validation loss and best-model selection.
        """
        # add a channel axis for grayscale data
        if trY.ndim == 3:
            trY = trY[..., np.newaxis]
        if valY.ndim == 3:
            valY = valY[..., np.newaxis]
        # build the training graph only once per instance
        if not hasattr(self, 'predict_op'):
            print('Building model ...')
            self.predict_op, self.train_op, self.cost, self.x, self.noise_level = self.build_model(bTrain=True)
        saver = tf.train.Saver()
        if not os.path.isdir(saveDir):
            os.mkdir(saveDir)
        curEpoch = 0
        bestLoss = 99999.0
        # resume the epoch counter from the loss log, if any
        if os.path.isfile(saveDir + '/loss.txt'):
            with open(saveDir + '/loss.txt', 'r') as log_file:
                log = log_file.readlines()
                if len(log) > 0:
                    curEpoch = int(log[-1].split(' ')[0]) + 1 + part * maxEpoch
        out_file = open(saveDir + '/loss.txt', 'a')
        with tf.Session() as sess:
            self.sess = sess
            with tf.device(self.device):
                ckpt = tf.train.get_checkpoint_state(saveDir)
                if ckpt and ckpt.model_checkpoint_path:
                    print('Restored training...')
                    saver.restore(sess, saveDir + '/tf_estimator.ckpt')
                else:
                    print('Start training...')
                    # init all variables
                    tf.global_variables_initializer().run()
                for i in range(curEpoch, maxEpoch):
                    start_time = time.time()
                    print('Epoch %d ...' % i)
                    for start, end in zip(range(0, len(trY), self.batchSize),
                                          range(self.batchSize, len(trY) + 1, self.batchSize)):
                        y = trY[start:end]
                        n_level = np.random.rand(1) * (maxNoiseLevel - minNoiseLevel) + minNoiseLevel
                        sess.run(self.train_op, feed_dict={self.x: y, self.noise_level: n_level})
                    # log train/val loss at three fixed noise levels
                    for n_level in [5, 15, 25]:
                        loss = sess.run(self.cost, feed_dict={self.x: trY[:self.batchSize, ...],
                                                              self.noise_level: [n_level / 255.0]})
                        val_loss = sess.run(self.cost, feed_dict={self.x: valY[:self.batchSize, ...],
                                                                  self.noise_level: [n_level / 255.0]})
                        print('loss n : ', n_level, loss, ' val loss : ', val_loss)
                        print(i, n_level, loss, val_loss, file=out_file)
                    print('time : ', time.time() - start_time, ' s')
                    # checkpointing: every 10 epochs during the first 4/5 of
                    # training, then only on validation improvement.
                    # (// keeps the threshold an int on Python 3, matching
                    # the original Python 2 integer division.)
                    if i % 10 == 0:
                        if val_loss < bestLoss or i < maxEpoch * 4 // 5:
                            bestLoss = val_loss
                            saver.save(sess, saveDir + '/tf_estimator.ckpt')
                            print('Model saved')
                            print('Best Loss ', bestLoss)
                    out_file.flush()
                    if i > maxEpoch * 4 // 5 and val_loss < bestLoss:
                        bestLoss = val_loss
                        saver.save(sess, saveDir + '/tf_estimator.ckpt')
                        print('Model saved')
                        print('Best Loss ', bestLoss)
        out_file.close()
        print('Best Loss ', bestLoss)

    def load_model(self, saveDir, batchSize=1, xshape=[128, 128, 1], yshape=[128, 128, 3]):
        """Build the inference graph and restore trained weights."""
        self.batchSize = batchSize
        self.xshape = xshape
        self.yshape = yshape
        self.predict_op, self.x = self.build_model(bTrain=False)
        saver = tf.train.Saver()
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True  # dynamically grow the memory used on the GPU
        sess = tf.Session(config=config)
        with tf.device(self.device):
            ckpt = tf.train.get_checkpoint_state(saveDir)
            if ckpt and ckpt.model_checkpoint_path:
                print('loading model ...')
                # BUG FIX: train() saves 'tf_estimator.ckpt'; the original
                # restored 'tf_denoiser.ckpt', which is never written here.
                saver.restore(sess, saveDir + '/tf_estimator.ckpt')
        self.sess = sess

    def denoise_bayer(self, image, psize, crop):
        """
        Run tiled noise estimation over *image* (pixel values in [0, 1]).
        :param image: HxWxC input image
        :param psize: size of patch
        :param crop: crop of image patch
        :return: (result array clipped to [0, 1], runtime in ms)
        """
        assert image.ndim == 3
        start_time = time.time()
        h, w = image.shape[:2]
        psize = min(min(psize, h), w)
        psize -= psize % 2
        patch_step = psize
        patch_step -= 2 * crop
        shift_factor = 2
        # Result array
        R = np.zeros([image.shape[0], image.shape[1], 3], dtype=np.float32)
        rangex = range(0, w - 2 * crop, patch_step)
        rangey = range(0, h - 2 * crop, patch_step)
        ntiles = len(rangex) * len(rangey)
        sess = self.sess
        with tf.device(self.device):
            with tqdm.tqdm(total=ntiles, unit='tiles', unit_scale=True) as pbar:
                for start_x in rangex:
                    for start_y in rangey:
                        a_time = time.time()
                        end_x = start_x + psize
                        end_y = start_y + psize
                        # Clamp the last tile to the image border, snapped to
                        # an even coordinate. FIX: // keeps indices ints on
                        # Python 3 (identical on Python 2).
                        if end_x > w:
                            end_x = w
                            end_x = shift_factor * (end_x // shift_factor)
                            start_x = end_x - psize
                        if end_y > h:
                            end_y = h
                            end_y = shift_factor * (end_y // shift_factor)
                            start_y = end_y - psize
                        tileM = image[np.newaxis, start_y:end_y, start_x:end_x, :]
                        if DEBUG:
                            print('tileM.shape : ', tileM.shape)
                        b_time = time.time()
                        out = sess.run(self.predict_op, feed_dict={self.x: tileM})
                        c_time = time.time()
                        out = out.reshape(out.shape[1], out.shape[2], 1)
                        s = out.shape[0]
                        # single-channel estimate broadcast into all 3 planes
                        R[start_y + crop:start_y + crop + s,
                          start_x + crop:start_x + crop + s, :] = out
                        d_time = time.time()
                        pbar.update(1)
                        if DEBUG:
                            print('image crop : ', (b_time - a_time) * 1000, ' ms')
                            print('forward : ', (c_time - b_time) * 1000, ' ms')
                            print('put patch back :', (d_time - c_time) * 1000, ' ms')
        R[R < 0] = 0.0
        R[R > 1] = 1.0
        runtime = (time.time() - start_time) * 1000  # in ms
        return R, runtime
#######################################################
# Functions to call Estimator
def mem_divide(x, divider):
    """Scale *x* by 1/divider in place, one leading-axis slice at a time,
    to keep peak memory low for huge arrays; returns *x*.

    NOTE(review): assigning the quotient back into an integer ndarray
    casts (truncates) to the integer dtype -- confirm callers pass float
    data before dividing by 255.0.
    """
    for slice_index in range(x.shape[0]):
        x[slice_index, ...] = x[slice_index, ...] / divider
    return x
def train(modelPath, trainPath, valPath, feature_dim, depth, minNoiseLevel, maxNoiseLevel, x_shape=[128,128,1], y_shape=[128,128,3], device='0'):
    """
    Training using Estimator class.
    :param modelPath: path to save trained model
    :param trainPath: path to training dataset
    :param valPath: path to validation dataset
    :param feature_dim: width of the DNN
    :param depth: depth of the DNN
    :param minNoiseLevel: minimum noise level added to clean images
    :param maxNoiseLevel: maximum noise level added to clean images
    :param x_shape: Input patch size
    :param y_shape: Output patch size
    :param device: which GPU to use (for machines with multiple GPUs, this avoid taking up all GPUs)
    :return: Null
    """
    os.environ['CUDA_VISIBLE_DEVICES'] = device
    estimator = Estimator(device='/gpu:0', depth=depth, feature_dim=feature_dim, xshape=x_shape, yshape=y_shape)
    dc = DatabaseCreator()
    name = 'rgb'
    maxEpoch = 3000
    # validation data: one batch, scaled to [0, 1]
    valY = dc.load_hdf5_v1(valPath, name)
    valY = valY[:estimator.batchSize, ...]
    valY = mem_divide(valY, 255.0)
    npart = dc.load_hdf5_v1(trainPath, 'npart')
    # resume: recover the last finished epoch from the loss log
    curEpoch = 0
    if os.path.isfile(modelPath + '/loss.txt'):
        with open(modelPath + '/loss.txt', 'r') as log_file:
            log = log_file.readlines()
            if len(log) > 0:
                curEpoch = int(log[-1].split(' ')[0])
    # FIX: integer divisions (//) -- on Python 3 `/` would make the start
    # index and maxEpoch arguments floats; // matches the original
    # Python 2 integer-division semantics.
    for i in range((curEpoch + 1) // (maxEpoch // npart), npart):
        print('Data part ', i)
        if i > 0:
            final_name = '%s_%d' % (name, i)
        else:
            final_name = name
        trY = dc.load_hdf5_v1(trainPath, final_name)
        trY = mem_divide(trY, 255.0)
        estimator.train(modelPath, trY, valY, minNoiseLevel, maxNoiseLevel, maxEpoch=maxEpoch // npart * (i + 1))
def test(modelPath, feature_dim, depth, device, noise):
    """
    Denoise noisy images using Estimator class with pre-trained model.
    :param modelPath: path to save trained model
    :param feature_dim: width of the DNN
    :param depth: depth of the DNN
    :param device: which GPU to use (for machines with multiple GPUs, this avoid taking up all GPUs)
    :param noise: standard variation of noise of the tested images
    :return:
    """
    os.environ['CUDA_VISIBLE_DEVICES'] = device
    estimator = Estimator(batchSize=1, feature_dim=feature_dim, depth=depth)
    regexp = re.compile(r'.*\.(%s)' % '(jpg)|(png)|(bmp)|(tif)')
    inputFolder = 'data/mcm'
    psize = 500
    noise_level = noise / 255.0
    print('true noise : ', noise)
    max_value = 255.0
    crop = 0
    n = 0
    avg_en = 0  # running sum of scalar noise estimates (averaged at the end)
    for d, dirs, files in os.walk(inputFolder):
        for f in files:
            if regexp.match(f):
                print('image', n, f)
                image = cv2.imread(os.path.join(d, f))
                #image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
                #cv2.imwrite('%s/%s_rgb.png' % ('output', f), image)
                # corrupt with synthetic Gaussian noise of known sigma
                image = image + np.random.randn(image.shape[0], image.shape[1], image.shape[2]) * noise
                if DEBUG:
                    print ('image.shape : ', image.shape)
                # lazily build/restore the model on the first image
                if n == 0:
                    xshape = [psize, psize, 3]
                    yshape = [psize, psize, 3]
                    estimator.load_model(modelPath, batchSize=1, xshape = xshape, yshape=yshape)
                #cv2.imwrite('%s/%s_in.bmp' % ('output', f), np.uint8(image / max_value * 255.0 + 0.5))
                image = image / max_value
                #cv2.imwrite('%s/%s_in.png' % ('output', f), np.uint8(image * 255 + 0.5))
                R, runtime = estimator.denoise_bayer(image, psize, crop)
                out = np.uint8(R * 255 + 0.5)
                # mean of the predicted noise map = scalar noise estimate
                estimated_noise = np.mean(np.mean(np.mean(R, axis=0), axis=0), axis=0)
                if DEBUG:
                    print('max value = ', np.max(np.abs(R)))
                    print('time : ', runtime, ' ms')
                #cv2.imwrite('data/dnn_res.bmp', out)
                print('estimate_noise : ', estimated_noise * 255.0)
                cv2.imwrite('%s/%s.png' % ('output', f), out)
                with open('data/time.txt', 'w') as out_file:
                    print(runtime, file=out_file)
                n += 1
                avg_en += estimated_noise
    print('avg_en : ', avg_en / n * 255.0)
    estimator.sess.close()
def test_real(modelPath, feature_dim, depth, device):
    """
    Denoise real (already noisy) images using Estimator class with a
    pre-trained model; no synthetic noise is added here.

    :param modelPath: path to save trained model
    :param feature_dim: width of the DNN
    :param depth: depth of the DNN
    :param device: which GPU to use (for machines with multiple GPUs, this avoid taking up all GPUs)
    :return:
    """
    os.environ['CUDA_VISIBLE_DEVICES'] = device
    estimator = Estimator(batchSize=1, feature_dim=feature_dim, depth=depth)
    # Accept common raster image extensions.
    regexp = re.compile(r'.*\.(%s)' % '(jpg)|(png)|(bmp)|(tif)')
    inputFolder = 'data/real'
    psize = 500
    max_value = 255.0
    crop = 0
    n = 0
    avg_en = 0
    for d, dirs, files in os.walk(inputFolder):
        for f in files:
            if regexp.match(f):
                print('image', n, f)
                image = cv2.imread(os.path.join(d, f))
                #image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
                #cv2.imwrite('%s/%s_rgb.png' % ('output', f), image)
                if DEBUG:
                    print ('image.shape : ', image.shape)
                if n == 0:
                    # Build the inference graph lazily on the first image.
                    xshape = [psize, psize, 3]
                    yshape = [psize, psize, 3]
                    estimator.load_model(modelPath, batchSize=1, xshape = xshape, yshape=yshape)
                #cv2.imwrite('%s/%s_in.bmp' % ('output', f), np.uint8(image / max_value * 255.0 + 0.5))
                # Normalize to [0, 1] before feeding the network.
                image = image / max_value
                # cv2.imwrite('%s/%s_in.png' % ('output', f), np.uint8(image * 255 + 0.5))
                R, runtime = estimator.denoise_bayer(image, psize, crop)
                # out = np.uint8(R * 255 + 0.5)
                # Collapse the network output to a single scalar noise estimate.
                estimated_noise = np.mean(np.mean(np.mean(R, axis=0), axis=0), axis=0)
                if DEBUG:
                    print('max value = ', np.max(np.abs(R)))
                    print('time : ', runtime, ' ms')
                #cv2.imwrite('data/ne_res.png', out)
                # Persist the raw network output for offline inspection.
                with h5py.File('data/ne_res.h5', "w") as outFile:
                    outFile.create_dataset('out', data=R * 255, compression='gzip')
                print('estimate_noise : ', estimated_noise * 255.0)
                # cv2.imwrite('%s/%s.png' % ('output', f), out)
                with open('data/time.txt', 'w') as out_file:
                    print(runtime, file=out_file)
                n += 1
                avg_en += estimated_noise
    # Average noise estimate across all processed images, back in [0, 255].
    print('avg_en : ', avg_en / n * 255.0)
    estimator.sess.close()
def test(modelPath, feature_dim, depth, device, noise, use_scalar_noise=True):
    """
    Denoise noisy images using Denoiser class with pre-trained model.

    NOTE(review): this definition shadows the earlier ``test`` function in
    this file (the Estimator-based one); only this version is callable.

    :param modelPath: path to save trained model
    :param feature_dim: width of the DNN
    :param depth: depth of the DNN
    :param device: which GPU to use (for machines with multiple GPUs, this avoid taking up all GPUs)
    :param noise: standard variation of noise of the tested images
    :return:
    """
    os.environ['CUDA_VISIBLE_DEVICES'] = device
    denoiser = Denoiser(batchSize=1, feature_dim=feature_dim, depth=depth, use_scalar_noise=use_scalar_noise)
    regexp = re.compile(r'.*\.(%s)' % '(jpg)|(png)')
    inputFolder = 'data'
    psize = 500
    # The denoiser expects the noise level on a [0, 1] intensity scale.
    noise_level = noise / 255.0
    print('noise_level: ', noise_level)
    max_value = 255.0
    crop = 0
    n = 0
    dc = DatabaseCreator()
    for d, dirs, files in os.walk(inputFolder):
        for f in files:
            if regexp.match(f):
                print('image', n, f)
                image = cv2.imread(os.path.join(d, f))
                #image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
                cv2.imwrite('%s/%s_rgb.png' % ('output', f), image)
                # Corrupt with synthetic Gaussian noise, then convert the RGB
                # image to a 3-channel Bayer mosaic for the network.
                image = image + np.random.randn(image.shape[0], image.shape[1], image.shape[2]) * noise
                image = dc.rgb2bayer3d(image)
                if DEBUG:
                    print ('image.shape : ', image.shape)
                if n == 0:
                    # Build the inference graph lazily on the first image.
                    xshape = [psize, psize, 3]
                    yshape = [psize, psize, 3]
                    denoiser.load_model(modelPath, batchSize=1, xshape = xshape, yshape=yshape)
                #cv2.imwrite('%s/%s_in.bmp' % ('output', f), np.uint8(image / max_value * 255.0 + 0.5))
                # Normalize to [0, 1] before feeding the network.
                image = image / max_value
                cv2.imwrite('%s/%s_in.png' % ('output', f), np.uint8(image * 255 + 0.5))
                R, runtime = denoiser.denoise_bayer(image, noise_level, psize, crop)
                out = np.uint8(R * 255 + 0.5)
                #print('out.shape = ', out.shape)
                if DEBUG:
                    print('max value = ', np.max(np.abs(R)))
                    print('time : ', runtime, ' ms')
                #cv2.imwrite('data/dnn_res.bmp', out)
                cv2.imwrite('%s/%s.png' % ('output', f), out)
                with open('data/time.txt', 'w') as out_file:
                    print(runtime, file=out_file)
                n += 1
    denoiser.sess.close()
if __name__ == '__main__':
    ## configuration
    bTrain = False
    modelPath = 'ne_w64d16_v2_sigma0_30'
    width = 64
    depth = 16 - 4
    device = '0'
    minNoiseLevel = 0.0 / 255.0
    maxNoiseLevel = 30.0 / 255.0
    #### end configuration
    if bTrain:
        train('models/%s' % modelPath, 'data/pristine_rgb2gray.h5',
              'data/kodak_rgb2gray.h5', width, depth, minNoiseLevel, maxNoiseLevel, device=device, x_shape=[128, 128, 3],
              y_shape=[128, 128, 3])
    else:
        parser = argparse.ArgumentParser()
        # BUGFIX: the parsed --noise value used to be discarded in favour of a
        # hard-coded `noise = 5`. The default of 5 preserves the previous
        # behaviour when the flag is omitted, while a user-supplied value is
        # now actually honoured. `test` divides by 255 internally, so the
        # value here is in [0, 255] intensity units.
        parser.add_argument('--noise', type=float, default=5.0,
                            help='standard deviation of additive Gaussian noise, in [0, 255] intensity units.')
        args = parser.parse_args()
        test('models/%s' % modelPath, width, depth=depth, device=device, noise=args.noise)
        #test_real('models/%s' % modelPath, width, depth=depth, device=device)
|
nilq/baby-python
|
python
|
# ------------------------------------------------------------------------------
# Program: The LDAR Simulator (LDAR-Sim)
# File: Operator
# Purpose: Initialize and manage operator detection module
#
# Copyright (C) 2018-2020 Thomas Fox, Mozhou Gao, Thomas Barchyn, Chris Hugenholtz
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the MIT License as published
# by the Free Software Foundation, version 3.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
# You should have received a copy of the MIT License
# along with this program. If not, see <https://opensource.org/licenses/MIT>.
#
# ------------------------------------------------------------------------------
import numpy as np
class OperatorAgent:
    """An operator who visits all sites and occasionally finds a leak."""

    def __init__(self, timeseries, parameters, state):
        """
        Store simulation references and pre-compute summary statistics of
        the initial leak population used by the detection model.
        """
        self.parameters = parameters
        self.state = state
        self.timeseries = timeseries
        init_leaks = self.state['init_leaks']
        self.init_mean_leaks = np.mean(init_leaks)
        self.init_sum_leaks = np.sum(init_leaks)
        self.n_sites = len(self.state['sites'])

    def work_a_day(self):
        """
        Detect leaks during operator visits.
        Detection probability can be a function of leak size.
        """
        timestep = self.state['t'].current_timestep
        active_leaks = self.timeseries['active_leaks'][timestep]
        if active_leaks <= 0:
            return
        # Normalization term based on the initial leak population.
        leak_term = (self.init_sum_leaks / active_leaks) * self.init_mean_leaks
        for leak in self.state['leaks']:
            if leak['status'] != 'active':
                continue
            # Base detection chance plus a size-dependent bonus, clamped to [0, 1].
            prob_detect = self.parameters['LPR'] * 7 / leak_term
            prob_detect += self.parameters['max_det_op'] \
                * (leak['rate'] / (self.state['max_leak_rate']))
            prob_detect = min(max(prob_detect, 0), 1)
            # Scale by operator strength; no detection at all if unconfigured.
            try:
                prob_detect = prob_detect * self.parameters['operator_strength']
            except KeyError:
                prob_detect = 0
            if np.random.binomial(1, prob_detect):
                if leak['tagged']:
                    # Already tagged by someone else: count the redundancy.
                    self.timeseries['operator_redund_tags'][timestep] += 1
                else:
                    # Add this leak to the 'tag pool'.
                    leak['tagged'] = True
                    leak['date_tagged'] = self.state['t'].current_date
                    leak['tagged_by_company'] = 'operator'
                    leak['tagged_by_crew'] = 1
                    self.state['tags'].append(leak)
                    self.timeseries['operator_tags'][timestep] += 1
|
nilq/baby-python
|
python
|
# Generated from parser/TinyPy.g4 by ANTLR 4.5.1
from antlr4 import *
if __name__ is not None and "." in __name__:
from .TinyPyParser import TinyPyParser
else:
from TinyPyParser import TinyPyParser
# This class defines a complete listener for a parse tree produced by TinyPyParser.
class TinyPyListener(ParseTreeListener):
    """Complete listener for parse trees produced by TinyPyParser.

    Auto-generated by ANTLR 4.5.1 from parser/TinyPy.g4 -- do not edit by
    hand; regenerate from the grammar instead. Every callback is an empty
    default: subclass this listener and override only the callbacks you need.
    """

    # Enter a parse tree produced by TinyPyParser#file_input.
    def enterFile_input(self, ctx:TinyPyParser.File_inputContext):
        pass

    # Exit a parse tree produced by TinyPyParser#file_input.
    def exitFile_input(self, ctx:TinyPyParser.File_inputContext):
        pass

    # Enter a parse tree produced by TinyPyParser#single_input.
    def enterSingle_input(self, ctx:TinyPyParser.Single_inputContext):
        pass

    # Exit a parse tree produced by TinyPyParser#single_input.
    def exitSingle_input(self, ctx:TinyPyParser.Single_inputContext):
        pass

    # Enter a parse tree produced by TinyPyParser#eval_input.
    def enterEval_input(self, ctx:TinyPyParser.Eval_inputContext):
        pass

    # Exit a parse tree produced by TinyPyParser#eval_input.
    def exitEval_input(self, ctx:TinyPyParser.Eval_inputContext):
        pass

    # Enter a parse tree produced by TinyPyParser#stmt.
    def enterStmt(self, ctx:TinyPyParser.StmtContext):
        pass

    # Exit a parse tree produced by TinyPyParser#stmt.
    def exitStmt(self, ctx:TinyPyParser.StmtContext):
        pass

    # Enter a parse tree produced by TinyPyParser#simple_stmt.
    def enterSimple_stmt(self, ctx:TinyPyParser.Simple_stmtContext):
        pass

    # Exit a parse tree produced by TinyPyParser#simple_stmt.
    def exitSimple_stmt(self, ctx:TinyPyParser.Simple_stmtContext):
        pass

    # Enter a parse tree produced by TinyPyParser#small_stmt.
    def enterSmall_stmt(self, ctx:TinyPyParser.Small_stmtContext):
        pass

    # Exit a parse tree produced by TinyPyParser#small_stmt.
    def exitSmall_stmt(self, ctx:TinyPyParser.Small_stmtContext):
        pass

    # Enter a parse tree produced by TinyPyParser#compound_stmt.
    def enterCompound_stmt(self, ctx:TinyPyParser.Compound_stmtContext):
        pass

    # Exit a parse tree produced by TinyPyParser#compound_stmt.
    def exitCompound_stmt(self, ctx:TinyPyParser.Compound_stmtContext):
        pass

    # Enter a parse tree produced by TinyPyParser#if_stmt.
    def enterIf_stmt(self, ctx:TinyPyParser.If_stmtContext):
        pass

    # Exit a parse tree produced by TinyPyParser#if_stmt.
    def exitIf_stmt(self, ctx:TinyPyParser.If_stmtContext):
        pass

    # Enter a parse tree produced by TinyPyParser#if_elif.
    def enterIf_elif(self, ctx:TinyPyParser.If_elifContext):
        pass

    # Exit a parse tree produced by TinyPyParser#if_elif.
    def exitIf_elif(self, ctx:TinyPyParser.If_elifContext):
        pass

    # Enter a parse tree produced by TinyPyParser#if_else.
    def enterIf_else(self, ctx:TinyPyParser.If_elseContext):
        pass

    # Exit a parse tree produced by TinyPyParser#if_else.
    def exitIf_else(self, ctx:TinyPyParser.If_elseContext):
        pass

    # Enter a parse tree produced by TinyPyParser#while_stmt.
    def enterWhile_stmt(self, ctx:TinyPyParser.While_stmtContext):
        pass

    # Exit a parse tree produced by TinyPyParser#while_stmt.
    def exitWhile_stmt(self, ctx:TinyPyParser.While_stmtContext):
        pass

    # Enter a parse tree produced by TinyPyParser#for_stmt.
    def enterFor_stmt(self, ctx:TinyPyParser.For_stmtContext):
        pass

    # Exit a parse tree produced by TinyPyParser#for_stmt.
    def exitFor_stmt(self, ctx:TinyPyParser.For_stmtContext):
        pass

    # Enter a parse tree produced by TinyPyParser#funcdef.
    def enterFuncdef(self, ctx:TinyPyParser.FuncdefContext):
        pass

    # Exit a parse tree produced by TinyPyParser#funcdef.
    def exitFuncdef(self, ctx:TinyPyParser.FuncdefContext):
        pass

    # Enter a parse tree produced by TinyPyParser#parameters.
    def enterParameters(self, ctx:TinyPyParser.ParametersContext):
        pass

    # Exit a parse tree produced by TinyPyParser#parameters.
    def exitParameters(self, ctx:TinyPyParser.ParametersContext):
        pass

    # Enter a parse tree produced by TinyPyParser#param_argslist.
    def enterParam_argslist(self, ctx:TinyPyParser.Param_argslistContext):
        pass

    # Exit a parse tree produced by TinyPyParser#param_argslist.
    def exitParam_argslist(self, ctx:TinyPyParser.Param_argslistContext):
        pass

    # Enter a parse tree produced by TinyPyParser#suite.
    def enterSuite(self, ctx:TinyPyParser.SuiteContext):
        pass

    # Exit a parse tree produced by TinyPyParser#suite.
    def exitSuite(self, ctx:TinyPyParser.SuiteContext):
        pass

    # Enter a parse tree produced by TinyPyParser#ExprStmtExpr.
    def enterExprStmtExpr(self, ctx:TinyPyParser.ExprStmtExprContext):
        pass

    # Exit a parse tree produced by TinyPyParser#ExprStmtExpr.
    def exitExprStmtExpr(self, ctx:TinyPyParser.ExprStmtExprContext):
        pass

    # Enter a parse tree produced by TinyPyParser#ExprStmtAssign.
    def enterExprStmtAssign(self, ctx:TinyPyParser.ExprStmtAssignContext):
        pass

    # Exit a parse tree produced by TinyPyParser#ExprStmtAssign.
    def exitExprStmtAssign(self, ctx:TinyPyParser.ExprStmtAssignContext):
        pass

    # Enter a parse tree produced by TinyPyParser#ExprStmtAugmented.
    def enterExprStmtAugmented(self, ctx:TinyPyParser.ExprStmtAugmentedContext):
        pass

    # Exit a parse tree produced by TinyPyParser#ExprStmtAugmented.
    def exitExprStmtAugmented(self, ctx:TinyPyParser.ExprStmtAugmentedContext):
        pass

    # Enter a parse tree produced by TinyPyParser#augassign.
    def enterAugassign(self, ctx:TinyPyParser.AugassignContext):
        pass

    # Exit a parse tree produced by TinyPyParser#augassign.
    def exitAugassign(self, ctx:TinyPyParser.AugassignContext):
        pass

    # Enter a parse tree produced by TinyPyParser#flow_stmt.
    def enterFlow_stmt(self, ctx:TinyPyParser.Flow_stmtContext):
        pass

    # Exit a parse tree produced by TinyPyParser#flow_stmt.
    def exitFlow_stmt(self, ctx:TinyPyParser.Flow_stmtContext):
        pass

    # Enter a parse tree produced by TinyPyParser#return_stmt.
    def enterReturn_stmt(self, ctx:TinyPyParser.Return_stmtContext):
        pass

    # Exit a parse tree produced by TinyPyParser#return_stmt.
    def exitReturn_stmt(self, ctx:TinyPyParser.Return_stmtContext):
        pass

    # Enter a parse tree produced by TinyPyParser#pass_stmt.
    def enterPass_stmt(self, ctx:TinyPyParser.Pass_stmtContext):
        pass

    # Exit a parse tree produced by TinyPyParser#pass_stmt.
    def exitPass_stmt(self, ctx:TinyPyParser.Pass_stmtContext):
        pass

    # Enter a parse tree produced by TinyPyParser#break_stmt.
    def enterBreak_stmt(self, ctx:TinyPyParser.Break_stmtContext):
        pass

    # Exit a parse tree produced by TinyPyParser#break_stmt.
    def exitBreak_stmt(self, ctx:TinyPyParser.Break_stmtContext):
        pass

    # Enter a parse tree produced by TinyPyParser#continue_stmt.
    def enterContinue_stmt(self, ctx:TinyPyParser.Continue_stmtContext):
        pass

    # Exit a parse tree produced by TinyPyParser#continue_stmt.
    def exitContinue_stmt(self, ctx:TinyPyParser.Continue_stmtContext):
        pass

    # Enter a parse tree produced by TinyPyParser#Comparison.
    def enterComparison(self, ctx:TinyPyParser.ComparisonContext):
        pass

    # Exit a parse tree produced by TinyPyParser#Comparison.
    def exitComparison(self, ctx:TinyPyParser.ComparisonContext):
        pass

    # Enter a parse tree produced by TinyPyParser#OrTest.
    def enterOrTest(self, ctx:TinyPyParser.OrTestContext):
        pass

    # Exit a parse tree produced by TinyPyParser#OrTest.
    def exitOrTest(self, ctx:TinyPyParser.OrTestContext):
        pass

    # Enter a parse tree produced by TinyPyParser#AndTest.
    def enterAndTest(self, ctx:TinyPyParser.AndTestContext):
        pass

    # Exit a parse tree produced by TinyPyParser#AndTest.
    def exitAndTest(self, ctx:TinyPyParser.AndTestContext):
        pass

    # Enter a parse tree produced by TinyPyParser#TestExpr.
    def enterTestExpr(self, ctx:TinyPyParser.TestExprContext):
        pass

    # Exit a parse tree produced by TinyPyParser#TestExpr.
    def exitTestExpr(self, ctx:TinyPyParser.TestExprContext):
        pass

    # Enter a parse tree produced by TinyPyParser#NotTest.
    def enterNotTest(self, ctx:TinyPyParser.NotTestContext):
        pass

    # Exit a parse tree produced by TinyPyParser#NotTest.
    def exitNotTest(self, ctx:TinyPyParser.NotTestContext):
        pass

    # Enter a parse tree produced by TinyPyParser#comp_op.
    def enterComp_op(self, ctx:TinyPyParser.Comp_opContext):
        pass

    # Exit a parse tree produced by TinyPyParser#comp_op.
    def exitComp_op(self, ctx:TinyPyParser.Comp_opContext):
        pass

    # Enter a parse tree produced by TinyPyParser#MulDivMod.
    def enterMulDivMod(self, ctx:TinyPyParser.MulDivModContext):
        pass

    # Exit a parse tree produced by TinyPyParser#MulDivMod.
    def exitMulDivMod(self, ctx:TinyPyParser.MulDivModContext):
        pass

    # Enter a parse tree produced by TinyPyParser#BitXor.
    def enterBitXor(self, ctx:TinyPyParser.BitXorContext):
        pass

    # Exit a parse tree produced by TinyPyParser#BitXor.
    def exitBitXor(self, ctx:TinyPyParser.BitXorContext):
        pass

    # Enter a parse tree produced by TinyPyParser#BitOr.
    def enterBitOr(self, ctx:TinyPyParser.BitOrContext):
        pass

    # Exit a parse tree produced by TinyPyParser#BitOr.
    def exitBitOr(self, ctx:TinyPyParser.BitOrContext):
        pass

    # Enter a parse tree produced by TinyPyParser#AddSub.
    def enterAddSub(self, ctx:TinyPyParser.AddSubContext):
        pass

    # Exit a parse tree produced by TinyPyParser#AddSub.
    def exitAddSub(self, ctx:TinyPyParser.AddSubContext):
        pass

    # Enter a parse tree produced by TinyPyParser#BitAnd.
    def enterBitAnd(self, ctx:TinyPyParser.BitAndContext):
        pass

    # Exit a parse tree produced by TinyPyParser#BitAnd.
    def exitBitAnd(self, ctx:TinyPyParser.BitAndContext):
        pass

    # Enter a parse tree produced by TinyPyParser#Shifts.
    def enterShifts(self, ctx:TinyPyParser.ShiftsContext):
        pass

    # Exit a parse tree produced by TinyPyParser#Shifts.
    def exitShifts(self, ctx:TinyPyParser.ShiftsContext):
        pass

    # Enter a parse tree produced by TinyPyParser#FactorExpr.
    def enterFactorExpr(self, ctx:TinyPyParser.FactorExprContext):
        pass

    # Exit a parse tree produced by TinyPyParser#FactorExpr.
    def exitFactorExpr(self, ctx:TinyPyParser.FactorExprContext):
        pass

    # Enter a parse tree produced by TinyPyParser#unaryExpr.
    def enterUnaryExpr(self, ctx:TinyPyParser.UnaryExprContext):
        pass

    # Exit a parse tree produced by TinyPyParser#unaryExpr.
    def exitUnaryExpr(self, ctx:TinyPyParser.UnaryExprContext):
        pass

    # Enter a parse tree produced by TinyPyParser#parenExpr.
    def enterParenExpr(self, ctx:TinyPyParser.ParenExprContext):
        pass

    # Exit a parse tree produced by TinyPyParser#parenExpr.
    def exitParenExpr(self, ctx:TinyPyParser.ParenExprContext):
        pass

    # Enter a parse tree produced by TinyPyParser#atomExpr.
    def enterAtomExpr(self, ctx:TinyPyParser.AtomExprContext):
        pass

    # Exit a parse tree produced by TinyPyParser#atomExpr.
    def exitAtomExpr(self, ctx:TinyPyParser.AtomExprContext):
        pass

    # Enter a parse tree produced by TinyPyParser#atom.
    def enterAtom(self, ctx:TinyPyParser.AtomContext):
        pass

    # Exit a parse tree produced by TinyPyParser#atom.
    def exitAtom(self, ctx:TinyPyParser.AtomContext):
        pass

    # Enter a parse tree produced by TinyPyParser#FuncInvoke.
    def enterFuncInvoke(self, ctx:TinyPyParser.FuncInvokeContext):
        pass

    # Exit a parse tree produced by TinyPyParser#FuncInvoke.
    def exitFuncInvoke(self, ctx:TinyPyParser.FuncInvokeContext):
        pass

    # Enter a parse tree produced by TinyPyParser#PlainName.
    def enterPlainName(self, ctx:TinyPyParser.PlainNameContext):
        pass

    # Exit a parse tree produced by TinyPyParser#PlainName.
    def exitPlainName(self, ctx:TinyPyParser.PlainNameContext):
        pass

    # Enter a parse tree produced by TinyPyParser#DottedName.
    def enterDottedName(self, ctx:TinyPyParser.DottedNameContext):
        pass

    # Exit a parse tree produced by TinyPyParser#DottedName.
    def exitDottedName(self, ctx:TinyPyParser.DottedNameContext):
        pass

    # Enter a parse tree produced by TinyPyParser#SubName.
    def enterSubName(self, ctx:TinyPyParser.SubNameContext):
        pass

    # Exit a parse tree produced by TinyPyParser#SubName.
    def exitSubName(self, ctx:TinyPyParser.SubNameContext):
        pass

    # Enter a parse tree produced by TinyPyParser#DictMaker.
    def enterDictMaker(self, ctx:TinyPyParser.DictMakerContext):
        pass

    # Exit a parse tree produced by TinyPyParser#DictMaker.
    def exitDictMaker(self, ctx:TinyPyParser.DictMakerContext):
        pass

    # Enter a parse tree produced by TinyPyParser#ListMaker.
    def enterListMaker(self, ctx:TinyPyParser.ListMakerContext):
        pass

    # Exit a parse tree produced by TinyPyParser#ListMaker.
    def exitListMaker(self, ctx:TinyPyParser.ListMakerContext):
        pass

    # Enter a parse tree produced by TinyPyParser#TupleMaker.
    def enterTupleMaker(self, ctx:TinyPyParser.TupleMakerContext):
        pass

    # Exit a parse tree produced by TinyPyParser#TupleMaker.
    def exitTupleMaker(self, ctx:TinyPyParser.TupleMakerContext):
        pass

    # Enter a parse tree produced by TinyPyParser#testlist_comp.
    def enterTestlist_comp(self, ctx:TinyPyParser.Testlist_compContext):
        pass

    # Exit a parse tree produced by TinyPyParser#testlist_comp.
    def exitTestlist_comp(self, ctx:TinyPyParser.Testlist_compContext):
        pass

    # Enter a parse tree produced by TinyPyParser#funcinvoke.
    def enterFuncinvoke(self, ctx:TinyPyParser.FuncinvokeContext):
        pass

    # Exit a parse tree produced by TinyPyParser#funcinvoke.
    def exitFuncinvoke(self, ctx:TinyPyParser.FuncinvokeContext):
        pass

    # Enter a parse tree produced by TinyPyParser#arglist.
    def enterArglist(self, ctx:TinyPyParser.ArglistContext):
        pass

    # Exit a parse tree produced by TinyPyParser#arglist.
    def exitArglist(self, ctx:TinyPyParser.ArglistContext):
        pass

    # Enter a parse tree produced by TinyPyParser#SubscriptIndex.
    def enterSubscriptIndex(self, ctx:TinyPyParser.SubscriptIndexContext):
        pass

    # Exit a parse tree produced by TinyPyParser#SubscriptIndex.
    def exitSubscriptIndex(self, ctx:TinyPyParser.SubscriptIndexContext):
        pass

    # Enter a parse tree produced by TinyPyParser#SubscriptSlice.
    def enterSubscriptSlice(self, ctx:TinyPyParser.SubscriptSliceContext):
        pass

    # Exit a parse tree produced by TinyPyParser#SubscriptSlice.
    def exitSubscriptSlice(self, ctx:TinyPyParser.SubscriptSliceContext):
        pass

    # Enter a parse tree produced by TinyPyParser#dictorsetmaker.
    def enterDictorsetmaker(self, ctx:TinyPyParser.DictorsetmakerContext):
        pass

    # Exit a parse tree produced by TinyPyParser#dictorsetmaker.
    def exitDictorsetmaker(self, ctx:TinyPyParser.DictorsetmakerContext):
        pass

    # Enter a parse tree produced by TinyPyParser#dictormaker.
    def enterDictormaker(self, ctx:TinyPyParser.DictormakerContext):
        pass

    # Exit a parse tree produced by TinyPyParser#dictormaker.
    def exitDictormaker(self, ctx:TinyPyParser.DictormakerContext):
        pass

    # Enter a parse tree produced by TinyPyParser#setmaker.
    def enterSetmaker(self, ctx:TinyPyParser.SetmakerContext):
        pass

    # Exit a parse tree produced by TinyPyParser#setmaker.
    def exitSetmaker(self, ctx:TinyPyParser.SetmakerContext):
        pass

    # Enter a parse tree produced by TinyPyParser#number.
    def enterNumber(self, ctx:TinyPyParser.NumberContext):
        pass

    # Exit a parse tree produced by TinyPyParser#number.
    def exitNumber(self, ctx:TinyPyParser.NumberContext):
        pass

    # Enter a parse tree produced by TinyPyParser#integer.
    def enterInteger(self, ctx:TinyPyParser.IntegerContext):
        pass

    # Exit a parse tree produced by TinyPyParser#integer.
    def exitInteger(self, ctx:TinyPyParser.IntegerContext):
        pass

    # Enter a parse tree produced by TinyPyParser#string.
    def enterString(self, ctx:TinyPyParser.StringContext):
        pass

    # Exit a parse tree produced by TinyPyParser#string.
    def exitString(self, ctx:TinyPyParser.StringContext):
        pass
|
nilq/baby-python
|
python
|
import os
import sys
import argparse
import pathlib
import fpipelite.data.project
import fpipelite.data.data
import json
def print_parser(parser: argparse.ArgumentParser):
    """Configure the argument parser for the 'print' subcommand."""
    parser.description = "Prints the data for a found project via {path}."
    parser.add_argument("path", type=pathlib.Path, nargs="?", default=".")
    parser.set_defaults(func=print_exec)
def print_exec(args: argparse.Namespace):
    """Find the project containing args.path and pretty-print its JSON data.

    Exits with -1 when no project is found, 0 on success.
    """
    search_root = os.path.abspath(str(args.path))
    found, data = fpipelite.data.project.FindProjectFromPath(search_root)
    if not found:
        # Previously this exited silently; report the failure like
        # delete_exec does so the user knows why nothing was printed.
        print("No project found at path: " + search_root)
        exit(-1)
    print(json.dumps(data.json_data, indent=4, sort_keys=True))
    exit()
def new_parser(parser: argparse.ArgumentParser):
    """Configure the argument parser for the 'new' subcommand."""
    parser.description = "Creates a new project with the given data, at {dir}."
    parser.add_argument("--short", required=True, type=str,
                        help="The short name of the project.")
    parser.add_argument("--long", required=True, type=str,
                        help="The long name of the project.")
    parser.add_argument("dir", type=pathlib.Path, nargs="?", default=".",
                        help="The directory to put the project in. Current directory if omitted.")
    parser.set_defaults(func=new_exec)
def new_exec(args: argparse.Namespace):
    """Create and save a new project data file under {dir}."""
    # 'data_dir' avoids shadowing the builtin 'dir'.
    data_dir = os.path.join(os.path.abspath(args.dir), fpipelite.data.data._FPipeLiteDirName)
    os.makedirs(data_dir, exist_ok=True)
    fname = os.path.join(data_dir, fpipelite.data.data.FPipeLiteDataFilenameFromType("project"))
    project = fpipelite.data.project.NewProject(fname, args.short, args.long)
    project.save()
def delete_parser(parser: argparse.ArgumentParser):
    """Configure the argument parser for the 'delete' subcommand."""
    parser.description = "Deletes a project's data at the {dir}"
    # BUGFIX: without action="store_true", -f demanded a value, which
    # contradicts its purpose as a force flag (delete_exec tests args.f
    # as a boolean).
    parser.add_argument("-f", action="store_true",
                        help="Forces the deletion without asking for confirmation.")
    parser.add_argument("path", type=pathlib.Path, nargs="?", default=".",
                        help="The path to search from. defaults to '.' if not specified.")
    parser.set_defaults(func=delete_exec)
def delete_exec(args: argparse.Namespace):
    """Locate the project owning args.path and delete its data file."""
    search_root = os.path.abspath(str(args.path))
    found, data = fpipelite.data.project.FindProjectFromPath(search_root)
    if not found:
        print("No project found at path: " + search_root)
        exit(-1)
    project_file = os.path.join(
        data.get_fpipelite_dir_path(),
        fpipelite.data.data.FPipeLiteDataFilenameFromType("project"))
    if not os.path.exists(project_file):
        print("The project file path does not exist: " + project_file)
        exit(-1)
    if not os.path.isfile(project_file):
        print("The project file path does not point to a file: " + project_file)
        exit(-1)
    if not args.f:
        # -f was not given: ask the user before deleting anything.
        answer = input("Are you sure? (y/n): ")
        if answer.lower() != "y":
            print("canceling deletion.")
            exit()
    os.unlink(project_file)
    exit()
|
nilq/baby-python
|
python
|
class SesDevException(Exception):
    """Base class for every error raised by sesdev."""
class AddRepoNoUpdateWithExplicitRepo(SesDevException):
    """--update cannot be combined with an explicitly given custom repo."""

    def __init__(self):
        message = "The --update option does not work with an explicit custom repo."
        super().__init__(message)
class BadMakeCheckRolesNodes(SesDevException):
    """Raised when a makecheck deployment has more than the single makecheck node."""

    def __init__(self):
        message = (
            "\"makecheck\" deployments only work with a single node with role "
            "\"makecheck\". Since this is the default, you can simply omit "
            "the --roles option when running \"sesdev create makecheck\"."
        )
        super().__init__(message)
class BoxDoesNotExist(SesDevException):
    """Raised when the named Vagrant Box cannot be found."""

    def __init__(self, box_name):
        super().__init__(f"There is no Vagrant Box called \"{box_name}\"")
class CmdException(SesDevException):
    """Raised when a shell command fails; keeps command, retcode and stderr."""

    def __init__(self, command, retcode, stderr):
        super().__init__(
            f"Command '{command}' failed: ret={retcode} stderr:\n{stderr}"
        )
        # Preserved so callers can inspect the failure programmatically.
        self.command = command
        self.retcode = retcode
        self.stderr = stderr
class DebugWithoutLogFileDoesNothing(SesDevException):
    """Raised when --debug is given without --log-file."""

    def __init__(self):
        message = "--debug without --log-file has no effect (maybe you want --verbose?)"
        super().__init__(message)
class DepIDIllegalChars(SesDevException):
    """Raised when a deployment ID contains non-hostname characters."""

    def __init__(self, dep_id):
        super().__init__(
            f"Deployment ID \"{dep_id}\" contains illegal characters. Valid characters for "
            "hostnames are ASCII(7) letters from a to z, the digits from 0 to 9, and "
            "the hyphen (-)."
        )
class DepIDWrongLength(SesDevException):
    """Raised when a deployment ID is empty or longer than 63 characters."""

    def __init__(self, length):
        super().__init__(
            "Deployment ID must be from 1 to 63 characters in length "
            f"(yours had {length} characters)"
        )
class DeploymentAlreadyExists(SesDevException):
    """Raised when creating a deployment whose ID is already taken."""

    def __init__(self, dep_id):
        super().__init__(f"A deployment with the same id '{dep_id}' already exists")
class DeploymentDoesNotExists(SesDevException):
    """Raised when the requested deployment ID cannot be found."""

    def __init__(self, dep_id):
        super().__init__(f"Deployment '{dep_id}' does not exist")
class DuplicateRolesNotSupported(SesDevException):
    """Raised when a node declares the same role more than once."""

    def __init__(self, role):
        super().__init__(
            f"A node with more than one \"{role}\" role was detected. "
            f"sesdev does not support more than one \"{role}\" role per node."
        )
class ExclusiveRoles(SesDevException):
    """Raised when two mutually exclusive roles appear in one deployment."""

    def __init__(self, role_a, role_b):
        super().__init__(
            f"Cannot have both roles '{role_a}' and '{role_b}' in the same deployment"
        )
class ExplicitAdminRoleNotAllowed(SesDevException):
    """Raised when a new deployment explicitly requests the deprecated admin role."""

    def __init__(self):
        message = (
            "Though it is still recognized in existing deployments, the explicit "
            "\"admin\" role is deprecated and new deployments are not allowed to "
            "have it. When sesdev deploys Ceph/SES versions that use an \"admin\" "
            "role, all nodes in the deployment will get that role implicitly. "
            "(TL;DR remove the \"admin\" role and try again!)"
        )
        super().__init__(message)
class MultipleRolesPerMachineNotAllowedInCaaSP(SesDevException):
    """Raised when a CaaSP node is given more than one role."""

    def __init__(self):
        message = (
            "Multiple roles per machine detected. This is not allowed in CaaSP "
            "clusters. For a single-node cluster, use the --single-node option "
            "or --roles=\"[master]\" (in this special case, the master node "
            "will function also as a worker node)"
        )
        super().__init__(message)
class NodeDoesNotExist(SesDevException):
    """Raised when the named node is not part of the deployment."""

    def __init__(self, node):
        super().__init__(f"Node '{node}' does not exist in this deployment")
class NodeMustBeAdminAsWell(SesDevException):
    """Raised when a role requires the admin role on the same node but lacks it."""

    def __init__(self, role):
        super().__init__(
            f"Detected node with \"{role}\" role but no \"admin\" role. "
            f"The {role} node must have the \"admin\" role -- otherwise "
            "\"ceph-salt apply\" will fail. Please make sure the node with "
            f"the \"{role}\" role has the \"admin\" role as well"
        )
class NoGaneshaRolePostNautilus(SesDevException):
    """Raised when the obsolete 'ganesha' role is used instead of 'nfs'."""

    def __init__(self):
        message = (
            "You specified a \"ganesha\" role. In cephadm, NFS-Ganesha daemons "
            "are referred to as \"nfs\" daemons, so in sesdev the role has been "
            "renamed to \"nfs\". Please change all instances of \"ganesha\" to "
            "\"nfs\" in your roles string and try again"
        )
        super().__init__(message)
class NoExplicitRolesWithSingleNode(SesDevException):
    """Raised when both --roles and --single-node are given."""

    def __init__(self):
        message = (
            "The --roles and --single-node options are mutually exclusive. "
            "One may be given, or the other, but not both at the same time."
        )
        super().__init__(message)
class NoPrometheusGrafanaInSES5(SesDevException):
    """Raised when prometheus/grafana roles are requested for a SES5 cluster."""

    def __init__(self):
        message = (
            "The DeepSea version used in SES5 does not recognize 'prometheus' "
            "or 'grafana' as roles in policy.cfg (instead, it _always_ deploys "
            "these two services on the Salt Master node. For this reason, sesdev "
            "does not permit these roles to be used with ses5."
        )
        super().__init__(message)
class NoStorageRolesCephadm(SesDevException):
    """Raised when a cephadm cluster has service roles but no storage role."""

    def __init__(self, offending_role):
        super().__init__(
            "No \"storage\" roles were given, but currently sesdev does not "
            f"support this due to the presence of one or more {offending_role} roles in the "
            "cluster configuration."
        )
class NoStorageRolesDeepsea(SesDevException):
    """Raised when a DeepSea-based cluster is configured without storage roles."""

    def __init__(self, version):
        super().__init__(
            "No \"storage\" roles were given, but currently sesdev does not "
            f"support this configuration when deploying a {version} "
            "cluster."
        )
class NoSourcePortForPortForwarding(SesDevException):
    """Raised when port forwarding is requested without a source port."""

    def __init__(self):
        message = "No source port specified for port forwarding"
        super().__init__(message)
class NoSupportConfigTarballFound(SesDevException):
    """Raised when a node produced no supportconfig tarball."""

    def __init__(self, node):
        super().__init__(f"No supportconfig tarball found on node {node}")
class OptionFormatError(SesDevException):
    """Raised when an option value does not match its expected format."""

    def __init__(self, option, expected_type, value):
        super().__init__(
            f"Wrong format for option '{option}': expected format: '{expected_type}', actual format: '{value}'"
        )
class OptionNotSupportedInVersion(SesDevException):
    """Raised when an option is unavailable for the chosen version."""

    def __init__(self, option, version):
        super().__init__(f"Option '{option}' not supported with version '{version}'")
class OptionValueError(SesDevException):
    """Raised when an option's value is invalid for a stated reason."""

    def __init__(self, option, message, value):
        super().__init__(
            f"Wrong value for option '{option}'. {message}. Actual value: '{value}'"
        )
class ProductOptionOnlyOnSES(SesDevException):
    """Raised when --product is used with a non-SES version."""

    def __init__(self, version):
        super().__init__(
            f"You asked to create a {version} cluster with the --product option, "
            "but this option only works with versions starting with \"ses\""
        )
class RemoveBoxNeedsBoxNameOrAllOption(SesDevException):
    """Raised when box removal is requested without a box name or --all."""

    def __init__(self):
        message = (
            "Either provide the name of a box to be removed or the --all option "
            "to remove all boxes at once"
        )
        super().__init__(message)
class RoleNotKnown(SesDevException):
    """Raised when a role name is not recognized by sesdev at all."""

    def __init__(self, role):
        msg = "Role '{}' is not supported by sesdev".format(role)
        super().__init__(msg)
class RoleNotSupported(SesDevException):
    """Raised when a known role is not available in the chosen version."""

    def __init__(self, role, version):
        msg = "Role '{}' is not supported in version '{}'".format(role, version)
        super().__init__(msg)
class ScpInvalidSourceOrDestination(SesDevException):
    """Raised when scp arguments don't have exactly one remote (':') side."""

    def __init__(self):
        msg = "Either source or destination must contain a ':' - not both or neither"
        super().__init__(msg)
class ServiceNotFound(SesDevException):
    """Raised when a named service does not exist in the deployment."""

    def __init__(self, service):
        msg = "Service '{}' was not found in this deployment".format(service)
        super().__init__(msg)
class ServicePortForwardingNotSupported(SesDevException):
    """Raised when a service has no built-in port-forwarding defaults."""

    def __init__(self, service):
        msg = (
            "Service '{}' not supported for port forwarding. Specify manually the service source "
            "and destination ports".format(service)
        )
        super().__init__(msg)
class SettingIncompatibleError(SesDevException):
    """Raised when two settings cannot be used together."""

    def __init__(self, setting1, value1, setting2, value2):
        msg = (
            "Setting {} = {} and {} = {} are incompatible"
            .format(setting1, value1, setting2, value2)
        )
        super().__init__(msg)
class SettingNotKnown(SesDevException):
    """Raised on an unrecognized setting name (indicates an internal bug)."""

    def __init__(self, setting):
        msg = "Setting '{}' is not known - please open a bug report!".format(setting)
        super().__init__(msg)
class SettingTypeError(SesDevException):
    """Raised when a setting's value has the wrong Python type."""

    def __init__(self, setting, expected_type, value):
        msg = (
            "Wrong value type for setting '{}': expected type: '{}', actual value='{}' ('{}')"
            .format(setting, expected_type, value, type(value))
        )
        super().__init__(msg)
class SubcommandNotSupportedInVersion(SesDevException):
    """Raised when a subcommand is unavailable for the deployment version."""

    def __init__(self, subcmd, version):
        msg = "Subcommand {} not supported in '{}'".format(subcmd, version)
        super().__init__(msg)
class SupportconfigOnlyOnSLE(SesDevException):
    """Raised when supportconfig is attempted on a non-SLE deployment."""

    def __init__(self):
        msg = (
            "sesdev supportconfig depends on the 'supportconfig' RPM, which is "
            "available only on SUSE Linux Enterprise"
        )
        super().__init__(msg)
class UniqueRoleViolation(SesDevException):
    """Raised when a role that must appear exactly once appears zero or multiple times."""

    def __init__(self, role, number):
        msg = (
            "There must be one, and only one, '{role}' role "
            "(you gave {number} '{role}' roles)".format(role=role, number=number)
        )
        super().__init__(msg)
class VagrantSshConfigNoHostName(SesDevException):
    """Raised when 'vagrant ssh-config' output lacks a HostName entry."""

    def __init__(self, name):
        msg = (
            "Could not get HostName info from 'vagrant ssh-config {}' command"
            .format(name)
        )
        super().__init__(msg)
class VersionNotKnown(SesDevException):
    """Raised on an unrecognized deployment version string."""

    def __init__(self, version):
        msg = "Unknown deployment version: '{}'".format(version)
        super().__init__(msg)
class VersionOSNotSupported(SesDevException):
    """Raised when a version/operating-system combination is unsupported."""

    def __init__(self, version, operating_system):
        msg = (
            "sesdev does not know how to deploy \"{}\" on operating system \"{}\""
            .format(version, operating_system)
        )
        super().__init__(msg)
class UnsupportedVMEngine(SesDevException):
    """Raised on an unknown VM engine value (indicates an internal bug)."""

    def __init__(self, engine):
        msg = (
            "Unsupported VM engine ->{}<- encountered. This is a bug: please "
            "report it to the maintainers".format(engine)
        )
        super().__init__(msg)
|
nilq/baby-python
|
python
|
#!/usr/bin/python
# main.py
"""
@author: Maxime Dréan.
Github: https://github.com/maximedrn
Telegram: https://t.me/maximedrn
Copyright © 2022 Maxime Dréan. All rights reserved.
Any distribution, modification or commercial use is strictly prohibited.
"""
# Selenium module imports: pip install selenium
from selenium.webdriver.support.ui import WebDriverWait as WDW
from selenium.common.exceptions import TimeoutException as TE
# Python internal imports.
from app.hcaptcha import hCaptcha
from app.utils.colors import GREEN, RED, RESET
from app.utils.const import DEMONSTRATION_URL
def demonstration(hcaptcha: object) -> None:
    """Exercise the solver against the public hCAPTCHA demo page.

    Waits up to 600 seconds for the widget's "data-hcaptcha-response"
    attribute to become non-empty, which signals a solved captcha.
    """
    def is_solved(_driver) -> bool:
        # The attribute stays empty until the hCAPTCHA is solved.
        token = hcaptcha.visible(
            '//div[@class="h-captcha"]/iframe').get_attribute(
            'data-hcaptcha-response')
        return len(token) > 0

    try:
        print('Solving the hCAPTCHA.', end=' ')
        hcaptcha.driver.get(DEMONSTRATION_URL)  # hCAPTCHA solver test URL.
        WDW(hcaptcha.driver, 600).until(is_solved)
        print(f'{GREEN}Solved.{RESET}')
    except TE:  # Timed out before the captcha was solved.
        print(f'{RED}Failed.{RESET}')
if __name__ == '__main__':
    # Build the solver, fetch its userscript, then run the demo page.
    solver = hCaptcha(
        browser=1, headless=False, comments=True, download=False)
    solver.download_userscript()  # Download the userscript.
    demonstration(solver)  # Demonstrate the hCAPTCHA solver.
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
"""Driver for controlling leg position"""
from inpromptu import Inpromptu, cli_method
from .five_bar_kins import FiveBarKinematics2D
from .odrive_driver import OdriveDriver
import odrive
class ParetoLeg(Inpromptu):
    """One five-bar robot leg driven by two ODrive motor axes.

    Inherits Inpromptu so that @cli_method-decorated methods become
    interactive CLI commands.
    """
    #class ParetoLeg(object):

    # constants
    # Joint angles (degrees) that the physical calibration fixture holds
    # the leg at; see apply_calibration().
    CALIB_ANGLE_DEGS = [90, 90]

    def __init__(self, odrive, l1_len, l2_len):
        """constructor. Assumes odrive motors have already been pre-configured."""
        # NOTE(review): the 'odrive' parameter shadows the module-level
        # 'import odrive' -- harmless here, but worth renaming.
        super().__init__()
        self.odd = OdriveDriver(odrive)
        self.kins = FiveBarKinematics2D(l1_len, l2_len)

    def set_cartesian_position(self, x, y):
        """Set the position of the robot leg in cartesian coordinates."""
        theta_axis0, theta_axis1 = self.kins.cartesian_to_joint(x, y)
        # Kinematic model assumes flipped angle0 from how it is installed
        # based on leg configuration.
        theta_axis0 = -theta_axis0
        self.odd.set_positions(theta_axis0, theta_axis1)

    @cli_method
    def get_cartesian_position(self):
        """Get the position of the robot leg in cartesian coordinates."""
        theta_axis0, theta_axis1 = self.odd.get_motor_angles() # radians.
        return self.kins.joint_to_cartesian([theta_axis0, theta_axis1])

    @cli_method
    def get_joint_angles(self):
        """Get the angular position of each joint according to configuration assumptions.
        Note: with robot orientation is Motor0 closer to Motor1, both motor
        angles are positive when the leg moves CCW in this orientation.
        """
        motor_angles = self.odd.get_motor_angles()
        # Handle Joey configuration convention.
        # Motor 0 is mounted mirrored, so its sign is flipped (see also
        # set_cartesian_position / set_joint_angles).
        return (-motor_angles[0], motor_angles[1])

    def set_joint_angles(self, motor0_theta, motor1_theta):
        """Set the angular position of each joint according to configuration assumptions.
        Angle units are in radians.
        """
        self.odd.set_motor_angles(-motor0_theta, motor1_theta)

    @cli_method
    def apply_calibration(self):
        """Tell the ODrive that the current position is the calibration position."""
        # Configure the motor in joint space.
        # i.e: CCW rotation is positive angle looking top-down at each motor.
        self.odd.configure_motor_angle_degs_as(ParetoLeg.CALIB_ANGLE_DEGS[0],
                                               ParetoLeg.CALIB_ANGLE_DEGS[1])

    def get_joint_velocities(self):
        """Get the angular velocity of each joint."""
        pass # TODO!
|
nilq/baby-python
|
python
|
import _km_omp as _cp
import networkx as nx
from itertools import compress
import numpy as np
import scipy
from scipy.sparse import triu
def detect(G, nodes_in_part1, nodes_in_part2, part_to_project, resol = 1, node_capacity = {}, num_samples = 100, consensus_threshold=0.9, significance_level = 0.05, num_rand_nets = 500):
    """Detect core-periphery pairs in one projection of a bipartite network.

    G is a bipartite networkx graph whose node set is exactly
    nodes_in_part1 | nodes_in_part2 (disjoint).  part_to_project selects
    which side ('part1' or 'part2') the result is reported for.  The heavy
    lifting is delegated to the compiled extension _km_omp._detect.

    Returns (c, x): two dicts keyed by projected-side node, c giving the
    consensus core-periphery pair id and x the coreness value; nodes not
    assigned to any consensus pair (c < 0) are omitted.

    NOTE(review): the mutable default node_capacity={} is only rebound,
    never mutated, so it is safe -- but None would be the cleaner default.
    """
    """ INPUT VALIDATION """
    # Side A is the side being projected; side B is the other side.
    if part_to_project == 'part1':
        _nodes_side_A = nodes_in_part1
        _nodes_side_B = nodes_in_part2
        pass
    elif part_to_project == 'part2':
        _nodes_side_A = nodes_in_part2
        _nodes_side_B = nodes_in_part1
    else:
        raise Exception("Invalid input part_to_project. Set either part_to_project = 'part1' or part_to_project = 'part2'.")
    _nodes_side_A = set(_nodes_side_A)
    _nodes_side_B = set(_nodes_side_B)
    # The two parts must be disjoint and together cover every node of G.
    if len(list(_nodes_side_A.intersection(_nodes_side_B))) >0:
        raise Exception("Invalid inputs nodes_in_part1 and nodes_in_part2. nodes_in_part1 and nodes_in_part2 should not contain the same node.")
    if _nodes_side_A.union(_nodes_side_B) != set(G.nodes()):
        raise Exception("Invalid inputs nodes_in_part1 and nodes_in_part2. Some nodes are missing.")
    if len(node_capacity) == 0:
        # No capacities supplied: every side-B node gets capacity 1.0.
        node_capacity = np.array(np.ones(len(_nodes_side_B))).astype(float)
        _nodes_side_A = list(_nodes_side_A)
        _nodes_side_B = list(_nodes_side_B)
    else:
        if len(set(node_capacity.keys()).symmetric_difference(_nodes_side_B))>0:
            raise Exception("Invalid input node_capacity. Some nodes are missing in node_capacity.")
        _nodes_side_A = list(_nodes_side_A)
        _nodes_side_B = list(_nodes_side_B)
        # Reorder capacities to match the (list) order of _nodes_side_B.
        node_capacity = np.array([node_capacity[r] for r in _nodes_side_B]).astype(float)
    """ CORE-PERIPHERY DETECTION """
    # Make the list of edges in the given network
    # Row/column order of A follows _nodes_side_A + _nodes_side_B, so node i
    # in the edge list maps back to that concatenated list below.
    A = nx.adjacency_matrix(G, _nodes_side_A + _nodes_side_B)
    r, c = triu(A).nonzero()
    edges = np.array([[rc[0], rc[1]] for rc in zip(r, c)]).astype(int)
    Np = len(_nodes_side_A)
    Nr = len(_nodes_side_B)
    # Pass the edge list to a c++ function (src/_km_ompnet.cpp)
    results = _cp._detect(edges = edges,\
                _nodes_side_A = np.array(list(range(Np))).astype(int),\
                _nodes_side_B = np.array(list(range(Np, Nr + Np))).astype(int),\
                node_capacity = np.array(node_capacity).astype(float),\
                resol = float(resol),\
                num_samples = int(num_samples),\
                num_runs = 10,\
                consensus_threshold = float(consensus_threshold),\
                significance_level = float(significance_level),\
                num_rand_nets = int(num_rand_nets))
    """ RETRIEVE THE RESULTS """
    c = results[0].astype(int)   # pair id per side-A node (-1 = homeless)
    x = results[1]               # coreness per side-A node
    # Exclude homeless nodes that do not belong to any consensus CP pairs
    b = c>=0
    c = dict(zip(compress(_nodes_side_A, b), c[b]))
    x = dict(zip(compress(_nodes_side_A, b), x[b]))
    return c, x
|
nilq/baby-python
|
python
|
# Webhooks for external integrations.
from __future__ import absolute_import
from typing import Text
from django.http import HttpRequest, HttpResponse
from zerver.lib.actions import check_send_message
from zerver.lib.response import json_success
from zerver.decorator import REQ, has_request_variables, api_key_only_webhook_view
from zerver.models import UserProfile, Client
@api_key_only_webhook_view("Heroku")
@has_request_variables
def api_heroku_webhook(request, user_profile, client, stream=REQ(default="heroku"),
                       head=REQ(), app=REQ(), user=REQ(), url=REQ(), git_log=REQ()):
    # type: (HttpRequest, UserProfile, Client, Text, Text, Text, Text, Text, Text) -> HttpResponse
    """Post a Heroku deploy notification to the given stream (topic = app name)."""
    content = "{} deployed version {} of [{}]({})\n> {}".format(
        user, head, app, url, git_log)
    check_send_message(user_profile, client, "stream", [stream], app, content)
    return json_success()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""The interface for Windows Registry objects."""
import abc
from plaso.dfwinreg import definitions
class WinRegistryFile(object):
  """Class that defines a Windows Registry file."""

  _KEY_PATH_SEPARATOR = u'\\'

  def __init__(self, ascii_codepage=u'cp1252', key_path_prefix=u''):
    """Initializes the Windows Registry file.

    Args:
      ascii_codepage: optional ASCII string codepage.
      key_path_prefix: optional Windows Registry key path prefix.
    """
    super(WinRegistryFile, self).__init__()
    self._ascii_codepage = ascii_codepage
    self._key_path_prefix = key_path_prefix
    self._key_path_prefix_length = len(key_path_prefix)
    self._key_path_prefix_upper = key_path_prefix.upper()

  def _SplitKeyPath(self, path):
    """Splits the key path into path segments.

    Args:
      path: a string containing the path.

    Returns:
      A list of path segments without the root path segment, which is an
      empty string.
    """
    # Split the path with the path separator and remove empty path segments.
    # Wrap filter() in list() so a list is returned on Python 3 as well
    # (on Python 3 filter() returns a lazy iterator).
    return list(filter(None, path.split(self._KEY_PATH_SEPARATOR)))

  @abc.abstractmethod
  def Close(self):
    """Closes the Windows Registry file."""

  @abc.abstractmethod
  def GetKeyByPath(self, key_path):
    """Retrieves the key for a specific path.

    Args:
      key_path: the Windows Registry key path.

    Returns:
      A Windows Registry key (instance of WinRegistryKey) or None if
      not available.
    """

  @abc.abstractmethod
  def GetRootKey(self):
    """Retrieves the root key.

    Returns:
      The Windows Registry root key (instance of WinRegistryKey) or
      None if not available.
    """

  @abc.abstractmethod
  def Open(self, file_object):
    """Opens the Windows Registry file using a file-like object.

    Args:
      file_object: the file-like object.

    Returns:
      A boolean containing True if successful or False if not.
    """

  def RecurseKeys(self):
    """Recurses the Windows Registry keys starting with the root key.

    Yields:
      A Windows Registry key (instance of WinRegistryKey).
    """
    root_key = self.GetRootKey()
    if root_key:
      for registry_key in root_key.RecurseKeys():
        yield registry_key

  def SetKeyPathPrefix(self, key_path_prefix):
    """Sets the Window Registry key path prefix.

    Args:
      key_path_prefix: the Windows Registry key path prefix.
    """
    self._key_path_prefix = key_path_prefix
    self._key_path_prefix_length = len(key_path_prefix)
    self._key_path_prefix_upper = key_path_prefix.upper()
class WinRegistryFileReader(object):
  """Class to represent the Windows Registry file reader interface.

  Concrete subclasses resolve a Windows path to an actual file and return
  an opened WinRegistryFile.
  """

  @abc.abstractmethod
  def Open(self, path, ascii_codepage=u'cp1252'):
    """Opens the Windows Registry file specified by the path.

    Args:
      path: string containing the path of the Windows Registry file. The path
            is a Windows path relative to the root of the file system that
            contains the specfic Windows Registry file. E.g.
            C:\\Windows\\System32\\config\\SYSTEM
      ascii_codepage: optional ASCII string codepage.

    Returns:
      The Windows Registry file (instance of WinRegistryFile) or None.
    """
class WinRegistryKey(object):
  """Class to represent the Windows Registry key interface."""

  _PATH_SEPARATOR = u'\\'

  def __init__(self, key_path=u''):
    """Initializes a Windows Registry key object.

    Args:
      key_path: optional Windows Registry key path.
    """
    super(WinRegistryKey, self).__init__()
    # Normalize the path (collapse repeated separators, ensure a leading
    # separator unless the path starts with u'HKEY_').
    self._key_path = self._JoinKeyPath([key_path])

  @abc.abstractproperty
  def last_written_time(self):
    """The last written time of the key (contains a FILETIME timestamp)."""

  @abc.abstractproperty
  def name(self):
    """The name of the key."""

  @abc.abstractproperty
  def number_of_subkeys(self):
    """The number of subkeys within the key."""

  @abc.abstractproperty
  def number_of_values(self):
    """The number of values within the key."""

  @abc.abstractproperty
  def offset(self):
    """The offset of the key within the Windows Registry file."""

  @property
  def path(self):
    """The Windows Registry key path."""
    return self._key_path

  def _JoinKeyPath(self, path_segments):
    """Joins the path segments into key path.

    Args:
      path_segments: list of Windows Registry key path segments.

    Returns:
      A string containing the joined, normalized key path.
    """
    # This is an optimized way to combine the path segments into a single path
    # and combine multiple successive path separators to one.

    # Split all the path segments based on the path (segment) separator.
    path_segments = [
        segment.split(self._PATH_SEPARATOR) for segment in path_segments]

    # Flatten the sublists into one list.
    path_segments = [
        element for sublist in path_segments for element in sublist]

    # Remove empty path segments.
    path_segments = filter(None, path_segments)

    key_path = self._PATH_SEPARATOR.join(path_segments)
    if not key_path.startswith(u'HKEY_'):
      key_path = u'{0:s}{1:s}'.format(self._PATH_SEPARATOR, key_path)
    return key_path

  @abc.abstractmethod
  def GetSubkeyByName(self, name):
    """Retrieves a subkey by name.

    Args:
      name: The name of the subkey.

    Returns:
      The Windows Registry subkey (instances of WinRegistryKey) or
      None if not found.
    """

  @abc.abstractmethod
  def GetSubkeys(self):
    """Retrieves all subkeys within the key.

    Yields:
      Windows Registry key objects (instances of WinRegistryKey) that represent
      the subkeys stored within the key.
    """

  @abc.abstractmethod
  def GetValueByName(self, name):
    """Retrieves a value by name.

    Args:
      name: the name of the value or an empty string for the default value.

    Returns:
      A Windows Registry value object (instance of WinRegistryValue) if
      a corresponding value was found or None if not.
    """

  @abc.abstractmethod
  def GetValues(self):
    """Retrieves all values within the key.

    Yields:
      Windows Registry value objects (instances of WinRegistryValue) that
      represent the values stored within the key.
    """

  def RecurseKeys(self):
    """Recurses the subkeys starting with the key.

    Yields:
      A Windows Registry key (instance of WinRegistryKey).
    """
    # Depth-first pre-order traversal: the key itself first, then subtrees.
    yield self
    for subkey in self.GetSubkeys():
      for key in subkey.RecurseKeys():
        yield key
class WinRegistryValue(object):
  """Class to represent the Windows Registry value interface."""

  _DATA_TYPE_STRINGS = {
      0: u'REG_NONE',
      1: u'REG_SZ',
      2: u'REG_EXPAND_SZ',
      3: u'REG_BINARY',
      4: u'REG_DWORD_LE',
      5: u'REG_DWORD_BE',
      6: u'REG_LINK',
      7: u'REG_MULTI_SZ',
      8: u'REG_RESOURCE_LIST',
      9: u'REG_FULL_RESOURCE_DESCRIPTOR',
      10: u'REG_RESOURCE_REQUIREMENT_LIST',
      11: u'REG_QWORD'
  }

  _INTEGER_VALUE_TYPES = frozenset([
      definitions.REG_DWORD, definitions.REG_DWORD_BIG_ENDIAN,
      definitions.REG_QWORD])

  # NOTE(review): includes REG_LINK, but DataIsString deliberately does not
  # treat REG_LINK as a string -- confirm intended semantics before reusing.
  _STRING_VALUE_TYPES = frozenset([
      definitions.REG_SZ, definitions.REG_EXPAND_SZ, definitions.REG_LINK])

  @abc.abstractproperty
  def data(self):
    """The value data as a byte string."""

  @abc.abstractproperty
  def data_type(self):
    """Numeric value that contains the data type."""

  @property
  def data_type_string(self):
    """String representation of the data type."""
    return self._DATA_TYPE_STRINGS.get(self.data_type, u'UNKNOWN')

  @abc.abstractproperty
  def name(self):
    """The name of the value."""

  @abc.abstractproperty
  def offset(self):
    """The offset of the value within the Windows Registry file."""

  def DataIsInteger(self):
    """Determines, based on the data type, if the data is an integer.

    The data types considered integers are: REG_DWORD
    (REG_DWORD_LITTLE_ENDIAN), REG_DWORD_BIG_ENDIAN and REG_QWORD.

    Returns:
      True if the data is an integer, false otherwise.
    """
    # Reuse the class-level frozenset instead of re-listing its members.
    return self.data_type in self._INTEGER_VALUE_TYPES

  def DataIsBinaryData(self):
    """Determines, based on the data type, if the data is binary data.

    The data types considered binary data are: REG_BINARY.

    Returns:
      True if the data is binary data, false otherwise.
    """
    return self.data_type == definitions.REG_BINARY

  def DataIsMultiString(self):
    """Determines, based on the data type, if the data is a multi string.

    The data types considered multi strings are: REG_MULTI_SZ.

    Returns:
      True if the data is a multi string, false otherwise.
    """
    return self.data_type == definitions.REG_MULTI_SZ

  def DataIsString(self):
    """Determines, based on the data type, if the data is a string.

    The data types considered strings are: REG_SZ and REG_EXPAND_SZ.

    Returns:
      True if the data is a string, false otherwise.
    """
    return self.data_type in [definitions.REG_SZ, definitions.REG_EXPAND_SZ]

  @abc.abstractmethod
  def GetDataAsObject(self):
    """Retrieves the data as an object.

    Returns:
      The data as a Python type.
    """
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
"""Demo driver: run the SLCT log parser over the bundled HDFS sample log."""
import sys
sys.path.append('../')
from logparser import SLCT

input_dir = '../logs/HDFS/'  # The input directory of log file
output_dir = 'SLCT_result/'  # The output directory of parsing results
log_file = 'HDFS_2k.log'  # The input log file name
log_format = '<Date> <Time> <Pid> <Level> <Component>: <Content>'  # HDFS log format
support = 10  # The minimum support threshold
regex = []  # Regular expression list for optional preprocessing (default: [])

# Parse results are written to output_dir by the parser itself.
parser = SLCT.LogParser(log_format=log_format, indir=input_dir, outdir=output_dir,
                        support=support, rex=regex)
parser.parse(log_file)
|
nilq/baby-python
|
python
|
import logging
import os
import queue
import threading
import time
import traceback
import uuid
from signal import SIGINT, SIGTERM, signal
import zmq
from pyrsistent import pmap
from rx.subject import Subject
from .mixins import (
AuthenticationMixin,
NotificationsMixin,
RouterClientMixin,
WebserverMixin,
)
from .utils import Logger, stdout_logger
# Module-level logger; Agent instances wrap it with per-agent context.
log = stdout_logger(__name__, level=logging.DEBUG)
class Agent(RouterClientMixin, NotificationsMixin, AuthenticationMixin, WebserverMixin):
    """ZMQ-based agent that boots in a background thread.

    On construction the agent installs SIGINT/SIGTERM handlers, runs
    boot() (user setup + mixin setups) on a daemon-style thread, and
    starts a socket-processing loop that pumps received frames into
    per-socket Rx observables and drains per-socket send queues.
    """

    def __init__(self, *args, name=None, **kwargs):
        self.name = name or uuid.uuid4().hex
        self.log = Logger(log, {"agent": self.name})
        self.initialized_event = threading.Event()
        self.exit_event = threading.Event()
        self.zmq_sockets = {}
        self.zmq_poller = zmq.Poller()
        self.threads = []
        self.disposables = []
        # signals for graceful shutdown
        signal(SIGTERM, self._shutdown)
        signal(SIGINT, self._shutdown)
        # boot in thread
        t = threading.Thread(target=self.boot, args=args, kwargs=kwargs)
        self.threads.append(t)
        t.start()
        self.initialized_event.wait()
        # call initialized hook
        self.initialized()

    def setup(self):
        """
        User override
        """

    def initialized(self):
        """
        User override
        """

    def boot(self, *args, **kwargs):
        """Run user setup, then each mixin's setup, then start the socket loop."""
        try:
            start = time.time()
            self.log.info("Booting up ...")
            self.zmq_context = zmq.Context()
            # user setup
            self.log.info("Running user setup ...")
            self.setup(*args, **kwargs)
            # setup bases
            for base in Agent.__bases__:
                if hasattr(base, "setup"):
                    self.log.info(f"Initiating {base.__name__} setup procedure")
                    base.setup(self, *args, **kwargs)
            # process sockets
            t = threading.Thread(target=self.process_sockets)
            self.threads.append(t)
            t.start()
            self.initialized_event.set()
            self.log.info(f"Booted in {time.time() - start} seconds ...")
        except Exception as e:
            self.log.error(f"Failed to boot ...\n\n{traceback.format_exc()}")
            # Unblock __init__ and self-signal so the process exits cleanly.
            self.initialized_event.set()
            os.kill(os.getpid(), SIGINT)

    def shutdown(self):
        """
        Shutdown procedure, call super().shutdown() if overriding
        """
        # run shutdown procedures of all bases
        for base in Agent.__bases__:
            if hasattr(base, "shutdown"):
                self.log.info(f"Initiating {base.__name__} shutdown procedure")
                base.shutdown(self)
        # dispose observables
        for d in self.disposables:
            self.log.info(f"disposing {d} ...")
            d.dispose()
        self.log.info("set exit event ...")
        self.exit_event.set()
        self.log.info("wait for initialization before cleaning up ...")
        self.initialized_event.wait()
        # join threads
        self.log.info("joining threads ...")
        for t in self.threads:
            self.log.info(f"joining {t}")
            t.join()
        self.log.info("joining threads complete ...")
        # destroy zmq sockets
        for k, v in self.zmq_sockets.items():
            self.log.info(f"closing socket {k} ...")
            v["socket"].close()
        self.zmq_context.term()

    def _shutdown(self, signum, frame):
        # Signal-handler entry point for SIGINT/SIGTERM.
        self.shutdown()

    ########################################################################################
    ## networking
    ########################################################################################
    def _register_socket(self, socket_type, options, address, attach):
        """Create, configure, attach and register a ZMQ socket.

        attach: callable receiving the raw socket; it either binds or
        connects it to *address* (the only difference between
        bind_socket and connect_socket).

        Returns the immutable per-socket record (pyrsistent pmap) with the
        raw socket, its Rx observable and a thread-safe send queue.
        """
        socket = self.zmq_context.socket(socket_type)
        for k, v in options.items():
            if type(v) == str:
                socket.setsockopt_string(k, v)
            else:
                socket.setsockopt(k, v)
        attach(socket)
        observable = Subject()
        socket_name = f"{socket_type}:{address}"
        send_queue = queue.Queue()
        self.zmq_sockets[socket_name] = pmap(
            {
                "socket": socket,
                "address": address,
                "type": socket_type,
                "options": options,
                "observable": observable,
                "send_queue": send_queue,
                "send": lambda x: send_queue.put(x),
            }
        )
        self.zmq_poller.register(socket, zmq.POLLIN)
        return self.zmq_sockets[socket_name]

    def bind_socket(self, socket_type, options, address):
        """Bind a new ZMQ socket on *address* and register it for polling."""
        self.log.info(f"binding {socket_type} socket on {address} ...")
        return self._register_socket(
            socket_type, options, address, lambda s: s.bind(address))

    def connect_socket(self, socket_type, options, address):
        """Connect a new ZMQ socket to *address* and register it for polling."""
        self.log.info(f"connecting {socket_type} socket to {address} ...")
        return self._register_socket(
            socket_type, options, address, lambda s: s.connect(address))

    def process_sockets(self):
        """Poll all registered sockets until exit_event is set.

        Inbound multipart frames are pushed into each socket's observable;
        outbound frames queued via the record's "send" callable are written
        here because ZMQ sockets are not thread safe.
        """
        # wait for initialization
        self.initialized_event.wait()
        self.log.info(
            f"start processing sockets in thread {threading.current_thread()} ..."
        )
        while not self.exit_event.is_set():
            if self.zmq_sockets:
                sockets = dict(self.zmq_poller.poll(50))
                for k, v in self.zmq_sockets.items():
                    # receive socket into observable
                    if v.socket in sockets and sockets[v.socket] == zmq.POLLIN:
                        v.observable.on_next(v.socket.recv_multipart())
                    # send queue to socket (zmq is not thread safe)
                    while not v.send_queue.empty() and not self.exit_event.is_set():
                        try:
                            v.socket.send_multipart(v.send_queue.get(block=False))
                        except queue.Empty:
                            pass
            else:
                time.sleep(1)
|
nilq/baby-python
|
python
|
# Fixed: the original used typographic ("smart") quotes, which are a
# SyntaxError in Python; also bind the concatenation result instead of
# discarding it.
str1 = "Hello"
str2 = "World"
result = str1 + str2
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
appid.py -- Chromium appid header file generation utility.
"""
import optparse
import sys
# Template for the generated C++ header; %s is replaced with the appid
# (empty for non Google Chrome builds).
GENERATED_APPID_INCLUDE_FILE_CONTENTS = """
// This file is automatically generated by appid.py.
// It contains the Google Update Appid used for this build. Note that
// the Appid will be empty for non Google Chrome builds.
namespace google_update {
const wchar_t kChromeGuid[] = L"%s";
}
"""
def GenerateAppIdHeader(opts):
  """Write the appid header file, touching it only when its contents change.

  Leaving an unchanged file alone preserves its mtime so the build system
  does not rebuild dependents unnecessarily.

  Args:
    opts: parsed options with 'appid' and 'output_file' attributes.
  """
  contents = GENERATED_APPID_INCLUDE_FILE_CONTENTS % opts.appid
  try:
    # 'with' ensures the handle is closed (the original leaked both handles).
    with open(opts.output_file, 'r') as input_file:
      current_contents = input_file.read()
  except EnvironmentError:
    # File missing or unreadable: treat as "needs (re)generation".
    current_contents = None
  if contents != current_contents:
    with open(opts.output_file, 'w') as output_file:
      output_file.write(contents)
def main():
  """Parse command-line flags and generate the appid header file.

  Returns 1 (error) when --appid or --output_file is missing.
  """
  parser = optparse.OptionParser()
  parser.add_option('-a', '--appid',
                    help='The Google Update App Id of the Chrome being built.')
  parser.add_option('-o', '--output_file',
                    help='The path to the generated output header file')
  (opts, args) = parser.parse_args()
  if opts.appid is None or not opts.output_file:
    parser.print_help()
    return 1
  # Log a trace in the build output when we run.
  # NOTE: Python 2 print statements -- this script predates Python 3.
  print "Generating appid header... ",
  GenerateAppIdHeader(opts)
  print "Done."
# Script entry point: exit status is main()'s return value.
if __name__ == '__main__':
  sys.exit(main())
|
nilq/baby-python
|
python
|
"""
Torrent Search Plugin for Userbot.
CMD:
`.tsearch` <query>\n
`.ts` <query or reply>\n
`.movie torrentz2.eu|idop.se` <query>
"""
import cfscrape # https://github.com/Anorov/cloudflare-scrape
from bs4 import BeautifulSoup as bs
import requests
import asyncio
from uniborg.util import admin_cmd, humanbytes
from datetime import datetime
from uniborg import MODULE
# Register this plugin's name in the userbot's module list
# (presumably consumed by a help/module listing command -- unverified here).
MODULE.append("torrents")
def dogbin(magnets):
    """Upload each magnet link to del.dog and return the paste URLs.

    Args:
        magnets: iterable of magnet-link strings.

    Returns:
        List of del.dog URLs, one per input, in the same order.
    """
    urls = []
    # Idiomatic for-loop replaces the original manual while/counter loop.
    for message in magnets:
        r = requests.post(
            "https://del.dog/documents", data=message.encode("UTF-8")).json()
        urls.append(f"https://del.dog/{r['key']}")
    return urls
@borg.on(admin_cmd(pattern="tsearch ?(.*)", allow_sudo=True))
async def tor_search(event):
    """Scrape torrentdownloads.me for the query, shorten magnets via del.dog,
    and edit the triggering message with the results.

    Fix: requests.get(url, headers) passed the headers dict as the *params*
    positional argument, so the User-Agent was never sent; now passed as
    headers=headers at all three call sites.
    """
    if event.fwd_from:
        return
    headers = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36'}
    search_str = event.pattern_match.group(1)
    print(search_str)
    await event.edit("Searching for " + search_str + ".....")
    if " " in search_str:
        search_str = search_str.replace(" ", "+")
        print(search_str)
        res = requests.get(
            "https://www.torrentdownloads.me/search/?new=1&s_cat=0&search=" +
            search_str,
            headers=headers)
    else:
        res = requests.get(
            "https://www.torrentdownloads.me/search/?search=" +
            search_str,
            headers=headers)
    source = bs(res.text, 'lxml')
    urls = []
    magnets = []
    titles = []
    counter = 0
    # Collect up to 12 result links (counter breaks at 11).
    for div in source.find_all('div', {'class': 'grey_bar3 back_none'}):
        try:
            title = div.p.a['title']
            title = title[20:]  # strip the "Download torrent ..." prefix
            titles.append(title)
            urls.append("https://www.torrentdownloads.me" + div.p.a['href'])
        except KeyError:
            pass
        except TypeError:
            pass
        except AttributeError:
            pass
        if counter == 11:
            break
        counter = counter + 1
    if not urls:
        await event.edit("Either the Keyword was restricted or not found..")
        return
    print("Found URLS...")
    # Visit each result page to extract its magnet link.
    for url in urls:
        res = requests.get(url, headers=headers)
        source = bs(res.text, 'lxml')
        for div in source.find_all('div', {'class': 'grey_bar1 back_none'}):
            try:
                mg = div.p.a['href']
                magnets.append(mg)
            except Exception:
                pass
    print("Found Magnets...")
    shorted_links = dogbin(magnets)
    print("Dogged Magnets to del.dog...")
    msg = ""
    try:
        search_str = search_str.replace("+", " ")
    except BaseException:
        pass
    msg = "**Torrent Search Query**\n`{}`".format(
        search_str) + "\n**Results**\n"
    counter = 0
    while counter != len(titles):
        msg = msg + "⁍ [{}]".format(titles[counter]) + \
            "({})".format(shorted_links[counter]) + "\n\n"
        counter = counter + 1
    await event.edit(msg, link_preview=False)
@borg.on(admin_cmd(pattern="ts ?(.*)"))
async def ts_message_f(message):
    """Search torrents via the sjprojects API, using the command argument or
    the replied-to message text as the query, and stream results into the
    status message."""
    i_m_sefg = await message.edit("`Searching For Torrent...`")
    query = message.pattern_match.group(1)
    replied = await message.get_reply_message()
    if replied:
        query = replied.text
    if not query and not replied:
        await message.edit("`Can't search void`")
        return
    # NOTE(review): query is not URL-encoded before concatenation.
    r = requests.get(
        "https://sjprojectsapi.herokuapp.com/torrent/?query=" +
        query)
    try:
        torrents = r.json()
        reply_ = ""
        # Append results until Telegram's 4096-char message limit is reached,
        # editing the status message after each addition.
        for torrent in torrents:
            if len(reply_) < 4096:
                try:
                    reply_ = (reply_ + f"\n\n<b>{torrent['name']}</b>\n"
                              f"<b>Size:</b> {torrent['size']}\n"
                              f"<b>Seeders:</b> {torrent['seeder']}\n"
                              f"<b>Leechers:</b> {torrent['leecher']}\n"
                              f"<code>{torrent['magnet']}</code>")
                    # Sleep between edits to avoid Telegram flood limits.
                    await asyncio.sleep(3)
                    await i_m_sefg.edit(reply_, parse_mode="html")
                except Exception:
                    pass
        if reply_ == "":
            await i_m_sefg.edit(f"`No torrents found for {query}!`")
            return
    except Exception:
        await i_m_sefg.edit("`Torrent Search API is Down!\nTry again later`")
@borg.on(admin_cmd(  # pylint:disable=E0602
    pattern=r"movie (torrentz2\.eu|idop\.se) (.*)"
))
async def _(event):
    """Search the chosen site (torrentz2.eu or idop.se) and reply with up to
    11 results as HTML deep links into @TorrentSearchRoBot."""
    if event.fwd_from:
        return
    start = datetime.now()
    await event.edit("`Processing...`")
    input_type = event.pattern_match.group(1)
    input_str = event.pattern_match.group(2)
    search_results = []
    if input_type == "torrentz2.eu":
        search_results = search_torrentz_eu(input_str)
    elif input_type == "idop.se":
        search_results = search_idop_se(input_str)
    logger.info(search_results)  # pylint:disable=E0602
    output_str = ""
    i = 0
    # Cap the reply at 11 results (i > 10 breaks).
    for result in search_results:
        if i > 10:
            break
        message_text = "👉 <a href=https://t.me/TorrentSearchRoBot?start=" + \
            result["hash"] + ">" + result["title"] + ": " + "</a>" + " \r\n"
        message_text += " FILE SIZE: " + result["size"] + "\r\n"
        # message_text += " Uploaded " + result["date"] + "\r\n"
        message_text += " SEEDS: " + \
            result["seeds"] + " PEERS: " + result["peers"] + " \r\n"
        message_text += "===\r\n"
        output_str += message_text
        i = i + 1
    end = datetime.now()
    ms = (end - start).seconds
    await event.edit(
        f"Scrapped {input_type} for {input_str} in {ms} seconds. Obtained Results: \n {output_str}",
        link_preview=False,
        parse_mode="html"
    )
def search_idop_se(search_query):
    """Query the idope.se JSON API and normalise each hit into the common
    result-dict shape used by the movie handler (peers unavailable -> "NA")."""
    url = "https://idope.se/search/{}/".format(search_query)
    raw_json = requests.get(url).json()
    hits = []
    for item in raw_json["result"]["items"]:
        hits.append({
            "title": item["name"],
            "hash": item["info_hash"],
            "age": item["create_time"],
            "size": humanbytes(item["length"]),
            "seeds": str(item["seeds"]),
            "peers": "NA"
        })
    return hits
def search_torrentz_eu(search_query):
    """Scrape torrentz2.eu search results (behind Cloudflare) into the common
    result-dict shape used by the movie handler."""
    r = []
    url = "https://torrentz2.eu/searchA?safe=1&f=" + search_query + ""
    scraper = cfscrape.create_scraper()  # returns a CloudflareScraper instance
    raw_html = scraper.get(url).content
    soup = bs(raw_html, "html.parser")
    results = soup.find_all("div", {"class": "results"})
    if len(results) > 0:
        results = results[0]
    # NOTE(review): if no "results" div exists, 'results' stays a (empty)
    # ResultSet and the loop below simply yields nothing.
    for item in results.find_all("dl"):
        """The content scrapped on 23.06.2018 15:40:35
        """
        # Each <dl> holds a <dt> (link + title) and a <dd> (metadata spans).
        dt = item.find_all("dt")[0]
        dd = item.find_all("dd")[0]
        #
        try:
            link_and_text = dt.find_all("a")[0]
            link = link_and_text.get("href")[1:]  # drop leading '/' -> info hash
            title = link_and_text.get_text()
            # Positional span layout: [1]=date, [2]=size, [3]=seeds, [4]=peers.
            span_elements = dd.find_all("span")
            date = span_elements[1].get_text()
            size = span_elements[2].get_text()
            seeds = span_elements[3].get_text()
            peers = span_elements[4].get_text()
            #
            r.append({
                "title": title,
                "hash": link,
                "date": date,
                "size": size,
                "seeds": seeds,
                "peers": peers
            })
        except BaseException:
            pass
    return r
|
nilq/baby-python
|
python
|
# Generated by Django 3.2.4 on 2021-06-16 10:40
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add created_at/updated_at timestamps to accounts.Address."""

    dependencies = [
        ('accounts', '0002_auto_20210616_1024'),
    ]

    operations = [
        migrations.AddField(
            model_name='address',
            name='created_at',
            field=models.DateTimeField(auto_now_add=True, null=True, verbose_name='Created at'),
        ),
        migrations.AddField(
            model_name='address',
            name='updated_at',
            field=models.DateTimeField(auto_now=True, null=True, verbose_name='Updated at'),
        ),
    ]
|
nilq/baby-python
|
python
|
"""Test ComprehensionChecker"""
def should_be_a_list_copy():
    """Using the copy() method would be more efficient."""
    # Fixture: intentionally copies via a manual append loop so the
    # ComprehensionChecker can flag it — do not "fix" into list(original).
    original = range(10_000)
    filtered = []
    for i in original:
        filtered.append(i)
def should_be_a_list_comprehension_filtered():
    """A List comprehension would be more efficient."""
    # Fixture: append loop with a filter condition; the checker should
    # suggest `[i for i in original if i % 2]` here.
    original = range(10_000)
    filtered = []
    for i in original:
        if i % 2:
            filtered.append(i)
def should_be_a_dict_comprehension():
    """Fixture: manual dict-building loop the checker should flag."""
    pairs = (("a", 1), ("b", 2))
    result = {}
    for x, y in pairs:
        result[x] = y
def should_be_a_dict_comprehension_filtered():
    """Fixture: filtered dict-building loop the checker should flag."""
    pairs = (("a", 1), ("b", 2))
    result = {}
    for x, y in pairs:
        if y % 2:
            result[x] = y
def should_not_be_a_list_comprehension(args):
    """Internal helper for get_args."""
    # Negative fixture: the loop body has multi-branch logic, so the
    # checker must NOT suggest a comprehension here.
    # NOTE(review): Callable, is_callable_type and _eval_args are defined
    # elsewhere (this mirrors typing-inspection internals) — not in view.
    res = []
    for arg in args:
        if not isinstance(arg, tuple):
            res.append(arg)
        elif is_callable_type(arg[0]):
            if len(arg) == 2:
                res.append(Callable[[], arg[1]])
            elif arg[1] is Ellipsis:
                res.append(Callable[..., arg[2]])
            else:
                res.append(Callable[list(arg[1:-1]), arg[-1]])
        else:
            res.append(type(arg[0]).__getitem__(arg[0], _eval_args(arg[1:])))
    return tuple(res)
|
nilq/baby-python
|
python
|
# Pose presets as 6-element angle lists, presumably joint angles in
# degrees for a 6-axis arm — TODO confirm units/axis order against the
# controller code that consumes these.
start = [90.0, 30.0, 30.0, 0.0, 0.0, 0.0]        # home/rest pose
scan = [90.0, 90.0, 0.0, 0.0, 0.0, 0.0]          # pose used while scanning
grip = [90.0, 120.0, 30.0, 0.0, 0.0, 0.0]        # pick-up pose
evaluate = [90.0, 120.0, -30.0, 0.0, 0.0, 0.0]   # inspection pose
trash = [-90.0, 120.0, 30.0, 0.0, 0.0, 0.0]      # drop rejected item
transport_a = [180.0, 120.0, 30.0, 0.0, 0.0, 0.0]  # carry toward side A
transport_b = [0.0, 120.0, 30.0, 0.0, 0.0, 0.0]    # carry toward side B
detach = [-90.0, 140.0, 20.0, 0.0, 0.0, 0.0]     # release at trash side
detach_a = [180.0, 140.0, 20.0, 0.0, 0.0, 0.0]   # release at side A
detach_b = [0.0, 140.0, 20.0, 0.0, 0.0, 0.0]     # release at side B
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
"""Parse the 'Exact polarizability' tensor from a Gaussian logfile.

Prints the raw tensor (atomic units), the tensor converted to A^3 and
its diagonalised form.  All prints are single-argument print() calls,
so the script now runs under both Python 2 and Python 3 (the original
print statements were Python-2 only and a SyntaxError on Python 3).
"""
import numpy as np
import sys
import string
import numpy.linalg as lg
import argparse as ap

atb = 1.88971616463  # Bohr radii per Angstrom; atb**3 converts a.u. -> A^3

parser = ap.ArgumentParser(description="Parse polarisation from Gaussian logfile")
parser.add_argument("-f", "--logfile", help="Gaussian logfile")
args = parser.parse_args()
inputfile = args.logfile

check = False
with open(inputfile, "r") as f:
    for line in f:
        if "Exact polarizability" in line:
            check = True
            polarstring = line.split(":")[1]
            print(polarstring)
            # Fixed-width 8-character columns of the Gaussian output;
            # the last field simply runs to the end of the slice.
            xx = float(polarstring[0:8])
            xy = float(polarstring[8:16])
            yy = float(polarstring[16:24])
            xz = float(polarstring[24:32])
            yz = float(polarstring[32:40])
            zz = float(polarstring[40:58])
            # Symmetric 3x3 polarisability tensor.
            polartensor = np.array([[xx, xy, xz], [xy, yy, yz], [xz, yz, zz]])
if not check:
    print("There is no polarisability in file. Leaving")
    sys.exit()
else:
    polartensorangstrom = polartensor / (atb ** 3)
    polartensorangstromdiag = np.diag(lg.eigvalsh(polartensorangstrom))
    print("Read in string")
    print(polarstring)
    print("Convert to tensor")
    print("xx, xy, xz, yy, yz, zz")
    print("{0:4.4f} {1:4.4f} {2:4.4f} {3:4.4f} {4:4.4f} {5:4.4f}".format(polartensor[0, 0], polartensor[0, 1], polartensor[0, 2], polartensor[1, 1], polartensor[1, 2], polartensor[2, 2]))
    print("Polarisability tensor in A^3, non diagonal")
    print("xx, xy, xz, yy, yz, zz")
    print("{0:4.4f} {1:4.4f} {2:4.4f} {3:4.4f} {4:4.4f} {5:4.4f}".format(polartensorangstrom[0, 0], polartensorangstrom[0, 1], polartensorangstrom[0, 2], polartensorangstrom[1, 1], polartensorangstrom[1, 2], polartensorangstrom[2, 2]))
    print("Diagonal tensor in A^3")
    print("xx, xy, xz, yy, yz, zz")
    print("{0:4.4f} 0.0 0.0 {1:4.4f} 0.0 {2:4.4f}".format(polartensorangstromdiag[0, 0], polartensorangstromdiag[1, 1], polartensorangstromdiag[2, 2]))
|
nilq/baby-python
|
python
|
"""Import all the LINQ observable extension methods."""
# flake8: noqa
from . import all
from . import amb
from . import and_
from . import some
from . import asobservable
from . import average
from . import buffer
from . import bufferwithtime
from . import bufferwithtimeorcount
from . import case
from . import catch
from . import concat
from . import contains
from . import combinelatest
from . import count
from . import create
from . import debounce
from . import defaultifempty
from . import defer
from . import delay
from . import delaysubscription
from . import delaywithselector
from . import dematerialize
from . import distinct
from . import distinctuntilchanged
from . import doaction
from . import dowhile
from . import elementat
from . import elementatordefault
from . import empty
from . import exclusive
from . import expand
from . import finallyaction
from . import find
from . import findindex
from . import first
from . import firstordefault
from . import forin
from . import fromiterable
from . import fromcallback
from . import fromfuture
from . import generate
from . import generatewithrelativetime
from . import groupby
from . import groupbyuntil
from . import groupjoin
from . import ifthen
from . import ignoreelements
from . import interval
from . import isempty
from . import join
from . import last
from . import let
from . import lastordefault
from . import manyselect
from . import materialize
from . import merge
from . import max
from . import maxby
from . import min
from . import minby
from . import multicast
from . import never
from . import observeon
from . import onerrorresumenext
from . import of
from . import pairwise
from . import partition
from . import pluck
from . import publish
from . import publishvalue
from . import range
from . import reduce
from . import repeat
from . import replay
from . import retry
from . import returnvalue
from . import scan
from . import sample
from . import select
from . import selectswitch
from . import selectmany
from . import sequenceequal
from . import single
from . import singleordefault
from . import skip
from . import skiplast
from . import skiplastwithtime
from . import skipuntil
from . import skipuntilwithtime
from . import skipwhile
from . import skipwithtime
from . import slice
from . import start
from . import startasync
from . import startswith
from . import statistics
from . import subscribeon
from . import sum
from . import switchlatest
from . import take
from . import takelast
from . import takelastbuffer
from . import takelastwithtime
from . import takeuntil
from . import takeuntilwithtime
from . import takewhile
from . import takewithtime
from . import thendo
from . import throttlefirst
from . import throw
from . import timeinterval
from . import timer
from . import timeout
from . import timeoutwithselector
from . import timestamp
from . import toasync
from . import toblocking
from . import todict
from . import tofuture
from . import toiterable
from . import tolist
from . import toset
from . import transduce
from . import using
from . import when
from . import where
from . import whiledo
from . import window
from . import windowwithcount
from . import windowwithtime
from . import windowwithtimeorcount
from . import withlatestfrom
from . import zip
from . import ziparray
from . import blocking
|
nilq/baby-python
|
python
|
from django.shortcuts import render, redirect
from django.core.files.storage import FileSystemStorage
from .models import Document
from .forms import DocumentForm
import logging
from cid.locals import get_cid
from collections import OrderedDict
from common.mq.kafka import producer
import os
# Kafka connection settings, read from the environment at import time.
KAFKA_BROKER_URL = os.environ.get('KAFKA_BROKER_URL')
TOPIC = os.environ.get('UPLOAD_TOPIC')
logger = logging.getLogger('notzam')
# Lazy %s formatting: survives KAFKA_BROKER_URL being None (the previous
# string concatenation raised TypeError whenever the env var was unset).
logger.info("KAFKA_BROKER_URL: %s", KAFKA_BROKER_URL)
def home(request):
    """Render the landing page listing every uploaded document."""
    context = {'documents': Document.objects.all()}
    return render(request, 'home.html', context)
def backgrounds(request):
    """Upload view storing files under the 'backgrounds' subfolder."""
    return save_file(request, 'backgrounds')
def activates(request):
    """Upload view storing files under the 'activates' subfolder."""
    return save_file(request, 'activates')
def negatives(request):
    """Upload view storing files under the 'negatives' subfolder."""
    return save_file(request, 'negatives')
def save_file(request, path=None):
    """Store an uploaded file and notify the Kafka pipeline.

    On a POST carrying a 'myfile' upload, saves it (under `path`/ when
    given), publishes its absolute path via make_json(), and re-renders
    the corresponding upload page with the file's URL.  Otherwise just
    renders the upload page (or 'home.html' when path is None).
    """
    # .get() instead of ['myfile']: a POST without a file previously
    # raised MultiValueDictKeyError instead of falling through.
    if request.method == 'POST' and request.FILES.get('myfile'):
        myfile = request.FILES['myfile']
        fs = FileSystemStorage()
        filename = fs.save(myfile.name if path is None else path + '/' + myfile.name, myfile)
        uploaded_file_url = fs.url(filename)
        logger.info(fs.path(filename))
        make_json(fs.path(filename))
        return render(request, path+'_uploads.html', {
            'uploaded_file_url': uploaded_file_url
        })
    return render(request, path+'_uploads.html' if path is not None else 'home.html')
# Module-level Kafka producer, created once at import time.
msg_q = producer(KAFKA_BROKER_URL)
def make_json(path):
    """Publish {cid, path} for an uploaded file to the upload topic."""
    # OrderedDict keeps 'cid' first in the serialized message.
    jsondict = OrderedDict()
    jsondict["cid"] = get_cid()  # correlation id for request tracing
    jsondict["path"] = path
    msg_q(TOPIC, jsondict)
|
nilq/baby-python
|
python
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ._enums import *
__all__ = [
'AutomationRunbookReceiverArgs',
'AzureAppPushReceiverArgs',
'AzureFunctionReceiverArgs',
'DynamicMetricCriteriaArgs',
'DynamicThresholdFailingPeriodsArgs',
'EmailReceiverArgs',
'ItsmReceiverArgs',
'LogicAppReceiverArgs',
'MetricAlertActionArgs',
'MetricAlertMultipleResourceMultipleMetricCriteriaArgs',
'MetricAlertSingleResourceMultipleMetricCriteriaArgs',
'MetricCriteriaArgs',
'MetricDimensionArgs',
'SmsReceiverArgs',
'VoiceReceiverArgs',
'WebhookReceiverArgs',
'WebtestLocationAvailabilityCriteriaArgs',
]
# NOTE(review): generated by the Pulumi SDK generator (see file header);
# edits restricted to comments.
@pulumi.input_type
class AutomationRunbookReceiverArgs:
    def __init__(__self__, *,
                 automation_account_id: pulumi.Input[str],
                 is_global_runbook: pulumi.Input[bool],
                 runbook_name: pulumi.Input[str],
                 webhook_resource_id: pulumi.Input[str],
                 name: Optional[pulumi.Input[str]] = None,
                 service_uri: Optional[pulumi.Input[str]] = None):
        """
        The Azure Automation Runbook notification receiver.
        :param pulumi.Input[str] automation_account_id: The Azure automation account Id which holds this runbook and authenticate to Azure resource.
        :param pulumi.Input[bool] is_global_runbook: Indicates whether this instance is global runbook.
        :param pulumi.Input[str] runbook_name: The name for this runbook.
        :param pulumi.Input[str] webhook_resource_id: The resource id for webhook linked to this runbook.
        :param pulumi.Input[str] name: Indicates name of the webhook.
        :param pulumi.Input[str] service_uri: The URI where webhooks should be sent.
        """
        pulumi.set(__self__, "automation_account_id", automation_account_id)
        pulumi.set(__self__, "is_global_runbook", is_global_runbook)
        pulumi.set(__self__, "runbook_name", runbook_name)
        pulumi.set(__self__, "webhook_resource_id", webhook_resource_id)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if service_uri is not None:
            pulumi.set(__self__, "service_uri", service_uri)
    @property
    @pulumi.getter(name="automationAccountId")
    def automation_account_id(self) -> pulumi.Input[str]:
        """
        The Azure automation account Id which holds this runbook and authenticate to Azure resource.
        """
        return pulumi.get(self, "automation_account_id")
    @automation_account_id.setter
    def automation_account_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "automation_account_id", value)
    @property
    @pulumi.getter(name="isGlobalRunbook")
    def is_global_runbook(self) -> pulumi.Input[bool]:
        """
        Indicates whether this instance is global runbook.
        """
        return pulumi.get(self, "is_global_runbook")
    @is_global_runbook.setter
    def is_global_runbook(self, value: pulumi.Input[bool]):
        pulumi.set(self, "is_global_runbook", value)
    @property
    @pulumi.getter(name="runbookName")
    def runbook_name(self) -> pulumi.Input[str]:
        """
        The name for this runbook.
        """
        return pulumi.get(self, "runbook_name")
    @runbook_name.setter
    def runbook_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "runbook_name", value)
    @property
    @pulumi.getter(name="webhookResourceId")
    def webhook_resource_id(self) -> pulumi.Input[str]:
        """
        The resource id for webhook linked to this runbook.
        """
        return pulumi.get(self, "webhook_resource_id")
    @webhook_resource_id.setter
    def webhook_resource_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "webhook_resource_id", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Indicates name of the webhook.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="serviceUri")
    def service_uri(self) -> Optional[pulumi.Input[str]]:
        """
        The URI where webhooks should be sent.
        """
        return pulumi.get(self, "service_uri")
    @service_uri.setter
    def service_uri(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "service_uri", value)
# NOTE(review): generated by the Pulumi SDK generator; comments only.
@pulumi.input_type
class AzureAppPushReceiverArgs:
    def __init__(__self__, *,
                 email_address: pulumi.Input[str],
                 name: pulumi.Input[str]):
        """
        The Azure mobile App push notification receiver.
        :param pulumi.Input[str] email_address: The email address registered for the Azure mobile app.
        :param pulumi.Input[str] name: The name of the Azure mobile app push receiver. Names must be unique across all receivers within an action group.
        """
        pulumi.set(__self__, "email_address", email_address)
        pulumi.set(__self__, "name", name)
    @property
    @pulumi.getter(name="emailAddress")
    def email_address(self) -> pulumi.Input[str]:
        """
        The email address registered for the Azure mobile app.
        """
        return pulumi.get(self, "email_address")
    @email_address.setter
    def email_address(self, value: pulumi.Input[str]):
        pulumi.set(self, "email_address", value)
    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        The name of the Azure mobile app push receiver. Names must be unique across all receivers within an action group.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)
# NOTE(review): generated by the Pulumi SDK generator; comments only.
@pulumi.input_type
class AzureFunctionReceiverArgs:
    def __init__(__self__, *,
                 function_app_resource_id: pulumi.Input[str],
                 function_name: pulumi.Input[str],
                 http_trigger_url: pulumi.Input[str],
                 name: pulumi.Input[str]):
        """
        An azure function receiver.
        :param pulumi.Input[str] function_app_resource_id: The azure resource id of the function app.
        :param pulumi.Input[str] function_name: The function name in the function app.
        :param pulumi.Input[str] http_trigger_url: The http trigger url where http request sent to.
        :param pulumi.Input[str] name: The name of the azure function receiver. Names must be unique across all receivers within an action group.
        """
        pulumi.set(__self__, "function_app_resource_id", function_app_resource_id)
        pulumi.set(__self__, "function_name", function_name)
        pulumi.set(__self__, "http_trigger_url", http_trigger_url)
        pulumi.set(__self__, "name", name)
    @property
    @pulumi.getter(name="functionAppResourceId")
    def function_app_resource_id(self) -> pulumi.Input[str]:
        """
        The azure resource id of the function app.
        """
        return pulumi.get(self, "function_app_resource_id")
    @function_app_resource_id.setter
    def function_app_resource_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "function_app_resource_id", value)
    @property
    @pulumi.getter(name="functionName")
    def function_name(self) -> pulumi.Input[str]:
        """
        The function name in the function app.
        """
        return pulumi.get(self, "function_name")
    @function_name.setter
    def function_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "function_name", value)
    @property
    @pulumi.getter(name="httpTriggerUrl")
    def http_trigger_url(self) -> pulumi.Input[str]:
        """
        The http trigger url where http request sent to.
        """
        return pulumi.get(self, "http_trigger_url")
    @http_trigger_url.setter
    def http_trigger_url(self, value: pulumi.Input[str]):
        pulumi.set(self, "http_trigger_url", value)
    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        The name of the azure function receiver. Names must be unique across all receivers within an action group.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)
# NOTE(review): generated by the Pulumi SDK generator; comments only.
@pulumi.input_type
class DynamicMetricCriteriaArgs:
    def __init__(__self__, *,
                 alert_sensitivity: pulumi.Input[Union[str, 'DynamicThresholdSensitivity']],
                 criterion_type: pulumi.Input[str],
                 failing_periods: pulumi.Input['DynamicThresholdFailingPeriodsArgs'],
                 metric_name: pulumi.Input[str],
                 name: pulumi.Input[str],
                 operator: pulumi.Input[Union[str, 'DynamicThresholdOperator']],
                 time_aggregation: pulumi.Input[Union[str, 'AggregationTypeEnum']],
                 dimensions: Optional[pulumi.Input[Sequence[pulumi.Input['MetricDimensionArgs']]]] = None,
                 ignore_data_before: Optional[pulumi.Input[str]] = None,
                 metric_namespace: Optional[pulumi.Input[str]] = None,
                 skip_metric_validation: Optional[pulumi.Input[bool]] = None):
        """
        Criterion for dynamic threshold.
        :param pulumi.Input[Union[str, 'DynamicThresholdSensitivity']] alert_sensitivity: The extent of deviation required to trigger an alert. This will affect how tight the threshold is to the metric series pattern.
        :param pulumi.Input[str] criterion_type: Specifies the type of threshold criteria
               Expected value is 'DynamicThresholdCriterion'.
        :param pulumi.Input['DynamicThresholdFailingPeriodsArgs'] failing_periods: The minimum number of violations required within the selected lookback time window required to raise an alert.
        :param pulumi.Input[str] metric_name: Name of the metric.
        :param pulumi.Input[str] name: Name of the criteria.
        :param pulumi.Input[Union[str, 'DynamicThresholdOperator']] operator: The operator used to compare the metric value against the threshold.
        :param pulumi.Input[Union[str, 'AggregationTypeEnum']] time_aggregation: the criteria time aggregation types.
        :param pulumi.Input[Sequence[pulumi.Input['MetricDimensionArgs']]] dimensions: List of dimension conditions.
        :param pulumi.Input[str] ignore_data_before: Use this option to set the date from which to start learning the metric historical data and calculate the dynamic thresholds (in ISO8601 format)
        :param pulumi.Input[str] metric_namespace: Namespace of the metric.
        :param pulumi.Input[bool] skip_metric_validation: Allows creating an alert rule on a custom metric that isn't yet emitted, by causing the metric validation to be skipped.
        """
        pulumi.set(__self__, "alert_sensitivity", alert_sensitivity)
        # Discriminator is pinned to the literal; the criterion_type
        # argument's value is intentionally ignored here (generated code).
        pulumi.set(__self__, "criterion_type", 'DynamicThresholdCriterion')
        pulumi.set(__self__, "failing_periods", failing_periods)
        pulumi.set(__self__, "metric_name", metric_name)
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "operator", operator)
        pulumi.set(__self__, "time_aggregation", time_aggregation)
        if dimensions is not None:
            pulumi.set(__self__, "dimensions", dimensions)
        if ignore_data_before is not None:
            pulumi.set(__self__, "ignore_data_before", ignore_data_before)
        if metric_namespace is not None:
            pulumi.set(__self__, "metric_namespace", metric_namespace)
        if skip_metric_validation is not None:
            pulumi.set(__self__, "skip_metric_validation", skip_metric_validation)
    @property
    @pulumi.getter(name="alertSensitivity")
    def alert_sensitivity(self) -> pulumi.Input[Union[str, 'DynamicThresholdSensitivity']]:
        """
        The extent of deviation required to trigger an alert. This will affect how tight the threshold is to the metric series pattern.
        """
        return pulumi.get(self, "alert_sensitivity")
    @alert_sensitivity.setter
    def alert_sensitivity(self, value: pulumi.Input[Union[str, 'DynamicThresholdSensitivity']]):
        pulumi.set(self, "alert_sensitivity", value)
    @property
    @pulumi.getter(name="criterionType")
    def criterion_type(self) -> pulumi.Input[str]:
        """
        Specifies the type of threshold criteria
        Expected value is 'DynamicThresholdCriterion'.
        """
        return pulumi.get(self, "criterion_type")
    @criterion_type.setter
    def criterion_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "criterion_type", value)
    @property
    @pulumi.getter(name="failingPeriods")
    def failing_periods(self) -> pulumi.Input['DynamicThresholdFailingPeriodsArgs']:
        """
        The minimum number of violations required within the selected lookback time window required to raise an alert.
        """
        return pulumi.get(self, "failing_periods")
    @failing_periods.setter
    def failing_periods(self, value: pulumi.Input['DynamicThresholdFailingPeriodsArgs']):
        pulumi.set(self, "failing_periods", value)
    @property
    @pulumi.getter(name="metricName")
    def metric_name(self) -> pulumi.Input[str]:
        """
        Name of the metric.
        """
        return pulumi.get(self, "metric_name")
    @metric_name.setter
    def metric_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "metric_name", value)
    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        Name of the criteria.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def operator(self) -> pulumi.Input[Union[str, 'DynamicThresholdOperator']]:
        """
        The operator used to compare the metric value against the threshold.
        """
        return pulumi.get(self, "operator")
    @operator.setter
    def operator(self, value: pulumi.Input[Union[str, 'DynamicThresholdOperator']]):
        pulumi.set(self, "operator", value)
    @property
    @pulumi.getter(name="timeAggregation")
    def time_aggregation(self) -> pulumi.Input[Union[str, 'AggregationTypeEnum']]:
        """
        the criteria time aggregation types.
        """
        return pulumi.get(self, "time_aggregation")
    @time_aggregation.setter
    def time_aggregation(self, value: pulumi.Input[Union[str, 'AggregationTypeEnum']]):
        pulumi.set(self, "time_aggregation", value)
    @property
    @pulumi.getter
    def dimensions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MetricDimensionArgs']]]]:
        """
        List of dimension conditions.
        """
        return pulumi.get(self, "dimensions")
    @dimensions.setter
    def dimensions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MetricDimensionArgs']]]]):
        pulumi.set(self, "dimensions", value)
    @property
    @pulumi.getter(name="ignoreDataBefore")
    def ignore_data_before(self) -> Optional[pulumi.Input[str]]:
        """
        Use this option to set the date from which to start learning the metric historical data and calculate the dynamic thresholds (in ISO8601 format)
        """
        return pulumi.get(self, "ignore_data_before")
    @ignore_data_before.setter
    def ignore_data_before(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ignore_data_before", value)
    @property
    @pulumi.getter(name="metricNamespace")
    def metric_namespace(self) -> Optional[pulumi.Input[str]]:
        """
        Namespace of the metric.
        """
        return pulumi.get(self, "metric_namespace")
    @metric_namespace.setter
    def metric_namespace(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "metric_namespace", value)
    @property
    @pulumi.getter(name="skipMetricValidation")
    def skip_metric_validation(self) -> Optional[pulumi.Input[bool]]:
        """
        Allows creating an alert rule on a custom metric that isn't yet emitted, by causing the metric validation to be skipped.
        """
        return pulumi.get(self, "skip_metric_validation")
    @skip_metric_validation.setter
    def skip_metric_validation(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "skip_metric_validation", value)
# NOTE(review): generated by the Pulumi SDK generator; comments only.
@pulumi.input_type
class DynamicThresholdFailingPeriodsArgs:
    def __init__(__self__, *,
                 min_failing_periods_to_alert: pulumi.Input[float],
                 number_of_evaluation_periods: pulumi.Input[float]):
        """
        The minimum number of violations required within the selected lookback time window required to raise an alert.
        :param pulumi.Input[float] min_failing_periods_to_alert: The number of violations to trigger an alert. Should be smaller or equal to numberOfEvaluationPeriods.
        :param pulumi.Input[float] number_of_evaluation_periods: The number of aggregated lookback points. The lookback time window is calculated based on the aggregation granularity (windowSize) and the selected number of aggregated points.
        """
        pulumi.set(__self__, "min_failing_periods_to_alert", min_failing_periods_to_alert)
        pulumi.set(__self__, "number_of_evaluation_periods", number_of_evaluation_periods)
    @property
    @pulumi.getter(name="minFailingPeriodsToAlert")
    def min_failing_periods_to_alert(self) -> pulumi.Input[float]:
        """
        The number of violations to trigger an alert. Should be smaller or equal to numberOfEvaluationPeriods.
        """
        return pulumi.get(self, "min_failing_periods_to_alert")
    @min_failing_periods_to_alert.setter
    def min_failing_periods_to_alert(self, value: pulumi.Input[float]):
        pulumi.set(self, "min_failing_periods_to_alert", value)
    @property
    @pulumi.getter(name="numberOfEvaluationPeriods")
    def number_of_evaluation_periods(self) -> pulumi.Input[float]:
        """
        The number of aggregated lookback points. The lookback time window is calculated based on the aggregation granularity (windowSize) and the selected number of aggregated points.
        """
        return pulumi.get(self, "number_of_evaluation_periods")
    @number_of_evaluation_periods.setter
    def number_of_evaluation_periods(self, value: pulumi.Input[float]):
        pulumi.set(self, "number_of_evaluation_periods", value)
# NOTE(review): generated by the Pulumi SDK generator; comments only.
@pulumi.input_type
class EmailReceiverArgs:
    def __init__(__self__, *,
                 email_address: pulumi.Input[str],
                 name: pulumi.Input[str]):
        """
        An email receiver.
        :param pulumi.Input[str] email_address: The email address of this receiver.
        :param pulumi.Input[str] name: The name of the email receiver. Names must be unique across all receivers within an action group.
        """
        pulumi.set(__self__, "email_address", email_address)
        pulumi.set(__self__, "name", name)
    @property
    @pulumi.getter(name="emailAddress")
    def email_address(self) -> pulumi.Input[str]:
        """
        The email address of this receiver.
        """
        return pulumi.get(self, "email_address")
    @email_address.setter
    def email_address(self, value: pulumi.Input[str]):
        pulumi.set(self, "email_address", value)
    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        The name of the email receiver. Names must be unique across all receivers within an action group.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)
# NOTE(review): generated by the Pulumi SDK generator; comments only.
@pulumi.input_type
class ItsmReceiverArgs:
    def __init__(__self__, *,
                 connection_id: pulumi.Input[str],
                 name: pulumi.Input[str],
                 region: pulumi.Input[str],
                 ticket_configuration: pulumi.Input[str],
                 workspace_id: pulumi.Input[str]):
        """
        An Itsm receiver.
        :param pulumi.Input[str] connection_id: Unique identification of ITSM connection among multiple defined in above workspace.
        :param pulumi.Input[str] name: The name of the Itsm receiver. Names must be unique across all receivers within an action group.
        :param pulumi.Input[str] region: Region in which workspace resides. Supported values:'centralindia','japaneast','southeastasia','australiasoutheast','uksouth','westcentralus','canadacentral','eastus','westeurope'
        :param pulumi.Input[str] ticket_configuration: JSON blob for the configurations of the ITSM action. CreateMultipleWorkItems option will be part of this blob as well.
        :param pulumi.Input[str] workspace_id: OMS LA instance identifier.
        """
        pulumi.set(__self__, "connection_id", connection_id)
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "region", region)
        pulumi.set(__self__, "ticket_configuration", ticket_configuration)
        pulumi.set(__self__, "workspace_id", workspace_id)
    @property
    @pulumi.getter(name="connectionId")
    def connection_id(self) -> pulumi.Input[str]:
        """
        Unique identification of ITSM connection among multiple defined in above workspace.
        """
        return pulumi.get(self, "connection_id")
    @connection_id.setter
    def connection_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "connection_id", value)
    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        The name of the Itsm receiver. Names must be unique across all receivers within an action group.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def region(self) -> pulumi.Input[str]:
        """
        Region in which workspace resides. Supported values:'centralindia','japaneast','southeastasia','australiasoutheast','uksouth','westcentralus','canadacentral','eastus','westeurope'
        """
        return pulumi.get(self, "region")
    @region.setter
    def region(self, value: pulumi.Input[str]):
        pulumi.set(self, "region", value)
    @property
    @pulumi.getter(name="ticketConfiguration")
    def ticket_configuration(self) -> pulumi.Input[str]:
        """
        JSON blob for the configurations of the ITSM action. CreateMultipleWorkItems option will be part of this blob as well.
        """
        return pulumi.get(self, "ticket_configuration")
    @ticket_configuration.setter
    def ticket_configuration(self, value: pulumi.Input[str]):
        pulumi.set(self, "ticket_configuration", value)
    @property
    @pulumi.getter(name="workspaceId")
    def workspace_id(self) -> pulumi.Input[str]:
        """
        OMS LA instance identifier.
        """
        return pulumi.get(self, "workspace_id")
    @workspace_id.setter
    def workspace_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "workspace_id", value)
# NOTE(review): generated by the Pulumi SDK generator; comments only.
@pulumi.input_type
class LogicAppReceiverArgs:
    def __init__(__self__, *,
                 callback_url: pulumi.Input[str],
                 name: pulumi.Input[str],
                 resource_id: pulumi.Input[str]):
        """
        A logic app receiver.
        :param pulumi.Input[str] callback_url: The callback url where http request sent to.
        :param pulumi.Input[str] name: The name of the logic app receiver. Names must be unique across all receivers within an action group.
        :param pulumi.Input[str] resource_id: The azure resource id of the logic app receiver.
        """
        pulumi.set(__self__, "callback_url", callback_url)
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "resource_id", resource_id)
    @property
    @pulumi.getter(name="callbackUrl")
    def callback_url(self) -> pulumi.Input[str]:
        """
        The callback url where http request sent to.
        """
        return pulumi.get(self, "callback_url")
    @callback_url.setter
    def callback_url(self, value: pulumi.Input[str]):
        pulumi.set(self, "callback_url", value)
    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        The name of the logic app receiver. Names must be unique across all receivers within an action group.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="resourceId")
    def resource_id(self) -> pulumi.Input[str]:
        """
        The azure resource id of the logic app receiver.
        """
        return pulumi.get(self, "resource_id")
    @resource_id.setter
    def resource_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_id", value)
# NOTE(review): generated by the Pulumi SDK generator; comments only.
@pulumi.input_type
class MetricAlertActionArgs:
    def __init__(__self__, *,
                 action_group_id: Optional[pulumi.Input[str]] = None,
                 web_hook_properties: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        An alert action.
        :param pulumi.Input[str] action_group_id: the id of the action group to use.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] web_hook_properties: This field allows specifying custom properties, which would be appended to the alert payload sent as input to the webhook.
        """
        if action_group_id is not None:
            pulumi.set(__self__, "action_group_id", action_group_id)
        if web_hook_properties is not None:
            pulumi.set(__self__, "web_hook_properties", web_hook_properties)
    @property
    @pulumi.getter(name="actionGroupId")
    def action_group_id(self) -> Optional[pulumi.Input[str]]:
        """
        the id of the action group to use.
        """
        return pulumi.get(self, "action_group_id")
    @action_group_id.setter
    def action_group_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "action_group_id", value)
    @property
    @pulumi.getter(name="webHookProperties")
    def web_hook_properties(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        This field allows specifying custom properties, which would be appended to the alert payload sent as input to the webhook.
        """
        return pulumi.get(self, "web_hook_properties")
    @web_hook_properties.setter
    def web_hook_properties(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "web_hook_properties", value)
# NOTE(review): generated by the Pulumi SDK generator; comments only.
@pulumi.input_type
class MetricAlertMultipleResourceMultipleMetricCriteriaArgs:
    def __init__(__self__, *,
                 odata_type: pulumi.Input[str],
                 all_of: Optional[pulumi.Input[Sequence[pulumi.Input[Union['DynamicMetricCriteriaArgs', 'MetricCriteriaArgs']]]]] = None):
        """
        Specifies the metric alert criteria for multiple resource that has multiple metric criteria.
        :param pulumi.Input[str] odata_type: specifies the type of the alert criteria.
               Expected value is 'Microsoft.Azure.Monitor.MultipleResourceMultipleMetricCriteria'.
        :param pulumi.Input[Sequence[pulumi.Input[Union['DynamicMetricCriteriaArgs', 'MetricCriteriaArgs']]]] all_of: the list of multiple metric criteria for this 'all of' operation.
        """
        # Discriminator is pinned to the literal; the odata_type argument's
        # value is intentionally ignored here (generated code).
        pulumi.set(__self__, "odata_type", 'Microsoft.Azure.Monitor.MultipleResourceMultipleMetricCriteria')
        if all_of is not None:
            pulumi.set(__self__, "all_of", all_of)
    @property
    @pulumi.getter(name="odataType")
    def odata_type(self) -> pulumi.Input[str]:
        """
        specifies the type of the alert criteria.
        Expected value is 'Microsoft.Azure.Monitor.MultipleResourceMultipleMetricCriteria'.
        """
        return pulumi.get(self, "odata_type")
    @odata_type.setter
    def odata_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "odata_type", value)
    @property
    @pulumi.getter(name="allOf")
    def all_of(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[Union['DynamicMetricCriteriaArgs', 'MetricCriteriaArgs']]]]]:
        """
        the list of multiple metric criteria for this 'all of' operation.
        """
        return pulumi.get(self, "all_of")
    @all_of.setter
    def all_of(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[Union['DynamicMetricCriteriaArgs', 'MetricCriteriaArgs']]]]]):
        pulumi.set(self, "all_of", value)
@pulumi.input_type
class MetricAlertSingleResourceMultipleMetricCriteriaArgs:
    # NOTE: auto-generated Pulumi input type. @pulumi.input_type introspects
    # the decorated properties, so the structure must not be hand-edited.
    def __init__(__self__, *,
                 odata_type: pulumi.Input[str],
                 all_of: Optional[pulumi.Input[Sequence[pulumi.Input['MetricCriteriaArgs']]]] = None):
        """
        Specifies the metric alert criteria for a single resource that has multiple metric criteria.
        :param pulumi.Input[str] odata_type: specifies the type of the alert criteria.
               Expected value is 'Microsoft.Azure.Monitor.SingleResourceMultipleMetricCriteria'.
        :param pulumi.Input[Sequence[pulumi.Input['MetricCriteriaArgs']]] all_of: The list of metric criteria for this 'all of' operation.
        """
        # The discriminator value is hardcoded by the code generator; the
        # odata_type argument itself is intentionally not used here.
        pulumi.set(__self__, "odata_type", 'Microsoft.Azure.Monitor.SingleResourceMultipleMetricCriteria')
        if all_of is not None:
            pulumi.set(__self__, "all_of", all_of)

    @property
    @pulumi.getter(name="odataType")
    def odata_type(self) -> pulumi.Input[str]:
        """
        specifies the type of the alert criteria.
        Expected value is 'Microsoft.Azure.Monitor.SingleResourceMultipleMetricCriteria'.
        """
        return pulumi.get(self, "odata_type")

    @odata_type.setter
    def odata_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "odata_type", value)

    @property
    @pulumi.getter(name="allOf")
    def all_of(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MetricCriteriaArgs']]]]:
        """
        The list of metric criteria for this 'all of' operation.
        """
        return pulumi.get(self, "all_of")

    @all_of.setter
    def all_of(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MetricCriteriaArgs']]]]):
        pulumi.set(self, "all_of", value)
@pulumi.input_type
class MetricCriteriaArgs:
    # NOTE: auto-generated Pulumi input type representing a static-threshold
    # metric criterion. @pulumi.input_type introspects the decorated
    # properties, so the structure must not be hand-edited.
    def __init__(__self__, *,
                 criterion_type: pulumi.Input[str],
                 metric_name: pulumi.Input[str],
                 name: pulumi.Input[str],
                 operator: pulumi.Input[Union[str, 'Operator']],
                 threshold: pulumi.Input[float],
                 time_aggregation: pulumi.Input[Union[str, 'AggregationTypeEnum']],
                 dimensions: Optional[pulumi.Input[Sequence[pulumi.Input['MetricDimensionArgs']]]] = None,
                 metric_namespace: Optional[pulumi.Input[str]] = None,
                 skip_metric_validation: Optional[pulumi.Input[bool]] = None):
        """
        Criterion to filter metrics.
        :param pulumi.Input[str] criterion_type: Specifies the type of threshold criteria
               Expected value is 'StaticThresholdCriterion'.
        :param pulumi.Input[str] metric_name: Name of the metric.
        :param pulumi.Input[str] name: Name of the criteria.
        :param pulumi.Input[Union[str, 'Operator']] operator: the criteria operator.
        :param pulumi.Input[float] threshold: the criteria threshold value that activates the alert.
        :param pulumi.Input[Union[str, 'AggregationTypeEnum']] time_aggregation: the criteria time aggregation types.
        :param pulumi.Input[Sequence[pulumi.Input['MetricDimensionArgs']]] dimensions: List of dimension conditions.
        :param pulumi.Input[str] metric_namespace: Namespace of the metric.
        :param pulumi.Input[bool] skip_metric_validation: Allows creating an alert rule on a custom metric that isn't yet emitted, by causing the metric validation to be skipped.
        """
        # The discriminator value is hardcoded by the code generator; the
        # criterion_type argument itself is intentionally not used here.
        pulumi.set(__self__, "criterion_type", 'StaticThresholdCriterion')
        pulumi.set(__self__, "metric_name", metric_name)
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "operator", operator)
        pulumi.set(__self__, "threshold", threshold)
        pulumi.set(__self__, "time_aggregation", time_aggregation)
        if dimensions is not None:
            pulumi.set(__self__, "dimensions", dimensions)
        if metric_namespace is not None:
            pulumi.set(__self__, "metric_namespace", metric_namespace)
        if skip_metric_validation is not None:
            pulumi.set(__self__, "skip_metric_validation", skip_metric_validation)

    @property
    @pulumi.getter(name="criterionType")
    def criterion_type(self) -> pulumi.Input[str]:
        """
        Specifies the type of threshold criteria
        Expected value is 'StaticThresholdCriterion'.
        """
        return pulumi.get(self, "criterion_type")

    @criterion_type.setter
    def criterion_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "criterion_type", value)

    @property
    @pulumi.getter(name="metricName")
    def metric_name(self) -> pulumi.Input[str]:
        """
        Name of the metric.
        """
        return pulumi.get(self, "metric_name")

    @metric_name.setter
    def metric_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "metric_name", value)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        Name of the criteria.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def operator(self) -> pulumi.Input[Union[str, 'Operator']]:
        """
        the criteria operator.
        """
        return pulumi.get(self, "operator")

    @operator.setter
    def operator(self, value: pulumi.Input[Union[str, 'Operator']]):
        pulumi.set(self, "operator", value)

    @property
    @pulumi.getter
    def threshold(self) -> pulumi.Input[float]:
        """
        the criteria threshold value that activates the alert.
        """
        return pulumi.get(self, "threshold")

    @threshold.setter
    def threshold(self, value: pulumi.Input[float]):
        pulumi.set(self, "threshold", value)

    @property
    @pulumi.getter(name="timeAggregation")
    def time_aggregation(self) -> pulumi.Input[Union[str, 'AggregationTypeEnum']]:
        """
        the criteria time aggregation types.
        """
        return pulumi.get(self, "time_aggregation")

    @time_aggregation.setter
    def time_aggregation(self, value: pulumi.Input[Union[str, 'AggregationTypeEnum']]):
        pulumi.set(self, "time_aggregation", value)

    @property
    @pulumi.getter
    def dimensions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MetricDimensionArgs']]]]:
        """
        List of dimension conditions.
        """
        return pulumi.get(self, "dimensions")

    @dimensions.setter
    def dimensions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MetricDimensionArgs']]]]):
        pulumi.set(self, "dimensions", value)

    @property
    @pulumi.getter(name="metricNamespace")
    def metric_namespace(self) -> Optional[pulumi.Input[str]]:
        """
        Namespace of the metric.
        """
        return pulumi.get(self, "metric_namespace")

    @metric_namespace.setter
    def metric_namespace(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "metric_namespace", value)

    @property
    @pulumi.getter(name="skipMetricValidation")
    def skip_metric_validation(self) -> Optional[pulumi.Input[bool]]:
        """
        Allows creating an alert rule on a custom metric that isn't yet emitted, by causing the metric validation to be skipped.
        """
        return pulumi.get(self, "skip_metric_validation")

    @skip_metric_validation.setter
    def skip_metric_validation(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "skip_metric_validation", value)
@pulumi.input_type
class MetricDimensionArgs:
    # NOTE: auto-generated Pulumi input type. @pulumi.input_type introspects
    # the decorated properties, so the structure must not be hand-edited.
    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 operator: pulumi.Input[str],
                 values: pulumi.Input[Sequence[pulumi.Input[str]]]):
        """
        Specifies a metric dimension.
        :param pulumi.Input[str] name: Name of the dimension.
        :param pulumi.Input[str] operator: the dimension operator. Only 'Include' and 'Exclude' are supported
        :param pulumi.Input[Sequence[pulumi.Input[str]]] values: list of dimension values.
        """
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "operator", operator)
        pulumi.set(__self__, "values", values)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        Name of the dimension.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def operator(self) -> pulumi.Input[str]:
        """
        the dimension operator. Only 'Include' and 'Exclude' are supported
        """
        return pulumi.get(self, "operator")

    @operator.setter
    def operator(self, value: pulumi.Input[str]):
        pulumi.set(self, "operator", value)

    @property
    @pulumi.getter
    def values(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        list of dimension values.
        """
        return pulumi.get(self, "values")

    @values.setter
    def values(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "values", value)
@pulumi.input_type
class SmsReceiverArgs:
    # NOTE: auto-generated Pulumi input type. @pulumi.input_type introspects
    # the decorated properties, so the structure must not be hand-edited.
    def __init__(__self__, *,
                 country_code: pulumi.Input[str],
                 name: pulumi.Input[str],
                 phone_number: pulumi.Input[str]):
        """
        An SMS receiver.
        :param pulumi.Input[str] country_code: The country code of the SMS receiver.
        :param pulumi.Input[str] name: The name of the SMS receiver. Names must be unique across all receivers within an action group.
        :param pulumi.Input[str] phone_number: The phone number of the SMS receiver.
        """
        pulumi.set(__self__, "country_code", country_code)
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "phone_number", phone_number)

    @property
    @pulumi.getter(name="countryCode")
    def country_code(self) -> pulumi.Input[str]:
        """
        The country code of the SMS receiver.
        """
        return pulumi.get(self, "country_code")

    @country_code.setter
    def country_code(self, value: pulumi.Input[str]):
        pulumi.set(self, "country_code", value)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        The name of the SMS receiver. Names must be unique across all receivers within an action group.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="phoneNumber")
    def phone_number(self) -> pulumi.Input[str]:
        """
        The phone number of the SMS receiver.
        """
        return pulumi.get(self, "phone_number")

    @phone_number.setter
    def phone_number(self, value: pulumi.Input[str]):
        pulumi.set(self, "phone_number", value)
@pulumi.input_type
class VoiceReceiverArgs:
    # NOTE: auto-generated Pulumi input type. @pulumi.input_type introspects
    # the decorated properties, so the structure must not be hand-edited.
    def __init__(__self__, *,
                 country_code: pulumi.Input[str],
                 name: pulumi.Input[str],
                 phone_number: pulumi.Input[str]):
        """
        A voice receiver.
        :param pulumi.Input[str] country_code: The country code of the voice receiver.
        :param pulumi.Input[str] name: The name of the voice receiver. Names must be unique across all receivers within an action group.
        :param pulumi.Input[str] phone_number: The phone number of the voice receiver.
        """
        pulumi.set(__self__, "country_code", country_code)
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "phone_number", phone_number)

    @property
    @pulumi.getter(name="countryCode")
    def country_code(self) -> pulumi.Input[str]:
        """
        The country code of the voice receiver.
        """
        return pulumi.get(self, "country_code")

    @country_code.setter
    def country_code(self, value: pulumi.Input[str]):
        pulumi.set(self, "country_code", value)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        The name of the voice receiver. Names must be unique across all receivers within an action group.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="phoneNumber")
    def phone_number(self) -> pulumi.Input[str]:
        """
        The phone number of the voice receiver.
        """
        return pulumi.get(self, "phone_number")

    @phone_number.setter
    def phone_number(self, value: pulumi.Input[str]):
        pulumi.set(self, "phone_number", value)
@pulumi.input_type
class WebhookReceiverArgs:
    # NOTE: auto-generated Pulumi input type. @pulumi.input_type introspects
    # the decorated properties, so the structure must not be hand-edited.
    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 service_uri: pulumi.Input[str]):
        """
        A webhook receiver.
        :param pulumi.Input[str] name: The name of the webhook receiver. Names must be unique across all receivers within an action group.
        :param pulumi.Input[str] service_uri: The URI where webhooks should be sent.
        """
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "service_uri", service_uri)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        The name of the webhook receiver. Names must be unique across all receivers within an action group.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="serviceUri")
    def service_uri(self) -> pulumi.Input[str]:
        """
        The URI where webhooks should be sent.
        """
        return pulumi.get(self, "service_uri")

    @service_uri.setter
    def service_uri(self, value: pulumi.Input[str]):
        pulumi.set(self, "service_uri", value)
@pulumi.input_type
class WebtestLocationAvailabilityCriteriaArgs:
    # NOTE: auto-generated Pulumi input type. @pulumi.input_type introspects
    # the decorated properties, so the structure must not be hand-edited.
    def __init__(__self__, *,
                 component_id: pulumi.Input[str],
                 failed_location_count: pulumi.Input[float],
                 odata_type: pulumi.Input[str],
                 web_test_id: pulumi.Input[str]):
        """
        Specifies the metric alert rule criteria for a web test resource.
        :param pulumi.Input[str] component_id: The Application Insights resource Id.
        :param pulumi.Input[float] failed_location_count: The number of failed locations.
        :param pulumi.Input[str] odata_type: specifies the type of the alert criteria.
               Expected value is 'Microsoft.Azure.Monitor.WebtestLocationAvailabilityCriteria'.
        :param pulumi.Input[str] web_test_id: The Application Insights web test Id.
        """
        pulumi.set(__self__, "component_id", component_id)
        pulumi.set(__self__, "failed_location_count", failed_location_count)
        # The discriminator value is hardcoded by the code generator; the
        # odata_type argument itself is intentionally not used here.
        pulumi.set(__self__, "odata_type", 'Microsoft.Azure.Monitor.WebtestLocationAvailabilityCriteria')
        pulumi.set(__self__, "web_test_id", web_test_id)

    @property
    @pulumi.getter(name="componentId")
    def component_id(self) -> pulumi.Input[str]:
        """
        The Application Insights resource Id.
        """
        return pulumi.get(self, "component_id")

    @component_id.setter
    def component_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "component_id", value)

    @property
    @pulumi.getter(name="failedLocationCount")
    def failed_location_count(self) -> pulumi.Input[float]:
        """
        The number of failed locations.
        """
        return pulumi.get(self, "failed_location_count")

    @failed_location_count.setter
    def failed_location_count(self, value: pulumi.Input[float]):
        pulumi.set(self, "failed_location_count", value)

    @property
    @pulumi.getter(name="odataType")
    def odata_type(self) -> pulumi.Input[str]:
        """
        specifies the type of the alert criteria.
        Expected value is 'Microsoft.Azure.Monitor.WebtestLocationAvailabilityCriteria'.
        """
        return pulumi.get(self, "odata_type")

    @odata_type.setter
    def odata_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "odata_type", value)

    @property
    @pulumi.getter(name="webTestId")
    def web_test_id(self) -> pulumi.Input[str]:
        """
        The Application Insights web test Id.
        """
        return pulumi.get(self, "web_test_id")

    @web_test_id.setter
    def web_test_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "web_test_id", value)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# BioSTEAM: The Biorefinery Simulation and Techno-Economic Analysis Modules
# Copyright (C) 2020, Yoel Cortes-Pena <yoelcortes@gmail.com>
# Bioindustrial-Park: BioSTEAM's Premier Biorefinery Models and Results
# Copyright (C) 2020, Yalin Li <yalinli2@illinois.edu>,
# Sarang Bhagwat <sarangb2@illinois.edu>, and Yoel Cortes-Pena (this biorefinery)
#
# This module is under the UIUC open-source license. See
# github.com/BioSTEAMDevelopmentGroup/biosteam/blob/master/LICENSE.txt
# for license details.
"""
Created on Tue Sep 1 17:43:13 2020
Modified from the biorefineries constructed in [1] and [2] for the production of
lactic acid from lignocellulosic feedstocks
[1] Cortes-Peña et al., BioSTEAM: A Fast and Flexible Platform for the Design,
Simulation, and Techno-Economic Analysis of Biorefineries under Uncertainty.
ACS Sustainable Chem. Eng. 2020, 8 (8), 3302–3310.
https://doi.org/10.1021/acssuschemeng.9b07040
[2] Li et al., Tailored Pretreatment Processes for the Sustainable Design of
Lignocellulosic Biorefineries across the Feedstock Landscape. Submitted,
2020.
@author: yalinli_cabbi
"""
# %%
# =============================================================================
# Setup
# =============================================================================
import numpy as np
import pandas as pd
import biosteam as bst
from biosteam.utils import TicToc
from lactic import system_diluted
from lactic._utils import set_yield
# %%
# =============================================================================
# Evaluate system at different lactic acid titer and yield (conversion),
# using either regular strain (need lime addition during fermentation to neutralize
# the produced lactic acid) or acid-resistant strain (no neutralization need)
# =============================================================================
# Initiate a timer
timer = TicToc('timer')
timer.tic()
# Counter incremented by simulate_log_results for progress reporting.
run_number = 0
limits = [{}, {}] # regular, acid-resistant
# Result logs; the float keys below are fermentation productivities in
# g/L/hr (they match the column labels built in save_data_clear).
lactics = {'yield': [],
           'titer': []}
MPSPs = {0.89: [],
         0.18: [],
         1.92: []}
NPVs = {0.89: [],
        0.18: [],
        1.92: []}
GWPs = {0.89: [],
        0.18: [],
        1.92: []}
FECs = {0.89: [],
        0.18: [],
        1.92: []}
# Small offset subtracted from the sweep values — presumably to avoid
# hitting an exact 100% yield; TODO confirm.
yield_range = np.arange(0.3, 1.01, 0.025) - 1e-6
# yield_range = np.arange(0.3, 1.01, 0.5) - 1e-6
# Convenience aliases into the diluted-system flowsheet.
R301 = system_diluted.R301
R302 = system_diluted.R302
R401 = system_diluted.R401
S402 = system_diluted.S402
lactic_acid = system_diluted.lactic_acid
lactic_sys = system_diluted.lactic_sys
lactic_tea = system_diluted.lactic_tea
def solve_TEA():
    """Solve for the minimum product selling price (MPSP) of lactic acid.

    The price is reset to zero, then ``solve_price`` is applied three
    times so the TEA converges on a self-consistent selling price.
    """
    MPSP = lactic_acid.price = 0
    for _ in range(3):
        MPSP = lactic_tea.solve_price(lactic_acid)
        lactic_acid.price = MPSP
    return MPSP
def update_productivity(productivity):
    """Set fermentation productivity on both reactors and re-run their
    design and costing so downstream TEA reflects the change."""
    R302.productivity = productivity * R302.ferm_ratio
    R301.productivity = productivity
    for reactor in (R301, R302):
        reactor._design()
        reactor._cost()
def simulate_log_results(return_limit=False):
    """
    Simulate the full system and append results to the module-level logs.

    On success, logs fermentation yield/titer and, for each productivity,
    the MPSP, NPV, GWP, and FEC. On failure, resets the system and logs
    NaN everywhere so the log lists stay aligned.

    :param return_limit: if True, return the effluent titer reached
        (NaN on failure); otherwise return None.
    """
    global run_number
    try:
        lactic_sys.simulate()
        limit = R301.effluent_titer
        lactics['yield'].append(R301.cofermentation_rxns.X[0])
        lactics['titer'].append(R301.effluent_titer)
        for productivity in (0.89, 0.18, 1.92):
            update_productivity(productivity)
            MPSPs[productivity].append(solve_TEA())
            NPVs[productivity].append(lactic_tea.NPV)
            GWPs[productivity].append(system_diluted.get_GWP())
            FECs[productivity].append(system_diluted.get_FEC())
    # FIX: was a bare `except:`, which also swallowed KeyboardInterrupt and
    # SystemExit, making these hours-long sweeps impossible to abort.
    except Exception:
        # NOTE(review): if simulate() succeeds but a later TEA/LCA call
        # fails, the lactics lists receive a second (NaN) append —
        # pre-existing behavior, deliberately left unchanged.
        limit = np.nan
        lactic_sys.empty_recycles()
        lactic_sys.reset_cache()
        for m in (lactics, MPSPs, NPVs, GWPs, FECs):
            for n in m.keys():
                m[n].append(np.nan)
    run_number += 1
    print(f'Run #{run_number}: {timer.elapsed_time:.0f} sec')
    if return_limit:
        return limit
def save_data_clear():
    """Collect the logged results into a multi-column DataFrame, then
    empty every log list so the next sweep starts fresh."""
    columns = {
        ('Lactic acid', 'Yield [g/g]'): lactics['yield'],
        ('Lactic acid', 'Titer [g/L]'): lactics['titer'],
    }
    for productivity in (0.89, 0.18, 1.92):
        header = f'{productivity} [g/L/hr]'
        columns[(header, 'MPSP [$/kg]')] = MPSPs[productivity]
        columns[(header, 'NPV [$]')] = NPVs[productivity]
        columns[(header, 'GWP [kg CO2-eq/kg]')] = GWPs[productivity]
        columns[(header, 'FEC [MJ/kg]')] = FECs[productivity]
    df = pd.DataFrame(columns)
    # Reset all logs in place (keys are kept, lists replaced).
    for log in (lactics, MPSPs, NPVs, GWPs, FECs):
        for key in log:
            log[key] = []
    return df
# %%
# =============================================================================
# Regular strain
# =============================================================================
bst.speed_up()
# Regular strain requires neutralization during fermentation, so the
# downstream units R401/S402 stay active in the flowsheet.
R301.neutralization = True
R301.allow_concentration = False
R401.bypass = False
S402.bypass = False
print('\n---------- Regular Strain ----------')
# First determine the maximum achievable titer at a given yield
R301.allow_dilution = False
# 0.76 prepended to the sweep — presumably the baseline yield; TODO confirm.
yield_with_baseline = [0.76] + yield_range.tolist()
for i in yield_with_baseline:
    R301.yield_limit = i
    set_yield(i, R301, R302)
    # Store the regular-strain titer limit for this yield in limits[0].
    limits[0][i] = simulate_log_results(return_limit=True)
regular_limit = save_data_clear()
# Dilute the saccharified stream to achieve lower titers
R301.allow_dilution = True
for i in yield_range:
    # Sweep titer from 40 g/L up to (and including) the maximum achievable.
    titer_range = np.arange(40, limits[0][i], 2.5)
    titer_range = titer_range.tolist() + [limits[0][i]]
    for j in titer_range:
        R301.yield_limit = i
        R301.titer_limit = j
        set_yield(i, R301, R302)
        simulate_log_results(return_limit=False)
regular_data = save_data_clear()
# Intermediate save so a crash in the later sweep does not lose these
# results; the same sheets are written again at the end of the script.
with pd.ExcelWriter('regular1.xlsx') as writer:
    regular_limit.to_excel(writer, sheet_name='Regular limit')
    regular_data.to_excel(writer, sheet_name='Regular data')
# %%
# =============================================================================
# Acid-resistant strain
# =============================================================================
bst.speed_up()
# Acid-resistant strain needs no neutralization, so the downstream
# units R401/S402 are bypassed.
R301.neutralization = False
R301.allow_concentration = False
R401.bypass = True
S402.bypass = True
print('\n---------- Acid-resistant Strain ----------')
# First determine the maximum achievable titer at a given yield
R301.allow_dilution = False
for i in yield_with_baseline:
    R301.yield_limit = i
    set_yield(i, R301, R302)
    # Acid-resistant limits are stored in limits[1] (limits[0] holds
    # the regular strain's).
    limits[1][i] = simulate_log_results(return_limit=True)
resistant_limit = save_data_clear()
# Only simulate for achievable titers
R301.allow_dilution = True
for i in yield_range:
    # FIX: use the acid-resistant limits (limits[1]); the original
    # mistakenly reused the regular-strain limits (limits[0]).
    titer_range = np.arange(40, limits[1][i], 2.5)
    titer_range = titer_range.tolist() + [limits[1][i]]
    for j in titer_range:
        R301.yield_limit = i
        R301.titer_limit = j
        set_yield(i, R301, R302)
        simulate_log_results(return_limit=False)
resistant_data = save_data_clear()
# %%
'''Output to Excel'''
with pd.ExcelWriter('2-1_ferm_diluted.xlsx') as writer:
    regular_limit.to_excel(writer, sheet_name='Regular limit')
    regular_data.to_excel(writer, sheet_name='Regular data')
    resistant_limit.to_excel(writer, sheet_name='Acid-resistant limit')
    resistant_data.to_excel(writer, sheet_name='Acid-resistant data')
time = timer.elapsed_time / 60
print(f'\nSimulation time for {run_number} runs is: {time:.1f} min')
nilq/baby-python
|
python
|
# Taken from https://ubuntuforums.org/showthread.php?t=2117981
import os
import re
import subprocess
import time
if __name__ == '__main__':
    # Stream touchpad state from synclient in monitor mode
    # (one line of coordinates every 100 ms).
    cmd = 'synclient -m 100'
    p = subprocess.Popen(cmd, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, shell = True)
    skip = False       # NOTE(review): unused — kept for parity with the source thread
    first = True       # NOTE(review): unused
    start = False      # True while a 3-finger gesture is in progress
    start_x = 0        # coordinates where the gesture started
    start_y = 0
    diff_x = 0         # displacement since gesture start
    diff_y = 0
    timeStart = 0      # wall-clock time when the gesture started
    timeEnd = 0        # NOTE(review): unused
    try:
        while True:
            line = p.stdout.readline().decode('utf-8')
            print(line)  # debug output — presumably intentional; remove to silence
            if not line:
                break
            try:
                # Split the synclient row into numeric fields; tokens[1],
                # tokens[2], tokens[4] are x, y, and finger count.
                tokens = [x for x in re.split(r'([^0-9\.])+', line.strip()) if x.strip()]
                x, y, fingers = int(tokens[1]), int(tokens[2]), int(tokens[4])
                print(x, y, fingers)
                if fingers==3:
                    if not start:
                        # Begin tracking a new 3-finger swipe.
                        start_x = x
                        start_y = y
                        start = True
                        timeStart = time.time()
                if start and not fingers==3:
                    # Fingers lifted: classify the completed swipe.
                    if time.time()-timeStart>1.00:
                        # Took more than a second — not a swipe; reset.
                        print('too long')
                        start = False
                        start_x = 0
                        start_y = 0
                        diff_y = 0
                        diff_x = 0
                    else:
                        diff_x = x-start_x
                        diff_y = y-start_y
                        # MODIFY THE NUMBERS BELOW FOR SENSITIVITY
                        if abs(diff_x) > abs(diff_y):
                            # Move in left/right direction
                            if diff_x > 200:
                                os.system('xdotool key ctrl+alt+Right')
                            elif diff_x < -200:
                                os.system('xdotool key ctrl+alt+Left')
                        else:
                            # Move in up/down direction
                            if diff_y > 200:
                                os.system('xdotool key ctrl+alt+Down')
                            elif diff_y < -200:
                                os.system('xdotool key ctrl+alt+Up')
                        # Reset gesture state for the next swipe.
                        start = False
                        start_x = 0
                        start_y = 0
                        diff_y = 0
                        diff_x = 0
            except (IndexError, ValueError):
                # Header/blank lines fail numeric parsing; skip them.
                pass
    except KeyboardInterrupt:
        # Ctrl-C exits the monitor loop cleanly.
        pass
|
nilq/baby-python
|
python
|
import pyxel,math
from pyxel import init,image,tilemap,mouse,run,btn,cls,KEY_SPACE,btnp,KEY_Q,quit,text,clip,pix,line,rect,rectb,circ,circb,blt,bltm,pal
class App:
    """Pyxel drawing-API demo: exercises cls/pix/line/rect/circ/blt/bltm/
    text/pal/clip with a cycling palette swap and an optional clip test."""

    def __init__(self):
        init(256, 256, caption="Pyxel Draw API", scale=1)
        image(0).load(0, 0, "assets/cat_16x16.png")
        image(1).load(0, 0, "assets/tileset_24x32.png")
        tilemap(0).set(
            0, 0,
            ["022000002004001000060061062000040",
             "042003020021022003000001002003060"],
        )
        tilemap(0).refimg = 1
        self.pallet_test = 0
        self.clip_test = 0
        mouse(1)
        run(self.update, self.draw)

    def update(self):
        # Cycle the palette-swap mode every 20 frames.
        self.pallet_test = (pyxel.frame_count // 20) % 4
        print(self.pallet_test)
        # Hold SPACE to enable the clip test; Q quits.
        self.clip_test = btn(KEY_SPACE)
        if btnp(KEY_Q):
            quit()

    def draw(self):
        fc = pyxel.frame_count
        # Palette swap test (pal); mode 0 leaves the palette untouched.
        if self.pallet_test == 1:
            pal(2, 3)
        if self.pallet_test == 2:
            pal(4, 7)
        if self.pallet_test == 3:
            pal(7, 10)
        cls(2)
        text(6, 6, "cls(col)", 7)
        text(6, 20, "pix(x,y,col)", 7)
        for i in range(16):
            pix(10 + i * 2, 30, i)
        text(106, 6, "line(x1,y1,x2,y2,col)", 7)
        for i in range(3):
            line(110, 15 + i * 8, 158, 15 + i * 8, 5 + i)
        for i in range(4):
            line(110 + i * 16, 15, 110 + i * 16, 31, 8 + i)
        for i in range(4):
            line(110 + i * 16, 15, 110 + (3 - i) * 16, 31, 12 + i)
        text(6, 38, "rect(x,y,w,h,col)", 7)
        for i in range(8):
            rect(10 + i * 8, 54 - i, i + 1, i + 1, i + 8)
        text(106, 38, "rectb(x,y,w,h,col)", 7)
        for i in range(8):
            rectb(110 + i * 8, 54 - i, i + 1, i + 1, i + 8)
        text(6, 61, "circ(x,y,r,col)", 7)
        for i in range(8):
            circ(10 + i * 8, 76, i, i + 8)
        text(106, 61, "circb(x,y,r,col)", 7)
        for i in range(8):
            circb(110 + i * 8, 76, i, i + 8)
        text(6, 88, "blt(x,y,img,u,v,\n w,h,[colkey])", 7)
        x, y = 6, 103
        blt(x, y, 0, 0, 0, 16, 16)
        blt(x + math.sin(fc * 0.1) * 2 + 19, y, 0, 0, 0, 16, 16, 5)
        blt(x + 38, y, 0, 0, 0, -16, 16, 5)
        blt(x + 57, y, 0, 0, 0, 16, -16, 5)
        blt(x + 76, y, 0, 0, 0, -16, -16, 5)
        text(106, 88, "bltm(x,y,tm,u,v,\n w,h,[colkey])", 7)
        bltm(106, 103, 0, 0, 0, 11, 2, 2)
        text(6, 124, "text(x,y,s,col)", 7)
        s = "Elapsed frame count is {}\n" "Current mouse position is ({},{})".format(fc, pyxel.mouse_x, pyxel.mouse_y)
        text(11, 133, s, 1)  # shadow
        text(10, 132, s, 9)
        text(106, 124, "pal(col1,col2)", 4)
        pal()  # reset the palette
        # Clip test only runs while SPACE is held.
        if not self.clip_test:
            return
        clip()
        x = math.sin(fc * 0.02) * 39 + 40
        y = math.sin(fc * 0.03) * 29 + 30
        w, h = 120, 90
        text(x, y - 8, "clip(x,y,w,h)", 14)
        rectb(x - 1, y - 1, w + 2, h + 2, 14)
        clip(x, y, w, h)
# Instantiate and start the demo (pyxel.run blocks inside __init__).
App()
|
nilq/baby-python
|
python
|
import os
import shutil
from django.conf import settings
from django.core.management import BaseCommand
import django
class Command(BaseCommand):
    # NOTE(review): Python 2 source (statement-form print); keep syntax as-is.
    help = "Update django locales for three-digit codes"
    args = ""
    def handle(self, *args, **options):
        """
        Copy Django's two-letter locale directories to the three-letter
        codes HQ uses (e.g. 'fra' becomes a copy of 'fr') wherever the
        three-letter directory does not already exist.
        """
        # if we were feeling ambitious we could get this from something more
        # formal/standard, but this seems totally workable for our needs
        HQ_TO_DJANGO_MAP = {
            'fra': 'fr',
            'hin': 'hi',
            'por': 'pt',
        }
        def _get_django_home():
            # Root directory of the installed django package.
            return os.path.abspath(os.path.dirname(django.__file__))
        def _get_django_locale_directories():
            # The locale dirs whose translations need cloning.
            return [
                os.path.join(_get_django_home(), 'conf', 'locale'),
                os.path.join(_get_django_home(), 'contrib', 'auth', 'locale'),
                os.path.join(_get_django_home(), 'contrib', 'humanize', 'locale'),
            ]
        print 'updating django locale files for local languages'
        locale_dirs = _get_django_locale_directories()
        for langcode, display in settings.LANGUAGES:
            for locale_dir in locale_dirs:
                path = os.path.join(locale_dir, langcode)
                if not os.path.exists(path):
                    # will intentionally fail hard since this will result in a bad locale config
                    # (a KeyError here means a configured language lacks a mapping above)
                    mapped_code = HQ_TO_DJANGO_MAP[langcode]
                    django_path = os.path.join(locale_dir, mapped_code)
                    shutil.copytree(django_path, path)
                    print 'copied {src} to {dst}'.format(src=django_path, dst=path)
                else:
                    print '%s all good' % langcode
|
nilq/baby-python
|
python
|
import pytest
# Every test in this module needs database access.
pytestmark = [pytest.mark.django_db]
def test_single_member(mailchimp, post, mailchimp_member):
    """Subscribing one member issues a single batch POST with that member."""
    expected_payload = {
        'members': [{
            'email_address': 'test@e.mail',
            'merge_fields': {
                'FNAME': 'Rulon',
                'LNAME': 'Oboev',
            },
            'status': 'subscribed',
        }],
        'update_existing': True,
    }

    mailchimp.mass_subscribe(
        list_id='test1-list-id',
        members=[mailchimp_member],
    )

    post.assert_called_once_with(
        url='lists/test1-list-id',
        payload=expected_payload,
    )
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from decimal import Decimal
from django.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.core.validators import MinValueValidator
from django.db import models
from django.template.defaultfilters import slugify
from django.utils.functional import cached_property
from ingredients.models import Ingredient
from ingredients.utils import add_nutrition_ratios
# TODO utils?
# Shared validator: numeric field values must be >= 0.
not_negative = MinValueValidator(0)
# Schema overview:
# Recipe - A recipe made from some number of Ingredients
# Quantity - Through-model between Ingredient and Recipe (with weight in g / ml)
# RecipeGroup - A set of one or more recipes added together to e.g.
# show a daily diet or allow easy comparison between them
# RecipeNutrient - (Not yet implemented) - override ingredient nutrients
class RecipeTag(models.Model):
    """
    Tags for recipes - just a name and optional description.

    Name follows slug rules (only lowercase, hyphens and underscores)
    e.g. 'stew', 'baked', 'gluten-free', 'no_cook'
    """
    class Meta:
        # FIX: verbose_name_plural was declared as a plain class attribute,
        # where Django silently ignores it; it only takes effect on Meta.
        verbose_name_plural = "Recipe Tags"

    # Tag name, slug-restricted characters, unique across all tags.
    name = models.SlugField(
        max_length=settings.TAG_LENGTH,
        blank=False,
        unique=True,
    )
    description = models.CharField(max_length=settings.DESCR_LENGTH, blank=True)

    def __str__(self):
        return self.name
class RecipeFlag(models.Model):
    """
    Flags for recipes - these differ from tags in two ways:
    - Only one flag applies at a time (but multiple tags can)
    - Each flag has a one-character label, which is shown next to the
      item in lists (etc) if it is flagged.
    """
    class Meta:
        # FIX: verbose_name_plural was declared as a plain class attribute,
        # where Django silently ignores it; it only takes effect on Meta.
        verbose_name_plural = "Recipe Flags"

    # e.g. flag usage for showing the testing/other status of recipes
    # awaiting initial testing      A   "alpha"
    # tested but working on issues  B   "beta"
    # confirmed working             o   "ok"
    # deprecated; no longer used    D   "depr"
    char = models.SlugField(
        max_length=1,
        blank=False,
        unique=True,
    )
    name = models.SlugField(
        max_length=settings.TAG_LENGTH,
        blank=False,
        unique=True,
    )
    description = models.CharField(max_length=settings.DESCR_LENGTH, blank=True)

    def __str__(self):
        return self.name
class Recipe(models.Model):
"""
A recipe made from some number of Ingredients, with a method stored for
display (not relevant to PANTS itself).
"""
class Meta:
ordering = ["-updated_at"]
name = models.CharField(
max_length=settings.NAME_LENGTH,
blank=False,
unique=True,
)
slug = models.CharField(
max_length=settings.SLUG_LENGTH,
blank=True, # Set automatically; null=False still applies
unique=True,
)
description = models.CharField(max_length=settings.DESCR_LENGTH,blank=True)
tags = models.ManyToManyField(RecipeTag, blank=True)
flag = models.ForeignKey(
RecipeFlag,
on_delete=models.SET_NULL,
null=True,
blank=True,
)
# Owner is null for "global" Recipes.
# Only owner can see/edit their own ones, only admin can edit global ones
owner = models.ForeignKey(
User,
blank=True,
null=True,
on_delete=models.CASCADE,
related_name='+', # Prevents User-> related name being created
)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
last_tested = models.DateTimeField(
blank=True,
null=True,
help_text="When this recipe was last made to check it works, and it did",
) # TODO: Auto-update when added to diary?
# TODO How to handle testing when a recipe is working well then
# alterations are made to it? Flag if last_tested < updated_at ?
serves = models.DecimalField(
decimal_places=2,
max_digits=4,
validators=[not_negative],
)
# These are all large plain text fields shown on detail page.
# Page order: Introduction, Ingredients List, Method, Notes
introduction = models.TextField(blank=True)
method = models.TextField(blank=True)
notes = models.TextField(blank=True)
def __str__(self):
return self.name
def save(self, *args, **kwargs):
if not self.slug:
self.slug = slugify(self.name) # NOTE will Exception on clash
super(Recipe, self).save(*args, **kwargs)
# TODO: How much of this logic should be in the template or client side?
@cached_property
def nutrition_data(self):
"""
Returns cost/protein/fibre/kj of total and one serve of the recipe,
plus protein and fibre per J and dollar etc.
Returns None for a value (and any dependent values) if ANY
ingredients are missing that value (e.g. missing Fibre or Price data)
"""
# init
data = dict()
for k in settings.NUTRITION_DATA_ITEMS:
data[k] = 0
# Sum cost and basic macros - if any missing, make the sum None
for c in self.components.iterator():
comp_data = c.nutrition_data
for key in settings.NUTRITION_DATA_ITEMS:
if data[key] is not None:
if comp_data[key] is None:
data[key] = None
else:
data[key] += comp_data[key]
# For all valid values currently there, include per-serve data
serves = self.serves if self.serves else 1
keys = dict.fromkeys(data)
for k in keys:
if data[k] is not None:
# XXX: Note per-serve additions done before ratios - we
# don't want to redundantly calculate ratios per serve
data["%s_serve"%k]=data[k]/serves
# Convert KJ to Kcal/serve
if 'kilojoules_serve' in data and data['kilojoules_serve'] > 0:
data['kilocalories_serve']=data['kilojoules_serve'] / settings.KJ_PER_KCAL
# Finally determine desired weights per other weights
return add_nutrition_ratios(data)
# NOTE: This is deprecated, only required for dictsort being flaky
@cached_property
def sort_rank(self):
    '''
    Returns the rank from nutrition data, as a number.
    Returns 0 if there is no rank (e.g. missing nutrition data).
    Used for sorting things that can't sort floating point numbers.
    '''
    try:
        return self.nutrition_data['rank']
    except (KeyError, TypeError):
        # BUG FIX: was a bare `except:`, which also swallowed real bugs
        # (and even KeyboardInterrupt). KeyError covers a missing 'rank';
        # TypeError covers nutrition_data not being subscriptable.
        return 0
@cached_property
def used_in_recipes(self):
    """
    Returns a dict (slug->name) of Recipes that this recipe
    is a part of (directly, or through child recipes).

    Iterations/queries are proportional to the number of generations
    (not the raw number of recipes).
    """
    values = {}
    rqset = Recipe.objects.filter(components__of_recipe__pk=self.pk)

    while rqset.count():  # until no more child recipes
        values.update(rqset.values_list('slug', 'name'))  # Add to return dict
        rqset = Recipe.objects.filter(components__of_recipe__in=rqset)  # Recurse upward

    return values
class Component(models.Model):
    """
    Component of a recipe; could be a (generic) ingredient or another recipe.

    Caches nutrition data so it can be queried generically regardless
    of the type of component.
    """

    # The recipe this component belongs to
    in_recipe = models.ForeignKey(
        Recipe,
        on_delete=models.CASCADE,
        related_name='components',
    )

    # NOTE one and only one of these must be active (validated in clean())
    of_ingredient = models.ForeignKey(
        Ingredient,
        on_delete=models.PROTECT,
        null=True,
        blank=True,
        related_name='used_in',
    )
    of_recipe = models.ForeignKey(
        Recipe,
        on_delete=models.PROTECT,
        null=True,
        blank=True,
        related_name='used_in',
    )

    # NOTE one and only one of these must be active (validated in clean())
    servings = models.DecimalField(
        decimal_places=2,
        max_digits=5,
        validators=[not_negative],
        help_text="WARNING: Overrides weight if used!",
        null=True,
        blank=True,
    )
    weight = models.DecimalField(
        decimal_places=3,
        max_digits=7,
        validators=[not_negative],
        help_text="In grams; WARNING will be overridden by servings if that is used",
        null=True,
        blank=True,
    )
    # TODO: weight is in g but nutrients are measured per kg!

    note = models.CharField(max_length=settings.DESCR_LENGTH, blank=True)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    def clean(self):
        """
        Validate component - we must have either a recipe or an
        ingredient (but not both), specified in servings or in grams
        (but not both), and if servings are used on an ingredient it
        must have that setting.
        """
        if self.of_ingredient:
            if self.of_recipe:
                raise ValidationError('Must specify either recipe or ingredient, but not both')
            elif self.servings and (not self.of_ingredient.serving):
                raise ValidationError('That ingredient does not have servings listed - use raw weight instead')
        else:
            if not self.of_recipe:
                raise ValidationError('Must specify either a recipe or ingredient for this component')
        if (self.servings and self.weight):
            raise ValidationError('Must specify either servings or weight, not both')
        elif not (self.servings or self.weight):
            raise ValidationError('Must specify the amount of either weight or servings')
        super(Component, self).clean()

    @cached_property
    def quantity(self):
        """
        Returns the weight or number of servings of this component, whichever is applicable.
        """
        # weight wins when both are set (clean() forbids both anyway)
        return self.weight if self.weight else self.servings

    @cached_property
    def name(self):
        # Display name comes from whichever source object is linked.
        if self.of_ingredient:
            return self.of_ingredient.name
        elif self.of_recipe:
            return self.of_recipe.name
        return "Invalid Component!"

    def __str__(self):
        # NOTE(review): always labels the quantity "g", even when quantity
        # is a serving count rather than a weight — confirm intended.
        return "%f g %s" % (self.quantity, self.name)

    @cached_property
    def nutrition_data(self):
        """
        Returns cost/protein/fibre/kj of this component
        (multiplying by weight and doing any kg->g conversion required),
        plus protein and fibre per J and dollar etc.

        Returns None for a value if that value is missing from the source object.
        """
        # init TODO consider dependent values here too?
        data = dict()
        for k in settings.NUTRITION_DATA_ITEMS:
            data[k] = None

        # Get ingredient->nutrients data if ingredient
        # NOTE: Requires conversion kg to grams
        if self.of_ingredient:
            # Explicit weight, or servings * the ingredient's serving size
            weight = self.weight or self.of_ingredient.serving * self.servings
            # Special cases
            data['grams'] = weight
            if self.of_ingredient.best_price:  # 0 should not be valid
                # NOTE(review): multiplying by G_PER_KG matches the macro
                # path below, but looks dimensionally odd for a per-kg
                # price — confirm units of best_price.
                data['cost'] = weight * settings.G_PER_KG * self.of_ingredient.best_price

            # get main macronutrient data directly from ingredient
            for k in settings.NUTRITION_DATA_ITEMS_BASIC:
                val = getattr(self.of_ingredient, k)
                if val is not None:  # Allow 0 to be valid
                    data[k] = weight * settings.G_PER_KG * val
                else:
                    data[k] = None

        # Get data from similar property in recipe
        elif self.of_recipe:
            r_data = self.of_recipe.nutrition_data
            if self.servings:
                for k in settings.NUTRITION_DATA_ITEMS:
                    try:
                        data[k] = self.servings * r_data["%s_serve" % k]
                    except KeyError:
                        pass  # Already = None
            else:  # using self.weight  # TODO simplify weight calc and merge if possible
                grams_serve = r_data["grams_serve"]  # grams per serve of the sub-recipe
                for k in settings.NUTRITION_DATA_ITEMS:
                    try:
                        data[k] = self.weight * r_data["%s_serve" % k] / grams_serve
                    except KeyError:
                        pass  # Already = None

        # Finally determine desired weights per other weights
        return add_nutrition_ratios(data)
|
nilq/baby-python
|
python
|
# import sys
# sys.path.append("/root/csdc3/src/sensors/")
# sys.path.append("/root/csdc3/src/utils/")
# from sensor_manager import SensorManager
# from sensor_constants import *
# from statistics import median
def returnRandInt(minValue, maxValue):
    """Return a uniformly distributed random integer in [minValue, maxValue].

    BUG FIX: `random` was never imported (the module-level import block is
    commented out), so this raised NameError. The old expression
    `int(random.random()*(max-min+1)) % (max+1) + min` also applied a
    redundant modulo; random.randint implements the intended behaviour.
    """
    import random  # local import: module-level imports are commented out
    return random.randint(minValue, maxValue)
def BatteryHeatersReader():
    """Read battery temperatures and heater statuses from the PSS sensors.

    Returns a dict of the form:
        {"control": "ANAL" | "OBC",
         "batteries": [{"temp": <reading>, "heaters": <status>}, ...]}
    """
    # Get temperature inputs (order pairs with the heater status lines below)
    tempIdentifiers = (TEMP_BAT_4, TEMP_BAT_2, TEMP_BAT_3, TEMP_BAT_1)
    tempValues = []
    for iden in tempIdentifiers:
        SensorManager.init_temp_sensor(iden)
        tempValue = SensorManager.read_temp_sensor(iden)
        SensorManager.stop_temp_sensor(iden)
        # Keep final value of sensor
        tempValues.append(tempValue)

    # Get heater status identifiers
    statusIdentifiers = (PSS_HTR_STAT_1_GPIO, PSS_HTR_STAT_2_GPIO,
                         PSS_HTR_STAT_3_GPIO, PSS_HTR_STAT_4_GPIO)
    statusValues = []
    for iden in statusIdentifiers:
        statusValues.append(SensorManager.gpio_input(iden, 0))

    # BUG FIX: `result` was never initialised (the dict setup and the
    # population loop had been commented out), so the assignments below
    # raised NameError. Restore them per the original commented intent.
    result = {"control": "", "batteries": []}
    # Populate battery heater list with acquired values, pairing each
    # temperature with its heater status.
    for temp, heater in zip(tempValues, statusValues):
        result["batteries"].append({"temp": temp, "heaters": heater})

    # Update control status: mux select line high means analog control
    if SensorManager.gpio_input(PSS_HTR_MUX_SEL_GPIO, 0):
        result["control"] = "ANAL"
    else:
        result["control"] = "OBC"

    print(result)
    return result
def functionality():
    """Run a single battery-heater read and display the collected result."""
    print(BatteryHeatersReader())


if __name__ == '__main__':
    functionality()
|
nilq/baby-python
|
python
|
# Generated by Django 2.1.1 on 2018-10-27 17:56
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add a required `album` FK to Favorite."""

    dependencies = [
        ('music', '0007_auto_20181027_1618'),
    ]

    operations = [
        migrations.AddField(
            model_name='favorite',
            name='album',
            # default=0 only backfills existing rows during this migration;
            # preserve_default=False drops the default from the model afterwards.
            field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='music.Album'),
            preserve_default=False,
        ),
    ]
|
nilq/baby-python
|
python
|
from st2common.runners.base_action import Action
from thehive4pyextended import TheHiveApiExtended
__all__ = [
'PromoteAlertToCaseAction'
]
class PromoteAlertToCaseAction(Action):
    """StackStorm action: promote a TheHive alert to a case."""

    def run(self, alert_id, case_template=None):
        """Promote *alert_id* to a case, optionally using *case_template*.

        Returns the decoded JSON body of TheHive's response.
        """
        client = TheHiveApiExtended(
            self.config['thehive_url'],
            self.config['thehive_api_key'],
        )
        return client.promote_alert_to_case(alert_id, case_template).json()
|
nilq/baby-python
|
python
|
from django.contrib import admin
from django.urls import path
from . import views
# URL namespace, e.g. reverse('todoapp:index')
app_name = 'todoapp'

# Route table for the todo app; example final URLs are given per entry.
urlpatterns = [
    # ex: /todo/
    path('', views.index, name='index'),
    # ex: /todo/tasks/
    path('tasks/', views.tasks, name='tasks'),
    # ex: /todo/hashtags/
    path('hashtags/', views.hashtags, name='hashtags'),
    # ex: /todo/task/3/
    path('task/<int:task_id>/', views.task, name='task'),
    # ex: /todo/hashtag/1/
    path('hashtag/<int:hashtag_id>/', views.hashtag, name='hashtag'),
    # ex /todo/new_task/
    path('new_task/', views.new_task, name='new_task'),
    # ex /todo/new_hashtag/
    path('new_hashtag/', views.new_hashtag, name='new_hashtag'),
    # ex /todo/task/3/edit/
    path('task/<int:task_id>/edit/', views.edit_task, name='edit_task'),
    # ex /todo/hashtag/1/edit/
    path('hashtag/<int:hashtag_id>/edit/', views.edit_hashtag,
         name='edit_hashtag'),
    # ex /todo/task/3/delete/
    path('task/<int:task_id>/delete/', views.delete_task, name='delete_task'),
    # ex /todo/hashtag/3/delete/
    path('hashtag/<int:hashtag_id>/delete/', views.delete_hashtag,
         name='delete_hashtag'),
]
|
nilq/baby-python
|
python
|
import sys
from time import sleep
from sense_hat import SenseHat
class Morse:
    """Flash a sentence in Morse code on a Raspberry Pi Sense HAT.

    Usage: script.py SENTENCE [R,G,B] [True]
      argv[1]: text to flash
      argv[2]: optional comma-separated flash colour
      argv[3]: optional literal "True" to loop forever
    """

    # Class-level defaults; overwritten per instance in __init__.
    senseHat = None
    sentence = None
    multiplierSpeed = 1
    transcriber = None
    flashColor = [255, 255, 255]
    loop = False

    def __init__(self):
        if len(sys.argv) < 2:
            print("ERROR: This script requires a string argument to convert to morse.")
            sys.exit(1)
        # BUG FIX: these checks were `== 3` and `== 4`, so passing the loop
        # flag (4 argv entries) silently discarded the custom colour argument.
        if len(sys.argv) >= 3:
            self.flashColor = list(map(int, sys.argv[2].split(',')))
            print("Custom Flash Color Added!")
        if len(sys.argv) >= 4:
            if sys.argv[3] == "True":
                self.loop = True

        self.senseHat = SenseHat()
        self.senseHat.low_light = False
        self.sentence = sys.argv[1]
        self.multiplierSpeed = 1
        self.transcriber = MorseTranscriber()
        self.transcriber.set_color(self.flashColor)
        print("RPI Sense Hat Morse Code Flasher Initialized!")
        # Start flashing immediately on construction.
        self.flash_sentence()

    def flash_sentence(self):
        """Transcribe the sentence, flash it, and print the Morse string."""
        morseSentence = self.transcriber.sentence_to_morse(
            self.sentence, self.senseHat, self.flashColor, self.loop)
        print(morseSentence)
class MorseTranscriber:
    """Translate text to ITU Morse code and flash it on a Sense HAT."""

    # Character -> Morse symbol table. ' ' maps to ' ' so word breaks
    # survive transcription and trigger the word gap in sentence_to_morse.
    morseDefinitions = {
        'a': '.-',
        'b': '-...',
        'c': '-.-.',
        'd': '-..',
        'e': '.',
        'f': '..-.',
        'g': '--.',
        'h': '....',
        'i': '..',
        'j': '.---',
        # BUG FIX: was ' -.-' — the leading space flashed a spurious word gap
        'k': '-.-',
        'l': '.-..',
        'm': '--',
        'n': '-.',
        'o': '---',
        'p': '.--.',
        'q': '--.-',
        'r': '.-.',
        's': '...',
        't': '-',
        'u': '..-',
        'v': '...-',
        'w': '.--',
        'x': '-..-',
        'y': '-.--',
        'z': '--..',
        '1': '.----',
        '2': '..---',
        '3': '...--',
        '4': '....-',
        '5': '.....',
        '6': '-....',
        '7': '--...',
        '8': '---..',
        '9': '----.',
        '0': '-----',
        '.': '.-.-.-',
        ',': '--..--',
        ':': '---...',
        '?': '..--..',
        '\'': '.----.',
        '/': '-..-.',
        # BUG FIX: '(' duplicated the ')' code; ITU '(' is '-.--.'
        '(': '-.--.',
        ')': '-.--.-',
        '@': '.--.-.',
        '=': '-...-',
        '-': '-....-',
        '\"': '.-..-.',
        ' ': ' '
    }

    standardDur = 500  # ms, base duration before the speed divisor
    multiplierDur = 3  # speed divisor: unit length = standardDur / multiplierDur
    frequency = 550    # Hz; reserved for audio output, unused here

    def set_color(self, color):
        """Remember the flash colour for this transcriber.

        BUG FIX: previously assigned `self.X`, a name nothing ever read.
        """
        self.color = color

    def sentence_to_morse(self, sentence, senseHat, morseFlash, loop):
        """Flash `sentence` on `senseHat` in colour `morseFlash`.

        Returns the transcribed Morse string; repeats forever when `loop`.
        """
        morseSentence = ""
        senseHat.clear()
        print(sentence)
        morseFlasher = [morseFlash] * 64  # fill all 64 LEDs with the colour
        print(morseFlasher)
        while True:
            for letter in sentence.lower():
                morseChar = str(self.morseDefinitions.get(letter))
                morseSentence += morseChar
                senseHat.clear()
                print(morseChar)
                for char in morseChar:
                    senseHat.clear()
                    if char == ".":
                        senseHat.set_pixels(morseFlasher)
                        sleep(self.dot() / 1000)
                        print(self.dot() / 1000)
                        senseHat.clear()
                    elif char == "-":
                        senseHat.set_pixels(morseFlasher)
                        sleep(self.dash() / 1000)
                        print(self.dash() / 1000)
                        senseHat.clear()
                    elif char == " ":
                        senseHat.clear()
                        sleep(self.spaceWords() / 1000)
                        print(self.spaceWords() / 1000)
                        continue
                    sleep(self.dot() / 1000)  # intra-character gap
                # BUG FIX: this gap was divided by 5000; 1000 is the ms->s
                # conversion factor used consistently everywhere else.
                sleep(self.spaceLetters() / 1000)
            sleep(2)
            if not loop:
                break
        return morseSentence

    def dot(self):
        """Dot duration in ms (one base unit)."""
        return int(self.standardDur / self.multiplierDur)

    def dash(self):
        """Dash duration in ms (three base units)."""
        return int(3 * (self.standardDur / self.multiplierDur))

    def spaceChar(self):
        """Gap between symbols within one letter (one unit)."""
        return self.dot()

    def spaceLetters(self):
        """Gap between letters (three units)."""
        return self.dash()

    def spaceWords(self):
        """Gap between words (seven units)."""
        return int(7 * (self.standardDur / self.multiplierDur))
# Script entry point: parse argv and start flashing immediately
# (Morse.__init__ does all the work).
if __name__ == '__main__':
    newInstance = Morse()
|
nilq/baby-python
|
python
|
from __future__ import print_function
import os
import sys, logging
import json
import re
import mechanize
import boto3
# Route mechanize's logging to stdout so it appears in the Lambda log stream
mechlog = logging.getLogger("mechanize")
mechlog.addHandler(logging.StreamHandler(sys.stdout))
# Enable verbose logging when the DEBUG environment variable is set
if os.getenv('DEBUG') != None:
    logging.basicConfig(level=logging.DEBUG)
    mechlog.setLevel(logging.DEBUG)

# URL of the ACM approval page, as found in the certificate approval email
confirm_url = re.compile("https://.*\.certificates.amazon.com/approvals\?[A-Za-z0-9=&-]+")
# Text shown after a successful approval form submission
approval_text = re.compile("You have approved")
# Patterns that pull individual fields out of the approval page's HTML table
domain_re = re.compile(".*<b>Domain name</b>.*?<td class='right-column'>\s+(.*?)\s.*", re.DOTALL)
accountid_re = re.compile(".*<b>AWS account number</b>.*?<td class='right-column'>\s+(.*?)\s.*", re.DOTALL)
region_re = re.compile(".*<b>AWS Region</b>.*?<td class='right-column'>\s+(.*?)\s.*", re.DOTALL)
certid_re = re.compile(".*<b>Certificate identifier</b>.*?<td class='right-column'>\s+(.*?)\s.*", re.DOTALL)
def panic(msg):
    """Abort processing by raising a generic Exception carrying *msg*."""
    error = Exception(msg)
    raise error
def validate(event, context):
    """Lambda handler: auto-approve an ACM validation email delivered via SNS.

    Extracts the approval URL from the email body, scrapes the
    confirmation page, checks the certificate is still pending
    validation via the ACM API, then submits the approval form.
    """
    msg = json.loads(event['Records'][0]['Sns']['Message'])
    match = confirm_url.search(msg['content'])
    # Ignore emails that don't match the certificate confirm URL
    if not match:
        return
    url = match.group(0)
    logging.info("CONFIRMATION URL: %s" % url)

    br = mechanize.Browser()
    br.set_handle_robots(False)

    # Fetch approval page
    logging.debug("OPENING CONFIRMATION URL")
    response = br.open(url)
    logging.debug("OPENED CONFIRMATION URL")
    content = response.get_data()

    # Extract confirmation page details; panic() aborts if any regex misses
    domain, account_id, region, cert_id = [regex.match(content).group(1)
        if regex.match(content) else panic("Couldn't parse confirmation page!")
        for regex in (domain_re, accountid_re, region_re, certid_re)]

    # Remove dashes from account_id
    # NOTE(review): str.translate(None, '-') is Python 2 only; this module
    # appears to target Python 2 (mechanize, print_function import).
    account_id = account_id.translate(None, '-')

    # Always log what we're confirming
    print("Validation URL: '%s'" % url)
    print("Domain: '%s'" % domain)
    print("Account ID: '%s'" % account_id)
    print("Region: '%s'" % region)
    print("Certificate ID: '%s'" % cert_id)

    # Check if the cert is pending validation
    acm = boto3.client('acm', region_name=region)
    cert = acm.describe_certificate(CertificateArn="arn:aws:acm:%s:%s:certificate/%s"
                                    % (region, account_id, cert_id))
    logging.debug(cert)
    if cert['Certificate']['Status'] != 'PENDING_VALIDATION':
        panic("Confirmation certificate is not pending validation!")

    # It's the first and only form on the page
    # Could we match on action="/approvals"?
    br.select_form(nr=0)

    logging.info("SUBMITTING CONFIRMATION FORM")
    response = br.submit(name='commit')
    logging.info("SUBMITTED CONFIRMATION FORM")
    content = response.get_data()

    # Confirm the approval actually happened
    match = approval_text.search(content)
    if match:
        print("Certificate for %s approved!" % domain)
    else:
        logging.error(content)
        panic("No confirmation of certificate approval!")
|
nilq/baby-python
|
python
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (c) 2014, Lars Asplund lars.anders.asplund@gmail.com
from vunit.color_printer import ColorPrinter
from xml.etree import ElementTree
from sys import version_info
class TestReport:
    """
    Collect reports from running testcases
    """
    def __init__(self, printer=ColorPrinter):
        # name -> TestResult; insertion order is tracked separately so the
        # report can be printed in the order results were added
        self._test_results = {}
        self._test_names_in_order = []
        self._printer = printer

    def num_tests(self):
        """
        Return the number of tests in the report
        """
        return len(self._test_results)

    def add_result(self, *args, **kwargs):
        """
        Add a test result (arguments are forwarded to TestResult)
        """
        result = TestResult(*args, **kwargs)
        self._test_results[result.name] = result
        self._test_names_in_order.append(result.name)

    def _last_test_result(self):
        # The most recently added result
        return self._test_results[self._test_names_in_order[-1]]

    def _test_results_in_order(self):
        # Yield results in insertion order
        for name in self._test_names_in_order:
            yield self.result_of(name)

    def print_latest_status(self, total_tests):
        """
        Print a one-line pass/fail/skip status of the latest result,
        with running (P/F) counts out of total_tests
        """
        result = self._last_test_result()
        passed, failed, skipped = self._split()
        if result.passed:
            self._printer.write("pass", fg='gi')
        elif result.failed:
            self._printer.write("fail", fg='ri')
        elif result.skipped:
            self._printer.write("skip", fg='rgi')
        else:
            assert False  # every result is exactly one of the three states
        self._printer.write(" (P=%i F=%i T=%i) %s\n" %
                            (len(passed),
                             len(failed),
                             total_tests,
                             result.name))

    def all_ok(self):
        """
        Return true if all test passed
        """
        return all(test_result.passed for test_result in self._test_results.values())

    def has_test(self, test_name):
        # True if a result with this name has been added
        return test_name in self._test_results

    def result_of(self, test_name):
        # Look up a result by name; raises KeyError when absent
        return self._test_results[test_name]

    def print_str(self):
        """
        Print the report as a colored string
        """
        passed, failures, skipped = self._split()

        # Failures are printed last so they stay visible at the bottom
        for test_result in passed + skipped + failures:
            test_result.print_status(self._printer)

        self._printer.write("\n")

        n_failed = len(failures)
        n_skipped = len(skipped)
        n_passed = len(passed)
        total = n_failed + n_passed + n_skipped
        total_time = sum((result.time for result in self._test_results.values()))
        self._printer.write("Total time %.1f seconds\n" % total_time)
        self._printer.write("%i of %i passed\n" % (n_passed, total))

        if n_skipped > 0:
            self._printer.write("%i of %i skipped\n" % (n_skipped, total))

        if n_failed > 0:
            self._printer.write("%i of %i failed\n" % (n_failed, total))
            self._printer.write("Some failed!\n", fg='ri')
        else:
            self._printer.write("All passed!\n", fg='gi')

    def _split(self):
        """
        Split the test cases into (passed, failures, skipped) lists,
        preserving insertion order within each list
        """
        failures = []
        passed = []
        skipped = []
        for result in self._test_results_in_order():
            if result.passed:
                passed.append(result)
            elif result.failed:
                failures.append(result)
            elif result.skipped:
                skipped.append(result)
        return passed, failures, skipped

    def to_junit_xml_str(self):
        """
        Convert test report to a junit xml string
        """
        passed, failures, skipped = self._split()

        root = ElementTree.Element("testsuite")
        root.attrib["name"] = "testsuite"
        root.attrib["errors"] = "0"
        root.attrib["failures"] = str(len(failures))
        root.attrib["skipped"] = str(len(skipped))
        root.attrib["tests"] = str(len(self._test_results))
        for result in self._test_results_in_order():
            root.append(result.to_xml())

        if version_info >= (3, 0):
            # Python 3.x: tostring returns str only with encoding="unicode"
            xml = ElementTree.tostring(root, encoding="unicode")
        else:
            # Python 2.x
            xml = ElementTree.tostring(root, encoding="utf-8")
        return xml
class TestStatus:
    """
    Enum-like value object for the outcome of a test case.

    Instances compare equal when they carry the same status name.
    """
    def __init__(self, name):
        self._name = name

    def __eq__(self, other):
        # isinstance instead of exact-type comparison keeps equality
        # well-behaved; non-TestStatus values compare unequal.
        return (isinstance(other, TestStatus) and
                self._name == other._name)

    def __hash__(self):
        # BUG FIX: defining __eq__ without __hash__ made instances
        # unhashable in Python 3; hash on the same key __eq__ uses.
        return hash(self._name)

    def __repr__(self):
        return "TestStatus(%r)" % self._name


# Module-level singletons used throughout the report code
PASSED = TestStatus("passed")
SKIPPED = TestStatus("skipped")
FAILED = TestStatus("failed")
class TestResult:
    """
    Represents the result of a single test case
    """
    def __init__(self, name, status, time, output_file_name):
        # status must be one of the module-level TestStatus singletons
        assert status in (PASSED,
                          FAILED,
                          SKIPPED)
        self.name = name
        self._status = status
        self.time = time  # duration in seconds
        self._output_file_name = output_file_name

    @property
    def output(self):
        # Test output is read lazily from disk on each access
        with open(self._output_file_name, "r") as fread:
            return fread.read()

    @property
    def passed(self):
        return self._status == PASSED

    @property
    def skipped(self):
        return self._status == SKIPPED

    @property
    def failed(self):
        return self._status == FAILED

    def print_status(self, printer):
        """Print a one-line colored status followed by name and duration."""
        if self.passed:
            printer.write("pass", fg='gi')
            printer.write(" ")
        elif self.failed:
            printer.write("fail", fg='ri')
            printer.write(" ")
        elif self.skipped:
            printer.write("skip", fg='rgi')
            printer.write(" ")
        printer.write("%s after %.1f seconds\n" % (self.name, self.time))

    def to_xml(self):
        """Return this result as a junit <testcase> XML element."""
        test = ElementTree.Element("testcase")
        test.attrib["name"] = self.name
        test.attrib["time"] = "%.1f" % self.time
        if self.failed:
            failure = ElementTree.SubElement(test, "failure")
            failure.attrib["message"] = "Failed"
        elif self.skipped:
            skipped = ElementTree.SubElement(test, "skipped")
            skipped.attrib["message"] = "Skipped"
        system_out = ElementTree.SubElement(test, "system-out")
        system_out.text = self.output
        return test
|
nilq/baby-python
|
python
|
# META: timeout=long
import pytest
from webdriver import Element
from tests.support.asserts import (
assert_element_has_focus,
assert_error,
assert_events_equal,
assert_in_events,
assert_success,
)
from tests.support.inline import inline
@pytest.fixture
def tracked_events():
    # DOM events recorded via add_event_listeners in the tests below
    return [
        "blur",
        "change",
        "focus",
    ]


def element_clear(session, element):
    # Issue the raw WebDriver "Element Clear" command for *element*
    return session.transport.send(
        "POST", "/session/{session_id}/element/{element_id}/clear".format(
            session_id=session.session_id,
            element_id=element.id))


@pytest.fixture(scope="session")
def text_file(tmpdir_factory):
    # Session-scoped temporary text file used by the <input type=file> tests
    fh = tmpdir_factory.mktemp("tmp").join("hello.txt")
    fh.write("hello")
    return fh
def test_null_response_value(session):
    # A successful clear returns a null (None) value
    session.url = inline("<input>")
    element = session.find.css("input", all=False)

    response = element_clear(session, element)
    value = assert_success(response)
    assert value is None


def test_no_browsing_context(session, closed_window):
    # Clearing in a closed window must report "no such window"
    element = Element("foo", session)

    response = element_clear(session, element)
    assert_error(response, "no such window")


def test_connected_element(session):
    # Navigating away invalidates the element reference
    session.url = inline("<input>")
    element = session.find.css("input", all=False)

    session.url = inline("<input>")
    response = element_clear(session, element)
    assert_error(response, "stale element reference")


def test_pointer_interactable(session):
    # An element moved out of the viewport is not interactable
    session.url = inline("<input style='margin-left: -1000px' value=foobar>")
    element = session.find.css("input", all=False)

    response = element_clear(session, element)
    assert_error(response, "element not interactable")


def test_keyboard_interactable(session):
    # An element visually covered by another element can still be cleared
    session.url = inline("""
        <input value=foobar>
        <div></div>

        <style>
        div {
          position: absolute;
          background: blue;
          top: 0;
        }
        </style>
        """)
    element = session.find.css("input", all=False)
    assert element.property("value") == "foobar"

    response = element_clear(session, element)
    assert_success(response)
    assert element.property("value") == ""
# Each tuple: (input type, value to preload, expected value after clearing)
@pytest.mark.parametrize("type,value,default",
                         [("number", "42", ""),
                          ("range", "42", "50"),
                          ("email", "foo@example.com", ""),
                          ("password", "password", ""),
                          ("search", "search", ""),
                          ("tel", "999", ""),
                          ("text", "text", ""),
                          ("url", "https://example.com/", ""),
                          ("color", "#ff0000", "#000000"),
                          ("date", "2017-12-26", ""),
                          ("datetime", "2017-12-26T19:48", ""),
                          ("datetime-local", "2017-12-26T19:48", ""),
                          ("time", "19:48", ""),
                          ("month", "2017-11", ""),
                          ("week", "2017-W52", "")])
def test_input(session, add_event_listeners, tracked_events, type, value, default):
    # Clearing resets the control to its default value, fires
    # focus/change/blur, and leaves focus on the body
    session.url = inline("<input type=%s value='%s'>" % (type, value))
    element = session.find.css("input", all=False)
    add_event_listeners(element, tracked_events)
    assert element.property("value") == value

    response = element_clear(session, element)
    assert_success(response)
    assert element.property("value") == default
    assert_in_events(session, ["focus", "change", "blur"])
    assert_element_has_focus(session.execute_script("return document.body"))
# Disabled controls of any editable type must refuse to clear
@pytest.mark.parametrize("type",
                         ["number",
                          "range",
                          "email",
                          "password",
                          "search",
                          "tel",
                          "text",
                          "url",
                          "color",
                          "date",
                          "datetime",
                          "datetime-local",
                          "time",
                          "month",
                          "week",
                          "file"])
def test_input_disabled(session, type):
    session.url = inline("<input type=%s disabled>" % type)
    element = session.find.css("input", all=False)

    response = element_clear(session, element)
    assert_error(response, "invalid element state")


# Readonly controls must likewise refuse to clear
@pytest.mark.parametrize("type",
                         ["number",
                          "range",
                          "email",
                          "password",
                          "search",
                          "tel",
                          "text",
                          "url",
                          "color",
                          "date",
                          "datetime",
                          "datetime-local",
                          "time",
                          "month",
                          "week",
                          "file"])
def test_input_readonly(session, type):
    session.url = inline("<input type=%s readonly>" % type)
    element = session.find.css("input", all=False)

    response = element_clear(session, element)
    assert_error(response, "invalid element state")
def test_textarea(session, add_event_listeners, tracked_events):
    # Clearing a textarea empties it and fires focus/change/blur
    session.url = inline("<textarea>foobar</textarea>")
    element = session.find.css("textarea", all=False)
    add_event_listeners(element, tracked_events)
    assert element.property("value") == "foobar"

    response = element_clear(session, element)
    assert_success(response)
    assert element.property("value") == ""
    assert_in_events(session, ["focus", "change", "blur"])


def test_textarea_disabled(session):
    session.url = inline("<textarea disabled></textarea>")
    element = session.find.css("textarea", all=False)

    response = element_clear(session, element)
    assert_error(response, "invalid element state")


def test_textarea_readonly(session):
    session.url = inline("<textarea readonly></textarea>")
    element = session.find.css("textarea", all=False)

    response = element_clear(session, element)
    assert_error(response, "invalid element state")


def test_input_file(session, text_file):
    # Clearing a file input removes the selected file
    session.url = inline("<input type=file>")
    element = session.find.css("input", all=False)
    element.send_keys(str(text_file))

    response = element_clear(session, element)
    assert_success(response)
    assert element.property("value") == ""


def test_input_file_multiple(session, text_file):
    # Clearing removes every file in a multiple-file selection
    session.url = inline("<input type=file multiple>")
    element = session.find.css("input", all=False)
    element.send_keys(str(text_file))
    element.send_keys(str(text_file))

    response = element_clear(session, element)
    assert_success(response)
    assert element.property("value") == ""
def test_select(session):
    # <select> and <option> are not clearable controls
    session.url = inline("""
        <select>
          <option>foo
        </select>
        """)
    select = session.find.css("select", all=False)
    option = session.find.css("option", all=False)

    response = element_clear(session, select)
    assert_error(response, "invalid element state")
    response = element_clear(session, option)
    assert_error(response, "invalid element state")


def test_button(session):
    # Buttons are not clearable
    session.url = inline("<button></button>")
    button = session.find.css("button", all=False)

    response = element_clear(session, button)
    assert_error(response, "invalid element state")


def test_button_with_subtree(session):
    """
    Whilst an <input> is normally editable, the focusable area
    where it is placed will default to the <button>.  I.e. if you
    try to click <input> to focus it, you will hit the <button>.
    """
    session.url = inline("""
        <button>
          <input value=foobar>
        </button>
        """)
    text_field = session.find.css("input", all=False)

    response = element_clear(session, text_field)
    assert_error(response, "element not interactable")


def test_contenteditable(session, add_event_listeners, tracked_events):
    # contenteditable elements are cleared to empty innerHTML
    session.url = inline("<p contenteditable>foobar</p>")
    element = session.find.css("p", all=False)
    add_event_listeners(element, tracked_events)
    assert element.property("innerHTML") == "foobar"

    response = element_clear(session, element)
    assert_success(response)
    assert element.property("innerHTML") == ""
    assert_events_equal(session, ["focus", "change", "blur"])
    assert_element_has_focus(session.execute_script("return document.body"))


def test_designmode(session):
    # With document.designMode on, clearing the body leaves a bare <br>
    session.url = inline("foobar")
    element = session.find.css("body", all=False)
    assert element.property("innerHTML") == "foobar"
    session.execute_script("document.designMode = 'on'")

    response = element_clear(session, element)
    assert_success(response)
    assert element.property("innerHTML") == "<br>"
    assert_element_has_focus(session.execute_script("return document.body"))
def test_resettable_element_focus_when_empty(session, add_event_listeners, tracked_events):
    # Clearing an already-empty input must not fire any events
    session.url = inline("<input>")
    element = session.find.css("input", all=False)
    add_event_listeners(element, tracked_events)
    assert element.property("value") == ""

    response = element_clear(session, element)
    assert_success(response)
    assert element.property("value") == ""
    assert_events_equal(session, [])


@pytest.mark.parametrize("type,invalid_value",
                         [("number", "foo"),
                          ("range", "foo"),
                          ("email", "foo"),
                          ("url", "foo"),
                          ("color", "foo"),
                          ("date", "foo"),
                          ("datetime", "foo"),
                          ("datetime-local", "foo"),
                          ("time", "foo"),
                          ("month", "foo"),
                          ("week", "foo")])
def test_resettable_element_does_not_satisfy_validation_constraints(session, type, invalid_value):
    """
    Some UAs allow invalid input to certain types of constrained
    form controls.  For example, Gecko allows non-valid characters
    to be typed into <input type=number> but Chrome does not.
    Since we want to test that Element Clear works for clearing the
    invalid characters in these UAs, it is fine to skip this test
    where UAs do not allow the element to not satisfy its constraints.
    """
    session.url = inline("<input type=%s>" % type)
    element = session.find.css("input", all=False)

    def is_valid(element):
        return session.execute_script("""
            var input = arguments[0];
            return input.validity.valid;
            """, args=(element,))

    # value property does not get updated if the input is invalid
    element.send_keys(invalid_value)

    # UA does not allow invalid input for this form control type
    if is_valid(element):
        return

    response = element_clear(session, element)
    assert_success(response)
    assert is_valid(element)


@pytest.mark.parametrize("type",
                         ["checkbox",
                          "radio",
                          "hidden",
                          "submit",
                          "button",
                          "image"])
def test_non_editable_inputs(session, type):
    # Non-editable input types must refuse to clear
    session.url = inline("<input type=%s>" % type)
    element = session.find.css("input", all=False)

    response = element_clear(session, element)
    assert_error(response, "invalid element state")
def test_scroll_into_view(session):
    # Clearing an element that is scrolled out of view must first
    # scroll it back into the viewport
    session.url = inline("""
        <input value=foobar>
        <div style='height: 200vh; width: 5000vh'>
        """)
    element = session.find.css("input", all=False)
    assert element.property("value") == "foobar"
    assert session.execute_script("return window.pageYOffset") == 0

    # scroll to the bottom right of the page
    session.execute_script("""
        var body = document.body;
        window.scrollTo(body.scrollWidth, body.scrollHeight);
        """)

    # clear and scroll back to the top of the page
    response = element_clear(session, element)
    assert_success(response)
    assert element.property("value") == ""

    # check if element cleared is scrolled into view
    rect = session.execute_script("""
        var input = arguments[0];
        var rect = input.getBoundingClientRect();
        return {"top": rect.top,
                "left": rect.left,
                "height": rect.height,
                "width": rect.width};
        """, args=(element,))
    window = session.execute_script("""
        return {"innerHeight": window.innerHeight,
                "innerWidth": window.innerWidth,
                "pageXOffset": window.pageXOffset,
                "pageYOffset": window.pageYOffset};
        """)

    # The element's rect must intersect the visible viewport
    assert rect["top"] < (window["innerHeight"] + window["pageYOffset"]) and \
           rect["left"] < (window["innerWidth"] + window["pageXOffset"]) and \
           (rect["top"] + element.rect["height"]) > window["pageYOffset"] and \
           (rect["left"] + element.rect["width"]) > window["pageXOffset"]
|
nilq/baby-python
|
python
|
"""
References:
https://fdc.nal.usda.gov/api-guide.html#food-detail-endpoint
https://fdc.nal.usda.gov/portal-data/external/dataDictionary
"""
import datetime
from typing import List, Dict, Union
from datatrans import utils
from datatrans.fooddata.detail.base import IdMixin
from datatrans.fooddata.detail.nutrient import FoodNutrient, NutrientConversionFactor
from datatrans.fooddata.search.request import FoodDataType
from datatrans.utils.classes import JSONEnum as Enum
__all__ = ['FoodClass', 'FoodCategory', 'FoodCategoryInstance', 'FoodAttribute', 'FoodAttributeType',
'FoodAttributeTypeInstance', 'MeasureUnit', 'FoodPortion', 'BrandedFood', 'SrLegacyFood']
def parse_fooddata_date(date_str: str) -> datetime.date:
    """ Wrapper specific for fooddata's format ('/'-separated, month/day/year order) """
    return utils.fooddata.parse_date(date_str, sep='/', format='MDY')


def parse_food_nutrients(data: List[Dict[str, Union[str, int, float]]]) -> List[FoodNutrient]:
    """ Deserialize a list of raw nutrient dicts into FoodNutrient objects """
    return [FoodNutrient(_dict_=d) for d in data]
def parse_label_nutrients(data: Dict[str, Dict[str, float]]) -> List[Dict[str, float]]:
    """ Change incoming data to be in list format.

    Each nutrient name maps to a single-entry dict {name: value},
    preserving the incoming ordering.
    """
    result = []
    for name, info in data.items():
        result.append({name: info['value']})
    return result
def parse_nutrient_conversion_factors(data: List[Dict[str, Union[str, float]]]) -> List[NutrientConversionFactor]:
    """ Deserialize raw conversion-factor dicts into NutrientConversionFactor objects """
    return [NutrientConversionFactor(_dict_=d) for d in data]


def parse_food_portions(data: List[Dict[str, Union[str, float, int]]]) -> List['FoodPortion']:
    """ Deserialize raw portion dicts into FoodPortion objects """
    return [FoodPortion(_dict_=d) for d in data]


def parse_food_attributes(data: List[Dict[str, Union[int, str, dict]]]) -> List['FoodAttribute']:
    """ Deserialize raw attribute dicts into FoodAttribute objects """
    return [FoodAttribute(_dict_=d) for d in data]
class FoodClass(Enum):
    """Top-level food classes used by the FoodData Central detail endpoint.

    NOTE: FOUNDATION and LEGACY share the value 'FinalFood', so by Enum
    aliasing rules LEGACY is an alias of FOUNDATION (they compare equal
    and only FOUNDATION appears when iterating the enum).
    """
    FOUNDATION = 'FinalFood'
    SURVEY = 'Survey'
    BRANDED = 'Branded'
    LEGACY = 'FinalFood'
class FoodCategory(IdMixin, utils.DataClass):
    """Foods of defined similarity
    Attributes:
        id (int):
        code (str): Food group code
        description (str): Description of the food group
    """
    __slots__ = ('id', 'code', 'description')
    # (name, type) pairs — presumably consumed by utils.DataClass to parse
    # incoming dicts; TODO confirm against datatrans.utils.
    __attr__ = (
        ('id', int),
        ('code', str),
        ('description', str),
    )
class FoodCategoryInstance(Enum):
    """Pre-built FoodCategory constants for the known USDA food groups.

    Each member's value is a FoodCategory whose ``code`` is the SR food
    group code shown in its description.
    """
    DAIRY_AND_EGG_PRODUCTS = FoodCategory(_dict_={'id': 1, 'code': '0100', 'description': 'Dairy and Egg Products'})
    SPICES_AND_HERBS = FoodCategory(_dict_={'id': 2, 'code': '0200', 'description': 'Spices and Herbs'})
    BABY_FOODS = FoodCategory(_dict_={'id': 3, 'code': '0300', 'description': 'Baby Foods'})
    FATS_AND_OILS = FoodCategory(_dict_={'id': 4, 'code': '0400', 'description': 'Fats and Oils'})
    POULTRY_PRODUCTS = FoodCategory(_dict_={'id': 5, 'code': '0500', 'description': 'Poultry Products'})
    SOUPS_SAUCES_AND_GRAVIES = FoodCategory(_dict_={'id': 6, 'code': '0600', 'description': 'Soups, Sauces, and Gravies'})
    SAUSAGES_AND_LUNCHEON_MEATS = FoodCategory(_dict_={'id': 7, 'code': '0700', 'description': 'Sausages and Luncheon Meats'})
    BREAKFAST_CEREALS = FoodCategory(_dict_={'id': 8, 'code': '0800', 'description': 'Breakfast Cereals'})
    FRUITS_AND_FRUIT_JUICES = FoodCategory(_dict_={'id': 9, 'code': '0900', 'description': 'Fruits and Fruit Juices'})
    PORK_PRODUCTS = FoodCategory(_dict_={'id': 10, 'code': '1000', 'description': 'Pork Products'})
    VEGETABLES_AND_VEGETABLE_PRODUCTS = FoodCategory(_dict_={'id': 11, 'code': '1100', 'description': 'Vegetables and Vegetable Products'})
    NUT_AND_SEED_PRODUCTS = FoodCategory(_dict_={'id': 12, 'code': '1200', 'description': 'Nut and Seed Products'})
    BEEF_PRODUCTS = FoodCategory(_dict_={'id': 13, 'code': '1300', 'description': 'Beef Products'})
    BEVERAGES = FoodCategory(_dict_={'id': 14, 'code': '1400', 'description': 'Beverages'})
    FINFISH_AND_SHELLFISH_PRODUCTS = FoodCategory(_dict_={'id': 15, 'code': '1500', 'description': 'Finfish and Shellfish Products'})
    LEGUMES_AND_LEGUME_PRODUCTS = FoodCategory(_dict_={'id': 16, 'code': '1600', 'description': 'Legumes and Legume Products'})
    LAMB_VEAL_AND_GAME_PRODUCTS = FoodCategory(_dict_={'id': 17, 'code': '1700', 'description': 'Lamb, Veal, and Game Products'})
    BAKED_PRODUCTS = FoodCategory(_dict_={'id': 18, 'code': '1800', 'description': 'Baked Products'})
    SWEETS = FoodCategory(_dict_={'id': 19, 'code': '1900', 'description': 'Sweets'})
    CEREAL_GRAINS_AND_PASTA = FoodCategory(_dict_={'id': 20, 'code': '2000', 'description': 'Cereal Grains and Pasta'})
    FAST_FOODS = FoodCategory(_dict_={'id': 21, 'code': '2100', 'description': 'Fast Foods'})
    MEALS_ENTREES_AND_SIDE_DISHES = FoodCategory(_dict_={'id': 22, 'code': '2200', 'description': 'Meals, Entrees, and Side Dishes'})
    SNACKS = FoodCategory(_dict_={'id': 23, 'code': '2500', 'description': 'Snacks'})
    AMERICAN_INDIAN_ALASKA_NATIVE_FOODS = FoodCategory(_dict_={'id': 24, 'code': '3500', 'description': 'American Indian/Alaska Native Foods'})
    RESTAURANT_FOODS = FoodCategory(_dict_={'id': 25, 'code': '3600', 'description': 'Restaurant Foods'})
    BRANDED_FOOD_PRODUCTS_DATABASE = FoodCategory(_dict_={'id': 26, 'code': '4500', 'description': 'Branded Food Products Database'})
    QUALITY_CONTROL_MATERIALS = FoodCategory(_dict_={'id': 27, 'code': '2600', 'description': 'Quality Control Materials'})
    ALCOHOLIC_BEVERAGES = FoodCategory(_dict_={'id': 28, 'code': '1410', 'description': 'Alcoholic Beverages'})
class FoodAttributeType(IdMixin, utils.DataClass):
    """The list of supported attributes associated with a food
    Attributes:
        id (int):
        name (str): Name of the attribute associated with the food - should be displayable to users
        description (str): Description of the attribute
    """
    # NOTE(review): unlike FoodCategory/MeasureUnit this class declares no
    # __slots__ — confirm whether that asymmetry is intentional.
    __attr__ = (
        ('id', int),
        ('name', str),
        ('description', str),
    )
class FoodAttributeTypeInstance(Enum):
    """Pre-built FoodAttributeType constants used by FoodData Central."""
    ATTRIBUTE = FoodAttributeType(_dict_={'id': 999, 'name': 'Attribute', 'description': 'Generic attributes'})
    COMMON_NAME = FoodAttributeType(_dict_={'id': 1000, 'name': 'Common Name', 'description': 'Common names associated with a food.'})
    ADDITIONAL_DESCRIPTION = FoodAttributeType(_dict_={'id': 1001, 'name': 'Additional Description', 'description': 'Additional descriptions for the food.'})
    ADJUSTMENTS = FoodAttributeType(_dict_={'id': 1002, 'name': 'Adjustments', 'description': 'Adjustments made to foods, including moisture and fat changes.'})
class FoodAttribute(IdMixin, utils.DataClass):
    """The value for a generic property of a food
    Attributes:
        id (int):
        fdc_id (int): ID of the food this food attribute pertains to
        sequence_number (int): The order the attribute will be displayed on the released food.
        food_attribute_type (FoodAttributeType): Type of food attribute to which this value is associated for a specific food
        name (str): Name of food attribute
        value: The actual value of the attribute
    """
    __attr__ = (
        ('id', int),
        ('fdc_id', int),
        ('sequence_number', int),
        ('food_attribute_type', FoodAttributeType),
        ('name', str),
        ('value', str),
    )
class MeasureUnit(IdMixin, utils.DataClass):
    """units for measuring quantities of foods
    Attributes:
        id (int):
        name: name of the unit
        abbreviation: abbreviated name of the unit
    """
    __slots__ = ('id', 'name', 'abbreviation')
    __attr__ = (
        ('id', int),
        ('name', str),
        ('abbreviation', str),
    )
class FoodPortion(IdMixin, utils.DataClass):
    """Discrete amount of food
    Attributes:
        id (int):
        fdc_id: ID of the food this food portion pertains to
        seq_num: The order the measure will be displayed on the released food.
        amount: The number of measure units that comprise the measure (e.g. if measure is 3 tsp, the amount is 3). Not defined for survey (FNDDS) foods (amount is instead embedded in portion description).
        measure_unit: The unit used for the measure (e.g. if measure is 3 tsp, the unit is tsp). For food types that do not use measure SR legacy foods and survey (FNDDS) foods), a value of '9999' is assigned to this field.
        portion_description: Foundation foods: Comments that provide more specificity on the measure. For example, for a pizza measure the dissemination text might be 1 slice is 1/8th of a 14 inch pizza"." Survey (FNDDS) foods: The household description of the portion.
        modifier: Foundation foods: Qualifier of the measure (e.g. related to food shape or form) (e.g. melted, crushed, diced). Survey (FNDDS) foods: The portion code. SR legacy foods: description of measures, including the unit of measure and the measure modifier (e.g. waffle round (4" dia)).
        gram_weight: The weight of the measure in grams
        data_points: The number of observations on which the measure is based
        footnote: Comments on any unusual aspects of the measure. These are released to the public. Examples might include caveats on the usage of a measure, or reasons why a measure gram weight is an unexpected value.
        min_year_acquired: Minimum purchase year of all acquisitions used to derive the measure value
    """
    # NOTE(review): __slots__ omits 'portion_description' and
    # 'min_year_acquired' even though __attr__ declares them, and the
    # docstring says 'seq_num' where the attribute is 'sequence_number' —
    # confirm whether DataClass tolerates this mismatch.
    __slots__ = ('id', 'measure_unit', 'modifier', 'gram_weight', 'data_points', 'amount', 'sequence_number')
    __attr__ = (
        ('id', int),
        ('measure_unit', MeasureUnit),
        ('modifier', str),
        ('gram_weight', float),
        ('data_points', int),
        ('amount', float),
        ('sequence_number', int),
        ('portion_description', str),  # Unit
        ('min_year_acquired', int),
    )
class BrandedFood(utils.DataClass):
    """
    Foods whose nutrient values are typically obtained from food label
    data provided by food brand owners.

    Attributes:
        fdc_id (int): ID of the food in the food table
        brand_owner: Brand owner for the food
        gtin_upc: GTIN or UPC code identifying the food
        ingredients: The list of ingredients (as it appears on the product label)
        serving_size (float): The amount of the serving size when expressed as gram or ml
        serving_size_unit: The unit used to express the serving size (gram or ml)
        household_serving_full_text: amount and unit of serving size when
            expressed in household units
        branded_food_category: The category of the branded food, assigned
            by GDSN or Label Insight
        data_source: The source of the data for this food. GDSN (for GS1)
            or LI (for Label Insight).
        modified_date (datetime.date): This date reflects when the product data was last
            modified by the data provider, i.e., the manufacturer
        available_date (datetime.date): This is the date when the product record was
            available for inclusion in the database.
    """
    __slots__ = ('fdc_id', 'brand_owner', 'gtin_upc', 'ingredients', 'serving_size', 'household_serving_full_text',
                 'branded_food_category', 'data_source', 'modified_date', 'available_date', 'food_class', 'description',
                 'food_nutrients', 'food_components', 'food_attributes', 'table_alias_name', 'serving_size_unit',
                 'label_nutrients', 'data_type', 'publication_date', 'food_portions', 'changes')
    __attr__ = (
        # Excel
        ('fdc_id', int),
        ('brand_owner', str),
        ('gtin_upc', str),  # 11 digits of number (0-9)
        ('ingredients', str),  # csv (with spaces)
        ('serving_size', float),  # may be int
        ('household_serving_full_text', str),  # cup
        ('branded_food_category', str),
        ('data_source', str),  # "LI"
        ('modified_date', datetime.date,
         parse_fooddata_date),
        ('available_date', datetime.date,
         parse_fooddata_date),
        # actual JSON
        ('food_class', FoodClass),  # FoodClass.BRANDED
        ('description', str),
        ('food_nutrients', list,
         parse_food_nutrients),
        ('food_components', list),
        ('food_attributes', list),
        ('table_alias_name', str),  # "branded_food"
        ('serving_size_unit', str),  # lowercase g
        ('label_nutrients', list,  # type: List[Dict[str, float]]
         parse_label_nutrients),
        ('data_type', FoodDataType),
        ('publication_date', datetime.date,
         parse_fooddata_date),
        ('food_portions', list,  # type: List[FoodPortion]
         parse_food_portions),
        ('changes', str),
    )
class Food(utils.DataClass):
    """Any substance consumed by humans for nutrition, taste and/or aroma.
    Attributes:
        fdc_id (int): Unique permanent identifier of the food
        food_class (FoodClass): For internal use only
        data_type (FoodDataType): Type of food data
            (see Files tab for possible values).
        description (str): Description of the food
        food_category_id: Id of the food category the food belongs to
        publication_date (datetime.date): Date when the food was published to FoodData Central
        scientific_name (datetime.date): The scientific name for the food
        food_key: A string of characters used to identify both the
            current and all historical records for a specific food.
    """
    # NOTE(review): __slots__ lists search-result-style names
    # ('all_highlight_fields', 'score', ...) that do not appear in __attr__,
    # and omits most __attr__ names — looks copied from a search-result
    # class; confirm which set is intended.
    __slots__ = (
        'fdc_id', 'description', 'data_type', 'published_date', 'all_highlight_fields', 'score', 'food_code',
        'gtin_upc',
        'brand_owner', 'additional_descriptions')
    __attr__ = (
        ('fdc_id', int),
        ('food_class', FoodClass),
        ('data_type', FoodDataType),
        ('description', str),
        ('food_category_id', str),
        ('publication_date', datetime.date,
         parse_fooddata_date),
        ('scientific_name', str),
        ('food_key', str),
    )
class FoundationFood(utils.DataClass):
    """
    Foods whose nutrient and food component values are derived
    primarily by chemical analysis. Foundation data also include
    extensive underlying metadata, such as the number of samples,
    the location and dates on which samples were obtained, analytical
    approaches used, and if appropriate, cultivar, genotype, and
    production practices.

    Attributes:
        fdc_id (int): ID of the food in the food table
        NDB_number: Unique number assigned for the food, different from
            fdc_id, assigned in SR
        footnote (str): Comments on any unusual aspects. These are
            released to the public Examples might include unusual
            aspects of the food overall.
    """
    __attr__ = (
        ('fdc_id', int),
        ('NDB_number', str),  # temp
        ('footnote', str),
        # actual JSON
        ('food_class', FoodClass),
        ('food_nutrients', list,  # type: List[FoodNutrient]
         parse_food_nutrients),
        ('description', str),
        ('food_components', list),
        ('food_attributes', list,  # type: List[FoodAttribute]
         parse_food_attributes),
        ('table_alias_name', str),
        ('nutrient_conversion_factors', list,  # type: List[NutrientConversionFactor]
         parse_nutrient_conversion_factors),
        ('is_historical_reference', bool),
        # NOTE(review): both 'NDB_number' (above) and 'ndb_number' are
        # declared — confirm which one the JSON payload actually carries.
        ('ndb_number', str),
        ('publication_date', datetime.date,
         parse_fooddata_date),
        ('food_category', FoodCategory),
        ('food_portions', list,  # type: List[FoodPortion]
         parse_food_portions),
        ('data_type', FoodDataType),
        ('input_foods', list),
        ('changes', str),
    )
class SrLegacyFood(utils.DataClass):
    """
    Foods from the April 2018 release of the USDA National Nutrient
    Database for Standard Reference. Nutrient and food component values
    are derived from chemical analysis and calculation.

    Attributes:
        fdc_id (int): ID of the food in the food table
        ndb_number: Unique number assigned for the food, different from
            fdc_id, assigned in SR

    Raises:
        ValueError: from __init__ when the payload's food_class,
            data_type or table_alias_name does not identify an SR
            legacy food.
    """
    # Fix: the original __slots__ listed 'data_type' twice and omitted
    # 'scientific_name' and 'footnote', both of which __attr__ declares.
    __slots__ = (
        'ndb_number', 'fdc_id', 'food_class', 'description', 'food_nutrients', 'food_components',
        'scientific_name', 'food_attributes', 'table_alias_name', 'nutrient_conversion_factors',
        'is_historical_reference', 'data_type', 'food_category', 'food_portions', 'input_foods',
        'publication_date', 'changes', 'footnote')
    __attr__ = (
        # Excel
        ('ndb_number', str),
        ('fdc_id', int),
        # actual JSON
        ('food_class', FoodClass),
        ('description', str),
        ('food_nutrients', list,  # type: List[FoodNutrient]
         parse_food_nutrients),
        ('food_components', list),
        ('scientific_name', str),
        ('food_attributes', list,  # type: List[FoodAttribute]
         parse_food_attributes),
        ('table_alias_name', str),
        ('nutrient_conversion_factors', list,  # type: List[NutrientConversionFactor]
         parse_nutrient_conversion_factors),
        ('is_historical_reference', bool),
        ('data_type', FoodDataType),
        ('food_category', FoodCategory),
        ('food_portions', list,  # type: List[FoodPortion]
         parse_food_portions),
        ('input_foods', list),
        ('publication_date', datetime.date,
         parse_fooddata_date),
        ('changes', str),
        ('footnote', str),
    )
    def __init__(self, _dict_: dict = None, **kwargs):
        super().__init__(_dict_, **kwargs)
        # Fail fast on payloads that are not SR legacy foods.
        if self.food_class is not FoodClass.LEGACY:
            raise ValueError('invalid value for \'{}\': \'{}\' \'{}\''
                             .format(self.__class__.__name__, 'food_class', self.food_class))
        if self.data_type is not FoodDataType.LEGACY:
            raise ValueError('invalid value for \'{}\': \'{}\' \'{}\''
                             .format(self.__class__.__name__, 'data_type', self.data_type))
        if self.table_alias_name != 'sr_legacy_food':
            raise ValueError('invalid value for \'{}\': \'{}\' \'{}\''
                             .format(self.__class__.__name__, 'table_alias_name', self.table_alias_name))
    @property
    def common_names(self):
        """ Returns the common name if any, else None """
        for attr in self.food_attributes:
            if attr.food_attribute_type == FoodAttributeTypeInstance.COMMON_NAME.value:
                return attr.value
class SurveyFnddsFood(utils.DataClass):
    """
    Foods whose consumption is measured by the What We Eat In America
    dietary survey component of the National Health and Nutrition
    Examination Survey (NHANES). Survey nutrient values are usually
    calculated from Branded and SR Legacy data.

    Attributes:
        fdc_id (int): ID of the food in the food table
        food_code (str): A unique ID identifying the food within FNDDS
        wweia_category_code: Unique Identification code for WWEIA food category to which this food is assigned
        start_date (datetime.date): Start date indicates time period corresponding to WWEIA data
        end_date (datetime.date): End date indicates time period corresponding to WWEIA data
    """
    __attr__ = (
        ('fdc_id', int),
        ('food_code', str),
        ('start_date', datetime.date,
         parse_fooddata_date),
        ('end_date', datetime.date,
         parse_fooddata_date),
        ('food_class', FoodClass),
        ('description', str),
        ('food_nutrients', list,  # type: List[FoodNutrient]
         parse_food_nutrients),
        ('food_components', list),
        ('scientific_name', str),
        ('food_attributes', list,  # type: List[FoodAttribute]
         parse_food_attributes),
        ('table_alias_name', str),
        ('wweia_category', str),  # need further process
        ('wweia_food_category', str),  # need further process
        ('data_type', FoodDataType),
        ('publication_date', datetime.date,
         parse_fooddata_date),
        ('food_portions', list,  # type: List[FoodPortion]
         parse_food_portions),
        ('input_foods', list),
        ('changes', str),
    )
|
nilq/baby-python
|
python
|
import sys
# Block pkg_resources before importing pygments/prompt_toolkit —
# presumably to skip their pkg_resources-based plugin discovery at import
# time; TODO confirm this is intentional (imports below will fall back to
# their non-pkg_resources code paths).
sys.modules['pkg_resources'] = None
import pygments.styles
import prompt_toolkit
|
nilq/baby-python
|
python
|
# --
# File: aelladata/aelladata_consts.py
#
# Copyright (c) Aella Data Inc, 2018
#
# This unpublished material is proprietary to Aella Data.
# All rights reserved. The methods and
# techniques described herein are considered trade secrets
# and/or confidential. Reproduction or distribution, in whole
# or in part, is forbidden except by express written permission
# of Aella Data.
#
# --
# JSON keys used in action parameters and result payloads.
AELLADATA_JSON_DEVICE_URL = "url"
AELLADATA_JSON_QUERY = "query"
AELLADATA_JSON_INDEX = "index"
AELLADATA_JSON_TYPE = "type"
AELLADATA_JSON_ROUTING = "routing"
AELLADATA_JSON_TOTAL_HITS = "total_hits"
AELLADATA_JSON_TIMED_OUT = "timed_out"
# Status, progress and error messages surfaced to the user. Messages with
# {placeholders} are filled via str.format at the call site.
AELLADATA_ERR_CONNECTIVITY_TEST = "Connectivity test failed"
AELLADATA_SUCC_CONNECTIVITY_TEST = "Connectivity test passed"
AELLADATA_ERR_SERVER_CONNECTION = "Connection failed"
AELLADATA_ERR_FROM_SERVER = "API failed, Status code: {status}, Detail: {detail}"
AELLADATA_MSG_CLUSTER_HEALTH = "Querying cluster health to check connectivity"
AELLADATA_ERR_API_UNSUPPORTED_METHOD = "Unsupported method"
AELLADATA_USING_BASE_URL = "Using url: {base_url}"
AELLADATA_ERR_JSON_PARSE = "Unable to parse reply as a Json, raw string reply: '{raw_text}'"
|
nilq/baby-python
|
python
|
from typing import Dict, List
from rest_framework import serializers
from rest_framework.exceptions import ValidationError
from rest_framework.fields import empty
from rest_framework.reverse import reverse
from rest_framework_nested import serializers as nested_serializers
from api.helpers import uuid_helpers
from bettersocial.models import *
class AuthorSerializer(serializers.ModelSerializer):
    """Serializes an Author into the project's social-API author object."""
    # NOTE(review): models.CharField is a Django *model* field, not a DRF
    # serializer field, so DRF will not treat this attribute as a declared
    # field; 'type' in Meta.fields presumably resolves against the model —
    # confirm this is intentional (the same pattern recurs in the other
    # serializers in this module).
    type = models.CharField(max_length = 32)
    id = serializers.HyperlinkedIdentityField(
        view_name = 'api:author-detail',
    )
    # Placeholder; the real value is copied from 'id' in to_representation().
    url = serializers.Field(default = None)
    host = serializers.SerializerMethodField(
        method_name = 'get_host'
    )
    displayName = serializers.SerializerMethodField(
        method_name = 'get_name'
    )
    github = serializers.SerializerMethodField(
        method_name = 'get_github'
    )
    profileImage = serializers.Field(default = None)
    # Not required, but for convenience
    posts = serializers.HyperlinkedIdentityField(
        view_name = 'api:post-list',
        lookup_url_kwarg = 'author_pk',
    )
    def get_github(self, instance: Author):
        """Author's GitHub URL, or empty string when unset."""
        return instance.github_url if instance.github_url else ""
    def get_host(self, instance: Author):
        """This node's API root URL, used as the author's host."""
        return reverse('api:api-root', request = self.context['request'])
    def get_name(self, instance: Author):
        # TODO: 2021-10-25 simplify
        return instance.display_name
    def to_representation(self, instance):
        """Strip UUID dashes from 'id' and mirror it into 'url'."""
        json = super().to_representation(instance)
        json['id'] = uuid_helpers.remove_uuid_dashes(json['id'])
        json['url'] = json['id']
        return json
    class Meta:
        model = Author
        fields = [
            'type',
            'id',
            'url',
            'host',
            'displayName',
            'github',
            'profileImage',
            'posts'
        ]
        extra_kwargs = {
            'github': {
                'source': 'github_url'
            }
        }
class FollowerSerializer(serializers.ModelSerializer):
    """Plain ModelSerializer exposing every Follower field unchanged."""
    class Meta:
        model = Follower
        fields = '__all__'
class CommentSerializer(serializers.ModelSerializer):
    """Serializes a Comment nested under its post/author URL structure."""
    # NOTE(review): Django model field on a serializer — DRF ignores it;
    # see the matching note on AuthorSerializer. Confirm intent.
    type = models.CharField(max_length = 32)
    id = nested_serializers.NestedHyperlinkedIdentityField(
        view_name = 'api:comment-detail',
        parent_lookup_kwargs = {
            'post_pk': 'post__pk',
            'author_pk': 'post__author__pk',
        }
    )
    author = serializers.SerializerMethodField(
        method_name = 'get_author'
    )
    def get_author(self, instance: Comment):
        # TODO: 2021-11-22 refactor for remote authors
        return AuthorSerializer(instance = instance.author_local, context = self.context, read_only = True).data
    def to_representation(self, instance):
        """Strip UUID dashes from the generated 'id' URL."""
        json = super().to_representation(instance)
        json['id'] = uuid_helpers.remove_uuid_dashes(json['id'])
        return json
    class Meta:
        model = Comment
        fields = [
            'type',
            'author',
            'comment',
            'contentType',
            'published',
            'id',
        ]
        extra_kwargs = {
            'contentType': {
                'source': 'content_type',
            }
        }
class PostSerializer(serializers.ModelSerializer):
    """Serializes a Post, embedding its author and a 5-comment preview."""
    id = nested_serializers.NestedHyperlinkedIdentityField(
        view_name = 'api:post-detail',
        parent_lookup_kwargs = { 'author_pk': 'author__pk' }
    )
    # NOTE(review): Django model field on a serializer — DRF ignores it;
    # see the matching note on AuthorSerializer. Confirm intent.
    type = models.CharField(max_length = 32)
    count = serializers.IntegerField(
        source = 'comments.count',
        read_only = True,
    )
    comments = nested_serializers.NestedHyperlinkedIdentityField(
        view_name = 'api:comment-list',
        read_only = True,
        lookup_url_kwarg = 'post_pk',
        parent_lookup_kwargs = {
            'author_pk': 'author__pk',
        }
    )
    author = AuthorSerializer(read_only = True)
    commentsSrc = serializers.SerializerMethodField(
        method_name = 'get_comments'
    )
    def get_comments(self, instance: Post):
        # Gotta hardcode this stuff because there's no way to get the "list representation" without a circular import
        return {
            'type': 'comments',
            'page': 1,
            'size': 5,
            'post': None,  # Both to be filled in to_representation because we can't reference an existing field here, apparently.
            'id': None,
            'comments': CommentSerializer(instance.comments.order_by('-published')[:5], context = self.context, many = True).data,
        }
    published = serializers.DateTimeField(format = 'iso-8601')
    def to_representation(self, instance):
        """Strip UUID dashes, default source/origin, and back-fill commentsSrc."""
        json = super().to_representation(instance)
        json['id'] = uuid_helpers.remove_uuid_dashes(json['id'])
        json['comments'] = uuid_helpers.remove_uuid_dashes(json['comments'])
        # Set defaults for source and origin, if they don't exist. This shouldn't really happen but just in case
        if json['source'] is None:
            json['source'] = json['id']
        if json['origin'] is None:
            json['origin'] = json['id']
        # Fill in repeated data because that's the spec
        json['commentsSrc']['post'] = json['id']
        json['commentsSrc']['id'] = json['comments']
        return json
    class Meta:
        model = Post
        fields = [
            'type',
            'title',
            'id',
            'source',
            'origin',
            'description',
            'contentType',
            'content',
            'author',
            'categories',
            'count',
            'comments',
            'commentsSrc',
            'published',
            'visibility',
            'unlisted'
        ]
        extra_kwargs = {
            'contentType': {
                'source': 'content_type',
            }
        }
class BaseLikeSerializer(serializers.ModelSerializer):
    """Shared serialization for Like objects (posts and comments)."""
    summary = serializers.SerializerMethodField(
        method_name = 'get_summary'
    )
    # NOTE(review): Django model field on a serializer — DRF ignores it;
    # see the matching note on AuthorSerializer. Confirm intent.
    type = models.CharField(max_length = 32)
    author = serializers.SerializerMethodField(
        method_name = 'get_author'
    )
    # TODO: 2021-10-25 refactor later for remote authors
    @staticmethod
    def helper_get_author(instance: Like) -> Author:
        return instance.author_local
    def get_summary(self, instance: Like):
        """Human-readable one-liner, e.g. "Alice Likes your post"."""
        return f'{self.helper_get_author(instance).display_name} Likes your post'
    def get_author(self, instance: Like):
        return AuthorSerializer(self.helper_get_author(instance), context = self.context).data
    def to_representation(self, instance):
        """Prepend the ActivityStreams @context key to the output."""
        json = super().to_representation(instance)
        json['@context'] = 'https://www.w3.org/ns/activitystreams'
        json.move_to_end('@context', last = False)
        return json
    class Meta:
        model = Like
        fields = [
            # '@context',
            'summary',
            'type',
            'author',
            # 'object',
        ]
class PostLikeSerializer(BaseLikeSerializer):
    """Like-on-a-post serializer; currently identical to the base."""
    pass
class CommentLikeSerializer(BaseLikeSerializer):
    """Like-on-a-comment serializer; currently identical to the base."""
    pass
class InboxItemSerializer(serializers.ModelSerializer):
    """The ModelSerializer part of this is only for the GET part -- POST is kind of custom"""
    def __init__(self, instance = None, data = empty, **kwargs):
        super().__init__(instance, data, **kwargs)
        # Dispatch table: maps the payload's 'type' to the Django model the
        # item is stored against and the validator run in validate().
        self.types = {
            'post': {
                'model': Post,
                'validator': self._validate_post
            },
            'comment': {
                'model': Comment,
                'validator': self._validate_comment
            },
            'like': {
                'model': Like,
                'validator': self._validate_like
            },
            'follow': {
                'model': Follower,
                'validator': self._validate_follower
            },
        }
    def create(self, validated_data):
        """Store the raw request payload as an InboxItem for the target author."""
        # We have to access the raw request since DRF blows any fields that are not part of the Model
        data: Dict = self.context['request'].data
        inbox_item = InboxItem.objects.create(
            author_id = self.context['author_id'],
            dj_content_type = DjangoContentType.objects.get_for_model(model = self.types[data['type']]['model']),
            inbox_object = data
        )
        return inbox_item
    def validate(self, attrs):
        """Normalize 'type' and run the type-specific validator on the raw payload."""
        # We have to access the raw request since DRF blows any fields that are not part of the Model
        data: Dict = self.context['request'].data
        # Make sure that type is there
        self._validate_required(data, ['type'])
        # Fix type field
        data['type'] = data['type'].strip().lower()
        if data['type'] not in self.types.keys():
            raise ValidationError({ 'type': f'type must be one of {{{", ".join(self.types.keys())}}}!' })
        # Access the validator for that type and call it. It might change request.data somehow
        self.types[data['type']]['validator'](data)
        # return attrs, not data, to make DRF happy
        return attrs
    def _validate_required(self, data: Dict, required_fields: List):
        """Raise ValidationError for each listed key missing from data."""
        for field in required_fields:
            if field not in data:
                raise ValidationError({ field: 'This field is required!' })
    def _validate_post(self, data: Dict):
        # Don't really care about the other fields
        self._validate_required(data, [
            'title',
            'id',
            'description',
            'contentType',
            'content',
            'author',
            'visibility'
        ])
        return data
    def _validate_comment(self, data: Dict):
        # No required fields for comments beyond 'type'.
        return data
    def _validate_like(self, data: Dict):
        # No required fields for likes beyond 'type'.
        return data
    def _validate_follower(self, data: Dict):
        """Validate a follow request: actor/object author stubs and target UUID."""
        # Don't really care about the other fields
        self._validate_required(data, [
            'actor',
            'object',
        ])
        if not isinstance(data['actor'], dict):
            raise ValidationError({ 'actor': 'This field must be an object containing an author!' })
        # Supposedly our author
        if not isinstance(data['object'], dict):
            raise ValidationError({ 'object': 'This field must be an object containing an author!' })
        self._validate_required(data['actor'], [
            'type',
            'id',
            'host'
        ])
        # Supposedly our author
        self._validate_required(data['object'], [
            'type',
            'id',
            'host'
        ])
        author_uuid = uuid_helpers.extract_author_uuid_from_id(data['object']['id'])
        if author_uuid is None:
            raise ValidationError({ 'object': 'The author\'s `id` field must have a valid author UUID!' })
        # Make sure the target is our author
        if UUID(self.context['author_id']) != author_uuid:
            raise ValidationError({ 'object': 'The author\'s `id` field must match the author you\'re sending it to!' })
        return data
    def to_representation(self, instance):
        """Return just the stored payload, not the wrapping model fields."""
        json = super().to_representation(instance)
        # Flatten representation
        json = json['inbox_object']
        return json
    class Meta:
        model = InboxItem
        fields = ['inbox_object']
class RemotePostSerializer(serializers.Serializer):
    """Stub serializer for posts pushed to remote nodes."""
    # NOTE(review): models.JSONField is a Django *model* field, not a DRF
    # serializer field, so these declare no serializer fields — confirm
    # whether serializers.JSONField was intended.
    post = models.JSONField()
    author_uuids = models.JSONField()
    def update(self, instance, validated_data):
        # Intentionally unimplemented.
        pass
    def create(self, validated_data):
        # Intentionally unimplemented.
        pass
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-06-07 07:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add RegexpChecker.flag_ignore_case (exposes re.IGNORECASE to checks)."""
    dependencies = [
        ('tasks', '0002_auto_20160607_1133'),
    ]
    operations = [
        migrations.AddField(
            model_name='regexpchecker',
            name='flag_ignore_case',
            field=models.BooleanField(default=False, help_text="Python's re.IGNORECASE (re.I)"),
        ),
    ]
|
nilq/baby-python
|
python
|
from os import path
import numpy as np
import torch
import torch.utils.data as td
import torchvision.datasets as dsets
import torchvision.transforms as tt
import vsrl_utils as vu
from faster_rcnn.datasets.factory import get_imdb
import faster_rcnn.roi_data_layer.roidb as rdl_roidb
import faster_rcnn.fast_rcnn.config as cf
import model as md
"""
Due to different conventions used by 3rd party software :(, you need the
following:
You need a VCOCO_ROOT directory that has images stored like this:
$VCOCO_ROOT/coco/images/train2014/
$VCOCO_ROOT/coco/images/val2014/
You also need a COCO_ROOT directory that has images stored like this:
$COCO_ROOT/images/
"""
# Default data roots, resolved under the current user's home directory.
HOME = path.expanduser("~")
VCOCO_ROOT = path.join(HOME, "data/v-coco/")
COCO_ROOT = path.join(HOME, "data/coco/")
class PathManager(object):
    """Builds the filesystem paths the COCO / V-COCO loaders need."""
    def __init__(self, coco_root=COCO_ROOT, vcoco_root=VCOCO_ROOT):
        self.coco_root = coco_root
        self.vcoco_root = vcoco_root
    @property
    def coco_imgs(self):
        """Directory containing the raw COCO images."""
        return path.join(self.coco_root, "images/")
    @property
    def coco_vcoco_ann(self):
        """Path to the combined V-COCO annotation file."""
        annotation_rel = "data/instances_vcoco_all_2014.json"
        return path.join(self.vcoco_root, annotation_rel)
defaultpm = PathManager()
def get_vsrl_labels(vcoco_set):
    """Return the path of the VSRL annotation json for a v-coco split."""
    rel = "data/vcoco/%s.json" % vcoco_set
    return path.join(VCOCO_ROOT, rel)
def get_ids(vcoco_set):
    """Return the path of the image-id list for a v-coco split."""
    rel = "data/splits/%s.ids" % vcoco_set
    return path.join(VCOCO_ROOT, rel)
def get_imgid_2_vcoco_labels(vcoco_all, coco):
    """
    Get a dict from annotation id to vcoco image labels.

    Returns {image_id: {"image_id": ..., "verbs": {action_name: {...}}}},
    one per-verb entry per image, copying the i-th row of each array in
    verb_dict.

    NOTE(review): uses py2-only ``xrange`` — this file is Python 2
    throughout (see ``iteritems`` below); keep that in mind when porting.
    """
    ret = {}
    for verb_dict in vcoco_all:
        # attach_gt_boxes presumably adds the "bbox"/"role_bbox" arrays
        # read below — TODO confirm against vsrl_utils.
        verb_dict = vu.attach_gt_boxes(verb_dict, coco)
        action_name = verb_dict["action_name"]
        for i in xrange(len(verb_dict["image_id"])):
            img_id = verb_dict["image_id"][i][0]
            if img_id not in ret:
                ret[img_id] = {
                    "image_id": img_id,
                    #"image_path": coco.loadImgs([img_id])[0]["filename"],
                    "verbs": {},
                }
            # Don't overwrite verb_dict while iterating.
            ret[img_id]["verbs"][action_name] = \
                {
                    "role_object_id": verb_dict["role_object_id"][i],
                    "role_name": verb_dict["role_name"],
                    "label": verb_dict["label"][i],
                    "role_bbox": verb_dict["role_bbox"][i],
                    "include": verb_dict["include"],
                    "bbox": verb_dict["bbox"][i],
                }
    return ret
def role_is_not_agent(agentrole_list):
    """Return True when an (action, role) pair's role is anything but "agent".

    Bug fix: the original body read ``x[1]`` but no ``x`` exists in scope —
    every call raised NameError. It clearly meant the parameter.
    """
    return "agent" != agentrole_list[1]
def split_action_role(agentrole):
    """Split an "action-role" string into its [action, role] parts."""
    parts = agentrole.split("-")
    return parts
class VCocoTranslator(object):
    """Maps between V-COCO action/role/noun names and dense integer ids.

    NOTE(review): this file is Python 2 (``iteritems``, ``map`` used as a
    list, ``it.ifilter``); porting to 3 requires touching several methods.
    """
    def __init__(self, vcoco_all, categories):
        self.ids_2_actions = sorted([x['action_name'] for x in vcoco_all])
        self.actions_2_ids = {
            name: i for i, name in enumerate(self.ids_2_actions)}
        # We expect that "categories" already contains a background category
        self.ids_2_nouns = categories
        self.nouns_2_ids = {x: i for i, x in enumerate(self.ids_2_nouns)}
        #classes = ['__background__'] + sorted([x['
        #self.ids_2_classes = sortedo
        # this is a mapping that combines verb with role for localization
        # tasks.
        actionroles = []
        actionroles_nonagent = []
        self.action_roles_lookup = {}
        for verb in vcoco_all:
            roles = verb["role_name"]
            self.action_roles_lookup[verb["action_name"]] = roles
            for role in roles:
                # "action-role" composite names, e.g. "eat-instr".
                actionrole_name = "%s-%s" % (verb["action_name"], role)
                actionroles.append(actionrole_name)
                if role != "agent":
                    actionroles_nonagent.append(actionrole_name)
        self.ids_2_actionroles = sorted(actionroles)
        self.ids_2_actionrolesnonagent = sorted(actionroles_nonagent)
        self.actionroles_2_ids = {
            x: i for i, x in enumerate(self.ids_2_actionroles)}
        self.actionrolesnonagent_2_ids = {
            x: i for i, x in enumerate(self.ids_2_actionrolesnonagent)}
    @property
    def num_actions(self):
        # Number of distinct action names.
        return len(self.ids_2_actions)
    @property
    def num_action_roles(self):
        # Number of distinct "action-role" pairs, agents included.
        return len(self.ids_2_actionroles)
    @property
    def num_action_nonagent_roles(self):
        # Number of distinct "action-role" pairs excluding the agent role.
        return len(self.ids_2_actionrolesnonagent)
    def get_action_labels(self, vcoco_labels):
        """
        Get numeric labels for v-coco action classes
        vcoco_labels: a dict like: {"verbs": {"verb_name": {"label": 0 or 1}}}
        """
        ret = np.empty(self.num_actions)
        for verb_name, labels in vcoco_labels["verbs"].iteritems():
            ret[self.actions_2_ids[verb_name]] = labels["label"]
        return ret
    def get_action_nonagent_role_locations(self, vcoco_labels):
        """
        Get a np.ndarray with size [1 x NActionRolesNonagent x 5]
        (The first index of the bounding box is the label)
        """
        ret = np.empty([1, self.num_action_nonagent_roles, 5], dtype=np.float)
        for index, actionrole in enumerate(self.ids_2_actionrolesnonagent):
            action, role = actionrole.split("-")
            position = vcoco_labels["verbs"][action]["role_name"].index(role)
            ret[0,index,1:] = self.get_nth_role_bbox(
                    vcoco_labels["verbs"][action]["role_bbox"], position)
            ret[0,index,0] = vcoco_labels["verbs"][action]["label"] * 1.
        return ret
    def action_role_iter(self):
        # NOTE(review): broken as written — ``a`` is undefined, ``it`` is
        # never imported, and role_is_not_agent's original body raised
        # NameError; presumably meant to filter self.ids_2_actionroles.
        return it.ifilter(role_is_not_agent, it.imap(split_action_role, a))
    def get_nth_role_bbox(self, numpy_data, index):
        # role_bbox rows store 4 values per role; slice out the index-th box.
        return numpy_data[(4*index):(4*(index + 1))]
    def get_human_object_gt_pairs(self, vcoco_labels):
        """
        TODO should a human-object pair only be trained for the single action
        on which its label appears?
        NBoxes will be the number of positive instances where a g.t. object and
        human have a positive label.
        Returns a tuple:
            tup[0] - a [NBoxes x 4] numpy.ndarray of human boxes
            tup[1] - a [NBoxes x 4] numpy.ndarray of object boxes
            tup[2] - a [NBoxes x NActionNonagentRoles] numpy.ndarray of gt labels
        It also ignores boxes in vcoco_labels that don't have a dimensions.
        """
        tup0 = []
        tup1 = []
        tup2 = []
        for index, actionrole in enumerate(self.ids_2_actionrolesnonagent):
            action, role = actionrole.split("-")
            if vcoco_labels["verbs"][action]["label"]:
                # This h_position quantity is always 0, AFAIK. Since agents are
                # always listed first.
                h_position = vcoco_labels["verbs"][action]["role_name"].index(
                        "agent")
                o_position = vcoco_labels["verbs"][action]["role_name"].index(
                        role)
                role_bbox = vcoco_labels["verbs"][action]["role_bbox"]
                # Skip entries whose object box is NaN (missing annotation).
                if np.any(np.isnan(self.get_nth_role_bbox(
                        role_bbox, o_position))):
                    continue
                tup0.append(self.get_nth_role_bbox(role_bbox, h_position))
                tup1.append(self.get_nth_role_bbox(role_bbox, o_position))
                gt_labels = np.zeros(self.num_action_nonagent_roles)
                gt_labels[index] = 1.
                tup2.append(gt_labels)
        if len(tup0) == 0:
            return None, None, None
        return map(np.vstack, [tup0, tup1, tup2])
    def human_scores_to_agentrolenonagent(self, h_scores):
        # Make something that is [NxNActions] into something that puts those
        # action scores only in locations corresponding to action-nonagent
        # prediction slots.
        ret = np.empty([h_scores.shape[0], self.num_action_nonagent_roles])
        for index, action in enumerate(self.ids_2_actions, start=0):
            roles = self.action_roles_lookup[action]
            for role in roles:
                if role == "agent":
                    continue
                actionrole = "%s-%s" % (action, role)
                ret_ind = self.actionrolesnonagent_2_ids[actionrole]
                ret[:, ret_ind] = h_scores[:, index]
        return ret
# TODO possibly the only thing that needs COCO_ROOT is this? consider removing
class VCocoBoxes(dsets.coco.CocoDetection):
    """
    V-COCO flavored variant of torchvision's CocoDetection dataset
    https://github.com/pytorch/vision/blob/master/torchvision/datasets/coco.py

    Items are (image, (coco_annotation, vcoco_annotation)) pairs.
    """
    def __init__(
            self, vcoco_set, coco_root, transform=None, coco_transform=None,
            combined_transform=None):
        # The superconstructor is skipped on purpose: it requires an annFile,
        # which we don't have; V-COCO annotations are loaded instead.
        pm = PathManager(coco_root=coco_root)
        self.root = pm.coco_imgs
        self.coco = vu.load_coco()
        self.vcoco_all = vu.load_vcoco(vcoco_set)
        # COCO's index lookup fails on non-int ids, so coerce them here.
        self.ids = [int(x) for x in self.vcoco_all[0]["image_id"].ravel()]
        self.transform = transform
        self.target_transform = coco_transform
        self.combined_transform = combined_transform
        # Per-image V-COCO labels, keyed by image id.
        self.imgid_2_vcoco = get_imgid_2_vcoco_labels(self.vcoco_all, self.coco)

    def __getitem__(self, index):
        img_id = self.ids[index]
        img, coco_ann = super(VCocoBoxes, self).__getitem__(index)
        target = (coco_ann, self.imgid_2_vcoco[img_id])
        if self.combined_transform is not None:
            target = self.combined_transform(target)
        return (img, target)
class RoiVCocoBoxes(VCocoBoxes):
    """
    VCocoBoxes variant that serves Faster R-CNN roidb entries (instead of
    decoded images), each paired with its V-COCO annotation.
    """
    def __init__(self, vcoco_set, coco_root, vcoco_root):
        super(RoiVCocoBoxes, self).__init__(vcoco_set, coco_root)
        # TODO this sets a global config, which I prefer not to do. But the
        # faster_rcnn code depends on it.
        cf.cfg_from_list(["DATA_DIR", vcoco_root])
        splits = {"vcoco_train": "train", "vcoco_val": "val"}
        if vcoco_set not in splits:
            raise ValueError("Invalid vcoco_set '%s'" % vcoco_set)
        self._imdb = get_imdb("coco_2014_" + splits[vcoco_set])
        rdl_roidb.prepare_roidb(self._imdb)
        self._roidb = self._imdb.roidb
        # Map COCO image id -> position in the roidb list.
        self.cocoimgid_2_roidbindex = {
            img_id: i for i, img_id in enumerate(self._imdb._image_index)}

    def __getitem__(self, index):
        img_id = self.ids[index]
        roidb_entry = self._roidb[self.cocoimgid_2_roidbindex[img_id]]
        return (roidb_entry, self.imgid_2_vcoco[img_id])

    def get_classes(self):
        return self._imdb._classes
def targ_trans(target):
    """Pull the 'throw' labels out of a (coco_ann, vcoco_ann) target pair and
    wrap them in a float tensor."""
    _, vcoco_ann = target
    return torch.Tensor(vcoco_ann["verbs"]["throw"]["label"])
# TODO delete this.
def get_loader(vcoco_set, coco_dir):
    """Build a DataLoader yielding (image, throw-label-tensor) batches."""
    preprocess = tt.Compose([tt.Scale(md.IMSIZE), tt.ToTensor()])
    dataset = VCocoBoxes(
        vcoco_set, coco_dir, transform=preprocess,
        combined_transform=targ_trans)
    return td.DataLoader(dataset, batch_size=16, shuffle=True, num_workers=4)
def get_label_loader(vcoco_set, coco_dir, test=False):
    """Like get_loader(), but batches carry the full (coco, vcoco) labels.

    With test=True a truncated dataset (FakeVCocoBoxes) is substituted so
    smoke tests iterate quickly.
    """
    preprocess = tt.Compose([tt.Scale(md.IMSIZE), tt.ToTensor()])
    dataset_cls = FakeVCocoBoxes if test else VCocoBoxes
    dataset = dataset_cls(vcoco_set, coco_dir, transform=preprocess)
    return td.DataLoader(dataset, batch_size=16, shuffle=True, num_workers=4)
class FakeVCocoBoxes(VCocoBoxes):
    """VCocoBoxes stand-in that pretends to hold only 40 samples, so loaders
    built over it finish quickly in tests."""
    def __len__(self):
        return 40
class FakeDatasetLoader(object):
    """Minimal DataLoader stand-in: replays a preloaded list of batches."""

    def __init__(self, data):
        self.data = data

    def __len__(self):
        return len(self.data)

    def __iter__(self):
        return iter(self.data)
def make_vcoco_test_loader():
    """Snapshot a couple of vcoco_train batches to data/test_data.th."""
    label_loader = get_label_loader("vcoco_train", defaultpm.coco_imgs)
    make_test_loader(label_loader, "data/test_data.th")
def get_test_dataset(loader):
    """Wrap the first two batches of `loader` in a FakeDatasetLoader."""
    batches = []
    for batch in loader:
        batches.append(batch)
        if len(batches) >= 2:
            break
    return FakeDatasetLoader(batches)
def make_test_loader(loader, outloc):
    """Serialize a truncated copy of `loader` to `outloc` via torch.save."""
    torch.save(get_test_dataset(loader), outloc)
def get_test_loader():
    """Load the dataset snapshot written by make_vcoco_test_loader()."""
    return torch.load("data/test_data.th")
def make_roi_test_loader():
    """Snapshot two roidb batches plus the imdb class list to disk."""
    loader = RoiVCocoBoxes(
        "vcoco_train", defaultpm.coco_root, defaultpm.vcoco_root)
    truncated = get_test_dataset(loader)
    torch.save((truncated, loader._imdb._classes), "data/test_roi_data.th")
def get_roi_test_loader():
    """Load the (dataset, classes) pair written by make_roi_test_loader()."""
    return torch.load("data/test_roi_data.th")
|
nilq/baby-python
|
python
|
#Client name: Laura Atkins
#Programmer name: Griffin Cosgrove
#PA purpose: Program to determine net pay check of employees and provide new access codes.
#My submission of this program indicates that I have neither received nor given unauthorized assistance in writing this program

# Federal withholding rates keyed by marital status ("s" single, "m" married).
FEDERAL_RATES = {"s": 0.15, "m": 0.10}


def compute_paycheck(hours, rate, status, state_rate):
    """Return (gross, state_tax, federal_tax, total_deductions).

    Raises ValueError for a status other than "s" or "m" (the original code
    crashed with NameError on any other input).
    """
    if status not in FEDERAL_RATES:
        raise ValueError("status must be 's' or 'm', got %r" % (status,))
    gross = hours * rate
    state_tax = state_rate * gross
    federal_tax = FEDERAL_RATES[status] * gross
    return gross, state_tax, federal_tax, state_tax + federal_tax


def access_code_parts(name):
    """Return the four pieces of the employee access code derived from name."""
    third = len(name) // 3
    return name[third:7], name.upper()[-3:], min(name), max(name)


def main():
    #Creating the prompts for the user to input their information
    s1 = input("Enter employee name (first last):")
    # NOTE(security): eval() on raw user input executes arbitrary code;
    # kept for behavioral compatibility, but float() would be safer.
    no1 = eval(input("Enter number of hours worked this last week:"))
    no2 = eval(input("Enter hourly pay rate:"))
    s2 = input("Enter Single or married (s or m):")
    no3 = eval(input("Enter state tax witholding rate (as a decimal):"))
    pay, no4, witholding, no5 = compute_paycheck(no1, no2, s2, no3)
    #displaying the information the user entered and the computed pay,
    #deductions, and access code
    print(("\nEmployee Name:"), s1)
    print(("\nHours Worked:"), no1)
    print(("Pay Rate"), no2)
    print("Gross Pay: ", format(pay, ',.2f'), sep='$')
    print("Deductions:")
    print("\tFederal Witholding: ", format(witholding, ',.2f'), sep='$')
    print("\tState Witholding: ", format(no4, ',.2f'), sep='$')
    print("\tTotal Deduction: ", format(no5, ',.2f'), sep='$')
    code = access_code_parts(s1)
    print(("\nYour new access code is:"), code[0], code[1], code[2], code[3])


if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
"""VBR ContainerType routes"""
from typing import Dict
from fastapi import APIRouter, Body, Depends, HTTPException
from vbr.api import VBR_Api
from vbr.utils.barcode import generate_barcode_string, sanitize_identifier_string
from ..dependencies import *
from .models import ContainerType, CreateContainerType, GenericResponse, transform
from .utils import parameters_to_query
# Router for the /container_types endpoints; unmatched lookups answer 404.
router = APIRouter(
    prefix="/container_types",
    tags=["container_types"],
    responses={404: {"description": "Not found"}},
    route_class=LoggingRoute,
)
@router.get(
    "/", dependencies=[Depends(vbr_read_public)], response_model=List[ContainerType]
)
def list_container_types(
    # See views/container_types_public.sql for possible filter names
    container_type_id: Optional[str] = None,
    name: Optional[str] = None,
    client: VBR_Api = Depends(vbr_admin_client),
    common=Depends(limit_offset),
):
    """List ContainerTypes.
    Refine results using filter parameters.
    Requires: **VBR_READ_PUBLIC**"""
    query = parameters_to_query(container_type_id=container_type_id, name=name)
    # Fetch the raw view rows, then map each into the response model.
    raw_rows = client.vbr_client.query_view_rows(
        view_name="container_types_public",
        query=query,
        limit=common["limit"],
        offset=common["offset"],
    )
    return [transform(raw) for raw in raw_rows]
@router.get(
    "/{container_type_id}",
    dependencies=[Depends(vbr_read_public)],
    response_model=ContainerType,
)
def get_container_type_by_id(
    container_type_id: str,
    client: VBR_Api = Depends(vbr_admin_client),
):
    """Get a ContainerType by ID.
    Requires: **VBR_READ_PUBLIC**"""
    query = {"container_type_id": {"operator": "eq", "value": container_type_id}}
    rows = client.vbr_client.query_view_rows(
        view_name="container_types_public", query=query, limit=1, offset=0
    )
    # An empty result previously raised IndexError (HTTP 500); answer with
    # the documented 404 instead.
    if not rows:
        raise HTTPException(
            404, "ContainerType {0} not found".format(container_type_id)
        )
    return transform(rows[0])
# POST /
@router.post(
    "/", dependencies=[Depends(vbr_write_public)], response_model=ContainerType
)
def create_container_type(
    body: CreateContainerType = Body(...),
    # Annotation fixed: the injected dependency is a VBR_Api (as in every
    # sibling route), not a Tapis client.
    client: VBR_Api = Depends(vbr_admin_client),
):
    """Create a new ContainerType.
    Requires: **VBR_WRITE_PUBLIC**
    """
    try:
        container_type = client.create_container_type(
            name=body.name, description=body.description
        )
    except Exception as exc:
        raise HTTPException(500, "Failed to create new container type: {0}".format(exc))
    # Re-read the row through the public view so the response matches GET.
    query = {"container_type_id": {"operator": "eq", "value": container_type.local_id}}
    row = transform(
        client.vbr_client.query_view_rows(
            view_name="container_types_public", query=query, limit=1, offset=0
        )[0]
    )
    return row
# DELETE /{container_type_id}
@router.delete(
    "/{container_type_id}",
    dependencies=[Depends(vbr_admin)],
    response_model=GenericResponse,
)
def delete_container_type(
    container_type_id: str,
    client: VBR_Api = Depends(vbr_admin_client),
):
    """Delete a ContainerType.
    Requires: **VBR_ADMIN**"""
    # Fixed NameError: `vbr` is never imported in this module — only the
    # sanitize_identifier_string function itself is imported at the top.
    container_type_id = sanitize_identifier_string(container_type_id)
    container_type = client.get_container_type_by_local_id(container_type_id)
    client.vbr_client.delete_row(container_type)
    return {"message": "ContainerType deleted"}
# TODO Later
# PUT /{container_type_id} - update container_type
|
nilq/baby-python
|
python
|
# Read the employee's current salary and apply a 15% raise.
salario = int(input("qual o salario do funcionario?"))
aumento = (15 * salario) / 100
novo_salario = salario + aumento
print("O salario atual do funcionario é de {} e com o aumento de 15% vai para {}".format(salario, novo_salario))
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-11-03 20:19
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated (Django 1.11): removes the Person model and re-links
    # Face and Pose directly to Frame.
    dependencies = [
        ('query', '0023_auto_20181101_2104'),
    ]
    operations = [
        # Drop the FKs that reference Person before the model is deleted.
        migrations.RemoveField(
            model_name='person',
            name='frame',
        ),
        migrations.RemoveField(
            model_name='pose',
            name='person',
        ),
        # Re-point Face and Pose at Frame with cascading deletes.
        migrations.AlterField(
            model_name='face',
            name='frame',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='query.Frame'),
        ),
        migrations.AlterField(
            model_name='pose',
            name='frame',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='query.Frame'),
        ),
        migrations.RemoveField(
            model_name='face',
            name='person',
        ),
        # Faces are now unique per labeler/frame/bbox rather than per person.
        migrations.AlterUniqueTogether(
            name='face',
            unique_together=set([('labeler', 'frame', 'bbox_x1', 'bbox_x2', 'bbox_y1', 'bbox_y2')]),
        ),
        migrations.DeleteModel(
            name='Person',
        ),
    ]
|
nilq/baby-python
|
python
|
# encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
from __future__ import absolute_import, print_function, unicode_literals
import collections
import re
from textwrap import dedent
import pytablewriter as ptw
import pytest
import six # noqa: W0611
from pytablewriter.style import Align, FontSize, Style, ThousandSeparator
from tabledata import TableData
from termcolor import colored
from ..._common import print_test_result
from ...data import (
float_header_list,
float_value_matrix,
headers,
mix_header_list,
mix_value_matrix,
style_tabledata,
styles,
value_matrix,
value_matrix_iter,
value_matrix_iter_1,
value_matrix_with_none,
)
# pandas is optional; DataFrame tests are skipped when it is unavailable.
try:
    import pandas as pd
    SKIP_DATAFRAME_TEST = False
except ImportError:
    SKIP_DATAFRAME_TEST = True
# One writer test case: the inputs plus the exact expected Markdown output.
Data = collections.namedtuple("Data", "table indent header value is_formatting_float expected")
normal_test_data_list = [
Data(
table="",
indent=0,
header=headers,
value=value_matrix,
is_formatting_float=True,
expected=dedent(
"""\
| a | b | c |dd | e |
|--:|----:|---|--:|----|
| 1|123.1|a |1.0| 1|
| 2| 2.2|bb |2.2| 2.2|
| 3| 3.3|ccc|3.0|cccc|
"""
),
),
Data(
table="",
indent=0,
header=headers,
value=None,
is_formatting_float=True,
expected=dedent(
"""\
| a | b | c |dd | e |
|---|---|---|---|---|
"""
),
),
Data(
table="floating point",
indent=0,
header=headers,
value=[
["1", 123.09999999999999, "a", "1", 1],
[2, 2.2000000000000002, "bb", "2.2", 2.2000000000000002],
[3, 3.2999999999999998, "ccc", "3.2999999999999998", "cccc"],
],
is_formatting_float=True,
expected=dedent(
"""\
# floating point
| a | b | c |dd | e |
|--:|----:|---|--:|----|
| 1|123.1|a |1.0| 1|
| 2| 2.2|bb |2.2| 2.2|
| 3| 3.3|ccc|3.3|cccc|
"""
),
),
Data(
table="tablename",
indent=1,
header=headers,
value=value_matrix,
is_formatting_float=True,
expected=dedent(
"""\
## tablename
| a | b | c |dd | e |
|--:|----:|---|--:|----|
| 1|123.1|a |1.0| 1|
| 2| 2.2|bb |2.2| 2.2|
| 3| 3.3|ccc|3.0|cccc|
"""
),
),
Data(
table="",
indent=0,
header=headers,
value=value_matrix_with_none,
is_formatting_float=True,
expected=dedent(
"""\
| a | b | c |dd | e |
|--:|--:|---|--:|----|
| 1| |a |1.0| |
| |2.2| |2.2| 2.2|
| 3|3.3|ccc| |cccc|
| | | | | |
"""
),
),
Data(
table="",
indent=0,
header=mix_header_list,
value=mix_value_matrix,
is_formatting_float=True,
expected=dedent(
"""\
| i | f | c | if |ifc|bool| inf |nan|mix_num | time |
|--:|---:|----|---:|---|----|--------|---|-------:|-------------------------|
| 1|1.10|aa | 1.0| 1|X |Infinity|NaN| 1|2017-01-01T00:00:00 |
| 2|2.20|bbb | 2.2|2.2| |Infinity|NaN|Infinity|2017-01-02 03:04:05+09:00|
| 3|3.33|cccc|-3.0|ccc|X |Infinity|NaN| NaN|2017-01-01T00:00:00 |
"""
),
),
Data(
table="formatting float 1",
indent=0,
header=headers,
value=value_matrix,
is_formatting_float=True,
expected=dedent(
"""\
# formatting float 1
| a | b | c |dd | e |
|--:|----:|---|--:|----|
| 1|123.1|a |1.0| 1|
| 2| 2.2|bb |2.2| 2.2|
| 3| 3.3|ccc|3.0|cccc|
"""
),
),
Data(
table="formatting float 2",
indent=0,
header=float_header_list,
value=float_value_matrix,
is_formatting_float=True,
expected=dedent(
"""\
# formatting float 2
| a | b | c |
|---:|----------:|----:|
|0.01| 0.0012|0.000|
|1.00| 99.9000|0.010|
|1.20|999999.1230|0.001|
"""
),
),
Data(
table="not formatting float 1",
indent=0,
header=headers,
value=value_matrix,
is_formatting_float=False,
expected=dedent(
"""\
# not formatting float 1
| a | b | c |dd | e |
|--:|----:|---|--:|----|
| 1|123.1|a | 1| 1|
| 2| 2.2|bb |2.2| 2.2|
| 3| 3.3|ccc| 3|cccc|
"""
),
),
Data(
table="not formatting float 2",
indent=0,
header=float_header_list,
value=float_value_matrix,
is_formatting_float=False,
expected=dedent(
"""\
# not formatting float 2
| a | b | c |
|---:|---------:|----:|
|0.01| 0.00125| 0|
| 1| 99.9| 0.01|
| 1.2|999999.123|0.001|
"""
),
),
Data(
table="",
indent=0,
header=["Name", "xUnit", "Source", "Remarks"],
value=[
[
"Crotest",
"",
"[160]",
"MIT License. A tiny and simple test framework for Crystal\nwith common assertions and no pollution into Object class.",
"",
]
],
is_formatting_float=True,
expected=dedent(
"""\
| Name |xUnit|Source| Remarks |
|-------|-----|------|--------------------------------------------------------------------------------------------------------------------|
|Crotest| |[160] |MIT License. A tiny and simple test framework for Crystal with common assertions and no pollution into Object class.|
"""
),
),
Data(
table="",
indent=0,
header=["姓", "名", "生年月日", "郵便番号", "住所", "電話番号"],
value=[
["山田", "太郎", "2001/1/1", "100-0002", "東京都千代田区皇居外苑", "03-1234-5678"],
["山田", "次郎", "2001/1/2", "251-0036", "神奈川県藤沢市江の島1丁目", "03-9999-9999"],
],
is_formatting_float=True,
expected=dedent(
"""\
| 姓 | 名 |生年月日|郵便番号| 住所 | 電話番号 |
|----|----|--------|--------|--------------------------|------------|
|山田|太郎|2001/1/1|100-0002|東京都千代田区皇居外苑 |03-1234-5678|
|山田|次郎|2001/1/2|251-0036|神奈川県藤沢市江の島1丁目|03-9999-9999|
"""
),
),
Data(
table="quoted values",
indent=0,
header=['"quote"', '"abc efg"'],
value=[['"1"', '"abc"'], ['"-1"', '"efg"']],
is_formatting_float=True,
expected=dedent(
"""\
# quoted values
|quote|abc efg|
|----:|-------|
| 1|abc |
| -1|efg |
"""
),
),
Data(
table="not str headers",
indent=0,
header=[None, 1, 0.1],
value=[[None, 1, 0.1]],
is_formatting_float=True,
expected=dedent(
"""\
# not str headers
| | 1 |0.1|
|---|--:|--:|
| | 1|0.1|
"""
),
),
Data(
table="no uniform matrix",
indent=0,
header=["a", "b", "c"],
value=[["a", 0], ["b", 1, "bb"], ["c", 2, "ccc", 0.1]],
is_formatting_float=True,
expected=dedent(
"""\
# no uniform matrix
| a | b | c |
|---|--:|---|
|a | 0| |
|b | 1|bb |
|c | 2|ccc|
"""
),
),
Data(
table="line breaks",
indent=0,
header=["a\nb", "\nc\n\nd\n", "e\r\nf"],
value=[["v1\nv1", "v2\n\nv2", "v3\r\nv3"]],
is_formatting_float=True,
expected=dedent(
"""\
# line breaks
| a b | c d | e f |
|-----|-----|-----|
|v1 v1|v2 v2|v3 v3|
"""
),
),
Data(
table="empty header",
indent=0,
header=[],
value=value_matrix,
is_formatting_float=True,
expected=dedent(
"""\
# empty header
| A | B | C | D | E |
|--:|----:|---|--:|----|
| 1|123.1|a |1.0| 1|
| 2| 2.2|bb |2.2| 2.2|
| 3| 3.3|ccc|3.0|cccc|
"""
),
),
Data(
table="vertical bar",
indent=1,
header=["a|b", "|c||d|"],
value=[["|v1|v1|", "v2|v2"]],
is_formatting_float=True,
expected=r"""## vertical bar
| a\|b |\|c\|\|d\||
|-------|------|
|\|v1\|v1\||v2\|v2 |
""",
),
Data(
table="mixed value types",
indent=0,
header=["data", "v"],
value=[
[3.4375, 65.5397978633],
[65.5397978633, 127.642095727],
[189.74439359, 189.74439359],
[10064.0097539, 10001.907456],
["next", 10250.3166474],
],
is_formatting_float=True,
expected=dedent(
"""\
# mixed value types
| data | v |
|---------|-------:|
| 3.437| 65.54|
| 65.540| 127.64|
| 189.744| 189.74|
|10064.010|10001.91|
|next |10250.32|
"""
),
),
Data(
table="list of dict",
indent=0,
header=["A", "B", "C"],
value=[
{"A": 1},
{"B": 2.1, "C": "hoge"},
{"A": 0, "B": 0.1, "C": "foo"},
{},
{"A": -1, "B": -0.1, "C": "bar", "D": "extra"},
],
is_formatting_float=False,
expected=dedent(
"""\
# list of dict
| A | B | C |
|--:|---:|----|
| 1| | |
| | 2.1|hoge|
| 0| 0.1|foo |
| | | |
| -1|-0.1|bar |
"""
),
),
]
# Degenerate input (no header, no rows) must raise EmptyTableDataError.
exception_test_data_list = [
    Data(
        table="",
        indent=0,
        header=[],
        value=[],
        is_formatting_float=True,
        expected=ptw.EmptyTableDataError,
    )
]
# Writer class under test for this entire module.
table_writer_class = ptw.MarkdownTableWriter
def trans_func(value):
    """Cell-value translator for the trans-func tests: None and False render
    as empty cells, True renders as "X", anything else passes through."""
    if value is True:
        return "X"
    if value is None or value is False:
        return ""
    return value
class Test_MarkdownTableWriter_write_new_line(object):
    def test_normal(self, capsys):
        # write_null_line() must emit exactly one blank line to stdout.
        table_writer_class().write_null_line()
        captured_out, _err = capsys.readouterr()
        assert captured_out == "\n"
class Test_MarkdownTableWriter_write_table(object):
@pytest.mark.parametrize(
["table", "indent", "header", "value", "is_formatting_float", "expected"],
[
[
data.table,
data.indent,
data.header,
data.value,
data.is_formatting_float,
data.expected,
]
for data in normal_test_data_list
],
)
def test_normal(self, capsys, table, indent, header, value, is_formatting_float, expected):
writer = table_writer_class()
writer.table_name = table
writer.set_indent_level(indent)
writer.headers = header
writer.value_matrix = value
writer.is_formatting_float = is_formatting_float
writer.register_trans_func(trans_func)
writer.write_table()
out, err = capsys.readouterr()
print_test_result(expected=expected, actual=out, error=err)
assert out == expected
assert writer.dumps() == expected
def test_normal_single_tabledata(self, capsys):
writer = table_writer_class()
writer.from_tabledata(
TableData(
"loader_mapping",
["Name", "Loader"],
[
["csv", "CsvTableFileLoader"],
["excel", "ExcelTableFileLoader"],
["html", "HtmlTableFileLoader"],
["markdown", "MarkdownTableFileLoader"],
["mediawiki", "MediaWikiTableFileLoader"],
["json", "JsonTableFileLoader"],
["Long Format Name", "Loader"],
],
)
)
writer.write_table()
expected = dedent(
"""\
# loader_mapping
| Name | Loader |
|----------------|------------------------|
|csv |CsvTableFileLoader |
|excel |ExcelTableFileLoader |
|html |HtmlTableFileLoader |
|markdown |MarkdownTableFileLoader |
|mediawiki |MediaWikiTableFileLoader|
|json |JsonTableFileLoader |
|Long Format Name|Loader |
"""
)
out, err = capsys.readouterr()
print_test_result(expected=expected, actual=out, error=err)
assert out == expected
def test_normal_multiple_write(self, capsys):
writer = table_writer_class()
writer.is_write_null_line_after_table = True
writer.from_tabledata(
TableData(
"first",
["Name", "Loader"],
[["csv", "CsvTableFileLoader"], ["excel", "ExcelTableFileLoader"]],
)
)
writer.write_table()
writer.from_tabledata(
TableData("second", ["a", "b", "c"], [["1", "AA", "abc"], ["2", "BB", "zzz"]])
)
writer.write_table()
expected = dedent(
"""\
# first
|Name | Loader |
|-----|--------------------|
|csv |CsvTableFileLoader |
|excel|ExcelTableFileLoader|
# second
| a | b | c |
|--:|---|---|
| 1|AA |abc|
| 2|BB |zzz|
"""
)
out, err = capsys.readouterr()
print_test_result(expected=expected, actual=out, error=err)
assert out == expected
def test_normal_style_align(self):
writer = table_writer_class()
writer.from_tabledata(
TableData(
"auto align",
["left", "right", "center", "auto", "auto", "None"],
[
[0, "r", "center align", 0, "a", "n"],
[11, "right align", "bb", 11, "auto", "none (auto)"],
],
)
)
expected = dedent(
"""\
# auto align
|left| right | center |auto|auto| None |
|---:|-----------|------------|---:|----|-----------|
| 0|r |center align| 0|a |n |
| 11|right align|bb | 11|auto|none (auto)|
"""
)
out = writer.dumps()
print_test_result(expected=expected, actual=out)
assert out == expected
writer.table_name = "specify alignment for each column manually"
writer.styles = [
Style(align=Align.LEFT),
Style(align=Align.RIGHT),
Style(align=Align.CENTER),
Style(align=Align.AUTO),
Style(align=Align.AUTO),
None,
]
expected = dedent(
"""\
# specify alignment for each column manually
|left| right | center |auto|auto| None |
|----|----------:|:----------:|---:|----|-----------|
|0 | r|center align| 0|a |n |
|11 |right align| bb | 11|auto|none (auto)|
"""
)
out = writer.dumps()
print_test_result(expected=expected, actual=out)
assert out == expected
# test for backward compatibility
writer.styles = None
writer.align_list = [Align.LEFT, Align.RIGHT, Align.CENTER, Align.AUTO, Align.AUTO, None]
out = writer.dumps()
print_test_result(expected=expected, actual=out)
assert out == expected
def test_normal_style_thousand_separator(self, capsys):
writer = table_writer_class()
writer.from_tabledata(
TableData(
"",
["none_format", "thousand_separator_i", "thousand_separator_f", "f", "wo_f"],
[
[1000, 1234567, 1234567.8, 1234.5678, 1234567.8],
[1000, 1234567, 1234567.8, 1234.5678, 1234567.8],
],
)
)
writer.styles = [
Style(thousand_separator=ThousandSeparator.NONE),
Style(thousand_separator=ThousandSeparator.COMMA),
Style(thousand_separator=ThousandSeparator.COMMA),
Style(thousand_separator=ThousandSeparator.SPACE),
]
out = writer.dumps()
expected = dedent(
"""\
|none_format|thousand_separator_i|thousand_separator_f| f | wo_f |
|----------:|-------------------:|-------------------:|------:|--------:|
| 1000| 1,234,567| 1,234,567.8|1 234.6|1234567.8|
| 1000| 1,234,567| 1,234,567.8|1 234.6|1234567.8|
"""
)
print_test_result(expected=expected, actual=out)
assert out == expected
writer.styles = None
writer.format_list = [
ptw.Format.NONE,
ptw.Format.THOUSAND_SEPARATOR,
ptw.Format.THOUSAND_SEPARATOR,
ptw.Format.THOUSAND_SEPARATOR,
]
out = writer.dumps()
expected = dedent(
"""\
|none_format|thousand_separator_i|thousand_separator_f| f | wo_f |
|----------:|-------------------:|-------------------:|------:|--------:|
| 1000| 1,234,567| 1,234,567.8|1,234.6|1234567.8|
| 1000| 1,234,567| 1,234,567.8|1,234.6|1234567.8|
"""
)
print_test_result(expected=expected, actual=out)
assert out == expected
def test_normal_style_font_size(self):
writer = table_writer_class()
writer.table_name = "style test: font size will not be affected"
writer.headers = ["none", "empty_style", "tiny", "small", "medium", "large"]
writer.value_matrix = [[111, 111, 111, 111, 111, 111], [1234, 1234, 1234, 1234, 1234, 1234]]
writer.styles = [
None,
Style(),
Style(font_size=FontSize.TINY),
Style(font_size=FontSize.SMALL),
Style(font_size=FontSize.MEDIUM),
Style(font_size=FontSize.LARGE),
]
expected = dedent(
"""\
# style test: font size will not be affected
|none|empty_style|tiny|small|medium|large|
|---:|----------:|---:|----:|-----:|----:|
| 111| 111| 111| 111| 111| 111|
|1234| 1234|1234| 1234| 1234| 1234|
"""
)
out = writer.dumps()
print_test_result(expected=expected, actual=out)
assert out == expected
def test_normal_style_font_weight(self):
writer = table_writer_class()
writer.table_name = "style test: bold"
writer.headers = ["normal", "bold"]
writer.value_matrix = [[11, 11], [123456, 123456]]
writer.styles = [Style(font_weight="normal"), Style(font_weight="bold")]
expected = dedent(
"""\
# style test: bold
|normal| bold |
|-----:|---------:|
| 11| **11**|
|123456|**123456**|
"""
)
out = writer.dumps()
print_test_result(expected=expected, actual=out)
assert out == expected
def test_normal_style_mix(self):
writer = table_writer_class()
writer.from_tabledata(style_tabledata)
writer.styles = styles
expected = dedent(
"""\
# style test
|none|empty|tiny|small|medium|large|null w/ bold| L bold |S italic|L bold italic|
|---:|----:|---:|----:|-----:|----:|------------|-------:|-------:|------------:|
| 111| 111| 111| 111| 111| 111| | **111**| _111_| _**111**_|
|1234| 1234|1234| 1234| 1,234|1 234| |**1234**| _1234_| _**1234**_|
"""
)
out = writer.dumps()
print_test_result(expected=expected, actual=out)
assert out == expected
def test_normal_set_style(self):
writer = table_writer_class()
writer.table_name = "set style method"
writer.headers = ["normal", "style by idx", "style by header"]
writer.value_matrix = [[11, 11, 11], [123456, 123456, 123456]]
writer.set_style(1, Style(font_weight="bold", thousand_separator=","))
writer.set_style(
"style by header", Style(align="center", font_weight="bold", thousand_separator=" ")
)
expected = dedent(
"""\
# set style method
|normal|style by idx|style by header|
|-----:|-----------:|:-------------:|
| 11| **11**| **11** |
|123456| **123,456**| **123 456** |
"""
)
output = writer.dumps()
print_test_result(expected=expected, actual=output)
assert output == expected
writer.table_name = "change style"
writer.set_style(1, Style(align="right", font_style="italic"))
writer.set_style("style by header", Style())
expected = dedent(
"""\
# change style
|normal|style by idx|style by header|
|-----:|-----------:|--------------:|
| 11| _11_| 11|
|123456| _123456_| 123456|
"""
)
output = writer.dumps()
print_test_result(expected=expected, actual=output)
assert output == expected
def test_normal_ansi_color(self, capsys):
writer = table_writer_class()
writer.table_name = "ANCI escape sequence"
writer.headers = ["colored_i", "colored_f", "colored_s", "wo_anci"]
writer.value_matrix = [
[colored(111, "red"), colored(1.1, "green"), colored("abc", "blue"), "abc"],
[colored(0, "red"), colored(0.12, "green"), colored("abcdef", "blue"), "abcdef"],
]
writer.write_table()
expected = dedent(
"""\
# ANCI escape sequence
|colored_i|colored_f|colored_s|wo_anci|
|--------:|--------:|---------|-------|
| 111| 1.1|abc |abc |
| 0| 0.12|abcdef |abcdef |
"""
)
out, err = capsys.readouterr()
print_test_result(expected=expected, actual=out, error=err)
_ansi_escape = re.compile(r"(\x9b|\x1b\[)[0-?]*[ -\/]*[@-~]", re.IGNORECASE)
assert _ansi_escape.sub("", out) == expected
def test_normal_margin_1(self, capsys):
writer = table_writer_class()
writer.from_tabledata(TableData("", headers, value_matrix))
writer.margin = 1
writer.write_table()
expected = dedent(
"""\
| a | b | c | dd | e |
|----:|------:|-----|----:|------|
| 1 | 123.1 | a | 1.0 | 1 |
| 2 | 2.2 | bb | 2.2 | 2.2 |
| 3 | 3.3 | ccc | 3.0 | cccc |
"""
)
out, err = capsys.readouterr()
print_test_result(expected=expected, actual=out, error=err)
assert out == expected
def test_normal_margin_2(self, capsys):
writer = table_writer_class()
writer.from_tabledata(TableData("", headers, value_matrix))
writer.margin = 2
writer.write_table()
expected = dedent(
"""\
| a | b | c | dd | e |
|------:|--------:|-------|------:|--------|
| 1 | 123.1 | a | 1.0 | 1 |
| 2 | 2.2 | bb | 2.2 | 2.2 |
| 3 | 3.3 | ccc | 3.0 | cccc |
"""
)
out, err = capsys.readouterr()
print_test_result(expected=expected, actual=out, error=err)
assert out == expected
def test_normal_value_map(self):
writer = table_writer_class()
writer.headers = ["a", "b"]
writer.value_matrix = [["foo", True], ["bar", False]]
writer.register_trans_func(trans_func)
expected = dedent(
"""\
| a | b |
|---|---|
|foo|X |
|bar| |
"""
)
output = writer.dumps()
print_test_result(expected=expected, actual=output)
assert output == expected
def test_normal_avoid_overwrite_stream_by_dumps(self):
writer = table_writer_class()
writer.headers = ["a", "b"]
writer.value_matrix = [["foo", "bar"]]
writer.stream = six.StringIO()
expected = dedent(
"""\
| a | b |
|---|---|
|foo|bar|
"""
)
output = writer.dumps()
print_test_result(expected=expected, actual=output)
assert output == expected
print("--------------------")
writer.write_table()
output = writer.stream.getvalue()
print_test_result(expected=expected, actual=output)
assert output == expected
@pytest.mark.skipif("six.PY2")
def test_normal_escape_html_tag(self, capsys):
writer = table_writer_class()
writer.headers = ["no", "text"]
writer.value_matrix = [[1, "<caption>Table 'formatting for Jupyter Notebook.</caption>"]]
writer.is_escape_html_tag = True
writer.write_table()
expected = dedent(
"""\
|no | text |
|--:|---------------------------------------------------------------------------|
| 1|<caption>Table 'formatting for Jupyter Notebook.</caption>|
"""
)
out, err = capsys.readouterr()
print_test_result(expected=expected, actual=out, error=err)
assert out == expected
@pytest.mark.skipif("six.PY2")
def test_normal_escape_html_tag_from_tabledata(self, capsys):
writer = table_writer_class()
writer.from_tabledata(
TableData(
"",
["no", "text"],
[[1, "<caption>Table 'formatting for Jupyter Notebook.</caption>"]],
)
)
writer.is_escape_html_tag = True
writer.write_table()
expected = dedent(
"""\
|no | text |
|--:|---------------------------------------------------------------------------|
| 1|<caption>Table 'formatting for Jupyter Notebook.</caption>|
"""
)
out, err = capsys.readouterr()
print_test_result(expected=expected, actual=out, error=err)
assert out == expected
@pytest.mark.parametrize(
["table", "indent", "header", "value", "expected"],
[
[data.table, data.indent, data.header, data.value, data.expected]
for data in exception_test_data_list
],
)
def test_exception(self, table, indent, header, value, expected):
writer = table_writer_class()
writer.table_name = table
writer.set_indent_level(indent)
writer.headers = header
writer.value_matrix = value
with pytest.raises(expected):
writer.write_table()
class Test_MarkdownTableWriter_write_table_iter(object):
@pytest.mark.parametrize(
["table", "header", "value", "expected"],
[
[
"tablename",
["ha", "hb", "hc"],
value_matrix_iter,
dedent(
"""\
# tablename
| ha | hb | hc |
|---:|---:|---:|
| 1| 2| 3|
| 11| 12| 13|
| 1| 2| 3|
| 11| 12| 13|
| 101| 102| 103|
|1001|1002|1003|
"""
),
],
[
"mix length",
["string", "hb", "hc"],
value_matrix_iter_1,
dedent(
"""\
# mix length
| string | hb | hc |
|-----------------------------|----:|---:|
|a b c d e f g h i jklmn | 2.1| 3|
|aaaaa | 12.1| 13|
|bbb | 2| 3|
|cc | 12| 13|
|a | 102| 103|
| | 1002|1003|
"""
),
],
],
)
def test_normal(self, capsys, table, header, value, expected):
writer = table_writer_class()
writer.table_name = table
writer.headers = header
writer.value_matrix = value
writer.iteration_length = len(value)
writer.write_table_iter()
out, err = capsys.readouterr()
print_test_result(expected=expected, actual=out, error=err)
assert out == expected
@pytest.mark.parametrize(
["table", "header", "value", "expected"],
[[data.table, data.header, data.value, data.expected] for data in exception_test_data_list],
)
def test_exception(self, table, header, value, expected):
writer = table_writer_class()
writer.table_name = table
writer.headers = header
writer.value_matrix = value
with pytest.raises(expected):
writer.write_table_iter()
class Test_MarkdownTableWriter_dump(object):
    def test_normal(self, tmpdir):
        # dump() must write to disk exactly what dumps() would return.
        out_path = str(tmpdir.join("test.sqlite"))
        writer = table_writer_class()
        writer.headers = ["a", "b"]
        writer.value_matrix = [["foo", "bar"]]
        writer.dump(out_path)
        expected = dedent(
            """\
            | a | b |
            |---|---|
            |foo|bar|
            """
        )
        with open(out_path) as f:
            actual = f.read()
        print_test_result(expected=expected, actual=actual)
        assert actual == expected
class Test_MarkdownTableWriter_from_tablib(object):
    """Tests for loading writer data from a tablib.Dataset."""
    def test_normal_multiple_write(self, capsys):
        """from_tablib() imports headers and rows from a tablib Dataset."""
        try:
            import tablib
        except ImportError:
            pytest.skip("requires tablib")
        dataset = tablib.Dataset()
        dataset.headers = ["a", "b", "c"]
        for row in (["1", "AA", "abc"], ["2", "BB", "zzz"]):
            dataset.append(row)
        writer = table_writer_class()
        writer.from_tablib(dataset)
        writer.write_table()
        expected = dedent(
            """\
            | a | b | c |
            |--:|---|---|
            |  1|AA |abc|
            |  2|BB |zzz|
            """
        )
        out, err = capsys.readouterr()
        print_test_result(expected=expected, actual=out, error=err)
        assert out == expected
class Test_MarkdownTableWriter_line_break_handling(object):
    """Tests for the ``line_break_handling`` writer option: newlines embedded
    in cell values are either replaced with spaces or escaped as literal \\n.
    """
    @pytest.mark.parametrize(
        ["value", "expected"],
        [
            # REPLACE: newlines inside a cell become single spaces
            [
                ptw.LineBreakHandling.REPLACE,
                dedent(
                    """\
                    |no |    text    |
                    |--:|------------|
                    |  1|first second|
                    """
                ),
            ],
            # ESCAPE: newlines are written as the two characters backslash-n;
            # raw strings keep the expected text's backslash literal
            [
                ptw.LineBreakHandling.ESCAPE,
                r"""|no |    text     |
|--:|-------------|
|  1|first\nsecond|
""",
            ],
            # the handling may also be given as a plain string
            [
                "escape",
                r"""|no |    text     |
|--:|-------------|
|  1|first\nsecond|
""",
            ],
        ],
    )
    def test_normal_line(self, value, expected):
        """dumps() applies the configured line-break handling to cell text."""
        writer = table_writer_class()
        writer.headers = ["no", "text"]
        writer.value_matrix = [[1, "first\nsecond"]]
        writer.line_break_handling = value
        out = writer.dumps()
        print_test_result(expected=expected, actual=out)
        assert out == expected
@pytest.mark.skipif("SKIP_DATAFRAME_TEST is True")
class Test_MarkdownTableWriter_from_dataframe(object):
    """Tests for from_dataframe(): loading writer data from a pandas
    DataFrame, given directly or as a path to a pickled DataFrame.
    """
    @pytest.mark.parametrize(
        ["add_index_column", "expected"],
        [
            [
                False,
                dedent(
                    """\
                    # add_index_column: False
                    | A | B |
                    |--:|--:|
                    |  1| 10|
                    |  2| 11|
                    """
                ),
            ],
            [
                True,
                dedent(
                    """\
                    # add_index_column: True
                    |   | A | B |
                    |---|--:|--:|
                    |a  |  1| 10|
                    |b  |  2| 11|
                    """
                ),
            ],
        ],
    )
    def test_normal(self, tmpdir, add_index_column, expected):
        """Both the in-memory and the pickled DataFrame produce the same table."""
        writer = table_writer_class()
        writer.table_name = "add_index_column: {}".format(add_index_column)
        df = pd.DataFrame({"A": [1, 2], "B": [10, 11]}, index=["a", "b"])
        writer.from_dataframe(df, add_index_column=add_index_column)
        out = writer.dumps()
        print_test_result(expected=expected, actual=out)
        assert out == expected
        # pickle test: from_dataframe also accepts a filepath to a pickled frame
        df_pkl_filepath = str(tmpdir.join("df.pkl"))
        df.to_pickle(df_pkl_filepath)
        writer.from_dataframe(df_pkl_filepath, add_index_column=add_index_column)
        out = writer.dumps()
        print_test_result(expected=expected, actual=out)
        assert out == expected
@pytest.mark.skipif("SKIP_DATAFRAME_TEST is True")
class Test_MarkdownTableWriter_from_series(object):
    """Tests for from_series(): loading writer data from a pandas Series."""
    @pytest.mark.parametrize(
        ["add_index_column", "expected"],
        [
            [
                False,
                dedent(
                    """\
                    # add_index_column: False
                    |value |
                    |-----:|
                    |100.00|
                    | 49.50|
                    | 29.01|
                    |  0.00|
                    | 24.75|
                    | 49.50|
                    | 74.25|
                    | 99.00|
                    """
                ),
            ],
            [
                True,
                dedent(
                    """\
                    # add_index_column: True
                    |     |value |
                    |-----|-----:|
                    |count|100.00|
                    |mean | 49.50|
                    |std  | 29.01|
                    |min  |  0.00|
                    |25%  | 24.75|
                    |50%  | 49.50|
                    |75%  | 74.25|
                    |max  | 99.00|
                    """
                ),
            ],
        ],
    )
    def test_normal(self, add_index_column, expected):
        """describe() of range(100) yields the fixed statistics asserted above."""
        writer = table_writer_class()
        writer.table_name = "add_index_column: {}".format(add_index_column)
        writer.from_series(
            pd.Series(list(range(100))).describe(), add_index_column=add_index_column
        )
        out = writer.dumps()
        print_test_result(expected=expected, actual=out)
        assert out == expected
|
nilq/baby-python
|
python
|
class Game:
    """Drives progression through LEVEL_CLASSES and tracks whether the game
    is still active. Each level is handed the callback to invoke when it is
    completed: advancing to the next level, or ending the game on the last.
    """
    def __init__(self):
        self.is_active = True
        self.LEVEL_CLASSES = [Level_1, Level_2, Level_3]
        self.curr_level_index = 0
        self.curr_level = self.LEVEL_CLASSES[self.curr_level_index](
            self._callback_for(self.curr_level_index)
        )
    def _callback_for(self, index):
        """Completion callback for the level at *index*: the last level ends
        the game, any earlier level advances to the next one.

        Centralizing this also fixes the single-level edge case: previously
        the first level always got go_to_next_level, which would IndexError
        if LEVEL_CLASSES had only one entry.
        """
        if index == len(self.LEVEL_CLASSES) - 1:
            return self.end_game
        return self.go_to_next_level
    def go_to_next_level(self):
        """Instantiate the next level with its completion callback."""
        self.curr_level_index += 1
        self.curr_level = self.LEVEL_CLASSES[self.curr_level_index](
            self._callback_for(self.curr_level_index)
        )
    def end_game(self):
        """Mark the game as finished."""
        self.is_active = False
    def process_input(self, raw_input):
        """Normalize raw player input and hand it to the current level."""
        # unintrusive cleansing & caps normalization
        cleaned_input = raw_input.lower().strip()
        return self.curr_level.process_cleaned_input(cleaned_input)
class Level_1:
    """Opening room: the player must pick up the ringing phone and answer it.
    Answering the phone invokes ``go_to_next_level`` and advances the game.
    """
    def __init__(self, go_to_next_level):
        # Callback supplied by Game; fired when the level's win condition is met.
        self.go_to_next_level = go_to_next_level
        self.LEVEL_NUMBER = 1
        # things and actions should never be changed, if thing/action no longer exists
        # then set that as one of its properties e.g. phone_exists: False
        self.THINGS = {"room", "phone", "desk", "bed"}
        self.ACTIONS = {
            "look",
            "pickup",
            "approach",
            "answer",
            "sleep",
            "hit",
            "open",
            "help",
            "quit",
            "read",
            "draw",
            "place",
            "jump",
        }
        # thing -> action -> handler, for actions with game side effects
        # beyond returning a canned response ("smart actions" below).
        self.FUNCTIONS = {
            "bed": {"hit": self.hit_bed},
            "phone": {"pickup": self.pickup_phone, "answer": self.answer_phone},
        }
        self.SYNONYMS_FOR_ACTION = {
            "look": {
                "look",
                "look around",
                "see",
                "view",
                "survey",
                "observe",
                "observe around",
                "inspect",
                "scrutinize",
                "examine",
                "investigate",
                "check",
                "checkout",
                "review",
                "monitor",
                "search",
                "watch",
                "identify",
                "analyze",
                "peek",
                "describe",
                "find",
            },
            "pickup": {
                "pickup",
                "pick up",
                "pick",
                "get",
                "take",
                "grab",
                "weild",
                "hold",
                "lift",
            },
            "approach": {"approach", "go", "goto", "reach", "walk"},
            "answer": {"answer", "respond", "talk"},
            "sleep": {"sleep", "rest", "nap"},
            "hit": {
                "hit",
                "kick",
                "smack",
                "slap",
                "punch",
                "pound",
                "fight",
                "headbutt",
                "attack",
            },
            "open": {"open", "unlock", "enter"},
            "help": {"help", "h"},
            "quit": {"quit"},
            "read": {"read"},
            "draw": {"draw", "illustrate", "paint", "inscribe", "mark"},
            "place": {"place", "put", "set", "lie"},
            "jump": {"jump", "bounce"},
        }
        self.SYNONYMS_FOR_THING = {
            "room": {
                "room",
                "floor",
                "wall",
                "walls",
                "ceiling",
                "space",
                "area",
                "environment",
            },
            "phone": {"phone", "device", "cellphone"},
            "desk": {"desk", "table"},
            "bed": {"bed", "mattress", "sheets", "pillow"},
        }
        # never delete a thing/action, just update
        thing_props = {
            "bed": {"wasHit": False},
        }
        # Canned response text per (thing, action); smart actions mutate these
        # as the level's state changes.
        responses = {
            "room": {
                "look": (
                    "You can see a bed and a desk with a phone resting on top. "
                    "There's nothing else."
                ),
                "pickup": "Don't be ridiculous.",
                "approach": "You're already in the room, man. No need.",
                "hit": "You kick and hit around the room. Nothing happens.",
            },
            "phone": {
                "look": "A small cheap phone. It appears to be ringing.",
                "pickup": "You have taken the phone. It is still ringing.",
                "approach": "You have approached the phone.",
                "answer": (
                    "You answer it, the voice on the other line says 'You find "
                    "yourself in a room.' As the voice speaks, the room around you"
                    " starts to shift. You are now in a completely different room."
                ),
                "hit": "Why? Stop being so violent.",
            },
            "desk": {
                "look": "A flimsy wooden desk.",
                "pickup": (
                    "Please. This desk is too heavy to pick up and take with you."
                ),
                "approach": "You have approached the desk.",
                "hit": "You hit the desk. That was pointless.",
            },
            "bed": {
                "look": "The bed you woke up from. Not sure how you got here.",
                "pickup": "The bed's too big for that.",
                "approach": "You have approached the bed.",
                "sleep": "But you've just woke up. Get your head in the game, man!",
                "hit": "You attack and hit the bed mercilessly. Nothing happens.",
                "jump": (
                    "You jump on the bed for a bit, smiling and having a grand 'ol "
                    "time. Wow that was fun."
                ),
            },
        }
        inventory = set()
        # Mutable level state: per-thing flags, response text, and inventory.
        self.state = {
            "thing_props": thing_props,
            "responses": responses,
            "inventory": inventory,
        }
        # all these dicts should include all things or actions
        assert self.THINGS == set(self.state["responses"].keys())
        assert self.THINGS == set(self.SYNONYMS_FOR_THING.keys())
        assert self.ACTIONS == set(self.SYNONYMS_FOR_ACTION.keys())
    def process_cleaned_input(self, cleaned_input):
        """Parse a normalized input string and return the response text."""
        # quickly return response for look shortcuts
        if cleaned_input in self.SYNONYMS_FOR_ACTION["look"] | {"l"}:
            return self.process_command("look", "room")
        # extracting verb and object out of input, then process verb object command
        input_words = cleaned_input.split()
        action, thing = self.extract_action_and_thing(input_words)
        return self.process_command(action, thing)
    def extract_action_and_thing(self, input_words):
        """Scan input words for the first recognized action and thing.

        Returns a (action, thing) pair of canonical keys; either may be ""
        when no match is found.
        """
        action, thing = "", ""
        action_Found, thing_Found = False, False
        # iterating through words to check for a direct match with any actions and
        # things available or their synonyms
        for input_word in input_words:
            if not action_Found:
                for action_key, synonyms in self.SYNONYMS_FOR_ACTION.items():
                    if input_word in synonyms:
                        action = action_key
                        action_Found = True
            if not thing_Found:
                for thing_key, synonyms in self.SYNONYMS_FOR_THING.items():
                    if input_word in synonyms:
                        thing = thing_key
                        thing_Found = True
        # print(f"ACTION:", action)
        # print(f"THING:", thing)
        return action, thing
    def process_command(self, action, thing):
        """Run the smart-action handler if one exists, else return canned text."""
        # if theres a game function for this input, do that, otherwise just get the
        # text resource
        try:
            do_action = self.FUNCTIONS[thing][action]
        except KeyError:
            return self.get_response_for_command(action, thing)
        else:
            return do_action()
    def get_response_for_command(self, action, thing):
        """Build the response string for an (action, thing) pair, covering
        the cases where either or both were not recognized."""
        THING_HAS_NO_ACTION = f"You can't perform action '{action}' on the {thing}."
        response = None
        if action and thing:
            response = self.state["responses"][thing].get(action, THING_HAS_NO_ACTION)
        elif not action and thing:
            response = f"Not sure what you want to do to the {thing}."
        elif action and not thing:
            response = f"I can't perform action '{action}' on that."
        elif not action and not thing:
            response = (
                "Couldn't find an eligible verb or object in your command.\n"
                "Example of a good command:\n"
                "hit desk\n"
                "Here, 'hit' is the verb and 'desk' is the object."
            )
        return response + "\n"
    # // ----------------------------------
    # SMART ACTIONS
    # this section has all the specific game actions that need to do things other than
    # just give back the string resource to the player
    def pickup_phone(self):
        """Add the phone to the inventory and update the affected responses."""
        response = self.get_response_for_command("pickup", "phone")
        responses, inventory = (self.state[k] for k in ("responses", "inventory"))
        if "phone" not in inventory:
            inventory.add("phone")
            # room
            responses["room"][
                "look"
            ] = "You can see a bed and a desk. There's nothing else."
            # phone
            responses["phone"]["look"] = (
                "A small cheap phone. It is ringing. Now that it is on your person, "
                "you can feel an unexplainable force emanating from it."
            )
            responses["phone"]["pickup"] = "You already have the phone!"
            responses["phone"][
                "approach"
            ] = "You can't approach something that's already on your person!"
        return response
    def answer_phone(self):
        """Answering the phone completes the level."""
        response = self.get_response_for_command("answer", "phone")
        self.go_to_next_level()
        return response
    def hit_bed(self):
        """First hit flips the bed's wasHit flag and changes later responses."""
        response = self.get_response_for_command("hit", "bed")
        thing_props, responses = (self.state[k] for k in ("thing_props", "responses"))
        if not thing_props["bed"]["wasHit"]:
            thing_props["bed"]["wasHit"] = True
            responses["bed"]["hit"] = (
                "You attack and hit the bed mercilessly. Nothing continues to happen. "
                "Do you need help?"
            )
        return response
class Level_2:
    """Second room: the player must pick up the chalk, draw a door with it,
    and open the door. Opening the door invokes ``go_to_next_level``.
    """
    def __init__(self, go_to_next_level):
        # Callback supplied by Game; fired when the level's win condition is met.
        self.go_to_next_level = go_to_next_level
        self.LEVEL_NUMBER = 2
        # things and actions should never be changed, if thing/action no longer exists
        # then set that as one of its properties e.g. phone_exists: False
        self.THINGS = {"room", "chalk", "note", "door"}
        self.ACTIONS = {
            "look",
            "pickup",
            "approach",
            "answer",
            "sleep",
            "hit",
            "open",
            "help",
            "quit",
            "read",
            "draw",
            "place",
            "jump",
        }
        self.SYNONYMS_FOR_ACTION = {
            "look": {
                "look",
                "look around",
                "see",
                "view",
                "survey",
                "observe",
                "observe around",
                "inspect",
                "scrutinize",
                "examine",
                "investigate",
                "check",
                "checkout",
                "review",
                "monitor",
                "search",
                "watch",
                "identify",
                "analyze",
                "peek",
                "describe",
                "find",
            },
            "pickup": {
                "pickup",
                "pick up",
                "pick",
                "get",
                "take",
                "grab",
                "weild",
                "hold",
                "lift",
            },
            "approach": {"approach", "go", "goto", "reach", "walk"},
            "answer": {"answer", "respond", "talk"},
            "sleep": {"sleep", "rest", "nap"},
            "hit": {
                "hit",
                "kick",
                "smack",
                "slap",
                "punch",
                "pound",
                "fight",
                "headbutt",
                "attack",
            },
            "open": {"open", "unlock", "enter"},
            "help": {"help", "h"},
            "quit": {"quit"},
            "read": {"read"},
            "draw": {"draw", "illustrate", "paint", "inscribe", "mark"},
            "place": {"place", "put", "set", "lie"},
            "jump": {"jump", "bounce"},
        }
        self.SYNONYMS_FOR_THING = {
            "room": {
                "room",
                "floor",
                "wall",
                "walls",
                "ceiling",
                "space",
                "area",
                "environment",
            },
            "chalk": {"chalk", "chalks", "chlak"},
            "note": {
                "note",
                "paper",
                "message",
                "writing",
                "writings",
                "markings",
                "marks",
                "sticky",
            },
            "door": {"door", "gate"},
        }
        # thing -> action -> handler, for actions with game side effects
        # beyond returning a canned response ("smart actions" below).
        self.FUNCTIONS = {
            "chalk": {"pickup": self.pickup_chalk},
            "door": {"draw": self.draw_door, "open": self.open_door},
        }
        # never delete a thing/action, just update
        thing_props = {"door": {"exists": False}}
        # Canned response text per (thing, action); smart actions mutate these
        # as the level's state changes.
        responses = {
            "room": {
                "look": (
                    "Except for a piece of chalk you see rested on the center of "
                    "the floor, this room is completely bare."
                ),
                "pickup": "Don't be ridiculous.",
                "approach": "You're already in the room, man. No need.",
            },
            "chalk": {
                "look": (
                    "A normal piece of chalk. There is a sticky note attached to it."
                ),
                "pickup": "You have picked up the chalk.",
                "approach": "You have approached the chalk.",
            },
            "note": {
                "look": (
                    "A sticky note with a message written on it:\nYOU'VE FOUND THE "
                    "KEY. NOW FIND THE DOOR."
                ),
                "approach": "You have approached the note.",
                "read": "YOU'VE FOUND THE KEY. NOW FIND THE DOOR.",
            },
            "door": {
                "look": (
                    "You try to look for a door, but alas. There is none to be found."
                ),
                "pickup": "Even if there was a door, that's quite silly.",
                "approach": "There is no door to approach.",
                "draw": "Can't draw a door without a writing utensil.",
                "open": "You can't open a non-existent door.",
            },
        }
        inventory = set()
        # Mutable level state: per-thing flags, response text, and inventory.
        self.state = {
            "thing_props": thing_props,
            "responses": responses,
            "inventory": inventory,
        }
        # all these dicts should include all things or actions
        assert self.THINGS == set(self.state["responses"].keys())
        assert self.THINGS == set(self.SYNONYMS_FOR_THING.keys())
        assert self.ACTIONS == set(self.SYNONYMS_FOR_ACTION.keys())
    def process_cleaned_input(self, cleaned_input):
        """Parse a normalized input string and return the response text."""
        # quickly return response for look shortcuts
        if cleaned_input in self.SYNONYMS_FOR_ACTION["look"] | {"l"}:
            return self.process_command("look", "room")
        # extracting verb and object out of input, then process verb object command
        input_words = cleaned_input.split()
        action, thing = self.extract_action_and_thing(input_words)
        return self.process_command(action, thing)
    def extract_action_and_thing(self, input_words):
        """Scan input words for the first recognized action and thing.

        Returns a (action, thing) pair of canonical keys; either may be ""
        when no match is found.
        """
        action, thing = "", ""
        action_Found, thing_Found = False, False
        # iterating through words to check for a direct match with any actions and
        # things available or their synonyms
        for input_word in input_words:
            if not action_Found:
                for action_key, synonyms in self.SYNONYMS_FOR_ACTION.items():
                    if input_word in synonyms:
                        action = action_key
                        action_Found = True
            if not thing_Found:
                for thing_key, synonyms in self.SYNONYMS_FOR_THING.items():
                    if input_word in synonyms:
                        thing = thing_key
                        thing_Found = True
        # print(f"ACTION:", action)
        # print(f"THING:", thing)
        return action, thing
    def process_command(self, action, thing):
        """Run the smart-action handler if one exists, else return canned text."""
        # if theres a game function for this input, do that, otherwise just get the
        # text resource
        try:
            do_action = self.FUNCTIONS[thing][action]
        except KeyError:
            return self.get_response_for_command(action, thing)
        else:
            return do_action()
    def get_response_for_command(self, action, thing):
        """Build the response string for an (action, thing) pair, covering
        the cases where either or both were not recognized."""
        THING_HAS_NO_ACTION = f"You can't perform action '{action}' on the {thing}."
        response = None
        if action and thing:
            response = self.state["responses"][thing].get(action, THING_HAS_NO_ACTION)
        elif not action and thing:
            response = f"Not sure what you want to do to the {thing}."
        elif action and not thing:
            response = f"I can't perform action '{action}' on that."
        elif not action and not thing:
            response = (
                "Couldn't find an eligible verb or object in your command.\n"
                "Example of a good command:\n"
                "hit desk\n"
                "Here, 'hit' is the verb and 'desk' is the object."
            )
        return response + "\n"
    # // ----------------------------------
    # SMART ACTIONS
    # this section has all the specific game actions that need to do things other than
    # just give back the string resource to the player
    def pickup_chalk(self):
        """Add the chalk to the inventory and update the affected responses."""
        response = self.get_response_for_command("pickup", "chalk")
        responses, inventory = (self.state[k] for k in ("responses", "inventory"))
        if "chalk" not in inventory:
            inventory.add("chalk")
            # room
            responses["room"]["look"] = "The room is completely bare."
            # chalk
            responses["chalk"]["pickup"] = "You already have the chalk!"
            responses["chalk"][
                "approach"
            ] = "No need to approach the chalk since you have it already."
            # note
            responses["note"][
                "approach"
            ] = "No need to approach the note since you have it already."
            # door
            responses["door"]["draw"] = "You draw the door."
        return response
    def draw_door(self):
        """Drawing the door (requires chalk) makes it exist and openable."""
        response = self.get_response_for_command("draw", "door")
        thing_props, responses, inventory = (
            self.state[k] for k in ("thing_props", "responses", "inventory")
        )
        if not thing_props["door"]["exists"] and "chalk" in inventory:
            thing_props["door"]["exists"] = True
            # room
            responses["room"][
                "look"
            ] = "The room is completely bare, except for a crudely drawn chalk door."
            # door
            responses["door"][
                "look"
            ] = "A badly drawn, human-sized door drawn with chalk."
            responses["door"]["pickup"] = "You can't do that to the door silly."
            responses["door"]["approach"] = "You approach the door."
            responses["door"]["draw"] = "You've already drawn the door!"
            responses["door"][
                "open"
            ] = "You try to open the door and somehow it works? You enter and are now in a completely different room."
        return response
    def open_door(self):
        """Opening the (drawn) door completes the level."""
        response = self.get_response_for_command("open", "door")
        thing_props = self.state["thing_props"]
        if thing_props["door"]["exists"]:
            self.go_to_next_level()
        return response
class Level_3:
    """Final room: a puzzle room with a billiards table, rack, clock, and
    rubix cube. Receives ``end_game`` as its completion callback; no smart
    actions are registered yet (FUNCTIONS is empty).
    """
    def __init__(self, end_game):
        # Callback supplied by Game; fired when the level (and game) is finished.
        self.end_game = end_game
        self.LEVEL_NUMBER = 3
        # things and actions should never be changed, if thing/action no longer exists
        # then set that as one of its properties e.g. phone_exists: False
        self.THINGS = {"room", "wall", "table", "rack", "clock", "cube"}
        self.ACTIONS = {
            "look",
            "pickup",
            "approach",
            "answer",
            "sleep",
            "hit",
            "open",
            "help",
            "quit",
            "read",
            "draw",
            "place",
            "jump",
        }
        self.SYNONYMS_FOR_ACTION = {
            "look": {
                "look",
                "look around",
                "see",
                "view",
                "survey",
                "observe",
                "observe around",
                "inspect",
                "scrutinize",
                "examine",
                "investigate",
                "check",
                "checkout",
                "review",
                "monitor",
                "search",
                "watch",
                "identify",
                "analyze",
                "peek",
                "describe",
                "find",
            },
            "pickup": {
                "pickup",
                "pick up",
                "pick",
                "get",
                "take",
                "grab",
                "weild",
                "hold",
                "lift",
            },
            "approach": {"approach", "go", "goto", "reach", "walk"},
            "answer": {"answer", "respond", "talk"},
            "sleep": {"sleep", "rest", "nap"},
            "hit": {
                "hit",
                "kick",
                "smack",
                "slap",
                "punch",
                "pound",
                "fight",
                "headbutt",
                "attack",
            },
            "open": {"open", "unlock", "enter"},
            "help": {"help", "h"},
            "quit": {"quit"},
            "read": {"read"},
            "draw": {"draw", "illustrate", "paint", "inscribe", "mark"},
            "place": {"place", "put", "set", "lie"},
            "jump": {"jump", "bounce"},
        }
        self.SYNONYMS_FOR_THING = {
            "room": {"room", "floor", "ceiling", "space", "area", "environment"},
            "wall": {
                "wall",
                "walls",
                "marks",
                "markings",
                "writing",
                "writings",
                "drawing",
                "drawings",
                "symbol",
                "hint",
                "numbers",
            },
            "table": {"table", "desk"},
            "rack": {"rack", "triangle"},
            "clock": {"clock", "circle", "circular"},
            "cube": {"cube", "rubix", "square"},
        }
        # No smart actions registered for this level yet.
        self.FUNCTIONS = {}
        # never delete a thing/action, just update
        thing_props = {}
        # Canned response text per (thing, action).
        responses = {
            "room": {
                "look": (
                    "The north wall that's facing me has some strange "
                    "writings/marks on it. There is a billiards table in the center "
                    "of the room in front of you. There is a clock hanging on the same "
                    "wall. There is a rubix cube lying on the floor."
                ),
                "pickup": "Don't be ridiculous.",
                "approach": "You're already in the room, man. No need.",
            },
            "wall": {
                "look": (
                    "You see a clock hanging on the wall. Below that are some markings:"
                    "\n3 -> 1 -> 4"
                ),
                "pickup": "Don't be ridiculous.",
                "approach": "You have approached the wall.",
                "hit": "You hit the wall. Completely useless.",
                "read": "3 -> 1 -> 4",
            },
            "table": {
                "look": (
                    "An old no longer working billiards table. There is a triangle rack"
                    " on it. It would probably be an ideal location to PLACE objects "
                    "onto this table."
                ),
                "pickup": "Don't be silly.",
                "approach": "You have approached the table.",
            },
            "rack": {
                "look": "A large triangle rack used to play pool.",
                "pickup": "You picked up the large triangle rack.",
                "approach": "You have approached the large triangle rack.",
                "place": (
                    "You need to have the rack on your person if you want to place it."
                ),
            },
            "clock": {
                "look": "A medium-sized circular clock.",
                "pickup": "You picked up the medium-sized clock.",
                "approach": "You have approached the clock.",
                "place": (
                    "You need to have the clock on your person if you want to place it."
                ),
            },
            "cube": {
                "look": (
                    "A small rubix cube. Unfortunately doesn't work anymore and "
                    "might as well be a paperweight."
                ),
                "pickup": "You picked up the small rubix cube.",
                "approach": "You have approached the small rubix cube.",
                "place": (
                    "You need to have the cube on your person if you want to place it."
                ),
            },
        }
        inventory = set()
        # Mutable level state: per-thing flags, response text, and inventory.
        self.state = {
            "thing_props": thing_props,
            "responses": responses,
            "inventory": inventory,
        }
        # all these dicts should include all things or actions
        assert self.THINGS == set(self.state["responses"].keys())
        assert self.THINGS == set(self.SYNONYMS_FOR_THING.keys())
        assert self.ACTIONS == set(self.SYNONYMS_FOR_ACTION.keys())
    def process_cleaned_input(self, cleaned_input):
        """Parse a normalized input string and return the response text."""
        # quickly return response for look shortcuts
        if cleaned_input in self.SYNONYMS_FOR_ACTION["look"] | {"l"}:
            return self.process_command("look", "room")
        # extracting verb and object out of input, then process verb object command
        input_words = cleaned_input.split()
        action, thing = self.extract_action_and_thing(input_words)
        return self.process_command(action, thing)
    def extract_action_and_thing(self, input_words):
        """Scan input words for the first recognized action and thing.

        Returns a (action, thing) pair of canonical keys; either may be ""
        when no match is found.
        """
        action, thing = "", ""
        action_Found, thing_Found = False, False
        # iterating through words to check for a direct match with any actions and
        # things available or their synonyms
        for input_word in input_words:
            if not action_Found:
                for action_key, synonyms in self.SYNONYMS_FOR_ACTION.items():
                    if input_word in synonyms:
                        action = action_key
                        action_Found = True
            if not thing_Found:
                for thing_key, synonyms in self.SYNONYMS_FOR_THING.items():
                    if input_word in synonyms:
                        thing = thing_key
                        thing_Found = True
        # print(f"ACTION:", action)
        # print(f"THING:", thing)
        return action, thing
    def process_command(self, action, thing):
        """Run the smart-action handler if one exists, else return canned text."""
        # if theres a game function for this input, do that, otherwise just get the
        # text resource
        try:
            do_action = self.FUNCTIONS[thing][action]
        except KeyError:
            return self.get_response_for_command(action, thing)
        else:
            return do_action()
    def get_response_for_command(self, action, thing):
        """Build the response string for an (action, thing) pair, covering
        the cases where either or both were not recognized."""
        THING_HAS_NO_ACTION = f"You can't perform action '{action}' on the {thing}."
        response = None
        if action and thing:
            response = self.state["responses"][thing].get(action, THING_HAS_NO_ACTION)
        elif not action and thing:
            response = f"Not sure what you want to do to the {thing}."
        elif action and not thing:
            response = f"I can't perform action '{action}' on that."
        elif not action and not thing:
            response = (
                "Couldn't find an eligible verb or object in your command.\n"
                "Example of a good command:\n"
                "hit desk\n"
                "Here, 'hit' is the verb and 'desk' is the object."
            )
        return response + "\n"
    # // ----------------------------------
    # SMART ACTIONS
    # this section has all the specific game actions that need to do things other than
    # just give back the string resource to the player
|
nilq/baby-python
|
python
|
#!/usr/bin/python
from my_func3 import hello3

# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    hello3()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import String
from ros_rover.msg import Rover
from numpy import interp
from PanTilt import PanTilt
# Module-level pan/tilt servo controller, shared by the subscriber callback.
pt = PanTilt()
def callback(data):
    """Map joystick axes (-255..255) onto pan/tilt servo angles (-90..90 deg)
    and apply them, plus button-driven offset nudges, to the PanTilt device.
    """
    pan = int(interp(data.camera_pan_axis, [-255, 255], [-90, 90]))
    tilt = int(interp(data.camera_tilt_axis, [-255, 255], [-90, 90]))
    # Pan direction is mirrored relative to the camera mount, hence -pan
    # (the non-negated variant was kept commented out in the original).
    pt.panTilt(-pan, tilt)
    # Buttons nudge the stored offsets; both are inverted for the same reason.
    pt.increaseOffset(-data.camera_pan_button, -data.camera_tilt_button)
    if data.command == 'CAMERA_CENTER':
        pt.offset_pan = 0
        pt.offset_tilt = 0
    # Parenthesized print is valid on both Python 2 and Python 3
    # (the original used the Python-2-only print statement).
    print("(%s,%s) Pan: %s Tilt: %s " % (data.camera_pan_button, data.camera_tilt_button, pan, tilt))
def listener():
    """Start the pan/tilt subscriber node and block until shutdown."""
    # In ROS, node names are unique; anonymous=True appends a random suffix
    # so several listener nodes can run at the same time.
    rospy.init_node('pantilt_listener', anonymous=True)
    # queue_size=1: only the freshest rover command matters for pan/tilt.
    rospy.Subscriber('chatter', Rover, callback, queue_size=1)
    rospy.Rate(5).sleep()
    rospy.spin()
# Start the subscriber node only when run as a script.
if __name__ == '__main__':
    listener()
|
nilq/baby-python
|
python
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TIFFIODataset"""
import tensorflow as tf
from tensorflow_io.python.ops import core_ops
class TIFFIODataset(tf.data.Dataset):
    """A tf.data.Dataset that yields the decoded pages of a single TIFF file."""
    def __init__(self, filename, internal=True):
        # Guard against direct construction; instances are meant to come from
        # the IODataset factory methods.
        # NOTE(review): the example in the message says "IODataset.from_pcap()",
        # which looks copy-pasted from the pcap dataset — presumably it should
        # name the TIFF factory method; confirm before changing the string.
        if not internal:
            raise ValueError(
                "TIFFIODataset constructor is private; please use one "
                "of the factory methods instead (e.g., "
                "IODataset.from_pcap())"
            )
        with tf.name_scope("TIFFIODataset"):
            # Read the file once; decode_tiff_info reports per-page info, and
            # the leading dimension of `dtype` gives the number of pages.
            content = tf.io.read_file(filename)
            _, dtype = core_ops.io_decode_tiff_info(content)
            # use dtype's rank to find out the number of elements
            dataset = tf.data.Dataset.range(tf.cast(tf.shape(dtype)[0], tf.int64))
            dataset = dataset.map(lambda index: core_ops.io_decode_tiff(content, index))
            self._dataset = dataset
            # keep the file-content tensor alive alongside the dataset
            self._content = content
            super().__init__(
                self._dataset._variant_tensor
            )  # pylint: disable=protected-access
    def _inputs(self):
        # No upstream datasets feed this one.
        return []
    @property
    def element_spec(self):
        return self._dataset.element_spec
|
nilq/baby-python
|
python
|
"""Trivial filter that adds an empty collection to the session."""
# pylint: disable=no-self-use,unused-argument
from typing import TYPE_CHECKING, Any, Dict
import dlite
from oteapi.models import FilterConfig
from pydantic import Field
from pydantic.dataclasses import dataclass
from oteapi_dlite.models import DLiteSessionUpdate
if TYPE_CHECKING:
from typing import Optional
@dataclass
class CreateCollectionStrategy:
    """Trivial filter that adds an empty collection to the session.
    **Registers strategies**:
    - `("filterType", "dlite/create-collection")`
    """
    filter_config: FilterConfig
    # Find a better way to keep collections alive!!!
    # Need to be `Any`, because otherwise `pydantic` complains.
    collection_refs: Dict[str, Any] = Field(
        {},
        description="A dictionary of DLite Collections.",
    )
    def initialize(
        self, session: "Optional[Dict[str, Any]]" = None
    ) -> DLiteSessionUpdate:
        """Create a new dlite.Collection and expose its uuid to the session
        as ``collection_id``.

        Raises:
            ValueError: If no session is given.
            KeyError: If the session already carries a ``collection_id``.
        """
        if session is None:
            raise ValueError("Missing session")
        if "collection_id" in session:
            raise KeyError("`collection_id` already exists in session.")
        coll = dlite.Collection()
        # Make sure that collection stays alive
        # It will never be deallocated...
        coll.incref()
        return DLiteSessionUpdate(collection_id=coll.uuid)
    def get(self, session: "Optional[Dict[str, Any]]" = None) -> DLiteSessionUpdate:
        """Execute the strategy: pass the session's ``collection_id`` through."""
        if session is None:
            raise ValueError("Missing session")
        return DLiteSessionUpdate(collection_id=session["collection_id"])
|
nilq/baby-python
|
python
|
from ..serializers import ClienteSerializer
from ..models import Cliente
class ControllerCliente:
    """CRUD helpers for ``Cliente`` records.

    Note: methods intentionally take no ``self``/``cls`` — they are used as
    plain functions attached to the class, mirroring the original convention.
    """
    serializer_class = ClienteSerializer
    # Payload keys copied verbatim onto the model. Shared by create and
    # update so the two code paths cannot drift apart (the original
    # duplicated the 18 assignments in both methods).
    CAMPOS_CLIENTE = (
        'cliente', 'calle', 'colonia', 'cp', 'municipio', 'estado', 'pais',
        'razon_social', 'rfc', 'telefono', 'contacto', 'email', 'pagina_web',
        'foto_cliente', 'id_zona_horaria', 'usar_inventario', 'alertas_email',
        'registro',
    )
    def crearcliente(request):
        """Create a Cliente from ``request.data``; return a status dict."""
        datosCliente = request.data
        try:
            ClienteNuevo = Cliente()
            for campo in ControllerCliente.CAMPOS_CLIENTE:
                setattr(ClienteNuevo, campo, datosCliente[campo])
            ClienteNuevo.save()
        except Exception:
            # Missing payload keys or DB errors collapse to a generic error,
            # matching the original contract (consider logging the exception).
            return {"estatus": "Error"}
        return {"estatus": "Ok", 'nuevo_cliente': ClienteNuevo.cliente}
    def listarcliente(id_cliente=None):
        """Return one serialized Cliente (by id) or the full serialized list."""
        if id_cliente:
            try:
                queryset = Cliente.objects.get(id_cliente=id_cliente)
            except Cliente.DoesNotExist:
                return ({'result': 'No se encontró el cliente deseado'})
            serializer = ClienteSerializer(queryset)
            return serializer.data
        else:
            queryset = Cliente.objects.all()
            serializer = ClienteSerializer(queryset, many=True)
            return serializer.data
    def modificarcliente(request, id_cliente=None):
        """Update every field of an existing Cliente from ``request.data``."""
        if not id_cliente:
            return {"result": "Ingrese el Id del cliente que desea modificar"}
        datosCliente = request.data
        try:
            clienteModificar = Cliente.objects.get(id_cliente=id_cliente)
        except Cliente.DoesNotExist:
            return ({'result': 'No se encontró el cliente deseado'})
        try:
            for campo in ControllerCliente.CAMPOS_CLIENTE:
                setattr(clienteModificar, campo, datosCliente[campo])
            # Single save — the original called save() twice back to back,
            # issuing a redundant second database write.
            clienteModificar.save()
        except Exception:
            return {"estatus": "Error"}
        return {"estatus": "Ok", 'cliente_modificado': clienteModificar.cliente}
|
nilq/baby-python
|
python
|
from random import randint
from typing import Tuple
from pymetaheuristics.genetic_algorithm.types import Genome
from pymetaheuristics.genetic_algorithm.exceptions import CrossOverException
def single_point_crossover(
    g1: Genome, g2: Genome, **kwargs
) -> Tuple[Genome, Genome]:
    """Swap the tails of two equal-length genomes at a random cut point.

    Raises CrossOverException when the genomes differ in length; genomes
    shorter than 2 are returned unchanged.
    """
    if len(g1) != len(g2):
        raise CrossOverException(
            "Genomes has to have the same length, got %d, %d" % (
                len(g1), len(g2)))
    length = len(g1)
    if length < 2:
        return g1, g2
    cut = randint(1, length - 1)
    child_a = g1[:cut] + g2[cut:]
    child_b = g2[:cut] + g1[cut:]
    return child_a, child_b
def pmx_single_point(
    g1: Genome, g2: Genome, **kwargs
) -> Tuple[Genome, Genome]:
    """
    PMX is a crossover function which consider a Genome as a sequence of
    nom-repetitive genes through the Genome. So before swapping, checks if
    repetition is going to occur, and swap the pretitive gene with its partner
    on the other Genome and them swap with other gene on the same Genome.
    See more at
    https://user.ceng.metu.edu.tr/~ucoluk/research/publications/tspnew.pdf .
    This implementation suites very well the TSP problem.
    """
    if len(g1) != len(g2):
        raise CrossOverException(
            "Genomes has to have the same length, got %d, %d" % (
                len(g1), len(g2)))
    length = len(g1)
    if length < 2:
        return g1, g2
    cut = randint(1, length - 1)
    def _pmx_child(base: Genome, donor: Genome) -> Genome:
        # Copy base, then pull donor's gene into position i while relocating
        # base's occurrence of that gene, keeping the child repetition-free.
        child = list(base)
        for i in range(cut):
            j = child.index(donor[i])
            child[j] = child[i]
            child[i] = donor[i]
        return child
    return _pmx_child(g1, g2), _pmx_child(g2, g1)
|
nilq/baby-python
|
python
|
from src.floyd_warshall import (floyd_warshall,
floyd_warshall_with_path_reconstruction, reconstruct_path)
def test_floyd_warshall(graph1):
    """All-pairs shortest distances computed on the shared graph1 fixture."""
    dist = floyd_warshall(graph1)
    expected = {(0, 2): 300, (0, 3): 200, (1, 2): 200}
    for (src, dst), cost in expected.items():
        assert dist[src][dst] == cost
def test_floyd_warshall_with_path_reconstruction(graph1):
    """Shortest paths rebuilt from the next-hop matrix on graph1."""
    _dist, next_move = floyd_warshall_with_path_reconstruction(graph1)
    assert reconstruct_path(next_move, 0, 3) == [0, 1, 3]
    assert reconstruct_path(next_move, 0, 2) == [0, 1, 3, 2]
|
nilq/baby-python
|
python
|
# Generated by Django 2.0.8 on 2019-08-18 19:16
import django.contrib.postgres.fields.jsonb
from django.db import migrations
class Migration(migrations.Migration):
    """Add a free-form JSON ``metadata`` field to the Task model."""

    dependencies = [
        ('tasks', '0022_auto_20190812_2145'),
    ]

    operations = [
        migrations.AddField(
            model_name='task',
            name='metadata',
            # default=dict instead of default={}: a literal {} is a single
            # shared mutable object across all model instances (Django
            # system check fields.E010 flags this); a callable yields a
            # fresh dict per row.
            field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, default=dict),
        ),
    ]
|
nilq/baby-python
|
python
|
"Genedropping with IBD constraints"
from pydigree.common import random_choice
from pydigree.genotypes import AncestralAllele
from .simulation import GeneDroppingSimulation
from pydigree.exceptions import SimulationError
from pydigree import paths
from pydigree import Individual
import collections
class ConstrainedMendelianSimulation(GeneDroppingSimulation):
    """
    Performs a gene-dropping simulation constrained to a specific
    IBD pattern
    """

    def __init__(self, template=None, label=None, replications=1000, only=None):
        """Set up the simulation and verify the pedigree is outbred.

        Every non-founder must have at least one founder parent: the
        constrained descent-path logic in replicate() is only valid for
        outbred pedigrees.
        """
        GeneDroppingSimulation.__init__(self, template=template, label=label,
                                        replications=replications, only=only)
        for ind in self.template.individuals:
            if ind.is_founder():
                continue
            if not (ind.father.is_founder() or ind.mother.is_founder()):
                # Fixed: the original implicit string concatenation produced
                # "...availablefor..." (missing space).
                raise ValueError("ConstrainedMendelian only available "
                                 "for outbred pedigrees")

    def replicate(self, writeibd=False, verbose=False, replicatenumber=0):
        "Creates a replicate from the simulation"
        self.template.clear_genotypes()
        for x in self.template.founders():
            x.label_genotypes()

        # Satisfy the IBD constraints first, deepest individuals first, so
        # that shallower relatives genotyped along a descent path are
        # already done when their own turn comes.
        for ind in sorted(self.constraints['ibd'],
                          key=lambda x: x.depth, reverse=True):
            if ind.has_genotypes():
                # If the individual we're looking at has genotypes
                # already, we've seen them earlier while getting
                # genotypes for someone deeper in the pedigree
                continue
            constraints = self.constraints['ibd'][ind]
            # TODO: Multiple constraints per individual
            # Right now we're only using the first ([0]) constraint
            constraints = [(x[1], AncestralAllele(x[0], x[2]))
                           for x in constraints]
            location, allele = constraints[0]
            ancestor = allele.ancestor

            # Pick one descent path from the ancestor down to ind at random
            # and force the constrained allele along it.
            descent_path = random_choice(paths(ancestor, ind))

            for path_member in descent_path:
                if path_member.is_founder():
                    continue
                fa, mo = path_member.parents()

                # A parent on the path must transmit the constrained
                # allele; the other parent contributes a free gamete.
                if fa in descent_path:
                    paternal_gamete = fa.constrained_gamete(constraints)
                else:
                    paternal_gamete = fa.gamete()
                if mo in descent_path:
                    maternal_gamete = mo.constrained_gamete(constraints)
                else:
                    maternal_gamete = mo.gamete()

                genotypes = Individual.fertilize(paternal_gamete,
                                                 maternal_gamete)
                path_member._set_genotypes(genotypes)

        # Get genotypes for everybody else that we're not constraining.
        for ind in self.template.individuals:
            ind.get_genotypes()

        if writeibd:
            self._writeibd(replicatenumber)

        # Now replace the label genotypes in founders with real ones.
        self.get_founder_genotypes()

        # Now replace the label genotypes in the nonfounders with the
        # genotypes of the founders.
        # Fixed: `isinstance(x, collections.Callable)` relied on an ABC
        # alias removed in Python 3.10; the builtin callable() performs
        # the same check on every supported version.
        if callable(self.only):
            siminds = [x for x in self.template.nonfounders() if self.only(x)]
        else:
            siminds = self.template.nonfounders()

        for nf in siminds:
            nf.delabel_genotypes()

        # Predict phenotypes
        if self.trait:
            for ind in siminds:
                ind.predict_phenotype(self.trait)

        if verbose:
            for ind in siminds:
                # NOTE(review): `location` is left over from the last IBD
                # constraint processed above; if the constraint dict was
                # empty this raises NameError — confirm intended.
                print(ind, ind.get_genotype(location))
|
nilq/baby-python
|
python
|
# The parameters were taken from the ReaxFF module in lammps:
#! at1; at2; De(sigma); De(pi); De(pipi); p(be1); p(bo5); 13corr; p(bo6), p(ovun1); p(be2); p(bo3); p(bo4); n.u.; p(bo1); p(bo2)
# 1 1 156.5953 100.0397 80.0000 -0.8157 -0.4591 1.0000 37.7369 0.4235 0.4527 -0.1000 9.2605 1.0000 -0.0750 6.8316 1.0000 0.0000
# 1 2 170.2316 0.0000 0.0000 -0.5931 0.0000 1.0000 6.0000 0.7140 5.2267 1.0000 0.0000 1.0000 -0.0500 6.8315 0.0000 0.0000
# 2 2 156.0973 0.0000 0.0000 -0.1377 0.0000 1.0000 6.0000 0.8240 2.9907 1.0000 0.0000 1.0000 -0.0593 4.8358 0.0000 0.0000
# 1 3 160.4802 105.1693 23.3059 -0.3873 -0.1613 1.0000 10.8851 1.0000 0.5341 -0.3174 7.0303 1.0000 -0.1463 5.2913 0.0000 0.0000
# 3 3 60.1463 176.6202 51.1430 -0.2802 -0.1244 1.0000 29.6439 0.9114 0.2441 -0.1239 7.6487 1.0000 -0.1302 6.2919 1.0000 0.0000
# 2 3 180.4373 0.0000 0.0000 -0.8074 0.0000 1.0000 6.0000 0.5514 1.2490 1.0000 0.0000 1.0000 -0.0657 5.0451 0.0000 0.0000
# atomID; ro(sigma); Val; atom mass; Rvdw; Dij; gamma; ro(pi); Val(e), alfa; gamma(w); Val(angle); p(ovun5); n.u.; chiEEM; etaEEM; n.u.; ro(pipi) ;p(lp2); Heat increment; p(boc4); p(boc3); p(boc5), n.u.; n.u.; p(ovun2); p(val3); n.u.; Val(boc); p(val5);n.u.;n.u.;n.u.
# C 1.3825 4.0000 12.0000 1.9133 0.1853 0.9000 1.1359 4.0000 9.7602 2.1346 4.0000 33.2433 79.5548 5.8678 7.0000 0.0000 1.2104 0.0000 199.0303 8.6991 34.7289 13.3894 0.8563 0.0000 -2.8983 2.5000 1.0564 4.0000 2.9663 0.0000 0.0000 0.0000
# H 0.7853 1.0000 1.0080 1.5904 0.0419 1.0206 -0.1000 1.0000 9.3557 5.0518 1.0000 0.0000 121.1250 5.3200 7.4366 1.0000 -0.1000 0.0000 62.4879 1.9771 3.3517 0.7571 1.0698 0.0000 -15.7683 2.1488 1.0338 1.0000 2.8793 0.0000 0.0000 0.0000
# O 1.2477 2.0000 15.9990 1.9236 0.0904 1.0503 1.0863 6.0000 10.2127 7.7719 4.0000 36.9573 116.0768 8.5000 8.9989 2.0000 0.9088 1.0003 60.8726 20.4140 3.3754 0.2702 0.9745 0.0000 -3.6141 2.7025 1.0493 4.0000 2.9225 0.0000 0.0000 0.0000
# Mapping between ReaxFF atom-type indices and element symbols.
num_to_atom_type = {0: "c", 1: "h", 2: "o"}
# Inverse lookup, derived from the mapping above so the two can never drift.
atom_type_to_num = {symbol: num for num, symbol in num_to_atom_type.items()}

#
# Equilibrium distances for the atom types, indexed per num_to_atom_type.
#
r_s = [1.3825, 0.7853, 1.2477]
r_pi = [1.1359, -0.1000, 1.0863]
r_pi2 = [1.2104, -0.1000, 0.9088]

#
# Exponents etc. for calculating the uncorrected bond order.
# Each matrix is symmetric and indexed [type_i][type_j].
#
pbo1 = [
    [-0.0750, -0.0500, -0.1463],
    [-0.0500, -0.0593, -0.0657],
    [-0.1463, -0.0657, -0.1302],
]
pbo2 = [
    [6.8316, 6.8315, 5.2913],
    [6.8315, 4.8358, 5.0451],
    [5.2913, 5.0451, 6.2919],
]
pbo3 = [
    [-0.1000, 1.0000, -0.3174],
    [1.0000, 1.0000, 1.0000],
    [-0.3174, 1.0000, -0.1239],
]
pbo4 = [
    [9.2605, 0.0000, 7.0303],
    [0.0000, 0.0000, 0.0000],
    [7.0303, 0.0000, 7.6487],
]
pbo5 = [
    [-0.4591, 0.0000, -0.1613],
    [0.0000, 0.0000, 0.0000],
    [-0.1613, 0.0000, -0.1244],
]
pbo6 = [
    [37.7369, 6.0000, 10.8851],
    [6.0000, 6.0000, 6.0000],
    [10.8851, 6.0000, 29.6439],
]

#
# Valency of the atoms (needed to correct for over-coordination).
#
valency = [4.0, 1.0, 2.0]
valency_val = [4.0, 1.0, 4.0]

#
# Per-atom parameters for the over-coordination correction.
#
pboc4 = [8.6991, 1.9771, 20.4140]
pboc3 = [34.7289, 3.3517, 3.3754]
pboc5 = [13.3894, 0.7571, 0.2702]

#
# Between which atom pairs the 1-3 over-coordination should be corrected.
#
v13cor = [
    [1.0, 1.0, 1.0],
    [1.0, 1.0, 1.0],
    [1.0, 1.0, 1.0],
]

#
# Between which atom pairs over-coordination should be corrected.
#
ovc = [
    [1.0, 0.0, 0.0],
    [0.0, 0.0, 0.0],
    [0.0, 0.0, 1.0],
]
|
nilq/baby-python
|
python
|
# codeby : Dileep
# Simulate the SSTF (shortest seek time first) disk scheduling algorithm.

# NOTE(review): `num` is read to keep the input protocol unchanged but is
# never used; the queue length comes from the second input line.
num = int(input("Enter the Number:"))
print("Enter the Queue:")
requestqueue = list(map(int, input().split()))
head_value = int(input("Head Value Starts at: "))

final = []
# Repeatedly service the pending request closest to the current head
# position; served slots are marked None so they are skipped afterwards.
for _ in range(len(requestqueue)):
    distances = [abs(head_value - req) if req is not None else float('inf')
                 for req in requestqueue]
    nearest = distances.index(min(distances))
    final.append(distances[nearest])
    head_value = requestqueue[nearest]
    # Clear by index (the original cleared by value, which could mark the
    # wrong slot when the queue contains duplicate cylinder numbers).
    requestqueue[nearest] = None

print("Head Difference:")
for movement in final:
    print(movement)
# Fixed typo in the output message ("Totoal" -> "Total").
print("Total Head Movements are:" + str(sum(final)))
|
nilq/baby-python
|
python
|
# -*- coding: utf8 -*-
from nose.tools import *
from mbdata.api.tests import with_client, assert_json_response_equal
@with_client
def test_label_get(client):
    """Fetch label ecc049d0-... (スタジオジブリ / Studio Ghibli) with the
    area, ipi, and isni includes and compare the full JSON payload
    against this golden response."""
    rv = client.get('/v1/label/get?id=ecc049d0-88a6-4806-a5b7-0f1367a7d6e1&include=area&include=ipi&include=isni')
    expected = {
        u"response": {
            u"status": {
                u"message": u"success",
                u"code": 0,
                u"version": u"1.0"
            },
            u"label": {
                u"begin_date": {
                    u"year": 1985,
                    u"month": 6
                },
                u"name": u"\u30b9\u30bf\u30b8\u30aa\u30b8\u30d6\u30ea",
                u"area": {
                    u"name": u"Japan"
                },
                u"ipis": [
                    u"00173517959",
                    u"00473554732"
                ],
                u"isnis": [
                    u"000000011781560X"
                ],
                u"type": u"Production",
                u"id": u"ecc049d0-88a6-4806-a5b7-0f1367a7d6e1"
            }
        }
    }
    assert_json_response_equal(rv, expected)
|
nilq/baby-python
|
python
|
import datetime
import logging
import os
import traceback
import flask
import google.auth
from google.auth.transport import requests as grequests
from google.oauth2 import id_token, credentials
import googleapiclient.discovery
from typing import Optional, NamedTuple
from app.util.exceptions import BadPubSubTokenException
# OAuth scopes requested for the import service SA's access token:
# identity info, read/write Cloud Storage access, and Pub/Sub.
IMPORT_SERVICE_SCOPES = [
    "https://www.googleapis.com/auth/userinfo.email",
    "https://www.googleapis.com/auth/userinfo.profile",
    "https://www.googleapis.com/auth/devstorage.read_write",
    "https://www.googleapis.com/auth/pubsub"
]
class CachedCreds:
    """A Credentials object for the import service SA paired with its expiry."""

    def __init__(self, access_token: str, expire_str: str):
        self.creds = credentials.Credentials(access_token, scopes=IMPORT_SERVICE_SCOPES)
        self.expiry = self._google_expiretime_to_datetime(expire_str)

    @classmethod
    def _google_expiretime_to_datetime(cls, expire_time: str) -> datetime.datetime:
        """Parse the ``expireTime`` field of a generateAccessToken response.

        Google documents the field as RFC3339 with nanoseconds
        ("2014-10-02T15:01:23.045123456Z") but in practice currently returns
        it without any fractional part, and a support ticket confirmed either
        form may appear in future. Both shapes are accepted here; fractional
        seconds are simply dropped, which at worst refreshes the token less
        than one second early.

        https://cloud.google.com/iam/docs/reference/credentials/rest/v1/projects.serviceAccounts/generateAccessToken
        https://console.cloud.google.com/support/cases/detail/21652153"""
        # Everything left of a '.' is the whole-second timestamp. When a
        # fractional part was present, the trailing 'Z' was cut off with it,
        # so restore it before parsing.
        stamp = expire_time.split('.')[0]
        if stamp[-1] != 'Z':
            stamp += 'Z'
        return datetime.datetime.strptime(stamp, "%Y-%m-%dT%H:%M:%SZ")
# Module-level cache for the import service SA creds; managed by
# _get_isvc_cached_creds / _update_isvc_creds below.
_cached_isvc_creds: Optional[CachedCreds] = None
def get_isvc_credential() -> credentials.Credentials:
    """Return a Google oauth2 Credentials object for the import service SA."""
    cached = _get_isvc_cached_creds()
    return cached.creds
def get_isvc_token() -> str:
    """Return a bearer access token string for the import service SA."""
    cached = _get_isvc_cached_creds()
    return cached.creds.token
def _get_isvc_cached_creds() -> CachedCreds:
    """Use the cached creds if they exist and have at least 5 minutes left;
    otherwise mint a fresh set."""
    cached = _cached_isvc_creds
    refresh_deadline = datetime.datetime.utcnow() + datetime.timedelta(minutes=5)
    if cached is None or cached.expiry <= refresh_deadline:
        logging.info("generating new creds for import service SA")
        return _update_isvc_creds()
    logging.info("using cached creds for import service SA")
    return cached
def _update_isvc_creds() -> CachedCreds:
    """Fetch a new token and replace the module-level cache.

    The app engine SA has token creator on the import service SA.
    """
    global _cached_isvc_creds
    response = _get_isvc_token_from_google()
    _cached_isvc_creds = CachedCreds(response["accessToken"], response["expireTime"])
    return _cached_isvc_creds
def _get_isvc_token_from_google() -> dict:
    """Call the IAM Credentials API's generateAccessToken for the import
    service SA and return the raw response dict (callers read its
    "accessToken" and "expireTime" keys)."""
    # create service account resource name
    email = os.environ.get('IMPORT_SVC_SA_EMAIL')
    name = 'projects/-/serviceAccounts/{}'.format(email)
    # create body for request
    body = {
        'scope': IMPORT_SERVICE_SCOPES
    }
    # Fixed: the local was named `credentials`, shadowing the
    # google.oauth2.credentials module imported at file scope.
    default_credentials, _project = google.auth.default()
    iam = googleapiclient.discovery.build('iamcredentials', 'v1',
                                          credentials=default_credentials)
    return iam.projects().serviceAccounts().generateAccessToken(
        name=name,
        body=body,
    ).execute()
def verify_pubsub_jwt(request: flask.Request) -> None:
    """Verify that this request came from Cloud Pub/Sub.

    This looks for a secret token in a queryparam, then decodes the Bearer token
    and checks identity and audience. Raises BadPubSubTokenException on any
    verification failure.

    See here: https://cloud.google.com/pubsub/docs/push#using_json_web_tokens_jwts"""
    if request.args.get('token', '') != os.environ.get('PUBSUB_TOKEN'):
        logging.info("Bad Pub/Sub token")
        raise BadPubSubTokenException()

    # Fixed: a missing or malformed Authorization header previously raised
    # an uncaught IndexError on the split below (surfacing as a 500);
    # treat it as a verification failure instead.
    bearer_token = request.headers.get('Authorization', '')
    parts = bearer_token.split(' ', maxsplit=1)
    if len(parts) != 2:
        logging.info("Missing or malformed Authorization header")
        raise BadPubSubTokenException()
    token = parts[1]

    try:
        claim = id_token.verify_oauth2_token(token, grequests.Request(),
                                             audience=os.environ.get('PUBSUB_AUDIENCE'))
        if claim['iss'] not in [
            'accounts.google.com',
            'https://accounts.google.com'
        ]:
            # bad issuer
            logging.info("Bad issuer")
            raise BadPubSubTokenException()
        if claim['email'] != os.environ.get('PUBSUB_ACCOUNT'):
            logging.info("Incorrect email address")
            raise BadPubSubTokenException()
    except Exception:
        # eats all exceptions, including ones thrown by verify_oauth2_token
        # if e.g. audience is wrong
        logging.info(traceback.format_exc())
        raise BadPubSubTokenException()
|
nilq/baby-python
|
python
|
from TunAugmentor import transformations
def test_transform1():
    """transform1 should return this sample string unchanged."""
    sample = 'mahdi'
    assert transformations.transform1(sample) == sample
|
nilq/baby-python
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.