hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
292c9626c9e04b5cf826e77da7923dbc37aaacf1 | 9,595 | py | Python | perfkitbenchmarker/linux_packages/cassandra.py | parsa-epfl/PerfKitBenchmarker | 7a7981100ebf68d6a24a8a347464800f40c47aa5 | [
"Apache-2.0"
] | 1 | 2016-12-07T19:49:58.000Z | 2016-12-07T19:49:58.000Z | perfkitbenchmarker/linux_packages/cassandra.py | parsa-epfl/PerfKitBenchmarker | 7a7981100ebf68d6a24a8a347464800f40c47aa5 | [
"Apache-2.0"
] | null | null | null | perfkitbenchmarker/linux_packages/cassandra.py | parsa-epfl/PerfKitBenchmarker | 7a7981100ebf68d6a24a8a347464800f40c47aa5 | [
"Apache-2.0"
] | 1 | 2021-12-07T13:29:48.000Z | 2021-12-07T13:29:48.000Z | # Copyright 2015 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Installs/Configures Cassandra.
See 'perfkitbenchmarker/data/cassandra/' for configuration files used.
Cassandra homepage: http://cassandra.apache.org
"""
import logging
import os
import posixpath
import time
from perfkitbenchmarker import data
from perfkitbenchmarker import errors
from perfkitbenchmarker import flags
from perfkitbenchmarker import os_types
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import INSTALL_DIR
from perfkitbenchmarker.linux_packages.ant import ANT_HOME_DIR
JNA_JAR_URL = ('https://maven.java.net/content/repositories/releases/'
'net/java/dev/jna/jna/4.1.0/jna-4.1.0.jar')
CASSANDRA_GIT_REPRO = 'https://github.com/apache/cassandra.git'
CASSANDRA_VERSION = 'cassandra-2.1.10'
CASSANDRA_YAML_TEMPLATE = 'cassandra/cassandra.yaml.j2'
CASSANDRA_ENV_TEMPLATE = 'cassandra/cassandra-env.sh.j2'
CASSANDRA_DIR = posixpath.join(INSTALL_DIR, 'cassandra')
CASSANDRA_PID = posixpath.join(CASSANDRA_DIR, 'cassandra.pid')
CASSANDRA_OUT = posixpath.join(CASSANDRA_DIR, 'cassandra.out')
CASSANDRA_ERR = posixpath.join(CASSANDRA_DIR, 'cassandra.err')
NODETOOL = posixpath.join(CASSANDRA_DIR, 'bin', 'nodetool')
# Number of times to attempt to start the cluster.
CLUSTER_START_TRIES = 10
CLUSTER_START_SLEEP = 60
# Time, in seconds, to sleep between node starts.
NODE_START_SLEEP = 5
FLAGS = flags.FLAGS
flags.DEFINE_integer('cassandra_concurrent_reads', 32,
'Concurrent read requests each server accepts.')
def CheckPrerequisites():
  """Verifies that the required resources are present.

  Raises:
    perfkitbenchmarker.data.ResourceNotFound: On missing resource.
  """
  required_templates = [CASSANDRA_YAML_TEMPLATE, CASSANDRA_ENV_TEMPLATE]
  for template in required_templates:
    # ResourcePath raises if the template cannot be located.
    data.ResourcePath(template)
def _Install(vm):
  """Installs Cassandra from a tarball."""
  # Toolchain needed to build Cassandra from source and to fetch JNA.
  for package in ('ant', 'build_tools', 'openjdk', 'curl'):
    vm.Install(package)
  build_command = (
      'cd {0}; git clone {1}; cd {2}; git checkout {3}; {4}/bin/ant'.format(
          INSTALL_DIR,
          CASSANDRA_GIT_REPRO,
          CASSANDRA_DIR,
          CASSANDRA_VERSION,
          ANT_HOME_DIR))
  vm.RemoteCommand(build_command)
  # Add JNA
  jna_command = 'cd {0} && curl -LJO {1}'.format(
      posixpath.join(CASSANDRA_DIR, 'lib'),
      JNA_JAR_URL)
  vm.RemoteCommand(jna_command)
def YumInstall(vm):
  """Installs Cassandra on the VM (yum-based distributions).

  Args:
    vm: VirtualMachine. The VM to install Cassandra on.
  """
  # Installation is distribution-independent (build from a git checkout).
  _Install(vm)
def AptInstall(vm):
  """Installs Cassandra on the VM (apt-based distributions).

  Args:
    vm: VirtualMachine. The VM to install Cassandra on.
  """
  # Installation is distribution-independent (build from a git checkout).
  _Install(vm)
def JujuInstall(vm, vm_group_name):
  """Installs the Cassandra charm on the VM.

  Args:
    vm: VirtualMachine. The Juju controller VM.
    vm_group_name: string. Name of the VM group to deploy the charm to.
  """
  vm.JujuDeploy('cs:trusty/cassandra', vm_group_name)

  # The charm defaults to Cassandra 2.2.x, which has deprecated
  # cassandra-cli. Specify the sources to downgrade to Cassandra 2.1.x
  # to match the cassandra benchmark(s) expectations.
  sources = ['deb http://www.apache.org/dist/cassandra/debian 21x main',
             'ppa:openjdk-r/ppa',
             'ppa:stub/cassandra']

  # NOTE(review): one key per source; the literal 'null' entries presumably
  # mean "no key needed" for the PPA sources — confirm against the charm docs.
  keys = ['F758CE318D77295D',
          'null',
          'null']

  vm.JujuSet('cassandra', [
      # Allow authentication from all units
      'authenticator=AllowAllAuthenticator',
      'install_sources="[%s]"' %
      ', '.join(map(lambda x: "'" + x + "'", sources)),
      'install_keys="[%s]"'
      % ', '.join(keys)
  ])

  # Wait for cassandra to be installed and configured
  vm.JujuWait()

  for unit in vm.units:
    # Make sure the cassandra/conf dir is created, since we're skipping
    # the manual installation to /opt/pkb.
    remote_path = posixpath.join(CASSANDRA_DIR, 'conf')
    unit.RemoteCommand('mkdir -p %s' % remote_path)
def Configure(vm, seed_vms):
  """Configure Cassandra on 'vm'.

  Renders the cassandra-env.sh and cassandra.yaml templates and uploads
  them to the VM's Cassandra conf directory.

  Args:
    vm: VirtualMachine. The VM to configure.
    seed_vms: List of VirtualMachine. The seed virtual machine(s).
  """
  seed_ips = ','.join(seed.internal_ip for seed in seed_vms)
  context = {
      'ip_address': vm.internal_ip,
      'data_path': posixpath.join(vm.GetScratchDir(), 'cassandra'),
      'seeds': seed_ips,
      'num_cpus': vm.num_cpus,
      'cluster_name': 'Test cluster',
      'concurrent_reads': FLAGS.cassandra_concurrent_reads,
  }

  for template in [CASSANDRA_ENV_TEMPLATE, CASSANDRA_YAML_TEMPLATE]:
    local_path = data.ResourcePath(template)
    # Drop the trailing '.j2', e.g. cassandra.yaml.j2 -> cassandra.yaml.
    config_name = os.path.splitext(os.path.basename(template))[0]
    remote_path = posixpath.join(CASSANDRA_DIR, 'conf', config_name)
    vm.RenderTemplate(local_path, remote_path, context=context)
def Start(vm):
  """Start Cassandra on a VM.

  Args:
    vm: The target vm. Should already be configured via 'Configure'.
  """
  if vm.OS_TYPE == os_types.JUJU:
    # The Juju charm manages the Cassandra service itself.
    return

  # Launch detached with nohup so the remote command returns immediately;
  # the PID is written to CASSANDRA_PID and stdout/stderr are captured
  # for later diagnostics (see IsRunning).
  vm.RemoteCommand(
      'nohup {0}/bin/cassandra -p "{1}" 1> {2} 2> {3} &'.format(
          CASSANDRA_DIR, CASSANDRA_PID, CASSANDRA_OUT, CASSANDRA_ERR))
def Stop(vm):
  """Stops Cassandra on 'vm'."""
  if vm.OS_TYPE == os_types.JUJU:
    # The Juju charm manages the Cassandra service itself.
    return

  # ignore_failure: the process may already be dead or the PID file missing.
  vm.RemoteCommand('kill $(cat {0})'.format(CASSANDRA_PID),
                   ignore_failure=True)
def IsRunning(vm):
  """Returns a boolean indicating whether Cassandra is running on 'vm'.

  Args:
    vm: VirtualMachine. The VM to check.

  Returns:
    bool. True if a process with the PID recorded in CASSANDRA_PID is alive.
  """
  cassandra_pid = vm.RemoteCommand(
      'cat {0} || true'.format(CASSANDRA_PID))[0].strip()
  if not cassandra_pid:
    # No PID file: Cassandra was never started (or its state was cleaned up).
    return False

  try:
    # 'kill -0' checks for process existence without sending a signal.
    vm.RemoteCommand('kill -0 {0}'.format(cassandra_pid))
    return True
  except errors.VirtualMachine.RemoteCommandError:
    # RemoteCommand returns a (stdout, stderr) tuple; log only stdout of the
    # 'cat' commands (the captured startup logs), not the tuple repr.
    # logging.warning: 'warn' is a deprecated alias.
    logging.warning('%s: Cassandra is not running. '
                    'Startup STDOUT:\n%s\n\nSTDERR:\n%s',
                    vm,
                    vm.RemoteCommand('cat ' + CASSANDRA_OUT)[0],
                    vm.RemoteCommand('cat ' + CASSANDRA_ERR)[0])
    return False
def CleanNode(vm):
  """Remove Cassandra data from 'vm'.

  Args:
    vm: VirtualMachine. VM to clean.
  """
  if vm.OS_TYPE == os_types.JUJU:
    # Juju-managed deployments own their data layout; leave it alone.
    return

  # Data lives on the scratch disk (see the 'data_path' template variable
  # set in Configure).
  data_path = posixpath.join(vm.GetScratchDir(), 'cassandra')
  vm.RemoteCommand('rm -rf {0}'.format(data_path))
def _StartCassandraIfNotRunning(vm):
  """Starts Cassandra on 'vm' if not currently running.

  Used by StartCluster to retry nodes that failed to join.
  """
  if not IsRunning(vm):
    logging.info('Retrying starting cassandra on %s', vm)
    Start(vm)
def GetCassandraCliPath(vm):
  """Returns the path of the cassandra-cli binary on 'vm'."""
  juju_managed = vm.OS_TYPE == os_types.JUJU
  if juju_managed:
    # The cassandra charm installs its own CLI binary; use that instead of
    # the copy that would live under CASSANDRA_DIR.
    return '/usr/bin/cassandra-cli'

  return posixpath.join(CASSANDRA_DIR, 'bin', 'cassandra-cli')
def GetCassandraStressPath(vm):
  """Returns the path of the cassandra-stress binary on 'vm'."""
  juju_managed = vm.OS_TYPE == os_types.JUJU
  if juju_managed:
    # The cassandra-stress charm installs its own binary; use that instead
    # of the copy that would live under CASSANDRA_DIR.
    return '/usr/bin/cassandra-stress'

  return posixpath.join(CASSANDRA_DIR, 'tools', 'bin', 'cassandra-stress')
def GetNumberOfNodesUp(vm):
  """Gets the number of VMs which are up in a Cassandra cluster.

  Args:
    vm: VirtualMachine. The VM to use to check the cluster status.

  Returns:
    int. Count of nodes that 'nodetool status' reports as Up/Normal ("UN").
  """
  vms_up = vm.RemoteCommand(
      '{0} status | grep -c "^UN"'.format(NODETOOL))[0].strip()
  return int(vms_up)
def StartCluster(seed_vm, vms):
  """Starts a Cassandra cluster.

  Starts a Cassandra cluster, first starting 'seed_vm', then remaining VMs in
  'vms'.

  Args:
    seed_vm: VirtualMachine. Machine which will function as the sole seed. It
        will be started before all other VMs.
    vms: list of VirtualMachines. VMs *other than* seed_vm which should be
        started.

  Raises:
    ValueError: If Cassandra fails to start on the seed VM.
    IOError: If the cluster does not come up within CLUSTER_START_TRIES.
  """
  if seed_vm.OS_TYPE == os_types.JUJU:
    # Juju automatically configures and starts the Cassandra cluster.
    return

  vm_count = len(vms) + 1

  # Cassandra setup
  logging.info('Starting seed VM %s', seed_vm)
  Start(seed_vm)
  logging.info('Waiting %ds for seed to start', NODE_START_SLEEP)
  time.sleep(NODE_START_SLEEP)
  # 'range' rather than the Python 2-only 'xrange'; the loop is tiny.
  for i in range(5):
    if not IsRunning(seed_vm):
      # 'logging.warning': 'warn' is a deprecated alias.
      logging.warning(
          'Seed %s: Cassandra not running yet (try %d). Waiting %ds.',
          seed_vm, i, NODE_START_SLEEP)
      time.sleep(NODE_START_SLEEP)
    else:
      break
  else:
    raise ValueError('Cassandra failed to start on seed.')

  if vms:
    logging.info('Starting remaining %d nodes', len(vms))
    # Start the VMs with a small pause in between each, to allow the node to
    # join.
    # Starting Cassandra nodes fails when multiple nodes attempt to join the
    # cluster concurrently.
    for i, vm in enumerate(vms):
      time.sleep(NODE_START_SLEEP)
      logging.info('Starting non-seed VM %d/%d.', i + 1, len(vms))
      Start(vm)
  logging.info('Waiting %ds for nodes to join', CLUSTER_START_SLEEP)
  time.sleep(CLUSTER_START_SLEEP)
  for i in range(CLUSTER_START_TRIES):
    vms_up = GetNumberOfNodesUp(seed_vm)
    if vms_up == vm_count:
      logging.info('All %d nodes up!', vm_count)
      break

    logging.warning('Try %d: only %s of %s up. Restarting and sleeping %ds', i,
                    vms_up, vm_count, NODE_START_SLEEP)
    vm_util.RunThreaded(_StartCassandraIfNotRunning, vms)
    time.sleep(NODE_START_SLEEP)
  else:
    raise IOError('Failed to start Cassandra cluster.')
| 31.05178 | 79 | 0.682751 |
2b56b125fcb2cd5b2bdd734923280950dbb1595e | 9,606 | py | Python | speaker_id.py | Dodger23/SincNet | bf848e88dc8d6cbeb4484e89486ec0a4ab237cb1 | [
"MIT"
] | null | null | null | speaker_id.py | Dodger23/SincNet | bf848e88dc8d6cbeb4484e89486ec0a4ab237cb1 | [
"MIT"
] | null | null | null | speaker_id.py | Dodger23/SincNet | bf848e88dc8d6cbeb4484e89486ec0a4ab237cb1 | [
"MIT"
] | null | null | null | # speaker_id.py
# Mirco Ravanelli
# Mila - University of Montreal
# July 2018
# Description:
# This code performs a speaker_id experiments with SincNet.
# How to run it:
# python speaker_id.py --cfg=cfg/SincNet_TIMIT.cfg
import os
#import scipy.io.wavfile
import soundfile as sf
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import sys
import numpy as np
from dnn_models import MLP,flip
from dnn_models import SincNet as CNN
from data_io import ReadList,read_conf,str_to_bool
def create_batches_rnd(batch_size,data_folder,wav_lst,N_snt,wlen,lab_dict,fact_amp):
    """Build one random training minibatch of raw waveform chunks.

    Draws batch_size random sentences, cuts a random wlen-sample window
    from each, applies random amplitude scaling, and returns CUDA tensors
    (inputs, labels).
    """
    signals = np.zeros([batch_size, wlen])
    labels = np.zeros(batch_size)
    # One random sentence index and one random amplitude factor per sample.
    sentence_ids = np.random.randint(N_snt, size=batch_size)
    amp_factors = np.random.uniform(1.0 - fact_amp, 1 + fact_amp, batch_size)

    for idx in range(batch_size):
        wav_name = wav_lst[sentence_ids[idx]]
        [signal, fs] = sf.read(data_folder + wav_name)

        # Choose a random wlen-sample window inside the sentence.
        total_len = signal.shape[0]
        start = np.random.randint(total_len - wlen - 1)
        stop = start + wlen

        if len(signal.shape) == 2:
            # Stereo file: keep only the first channel.
            print('WARNING: stereo to mono: ' + data_folder + wav_name)
            signal = signal[:, 0]

        signals[idx, :] = signal[start:stop] * amp_factors[idx]
        labels[idx] = lab_dict[wav_name]

    inp = Variable(torch.from_numpy(signals).float().cuda().contiguous())
    lab = Variable(torch.from_numpy(labels).float().cuda().contiguous())
    return inp, lab
# Reading cfg file
options=read_conf()

#[data] -- dataset lists, optional pretrained model, and output location.
tr_lst=options.tr_lst
te_lst=options.te_lst
pt_file=options.pt_file
class_dict_file=options.lab_dict
data_folder=options.data_folder+'/'
output_folder=options.output_folder

#[windowing] -- sampling rate and chunk length/shift in milliseconds.
fs=int(options.fs)
cw_len=int(options.cw_len)
cw_shift=int(options.cw_shift)

#[cnn] -- per-layer settings arrive as comma-separated strings.
cnn_N_filt=list(map(int, options.cnn_N_filt.split(',')))
cnn_len_filt=list(map(int, options.cnn_len_filt.split(',')))
cnn_max_pool_len=list(map(int, options.cnn_max_pool_len.split(',')))
cnn_use_laynorm_inp=str_to_bool(options.cnn_use_laynorm_inp)
cnn_use_batchnorm_inp=str_to_bool(options.cnn_use_batchnorm_inp)
cnn_use_laynorm=list(map(str_to_bool, options.cnn_use_laynorm.split(',')))
cnn_use_batchnorm=list(map(str_to_bool, options.cnn_use_batchnorm.split(',')))
cnn_act=list(map(str, options.cnn_act.split(',')))
cnn_drop=list(map(float, options.cnn_drop.split(',')))

#[dnn] -- fully-connected feature layers on top of the CNN.
fc_lay=list(map(int, options.fc_lay.split(',')))
fc_drop=list(map(float, options.fc_drop.split(',')))
fc_use_laynorm_inp=str_to_bool(options.fc_use_laynorm_inp)
fc_use_batchnorm_inp=str_to_bool(options.fc_use_batchnorm_inp)
fc_use_batchnorm=list(map(str_to_bool, options.fc_use_batchnorm.split(',')))
fc_use_laynorm=list(map(str_to_bool, options.fc_use_laynorm.split(',')))
fc_act=list(map(str, options.fc_act.split(',')))

#[class] -- classification head settings.
class_lay=list(map(int, options.class_lay.split(',')))
class_drop=list(map(float, options.class_drop.split(',')))
class_use_laynorm_inp=str_to_bool(options.class_use_laynorm_inp)
class_use_batchnorm_inp=str_to_bool(options.class_use_batchnorm_inp)
class_use_batchnorm=list(map(str_to_bool, options.class_use_batchnorm.split(',')))
class_use_laynorm=list(map(str_to_bool, options.class_use_laynorm.split(',')))
class_act=list(map(str, options.class_act.split(',')))

#[optimization]
lr=float(options.lr)
batch_size=int(options.batch_size)
N_epochs=int(options.N_epochs)
N_batches=int(options.N_batches)
N_eval_epoch=int(options.N_eval_epoch)
seed=int(options.seed)

# training list
wav_lst_tr=ReadList(tr_lst)
snt_tr=len(wav_lst_tr)

# test list
wav_lst_te=ReadList(te_lst)
snt_te=len(wav_lst_te)

# Folder creation
# NOTE(review): the bare except swallows any os.stat failure, not just
# "missing directory" -- presumably intentional for a quick script.
try:
    os.stat(output_folder)
except:
    os.mkdir(output_folder)

# setting seed
torch.manual_seed(seed)
np.random.seed(seed)

# loss function
cost = nn.NLLLoss()

# Converting context and shift in samples
wlen=int(fs*cw_len/1000.00)
wshift=int(fs*cw_shift/1000.00)

# Batch_dev: number of chunks processed per forward pass during evaluation.
Batch_dev=128
# Feature extractor CNN (SincNet): operates directly on raw waveform chunks.
CNN_arch = {'input_dim': wlen,
            'fs': fs,
            'cnn_N_filt': cnn_N_filt,
            'cnn_len_filt': cnn_len_filt,
            'cnn_max_pool_len':cnn_max_pool_len,
            'cnn_use_laynorm_inp': cnn_use_laynorm_inp,
            'cnn_use_batchnorm_inp': cnn_use_batchnorm_inp,
            'cnn_use_laynorm':cnn_use_laynorm,
            'cnn_use_batchnorm':cnn_use_batchnorm,
            'cnn_act': cnn_act,
            'cnn_drop':cnn_drop,
            }

CNN_net=CNN(CNN_arch)
CNN_net.cuda()

# Loading label dictionary (maps wav file name -> integer speaker label).
lab_dict=np.load(class_dict_file , allow_pickle = True).item()

# First MLP: fully-connected feature layers on top of the CNN output.
DNN1_arch = {'input_dim': CNN_net.out_dim,
             'fc_lay': fc_lay,
             'fc_drop': fc_drop,
             'fc_use_batchnorm': fc_use_batchnorm,
             'fc_use_laynorm': fc_use_laynorm,
             'fc_use_laynorm_inp': fc_use_laynorm_inp,
             'fc_use_batchnorm_inp':fc_use_batchnorm_inp,
             'fc_act': fc_act,
             }

DNN1_net=MLP(DNN1_arch)
DNN1_net.cuda()

# Second MLP: classification head producing per-speaker log-probabilities.
DNN2_arch = {'input_dim':fc_lay[-1] ,
             'fc_lay': class_lay,
             'fc_drop': class_drop,
             'fc_use_batchnorm': class_use_batchnorm,
             'fc_use_laynorm': class_use_laynorm,
             'fc_use_laynorm_inp': class_use_laynorm_inp,
             'fc_use_batchnorm_inp':class_use_batchnorm_inp,
             'fc_act': class_act,
             }

DNN2_net=MLP(DNN2_arch)
DNN2_net.cuda()

# Optionally resume all three sub-networks from a checkpoint.
if pt_file!='none':
    checkpoint_load = torch.load(pt_file)
    CNN_net.load_state_dict(checkpoint_load['CNN_model_par'])
    DNN1_net.load_state_dict(checkpoint_load['DNN1_model_par'])
    DNN2_net.load_state_dict(checkpoint_load['DNN2_model_par'])

# One RMSprop optimizer per sub-network, all with the same hyperparameters.
optimizer_CNN = optim.RMSprop(CNN_net.parameters(), lr=lr,alpha=0.95, eps=1e-8)
optimizer_DNN1 = optim.RMSprop(DNN1_net.parameters(), lr=lr,alpha=0.95, eps=1e-8)
optimizer_DNN2 = optim.RMSprop(DNN2_net.parameters(), lr=lr,alpha=0.95, eps=1e-8)
# Main loop: train for N_epochs; every N_eval_epoch epochs run a full
# evaluation over the test list and checkpoint the model.
for epoch in range(N_epochs):

    test_flag=0
    CNN_net.train()
    DNN1_net.train()
    DNN2_net.train()

    loss_sum=0
    err_sum=0

    # Training: N_batches random minibatches per epoch.
    for i in range(N_batches):

        [inp,lab]=create_batches_rnd(batch_size,data_folder,wav_lst_tr,snt_tr,wlen,lab_dict,0.2)
        pout=DNN2_net(DNN1_net(CNN_net(inp)))

        pred=torch.max(pout,dim=1)[1]
        loss = cost(pout, lab.long())
        err = torch.mean((pred!=lab.long()).float())

        optimizer_CNN.zero_grad()
        optimizer_DNN1.zero_grad()
        optimizer_DNN2.zero_grad()

        loss.backward()
        optimizer_CNN.step()
        optimizer_DNN1.step()
        optimizer_DNN2.step()

        loss_sum=loss_sum+loss.detach()
        err_sum=err_sum+err.detach()

    loss_tot=loss_sum/N_batches
    err_tot=err_sum/N_batches

    # Full Validation new
    if epoch%N_eval_epoch==0:

        CNN_net.eval()
        DNN1_net.eval()
        DNN2_net.eval()
        test_flag=1
        loss_sum=0
        err_sum=0
        err_sum_snt=0

        with torch.no_grad():
            # Evaluate each test sentence by sliding a wlen window with
            # stride wshift and classifying every chunk.
            for i in range(snt_te):

                [signal, fs] = sf.read(data_folder+wav_lst_te[i])

                signal=torch.from_numpy(signal).float().cuda().contiguous()
                lab_batch=lab_dict[wav_lst_te[i]]

                # split signals into chunks
                beg_samp=0
                end_samp=wlen

                N_fr=int((signal.shape[0]-wlen)/(wshift))

                sig_arr=torch.zeros([Batch_dev,wlen]).float().cuda().contiguous()
                lab= Variable((torch.zeros(N_fr+1)+lab_batch).cuda().contiguous().long())
                pout=Variable(torch.zeros(N_fr+1,class_lay[-1]).float().cuda().contiguous())
                count_fr=0
                count_fr_tot=0
                # Fill sig_arr with chunks; flush through the network every
                # Batch_dev chunks.
                while end_samp<signal.shape[0]:
                    sig_arr[count_fr,:]=signal[beg_samp:end_samp]
                    beg_samp=beg_samp+wshift
                    end_samp=beg_samp+wlen
                    count_fr=count_fr+1
                    count_fr_tot=count_fr_tot+1
                    if count_fr==Batch_dev:
                        inp=Variable(sig_arr)
                        pout[count_fr_tot-Batch_dev:count_fr_tot,:]=DNN2_net(DNN1_net(CNN_net(inp)))
                        count_fr=0
                        sig_arr=torch.zeros([Batch_dev,wlen]).float().cuda().contiguous()

                # Flush any remaining partial batch of chunks.
                if count_fr>0:
                    inp=Variable(sig_arr[0:count_fr])
                    pout[count_fr_tot-count_fr:count_fr_tot,:]=DNN2_net(DNN1_net(CNN_net(inp)))

                # Chunk-level loss/error.
                pred=torch.max(pout,dim=1)[1]
                loss = cost(pout, lab.long())
                err = torch.mean((pred!=lab.long()).float())

                # Sentence-level decision: sum chunk scores, take the argmax.
                [val,best_class]=torch.max(torch.sum(pout,dim=0),0)
                err_sum_snt=err_sum_snt+(best_class!=lab[0]).float()

                loss_sum=loss_sum+loss.detach()
                err_sum=err_sum+err.detach()

            err_tot_dev_snt=err_sum_snt/snt_te
            loss_tot_dev=loss_sum/snt_te
            err_tot_dev=err_sum/snt_te

        print("epoch %i, loss_tr=%f err_tr=%f loss_te=%f err_te=%f err_te_snt=%f" % (epoch+1, loss_tot,err_tot,loss_tot_dev,err_tot_dev,err_tot_dev_snt))

        # Append results to a log file and checkpoint all three networks.
        with open(output_folder+"/res.res", "a") as res_file:
            res_file.write("epoch %i, loss_tr=%f err_tr=%f loss_te=%f err_te=%f err_te_snt=%f\n" % (epoch+1, loss_tot,err_tot,loss_tot_dev,err_tot_dev,err_tot_dev_snt))

        checkpoint={'CNN_model_par': CNN_net.state_dict(),
                    'DNN1_model_par': DNN1_net.state_dict(),
                    'DNN2_model_par': DNN2_net.state_dict(),
                    }
        torch.save(checkpoint,output_folder+'/model_raw.pkl')

    else:
        print("epoch %i, loss_tr=%f err_tr=%f" % (epoch+1, loss_tot,err_tot))
| 28.087719 | 163 | 0.706642 |
081f27c4b0326aa1f19355d021c99164785418c5 | 3,548 | py | Python | elasticquery/dsl.py | Fizzadar/ElasticQuery | b9640623fac206b93ed9f56519c99c3a35916aca | [
"MIT"
] | 86 | 2015-02-17T00:54:58.000Z | 2022-03-29T02:27:01.000Z | elasticquery/dsl.py | Fizzadar/ElasticQuery | b9640623fac206b93ed9f56519c99c3a35916aca | [
"MIT"
] | 19 | 2015-02-26T12:41:29.000Z | 2021-11-02T17:17:15.000Z | elasticquery/dsl.py | Fizzadar/ElasticQuery | b9640623fac206b93ed9f56519c99c3a35916aca | [
"MIT"
] | 24 | 2015-03-27T09:07:05.000Z | 2022-01-12T08:45:59.000Z | # ElasticQuery
# File: elasticquery/filterquery.py
# Desc: The base queryfilter/aggregate classes & metaclasses
from .dsl_util import make_struct, unroll_definitions, unroll_struct
class MetaQuery(type):
    '''
    Metaclass mapping attributes to dsl objects on Filter/Query getattr.

    Any attribute access on the class (e.g. ``Query.term``) that matches a
    key in ``cls._definitions`` returns a factory callable which builds an
    instance of the class from the matching definition.
    '''
    def __init__(cls, name, bases, d):
        super(MetaQuery, cls).__init__(name, bases, d)
        # Pre-process the class's DSL definitions once at class creation.
        unroll_definitions(cls._definitions)

    def __getattr__(cls, key):
        # presumably guards against test collectors probing the class --
        # TODO confirm
        if key == '__test__':
            return None

        if key not in cls._definitions:
            raise cls._exception(key)

        # Generates a new class object with a struct based on the definitions
        return lambda *args, **kwargs: cls(
            key,
            make_struct(cls._definitions[key], *args, **kwargs),
        )
class MetaAggregate(MetaQuery):
    '''
    Modified MetaQuery.MetaAggregate getattr to handle aggregate names.
    '''
    def __getattr__(cls, key):
        # presumably guards against test collectors probing the class --
        # TODO confirm
        if key == '__test__':
            return None

        if key not in cls._definitions:
            raise cls._exception(key)

        # args[0] is the aggregate's name; the remaining args/kwargs
        # populate the struct for the definition selected by 'key'.
        return lambda *args, **kwargs: cls(
            key,
            args[0],
            make_struct(cls._definitions[key], *args[1:], **kwargs),
        )
class MetaSuggester(MetaQuery):
    '''
    Modified MetaQuery.MetaSuggester getattr to handle suggester names and text.
    '''
    def __getattr__(cls, key):
        # presumably guards against test collectors probing the class --
        # TODO confirm
        if key == '__test__':
            return None

        if key not in cls._definitions:
            raise cls._exception(key)

        # args[0] is the suggester's name, args[1] the text to suggest on;
        # the remaining args/kwargs populate the struct.
        return lambda *args, **kwargs: cls(
            key,
            args[0],
            args[1],
            make_struct(cls._definitions[key], *args[2:], **kwargs),
        )
class BaseQuery(object):
    '''
    The base class which represents a Filter/Query struct.
    '''

    _struct = None
    _dsl_type = None

    def __init__(self, dsl_type, struct):
        self._dsl_type = dsl_type
        self._struct = struct

    def dict(self):
        '''Serialise this query to a plain dict.'''
        # Handle reserved Python keyword alternatives (from_, or_)
        dsl_type = self._dsl_type
        if dsl_type.endswith('_'):
            dsl_type = dsl_type[:-1]

        return {dsl_type: unroll_struct(self._struct)}
class BaseAggregate(BaseQuery):
    '''
    Modified BaseQuery to handle aggregate name storage.
    '''

    _name = None

    def __init__(self, dsl_type, name, struct):
        self._dsl_type = dsl_type
        self._struct = struct
        self._name = name
        self._aggs = []

    def dict(self):
        '''Serialise this aggregate (and any nested aggregates) to a dict.'''
        body = {self._dsl_type: unroll_struct(self._struct)}
        struct = {self._name: body}

        if self._aggs:
            nested = {}
            for sub_aggregate in self._aggs:
                nested.update(sub_aggregate.dict())
            body['aggregations'] = nested

        return struct

    def aggregate(self, *aggregates):
        '''Attach nested aggregations; returns self so calls can chain.'''
        self._aggs.extend(aggregates)
        return self
class BaseSuggester(BaseQuery):
    '''
    Modified BaseQuery to handle suggester name & text storage.
    '''

    _name = None

    def __init__(self, dsl_type, name, text, struct):
        self._dsl_type = dsl_type
        self._struct = struct
        self._name = name
        self._text = text
        self._suggs = []

    def dict(self):
        '''Serialise this suggester to a plain dict.'''
        return {
            self._name: {
                'text': self._text,
                self._dsl_type: unroll_struct(self._struct),
            },
        }
| 23.496689 | 90 | 0.576381 |
4521f4b953f6654b4933860699a78546335dcbb1 | 2,364 | py | Python | var/spack/repos/builtin/packages/r-enrichplot/package.py | LiamBindle/spack | e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2,360 | 2017-11-06T08:47:01.000Z | 2022-03-31T14:45:33.000Z | var/spack/repos/builtin/packages/r-enrichplot/package.py | LiamBindle/spack | e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 13,838 | 2017-11-04T07:49:45.000Z | 2022-03-31T23:38:39.000Z | var/spack/repos/builtin/packages/r-enrichplot/package.py | LiamBindle/spack | e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1,793 | 2017-11-04T07:45:50.000Z | 2022-03-30T14:31:53.000Z | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class REnrichplot(RPackage):
    """Visualization of Functional Enrichment Result

    The 'enrichplot' package implements several visualization methods for
    interpreting functional enrichment results obtained from ORA or GSEA
    analysis. All the visualization methods are developed based on 'ggplot2'
    graphics."""

    homepage = "https://bioconductor.org/packages/enrichplot"
    git = "https://git.bioconductor.org/packages/enrichplot.git"

    # Versions pinned to Bioconductor release commits.
    version('1.10.2', commit='77ee04f60a07cc31151f8f47f8ee64f3a43c9760')
    version('1.4.0', commit='6ffe5d9c5dbe5cbea29f2e0941595475bbbcea0e')
    version('1.2.0', commit='2eeaafb571d35a106eba8ae7df014f3201066e8b')
    version('1.0.2', commit='ba7726fa0d4b581b7514dcbb04889cdbdd75ff29')

    # Runtime/build dependencies; 'when' constraints track how the R
    # package's DESCRIPTION changed across releases.
    depends_on('r@3.4.0:', type=('build', 'run'))
    depends_on('r@3.5.0:', when='@1.10.2:', type=('build', 'run'))
    depends_on('r-cowplot', type=('build', 'run'))
    depends_on('r-dose', type=('build', 'run'))
    depends_on('r-dose@3.5.1:', when='@:1.4.0', type=('build', 'run'))
    depends_on('r-ggplot2', type=('build', 'run'))
    depends_on('r-ggraph', type=('build', 'run'))
    depends_on('r-igraph', type=('build', 'run'))
    depends_on('r-plyr', when='@1.10.2:', type=('build', 'run'))
    depends_on('r-purrr', when='@1.2.0:', type=('build', 'run'))
    depends_on('r-rcolorbrewer', when='@1.2.0:', type=('build', 'run'))
    depends_on('r-reshape2', type=('build', 'run'))
    depends_on('r-scatterpie', when='@1.10.2:', type=('build', 'run'))
    depends_on('r-shadowtext', when='@1.10.2:', type=('build', 'run'))
    depends_on('r-gosemsim', type=('build', 'run'))
    depends_on('r-magrittr', when='@1.10.2:', type=('build', 'run'))
    # Dependencies only needed by the older releases.
    depends_on('r-ggridges', when='@:1.4.0', type=('build', 'run'))
    depends_on('r-upsetr', when='@:1.4.0', type=('build', 'run'))
    depends_on('r-annotationdbi', when='@:1.4.0', type=('build', 'run'))
    depends_on('r-europepmc', when='@1.2.0:1.4.0', type=('build', 'run'))
    depends_on('r-ggplotify', when='@1.2.0:1.4.0', type=('build', 'run'))
    depends_on('r-gridextra', when='@1.2.0:1.4.0', type=('build', 'run'))
46f6bc686e621c3aec09ceffd6d3a36c56a8e6e1 | 163 | py | Python | {{cookiecutter.repo_name}}/run.py | Jswig/cookiecutter-minimal-ml | c4c4059701638f0d32c1374b9f9d98c192410cc1 | [
"MIT"
] | 1 | 2020-04-19T02:56:21.000Z | 2020-04-19T02:56:21.000Z | {{cookiecutter.repo_name}}/run.py | Jswig/cookiecutter-flexible-ml | c4c4059701638f0d32c1374b9f9d98c192410cc1 | [
"MIT"
] | 1 | 2020-05-06T02:33:16.000Z | 2020-05-09T22:56:09.000Z | {{cookiecutter.repo_name}}/run.py | Jswig/cookiecutter-minimal-ml | c4c4059701638f0d32c1374b9f9d98c192410cc1 | [
"MIT"
] | 1 | 2020-07-11T23:20:02.000Z | 2020-07-11T23:20:02.000Z | # import files from src as needed here
def build_project():
# your code to run the full analysis here
pass
if __name__ == "__main__":
build_project() | 20.375 | 45 | 0.693252 |
189b4138bbd20789f96d9fc11cf199db46108ca6 | 23 | py | Python | src/dcn_v2/__init__.py | Hzx66666/FairMOT_PS | c0b2ef18cd712ebd2512ce73672667a72a9d4f04 | [
"MIT"
] | null | null | null | src/dcn_v2/__init__.py | Hzx66666/FairMOT_PS | c0b2ef18cd712ebd2512ce73672667a72a9d4f04 | [
"MIT"
] | null | null | null | src/dcn_v2/__init__.py | Hzx66666/FairMOT_PS | c0b2ef18cd712ebd2512ce73672667a72a9d4f04 | [
"MIT"
] | null | null | null | from .dcn_v2 import DCN | 23 | 23 | 0.826087 |
0acf32e7ec9b650949edc9e869284bc7f7ef7115 | 917 | py | Python | python/runtime/pai/__init__.py | bobmayuze/sqlflow | 8e51c19eed656d506ffa9240024b8e12010cbd01 | [
"Apache-2.0"
] | null | null | null | python/runtime/pai/__init__.py | bobmayuze/sqlflow | 8e51c19eed656d506ffa9240024b8e12010cbd01 | [
"Apache-2.0"
] | null | null | null | python/runtime/pai/__init__.py | bobmayuze/sqlflow | 8e51c19eed656d506ffa9240024b8e12010cbd01 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The SQLFlow Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from runtime.pai.submitter_evaluate import submit_pai_evaluate as evaluate # noqa
from runtime.pai.submitter_explain import submit_pai_explain as explain # noqa
from runtime.pai.submitter_predict import submit_pai_predict as predict # noqa
from runtime.pai.submitter_train import submit_pai_train as train # noqa
| 50.944444 | 82 | 0.798255 |
e84cdfb1511c7482d3a414af11ee71b116e42afa | 393 | py | Python | backend/chat_user_profile/api/v1/urls.py | crowdbotics-apps/agape-centre-29820 | 25ff06c33043baa5a8a65148e60498000f079048 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/chat_user_profile/api/v1/urls.py | crowdbotics-apps/agape-centre-29820 | 25ff06c33043baa5a8a65148e60498000f079048 | [
"FTL",
"AML",
"RSA-MD"
] | 20 | 2021-08-22T20:25:03.000Z | 2021-10-06T00:05:44.000Z | backend/chat_user_profile/api/v1/urls.py | crowdbotics-apps/kp3-29820 | 25ff06c33043baa5a8a65148e60498000f079048 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | from django.urls import path, include
from rest_framework.routers import DefaultRouter
from .viewsets import ContactViewSet, ProfileViewSet, VerificationCodeViewSet
router = DefaultRouter()
router.register("verificationcode", VerificationCodeViewSet)
router.register("contact", ContactViewSet)
router.register("profile", ProfileViewSet)
urlpatterns = [
path("", include(router.urls)),
]
| 30.230769 | 77 | 0.80916 |
773ea18760a1527725d3b5c48b2edbee2f731fac | 1,930 | py | Python | lib/utils/visualize.py | penghouwen/CDARTS | 7dddc8d5db4ed343979ed3687c6adfc39dfce284 | [
"MIT"
] | 21 | 2020-06-19T01:05:38.000Z | 2020-08-11T02:15:03.000Z | lib/utils/visualize.py | penghouwen/CDARTS | 7dddc8d5db4ed343979ed3687c6adfc39dfce284 | [
"MIT"
] | 1 | 2020-07-11T17:01:07.000Z | 2020-07-11T17:01:07.000Z | lib/utils/visualize.py | penghouwen/CDARTS | 7dddc8d5db4ed343979ed3687c6adfc39dfce284 | [
"MIT"
] | 1 | 2020-11-02T02:43:20.000Z | 2020-11-02T02:43:20.000Z | """ Network architecture visualizer using graphviz """
import sys
from graphviz import Digraph
import lib.utils.genotypes as gt
def plot(genotype, file_path, caption=None):
    """ make DAG plot and save to file_path as .png

    Args:
        genotype: iterable of per-node edge lists; each edge is an
            (op_name, input_index) pair where indices 0/1 are the two cell
            inputs and i>=2 refers to intermediate node i-2.
        file_path: output path (graphviz appends the .png extension).
        caption: optional caption rendered under the image.
    """
    edge_attr = {
        'fontsize': '20',
        'fontname': 'times'
    }
    node_attr = {
        'style': 'filled',
        'shape': 'rect',
        'align': 'center',
        'fontsize': '20',
        'height': '0.5',
        'width': '0.5',
        'penwidth': '2',
        'fontname': 'times'
    }
    g = Digraph(
        format='png',
        edge_attr=edge_attr,
        node_attr=node_attr,
        engine='dot')
    # Lay the DAG out left-to-right.
    g.body.extend(['rankdir=LR'])

    # input nodes: the outputs of the two previous cells.
    g.node("c_{k-2}", fillcolor='darkseagreen2')
    g.node("c_{k-1}", fillcolor='darkseagreen2')

    # intermediate nodes
    n_nodes = len(genotype)
    for i in range(n_nodes):
        g.node(str(i), fillcolor='lightblue')

    # One labelled edge per (operation, input) pair of each node.
    for i, edges in enumerate(genotype):
        for op, j in edges:
            if j == 0:
                u = "c_{k-2}"
            elif j == 1:
                u = "c_{k-1}"
            else:
                u = str(j-2)

            v = str(i)
            g.edge(u, v, label=op, fillcolor="gray")

    # output node: concatenation of all intermediate nodes.
    g.node("c_{k}", fillcolor='palegoldenrod')
    for i in range(n_nodes):
        g.edge(str(i), "c_{k}", fillcolor="gray")

    # add image caption
    if caption:
        g.attr(label=caption, overlap='false', fontsize='20', fontname='times')

    g.render(file_path, view=False)
if __name__ == '__main__':
    # Usage: python visualize.py GENOTYPE
    if len(sys.argv) != 2:
        raise ValueError("usage:\n python {} GENOTYPE".format(sys.argv[0]))

    genotype_str = sys.argv[1]
    try:
        genotype = gt.from_str(genotype_str)
    except AttributeError:
        # gt.from_str evaluates the string against the genotypes module;
        # an unknown name surfaces as AttributeError.
        raise ValueError("Cannot parse {}".format(genotype_str))

    # Render both cell types of the genotype.
    plot(genotype.normal, "normal")
    plot(genotype.reduce, "reduction")
a1ad8fba8f950b892d44f12924929e267f039853 | 1,410 | py | Python | Moderate/A Pile Of Bricks/main.py | AstrorEnales/CodeEval | eae0fb471d27d3a83d544ff4a4651ed1a2076930 | [
"MIT"
] | null | null | null | Moderate/A Pile Of Bricks/main.py | AstrorEnales/CodeEval | eae0fb471d27d3a83d544ff4a4651ed1a2076930 | [
"MIT"
] | null | null | null | Moderate/A Pile Of Bricks/main.py | AstrorEnales/CodeEval | eae0fb471d27d3a83d544ff4a4651ed1a2076930 | [
"MIT"
] | null | null | null | import sys
def parseCoords(x):
    """Parse a "[a,b,...]" coordinate string into a list of ints."""
    trimmed = x.strip('[').strip(']')
    return [int(part) for part in trimmed.split(',')]
def size2(a, b):
    """Return the sorted [small, large] extents of the 2-D box spanned by a and b."""
    dx = abs(a[0] - b[0])
    dy = abs(a[1] - b[1])
    return sorted([dx, dy])
def size3(a, b):
    """Return the three sorted face extents ((dx,dy), (dx,dz), (dy,dz))
    of the 3-D box spanned by opposite corners a and b."""
    dx = abs(a[0] - b[0])
    dy = abs(a[1] - b[1])
    dz = abs(a[2] - b[2])
    return [sorted([dx, dy]), sorted([dx, dz]), sorted([dy, dz])]
# Read the puzzle file named on the command line; each line describes a hole
# (two 2-D corners) and a list of bricks "(index [x,y,z] [x,y,z])" separated
# by ';'.  For each line, print the sorted indices of bricks that fit through
# the hole, or '-' when none fit.
# Fix: use a context manager so the file is closed even if parsing raises.
with open(sys.argv[1], 'r') as lines:
    for line in lines:
        line = line.replace('\n', '').replace('\r', '')
        if len(line) > 0:
            hole, bricks = line.split('|')
            holecoords = [parseCoords(x) for x in hole.split(' ')]
            holesize = size2(holecoords[0], holecoords[1])
            bricks = [[y for y in x.strip('(').strip(')').split(' ')] for x in bricks.split(';')]
            bricks = [[int(x[0]), parseCoords(x[1]), parseCoords(x[2])] for x in bricks]
            passed = []
            for brick in bricks:
                # A brick fits if any of its three faces fits the hole
                # (both faces and hole extents are sorted small-to-large).
                faces = size3(brick[1], brick[2])
                if any(f[0] <= holesize[0] and f[1] <= holesize[1] for f in faces):
                    passed.append(brick[0])
            print(','.join([str(x) for x in sorted(passed)]) if len(passed) > 0 else '-')
| 37.105263 | 94 | 0.485816 |
e3bc0493daeca34c625af621a67a751ac2ab4694 | 574 | py | Python | CNN Visualization/Visualizing and Understanding Convolutional Networks/notebooks/utils.py | KushajveerSingh/pytorch_projects | ba70b662897a7483230c7e3ea04428df2a5fa3b8 | [
"MIT"
] | 19 | 2019-06-04T20:19:24.000Z | 2022-03-14T03:15:54.000Z | CNN Visualization/Visualizing and Understanding Convolutional Networks/notebooks/utils.py | KushajveerSingh/pytorch_projects | ba70b662897a7483230c7e3ea04428df2a5fa3b8 | [
"MIT"
] | null | null | null | CNN Visualization/Visualizing and Understanding Convolutional Networks/notebooks/utils.py | KushajveerSingh/pytorch_projects | ba70b662897a7483230c7e3ea04428df2a5fa3b8 | [
"MIT"
] | 12 | 2019-06-06T12:56:39.000Z | 2021-12-30T11:54:08.000Z | import json
import torch
def decode_predictions(preds, top=5, class_index=None):
    """Map per-sample class-score tensors to (wnid, label, score) tuples.

    Parameters
    ----------
    preds : iterable of 1-D torch tensors, one score vector per sample.
    top : number of highest-scoring classes to keep per sample.
    class_index : optional mapping str(class id) -> [wnid, label].
        When None, the index is loaded from the original hard-coded path
        (kept for backward compatibility; pass the dict to avoid the
        machine-specific dependency).

    Returns
    -------
    list of per-sample lists of (wnid, label, score) tuples, ordered by
    descending score.
    """
    if class_index is None:
        # NOTE(review): absolute path is machine-specific — prefer passing
        # class_index explicitly.
        with open('/home/kushaj/Desktop/Data/imagenet_class_index.json') as f:
            class_index = json.load(f)

    results = []
    for pred in preds:
        top_value, top_indices = torch.topk(pred, top)
        # The original computed this list twice; the first pass (indexing
        # pred[i] directly) was dead code and has been removed.
        result = [tuple(class_index[str(i.item())]) + (j.item(),)
                  for (i, j) in zip(top_indices, top_value)]
        results.append(result)
    return results
4edf42b518c8f4492b07b5cf2b53e4ce44588287 | 11,524 | py | Python | statsmodels/genmod/tests/test_bayes_mixed_glm.py | josef-pkt/statsmodels | 9eebbfa000b9f7ca853247efbced99f4ddaa088c | [
"BSD-3-Clause"
] | 20 | 2015-01-28T21:52:59.000Z | 2022-01-24T01:24:26.000Z | statsmodels/genmod/tests/test_bayes_mixed_glm.py | josef-pkt/statsmodels | 9eebbfa000b9f7ca853247efbced99f4ddaa088c | [
"BSD-3-Clause"
] | 6 | 2015-08-28T16:59:03.000Z | 2019-04-12T22:29:01.000Z | statsmodels/genmod/tests/test_bayes_mixed_glm.py | josef-pkt/statsmodels | 9eebbfa000b9f7ca853247efbced99f4ddaa088c | [
"BSD-3-Clause"
] | 28 | 2015-04-01T20:02:25.000Z | 2021-07-03T00:09:28.000Z | import numpy as np
from statsmodels.genmod.bayes_mixed_glm import (
BinomialBayesMixedGLM, PoissonBayesMixedGLM)
import pandas as pd
from scipy import sparse
from numpy.testing import assert_allclose
from scipy.optimize import approx_fprime
def gen_simple_logit(nc, cs, s):
    """Simulate data for a logistic mixed GLM with one variance component.

    Parameters
    ----------
    nc : number of groups (clusters)
    cs : cluster size (observations per group)
    s : standard deviation of the group random effects

    Returns
    -------
    y : binary outcomes, length nc*cs
    exog_fe : fixed-effects design matrix, shape (nc*cs, 2)
    exog_vc : group-indicator random-effects matrix, shape (nc*cs, nc)
    ident : variance-component labels (all zero: one shared variance)
    """
    np.random.seed(3799)
    exog_vc = np.kron(np.eye(nc), np.ones((cs, 1)))
    exog_fe = np.random.normal(size=(nc*cs, 2))
    vc = s*np.random.normal(size=nc)
    lp = np.dot(exog_fe, np.r_[1, -1]) + np.dot(exog_vc, vc)
    pr = 1 / (1 + np.exp(-lp))
    y = 1*(np.random.uniform(size=nc*cs) < pr)
    # np.int was removed in NumPy 1.24; use the builtin int instead.
    ident = np.zeros(nc, dtype=int)
    return y, exog_fe, exog_vc, ident
def gen_simple_poisson(nc, cs, s):
    """Simulate data for a Poisson mixed GLM with one variance component.

    Parameters
    ----------
    nc : number of groups (clusters)
    cs : cluster size (observations per group)
    s : standard deviation of the group random effects

    Returns
    -------
    y : Poisson counts, length nc*cs
    exog_fe : fixed-effects design matrix, shape (nc*cs, 2)
    exog_vc : group-indicator random-effects matrix, shape (nc*cs, nc)
    ident : variance-component labels (all zero: one shared variance)
    """
    np.random.seed(3799)
    exog_vc = np.kron(np.eye(nc), np.ones((cs, 1)))
    exog_fe = np.random.normal(size=(nc*cs, 2))
    vc = s*np.random.normal(size=nc)
    lp = np.dot(exog_fe, np.r_[0.1, -0.1]) + np.dot(exog_vc, vc)
    r = np.exp(lp)
    y = np.random.poisson(r)
    # np.int was removed in NumPy 1.24; use the builtin int instead.
    ident = np.zeros(nc, dtype=int)
    return y, exog_fe, exog_vc, ident
def gen_crossed_logit(nc, cs, s1, s2):
    """Simulate data for a logistic mixed GLM with two crossed random factors.

    Parameters
    ----------
    nc : number of levels of each crossed factor
    cs : number of replicates
    s1, s2 : standard deviations of the two sets of random effects

    Returns
    -------
    y : binary outcomes, length nc*cs
    exog_fe : fixed-effects design matrix, shape (nc*cs, 1)
    exog_vc : indicator matrix for both factors, shape (nc*cs, 2*nc)
    ident : variance-component labels (0 for the first factor, 1 for the second)
    """
    np.random.seed(3799)
    a = np.kron(np.eye(nc), np.ones((cs, 1)))
    b = np.kron(np.ones((cs, 1)), np.eye(nc))
    exog_vc = np.concatenate((a, b), axis=1)
    exog_fe = np.random.normal(size=(nc*cs, 1))
    vc = s1 * np.random.normal(size=2*nc)
    vc[nc:] *= s2 / s1
    lp = np.dot(exog_fe, np.r_[-0.5]) + np.dot(exog_vc, vc)
    pr = 1 / (1 + np.exp(-lp))
    y = 1*(np.random.uniform(size=nc*cs) < pr)
    # np.int was removed in NumPy 1.24; use the builtin int instead.
    ident = np.zeros(2*nc, dtype=int)
    ident[nc:] = 1
    return y, exog_fe, exog_vc, ident
def gen_crossed_poisson(nc, cs, s1, s2):
    """Simulate data for a Poisson mixed GLM with two crossed random factors.

    Parameters
    ----------
    nc : number of levels of each crossed factor
    cs : number of replicates
    s1, s2 : standard deviations of the two sets of random effects

    Returns
    -------
    y : Poisson counts, length nc*cs
    exog_fe : fixed-effects design matrix, shape (nc*cs, 1)
    exog_vc : indicator matrix for both factors, shape (nc*cs, 2*nc)
    ident : variance-component labels (0 for the first factor, 1 for the second)
    """
    np.random.seed(3799)
    a = np.kron(np.eye(nc), np.ones((cs, 1)))
    b = np.kron(np.ones((cs, 1)), np.eye(nc))
    exog_vc = np.concatenate((a, b), axis=1)
    exog_fe = np.random.normal(size=(nc*cs, 1))
    vc = s1 * np.random.normal(size=2*nc)
    vc[nc:] *= s2 / s1
    lp = np.dot(exog_fe, np.r_[-0.5]) + np.dot(exog_vc, vc)
    r = np.exp(lp)
    y = np.random.poisson(r)
    # np.int was removed in NumPy 1.24; use the builtin int instead.
    ident = np.zeros(2*nc, dtype=int)
    ident[nc:] = 1
    return y, exog_fe, exog_vc, ident
def gen_crossed_logit_pandas(nc, cs, s1, s2):
    """Simulate crossed-factor logistic data as a pandas DataFrame.

    Parameters
    ----------
    nc : number of levels of each crossed factor
    cs : number of replicates
    s1, s2 : standard deviations of the 'a' and 'b' random effects

    Returns
    -------
    DataFrame with columns fe (constant 1), a, b (factor codes) and y
    (binary outcome), with nc*cs rows.
    """
    np.random.seed(3799)
    a = np.kron(np.arange(nc), np.ones(cs))
    b = np.kron(np.ones(cs), np.arange(nc))
    fe = np.ones(nc * cs)
    vc = np.zeros(nc * cs)
    for i in np.unique(a):
        ii = np.flatnonzero(a == i)
        vc[ii] += s1*np.random.normal()
    for i in np.unique(b):
        ii = np.flatnonzero(b == i)
        vc[ii] += s2*np.random.normal()
    lp = -0.5 * fe + vc
    pr = 1 / (1 + np.exp(-lp))
    y = 1*(np.random.uniform(size=nc*cs) < pr)
    # The original also built an `ident` array (with the removed np.int
    # dtype) that was never returned; that dead code has been dropped.
    df = pd.DataFrame({"fe": fe, "a": a, "b": b, "y": y})
    return df
def test_simple_logit_map():
    """MAP fit of a simple binomial mixed GLM has a vanishing gradient."""
    endog, fe, vc, ident = gen_simple_logit(10, 10, 2)
    model = BinomialBayesMixedGLM(
        endog, fe, sparse.csr_matrix(vc), ident, vcp_p=0.5)
    fit = model.fit_map()
    # At the posterior mode the log-posterior gradient should be ~0.
    grad = model.logposterior_grad(fit.params)
    assert_allclose(grad, np.zeros_like(fit.params), atol=1e-3)
def test_simple_poisson_map():
    """MAP fit of a simple Poisson mixed GLM: zero gradient, reproducible mode."""
    endog, fe, vc, ident = gen_simple_poisson(10, 10, 0.2)
    vc = sparse.csr_matrix(vc)
    model_a = PoissonBayesMixedGLM(endog, fe, vc, ident, vcp_p=0.5)
    fit_a = model_a.fit_map()
    assert_allclose(model_a.logposterior_grad(fit_a.params),
                    np.zeros_like(fit_a.params), atol=1e-3)
    # Refitting an identical model must reproduce the same posterior mode.
    model_b = PoissonBayesMixedGLM(endog, fe, vc, ident, vcp_p=0.5)
    fit_b = model_b.fit_map()
    assert_allclose(fit_a.params, fit_b.params, atol=1e-4)
def test_crossed_logit_map():
    """MAP fit of a crossed binomial mixed GLM has a vanishing gradient."""
    endog, fe, vc, ident = gen_crossed_logit(10, 10, 1, 2)
    model = BinomialBayesMixedGLM(
        endog, fe, sparse.csr_matrix(vc), ident, vcp_p=0.5)
    fit = model.fit_map()
    grad = model.logposterior_grad(fit.params)
    assert_allclose(grad, np.zeros_like(fit.params), atol=1e-4)
def test_crossed_poisson_map():
    """MAP fit of a crossed Poisson mixed GLM has a vanishing gradient."""
    endog, fe, vc, ident = gen_crossed_poisson(10, 10, 1, 1)
    model = PoissonBayesMixedGLM(
        endog, fe, sparse.csr_matrix(vc), ident, vcp_p=0.5)
    fit = model.fit_map()
    grad = model.logposterior_grad(fit.params)
    assert_allclose(grad, np.zeros_like(fit.params), atol=1e-4)
def test_logit_map_crossed_formula():
    """Formula interface: MAP fit gradient vanishes and random effects match."""
    data = gen_crossed_logit_pandas(10, 10, 1, 0.5)
    vc_formulas = {"a": "0 + C(a)", "b": "0 + C(b)"}
    model = BinomialBayesMixedGLM.from_formula(
        "y ~ fe", vc_formulas, data, vcp_p=0.5)
    fit = model.fit_map()
    assert_allclose(model.logposterior_grad(fit.params),
                    np.zeros_like(fit.params), atol=1e-4)
    fit.summary()
    # Posterior means/sds of the first 'a' random effect are pinned.
    ranef = fit.random_effects("a")
    assert_allclose(ranef.iloc[0, :].values,
                    np.r_[-0.02004904, 0.094014], atol=1e-4)
def test_elbo_grad():
    """Check analytic ELBO gradients against finite differences.

    Loops over family (f: 0=binomial, 1=poisson), design (j: 0=simple,
    1=crossed) and three variational points (k: MAP params, zeros, random).
    """
    for f in range(2):
        for j in range(2):
            if f == 0:
                if j == 0:
                    y, exog_fe, exog_vc, ident = gen_simple_logit(10, 10, 2)
                else:
                    y, exog_fe, exog_vc, ident = gen_crossed_logit(
                        10, 10, 1, 2)
            elif f == 1:
                if j == 0:
                    y, exog_fe, exog_vc, ident = gen_simple_poisson(
                        10, 10, 0.5)
                else:
                    y, exog_fe, exog_vc, ident = gen_crossed_poisson(
                        10, 10, 1, 0.5)
            exog_vc = sparse.csr_matrix(exog_vc)
            if f == 0:
                glmm1 = BinomialBayesMixedGLM(y, exog_fe, exog_vc, ident,
                                              vcp_p=0.5)
            else:
                glmm1 = PoissonBayesMixedGLM(y, exog_fe, exog_vc, ident,
                                             vcp_p=0.5)
            rslt1 = glmm1.fit_map()
            for k in range(3):
                if k == 0:
                    vb_mean = rslt1.params
                    vb_sd = np.ones_like(vb_mean)
                elif k == 1:
                    vb_mean = np.zeros(len(vb_mean))
                    vb_sd = np.ones_like(vb_mean)
                else:
                    vb_mean = np.random.normal(size=len(vb_mean))
                    vb_sd = np.random.uniform(1, 2, size=len(vb_mean))
                mean_grad, sd_grad = glmm1.vb_elbo_grad(vb_mean, vb_sd)
                # Numerical gradient: ELBO as a function of the stacked
                # (mean, sd) vector, differentiated by finite differences.
                def elbo(vec):
                    n = len(vec) // 2
                    return glmm1.vb_elbo(vec[:n], vec[n:])
                x = np.concatenate((vb_mean, vb_sd))
                g1 = approx_fprime(x, elbo, 1e-5)
                n = len(x) // 2
                mean_grad_n = g1[:n]
                sd_grad_n = g1[n:]
                assert_allclose(mean_grad, mean_grad_n, atol=1e-2,
                                rtol=1e-2)
                assert_allclose(sd_grad, sd_grad_n, atol=1e-2,
                                rtol=1e-2)
def test_simple_logit_vb():
    """Fit a simple binomial model by MAP, then by VB started at the MAP
    parameters; the leading parameters of both fits are pinned."""
    y, exog_fe, exog_vc, ident = gen_simple_logit(10, 10, 0)
    exog_vc = sparse.csr_matrix(exog_vc)
    glmm1 = BinomialBayesMixedGLM(y, exog_fe, exog_vc, ident, vcp_p=0.5,
                                  fe_p=0.5)
    rslt1 = glmm1.fit_map()
    glmm2 = BinomialBayesMixedGLM(y, exog_fe, exog_vc, ident, vcp_p=0.5,
                                  fe_p=0.5)
    rslt2 = glmm2.fit_vb(rslt1.params)
    # Smoke-test the summary methods.
    rslt1.summary()
    rslt2.summary()
    assert_allclose(rslt1.params[0:5], np.r_[
        0.75330405, -0.71643228, -2.49091288, -0.00959806, 0.00450254],
        rtol=1e-4, atol=1e-4)
    assert_allclose(rslt2.params[0:5], np.r_[
        0.79338836, -0.7599833, -0.64149356, -0.24772884, 0.10775366],
        rtol=1e-4, atol=1e-4)
def test_simple_poisson_vb():
    """Fit a simple Poisson model by MAP then VB; parameters and the leading
    covariance entries of both fits are pinned."""
    y, exog_fe, exog_vc, ident = gen_simple_poisson(10, 10, 1)
    exog_vc = sparse.csr_matrix(exog_vc)
    glmm1 = PoissonBayesMixedGLM(y, exog_fe, exog_vc, ident, vcp_p=0.5)
    rslt1 = glmm1.fit_map()
    glmm2 = PoissonBayesMixedGLM(y, exog_fe, exog_vc, ident, vcp_p=0.5)
    rslt2 = glmm2.fit_vb(rslt1.params)
    # Smoke-test the summary methods.
    rslt1.summary()
    rslt2.summary()
    assert_allclose(rslt1.params[0:5], np.r_[
        -0.07233493, -0.06706505, -0.47159649, 1.12575122, -1.02442201],
        rtol=1e-4, atol=1e-4)
    assert_allclose(rslt1.cov_params.flat[0:5], np.r_[
        0.00790914, 0.00080666, -0.00050719, 0.00022648, 0.00046235],
        rtol=1e-4, atol=1e-4)
    assert_allclose(rslt2.params[0:5], np.r_[
        -0.07088814, -0.06373107, -0.22770786, 1.12923746, -1.26161339],
        rtol=1e-4, atol=1e-4)
    # VB cov_params is a 1-d array of variances (not a matrix).
    assert_allclose(rslt2.cov_params[0:5], np.r_[
        0.00747782, 0.0092554, 0.04508904, 0.02934488, 0.20312746],
        rtol=1e-4, atol=1e-4)
def test_crossed_logit_vb():
    """Fit a crossed binomial model by MAP then VB; parameters and leading
    covariance entries of both fits are pinned."""
    y, exog_fe, exog_vc, ident = gen_crossed_logit(10, 10, 1, 2)
    glmm1 = BinomialBayesMixedGLM(y, exog_fe, exog_vc, ident, vcp_p=0.5,
                                  fe_p=0.5)
    rslt1 = glmm1.fit_map()
    glmm2 = BinomialBayesMixedGLM(y, exog_fe, exog_vc, ident, vcp_p=0.5,
                                  fe_p=0.5)
    rslt2 = glmm2.fit_vb(mean=rslt1.params)
    # Smoke-test the summary methods.
    rslt1.summary()
    rslt2.summary()
    assert_allclose(rslt1.params[0:5], np.r_[
        -5.43073978e-01, -2.46197518e+00, -2.36582801e+00,
        -9.64030461e-03, 2.32701078e-03],
        rtol=1e-4, atol=1e-4)
    assert_allclose(rslt1.cov_params.flat[0:5], np.r_[
        4.12927123e-02, -2.04448923e-04, 4.64829219e-05,
        1.20377543e-04, -1.45003234e-04],
        rtol=1e-4, atol=1e-4)
    assert_allclose(rslt2.params[0:5], np.r_[
        -0.70834417, -0.3571011, 0.19126823, -0.36074489, 0.058976],
        rtol=1e-4, atol=1e-4)
    # VB cov_params is a 1-d array of variances (not a matrix).
    assert_allclose(rslt2.cov_params[0:5], np.r_[
        0.05212492, 0.04729656, 0.03916944, 0.25921842, 0.25782576],
        rtol=1e-4, atol=1e-4)
def test_crossed_logit_vb_formula():
    """VB fits from the formula and array interfaces must agree."""
    data = gen_crossed_logit_pandas(10, 10, 1, 2)
    vc_formulas = {"a": "0 + C(a)", "b": "0 + C(b)"}
    model_formula = BinomialBayesMixedGLM.from_formula(
        "y ~ fe", vc_formulas, data, vcp_p=0.5)
    fit_formula = model_formula.fit_vb()
    # Rebuild the same model from the design arrays the formula produced.
    model_arrays = BinomialBayesMixedGLM(model_formula.endog,
                                         model_formula.exog_fe,
                                         model_formula.exog_vc,
                                         model_formula.ident, vcp_p=0.5)
    fit_arrays = model_arrays.fit_vb()
    assert_allclose(fit_formula.params, fit_arrays.params, atol=1e-4)
    # Smoke-test the summary methods.
    fit_formula.summary()
    fit_arrays.summary()
def test_crossed_poisson_vb():
    """Fit a crossed Poisson model by MAP then VB; leading parameters of
    both fits are pinned."""
    y, exog_fe, exog_vc, ident = gen_crossed_poisson(10, 10, 1, 0.5)
    glmm1 = PoissonBayesMixedGLM(y, exog_fe, exog_vc, ident, vcp_p=0.5,
                                 fe_p=0.5)
    rslt1 = glmm1.fit_map()
    glmm2 = PoissonBayesMixedGLM(y, exog_fe, exog_vc, ident, vcp_p=0.5,
                                 fe_p=0.5)
    rslt2 = glmm2.fit_vb(mean=rslt1.params)
    # Smoke-test the summary methods.
    rslt1.summary()
    rslt2.summary()
    assert_allclose(rslt1.params[0:5], np.r_[
        -0.54855281, 0.10458834, -0.68777741, -0.01699925, 0.77200546],
        rtol=1e-4, atol=1e-4)
    assert_allclose(rslt2.params[0:5], np.r_[
        -0.54691502, 0.22297158, -0.52673802, -0.06218684, 0.74385237],
        rtol=1e-4, atol=1e-4) | 30.486772 | 76 | 0.559788 |
3abd19a46e6e61e5fd3033d66e7030cfb719dea6 | 28,496 | py | Python | sympy/stats/tests/test_stochastic_process.py | rahulmnair1997/sympy | 04c784908573c8524e98fa258b784cdd64df210f | [
"BSD-3-Clause"
] | 1 | 2021-11-16T14:29:20.000Z | 2021-11-16T14:29:20.000Z | sympy/stats/tests/test_stochastic_process.py | Sanchayan-Bhowal/sympy | c237fe7f4c1a4abcfaef0e79600e26cba4ac0a6d | [
"BSD-3-Clause"
] | null | null | null | sympy/stats/tests/test_stochastic_process.py | Sanchayan-Bhowal/sympy | c237fe7f4c1a4abcfaef0e79600e26cba4ac0a6d | [
"BSD-3-Clause"
] | null | null | null | from sympy import (S, symbols, FiniteSet, Eq, Matrix, MatrixSymbol, Float, And,
ImmutableMatrix, Ne, Lt, Gt, exp, Not, Rational, Lambda, erf,
Piecewise, factorial, Interval, oo, Contains, sqrt, pi, ceiling,
gamma, lowergamma, Sum, Range, Tuple, ImmutableDenseMatrix, Symbol)
from sympy.stats import (DiscreteMarkovChain, P, TransitionMatrixOf, E,
StochasticStateSpaceOf, variance, ContinuousMarkovChain,
BernoulliProcess, PoissonProcess, WienerProcess,
GammaProcess, sample_stochastic_process)
from sympy.stats.joint_rv import JointDistribution
from sympy.stats.joint_rv_types import JointDistributionHandmade
from sympy.stats.rv import RandomIndexedSymbol
from sympy.stats.symbolic_probability import Probability, Expectation
from sympy.testing.pytest import raises, skip, ignore_warnings
from sympy.external import import_module
from sympy.stats.frv_types import BernoulliDistribution
from sympy.stats.drv_types import PoissonDistribution
from sympy.stats.crv_types import NormalDistribution, GammaDistribution
from sympy.core.symbol import Str
def test_DiscreteMarkovChain():
    """Exercise DiscreteMarkovChain construction, probability/expectation
    queries, chain properties (absorbing/regular/limiting distribution)
    and communication classes, over numeric, symbolic and custom state
    spaces."""
    # pass only the name
    X = DiscreteMarkovChain("X")
    assert isinstance(X.state_space, Range)
    assert X.index_set == S.Naturals0
    assert isinstance(X.transition_probabilities, MatrixSymbol)
    t = symbols('t', positive=True, integer=True)
    assert isinstance(X[t], RandomIndexedSymbol)
    assert E(X[0]) == Expectation(X[0])
    raises(TypeError, lambda: DiscreteMarkovChain(1))
    raises(NotImplementedError, lambda: X(t))
    nz = Symbol('n', integer=True)
    TZ = MatrixSymbol('M', nz, nz)
    SZ = Range(nz)
    YZ = DiscreteMarkovChain('Y', SZ, TZ)
    assert P(Eq(YZ[2], 1), Eq(YZ[1], 0)) == TZ[0, 1]
    raises(ValueError, lambda: sample_stochastic_process(t))
    raises(ValueError, lambda: next(sample_stochastic_process(X)))
    # pass name and state_space
    # any hashable object should be a valid state
    # states should be valid as a tuple/set/list/Tuple/Range
    sym, rainy, cloudy, sunny = symbols('a Rainy Cloudy Sunny', real=True)
    state_spaces = [(1, 2, 3), [Str('Hello'), sym, DiscreteMarkovChain],
                    Tuple(1, exp(sym), Str('World'), sympify=False), Range(-1, 5, 2),
                    [rainy, cloudy, sunny]]
    chains = [DiscreteMarkovChain("Y", state_space) for state_space in state_spaces]
    for i, Y in enumerate(chains):
        assert isinstance(Y.transition_probabilities, MatrixSymbol)
        assert Y.state_space == state_spaces[i] or Y.state_space == FiniteSet(*state_spaces[i])
        assert Y.number_of_states == 3
        with ignore_warnings(UserWarning): # TODO: Restore tests once warnings are removed
            assert P(Eq(Y[2], 1), Eq(Y[0], 2), evaluate=False) == Probability(Eq(Y[2], 1), Eq(Y[0], 2))
        assert E(Y[0]) == Expectation(Y[0])
        raises(ValueError, lambda: next(sample_stochastic_process(Y)))
    raises(TypeError, lambda: DiscreteMarkovChain("Y", dict((1, 1))))
    Y = DiscreteMarkovChain("Y", Range(1, t, 2))
    assert Y.number_of_states == ceiling((t-1)/2)
    # pass name and transition_probabilities
    chains = [DiscreteMarkovChain("Y", trans_probs=Matrix([[]])),
              DiscreteMarkovChain("Y", trans_probs=Matrix([[0, 1], [1, 0]])),
              DiscreteMarkovChain("Y", trans_probs=Matrix([[pi, 1-pi], [sym, 1-sym]]))]
    for Z in chains:
        assert Z.number_of_states == Z.transition_probabilities.shape[0]
        assert isinstance(Z.transition_probabilities, ImmutableDenseMatrix)
    # pass name, state_space and transition_probabilities
    T = Matrix([[0.5, 0.2, 0.3],[0.2, 0.5, 0.3],[0.2, 0.3, 0.5]])
    TS = MatrixSymbol('T', 3, 3)
    Y = DiscreteMarkovChain("Y", [0, 1, 2], T)
    YS = DiscreteMarkovChain("Y", ['One', 'Two', 3], TS)
    assert YS._transient2transient() == None
    assert YS._transient2absorbing() == None
    assert Y.joint_distribution(1, Y[2], 3) == JointDistribution(Y[1], Y[2], Y[3])
    raises(ValueError, lambda: Y.joint_distribution(Y[1].symbol, Y[2].symbol))
    assert P(Eq(Y[3], 2), Eq(Y[1], 1)).round(2) == Float(0.36, 2)
    assert (P(Eq(YS[3], 2), Eq(YS[1], 1)) -
            (TS[0, 2]*TS[1, 0] + TS[1, 1]*TS[1, 2] + TS[1, 2]*TS[2, 2])).simplify() == 0
    assert P(Eq(YS[1], 1), Eq(YS[2], 2)) == Probability(Eq(YS[1], 1))
    assert P(Eq(YS[3], 3), Eq(YS[1], 1)) == TS[0, 2]*TS[1, 0] + TS[1, 1]*TS[1, 2] + TS[1, 2]*TS[2, 2]
    TO = Matrix([[0.25, 0.75, 0],[0, 0.25, 0.75],[0.75, 0, 0.25]])
    assert P(Eq(Y[3], 2), Eq(Y[1], 1) & TransitionMatrixOf(Y, TO)).round(3) == Float(0.375, 3)
    with ignore_warnings(UserWarning): ### TODO: Restore tests once warnings are removed
        assert E(Y[3], evaluate=False) == Expectation(Y[3])
        assert E(Y[3], Eq(Y[2], 1)).round(2) == Float(1.1, 3)
    TSO = MatrixSymbol('T', 4, 4)
    raises(ValueError, lambda: str(P(Eq(YS[3], 2), Eq(YS[1], 1) & TransitionMatrixOf(YS, TSO))))
    raises(TypeError, lambda: DiscreteMarkovChain("Z", [0, 1, 2], symbols('M')))
    raises(ValueError, lambda: DiscreteMarkovChain("Z", [0, 1, 2], MatrixSymbol('T', 3, 4)))
    raises(ValueError, lambda: E(Y[3], Eq(Y[2], 6)))
    raises(ValueError, lambda: E(Y[2], Eq(Y[3], 1)))
    # extended tests for probability queries
    TO1 = Matrix([[Rational(1, 4), Rational(3, 4), 0],[Rational(1, 3), Rational(1, 3), Rational(1, 3)],[0, Rational(1, 4), Rational(3, 4)]])
    assert P(And(Eq(Y[2], 1), Eq(Y[1], 1), Eq(Y[0], 0)),
            Eq(Probability(Eq(Y[0], 0)), Rational(1, 4)) & TransitionMatrixOf(Y, TO1)) == Rational(1, 16)
    assert P(And(Eq(Y[2], 1), Eq(Y[1], 1), Eq(Y[0], 0)), TransitionMatrixOf(Y, TO1)) == \
            Probability(Eq(Y[0], 0))/4
    assert P(Lt(X[1], 2) & Gt(X[1], 0), Eq(X[0], 2) &
        StochasticStateSpaceOf(X, [0, 1, 2]) & TransitionMatrixOf(X, TO1)) == Rational(1, 4)
    assert P(Lt(X[1], 2) & Gt(X[1], 0), Eq(X[0], 2) &
        StochasticStateSpaceOf(X, [None, 'None', 1]) & TransitionMatrixOf(X, TO1)) == Rational(1, 4)
    assert P(Ne(X[1], 2) & Ne(X[1], 1), Eq(X[0], 2) &
        StochasticStateSpaceOf(X, [0, 1, 2]) & TransitionMatrixOf(X, TO1)) is S.Zero
    assert P(Ne(X[1], 2) & Ne(X[1], 1), Eq(X[0], 2) &
        StochasticStateSpaceOf(X, [None, 'None', 1]) & TransitionMatrixOf(X, TO1)) is S.Zero
    assert P(And(Eq(Y[2], 1), Eq(Y[1], 1), Eq(Y[0], 0)), Eq(Y[1], 1)) == 0.1*Probability(Eq(Y[0], 0))
    # testing properties of Markov chain
    TO2 = Matrix([[S.One, 0, 0],[Rational(1, 3), Rational(1, 3), Rational(1, 3)],[0, Rational(1, 4), Rational(3, 4)]])
    TO3 = Matrix([[Rational(1, 4), Rational(3, 4), 0],[Rational(1, 3), Rational(1, 3), Rational(1, 3)],[0, Rational(1, 4), Rational(3, 4)]])
    Y2 = DiscreteMarkovChain('Y', trans_probs=TO2)
    Y3 = DiscreteMarkovChain('Y', trans_probs=TO3)
    assert Y3._transient2absorbing() == None
    raises (ValueError, lambda: Y3.fundamental_matrix())
    assert Y2.is_absorbing_chain() == True
    assert Y3.is_absorbing_chain() == False
    TO4 = Matrix([[Rational(1, 5), Rational(2, 5), Rational(2, 5)], [Rational(1, 10), S.Half, Rational(2, 5)], [Rational(3, 5), Rational(3, 10), Rational(1, 10)]])
    Y4 = DiscreteMarkovChain('Y', trans_probs=TO4)
    w = ImmutableMatrix([[Rational(11, 39), Rational(16, 39), Rational(4, 13)]])
    assert Y4.limiting_distribution == w
    assert Y4.is_regular() == True
    TS1 = MatrixSymbol('T', 3, 3)
    Y5 = DiscreteMarkovChain('Y', trans_probs=TS1)
    assert Y5.limiting_distribution(w, TO4).doit() == True
    TO6 = Matrix([[S.One, 0, 0, 0, 0],[S.Half, 0, S.Half, 0, 0],[0, S.Half, 0, S.Half, 0], [0, 0, S.Half, 0, S.Half], [0, 0, 0, 0, 1]])
    Y6 = DiscreteMarkovChain('Y', trans_probs=TO6)
    assert Y6._transient2absorbing() == ImmutableMatrix([[S.Half, 0], [0, 0], [0, S.Half]])
    assert Y6._transient2transient() == ImmutableMatrix([[0, S.Half, 0], [S.Half, 0, S.Half], [0, S.Half, 0]])
    assert Y6.fundamental_matrix() == ImmutableMatrix([[Rational(3, 2), S.One, S.Half], [S.One, S(2), S.One], [S.Half, S.One, Rational(3, 2)]])
    assert Y6.absorbing_probabilities() == ImmutableMatrix([[Rational(3, 4), Rational(1, 4)], [S.Half, S.Half], [Rational(1, 4), Rational(3, 4)]])
    # test communication_class
    # see https://drive.google.com/drive/folders/1HbxLlwwn2b3U8Lj7eb_ASIUb5vYaNIjg?usp=sharing
    # tutorial 2.pdf
    TO7 = Matrix([[0, 5, 5, 0, 0],
                  [0, 0, 0, 10, 0],
                  [5, 0, 5, 0, 0],
                  [0, 10, 0, 0, 0],
                  [0, 3, 0, 3, 4]])/10
    Y7 = DiscreteMarkovChain('Y', trans_probs=TO7)
    tuples = Y7.communication_classes()
    classes, recurrence, periods = list(zip(*tuples))
    assert classes == ([1, 3], [0, 2], [4])
    assert recurrence == (True, False, False)
    assert periods == (2, 1, 1)
    TO8 = Matrix([[0, 0, 0, 10, 0, 0],
                  [5, 0, 5, 0, 0, 0],
                  [0, 4, 0, 0, 0, 6],
                  [10, 0, 0, 0, 0, 0],
                  [0, 10, 0, 0, 0, 0],
                  [0, 0, 0, 5, 5, 0]])/10
    Y8 = DiscreteMarkovChain('Y', trans_probs=TO8)
    tuples = Y8.communication_classes()
    classes, recurrence, periods = list(zip(*tuples))
    assert classes == ([0, 3], [1, 2, 5, 4])
    assert recurrence == (True, False)
    assert periods == (2, 2)
    TO9 = Matrix([[2, 0, 0, 3, 0, 0, 3, 2, 0, 0],
                  [0, 10, 0, 0, 0, 0, 0, 0, 0, 0],
                  [0, 2, 2, 0, 0, 0, 0, 0, 3, 3],
                  [0, 0, 0, 3, 0, 0, 6, 1, 0, 0],
                  [0, 0, 0, 0, 5, 5, 0, 0, 0, 0],
                  [0, 0, 0, 0, 0, 10, 0, 0, 0, 0],
                  [4, 0, 0, 5, 0, 0, 1, 0, 0, 0],
                  [2, 0, 0, 4, 0, 0, 2, 2, 0, 0],
                  [3, 0, 1, 0, 0, 0, 0, 0, 4, 2],
                  [0, 0, 4, 0, 0, 0, 0, 0, 3, 3]])/10
    Y9 = DiscreteMarkovChain('Y', trans_probs=TO9)
    tuples = Y9.communication_classes()
    classes, recurrence, periods = list(zip(*tuples))
    assert classes == ([0, 3, 6, 7], [1], [2, 8, 9], [5], [4])
    assert recurrence == (True, True, False, True, False)
    assert periods == (1, 1, 1, 1, 1)
    # test custom state space
    Y10 = DiscreteMarkovChain('Y', [1, 2, 3], TO2)
    tuples = Y10.communication_classes()
    classes, recurrence, periods = list(zip(*tuples))
    assert classes == ([1], [2, 3])
    assert recurrence == (True, False)
    assert periods == (1, 1)
    # testing miscellaneous queries
    T = Matrix([[S.Half, Rational(1, 4), Rational(1, 4)],
                [Rational(1, 3), 0, Rational(2, 3)],
                [S.Half, S.Half, 0]])
    X = DiscreteMarkovChain('X', [0, 1, 2], T)
    assert P(Eq(X[1], 2) & Eq(X[2], 1) & Eq(X[3], 0),
    Eq(P(Eq(X[1], 0)), Rational(1, 4)) & Eq(P(Eq(X[1], 1)), Rational(1, 4))) == Rational(1, 12)
    assert P(Eq(X[2], 1) | Eq(X[2], 2), Eq(X[1], 1)) == Rational(2, 3)
    assert P(Eq(X[2], 1) & Eq(X[2], 2), Eq(X[1], 1)) is S.Zero
    assert P(Ne(X[2], 2), Eq(X[1], 1)) == Rational(1, 3)
    assert E(X[1]**2, Eq(X[0], 1)) == Rational(8, 3)
    assert variance(X[1], Eq(X[0], 1)) == Rational(8, 9)
    raises(ValueError, lambda: E(X[1], Eq(X[2], 1)))
    raises(ValueError, lambda: DiscreteMarkovChain('X', [0, 1], T))
    # testing miscellaneous queries with different state space
    X = DiscreteMarkovChain('X', ['A', 'B', 'C'], T)
    assert P(Eq(X[1], 2) & Eq(X[2], 1) & Eq(X[3], 0),
    Eq(P(Eq(X[1], 0)), Rational(1, 4)) & Eq(P(Eq(X[1], 1)), Rational(1, 4))) == Rational(1, 12)
    assert P(Eq(X[2], 1) | Eq(X[2], 2), Eq(X[1], 1)) == Rational(2, 3)
    assert P(Eq(X[2], 1) & Eq(X[2], 2), Eq(X[1], 1)) is S.Zero
    assert P(Ne(X[2], 2), Eq(X[1], 1)) == Rational(1, 3)
    a = X.state_space.args[0]
    c = X.state_space.args[2]
    assert (E(X[1] ** 2, Eq(X[0], 1)) - (a**2/3 + 2*c**2/3)).simplify() == 0
    assert (variance(X[1], Eq(X[0], 1)) - (2*(-a/3 + c/3)**2/3 + (2*a/3 - 2*c/3)**2/3)).simplify() == 0
    raises(ValueError, lambda: E(X[1], Eq(X[2], 1)))
def test_sample_stochastic_process():
    """Draws from several discrete Markov chains always land in the state space."""
    if not import_module('scipy'):
        skip('SciPy Not installed. Skip sampling tests')
    import random
    random.seed(0)
    numpy = import_module('numpy')
    if numpy:
        numpy.random.seed(0) # scipy uses numpy to sample so to set its seed
    T = Matrix([[0.5, 0.2, 0.3],[0.2, 0.5, 0.3],[0.2, 0.3, 0.5]])
    Y = DiscreteMarkovChain("Y", [0, 1, 2], T)
    for _ in range(10):
        assert next(sample_stochastic_process(Y)) in Y.state_space
    # Non-numeric states are also valid sampling targets.
    Z = DiscreteMarkovChain("Z", ['1', 1, None], T)
    for _ in range(10):
        assert next(sample_stochastic_process(Z)) in Z.state_space
    T = Matrix([[S.Half, Rational(1, 4), Rational(1, 4)],
                [Rational(1, 3), 0, Rational(2, 3)],
                [S.Half, S.Half, 0]])
    X = DiscreteMarkovChain('X', [0, 1, 2], T)
    for _ in range(10):
        assert next(sample_stochastic_process(X)) in X.state_space
    # Symbolic states (pi, oo) are supported as well.
    W = DiscreteMarkovChain('W', [1, pi, oo], T)
    for _ in range(10):
        assert next(sample_stochastic_process(W)) in W.state_space
def test_ContinuousMarkovChain():
    """Exercise ContinuousMarkovChain: limiting distribution, transition
    probabilities from a generator matrix, probability/expectation queries
    and symbolic generator/state-space handling."""
    T1 = Matrix([[S(-2), S(2), S.Zero],
                 [S.Zero, S.NegativeOne, S.One],
                 [Rational(3, 2), Rational(3, 2), S(-3)]])
    C1 = ContinuousMarkovChain('C', [0, 1, 2], T1)
    assert C1.limiting_distribution() == ImmutableMatrix([[Rational(3, 19), Rational(12, 19), Rational(4, 19)]])
    T2 = Matrix([[-S.One, S.One, S.Zero], [S.One, -S.One, S.Zero], [S.Zero, S.One, -S.One]])
    C2 = ContinuousMarkovChain('C', [0, 1, 2], T2)
    A, t = C2.generator_matrix, symbols('t', positive=True)
    # Transition matrix is the matrix exponential exp(t*A), here in closed form.
    assert C2.transition_probabilities(A)(t) == Matrix([[S.Half + exp(-2*t)/2, S.Half - exp(-2*t)/2, 0],
                    [S.Half - exp(-2*t)/2, S.Half + exp(-2*t)/2, 0],
                    [S.Half - exp(-t) + exp(-2*t)/2, S.Half - exp(-2*t)/2, exp(-t)]])
    with ignore_warnings(UserWarning): ### TODO: Restore tests once warnings are removed
        assert P(Eq(C2(1), 1), Eq(C2(0), 1), evaluate=False) == Probability(Eq(C2(1), 1), Eq(C2(0), 1))
    assert P(Eq(C2(1), 1), Eq(C2(0), 1)) == exp(-2)/2 + S.Half
    assert P(Eq(C2(1), 0) & Eq(C2(2), 1) & Eq(C2(3), 1),
                Eq(P(Eq(C2(1), 0)), S.Half)) == (Rational(1, 4) - exp(-2)/4)*(exp(-2)/2 + S.Half)
    assert P(Not(Eq(C2(1), 0) & Eq(C2(2), 1) & Eq(C2(3), 2)) |
                (Eq(C2(1), 0) & Eq(C2(2), 1) & Eq(C2(3), 2)),
                Eq(P(Eq(C2(1), 0)), Rational(1, 4)) & Eq(P(Eq(C2(1), 1)), Rational(1, 4))) is S.One
    assert E(C2(Rational(3, 2)), Eq(C2(0), 2)) == -exp(-3)/2 + 2*exp(Rational(-3, 2)) + S.Half
    assert variance(C2(Rational(3, 2)), Eq(C2(0), 1)) == ((S.Half - exp(-3)/2)**2*(exp(-3)/2 + S.Half)
                                                        + (Rational(-1, 2) - exp(-3)/2)**2*(S.Half - exp(-3)/2))
    raises(KeyError, lambda: P(Eq(C2(1), 0), Eq(P(Eq(C2(1), 1)), S.Half)))
    assert P(Eq(C2(1), 0), Eq(P(Eq(C2(5), 1)), S.Half)) == Probability(Eq(C2(1), 0))
    TS1 = MatrixSymbol('G', 3, 3)
    CS1 = ContinuousMarkovChain('C', [0, 1, 2], TS1)
    A = CS1.generator_matrix
    assert CS1.transition_probabilities(A)(t) == exp(t*A)
    C3 = ContinuousMarkovChain('C', [Symbol('0'), Symbol('1'), Symbol('2')], T2)
    assert P(Eq(C3(1), 1), Eq(C3(0), 1)) == exp(-2)/2 + S.Half
    assert P(Eq(C3(1), Symbol('1')), Eq(C3(0), Symbol('1'))) == exp(-2)/2 + S.Half
def test_BernoulliProcess():
    """Exercise BernoulliProcess: state space, joint distributions, sum
    distributions, independence of indexed variables and free symbols."""
    B = BernoulliProcess("B", p=0.6, success=1, failure=0)
    assert B.state_space == FiniteSet(0, 1)
    assert B.index_set == S.Naturals0
    assert B.success == 1
    assert B.failure == 0
    # Symbolic success/failure labels are allowed.
    X = BernoulliProcess("X", p=Rational(1,3), success='H', failure='T')
    assert X.state_space == FiniteSet('H', 'T')
    H, T = symbols("H,T")
    assert E(X[1]+X[2]*X[3]) == H**2/9 + 4*H*T/9 + H/3 + 4*T**2/9 + 2*T/3
    t, x = symbols('t, x', positive=True, integer=True)
    assert isinstance(B[t], RandomIndexedSymbol)
    raises(ValueError, lambda: BernoulliProcess("X", p=1.1, success=1, failure=0))
    raises(NotImplementedError, lambda: B(t))
    raises(IndexError, lambda: B[-3])
    assert B.joint_distribution(B[3], B[9]) == JointDistributionHandmade(Lambda((B[3], B[9]),
            Piecewise((0.6, Eq(B[3], 1)), (0.4, Eq(B[3], 0)), (0, True))
            *Piecewise((0.6, Eq(B[9], 1)), (0.4, Eq(B[9], 0)), (0, True))))
    assert B.joint_distribution(2, B[4]) == JointDistributionHandmade(Lambda((B[2], B[4]),
            Piecewise((0.6, Eq(B[2], 1)), (0.4, Eq(B[2], 0)), (0, True))
            *Piecewise((0.6, Eq(B[4], 1)), (0.4, Eq(B[4], 0)), (0, True))))
    # Test for the sum distribution of Bernoulli Process RVs
    Y = B[1] + B[2] + B[3]
    assert P(Eq(Y, 0)).round(2) == Float(0.06, 1)
    assert P(Eq(Y, 2)).round(2) == Float(0.43, 2)
    assert P(Eq(Y, 4)).round(2) == 0
    assert P(Gt(Y, 1)).round(2) == Float(0.65, 2)
    # Test for independency of each Random Indexed variable
    assert P(Eq(B[1], 0) & Eq(B[2], 1) & Eq(B[3], 0) & Eq(B[4], 1)).round(2) == Float(0.06, 1)
    assert E(2 * B[1] + B[2]).round(2) == Float(1.80, 3)
    assert E(2 * B[1] + B[2] + 5).round(2) == Float(6.80, 3)
    assert E(B[2] * B[4] + B[10]).round(2) == Float(0.96, 2)
    assert E(B[2] > 0, Eq(B[1],1) & Eq(B[2],1)).round(2) == Float(0.60,2)
    assert E(B[1]) == 0.6
    assert P(B[1] > 0).round(2) == Float(0.60, 2)
    assert P(B[1] < 1).round(2) == Float(0.40, 2)
    assert P(B[1] > 0, B[2] <= 1).round(2) == Float(0.60, 2)
    assert P(B[12] * B[5] > 0).round(2) == Float(0.36, 2)
    assert P(B[12] * B[5] > 0, B[4] < 1).round(2) == Float(0.36, 2)
    assert P(Eq(B[2], 1), B[2] > 0) == 1
    assert P(Eq(B[5], 3)) == 0
    assert P(Eq(B[1], 1), B[1] < 0) == 0
    assert P(B[2] > 0, Eq(B[2], 1)) == 1
    assert P(B[2] < 0, Eq(B[2], 1)) == 0
    assert P(B[2] > 0, B[2]==7) == 0
    assert P(B[5] > 0, B[5]) == BernoulliDistribution(0.6, 0, 1)
    raises(ValueError, lambda: P(3))
    raises(ValueError, lambda: P(B[3] > 0, 3))
    # test issue 19456
    expr = Sum(B[t], (t, 0, 4))
    expr2 = Sum(B[t], (t, 1, 3))
    expr3 = Sum(B[t]**2, (t, 1, 3))
    assert expr.doit() == B[0] + B[1] + B[2] + B[3] + B[4]
    assert expr2.doit() == Y
    assert expr3.doit() == B[1]**2 + B[2]**2 + B[3]**2
    assert B[2*t].free_symbols == {B[2*t], t}
    assert B[4].free_symbols == {B[4]}
    assert B[x*t].free_symbols == {B[x*t], x, t}
def test_PoissonProcess():
    """Exercise PoissonProcess: distribution queries over (non-)overlapping
    intervals, expectations, superposition/splitting of processes and
    textbook examples."""
    X = PoissonProcess("X", 3)
    assert X.state_space == S.Naturals0
    assert X.index_set == Interval(0, oo)
    assert X.lamda == 3
    t, d, x, y = symbols('t d x y', positive=True)
    assert isinstance(X(t), RandomIndexedSymbol)
    assert X.distribution(X(t)) == PoissonDistribution(3*t)
    raises(ValueError, lambda: PoissonProcess("X", -1))
    raises(NotImplementedError, lambda: X[t])
    raises(IndexError, lambda: X(-5))
    assert X.joint_distribution(X(2), X(3)) == JointDistributionHandmade(Lambda((X(2), X(3)),
                6**X(2)*9**X(3)*exp(-15)/(factorial(X(2))*factorial(X(3)))))
    assert X.joint_distribution(4, 6) == JointDistributionHandmade(Lambda((X(4), X(6)),
                12**X(4)*18**X(6)*exp(-30)/(factorial(X(4))*factorial(X(6)))))
    assert P(X(t) < 1) == exp(-3*t)
    assert P(Eq(X(t), 0), Contains(t, Interval.Lopen(3, 5))) == exp(-6) # exp(-2*lamda)
    res = P(Eq(X(t), 1), Contains(t, Interval.Lopen(3, 4)))
    assert res == 3*exp(-3)
    # Equivalent to P(Eq(X(t), 1))**4 because of non-overlapping intervals
    assert P(Eq(X(t), 1) & Eq(X(d), 1) & Eq(X(x), 1) & Eq(X(y), 1), Contains(t, Interval.Lopen(0, 1))
    & Contains(d, Interval.Lopen(1, 2)) & Contains(x, Interval.Lopen(2, 3))
    & Contains(y, Interval.Lopen(3, 4))) == res**4
    # Return Probability because of overlapping intervals
    assert P(Eq(X(t), 2) & Eq(X(d), 3), Contains(t, Interval.Lopen(0, 2))
    & Contains(d, Interval.Ropen(2, 4))) == \
                Probability(Eq(X(d), 3) & Eq(X(t), 2), Contains(t, Interval.Lopen(0, 2))
                & Contains(d, Interval.Ropen(2, 4)))
    raises(ValueError, lambda: P(Eq(X(t), 2) & Eq(X(d), 3),
    Contains(t, Interval.Lopen(0, 4)) & Contains(d, Interval.Lopen(3, oo)))) # no bound on d
    assert P(Eq(X(3), 2)) == 81*exp(-9)/2
    assert P(Eq(X(t), 2), Contains(t, Interval.Lopen(0, 5))) == 225*exp(-15)/2
    # Check that probability works correctly by adding it to 1
    res1 = P(X(t) <= 3, Contains(t, Interval.Lopen(0, 5)))
    res2 = P(X(t) > 3, Contains(t, Interval.Lopen(0, 5)))
    assert res1 == 691*exp(-15)
    assert (res1 + res2).simplify() == 1
    # Check Not and Or
    assert P(Not(Eq(X(t), 2) & (X(d) > 3)), Contains(t, Interval.Ropen(2, 4)) & \
            Contains(d, Interval.Lopen(7, 8))).simplify() == -18*exp(-6) + 234*exp(-9) + 1
    assert P(Eq(X(t), 2) | Ne(X(t), 4), Contains(t, Interval.Ropen(2, 4))) == 1 - 36*exp(-6)
    raises(ValueError, lambda: P(X(t) > 2, X(t) + X(d)))
    assert E(X(t)) == 3*t # property of the distribution at a given timestamp
    assert E(X(t)**2 + X(d)*2 + X(y)**3, Contains(t, Interval.Lopen(0, 1))
        & Contains(d, Interval.Lopen(1, 2)) & Contains(y, Interval.Ropen(3, 4))) == 75
    assert E(X(t)**2, Contains(t, Interval.Lopen(0, 1))) == 12
    assert E(x*(X(t) + X(d))*(X(t)**2+X(d)**2), Contains(t, Interval.Lopen(0, 1))
    & Contains(d, Interval.Ropen(1, 2))) == \
            Expectation(x*(X(d) + X(t))*(X(d)**2 + X(t)**2), Contains(t, Interval.Lopen(0, 1))
            & Contains(d, Interval.Ropen(1, 2)))
    # Value Error because of infinite time bound
    raises(ValueError, lambda: E(X(t)**3, Contains(t, Interval.Lopen(1, oo))))
    # Equivalent to E(X(t)**2) - E(X(d)**2) == E(X(1)**2) - E(X(1)**2) == 0
    assert E((X(t) + X(d))*(X(t) - X(d)), Contains(t, Interval.Lopen(0, 1))
        & Contains(d, Interval.Lopen(1, 2))) == 0
    assert E(X(2) + x*E(X(5))) == 15*x + 6
    assert E(x*X(1) + y) == 3*x + y
    assert P(Eq(X(1), 2) & Eq(X(t), 3), Contains(t, Interval.Lopen(1, 2))) == 81*exp(-6)/4
    # Superposition: the sum of two Poisson processes is Poisson.
    Y = PoissonProcess("Y", 6)
    Z = X + Y
    assert Z.lamda == X.lamda + Y.lamda == 9
    raises(ValueError, lambda: X + 5) # should be added be only PoissonProcess instance
    N, M = Z.split(4, 5)
    assert N.lamda == 4
    assert M.lamda == 5
    raises(ValueError, lambda: Z.split(3, 2)) # 2+3 != 9
    raises(ValueError, lambda :P(Eq(X(t), 0), Contains(t, Interval.Lopen(1, 3)) & Eq(X(1), 0)))
    # check if it handles queries with two random variables in one args
    res1 = P(Eq(N(3), N(5)))
    assert res1 == P(Eq(N(t), 0), Contains(t, Interval(3, 5)))
    res2 = P(N(3) > N(1))
    assert res2 == P((N(t) > 0), Contains(t, Interval(1, 3)))
    assert P(N(3) < N(1)) == 0 # condition is not possible
    res3 = P(N(3) <= N(1)) # holds only for Eq(N(3), N(1))
    assert res3 == P(Eq(N(t), 0), Contains(t, Interval(1, 3)))
    # tests from https://www.probabilitycourse.com/chapter11/11_1_2_basic_concepts_of_the_poisson_process.php
    X = PoissonProcess('X', 10) # 11.1
    assert P(Eq(X(S(1)/3), 3) & Eq(X(1), 10)) == exp(-10)*Rational(8000000000, 11160261)
    assert P(Eq(X(1), 1), Eq(X(S(1)/3), 3)) == 0
    assert P(Eq(X(1), 10), Eq(X(S(1)/3), 3)) == P(Eq(X(S(2)/3), 7))
    X = PoissonProcess('X', 2) # 11.2
    assert P(X(S(1)/2) < 1) == exp(-1)
    assert P(X(3) < 1, Eq(X(1), 0)) == exp(-4)
    assert P(Eq(X(4), 3), Eq(X(2), 3)) == exp(-4)
    X = PoissonProcess('X', 3)
    assert P(Eq(X(2), 5) & Eq(X(1), 2)) == Rational(81, 4)*exp(-6)
    # check few properties
    assert P(X(2) <= 3, X(1)>=1) == 3*P(Eq(X(1), 0)) + 2*P(Eq(X(1), 1)) + P(Eq(X(1), 2))
    assert P(X(2) <= 3, X(1) > 1) == 2*P(Eq(X(1), 0)) + 1*P(Eq(X(1), 1))
    assert P(Eq(X(2), 5) & Eq(X(1), 2)) == P(Eq(X(1), 3))*P(Eq(X(1), 2))
    assert P(Eq(X(3), 4), Eq(X(1), 3)) == P(Eq(X(2), 1))
def test_WienerProcess():
    """Check the Wiener process (Brownian motion) API: state/index sets,
    marginal and joint distributions, probability and expectation queries.
    """
    X = WienerProcess("X")
    # Brownian motion takes values on the whole real line, indexed by t >= 0.
    assert X.state_space == S.Reals
    assert X.index_set == Interval(0, oo)
    t, d, x, y = symbols('t d x y', positive=True)
    assert isinstance(X(t), RandomIndexedSymbol)
    # Marginal at time t is N(0, sqrt(t)).
    assert X.distribution(X(t)) == NormalDistribution(0, sqrt(t))
    # NOTE(review): this raises-check exercises PoissonProcess, not
    # WienerProcess — looks copy-pasted from test_PoissonProcess; confirm.
    raises(ValueError, lambda: PoissonProcess("X", -1))
    raises(NotImplementedError, lambda: X[t])
    raises(IndexError, lambda: X(-2))
    # Joint densities factor because increments are independent.
    assert X.joint_distribution(X(2), X(3)) == JointDistributionHandmade(
        Lambda((X(2), X(3)), sqrt(6)*exp(-X(2)**2/4)*exp(-X(3)**2/6)/(12*pi)))
    assert X.joint_distribution(4, 6) == JointDistributionHandmade(
        Lambda((X(4), X(6)), sqrt(6)*exp(-X(4)**2/8)*exp(-X(6)**2/12)/(24*pi)))
    assert P(X(t) < 3).simplify() == erf(3*sqrt(2)/(2*sqrt(t)))/2 + S(1)/2
    assert P(X(t) > 2, Contains(t, Interval.Lopen(3, 7))).simplify() == S(1)/2 -\
        erf(sqrt(2)/2)/2
    # Equivalent to P(X(1)>1)**4
    assert P((X(t) > 4) & (X(d) > 3) & (X(x) > 2) & (X(y) > 1),
        Contains(t, Interval.Lopen(0, 1)) & Contains(d, Interval.Lopen(1, 2))
        & Contains(x, Interval.Lopen(2, 3)) & Contains(y, Interval.Lopen(3, 4))).simplify() ==\
        (1 - erf(sqrt(2)/2))*(1 - erf(sqrt(2)))*(1 - erf(3*sqrt(2)/2))*(1 - erf(2*sqrt(2)))/16
    # Contains an overlapping interval so, return Probability
    assert P((X(t)< 2) & (X(d)> 3), Contains(t, Interval.Lopen(0, 2))
        & Contains(d, Interval.Ropen(2, 4))) == Probability((X(d) > 3) & (X(t) < 2),
        Contains(d, Interval.Ropen(2, 4)) & Contains(t, Interval.Lopen(0, 2)))
    assert str(P(Not((X(t) < 5) & (X(d) > 3)), Contains(t, Interval.Ropen(2, 4)) &
        Contains(d, Interval.Lopen(7, 8))).simplify()) == \
        '-(1 - erf(3*sqrt(2)/2))*(2 - erfc(5/2))/4 + 1'
    # Distribution has mean 0 at each timestamp
    assert E(X(t)) == 0
    assert E(x*(X(t) + X(d))*(X(t)**2+X(d)**2), Contains(t, Interval.Lopen(0, 1))
        & Contains(d, Interval.Ropen(1, 2))) == Expectation(x*(X(d) + X(t))*(X(d)**2 + X(t)**2),
        Contains(d, Interval.Ropen(1, 2)) & Contains(t, Interval.Lopen(0, 1)))
    assert E(X(t) + x*E(X(3))) == 0
def test_GammaProcess_symbolic():
    """Check the Gamma process with symbolic rate ``l`` and shape ``g``:
    distribution, joint distribution, moments and tail probability.
    """
    t, d, x, y, g, l = symbols('t d x y g l', positive=True)
    X = GammaProcess("X", l, g)
    raises(NotImplementedError, lambda: X[t])
    raises(IndexError, lambda: X(-1))
    assert isinstance(X(t), RandomIndexedSymbol)
    # Gamma process takes non-negative values.
    assert X.state_space == Interval(0, oo)
    # Marginal at time t is Gamma(shape=g*t, scale=1/l).
    assert X.distribution(X(t)) == GammaDistribution(g*t, 1/l)
    assert X.joint_distribution(5, X(3)) == JointDistributionHandmade(Lambda(
        (X(5), X(3)), l**(8*g)*exp(-l*X(3))*exp(-l*X(5))*X(3)**(3*g - 1)*X(5)**(5*g
        - 1)/(gamma(3*g)*gamma(5*g))))
    # property of the gamma process at any given timestamp
    assert E(X(t)) == g*t/l
    assert variance(X(t)).simplify() == g*t/l**2
    # Equivalent to E(2*X(1)) + E(X(1)**2) + E(X(1)**3), where E(X(1)) == g/l
    assert E(X(t)**2 + X(d)*2 + X(y)**3, Contains(t, Interval.Lopen(0, 1))
        & Contains(d, Interval.Lopen(1, 2)) & Contains(y, Interval.Ropen(3, 4))) == \
        2*g/l + (g**2 + g)/l**2 + (g**3 + 3*g**2 + 2*g)/l**3
    assert P(X(t) > 3, Contains(t, Interval.Lopen(3, 4))).simplify() == \
        1 - lowergamma(g, 3*l)/gamma(g) # equivalent to P(X(1)>3)
def test_GammaProcess_numeric():
    """Check the Gamma process with numeric parameters (lamda=1, gamma=2):
    argument validation, independence over disjoint intervals, Not/Or queries.
    """
    t, d, x, y = symbols('t d x y', positive=True)
    X = GammaProcess("X", 1, 2)
    assert X.state_space == Interval(0, oo)
    assert X.index_set == Interval(0, oo)
    assert X.lamda == 1
    assert X.gamma == 2
    # Both parameters must be positive.
    raises(ValueError, lambda: GammaProcess("X", -1, 2))
    raises(ValueError, lambda: GammaProcess("X", 0, -2))
    raises(ValueError, lambda: GammaProcess("X", -1, -2))
    # all are independent because of non-overlapping intervals
    assert P((X(t) > 4) & (X(d) > 3) & (X(x) > 2) & (X(y) > 1), Contains(t,
        Interval.Lopen(0, 1)) & Contains(d, Interval.Lopen(1, 2)) & Contains(x,
        Interval.Lopen(2, 3)) & Contains(y, Interval.Lopen(3, 4))).simplify() == \
        120*exp(-10)
    # Check working with Not and Or
    assert P(Not((X(t) < 5) & (X(d) > 3)), Contains(t, Interval.Ropen(2, 4)) &
        Contains(d, Interval.Lopen(7, 8))).simplify() == -4*exp(-3) + 472*exp(-8)/3 + 1
    assert P((X(t) > 2) | (X(t) < 4), Contains(t, Interval.Ropen(1, 4))).simplify() == \
        -643*exp(-4)/15 + 109*exp(-2)/15 + 1
    assert E(X(t)) == 2*t # E(X(t)) == gamma*t/l
    assert E(X(2) + x*E(X(5))) == 10*x + 4
| 51.344144 | 163 | 0.556113 |
efd1ffbc376c999f7eea6718dccbf15c2807af3c | 10,514 | py | Python | chromanode.py | killerstorm/ngcccbase | 30fd033835ceeecd0eafc3461bf5b4dcfff117de | [
"MIT"
] | 31 | 2015-01-25T01:59:07.000Z | 2022-03-11T02:49:53.000Z | chromanode.py | killerstorm/ngcccbase | 30fd033835ceeecd0eafc3461bf5b4dcfff117de | [
"MIT"
] | 5 | 2015-06-16T14:43:49.000Z | 2016-07-19T12:49:16.000Z | chromanode.py | jeorgen/ngcccbase | 0a7348d95353598a320e5612166402ba676c8d33 | [
"MIT"
] | 17 | 2015-02-14T15:19:49.000Z | 2019-11-28T19:17:50.000Z | #!/usr/bin/env python
import os, sys
import json
import hashlib
import httplib
import threading, time
from socket import error as SocketError
from bitcoin.rpc import JSONRPCException
import web
from coloredcoinlib import BlockchainState, ColorDefinition
# URL routing table for web.py: (path, handler-class-name) pairs.
urls = (
    '/tx', 'Tx',
    '/publish_tx', 'PublishTx',
    '/tx_blockhash', 'TxBlockhash',
    '/prefetch', 'Prefetch',
    '/blockcount', 'BlockCount',
    '/header', 'Header',
    '/chunk', 'Chunk',
    '/merkle', 'Merkle'
)
# Second CLI argument selects testnet; default is mainnet.
# (argv[1] is consumed by web.py as the listen address/port.)
testnet = False
if (len(sys.argv) > 2) and (sys.argv[2] == 'testnet'):
    testnet = True
# Local cache file of concatenated 80-byte block headers; an optional
# third CLI argument overrides the default path.
HEADERS_FILE = 'headers.testnet' if testnet else 'headers.mainnet'
if (len(sys.argv) > 3):
    HEADERS_FILE = sys.argv[3]
class ErrorThrowingRequestProcessor:
    """Mixin for request handlers: validation helpers that abort the
    request with an HTTP error when required data is missing.
    """

    def require(self, data, key, message):
        """Raise a plain-text HTTP 400 unless ``data[key]`` is present.

        Returns None when the key exists (with a non-None value).
        """
        if data.get(key, None) is None:
            headers = {"content-type": "text/plain"}
            raise web.HTTPError("400 Bad request!", headers, message)
class Tx(ErrorThrowingRequestProcessor):
    """POST /tx: look up a transaction by hash and return its raw form."""

    def POST(self):
        # data is sent in as json
        data = json.loads(web.data())
        self.require(data, 'txhash', "TX requires txhash")
        txhash = data.get('txhash')
        # NOTE(review): leftover debug output (Python 2 print statement);
        # consider replacing with proper logging.
        print txhash
        # A fresh connection per request; from_url(None, ...) uses defaults.
        blockchainstate = BlockchainState.from_url(None, testnet)
        return blockchainstate.get_raw(txhash)
class PublishTx(ErrorThrowingRequestProcessor):
    """POST /publish_tx: broadcast a raw transaction via bitcoind.

    The request body is the raw transaction itself (not JSON).  On
    failure the error text is returned in the response body instead of
    an HTTP error, so clients always receive a reply.
    """

    def POST(self):
        txdata = web.data()
        reply = None
        try:
            blockchainstate = BlockchainState.from_url(None, testnet)
            reply = blockchainstate.bitcoind.sendrawtransaction(txdata)
        except Exception as e:
            # Deliberate best-effort: report any failure in-band.
            reply = ("Error: " + str(e))
        return reply
class TxBlockhash(ErrorThrowingRequestProcessor):
    """POST /tx_blockhash: return [blockhash, in_mempool] for a txhash.

    ``blockhash`` identifies the confirming block (or None-equivalent
    per BlockchainState's contract) and ``in_mempool`` tells whether
    the transaction is still unconfirmed.
    """

    def POST(self):
        # data is sent in as json
        data = json.loads(web.data())
        self.require(data, 'txhash', "TX requires txhash")
        txhash = data.get('txhash')
        # NOTE(review): leftover debug output; consider proper logging.
        print txhash
        blockchainstate = BlockchainState.from_url(None, testnet)
        blockhash, in_mempool = blockchainstate.get_tx_blockhash(txhash)
        return json.dumps([blockhash, in_mempool])
class Prefetch(ErrorThrowingRequestProcessor):
    """POST /prefetch: walk a colored-coin ancestry graph.

    Starting from the given transaction outputs, recursively collect
    every transaction whose outputs affect them (per the supplied color
    definition), and return a dict mapping txhash -> raw transaction.
    An optional ``limit`` caps how many transactions are gathered.
    """

    def POST(self):
        blockchainstate = BlockchainState.from_url(None, testnet)
        # data is sent in as json
        data = json.loads(web.data())
        self.require(data, 'txhash', "Prefetch requires txhash")
        self.require(data, 'output_set', "Prefetch requires output_set")
        self.require(data, 'color_desc', "Prefetch requires color_desc")
        txhash = data.get('txhash')
        output_set = data.get('output_set')
        color_desc = data.get('color_desc')
        limit = data.get('limit')  # optional; falsy means unlimited

        # note the id doesn't actually matter we need to add it so
        # we have a valid color definition
        color_def = ColorDefinition.from_color_desc(9999, color_desc)

        # gather all the transactions and return them
        tx_lookup = {}

        def process(current_txhash, current_outindex):
            """For any tx out, process the colorvalues of the affecting
            inputs first and then scan that tx.
            """
            # Stop when the cap is reached or the tx was already visited.
            if limit and len(tx_lookup) > limit:
                return
            if tx_lookup.get(current_txhash):
                return
            current_tx = blockchainstate.get_tx(current_txhash)
            if not current_tx:
                return
            tx_lookup[current_txhash] = blockchainstate.get_raw(current_txhash)
            # note a genesis tx will simply have 0 affecting inputs
            inputs = set()
            inputs = inputs.union(
                color_def.get_affecting_inputs(current_tx,
                                               [current_outindex]))
            for i in inputs:
                # Depth-first recursion into ancestor transactions.
                process(i.prevout.hash, i.prevout.n)

        for oi in output_set:
            process(txhash, oi)
        return tx_lookup
class BlockCount(ErrorThrowingRequestProcessor):
    """GET /blockcount: return the current best block height as text."""

    def GET(self):
        state = BlockchainState.from_url(None, testnet)
        return str(state.get_block_count())
class DecimalEncoder(json.JSONEncoder):
    """JSON encoder that serializes decimal.Decimal values as strings.

    Any other unserializable object is handed to the base class, which
    raises TypeError as usual.
    """

    def default(self, o):
        # Imported lazily, as in the rest of this module.
        import decimal
        if not isinstance(o, decimal.Decimal):
            return super(DecimalEncoder, self).default(o)
        return str(o)
class Header(ErrorThrowingRequestProcessor):
    """POST /header: return one block header as JSON.

    The caller supplies either ``block_hash`` directly or ``height``
    (which is resolved to a hash first).  Decimal values coming back
    from the JSON-RPC layer are stringified by DecimalEncoder.
    """

    def POST(self):
        blockchainstate = BlockchainState.from_url(None, testnet)
        data = json.loads(web.data())
        block_hash = data.get('block_hash')
        if not block_hash:
            # Fall back to looking the hash up by height.
            self.require(data, 'height', "block_hash or height required")
            height = data.get('height')
            block_hash = blockchainstate.get_block_hash(height)
        block = blockchainstate.get_block(block_hash)
        return json.dumps({
            'block_height': block['height'],
            'version': block['version'],
            'prev_block_hash': block['previousblockhash'],
            'merkle_root': block['merkleroot'],
            'timestamp': block['time'],
            # 'bits' arrives as a hex string; expose it as an integer.
            'bits': int(block['bits'], 16),
            'nonce': block['nonce'],
        }, cls=DecimalEncoder)
class ChunkThread(threading.Thread):
    """Background thread that keeps HEADERS_FILE in sync with bitcoind.

    It maintains ``self.headers`` — the raw concatenation of 80-byte
    serialized block headers from genesis to the current tip — and
    rewrites HEADERS_FILE whenever new blocks arrive.  Short chain
    reorganizations are handled by truncating the tail until the
    previous-hash links match again.  ``running`` is guarded by a lock
    so is_running()/stop() are safe to call from other threads.
    """

    def __init__(self, *args, **kwargs):
        threading.Thread.__init__(self, *args, **kwargs)
        self.running = False
        self.lock = threading.Lock()
        self.headers = ''

    def is_running(self):
        """Thread-safe read of the running flag."""
        with self.lock:
            return self.running

    def stop(self):
        """Ask the polling loop to exit at its next check."""
        with self.lock:
            self.running = False

    def run(self):
        # Load whatever headers we already have cached on disk.
        self.headers = open(HEADERS_FILE, 'ab+').read()
        with self.lock:
            self.running = True
        run_time = time.time()
        while self.is_running():
            # Poll at most once per second; sleep in 50ms slices so
            # stop() is noticed promptly.
            if run_time > time.time():
                time.sleep(0.05)
                continue
            run_time = time.time() + 1
            try:
                blockchainstate = BlockchainState.from_url(None, testnet)
                height = blockchainstate.get_block_count()
                if height == self.height:
                    continue
                if height < self.height:
                    # Chain shrank (reorg): truncate to the new height.
                    self.headers = self.headers[:height*80]
                while height > self.height:
                    if not self.is_running():
                        break
                    block_height = self.height + 1
                    blockhash = blockchainstate.get_block_hash(block_height)
                    block = blockchainstate.get_block(blockhash)
                    if block_height == 0:
                        self.headers = self._header_to_string(block)
                    else:
                        prev_hash = self._hash_header(self.headers[-80:])
                        if prev_hash == block['previousblockhash']:
                            self.headers += self._header_to_string(block)
                        else:
                            # Link mismatch: drop our tip and retry, walking
                            # back until we reconnect to the active chain.
                            self.headers = self.headers[:-80]
                open(HEADERS_FILE, 'wb').write(self.headers)
            except httplib.BadStatusLine:
                pass # bad connection, try again later
            except SocketError:
                pass # bad connection, try again later
            except JSONRPCException as e:
                if e.error["code"] != -28:
                    raise # Not error we are looking for
                pass # Loading block index... , try again later

    @property
    def height(self):
        # Height of our tip; -1 when no headers are cached.
        # NOTE(review): Python 2 integer division assumed here.
        return len(self.headers)/80 - 1

    def _rev_hex(self, s):
        # Hex-decode, byte-reverse, re-encode (Python 2 str codecs).
        return s.decode('hex')[::-1].encode('hex')

    def _int_to_hex(self, i, length=1):
        # Little-endian fixed-width hex encoding of integer ``i``.
        s = hex(i)[2:].rstrip('L')
        s = "0"*(2*length - len(s)) + s
        return self._rev_hex(s)

    def _header_to_string(self, h):
        # Serialize a getblock dict into the 80-byte raw header layout:
        # version | prev_hash | merkle_root | time | bits | nonce.
        s = self._int_to_hex(h.get('version'),4) \
            + self._rev_hex(h.get('previousblockhash', "0"*64)) \
            + self._rev_hex(h.get('merkleroot')) \
            + self._int_to_hex(h.get('time'),4) \
            + self._rev_hex(h.get('bits')) \
            + self._int_to_hex(h.get('nonce'),4)
        return s.decode('hex')

    def _hash_header(self, raw_header):
        # Double-SHA256 of the raw header, hex-encoded big-endian
        # (byte-reversed), matching bitcoind's block hash display order.
        return hashlib.sha256(hashlib.sha256(raw_header).digest()).digest()[::-1].encode('hex_codec')
chunkThread = ChunkThread()  # single background header syncer; started in __main__
class Chunk(ErrorThrowingRequestProcessor):
    """POST /chunk: return one Electrum-style chunk of block headers.

    A chunk is 2016 consecutive 80-byte raw headers, read straight out
    of the locally synced HEADERS_FILE at the requested chunk index.
    """

    def POST(self):
        request = json.loads(web.data())
        self.require(request, 'index', "Chunk requires index")
        chunk_index = request.get('index')
        chunk_bytes = 2016 * 80  # 2016 headers, 80 bytes each
        with open(HEADERS_FILE, 'rb') as header_file:
            header_file.seek(chunk_index * chunk_bytes)
            return header_file.read(chunk_bytes)
class Merkle(ErrorThrowingRequestProcessor):
    """POST /merkle: compute the merkle branch proving a tx is in a block.

    Returns {"block_height", "merkle" (list of sibling hashes, leaf to
    root, hex big-endian), "pos" (the tx index in the block)} — the data
    an SPV client needs to verify inclusion against the header's root.
    """

    def POST(self):
        data = json.loads(web.data())
        self.require(data, 'txhash', "Merkle requires txhash")
        self.require(data, 'blockhash', "Merkle requires blockhash")
        txhash = data.get('txhash')
        blockhash = data.get('blockhash')
        # Display order (big-endian hex) <-> internal order (little-endian
        # bytes) conversions, plus bitcoin's double-SHA256.
        hash_decode = lambda x: x.decode('hex')[::-1]
        hash_encode = lambda x: x[::-1].encode('hex')
        Hash = lambda x: hashlib.sha256(hashlib.sha256(x).digest()).digest()
        blockchainstate = BlockchainState.from_url(None, testnet)
        b = blockchainstate.get_block(blockhash)
        tx_list = b.get('tx')
        tx_pos = tx_list.index(txhash)
        # NOTE(review): relies on Python 2 map() returning a list.
        merkle = map(hash_decode, tx_list)
        target_hash = hash_decode(txhash)
        s = []
        # Collapse the tree level by level; whenever the target node is
        # paired, record its sibling and follow the combined hash upward.
        while len(merkle) != 1:
            if len(merkle) % 2:
                # Odd level: bitcoin duplicates the last node.
                merkle.append(merkle[-1])
            n = []
            while merkle:
                new_hash = Hash(merkle[0] + merkle[1])
                if merkle[0] == target_hash:
                    s.append(hash_encode(merkle[1]))
                    target_hash = new_hash
                elif merkle[1] == target_hash:
                    s.append(hash_encode(merkle[0]))
                    target_hash = new_hash
                n.append(new_hash)
                merkle = merkle[2:]
            merkle = n
        return json.dumps({"block_height": b.get('height'), "merkle": s, "pos": tx_pos})
if __name__ == "__main__":
    import signal
    # Stop the header-sync thread cleanly on Ctrl-C, then exit.
    def sigint_handler(signum, frame):
        print ('exit chunk thread')
        chunkThread.stop()
        print ('done')
        sys.exit(1)
    signal.signal(signal.SIGINT, sigint_handler)
    # Start the background header syncer, then serve the URL table.
    chunkThread.start()
    app = web.application(urls, globals())
    app.run()
| 33.807074 | 101 | 0.573045 |
9446bb0db5c987a15f2712cfb098cfc172269636 | 6,063 | py | Python | smt/applications/tests/test_mfkplsk.py | Laurentww/smt | f124c01ffa78c04b80221dded278a20123dac742 | [
"BSD-3-Clause"
] | 354 | 2017-08-15T22:12:58.000Z | 2022-03-31T08:34:19.000Z | smt/applications/tests/test_mfkplsk.py | enjoyneer87/smt | 4a4df255b9259965439120091007f9852f41523e | [
"BSD-3-Clause"
] | 258 | 2017-08-11T15:08:40.000Z | 2022-03-30T09:54:26.000Z | smt/applications/tests/test_mfkplsk.py | enjoyneer87/smt | 4a4df255b9259965439120091007f9852f41523e | [
"BSD-3-Clause"
] | 184 | 2017-08-11T14:55:17.000Z | 2022-03-17T11:22:50.000Z | # -*- coding: utf-8 -*-
"""
Created on Mon May 07 14:20:11 2018
@author: m.meliani
Adapted to the new SMT version in March 2020 by Nathalie Bartoli
"""
import matplotlib
matplotlib.use("Agg")
import unittest
import numpy as np
import unittest
import inspect
from collections import OrderedDict
from smt.problems import Sphere, TensorProduct
from smt.sampling_methods import LHS, FullFactorial
from smt.utils.sm_test_case import SMTestCase
from smt.utils.silence import Silence
from smt.utils import compute_rms_error
from smt.surrogate_models import LS, QP, KPLS, KRG, KPLSK, GEKPLS, GENN
from smt.applications.mfk import MFK, NestedLHS
from smt.applications.mfkplsk import MFKPLSK
from copy import deepcopy
print_output = False
class TestMFKPLSK(SMTestCase):
    """Tests for the multi-fidelity KPLSK surrogate (MFKPLSK).

    The low-fidelity data is synthesized from the high-fidelity problem
    as ``2 * f(x) + 2`` on the same design points.
    """

    def setUp(self):
        # nt: training points, ne: evaluation points,
        # ndim: input dimension, n_comp: number of PLS components.
        self.nt = 100
        self.ne = 100
        self.ndim = 3
        self.n_comp = 2

    def test_mfkplsk(self):
        """Train MFKPLSK on several tensor-product problems and check
        that train/test RMS errors stay within tolerance."""
        self.problems = ["exp", "tanh", "cos"]

        for fname in self.problems:
            prob = TensorProduct(ndim=self.ndim, func=fname)
            sampling = FullFactorial(xlimits=prob.xlimits, clip=True)

            np.random.seed(0)
            xt = sampling(self.nt)
            yt = prob(xt)
            # Append the exact derivatives as extra output columns.
            for i in range(self.ndim):
                yt = np.concatenate((yt, prob(xt, kx=i)), axis=1)

            y_lf = 2 * prob(xt) + 2
            x_lf = deepcopy(xt)
            np.random.seed(1)
            xe = sampling(self.ne)
            ye = prob(xe)

            # Modif MM
            sm = MFKPLSK()
            if sm.options.is_declared("xlimits"):
                sm.options["xlimits"] = prob.xlimits
            sm.options["print_global"] = False
            # to test some options
            sm.options["eval_noise"] = True
            sm.options["optim_var"] = True

            # modif MM
            sm.options["n_comp"] = self.n_comp
            sm.options["theta0"] = [1e-2] * self.n_comp
            # NOTE(review): the two set_training_values pairs below are
            # duplicated; the second pair simply overwrites the first.
            sm.set_training_values(xt, yt[:, 0])
            sm.set_training_values(x_lf, y_lf[:, 0], name=0)
            sm.set_training_values(xt, yt[:, 0])
            sm.set_training_values(x_lf, y_lf[:, 0], name=0)

            with Silence():
                sm.train()

            t_error = compute_rms_error(sm)
            e_error = compute_rms_error(sm, xe, ye)

            self.assert_error(t_error, 0.0, 1.5)
            self.assert_error(e_error, 0.0, 1.5)

    def test_mfkplsk_derivs(self):
        """Train MFKPLSK on the Sphere problem and check the RMS error of
        the predicted derivatives along the first two dimensions."""
        # NOTE(review): this check only prints — it does not return/skip,
        # so the test body still runs when ndim < 2.
        if self.ndim < 2:
            print("To try test_mfkplsk_derivs the dimension must be greater than 1")

        prob = Sphere(ndim=self.ndim)
        sampling = LHS(xlimits=prob.xlimits)

        # Modif MM
        nt = 100
        np.random.seed(0)
        xt = sampling(nt)
        yt = prob(xt)
        dyt = {}
        for kx in range(prob.xlimits.shape[0]):
            dyt[kx] = prob(xt, kx=kx)

        y_lf = 2 * prob(xt) + 2
        x_lf = deepcopy(xt)

        np.random.seed(1)
        xe = sampling(self.ne)
        ye = prob(xe)
        dye = {}
        for kx in range(prob.xlimits.shape[0]):
            dye[kx] = prob(xe, kx=kx)

        # modif MM
        sm = MFKPLSK()

        if sm.options.is_declared("xlimits"):
            sm.options["xlimits"] = prob.xlimits
        sm.options["print_global"] = False

        # to test some options
        sm.options["eval_noise"] = False

        # modif MM
        sm.options["n_comp"] = self.n_comp
        sm.options["theta0"] = [1e-2] * self.n_comp
        sm.set_training_values(xt, yt)
        sm.set_training_values(x_lf, y_lf, name=0)

        with Silence():
            sm.train()

        t_error = compute_rms_error(sm)
        e_error = compute_rms_error(sm, xe, ye)
        e_error0 = compute_rms_error(sm, xe, dye[0], 0)
        e_error1 = compute_rms_error(sm, xe, dye[1], 1)

        # NOTE(review): ``pname`` and ``sname`` are not defined anywhere in
        # this module — enabling print_output would raise NameError.
        if print_output:
            print(
                "%8s %6s %18.9e %18.9e %18.9e %18.9e"
                % (pname[:6], sname, t_error, e_error, e_error0, e_error1)
            )

        self.assert_error(e_error0, 0.0, 1e-1)
        self.assert_error(e_error1, 0.0, 1e-1)

    @staticmethod
    def run_mfkplsk_example():
        """Documentation example: 1D two-fidelity MFKPLSK with a plot."""
        import numpy as np
        import matplotlib.pyplot as plt
        from smt.applications.mfk import MFK, NestedLHS
        from smt.applications.mfkplsk import MFKPLSK

        # low fidelity modelk
        def lf_function(x):
            import numpy as np

            return (
                0.5 * ((x * 6 - 2) ** 2) * np.sin((x * 6 - 2) * 2)
                + (x - 0.5) * 10.0
                - 5
            )

        # high fidelity model
        def hf_function(x):
            import numpy as np

            return ((x * 6 - 2) ** 2) * np.sin((x * 6 - 2) * 2)

        # Problem set up
        xlimits = np.array([[0.0, 1.0]])
        xdoes = NestedLHS(nlevel=2, xlimits=xlimits, random_state=0)
        xt_c, xt_e = xdoes(7)

        # Evaluate the HF and LF functions
        yt_e = hf_function(xt_e)
        yt_c = lf_function(xt_c)

        # choice of number of PLS components
        ncomp = 1
        sm = MFKPLSK(n_comp=ncomp, theta0=ncomp * [1.0])

        # low-fidelity dataset names being integers from 0 to level-1
        sm.set_training_values(xt_c, yt_c, name=0)
        # high-fidelity dataset without name
        sm.set_training_values(xt_e, yt_e)

        # train the model
        sm.train()

        x = np.linspace(0, 1, 101, endpoint=True).reshape(-1, 1)

        # query the outputs
        y = sm.predict_values(x)
        mse = sm.predict_variances(x)
        derivs = sm.predict_derivatives(x, kx=0)

        plt.figure()

        plt.plot(x, hf_function(x), label="reference")
        plt.plot(x, y, linestyle="-.", label="mean_gp")
        plt.scatter(xt_e, yt_e, marker="o", color="k", label="HF doe")
        plt.scatter(xt_c, yt_c, marker="*", color="g", label="LF doe")

        plt.legend(loc=0)
        plt.ylim(-10, 17)
        plt.xlim(-0.1, 1.1)
        plt.xlabel(r"$x$")
        plt.ylabel(r"$y$")

        plt.show()
if __name__ == "__main__":
unittest.main()
| 27.684932 | 84 | 0.55616 |
4c8712d3391ca9e50a19cee9ad195dc17b750552 | 650 | py | Python | imdb/items.py | LeeCrazy/scrapy-imdb | 31094ea42731b4f8b6d3fb0fb9040b25396d28b7 | [
"Apache-2.0"
] | 10 | 2016-12-27T14:47:44.000Z | 2019-12-30T11:19:27.000Z | imdb/items.py | ejibo/imdbspider | b60c393cc791524e2d78d9811cd32a07757ed778 | [
"Apache-2.0"
] | null | null | null | imdb/items.py | ejibo/imdbspider | b60c393cc791524e2d78d9811cd32a07757ed778 | [
"Apache-2.0"
] | 6 | 2017-04-18T06:52:41.000Z | 2019-04-13T13:22:11.000Z | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
from scrapy import Field, Item
class ImdbItem(Item):
    """Scrapy item holding the fields scraped for one IMDb title."""
    # define the fields for your item here like:
    video_title = Field()     # page title
    video_rating = Field()    # IMDb rating
    video_name = Field()      # primary name of the title
    video_alias = Field()     # alternative names / AKAs
    video_director = Field()
    video_actor = Field()
    video_length = Field()    # runtime
    video_language = Field()
    video_year = Field()      # release year
    video_type = Field()      # genre / category
    video_color = Field()     # color vs. black-and-white
    video_area = Field()      # country / region
    video_voice = Field()     # sound mix
    video_summary = Field()   # plot summary
    video_url = Field()       # source page URL
eb75599eb0d843504deadb6dfde802dcb8eaa28f | 11,264 | py | Python | src/pyoram/storage/block_storage_ram.py | ghackebeil/PyORAM | 53e109dfb1ecec52348a70ddc64fae65eea7490a | [
"MIT"
] | 24 | 2016-04-14T14:27:37.000Z | 2022-03-13T13:53:18.000Z | src/pyoram/storage/block_storage_ram.py | ghackebeil/PyORAM | 53e109dfb1ecec52348a70ddc64fae65eea7490a | [
"MIT"
] | 4 | 2016-03-14T04:40:23.000Z | 2016-06-01T04:37:18.000Z | src/pyoram/storage/block_storage_ram.py | ghackebeil/PyORAM | 53e109dfb1ecec52348a70ddc64fae65eea7490a | [
"MIT"
] | 4 | 2016-03-16T23:53:24.000Z | 2020-05-27T19:27:37.000Z | __all__ = ('BlockStorageRAM',)
import os
import struct
import logging
import errno
from multiprocessing.pool import ThreadPool
import pyoram
from pyoram.storage.block_storage import \
(BlockStorageInterface,
BlockStorageTypeFactory)
from pyoram.storage.block_storage_mmap import \
(BlockStorageMMap,
_BlockStorageMemoryImpl)
import tqdm
import six
from six.moves import xrange
log = logging.getLogger("pyoram")
class BlockStorageRAM(_BlockStorageMemoryImpl,
                      BlockStorageInterface):
    """
    A class implementing the block storage interface where all data is
    kept in RAM. This class uses the same storage format as
    BlockStorageFile. Thus, a block storage space can be created using
    this class and then, after saving the raw storage data to disk,
    reopened with any other class compatible with BlockStorageFile
    (and vice versa).
    """

    # Layout of the fixed-size index prefix:
    # (block_size, block_count, user_header_size, locked) — shared with
    # the mmap-backed implementation so the on-disk formats match.
    _index_struct_string = BlockStorageMMap._index_struct_string
    _index_offset = struct.calcsize(_index_struct_string)

    def __init__(self,
                 storage_data,
                 threadpool_size=None,
                 ignore_lock=False):
        """Wrap an existing bytearray laid out in block-storage format.

        Raises TypeError for non-bytearray input and IOError when the
        storage is marked locked (unless ignore_lock=True).
        """
        self._bytes_sent = 0
        self._bytes_received = 0
        self._ignore_lock = ignore_lock
        self._f = None
        self._pool = None
        self._close_pool = True
        if type(storage_data) is not bytearray:
            raise TypeError(
                "BlockStorageRAM requires input argument of type "
                "'bytearray'. Invalid input type: %s"
                % (type(storage_data)))
        self._f = storage_data
        self._block_size, self._block_count, user_header_size, locked = \
            struct.unpack(
                BlockStorageRAM._index_struct_string,
                self._f[:BlockStorageRAM._index_offset])
        if locked and (not self._ignore_lock):
            raise IOError(
                "Can not open block storage device because it is "
                "locked by another process. To ignore this check, "
                "initialize this class with the keyword 'ignore_lock' "
                "set to True.")
        self._user_header_data = bytes()
        if user_header_size > 0:
            self._user_header_data = \
                bytes(self._f[BlockStorageRAM._index_offset:\
                              (BlockStorageRAM._index_offset+user_header_size)])
        assert len(self._user_header_data) == user_header_size
        self._header_offset = BlockStorageRAM._index_offset + \
                              len(self._user_header_data)
        if not self._ignore_lock:
            # turn on the locked flag
            self._f[:BlockStorageRAM._index_offset] = \
                struct.pack(BlockStorageRAM._index_struct_string,
                            self.block_size,
                            self.block_count,
                            len(self._user_header_data),
                            True)

        # Although we do not use the threadpool we still
        # create just in case we are the first
        if threadpool_size != 0:
            self._pool = ThreadPool(threadpool_size)

    #
    # Add some methods specific to BlockStorageRAM
    #

    @staticmethod
    def fromfile(file_,
                 threadpool_size=None,
                 ignore_lock=False):
        """
        Instantiate BlockStorageRAM device from a file saved in block
        storage format. The file_ argument can be a file object or a
        string that represents a filename. If called with a file
        object, it should be opened in binary mode, and the caller is
        responsible for closing the file.

        This method returns a BlockStorageRAM instance.
        """
        close_file = False
        if not hasattr(file_, 'read'):
            file_ = open(file_, 'rb')
            close_file = True
        try:
            header_data = file_.read(BlockStorageRAM._index_offset)
            block_size, block_count, user_header_size, locked = \
                struct.unpack(
                    BlockStorageRAM._index_struct_string,
                    header_data)
            if locked and (not ignore_lock):
                raise IOError(
                    "Can not open block storage device because it is "
                    "locked by another process. To ignore this check, "
                    "call this method with the keyword 'ignore_lock' "
                    "set to True.")
            header_offset = len(header_data) + \
                            user_header_size
            # Pre-size the buffer, then fill header and payload regions.
            f = bytearray(header_offset + \
                          (block_size * block_count))
            f[:header_offset] = header_data + file_.read(user_header_size)
            f[header_offset:] = file_.read(block_size * block_count)
        finally:
            if close_file:
                file_.close()
        return BlockStorageRAM(f,
                               threadpool_size=threadpool_size,
                               ignore_lock=ignore_lock)

    def tofile(self, file_):
        """
        Dump all storage data to a file. The file_ argument can be a
        file object or a string that represents a filename. If called
        with a file object, it should be opened in binary mode, and
        the caller is responsible for closing the file.

        The method should only be called after the storage device has
        been closed to ensure that the locked flag has been set to
        False.
        """
        close_file = False
        if not hasattr(file_, 'write'):
            file_ = open(file_, 'wb')
            close_file = True
        file_.write(self._f)
        if close_file:
            file_.close()

    @property
    def data(self):
        """Access the raw bytearray"""
        return self._f

    #
    # Define BlockStorageInterface Methods
    #

    def clone_device(self):
        """Return a second handle onto the same storage buffer.

        The clone shares the underlying bytearray and thread pool, skips
        the lock check, and will not close the shared pool on close().
        """
        f = BlockStorageRAM(self._f,
                            threadpool_size=0,
                            ignore_lock=True)
        f._pool = self._pool
        f._close_pool = False
        return f

    @classmethod
    def compute_storage_size(cls, *args, **kwds):
        # Same storage format as BlockStorageMMap, so delegate.
        return BlockStorageMMap.compute_storage_size(*args, **kwds)

    @classmethod
    def setup(cls,
              storage_name,
              block_size,
              block_count,
              initialize=None,
              header_data=None,
              ignore_existing=False,
              threadpool_size=None):
        """Create and initialize a fresh in-RAM block storage space.

        ``initialize`` is an optional callable mapping a block index to
        that block's initial bytes (default: all zeros).
        """
        # We ignore the 'storage_name' argument
        # We ignore the 'ignore_existing' flag
        if (block_size <= 0) or (block_size != int(block_size)):
            raise ValueError(
                "Block size (bytes) must be a positive integer: %s"
                % (block_size))
        if (block_count <= 0) or (block_count != int(block_count)):
            raise ValueError(
                "Block count must be a positive integer: %s"
                % (block_count))
        if (header_data is not None) and \
           (type(header_data) is not bytes):
            raise TypeError(
                "'header_data' must be of type bytes. "
                "Invalid type: %s" % (type(header_data)))
        if initialize is None:
            zeros = bytes(bytearray(block_size))
            initialize = lambda i: zeros
        # create_index
        index_data = None
        if header_data is None:
            index_data = struct.pack(BlockStorageRAM._index_struct_string,
                                     block_size,
                                     block_count,
                                     0,
                                     False)
            header_data = bytes()
        else:
            index_data = struct.pack(BlockStorageRAM._index_struct_string,
                                     block_size,
                                     block_count,
                                     len(header_data),
                                     False)
        header_offset = len(index_data) + len(header_data)
        f = bytearray(header_offset + \
                      (block_size * block_count))
        f[:header_offset] = index_data + header_data
        progress_bar = tqdm.tqdm(total=block_count*block_size,
                                 desc="Initializing File Block Storage Space",
                                 unit="B",
                                 unit_scale=True,
                                 disable=not pyoram.config.SHOW_PROGRESS_BAR)
        for i in xrange(block_count):
            block = initialize(i)
            assert len(block) == block_size, \
                ("%s != %s" % (len(block), block_size))
            # NOTE(review): duplicated assignment below — harmless.
            pos_start = header_offset + i * block_size
            pos_start = header_offset + i * block_size
            pos_stop = pos_start + block_size
            f[pos_start:pos_stop] = block[:]
            progress_bar.update(n=block_size)
        progress_bar.close()
        return BlockStorageRAM(f, threadpool_size=threadpool_size)

    @property
    def header_data(self):
        # User-supplied header bytes stored after the index prefix.
        return self._user_header_data

    @property
    def block_count(self):
        return self._block_count

    @property
    def block_size(self):
        return self._block_size

    @property
    def storage_name(self):
        # RAM storage has no backing file, hence no name.
        return None

    def update_header_data(self, new_header_data):
        """Replace the user header in place; its size must not change."""
        if len(new_header_data) != len(self.header_data):
            raise ValueError(
                "The size of header data can not change.\n"
                "Original bytes: %s\n"
                "New bytes: %s" % (len(self.header_data),
                                   len(new_header_data)))
        self._user_header_data = bytes(new_header_data)
        self._f[BlockStorageRAM._index_offset:\
                (BlockStorageRAM._index_offset+len(new_header_data))] = \
            self._user_header_data

    def close(self):
        """Release the thread pool (if owned) and clear the locked flag."""
        if self._close_pool and (self._pool is not None):
            self._pool.close()
            self._pool.join()
            self._pool = None
        if not self._ignore_lock:
            # turn off the locked flag
            self._f[:BlockStorageRAM._index_offset] = \
                struct.pack(BlockStorageRAM._index_struct_string,
                            self.block_size,
                            self.block_count,
                            len(self._user_header_data),
                            False)
            self._ignore_lock = True

    #
    # We must cast from bytearray to bytes
    # when reading from a bytearray so that this
    # class works with the encryption layer.
    #

    def read_blocks(self, indices):
        return [bytes(block) for block
                in super(BlockStorageRAM, self).read_blocks(indices)]

    def yield_blocks(self, indices):
        for block in super(BlockStorageRAM, self).yield_blocks(indices):
            yield bytes(block)

    def read_block(self, i):
        return bytes(super(BlockStorageRAM, self).read_block(i))

    # Write paths are inherited unchanged from _BlockStorageMemoryImpl:
    #def write_blocks(...)
    #def write_block(...)

    @property
    def bytes_sent(self):
        return self._bytes_sent

    @property
    def bytes_received(self):
        return self._bytes_received
# Make this implementation selectable via storage type name "ram".
BlockStorageTypeFactory.register_device("ram", BlockStorageRAM)
| 35.75873 | 80 | 0.568182 |
664e0b58e3c2ac302dfc83de36d1e571a5e4bd3b | 2,471 | py | Python | setup.py | syscocloud/puppetboard | 94052a0330b1e73963504a2a2fff9dbab52b2a87 | [
"Apache-2.0"
] | null | null | null | setup.py | syscocloud/puppetboard | 94052a0330b1e73963504a2a2fff9dbab52b2a87 | [
"Apache-2.0"
] | null | null | null | setup.py | syscocloud/puppetboard | 94052a0330b1e73963504a2a2fff9dbab52b2a87 | [
"Apache-2.0"
] | null | null | null | import sys
import codecs
from setuptools.command.test import test as TestCommand
from setuptools import setup, find_packages
from puppetboard.version import __version__
# Long description is assembled from the README and the changelog.
with codecs.open('README.md', encoding='utf-8') as f:
    README = f.read()

with codecs.open('CHANGELOG.md', encoding='utf-8') as f:
    CHANGELOG = f.read()

# Runtime requirements; lines starting with '-' (e.g. '-r ...' includes)
# are skipped because they are pip directives, not package specs.
requirements = None
with open('requirements.txt', 'r') as f:
    requirements = [line.rstrip()
                    for line in f.readlines() if not line.startswith('-')]

# Test-only requirements, filtered the same way.
requirements_test = None
with open('requirements-test.txt', 'r') as f:
    requirements_test = [line.rstrip() for line in f.readlines()
                         if not line.startswith('-')]
class PyTest(TestCommand):
    """setuptools ``test`` command that delegates to pytest.

    Invoked as ``python setup.py test``; extra arguments can be passed
    with ``--pytest-args``/-a.
    """
    user_options = [('pytest-args=', 'a', "Arguments to pass to pytest")]

    def initialize_options(self):
        TestCommand.initialize_options(self)
        # Default arguments: run with coverage over the package.
        self.pytest_args = '--cov=puppetboard --cov-report=term-missing'

    def run_tests(self):
        # Imported here because pytest is only needed for this command.
        import shlex
        import pytest
        errno = pytest.main(shlex.split(self.pytest_args))
        # Propagate pytest's exit status to the shell.
        sys.exit(errno)
# Package metadata and distribution configuration.
setup(
    name='puppetboard',
    version=__version__,
    author='Vox Pupuli',
    author_email='voxpupuli@groups.io',
    packages=find_packages(),
    url='https://github.com/voxpupuli/puppetboard',
    license='Apache License 2.0',
    description='Web frontend for PuppetDB',
    include_package_data=True,
    long_description='\n'.join((README, CHANGELOG)),
    long_description_content_type='text/markdown',
    zip_safe=False,
    install_requires=requirements,
    tests_require=requirements_test,
    extras_require={'test': requirements_test},
    # Ship the requirements files so downstream builds can reuse them.
    data_files=[('requirements_for_tests', ['requirements-test.txt']),
                ('requirements_for_docker', ['requirements-docker.txt'])],
    keywords="puppet puppetdb puppetboard",
    cmdclass={'test': PyTest},
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        'Framework :: Flask',
        'Intended Audience :: System Administrators',
        'Natural Language :: English',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: POSIX',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
    ],
)
| 32.513158 | 74 | 0.654391 |
1ae57b4ca2b8267cf20b89a3d439c1e2a2cdfcea | 8,870 | py | Python | contrib/devtools/symbol-check.py | robbelouwet/Elixir | 609412402c5dd4fb9d77ae6d87505d8efd608132 | [
"MIT"
] | null | null | null | contrib/devtools/symbol-check.py | robbelouwet/Elixir | 609412402c5dd4fb9d77ae6d87505d8efd608132 | [
"MIT"
] | null | null | null | contrib/devtools/symbol-check.py | robbelouwet/Elixir | 609412402c5dd4fb9d77ae6d87505d8efd608132 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
A script to check that the executables produced by gitian only contain
certain symbols and are only linked against allowed libraries.
Example usage:
find ../gitian-builder/build -type f -executable | xargs python3 contrib/devtools/symbol-check.py
'''
import subprocess
import sys
import os
from typing import List, Optional
import lief
import pixie
# Debian 8 (Jessie) EOL: 2020. https://wiki.debian.org/DebianReleases#Production_Releases
#
# - g++ version 4.9.2 (https://packages.debian.org/search?suite=jessie&arch=any&searchon=names&keywords=g%2B%2B)
# - libc version 2.19 (https://packages.debian.org/search?suite=jessie&arch=any&searchon=names&keywords=libc6)
#
# Ubuntu 16.04 (Xenial) EOL: 2024. https://wiki.ubuntu.com/Releases
#
# - g++ version 5.3.1 (https://packages.ubuntu.com/search?keywords=g%2B%2B&searchon=names&suite=xenial§ion=all)
# - libc version 2.23.0 (https://packages.ubuntu.com/search?keywords=libc6&searchon=names&suite=xenial§ion=all)
#
# CentOS 7 EOL: 2024. https://wiki.centos.org/FAQ/General
#
# - g++ version 4.8.5 (http://mirror.centos.org/centos/7/os/x86_64/Packages/)
# - libc version 2.17 (http://mirror.centos.org/centos/7/os/x86_64/Packages/)
#
# Taking the minimum of these as our target.
#
# According to GNU ABI document (https://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html) this corresponds to:
# GCC 4.8.5: GCC_4.8.0
# (glibc) GLIBC_2_17
#
# Oldest symbol versions our release binaries may reference; derived from the
# distro survey in the comment block above (min of Debian 8 / Ubuntu 16.04 /
# CentOS 7 toolchains).
MAX_VERSIONS = {
    'GCC': (4, 8, 0),
    'GLIBC': (2, 17),
    'LIBATOMIC': (1, 0)
}
# See here for a description of _IO_stdin_used:
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=634261#109
# Ignore symbols that are exported as part of every executable
IGNORE_EXPORTS = {
    '_edata', '_end', '__end__', '_init', '__bss_start', '__bss_start__', '_bss_end__', '__bss_end__', '_fini', '_IO_stdin_used', 'stdin', 'stdout', 'stderr',
    'environ', '_environ', '__environ',
}
# Demangler binary; overridable via the CPPFILT environment variable.
CPPFILT_CMD = os.getenv('CPPFILT', '/usr/bin/c++filt')
# Allowed NEEDED libraries
ELF_ALLOWED_LIBRARIES = {
    # elixird and elixir-qt
    'libgcc_s.so.1',  # GCC base support
    'libc.so.6',  # C library
    'libpthread.so.0',  # threading
    'libm.so.6',  # math library
    'librt.so.1',  # real-time (clock)
    'libatomic.so.1',
    'ld-linux-x86-64.so.2',  # 64-bit dynamic linker
    'ld-linux.so.2',  # 32-bit dynamic linker
    'ld-linux-aarch64.so.1',  # 64-bit ARM dynamic linker
    'ld-linux-armhf.so.3',  # 32-bit ARM dynamic linker
    'ld64.so.1',  # POWER64 ABIv1 dynamic linker
    'ld64.so.2',  # POWER64 ABIv2 dynamic linker
    'ld-linux-riscv64-lp64d.so.1',  # 64-bit RISC-V dynamic linker
    # elixir-qt only
    'libxcb.so.1',  # part of X11
    'libxkbcommon.so.0',  # keyboard keymapping
    'libxkbcommon-x11.so.0',  # keyboard keymapping
    'libfontconfig.so.1',  # font support
    'libfreetype.so.6',  # font parsing
    'libdl.so.2'  # programming interface to dynamic linker
}
# Per-architecture minimum glibc symbol version; GLIBC references up to this
# version are always accepted for that machine type (see check_version()).
ARCH_MIN_GLIBC_VER = {
    pixie.EM_386: (2, 1),
    pixie.EM_X86_64: (2, 2, 5),
    pixie.EM_ARM: (2, 4),
    pixie.EM_AARCH64: (2, 17),
    pixie.EM_PPC64: (2, 17),
    pixie.EM_RISCV: (2, 27)
}
MACHO_ALLOWED_LIBRARIES = {
    # elixird and elixir-qt
    'libc++.1.dylib',  # C++ Standard Library
    'libSystem.B.dylib',  # libc, libm, libpthread, libinfo
    # elixir-qt only
    'AppKit',  # user interface
    'ApplicationServices',  # common application tasks.
    'Carbon',  # deprecated c back-compat API
    'CoreFoundation',  # low level func, data types
    'CoreGraphics',  # 2D rendering
    'CoreServices',  # operating system services
    'CoreText',  # interface for laying out text and handling fonts.
    'CoreVideo',  # video processing
    'Foundation',  # base layer functionality for apps/frameworks
    'ImageIO',  # read and write image file formats.
    'IOKit',  # user-space access to hardware devices and drivers.
    'IOSurface',  # cross process image/drawing buffers
    'libobjc.A.dylib',  # Objective-C runtime library
    'Metal',  # 3D graphics
    'Security',  # access control and authentication
    'QuartzCore',  # animation
}
PE_ALLOWED_LIBRARIES = {
    'ADVAPI32.dll',  # security & registry
    'IPHLPAPI.DLL',  # IP helper API
    'KERNEL32.dll',  # win32 base APIs
    'msvcrt.dll',  # C standard library for MSVC
    'SHELL32.dll',  # shell API
    'USER32.dll',  # user interface
    'WS2_32.dll',  # sockets
    # elixir-qt only
    'dwmapi.dll',  # desktop window manager
    'GDI32.dll',  # graphics device interface
    'IMM32.dll',  # input method editor
    'NETAPI32.dll',
    'ole32.dll',  # component object model
    'OLEAUT32.dll',  # OLE Automation API
    'SHLWAPI.dll',  # light weight shell API
    'USERENV.dll',
    'UxTheme.dll',
    'VERSION.dll',  # version checking
    'WINMM.dll',  # WinMM audio API
    'WTSAPI32.dll',
}
class CPPFilt(object):
    """Demangle C++ symbol names.

    Keeps a single long-lived pipe to the external ``c++filt`` tool
    (path taken from CPPFILT_CMD); one mangled name in, one demangled
    name out, per call.
    """

    def __init__(self):
        # Line-buffered text pipes in both directions.
        self.proc = subprocess.Popen(
            CPPFILT_CMD,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            universal_newlines=True)

    def __call__(self, mangled):
        # Feed one symbol and read back exactly one demangled line.
        self.proc.stdin.write(mangled + '\n')
        self.proc.stdin.flush()
        demangled = self.proc.stdout.readline()
        return demangled.rstrip()

    def close(self):
        # Close both pipe ends, then reap the child process.
        self.proc.stdin.close()
        self.proc.stdout.close()
        self.proc.wait()
def check_version(max_versions, version, arch) -> bool:
    """Return True when a versioned symbol reference is acceptable.

    *version* looks like ``'GLIBC_2.17'``; everything before the last
    underscore is the library tag, the rest the dotted version (a bare
    tag counts as version 0).  Unknown tags fail.  GLIBC additionally
    accepts anything up to the per-architecture floor in
    ARCH_MIN_GLIBC_VER.
    """
    if '_' in version:
        lib, _sep, ver_text = version.rpartition('_')
    else:
        lib, ver_text = version, '0'
    parsed = tuple(int(part) for part in ver_text.split('.'))
    if lib not in max_versions:
        return False
    if parsed <= max_versions[lib]:
        return True
    # Only consult the per-arch glibc floor for GLIBC tags (lazy lookup).
    return lib == 'GLIBC' and parsed <= ARCH_MIN_GLIBC_VER[arch]
def check_imported_symbols(filename) -> bool:
    """Verify every versioned dynamic import stays within MAX_VERSIONS.

    Prints each offending (demangled) symbol and returns False when any
    import requires a newer library version than our baseline platform.
    """
    elf = pixie.load(filename)
    demangle = CPPFilt()
    passed = True
    for symbol in elf.dyn_symbols:
        if not symbol.is_import:
            continue
        name = symbol.name.decode()
        if symbol.version is None:
            continue
        version = symbol.version.decode()
        if not version:
            continue
        if check_version(MAX_VERSIONS, version, elf.hdr.e_machine):
            continue
        print('{}: symbol {} from unsupported version {}'.format(filename, demangle(name), version))
        passed = False
    return passed
def check_exported_symbols(filename) -> bool:
    """Fail when the binary exports any symbol outside IGNORE_EXPORTS.

    RISC-V binaries are fully exempt (their toolchain always exports
    extra symbols).  Returns False and prints each disallowed export.
    """
    elf = pixie.load(filename)
    demangle = CPPFilt()
    passed = True
    for symbol in elf.dyn_symbols:
        if not symbol.is_export:
            continue
        name = symbol.name.decode()
        exempt = (elf.hdr.e_machine == pixie.EM_RISCV) or (name in IGNORE_EXPORTS)
        if exempt:
            continue
        print('{}: export of symbol {} not allowed'.format(filename, demangle(name)))
        passed = False
    return passed
def check_ELF_libraries(filename) -> bool:
    """Verify all DT_NEEDED entries are in the ELF allow-list."""
    passed = True
    elf = pixie.load(filename)
    for library_name in elf.query_dyn_tags(pixie.DT_NEEDED):
        # pixie hands back raw bytes for dynamic-section strings.
        assert(isinstance(library_name, bytes))
        decoded = library_name.decode()
        if decoded in ELF_ALLOWED_LIBRARIES:
            continue
        print('{}: NEEDED library {} is not allowed'.format(filename, decoded))
        passed = False
    return passed
def check_MACHO_libraries(filename) -> bool:
    """Verify every dylib/framework a Mach-O binary links is allow-listed.

    Only the basename of each install path is compared against
    MACHO_ALLOWED_LIBRARIES.
    """
    passed = True
    binary = lief.parse(filename)
    for dylib in binary.libraries:
        basename = dylib.name.split('/')[-1]
        if basename in MACHO_ALLOWED_LIBRARIES:
            continue
        print(f'{basename} is not in ALLOWED_LIBRARIES!')
        passed = False
    return passed
def check_PE_libraries(filename) -> bool:
    """Verify every DLL a PE binary imports is in PE_ALLOWED_LIBRARIES."""
    passed = True
    binary = lief.parse(filename)
    for dylib in binary.libraries:
        if dylib in PE_ALLOWED_LIBRARIES:
            continue
        print(f'{dylib} is not in ALLOWED_LIBRARIES!')
        passed = False
    return passed
# Dispatch table: binary container format -> ordered (check name, function)
# pairs executed by the __main__ driver below.  Check names are what gets
# printed when a file fails.
CHECKS = {
    'ELF': [
        ('IMPORTED_SYMBOLS', check_imported_symbols),
        ('EXPORTED_SYMBOLS', check_exported_symbols),
        ('LIBRARY_DEPENDENCIES', check_ELF_libraries)
    ],
    'MACHO': [
        ('DYNAMIC_LIBRARIES', check_MACHO_libraries)
    ],
    'PE': [
        ('DYNAMIC_LIBRARIES', check_PE_libraries)
    ]
}
def identify_executable(executable) -> Optional[str]:
    """Classify *executable* by its first four magic bytes.

    Returns 'PE' (MZ header), 'ELF' (\\x7fELF), 'MACHO' (little-endian
    64-bit Mach-O prefix \\xcf\\xfa), or None for anything unrecognised.

    Bug fixed: the original opened the module-global ``filename`` instead
    of its own parameter, so it only worked when called from the __main__
    loop below; it now honours the argument and closes the file handle.
    """
    with open(executable, 'rb') as f:
        magic = f.read(4)
    if magic.startswith(b'MZ'):
        return 'PE'
    elif magic.startswith(b'\x7fELF'):
        return 'ELF'
    elif magic.startswith(b'\xcf\xfa'):
        return 'MACHO'
    return None
if __name__ == '__main__':
    # Run every applicable check on each file given on the command line.
    # Exit status is non-zero when any file fails a check, cannot be read,
    # or has an unrecognised format - suitable for CI gating.
    retval: int = 0
    for filename in sys.argv[1:]:
        try:
            # NOTE(review): identify_executable() as written reads the
            # module-global `filename` rather than its argument, so this
            # loop variable's name is load-bearing - confirm before renaming.
            etype = identify_executable(filename)
            if etype is None:
                # NOTE(review): messages print the literal '(unknown)'
                # instead of the offending path (f-string has no
                # placeholder) - looks like redaction; verify intent.
                print(f'(unknown): unknown format')
                retval = 1
                continue
            failed: List[str] = []
            for (name, func) in CHECKS[etype]:
                if not func(filename):
                    failed.append(name)
            if failed:
                print(f'(unknown): failed {" ".join(failed)}')
                retval = 1
        except IOError:
            print(f'(unknown): cannot open')
            retval = 1
    sys.exit(retval)
| 32.851852 | 154 | 0.666404 |
5fe2c0c567b4b17bb3a6765a147b357e4bb37551 | 1,827 | py | Python | landingpages/migrations/0001_initial.py | R3v1L/django-landingpages | a4a028337be0d3fbdfa3e62ec35cd9c79bce87ef | [
"MIT"
] | null | null | null | landingpages/migrations/0001_initial.py | R3v1L/django-landingpages | a4a028337be0d3fbdfa3e62ec35cd9c79bce87ef | [
"MIT"
] | null | null | null | landingpages/migrations/0001_initial.py | R3v1L/django-landingpages | a4a028337be0d3fbdfa3e62ec35cd9c79bce87ef | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import sitetools.models.fields
class Migration(migrations.Migration):
    """Initial migration: creates the LandingPage model.

    A landing page carries SEO metadata (title/description/keywords),
    a target language, HTML content plus optional extra <head> markup,
    and the template used to render it.
    """

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='LandingPage',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(help_text='Landing page name', max_length=250, verbose_name='Name')),
                ('url', models.CharField(help_text='Landing page URL', max_length=250, verbose_name='URL')),
                ('title', models.CharField(help_text='SEO Title for this page', max_length=250, verbose_name='Title')),
                ('desc', models.TextField(help_text='SEO Description', verbose_name='Description')),
                ('keywords', models.CharField(help_text='SEO Keywords', max_length=250, verbose_name='Keywords')),
                ('language', sitetools.models.fields.LanguageField(help_text='Language this page is intended for', max_length=2, verbose_name='Language', choices=[(b'es', b'Spanish'), (b'en', b'English')])),
                ('content', sitetools.models.fields.HTMLField(help_text='Landing page contents', verbose_name='Content')),
                ('extrahead', models.TextField(help_text='Extra head content (Scripts, meta, etc.)', null=True, verbose_name='Extra head', blank=True)),
                ('template', models.CharField(default=b'landingpages/default.html', help_text='Template name to be used with this landing page', max_length=50, verbose_name='Template')),
            ],
            options={
                'verbose_name': 'Landing Page',
                'verbose_name_plural': 'Landing Pages',
            },
        ),
    ]
| 53.735294 | 207 | 0.634921 |
7647d9bcd10dceef30b546a53ed49360a462cdbb | 94,142 | py | Python | hrpro/custom.py | hereabdulla/hrpro | 5ee5db9182a8152fd23140e6490dc995b0aef743 | [
"MIT"
] | null | null | null | hrpro/custom.py | hereabdulla/hrpro | 5ee5db9182a8152fd23140e6490dc995b0aef743 | [
"MIT"
] | null | null | null | hrpro/custom.py | hereabdulla/hrpro | 5ee5db9182a8152fd23140e6490dc995b0aef743 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2017, VHRS and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe,os,base64
import requests
import datetime
import json,calendar
from datetime import datetime,timedelta,date,time
import datetime as dt
from datetime import date
from frappe.utils import cint,today,flt,date_diff,add_days,add_months,date_diff,getdate,formatdate,cint,cstr
from frappe.desk.notifications import delete_notification_count_for
from frappe import _
import xml.etree.ElementTree as ET
# from hrpro.hrpro.report.monthly_absenteesim.monthly_absenteesim import validate_if_attendance_not_applicable
from frappe.email.email_body import (replace_filename_with_cid,
get_email, inline_style_in_html, get_header)
import dateutil.parser
from dateutil.relativedelta import relativedelta
# from hrpro.update_attendance import update_att_from_shift
from json import JSONEncoder
from datetime import datetime
@frappe.whitelist()
def check_appraisal(emp, year):
    """Return *year* when the employee has a submitted self-appraisal for
    that appraisal year; otherwise returns None (implicitly)."""
    filters = {"employee_code1": emp, "appraisal_year": year, "docstatus": 1}
    if frappe.db.exists("Performance Management Self", filters):
        return year
@frappe.whitelist()
def get_previous_year_goals(emp, year):
    """Return the name of the employee's Performance Management Self record
    for *year* (any docstatus), or the string 'NA' when none exists."""
    record = frappe.db.exists(
        "Performance Management Self",
        {"employee_code1": emp, "appraisal_year": year})
    return record if record else 'NA'
@frappe.whitelist()
def fetch_att_test(from_date, to_date, employee=None, department=None, designation=None, location=None):
    # Sync attendance from the Matrix COSEC device API for every matching
    # Active employee, for each day in [from_date, to_date] (inclusive).
    # Optional employee/department/designation/location filters narrow the
    # employee set.  "Auto Present" employees are marked Present without
    # consulting the device.  Creates or amends submitted Attendance docs.
    frappe.errprint(from_date)  # NOTE(review): debug output - confirm before removing
    employees = []
    from_date = (datetime.strptime(str(from_date), '%Y-%m-%d')).date()
    to_date = (datetime.strptime(str(to_date), '%Y-%m-%d')).date()
    for preday in daterange(from_date, to_date):
        # ddmmyyyy - the date format the COSEC API expects.
        day = preday.strftime("%d%m%Y")
        # NOTE(review): shadows the imported `date` name; value is unused below.
        date = datetime.today().strftime("%Y-%m-%d")
        # Employees configured to be marked Present without device punches.
        exc = frappe.db.get_list("Auto Present Employees", fields=['employee'])
        auto_present_list = []
        for e in exc:
            auto_present_list.append(e.employee)
        # if employee and not department and not designation and not location:
        #     employees.append(employee)
        # NOTE(review): this `else` binds to the `for` above (for/else) and
        # therefore ALWAYS runs (the loop has no break); it looks like a
        # leftover from the commented-out `if` - verify intent.
        else:
            filters = frappe._dict({
                'status': 'Active',
                'date_of_joining': ('<=', preday)
            })
            if employee:
                filters.update({"employee_number": employee})
            if department:
                filters.update({"department": department})
            if designation:
                filters.update({"designation": designation})
            if location:
                filters.update({"location_name": location})
            employees = frappe.get_all('Employee', filters)
        for emp in employees:
            # Effective shift: a dated Shift Assignment overrides the
            # employee's default working shift.
            working_shift = frappe.db.get_value("Employee", {'employee': emp.name}, ['working_shift'])
            assigned_shift = frappe.db.sql("""select shift from `tabShift Assignment`
            where employee = %s and %s between from_date and to_date""", (emp.name, preday), as_dict=True)
            if assigned_shift:
                working_shift = assigned_shift[0]['shift']
            if emp.name in auto_present_list:
                # Auto-present: mark Present without querying the device.
                doc = frappe.get_doc("Employee", emp.name)
                attendance = frappe.db.exists("Attendance", {"employee": doc.employee, "attendance_date": preday})
                if attendance:
                    frappe.db.set_value("Attendance", attendance, "status", "Present")
                    frappe.db.commit()
                else:
                    attendance = frappe.new_doc("Attendance")
                    attendance.employee = doc.employee
                    attendance.employee_name = doc.employee_name
                    attendance.status = "Present"
                    attendance.first_half_status = "PR"
                    attendance.second_half_status = "PR"
                    attendance.attendance_date = preday
                    # attendance.company = doc.company
                    attendance.working_shift = working_shift,  # NOTE(review): trailing comma stores a 1-tuple - likely a bug
                    attendance.late_in = "00:00:00"
                    attendance.work_time = "00:00:00"
                    attendance.early_out = "00:00:00"
                    attendance.overtime = "00:00:00"
                    attendance.save(ignore_permissions=True)
                    attendance.submit()
                    frappe.db.commit()
            else:
                try:
                    # Per-employee daily attendance query against the COSEC API.
                    url = 'http://182.72.89.102/cosec/api.svc/v2/attendance-daily?action=get;field-name=userid,ProcessDate,firsthalf,\
secondhalf,punch1,punch2,workingshift,shiftstart,shiftend,latein,earlyout,worktime,overtime;date-range=%s-%s;range=user;id=%s;format=xml' % (day, day, emp.name)
                    r = requests.get(url, auth=('sa', 'matrixx'))
                    # NOTE(review): substring tests against r.content assume
                    # Python 2 str semantics (bytes) - verify on Python 3.
                    if "No records found" in r.content:
                        # No punches at all -> mark Absent (unless a submitted
                        # record already exists for the day).
                        attendance_id = frappe.db.exists("Attendance", {
                            "employee": emp.name, "attendance_date": preday, "docstatus": 1})
                        if attendance_id:
                            pass
                        else:
                            attendance = frappe.new_doc("Attendance")
                            attendance.update({
                                "employee": emp.name,
                                "attendance_date": preday,
                                "status": 'Absent',
                                "first_half_status": "AB",
                                "second_half_status": "AB",
                                "late_in": "0:00:00",
                                "early_out": "0:00:00",
                                "working_shift": working_shift,
                                "work_time": "0:00:00",
                                "overtime": "0:00:00"
                            })
                            attendance.save(ignore_permissions=True)
                            attendance.submit()
                            frappe.db.commit()
                    else:
                        # 0010102003 is a device-side failure code; skip those.
                        if not "failed: 0010102003" in r.content:
                            root = ET.fromstring(r.content)
                            for att in root.findall('attendance-daily'):
                                userid = att.find('UserID').text
                                in_time = att.find('Punch1').text
                                out_time = att.find('Punch2').text
                                first_half_status = att.find('firsthalf').text
                                second_half_status = att.find('secondhalf').text
                                # NOTE(review): shadows the `date` name again.
                                date = datetime.strptime((att.find('ProcessDate').text.replace("/", "")), "%d%m%Y").date()
                                date_f = date.strftime("%Y-%m-%d")
                                # Cap device-reported work time at 1400 minutes.
                                if flt(att.find('WorkTime').text) > 1440:
                                    work_time = timedelta(minutes=flt('1400'))
                                else:
                                    work_time = timedelta(minutes=flt(att.find('WorkTime').text))
                                over_time = timedelta(minutes=flt(att.find('Overtime').text))
                                late_in = timedelta(minutes=flt(att.find('LateIn').text))
                                early_out = timedelta(minutes=flt(att.find('EarlyOut').text))
                                attendance_id = frappe.db.exists("Attendance", {
                                    "employee": emp.name, "attendance_date": date_f, "docstatus": 1})
                                if out_time:
                                    out_time_f = datetime.strptime(out_time, "%d/%m/%Y %H:%M:%S")
                                if in_time:
                                    in_time_f = datetime.strptime(in_time, "%d/%m/%Y %H:%M:%S")
                                if in_time and out_time:
                                    # Prefer punch-derived work time over the
                                    # device's WorkTime field; >=7h45m Present,
                                    # >=4h Half Day, else Absent.
                                    work_time = out_time_f - in_time_f
                                    wt_seconds = work_time.total_seconds() // 60
                                    if wt_seconds > 1440:
                                        work_time = timedelta(minutes=flt('1400'))
                                    if work_time >= timedelta(hours=4):
                                        if work_time < timedelta(hours=7, minutes=45):
                                            status = 'Half Day'
                                        else:
                                            status = 'Present'
                                    else:
                                        status = 'Absent'
                                # NOTE(review): if either punch is missing,
                                # `status` below is unset (or stale from a
                                # previous loop iteration) - verify.
                                if attendance_id:
                                    attendance = frappe.get_doc(
                                        "Attendance", attendance_id)
                                    attendance.out_time = out_time
                                    attendance.in_time = in_time
                                    attendance.status = status
                                    attendance.first_half_status = first_half_status
                                    attendance.second_half_status = second_half_status
                                    attendance.late_in = late_in
                                    attendance.early_out = early_out
                                    attendance.working_shift = working_shift
                                    attendance.work_time = work_time
                                    attendance.overtime = over_time
                                    attendance.db_update()
                                    frappe.db.commit()
                                else:
                                    attendance = frappe.new_doc("Attendance")
                                    attendance.update({
                                        "employee": emp.name,
                                        "attendance_date": date_f,
                                        "status": status,
                                        "in_time": in_time,
                                        "first_half_status": first_half_status,
                                        "second_half_status": second_half_status,
                                        "late_in": late_in,
                                        "early_out": early_out,
                                        "working_shift": working_shift,
                                        "out_time": out_time,
                                        "work_time": work_time,
                                        "overtime": over_time
                                    })
                                    attendance.save(ignore_permissions=True)
                                    attendance.submit()
                                    frappe.db.commit()
                except Exception as e:
                    # Any failure (network, parse, save) aborts the employee
                    # loop for this day entirely.
                    frappe.msgprint(_("Connection Failed,Kindly check Matrix Server is Up"))
                    break
@frappe.whitelist()
def fetch_att_prev():
    """Sync yesterday's attendance from the Matrix COSEC device."""
    yesterday = add_days(today(), -1)
    fetch_att_test(yesterday, yesterday)
@frappe.whitelist()
def test():
    """Ad-hoc helper: clear the cached notification count for Chat."""
    doctype = "Chat"
    delete_notification_count_for(doctype)
def get_employees():
    """Return employee id, name and designation of every Active employee,
    ordered by employee id, as a list of dicts."""
    query = """SELECT employee,employee_name,designation FROM `tabEmployee` WHERE status='Active'
    ORDER BY employee"""
    return frappe.db.sql(query, as_dict=1)
@frappe.whitelist()
def fetch_att():
    # Sync today's attendance from the Matrix COSEC device API for every
    # Active employee.  Same logic as fetch_att_test() but fixed to today,
    # unfiltered, and WITHOUT the try/except guard around the device call.
    preday = datetime.strptime(today(), '%Y-%m-%d')
    day = preday.strftime("%d%m%Y")  # ddmmyyyy for the COSEC API
    # Employees configured to be marked Present without device punches.
    exc = frappe.db.get_list("Auto Present Employees", fields=['employee'])
    auto_present_list = []
    for e in exc:
        auto_present_list.append(e.employee)
    employees = frappe.get_all('Employee', {'status': 'Active'})
    for emp in employees:
        # Effective shift: a dated Shift Assignment overrides the default.
        working_shift = frappe.db.get_value("Employee", {'employee': emp.name}, ['working_shift'])
        assigned_shift = frappe.db.sql("""select shift from `tabShift Assignment`
        where employee = %s and %s between from_date and to_date""", (emp.name, preday), as_dict=True)
        if assigned_shift:
            working_shift = assigned_shift[0]['shift']
        if emp.name in auto_present_list:
            # Auto-present: mark Present without querying the device.
            doc = frappe.get_doc("Employee", emp.name)
            attendance = frappe.db.exists("Attendance", {"employee": doc.employee, "attendance_date": preday})
            if attendance:
                frappe.db.set_value("Attendance", attendance, "status", "Present")
                frappe.db.commit()
            else:
                attendance = frappe.new_doc("Attendance")
                attendance.employee = doc.employee
                attendance.employee_name = doc.employee_name
                attendance.status = "Present"
                attendance.attendance_date = preday
                # attendance.company = doc.company
                attendance.working_shift = working_shift
                attendance.late_in = "00:00:00"
                attendance.work_time = "00:00:00"
                attendance.early_out = "00:00:00"
                attendance.overtime = "00:00:00"
                attendance.save(ignore_permissions=True)
                attendance.submit()
                frappe.db.commit()
        else:
            # Per-employee daily attendance query against the COSEC API.
            url = 'http://182.72.89.102/cosec/api.svc/v2/attendance-daily?action=get;field-name=userid,ProcessDate,firsthalf,\
secondhalf,punch1,punch2,workingshift,shiftstart,shiftend,latein,earlyout,worktime,overtime;date-range=%s-%s;range=user;id=%s;format=xml' % (day, day, emp.name)
            r = requests.get(url, auth=('sa', 'matrixx'))
            # NOTE(review): substring tests against r.content assume Python 2
            # str semantics (bytes) - verify on Python 3.
            if "No records found" in r.content:
                # No punches at all -> Absent (unless already submitted).
                attendance_id = frappe.db.exists("Attendance", {
                    "employee": emp.name, "attendance_date": preday, "docstatus": 1})
                if attendance_id:
                    pass
                else:
                    attendance = frappe.new_doc("Attendance")
                    attendance.update({
                        "employee": emp.name,
                        "attendance_date": preday,
                        "status": 'Absent',
                        "late_in": "0:00:00",
                        "early_out": "0:00:00",
                        "working_shift": working_shift,
                        "work_time": "0:00:00",
                        "overtime": "0:00:00"
                    })
                    attendance.save(ignore_permissions=True)
                    attendance.submit()
                    frappe.db.commit()
            else:
                # 0010102003 is a device-side failure code; skip those.
                if not "failed: 0010102003" in r.content:
                    root = ET.fromstring(r.content)
                    for att in root.findall('attendance-daily'):
                        userid = att.find('UserID').text
                        in_time = att.find('Punch1').text
                        out_time = att.find('Punch2').text
                        # NOTE(review): shadows the imported `date` name.
                        date = datetime.strptime((att.find('ProcessDate').text.replace("/", "")), "%d%m%Y").date()
                        date_f = date.strftime("%Y-%m-%d")
                        first_half_status = att.find('firsthalf').text
                        second_half_status = att.find('secondhalf').text
                        # Cap device-reported work time at 1400 minutes.
                        if flt(att.find('WorkTime').text) > 1440:
                            work_time = timedelta(minutes=flt('1400'))
                        else:
                            work_time = timedelta(minutes=flt(att.find('WorkTime').text))
                        over_time = timedelta(minutes=flt(att.find('Overtime').text))
                        late_in = timedelta(minutes=flt(att.find('LateIn').text))
                        early_out = timedelta(minutes=flt(att.find('EarlyOut').text))
                        attendance_id = frappe.db.exists("Attendance", {
                            "employee": emp.name, "attendance_date": date_f, "docstatus": 1})
                        if out_time:
                            out_time_f = datetime.strptime(out_time, "%d/%m/%Y %H:%M:%S")
                        if in_time:
                            in_time_f = datetime.strptime(in_time, "%d/%m/%Y %H:%M:%S")
                        if in_time and out_time:
                            # Punch-derived work time; >=7h45m Present,
                            # >=4h Half Day, else Absent.
                            work_time = out_time_f - in_time_f
                            wt_seconds = work_time.total_seconds() // 60
                            if wt_seconds > 1440:
                                work_time = timedelta(minutes=flt('1400'))
                            if work_time >= timedelta(hours=4):
                                if work_time < timedelta(hours=7, minutes=45):
                                    status = 'Half Day'
                                else:
                                    status = 'Present'
                            else:
                                status = 'Absent'
                        # NOTE(review): if either punch is missing, `status`
                        # below is unset or stale from a prior iteration.
                        if attendance_id:
                            attendance = frappe.get_doc(
                                "Attendance", attendance_id)
                            attendance.out_time = out_time
                            attendance.in_time = in_time
                            attendance.status = status
                            attendance.first_half_status = first_half_status
                            attendance.second_half_status = second_half_status
                            attendance.late_in = late_in
                            attendance.early_out = early_out
                            attendance.working_shift = working_shift
                            attendance.work_time = work_time
                            attendance.overtime = over_time
                            attendance.db_update()
                            frappe.db.commit()
                        else:
                            attendance = frappe.new_doc("Attendance")
                            attendance.update({
                                "employee": emp.name,
                                "attendance_date": date_f,
                                "status": status,
                                "in_time": in_time,
                                "late_in": late_in,
                                "early_out": early_out,
                                "working_shift": working_shift,
                                "out_time": out_time,
                                "work_time": work_time,
                                "overtime": over_time
                            })
                            attendance.save(ignore_permissions=True)
                            attendance.submit()
                            frappe.db.commit()
@frappe.whitelist()
def fetch_employee():
    # Import users from the Matrix COSEC device into Employee records.
    # The device's reference-code is used as the Employee name/id; users
    # that already exist are skipped (no updates are applied).
    url = 'http://182.72.89.102/cosec/api.svc/v2/user?action=get;format=xml'
    r = requests.get(url, auth=('sa', 'matrixx'))
    root = ET.fromstring(r.content)
    for emp in root.findall('user'):
        reference_code = emp.find('reference-code').text
        # if reference_code == "2057":
        if not frappe.db.exists("Employee", reference_code):
            employee = frappe.new_doc("Employee")
            employee.update({
                "employee_name": emp.find('name').text,
                "employee_number": emp.find('reference-code').text,
                # NOTE(review): gender is hard-coded; the COSEC feed does not
                # appear to carry it - confirm.
                "gender": "Male",
                "reports_to": emp.find('rg_incharge_1').text,
                "leave_approver": emp.find('rg_incharge_1').text,
                # "prefered_contact_email": emp.find('official-email').text,
                "passport_number": emp.find('passport-no').text,
                "pf_number": emp.find('pf-no').text,
                "pan_number": emp.find('pan').text,
                "uan_number": emp.find('uan').text
            })
            # joining-date comes back as ddmmyyyy; only set when present.
            if emp.find('joining-date').text != None:
                employee.update({
                    "date_of_joining": (datetime.strptime(emp.find('joining-date').text, '%d%m%Y')).date(),
                })
            employee.save(ignore_permissions=True)
            frappe.db.commit()
def daterange(date1, date2):
    """Yield every date from *date1* to *date2* inclusive.

    Yields nothing when *date2* precedes *date1*.
    """
    current = date1
    while current <= date2:
        yield current
        current = current + timedelta(1)
@frappe.whitelist()
def display_announcement(note, announcement):
    # Push a browser desktop notification to every enabled user.  *note* is
    # the notification text, *announcement* the Announcements docname the
    # notification routes to on click.  The JS snippet is evaluated
    # client-side via the 'eval_js' realtime event.
    msgvar = """Notification.requestPermission(function (permission)
    {
    if (permission === "granted")
    {
    var notification = new Notification("%s");
    notification.onclick = function(event){
    event.preventDefault();
    frappe.set_route('Form','Announcements','%s')
    }
    }
    });""" % (note, announcement)
    user_list = frappe.get_all('User', filters={'enabled': 1})
    for user in user_list:
        frappe.publish_realtime(event='eval_js', message=msgvar, user=user['name'])
@frappe.whitelist()
def send_birthday_wish():
    # Email a templated birthday greeting for every employee born today,
    # using a random wish from the Wishes doctype.
    # """Send Employee birthday reminders if no 'Stop Birthday Reminders' is not set."""
    # if int(frappe.db.get_single_value("HR Settings", "stop_birthday_reminders") or 0):
    #     return
    from frappe.utils.user import get_enabled_system_users
    users = None
    birthdays = get_employees_who_are_born_today()
    # NOTE(review): raises IndexError when tabWishes is empty, and is
    # fetched even when nobody has a birthday - confirm acceptable.
    wish = frappe.db.sql("""select wish from `tabWishes` order by RAND() limit 1""", as_dict=1)[0]
    if birthdays:
        if not users:
            users = [u.email_id or u.name for u in get_enabled_system_users()]
        for e in birthdays:
            age = calculate_age(e.date_of_birth)
            args = dict(employee=e.employee_name, age=age, wish=wish['wish'], company=frappe.defaults.get_defaults().company, photo=e.image)
            # frappe.sendmail(recipients=filter(lambda u: u not in (e.company_email, e.personal_email, e.user_id), users),
            # NOTE(review): recipients hard-coded to a single address -
            # looks like a leftover test override of the commented line above.
            frappe.sendmail(recipients=['sivaranjani.s@voltechgroup.com'],
                subject=_("Birthday Reminder for {0}").format(e.employee_name),
                # message=_("""Today is {0}'s birthday!""").format(e.employee_name),
                template='birthday_wish',
                args=args)
def calculate_age(dtob):
    """Return the number of completed years between *dtob* (a date of
    birth) and today."""
    now = date.today()
    years = now.year - dtob.year
    # Not yet reached this year's birthday -> one year less.
    if (now.month, now.day) < (dtob.month, dtob.day):
        years -= 1
    return years
def get_employees_who_are_born_today():
    """Return Active Employees whose date_of_birth matches today's
    day and month, as a list of dicts."""
    values = {"date": today()}
    return frappe.db.sql("""select name,date_of_birth, personal_email, company_email, user_id, employee_name,image
        from tabEmployee where day(date_of_birth) = day(%(date)s)
        and month(date_of_birth) = month(%(date)s)
        and status = 'Active'""", values, as_dict=True)
# msgvar = """Notification.requestPermission(function (permission)
# {
# if (permission === "granted")
# {
# var notification = new Notification("%s");
# }
# });""" % wish[0]
# user_list = frappe.get_all('User',filters={'enabled':1})
# for user in user_list:
# frappe.publish_realtime(event='msgprint',message=wish[0],user=user['name'])
# note = frappe.new_doc("Note")
# note.title = 'Birthday Wishes'
# note.public = 1
# note.notify_on_login = 1
# note.content = str(wish[0])
# note.save(ignore_permissions=True)
# frappe.db.commit()
@frappe.whitelist()
def update_leave_approval(doc, status):
    """Set *status* on the Leave Application named *doc*, then submit it."""
    application = frappe.get_doc("Leave Application", doc)
    application.status = status
    application.save(ignore_permissions=True)
    application.submit()
    frappe.db.commit()
@frappe.whitelist()
def update_onduty_approval(doc, status):
    """Set *status* on the On Duty Application named *doc*, then submit it."""
    application = frappe.get_doc("On Duty Application", doc)
    application.status = status
    application.save(ignore_permissions=True)
    application.submit()
    frappe.db.commit()
@frappe.whitelist()
def update_miss_punch_approval(doc, status):
    """Set *status* on the Miss Punch Application named *doc*, then submit it."""
    application = frappe.get_doc("Miss Punch Application", doc)
    application.status = status
    application.save(ignore_permissions=True)
    application.submit()
    frappe.db.commit()
@frappe.whitelist()
def update_movement_register(doc, status):
    """Set *status* on the Movement Register named *doc*, then submit it."""
    register = frappe.get_doc("Movement Register", doc)
    register.status = status
    register.save(ignore_permissions=True)
    register.submit()
    frappe.db.commit()
@frappe.whitelist()
def update_travel_approval(doc, status):
    """Set *status* on the Travel Management record named *doc*, submit it,
    then mirror the same status onto its linked Tour Application."""
    travel = frappe.get_doc("Travel Management", doc)
    travel.status = status
    travel.save(ignore_permissions=True)
    travel.submit()
    frappe.db.commit()
    # Keep the linked Tour Application in sync.
    tour = frappe.get_doc("Tour Application", travel.tour_application)
    tour.status = status
    tour.save(ignore_permissions=True)
    tour.submit()
    frappe.db.commit()
@frappe.whitelist()
def update_expense_approval(doc, status, approval_status):
    """Move the Expense Claim named *doc* to workflow state *status* and
    record the approval decision.  Saved only - not submitted."""
    claim = frappe.get_doc("Expense Claim", doc)
    claim.workflow_state = status
    claim.approval_status = approval_status
    claim.save(ignore_permissions=True)
    # claim is intentionally not submitted; workflow handles that.
    frappe.db.commit()
@frappe.whitelist()
def update_tour_approval(doc, status):
    """Set *status* on the Tour Application named *doc*, then submit it."""
    tour = frappe.get_doc("Tour Application", doc)
    tour.status = status
    tour.save(ignore_permissions=True)
    tour.submit()
    frappe.db.commit()
@frappe.whitelist()
def bulk_leave_approve(names, status):
    """Apply *status* to several Leave Applications and submit each.

    *names* is a JSON-encoded list of document names.  Requires write
    permission on Leave Application.
    """
    if not frappe.has_permission("Leave Application", "write"):
        frappe.throw(_("Not Permitted"), frappe.PermissionError)
    for name in json.loads(names):
        application = frappe.get_doc("Leave Application", name)
        application.status = status
        application.save(ignore_permissions=True)
        application.submit()
        frappe.db.commit()
@frappe.whitelist()
def bulk_travel_approve(names, status):
    """Apply *status* to several Travel Management records and submit each.

    *names* is a JSON-encoded list of document names.  Requires write
    permission on Travel Management.
    """
    if not frappe.has_permission("Travel Management", "write"):
        frappe.throw(_("Not Permitted"), frappe.PermissionError)
    for name in json.loads(names):
        travel = frappe.get_doc("Travel Management", name)
        travel.status = status
        travel.save(ignore_permissions=True)
        travel.submit()
        frappe.db.commit()
@frappe.whitelist()
def bulk_onduty_approve(names, status):
    """Apply *status* to several On Duty Applications and submit each.

    *names* is a JSON-encoded list of document names.  Requires write
    permission on On Duty Application.
    """
    if not frappe.has_permission("On Duty Application", "write"):
        frappe.throw(_("Not Permitted"), frappe.PermissionError)
    for name in json.loads(names):
        application = frappe.get_doc("On Duty Application", name)
        application.status = status
        application.save(ignore_permissions=True)
        application.submit()
        frappe.db.commit()
def update_website_context(context):
    """Point the website splash image at the hrpro asset and return the
    (mutated) context."""
    context["splash_image"] = '/assets/hrpro/images/hd.svg'
    return context
@frappe.whitelist()
def bulk_auto_present():
    # One-off backfill: mark every "Auto Present" employee Present for a
    # fixed date.  NOTE(review): the range is hard-coded to 2019-01-10 -
    # this looks like a manually-edited maintenance script; confirm before
    # scheduling it.
    for preday in daterange(date(2019, 1, 10), date(2019, 1, 10)):
        # # preday = dt
        # preday = datetime.strptime(today(), '%Y-%m-%d').date()
        employee = []
        for emp in frappe.db.get_list("Auto Present Employees", fields=['employee']):
            # skip_attendance = validate_if_attendance_not_applicable(emp,preday)
            # if not skip_attendance:
            doc = frappe.get_doc("Employee", emp['employee'])
            attendance = frappe.db.exists("Attendance", {"employee": emp['employee'], "attendance_date": preday})
            if attendance:
                # Existing record (any status) is flipped to Present.
                frappe.db.set_value("Attendance", attendance, "status", "Present")
                frappe.db.commit()
            else:
                attendance = frappe.new_doc("Attendance")
                attendance.employee = doc.employee
                attendance.employee_name = doc.employee_name
                attendance.status = "Present"
                attendance.attendance_date = preday
                # attendance.company = doc.company
                attendance.late_in = "00:00:00"
                attendance.work_time = "00:00:00"
                attendance.early_out = "00:00:00"
                attendance.overtime = "00:00:00"
                attendance.save(ignore_permissions=True)
                attendance.submit()
                frappe.db.commit()
def daterange(date1, date2):
    """Inclusive day-by-day iterator from *date1* to *date2*.

    NOTE(review): this re-defines the identically-named helper declared
    earlier in this module; both implementations are equivalent.
    """
    total_days = (date2 - date1).days + 1
    for offset in range(total_days):
        yield date1 + timedelta(offset)
def log_error(method, message):
    """Persist an Error Log document recording *message* under *method*.

    A falsy message is stored as an empty string; otherwise it is coerced
    to text with a trailing newline.
    """
    if message:
        text = frappe.utils.cstr(message) + "\n"
    else:
        text = ""
    entry = frappe.new_doc("Error Log")
    entry.method = method
    entry.error = text
    entry.insert(ignore_permissions=True)
@frappe.whitelist()
def validate_if_attendance_not_applicable(employee, attendance_date):
    """Return True when no attendance should be recorded for *employee*
    on *attendance_date*.

    True when the date falls in the employee's holiday list, or when a
    submitted Leave Application covers the date; otherwise False.

    Bug fixed: removed a stray ``frappe.errprint("hi")`` debug statement
    that wrote to the error console on every call.

    NOTE(review): a second, non-whitelisted function of the same name is
    defined further down in this module and shadows this one at import
    time - confirm which version callers expect.
    """
    # Check if attendance_date is a Holiday
    if is_holiday(employee, attendance_date):
        return True
    # Check if employee is on (submitted) leave covering the date
    leave_record = frappe.db.sql("""select half_day from `tabLeave Application`
        where employee = %s and %s between from_date and to_date
        and docstatus = 1""", (employee, attendance_date), as_dict=True)
    if leave_record:
        return True
    return False
@frappe.whitelist()
def in_punch_alert():
    # For every Active employee who is not auto-present and has no valid
    # reason to be away today (holiday/leave), create a submitted Absent
    # attendance record if none exists yet.  The email reminder this was
    # originally built around is commented out below.
    day = today()
    exc = frappe.db.get_list("Auto Present Employees", fields=['employee'])
    employees_list = []
    for e in exc:
        employees_list.append(e.employee)
    employees = frappe.get_all('Employee', {'status': 'Active'})
    for emp in employees:
        if emp.name not in employees_list:
            skip_attendance = validate_if_attendance_not_applicable(emp.name, day)
            if not skip_attendance:
                # frappe.sendmail(
                #     recipients=['sivaranjani.s@voltechgroup.com'],
                #     subject='Missed IN Punch Alert for %s' +
                #     formatdate(today()),
                #     message="""
                #     <h3>Missed In Punch Alert</h3>
                #     <p>Dear %s,</p>
                #     <h4>Info:</h4>
                #     <p>This is the reminder for Missed In Punch for today %s</p>
                #     """ % (frappe.get_value("Employee",emp.name,"employee_name"),formatdate(day))
                # )
                att = frappe.db.exists("Attendance", {"employee": emp.name, "attendance_date": day})
                if not att:
                    attendance = frappe.new_doc("Attendance")
                    attendance.update({
                        "employee": emp.name,
                        "attendance_date": day,
                        "status": 'Absent',
                        "late_in": "0:00:00",
                        "early_out": "0:00:00",
                        "working_shift": frappe.get_value("Employee", emp.name, "working_shift"),
                        "work_time": "0:00:00",
                        "overtime": "0:00:00"
                    })
                    attendance.save(ignore_permissions=True)
                    attendance.submit()
                    frappe.db.commit()
@frappe.whitelist()
def out_punch_alert():
    # Find Active, non-auto-present employees who punched in today but
    # have no out-punch yet.  NOTE(review): the notification email is
    # commented out, so this function currently queries but takes no
    # action - confirm whether it is still needed.
    day = today()
    exc = frappe.db.get_list("Auto Present Employees", fields=['employee'])
    employees_list = []
    for e in exc:
        employees_list.append(e.employee)
    employees = frappe.get_all('Employee', {'status': 'Active'})
    for emp in employees:
        if emp.name not in employees_list:
            att_record = frappe.db.sql("""select name from `tabAttendance`
                where employee = %s and in_time > '0' and out_time is null and attendance_date = %s
                and docstatus = 1""", (emp.name, day), as_dict=True)
            # if att_record:
            #     frappe.sendmail(
            #         recipients=['sivaranjani.s@voltechgroup.com'],
            #         subject='Missed Out Punch Alert for %s' +
            #         formatdate(today()),
            #         message="""
            #         <h3>Missed Out Punch Alert</h3>
            #         <p>Dear %s,</p>
            #         <h4>Info:</h4>
            #         <p>This is the reminder for Missed Out Punch for today %s</p>
            #         """ % (frappe.get_value("Employee",emp.name,"employee_name"),formatdate(day))
            #     )
@frappe.whitelist()
def continuous_absentees():
	"""Print Attendance rows marked 'Absent' in the last three days for
	employees not listed in "Auto Present Employees".

	Fix: the date range is now passed as bound query parameters instead of
	being interpolated into the SQL string with ``%`` formatting.

	NOTE(review): this function only ``print``s matches — no other side effect.
	"""
	auto_present = [row.employee for row in frappe.db.get_list("Auto Present Employees", fields=['employee'])]
	absentees = frappe.db.sql("""select name,employee,attendance_date from `tabAttendance`
		where status='Absent' and attendance_date between %s and %s""",
		(add_days(today(), -3), today()), as_dict=True)
	for emp in absentees:
		if emp.employee not in auto_present:
			print(emp)
def validate_if_attendance_not_applicable(employee, attendance_date):
	"""Return a truthy (reason, True) pair when attendance should be skipped
	for *employee* on *attendance_date*, else False.

	Reasons, checked in order: an already-submitted attendance with an
	in-punch ("att"), a holiday ("holiday"), an approved leave ("leave"),
	an on-duty application ("od"), or a travel record ("tm").
	"""
	# A submitted attendance with an in-punch already exists for the day.
	existing = frappe.db.sql("""select name from `tabAttendance`
		where employee = %s and in_time > '0' and attendance_date = %s
		and docstatus = 1""", (employee, attendance_date), as_dict=True)
	if existing:
		return "att", True
	# The date falls on the employee's holiday list.
	if is_holiday(employee, attendance_date):
		return "holiday", True
	# Leave / On-Duty / Travel applications spanning the date all share the
	# same query shape — run them in order and stop at the first hit.
	date_span_checks = (
		("leave", "tabLeave Application"),
		("od", "tabOn Duty Application"),
		("tm", "tabTravel Management"),
	)
	for reason, doctype in date_span_checks:
		rows = frappe.db.sql(
			"select half_day from `{0}` where employee = %s "
			"and %s between from_date and to_date and docstatus = 1".format(doctype),
			(employee, attendance_date), as_dict=True)
		if rows:
			return reason, True
	return False
def get_holiday_list_for_employee(employee, raise_exception=True):
	"""Resolve the Holiday List for *employee*.

	Falls back to the employee's company default list; when *employee* is
	falsy, the Global Defaults company is used. Optionally throws when no
	list can be resolved.
	"""
	if not employee:
		holiday_list = ''
		company = frappe.db.get_value("Global Defaults", None, "default_company")
	else:
		holiday_list, company = frappe.db.get_value("Employee", employee, ["holiday_list", "company"])
	if not holiday_list:
		# Fall back to the company-wide default holiday list.
		holiday_list = frappe.get_cached_value('Company', company, "default_holiday_list")
		if not holiday_list and raise_exception:
			frappe.throw(_('Please set a default Holiday List for Employee {0} or Company {1}').format(employee, company))
	return holiday_list
def is_holiday(employee, date=None):
	"""Return True when *employee* has a holiday on *date* (today if omitted).

	Returns None (implicitly) when no holiday list resolves for the employee,
	matching the original behaviour.
	"""
	holiday_list = get_holiday_list_for_employee(employee)
	check_date = date if date else today()
	if holiday_list:
		matches = frappe.get_all('Holiday List', dict(name=holiday_list, holiday_date=check_date))
		return True if matches else False
def calculate_comp_off():
	"""One-off backfill of comp-off credit over a hard-coded date window
	(2019-10-25 .. 2019-12-24) for active, coff-eligible employees.

	Two accrual paths, both funnelled into calculate_ot():
	- holiday work: the day's full work_time when it exceeds 3 hours;
	- overtime: time worked beyond the assigned shift (3h threshold for
	  Management Staff, 2h for everyone else). Shift 'FS4' is excluded.
	"""
	# Earlier (2018-12 .. 2019-01) Comp Off Balance variant, kept for reference:
	# day = add_days(today(),-1)
	# from_date = (datetime.strptime('2018-12-25', '%Y-%m-%d')).date()
	# to_date = (datetime.strptime('2019-01-24', '%Y-%m-%d')).date()
	# for preday in daterange(from_date,to_date):
	# for emp in frappe.get_all("Employee",{"status":"Active","coff_eligible":1}):
	# if is_holiday(emp, preday):
	# coff_hours = frappe.get_value("Attendance",{"attendance_date":preday,"employee":emp.name},["work_time"])
	# if coff_hours:
	# coff_id = frappe.db.exists("Comp Off Balance",{"employee":emp.name,"comp_off_date":preday})
	# if coff_id:
	# coff = frappe.get_doc("Comp Off Balance",coff_id)
	# else:
	# coff = frappe.new_doc("Comp Off Balance")
	# coff.update({
	# "employee":emp.name,
	# "hours":coff_hours,
	# "comp_off_date":preday,
	# "validity":add_months(preday,3)
	# })
	# coff.save(ignore_permissions=True)
	# frappe.db.commit()
	from_date = (datetime.strptime('2019-10-25', '%Y-%m-%d')).date()
	to_date = (datetime.strptime('2019-12-24', '%Y-%m-%d')).date()
	for preday in daterange(from_date,to_date):
		# NOTE(review): this employee query is loop-invariant — it could be
		# hoisted out of the date loop.
		employee = frappe.db.sql("""select name,employee_name,department,designation,category from `tabEmployee`
			where status ="Active" and coff_eligible=1 """, as_dict=True)
		for emp in employee:
			#C-off for Holiday Work
			# NOTE(review): passes the whole row dict where is_holiday() expects
			# an employee name — verify this resolves correctly at runtime.
			if is_holiday(emp, preday):
				ot = frappe.get_value("Attendance",{"attendance_date":preday,"employee":emp.name},["work_time"])
				# NOTE(review): ot may be None when no attendance row exists;
				# comparing None with a timedelta raises TypeError on Python 3.
				if ot > (timedelta(hours = 3)):
					calculate_ot(emp.name,preday,ot)
			#C-Off for OT
			ews = frappe.db.get_value("Employee", emp.name, ["working_shift"])
			assigned_shift = frappe.db.sql("""select shift from `tabShift Assignment`
				where employee = %s and %s between from_date and to_date""", (emp.name, preday), as_dict=True)
			if assigned_shift:
				# A Shift Assignment covering the date overrides the default shift.
				ews = assigned_shift[0]['shift']
			ws = frappe.get_doc("Working Shift", ews)
			if not ws.name == 'FS4':
				# NOTE(review): the next two names are swapped relative to their
				# values (in<->out) and are never used afterwards.
				actual_in_time = ws.out_time
				actual_out_time = ws.in_time
				actual_work_hours = ws.out_time - ws.in_time
				if frappe.db.exists("Attendance", {"employee":emp.name,"attendance_date": preday}):
					attendance = frappe.get_doc("Attendance", {"employee":emp.name,"attendance_date": preday})
					if attendance.work_time > actual_work_hours:
						ot = attendance.work_time - actual_work_hours
						print(emp.name,preday,ot)
						if emp.category == "Management Staff":
							if ot > (timedelta(hours = 3)):
								calculate_ot(emp.name,preday,ot)
						else:
							if ot > (timedelta(hours = 2)):
								calculate_ot(emp.name,preday,ot)
def update_comp_off(doc, method):
	"""Doc-event hook: fold a Comp Off Balance doc's "HH:MM" hours into the
	employee's Comp Off Details via calculate_ot()."""
	parts = doc.hours.split(":")
	duration = timedelta(hours=cint(parts[0]), minutes=cint(parts[1]))
	calculate_ot(doc.employee, doc.comp_off_date, duration)
def calculate_ot(emp,preday,ot):
	"""Append an (ot, preday) entry to the employee's "Comp Off Details"
	child table (creating the parent doc if needed) and recompute the
	total_hours field as an "HH:MM:SS" string.
	"""
	# Wrap the single date/duration in lists so they can be set-intersected
	# against the existing child rows below.
	pre = []
	ot_time = []
	pre.append(preday)
	ot_time.append(ot)
	child = []
	emp = frappe.get_doc("Employee",emp)
	coff_id = frappe.db.exists("Comp Off Details",{"employee":emp.name})
	if coff_id:
		coff = frappe.get_doc("Comp Off Details",coff_id)
		child = coff.comp_off_calculation_details
		comp_off_child_date = []
		comp_off_child_time = []
		for c in child:
			cdate = (c.comp_off_date).strftime('%Y-%m-%d')
			ctime = c.hours
			comp_off_child_date.append(cdate)
			comp_off_child_time.append(ctime)
		# NOTE(review): child dates are formatted strings while `pre` holds
		# preday as passed in — the intersection matches only when preday is
		# already a 'YYYY-MM-DD' string; confirm caller types.
		date_result = (set(comp_off_child_date) & set(pre))
		time_result = (set(comp_off_child_time) & set(ot_time))
		# NOTE(review): this condition reduces logically to `not time_result`.
		if (not date_result and not time_result) or (date_result and not time_result):
			child_row = coff.append("comp_off_calculation_details",{
				"comp_off_date": preday,
				"hours": ot,
				"validity": add_months(preday,3)
			})
	else:
		# No parent doc yet: create it with employee details and first row.
		coff = frappe.new_doc("Comp Off Details")
		coff.update({
			"employee":emp.name,
			"employee_name":emp.employee_name,
			"department":emp.department,
			"designation":emp.designation
		})
		child_row = coff.append("comp_off_calculation_details",{
			"comp_off_date": preday,
			"hours": ot,
			"validity": add_months(preday,3)
		})
	# Recompute the running total from all child rows as HH:MM:SS.
	child = coff.comp_off_calculation_details
	t = timedelta(minutes = 0)
	for c in child:
		t = t + c.hours
	t1 = t.total_seconds()
	minutes = t1 // 60
	hours = minutes // 60
	t3 = "%02d:%02d:%02d" % (hours, minutes % 60, t1 % 60)
	coff.update({
		"total_hours": t3
	})
	coff.save(ignore_permissions=True)
	frappe.db.commit()
@frappe.whitelist()
def get_coff(employee):
	"""Return the employee's accumulated comp-off ``total_hours`` from
	Comp Off Details, or the string "No Data" when no record exists.

	Fix: removed the unused local ``t_hours`` and flattened to a guard clause.
	"""
	if not frappe.db.exists("Comp Off Details", {"employee": employee}):
		return "No Data"
	coff_hours = frappe.get_value("Comp Off Details", {"employee": employee}, ["total_hours"])
	frappe.errprint(coff_hours)
	return coff_hours
@frappe.whitelist()
def att_permission(employee):
	"""Debug helper: print (errprint) the employee's Attendance record, if any.

	Fix: the lookup line had been mangled into a comment, leaving ``att``
	undefined — the function raised NameError whenever a record existed.
	The lookup is restored so ``att`` is defined before it is printed.
	"""
	if frappe.db.exists("Attendance", {"employee": employee}):
		att = frappe.get_value("Attendance", {"employee": employee})
		frappe.errprint(att)
@frappe.whitelist()
def att_adjust(employee,attendance_date,name,in_time,out_time,status_p,status_a,status_ph,status_wo,status_first_half_present,status_second_half_present,status_first_half_absent,status_second_half_absent):
	"""Admin attendance adjustment endpoint.

	*name* is the Attendance docname; the status_* flags arrive as string
	"1"/"0" and are mutually exclusive by elif order: full Present, full
	Absent, PH, WO, then half-day present/absent variants. in_time/out_time
	are "HH:MM:SS" strings combined with the formatted attendance_date.
	Returns True.
	"""
	if name:
		itime = otime = ""
		att = frappe.get_doc("Attendance",name)
		# Re-format the date to match the "dd/mm/YYYY HH:MM:SS" punch format.
		fdate = datetime.strptime(attendance_date,'%Y-%m-%d').strftime('%d/%m/%Y')
		if in_time:
			itime = fdate + ' '+ in_time
		if out_time:
			otime = fdate + ' '+ out_time
		# Full-day Present override.
		if att and status_p == "1":
			att.update({
				"status":"Present",
				"admin_approved_status": "Present",
				"in_time": itime,
				"out_time": otime
			})
			att.save(ignore_permissions=True)
			frappe.db.commit()
		# Full-day Absent override.
		elif att and status_a == "1":
			att.update({
				"status":"Absent",
				"admin_approved_status": "Absent",
				"in_time": itime,
				"out_time": otime
			})
			att.save(ignore_permissions=True)
			frappe.db.commit()
		# Paid Holiday: stored status stays Absent, admin status records PH.
		elif att and status_ph == "1":
			att.update({
				"status":"Absent",
				"admin_approved_status": "PH",
				"in_time": itime,
				"out_time": otime
			})
			att.save(ignore_permissions=True)
			frappe.db.commit()
		# Weekly Off: stored status stays Absent, admin status records WO.
		elif att and status_wo == "1":
			att.update({
				"status":"Absent",
				"admin_approved_status": "WO",
				"in_time": itime,
				"out_time": otime
			})
			att.save(ignore_permissions=True)
			frappe.db.commit()
		# First half present: outcome depends on the current stored status.
		elif att and status_first_half_present == "1":
			if att.status == 'Present':
				att.update({
					"status":"Present",
					"first_half_status":"PR",
					"admin_approved_status": "First Half Present",
					"in_time": itime,
					"out_time": otime
				})
			if att.status == 'Half Day':
				# Combine with a prior "Second Half Present" approval and with
				# the computed second-half status to decide the final status.
				admin_approved_status = "First Half Present"
				aas = frappe.get_value("Attendance",{"employee":employee,"attendance_date":attendance_date},"admin_approved_status")
				if aas == 'Second Half Present':
					admin_approved_status = 'Present'
				sh_status = get_sh(employee,attendance_date)
				if sh_status == 'PR':
					status = 'Present'
				else:
					status = 'Half Day'
				att.update({
					"status":status,
					"first_half_status":"PR",
					"second_half_status":sh_status,
					"admin_approved_status": admin_approved_status,
					"in_time": itime,
					"out_time": otime
				})
			if att.status == 'Absent':
				att.update({
					"status":"Half Day",
					"first_half_status":"PR",
					"admin_approved_status": "First Half Present",
					"in_time": itime,
					"out_time": otime
				})
			att.save(ignore_permissions=True)
			frappe.db.commit()
		# Second half present: mirror of the branch above.
		elif att and status_second_half_present == "1":
			if att.status == 'Present':
				att.update({
					"second_half_status":"PR",
					"admin_approved_status": "Second Half Present",
					"in_time": itime,
					"out_time": otime
				})
			if att.status == 'Half Day':
				admin_approved_status = "Second Half Present"
				aas = frappe.get_value("Attendance",{"employee":employee,"attendance_date":attendance_date},"admin_approved_status")
				if aas == 'First Half Present':
					admin_approved_status = 'Present'
				fh_status = get_fh(employee,attendance_date)
				if fh_status == 'PR':
					status = 'Present'
				else:
					status = 'Half Day'
				att.update({
					"status":status,
					"first_half_status":fh_status,
					"second_half_status":"PR",
					"admin_approved_status": admin_approved_status,
					"in_time": itime,
					"out_time": otime
				})
			if att.status == 'Absent':
				att.update({
					"status":"Half Day",
					"second_half_status":"PR",
					"admin_approved_status": "Second Half Present",
					"in_time": itime,
					"out_time": otime
				})
			att.save(ignore_permissions=True)
			frappe.db.commit()
		# First half absent.
		elif att and status_first_half_absent == "1":
			if att.status == 'Present':
				att.update({
					"status":"Half Day",
					"first_half_status":"AB",
					"admin_approved_status": "First Half Absent",
					"in_time": itime,
					"out_time": otime
				})
			if att.status == 'Half Day':
				att.update({
					"first_half_status":"AB",
					"admin_approved_status": "First Half Absent",
					"in_time": itime,
					"out_time": otime
				})
			if att.status == 'Absent':
				att.update({
					"first_half_status":"AB",
					"admin_approved_status": "First Half Absent",
					"in_time": itime,
					"out_time": otime
				})
			att.save(ignore_permissions=True)
			frappe.db.commit()
		# Second half absent.
		elif att and status_second_half_absent == "1":
			if att.status == 'Present':
				att.update({
					"status":"Half Day",
					"second_half_status":"AB",
					"admin_approved_status": "Second Half Absent",
					"in_time": itime,
					"out_time": otime
				})
			if att.status == 'Half Day':
				att.update({
					"second_half_status":"AB",
					"admin_approved_status": "Second Half Absent",
					"in_time": itime,
					"out_time": otime
				})
			if att.status == 'Absent':
				att.update({
					"second_half_status":"AB",
					"admin_approved_status": "Second Half Absent",
					"in_time": itime,
					"out_time": otime
				})
			att.save(ignore_permissions=True)
			frappe.db.commit()
		# No status flag set — punch-time-only correction.
		# NOTE(review): precedence is (att and in_time) or out_time; since att
		# is always truthy here this behaves like att and (in_time or out_time),
		# but the intent should be confirmed.
		elif att and in_time or out_time:
			att.update({
				"admin_approved_status": "Present",
				"in_time": itime,
				"out_time": otime
			})
			att.save(ignore_permissions=True)
			frappe.db.commit()
	return True
@frappe.whitelist()
def get_sh(employee, attendance_date):
	"""Return the day's second-half status, upgraded to 'PR' when the recorded
	out-punch is at or after the shift's out time plus grace."""
	status = frappe.get_value("Attendance", {"employee": employee, "attendance_date": attendance_date}, "second_half_status")
	out_time = frappe.db.get_value("Attendance", {"employee": employee, "attendance_date": attendance_date}, ["out_time"])
	# Punch is stored as "dd/mm/YYYY HH:MM:SS"; keep only the time-of-day
	# as a timedelta so it can be compared against shift timings.
	punched = datetime.strptime(out_time, "%d/%m/%Y %H:%M:%S").time()
	punched_out = timedelta(hours=punched.hour, minutes=punched.minute, seconds=punched.second)
	shift = frappe.db.get_value("Employee", {'employee': employee}, ['working_shift'])
	override = frappe.db.sql("""select shift from `tabShift Assignment`
		where employee = %s and %s between from_date and to_date""", (employee, attendance_date), as_dict=True)
	if override:
		shift = override[0]['shift']
	cutoff = frappe.db.get_value("Working Shift", shift, "out_time") + frappe.db.get_value("Working Shift", shift, "grace_out_time")
	if punched_out >= cutoff:
		status = 'PR'
	return status
def get_fh(employee, attendance_date):
	"""Return the day's first-half status, upgraded to 'PR' when the recorded
	in-punch is at or before the shift's in time plus grace."""
	status = frappe.get_value("Attendance", {"employee": employee, "attendance_date": attendance_date}, "first_half_status")
	in_time = frappe.db.get_value("Attendance", {"employee": employee, "attendance_date": attendance_date}, ["in_time"])
	# Punch is stored as "dd/mm/YYYY HH:MM:SS"; keep only the time-of-day
	# as a timedelta so it can be compared against shift timings.
	punched = datetime.strptime(in_time, "%d/%m/%Y %H:%M:%S").time()
	punched_in = timedelta(hours=punched.hour, minutes=punched.minute, seconds=punched.second)
	shift = frappe.db.get_value("Employee", {'employee': employee}, ['working_shift'])
	override = frappe.db.sql("""select shift from `tabShift Assignment`
		where employee = %s and %s between from_date and to_date""", (employee, attendance_date), as_dict=True)
	if override:
		shift = override[0]['shift']
	cutoff = frappe.db.get_value("Working Shift", shift, "in_time") + frappe.db.get_value("Working Shift", shift, "grace_in_time")
	if punched_in <= cutoff:
		status = 'PR'
	return status
@frappe.whitelist()
def updated_att_adjust():
	"""One-off reconciliation for a hard-coded period (2019-03-25 .. 2019-04-24):
	re-apply each submitted Attendance's ``admin_approved_status`` to its
	status / first_half_status / second_half_status fields.
	"""
	attendance = frappe.db.sql("""select name from `tabAttendance` where docstatus =1 and attendance_date between '2019-03-25' and '2019-04-24' """, as_dict = 1)
	for a in attendance:
		att = frappe.get_doc("Attendance",a.name)
		admin_approved_status = att.admin_approved_status
		print(att)
		if att and admin_approved_status == "Present":
			att.update({
				"status":"Present",
				"first_half_status":"PR",
				"second_half_status":"PR",
				"admin_approved_status": "Present",
			})
			att.save(ignore_permissions=True)
			frappe.db.commit()
		elif att and admin_approved_status == "Absent":
			att.update({
				"status":"Absent",
				"first_half_status":"AB",
				"second_half_status":"AB",
				"admin_approved_status": "Absent",
			})
			att.save(ignore_permissions=True)
			frappe.db.commit()
		# PH / WO keep the stored status Absent; only the admin marker differs.
		elif att and admin_approved_status == "PH":
			att.update({
				"status":"Absent",
				"admin_approved_status": "PH",
			})
			att.save(ignore_permissions=True)
			frappe.db.commit()
		elif att and admin_approved_status == "WO":
			att.update({
				"status":"Absent",
				"admin_approved_status": "WO"
			})
			att.save(ignore_permissions=True)
			frappe.db.commit()
		# Half-day approvals: the adjustment depends on the current status.
		elif att and admin_approved_status == "First Half Present":
			if att.status == 'Present':
				att.update({
					"status":"Present",
					"first_half_status":"PR",
					"admin_approved_status": "First Half Present"
				})
			if att.status == 'Half Day' and admin_approved_status == "First Half Present":
				att.update({
					# "status":"Present",
					"first_half_status":"PR",
					"admin_approved_status": "First Half Present"
				})
			if att.status == 'Absent' and admin_approved_status == "First Half Present":
				att.update({
					"status":"Half Day",
					"first_half_status":"PR",
					"admin_approved_status": "First Half Present"
				})
			att.save(ignore_permissions=True)
			frappe.db.commit()
		elif att and admin_approved_status == "Second Half Present":
			if att.status == 'Present':
				att.update({
					"second_half_status":"PR",
					"admin_approved_status": "Second Half Present"
				})
			if att.status == 'Half Day' and admin_approved_status == "Second Half Present":
				att.update({
					"second_half_status":"PR",
					"admin_approved_status": "Second Half Present"
				})
			if att.status == 'Absent' and admin_approved_status == "Second Half Present":
				att.update({
					"status":"Half Day",
					"second_half_status":"PR",
					"admin_approved_status": "Second Half Present"
				})
			att.save(ignore_permissions=True)
			frappe.db.commit()
		elif att and admin_approved_status == "First Half Absent":
			if att.status == 'Present' and admin_approved_status == "First Half Absent":
				att.update({
					"status":'Half Day',
					"first_half_status":"AB",
					"admin_approved_status": "First Half Absent"
				})
			if att.status == 'Half Day' and admin_approved_status == "First Half Absent":
				att.update({
					"first_half_status":"AB",
					"admin_approved_status": "First Half Absent"
				})
			if att.status == 'Absent' and admin_approved_status == "First Half Absent":
				att.update({
					"first_half_status":"AB",
					"admin_approved_status": "First Half Absent",
				})
			att.save(ignore_permissions=True)
			frappe.db.commit()
		elif att and admin_approved_status == "Second Half Absent":
			if att.status == 'Present' and admin_approved_status == "Second Half Absent":
				att.update({
					"status":'Half Day',
					"second_half_status":"AB",
					"admin_approved_status": "Second Half Absent",
				})
			if att.status == 'Half Day' and admin_approved_status == "Second Half Absent":
				att.update({
					"second_half_status":"AB",
					"admin_approved_status": "Second Half Absent",
				})
			if att.status == 'Absent' and admin_approved_status == "Second Half Absent":
				att.update({
					"second_half_status":"AB",
					"admin_approved_status": "Second Half Absent",
				})
			att.save(ignore_permissions=True)
			frappe.db.commit()
@frappe.whitelist()
def bulk_att_adjust(from_date, to_date, status, location=None, employee=None):
	"""Bulk-set ``admin_approved_status`` on Attendance records in a date
	range, selected either by employee location or by a single employee.

	Returns True when a filter matched a branch; None otherwise (same
	contract as before — when both filters are given, only the location
	branch runs).

	Fix: the two identical update loops are deduplicated into a private
	helper, which also removes the original's shadowing of ``att`` (the
	result list was rebound to a document inside its own loop).
	"""
	if location:
		rows = frappe.db.sql("""
			select `tabAttendance`.name from `tabAttendance`
			join `tabEmployee` on `tabEmployee`.name = `tabAttendance`.employee
			where
			`tabEmployee`.location_name = %s
			and `tabAttendance`.attendance_date between %s and %s""", (location, from_date, to_date), as_dict=1)
		_apply_admin_status(rows, status)
		return True
	if employee:
		rows = frappe.db.sql("""select name from `tabAttendance`
			where employee=%s
			and attendance_date between %s and %s""", (employee, from_date, to_date), as_dict=1)
		_apply_admin_status(rows, status)
		return True

def _apply_admin_status(rows, status):
	"""Set admin_approved_status on each named Attendance row and commit."""
	for row in rows:
		if not row:
			continue
		att = frappe.get_doc("Attendance", row)
		if att:
			att.update({
				"admin_approved_status": status
			})
			frappe.errprint(att.admin_approved_status)
			att.save(ignore_permissions=True)
			frappe.db.commit()
# @frappe.whitelist()
# def bulk_admin_att():
# attendance = frappe.get_all("Attendance",{"admin_approved_status":("not like","")},['admin_approved_status'])
# for att in attendance:
# att1 = frappe.get_doc("Attendance",att)
# att1.update({
# "status":att['admin_approved_status']
# })
# att1.db_update()
# frappe.db.commit()
@frappe.whitelist()
def fetch_att_temp():
	"""One-off/manual re-fetch of attendance for a single hard-coded employee
	('1321') over 2019-05-07 .. 2019-05-24 from the COSEC biometric API.

	"Auto Present" employees get a forced Present record; otherwise the
	COSEC daily-attendance XML is parsed and Attendance rows are created
	or updated (and submitted).
	"""
	from_date = (datetime.strptime('2019-05-07', '%Y-%m-%d')).date()
	to_date = (datetime.strptime('2019-05-24', '%Y-%m-%d')).date()
	emp = '1321'
	emp = frappe.get_doc('Employee',emp)
	for preday in daterange(from_date,to_date):
		# COSEC expects ddmmyyyy in the date-range parameter.
		day = preday.strftime("%d%m%Y")
		exc = frappe.db.get_list("Auto Present Employees",fields=['employee'])
		auto_present_list = []
		for e in exc:
			auto_present_list.append(e.employee)
		# employees = frappe.get_all('Employee',{'status':'Active','date_of_joining':('<=',preday)})
		# for emp in employees:
		working_shift = frappe.db.get_value("Employee", {'employee':emp.name},['working_shift'])
		assigned_shift = frappe.db.sql("""select shift from `tabShift Assignment`
			where employee = %s and %s between from_date and to_date""", (emp.name, preday), as_dict=True)
		if assigned_shift:
			# A Shift Assignment covering the date overrides the employee default.
			working_shift = assigned_shift[0]['shift']
		if emp.name in auto_present_list:
			doc = frappe.get_doc("Employee",emp.name)
			attendance = frappe.db.exists("Attendance", {"employee": doc.employee, "attendance_date": preday})
			if attendance:
				frappe.db.set_value("Attendance",attendance,"status","Present")
				frappe.db.commit()
			else:
				attendance = frappe.new_doc("Attendance")
				attendance.employee = doc.employee
				attendance.employee_name = doc.employee_name
				attendance.status = "Present"
				attendance.attendance_date = preday
				# attendance.company = doc.company
				# NOTE(review): the trailing comma makes this a 1-tuple, not the
				# shift value — looks like a bug; confirm intended value.
				attendance.working_shift = working_shift,
				attendance.late_in = "00:00:00"
				attendance.work_time = "00:00:00"
				attendance.early_out = "00:00:00"
				attendance.overtime = "00:00:00"
				attendance.save(ignore_permissions=True)
				attendance.submit()
				frappe.db.commit()
		else:
			url = 'http://182.72.89.102/cosec/api.svc/v2/attendance-daily?action=get;field-name=userid,ProcessDate,firsthalf,\
			secondhalf,punch1,punch2,workingshift,shiftstart,shiftend,latein,earlyout,worktime,overtime;date-range=%s-%s;range=user;id=%s;format=xml' % (day,day,emp.name)
			r = requests.get(url, auth=('sa', 'matrixx'))
			# NOTE(review): r.content is bytes on Python 3; the substring checks
			# below assume str (Python 2 era) — confirm runtime version.
			if "No records found" in r.content:
				attendance_id = frappe.db.exists("Attendance", {
					"employee": emp.name, "attendance_date": preday,"docstatus":1})
				if attendance_id:
					pass
				else:
					# No punches and no existing record: mark the day Absent.
					attendance = frappe.new_doc("Attendance")
					attendance.update({
						"employee": emp.name,
						"attendance_date": preday,
						"status": 'Absent',
						"late_in" : "0:00:00",
						"early_out" : "0:00:00",
						"working_shift" : frappe.get_value("Employee",emp.name,"working_shift"),
						"work_time": "0:00:00",
						"overtime":"0:00:00"
					})
					attendance.save(ignore_permissions=True)
					attendance.submit()
					frappe.db.commit()
			else:
				# 0010102003 is a COSEC API failure code; skip parsing on failure.
				if not "failed: 0010102003" in r.content:
					root = ET.fromstring(r.content)
					for att in root.findall('attendance-daily'):
						userid = att.find('UserID').text
						in_time = att.find('Punch1').text
						out_time = att.find('Punch2').text
						first_half_status = att.find('firsthalf').text
						second_half_status = att.find('secondhalf').text
						date = datetime.strptime((att.find('ProcessDate').text.replace("/","")), "%d%m%Y").date()
						date_f = date.strftime("%Y-%m-%d")
						# print userid,date_f
						# COSEC durations are reported in minutes.
						work_time = timedelta(minutes=flt(att.find('WorkTime').text))
						over_time = timedelta(minutes=flt(att.find('Overtime').text))
						late_in = timedelta(minutes=flt(att.find('LateIn').text))
						early_out = timedelta(minutes=flt(att.find('EarlyOut').text))
						working_shift = att.find('WorkingShift').text
						attendance_id = frappe.db.exists("Attendance", {
							"employee": emp.name, "attendance_date": date_f,"docstatus":1})
						if out_time:
							out_time_f = datetime.strptime(out_time, "%d/%m/%Y %H:%M:%S")
						if in_time:
							in_time_f = datetime.strptime(in_time, "%d/%m/%Y %H:%M:%S")
						if in_time and out_time:
							# Prefer the punch delta over the reported WorkTime.
							work_time = out_time_f - in_time_f
						# Status thresholds: >=4h counts, <7h45m is Half Day.
						if work_time >= timedelta(hours=4) :
							if work_time < timedelta(hours=7,minutes=45):
								status = 'Half Day'
							else:
								status = 'Present'
						else:
							status = 'Absent'
						if attendance_id:
							attendance = frappe.get_doc(
								"Attendance", attendance_id)
							attendance.out_time = out_time
							attendance.in_time = in_time
							attendance.status = status
							attendance.first_half_status = first_half_status
							attendance.second_half_status = second_half_status
							attendance.late_in = late_in
							attendance.early_out = early_out
							attendance.working_shift = working_shift
							attendance.work_time = work_time
							attendance.overtime = over_time
							attendance.db_update()
							frappe.db.commit()
						else:
							attendance = frappe.new_doc("Attendance")
							attendance.update({
								"employee": emp.name,
								"attendance_date": date_f,
								"status": status,
								"in_time": in_time,
								"late_in" : late_in,
								"early_out" : early_out,
								"working_shift" : working_shift,
								"out_time": out_time,
								"work_time": work_time,
								"overtime":over_time
							})
							attendance.save(ignore_permissions=True)
							attendance.submit()
							frappe.db.commit()
@frappe.whitelist()
def shift_assignment(employee,attendance_date,shift):
	"""Assign *shift* to *employee* for *attendance_date* (creating or
	updating a one-day Shift Assignment), then recompute the day's submitted
	Attendance — status, half statuses, late-in/early-out, work/overtime —
	against the new shift's timings. Returns "OK".

	*shift* may arrive as "NAME(...)"; only the part before "(" is used.
	"""
	if employee:
		shift_assignment = frappe.db.exists("Shift Assignment", {"employee": employee})
		shift = shift.split("(")
		shift = shift[0]
		if shift_assignment:
			sa = frappe.db.sql("""select name from `tabShift Assignment`
				where employee = %s and %s between from_date and to_date""", (employee, attendance_date), as_dict=True)
			if sa:
				# An assignment already covers the date: just change its shift.
				for s in sa:
					doc = frappe.get_doc("Shift Assignment",s)
					doc.update({
						"shift":shift
					})
					doc.save(ignore_permissions=True)
					frappe.db.commit()
			else:
				# Employee has assignments, but none covering this date.
				doc = frappe.get_doc("Employee",employee)
				sa = frappe.new_doc("Shift Assignment")
				sa.update({
					"employee": employee,
					"employee_name": doc.employee_name,
					"business_unit": doc.business_unit,
					"location": doc.location_name,
					"department": doc.department,
					"category": doc.category,
					"from_date":attendance_date,
					"to_date":attendance_date,
					"shift":shift
				})
				sa.save(ignore_permissions=True)
				frappe.db.commit()
		else:
			# First-ever assignment for this employee: one-day record.
			doc = frappe.get_doc("Employee",employee)
			sa = frappe.new_doc("Shift Assignment")
			sa.update({
				"employee": employee,
				"employee_name": doc.employee_name,
				"business_unit": doc.business_unit,
				"location": doc.location_name,
				"department": doc.department,
				"category": doc.category,
				"from_date":attendance_date,
				"to_date":attendance_date,
				"shift":shift
			})
			sa.save(ignore_permissions=True)
			frappe.db.commit()
		# update_att_from_shift(employee,attendance_date,shift)
		if frappe.db.exists("Attendance", {
			"employee": employee, "attendance_date": attendance_date,"docstatus":1}):
			attendance_id = frappe.db.exists("Attendance", {
				"employee": employee, "attendance_date": attendance_date,"docstatus":1})
			if attendance_id:
				attendance = frappe.get_doc(
					"Attendance", attendance_id)
				shift_in_time = frappe.db.get_value("Working Shift",shift,"in_time")
				shift_out_time = frappe.db.get_value("Working Shift",shift,"out_time")
				grace_in_time = frappe.db.get_value("Working Shift",shift,"grace_in_time")
				grace_out_time = frappe.db.get_value("Working Shift",shift,"grace_out_time")
				work_time = over_time = late_in = early_out = "0:00:00"
				# Widen the shift window by the grace periods.
				shift_in_time += grace_in_time
				shift_out_time -= grace_out_time
				first_half_status = second_half_status = ""
				status = attendance.status
				if attendance.in_time:
					in_time = attendance.in_time
					dt = datetime.strptime(in_time, "%d/%m/%Y %H:%M:%S")
					from_time = dt.time()
					emp_in_time = timedelta(hours=from_time.hour,minutes=from_time.minute,seconds=from_time.second)
					#Check Movement Register
					# Credit approved movement time back to the in-punch.
					if get_mr_in1(employee,attendance_date):
						mr_status_in = True
						emp_in_time = emp_in_time - get_mr_in1(employee,attendance_date)
					# NOTE(review): second_half_status is still "" at this point
					# (it is only set in the out-punch block below), so the
					# "AB"/"PR" checks in this branch can never fire — verify.
					if emp_in_time > shift_in_time:
						first_half_status = 'AB'
						if second_half_status == "AB":
							status = "Absent"
						elif second_half_status == "PR":
							status = "Half Day"
						late_in = (emp_in_time - shift_in_time) + grace_in_time
					else:
						first_half_status = 'PR'
						if second_half_status == "AB":
							status = "Half Day"
						elif second_half_status == "PR":
							status = "Present"
						late_in = timedelta(seconds=0)
				if attendance.out_time:
					if attendance.in_time:
						out_time = attendance.out_time
						dt = datetime.strptime(out_time, "%d/%m/%Y %H:%M:%S")
						end_time = dt.time()
						emp_out_time = timedelta(hours=end_time.hour,minutes=end_time.minute,seconds=end_time.second)
						#Check Movement Register
						# Credit approved movement time back to the out-punch.
						if get_mr_out1(employee,attendance_date):
							mr_status_out = True
							emp_out_time = emp_out_time + get_mr_out1(employee,attendance_date)
						if emp_out_time < shift_out_time:
							second_half_status = 'AB'
							if first_half_status == "AB":
								status = "Absent"
							elif first_half_status == "PR":
								status = "Half Day"
							early_out = (shift_out_time - emp_out_time) - grace_out_time
						else:
							second_half_status = 'PR'
							if first_half_status == "AB":
								status = "Half Day"
							elif first_half_status == "PR":
								status = "Present"
							early_out = timedelta(seconds=0)
				if attendance.in_time and attendance.out_time:
					in_time = attendance.in_time
					out_time = attendance.out_time
					out_time_f = datetime.strptime(out_time, "%d/%m/%Y %H:%M:%S")
					in_time_f = datetime.strptime(in_time, "%d/%m/%Y %H:%M:%S")
					# Work/overtime in whole minutes, capped below 24h (1400 min).
					work_time = (out_time_f - in_time_f).total_seconds() // 60
					if work_time < 1440:
						work_time = timedelta(minutes=flt(work_time))
					else:
						work_time = timedelta(minutes=flt('1400'))
					if emp_out_time > shift_out_time:
						over_time = (emp_out_time - shift_out_time).total_seconds() // 60
						if over_time < 1440:
							over_time = timedelta(minutes=flt(over_time))
						else:
							over_time = timedelta(minutes=flt('1400'))
				attendance.update({
					"status": status,
					"late_in" : late_in,
					"early_out" : early_out,
					"working_shift" : shift,
					"work_time": work_time,
					"overtime":over_time,
					"first_half_status": first_half_status,
					"second_half_status":second_half_status
				})
				frappe.errprint(status)
				attendance.db_update()
				frappe.db.commit()
	return "OK"
def get_mr_out1(emp, day):
	"""Return the approved Movement Register duration (to_time - from_time)
	to credit to *emp*'s out-punch on *day*, when the punch falls within 10
	minutes before the movement started; otherwise None.

	Fix: the Movement Register query now binds its values as parameters
	instead of interpolating them into the SQL string with ``%`` formatting
	(injection-prone and quote-fragile).
	"""
	from_time = to_time = 0
	day = datetime.strptime(str(day), '%Y-%m-%d').date()
	day_start = datetime.combine(day, datetime.min.time())
	mrs = frappe.db.sql("""select from_time,to_time from `tabMovement Register`
		where employee = %s and docstatus = 1 and status = 'Approved'
		and from_time between %s and %s""",
		(emp, day_start, add_days(day_start, 1)), as_dict=True)
	for mr in mrs:
		# As before, only the last approved movement of the day is kept.
		from_time = mr.from_time
		to_time = mr.to_time
	out_time = frappe.get_value("Attendance", {"employee": emp, "attendance_date": day}, ["out_time"])
	if out_time:
		att_out_time = datetime.strptime(out_time, '%d/%m/%Y %H:%M:%S')
		if from_time and att_out_time >= (from_time + timedelta(minutes=-10)):
			return to_time - from_time
def get_mr_in1(emp, day):
	"""Return the approved Movement Register duration (to_time - from_time)
	to credit to *emp*'s in-punch on *day*, when the punch falls within 10
	minutes before the movement started; otherwise None.

	Fix: the Movement Register query now binds its values as parameters
	instead of interpolating them into the SQL string with ``%`` formatting
	(injection-prone and quote-fragile).
	"""
	from_time = to_time = 0
	day = datetime.strptime(str(day), '%Y-%m-%d').date()
	day_start = datetime.combine(day, datetime.min.time())
	mrs = frappe.db.sql("""select from_time,to_time from `tabMovement Register`
		where employee = %s and docstatus = 1 and status = 'Approved'
		and from_time between %s and %s""",
		(emp, day_start, add_days(day_start, 1)), as_dict=True)
	for mr in mrs:
		# As before, only the last approved movement of the day is kept.
		from_time = mr.from_time
		to_time = mr.to_time
	in_time = frappe.get_value("Attendance", {"employee": emp, "attendance_date": day}, ["in_time"])
	if in_time:
		att_in_time = datetime.strptime(in_time, '%d/%m/%Y %H:%M:%S')
		if from_time and att_in_time >= (from_time + timedelta(minutes=-10)):
			return to_time - from_time
@frappe.whitelist()
def send_announcement(name):
	"""Email a hard-coded HTML "organizational announcement" for the newly
	joined Employee *name*, using the first external-work-history and first
	education rows; recipient address is hard-coded.
	"""
	doc = frappe.get_doc('Employee', name)
	experience = doc.external_work_history
	edu = doc.education
	if doc.one_above_manager:
		report_manager_doc = frappe.get_doc('Employee', {"user_id": doc.one_above_manager})
	else:
		# NOTE(review): a plain str has no .salutation/.employee_name attrs —
		# the template args below would raise AttributeError for employees
		# without a manager; confirm this path is ever taken.
		report_manager_doc = " "
	# NOTE(review): range(1) runs the body exactly once — the loop is redundant.
	for i in range(1):
		content = """
		<h1><br></h1>
<h1 align="center"><u><span style="font-size: 14px;"> </span></u></h1>
<h1 align="center"><br><u><span style="font-size: 14px;"></span></u></h1>
<h1 align="center"><u><span style="font-size: 14px;">ORGANIZATIONAL ANNOUNCE</span></u><span style="font-size: 14px;"></span><u><span style="font-size: 14px;">MENT</span></u><span style="font-size: 14px;"></span><span style="font-size: 14px;"></span></h1>
<center><div><span style="font-size: 12px;">HDI/HR&ADM/QA-150</span></div></center>
<center><div><span style="font-size: 12px;">February 22,2019</span></div></center>
<div align="left"><span style="font-size: 12px;"><br></span></div>
<div align="left"><span style="font-size: 12px;"><br></span></div>
<div align="left"><span style="font-size: 14px;">I have great pleasure in Welcoming <b> %s . %s</b>, who has joined in our organization on <b>%s</b> as <b>%s</b> based out of <b>%s</b>.</span></div>
<div align="left"><span style="font-size: 12px;"><br></span></div>
<div align="left"><span style="font-size: 12px;"><br></span></div>
<div align="left"><span style="font-size: 14px;">Before joinnig HDI, he was working as<b> %s </b>with<b> %s</b>. <b>%s</b> has completed <b> %s</b> and he shall Report to <b>%s . %s, %s.</b></span></div>
<div align="left"><span style="font-size: 12px;"><br></span></div>
<div align="left"><span style="font-size: 14px;"><br></span></div>
<div align="left"><span style="font-size: 14px;">Email </span><a><span style="font-size: 14px;">ID:<u>%s</u></span></a></div>
<div align="left"><span style="font-size: 12px;"><br></span></div>
<div align="left"><span style="font-size: 12px;"><br></span></div>
<div align="left"><span style="font-size: 14px;">I extend him warm welcome to our Hunter Douglas India Family and sure that all of you will add on the same.</span></div>
<div align="left"><span style="font-size: 12px;"><br></span></div>
<div align="left"><span style="font-size: 12px;"><br></span></div>
<div align="left"><b><span style="font-size: 14px;">Best Wishes,</span></b></div>
<div align="left"><b><span style="font-size: 12px;"><br></span></b></div>
<div align="left"><b><span style="font-size: 12px;"><br></span></b></div>
<div align="left"><b><span style="font-size: 12px;"><br></span></b></div>
<div align="left"><b><span style="font-size: 14px;">(S.Raghavan)</span></b></div>
<div align="left"><b><span style="font-size: 12px;"><span style="font-size: 14px;"> Financial Controller</span><br></span></b></div>
<div align="left"><span style="font-size: 12px;"><br></span></div>
</center><center><div><br></div>
<div><br></div>
</center>
	""" %(doc.salutation,doc.employee_name,doc.date_of_joining,doc.designation,doc.location_name,experience[0].designation,
	experience[0].company_name,doc.employee_name,edu[0].qualification,report_manager_doc.salutation,report_manager_doc.employee_name,report_manager_doc.designation,doc.user_id)
		frappe.sendmail(
			recipients=['ramya.a@voltechgroup.com'],
			subject='Announcement For All',
			message=""" %s""" % (content))
# return content
# @frappe.whitelist()
# def submit_leave_application():
# leave = frappe.get_all("Leave Application",{"docstatus":0, "status": "Approved"},['name'])
# for l in leave:
# doc = frappe.get_doc("Leave Application",l)
# frappe.errprint(doc.status)
# doc.save(ignore_permissions=True)
# doc.submit()
# def get_mr_in(doc,method):
# from_time = to_time = 0
# dt = datetime.combine(day, datetime.min.time())
# mrs = frappe.db.sql("""select from_time,to_time from `tabMovement Register` where employee= '%s' and docstatus=1 and status='Approved' and from_time between '%s' and '%s' """ % (emp,dt,add_days(dt,1)),as_dict=True)
# for mr in mrs:
# from_time = mr.from_time
# to_time = mr.to_time
# in_time = frappe.get_value("Attendance",{"employee":emp,"attendance_date":day},["in_time"])
# if in_time:
# att_in_time = datetime.strptime(in_time,'%d/%m/%Y %H:%M:%S')
# if from_time:
# if att_in_time >= (from_time + timedelta(minutes=-10)):
# return to_time - from_time
@frappe.whitelist()
def check_attendance_status(employee, from_date, to_date):
    """Force every Attendance record of *employee* in [from_date, to_date]
    to status 'Absent', then save and submit each one.

    Fix: the query is now parameterized. The original interpolated the
    user-supplied employee and dates directly into the SQL string (SQL
    injection) and left the employee value unquoted.
    """
    attendance = frappe.db.sql(
        """select name,employee,attendance_date,status from `tabAttendance`
           where employee=%s and attendance_date between %s and %s""",
        (employee, from_date, to_date), as_dict=True)
    for row in attendance:
        doc = frappe.get_doc("Attendance", row.name)
        doc.update({
            "status": "Absent"
        })
        doc.save(ignore_permissions=True)
        doc.submit()
        frappe.db.commit()
        frappe.errprint(doc.status)  # debug trace kept from the original
    return "Ok"
@frappe.whitelist()
def update_ecode():
    """Mirror ``employee_code`` into ``employee_code1`` for every
    Performance Management Reviewer record, then commit once."""
    reviewers = frappe.get_all(
        "Performance Management Reviewer",
        fields=['name', 'employee_code'])
    for reviewer in reviewers:
        frappe.db.set_value(
            "Performance Management Reviewer", reviewer['name'],
            "employee_code1", reviewer['employee_code'])
    frappe.db.commit()
@frappe.whitelist()
def update_mis(employee,date_of_joining=None,gender=None,date_of_birth=None,department=None,salary_mode=None,bank_name=None,bank_ac_no=None,ifsc_code=None,working_shift=None,pan_number=None,
               uan_number=None,cell_number=None,father_name=None,husband_wife_name=None,permanent_address_is=None,permanent_address=None,current_address_is=None,current_address=None):
    """File an 'Employee Info Update' request for *employee* and mail HR a
    link to approve or reject it.

    Only runs when an employee id is supplied; always returns "Ok".
    """
    if employee:
        emp_doc = frappe.get_doc("Employee", employee)
        request_doc = frappe.new_doc("Employee Info Update")
        request_doc.update({
            "employee_code": employee,
            "employee_name": emp_doc.employee_name,
            "date_of_joining": date_of_joining,
            "gender": gender,
            "date_of_birth": date_of_birth,
            "department": department,
            "salary_mode": salary_mode,
            "bank_name": bank_name,
            "bank_ac_no": bank_ac_no,
            "ifsc_code": ifsc_code,
            "working_shift": working_shift,
            "pan_number": pan_number,
            "uan_number": uan_number,
            "cell_number": cell_number,
            "father_name": father_name,
            "husband_wife_name": husband_wife_name,
            "permanent_address_is": permanent_address_is,
            "permanent_address": permanent_address,
            "current_address_is": current_address_is,
            "current_address": current_address
        })
        request_doc.save(ignore_permissions=True)
        frappe.db.commit()
        frappe.sendmail(
            recipients=['hr.hdi@hunterdouglas.asia'],
            subject='RE:MIS Update',
            message=""" <h4>Request for MIS Update</h4>
<p>Employee %s Request to update data in MIS. Request file ID is %s.</p>
<p>You can Approve/Reject the Request by clicking the below Link </p>
<a href="{{ frappe.utils.get_url_to_form("Employee Info Update", %s ) }}">Open On Duty Application</a>
""" % (employee, request_doc.name, request_doc.name)
        )
    return "Ok"
@frappe.whitelist()
def update_main_mis(name, employee, status):
    """Apply (or reject) a pending 'Employee Info Update' request.

    On approval, every non-empty field of the request overrides the
    matching Employee field; empty request fields keep the Employee's
    current value. In both cases the employee is emailed the verdict.

    Bug fix: the original only loaded ``emp_doc`` inside the "Approved"
    branch, so rejecting a request raised NameError before the rejection
    mail could be sent.
    """
    # (Employee key written, request field read, Employee fallback field).
    # The request doctype and the Employee doctype name the two
    # accommodation-type fields differently, hence the explicit map.
    field_map = [
        ("date_of_joining", "date_of_joining", "date_of_joining"),
        ("gender", "gender", "gender"),
        ("date_of_birth", "date_of_birth", "date_of_birth"),
        ("department", "department", "department"),
        ("salary_mode", "salary_mode", "salary_mode"),
        ("bank_name", "bank_name", "bank_name"),
        ("bank_ac_no", "bank_ac_no", "bank_ac_no"),
        ("ifsc_code", "ifsc_code", "ifsc_code"),
        ("working_shift", "working_shift", "working_shift"),
        ("pan_number", "pan_number", "pan_number"),
        ("uan_number", "uan_number", "uan_number"),
        ("cell_number", "cell_number", "cell_number"),
        ("father_name", "father_name", "father_name"),
        ("husband_wife_name", "husband_wife_name", "husband_wife_name"),
        ("permanent_address_is", "permanent_address_is", "permanent_accommodation_type"),
        ("permanent_address", "permanent_address", "permanent_address"),
        ("current_address_is", "current_address_is", "current_accommodation_type"),
        ("current_address", "current_address", "current_address"),
    ]
    emp_doc = frappe.get_doc("Employee", employee)  # needed in both branches
    if status == "Approved":
        eiu = frappe.get_doc("Employee Info Update", name)
        updates = {}
        for target, source, fallback in field_map:
            # Falsy request values mean "unchanged" - same rule as the
            # original per-field if/else chains.
            updates[target] = getattr(eiu, source) or getattr(emp_doc, fallback)
        emp_doc.update(updates)
        emp_doc.save(ignore_permissions=True)
        frappe.db.commit()
        frappe.sendmail(
            recipients=[emp_doc.user_id],
            subject='RE:MIS Update',
            message=""" <h4>Replay for MIS Update</h4>
<p>Your MIS update request was Approved """
        )
    else:
        frappe.sendmail(
            recipients=[emp_doc.user_id],
            subject='RE:MIS Update',
            message=""" <h4>Replay for MIS Update</h4>
<p>Your MIS update request was Rejected """
        )
    return "Ok"
@frappe.whitelist(allow_guest=True)
def update_attendance_by_app(employee, from_date, to_date, from_date_session, to_date_session, m_status):
    """Overwrite the half-day statuses of *employee*'s Attendance records in
    [from_date, to_date] with *m_status*, honouring the session selectors
    ('First Half' / 'Second Half' / 'Full Day') on the boundary days.

    Bug fixes: the original's trailing commas (``first_half_status =
    m_status,``) stored 1-tuples instead of status strings in most
    branches, and the record query interpolated user input into the SQL
    string (injection) - it is now parameterized. The derived overall
    ``status`` was computed but deliberately left out of the update in the
    original; that dead computation is dropped.
    """
    attendance = frappe.db.sql(
        """select name from `tabAttendance`
           where employee=%s and attendance_date between %s and %s""",
        (employee, from_date, to_date), as_dict=True)
    from_date = datetime.strptime(str(from_date), '%Y-%m-%d').date()
    to_date = datetime.strptime(str(to_date), '%Y-%m-%d').date()
    for row in attendance:
        doc = frappe.get_doc("Attendance", row.name)
        first_half_status = doc.first_half_status
        second_half_status = doc.second_half_status
        if from_date == to_date:
            # Single-day update: only the from_date session matters.
            if doc.attendance_date == from_date:
                if from_date_session == "First Half":
                    first_half_status = m_status
                elif from_date_session == "Second Half":
                    second_half_status = m_status
                else:  # Full Day
                    first_half_status = second_half_status = m_status
        else:
            if doc.attendance_date == from_date:
                if from_date_session == "Second Half":
                    second_half_status = m_status
                elif from_date_session == "Full Day":
                    first_half_status = second_half_status = m_status
            elif doc.attendance_date == to_date:
                if to_date_session == "First Half":
                    first_half_status = m_status
                elif to_date_session == "Full Day":
                    first_half_status = second_half_status = m_status
            else:
                # Days strictly inside the range are always full days.
                first_half_status = second_half_status = m_status
        doc.update({
            "first_half_status": first_half_status,
            "second_half_status": second_half_status,
            # "status" intentionally untouched, as in the original.
        })
        doc.save(ignore_permissions=True)
        doc.submit()
    # NOTE(review): commit position was ambiguous in the mangled source;
    # one commit after the batch is assumed here.
    frappe.db.commit()
@frappe.whitelist()
def update_mr_in_att(employee, from_time, to_time, total_permission_hour):
    """Recompute an Attendance record's work time and status after
    movement-register (MR) in/out entries for the day of *from_time*.

    Bug fixes vs the original: the MR-out duration no longer overwrites the
    MR-in duration already added (both now accumulate into ``work_time``),
    the minutes variable is named for what it holds, and the one-day cap
    uses 1440 minutes consistently (the original compared against 1440 but
    capped to 1400 - presumed typo).
    """
    mr_in_time = mr_out_time = ''
    frappe.errprint(employee)
    frappe.errprint(from_time)
    frappe.errprint(to_time)
    frappe.errprint(total_permission_hour)
    att_date = datetime.strptime(str(from_time), '%Y-%m-%d %H:%M:%S').date()
    att = frappe.get_doc("Attendance", {"employee": employee, "attendance_date": att_date})
    if att:
        work_time = att.work_time
        frappe.errprint(work_time)
        mr_in = get_mr_in(employee, att_date)
        if mr_in:
            mr_in_time = mr_in
            work_time = work_time + mr_in
        mr_out = get_mr_out(employee, att_date)
        if mr_out:
            mr_out_time = mr_out
            work_time = work_time + mr_out  # accumulate, don't overwrite (bugfix)
        worked_minutes = work_time.total_seconds() // 60
        if worked_minutes > 1440:
            work_time = timedelta(minutes=1440)  # cap at one full day
        # Status thresholds: >= 7h45m Present, >= 4h Half Day, else Absent.
        if work_time >= timedelta(hours=4):
            if work_time < timedelta(hours=7, minutes=45):
                status = 'Half Day'
            else:
                status = 'Present'
        else:
            status = 'Absent'
        att.status = status
        att.work_time = work_time
        att.mr_in_time = mr_in_time
        att.mr_out_time = mr_out_time
        att.db_update()
        frappe.db.commit()
def get_mr_out(emp, day):
    """Duration (timedelta) of the last Movement Register entry for *emp*
    on *day*, regardless of workflow state; None when no complete entry.

    Fix: the query is parameterized - the original interpolated ``emp``
    and the dates into the SQL string (injection risk).
    """
    day = datetime.strptime(str(day), '%Y-%m-%d').date()
    day_start = datetime.combine(day, datetime.min.time())
    rows = frappe.db.sql(
        """select from_time,to_time from `tabMovement Register`
           where employee=%s and from_time between %s and %s""",
        (emp, day_start, add_days(day_start, 1)), as_dict=True)
    from_time = to_time = 0
    for row in rows:  # keep only the last entry, as the original did
        from_time = row.from_time
        to_time = row.to_time
    if from_time and to_time:
        return to_time - from_time


def get_mr_in(emp, day):
    """Same as :func:`get_mr_out`, restricted to submitted (docstatus=1)
    and Approved entries. Query parameterized for the same reason."""
    day = datetime.strptime(str(day), '%Y-%m-%d').date()
    day_start = datetime.combine(day, datetime.min.time())
    rows = frappe.db.sql(
        """select from_time,to_time from `tabMovement Register`
           where employee=%s and docstatus=1 and status='Approved'
           and from_time between %s and %s""",
        (emp, day_start, add_days(day_start, 1)), as_dict=True)
    from_time = to_time = 0
    for row in rows:
        from_time = row.from_time
        to_time = row.to_time
    if from_time and to_time:
        return to_time - from_time
def update_designation():
    """Backfill Attendance.designation from each employee's current
    designation on the Employee doctype."""
    for ref in frappe.get_all("Attendance"):
        record = frappe.get_doc("Attendance", ref)
        current = frappe.get_value("Employee", record.employee, "designation")
        record.designation = current
        record.db_update()
    frappe.db.commit()
    # frappe.set_value('Attendance', record.name, "designation", current)
def remove_att_for_left_emp():
    """Cancel and delete Attendance records dated after the relieving date
    for left employee '1147' (filter hard-coded in the original)."""
    left_employees = frappe.get_all(
        "Employee", {"status": 'Left', 'employee': '1147'},
        ['employee', 'relieving_date'])
    for emp in left_employees:
        stale_rows = frappe.db.sql(
            """select name as name,employee as emp,attendance_date as att_date from `tabAttendance` where employee = %s and attendance_date > %s """,
            (emp.employee, emp.relieving_date), as_dict=True)
        for row in stale_rows:
            record = frappe.get_doc("Attendance", row.name)
            record.cancel()
            frappe.delete_doc('Attendance', record.name)
def des_update_from_old():
    """Rewrite the designation on employee '1107's Attendance rows dated on
    or before each historical designation's effective_from date."""
    history = frappe.get_all(
        "Designation_new", {"parent": "1107"},
        ['parent', 'effective_from', 'data_1'])
    for entry in history:
        rows = frappe.db.sql(
            """select name as name,employee as emp,attendance_date as att_date,designation as des from `tabAttendance` where employee = %s and attendance_date <= %s """,
            (entry.parent, entry.effective_from), as_dict=True)
        for row in rows:
            record = frappe.get_doc("Attendance", row.name)
            record.designation = entry.data_1
            record.db_update()
    frappe.db.commit()
def retirement_alert():
    """Email each employee retiring within the next 12 months.

    Bug fixes: the original passed the literal string "%s" as the recipient
    list (the placeholder was never substituted, so the mail went to a
    bogus address) and greeted the reader with the email address instead of
    the name. The date-range query is also parameterized now instead of
    %-interpolated.
    """
    year_start = datetime.today().date()
    year_end = add_months(year_start, 12)
    print(year_start)
    employees = frappe.db.sql(
        """select name,employee_name,company_email,date_of_retirement
           FROM `tabEmployee`
           where date_of_retirement BETWEEN %s AND %s and status = 'Active'""",
        (year_start, year_end), as_dict=True)
    for emp in employees:
        print(emp.company_email)
        frappe.sendmail(
            recipients=[emp.company_email],
            subject='Retirement Announcement',
            message="""<p>Dear %s,</p>
<p> It saddens us to announce the retirement of %s. %s contributions will always be valued and remembered. %s hard work, commitment, and dedication are worthy of admiration.
On behalf of every one, I would like to wish %s the best of luck. </p>""" % (
                emp.employee_name, emp.employee_name, emp.employee_name,
                emp.employee_name, emp.employee_name))
@frappe.whitelist()
def get_six_month(date):
    """Return the date six months after *date* (frappe.utils.add_months)."""
    expiry = add_months(date, 6)
    return expiry
3be1b257ed049b89648aee83c0dec50c88e59eb8 | 2,770 | py | Python | flask-nlp-api/client.py | dr-jgsmith/flask-nlp-api | bc47594a0b200271ff71d1637ae76338d3b3912c | [
"MIT"
] | 1 | 2019-06-14T06:36:24.000Z | 2019-06-14T06:36:24.000Z | flask-nlp-api/client.py | dr-jgsmith/flask-nlp-api | bc47594a0b200271ff71d1637ae76338d3b3912c | [
"MIT"
] | null | null | null | flask-nlp-api/client.py | dr-jgsmith/flask-nlp-api | bc47594a0b200271ff71d1637ae76338d3b3912c | [
"MIT"
] | null | null | null | from requests import put, get, post, delete
data = {
'username': 'Peter2Face',
'email': 'Peter2Face@live.com',
'password_hash': 'I2Peter2Face'
}
data2 = {
'username': 'Peter2Face',
'password_hash': 'I2Peter2Face'
}
x = post('http://localhost:5000/modern_api/users/add', data=data).json()
print(x)
z = get('http://localhost:5000/modern_api/users/list').json()
print(z)
a = post('http://localhost:5000/modern_api/login', data=data2).json()
print(a)
corpus_data = {
'username': 'Peter2Face',
'api_key': a['valid_user'],
'title': 'streamate',
'type': 'website corpus',
'seed_url': 'http://streammate.com',
'description': '',
'corpus_file': 'streammate_corpus.dict',
'model_file': 'streammate_corpus.mm'
}
b = post('http://localhost:5000/modern_api/corpus/add', data=corpus_data).json()
print(b)
topic_data = {
'username': 'Peter2Face',
'api_key': a['valid_user'],
'title': 'streamate',
'type': 'topics',
'url': 'http://streammate.com',
'description': '',
}
#c = post('http://localhost:5000/modern_api/topics/url/add', data=topic_data).json()
#print(c)
d = post('http://localhost:5000/modern_api/topics/seed/add', data=topic_data).json()
print(d)
text_data = {
'username': 'Peter2Face',
'api_key': a['valid_user'],
'title': 'streamate',
'text': "We go through a process of encoding patterns and making note of anomalies to our patterns. "
"When an anomaly is persist it becomes linked in memory, associated with the context of occurrence, "
"which relate to other occurrences in thought and action."
}
e = post('http://localhost:5000/modern_api/topics/extract_from_text', data=text_data).json()
print(e)
f = get('http://localhost:5000/modern_api/sentiment/text/raw', data=text_data).json()
print(f)
"""
files = {'file': open('/Users/justinsmith/PycharmProjects/genesis_net/uploads/Screen_Shot_2018-10-29_at_9.21.02_PM.png',
'rb')}
y = get('http://localhost:5000/')
print(y)
x = put('http://localhost:5000/login', data={'email': email, 'password': password}).json()
print(x)
token = {'token': x}
z = post('http://localhost:5000/upload', files=files, data=token).json()
print(z)
files2 = {'file': open('/Users/justinsmith/Desktop/Data/festival_data_clean.csv', 'rb')}
a = post('http://localhost:5000/upload', files=files2, data=token).json()
print(a)
b = post('http://localhost:5000/crawl', data={'url': 'http://streammate.com', 'token': 'mytoken'}).json()
bj = json.loads(b)
print(bj)
c = get('http://localhost:5000/crawl', data={'crawl_id': bj['success']}).json()
bc = json.loads(c)
print(bc)
# c = post('http://localhost:5000/scrape', data={'targets': bj['targets'], 'token': x}).json()
# print(c)
"""
| 25.412844 | 120 | 0.65343 |
d7d0fc0b86820f14f5ec9e2bf269901f63926cac | 555 | py | Python | exercices-pin-raspberry/Dection Temperature/temp.py | Elitross/Ecole-raspberrypi | 5897217550194516014562e76df8a28b2aa83480 | [
"MIT"
] | null | null | null | exercices-pin-raspberry/Dection Temperature/temp.py | Elitross/Ecole-raspberrypi | 5897217550194516014562e76df8a28b2aa83480 | [
"MIT"
] | null | null | null | exercices-pin-raspberry/Dection Temperature/temp.py | Elitross/Ecole-raspberrypi | 5897217550194516014562e76df8a28b2aa83480 | [
"MIT"
] | null | null | null | from gpiozero import CPUTemperature
from time import sleep, strftime, time
import matplotlib.pyplot as plt
plt.ion()
x = []
y = []
cpu = CPUTemperature()
with open("/home/pi/repo/exercice-temperature-raspberrypi/cpu_temp.csv", "a") as log:
while True:
temp = cpu.temperature
print(temp)
"""
#Partie 2
log.write("{0},{1}\n".format(strftime("%Y-%m-%d %H:%M:%S"),str(temp)))
sleep(1)
"""
y.append(temp)
x.append(time())
plt.clf()
plt.scatter(x,y)
plt.plot(x,y)
plt.pause(1)
plt.draw() | 17.903226 | 85 | 0.598198 |
39b76150ad9acca5ac05fb03d132f742a8e1a45c | 2,182 | py | Python | hamper/cli.py | ecnahc515/hamper | c344f17caca40ebf830bb46682ff5477d33a4f0e | [
"MIT"
] | 2 | 2021-11-08T09:59:46.000Z | 2022-01-23T23:30:26.000Z | hamper/cli.py | ecnahc515/hamper | c344f17caca40ebf830bb46682ff5477d33a4f0e | [
"MIT"
] | 3 | 2015-02-22T03:52:17.000Z | 2016-08-18T18:32:49.000Z | hamper/cli.py | ecnahc515/hamper | c344f17caca40ebf830bb46682ff5477d33a4f0e | [
"MIT"
] | 12 | 2015-01-25T22:22:25.000Z | 2022-01-17T22:04:30.000Z | from fnmatch import fnmatch
from twisted.internet import reactor
from twisted.internet.stdio import StandardIO
from twisted.protocols.basic import LineReceiver
from twisted.plugin import getPlugins
import sqlalchemy
from sqlalchemy import orm
from hamper.commander import DB, PluginLoader
import hamper.config
from hamper.interfaces import BaseInterface
import hamper.log
import hamper.plugins
class CLIProtocol(LineReceiver):
    """
    A bare-bones protocol meant for imitating a single-user session with
    Hamper over stdio.
    """
    delimiter = "\n"

    def __init__(self, config):
        self.loader = PluginLoader()
        self.loader.config = config
        self.history = {}

        # Use the configured database when present, otherwise fall back to
        # a throwaway in-memory SQLite engine.
        if 'db' in config:
            print('Loading db from config: ' + config['db'])
            db_engine = sqlalchemy.create_engine(config['db'])
        else:
            print('Using in-memory db')
            db_engine = sqlalchemy.create_engine('sqlite:///:memory:')
        DBSession = orm.sessionmaker(db_engine)
        session = DBSession()
        self.loader.db = DB(db_engine, session)

        # Load all plugins mentioned in the configuration. Allow globbing.
        print "Loading plugins", config["plugins"]
        plugins = getPlugins(BaseInterface, package=hamper.plugins)
        for plugin in plugins:
            for pattern in config["plugins"]:
                if fnmatch(plugin.name, pattern):
                    self.loader.registerPlugin(plugin)

    def connectionLost(self, reason):
        # Stdin closed (EOF): stop the reactor so the process exits.
        reactor.stop()

    def lineReceived(self, line):
        # Present each stdin line as a directed private message so chat
        # plugins behave exactly as they would for a real IRC user.
        comm = {
            'raw_message': line,
            'message': line,
            'raw_user': "user",
            'user': "user",
            'target': "hamper",
            'channel': "hamper",
            'directed': True,
            'pm': True,
        }
        self.loader.runPlugins("chat", "message", self, comm)

    def reply(self, comm, message):
        # Echo plugin replies straight back to stdout.
        print "Sending", message
        self.sendLine(message)
def main():
    # Wire hamper's plugin pipeline to stdin/stdout instead of an IRC
    # connection and hand control to the twisted reactor.
    hamper.log.setup_logging()
    config = hamper.config.load()
    StandardIO(CLIProtocol(config))
    reactor.run()


if __name__ == "__main__":
    main()
| 26.609756 | 74 | 0.621907 |
26ff0f2ed531b20a7bab8ded1b477617b3af25e6 | 1,211 | py | Python | calc_repu/getMovieGrade.py | CSC522-Data-mining-NCSU/reputation-hpc | 532a9885107f2969715a6d5a5ee1a9acc8f93aac | [
"MIT"
] | null | null | null | calc_repu/getMovieGrade.py | CSC522-Data-mining-NCSU/reputation-hpc | 532a9885107f2969715a6d5a5ee1a9acc8f93aac | [
"MIT"
] | null | null | null | calc_repu/getMovieGrade.py | CSC522-Data-mining-NCSU/reputation-hpc | 532a9885107f2969715a6d5a5ee1a9acc8f93aac | [
"MIT"
] | null | null | null | import csv
import pickle
from WeightFinder import WeightFinder
import time
from mpi4py import MPI
import pickle
from demos import cmd
# MPI handles shared by every function below; each process learns its rank
# once at import time.
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
# Single shared weight lookup used per rating row in get_movie_grade().
finder = WeightFinder()
def get_movie_grade(movie_id):
    """Weighted average rating for one Netflix-Prize movie file.

    Reads ``../training_set/mv_<id>.txt`` (Python 2 csv iterator API),
    weighting each user's rating by ``finder.get_user_weight``.

    Returns 0 when the file is missing/unreadable or when the weights sum
    to zero. Fixes vs the original: the bare ``except:`` is narrowed to
    I/O errors, the file handle is closed even if parsing raises, and a
    zero weight sum no longer raises ZeroDivisionError.
    """
    try:
        f = open('../training_set/mv_' + str(movie_id).zfill(7) + '.txt', 'r')
    except (IOError, OSError):
        return 0
    try:
        reader = csv.reader(f)
        reader.next()  # skip the "movie_id:" header line
        score = 0
        sum_w = 0
        for row in reader:
            user_id = int(row[0])
            rate = float(row[1])
            foo, weight = finder.get_user_weight(user_id)
            score = score + weight * rate
            sum_w += weight
        if not sum_w:
            return 0  # no weighted votes - avoid division by zero
        return score / sum_w
    finally:
        f.close()
def run(q):
    # Grade one 1000-movie shard (offset q*1000) across MPI ranks: each
    # "era" hands one movie id to each rank; rank 0 gathers the partial
    # dicts and pickles the merged result.
    processors = 4  # NOTE(review): assumes exactly 4 MPI ranks - confirm launch config
    era = 0
    r = {}
    while True:
        k = era * processors + rank
        if k >= 1000: break  # shard exhausted for this rank
        if k%100 == 0: print k
        #if rank==0: print k
        k = k + int(q)*1000  # shift into shard q's id range
        if k > 17770: break  # 17770 is the last movie id in the data set
        r[str(k)] = get_movie_grade(k)
        era += 1
    if rank == 0:
        # Collect every other rank's partial results, then persist.
        for i in range(1,processors):
            temp = comm.recv(source=i)
            r.update(temp)
        with open('temgrade/'+str(q)+'_tem_grade','wb') as teg:
            pickle.dump(r, teg)
    else:
        comm.send(r,dest=0)
if __name__ == '__main__':
    # demos.cmd() returns the CLI-supplied call string (e.g. "run(7)")
    # which is eval'd to dispatch. NOTE(review): eval of argv is code
    # injection if the arguments are ever untrusted.
    eval(cmd())
    #run(7)
| 20.525424 | 67 | 0.649876 |
e9e3e0318c524d9c70e643ea0008dfbf775084c3 | 8,952 | py | Python | nova/tests/unit/api/openstack/compute/test_image_size.py | bopopescu/nova-token | ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2 | [
"Apache-2.0"
] | null | null | null | nova/tests/unit/api/openstack/compute/test_image_size.py | bopopescu/nova-token | ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2 | [
"Apache-2.0"
] | null | null | null | nova/tests/unit/api/openstack/compute/test_image_size.py | bopopescu/nova-token | ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2 | [
"Apache-2.0"
] | 2 | 2017-07-20T17:31:34.000Z | 2020-07-24T02:42:19.000Z | begin_unit
comment|'# Copyright 2013 Rackspace Hosting'
nl|'\n'
comment|'# All Rights Reserved.'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Licensed under the Apache License, Version 2.0 (the "License"); you may'
nl|'\n'
comment|'# not use this file except in compliance with the License. You may obtain'
nl|'\n'
comment|'# a copy of the License at'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# http://www.apache.org/licenses/LICENSE-2.0'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Unless required by applicable law or agreed to in writing, software'
nl|'\n'
comment|'# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT'
nl|'\n'
comment|'# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the'
nl|'\n'
comment|'# License for the specific language governing permissions and limitations'
nl|'\n'
comment|'# under the License.'
nl|'\n'
nl|'\n'
name|'from'
name|'oslo_serialization'
name|'import'
name|'jsonutils'
newline|'\n'
name|'import'
name|'webob'
newline|'\n'
nl|'\n'
name|'from'
name|'nova'
op|'.'
name|'image'
name|'import'
name|'glance'
newline|'\n'
name|'from'
name|'nova'
name|'import'
name|'test'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'tests'
op|'.'
name|'unit'
op|'.'
name|'api'
op|'.'
name|'openstack'
name|'import'
name|'fakes'
newline|'\n'
nl|'\n'
DECL|variable|NOW_API_FORMAT
name|'NOW_API_FORMAT'
op|'='
string|'"2010-10-11T10:30:22Z"'
newline|'\n'
DECL|variable|IMAGES
name|'IMAGES'
op|'='
op|'['
op|'{'
nl|'\n'
string|"'id'"
op|':'
string|"'123'"
op|','
nl|'\n'
string|"'name'"
op|':'
string|"'public image'"
op|','
nl|'\n'
string|"'metadata'"
op|':'
op|'{'
string|"'key1'"
op|':'
string|"'value1'"
op|'}'
op|','
nl|'\n'
string|"'updated'"
op|':'
name|'NOW_API_FORMAT'
op|','
nl|'\n'
string|"'created'"
op|':'
name|'NOW_API_FORMAT'
op|','
nl|'\n'
string|"'status'"
op|':'
string|"'ACTIVE'"
op|','
nl|'\n'
string|"'progress'"
op|':'
number|'100'
op|','
nl|'\n'
string|"'minDisk'"
op|':'
number|'10'
op|','
nl|'\n'
string|"'minRam'"
op|':'
number|'128'
op|','
nl|'\n'
string|"'size'"
op|':'
number|'12345678'
op|','
nl|'\n'
string|'"links"'
op|':'
op|'['
op|'{'
nl|'\n'
string|'"rel"'
op|':'
string|'"self"'
op|','
nl|'\n'
string|'"href"'
op|':'
string|'"http://localhost/v2/fake/images/123"'
op|','
nl|'\n'
op|'}'
op|','
nl|'\n'
op|'{'
nl|'\n'
string|'"rel"'
op|':'
string|'"bookmark"'
op|','
nl|'\n'
string|'"href"'
op|':'
string|'"http://localhost/fake/images/123"'
op|','
nl|'\n'
op|'}'
op|']'
op|','
nl|'\n'
op|'}'
op|','
nl|'\n'
op|'{'
nl|'\n'
string|"'id'"
op|':'
string|"'124'"
op|','
nl|'\n'
string|"'name'"
op|':'
string|"'queued snapshot'"
op|','
nl|'\n'
string|"'updated'"
op|':'
name|'NOW_API_FORMAT'
op|','
nl|'\n'
string|"'created'"
op|':'
name|'NOW_API_FORMAT'
op|','
nl|'\n'
string|"'status'"
op|':'
string|"'SAVING'"
op|','
nl|'\n'
string|"'progress'"
op|':'
number|'25'
op|','
nl|'\n'
string|"'minDisk'"
op|':'
number|'0'
op|','
nl|'\n'
string|"'minRam'"
op|':'
number|'0'
op|','
nl|'\n'
string|"'size'"
op|':'
number|'87654321'
op|','
nl|'\n'
string|'"links"'
op|':'
op|'['
op|'{'
nl|'\n'
string|'"rel"'
op|':'
string|'"self"'
op|','
nl|'\n'
string|'"href"'
op|':'
string|'"http://localhost/v2/fake/images/124"'
op|','
nl|'\n'
op|'}'
op|','
nl|'\n'
op|'{'
nl|'\n'
string|'"rel"'
op|':'
string|'"bookmark"'
op|','
nl|'\n'
string|'"href"'
op|':'
string|'"http://localhost/fake/images/124"'
op|','
nl|'\n'
op|'}'
op|']'
op|','
nl|'\n'
op|'}'
op|']'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|function|fake_show
name|'def'
name|'fake_show'
op|'('
op|'*'
name|'args'
op|','
op|'**'
name|'kwargs'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'IMAGES'
op|'['
number|'0'
op|']'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|function|fake_detail
dedent|''
name|'def'
name|'fake_detail'
op|'('
op|'*'
name|'args'
op|','
op|'**'
name|'kwargs'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'IMAGES'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|class|ImageSizeTestV21
dedent|''
name|'class'
name|'ImageSizeTestV21'
op|'('
name|'test'
op|'.'
name|'NoDBTestCase'
op|')'
op|':'
newline|'\n'
DECL|variable|content_type
indent|' '
name|'content_type'
op|'='
string|"'application/json'"
newline|'\n'
DECL|variable|prefix
name|'prefix'
op|'='
string|"'OS-EXT-IMG-SIZE'"
newline|'\n'
nl|'\n'
DECL|member|setUp
name|'def'
name|'setUp'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'super'
op|'('
name|'ImageSizeTestV21'
op|','
name|'self'
op|')'
op|'.'
name|'setUp'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'stubs'
op|'.'
name|'Set'
op|'('
name|'glance'
op|'.'
name|'GlanceImageService'
op|','
string|"'show'"
op|','
name|'fake_show'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'stubs'
op|'.'
name|'Set'
op|'('
name|'glance'
op|'.'
name|'GlanceImageService'
op|','
string|"'detail'"
op|','
name|'fake_detail'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'flags'
op|'('
name|'osapi_compute_extension'
op|'='
op|'['
string|"'nova.api.openstack.compute'"
nl|'\n'
string|"'.contrib.image_size.Image_size'"
op|']'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'flags'
op|'('
name|'api_servers'
op|'='
op|'['
string|"'http://localhost:9292'"
op|']'
op|','
name|'group'
op|'='
string|"'glance'"
op|')'
newline|'\n'
nl|'\n'
DECL|member|_make_request
dedent|''
name|'def'
name|'_make_request'
op|'('
name|'self'
op|','
name|'url'
op|')'
op|':'
newline|'\n'
indent|' '
name|'req'
op|'='
name|'webob'
op|'.'
name|'Request'
op|'.'
name|'blank'
op|'('
name|'url'
op|')'
newline|'\n'
name|'req'
op|'.'
name|'headers'
op|'['
string|"'Accept'"
op|']'
op|'='
name|'self'
op|'.'
name|'content_type'
newline|'\n'
name|'res'
op|'='
name|'req'
op|'.'
name|'get_response'
op|'('
name|'self'
op|'.'
name|'_get_app'
op|'('
op|')'
op|')'
newline|'\n'
name|'return'
name|'res'
newline|'\n'
nl|'\n'
DECL|member|_get_app
dedent|''
name|'def'
name|'_get_app'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'fakes'
op|'.'
name|'wsgi_app_v21'
op|'('
name|'init_only'
op|'='
op|'('
string|"'images'"
op|','
string|"'image-size'"
op|')'
op|')'
newline|'\n'
nl|'\n'
DECL|member|_get_image
dedent|''
name|'def'
name|'_get_image'
op|'('
name|'self'
op|','
name|'body'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'jsonutils'
op|'.'
name|'loads'
op|'('
name|'body'
op|')'
op|'.'
name|'get'
op|'('
string|"'image'"
op|')'
newline|'\n'
nl|'\n'
DECL|member|_get_images
dedent|''
name|'def'
name|'_get_images'
op|'('
name|'self'
op|','
name|'body'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'jsonutils'
op|'.'
name|'loads'
op|'('
name|'body'
op|')'
op|'.'
name|'get'
op|'('
string|"'images'"
op|')'
newline|'\n'
nl|'\n'
DECL|member|assertImageSize
dedent|''
name|'def'
name|'assertImageSize'
op|'('
name|'self'
op|','
name|'image'
op|','
name|'size'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'image'
op|'.'
name|'get'
op|'('
string|"'%s:size'"
op|'%'
name|'self'
op|'.'
name|'prefix'
op|')'
op|','
name|'size'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_show
dedent|''
name|'def'
name|'test_show'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'url'
op|'='
string|"'/v2/fake/images/1'"
newline|'\n'
name|'res'
op|'='
name|'self'
op|'.'
name|'_make_request'
op|'('
name|'url'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'res'
op|'.'
name|'status_int'
op|','
number|'200'
op|')'
newline|'\n'
name|'image'
op|'='
name|'self'
op|'.'
name|'_get_image'
op|'('
name|'res'
op|'.'
name|'body'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertImageSize'
op|'('
name|'image'
op|','
number|'12345678'
op|')'
newline|'\n'
nl|'\n'
DECL|member|test_detail
dedent|''
name|'def'
name|'test_detail'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'url'
op|'='
string|"'/v2/fake/images/detail'"
newline|'\n'
name|'res'
op|'='
name|'self'
op|'.'
name|'_make_request'
op|'('
name|'url'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'assertEqual'
op|'('
name|'res'
op|'.'
name|'status_int'
op|','
number|'200'
op|')'
newline|'\n'
name|'images'
op|'='
name|'self'
op|'.'
name|'_get_images'
op|'('
name|'res'
op|'.'
name|'body'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertImageSize'
op|'('
name|'images'
op|'['
number|'0'
op|']'
op|','
number|'12345678'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'assertImageSize'
op|'('
name|'images'
op|'['
number|'1'
op|']'
op|','
number|'87654321'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|class|ImageSizeTestV2
dedent|''
dedent|''
name|'class'
name|'ImageSizeTestV2'
op|'('
name|'ImageSizeTestV21'
op|')'
op|':'
newline|'\n'
DECL|member|_get_app
indent|' '
name|'def'
name|'_get_app'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'fakes'
op|'.'
name|'wsgi_app'
op|'('
name|'init_only'
op|'='
op|'('
string|"'images'"
op|','
op|')'
op|')'
newline|'\n'
dedent|''
dedent|''
endmarker|''
end_unit
| 12.398892 | 88 | 0.587913 |
d1fffe52766a1dbab3d0157fa1e3e3d2aab3be6d | 2,360 | py | Python | 2020/day08.py | groselt/AoC | c32d8bafddbe00c03d6420fc62cc5e78cbb1cd3c | [
"MIT"
] | null | null | null | 2020/day08.py | groselt/AoC | c32d8bafddbe00c03d6420fc62cc5e78cbb1cd3c | [
"MIT"
] | null | null | null | 2020/day08.py | groselt/AoC | c32d8bafddbe00c03d6420fc62cc5e78cbb1cd3c | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
from typing import List, NamedTuple, Optional, Set
from utils import get_file_lines
class Instruction(NamedTuple):
    """One program line: an opcode ('nop' | 'acc' | 'jmp') and its operand."""
    operation: str
    param: int


class Computer:
    """Tiny accumulator machine for AoC 2020 day 8 programs."""

    def __init__(self, program: List[str]):
        self.instructions = self.parse(program)
        self.reset()

    @staticmethod
    def parse(program: List[str]) -> List[Instruction]:
        """Turn raw lines such as 'jmp -4' into Instruction tuples."""
        return [
            Instruction(fields[0], int(fields[1]))
            for fields in (line.split() for line in program)
        ]

    def reset(self) -> None:
        """Rewind the machine: instruction pointer and accumulator to 0."""
        self.ip = 0  # instruction pointer
        self.accumulator = 0

    def step(self):
        """Execute the instruction under the pointer and advance."""
        op, param = self.instructions[self.ip]
        if op == 'acc':
            self.accumulator += param
            self.ip += 1
        elif op == 'jmp':
            self.ip += param
        elif op == 'nop':
            self.ip += 1
        else:
            raise RuntimeError(f'Unknown instruction {op} at {self.ip}')


def run_to_loop_start(computer: Computer) -> bool:
    """Run until an instruction repeats (True) or the program ends (False)."""
    seen: Set[int] = set()
    size = len(computer.instructions)
    while computer.ip < size:
        if computer.ip in seen:
            return True
        seen.add(computer.ip)
        computer.step()
    return False


def part1(computer: Computer) -> int:
    """Accumulator value at the moment the infinite loop is first detected."""
    looped = run_to_loop_start(computer)
    assert looped
    return computer.accumulator


def part2(computer: Computer) -> Optional[int]:
    """Accumulator after repairing the program by flipping one jmp/nop."""
    def flipped(ins: Instruction) -> Instruction:
        return Instruction('jmp' if ins.operation == 'nop' else 'nop', ins.param)

    for idx, original in enumerate(computer.instructions):
        computer.reset()
        if original.operation in ('jmp', 'nop'):
            computer.instructions[idx] = flipped(original)
            if not run_to_loop_start(computer):
                return computer.accumulator
            computer.instructions[idx] = original
    return None
if __name__ == '__main__':
    # Advent of Code 2020 day 8: instruction lines come from the puzzle
    # input via utils.get_file_lines().
    raw_instructions = get_file_lines()
    main_computer = Computer(raw_instructions)
    print(part1(main_computer))  # 2051
    print(part2(main_computer))  # 2304
| 30.649351 | 91 | 0.649576 |
34950a7cd3d485d48c7e52033cd9d266128dddfa | 4,614 | py | Python | tests/test_pipeline/components/feature_preprocessing/test_select_rates_classification.py | jianzhnie/AutoTabular | fb407300adf97532a26d33f7442d2a606fa30512 | [
"Apache-2.0"
] | 48 | 2021-09-06T08:09:26.000Z | 2022-03-28T13:02:54.000Z | tests/test_pipeline/components/feature_preprocessing/test_select_rates_classification.py | Fanxingye/Autotabular | d630c78290a52f8c73885afb16884e18135c34f6 | [
"Apache-2.0"
] | null | null | null | tests/test_pipeline/components/feature_preprocessing/test_select_rates_classification.py | Fanxingye/Autotabular | d630c78290a52f8c73885afb16884e18135c34f6 | [
"Apache-2.0"
] | 7 | 2021-09-23T07:28:46.000Z | 2021-10-02T21:15:18.000Z | import unittest
import numpy as np
import scipy.sparse
import sklearn.preprocessing
from autotabular.pipeline.components.feature_preprocessing.select_rates_classification import SelectClassificationRates
from autotabular.pipeline.util import _test_preprocessing, get_dataset
class SelectClassificationRatesComponentTest(unittest.TestCase):
    """Unit tests for the SelectClassificationRates feature preprocessor.

    Covers the output shape under the default configuration (dense, sparse
    and standardized digits data) and checks that the input dtype
    (float32/float64, dense/sparse) is preserved by fit/transform.
    """

    def test_default_configuration(self):
        # Dense iris data: the default config keeps 3 of the features.
        transformation, original = _test_preprocessing(
            SelectClassificationRates)
        self.assertEqual(transformation.shape[0], original.shape[0])
        self.assertEqual(transformation.shape[1], 3)
        self.assertFalse((transformation == 0).all())

        # Sparse data: half of the features survive and sparsity is kept.
        transformation, original = _test_preprocessing(
            SelectClassificationRates, make_sparse=True)
        self.assertTrue(scipy.sparse.issparse(transformation))
        self.assertEqual(transformation.shape[0], original.shape[0])
        self.assertEqual(transformation.shape[1], int(original.shape[1] / 2))

        # Custom preprocessing test to check if clipping to zero works.
        X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits')
        original_X_train = X_train.copy()
        ss = sklearn.preprocessing.StandardScaler()
        X_train = ss.fit_transform(X_train)
        configuration_space = SelectClassificationRates.get_hyperparameter_search_space(
        )
        default = configuration_space.get_default_configuration()
        preprocessor = SelectClassificationRates(
            random_state=1,
            **{
                hp_name: default[hp_name]
                for hp_name in default if default[hp_name] is not None
            })
        transformer = preprocessor.fit(X_train, Y_train)
        transformation, original = transformer.transform(
            X_train), original_X_train
        self.assertEqual(transformation.shape[0], original.shape[0])
        # I don't know why it's 52 here and not 32 which would be half of the
        # number of features. Seems to be related to a runtime warning raised
        # by sklearn.
        self.assertEqual(transformation.shape[1], 52)

    def _check_preprocessing_dtype(self, dtype, make_sparse):
        """Fit/transform iris data of the given dtype and assert it is kept.

        dtype: np.float32 or np.float64 -- float32 is what get_dataset
            returns natively, float64 is obtained by casting.
        make_sparse: whether to request a sparse matrix from get_dataset
            (assumes get_dataset defaults to make_sparse=False -- matches
            the original dense calls without the keyword).
        """
        X_train, Y_train, X_test, Y_test = get_dataset(
            'iris', make_sparse=make_sparse)
        if dtype is np.float32:
            self.assertEqual(X_train.dtype, np.float32)
        else:
            X_train = X_train.astype(dtype)
        configuration_space = SelectClassificationRates.get_hyperparameter_search_space(
        )
        default = configuration_space.get_default_configuration()
        preprocessor = SelectClassificationRates(
            random_state=1,
            **{hp_name: default[hp_name]
               for hp_name in default})
        preprocessor.fit(X_train, Y_train)
        Xt = preprocessor.transform(X_train)
        self.assertEqual(Xt.dtype, dtype)

    def test_preprocessing_dtype(self):
        # The original test repeated the same stanza four times; exercise
        # all four dtype/layout combinations through one helper instead.
        self._check_preprocessing_dtype(np.float32, make_sparse=False)
        self._check_preprocessing_dtype(np.float64, make_sparse=False)
        self._check_preprocessing_dtype(np.float32, make_sparse=True)
        self._check_preprocessing_dtype(np.float64, make_sparse=True)
| 41.567568 | 119 | 0.675769 |
485873f66a966166d74941b4717772e0c5d2adf8 | 9,312 | py | Python | util/dvsim/verixcdc-report-parser.py | asb/opentitan | af68ff5041b10c81e97adc075a4d042f8ac7ab20 | [
"Apache-2.0"
] | 1,375 | 2019-11-05T15:11:00.000Z | 2022-03-28T17:50:43.000Z | util/dvsim/verixcdc-report-parser.py | asb/opentitan | af68ff5041b10c81e97adc075a4d042f8ac7ab20 | [
"Apache-2.0"
] | 7,045 | 2019-11-05T16:05:45.000Z | 2022-03-31T23:08:08.000Z | util/dvsim/verixcdc-report-parser.py | asb/opentitan | af68ff5041b10c81e97adc075a4d042f8ac7ab20 | [
"Apache-2.0"
] | 428 | 2019-11-05T15:00:20.000Z | 2022-03-28T15:34:57.000Z | #!/usr/bin/env python3
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
r"""Parses cdc report and dump filtered messages in hjson format.
"""
import argparse
import logging as log
import re
import sys
import os
import hjson
from pathlib import Path
from LintParser import LintParser
def extract_rule_patterns(file_path: Path):
    '''
    This parses the CDC summary table to get the message totals,
    rule names and corresponding severities.

    Returns a list of (message_group, regex) tuples, one per rule listed
    in the "Summary of Policy: ALL" table of the report at file_path,
    where message_group is "<category>_<severity>" (e.g. "cdc_warning").
    An unreadable file yields an empty list.
    '''
    rule_patterns = []
    full_file = ''
    try:
        with Path(file_path).open() as f:
            full_file = f.read()
    except IOError:
        # We will attempt read this file again in a second pass to parse out
        # the details, this error will get caught and reported.
        pass
    category = ''
    severity = ''
    # Maps rule name -> the message group it was first encountered in.
    known_rule_names = {}
    # NOTE(review): total_msgs is parsed below but never used or returned.
    total_msgs = 0
    # extract the summary table
    m = re.findall(
        r'^Summary of Policy: ALL((?:.|\n|\r\n)*)Rule Details of Policy: ALL',
        full_file, flags=re.MULTILINE)
    if m:
        # step through the table and identify rule names and their
        # category and severity. GROUP lines switch the current category
        # or severity; INSTANCE lines name a rule under that context.
        for line in m[0].split('\n'):
            if re.match(r'^POLICY\s+ALL', line):
                total = re.findall(r'^POLICY\s+ALL\s+([0-9]+)', line)
                total_msgs = int(total[0])
            elif re.match(r'^ GROUP\s+SDC_ENV_LINT', line):
                category = 'sdc'
            elif re.match(r'^ GROUP\s+VCDC_SETUP_CHECKS', line):
                category = 'setup'
            elif re.match(r'^ GROUP\s+VCDC_ANALYSIS_CHECKS', line):
                category = 'cdc'
            elif re.match(r'^ GROUP\s+ERROR', line):
                severity = 'error'
            elif re.match(r'^ GROUP\s+WARNING', line):
                severity = 'warning'
            elif re.match(r'^ GROUP\s+INFO', line):
                severity = 'info'
            elif re.match(r'^ GROUP\s+REVIEW', line):
                severity = 'review'
            elif re.match(r'^ INSTANCE', line):
                # we've found a new rule. convert it to a known rule pattern
                # with the correct category and severity
                rule = re.findall(
                    r'^ INSTANCE\s+([a-zA-Z0-9\_]+)\s+([0-9\_]+)', line)
                name = rule[0][0]
                count = int(rule[0][1])  # NOTE(review): count is unused
                # a few rules produce messages with different severities but
                # the same rule labels. for simplicity, we promote messages
                # from lower severity buckets to the severity bucket where
                # this rule name has first been encountered. since higher
                # severity messages are listed first in this summary table, it
                # is straightforward to check whether the rule name has
                # already appeared in a higher severity bucket.
                if name in known_rule_names:
                    msg_group = known_rule_names[name]
                    log.warning('Rule {} is reported in multiple severity '
                                'classes. All messages of this rule are '
                                'promoted to {}'.format(name, msg_group))
                else:
                    msg_group = category + '_' + severity
                    known_rule_names.update({name: msg_group})
                rule_patterns.append((msg_group, r'^{}:.*'.format(name)))
    return rule_patterns
# Reuse the lint parser, but add more buckets.
# Reuse the lint parser, but add more buckets.
class CdcParser(LintParser):
    """LintParser with CDC-specific message buckets and severities."""

    def __init__(self) -> None:
        # Severity classes available per message category; 'flow' messages
        # have no 'review' class.
        category_severities = {
            'flow': ('info', 'warning', 'error'),
            'sdc': ('info', 'review', 'warning', 'error'),
            'setup': ('info', 'review', 'warning', 'error'),
            'cdc': ('info', 'review', 'warning', 'error'),
        }
        self.buckets = {}
        self.severities = {}
        for category, severities in category_severities.items():
            for severity in severities:
                key = category + '_' + severity
                self.buckets[key] = []
                # 'review' messages are counted as warnings downstream.
                self.severities[key] = (
                    'warning' if severity == 'review' else severity)
        # this bucket is temporary and will be removed at the end of the
        # parsing pass.
        self.buckets['fusesoc-error'] = []
# TODO(#9079): this script will be removed long term once the
# parser has been merged with the Dvsim core code.
def main():
    """Parse VerixCDC log/report files and write an aggregated results file.

    Exits with status 1 if any warnings or errors were found, 0 otherwise
    (info messages do not count as failures).
    """
    # Fix vs. original: the description wrongly referred to "AscentLint";
    # this script parses VerixCDC output (see the vcdc.log/vcdc.rpt paths).
    parser = argparse.ArgumentParser(
        description="""This script parses VerixCDC log and report files from
        a lint run, filters the messages and creates an aggregated result
        .hjson file with lint messages and their severities.
        The script returns nonzero status if any warnings or errors are
        present.
        """)
    parser.add_argument('--repdir',
                        type=lambda p: Path(p).resolve(),
                        default="./",
                        help="""The script searches the 'vcdc.log' and
                        'vcdc.rpt' files in this directory.
                        Defaults to './'""")
    parser.add_argument('--outfile',
                        type=lambda p: Path(p).resolve(),
                        default="./results.hjson",
                        help="""Path to the results Hjson file.
                        Defaults to './results.hjson'""")
    args = parser.parse_args()
    # Define warning/error patterns for each logfile
    parser_args = {}
    # Patterns for lint.log
    parser_args.update({
        args.repdir.joinpath('build.log'): [
            # If lint warnings have been found, the lint tool will exit
            # with a nonzero status code and fusesoc will always spit out
            # an error like
            #
            # ERROR: Failed to build ip:core:name:0.1 : 'make' exited with an error code
            #
            # If we found any other warnings or errors, there's no point in
            # listing this too. BUT we want to make sure we *do* see this
            # error if there are no other errors or warnings, since that
            # shows something has come unstuck. (Probably the lint tool
            # spat out a warning that we don't understand)
            ("fusesoc-error",
             r"^ERROR: Failed to build .* : 'make' exited with an error code")
        ]
    })
    # Patterns for vcdc.log
    parser_args.update({
        args.repdir.joinpath('syn-icarus/vcdc.log'): [
            ("flow_error", r"^FlexNet Licensing error.*"),
            ("flow_error", r"^Error: .*"),
            ("flow_error", r"^ERROR.*"),
            ("flow_error", r"^ ERR .*"),
            ("flow_warning", r"^Warning: .*"),
            # We ignore several messages here:
            # #25010: unused signals
            # #25011: unused signals
            # #25012: unused port
            # #25013: unused signals
            # #26038: unused or RTL constant
            # #39035: parameter becomes local
            # #39122: non-positive repeat
            # #39491: parameter in package
            ("flow_warning", r"^ "
             "(?!WARN \[#25010\])"
             "(?!WARN \[#25011\])"
             "(?!WARN \[#25012\])"
             "(?!WARN \[#25013\])"
             "(?!WARN \[#26038\])"
             "(?!WARN \[#39035\])"
             "(?!WARN \[#39122\])"
             "(?!WARN \[#39491\])"
             "WARN .*"),
            ("flow_info", r"^ INFO .*")
        ]
    })
    # The CDC messages are a bit more involved to parse out, since we
    # need to know the names and associated severities to do this.
    # The tool prints out an overview table in the report, which we are
    # going to parse first in order to get this information.
    # This is then used to construct the regex patterns to look for
    # in a second pass to get the actual CDC messages.
    cdc_rule_patterns = extract_rule_patterns(
        args.repdir.joinpath('syn-icarus/vcdc.rpt'))
    # Patterns for vcdc.rpt
    parser_args.update({
        args.repdir.joinpath('syn-icarus/vcdc.rpt'): cdc_rule_patterns
    })
    # Parse logs. (Renamed from `parser`, which shadowed the argparse
    # parser above.)
    cdc_parser = CdcParser()
    num_messages = cdc_parser.get_results(parser_args)
    # Write out results file
    cdc_parser.write_results_as_hjson(args.outfile)
    # return nonzero status if any warnings or errors are present
    # lint infos do not count as failures
    if num_messages['error'] > 0 or num_messages['warning'] > 0:
        log.info("Found %d lint errors and %d lint warnings",
                 num_messages['error'],
                 num_messages['warning'])
        sys.exit(1)
    log.info("Lint logfile parsed successfully")
    sys.exit(0)
if __name__ == "__main__":
main()
| 37.548387 | 87 | 0.536082 |
a6034574bce65b9336f08e8d3cb79ad93ad23346 | 5,979 | py | Python | cloudvolume/dask.py | perlman/cloud-volume | e3c01f69e6519f0c53d13d39e7a42bb38d63a936 | [
"BSD-3-Clause"
] | 1 | 2021-02-12T12:15:33.000Z | 2021-02-12T12:15:33.000Z | cloudvolume/dask.py | perlman/cloud-volume | e3c01f69e6519f0c53d13d39e7a42bb38d63a936 | [
"BSD-3-Clause"
] | null | null | null | cloudvolume/dask.py | perlman/cloud-volume | e3c01f69e6519f0c53d13d39e7a42bb38d63a936 | [
"BSD-3-Clause"
] | null | null | null | """Utilties for translating to/from dask arrays.
NOTE: Using the thread-based dask scheduler is very inefficient with
CloudVolume because CV is pure python and will cause substantial
GIL contention between the dask worker threads. It is HIGHLY
ADVISABLE to use a distributed process scheduler with one thread
per process.
"""
import numpy as np
from .cloudvolume import CloudVolume
def to_cloudvolume(arr,
                   cloudpath,
                   resolution=(1, 1, 1),
                   voxel_offset=(0, 0, 0),
                   layer_type=None,
                   encoding='raw',
                   max_mip=0,
                   compute=True,
                   return_stored=False,
                   **kwargs):
  """Save 3d or 4d dask array to the precomputed CloudVolume storage format.

  NOTE: DO NOT USE thread-based dask scheduler. See comment at top of module.

  See https://docs.dask.org/en/latest/array.html for details about the format.

  Parameters
  ----------
  arr: dask.array
    Data to store
  cloudpath: str
    Path to the dataset layer. This should match storage's supported
    providers.
    e.g. Google: gs://$BUCKET/$DATASET/$LAYER/
         S3    : s3://$BUCKET/$DATASET/$LAYER/
         Lcl FS: file:///tmp/$DATASET/$LAYER/
         Boss  : boss://$COLLECTION/$EXPERIMENT/$CHANNEL
         HTTP/S: http(s)://.../$CHANNEL
         matrix: matrix://$BUCKET/$DATASET/$LAYER/
  resolution: Iterable of ints of length 3
    The x, y, z voxel dimensions in nanometers
  voxel_offset: Iterable of ints of length 3
    The x, y, z beginning of dataset in positive cartesian space.
  layer_type: str
    "image" or "segmentation". If None, inferred from arr.dtype:
    bool/uint16/uint32/uint64 -> "segmentation", other integer or
    floating dtypes -> "image".
  encoding: str
    Storage encoding passed to CloudVolume.create_new_info, e.g. 'raw'.
  max_mip: int
    Maximum mip level id.
  compute: boolean, optional
    If true compute immediately, return ``dask.delayed.Delayed`` otherwise.
  return_stored: boolean, optional
    Optionally return stored results.
  kwargs: passed to the ``cloudvolume.CloudVolume()`` function, e.g., compression options

  Raises
  ------
  ValueError
    If ``arr`` has ndim different that 3 or 4, or ``layer_type`` is unsupported.

  Returns
  -------
  See notes on `compute` and `return_stored` parameters.
  """
  import dask
  import dask.array as da
  if not da.core._check_regular_chunks(arr.chunks):
    raise ValueError('Attempt to save array to cloudvolume with irregular '
                     'chunking, please call `arr.rechunk(...)` first.')
  if not layer_type:
    # np.bool_ instead of np.bool: the np.bool alias was deprecated in
    # NumPy 1.20 and removed in NumPy 1.24.
    if arr.dtype in (np.bool_, np.uint32, np.uint64, np.uint16):
      layer_type = 'segmentation'
    elif np.issubdtype(arr.dtype, np.integer) or np.issubdtype(arr.dtype, np.floating):
      layer_type = 'image'
    else:
      # Report the offending dtype (the original message printed the falsy
      # layer_type, which is None on this branch).
      raise ValueError('Unsupported dtype for CloudVolume: %s' % arr.dtype)
  if arr.ndim == 3:
    num_channels = 1
    chunk_size = arr.chunksize
  elif arr.ndim == 4:
    num_channels = arr.shape[-1]
    chunk_size = arr.chunksize[:3]
  else:
    raise ValueError('CloudVolume only supports 3 or 4 dimensions. Array has %d.' % arr.ndim)
  info = CloudVolume.create_new_info(num_channels,
                                     layer_type,
                                     arr.dtype.name,
                                     encoding,
                                     resolution,
                                     voxel_offset,
                                     arr.shape[:3],
                                     chunk_size=chunk_size,
                                     max_mip=max_mip)
  # Delay writing any metadata until computation time.
  # - the caller may never do the full computation
  # - the filesystem may be slow, and there is a desire to open files
  #   in parallel on worker machines.
  vol = dask.delayed(_create_cloudvolume)(cloudpath, info, **kwargs)
  return arr.store(vol, lock=False, compute=compute, return_stored=return_stored)
def _create_cloudvolume(cloudpath, info, **kwargs):
  """Instantiate a CloudVolume at `cloudpath` and persist its metadata.

  Commits both the info file and a provenance record tagging this module
  as the creator, then returns the ready-to-use volume.
  """
  volume = CloudVolume(cloudpath, info=info, progress=False, **kwargs)
  volume.commit_info()
  volume.provenance.processing = [
      {'method': 'cloudvolume.dask.to_cloudvolume'}
  ]
  volume.commit_provenance()
  return volume
def from_cloudvolume(cloudpath, chunks=None, name=None, **kwargs):
  """Load a dask array from a cloudvolume compatible dataset.

  NOTE: DO NOT USE thread-based dask scheduler. See comment at top of module.

  Single-channel volumes come back as 4d arrays with a length-1 channel
  dimension, even if stored from 3d data; the channel axis is a single
  chunk by default, matching how CloudVolumes are stored.

  See https://docs.dask.org/en/latest/array.html for details about the format.

  Parameters
  ----------
  cloudpath: str
    Path to the dataset layer, in any of storage's supported providers
    (gs://, s3://, file://, boss://, http(s)://, matrix://).
  chunks: tuple of ints or tuples of ints
    Forwarded to ``da.from_array`` to override the stored chunking when it
    is not optimal for the downstream computation. Must be compatible with
    an underlying 4d array.
  name: str, optional
    Optional keyname for the array; defaults to a hash of the input.
  kwargs: passed to the ``cloudvolume.CloudVolume()`` function, e.g., compression options

  Returns
  -------
  Dask array
  """
  import dask.array as da
  from dask.base import tokenize

  volume = CloudVolume(cloudpath, progress=False, **kwargs)
  if chunks is None:
    # Default to the native layout: one stored chunk per dask chunk, with
    # the channel axis kept whole.
    chunks = tuple(volume.chunk_size) + (volume.num_channels, )
  if name is None:
    name = 'from-cloudvolume-' + tokenize(volume, chunks, **kwargs)
  return da.from_array(volume, chunks, name=name)
| 36.907407 | 94 | 0.645091 |
aa2db790fa4093be1213dbef5fff9ffa70737e3c | 330 | py | Python | ThreeBotPackages/zerobot/admin/actors/pools.py | threefoldtech/jumpscaleX_threebot | 2841e1109abf1bee113b027a7e04bfbd25cbcb48 | [
"Apache-2.0"
] | null | null | null | ThreeBotPackages/zerobot/admin/actors/pools.py | threefoldtech/jumpscaleX_threebot | 2841e1109abf1bee113b027a7e04bfbd25cbcb48 | [
"Apache-2.0"
] | 546 | 2019-08-29T11:48:19.000Z | 2020-12-06T07:20:45.000Z | ThreeBotPackages/zerobot/admin/actors/pools.py | threefoldtech/jumpscaleX_threebot | 2841e1109abf1bee113b027a7e04bfbd25cbcb48 | [
"Apache-2.0"
] | 5 | 2019-09-26T14:03:05.000Z | 2020-04-16T08:47:10.000Z | from Jumpscale import j
class pools(j.baseclasses.threebot_actor):
    """Threebot actor exposing zosv2 pool queries."""

    @j.baseclasses.actor_method
    def list_pools(self, schema_out=None, user_session=None):
        """Return every zosv2 pool as a JSON-encoded list of dicts."""
        pool_dicts = [pool._ddict for pool in j.sal.zosv2.pools.list()]
        return j.data.serializers.json.dumps(pool_dicts)
| 30 | 61 | 0.690909 |
ec8520615ec50d0a8e90cf010c19c98e10f0b30c | 3,984 | py | Python | alipay/aop/api/request/AlipayMerchantOrderCreateandpayRequest.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/request/AlipayMerchantOrderCreateandpayRequest.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/request/AlipayMerchantOrderCreateandpayRequest.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayMerchantOrderCreateandpayModel import AlipayMerchantOrderCreateandpayModel
class AlipayMerchantOrderCreateandpayRequest(object):
    # Request wrapper for the 'alipay.merchant.order.createandpay' API.
    # Holds the business payload (biz_model/biz_content) plus common gateway
    # parameters, and serializes them via get_params().
    def __init__(self, biz_model=None):
        self._biz_model = biz_model
        self._biz_content = None
        self._version = "1.0"
        self._terminal_type = None
        self._terminal_info = None
        self._prod_code = None
        self._notify_url = None
        self._return_url = None
        self._udf_params = None
        self._need_encrypt = False
    @property
    def biz_model(self):
        return self._biz_model
    @biz_model.setter
    def biz_model(self, value):
        self._biz_model = value
    @property
    def biz_content(self):
        return self._biz_content
    @biz_content.setter
    def biz_content(self, value):
        # Coerce plain dicts into the typed model; model instances pass through.
        if isinstance(value, AlipayMerchantOrderCreateandpayModel):
            self._biz_content = value
        else:
            self._biz_content = AlipayMerchantOrderCreateandpayModel.from_alipay_dict(value)
    @property
    def version(self):
        return self._version
    @version.setter
    def version(self, value):
        self._version = value
    @property
    def terminal_type(self):
        return self._terminal_type
    @terminal_type.setter
    def terminal_type(self, value):
        self._terminal_type = value
    @property
    def terminal_info(self):
        return self._terminal_info
    @terminal_info.setter
    def terminal_info(self, value):
        self._terminal_info = value
    @property
    def prod_code(self):
        return self._prod_code
    @prod_code.setter
    def prod_code(self, value):
        self._prod_code = value
    @property
    def notify_url(self):
        return self._notify_url
    @notify_url.setter
    def notify_url(self, value):
        self._notify_url = value
    @property
    def return_url(self):
        return self._return_url
    @return_url.setter
    def return_url(self, value):
        self._return_url = value
    @property
    def udf_params(self):
        return self._udf_params
    @udf_params.setter
    def udf_params(self, value):
        # Silently ignore non-dict values (keeps previous udf_params intact).
        if not isinstance(value, dict):
            return
        self._udf_params = value
    @property
    def need_encrypt(self):
        return self._need_encrypt
    @need_encrypt.setter
    def need_encrypt(self, value):
        self._need_encrypt = value
    def add_other_text_param(self, key, value):
        # Lazily create the user-defined params dict on first use.
        if not self.udf_params:
            self.udf_params = dict()
        self.udf_params[key] = value
    def get_params(self):
        # Serialize this request into the flat parameter dict sent to the
        # gateway. biz_model takes precedence under the P_BIZ_CONTENT key;
        # biz_content is emitted under the literal 'biz_content' key,
        # JSON-encoded if it provides to_alipay_dict(). Falsy fields are
        # omitted entirely.
        params = dict()
        params[P_METHOD] = 'alipay.merchant.order.createandpay'
        params[P_VERSION] = self.version
        if self.biz_model:
            params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
        if self.biz_content:
            if hasattr(self.biz_content, 'to_alipay_dict'):
                params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
            else:
                params['biz_content'] = self.biz_content
        if self.terminal_type:
            params['terminal_type'] = self.terminal_type
        if self.terminal_info:
            params['terminal_info'] = self.terminal_info
        if self.prod_code:
            params['prod_code'] = self.prod_code
        if self.notify_url:
            params['notify_url'] = self.notify_url
        if self.return_url:
            params['return_url'] = self.return_url
        if self.udf_params:
            params.update(self.udf_params)
        return params
    def get_multipart_params(self):
        # This request carries no file uploads, so multipart params are empty.
        multipart_params = dict()
        return multipart_params
| 27.475862 | 148 | 0.646084 |
c64ccac34170437164289971b61d3c2277663f4d | 354 | py | Python | lib/steps/SetEnvVar.py | lastcolour/Tacos | fe2b65250bfa74613151ae2dc6a91eb30f254844 | [
"MIT"
] | null | null | null | lib/steps/SetEnvVar.py | lastcolour/Tacos | fe2b65250bfa74613151ae2dc6a91eb30f254844 | [
"MIT"
] | null | null | null | lib/steps/SetEnvVar.py | lastcolour/Tacos | fe2b65250bfa74613151ae2dc6a91eb30f254844 | [
"MIT"
] | null | null | null | from .Step import Step
import os
class SetEnvVar(Step):
    """Build step that exports a set of environment variables.

    Name/value pairs are read from the step's JSON config node and applied
    to ``os.environ`` when the step runs.
    """

    def __init__(self):
        Step.__init__(self)
        self._vars = {}

    def serialize(self, jsonNode):
        """Add each config entry to the variable map, coercing both sides to str."""
        # update() (not assignment) preserves the original accumulate-on-
        # repeated-call behavior.
        self._vars.update((str(name), str(jsonNode[name])) for name in jsonNode)

    def run(self):
        """Export every configured variable into the current environment."""
        for name, value in self._vars.items():
            os.environ[name] = value
585c08b0574465d82b9880d21cb365fa277febe0 | 509 | py | Python | lambda_handlers/validators/http/http_validator.py | renovate-tests/lambda-handlers | 0b14013f19b597524a8d50f7ea8813ee726c584c | [
"Apache-2.0"
] | null | null | null | lambda_handlers/validators/http/http_validator.py | renovate-tests/lambda-handlers | 0b14013f19b597524a8d50f7ea8813ee726c584c | [
"Apache-2.0"
] | null | null | null | lambda_handlers/validators/http/http_validator.py | renovate-tests/lambda-handlers | 0b14013f19b597524a8d50f7ea8813ee726c584c | [
"Apache-2.0"
] | null | null | null | from typing import Any, Dict
from lambda_handlers.validators.validator import Validator
class HttpValidator(Validator):
    """Validator for HTTP events.

    Maps the ``path``/``query``/``body`` schemas onto the corresponding
    event keys and forwards ``request``/``response`` to the base class as
    the input/output schemas.
    """

    def __init__(self, path=None, query=None, body=None, request=None, response=None):
        super().__init__(input_schema=request, output_schema=response)
        event_keys = ('pathParameters', 'queryStringParameters', 'body')
        self._schemas = dict(zip(event_keys, (path, query, body)))

    @property
    def schemas(self) -> Dict[str, Any]:
        """Per-event-key schemas used for HTTP payload validation."""
        return self._schemas
| 26.789474 | 86 | 0.654224 |
b4b79fa5ca7d81074c658c7ce14e51cbe93a7089 | 189 | py | Python | Images/From_2D_to_3D/sweep.py | RubenRubens/Montys-CAD | 3c59b1a8b01f66f61cce4bf8be735d63fd79bcef | [
"Apache-2.0"
] | 2 | 2020-06-06T22:16:36.000Z | 2020-08-10T11:24:25.000Z | Images/From_2D_to_3D/sweep.py | RubenRubens/SolidMonty | 3c59b1a8b01f66f61cce4bf8be735d63fd79bcef | [
"Apache-2.0"
] | null | null | null | Images/From_2D_to_3D/sweep.py | RubenRubens/SolidMonty | 3c59b1a8b01f66f61cce4bf8be735d63fd79bcef | [
"Apache-2.0"
] | null | null | null | from SolidMonty_FreeCAD import *
A = rotate("Y", angle = 90) (circle(10))
path = spline(points = [[0, 0], [30, 50], [40, 59]], closed = False)
F = sweep(A, path)
export(name="sweep") (F) | 23.625 | 68 | 0.608466 |
e1da72d5be0e3ad5c43a1630257e9ead86883011 | 2,095 | py | Python | text/keras_detectE_invoice.py | kingemma/invoice | b381ffcd4b798434ea74cb4463eb5cff276ded3a | [
"MIT"
] | 1,017 | 2019-08-02T04:18:35.000Z | 2022-03-29T08:18:03.000Z | text/keras_detectE_invoice.py | kingemma/invoice | b381ffcd4b798434ea74cb4463eb5cff276ded3a | [
"MIT"
] | 47 | 2019-08-08T08:36:48.000Z | 2022-03-08T07:00:29.000Z | text/keras_detectE_invoice.py | kingemma/invoice | b381ffcd4b798434ea74cb4463eb5cff276ded3a | [
"MIT"
] | 300 | 2019-08-03T03:06:30.000Z | 2022-03-31T02:20:11.000Z | from config import kerasTextEModel_invoice,IMGSIZE,keras_anchors,class_names,GPU,GPUID
from text.keras_yolo3 import yolo_text,box_layer,K
from apphelper.image import resize_im,letterbox_image
from PIL import Image
import numpy as np
import tensorflow as tf
graph = tf.get_default_graph()## work around web.py-related errors (shared default graph)
# Anchor boxes come from config as a flat comma-separated string; reshape
# into (num_anchors, 2) width/height pairs.
anchors = [float(x) for x in keras_anchors.split(',')]
anchors = np.array(anchors).reshape(-1, 2)
num_anchors = len(anchors)
num_classes = len(class_names)
# Build the text-detection model once at import time and load its weights.
textModel = yolo_text(num_classes,anchors)
textModel.load_weights(kerasTextEModel_invoice)
sess = K.get_session()
image_shape = K.placeholder(shape=(2, ))## original image size: h, w
input_shape = K.placeholder(shape=(2, ))## resized image size: h, w
# Graph node that converts raw model output into boxes + scores.
box_score = box_layer([*textModel.output,image_shape,input_shape],anchors, num_classes)
def text_detect(img,prob = 0.05):
    """Detect text boxes in an image array.

    img: image as a numpy array convertible by PIL.Image.fromarray
         (assumes HxW[xC] layout -- TODO confirm against callers).
    prob: minimum score; boxes scoring <= prob are dropped.
    Returns (box, scores): boxes clipped to image bounds, and their scores.
    """
    im = Image.fromarray(img)
    scale = IMGSIZE[0]
    w,h = im.size
    w_,h_ = resize_im(w,h, scale=scale, max_scale=2048)## short side fixed to 608, long side capped by max_scale<4000
    #boxed_image,f = letterbox_image(im, (w_,h_))
    boxed_image = im.resize((w_,h_), Image.BICUBIC)
    # Normalize to [0, 1] floats and add the batch dimension.
    image_data = np.array(boxed_image, dtype='float32')
    image_data /= 255.
    image_data = np.expand_dims(image_data, 0)  # Add batch dimension.
    imgShape = np.array([[h,w]])
    inputShape = np.array([[h_,w_]])
    global graph
    with graph.as_default():
        ## use the module-level graph to work around web.py-related errors
        """
        pred = textModel.predict_on_batch([image_data,imgShape,inputShape])
        box,scores = pred[:,:4],pred[:,-1]
        """
        box,scores = sess.run(
            [box_score],
            feed_dict={
                textModel.input: image_data,
                input_shape: [h_, w_],
                image_shape: [h, w],
                K.learning_phase(): 0
            })[0]
    # Clip box coordinates into the original image bounds, then keep only
    # boxes whose score exceeds the threshold.
    keep = np.where(scores>prob)
    box[:, 0:4][box[:, 0:4]<0] = 0
    box[:, 0][box[:, 0]>=w] = w-1
    box[:, 1][box[:, 1]>=h] = h-1
    box[:, 2][box[:, 2]>=w] = w-1
    box[:, 3][box[:, 3]>=h] = h-1
    box = box[keep[0]]
    scores = scores[keep[0]]
    return box,scores
| 30.808824 | 87 | 0.622912 |
71dfd93d3a87810260b0717c7bd3489e6a853dd3 | 1,098 | py | Python | src/third_party/wiredtiger/dist/java_doc.py | danx0r/mongo | 70d4944c235bcdf7fbbc63971099563d2af72956 | [
"Apache-2.0"
] | 72 | 2020-06-12T06:33:41.000Z | 2021-03-22T03:15:56.000Z | src/third_party/wiredtiger/dist/java_doc.py | danx0r/mongo | 70d4944c235bcdf7fbbc63971099563d2af72956 | [
"Apache-2.0"
] | 9 | 2020-07-02T09:36:49.000Z | 2021-03-25T23:54:00.000Z | src/third_party/wiredtiger/dist/java_doc.py | danx0r/mongo | 70d4944c235bcdf7fbbc63971099563d2af72956 | [
"Apache-2.0"
] | 23 | 2017-01-22T03:35:26.000Z | 2021-12-16T11:17:39.000Z | #!/usr/bin/env python
# This program pulls the function names from wiredtiger.in and generates
# an input file for Java SWIG that adds doxygen copydoc comments to functions.
import os, re, sys
import api_data
from dist import compare_srcfile
# Temporary file.
tmp_file = '__tmp'
#####################################################################
# Update wiredtiger.in with doxygen comments
#####################################################################
f='../src/include/wiredtiger.in'
o='../lang/java/java_doc.i'
tfile = open(tmp_file, 'w')
tfile.write('''/* DO NOT EDIT: automatically built by dist/java_doc.py. */
''')
# Matches 'struct __wt_xxx {' handle declarations and captures the name.
cclass_re = re.compile('^struct __([a-z_]*) {')
# Matches '__F(method_name)' method declarations inside a handle struct.
cfunc_re = re.compile('\t.*? __F\(([a-z_]*)\)')
curr_class = ""
# Walk the header: remember the current handle struct, then emit one
# COPYDOC(lowercase_struct, UPPERCASE_STRUCT, method) line per method.
for line in open(f, 'r'):
    m = cclass_re.match(line)
    if m:
        curr_class = m.group(1)
    if curr_class == "":
        continue
    m = cfunc_re.match(line)
    if m:
        tfile.write('COPYDOC(__' + curr_class.lower() + ', ' +
            curr_class.upper() + ', ' + m.group(1) + ')\n')
tfile.close()
# Only replaces the output file when the contents actually changed.
compare_srcfile(tmp_file, o)
| 24.954545 | 78 | 0.562842 |
794f581387e59c130c0476e0f8f180cbae813724 | 2,351 | py | Python | patterns.py | IvanBrasilico/cli_talker | 9932196e414e7875da592851a761e3f7a47550f6 | [
"MIT"
] | null | null | null | patterns.py | IvanBrasilico/cli_talker | 9932196e414e7875da592851a761e3f7a47550f6 | [
"MIT"
] | 1 | 2021-06-01T21:56:20.000Z | 2021-06-01T21:56:20.000Z | patterns.py | IvanBrasilico/cli_talker | 9932196e414e7875da592851a761e3f7a47550f6 | [
"MIT"
] | null | null | null | '''Configuration of the routes, or vocabulary of the bot'''
from botteryapp import ch, ih
from bottery.conf.patterns import Pattern, DefaultPattern
from bottery.views import pong
from cli_talker.views import (flask_restless_view, help_text,
say_help, tec_view)
from sql_alchemy_view.views import input_example, note_view, notebook_view
class FunctionPattern(Pattern):
    """Pattern whose match test is delegated to a caller-supplied function."""

    def __init__(self, pattern, view, function):
        """Store ``function``, called as ``function(pattern, message_text)``;
        it must return True or False."""
        self.function = function
        super().__init__(pattern, view)

    def check(self, message):
        """Apply the user-defined matcher to this message's text."""
        matcher = self.function
        return matcher(self.pattern, message.text)
class HangUserPattern(DefaultPattern):
    """Default pattern with per-user "hang" state.

    While a user's id is in ``hanged_users``, every message from that user
    matches this pattern and is routed to its view (multi-step flows).
    """
    def __init__(self, view):
        # ids of users currently held by this pattern's view
        self.hanged_users = set()
        super().__init__(view)
    def activate_hang(self, message):
        # Start routing this user's messages to self.view.
        self.hanged_users.add(message.user.id)
    def deactivate_hang(self, message):
        # Stop routing; discard() is a no-op if the user was not hanged.
        self.hanged_users.discard(message.user.id)
    def check(self, message):
        if message is None:
            # NOTE(review): returns a plain string while the other branch
            # returns a view -- presumably any truthy value counts as a
            # match here; confirm against the bottery dispatch code.
            return 'Empty message'
        if message.user.id in self.hanged_users:
            return self.view
        # Implicit None: user not hanged, pattern does not match.
# One HangUserPattern per multi-step conversation flow; while active, the
# associated view receives every message from the hanged user.
hang_user_pattern = HangUserPattern(flask_restless_view)
hang_user_pattern_tec = HangUserPattern(tec_view)
hang_user_pattern_notebook = HangUserPattern(notebook_view)
hang_user_pattern_note = HangUserPattern(note_view)
hang_user_pattern_input = HangUserPattern(input_example)
# Register each hang pattern with the handlers imported from botteryapp
# (ch and ih) under its command keyword.
ch.set_hang(hang_user_pattern, 'person')
ch.set_hang(hang_user_pattern_tec, 'tec')
ch.set_hang(hang_user_pattern_notebook, 'notebook')
ch.set_hang(hang_user_pattern_note, 'note')
ih.set_hang(hang_user_pattern_input, 'project')
def first_word(pattern, text):
    """Return True if the first space-separated word of *text* equals *pattern*.

    ``str.split(' ')`` always yields at least one element (the empty string
    for empty input), so the original ``return False`` fallback was
    unreachable; the comparison below covers every input, and maxsplit=1
    avoids splitting the rest of the message just to read word 0.
    """
    return text.split(' ', 1)[0] == pattern
# Route table consumed by bottery: hang patterns first (so active
# multi-step flows win), then exact keyword patterns, then the
# first-word matchers, with the help fallback last.
patterns = [
    hang_user_pattern,
    hang_user_pattern_tec,
    hang_user_pattern_notebook,
    hang_user_pattern_note,
    hang_user_pattern_input,
    Pattern('tec', tec_view),
    Pattern('person', flask_restless_view),
    FunctionPattern('notebook', notebook_view, first_word),
    FunctionPattern('note', note_view, first_word),
    Pattern('project', input_example),
    Pattern('ping', pong),
    Pattern('help', help_text),
    DefaultPattern(say_help)
]
| 30.532468 | 74 | 0.72097 |
da879c91a5473a23d4134151f19c4cc0a11e1d79 | 11,698 | py | Python | acq4/devices/MicroManagerStage/mmstage.py | aleonlein/acq4 | 4b1fcb9ad2c5e8d4595a2b9cf99d50ece0c0f555 | [
"MIT"
] | 1 | 2020-06-04T17:04:53.000Z | 2020-06-04T17:04:53.000Z | acq4/devices/MicroManagerStage/mmstage.py | aleonlein/acq4 | 4b1fcb9ad2c5e8d4595a2b9cf99d50ece0c0f555 | [
"MIT"
] | 24 | 2016-09-27T17:25:24.000Z | 2017-03-02T21:00:11.000Z | acq4/devices/MicroManagerStage/mmstage.py | sensapex/acq4 | 9561ba73caff42c609bd02270527858433862ad8 | [
"MIT"
] | 4 | 2016-10-19T06:39:36.000Z | 2019-09-30T21:06:45.000Z | # -*- coding: utf-8 -*-
from __future__ import print_function
import time
import numpy as np
from acq4.util import Qt
from ..Stage import Stage, MoveFuture, StageInterface
from acq4.util.Mutex import Mutex
from acq4.util.Thread import Thread
from acq4.pyqtgraph import debug, ptime, SpinBox
from acq4.util.micromanager import getMMCorePy
class MicroManagerStage(Stage):
    """Wraps a MicroManager-controlled XY stage and/or Z (focus) drive.

    Configuration must contain 'xyStage' and/or 'zStage' sections, each
    giving the MicroManager adapter/device names and optional serial-port
    settings. Positions are reported in meters via the `scale` config tuple.
    """

    def __init__(self, man, config, name):
        # (x, y, z) multipliers converting device units into meters.
        self.scale = config.pop('scale', (1e-6, 1e-6, 1e-6))
        self.speedToMeters = .001

        self.mmc = getMMCorePy()

        # MicroManager device/port labels, keyed by axis group.
        self._mmDeviceNames = {'xy': None, 'z': None}
        self._mmSerialPortNames = {'xy': None, 'z': None}
        self._axes = []

        # Configure XY and Z stages separately
        if 'xyStage' not in config and 'zStage' not in config:
            # BUG FIX: corrected typo "myst" -> "must" in the error message.
            raise Exception("Micromanager stage configuration must have 'xyStage', 'zStage', or both.")

        allAdapters = self.mmc.getDeviceAdapterNames()
        for axes in ('xy', 'z'):
            # sanity check for MM adapter and device name
            stageCfg = config.get(axes + 'Stage', None)
            if stageCfg is None:
                continue
            self._axes.append(axes)
            adapterName = stageCfg['mmAdapterName']
            if adapterName not in allAdapters:
                raise ValueError("Adapter name '%s' is not valid. Options are: %s" % (adapterName, allAdapters))
            mmDeviceName = stageCfg.get('mmDeviceName', None)
            allDevices = self.mmc.getAvailableDevices(adapterName)
            if mmDeviceName not in allDevices:
                raise ValueError("Device name '%s' is not valid for adapter '%s'. Options are: %s" % (mmDeviceName, adapterName, allDevices))

            # Load this device
            devName = str(name) + '_' + axes
            self._mmDeviceNames[axes] = devName
            self.mmc.loadDevice(devName, adapterName, mmDeviceName)

            # Set up serial port if needed
            if 'serial' in stageCfg:
                # Z stage may use the same serial port as XY stage
                if stageCfg['serial']['port'] == 'shared':
                    if axes != 'z':
                        raise Exception('Shared serial port only allowed for Z axis.')
                    if 'xyStage' not in config:
                        raise Exception('Shared serial port requires xyStage.')
                    portName = self._mmDeviceNames['xy'] + '_port'
                    self.mmc.setProperty(devName, 'Port', portName)
                    self._mmSerialPortNames[axes] = portName
                else:
                    portName = devName + "_port"
                    self.mmc.loadDevice(portName, "SerialManager", str(stageCfg['serial']['port']))
                    if 'baud' in stageCfg['serial']:
                        self.mmc.setProperty(portName, 'BaudRate', stageCfg['serial']['baud'])
                    self.mmc.setProperty(devName, 'Port', portName)
                    self.mmc.initializeDevice(portName)
                    self._mmSerialPortNames[axes] = portName

            self.mmc.initializeDevice(devName)

        self._lastMove = None
        self._focusDevice = self
        # NOTE(review): self.userSpeed is only assigned in setUserSpeed(),
        # yet _move() passes it to MicroManagerMoveFuture -- confirm it is
        # initialized (e.g. by the Stage base class) before the first move.

        man.sigAbortAll.connect(self.abort)

        Stage.__init__(self, man, config, name)

        # clear cached position for this device and re-read to generate an
        # initial position update
        self._lastPos = None
        time.sleep(1.0)
        self.getPosition(refresh=True)

        # thread for polling position changes
        self.monitor = MonitorThread(self)
        self.monitor.start()

    def capabilities(self):
        """Return a structure describing the capabilities of this device."""
        if 'capabilities' in self.config:
            return self.config['capabilities']
        else:
            haveXY = 'xy' in self._axes
            haveZ = 'z' in self._axes
            return {
                'getPos': (haveXY, haveXY, haveZ),
                'setPos': (haveXY, haveXY, haveZ),
                'limits': (False, False, False),
            }

    def stop(self):
        """Stop the manipulator.

        If the manipulator is currently in use elsewhere, this method blocks
        until it becomes available.
        """
        with self.lock:
            for ax in self._axes:
                self.mmc.stop(self._mmDeviceNames[ax])
            if self._lastMove is not None:
                self._lastMove._stopped()
            self._lastMove = None

    def abort(self):
        """Stop the manipulator immediately.

        This method asks the manipulator to stop even if it is being accessed
        elsewhere. This can cause communication errors, but may be preferred
        if stopping immediately is critical.
        """
        for ax in self._axes:
            try:
                self.mmc.stop(self._mmDeviceNames[ax])
                if self._lastMove is not None:
                    self._lastMove._stopped()
                self._lastMove = None
            except:
                # BUG FIX: bare `printExc` was undefined in this module; use
                # the pyqtgraph debug helper imported at the top of the file.
                debug.printExc("Error stopping axis %s:" % ax)

    def setUserSpeed(self, v):
        """Set the maximum speed of the stage (m/sec) under manual control.

        The stage's maximum speed is reset to this value when it is not under
        programmed control.
        """
        self.userSpeed = v
        # BUG FIX: `self._mmDeviceName` never existed (AttributeError); the
        # 'Speed-S' property is set on the XY stage device.
        # NOTE(review): confirm 'Speed-S' belongs to the XY device for this
        # particular hardware adapter.
        self.mmc.setProperty(self._mmDeviceNames['xy'], 'Speed-S', v / self.speedToMeters)

    def _getPosition(self):
        # Called by superclass when user requests position refresh
        with self.lock:
            pos = [0., 0., 0.]
            if 'xy' in self._axes:
                pos[0] = self.mmc.getXPosition(self._mmDeviceNames['xy']) * self.scale[0]
                pos[1] = self.mmc.getYPosition(self._mmDeviceNames['xy']) * self.scale[1]
            if 'z' in self._axes:
                pos[2] = self.mmc.getPosition(self._mmDeviceNames['z']) * self.scale[2]
            if pos != self._lastPos:
                self._lastPos = pos
                emit = True
            else:
                emit = False
        if emit:
            # don't emit signal while locked
            self.posChanged(pos)
        return pos

    def targetPosition(self):
        """Return the target of the move in progress, or the current position."""
        with self.lock:
            if self._lastMove is None or self._lastMove.isDone():
                return self.getPosition()
            else:
                return self._lastMove.targetPos

    def quit(self):
        """Shut down the polling thread, then the stage itself."""
        self.monitor.stop()
        Stage.quit(self)

    def _move(self, abs, rel, speed, linear):
        """Begin a move; returns a MicroManagerMoveFuture tracking progress."""
        with self.lock:
            if self._lastMove is not None and not self._lastMove.isDone():
                self.stop()
            pos = self._toAbsolutePosition(abs, rel)

            # Decide which axes to move
            moveXY = True
            if abs is not None:
                moveZ = abs[2] is not None
                moveXY = abs[0] is not None and abs[1] is not None
            else:
                moveZ = rel[2] is not None
                moveXY = rel[0] is not None and rel[1] is not None

            speed = self._interpretSpeed(speed)

            # BUG FIX: the keyword was misspelled `moveX=moveZ`, which raised
            # a TypeError in MicroManagerMoveFuture.__init__ on every move.
            self._lastMove = MicroManagerMoveFuture(self, pos, speed, self.userSpeed,
                                                    moveXY=moveXY, moveZ=moveZ)
            return self._lastMove

    def deviceInterface(self, win):
        """Return the Qt configuration panel for this device."""
        return MicroManagerGUI(self, win)

    def startMoving(self, vel):
        """Begin moving the stage at a continuous velocity.
        """
        raise Exception("MicroManager stage does not support startMoving() function.")
class MonitorThread(Thread):
    """Background thread that polls the stage for position changes.

    Polling speeds up to `minInterval` right after a detected move and
    decays exponentially back toward the configured idle interval.
    """

    def __init__(self, dev):
        self.dev = dev
        self.lock = Mutex(recursive=True)
        self.stopped = False
        self.interval = 0.3  # idle (maximum) polling period, seconds
        Thread.__init__(self)

    def start(self):
        """(Re)start the polling loop."""
        self.stopped = False
        Thread.start(self)

    def stop(self):
        """Ask the polling loop to exit after its current sleep."""
        with self.lock:
            self.stopped = True

    def setInterval(self, i):
        """Set the maximum (idle) polling interval in seconds."""
        with self.lock:
            self.interval = i

    def run(self):
        minInterval = 100e-3
        interval = minInterval
        # BUG FIX: maxInterval was previously assigned only inside the try
        # block; an exception on the first pass caused a NameError in the
        # handler's sleep below. Seed it before entering the loop.
        maxInterval = self.interval
        lastPos = None
        while True:
            try:
                with self.lock:
                    if self.stopped:
                        break
                    maxInterval = self.interval
                # this causes sigPositionChanged to be emitted
                pos = self.dev._getPosition()
                if pos != lastPos:
                    # if there was a change, then loop more rapidly for a
                    # short time.
                    interval = minInterval
                    lastPos = pos
                else:
                    # no movement: back off exponentially up to maxInterval.
                    interval = min(maxInterval, interval * 2)
                time.sleep(interval)
            except:
                debug.printExc('Error in MicromanagerStage monitor thread:')
                time.sleep(maxInterval)
class MicroManagerMoveFuture(MoveFuture):
    """Provides access to a move-in-progress on a micromanager stage.
    """

    def __init__(self, dev, pos, speed, userSpeed, moveXY=True, moveZ=True):
        MoveFuture.__init__(self, dev, pos, speed)
        self._interrupted = False
        # BUG FIX: was `self._errorMSg`; errorMessage() reads `_errorMsg`.
        self._errorMsg = None
        self._finished = False
        # Convert the target from meters into device units.
        pos = np.array(pos) / np.array(self.dev.scale)
        with self.dev.lock:
            if moveXY:
                # BUG FIX: setXYPosition takes x and y as separate arguments;
                # the original passed a single-element slice pos[0:1].
                self.dev.mmc.setXYPosition(self.dev._mmDeviceNames['xy'], pos[0], pos[1])
            # BUG FIX: the Z move was gated on moveXY instead of moveZ.
            if moveZ:
                self.dev.mmc.setPosition(self.dev._mmDeviceNames['z'], pos[2])

    def wasInterrupted(self):
        """Return True if the move was interrupted before completing.
        """
        return self._interrupted

    def isDone(self):
        """Return True if the move is complete.
        """
        return self._getStatus() != 0

    def _getStatus(self):
        # check status of move unless we already know it is complete.
        # 0: still moving; 1: finished successfully; -1: finished unsuccessfully
        if self._finished:
            if self._interrupted:
                return -1
            else:
                return 1
        # BUG FIX: the axis list lives on the device, not on the future.
        for ax in self.dev._axes:
            if self.dev.mmc.deviceBusy(self.dev._mmDeviceNames[ax]):
                # Still moving
                return 0
        # did we reach target?
        pos = self.dev._getPosition()
        dif = ((np.array(pos) - np.array(self.targetPos))**2).sum()**0.5
        if dif < 2.5e-6:
            # reached target
            self._finished = True
            return 1
        else:
            # missed
            self._finished = True
            self._interrupted = True
            self._errorMsg = "Move did not complete (target=%s, position=%s, dif=%s)." % (self.targetPos, pos, dif)
            return -1

    def _stopped(self):
        # Called when the manipulator is stopped, possibly interrupting this move.
        status = self._getStatus()
        if status == 1:
            # finished; ignore stop
            return
        elif status == -1:
            self._errorMsg = "Move was interrupted before completion."
        elif status == 0:
            # not actually stopped! This should not happen.
            raise RuntimeError("Interrupted move but manipulator is still running!")
        else:
            raise Exception("Unknown status: %s" % status)

    def errorMessage(self):
        """Return the error string for a failed move, or None."""
        return self._errorMsg
class MicroManagerGUI(StageInterface):
    """Manager GUI panel for the MicroManager stage; adds no controls beyond
    the generic StageInterface."""
    def __init__(self, dev, win):
        StageInterface.__init__(self, dev, win)
| 35.883436 | 141 | 0.565481 |
629621bf0b1be6f53e8d6c0c3619070bc3e8b8b1 | 256 | py | Python | module2-sql-for-analysis/DS8_example.py | CurtCalledBurt/DS-Unit-3-Sprint-2-SQL-and-Databases | 132e6355b8175667eff3fd5fc89f075086afadd3 | [
"MIT"
] | null | null | null | module2-sql-for-analysis/DS8_example.py | CurtCalledBurt/DS-Unit-3-Sprint-2-SQL-and-Databases | 132e6355b8175667eff3fd5fc89f075086afadd3 | [
"MIT"
] | null | null | null | module2-sql-for-analysis/DS8_example.py | CurtCalledBurt/DS-Unit-3-Sprint-2-SQL-and-Databases | 132e6355b8175667eff3fd5fc89f075086afadd3 | [
"MIT"
] | null | null | null | def increment(x):
return x + 1
def double(x):
    """Return *x* repeated/scaled by two."""
    return 2 * x
def run_twice(func, arg):
    """Apply *func* to *arg*, then apply *func* again to that result."""
    intermediate = func(arg)
    return func(intermediate)
def rec_print(n):
    """Print *n*, then count down to zero, printing each value.

    For a negative *n* only that single value is printed.
    """
    value = n
    print(value)
    while value > 0:
        value -= 1
        print(value)
def add(x, y):
    """Return the sum (or concatenation) of *x* and *y*."""
    result = x + y
    return result
def identity(x):
    """Return the argument unchanged (the identity function)."""
    result = x
    return result
| 12.190476 | 26 | 0.5625 |
5d76c124236344f2a9b280e1dbdecfa010b62c45 | 309 | py | Python | atividade1/questao3.py | AbraaoHonorio/PDI | 96cc0839c38b1b13989eca762f98c472ac11ed00 | [
"MIT"
] | 2 | 2018-07-05T00:46:21.000Z | 2018-07-09T15:14:57.000Z | atividade1/questao3.py | AbraaoHonorio/PDI | 96cc0839c38b1b13989eca762f98c472ac11ed00 | [
"MIT"
] | null | null | null | atividade1/questao3.py | AbraaoHonorio/PDI | 96cc0839c38b1b13989eca762f98c472ac11ed00 | [
"MIT"
] | null | null | null | from util import *
def questao3(originalImage):
    """Return the negative of the given image.

    Parameters:
        originalImage: image whose pixels are to be inverted.
    """
    return applyToAllPixels(originalImage, {'fun': negative})
def negative(b, g, r):
    """Invert one BGR pixel, returning [255-b, 255-g, 255-r]."""
    return [255 - channel for channel in (b, g, r)]
return [255-b,255-g,255-r] | 17.166667 | 65 | 0.721683 |
34adbb64f7dae554d1a418394748e5b4d07a87ba | 2,456 | py | Python | closed/Intel/code/resnet50/pytorch-cpu/Backend.py | ctuning/inference_results_v1.1 | d9176eca28fcf6d7a05ccb97994362a76a1eb5ab | [
"Apache-2.0"
] | 12 | 2021-09-23T08:05:57.000Z | 2022-03-21T03:52:11.000Z | closed/Intel/code/resnet50/pytorch-cpu/Backend.py | ctuning/inference_results_v1.1 | d9176eca28fcf6d7a05ccb97994362a76a1eb5ab | [
"Apache-2.0"
] | 11 | 2021-09-23T20:34:06.000Z | 2022-01-22T07:58:02.000Z | closed/Intel/code/resnet50/pytorch-cpu/Backend.py | ctuning/inference_results_v1.1 | d9176eca28fcf6d7a05ccb97994362a76a1eb5ab | [
"Apache-2.0"
] | 16 | 2021-09-23T20:26:38.000Z | 2022-03-09T12:59:56.000Z | import logging
import os
import sys
logging.basicConfig(level=logging.INFO)
log = logging.getLogger("BACKEND")
from baseBackend import baseBackend
"""
Resnet50 Pytorch SUT class
"""
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import intel_pytorch_extension as ipex
import _torch_ipex as core
import numpy as np
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
class Backend(baseBackend):
    """ResNet-50 PyTorch SUT backend with optional Intel Extension for
    PyTorch (IPEX) acceleration, JIT scripting and int8 mixed precision."""

    def __init__(self, model_path="", ipex=None, dnnl=True, jit=True, int8=True, configure_dir="", **kwargs):
        """Validate paths, record run options and fetch the pretrained net.

        model_path: state-dict file for resnet50 (required, must exist).
        configure_dir: int8 calibration configuration file (required when
            int8 is enabled).
        """
        if not os.path.isfile(model_path):
            log.error("Model not found: {}".format(model_path))
            sys.exit(1)
        if (int8 and not os.path.isfile(configure_dir)):
            log.error("Configure dir not found: {}".format(configure_dir))
            sys.exit(1)
        self.model_path = model_path
        self.ipex = ipex
        self.dnnl = dnnl
        self.configure_dir = configure_dir
        self.int8 = int8
        self.jit = jit
        self.model = models.__dict__['resnet50'](pretrained=True)
        print("Loaded pretrained model")

    def load_model(self):
        """Load weights and prepare the model for inference (IPEX/JIT/int8)."""
        if self.ipex:
            import intel_pytorch_extension as ipex
            if self.dnnl:
                ipex.core.enable_auto_dnnl()
            else:
                # BUG FIX: the original called self.core.disable_auto_dnnl(),
                # but Backend has no `core` attribute; mirror the enable
                # branch and use ipex.core instead.
                ipex.core.disable_auto_dnnl()
        print("model_path: " + self.model_path)
        self.model.load_state_dict(torch.load(self.model_path))
        log.info("Model loaded")
        if self.ipex:
            self.model = self.model.to(device=ipex.DEVICE)
        if self.jit:
            self.model = torch.jit.script(self.model)
        if self.int8:
            self.conf = ipex.AmpConf(torch.int8, self.configure_dir)
        else:
            self.conf = ipex.AmpConf(None)
        self.model.eval()

    def predict(self, data):
        """Run one forward pass under IPEX auto mixed precision; returns logits."""
        with torch.no_grad():
            with ipex.AutoMixPrecision(self.conf, running_mode="inference"):
                data = data.to(device=ipex.DEVICE)
                # compute output
                output = self.model(data)
        return output
| 32.315789 | 113 | 0.651873 |
c13fd4817ab16a266af1405b3f99ff454184c3db | 4,377 | py | Python | src/interpret.py | kundajelab/retina-models | bd458c65f5917f5dccb87f123e9be672472bd4b6 | [
"MIT"
] | null | null | null | src/interpret.py | kundajelab/retina-models | bd458c65f5917f5dccb87f123e9be672472bd4b6 | [
"MIT"
] | null | null | null | src/interpret.py | kundajelab/retina-models | bd458c65f5917f5dccb87f123e9be672472bd4b6 | [
"MIT"
] | null | null | null | import deepdish as dd
import json
import numpy as np
import tensorflow as tf
import pandas as pd
import shap
from tensorflow import keras
import pyfaidx
import shutil
import errno
import os
from utils import argmanager, data_utils
from utils.loss import multinomial_nll
from utils.shap_utils import *
# disable eager execution so shap deep explainer wont break
tf.compat.v1.disable_eager_execution()
def generate_shap_dict(seqs, scores):
    """Package one-hot sequences and their SHAP scores for MoDISco.

    seqs, scores: arrays of identical shape (N, inputlen, 4).
    Returns a dict with 'raw', 'shap' and 'projected_shap' entries, each
    holding sequences transposed to the (N, 4, inputlen) layout that the
    MoDISco workflow expects.
    """
    assert seqs.shape == scores.shape
    assert seqs.shape[2] == 4

    def _to_modisco_layout(arr):
        # (N, inputlen, 4) -> (N, 4, inputlen)
        return np.transpose(arr, (0, 2, 1))

    return {
        'raw': {'seq': _to_modisco_layout(seqs)},
        'shap': {'seq': _to_modisco_layout(scores)},
        'projected_shap': {'seq': _to_modisco_layout(seqs * scores)},
    }
def interpret(model, seqs, output_prefix, profile_or_counts):
    """Compute DeepSHAP contribution scores for one-hot DNA sequences.

    model: trained BPNet-style Keras model with (profile, counts) outputs.
    seqs: one-hot encoded sequences, shape (N, inputlen, 4).
    output_prefix: path prefix for the emitted <prefix>.*_scores.h5 files.
    profile_or_counts: container naming which head(s) to explain; any of
        'profile' and/or 'counts'.
    """
    # BUG FIX: removed unused local `outlen = model.output_shape[0][1]`.
    print("Seqs dimension : {}".format(seqs.shape))

    profile_model_input = model.input
    profile_input = seqs
    counts_model_input = model.input
    counts_input = seqs

    if "counts" in profile_or_counts:
        profile_model_counts_explainer = shap.explainers.deep.TFDeepExplainer(
            (counts_model_input, tf.reduce_sum(model.outputs[1], axis=-1)),
            shuffle_several_times,
            combine_mult_and_diffref=combine_mult_and_diffref)

        print("Generating 'counts' shap scores")
        counts_shap_scores = profile_model_counts_explainer.shap_values(
            counts_input, progress_message=100)

        counts_scores_dict = generate_shap_dict(seqs, counts_shap_scores)

        # save the dictionary in HDF5 format
        print("Saving 'counts' scores")
        dd.io.save("{}.counts_scores.h5".format(output_prefix),
                   counts_scores_dict,
                   compression='blosc')

        # free memory before (possibly) computing the profile scores
        del counts_shap_scores, counts_scores_dict

    if "profile" in profile_or_counts:
        weightedsum_meannormed_logits = get_weightedsum_meannormed_logits(model)
        profile_model_profile_explainer = shap.explainers.deep.TFDeepExplainer(
            (profile_model_input, weightedsum_meannormed_logits),
            shuffle_several_times,
            combine_mult_and_diffref=combine_mult_and_diffref)

        print("Generating 'profile' shap scores")
        profile_shap_scores = profile_model_profile_explainer.shap_values(
            profile_input, progress_message=100)

        profile_scores_dict = generate_shap_dict(seqs, profile_shap_scores)

        # save the dictionary in HDF5 format
        print("Saving 'profile' scores")
        dd.io.save("{}.profile_scores.h5".format(output_prefix),
                   profile_scores_dict,
                   compression='blosc')
def main():
    """CLI driver: load model and regions, extract sequences, run interpret()."""
    # parse the command line arguments
    args = argmanager.fetch_interpret_args()
    # check if the output directory exists
    if not os.path.exists(os.path.dirname(args.output_prefix)):
        raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), os.path.dirname(args.output_prefix))
    # write all the command line arguments to a json file
    with open("{}.args.json".format(args.output_prefix), "w") as fp:
        json.dump(vars(args), fp, ensure_ascii=False, indent=4)
    regions_df = pd.read_csv(args.regions, sep='\t', names=data_utils.NARROWPEAK_SCHEMA)
    # optionally restrict to specific chromosomes for debugging
    if args.debug_chr:
        regions_df = regions_df[regions_df['chr'].isin(args.debug_chr)]
        regions_df.to_csv("{}.interpreted_regions.bed".format(args.output_prefix), header=False, sep='\t')
    else:
        # copy regions bed to output directory
        shutil.copy(args.regions, "{}.interpreted_regions.bed".format(args.output_prefix))
    # load the model
    with keras.utils.CustomObjectScope({'multinomial_nll':multinomial_nll, 'tf':tf}):
        model = keras.models.load_model(args.model)
    inputlen = model.input_shape[1]
    # load sequences
    # NOTE: it will pull out sequences of length inputlen
    # centered at the summit (start + 10th column)
    genome = pyfaidx.Fasta(args.genome)
    seqs = data_utils.get_seq(regions_df, genome, inputlen)
    genome.close()
    interpret(model, seqs, args.output_prefix, args.profile_or_counts)
interpret(model, seqs, args.output_prefix, args.profile_or_counts)
# Script entry point: run the full SHAP interpretation pipeline.
if __name__ == '__main__':
    main()
| 35.585366 | 109 | 0.693626 |
db633717e976db8c4b0800094472ffb4a1022d92 | 11,052 | py | Python | code/pretrain_DAMSM.py | s-52/AttnGAN | 66a33bf20deb419fb2e91ea15d32aaff36558b44 | [
"MIT"
] | null | null | null | code/pretrain_DAMSM.py | s-52/AttnGAN | 66a33bf20deb419fb2e91ea15d32aaff36558b44 | [
"MIT"
] | null | null | null | code/pretrain_DAMSM.py | s-52/AttnGAN | 66a33bf20deb419fb2e91ea15d32aaff36558b44 | [
"MIT"
] | 1 | 2022-02-24T20:24:40.000Z | 2022-02-24T20:24:40.000Z | from __future__ import print_function
from miscc.utils import mkdir_p
from miscc.utils import build_super_images
from miscc.losses import sent_loss, words_loss
from miscc.config import cfg, cfg_from_file
from datasets import TextDataset
from datasets import prepare_data
from model import RNN_ENCODER, CNN_ENCODER
import os
import sys
import time
import random
import pprint
import datetime
import dateutil.tz
import argparse
import numpy as np
from PIL import Image
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
# Make sibling modules importable when this script is run directly.
dir_path = (os.path.abspath(os.path.join(os.path.realpath(__file__), './.')))
sys.path.append(dir_path)
# Number of minibatches between console log lines / attention-map dumps.
UPDATE_INTERVAL = 200
def parse_args():
    """Parse command-line options for DAMSM pre-training."""
    parser = argparse.ArgumentParser(description='Train a DAMSM network')
    parser.add_argument('--cfg', dest='cfg_file', type=str,
                        default='cfg/DAMSM/coco.yml',
                        help='optional config file')
    parser.add_argument('--gpu', dest='gpu_id', type=int, default=0)
    parser.add_argument('--data_dir', dest='data_dir', type=str, default='')
    parser.add_argument('--manualSeed', type=int, help='manual seed')
    return parser.parse_args()
def train(dataloader, cnn_model, rnn_model, batch_size,
          labels, optimizer, epoch, ixtoword, image_dir):
    """Run one training epoch of the DAMSM image/text encoders.

    Accumulates sentence- and word-level contrastive losses; every
    UPDATE_INTERVAL steps it logs the averaged losses and saves an
    attention-map visualization. Returns the global step count.
    """
    cnn_model.train()
    rnn_model.train()
    # Running loss accumulators, reset every UPDATE_INTERVAL steps.
    s_total_loss0 = 0
    s_total_loss1 = 0
    w_total_loss0 = 0
    w_total_loss1 = 0
    # NOTE: this initial value is overwritten inside the logging branch.
    count = (epoch + 1) * len(dataloader)
    start_time = time.time()
    for step, data in enumerate(dataloader, 0):
        # print('step', step)
        rnn_model.zero_grad()
        cnn_model.zero_grad()
        imgs, captions, cap_lens, \
            class_ids, keys = prepare_data(data)
        # words_features: batch_size x nef x 17 x 17
        # sent_code: batch_size x nef
        words_features, sent_code = cnn_model(imgs[-1])
        # --> batch_size x nef x 17*17
        nef, att_sze = words_features.size(1), words_features.size(2)
        # words_features = words_features.view(batch_size, nef, -1)
        hidden = rnn_model.init_hidden(batch_size)
        # words_emb: batch_size x nef x seq_len
        # sent_emb: batch_size x nef
        words_emb, sent_emb = rnn_model(captions, cap_lens, hidden)
        w_loss0, w_loss1, attn_maps = words_loss(words_features, words_emb, labels,
                                                 cap_lens, class_ids, batch_size)
        w_total_loss0 += w_loss0.data
        w_total_loss1 += w_loss1.data
        loss = w_loss0 + w_loss1
        s_loss0, s_loss1 = \
            sent_loss(sent_code, sent_emb, labels, class_ids, batch_size)
        loss += s_loss0 + s_loss1
        s_total_loss0 += s_loss0.data
        s_total_loss1 += s_loss1.data
        #
        loss.backward()
        #
        # `clip_grad_norm` helps prevent
        # the exploding gradient problem in RNNs / LSTMs.
        # NOTE(review): clip_grad_norm is the deprecated pre-0.4 name;
        # newer torch spells it clip_grad_norm_ -- confirm torch version.
        torch.nn.utils.clip_grad_norm(rnn_model.parameters(),
                                      cfg.TRAIN.RNN_GRAD_CLIP)
        optimizer.step()
        if step % UPDATE_INTERVAL == 0:
            count = epoch * len(dataloader) + step
            # Average the accumulated losses over the reporting window.
            s_cur_loss0 = s_total_loss0 / UPDATE_INTERVAL # s_cur_loss0 = s_total_loss0[0] / UPDATE_INTERVAL
            s_cur_loss1 = s_total_loss1 / UPDATE_INTERVAL # s_cur_loss1 = s_total_loss1[0] / UPDATE_INTERVAL
            w_cur_loss0 = w_total_loss0 / UPDATE_INTERVAL # w_cur_loss0 = w_total_loss0[0] / UPDATE_INTERVAL
            w_cur_loss1 = w_total_loss1 / UPDATE_INTERVAL # w_cur_loss1 = w_total_loss1[0] / UPDATE_INTERVAL
            elapsed = time.time() - start_time
            print('| epoch {:3d} | {:5d}/{:5d} batches | ms/batch {:5.2f} | '
                  's_loss {:5.2f} {:5.2f} | '
                  'w_loss {:5.2f} {:5.2f}'
                  .format(epoch, step, len(dataloader),
                          elapsed * 1000. / UPDATE_INTERVAL,
                          s_cur_loss0, s_cur_loss1,
                          w_cur_loss0, w_cur_loss1))
            s_total_loss0 = 0
            s_total_loss1 = 0
            w_total_loss0 = 0
            w_total_loss1 = 0
            start_time = time.time()
            # attention Maps
            img_set, _ = \
                build_super_images(imgs[-1].cpu(), captions,
                                   ixtoword, attn_maps, att_sze)
            if img_set is not None:
                im = Image.fromarray(img_set)
                fullpath = '%s/attention_maps%d.png' % (image_dir, step)
                im.save(fullpath)
    return count
def evaluate(dataloader, cnn_model, rnn_model, batch_size):
    """Evaluate DAMSM losses on (at most 50 batches of) validation data.

    NOTE(review): `labels` is read from module scope (defined under the
    __main__ guard), not passed as a parameter -- confirm before reuse.
    NOTE(review): the final division by `step` raises ZeroDivisionError if
    the loader yields exactly one batch.
    """
    cnn_model.eval()
    rnn_model.eval()
    s_total_loss = 0
    w_total_loss = 0
    for step, data in enumerate(dataloader, 0):
        real_imgs, captions, cap_lens, \
            class_ids, keys = prepare_data(data)
        words_features, sent_code = cnn_model(real_imgs[-1])
        # nef = words_features.size(1)
        # words_features = words_features.view(batch_size, nef, -1)
        hidden = rnn_model.init_hidden(batch_size)
        words_emb, sent_emb = rnn_model(captions, cap_lens, hidden)
        w_loss0, w_loss1, attn = words_loss(words_features, words_emb, labels,
                                            cap_lens, class_ids, batch_size)
        w_total_loss += (w_loss0 + w_loss1).data
        s_loss0, s_loss1 = \
            sent_loss(sent_code, sent_emb, labels, class_ids, batch_size)
        s_total_loss += (s_loss0 + s_loss1).data
        if step == 50:
            break
    s_cur_loss = s_total_loss / step #was s_total_loss[0] / step; changed because of https://github.com/NVIDIA/flownet2-pytorch/issues/113
    w_cur_loss = w_total_loss / step #w_total_loss[0] / step
    return s_cur_loss, w_cur_loss
def build_models():
    """Construct the text (RNN) and image (CNN) encoders plus match labels.

    Optionally resumes both encoders from cfg.TRAIN.NET_E checkpoints and
    derives the starting epoch from the checkpoint file name.
    NOTE(review): reads module-level globals `dataset` and `batch_size`
    (defined under the __main__ guard) -- confirm before reuse elsewhere.
    """
    # build model ############################################################
    text_encoder = RNN_ENCODER(dataset.n_words, nhidden=cfg.TEXT.EMBEDDING_DIM)
    image_encoder = CNN_ENCODER(cfg.TEXT.EMBEDDING_DIM)
    labels = Variable(torch.LongTensor(range(batch_size)))
    start_epoch = 0
    if cfg.TRAIN.NET_E != '':
        state_dict = torch.load(cfg.TRAIN.NET_E)
        text_encoder.load_state_dict(state_dict)
        print('Load ', cfg.TRAIN.NET_E)
        #
        name = cfg.TRAIN.NET_E.replace('text_encoder', 'image_encoder')
        state_dict = torch.load(name)
        image_encoder.load_state_dict(state_dict)
        print('Load ', name)
        # Recover the epoch number embedded in the checkpoint filename.
        istart = cfg.TRAIN.NET_E.rfind('_') + 8
        iend = cfg.TRAIN.NET_E.rfind('.')
        start_epoch = cfg.TRAIN.NET_E[istart:iend]
        start_epoch = int(start_epoch) + 1
        print('start_epoch', start_epoch)
    if cfg.CUDA:
        text_encoder = text_encoder.cuda()
        image_encoder = image_encoder.cuda()
        labels = labels.cuda()
    return text_encoder, image_encoder, labels, start_epoch
if __name__ == "__main__":
args = parse_args()
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.gpu_id == -1:
cfg.CUDA = False
else:
cfg.GPU_ID = args.gpu_id
if args.data_dir != '':
cfg.DATA_DIR = args.data_dir
print('Using config:')
pprint.pprint(cfg)
if not cfg.TRAIN.FLAG:
args.manualSeed = 100
elif args.manualSeed is None:
args.manualSeed = random.randint(1, 10000)
random.seed(args.manualSeed)
np.random.seed(args.manualSeed)
torch.manual_seed(args.manualSeed)
if cfg.CUDA:
torch.cuda.manual_seed_all(args.manualSeed)
##########################################################################
now = datetime.datetime.now(dateutil.tz.tzlocal())
timestamp = now.strftime('%Y_%m_%d_%H_%M_%S')
output_dir = '../output/%s_%s_%s' % \
(cfg.DATASET_NAME, cfg.CONFIG_NAME, timestamp)
model_dir = os.path.join(output_dir, 'Model')
image_dir = os.path.join(output_dir, 'Image')
mkdir_p(model_dir)
mkdir_p(image_dir)
torch.cuda.set_device(cfg.GPU_ID)
cudnn.benchmark = True
# Get data loader ##################################################
imsize = cfg.TREE.BASE_SIZE * (2 ** (cfg.TREE.BRANCH_NUM-1))
batch_size = cfg.TRAIN.BATCH_SIZE
image_transform = transforms.Compose([
transforms.Resize(int(imsize * 76 / 64)),
transforms.RandomCrop(imsize),
transforms.RandomHorizontalFlip()])
dataset = TextDataset(cfg.DATA_DIR, 'train',
base_size=cfg.TREE.BASE_SIZE,
transform=image_transform)
print(dataset.n_words, dataset.embeddings_num)
assert dataset
dataloader = torch.utils.data.DataLoader(
dataset, batch_size=batch_size, drop_last=True,
shuffle=True, num_workers=int(cfg.WORKERS))
# # validation data #
dataset_val = TextDataset(cfg.DATA_DIR, 'test',
base_size=cfg.TREE.BASE_SIZE,
transform=image_transform)
dataloader_val = torch.utils.data.DataLoader(
dataset_val, batch_size=batch_size, drop_last=True,
shuffle=True, num_workers=int(cfg.WORKERS))
# Train ##############################################################
text_encoder, image_encoder, labels, start_epoch = build_models()
para = list(text_encoder.parameters())
for v in image_encoder.parameters():
if v.requires_grad:
para.append(v)
# optimizer = optim.Adam(para, lr=cfg.TRAIN.ENCODER_LR, betas=(0.5, 0.999))
# At any point you can hit Ctrl + C to break out of training early.
try:
lr = cfg.TRAIN.ENCODER_LR
for epoch in range(start_epoch, cfg.TRAIN.MAX_EPOCH):
optimizer = optim.Adam(para, lr=lr, betas=(0.5, 0.999))
epoch_start_time = time.time()
count = train(dataloader, image_encoder, text_encoder,
batch_size, labels, optimizer, epoch,
dataset.ixtoword, image_dir)
print('-' * 89)
if len(dataloader_val) > 0:
s_loss, w_loss = evaluate(dataloader_val, image_encoder,
text_encoder, batch_size)
print('| end epoch {:3d} | valid loss '
'{:5.2f} {:5.2f} | lr {:.5f}|'
.format(epoch, s_loss, w_loss, lr))
print('-' * 89)
if lr > cfg.TRAIN.ENCODER_LR/10.:
lr *= 0.98
if (epoch % cfg.TRAIN.SNAPSHOT_INTERVAL == 0 or
epoch == cfg.TRAIN.MAX_EPOCH):
torch.save(image_encoder.state_dict(),
'%s/image_encoder%d.pth' % (model_dir, epoch))
torch.save(text_encoder.state_dict(),
'%s/text_encoder%d.pth' % (model_dir, epoch))
print('Save G/Ds models.')
except KeyboardInterrupt:
print('-' * 89)
print('Exiting from training early')
| 37.337838 | 123 | 0.59962 |
4c5f6aca589388c1cbb93ddd7c99546db2d8ca6a | 339 | py | Python | tolqc-api/app/main/service/specimen.py | sanger-tol/tolqc | 6ef535a0eb0028c5d9270ff18d95d38c6656cbdd | [
"MIT"
] | null | null | null | tolqc-api/app/main/service/specimen.py | sanger-tol/tolqc | 6ef535a0eb0028c5d9270ff18d95d38c6656cbdd | [
"MIT"
] | null | null | null | tolqc-api/app/main/service/specimen.py | sanger-tol/tolqc | 6ef535a0eb0028c5d9270ff18d95d38c6656cbdd | [
"MIT"
] | null | null | null | # SPDX-FileCopyrightText: 2021 Genome Research Ltd.
#
# SPDX-License-Identifier: MIT
from main.model import TolqcSpecimen
from main.schema import SpecimenSchema
from .base import BaseService, setup_service
@setup_service
class SpecimenService(BaseService):
    """Service layer binding the TolqcSpecimen model to its schema."""
    class Meta:
        # ORM model and serialization schema consumed by BaseService.
        model = TolqcSpecimen
        schema = SpecimenSchema
| 21.1875 | 51 | 0.769912 |
ee06d56e85a8c6140a4c00814e5fb41354d70c68 | 638 | py | Python | commands/help.py | plainenough/slackbot | 28c0b5c43f9b344aef3bf94d243ebdee82edaf72 | [
"MIT"
] | null | null | null | commands/help.py | plainenough/slackbot | 28c0b5c43f9b344aef3bf94d243ebdee82edaf72 | [
"MIT"
] | 25 | 2019-10-29T21:15:33.000Z | 2021-01-07T04:03:28.000Z | commands/help.py | plainenough/slackbot | 28c0b5c43f9b344aef3bf94d243ebdee82edaf72 | [
"MIT"
] | null | null | null | """Generate help message from docstrings."""
def alias():
    """Expose the mapping of custom command names to handler functions."""
    return {'help': generate_help}
def generate_help(**kwargs):
    """Build a code-formatted listing of every command's docstring.

    Reads the `message` keyword argument, redirects the reply to a direct
    message with the requesting user, and returns the formatted help text.
    Returns '' when the triggering message text is suspiciously long.
    """
    message = kwargs.get('message')
    commands = message._list_commands
    if len(message._text) > 25:
        return ''
    # This directs the message to the user.
    message.channel = message.user
    lines = []
    for name, func in sorted(commands.items()):
        lines.append('{:<30}: {}'.format(name, func.__doc__.strip()))
    return '```{}```'.format('\n'.join(lines))
| 30.380952 | 68 | 0.619122 |
a8a7cba23305b1ecc60e31c4549427514e9ae35d | 9,910 | py | Python | meanshifttracking.py | himanshubnm/vision-opencv | 17324a0cf12bc8203ea0740e2c536c91e701b999 | [
"MIT"
] | null | null | null | meanshifttracking.py | himanshubnm/vision-opencv | 17324a0cf12bc8203ea0740e2c536c91e701b999 | [
"MIT"
] | null | null | null | meanshifttracking.py | himanshubnm/vision-opencv | 17324a0cf12bc8203ea0740e2c536c91e701b999 | [
"MIT"
] | null | null | null | import numpy as np
import cv2
class MeanShiftTracker(object):
# initialization of tracker
    def __init__(self, centroid_x, centroid_y, obj_width, obj_height):
        """Initialise tracker state around the object's starting box.

        centroid_x, centroid_y: pixel coordinates of the object centre.
        obj_width, obj_height: box size; forced odd so an exact centre
            pixel exists.
        """
        self._prev_centroid_x = centroid_x
        self._prev_centroid_y = centroid_y
        self._curr_centroid_x = centroid_x
        self._curr_centroid_y = centroid_y
        # Similarity values between target and candidate models.
        # NOTE(review): "BC" presumably stands for the Bhattacharyya
        # coefficient -- confirm against compute_similarity_value().
        self._prev_similarity_BC = 0.0
        self._curr_similarity_BC = 0.0
        # Force odd dimensions so the box has an exact centre pixel.
        if(obj_width % 2 == 0):
            obj_width += 1
        if(obj_height % 2 == 0):
            obj_height += 1
        self._prev_width = obj_width
        self._prev_height = obj_height
        self._curr_width = obj_width
        self._curr_height = obj_height
        self._curr_half_width = int((self._curr_width - 1) * 0.5)
        self._curr_half_height = int((self._curr_height - 1) * 0.5)
        # specification for the features: a joint BGR color histogram with
        # bins_per_channel^3 total bins.
        self._bins_per_channel = 16
        self._bin_size = int(np.floor(256 / self._bins_per_channel))
        self._model_dim = np.power(self._bins_per_channel, 3)
        # The object models
        self._target_model = np.zeros(self._model_dim)
        self._prev_model = np.zeros(self._model_dim)
        self._curr_model = np.zeros(self._model_dim)
        # Array which stores the index to which each color value will be assigned in the color histogram
        self.combined_index = np.zeros([self._curr_height, self._curr_width])
        # Maximum number of mean-shift iterations per frame.
        self._max_itr = 5
        self.compute_ellipse_kernel()
def compute_ellipse_kernel(self):
""" compute the ellipse kernel weights
"""
error_code = 0
half_width = (self._curr_width - 1) * 0.5
half_height = (self._curr_height - 1) * 0.5
x_limit = int(np.floor((self._prev_width - 1) * 0.5))
y_limit = int(np.floor((self._prev_height - 1) * 0.5))
x_range = np.array([range(-x_limit, x_limit + 1)])
y_range = np.array([range(-y_limit, y_limit + 1)])
y_range = np.transpose(y_range)
x_matrix = np.repeat(x_range, y_limit * 2 + 1, axis=0)
y_matrix = np.repeat(y_range, x_limit*2 + 1, axis=1)
x_square = np.multiply(x_matrix, x_matrix)
y_square = np.multiply(y_matrix, y_matrix)
x_square = np.divide(x_square, float(half_width * half_width))
y_square = np.divide(y_square, float(half_height * half_height))
self._kernel_mask = np.ones(
[self._curr_height, self._curr_width]) - (y_square + x_square)
self._kernel_mask[self._kernel_mask < 0] = 0
print('kerbnel computation complete ')
return error_code
def compute_target_model(self, ref_image):
error_code = 0
self.compute_object_model(ref_image)
self._target_model = np.copy(self._curr_model)
print('Target model computation complete')
return error_code
def compute_object_model(self, image):
self._curr_model = self._curr_model * 0.0
self.combined_index = self.combined_index * 0
# converting to a floating point image
image = image.astype(float)
half_width = int((self._curr_width - 1) * 0.5)
half_height = int((self._curr_height - 1) * 0.5)
# extract the object region from the image IMP the upper bound is not included
obj_image = image[self._curr_centroid_y - half_height: self._curr_centroid_y + half_height +
1, self._curr_centroid_x - half_width: self._curr_centroid_x + half_width + 1, :]
index_matrix = np.divide(obj_image, self._bin_size)
index_matrix = np.floor(index_matrix)
index_matrix = index_matrix.astype(int)
b_index, g_index, r_index = cv2.split(index_matrix)
combined_index = b_index * \
np.power(self._bins_per_channel, 2) + \
self._bins_per_channel * g_index + r_index
combined_index = combined_index.astype(int)
self.combined_index = combined_index.astype(int)
print(self._curr_model.shape)
for i in range(0, self._curr_height):
for j in range(0, self._curr_width):
self._curr_model[combined_index[i, j]
] += self._kernel_mask[i, j]
# l1 normalize the feature( histogram )
sum_val = np.sum(self._curr_model)
self._curr_model = self._curr_model/float(sum_val)
print('Object model computed ')
    def perform_mean_shift(self, image):
        """Run mean-shift iterations to relocate the tracked object in image.

        Starting from the previous frame's centroid, repeatedly recomputes the
        candidate histogram, shifts the centroid by the weighted mean offset,
        and line-searches (bisecting toward the previous centroid) whenever the
        Bhattacharyya similarity drops. Stops on convergence (centroid moved
        <= sqrt(2) pixels) or after _max_itr iterations.
        """
        half_width = (self._curr_width - 1) * 0.5
        half_height = (self._curr_height - 1) * 0.5
        # NOTE(review): tmp_x, tmp_y and norm_factor are never reset inside the
        # iteration loop, so the weighted sums accumulate across iterations —
        # looks unintentional; confirm against the mean-shift derivation.
        norm_factor = 0.0
        self._curr_x = 0.0
        self._curr_y = 0.0
        itr = 1
        tmp_x = 0.0
        tmp_y = 0.0
        # Initialize to start the iterations from the current frame
        self._curr_centroid_x = self._prev_centroid_x
        self._curr_centroid_y = self._prev_centroid_y
        # Performing mean shift iterations
        for itr in range(0, self._max_itr):
            print('mean shift iteration %s ', itr)
            print('max target = %s', np.max(self._target_model))
            print(id(self._target_model))
            print(id(self._curr_model))
            print('max_diff = %s', np.max(
                np.fabs(self._target_model - self._curr_model)))
            # compute the object model in the current frame keeping the current
            # position as the position from the previous frame
            self.compute_object_model(image)
            print('max_diff = %s', np.max(
                np.fabs(self._target_model - self._curr_model)))
            print('max target = %s', np.max(self._curr_model))
            self.compute_similarity_value()
            self._prev_similarity_BC = self._curr_similarity_BC
            # Avoid divide by zero error
            self._curr_model[self._curr_model == 0] = 0.001
            # weight value computed as the ratio of the target and the candidate model
            feature_ratio = np.divide(self._target_model, self._curr_model)
            # computing the new position
            for i in range(0, self._curr_height):
                for j in range(0, self._curr_width):
                    tmp_x += (j - half_width) * \
                        feature_ratio[self.combined_index[i, j]]
                    tmp_y += (i - half_height) * \
                        feature_ratio[self.combined_index[i, j]]
                    norm_factor += feature_ratio[self.combined_index[i, j]]
            mean_shift_x = tmp_x / norm_factor
            mean_shift_y = tmp_y / norm_factor
            # computing the new position using mean-shift
            self._curr_centroid_x += np.round(mean_shift_x)
            self._curr_centroid_y += np.round(mean_shift_y)
            self._curr_centroid_x = int(self._curr_centroid_x)
            self._curr_centroid_y = int(self._curr_centroid_y)
            # compute the object model at the new position
            self.compute_object_model(image)
            # compute the similarity of the target and the current model
            self.compute_similarity_value()
            # Performing line search: bisect toward the previous centroid while
            # similarity got noticeably worse (tolerance 0.01 absorbs round-off).
            while(self._curr_similarity_BC - self._prev_similarity_BC < -0.01):
                # while( self._curr_similarity_BC < self._prev_similarity_BC ):
                self._curr_centroid_x = int(
                    np.floor((self._curr_centroid_x + self._prev_centroid_x) * 0.5))
                self._curr_centroid_y = int(
                    np.floor((self._curr_centroid_y + self._prev_centroid_y) * 0.5))
                #self._prev_similarity_BC = self._curr_similarity_BC
                # this section of code was written as the round off error prevents
                # the while loop from converging
                # compute the current location object model
                self.compute_object_model(image)
                self.compute_similarity_value()
                diff_x = self._prev_centroid_x - self._curr_centroid_x
                diff_y = self._prev_centroid_y - self._curr_centroid_y
                # euclidean distance between the points obtained in two consecutive iteration
                euc_dist = np.power(diff_x, 2) + np.power(diff_y, 2)
                # Check for convergence
                if(euc_dist <= 2):  # if converged
                    break
            # difference between the centroid values in the current iteration and previous iteration
            diff_x = self._prev_centroid_x - self._curr_centroid_x
            diff_y = self._prev_centroid_y - self._curr_centroid_y
            # euclidean distance between the points obtained in two consecutive iteration
            euc_dist = np.power(diff_x, 2) + np.power(diff_y, 2)
            self._prev_centroid_x = self._curr_centroid_x
            self._prev_centroid_y = self._curr_centroid_y
            # Check for convergence
            if(euc_dist <= 2):  # if converged
                break
        # else: # continue for the next iteration
def compute_similarity_value(self):
""" compute the similarity value between two distributions using Bhattacharyya similarity
"""
error_code = 0
self._curr_similarity_BC = 0.0
# Bhattacharya similariy between two distributions
for i in range(self._model_dim):
if(self._target_model[i] != 0 and self._curr_model[i] != 0):
#print( 'val 1 = %s a nd val2 = %s ', self._target_model[i], self._curr_model[ i ] )
self._curr_similarity_BC += np.sqrt(
self._target_model[i] * self._curr_model[i])
#print( 'max_val =%s', np.max( self._target_model ))
#print( 'sum_val =%s', np.sum( self._target_model ))
#print( 'max_val =%s', np.max( self._curr_model ))
#print( 'sum_val =%s', np.sum( self._curr_model ))
#print( 'sim val = ', self._curr_similarity_BC )
return error_code
| 36.300366 | 128 | 0.614329 |
393219935255b25dfb97ea7e1bee291a6f2bb679 | 1,084 | py | Python | correlations/server.py | egenerat/portfolio | b356c682222255e647202b224266d6dc8ce2e0fb | [
"Apache-2.0"
] | null | null | null | correlations/server.py | egenerat/portfolio | b356c682222255e647202b224266d6dc8ce2e0fb | [
"Apache-2.0"
] | 41 | 2017-03-18T17:57:19.000Z | 2021-06-13T14:17:36.000Z | correlations/server.py | egenerat/portfolio | b356c682222255e647202b224266d6dc8ce2e0fb | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
from flask import Flask, jsonify, request
from funds_correlations import correlations, parse_performances_from_dict
import traceback
# Module-level Flask application; routes below register against it.
app = Flask(__name__)
@app.route("/correlations", methods=['POST'])
def correlation_api():
    """POST endpoint: compute pairwise correlations from fund performances.

    Expects a JSON body parseable by parse_performances_from_dict; requires
    at least two performance series. Returns 400 on invalid input and 500
    (with traceback printed server-side) on unexpected errors.
    """
    try:
        payload = request.get_json()
        performances = parse_performances_from_dict(payload) if payload else []
        # Correlation needs at least two series to compare.
        if len(performances) < 2:
            return jsonify({
                'error': 'not enough valid data'
            }), 400
        corr, min_size, limiting = correlations(performances)
        return jsonify({
            'correlations': corr,
            'min_size': min_size,
            'limiting': limiting
        })
    except Exception:
        traceback.print_exc()
        return jsonify({
            'error': 'Internal error'
        }), 500
if __name__ == '__main__':
    # Development server only; binds all interfaces so containers can reach it.
    app.run(debug=True, host='0.0.0.0')
7c3c7904e73bf158c6e1f2f17881c8e02d135bf7 | 4,950 | py | Python | tweet-new-statements.py | factopolis/factopolis | ab8305299431c41218e0a5f7211b18d5b18b6ad0 | [
"CC-BY-4.0",
"MIT"
] | 3 | 2017-08-16T19:58:46.000Z | 2017-09-23T17:46:47.000Z | tweet-new-statements.py | factopolis/factopolis | ab8305299431c41218e0a5f7211b18d5b18b6ad0 | [
"CC-BY-4.0",
"MIT"
] | 2 | 2017-09-04T03:25:30.000Z | 2017-11-02T03:10:29.000Z | tweet-new-statements.py | factopolis/factopolis | ab8305299431c41218e0a5f7211b18d5b18b6ad0 | [
"CC-BY-4.0",
"MIT"
] | null | null | null | #!/usr/bin/python3
# Yes, this is in desparate need of refactoring. Functionality grew without proper planning.
import os, re, json, tweepy, datetime, pprint, humanize
def tweet(msg, in_reply_to):
  """Post msg to Twitter when credentials are present and this is a Travis
  build of master (not a PR); always echo the message to stdout."""
  env = os.environ
  credential_keys = ('TWITTER_CONSUMER_KEY', 'TWITTER_CONSUMER_SECRET',
                     'TWITTER_ACCESS_TOKEN_KEY', 'TWITTER_ACCESS_TOKEN_SECRET')
  # .get() returns None for missing keys, which never equals the expected
  # string, so this matches the original membership + equality checks.
  should_post = (all(key in env for key in credential_keys)
                 and env.get("TRAVIS_BRANCH") == "master"
                 and env.get("TRAVIS_PULL_REQUEST") == "false")
  if should_post:
    auth = tweepy.OAuthHandler(env['TWITTER_CONSUMER_KEY'],
                               env['TWITTER_CONSUMER_SECRET'])
    auth.set_access_token(env['TWITTER_ACCESS_TOKEN_KEY'],
                          env['TWITTER_ACCESS_TOKEN_SECRET'])
    api = tweepy.API(auth)
    api.update_status(msg, in_reply_to)
  if in_reply_to:
    print("Re " + str(in_reply_to) + ": " + msg)
  else:
    print(msg)
def twitterHandle(person):
  """Return the Twitter handle for `person` from web/person/<person>/index.json,
  or None if the file has no 'twitter' key.

  Fixes: the original leaked the open file handle and had a dead bare `None`
  expression in the else branch (no `return`); behavior is unchanged.
  """
  with open("web/person/{}/index.json".format(person)) as fp:
    data = json.load(fp)
  return data.get('twitter')
def checkerTwitterHandle(checker):
  """Return '@handle' for a fact-checker from web/checker/<checker>/index.json,
  or None if no 'twitter' key is present.

  Fixes: the original leaked the open file handle; behavior is unchanged.
  """
  with open("web/checker/{}/index.json".format(checker)) as fp:
    data = json.load(fp)
  if 'twitter' in data:
    return '@' + data['twitter']
  return None
# Pending statements grouped person -> claimId -> [statements, newest first];
# filled by parseStatement() and drained by handleStatements().
statementsToHandle = {}
def handleStatement(stmt, onlyReply=False):
  """Tweet about one incorrect statement.

  Builds a message naming the person (via @handle when known), the first
  claim's title, and the statement URL; falls back to a shorter template when
  the long form would exceed 280 characters. Statements older than 14 days are
  phrased in past tense with a humanized age prefix. Also replies directly to
  any Twitter sources that carry a tweet id.

  Args:
    stmt: statement dict loaded from a statement index.json (needs 'person',
      'id', 'name', 'claims', 'sources'; optionally 'date', 'where', 'checks').
    onlyReply: when True, skip the top-level tweet and only send the per-source
      replies (used for older duplicates of the same claim).
  """
  msg = ""
  handle = twitterHandle(stmt['person'])
  # Statement ids look like /person/<who>/YYYY-MM-DD[-slug]/.
  m = re.match('^/person/(([^/]+)/([0-9]{4})\-([0-9]{2})\-([0-9]{2}))(\-.+)?/$', stmt["id"])
  when = datetime.datetime(year=int(m.group(3), 10), month=int(m.group(4),10), day=int(m.group(5), 10))
  archive = (when + datetime.timedelta(14)) < datetime.datetime.today()
  if archive:
    msg = humanize.naturaltime(datetime.datetime.today() - when)
    msg = msg[0].upper() + msg[1:] + " "
  handle = twitterHandle(stmt['person'])
  if handle:
    if len(msg) == 0:
      # Leading '.' keeps an @-opening tweet visible to all followers.
      msg += '.'
    msg += "@" + handle
  else:
    msg += stmt['name']
  msg += " wrongly "
  if archive:
    msg += "claimed "
  else:
    msg += "claims "
  msg += stmt['claims'][0]['title']
  # 24 chars reserved for the shortened URL appended below — TODO confirm.
  if (len(msg) + 24) > 280:
    if archive:
      msg = 'Old incorrect statement by '
    else:
      msg = 'New incorrect statement by '
    if handle:
      msg += '@' + handle
    else:
      msg += stmt['name']
    msg += ' on ' + stmt['date']
    if 'where' in stmt:
      lenWithWhere = len(msg) + len(stmt['where']) + 28
      if lenWithWhere <= 280:
        msg += ' (' + stmt['where'] + ')'
    msg += ' added'
  checkerList = ''
  if "checks" in stmt:
    # Credit fact-checkers that have Twitter handles, if it still fits.
    handles = []
    for check in stmt['checks']:
      checkerHandle = checkerTwitterHandle(check['checkerId'])
      if checkerHandle:
        handles.append(checkerHandle)
    clist = " checked by " + (", ".join(handles))
    if (len(msg) + len(clist) + 25) < 280:
      checkerList = clist
  url = 'https://www.factopolis.com' + stmt['id']
  msg += ' ' + url
  msg += checkerList
  if not onlyReply:
    tweet(msg, None)
  # Reply directly to the offending tweet(s) when we know their ids.
  for source in stmt['sources']:
    if source['type'] == 'twitter':
      if 'id' in source:
        claim = stmt['claims'][0]
        msg = '@' + source['user'] + " No, " + claim['negativePlain'] + " "
        msg += 'https://www.factopolis.com/claims/' + claim['id'] + '/'
        tweet(msg, int(source['id']))
def parseStatement(person, stmtId):
  """Load one statement JSON and queue it for tweeting.

  Reads web/person/<person>/<stmtId>/index.json and prepends the statement to
  statementsToHandle[person][claimId], so the newest statement per claim sits
  at index 0 (handleStatements tweets only the first one at top level).

  Fixes: the original leaked the open file handle; the nested dict setup is
  collapsed with setdefault. Behavior is unchanged.
  """
  with open('web/person/' + person + '/' + stmtId + '/index.json') as fp:
    stmt = json.load(fp)
  claimId = stmt['claims'][0]['id']
  statementsToHandle.setdefault(person, {}).setdefault(claimId, []).insert(0, stmt)
def handleStatements():
  """Tweet every queued statement; only the first (newest) statement of each
  (person, claim) pair gets a top-level tweet, the rest are reply-only."""
  for claims_by_id in statementsToHandle.values():
    for statements in claims_by_id.values():
      for position, stmt in enumerate(statements):
        # position 0 is the newest entry (parseStatement inserts at the front).
        handleStatement(stmt, position != 0)
# Matches newly added statement files: content/person/<who>/YYYY-MM-DD[-slug].md
stmtRegex = re.compile("^content/person/([^/]+)/(([0-9]{4})\-([0-9]{2})\-([0-9]{2})(\-.+)?).md$")
# Diff against origin/master locally; Travis supplies the exact push range.
commit_range = "origin/master"
if "TRAVIS_COMMIT_RANGE" in os.environ:
  commit_range = os.environ["TRAVIS_COMMIT_RANGE"]
# --diff-filter=A: only files *added* in the range, one path per line.
stream = os.popen("git diff --name-only --diff-filter=A {}".format(commit_range))
while True:
  filename = stream.readline().strip()
  if not filename:
    break
  else:
    match = stmtRegex.match(filename)
    if match:
      # group(1)=person, group(2)=date[-slug] directory name.
      parseStatement(match.group(1), match.group(2))
      # handleStatement(filename)
handleStatements()
95ad8184c61dd157cb47fb8ad0f2fd40a62aed63 | 1,182 | py | Python | Researchforum/researchforum/controllers/signup.py | Pratikgit/Researchforum | 3f86d1d959ac51ca224faba302092c23e805cfb4 | [
"Apache-2.0"
] | null | null | null | Researchforum/researchforum/controllers/signup.py | Pratikgit/Researchforum | 3f86d1d959ac51ca224faba302092c23e805cfb4 | [
"Apache-2.0"
] | null | null | null | Researchforum/researchforum/controllers/signup.py | Pratikgit/Researchforum | 3f86d1d959ac51ca224faba302092c23e805cfb4 | [
"Apache-2.0"
] | null | null | null | import logging
from pylons import request, response, session, tmpl_context as c, url
from pylons.controllers.util import abort, redirect
from researchforum.lib.base import BaseController, render
from researchforum.model import *
log = logging.getLogger(__name__)
class SignupController(BaseController):
    """Handles user signup: renders the form and creates new accounts."""

    def index(self):
        """Render the signup form."""
        return render('/signup.html')

    def create_user(self):
        """Create a User from the POSTed form fields and set a login cookie.

        Returns a plain-text status string. Fixes two defects: the plaintext
        password is no longer written to the log, and new_user.id is only
        dereferenced after confirming save() returned a user (the original
        accessed it unconditionally, making the failure branch unreachable).
        """
        name = request.params.get('name')
        email = request.params.get('email')
        password = request.params.get('password')
        uni_name = request.params.get('university')
        # Never log credentials; password deliberately omitted.
        log.info("Params are:%s %s %s" % (name, email, uni_name))
        new_user = User(email=email, name=name, password=password,
                        uni_name=uni_name).save()
        if new_user:
            c.user_id = "%s" % new_user.id
            log.info("User Created Successfully with id:%s" % c.user_id)
            response.set_cookie('login', 'present', max_age=18000)
            return "User Created..."
        else:
            return "Error while creating user.."
4ff35c862e1d3a586c93d0f6c44ff5b60779af6e | 4,190 | py | Python | challenge/leet/medium/q695.py | odys-z/hello | 39ca67cae34eb4bc4cbd848a06b3c0d65a995954 | [
"MIT"
] | null | null | null | challenge/leet/medium/q695.py | odys-z/hello | 39ca67cae34eb4bc4cbd848a06b3c0d65a995954 | [
"MIT"
] | 3 | 2021-04-17T18:36:24.000Z | 2022-03-04T20:30:09.000Z | challenge/leet/medium/q695.py | odys-z/hello | 39ca67cae34eb4bc4cbd848a06b3c0d65a995954 | [
"MIT"
] | null | null | null | '''
695. Max Area of Island
https://leetcode.com/problems/max-area-of-island/
Input: grid = [
[_,_,1,_,_,_,_,1,_,_,_,_,_],
[_,_,_,_,_,_,_,1,1,1,_,_,_],
[_,1,1,_,1,_,_,_,_,_,_,_,_],
[_,1,_,_,1,1,_,_,1,_,1,_,_],
[_,1,_,_,1,1,_,_,1,1,1,_,_],
[_,_,_,_,_,_,_,_,_,_,1,_,_],
[_,_,_,_,_,_,_,1,1,1,_,_,_],
[_,_,_,_,_,_,_,1,1,_,_,_,_] ]
Output: 6
Explanation:
The answer is not 11, because the island must be connected 4-directionally.
Created on 1_ Jul 2_21
@author: Odys Zhou
'''
from typing import List
from unittest import TestCase
class Solution:
    '''
    Max Area of Island: size of the largest 4-connected component of 1s.
    Implemented as an iterative flood fill with an explicit stack (no
    recursion-depth limit), one pass per unvisited land cell.
    '''

    def maxAreaOfIsland(self, grid: List[List[int]]) -> int:
        rows, cols = len(grid), len(grid[0])
        visited = [[False] * cols for _ in range(rows)]
        best = 0
        for start_r in range(rows):
            for start_c in range(cols):
                if grid[start_r][start_c] == 0 or visited[start_r][start_c]:
                    continue
                # Flood-fill this island, counting its cells.
                stack = [(start_r, start_c)]
                visited[start_r][start_c] = True
                area = 0
                while stack:
                    r, c = stack.pop()
                    area += 1
                    for nr, nc in ((r + 1, c), (r - 1, c), (r, c + 1), (r, c - 1)):
                        if (0 <= nr < rows and 0 <= nc < cols
                                and grid[nr][nc] != 0 and not visited[nr][nc]):
                            visited[nr][nc] = True
                            stack.append((nr, nc))
                best = max(best, area)
        return best
if __name__ == '__main__':
    # Ad-hoc self-test: exercises small grids, the LeetCode example (answer 6),
    # degenerate 1x1 grids, all-land/all-water grids, and a checkerboard.
    s = Solution()
    t = TestCase()
    grid = [[1,0,1],
            [1,1,1]]
    t.assertEqual(5, s.maxAreaOfIsland(grid))
    grid = [[1,0],
            [1,1]]
    t.assertEqual(3, s.maxAreaOfIsland(grid))
    grid = [[1,0,0],
            [1,1,1]]
    t.assertEqual(4, s.maxAreaOfIsland(grid))
    grid = [[0,0,1],
            [1,1,1]]
    t.assertEqual(4, s.maxAreaOfIsland(grid))
    grid = [[1,0,0],
            [1,1,0]]
    t.assertEqual(3, s.maxAreaOfIsland(grid))
    # `_` is 0 so the example grids below read as water/land maps.
    _ = 0
    grid = [
        #0 1 2 3 4 5 6 7 8 9 10 12
        [_,_,1,_,_,_,_,1,_,_,_,_,_],
        [_,_,_,_,_,_,_,1,1,1,_,_,_],
        [_,1,1,_,1,_,_,_,_,_,_,_,_],
        [_,1,_,_,1,1,_,_,1,_,1,_,_],
        [_,1,_,_,1,1,_,_,1,1,1,_,_], # 4
        [_,_,_,_,_,_,_,_,_,_,1,_,_],
        [_,_,_,_,_,_,_,1,1,1,_,_,_],
        [_,_,_,_,_,_,_,1,1,_,_,_,_] ]
    t.assertEqual(6, s.maxAreaOfIsland(grid))
    grid = [[1]]
    t.assertEqual(1, s.maxAreaOfIsland(grid))
    grid = [[0]]
    t.assertEqual(0, s.maxAreaOfIsland(grid))
    grid = [[1 for _ in range(4)] for i in range(3)]
    t.assertEqual(12, s.maxAreaOfIsland(grid))
    grid = [[0 for _ in range(4)] for i in range(3)]
    t.assertEqual(0, s.maxAreaOfIsland(grid))
    # Checkerboard: every island is a single cell.
    grid = [
        #0 1 2 3 4 5 6 7 8 9 10 12
        [1,_,1,_,1,_,1,_,1,_,1,_,1],
        [_,1,_,1,_,1,_,1,_,1,_,1,_],
        [1,_,1,_,1,_,1,_,1,_,1,_,1],
        [_,1,_,1,_,1,_,1,_,1,_,1,_],
        [1,_,1,_,1,_,1,_,1,_,1,_,1],
        [_,1,_,1,_,1,_,1,_,1,_,1,_],
        [1,_,1,_,1,_,1,_,1,_,1,_,1], # 4
        [_,1,_,1,_,1,_,1,_,1,_,1,_],
        [1,_,1,_,1,_,1,_,1,_,1,_,1],
        [_,1,_,1,_,1,_,1,_,1,_,1,_],
        [1,_,1,_,1,_,1,_,1,_,1,_,1],
        [_,1,_,1,_,1,_,1,_,1,_,1,_],
        [1,_,1,_,1,_,1,_,1,_,1,_,1] ]
    t.assertEqual(1, s.maxAreaOfIsland(grid))
    print('OK!')
d24c979e256c96ae4f0a8c5ee108896f70af11c5 | 434 | py | Python | gen_big_slant_up.py | sechilds/altair_survey_pyconca_2019 | d4439031596f3351628e4adc5d5ad6ce12b1be70 | [
"MIT"
] | null | null | null | gen_big_slant_up.py | sechilds/altair_survey_pyconca_2019 | d4439031596f3351628e4adc5d5ad6ce12b1be70 | [
"MIT"
] | null | null | null | gen_big_slant_up.py | sechilds/altair_survey_pyconca_2019 | d4439031596f3351628e4adc5d5ad6ce12b1be70 | [
"MIT"
] | null | null | null | import pandas as pd
import altair as alt
datasaurus = pd.read_csv('data/DataSaurusDozen.tsv', delimiter = '\t')
alt.Chart(datasaurus[datasaurus.dataset == 'slant_up']).mark_point(color="#1b9e77").encode(
x = 'x:Q',
y = 'y:Q',
size = alt.value(90),
strokeWidth = alt.value(4)
).configure_axis(
labelFontSize = 24,
titleFontSize = 36
).properties(
height = 600,
width = 600
).save('big_slant_up.json')
| 24.111111 | 91 | 0.658986 |
d1fbee4e4d368569615cc48ea176df46a50180fa | 1,553 | py | Python | python-algorithm/leetcode/problem_40.py | isudox/nerd-algorithm | c1fbe153953cf3fc24395f75d102016fdf9ea0fa | [
"MIT"
] | 5 | 2017-06-11T09:19:34.000Z | 2019-01-16T16:58:31.000Z | python-algorithm/leetcode/problem_40.py | isudox/leetcode-solution | 60085e64deaf396a171367affc94b18114565c43 | [
"MIT"
] | 5 | 2020-03-22T13:53:54.000Z | 2020-03-23T08:49:35.000Z | python-algorithm/leetcode/problem_40.py | isudox/nerd-algorithm | c1fbe153953cf3fc24395f75d102016fdf9ea0fa | [
"MIT"
] | 1 | 2019-03-02T15:50:43.000Z | 2019-03-02T15:50:43.000Z | """40. Combination Sum II
https://leetcode.com/problems/combination-sum-ii/
Given a collection of candidate numbers (candidates) and
a target number (target), find all unique combinations in candidates where
the candidate numbers sums to target.
Each number in candidates may only be used once in the combination.
Note:
All numbers (including target) will be positive integers.
The solution set must not contain duplicate combinations.
Example 1:
Input: candidates = [10,1,2,7,6,1,5], target = 8,
A solution set is:
[
[1, 7],
[1, 2, 5],
[2, 6],
[1, 1, 6]
]
Example 2:
Input: candidates = [2,5,2,1,2], target = 5,
A solution set is:
[
[1,2,2],
[5]
]
"""
from typing import List
class Solution:
    """Combination Sum II: unique combinations of candidates (each used at
    most once) summing to target."""

    def combination_sum2(self, candidates: List[int], target: int) \
            -> List[List[int]]:
        # Sort in place (matches the original's side effect on the caller's
        # list) so duplicates are adjacent and the > target prune is valid.
        candidates.sort()
        results = []

        def search(start: int, remaining: int, chosen: List[int]) -> None:
            for i in range(start, len(candidates)):
                # Skip repeated values at the same recursion depth to avoid
                # duplicate combinations.
                if i > start and candidates[i] == candidates[i - 1]:
                    continue
                value = candidates[i]
                if value > remaining:
                    break
                chosen.append(value)
                if value == remaining:
                    results.append(chosen[:])
                    chosen.pop()
                    break
                search(i + 1, remaining - value, chosen)
                chosen.pop()

        search(0, target, [])
        return results
4d4b1a324a7127990e8676acbe7216845303a6ae | 46,350 | py | Python | tensor2tensor/data_generators/generator_utils.py | jaseweir/tensor2tensor | 2a33b152d7835af66a6d20afe7961751047e28dd | [
"Apache-2.0"
] | 12,921 | 2017-06-15T17:11:46.000Z | 2022-03-31T15:22:11.000Z | tensor2tensor/data_generators/generator_utils.py | jaseweir/tensor2tensor | 2a33b152d7835af66a6d20afe7961751047e28dd | [
"Apache-2.0"
] | 1,635 | 2017-06-18T15:29:27.000Z | 2022-03-19T20:35:23.000Z | tensor2tensor/data_generators/generator_utils.py | jaseweir/tensor2tensor | 2a33b152d7835af66a6d20afe7961751047e28dd | [
"Apache-2.0"
] | 3,521 | 2017-06-15T18:25:42.000Z | 2022-03-31T05:47:55.000Z | # coding=utf-8
# Copyright 2021 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for data generators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import gzip
import math
import multiprocessing
import os
import random
import stat
import tarfile
import tempfile
import numpy as np
import requests
import six
from six.moves import range # pylint: disable=redefined-builtin
# Imports urllib on Python2, urllib.request on Python3
import six.moves.urllib_request as urllib
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.utils import mlperf_log
import tensorflow.compat.v1 as tf
UNSHUFFLED_SUFFIX = "-unshuffled"
def to_example(dictionary):
  """Build a tf.train.Example from a (string -> int/float/str/bytes list) dict.

  Raises:
    ValueError: on an empty value list or an unsupported element type.
  """
  features = {}
  for key, values in six.iteritems(dictionary):
    if not values:
      raise ValueError("Empty generated field: %s" % str((key, values)))
    # In PY3, map objects are not subscriptable; materialize before peeking.
    if six.PY3 and isinstance(values, map):
      values = list(values)
    first = values[0]
    if (isinstance(first, six.integer_types) or
        np.issubdtype(type(first), np.integer)):
      feature = tf.train.Feature(int64_list=tf.train.Int64List(value=values))
    elif isinstance(first, float):
      feature = tf.train.Feature(float_list=tf.train.FloatList(value=values))
    elif isinstance(first, six.string_types):
      if not six.PY2:  # Convert in python 3.
        values = [bytes(x, "utf-8") for x in values]
      feature = tf.train.Feature(bytes_list=tf.train.BytesList(value=values))
    elif isinstance(first, bytes):
      feature = tf.train.Feature(bytes_list=tf.train.BytesList(value=values))
    else:
      raise ValueError("Value for %s is not a recognized type; v: %s type: %s" %
                       (key, str(first), str(type(first))))
    features[key] = feature
  return tf.train.Example(features=tf.train.Features(feature=features))
def generate_files_distributed(generator,
                               output_name,
                               output_dir,
                               num_shards=1,
                               max_cases=None,
                               task_id=0):
  """generate_files but with a single writer writing to shard task_id.

  Args:
    generator: yields (string -> int/float/str list) dictionaries.
    output_name: base name for the sharded output file.
    output_dir: directory to write into.
    num_shards: total number of shards across all tasks.
    max_cases: optional cap on the number of cases taken from the generator.
    task_id: which shard this worker writes (must be < num_shards).

  Returns:
    Path of the shard file written.
  """
  assert task_id < num_shards
  output_filename = sharded_name(output_name, task_id, num_shards)
  output_file = os.path.join(output_dir, output_filename)
  tf.logging.info("Writing to file %s", output_file)
  writer = tf.python_io.TFRecordWriter(output_file)
  counter = 0
  for case in generator:
    if counter % 100000 == 0:
      tf.logging.info("Generating case %d for %s." % (counter, output_name))
    counter += 1
    # Note: counter is incremented before the check, so exactly max_cases
    # cases are written.
    if max_cases and counter > max_cases:
      break
    example = to_example(case)
    writer.write(example.SerializeToString())
  writer.close()
  return output_file
def _data_filenames(output_name, output_dir, num_shards):
  """Full paths for every shard of output_name under output_dir."""
  shard_names = shard_filepath(output_name, num_shards)
  return [os.path.join(output_dir, shard_name) for shard_name in shard_names]
def train_data_filenames(problem, output_dir, num_shards):
  """Sharded file paths for the "-train" split of problem."""
  return _data_filenames(problem + "-train", output_dir, num_shards)
def dev_data_filenames(problem, output_dir, num_shards):
  """Sharded file paths for the "-dev" split of problem."""
  return _data_filenames(problem + "-dev", output_dir, num_shards)
def test_data_filenames(problem, output_dir, num_shards):
  """Sharded file paths for the "-test" split of problem."""
  return _data_filenames(problem + "-test", output_dir, num_shards)
def combined_data_filenames(problem, output_dir, num_training_shards):
  """Train shards plus single-shard dev and test file paths for problem."""
  return (train_data_filenames(problem, output_dir, num_training_shards) +
          dev_data_filenames(problem, output_dir, 1) + test_data_filenames(
              problem, output_dir, 1))
def sharded_name(base_name, shard, total_shards):
  """Name one shard, e.g. sharded_name("x", 7, 100) -> "x-00007-of-00100"."""
  return "-".join([base_name, "%.5d" % shard, "of", "%.5d" % total_shards])
def shard_filepath(fname, num_shards):
  """All shard names for fname: fname-00000-of-N ... fname-(N-1)-of-N."""
  # Format inlined from sharded_name to keep this a single expression.
  return ["%s-%.5d-of-%.5d" % (fname, shard, num_shards)
          for shard in range(num_shards)]
def outputs_exist(filenames):
  """Return the first final (suffix-stripped) output path that already exists.

  Each name has UNSHUFFLED_SUFFIX removed before checking, so this detects
  already-shuffled outputs. Returns None when nothing exists.
  """
  for fname in filenames:
    final_name = fname.replace(UNSHUFFLED_SUFFIX, "")
    if tf.gfile.Exists(final_name):
      return final_name
def generate_files(generator, output_filenames,
                   max_cases=None, cycle_every_n=1):
  """Generate cases from a generator and save as TFRecord files.

  Generated cases are transformed to tf.Example protos and saved as TFRecords
  in sharded files named output_dir/output_name-00..N-of-00..M=num_shards.

  Args:
    generator: a generator yielding (string -> int/float/str list) dictionaries.
    output_filenames: List of output file paths.
    max_cases: maximum number of cases to get from the generator;
      if None (default), we use the generator until StopIteration is raised.
    cycle_every_n: how many cases from the generator to take before
      switching to the next shard; by default set to 1, switch every case.
  """
  # Skip entirely if any final (shuffled) output already exists.
  if outputs_exist(output_filenames):
    tf.logging.info("Skipping generator because outputs files exists at {}"
                    .format(output_filenames))
    return
  # Write to ".incomplete" names and rename at the end so partial runs are
  # never mistaken for finished outputs.
  tmp_filenames = [fname + ".incomplete" for fname in output_filenames]
  num_shards = len(output_filenames)
  # Check if is training or eval, ref: train_data_filenames().
  if num_shards > 0:
    if "-train" in output_filenames[0]:
      tag = "train"
    elif "-dev" in output_filenames[0]:
      tag = "eval"
    else:
      tag = "other"
  writers = [tf.python_io.TFRecordWriter(fname) for fname in tmp_filenames]
  counter, shard = 0, 0
  for case in generator:
    if case is None:
      continue
    if counter % 100000 == 0:
      tf.logging.info("Generating case %d." % counter)
    counter += 1
    if max_cases and counter > max_cases:
      break
    example = to_example(case)
    writers[shard].write(example.SerializeToString())
    # Round-robin across shards every cycle_every_n cases.
    if counter % cycle_every_n == 0:
      shard = (shard + 1) % num_shards
  for writer in writers:
    writer.close()
  for tmp_name, final_name in zip(tmp_filenames, output_filenames):
    tf.gfile.Rename(tmp_name, final_name)
  # Report example counts to the MLPerf compliance log.
  if num_shards > 0:
    if tag == "train":
      mlperf_log.transformer_print(
          key=mlperf_log.PREPROC_NUM_TRAIN_EXAMPLES, value=counter)
    elif tag == "eval":
      mlperf_log.transformer_print(
          key=mlperf_log.PREPROC_NUM_EVAL_EXAMPLES, value=counter)
  tf.logging.info("Generated %s Examples", counter)
def download_report_hook(count, block_size, total_size):
  """Report hook for download progress.

  Args:
    count: current block number
    block_size: block size
    total_size: total size
  """
  # urllib passes total_size <= 0 (typically -1) when the server sends no
  # Content-Length header; skip reporting to avoid a ZeroDivisionError.
  if total_size <= 0:
    return
  percent = int(count * block_size * 100 / total_size)
  print("\r%d%%" % percent + " completed", end="\r")
def maybe_download(directory, filename, uri):
  """Download filename from uri unless it's already in directory.

  Copies a remote file to local if that local file does not already exist. If
  the local file pre-exists this function call, it does not check that the local
  file is a copy of the remote.

  Remote filenames can be filepaths, any URI readable by tensorflow.gfile, or a
  URL.

  Args:
    directory: path to the directory that will be used.
    filename: name of the file to download to (do nothing if it already exists).
    uri: URI to copy (or download) from.

  Returns:
    The path to the downloaded file.
  """
  tf.gfile.MakeDirs(directory)
  filepath = os.path.join(directory, filename)
  if tf.gfile.Exists(filepath):
    tf.logging.info("Not downloading, file already found: %s" % filepath)
    return filepath

  tf.logging.info("Downloading %s to %s" % (uri, filepath))
  try:
    tf.gfile.Copy(uri, filepath)
  except tf.errors.UnimplementedError:
    # gfile cannot copy plain http(s) URLs; fall back to urllib with a
    # temporary ".incomplete" name so partial downloads are never kept.
    if uri.startswith("http"):
      inprogress_filepath = filepath + ".incomplete"
      inprogress_filepath, _ = urllib.urlretrieve(
          uri, inprogress_filepath, reporthook=download_report_hook)
      # Print newline to clear the carriage return from the download progress
      print()
      tf.gfile.Rename(inprogress_filepath, filepath)
    else:
      raise ValueError("Unrecognized URI: " + filepath)
  statinfo = os.stat(filepath)
  tf.logging.info("Successfully downloaded %s, %s bytes." %
                  (filename, statinfo.st_size))
  return filepath
def maybe_download_from_drive(directory, filename, url):
  """Download filename from Google drive unless it's already in directory.

  Args:
    directory: path to the directory that will be used.
    filename: name of the file to download to (do nothing if it already exists).
    url: URL to download from.

  Returns:
    The path to the downloaded file.
  """
  if not tf.gfile.Exists(directory):
    tf.logging.info("Creating directory %s" % directory)
    tf.gfile.MakeDirs(directory)
  filepath = os.path.join(directory, filename)
  confirm_token = None
  if tf.gfile.Exists(filepath):
    tf.logging.info("Not downloading, file already found: %s" % filepath)
    return filepath

  # Since the file is big, drive will scan it for virus and take it to a
  # warning page. We find the confirm token on this page and append it to the
  # URL to start the download process.
  confirm_token = None
  session = requests.Session()
  response = session.get(url, stream=True)
  for k, v in response.cookies.items():
    if k.startswith("download_warning"):
      confirm_token = v

  if confirm_token:
    url = url + "&confirm=" + confirm_token
  tf.logging.info("Downloading %s to %s" % (url, filepath))

  response = session.get(url, stream=True)
  # Now begin the download.
  # Stream the body in 16 KiB chunks so large files never sit in memory.
  chunk_size = 16 * 1024
  with open(filepath, "wb") as f:
    for chunk in response.iter_content(chunk_size):
      if chunk:
        f.write(chunk)

  # Print newline to clear the carriage return from the download progress
  print()
  statinfo = os.stat(filepath)
  tf.logging.info("Successfully downloaded %s, %s bytes." % (filename,
                                                             statinfo.st_size))
  return filepath
def gunzip_file(gz_path, new_path):
  """Unzips from gz_path into new_path.

  Args:
    gz_path: path to the zipped file.
    new_path: path to where the file will be unzipped.
  """
  if tf.gfile.Exists(new_path):
    tf.logging.info("File %s already exists, skipping unpacking" % new_path)
    return
  tf.logging.info("Unpacking %s to %s" % (gz_path, new_path))
  # We may be unpacking into a newly created directory, add write mode.
  # Bug fix: the original used boolean `or`, which evaluates to S_IRWXU alone;
  # permission flags must be combined with bitwise OR.
  mode = stat.S_IRWXU | stat.S_IXGRP | stat.S_IRGRP | stat.S_IROTH
  os.chmod(os.path.dirname(new_path), mode)
  with gzip.open(gz_path, "rb") as gz_file:
    with tf.gfile.GFile(new_path, mode="wb") as new_file:
      for line in gz_file:
        new_file.write(line)
def get_or_generate_vocab_inner(data_dir, vocab_filename, vocab_size,
                                generator, max_subtoken_length=None,
                                reserved_tokens=None):
  """Inner implementation for vocab generators.

  Args:
    data_dir: The base directory where data and vocab files are stored. If None,
      then do not save the vocab even if it doesn't exist.
    vocab_filename: relative filename where vocab file is stored
    vocab_size: target size of the vocabulary constructed by SubwordTextEncoder
    generator: a generator that produces tokens from the vocabulary
    max_subtoken_length: an optional integer. Set this to a finite value to
      avoid quadratic costs during vocab building.
    reserved_tokens: List of reserved tokens. `text_encoder.RESERVED_TOKENS`
      should be a prefix of `reserved_tokens`. If `None`, defaults to
      `RESERVED_TOKENS`.

  Returns:
    A SubwordTextEncoder vocabulary object.
  """
  # Reuse an existing vocab file when possible.
  if data_dir and vocab_filename:
    vocab_filepath = os.path.join(data_dir, vocab_filename)
    if tf.gfile.Exists(vocab_filepath):
      tf.logging.info("Found vocab file: %s", vocab_filepath)
      return text_encoder.SubwordTextEncoder(vocab_filepath)
  else:
    vocab_filepath = None

  tf.logging.info("Generating vocab file: %s", vocab_filepath)
  vocab = text_encoder.SubwordTextEncoder.build_from_generator(
      generator, vocab_size, max_subtoken_length=max_subtoken_length,
      reserved_tokens=reserved_tokens)

  # Persist only when a data_dir was given.
  if vocab_filepath:
    tf.gfile.MakeDirs(data_dir)
    vocab.store_to_file(vocab_filepath)

  return vocab
def get_or_generate_vocab(data_dir, tmp_dir, vocab_filename, vocab_size,
                          sources, file_byte_budget=1e6,
                          max_subtoken_length=None):
  """Generate a vocabulary from the datasets in sources.

  Downloads/extracts each source into tmp_dir, samples up to file_byte_budget
  bytes of lines per file, and builds (or loads) a SubwordTextEncoder.
  """
  vocab_generator = generate_lines_for_vocab(tmp_dir, sources, file_byte_budget)
  return get_or_generate_vocab_inner(data_dir, vocab_filename, vocab_size,
                                     vocab_generator, max_subtoken_length)
def generate_lines_for_vocab(tmp_dir, sources, file_byte_budget=1e6):
  """Generate lines for vocabulary generation.

  Each source is a (url, [lang_file, ...]) pair. The archive at url is
  downloaded into tmp_dir and extracted as needed (tar/tgz, then .gz per
  file); from each text file roughly file_byte_budget bytes of lines are
  yielded, sampled evenly across the file.
  """
  tf.logging.info("Generating vocab from: %s", str(sources))
  for source in sources:
    url = source[0]
    filename = os.path.basename(url)
    compressed_file = maybe_download(tmp_dir, filename, url)

    for lang_file in source[1]:
      tf.logging.info("Reading file: %s" % lang_file)
      filepath = os.path.join(tmp_dir, lang_file)

      # Extract from tar if needed.
      if not tf.gfile.Exists(filepath):
        read_type = "r:gz" if filename.endswith("tgz") else "r"
        with tarfile.open(compressed_file, read_type) as corpus_tar:
          corpus_tar.extractall(tmp_dir)

      # For some datasets a second extraction is necessary.
      if lang_file.endswith(".gz"):
        new_filepath = os.path.join(tmp_dir, lang_file[:-3])
        if tf.gfile.Exists(new_filepath):
          tf.logging.info(
              "Subdirectory %s already exists, skipping unpacking" % filepath)
        else:
          tf.logging.info("Unpacking subdirectory %s" % filepath)
          gunzip_file(filepath, new_filepath)
        filepath = new_filepath

      with tf.gfile.GFile(filepath, mode="r") as source_file:
        file_byte_budget_ = file_byte_budget
        counter = 0
        # Skip countermax lines between samples so the budget is spread over
        # the whole file rather than just its beginning.
        countermax = int(source_file.size() / file_byte_budget_ / 2)
        for line in source_file:
          if counter < countermax:
            counter += 1
          else:
            if file_byte_budget_ <= 0:
              break
            line = line.strip()
            file_byte_budget_ -= len(line)
            counter = 0
            yield line
def get_or_generate_tabbed_vocab(data_dir, tmp_dir, source_filename,
                                 index, vocab_filename, vocab_size):
  r"""Generate a vocabulary from a tabbed source file.

  The source is a file of source, target pairs, where each line contains
  a source string and a target string, separated by a tab ('\t') character.
  The index parameter specifies 0 for the source or 1 for the target.

  Args:
    data_dir: path to the data directory.
    tmp_dir: path to the temporary directory.
    source_filename: the name of the tab-separated source file.
    index: index.
    vocab_filename: the name of the vocabulary file.
    vocab_size: vocabulary size.

  Returns:
    The vocabulary.
  """
  def token_stream():
    # Yield only the requested half (source or target) of each tabbed line.
    source_path = os.path.join(tmp_dir, source_filename)
    tf.logging.info("Generating vocab from %s", source_path)
    with tf.gfile.GFile(source_path, mode="r") as tabbed_file:
      for raw_line in tabbed_file:
        raw_line = raw_line.strip()
        if raw_line and "\t" in raw_line:
          yield raw_line.split("\t", 1)[index].strip()

  return get_or_generate_vocab_inner(data_dir, vocab_filename, vocab_size,
                                     token_stream())
def get_or_generate_txt_vocab(data_dir, vocab_filename, vocab_size,
                              filepatterns):
  """Generate a vocabulary from txt files with example-per-line."""
  if isinstance(filepatterns, str):
    filepatterns = [filepatterns]

  def line_stream():
    # Walk every file matching every pattern, one stripped line at a time.
    tf.logging.info("Generating vocab from %s", filepatterns)
    for pattern in filepatterns:
      for path in tf.gfile.Glob(pattern):
        with tf.gfile.GFile(path, mode="r") as txt_file:
          for raw_line in txt_file:
            yield raw_line.strip()

  return get_or_generate_vocab_inner(data_dir, vocab_filename, vocab_size,
                                     line_stream())
def read_records(filename):
  """Load every serialized record of a TFRecord file into a list."""
  records = []
  for count, record in enumerate(tf.python_io.tf_record_iterator(filename), 1):
    records.append(record)
    # Periodic progress logging for very large files.
    if count % 100000 == 0:
      tf.logging.info("read: %d", count)
  return records
def write_records(records, out_filename):
  """Write the given serialized records to a single TFRecord file."""
  writer = tf.python_io.TFRecordWriter(out_filename)
  for idx, serialized in enumerate(records):
    writer.write(serialized)
    # Log progress every 100k records (skipping the very first record).
    if idx and idx % 100000 == 0:
      tf.logging.info("write: %d", idx)
  writer.close()
def generate_dataset_and_shuffle(train_gen,
                                 train_paths,
                                 dev_gen,
                                 dev_paths,
                                 shuffle=True):
  """Materialize train and dev generators to disk, then optionally shuffle."""
  for gen, paths in ((train_gen, train_paths), (dev_gen, dev_paths)):
    generate_files(gen, paths)
  mlperf_log.transformer_print(key=mlperf_log.INPUT_ORDER)
  if shuffle:
    shuffle_dataset(train_paths + dev_paths)
def _shuffle_single(fname, extra_fn=None):
  """Shuffle a single file of records.

  Args:
    fname: a string
    extra_fn: an optional function from list of TFRecords to list of TFRecords
      to be called after shuffling.
  """
  shuffled = read_records(fname)
  random.shuffle(shuffled)
  if extra_fn is not None:
    shuffled = extra_fn(shuffled)
  # Write to the final (suffix-stripped) name and drop the unshuffled file.
  write_records(shuffled, fname.replace(UNSHUFFLED_SUFFIX, ""))
  tf.gfile.Remove(fname)
def shuffle_dataset(filenames, extra_fn=None):
  """Shuffles the dataset.

  Args:
    filenames: a list of strings
    extra_fn: an optional function from list of records to list of records
      to be called after shuffling a file.
  """
  # Idempotence guard: shuffled outputs already on disk means nothing to do.
  if outputs_exist(filenames):
    tf.logging.info("Skipping shuffle because output files exist")
    return
  tf.logging.info("Shuffling data...")
  for unshuffled_name in filenames:
    _shuffle_single(unshuffled_name, extra_fn=extra_fn)
  tf.logging.info("Data shuffled.")
class SequencePacker(object):
  """Accumulates several target-only sequences into one packed example.

  Sequences are concatenated with `spacing` zero tokens between them, while
  parallel segmentation and position lists are maintained so downstream
  models can tell the original sequences apart.  See pack_examples().
  """

  def __init__(self, first_sequence, spacing=2):
    self._spacing = spacing
    self._ids = list(first_sequence)
    self._segmentation = [1] * len(first_sequence)
    self._position = list(range(len(first_sequence)))

  def add(self, ids):
    """Append another sequence, separated by `spacing` zero tokens."""
    gap = [0] * self._spacing
    if self._segmentation:
      segment_id = self._segmentation[-1] + 1
    else:
      segment_id = 1
    self._ids += gap + list(ids)
    self._segmentation += gap + [segment_id] * len(ids)
    self._position += gap + list(range(len(ids)))

  def can_fit(self, ids, packed_length):
    """Return True if `ids` plus spacing still fits within `packed_length`."""
    return len(ids) + self._spacing + len(self._ids) <= packed_length

  def to_dict(self):
    """Return the packed example as a feature dictionary."""
    return {
        "inputs": [0],
        "targets": self._ids,
        "targets_segmentation": self._segmentation,
        "targets_position": self._position,
    }
class SequencePairPacker(object):
  """Packs (inputs, targets) pairs using one SequencePacker per side.

  See comments to pack_examples()
  """

  def __init__(self, first_sequence_pair, spacing=2):
    first_inputs, first_targets = first_sequence_pair
    self._inputs = SequencePacker(first_inputs, spacing)
    self._targets = SequencePacker(first_targets, spacing)

  def add(self, pair):
    """Append one (inputs, targets) pair to both underlying packers."""
    inputs, targets = pair
    self._inputs.add(inputs)
    self._targets.add(targets)

  def can_fit(self, pair, packed_length):
    """Return True only if both halves of `pair` fit in their packers."""
    inputs, targets = pair
    return (self._inputs.can_fit(inputs, packed_length) and
            self._targets.can_fit(targets, packed_length))

  def to_dict(self):
    """Merge the two packers' outputs into one seq2seq feature dict."""
    ret = self._targets.to_dict()
    inputs_half = self._inputs.to_dict()
    ret["inputs"] = inputs_half["targets"]
    ret["inputs_segmentation"] = inputs_half["targets_segmentation"]
    ret["inputs_position"] = inputs_half["targets_position"]
    return ret
def pack_examples(examples,
                  has_inputs,
                  packed_length=256,
                  spacing=2,
                  queue_size=10,
                  chop_long_sequences=False):
  """Pack examples into longer examples.

  If has_inputs=False, we are packing single-sequence examples with
  targets only and no inputs.
  In this case, we concatenate the targets from several examples to form
  each new example. We insert a number of zeros for spacing between the
  original sequences. This is to help the sequences stay separate
  under convolutions. If chop_long_sequences is set, then any input sequence
  longer than packed_length gets chopped up into multiple examples. Otherwise,
  long sequences are emitted as singletons.
  If has_inputs=True, then we are packing sequence-to-sequence
  examples. We combine several examples by concatenating the inputs
  (as above) and concatenating the targets (as above). Chopping of
  long sequences is not supported.
  The packed examples are represented as dictionaries containing:
    "inputs", "targets": the packed sequences described above
    "inputs_segmentation", "targets_segmentation":
       Sequences aligned with "inputs", "targets" specifying to which original
       sequence each position belongs. Numbering starts from 1, and 0 is used
       for spacing. This information is useful for preventing attention across
       segments.
       e.g. [1 1 1 1 1 1 0 0 2 2 2 0 0 3 3 3 3 3 0 0 4 4 4]
    "inputs_position", "targets_position":
       Sequences aligned with "inputs", "targets" specifying position within
       the original sequence. This is useful for positional encodings.
       e.g. [0 1 2 3 4 5 0 0 0 1 2 0 0 0 1 2 3 4 0 0 0 1 2]
  Args:
    examples: a generator returning feature dictionaries.
    has_inputs: a boolean
    packed_length: an integer
    spacing: an integer
    queue_size: an integer
    chop_long_sequences: a boolean
  Yields:
    feature dictionaries.
  """
  packer = SequencePairPacker if has_inputs else SequencePacker
  combined = []  # Bounded FIFO of partially filled packers ("bins").
  for example in examples:
    x = ((example["inputs"], example["targets"])
         if has_inputs else example["targets"])
    if chop_long_sequences and len(x) > packed_length:
      assert not has_inputs
      num_fragments = len(x) // packed_length
      # Emit full-length fragments directly; the remainder is packed below.
      for i in range(num_fragments):
        yield packer(
            x[packed_length * i:packed_length * (i + 1)], spacing).to_dict()
      x = x[packed_length * num_fragments:]
    # First-fit: place the example into the first open bin with enough room.
    added = False
    for c in combined:
      if c.can_fit(x, packed_length):
        c.add(x)
        added = True
        break
    if not added:
      # No open bin fits: if the queue is full, flush the oldest bin, then
      # open a new bin seeded with this example.
      if len(combined) == queue_size:
        yield combined[0].to_dict()
        combined = combined[1:]
      combined.append(packer(x, spacing))
  # Flush the remaining partially filled bins.
  for c in combined:
    yield c.to_dict()
def pack_dataset(dataset, length, keys=None, use_custom_ops=False):
  """Creates a 'packed' version of a dataset on-the-fly.

  This is meant to replace the irritation of having to create a separate
  "packed" version of a dataset to train efficiently on TPU.
  Each example in the output dataset represents several examples in the
  input dataset.
  For each key in the input dataset, two additional keys are created:
  <key>_segmentation: an int32 tensor identifying the parts
     representing the original example.
  <key>_position: an int32 tensor identifying the position within the original
     example.
  Example:
  Two input examples get combined to form an output example.
  The input examples are:
  {"inputs": [8, 7, 1, 0], "targets":[4, 1, 0]}
  {"inputs": [2, 3, 4, 1], "targets":[5, 6, 1]}
  The output example is:
  {
                 "inputs": [8, 7, 1, 2, 3, 4, 1, 0, 0, 0]
    "inputs_segmentation": [1, 1, 1, 2, 2, 2, 2, 0, 0, 0]
        "inputs_position": [0, 1, 2, 0, 1, 2, 3, 0, 0, 0]
                "targets": [4, 1, 5, 6, 1, 0, 0, 0, 0, 0]
   "targets_segmentation": [1, 1, 2, 2, 2, 0, 0, 0, 0, 0]
       "targets_position": [0, 1, 0, 1, 2, 0, 0, 0, 0, 0]
  }
  0 represents padding in both the inputs and the outputs.
  Sequences in the incoming examples are truncated to length "length", and the
  sequences in the output examples all have fixed (padded) length "length".
  Args:
    dataset: a tf.data.Dataset
    length: an integer
    keys: a list of strings (e.g. ["inputs", "targets"])
    use_custom_ops: use a custom c++ op not included in standard tf (faster)
  Returns:
    a tf.data.Dataset
  """
  shapes = dataset.output_shapes
  if keys is None:
    keys = shapes.keys()
  # Validate that every requested key exists and holds 1-D tensors.
  for k in keys:
    if k not in shapes:
      raise ValueError("Key %s not found in dataset. Available keys are %s"
                       % (k, shapes.keys()))
    if not shapes[k].is_compatible_with(tf.TensorShape([None])):
      raise ValueError("Tensors to be packed must be one-dimensional.")
  if use_custom_ops:
    # Fast path; requires the custom-compiled pack_sequences_ops binary.
    return _pack_with_custom_ops(dataset, keys, length)
  else:
    packer = SequenceDatasetPacker(length, spacing=0, queue_size=10)
    return packer(dataset, cycle_length=10, keys=keys)
def _pack_with_custom_ops(dataset, keys, length):
  """Helper-function for packing a dataset which has already been batched.

  See pack_dataset()
  Relies on custom ops which require a custom compiled binary.
  Faster than _pack_with_tf_ops(), and denser packing.

  Args:
    dataset: a dataset containing padded batches of examples.
    keys: a list of strings (must have length 2)
    length: an integer
  Returns:
    a dataset.
  """
  from tensor2tensor.data_generators.ops import pack_sequences_ops  # pylint: disable=g-import-not-at-top
  # trim to length
  dataset = dataset.map(lambda x: {k: x[k][:length] for k in keys})
  # Setting batch_size=length ensures that the concatenated sequences (if they
  # have length >=1) are sufficient to fill at least one packed example.
  batch_size = length
  dataset = dataset.padded_batch(
      batch_size, padded_shapes={k: [-1] for k in keys})
  # better packing (may be faster) but requires custom-built binary.
  k1, k2 = keys
  def map_fn_custom(x):
    """Map-function."""
    # The op packs both sequences jointly and returns packed tokens plus
    # segmentation/position metadata for each of the two keys.
    (k1_packed, k1_segmengation, k1_position,
     k2_packed, k2_segmentation, k2_position) = (
         pack_sequences_ops.pack_sequences2(x[k1], x[k2], length, length))
    packed = {
        k1: k1_packed,
        k1 + "_segmentation": k1_segmengation,
        k1 + "_position": k1_position,
        k2: k2_packed,
        k2 + "_segmentation": k2_segmentation,
        k2 + "_position": k2_position,
    }
    # Flatten the batch of packed examples back into a stream of examples.
    return tf.data.Dataset.from_tensor_slices(packed)
  dataset = dataset.flat_map(map_fn_custom)
  return dataset
INDEX_DTYPE = tf.int32
class SequenceDatasetPacker(object):
  """Helper class for packing a dataset of sequences in an online fashon.

  The input sequence is expected to be a tuple of 1D Tensors which will be
  converted to a dataset which produces a dict of packed examples, example
  positions, and segment ids.
  If `window_size` or `cycle_length` is specified multiple packing operations
  will be performed in parallel to increase throughput. A value of None will
  select default parallelism parameters. If this dataset will be run on a TPU,
  specifying a cycle_length > 10 is recommended.
  """

  def __init__(self, packed_length=256, spacing=0, queue_size=10,
               chop_long_sequences=False):
    # packed_length: fixed length of every packed output example.
    # spacing: number of zero tokens inserted between packed sequences.
    # queue_size: number of partially-filled bins the first-fit-descending
    #   scan keeps open at once.
    # chop_long_sequences: chop over-long sequences instead of filtering them.
    self._packed_length = packed_length
    self._spacing = spacing
    self._queue_size = queue_size
    self._chop_long_sequences = chop_long_sequences
    # Transient per-call state, populated by _standardize() inside _pack()
    # and reset to None when packing finishes.
    self._num_sequences = None
    self._token_dtype = None

  def __call__(self, dataset, **kwargs):
    """Pack `dataset`; routes to the concurrent path if parallelism kwargs are given."""
    if {"window_size", "cycle_length"}.intersection(kwargs):
      return self._concurrent_pack(dataset, **kwargs)
    return self._pack(dataset, **kwargs)

  def _concurrent_pack(self, dataset, window_size=None, cycle_length=None,
                       keys=None):
    """Selects sensible default parallelism parameters based for a task."""
    if window_size is None:
      # This is a heuristic to fill all of the queues 10 times, and should do a
      # reasonable job balancing parallelism (which benefits from lower window
      # size) with packing efficiency (which suffers from edge effects when the
      # window size is too low.)
      window_size = int(self._packed_length / 8 * self._queue_size * 10)
    if cycle_length is None:
      # Typically binning one stream will saturate about 3 cores.
      # Note on TPUs:
      #   cycle_length should still be explicitly set when training on TPUs,
      #   since the cpu count will be the local CPU count (which could be quite
      #   small), wereas the transforms will actually run on the TPU host
      #   controller which has a very robust CPU.
      cycle_length = max([int(multiprocessing.cpu_count() / 3), 1])
    return self._pack(dataset, window_size=window_size,
                      cycle_length=cycle_length, keys=keys)

  def _pack(self, dataset, window_size=None, cycle_length=None,
            deterministic=False, keys=None):
    """Main method for chaining together packing transformation steps."""
    (dataset, self._num_sequences, self._token_dtype, keys
    ) = self._standardize(dataset, keys)
    if window_size is None:
      dataset = self._scanning_pack(dataset)
    else:
      # Dataset.window splits nested Tensors.
      re_zip = lambda *x: tf.data.Dataset.zip(x)
      dataset = dataset.window(window_size).map(re_zip).interleave(
          self._scanning_pack, cycle_length=cycle_length,
          block_length=window_size,
          num_parallel_calls=tf.data.experimental.AUTOTUNE)
    if not deterministic:
      # Sloppy interleave offers a marginal performance improvement.
      options = tf.data.Options()
      options.experimental_deterministic = False
      dataset = dataset.with_options(options)
    dataset = dataset.map(
        self._finalize, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    # Clear transient state so the packer can be reused.
    self._num_sequences, self._token_dtype = None, None
    if keys:
      def dict_pack(example):
        # Unstack the packed columns back into per-key features.
        output = {}
        for i, key in enumerate(keys):
          output[key] = example["contents"][:, i]
          output[key + "_segmentation"] = example["segment"][:, i]
          output[key + "_position"] = example["position"][:, i]
        return output
      dataset = dataset.map(dict_pack)
    return dataset

  def _standardize(self, dataset, keys):
    """Force dataset structure into a tuple of Tensors."""
    shapes = tf.data.get_output_shapes(dataset)
    if isinstance(shapes, dict):
      keys = keys or tuple(shapes.keys())
      dataset = dataset.map(lambda x: tuple(x[k] for k in keys))
      shapes = tf.data.get_output_shapes(dataset)
    if not all(isinstance(i, tf.TensorShape) for i in shapes):
      # Internally this class expects tuples of Tensors, even for the degenerate
      # case of a single sequence.
      dataset = dataset.map(lambda x: (x,))
      shapes = tf.data.get_output_shapes(dataset)
    for s in shapes:
      if not s.is_compatible_with(tf.TensorShape([None])):
        raise ValueError("Tensors to be packed must be one-dimensional.")
    if not shapes:
      raise ValueError("Expected sequence dataset.")
    if self._chop_long_sequences and len(shapes) != 1:
      raise ValueError("chop_long_sequences expects a single sequence dataset.")
    token_types = tf.data.get_output_types(dataset)
    if len(set(token_types)) > 1:
      raise ValueError("Inconsistent dtypes: {}".format(token_types))
    return dataset, len(shapes), token_types[0], keys

  def _eviction_fn(self, _):
    # Dummy all-minus-one sequences appended after the real data to flush
    # the remaining partially filled bins (see _scanning_pack).
    return tuple(-tf.ones((self._packed_length,), dtype=self._token_dtype)
                 for _ in range(self._num_sequences))

  def _scan_initial_state(self):
    """Create TensorArrays and indices to track bin assignment.

    availability: TensorArray[queue_size, num_sequences]
      This represents the number of tokens available in the ith bin.
      See implementation note below.
    contents: TensorArray[queue_size, num_sequences * 2]
      This holds the actual contents of the packed strings as well as a bit
      mask indicating where sequences begin. It is stored in a flat vector and
      is accessed in offsets of packed_length.
    top_index: scalar [0, queue_size)
      Integer tensor indicating which index is the "top" bin. See implementation
      note below.
    IMPLEMENTATION_NOTE:
      The FFD algorithm periodically pops the topmost queue and pushes a new
      one to replace it. In order to replicate those semantics with a fixed size
      TensorArray, indexing operations are shifted by top_index. For example,
      instead of:
        `queue_available.read(i)`
      a read is instead performed as:
        `queue_available.read((i - top_index) % queue_size)`
      to account for the fact that the "ith" logical FFD queue is stored at
      position j. This means that the pop / push update can be performed by
      simply incrementing top_index. (And zeroing the old top_index position.)
    Returns:
      The state for the binning scan.
    """
    all_available = tf.ones((self._queue_size, self._num_sequences),
                            dtype=INDEX_DTYPE) * self._packed_length
    total_size = self._packed_length * self._queue_size
    total_size_range = tf.range(total_size, dtype=INDEX_DTYPE)
    empty = tf.zeros((total_size, self._num_sequences * 2),
                     dtype=self._token_dtype)
    availability = tf.TensorArray(
        dtype=INDEX_DTYPE, size=self._queue_size, dynamic_size=False,
        clear_after_read=False, element_shape=(self._num_sequences,)
        ).scatter(tf.range(self._queue_size, dtype=INDEX_DTYPE), all_available)
    contents = tf.TensorArray(
        dtype=self._token_dtype, size=total_size, dynamic_size=False,
        clear_after_read=False, element_shape=(self._num_sequences * 2,)
        ).scatter(total_size_range, empty)
    # Which index should be considered the "top" bucket for the purpose of
    # the first-fit descending algorithm.
    top_index = tf.zeros((), dtype=INDEX_DTYPE)
    return availability, contents, top_index

  def _scanning_pack(self, dataset):
    """Apply scan based pack to a dataset."""
    if self._chop_long_sequences:
      dataset = dataset.map(lambda x: (x[:self._packed_length],))
    else:
      dataset = dataset.filter(lambda *x: tf.reduce_max(  # pylint: disable=g-long-lambda
          tf.stack([tf.shape(i)[0] for i in x]), axis=0) <= self._packed_length)
    # In order to retrieve the sequences which are still in the queue when the
    # dataset is exhausted, we feed dummy sequences which are guaranteed to
    # displace the remaining elements.
    dataset = dataset.concatenate(
        tf.data.Dataset.range(self._queue_size).map(self._eviction_fn))
    initial_state = self._scan_initial_state()
    step_fn = functools.partial(
        tf.autograph.to_graph(_scan_step_fn), packed_length=self._packed_length,
        queue_size=self._queue_size, spacing=self._spacing,
        num_sequences=self._num_sequences, token_dtype=self._token_dtype)
    dataset = dataset.apply(tf.data.experimental.scan(initial_state, step_fn))
    # Drop the (queue-not-full) placeholder outputs emitted by the scan step.
    is_real_sample = lambda valid_sample, _: valid_sample
    return dataset.filter(is_real_sample)

  def _compute_auxiliary_structure(self, contents_and_mask):
    """Compute segment and position metadata."""
    contents = contents_and_mask[:, :self._num_sequences]
    start_mask = tf.cast(contents_and_mask[:, self._num_sequences:],
                         dtype=INDEX_DTYPE)
    segment = tf.cumsum(start_mask, axis=0)
    uniform_count = tf.ones_like(segment[:, 0])
    position = []
    for i in range(self._num_sequences):
      segment_slice = segment[:, i]
      counts = tf.math.segment_sum(uniform_count, segment[:, i])
      position.append(tf.range(self._packed_length) - tf.cumsum(
          tf.gather(counts, segment_slice - 1) * start_mask[:, i]))
    position = tf.concat([i[:, tf.newaxis] for i in position], axis=1)
    # Correct for padding tokens.
    pad_mask = tf.cast(tf.not_equal(contents, 0), dtype=INDEX_DTYPE)
    segment *= pad_mask
    position *= pad_mask
    return segment, position

  def _finalize(self, _, contents):
    """Structure output and compute segment and position metadata."""
    # The output shape information is lost during the filter; however we can
    # guarantee the shape. (That's the point of this exercise, after all!)
    contents.set_shape((self._packed_length, self._num_sequences * 2))
    # Both the dummy branch of the scan step function and the eviction dataset
    # use vectors of minus one. The cost of this check is negligible and the
    # leakage of such dummy sequences would be difficult to debug downstream.
    check_leaks = tf.assert_none_equal(contents, -tf.ones_like(contents))
    with tf.control_dependencies([check_leaks]):
      contents = tf.identity(contents)
    segment, position = self._compute_auxiliary_structure(contents)
    return {"contents": contents[:, :self._num_sequences],
            "segment": segment, "position": position}
def _scan_step_fn(state, example, packed_length, queue_size, spacing,
                  num_sequences, token_dtype):  # pylint: disable=g-doc-args
  """Transform function used by tf.data.experimental.scan to process an example.

  This is written as a stateless function rather than a class method because we
  trace it with AutoGraph (in order to simplify the conditional), and this way
  we don't have to worry about handling re-tracing semantics.
  Args:
    See the SequenceDatasetPacker class.
  Returns:
    The updated queue state, and either a packed example or a dummy sequence
    which will be filtered out downstream.
  """
  # Convert TensorArray tuples to lists since we'll need to replace them.
  availability, contents, top_index = state
  lengths = tf.concat([tf.shape(i) for i in example], axis=0)
  start_availability = availability.stack()
  # A bin "can fit" only if every parallel sequence of the example fits.
  can_fit = tf.reduce_all(tf.greater_equal(start_availability, lengths), axis=1)
  any_can_fit = tf.reduce_any(can_fit, axis=0)
  # AutoGraph will convert this block to a tf.cond
  if any_can_fit:
    # This indicates where in the FFD queue rotation a given index sits
    shifted_range = (
        tf.range(queue_size, dtype=INDEX_DTYPE) - top_index) % queue_size
    # Mark any indices which cannot accommodate the current example.
    exclusion_mask = tf.cast(tf.logical_not(can_fit), INDEX_DTYPE) * queue_size
    # Index in [0, queue_size) in which to place the sample. Note, this index
    # is the position in the actual TensorArray, not the index of the FFD queue.
    queue_index = (tf.reduce_min(shifted_range + exclusion_mask) +
                   top_index) % queue_size
    # NOTE(taylorrobie): We emit a non-empty Tensor for downstream checks.
    output_contents = -tf.ones((1, num_sequences), dtype=token_dtype)
  else:
    # No bin fits: evict the top bin as a finished packed example and reuse
    # its slot for the incoming example.
    index_range = top_index * packed_length + tf.range(packed_length)
    output_contents = contents.gather(index_range)
    # Reset the queue state.
    availability = availability.write(
        top_index, packed_length * tf.ones((num_sequences,), dtype=INDEX_DTYPE))
    empty_contents = tf.zeros((packed_length, num_sequences * 2),
                              dtype=token_dtype)
    contents = contents.scatter(index_range, empty_contents)
    queue_index = top_index
    top_index = (top_index + 1) % queue_size
  pre_assign_availability = availability.read(queue_index)
  space_left = pre_assign_availability - lengths - spacing
  availability = availability.write(queue_index, space_left)
  # ============================================================================
  # == Update contents =========================================================
  # ============================================================================
  # Consider the following case for a seq-to-seq packing:
  #   (padding is represented as underscores)
  #
  #   Queue starting state:
  #     [1, 3, 2, 4, 6, 1, _, _, _, _, _, ...]
  #     [5, 9, _, _, _, _, _, _, _, _, _, ...]
  #
  #   Examples:
  #     [4, 2, 4], [3]
  #
  #   Desired new queue state:
  #     [1, 3, 2, 4, 6, 1, _, _, 4, 2, 4, _, _, ...]
  #     [5, 9, _, _, 3, _, _, _, _, _, _, _, _, ...]
  #
  # This could be acomplished by creating a TensorArray for each of the two
  # sequences, and scattering into the respective arrays. However TensorArray
  # writes are extremely expensive relative to other operations. So instead we
  # store the contents in a single TensorArray of shape (packed_length, 2), and
  # we pad and concatenate the examples such that they can be added in a single
  # assign:
  #
  #              [_, _, _, _, 4, 2, 4]
  #              [3, _, _, _, _, _, _]
  #                        +
  #  [1, 3, 2, 4, 6, 1, _, _, _, _, _, ...]
  #  [5, 9, _, _, _, _, _, _, _, _, _, ...]
  #
  # And in practice, the extra work of padding is neglidgable compared to
  # the gain from vectorizing the TensorArray assign. We also store a bit mask
  # denoting where sequences start which is used to compute segment and
  # position metadata:
  #
  #              [_, _, _, _, 1, _, _]
  #              [1, _, _, _, _, _, _]
  #                        +
  #  [1, _, _, _, _, _, _, _, _, _, _, ...]
  #  [1, _, _, _, _, _, _, _, _, _, _, ...]
  #
  # Both the contents and the mask are concatenated in the same TensorArray
  # for performance.
  start_index = packed_length - pre_assign_availability
  end_index = start_index + lengths
  leftmost = tf.reduce_min(start_index, axis=0)
  rightmost = tf.reduce_max(end_index, axis=0)
  delta = rightmost - leftmost
  pad_indices = [tf.stack((start_index[i] - leftmost, rightmost - end_index[i]))
                 for i in range(num_sequences)]
  padded_examples = [tf.pad(ex, padding[tf.newaxis, :])
                     for ex, padding in zip(example, pad_indices)]
  padded_examples = tf.transpose(tf.stack(padded_examples))
  mask_update = tf.one_hot(start_index - leftmost, delta,
                           dtype=contents.dtype, axis=0)
  content_update = tf.concat([padded_examples, mask_update], axis=1)
  index_range = (queue_index * packed_length +  # Offset into the right section.
                 tf.range(delta, dtype=INDEX_DTYPE) + leftmost)
  contents = contents.scatter(index_range, contents.gather(index_range) +
                              content_update)
  state = (availability, contents, top_index)
  return state, (tf.logical_not(any_can_fit), output_contents)
def make_tmp_dir(suffix="", prefix="tmp", dir=None):  # pylint: disable=redefined-builtin
  """Make a temporary directory.

  With dir=None this defers to tempfile.mkdtemp.  Otherwise a uniquely
  named directory is created under `dir` via tf.gfile, which also works
  for remote filesystems that tf.gfile supports.
  """
  if dir is None:
    return tempfile.mkdtemp(suffix, prefix, dir)
  while True:
    candidate = os.path.join(
        dir, "%s%d%s" % (prefix, random.randint(1, 9999), suffix))
    if not tf.gfile.Exists(candidate):
      tf.gfile.MakeDirs(candidate)
      return candidate
def tfrecord_iterator_for_problem(problem, data_dir,
                                  dataset_split=tf.estimator.ModeKeys.TRAIN):
  """Yield parsed records from the Problem's TFRecord files on disk."""
  matched_files = tf.gfile.Glob(
      problem.filepattern(data_dir, mode=dataset_split))
  reading_spec = problem.example_reading_spec()[0]
  return tfrecord_iterator(matched_files, example_spec=reading_spec)
def tfrecord_iterator(filenames, gzipped=False, example_spec=None):
  """Yields records from TFRecord files.

  Args:
    filenames: list<str>, list of TFRecord filenames to read from.
    gzipped: bool, whether the TFRecord files are gzip-encoded.
    example_spec: dict<str feature name, tf.VarLenFeature/tf.FixedLenFeature>,
      if provided, will parse each record as a tensorflow.Example proto.
  Yields:
    Records (or parsed Examples, if example_spec is provided) from files.
  """
  # Build the read pipeline in a throwaway graph so iteration does not
  # pollute the caller's default graph.
  with tf.Graph().as_default():
    dataset = tf.data.Dataset.from_tensor_slices(filenames)
    def _load_records(filename):
      return tf.data.TFRecordDataset(
          filename,
          compression_type=tf.constant("GZIP") if gzipped else None,
          buffer_size=16 * 1000 * 1000)
    dataset = dataset.flat_map(_load_records)
    def _parse_example(ex_ser):
      return tf.parse_single_example(ex_ser, example_spec)
    if example_spec:
      dataset = dataset.map(_parse_example, num_parallel_calls=32)
    dataset = dataset.prefetch(100)
    record_it = dataset.make_one_shot_iterator().get_next()
    # Drain the one-shot iterator; OutOfRangeError signals the end of data.
    with tf.Session() as sess:
      while True:
        try:
          ex = sess.run(record_it)
          yield ex
        except tf.errors.OutOfRangeError:
          break
def random_deinterleave(text, separator_symbol="X"):
  """Create a fill-in-the-blanks training example from text.

  Split on spaces, then cut into segments at random points. Alternate segments
  are assigned to the two output strings. separator_symbol separates segments
  within each of the outputs.
  example:
    text="The quick brown fox jumps over the lazy dog."
    returns: ("X quick brown X the lazy X", "The X fox jumps over X dog.")
  The two outputs can also be reversed to yield an instance of the same problem.
  Args:
    text: a string
    separator_symbol: a string
  Returns:
    a pair of strings
  """
  words = text.strip().split(" ")
  n = len(words)
  if n <= 1:
    return text, ""
  # Mark the cut points; position 0 is always a cut, and the number of
  # additional cuts is drawn log-uniformly in [1, n).
  is_cut = [idx == 0 for idx in range(n)]
  num_cuts = int(math.exp(random.uniform(0, math.log(n))))
  for _ in range(num_cuts):
    is_cut[random.randint(1, n - 1)] = True
  # Alternate segments between the two output streams, writing the
  # separator into whichever stream is being switched away from.
  streams = ([], [])
  active = random.randint(0, 1)
  for word, cut_here in zip(words, is_cut):
    if cut_here:
      streams[active].append(separator_symbol)
      active = 1 - active
    streams[active].append(word)
  return " ".join(streams[0]), " ".join(streams[1])
| 36.961722 | 105 | 0.681036 |
3eaee3ea49d4252618de34e08e759ef0ad2b1d0a | 2,022 | py | Python | icsReader.py | LoicYvinec/PoliCal | d6f81a77727c5caeed5901b750b6bb322d9f5dba | [
"BSD-3-Clause"
] | null | null | null | icsReader.py | LoicYvinec/PoliCal | d6f81a77727c5caeed5901b750b6bb322d9f5dba | [
"BSD-3-Clause"
] | null | null | null | icsReader.py | LoicYvinec/PoliCal | d6f81a77727c5caeed5901b750b6bb322d9f5dba | [
"BSD-3-Clause"
] | null | null | null | import wget
import os
import logging
logging.basicConfig(filename='Running.log',level=logging.INFO, format='%(asctime)s:%(levelname)s:%(message)s')
def convertICStoCSV():
    """Download the Moodle calendar export (.ics) and flatten it to calendar.csv.

    Fetches the user's iCalendar feed, then walks the VEVENT entries and
    writes one semicolon-separated row per event using the columns listed
    in `headers`.  Multi-line DESCRIPTION values are emitted inside double
    quotes.

    Side effects: removes/recreates mycalendar.ics and rewrites calendar.csv
    in the current working directory; requires network access.
    """
    logging.info("Empezando:")
    # Fixed: this was `logging.infoc(...)`, which raised AttributeError.
    logging.info("Eliminando si existe")
    print("Empezando:")
    print("Eliminando si existe")
    filename = "mycalendar.ics"
    if os.path.exists(filename):
        os.remove(filename)
    url = "https://educacionvirtual.epn.edu.ec/calendar/export_execute.php?userid=7587&authtoken=a43c2f67460752ab1e1b0d5a784dd330cb5b93e7&preset_what=all&preset_time=recentupcoming"
    wget.download(url, "mycalendar.ics")
    f = open("mycalendar.ics", "r")
    f2 = open("calendar.csv", "w+")
    try:
        f1 = f.readlines()
        headers = ["BEGIN", "UID", "SUMMARY", "DESCRIPTION", "CLASS",
                   "LAST-MODIFIED", "DTSTAMP", "DTSTART", "DTEND", "CATEGORIES"]
        for x in headers:
            f2.write(x + ";")
        f2.write("\n")
        wrBegin = False        # seen the first BEGIN:VEVENT
        wrNormal = False       # currently inside a VEVENT block
        wrDescription = False  # currently inside a DESCRIPTION value
        for x in f1:
            # Split "NAME:value" once; `list` shadows the builtin but is kept
            # for compatibility with the original column handling.
            list = x.split(":", 1)
            if list[0] == "BEGIN" and list[1] == "VEVENT\n":
                wrNormal = True
                wrBegin = True
                list[1] = "VEVENT"
            elif list[0] == "DESCRIPTION":
                wrDescription = True
                f2.write("\"")  # open the quoted description cell
            elif list[0] == "CLASS":
                wrDescription = False
                f2.write("\"" + ";")  # close the quoted description cell
            elif list[0] == "END" and list[1] == "VEVENT\n":
                wrNormal = False
            if wrNormal and wrDescription == False:
                # Regular field: write the value without its trailing newline.
                removebsn = list[1].split("\n", 1)
                f2.write(removebsn[0] + ";")
            elif wrNormal and wrDescription:
                # NOTE(review): preserved from the original — this writes the
                # de-whitespaced parts once per split part, and set iteration
                # order is not deterministic; confirm the intended output
                # before changing.
                for y in list:
                    new_list = {x.replace('\n', '').replace('\t', '') for x in list}
                    for x in new_list:
                        f2.write(x)
            elif wrNormal == False and wrBegin:
                f2.write("\n")  # row separator between events
    finally:
        # Fixed: the original leaked both file handles (output could stay
        # unflushed).
        f.close()
        f2.close()
652482e633d9cce394c57398084724959a89b819 | 4,287 | py | Python | v7/subindexes/subindexes.py | littlemo/plugins | d7a6a6af11a7722c560c2de4fe959f77eb68d5b9 | [
"MIT"
] | 1 | 2017-10-15T00:43:44.000Z | 2017-10-15T00:43:44.000Z | v7/subindexes/subindexes.py | littlemo/plugins | d7a6a6af11a7722c560c2de4fe959f77eb68d5b9 | [
"MIT"
] | null | null | null | v7/subindexes/subindexes.py | littlemo/plugins | d7a6a6af11a7722c560c2de4fe959f77eb68d5b9 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright © 2012-2014 Blake Winton <bwinton@latte.ca>.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import unicode_literals
from collections import defaultdict
import os
from nikola.plugin_categories import Task
from nikola.utils import config_changed
class SubIndexes(Task):
    """Render the indexes for each subdirectory in the blog."""

    name = "render_subindexes"

    def gen_tasks(self):
        """Yield an index-page rendering task per post subdirectory and language.

        Posts are grouped under every accumulated prefix of their destination
        directory, so each ancestor directory gets its own index listing the
        posts beneath it.
        """
        self.site.scan_posts()
        yield self.group_task()
        kw = {
            "output_folder": self.site.config['OUTPUT_FOLDER'],
            "filters": self.site.config['FILTERS'],
            "index_file": self.site.config['INDEX_FILE'],
            "strip_indexes": self.site.config['STRIP_INDEXES'],
            "index_teasers": self.site.config['INDEX_TEASERS'],
        }
        template_name = "index.tmpl"
        for lang in self.site.config['TRANSLATIONS']:
            # Map each directory prefix -> list of posts living beneath it.
            groups = defaultdict(list)
            for p in self.site.timeline:
                if p.is_post:
                    dirname = os.path.dirname(p.destination_path(lang))
                    # NOTE(review): os.path.split() yields only (head, tail),
                    # not every component — for deeply nested posts the first
                    # element is itself a multi-segment path; confirm this is
                    # the intended grouping.
                    dirname = [part for part in os.path.split(dirname) if part]
                    if dirname == ["."]:
                        continue
                    dirname.reverse()
                    part = ''
                    # Register the post under each accumulated path prefix.
                    while len(dirname):
                        part = os.path.join(part, dirname.pop())
                        groups[part].append(p)
            for dirname, post_list in groups.items():
                context = {}
                context["items"] = []
                should_render = True
                output_name = os.path.join(kw['output_folder'], dirname, kw['index_file'])
                short_destination = os.path.join(dirname, kw['index_file'])
                link = short_destination.replace('\\', '/')
                index_len = len(kw['index_file'])
                # Honor STRIP_INDEXES by linking to the bare directory.
                if kw['strip_indexes'] and link[-(1 + index_len):] == '/' + kw['index_file']:
                    link = link[:-index_len]
                context["permalink"] = link
                for post in post_list:
                    # If there is an index.html pending to be created from
                    # a story, do not generate the STORY_INDEX
                    if post.destination_path(lang) == short_destination:
                        should_render = False
                    else:
                        context["items"].append((post.title(lang),
                                                 post.permalink(lang)))
                context['index_teasers'] = kw['index_teasers']
                if should_render:
                    task = self.site.generic_post_list_renderer(lang, post_list,
                                                                output_name,
                                                                template_name,
                                                                kw['filters'],
                                                                context)
                    task_cfg = {1: task['uptodate'][0].config, 2: kw}
                    task['uptodate'] = [config_changed(task_cfg)]
                    task['basename'] = self.name
                    yield task
fdfbf6b683b192199efb31573b5646783525f016 | 4,096 | py | Python | src/ontask/forms.py | ShizhuZhang/ontask_b | acbf05ff9b18dae0a41c67d1e41774e54a890c40 | [
"MIT"
] | 1 | 2020-11-08T16:47:46.000Z | 2020-11-08T16:47:46.000Z | src/ontask/forms.py | ShizhuZhang/Ontask_b_zh | ca4526871f26e7153b724b1e97b922a0b52f75d6 | [
"MIT"
] | null | null | null | src/ontask/forms.py | ShizhuZhang/Ontask_b_zh | ca4526871f26e7153b724b1e97b922a0b52f75d6 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
from datetimewidget.widgets import DateTimeWidget
from django import forms
from django.template.defaultfilters import filesizeformat
from django.utils.translation import ugettext_lazy as _
import ontask.ontask_prefs
dateTimeOptions = {
'weekStart': 1, # Start week on Monday
'minuteStep': 5, # Minute step
}
class RestrictedFileField(forms.FileField):
    """Django FileField that restricts uploads by content type and size.

    ``content_types`` (list of MIME types) and ``max_upload_size`` (bytes)
    are consumed from the constructor kwargs; the size limit falls back to
    the application-wide MAX_UPLOAD_SIZE preference.
    """
    def __init__(self, *args, **kwargs):
        self.content_types = kwargs.pop('content_types', None)
        self.max_upload_size = kwargs.pop('max_upload_size', None)
        if not self.max_upload_size:
            # NOTE(review): the fallback is stored as a *str*, so the
            # ``data.size > self.max_upload_size`` comparison below would
            # raise on Python 3 when the fallback is used — confirm.
            self.max_upload_size = str(ontask.ontask_prefs.MAX_UPLOAD_SIZE)
        super(RestrictedFileField, self).__init__(*args, **kwargs)
    def clean(self, *args, **kwargs):
        """Validate type and size; returns the cleaned file data."""
        data = super(RestrictedFileField, self).clean(*args, **kwargs)
        try:
            if data.content_type in self.content_types:
                if data.size > self.max_upload_size:
                    raise forms.ValidationError(
                        _('File size must be under %(max)s. Current file '
                          'size is %(current)s.')
                        % ({
                            'max': filesizeformat(self.max_upload_size),
                            'current': filesizeformat(data.size)
                        }))
            else:
                raise forms.ValidationError(
                    _('File type (%s) is not supported.') % data.content_type)
        except AttributeError:
            # No file submitted (data has no content_type/size): accept as-is.
            pass
        return data
def column_to_field(col, initial=None, required=False, label=None):
    """
    Function that given the description of a column it generates the
    appropriate field to be included in a form

    :param col: Column object to use as the basis to create the field
    :param initial: Initial value for the field
    :param required: flag to generate the field with the required attribute
    :param label: Value to overwrite the label attribute
    :return: Field object
    :raises Exception: if col.data_type is not one of the supported types
    """
    # If no label is given, take the column name
    if not label:
        label = col.name

    if col.categories:
        # Column has a finite set of prefixed values: render a choice field.
        choices = [(x, x) for x in col.categories]
        # Pick the (value, label) pair matching the requested initial value,
        # or fall back to the empty placeholder.
        initial = next((v for x, v in enumerate(choices) if v[0] == initial),
                       ('', '---'))

        # If the column is of type string, allow always the empty value
        if col.data_type == 'string':
            choices.insert(0, ('', '---'))

        return forms.ChoiceField(choices,
                                 required=required,
                                 initial=initial,
                                 label=label)

    # Column is open value
    if col.data_type == 'string':
        # (The previous version re-checked ``not col.categories`` here,
        # which is always true at this point because the categories case
        # already returned above.)
        return forms.CharField(initial=initial if initial else '',
                               label=label,
                               required=required)
    elif col.data_type == 'integer':
        return forms.IntegerField(initial=initial,
                                  label=label,
                                  required=required)
    elif col.data_type == 'double':
        return forms.FloatField(initial=initial,
                                label=label,
                                required=required)
    elif col.data_type == 'boolean':
        return forms.BooleanField(initial=initial,
                                  label=label,
                                  required=required)
    elif col.data_type == 'datetime':
        return forms.DateTimeField(
            initial=initial,
            label=label,
            required=required,
            widget=DateTimeWidget(options=dateTimeOptions,
                                  usel10n=True,
                                  bootstrap_version=3),
        )
    else:
        raise Exception(_('Unable to process datatype '), col.data_type)
| 36.247788 | 78 | 0.557617 |
65e00cc0acfbde0ffeca3fb48ea9cdcb30ba7951 | 2,365 | py | Python | src/testToneFrequency.py | ytyaru/Python.Audio.Chord.2017081743 | f9bad6c9c013c216aff586bed56ea646f26d1236 | [
"CC0-1.0"
] | null | null | null | src/testToneFrequency.py | ytyaru/Python.Audio.Chord.2017081743 | f9bad6c9c013c216aff586bed56ea646f26d1236 | [
"CC0-1.0"
] | null | null | null | src/testToneFrequency.py | ytyaru/Python.Audio.Chord.2017081743 | f9bad6c9c013c216aff586bed56ea646f26d1236 | [
"CC0-1.0"
] | null | null | null | import Wave.Player
import Wave.Sampler
import Wave.BaseWaveMaker
import Wave.WaveFile
import MusicTheory.ToneFrequency
if __name__ == '__main__':
    # Manual smoke test: print frequencies for note names, then play scales.
    tf = MusicTheory.ToneFrequency.ToneFrequency()
    # Natural, sharp ('+') and flat ('-') note names.
    for name in ('C','D','E','F','G','A','B'): print(name, tf.ToFrequency(name))
    print()
    for name in ('C+','D+','E+','F+','G+','A+','B+'): print(name, tf.ToFrequency(name))
    print()
    for name in ('C-','D-','E-','F-','G-','A-','B-'): print(name, tf.ToFrequency(name))
    print()
    # Stacked accidentals ('C', 'C-', 'C--', ...) up to 24 deep.
    for name in ['C'+'-'*n for n in range(25)]: print(name, tf.ToFrequency(name))
    print()
    for name in ['C'+'+'*n for n in range(25)]: print(name, tf.ToFrequency(name, -1))
    print()
    wm = Wave.BaseWaveMaker.BaseWaveMaker()
    sampler = Wave.Sampler.Sampler()
    p = Wave.Player.Player()
    p.Open()
    """
    for name in ('C','D','E','F','G','A','B'):
        print(name, tf.ToFrequency(name));
        p.Play(sampler.Sampling(wm.Sin(a=1, fs=8000, f0=tf.ToFrequency(name), sec=0.5)))
    for name in ('C','C+','D','D+','E','F','G','G+','A','A+','B'):
        print(name, tf.ToFrequency(name));
        p.Play(sampler.Sampling(wm.Sin(a=1, fs=8000, f0=tf.ToFrequency(name), sec=0.5)))
    for name in ('C+','D+','E+','F+','G+','A+','B+'):
        print(name, tf.ToFrequency(name));
        p.Play(sampler.Sampling(wm.Sin(a=1, fs=8000, f0=tf.ToFrequency(name), sec=0.5)))
    for name in ('C-','D-','E-','F-','G-','A-','B-'):
        print(name, tf.ToFrequency(name));
        p.Play(sampler.Sampling(wm.Sin(a=1, fs=8000, f0=tf.ToFrequency(name), sec=0.5)))
    """
    # Play every semitone across octaves -1..9 at 32 kHz.
    for pitch in range(-1, 10):
        print('-------pitch:', pitch)
        for name in ('C','C+','D','D+','E','F','G','G+','A','A+','B'):
#        for name in ('C','D','E','F','G','A','B'):
#            print(name, tf.ToFrequency(name));
            f0, keyId, pitch = tf.ToFrequency(name, pitch)
            print('{0:<2} {1:>2} {2:>3} {3:f}'.format(tf.EqualTemperament.Names[keyId], keyId, pitch, f0))
            p.Play(sampler.Sampling(wm.Sin(a=1, fs=32000, f0=f0, sec=0.125)), rate=32000)
#            p.Play(sampler.Sampling(wm.Sin(a=1, fs=16000, f0=f0, sec=0.125)), rate=16000)
#            p.Play(sampler.Sampling(wm.Sin(a=1, fs=8000, f0=f0, sec=0.125)))
#    p.Play(sampler.Sampling(wm.Sin(a=1, fs=8000, f0=tf.ToFrequency(name, pitch), sec=0.125)))
    p.Close()
| 46.372549 | 106 | 0.550529 |
78b3e367ddcbc5a5adfe95465f7aebd686ebf968 | 154 | py | Python | Core/pageManager/login.py | alpinepwd/Tiamat | 4d7f6d8af60b475bdbd70c3246f2d12b38c8e8e5 | [
"Apache-2.0"
] | 1 | 2016-09-22T09:38:02.000Z | 2016-09-22T09:38:02.000Z | Core/pageManager/login.py | alpinepwd/Tiamat | 4d7f6d8af60b475bdbd70c3246f2d12b38c8e8e5 | [
"Apache-2.0"
] | null | null | null | Core/pageManager/login.py | alpinepwd/Tiamat | 4d7f6d8af60b475bdbd70c3246f2d12b38c8e8e5 | [
"Apache-2.0"
] | null | null | null | from PageManager import PageManager
class login(PageManager):
"controlador de Login"
def __init__(self):
super(self)
print "jiji"
| 22 | 35 | 0.675325 |
660cc7cb121ed7d5dc1b89fe980add15ffce1310 | 2,464 | py | Python | omics/stats/tSNR.py | choyichen/pybcb | 60ba382be28bdbce466a9b24760fe44d421aa5ae | [
"MIT"
] | 3 | 2017-05-11T02:13:03.000Z | 2020-08-04T06:59:11.000Z | omics/stats/tSNR.py | choyichen/pybcb | 60ba382be28bdbce466a9b24760fe44d421aa5ae | [
"MIT"
] | null | null | null | omics/stats/tSNR.py | choyichen/pybcb | 60ba382be28bdbce466a9b24760fe44d421aa5ae | [
"MIT"
] | 1 | 2020-07-03T06:57:51.000Z | 2020-07-03T06:57:51.000Z | """Transcriptomic SNR (tSNR)
"""
import numpy as np
__version__ = '16.12.28'
__author__ = 'Cho-Yi Chen'
def tsnr(X, Y):
    """Transcriptomic SNR (tSNR)

    X: case expression matrix (genes-by-samples)
    Y: ctrl expression matrix (genes-by-samples)

    Return the tSNR (float) between X and Y.
    """
    n_case = X.shape[1]
    n_ctrl = Y.shape[1]
    case_centroid = X.mean(axis=1)
    ctrl_centroid = Y.mean(axis=1)
    # Signal: Euclidean distance between the two group centroids.
    signal = np.linalg.norm(case_centroid - ctrl_centroid)
    # Noise: pooled standard error, from each sample's squared distance to
    # its own group centroid (sum over all matrix entries is equivalent to
    # summing squared per-column Euclidean distances).
    case_var = np.sum((X - case_centroid[:, None]) ** 2) / (n_case - 1)
    ctrl_var = np.sum((Y - ctrl_centroid[:, None]) ** 2) / (n_ctrl - 1)
    noise = np.sqrt((case_var / n_case) + (ctrl_var / n_ctrl))
    return 1. * signal / noise
def tsnr_pval(X, Y, permute=1000):
    """Estimate the P value of tsnr(X, Y) via a permutation test.

    Case/control labels are shuffled ``permute`` times and the P value is
    the fraction of permuted tSNRs at least as large as the observed one.
    Returns 0.5 / permute instead of an exact 0 when no permutation beats
    the observed statistic.
    """
    m = X.shape[1]
    n = Y.shape[1]
    snr = tsnr(X, Y)
    # Pool all samples as rows so shuffling rows permutes the labels.
    Z = np.concatenate([X, Y], axis=1).T
    pool = []
    # range instead of xrange: the original was Python-2-only; range works
    # identically here on both Python 2 and 3.
    for _ in range(permute):
        np.random.shuffle(Z)
        x = Z[:m, :].T
        y = Z[m:, :].T
        pool.append(tsnr(x, y))
    pool = np.array(pool)
    pval = 1. * sum(pool >= snr) / permute
    return pval if pval != 0 else (0.5 / permute)
def tsnr_boot(X, Y, N=30, boot=1000):
    """Estimate the tSNR via bootstrapping (resampling with replacement)

    Returns (mean, median, std) of the bootstrapped tSNR values.

    Warning: It truns out using bootstrapping to estimate the tSNR is not
    a good idea. The estimated tSNR tends to be larger, and is dependent
    on N (how many samples to pick). It N is close to the sample sizes,
    it will be more likely to choose repeated data points, thus lowring
    the randomness of sample distribution and strengthening the signal
    between cases and controls. I think this is biased and should not be
    used.
    """
    m = X.shape[1]
    n = Y.shape[1]
    out = []
    # range instead of xrange: the original was Python-2-only; range works
    # identically here on both Python 2 and 3.
    for i in range(boot):
        x = X[:, np.random.choice(m, N, replace=True)]
        y = Y[:, np.random.choice(n, N, replace=True)]
        out.append(tsnr(x, y))
    return np.mean(out), np.median(out), np.std(out)
if __name__ == "__main__":
    # Smoke test (Python 2 print statements): random matrices should give a
    # modest tSNR; identical matrices offset by 1 should give a large one.
    print "tSNR between two random matrices:"
    X = np.random.rand(40, 40)
    Y = np.random.rand(40, 40)
    print "tSNR:", tsnr(X, Y)
    print "Pval:", tsnr_pval(X, Y)
    print "tSNR between two identical matrices:"
    print "tSNR:", tsnr(X, X + 1)
    print "Pval:", tsnr_pval(X, X + 1)
| 32.853333 | 99 | 0.616477 |
20572dfded2b4a9edb521e6e61ccc492312b29d5 | 658 | py | Python | install/TexGen/Python/Tests/TestUtils.py | dalexa10/puma | ca02309c9f5c71e2e80ad8d64155dd6ca936c667 | [
"NASA-1.3"
] | 14 | 2021-06-17T17:17:07.000Z | 2022-03-26T05:20:20.000Z | install/TexGen/Python/Tests/TestUtils.py | dalexa10/puma | ca02309c9f5c71e2e80ad8d64155dd6ca936c667 | [
"NASA-1.3"
] | 6 | 2021-11-01T20:37:39.000Z | 2022-03-11T17:18:53.000Z | install/TexGen/Python/Tests/TestUtils.py | dalexa10/puma | ca02309c9f5c71e2e80ad8d64155dd6ca936c667 | [
"NASA-1.3"
] | 8 | 2021-07-20T09:24:23.000Z | 2022-02-26T16:32:00.000Z | from TexGen.Core import *
def GetTestTextile():
Textile = CTextile()
Yarn = CYarn()
Yarn.AddNode(CNode(XYZ(0, 0, 0)))
Yarn.AddNode(CNode(XYZ(0, 1, 0)))
Yarn.AssignInterpolation(CInterpolationBezier())
Yarn.AssignSection(CYarnSectionConstant(CSectionEllipse(1, 1)))
Yarn.SetResolution(20)
Yarn.AddRepeat(XYZ(0, 1, 0))
Textile.AddYarn(Yarn)
Domain = CDomainPlanes(XYZ(-1,-1,-1), XYZ(1,1,1))
Textile.AssignDomain(Domain)
return Textile
def GetTestWeave():
weave = CTextileWeave2D(2, 2, 1, 0.2, True)
weave.SwapPosition(0, 0)
weave.SwapPosition(1, 1)
weave.AssignDefaultDomain()
return weave
| 28.608696 | 67 | 0.674772 |
58ba402f2f717dd728770a6009bd5df772812508 | 2,883 | py | Python | mod_md/MDMessageCmd.py | k-komarov/c10r | 3f3d394f23aba889ac2c4db9b9e1aeaaf6f44cd6 | [
"MIT"
] | null | null | null | mod_md/MDMessageCmd.py | k-komarov/c10r | 3f3d394f23aba889ac2c4db9b9e1aeaaf6f44cd6 | [
"MIT"
] | null | null | null | mod_md/MDMessageCmd.py | k-komarov/c10r | 3f3d394f23aba889ac2c4db9b9e1aeaaf6f44cd6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import sys
import os.path
import mysql.connector
import logging
import toml
import dns.resolver
# Configuration (TOML) lives next to this script.
cwd = os.path.dirname(os.path.realpath(__file__))
config = toml.load(cwd + "/MDMessageCmd.toml")
logging.basicConfig(filename=config["logger"]["file"],
                    format='%(asctime)s [%(levelname)s] %(message)s')
logger = logging.getLogger("MDMessageCMD")
logger.setLevel(config["logger"]["level"])
logger.debug("Config: %s", config)
# mod_md invokes this hook with exactly two arguments: <status> <domain>.
if not len(sys.argv) == 3:
    print(f"Error: Not enough arguments!")
    print(f"Usage: {sys.argv[0]} <mod_md status> <domain>")
    print(f"Example: {sys.argv[0]} installed example.com")
    sys.exit(1)
reason = sys.argv[1]
fqdn = sys.argv[2]
def set_https_flag(domain: str, enabled: bool = True):
    """Persist the `https` flag for *domain* via the configured UPDATE query.

    Uses the module-level ``cursor``/``cnx`` opened in the main block below;
    MySQL errors are logged and swallowed.
    """
    update_sql = config["domains"]["update_https_query"] % (1 if enabled else 0, domain)
    try:
        # Catches any domains that incorrectly have https = 1, but no certificate exists on the fs
        cursor.execute(update_sql)
        cnx.commit()
        # Fix: log the UPDATE statement that was actually executed; the
        # original logged the module-level SELECT `query` instead.
        logger.debug("Setting `https` flag for [%s]: '%s' %s", domain, enabled, update_sql)
    except mysql.connector.Error as err:
        logger.error("Mysql error: %s", err)
logger.debug("Incoming MD message for [%s]: reason '%s'", fqdn, reason)
with mysql.connector.connect(
        host=config["mysql"]["host"] or "localhost",
        port=config["mysql"]["port"] or "3306",
        db=config["mysql"]["db"] or "domains",
        user=config["mysql"]["user"] or "root",
        password=config["mysql"]["password"] or ""
) as cnx:
    with cnx.cursor(dictionary=True) as cursor:
        # Look up the domain row; only act on domains we know about.
        query = config["domains"]["select_domain_query"] % fqdn
        cursor.execute(query)
        row = cursor.fetchone()
        logger.debug("Domain [%s] %s found: %s", fqdn, "NOT" if cursor.rowcount == 0 else "", query)
        if cursor.rowcount > 0:
            # Warn when the domain is not CNAMEd at our target host.
            result = dns.resolver.resolve(fqdn, dns.rdatatype.CNAME)
            cname_target = config["domains"]["cname_target"]
            if all(str(cname).rstrip(".") != cname_target for cname in result):
                # todo: Probably, we should be notified somehow?
                logger.warning("Domain [%s] doesn't have CNAME %s DNS record", fqdn, cname_target)
            # NOTE(review): result is discarded and a resolution failure
            # raises out of the script — confirm this lookup is intentional.
            dns.resolver.resolve(row["subdomain"])
            if reason == "renewed":
                logger.warning("Renewed: %s", fqdn)
            if reason == "installed":
                logger.debug("Certificate installed for [%s]", fqdn)
                set_https_flag(fqdn, True)
            if reason == 'expiring':
                logger.debug("Certificate expiring for [%s]", fqdn)
                # todo: Why are we disabling it? renewal?
                set_https_flag(fqdn, False)
            if reason == 'errored':
                # Only disable https if the certificate file is truly gone.
                if not os.path.isfile(f"/mnt/nfs/md/domains/{fqdn}/pubcert.pem"):
                    set_https_flag(fqdn, False)
6219edf5f8696e2a3e025f09f59c67446de336a8 | 831 | py | Python | constants.py | Batlin/NestTweaks | 0ab833d68f20be078b0e06a2658b9ba3c55e4e99 | [
"MIT"
] | null | null | null | constants.py | Batlin/NestTweaks | 0ab833d68f20be078b0e06a2658b9ba3c55e4e99 | [
"MIT"
] | null | null | null | constants.py | Batlin/NestTweaks | 0ab833d68f20be078b0e06a2658b9ba3c55e4e99 | [
"MIT"
] | null | null | null | from enum import Enum
import apikeys
# Endpoints: Google Apps Script web-app URLs (deployed exec and dev head).
SPREADSHEET = 'https://script.google.com/macros/s/' + apikeys.GS_ID + '/exec'
SPREADSHEET_DEV = 'https://script.google.com/macros/s/' + apikeys.GS_ID + '/dev'
# Open Weather
OWM_API_KEY = apikeys.OWM
OWM_CITY = 'Madrid, Spain'
OWM_UNITS = 'celsius'
# Google Assistant queries sent verbatim to the Assistant SDK.
QUERY_INTEMP = 'Whats the thermostat temperature'
QUERY_INHUMIDITY = 'Whats the thermostat humidity'
QUERY_MODE = 'Whats the thermostat mode'
SET_MODE_HEAT = 'Set thermostat to heat mode'
# Assistant SDK
ASSISTANT_API_ENDPOINT = 'embeddedassistant.googleapis.com'
DEFAULT_GRPC_DEADLINE = 60 * 3 + 5  # seconds (3 min + 5 s)
# Parsing response: XPath for the Assistant's textual reply, plus the
# substrings matched against it and a sentinel for parse failure.
DIV_CLASS = '//div[@class="show_text_content"]/text()'
HEAT = 'heat'
ECO = 'eco'
OFF = 'turned off'
ERROR = -1
# Modes
class NestMode(Enum):
    # Thermostat operating modes as reported by the Assistant.
    HEAT = 1
    ECO = 2
    OFF = 3
    UNKNOWN = 4
070274fba0af195c51a82b2219ce63701fc6ac4c | 5,141 | py | Python | natlas-server/app/admin/forms.py | ryanohoro/natlas | 7df513fe429928ac75b7478532cccd5ca88c8b62 | [
"Apache-2.0"
] | null | null | null | natlas-server/app/admin/forms.py | ryanohoro/natlas | 7df513fe429928ac75b7478532cccd5ca88c8b62 | [
"Apache-2.0"
] | null | null | null | natlas-server/app/admin/forms.py | ryanohoro/natlas | 7df513fe429928ac75b7478532cccd5ca88c8b62 | [
"Apache-2.0"
] | null | null | null | from flask_wtf import FlaskForm
from flask_wtf.file import FileField
from wtforms import StringField, BooleanField, SubmitField, TextAreaField, PasswordField, IntegerField, SelectField
from wtforms.validators import DataRequired, ValidationError, Email, Optional
from app.models import User, ScopeItem, AgentScript
import ipaddress
from app.elastic import Elastic
class ConfigForm(FlaskForm):
    """Admin form for server-wide settings (auth, branding, ES, mail)."""
    login_required = BooleanField('Login Required')
    register_allowed = BooleanField('Registration Allowed')
    agent_authentication = BooleanField('Agent Authentication Required')
    local_subresources = BooleanField('Use Local Subresources (instead of CDN)')
    custom_brand = StringField('Custom Branding')
    elasticsearch_url = StringField("Elastic URL")
    mail_from = StringField("From Address", validators=[Email(), Optional()])
    mail_server = StringField("Mail Server")
    mail_port = StringField("Mail Port")
    mail_use_tls = BooleanField("Use TLS")
    mail_username = StringField("Mail username")
    mail_password = PasswordField("Mail password")
    submit = SubmitField("Save Changes")
    def validate_elasticsearch_url(self, elasticsearch_url):
        # Probe connectivity with a throwaway Elastic client; reject the
        # URL if the client reports a bad status.
        tmpElasticInstance = Elastic(elasticsearch_url.data)
        if not tmpElasticInstance.status:
            raise ValidationError("%s : %s" % (tmpElasticInstance.errorinfo, elasticsearch_url.data))
class InviteUserForm(FlaskForm):
    """Admin form for inviting a new user by email address."""
    email = StringField('Email', validators=[DataRequired(), Email()])
    submit = SubmitField('Invite User')
    def validate_email(self, email):
        # Reject addresses that already have an account.
        user = User.query.filter_by(email=email.data).first()
        if user is not None:
            raise ValidationError('Email %s already exists!' % user.email)
class UserDeleteForm(FlaskForm):
    """Single-button form: delete a user account."""
    deleteUser = SubmitField('Delete User')
class UserEditForm(FlaskForm):
    """Single-button form: toggle a user's admin role."""
    editUser = SubmitField('Toggle Admin')
class NewScopeForm(FlaskForm):
    """Form for adding a single scope (or blacklist) target network."""
    target = StringField('Target', validators=[DataRequired()])
    blacklist = BooleanField('Blacklist')
    submit = SubmitField('Add Target')

    def validate_target(self, target):
        # Bare addresses are treated as /32 host networks.
        if '/' not in target.data:
            target.data = target.data + '/32'
        try:
            isValid = ipaddress.ip_network(target.data, False)
        except ValueError:
            # Fix: ipaddress.ip_network raises a plain ValueError (not only
            # AddressValueError) for unparseable input such as "foo/32";
            # catching the base class keeps malformed input a form error
            # instead of an unhandled exception.
            raise ValidationError(
                'Target %s couldn\'t be validated' % target.data)
        item = ScopeItem.query.filter_by(target=isValid.with_prefixlen).first()
        if item is not None:
            raise ValidationError('Target %s already exists!' % item.target)
class ImportScopeForm(FlaskForm):
    """Bulk paste-in import of scope targets."""
    scope = TextAreaField("Scope Import")
    submit = SubmitField("Import Scope")
class ImportBlacklistForm(FlaskForm):
    """Bulk paste-in import of blacklist targets."""
    scope = TextAreaField("Blacklist Import")
    submit = SubmitField("Import Blacklist")
class ScopeDeleteForm(FlaskForm):
    """Single-button form: delete a scope item."""
    deleteScopeItem = SubmitField('Delete Target')
class ScopeToggleForm(FlaskForm):
    """Single-button form: toggle a scope item's blacklist flag."""
    toggleScopeItem = SubmitField('Toggle Blacklist')
class ServicesUploadForm(FlaskForm):
    """Upload a replacement nmap-style services file."""
    serviceFile = FileField('Select a file to upload')
    uploadFile = SubmitField('Upload Services File')
class AddServiceForm(FlaskForm):
    """Form for adding a single named service (name, port, protocol)."""
    serviceName = StringField('Service Name', validators=[DataRequired()])
    servicePort = IntegerField('Service Port', validators=[DataRequired()])
    serviceProtocol = SelectField("Protocol", validators=[DataRequired()])
    addService = SubmitField('Add Service')

    def validate_serviceName(self, serviceName):
        if ' ' in serviceName.data:
            raise ValidationError('Service names cannot contain spaces! Use - instead.')

    def validate_servicePort(self, servicePort):
        # Valid port numbers are 0-65535.
        if servicePort.data > 65535 or servicePort.data < 0:
            # Fix: corrected typo in the user-facing message
            # ("withing" -> "within").
            raise ValidationError('Port has to be within range of 0-65535')
class AgentConfigForm(FlaskForm):
    """Admin form for the scan options pushed to all natlas agents.

    Field labels mirror the underlying nmap flags or helper tools.
    """
    versionDetection = BooleanField("Version Detection (-sV)")
    osDetection = BooleanField("OS Detection (-O)")
    enableScripts = BooleanField("Scripting Engine (--script)")
    onlyOpens = BooleanField("Open Ports Only (--open)")
    scanTimeout = IntegerField("Maximum Nmap Run Time")
    webScreenshots = BooleanField("Web Screenshots (aquatone)")
    webScreenshotTimeout = IntegerField("Web Screenshot Timeout")
    vncScreenshots = BooleanField("VNC Screenshots (vncsnapshot)")
    vncScreenshotTimeout = IntegerField("VNC Screenshot Timeout")
    scriptTimeout = IntegerField("Script Timeout (--script-timeout)")
    hostTimeout = IntegerField("Host Timeout (--host-timeout)")
    osScanLimit = BooleanField("Limit OS Scan (--osscan-limit)")
    noPing = BooleanField("No Ping (-Pn)")
    udpScan = BooleanField("Also scan UDP (-sUS)")
    updateAgents = SubmitField("Update Agent Config")
class AddScriptForm(FlaskForm):
    """Form for registering a new agent script by name."""
    scriptName = StringField("Script Name", validators=[DataRequired()])
    addScript = SubmitField("Add Script")

    def validate_scriptName(self, scriptName):
        # Fix: WTForms only invokes inline validators named
        # validate_<fieldname> (case-sensitive); the original
        # ``validate_scriptname`` was never called. Also compare against
        # the submitted value (.data), not the field object itself.
        script = AgentScript.query.filter_by(name=scriptName.data).first()
        if script is not None:
            raise ValidationError('%s already exists!' % script.name)
class DeleteForm(FlaskForm):
    """Generic single-button delete confirmation form."""
    delete = SubmitField("Delete")
class AddTagForm(FlaskForm):
    """Form for creating a new tag."""
    tagname = StringField("Tag Name", validators=[DataRequired()])
    addTag = SubmitField("Add Tag")
class TagScopeForm(FlaskForm):
    """Form for attaching an existing tag to a scope item."""
    tagname = SelectField("Tag Name", validators=[DataRequired()])
    addTagToScope = SubmitField("Add Tag to Scope")
| 35.212329 | 115 | 0.76736 |
4c2cc2c86a9659e538fa587ebc98637d041df279 | 3,346 | py | Python | precise/model.py | mondhs/mycroft-precise | 5b0cc922ab8b767c7f7adf704aa9c4652a6a1bb9 | [
"Apache-2.0"
] | null | null | null | precise/model.py | mondhs/mycroft-precise | 5b0cc922ab8b767c7f7adf704aa9c4652a6a1bb9 | [
"Apache-2.0"
] | null | null | null | precise/model.py | mondhs/mycroft-precise | 5b0cc922ab8b767c7f7adf704aa9c4652a6a1bb9 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Loads model
"""
import attr
from os.path import isfile
from typing import *
from precise.functions import load_keras, false_pos, false_neg, weighted_log_loss, set_loss_bias
from precise.params import inject_params, pr
if TYPE_CHECKING:
from keras.models import Sequential
@attr.s()
class ModelParams:
    """
    Hyper-parameters controlling the precise model's architecture/training.

    Attributes:
        recurrent_units: Number of GRU units. Higher values increase computation
            but allow more complex learning. Too high of a value causes overfitting
        dropout: Reduces overfitting but can potentially decrease accuracy if too high
        extra_metrics: Whether to include false positive and false negative metrics while training
        skip_acc: Whether to skip accuracy calculation while training
        loss_bias: Near 1.0 reduces false positives. See <set_loss_bias>
        freeze_till: Layer number from start to freeze after loading (allows for partial training)
    """
    recurrent_units = attr.ib(20)  # type: int
    dropout = attr.ib(0.2)  # type: float
    extra_metrics = attr.ib(False)  # type: bool
    skip_acc = attr.ib(False)  # type: bool
    loss_bias = attr.ib(0.7)  # type: float
    freeze_till = attr.ib(0)  # type: int
def load_precise_model(model_name: str) -> Any:
    """Loads a Keras model from file, handling custom loss function"""
    # Fix: the original used ``or``, which is true for every filename
    # (nothing ends with both '.net' and '.h5'), so the warning always
    # fired. ``and`` warns only for genuinely unknown extensions.
    if not model_name.endswith('.net') and not model_name.endswith('.h5'):
        print('Warning: Unknown model type, ', model_name)
    inject_params(model_name)
    return load_keras().models.load_model(model_name, compile=False)
def create_model(model_name: Optional[str], params: ModelParams) -> 'Sequential':
    """
    Load or create a precise model

    Args:
        model_name: Name of model
        params: Parameters used to create the model

    Returns:
        model: Loaded Keras model
    """
    if model_name and isfile(model_name):
        print('Loading from ' + model_name + '...')
        model = load_precise_model(model_name)
    else:
        from tensorflow.keras.layers import Dense
        from tensorflow.keras.layers import GRU
        from tensorflow.keras.models import Sequential
        # Fresh model: a single GRU over the feature frames feeding a
        # sigmoid wake-word probability.
        model = Sequential()
        model.add(GRU(
            params.recurrent_units, activation='linear',
            input_shape=(
                pr.n_features, pr.feature_size), dropout=params.dropout, name='net'
        ))
        model.add(Dense(1, activation='sigmoid'))
    load_keras()
    # bool * [list] keeps the extra metrics only when extra_metrics is True.
    metrics = ['accuracy'] + params.extra_metrics * [false_pos, false_neg]
    set_loss_bias(params.loss_bias)
    # Freeze the first `freeze_till` layers for partial (re)training.
    for i in model.layers[:params.freeze_till]:
        i.trainable = False
    model.compile('rmsprop', weighted_log_loss,
                  metrics=(not params.skip_acc) * metrics)
    return model
| 36.769231 | 98 | 0.692767 |
dc22e7d6a38b1b9e224a744d78487be33fca9852 | 1,806 | py | Python | terrascript/digitalocean/r.py | GarnerCorp/python-terrascript | ec6c2d9114dcd3cb955dd46069f8ba487e320a8c | [
"BSD-2-Clause"
] | null | null | null | terrascript/digitalocean/r.py | GarnerCorp/python-terrascript | ec6c2d9114dcd3cb955dd46069f8ba487e320a8c | [
"BSD-2-Clause"
] | null | null | null | terrascript/digitalocean/r.py | GarnerCorp/python-terrascript | ec6c2d9114dcd3cb955dd46069f8ba487e320a8c | [
"BSD-2-Clause"
] | 1 | 2018-11-15T16:23:05.000Z | 2018-11-15T16:23:05.000Z | from terrascript import _resource
# Auto-generated mapping: one terrascript resource class per DigitalOcean
# Terraform resource type, each aliased to a short name (e.g. ``r.droplet``)
# for convenient access.
class digitalocean_certificate(_resource): pass
certificate = digitalocean_certificate
class digitalocean_cdn(_resource): pass
cdn = digitalocean_cdn
class digitalocean_database_cluster(_resource): pass
database_cluster = digitalocean_database_cluster
class digitalocean_domain(_resource): pass
domain = digitalocean_domain
class digitalocean_droplet(_resource): pass
droplet = digitalocean_droplet
class digitalocean_droplet_snapshot(_resource): pass
droplet_snapshot = digitalocean_droplet_snapshot
class digitalocean_firewall(_resource): pass
firewall = digitalocean_firewall
class digitalocean_floating_ip(_resource): pass
floating_ip = digitalocean_floating_ip
class digitalocean_floating_ip_assignment(_resource): pass
floating_ip_assignment = digitalocean_floating_ip_assignment
class digitalocean_kubernetes_cluster(_resource): pass
kubernetes_cluster = digitalocean_kubernetes_cluster
class digitalocean_kubernetes_node_pool(_resource): pass
kubernetes_node_pool = digitalocean_kubernetes_node_pool
class digitalocean_loadbalancer(_resource): pass
loadbalancer = digitalocean_loadbalancer
class digitalocean_project(_resource): pass
project = digitalocean_project
class digitalocean_record(_resource): pass
record = digitalocean_record
class digitalocean_spaces_bucket(_resource): pass
spaces_bucket = digitalocean_spaces_bucket
class digitalocean_ssh_key(_resource): pass
ssh_key = digitalocean_ssh_key
class digitalocean_tag(_resource): pass
tag = digitalocean_tag
class digitalocean_volume(_resource): pass
volume = digitalocean_volume
class digitalocean_volume_attachment(_resource): pass
volume_attachment = digitalocean_volume_attachment
class digitalocean_volume_snapshot(_resource): pass
volume_snapshot = digitalocean_volume_snapshot
| 29.129032 | 60 | 0.875969 |
dabc85be1b414657b1778bbaa0bd34f29a6c5254 | 18,763 | py | Python | rllib/tests/test_nested_observation_spaces.py | acmore/ray | 9f0f54266064e203b0bdcc9d3fa947cb4518ebc0 | [
"Apache-2.0"
] | null | null | null | rllib/tests/test_nested_observation_spaces.py | acmore/ray | 9f0f54266064e203b0bdcc9d3fa947cb4518ebc0 | [
"Apache-2.0"
] | 1 | 2020-06-23T07:54:44.000Z | 2020-06-23T08:04:47.000Z | rllib/tests/test_nested_observation_spaces.py | acmore/ray | 9f0f54266064e203b0bdcc9d3fa947cb4518ebc0 | [
"Apache-2.0"
] | null | null | null | from gym import spaces
from gym.envs.registration import EnvSpec
import gym
import numpy as np
import pickle
import unittest
import ray
from ray.rllib.agents.a3c import A2CTrainer
from ray.rllib.agents.pg import PGTrainer
from ray.rllib.agents.pg.pg_tf_policy import PGTFPolicy
from ray.rllib.env import MultiAgentEnv
from ray.rllib.env.base_env import BaseEnv
from ray.rllib.env.vector_env import VectorEnv
from ray.rllib.models import ModelCatalog
from ray.rllib.models.tf.tf_modelv2 import TFModelV2
from ray.rllib.models.torch.fcnet import FullyConnectedNetwork
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.rollout import rollout
from ray.rllib.tests.test_external_env import SimpleServing
from ray.tune.registry import register_env
from ray.rllib.utils.framework import try_import_tf, try_import_torch
from ray.rllib.utils.spaces.repeated import Repeated
tf = try_import_tf()
_, nn = try_import_torch()
# Fixture spaces for the nested-observation tests, plus pre-drawn samples
# so environments below can step through deterministic observations.
DICT_SPACE = spaces.Dict({
    "sensors": spaces.Dict({
        "position": spaces.Box(low=-100, high=100, shape=(3, )),
        "velocity": spaces.Box(low=-1, high=1, shape=(3, )),
        "front_cam": spaces.Tuple(
            (spaces.Box(low=0, high=1, shape=(10, 10, 3)),
             spaces.Box(low=0, high=1, shape=(10, 10, 3)))),
        "rear_cam": spaces.Box(low=0, high=1, shape=(10, 10, 3)),
    }),
    "inner_state": spaces.Dict({
        "charge": spaces.Discrete(100),
        "job_status": spaces.Dict({
            "task": spaces.Discrete(5),
            "progress": spaces.Box(low=0, high=100, shape=()),
        })
    })
})
DICT_SAMPLES = [DICT_SPACE.sample() for _ in range(10)]
TUPLE_SPACE = spaces.Tuple([
    spaces.Box(low=-100, high=100, shape=(3, )),
    spaces.Tuple((spaces.Box(low=0, high=1, shape=(10, 10, 3)),
                  spaces.Box(low=0, high=1, shape=(10, 10, 3)))),
    spaces.Discrete(5),
])
TUPLE_SAMPLES = [TUPLE_SPACE.sample() for _ in range(10)]
# Constraints on the Repeated space.
MAX_PLAYERS = 4
MAX_ITEMS = 7
MAX_EFFECTS = 2
ITEM_SPACE = spaces.Box(-5, 5, shape=(1, ))
EFFECT_SPACE = spaces.Box(9000, 9999, shape=(4, ))
PLAYER_SPACE = spaces.Dict({
    "location": spaces.Box(-100, 100, shape=(2, )),
    "items": Repeated(ITEM_SPACE, max_len=MAX_ITEMS),
    "effects": Repeated(EFFECT_SPACE, max_len=MAX_EFFECTS),
    "status": spaces.Box(-1, 1, shape=(10, )),
})
REPEATED_SPACE = Repeated(PLAYER_SPACE, max_len=MAX_PLAYERS)
REPEATED_SAMPLES = [REPEATED_SPACE.sample() for _ in range(10)]
def one_hot(i, n):
    """Return a length-``n`` float list that is 1.0 at index ``i``."""
    encoding = [0.0 for _ in range(n)]
    encoding[i] = 1.0
    return encoding
class NestedDictEnv(gym.Env):
    """Env that replays pre-sampled dict observations; episode ends at 5 steps."""
    def __init__(self):
        self.action_space = spaces.Discrete(2)
        self.observation_space = DICT_SPACE
        self._spec = EnvSpec("NestedDictEnv-v0")
        self.steps = 0
    def reset(self):
        self.steps = 0
        return DICT_SAMPLES[0]
    def step(self, action):
        self.steps += 1
        return DICT_SAMPLES[self.steps], 1, self.steps >= 5, {}
class NestedTupleEnv(gym.Env):
    """Env that replays pre-sampled tuple observations; episode ends at 5 steps."""
    def __init__(self):
        self.action_space = spaces.Discrete(2)
        self.observation_space = TUPLE_SPACE
        self._spec = EnvSpec("NestedTupleEnv-v0")
        self.steps = 0
    def reset(self):
        self.steps = 0
        return TUPLE_SAMPLES[0]
    def step(self, action):
        self.steps += 1
        return TUPLE_SAMPLES[self.steps], 1, self.steps >= 5, {}
class RepeatedSpaceEnv(gym.Env):
    """Env that replays pre-sampled Repeated-space observations; ends at 5 steps."""
    def __init__(self):
        self.action_space = spaces.Discrete(2)
        self.observation_space = REPEATED_SPACE
        self._spec = EnvSpec("RepeatedSpaceEnv-v0")
        self.steps = 0
    def reset(self):
        self.steps = 0
        return REPEATED_SAMPLES[0]
    def step(self, action):
        self.steps += 1
        return REPEATED_SAMPLES[self.steps], 1, self.steps >= 5, {}
class NestedMultiAgentEnv(MultiAgentEnv):
    """Two-agent env: one agent receives dict obs, the other tuple obs."""
    def __init__(self):
        self.steps = 0
    def reset(self):
        return {
            "dict_agent": DICT_SAMPLES[0],
            "tuple_agent": TUPLE_SAMPLES[0],
        }
    def step(self, actions):
        self.steps += 1
        obs = {
            "dict_agent": DICT_SAMPLES[self.steps],
            "tuple_agent": TUPLE_SAMPLES[self.steps],
        }
        rew = {
            "dict_agent": 0,
            "tuple_agent": 0,
        }
        # Episode ends for all agents after 5 steps.
        dones = {"__all__": self.steps >= 5}
        infos = {
            "dict_agent": {},
            "tuple_agent": {},
        }
        return obs, rew, dones, infos
class InvalidModel(TorchModelV2):
    """Deliberately broken torch model: it does not inherit from nn.Module
    and forward() returns strings (used by test_invalid_model)."""
    def forward(self, input_dict, state, seq_lens):
        return "not", "valid"
class InvalidModel2(TFModelV2):
    """Deliberately broken TF model: forward() returns scalars with the wrong
    output shape (used by test_invalid_model2)."""
    def forward(self, input_dict, state, seq_lens):
        return tf.constant(0), tf.constant(0)
class TorchSpyModel(TorchModelV2, nn.Module):
    """Torch model that records the (unpacked) nested observations it sees.

    Each forward() call pickles the reconstructed sub-observations into Ray's
    internal KV store under "torch_spy_in_<i>" so the test process can verify
    that preprocessing round-tripped the nested Dict space correctly.
    """
    # Monotonically increasing KV-store key suffix, shared across instances.
    capture_index = 0
    def __init__(self, obs_space, action_space, num_outputs, model_config,
                 name):
        TorchModelV2.__init__(self, obs_space, action_space, num_outputs,
                              model_config, name)
        nn.Module.__init__(self)
        # Only the "position" sub-space is actually fed through the network.
        self.fc = FullyConnectedNetwork(
            obs_space.original_space.spaces["sensors"].spaces["position"],
            action_space, num_outputs, model_config, name)
    def forward(self, input_dict, state, seq_lens):
        pos = input_dict["obs"]["sensors"]["position"].numpy()
        front_cam = input_dict["obs"]["sensors"]["front_cam"][0].numpy()
        task = input_dict["obs"]["inner_state"]["job_status"]["task"].numpy()
        ray.experimental.internal_kv._internal_kv_put(
            "torch_spy_in_{}".format(TorchSpyModel.capture_index),
            pickle.dumps((pos, front_cam, task)),
            overwrite=True)
        TorchSpyModel.capture_index += 1
        return self.fc({
            "obs": input_dict["obs"]["sensors"]["position"]
        }, state, seq_lens)
    def value_function(self):
        return self.fc.value_function()
class TorchRepeatedSpyModel(TorchModelV2, nn.Module):
    """Torch model that records the unbatched Repeated-space observations.

    Like TorchSpyModel, but for REPEATED_SPACE: each forward() pickles
    input_dict["obs"].unbatch_all() into the KV store under "torch_rspy_in_<i>".
    """
    capture_index = 0
    def __init__(self, obs_space, action_space, num_outputs, model_config,
                 name):
        TorchModelV2.__init__(self, obs_space, action_space, num_outputs,
                              model_config, name)
        nn.Module.__init__(self)
        # Only the per-player "location" field is fed through the network.
        self.fc = FullyConnectedNetwork(
            obs_space.original_space.child_space["location"], action_space,
            num_outputs, model_config, name)
    def forward(self, input_dict, state, seq_lens):
        ray.experimental.internal_kv._internal_kv_put(
            "torch_rspy_in_{}".format(TorchRepeatedSpyModel.capture_index),
            pickle.dumps(input_dict["obs"].unbatch_all()),
            overwrite=True)
        TorchRepeatedSpyModel.capture_index += 1
        # Use the first player's location as the network input.
        return self.fc({
            "obs": input_dict["obs"].values["location"][:, 0]
        }, state, seq_lens)
    def value_function(self):
        return self.fc.value_function()
def to_list(value):
    """Recursively convert *value* into plain Python lists/dicts/numbers.

    Lists and dicts are converted element-wise, numpy arrays via .tolist(),
    plain ints are passed through, and anything else is assumed to expose
    a .numpy() method (e.g. a torch tensor).
    """
    if isinstance(value, dict):
        return {key: to_list(item) for key, item in value.items()}
    if isinstance(value, list):
        return [to_list(item) for item in value]
    if isinstance(value, np.ndarray):
        return value.tolist()
    if isinstance(value, int):
        return value
    return value.numpy().tolist()
class DictSpyModel(TFModelV2):
    """TF model that records the nested Dict observations it sees.

    A tf.py_func side branch pickles (position, front_cam[0], task) into
    Ray's internal KV store under "d_spy_in_<i>" for later verification.
    """
    capture_index = 0
    def forward(self, input_dict, state, seq_lens):
        def spy(pos, front_cam, task):
            # TF runs this function in an isolated context, so we have to use
            # redis to communicate back to our suite
            ray.experimental.internal_kv._internal_kv_put(
                "d_spy_in_{}".format(DictSpyModel.capture_index),
                pickle.dumps((pos, front_cam, task)),
                overwrite=True)
            DictSpyModel.capture_index += 1
            return 0
        spy_fn = tf.py_func(
            spy, [
                input_dict["obs"]["sensors"]["position"],
                input_dict["obs"]["sensors"]["front_cam"][0],
                input_dict["obs"]["inner_state"]["job_status"]["task"]
            ],
            tf.int64,
            stateful=True)
        # The control dependency forces the spy branch to actually run.
        with tf.control_dependencies([spy_fn]):
            output = tf.layers.dense(input_dict["obs"]["sensors"]["position"],
                                     self.num_outputs)
        return output, []
class TupleSpyModel(TFModelV2):
    """TF model that records the nested Tuple observations it sees.

    Mirrors DictSpyModel, but for TUPLE_SPACE; keys are "t_spy_in_<i>".
    """
    capture_index = 0
    def forward(self, input_dict, state, seq_lens):
        def spy(pos, cam, task):
            # TF runs this function in an isolated context, so we have to use
            # redis to communicate back to our suite
            ray.experimental.internal_kv._internal_kv_put(
                "t_spy_in_{}".format(TupleSpyModel.capture_index),
                pickle.dumps((pos, cam, task)),
                overwrite=True)
            TupleSpyModel.capture_index += 1
            return 0
        spy_fn = tf.py_func(
            spy, [
                input_dict["obs"][0],
                input_dict["obs"][1][0],
                input_dict["obs"][2],
            ],
            tf.int64,
            stateful=True)
        # The control dependency forces the spy branch to actually run.
        with tf.control_dependencies([spy_fn]):
            output = tf.layers.dense(input_dict["obs"][0], self.num_outputs)
        return output, []
class NestedSpacesTest(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        # One shared Ray cluster for the whole suite.
        ray.init(num_cpus=5)
    @classmethod
    def tearDownClass(cls):
        ray.shutdown()
    def test_invalid_model(self):
        """A TorchModelV2 subclass that is not an nn.Module must be rejected."""
        ModelCatalog.register_custom_model("invalid", InvalidModel)
        self.assertRaisesRegexp(
            ValueError,
            "Subclasses of TorchModelV2 must also inherit from nn.Module",
            lambda: PGTrainer(
                env="CartPole-v0",
                config={
                    "model": {
                        "custom_model": "invalid",
                    },
                    "framework": "torch",
                }))
    def test_invalid_model2(self):
        """A TF model whose forward() output shape is wrong must be rejected."""
        ModelCatalog.register_custom_model("invalid2", InvalidModel2)
        self.assertRaisesRegexp(
            ValueError, "Expected output shape of",
            lambda: PGTrainer(
                env="CartPole-v0", config={
                    "model": {
                        "custom_model": "invalid2",
                    },
                    "framework": "tf",
                }))
    def do_test_nested_dict(self, make_env, test_lstm=False):
        """Train PG on an env from *make_env* and verify that DictSpyModel
        observed exactly the DICT_SAMPLES observations, correctly unpacked."""
        ModelCatalog.register_custom_model("composite", DictSpyModel)
        register_env("nested", make_env)
        pg = PGTrainer(
            env="nested",
            config={
                "num_workers": 0,
                "rollout_fragment_length": 5,
                "train_batch_size": 5,
                "model": {
                    "custom_model": "composite",
                    "use_lstm": test_lstm,
                },
                "framework": "tf",
            })
        pg.train()
        # Check that the model sees the correct reconstructed observations
        for i in range(4):
            seen = pickle.loads(
                ray.experimental.internal_kv._internal_kv_get(
                    "d_spy_in_{}".format(i)))
            pos_i = DICT_SAMPLES[i]["sensors"]["position"].tolist()
            cam_i = DICT_SAMPLES[i]["sensors"]["front_cam"][0].tolist()
            # The discrete "task" value is one-hot encoded by preprocessing.
            task_i = one_hot(
                DICT_SAMPLES[i]["inner_state"]["job_status"]["task"], 5)
            self.assertEqual(seen[0][0].tolist(), pos_i)
            self.assertEqual(seen[1][0].tolist(), cam_i)
            self.assertEqual(seen[2][0].tolist(), task_i)
    def do_test_nested_tuple(self, make_env):
        """Train PG on an env from *make_env* and verify that TupleSpyModel
        observed exactly the TUPLE_SAMPLES observations, correctly unpacked."""
        ModelCatalog.register_custom_model("composite2", TupleSpyModel)
        register_env("nested2", make_env)
        pg = PGTrainer(
            env="nested2",
            config={
                "num_workers": 0,
                "rollout_fragment_length": 5,
                "train_batch_size": 5,
                "model": {
                    "custom_model": "composite2",
                },
                "framework": "tf",
            })
        pg.train()
        # Check that the model sees the correct reconstructed observations
        for i in range(4):
            seen = pickle.loads(
                ray.experimental.internal_kv._internal_kv_get(
                    "t_spy_in_{}".format(i)))
            pos_i = TUPLE_SAMPLES[i][0].tolist()
            cam_i = TUPLE_SAMPLES[i][1][0].tolist()
            # The discrete third element is one-hot encoded by preprocessing.
            task_i = one_hot(TUPLE_SAMPLES[i][2], 5)
            self.assertEqual(seen[0][0].tolist(), pos_i)
            self.assertEqual(seen[1][0].tolist(), cam_i)
            self.assertEqual(seen[2][0].tolist(), task_i)
    # Run the dict/tuple scenarios through each supported env wrapper:
    # plain gym, LSTM model, vectorized, external/serving, and async base env.
    def test_nested_dict_gym(self):
        self.do_test_nested_dict(lambda _: NestedDictEnv())
    def test_nested_dict_gym_lstm(self):
        self.do_test_nested_dict(lambda _: NestedDictEnv(), test_lstm=True)
    def test_nested_dict_vector(self):
        self.do_test_nested_dict(
            lambda _: VectorEnv.wrap(lambda i: NestedDictEnv()))
    def test_nested_dict_serving(self):
        self.do_test_nested_dict(lambda _: SimpleServing(NestedDictEnv()))
    def test_nested_dict_async(self):
        self.do_test_nested_dict(
            lambda _: BaseEnv.to_base_env(NestedDictEnv()))
    def test_nested_tuple_gym(self):
        self.do_test_nested_tuple(lambda _: NestedTupleEnv())
    def test_nested_tuple_vector(self):
        self.do_test_nested_tuple(
            lambda _: VectorEnv.wrap(lambda i: NestedTupleEnv()))
    def test_nested_tuple_serving(self):
        self.do_test_nested_tuple(lambda _: SimpleServing(NestedTupleEnv()))
    def test_nested_tuple_async(self):
        self.do_test_nested_tuple(
            lambda _: BaseEnv.to_base_env(NestedTupleEnv()))
    def test_multi_agent_complex_spaces(self):
        """Two policies with different nested obs spaces in one trainer:
        each spy model must see its own agent's observations, unpacked."""
        ModelCatalog.register_custom_model("dict_spy", DictSpyModel)
        ModelCatalog.register_custom_model("tuple_spy", TupleSpyModel)
        register_env("nested_ma", lambda _: NestedMultiAgentEnv())
        act_space = spaces.Discrete(2)
        pg = PGTrainer(
            env="nested_ma",
            config={
                "num_workers": 0,
                "rollout_fragment_length": 5,
                "train_batch_size": 5,
                "multiagent": {
                    "policies": {
                        "tuple_policy": (
                            PGTFPolicy, TUPLE_SPACE, act_space,
                            {"model": {"custom_model": "tuple_spy"}}),
                        "dict_policy": (
                            PGTFPolicy, DICT_SPACE, act_space,
                            {"model": {"custom_model": "dict_spy"}}),
                    },
                    "policy_mapping_fn": lambda a: {
                        "tuple_agent": "tuple_policy",
                        "dict_agent": "dict_policy"}[a],
                },
                "framework": "tf",
            })
        pg.train()
        # Verify the dict-space agent's observations...
        for i in range(4):
            seen = pickle.loads(
                ray.experimental.internal_kv._internal_kv_get(
                    "d_spy_in_{}".format(i)))
            pos_i = DICT_SAMPLES[i]["sensors"]["position"].tolist()
            cam_i = DICT_SAMPLES[i]["sensors"]["front_cam"][0].tolist()
            task_i = one_hot(
                DICT_SAMPLES[i]["inner_state"]["job_status"]["task"], 5)
            self.assertEqual(seen[0][0].tolist(), pos_i)
            self.assertEqual(seen[1][0].tolist(), cam_i)
            self.assertEqual(seen[2][0].tolist(), task_i)
        # ...and the tuple-space agent's observations.
        for i in range(4):
            seen = pickle.loads(
                ray.experimental.internal_kv._internal_kv_get(
                    "t_spy_in_{}".format(i)))
            pos_i = TUPLE_SAMPLES[i][0].tolist()
            cam_i = TUPLE_SAMPLES[i][1][0].tolist()
            task_i = one_hot(TUPLE_SAMPLES[i][2], 5)
            self.assertEqual(seen[0][0].tolist(), pos_i)
            self.assertEqual(seen[1][0].tolist(), cam_i)
            self.assertEqual(seen[2][0].tolist(), task_i)
    def test_rollout_dict_space(self):
        """Checkpoint save/restore and rollout must work with nested obs."""
        register_env("nested", lambda _: NestedDictEnv())
        agent = PGTrainer(env="nested", config={"framework": "tf"})
        agent.train()
        path = agent.save()
        agent.stop()
        # Test train works on restore
        agent2 = PGTrainer(env="nested", config={"framework": "tf"})
        agent2.restore(path)
        agent2.train()
        # Test rollout works on restore
        rollout(agent2, "nested", 100)
    def test_py_torch_model(self):
        """Torch path: TorchSpyModel must see the DICT_SAMPLES observations."""
        ModelCatalog.register_custom_model("composite", TorchSpyModel)
        register_env("nested", lambda _: NestedDictEnv())
        a2c = A2CTrainer(
            env="nested",
            config={
                "num_workers": 0,
                "rollout_fragment_length": 5,
                "train_batch_size": 5,
                "model": {
                    "custom_model": "composite",
                },
                "framework": "torch",
            })
        a2c.train()
        # Check that the model sees the correct reconstructed observations
        for i in range(4):
            seen = pickle.loads(
                ray.experimental.internal_kv._internal_kv_get(
                    "torch_spy_in_{}".format(i)))
            pos_i = DICT_SAMPLES[i]["sensors"]["position"].tolist()
            cam_i = DICT_SAMPLES[i]["sensors"]["front_cam"][0].tolist()
            task_i = one_hot(
                DICT_SAMPLES[i]["inner_state"]["job_status"]["task"], 5)
            self.assertEqual(seen[0][0].tolist(), pos_i)
            self.assertEqual(seen[1][0].tolist(), cam_i)
            self.assertEqual(seen[2][0].tolist(), task_i)
    # TODO(ekl) should probably also add a test for TF/eager
    def test_torch_repeated(self):
        """Torch path: Repeated-space obs must unbatch back to the samples."""
        ModelCatalog.register_custom_model("r1", TorchRepeatedSpyModel)
        register_env("repeat", lambda _: RepeatedSpaceEnv())
        a2c = A2CTrainer(
            env="repeat",
            config={
                "num_workers": 0,
                "rollout_fragment_length": 5,
                "train_batch_size": 5,
                "model": {
                    "custom_model": "r1",
                },
                "framework": "torch",
            })
        a2c.train()
        # Check that the model sees the correct reconstructed observations
        for i in range(4):
            seen = pickle.loads(
                ray.experimental.internal_kv._internal_kv_get(
                    "torch_rspy_in_{}".format(i)))
            self.assertEqual(to_list(seen), [to_list(REPEATED_SAMPLES[i])])
if __name__ == "__main__":
    # Allow running this test file directly via pytest.
    import pytest
    import sys
    sys.exit(pytest.main(["-v", __file__]))
| 34.810761 | 78 | 0.575761 |
756638a08de7ca34645f8c0bf3bb917c1a8d026b | 24,520 | py | Python | Lib/site-packages/PyQt5/examples/richtext/textedit/textedit.py | heylenz/python27 | bee49fa9d65b8ab7d591146a5b6cd47aeb41d940 | [
"bzip2-1.0.6",
"MIT"
] | null | null | null | Lib/site-packages/PyQt5/examples/richtext/textedit/textedit.py | heylenz/python27 | bee49fa9d65b8ab7d591146a5b6cd47aeb41d940 | [
"bzip2-1.0.6",
"MIT"
] | null | null | null | Lib/site-packages/PyQt5/examples/richtext/textedit/textedit.py | heylenz/python27 | bee49fa9d65b8ab7d591146a5b6cd47aeb41d940 | [
"bzip2-1.0.6",
"MIT"
] | null | null | null | #!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2013 Riverbank Computing Limited
## Copyright (C) 2010 Hans-Peter Jansen <hpj@urpla.net>.
## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:LGPL$
## Commercial Usage
## Licensees holding valid Qt Commercial licenses may use this file in
## accordance with the Qt Commercial License Agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and Nokia.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 2.1 as published by the Free Software
## Foundation and appearing in the file LICENSE.LGPL included in the
## packaging of this file. Please review the following information to
## ensure the GNU Lesser General Public License version 2.1 requirements
## will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
##
## In addition, as a special exception, Nokia gives you certain additional
## rights. These rights are described in the Nokia Qt LGPL Exception
## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
##
## GNU General Public License Usage
## Alternatively, this file may be used under the terms of the GNU
## General Public License version 3.0 as published by the Free Software
## Foundation and appearing in the file LICENSE.GPL included in the
## packaging of this file. Please review the following information to
## ensure the GNU General Public License version 3.0 requirements will be
## met: http://www.gnu.org/copyleft/gpl.html.
##
## If you have questions regarding the use of this file, please contact
## Nokia at qt-info@nokia.com.
## $QT_END_LICENSE$
##
#############################################################################
import sys
from PyQt5.QtCore import QFile, QFileInfo, Qt, QTextCodec
from PyQt5.QtGui import (QFont, QFontDatabase, QFontInfo, QIcon, QKeySequence,
QPixmap, QTextBlockFormat, QTextCharFormat, QTextCursor,
QTextDocumentWriter, QTextListFormat)
from PyQt5.QtWidgets import (QAction, QActionGroup, QApplication, QColorDialog,
QComboBox, QFileDialog, QFontComboBox, QMainWindow, QMenu, QMessageBox,
QTextEdit, QToolBar)
from PyQt5.QtPrintSupport import QPrintDialog, QPrinter, QPrintPreviewDialog
import textedit_rc
# Pick the platform-appropriate icon resource prefix (macOS vs Windows/X11).
if sys.platform.startswith('darwin'):
    rsrcPath = ":/images/mac"
else:
    rsrcPath = ":/images/win"
class TextEdit(QMainWindow):
    def __init__(self, fileName=None, parent=None):
        """Build the editor window: toolbars/menus, the central QTextEdit,
        signal wiring, and initial document load (falls back to a new file)."""
        super(TextEdit, self).__init__(parent)
        self.setWindowIcon(QIcon(':/images/logo.png'))
        self.setToolButtonStyle(Qt.ToolButtonFollowStyle)
        self.setupFileActions()
        self.setupEditActions()
        self.setupTextActions()
        helpMenu = QMenu("Help", self)
        self.menuBar().addMenu(helpMenu)
        helpMenu.addAction("About", self.about)
        helpMenu.addAction("About &Qt", QApplication.instance().aboutQt)
        self.textEdit = QTextEdit(self)
        self.textEdit.currentCharFormatChanged.connect(
                self.currentCharFormatChanged)
        self.textEdit.cursorPositionChanged.connect(self.cursorPositionChanged)
        self.setCentralWidget(self.textEdit)
        self.textEdit.setFocus()
        self.setCurrentFileName()
        # Sync toolbar state with the (initially empty) document.
        self.fontChanged(self.textEdit.font())
        self.colorChanged(self.textEdit.textColor())
        self.alignmentChanged(self.textEdit.alignment())
        # Keep window-modified flag and Save/Undo/Redo actions in step with
        # the document state.
        self.textEdit.document().modificationChanged.connect(
                self.actionSave.setEnabled)
        self.textEdit.document().modificationChanged.connect(
                self.setWindowModified)
        self.textEdit.document().undoAvailable.connect(
                self.actionUndo.setEnabled)
        self.textEdit.document().redoAvailable.connect(
                self.actionRedo.setEnabled)
        self.setWindowModified(self.textEdit.document().isModified())
        self.actionSave.setEnabled(self.textEdit.document().isModified())
        self.actionUndo.setEnabled(self.textEdit.document().isUndoAvailable())
        self.actionRedo.setEnabled(self.textEdit.document().isRedoAvailable())
        self.actionUndo.triggered.connect(self.textEdit.undo)
        self.actionRedo.triggered.connect(self.textEdit.redo)
        self.actionCut.setEnabled(False)
        self.actionCopy.setEnabled(False)
        self.actionCut.triggered.connect(self.textEdit.cut)
        self.actionCopy.triggered.connect(self.textEdit.copy)
        self.actionPaste.triggered.connect(self.textEdit.paste)
        self.textEdit.copyAvailable.connect(self.actionCut.setEnabled)
        self.textEdit.copyAvailable.connect(self.actionCopy.setEnabled)
        QApplication.clipboard().dataChanged.connect(self.clipboardDataChanged)
        # Load the example document by default; fall back to an empty file.
        if fileName is None:
            fileName = ':/example.html'
        if not self.load(fileName):
            self.fileNew()
    def closeEvent(self, e):
        """Only allow the window to close if unsaved changes are resolved."""
        if self.maybeSave():
            e.accept()
        else:
            e.ignore()
    def setupFileActions(self):
        """Create the File toolbar and menu (new/open/save/print/export/quit)."""
        tb = QToolBar(self)
        tb.setWindowTitle("File Actions")
        self.addToolBar(tb)
        menu = QMenu("&File", self)
        self.menuBar().addMenu(menu)
        self.actionNew = QAction(
                QIcon.fromTheme('document-new',
                        QIcon(rsrcPath + '/filenew.png')),
                "&New", self, priority=QAction.LowPriority,
                shortcut=QKeySequence.New, triggered=self.fileNew)
        tb.addAction(self.actionNew)
        menu.addAction(self.actionNew)
        self.actionOpen = QAction(
                QIcon.fromTheme('document-open',
                        QIcon(rsrcPath + '/fileopen.png')),
                "&Open...", self, shortcut=QKeySequence.Open,
                triggered=self.fileOpen)
        tb.addAction(self.actionOpen)
        menu.addAction(self.actionOpen)
        menu.addSeparator()
        # Save starts disabled; it is enabled via modificationChanged.
        self.actionSave = QAction(
                QIcon.fromTheme('document-save',
                        QIcon(rsrcPath + '/filesave.png')),
                "&Save", self, shortcut=QKeySequence.Save,
                triggered=self.fileSave, enabled=False)
        tb.addAction(self.actionSave)
        menu.addAction(self.actionSave)
        self.actionSaveAs = QAction("Save &As...", self,
                priority=QAction.LowPriority,
                shortcut=Qt.CTRL + Qt.SHIFT + Qt.Key_S,
                triggered=self.fileSaveAs)
        menu.addAction(self.actionSaveAs)
        menu.addSeparator()
        self.actionPrint = QAction(
                QIcon.fromTheme('document-print',
                        QIcon(rsrcPath + '/fileprint.png')),
                "&Print...", self, priority=QAction.LowPriority,
                shortcut=QKeySequence.Print, triggered=self.filePrint)
        tb.addAction(self.actionPrint)
        menu.addAction(self.actionPrint)
        self.actionPrintPreview = QAction(
                QIcon.fromTheme('fileprint',
                        QIcon(rsrcPath + '/fileprint.png')),
                "Print Preview...", self,
                shortcut=Qt.CTRL + Qt.SHIFT + Qt.Key_P,
                triggered=self.filePrintPreview)
        menu.addAction(self.actionPrintPreview)
        self.actionPrintPdf = QAction(
                QIcon.fromTheme('exportpdf',
                        QIcon(rsrcPath + '/exportpdf.png')),
                "&Export PDF...", self, priority=QAction.LowPriority,
                shortcut=Qt.CTRL + Qt.Key_D,
                triggered=self.filePrintPdf)
        tb.addAction(self.actionPrintPdf)
        menu.addAction(self.actionPrintPdf)
        menu.addSeparator()
        self.actionQuit = QAction("&Quit", self, shortcut=QKeySequence.Quit,
                triggered=self.close)
        menu.addAction(self.actionQuit)
    def setupEditActions(self):
        """Create the Edit toolbar and menu (undo/redo/cut/copy/paste)."""
        tb = QToolBar(self)
        tb.setWindowTitle("Edit Actions")
        self.addToolBar(tb)
        menu = QMenu("&Edit", self)
        self.menuBar().addMenu(menu)
        self.actionUndo = QAction(
                QIcon.fromTheme('edit-undo',
                        QIcon(rsrcPath + '/editundo.png')),
                "&Undo", self, shortcut=QKeySequence.Undo)
        tb.addAction(self.actionUndo)
        menu.addAction(self.actionUndo)
        self.actionRedo = QAction(
                QIcon.fromTheme('edit-redo',
                        QIcon(rsrcPath + '/editredo.png')),
                "&Redo", self, priority=QAction.LowPriority,
                shortcut=QKeySequence.Redo)
        tb.addAction(self.actionRedo)
        menu.addAction(self.actionRedo)
        menu.addSeparator()
        self.actionCut = QAction(
                QIcon.fromTheme('edit-cut', QIcon(rsrcPath + '/editcut.png')),
                "Cu&t", self, priority=QAction.LowPriority,
                shortcut=QKeySequence.Cut)
        tb.addAction(self.actionCut)
        menu.addAction(self.actionCut)
        self.actionCopy = QAction(
                QIcon.fromTheme('edit-copy',
                        QIcon(rsrcPath + '/editcopy.png')),
                "&Copy", self, priority=QAction.LowPriority,
                shortcut=QKeySequence.Copy)
        tb.addAction(self.actionCopy)
        menu.addAction(self.actionCopy)
        # Paste is only enabled when the clipboard currently holds text.
        self.actionPaste = QAction(
                QIcon.fromTheme('edit-paste',
                        QIcon(rsrcPath + '/editpaste.png')),
                "&Paste", self, priority=QAction.LowPriority,
                shortcut=QKeySequence.Paste,
                enabled=(len(QApplication.clipboard().text()) != 0))
        tb.addAction(self.actionPaste)
        menu.addAction(self.actionPaste)
    def setupTextActions(self):
        """Create the Format toolbars/menu: bold/italic/underline, alignment,
        text color, list style, font family and size selectors."""
        tb = QToolBar(self)
        tb.setWindowTitle("Format Actions")
        self.addToolBar(tb)
        menu = QMenu("F&ormat", self)
        self.menuBar().addMenu(menu)
        self.actionTextBold = QAction(
                QIcon.fromTheme('format-text-bold',
                        QIcon(rsrcPath + '/textbold.png')),
                "&Bold", self, priority=QAction.LowPriority,
                shortcut=Qt.CTRL + Qt.Key_B, triggered=self.textBold,
                checkable=True)
        bold = QFont()
        bold.setBold(True)
        self.actionTextBold.setFont(bold)
        tb.addAction(self.actionTextBold)
        menu.addAction(self.actionTextBold)
        self.actionTextItalic = QAction(
                QIcon.fromTheme('format-text-italic',
                        QIcon(rsrcPath + '/textitalic.png')),
                "&Italic", self, priority=QAction.LowPriority,
                shortcut=Qt.CTRL + Qt.Key_I, triggered=self.textItalic,
                checkable=True)
        italic = QFont()
        italic.setItalic(True)
        self.actionTextItalic.setFont(italic)
        tb.addAction(self.actionTextItalic)
        menu.addAction(self.actionTextItalic)
        self.actionTextUnderline = QAction(
                QIcon.fromTheme('format-text-underline',
                        QIcon(rsrcPath + '/textunder.png')),
                "&Underline", self, priority=QAction.LowPriority,
                shortcut=Qt.CTRL + Qt.Key_U, triggered=self.textUnderline,
                checkable=True)
        underline = QFont()
        underline.setUnderline(True)
        self.actionTextUnderline.setFont(underline)
        tb.addAction(self.actionTextUnderline)
        menu.addAction(self.actionTextUnderline)
        menu.addSeparator()
        # Alignment actions form an exclusive group.
        grp = QActionGroup(self, triggered=self.textAlign)
        # Make sure the alignLeft is always left of the alignRight.
        if QApplication.isLeftToRight():
            self.actionAlignLeft = QAction(
                    QIcon.fromTheme('format-justify-left',
                            QIcon(rsrcPath + '/textleft.png')),
                    "&Left", grp)
            self.actionAlignCenter = QAction(
                    QIcon.fromTheme('format-justify-center',
                            QIcon(rsrcPath + '/textcenter.png')),
                    "C&enter", grp)
            self.actionAlignRight = QAction(
                    QIcon.fromTheme('format-justify-right',
                            QIcon(rsrcPath + '/textright.png')),
                    "&Right", grp)
        else:
            self.actionAlignRight = QAction(
                    QIcon.fromTheme('format-justify-right',
                            QIcon(rsrcPath + '/textright.png')),
                    "&Right", grp)
            self.actionAlignCenter = QAction(
                    QIcon.fromTheme('format-justify-center',
                            QIcon(rsrcPath + '/textcenter.png')),
                    "C&enter", grp)
            self.actionAlignLeft = QAction(
                    QIcon.fromTheme('format-justify-left',
                            QIcon(rsrcPath + '/textleft.png')),
                    "&Left", grp)
        self.actionAlignJustify = QAction(
                QIcon.fromTheme('format-justify-fill',
                        QIcon(rsrcPath + '/textjustify.png')),
                "&Justify", grp)
        self.actionAlignLeft.setShortcut(Qt.CTRL + Qt.Key_L)
        self.actionAlignLeft.setCheckable(True)
        self.actionAlignLeft.setPriority(QAction.LowPriority)
        self.actionAlignCenter.setShortcut(Qt.CTRL + Qt.Key_E)
        self.actionAlignCenter.setCheckable(True)
        self.actionAlignCenter.setPriority(QAction.LowPriority)
        self.actionAlignRight.setShortcut(Qt.CTRL + Qt.Key_R)
        self.actionAlignRight.setCheckable(True)
        self.actionAlignRight.setPriority(QAction.LowPriority)
        self.actionAlignJustify.setShortcut(Qt.CTRL + Qt.Key_J)
        self.actionAlignJustify.setCheckable(True)
        self.actionAlignJustify.setPriority(QAction.LowPriority)
        tb.addActions(grp.actions())
        menu.addActions(grp.actions())
        menu.addSeparator()
        # The color action's icon is a solid swatch of the current color.
        pix = QPixmap(16, 16)
        pix.fill(Qt.black)
        self.actionTextColor = QAction(QIcon(pix), "&Color...", self,
                triggered=self.textColor)
        tb.addAction(self.actionTextColor)
        menu.addAction(self.actionTextColor)
        # Second row: list style, font family and size selectors.
        tb = QToolBar(self)
        tb.setAllowedAreas(Qt.TopToolBarArea | Qt.BottomToolBarArea)
        tb.setWindowTitle("Format Actions")
        self.addToolBarBreak(Qt.TopToolBarArea)
        self.addToolBar(tb)
        comboStyle = QComboBox(tb)
        tb.addWidget(comboStyle)
        # Index order here must match the mapping in textStyle().
        comboStyle.addItem("Standard")
        comboStyle.addItem("Bullet List (Disc)")
        comboStyle.addItem("Bullet List (Circle)")
        comboStyle.addItem("Bullet List (Square)")
        comboStyle.addItem("Ordered List (Decimal)")
        comboStyle.addItem("Ordered List (Alpha lower)")
        comboStyle.addItem("Ordered List (Alpha upper)")
        comboStyle.addItem("Ordered List (Roman lower)")
        comboStyle.addItem("Ordered List (Roman upper)")
        comboStyle.activated.connect(self.textStyle)
        self.comboFont = QFontComboBox(tb)
        tb.addWidget(self.comboFont)
        self.comboFont.activated[str].connect(self.textFamily)
        self.comboSize = QComboBox(tb)
        self.comboSize.setObjectName("comboSize")
        tb.addWidget(self.comboSize)
        self.comboSize.setEditable(True)
        db = QFontDatabase()
        for size in db.standardSizes():
            self.comboSize.addItem("%s" % (size))
        self.comboSize.activated[str].connect(self.textSize)
        self.comboSize.setCurrentIndex(
                self.comboSize.findText(
                        "%s" % (QApplication.font().pointSize())))
    def load(self, f):
        """Load file *f* into the editor; returns False if it can't be read.

        The codec is sniffed from the content; rich-text-looking data is
        rendered as HTML, anything else as plain text.
        """
        if not QFile.exists(f):
            return False
        fh = QFile(f)
        if not fh.open(QFile.ReadOnly):
            return False
        data = fh.readAll()
        codec = QTextCodec.codecForHtml(data)
        unistr = codec.toUnicode(data)
        if Qt.mightBeRichText(unistr):
            self.textEdit.setHtml(unistr)
        else:
            self.textEdit.setPlainText(unistr)
        self.setCurrentFileName(f)
        return True
    def maybeSave(self):
        """Prompt to save unsaved changes; return False if the user cancels.

        Documents loaded from Qt resources (':/...') are never prompted for,
        since they cannot be written back.
        """
        if not self.textEdit.document().isModified():
            return True
        if self.fileName.startswith(':/'):
            return True
        ret = QMessageBox.warning(self, "Application",
                "The document has been modified.\n"
                "Do you want to save your changes?",
                QMessageBox.Save | QMessageBox.Discard | QMessageBox.Cancel)
        if ret == QMessageBox.Save:
            return self.fileSave()
        if ret == QMessageBox.Cancel:
            return False
        return True
    def setCurrentFileName(self, fileName=''):
        """Record the current file name, reset the modified flag, and update
        the window title ('' means an untitled document)."""
        self.fileName = fileName
        self.textEdit.document().setModified(False)
        if not fileName:
            shownName = 'untitled.txt'
        else:
            shownName = QFileInfo(fileName).fileName()
        # '[*]' is Qt's placeholder for the window-modified marker.
        self.setWindowTitle(self.tr("%s[*] - %s" % (shownName, "Rich Text")))
        self.setWindowModified(False)
    def fileNew(self):
        """Clear the editor (after offering to save pending changes)."""
        if self.maybeSave():
            self.textEdit.clear()
            self.setCurrentFileName()
    def fileOpen(self):
        """Show an open dialog and load the chosen file."""
        fn, _ = QFileDialog.getOpenFileName(self, "Open File...", None,
                "HTML-Files (*.htm *.html);;All Files (*)")
        if fn:
            self.load(fn)
    def fileSave(self):
        """Write the document to the current file name (Save As if unnamed)."""
        if not self.fileName:
            return self.fileSaveAs()
        # QTextDocumentWriter picks the format from the file extension.
        writer = QTextDocumentWriter(self.fileName)
        success = writer.write(self.textEdit.document())
        if success:
            self.textEdit.document().setModified(False)
        return success
    def fileSaveAs(self):
        """Ask for a destination name, defaulting the extension to .odt."""
        fn, _ = QFileDialog.getSaveFileName(self, "Save as...", None,
                "ODF files (*.odt);;HTML-Files (*.htm *.html);;All Files (*)")
        if not fn:
            return False
        lfn = fn.lower()
        if not lfn.endswith(('.odt', '.htm', '.html')):
            # The default.
            fn += '.odt'
        self.setCurrentFileName(fn)
        return self.fileSave()
    def filePrint(self):
        """Show a print dialog and print the document if accepted."""
        printer = QPrinter(QPrinter.HighResolution)
        dlg = QPrintDialog(printer, self)
        # Offer "print selection" only when there actually is one.
        if self.textEdit.textCursor().hasSelection():
            dlg.addEnabledOption(QPrintDialog.PrintSelection)
        dlg.setWindowTitle("Print Document")
        if dlg.exec_() == QPrintDialog.Accepted:
            self.textEdit.print_(printer)
        del dlg
    def filePrintPreview(self):
        """Open a print-preview dialog rendered via printPreview()."""
        printer = QPrinter(QPrinter.HighResolution)
        preview = QPrintPreviewDialog(printer, self)
        preview.paintRequested.connect(self.printPreview)
        preview.exec_()
    def printPreview(self, printer):
        # Render callback for the preview dialog.
        self.textEdit.print_(printer)
def filePrintPdf(self):
fn, _ = QFileDialog.getSaveFileName(self, "Export PDF", None,
"PDF files (*.pdf);;All Files (*)")
if fn:
if QFileInfo(fn).suffix().isEmpty():
fn += '.pdf'
printer = QPrinter(QPrinter.HighResolution)
printer.setOutputFormat(QPrinter.PdfFormat)
printer.setOutputFileName(fileName)
self.textEdit.document().print_(printer)
    def textBold(self):
        """Toggle bold on the current word/selection from the action state."""
        fmt = QTextCharFormat()
        fmt.setFontWeight(self.actionTextBold.isChecked() and QFont.Bold or QFont.Normal)
        self.mergeFormatOnWordOrSelection(fmt)
    def textUnderline(self):
        """Toggle underline on the current word/selection."""
        fmt = QTextCharFormat()
        fmt.setFontUnderline(self.actionTextUnderline.isChecked())
        self.mergeFormatOnWordOrSelection(fmt)
    def textItalic(self):
        """Toggle italics on the current word/selection."""
        fmt = QTextCharFormat()
        fmt.setFontItalic(self.actionTextItalic.isChecked())
        self.mergeFormatOnWordOrSelection(fmt)
    def textFamily(self, family):
        """Apply the font family chosen in the font combo box."""
        fmt = QTextCharFormat()
        fmt.setFontFamily(family)
        self.mergeFormatOnWordOrSelection(fmt)
def textSize(self, pointSize):
pointSize = float(pointSize)
if pointSize > 0:
fmt = QTextCharFormat()
fmt.setFontPointSize(pointSize)
self.mergeFormatOnWordOrSelection(fmt)
    def textStyle(self, styleIndex):
        """Apply the list style selected in the style combo box.

        Index 0 ("Standard") removes list formatting; other indices map to
        QTextListFormat styles (see the combo-box items in setupTextActions).
        """
        cursor = self.textEdit.textCursor()
        if styleIndex:
            styleDict = {
                1: QTextListFormat.ListDisc,
                2: QTextListFormat.ListCircle,
                3: QTextListFormat.ListSquare,
                4: QTextListFormat.ListDecimal,
                5: QTextListFormat.ListLowerAlpha,
                6: QTextListFormat.ListUpperAlpha,
                7: QTextListFormat.ListLowerRoman,
                8: QTextListFormat.ListUpperRoman,
            }
            style = styleDict.get(styleIndex, QTextListFormat.ListDisc)
            cursor.beginEditBlock()
            blockFmt = cursor.blockFormat()
            listFmt = QTextListFormat()
            if cursor.currentList():
                # Already in a list: keep its format, just change the style.
                listFmt = cursor.currentList().format()
            else:
                # Starting a new list: move the block's indent onto the list.
                listFmt.setIndent(blockFmt.indent() + 1)
                blockFmt.setIndent(0)
                cursor.setBlockFormat(blockFmt)
            listFmt.setStyle(style)
            cursor.createList(listFmt)
            cursor.endEditBlock()
        else:
            # "Standard": detach the block from any list.
            bfmt = QTextBlockFormat()
            bfmt.setObjectIndex(-1)
            cursor.mergeBlockFormat(bfmt)
    def textColor(self):
        """Pick a text color via dialog and apply it to the word/selection."""
        col = QColorDialog.getColor(self.textEdit.textColor(), self)
        if not col.isValid():
            return
        fmt = QTextCharFormat()
        fmt.setForeground(col)
        self.mergeFormatOnWordOrSelection(fmt)
        self.colorChanged(col)
    def textAlign(self, action):
        """Apply paragraph alignment for the triggered alignment action."""
        if action == self.actionAlignLeft:
            self.textEdit.setAlignment(Qt.AlignLeft | Qt.AlignAbsolute)
        elif action == self.actionAlignCenter:
            self.textEdit.setAlignment(Qt.AlignHCenter)
        elif action == self.actionAlignRight:
            self.textEdit.setAlignment(Qt.AlignRight | Qt.AlignAbsolute)
        elif action == self.actionAlignJustify:
            self.textEdit.setAlignment(Qt.AlignJustify)
    def currentCharFormatChanged(self, format):
        # Keep the font/color toolbar widgets in sync with the cursor format.
        self.fontChanged(format.font())
        self.colorChanged(format.foreground().color())
    def cursorPositionChanged(self):
        # Keep the alignment actions in sync with the current paragraph.
        self.alignmentChanged(self.textEdit.alignment())
    def clipboardDataChanged(self):
        # Enable Paste only when the clipboard holds text.
        self.actionPaste.setEnabled(len(QApplication.clipboard().text()) != 0)
    def about(self):
        """Show the About box."""
        QMessageBox.about(self, "About",
                "This example demonstrates Qt's rich text editing facilities "
                "in action, providing an example document for you to "
                "experiment with.")
    def mergeFormatOnWordOrSelection(self, format):
        """Merge *format* into the selection, or the word under the cursor
        when nothing is selected."""
        cursor = self.textEdit.textCursor()
        if not cursor.hasSelection():
            cursor.select(QTextCursor.WordUnderCursor)
        cursor.mergeCharFormat(format)
        self.textEdit.mergeCurrentCharFormat(format)
    def fontChanged(self, font):
        """Reflect *font* in the family/size combos and style actions."""
        self.comboFont.setCurrentIndex(
                self.comboFont.findText(QFontInfo(font).family()))
        self.comboSize.setCurrentIndex(
                self.comboSize.findText("%s" % font.pointSize()))
        self.actionTextBold.setChecked(font.bold())
        self.actionTextItalic.setChecked(font.italic())
        self.actionTextUnderline.setChecked(font.underline())
    def colorChanged(self, color):
        # Repaint the color action's swatch icon with the new color.
        pix = QPixmap(16, 16)
        pix.fill(color)
        self.actionTextColor.setIcon(QIcon(pix))
    def alignmentChanged(self, alignment):
        """Check the alignment action matching the current paragraph."""
        if alignment & Qt.AlignLeft:
            self.actionAlignLeft.setChecked(True)
        elif alignment & Qt.AlignHCenter:
            self.actionAlignCenter.setChecked(True)
        elif alignment & Qt.AlignRight:
            self.actionAlignRight.setChecked(True)
        elif alignment & Qt.AlignJustify:
            self.actionAlignJustify.setChecked(True)
if __name__ == '__main__':
    app = QApplication(sys.argv)
    # Keep references to all windows so they are not garbage-collected.
    mainWindows = []
    # One editor window per file on the command line (or a single default
    # window when no files are given).
    for fn in sys.argv[1:] or [None]:
        textEdit = TextEdit(fn)
        textEdit.resize(700, 800)
        textEdit.show()
        mainWindows.append(textEdit)
    sys.exit(app.exec_())
| 37.321157 | 89 | 0.612602 |
d24524e72eb3db446524554d15e8330f8db89fe7 | 2,303 | py | Python | Lib/site-packages/rope/base/ast.py | hirorin-demon/hirorin-streamlit | 03fbb6f03ec94f909d451e708a3b30b177607695 | [
"0BSD"
] | 463 | 2015-01-15T08:17:42.000Z | 2022-03-28T15:10:20.000Z | Lib/site-packages/rope/base/ast.py | hirorin-demon/hirorin-streamlit | 03fbb6f03ec94f909d451e708a3b30b177607695 | [
"0BSD"
] | 52 | 2015-01-06T02:43:59.000Z | 2022-03-14T11:15:21.000Z | Lib/site-packages/rope/base/ast.py | hirorin-demon/hirorin-streamlit | 03fbb6f03ec94f909d451e708a3b30b177607695 | [
"0BSD"
] | 249 | 2015-01-07T22:49:49.000Z | 2022-03-18T02:32:06.000Z | from __future__ import absolute_import
import ast
from ast import *
from rope.base import fscommands
try:
    unicode
except NameError:
    # Python 3 has no `unicode` builtin; alias it to str for 2/3 compat.
    unicode = str
def parse(source, filename='<string>'):
    """Parse *source* into an `ast` tree, normalizing line endings first.

    Unicode input is encoded to bytes via rope's fscommands before parsing;
    TypeError/ValueError from `ast.parse` (e.g. for null bytes) are re-raised
    as SyntaxError with *filename* attached so callers handle one exception
    type.
    """
    # NOTE: the raw string should be given to `compile` function
    if isinstance(source, unicode):
        source = fscommands.unicode_to_file_data(source)
    if b'\r' in source:
        # Normalize Windows/old-Mac line endings to '\n'.
        source = source.replace(b'\r\n', b'\n').replace(b'\r', b'\n')
    if not source.endswith(b'\n'):
        source += b'\n'
    try:
        return ast.parse(source, filename='<unknown>')
    except (TypeError, ValueError) as e:
        error = SyntaxError()
        error.lineno = 1
        error.filename = filename
        error.msg = str(e)
        raise error
def walk(node, walker):
    """Walk the syntax tree.

    If *walker* has a `_<NodeName>` method for this node, call it and return
    its result (children are not visited); otherwise recurse into the
    children.
    """
    handler = getattr(walker, '_' + node.__class__.__name__, None)
    if handler is not None:
        if isinstance(node, ast.ImportFrom) and node.module is None:
            # In python < 2.7 ``node.module == ''`` for relative imports
            # but for python 2.7 it is None. Generalizing it to ''.
            node.module = ''
        return handler(node)
    for child in get_child_nodes(node):
        walk(child, walker)
def get_child_nodes(node):
if isinstance(node, ast.Module):
return node.body
result = []
if node._fields is not None:
for name in node._fields:
child = getattr(node, name)
if isinstance(child, list):
for entry in child:
if isinstance(entry, ast.AST):
result.append(entry)
if isinstance(child, ast.AST):
result.append(child)
return result
def call_for_nodes(node, callback, recursive=False):
"""If callback returns `True` the child nodes are skipped"""
result = callback(node)
if recursive and not result:
for child in get_child_nodes(node):
call_for_nodes(child, callback, recursive)
def get_children(node):
result = []
if node._fields is not None:
for name in node._fields:
if name in ['lineno', 'col_offset']:
continue
child = getattr(node, name)
result.append(child)
return result
| 29.525641 | 72 | 0.600955 |
ef33e591013a547be58ff2b4dad927091046608b | 1,587 | py | Python | django_get_started/urls.py | patricia-j/devops-aula13 | 893282d02c832ba8e6fbd61ca0c785761890c548 | [
"MIT"
] | null | null | null | django_get_started/urls.py | patricia-j/devops-aula13 | 893282d02c832ba8e6fbd61ca0c785761890c548 | [
"MIT"
] | 12 | 2018-05-24T01:20:06.000Z | 2018-05-24T02:26:34.000Z | django_get_started/urls.py | patricia-j/devops-aula13 | 893282d02c832ba8e6fbd61ca0c785761890c548 | [
"MIT"
] | null | null | null | """
Definition of urls for django_get_started.
"""
from datetime import datetime
from django.conf.urls import patterns, url
from app.forms import BootstrapAuthenticationForm
# Uncomment the next lines to enable the admin:
# from django.conf.urls import include
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
# Examples:
url(r'^$', 'app.views.home', name='home'),
url(r'^contact$', 'app.views.contact', name='contact'),
url(r'^about', 'app.views.about', name='about'),
url(r'^lista_cursos', 'app.views.lista_cursos', name='lista_cursos'),
url(r'^lista_vestibulares', 'app.views.lista_vestibulares', name='lista_vestibulares'),
url(r'^lista_candidato', 'app.views.lista_candidatos', name='lista_candidatos'),
url(r'^locais_prova', 'app.views.local_prova', name='local_prova'),
url(r'^login/$',
'django.contrib.auth.views.login',
{
'template_name': 'app/login.html',
'authentication_form': BootstrapAuthenticationForm,
'extra_context':
{
'title':'Log in',
'year':datetime.now().year,
}
},
name='login'),
url(r'^logout$',
'django.contrib.auth.views.logout',
{
'next_page': '/',
},
name='logout'),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
)
| 33.0625 | 91 | 0.623819 |
d9bdbcd81822f1c58d3d9759404a67f3596d5025 | 2,060 | py | Python | dbdaora/hash/_tests/test_integration_service_hash_aioredis_exists.py | dutradda/sqldataclass | 5c87a3818e9d736bbf5e1438edc5929a2f5acd3f | [
"MIT"
] | 21 | 2019-10-14T14:33:33.000Z | 2022-02-11T04:43:07.000Z | dbdaora/hash/_tests/test_integration_service_hash_aioredis_exists.py | dutradda/sqldataclass | 5c87a3818e9d736bbf5e1438edc5929a2f5acd3f | [
"MIT"
] | null | null | null | dbdaora/hash/_tests/test_integration_service_hash_aioredis_exists.py | dutradda/sqldataclass | 5c87a3818e9d736bbf5e1438edc5929a2f5acd3f | [
"MIT"
] | 1 | 2019-09-29T23:51:44.000Z | 2019-09-29T23:51:44.000Z | import itertools
import asynctest
import pytest
from aioredis import RedisError
from jsondaora import dataclasses
@pytest.mark.asyncio
async def test_should_exists(fake_service, serialized_fake_entity):
await fake_service.repository.memory_data_source.hmset(
'fake:fake', *itertools.chain(*serialized_fake_entity.items())
)
assert fake_service.exists('fake')
@pytest.mark.asyncio
async def test_should_exists_without_cache(
fake_service, serialized_fake_entity
):
fake_service.exists_cache = None
await fake_service.repository.memory_data_source.hmset(
'fake:fake', *itertools.chain(*serialized_fake_entity.items())
)
assert await fake_service.exists('fake')
@pytest.mark.asyncio
async def test_should_exists_from_cache(fake_service, serialized_fake_entity):
fake_service.repository.memory_data_source.exists = (
asynctest.CoroutineMock()
)
fake_service.exists_cache['fake'] = True
assert await fake_service.exists('fake')
assert not fake_service.repository.memory_data_source.exists.called
@pytest.mark.asyncio
async def test_should_exists_from_fallback_after_open_circuit_breaker(
fake_service, fake_entity, mocker
):
fake_service.repository.memory_data_source.exists = asynctest.CoroutineMock(
side_effect=RedisError
)
fake_service.repository.fallback_data_source.db[
'fake:fake'
] = dataclasses.asdict(fake_entity)
assert await fake_service.exists('fake')
assert fake_service.logger.warning.call_count == 1
@pytest.mark.asyncio
async def test_should_exists_from_fallback_after_open_circuit_breaker_without_cache(
fake_service, fake_entity, mocker
):
fake_service.exists_cache = None
fake_service.repository.memory_data_source.exists = asynctest.CoroutineMock(
side_effect=RedisError
)
fake_service.repository.fallback_data_source.db[
'fake:fake'
] = dataclasses.asdict(fake_entity)
assert await fake_service.exists('fake')
assert fake_service.logger.warning.call_count == 1
| 30.294118 | 84 | 0.773301 |
17bd0a522f2729e1c2b6fab3cad91eb517756dba | 3,073 | py | Python | tests/clustering_system/evaluator/test_SupervisedEvaluation.py | vanam/clustering | 6e3d3ce7e60e31519b81547bc4afdf6ef3b0079f | [
"MIT"
] | 5 | 2019-05-28T15:45:46.000Z | 2020-10-05T17:48:27.000Z | tests/clustering_system/evaluator/test_SupervisedEvaluation.py | vanam/clustering | 6e3d3ce7e60e31519b81547bc4afdf6ef3b0079f | [
"MIT"
] | null | null | null | tests/clustering_system/evaluator/test_SupervisedEvaluation.py | vanam/clustering | 6e3d3ce7e60e31519b81547bc4afdf6ef3b0079f | [
"MIT"
] | 2 | 2019-07-16T14:13:19.000Z | 2020-11-23T01:53:44.000Z | import numpy as np
import scipy.stats
import sklearn.metrics
from numpy.testing import assert_almost_equal, assert_array_almost_equal
import clustering_system.evaluator.measures as cm
from clustering_system.evaluator.SupervisedEvaluation import SupervisedEvaluation
class TestSupervisedEvaluation:
def test_evaluation(self):
classes = np.array([
11, 11, 11,
11, 11, 22,
11, 22, 22,
22, 22, 33,
11, 11,
33, 33, 33,
])
clusters = np.array([
1, 1, 1,
1, 1, 1,
2, 2, 2,
2, 2, 2,
3, 3,
3, 3, 3,
])
aic = 1.1
bic = 2.2
likelihood = 3.3
K = 3
C = 3
N = 17
cluster_entropy = scipy.stats.entropy([6/17, 6/17, 5/17])
class_entropy = scipy.stats.entropy([8/17, 5/17, 4/17])
purity = 12/17
purity2 = np.array([[5/6, 6], [4/6, 6], [3/5, 5]])
rand_index = 23/34
precision = 0.5
recall = 20/44
f1_measure = 10/21
homogeneity = sklearn.metrics.homogeneity_score(classes, clusters)
completeness = sklearn.metrics.completeness_score(classes, clusters)
v_measure = sklearn.metrics.v_measure_score(classes, clusters)
nv_measure = v_measure
mutual_information = cm.mutual_information(clusters, classes)
normalized_mutual_information = sklearn.metrics.normalized_mutual_info_score(classes, clusters)
normalized_mutual_information2 = cm.normalized_mutual_information2(clusters, classes)
evaluation = SupervisedEvaluation(clusters, classes, aic, bic, likelihood)
assert aic == evaluation.aic
assert bic == evaluation.bic
assert likelihood == evaluation.likelihood
assert N == evaluation.N
assert K == evaluation.K
assert C == evaluation.C
assert_almost_equal(evaluation.cluster_entropy, cluster_entropy)
assert_almost_equal(evaluation.class_entropy, class_entropy)
assert_almost_equal(evaluation.purity, purity)
assert_array_almost_equal(evaluation.purity2, purity2)
assert_almost_equal(evaluation.rand_index, rand_index)
assert_almost_equal(evaluation.precision, precision)
assert_almost_equal(evaluation.recall, recall)
assert_almost_equal(evaluation.f1_measure, f1_measure)
assert_almost_equal(evaluation.homogeneity, homogeneity)
assert_almost_equal(evaluation.completeness, completeness, )
assert_almost_equal(evaluation.v_measure, v_measure)
assert_almost_equal(evaluation.nv_measure, nv_measure)
assert_almost_equal(evaluation.mutual_information, mutual_information)
assert_almost_equal(evaluation.normalized_mutual_information, normalized_mutual_information)
assert_almost_equal(evaluation.normalized_mutual_information2, normalized_mutual_information2)
assert len(evaluation.get_attribute_names()) == 16 + 5 # Assert the number of attributes
| 37.47561 | 103 | 0.669378 |
96a62a325dba27ce21e6618dec8dcf747836ad38 | 423 | py | Python | src/similarizer.py | DavidRivasPhD/mrseadd | fd41cf4965c7e96a5e995513b039fb5e3c7c9d0b | [
"Apache-2.0"
] | null | null | null | src/similarizer.py | DavidRivasPhD/mrseadd | fd41cf4965c7e96a5e995513b039fb5e3c7c9d0b | [
"Apache-2.0"
] | null | null | null | src/similarizer.py | DavidRivasPhD/mrseadd | fd41cf4965c7e96a5e995513b039fb5e3c7c9d0b | [
"Apache-2.0"
] | null | null | null |
def similarizing(query, sentences, nlp):
"""
Given a original_query and clean_sentences, the function scores the similarity between them
"""
doc_query = nlp(query)
similarity_scores = {}
for i in range(0, len(sentences)):
doc_sentence_i = nlp(sentences[i])
similarity_i = doc_query.similarity(doc_sentence_i)
similarity_scores[i] = similarity_i
return similarity_scores
| 32.538462 | 95 | 0.695035 |
767dc3e7d96e1e19e6064e94893c2d8a260da73e | 6,260 | py | Python | examples/flappy_birds.py | ericmux/termux2d | 195571c06b6844726e34cbd5d2643a2425c5f403 | [
"MIT"
] | null | null | null | examples/flappy_birds.py | ericmux/termux2d | 195571c06b6844726e34cbd5d2643a2425c5f403 | [
"MIT"
] | null | null | null | examples/flappy_birds.py | ericmux/termux2d | 195571c06b6844726e34cbd5d2643a2425c5f403 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import sys
import os
sys.path.append(os.path.abspath(".."))
import curses
from termux2d import Canvas, line
from time import sleep
from thread import start_new_thread
from Queue import Queue
import locale
from random import randint
locale.setlocale(locale.LC_ALL,"")
stdscr = curses.initscr()
stdscr.refresh()
keys = Queue()
speed = 0.0
fps = 20
frame_no = 0
score = 0
delta = frame_no / fps
height = 100
width = 100
position = height / 2
bird_map = [
#1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
[0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0], #1
[0,0,0,0,0,1,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0], #2
[0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0], #3
[0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,1,0,1,0,0,0], #4
[0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,1,0,0], #5
[0,1,1,1,1,1,1,1,1,0,1,0,0,0,0,1,0,0,1,0,0], #6
[1,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,1,0,0], #7
[1,0,0,0,0,0,0,1,0,0,0,0,1,1,1,1,1,1,1,1,0], #8
[1,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,1], #9
[1,0,0,0,0,0,1,0,0,0,0,0,1,1,1,1,1,1,1,1,0], #0
[0,1,1,1,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0], #1
[0,0,0,0,0,0,1,0,0,0,0,0,1,1,1,1,1,1,1,0,0], #2
[0,0,0,0,0,0,0,1,1,1,1,1,0,0,0,0,0,0,0,0,0], #3
]
bird = []
for y, row in enumerate(bird_map):
for x,col in enumerate(row):
if col:
bird.append((x, y))
def read_keys(stdscr):
while 1:
c = stdscr.getch()
keys.put(c)
class Bar():
def __init__(self, bar_width, cap_height=4, space=3*13):
self.height = randint(cap_height+space+1, height-1-cap_height)
self.width = bar_width
self.cap_height = cap_height
self.x = width - bar_width - 1
self.space = space
def draw(self):
for x,y in line(self.x,
self.height,
self.x+self.width,
self.height):
yield x, y
for x,y in line(self.x,
self.height,
self.x,
self.height+self.cap_height):
yield x, y
for x,y in line(self.x+self.width,
self.height,
x+self.width,
self.height+self.cap_height):
yield x, y
for x,y in line(self.x,
self.height+self.cap_height,
self.x+2,
self.height+self.cap_height):
yield x, y
for x,y in line(self.x+self.width-2,
self.height+self.cap_height,
self.x+self.width,
self.height+self.cap_height):
yield x, y
for x,y in line(self.x+2,
self.height+self.cap_height,
self.x+2,
height):
yield x, y
for x,y in line(self.x+self.width-2,
self.height+self.cap_height,
self.x+self.width-2,
height):
yield x, y
for x,y in line(self.x,
self.height-self.space,
self.x+self.width,
self.height-self.space):
yield x, y
for x,y in line(self.x,
self.height-self.space,
self.x,
self.height-self.cap_height-self.space):
yield x, y
for x,y in line(self.x+self.width,
self.height-self.space,
x+self.width,
self.height-self.cap_height-self.space):
yield x, y
for x,y in line(self.x,
self.height-self.cap_height-self.space,
self.x+2,
self.height-self.cap_height-self.space):
yield x, y
for x,y in line(self.x+self.width-2,
self.height-self.cap_height-self.space,
self.x+self.width,
self.height-self.cap_height-self.space):
yield x, y
for x,y in line(self.x+2,
self.height-self.cap_height-self.space,
self.x+2,
0):
yield x, y
for x,y in line(self.x+self.width-2,
self.height-self.cap_height-self.space,
self.x+self.width-2,
0):
yield x, y
def check_collision(bird_pos, bar):
# TODO more efficient collision detection
if bar.x > 21:
return False
if bar.height <= bird_pos-13 and bar.height+bar.space > bird_pos:
return False
for bar_x, bar_y in bar.draw():
for bird_x, bird_y in bird:
if int(bird_x) == int(bar_x) and int(bird_y+bird_pos) == int(bar_y):
return True
return False
def main(stdscr):
global frame_no, speed, position, score
c = Canvas()
bar_width = 16
bars = [Bar(bar_width)]
stdscr.refresh()
while True:
frame_no += 1
for bar in bars:
if check_collision(position, bar):
return
while not keys.empty():
if keys.get() == 113:
return
speed = 32.0
c.set(0,0)
c.set(width, height)
if frame_no % 50 == 0:
bars.append(Bar(bar_width))
for x,y in bird:
c.set(x,y+position)
for bar_index, bar in enumerate(bars):
if bar.x < 1:
bars.pop(bar_index)
score += 1
else:
bars[bar_index].x -= 1
for x,y in bar.draw():
c.set(x,y)
f = c.frame()+'\n'
stdscr.addstr(0, 0, f)
stdscr.addstr(height/4+1, 0, 'score: {0}'.format(score))
stdscr.refresh()
c.clear()
speed -= 2
position -= speed/10
if position < 0:
position = 0
speed = 0.0
elif position > height-13:
position = height-13
speed = 0.0
sleep(1.0/fps)
if __name__ == '__main__':
start_new_thread(read_keys, (stdscr,))
curses.wrapper(main)
print('Final score: {0}'.format(score))
| 29.528302 | 80 | 0.474601 |
baecb9b88d901fe49ea2e4df982740cd10f7253e | 735 | py | Python | tests/test_reweighing.py | gusrabbit/aif360-learn | b14a9b98e96dd2756faf312047e9a50ccc1559fa | [
"Apache-2.0"
] | null | null | null | tests/test_reweighing.py | gusrabbit/aif360-learn | b14a9b98e96dd2756faf312047e9a50ccc1559fa | [
"Apache-2.0"
] | null | null | null | tests/test_reweighing.py | gusrabbit/aif360-learn | b14a9b98e96dd2756faf312047e9a50ccc1559fa | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from aiflearn.algorithms.preprocessing.reweighing import Reweighing
from aiflearn.datasets import AdultDataset
def test_instance_weights():
ad = AdultDataset(instance_weights_name='fnlwgt', features_to_drop=[])
privileged_groups = [{'sex': 1}]
unprivileged_groups = [{'sex': 0}]
rw = Reweighing(unprivileged_groups=unprivileged_groups,
privileged_groups=privileged_groups)
transf = rw.fit_transform(ad)
print(transf.instance_weights.sum())
assert np.isclose(ad.instance_weights.sum(), transf.instance_weights.sum())
| 36.75 | 79 | 0.77551 |
304d5f835bdd81c7d2f2ebca7e3c22f6ee2a3ca8 | 459 | py | Python | examples/ackley.py | SimpleArt/spsa | 55ff58868f9b5e515dff4b55c8d0671f0f124f3d | [
"MIT"
] | 8 | 2022-02-19T07:28:18.000Z | 2022-03-16T07:15:37.000Z | examples/ackley.py | SimpleArt/spsa | 55ff58868f9b5e515dff4b55c8d0671f0f124f3d | [
"MIT"
] | null | null | null | examples/ackley.py | SimpleArt/spsa | 55ff58868f9b5e515dff4b55c8d0671f0f124f3d | [
"MIT"
] | null | null | null | import numpy as np
from examples._utils import run
def function(x: np.ndarray) -> float:
"""
The Ackley function is
-20 exp(-0.2 sqrt(0.5 (x^2 + y^2)))
- exp(0.5 (cos(2 pi x) + cos(2 pi y)))
"""
return -(
20 * np.exp(-0.2 * np.sqrt(0.5 * (x**2).sum()))
+ np.exp(0.5 * np.cos(2 * np.pi * x).sum())
)
def main() -> None:
run(function, (2, 3), "Ackley", noise=0.1)
if __name__ == "__main__":
main()
| 22.95 | 55 | 0.503268 |
197f22e8843755fbb73e438c997faf4be5d2868a | 407 | py | Python | fireworks/utils/utils_test.py | kellylab/Fireworks | ff027cd8d1b8ce5eec6a37d786e7de675d8c0849 | [
"MIT"
] | 9 | 2019-05-01T01:22:10.000Z | 2020-12-08T15:41:13.000Z | fireworks/utils/utils_test.py | smk508/Fireworks | ff027cd8d1b8ce5eec6a37d786e7de675d8c0849 | [
"MIT"
] | 53 | 2019-01-20T17:02:38.000Z | 2019-03-24T18:00:08.000Z | fireworks/utils/utils_test.py | smk508/Fireworks | ff027cd8d1b8ce5eec6a37d786e7de675d8c0849 | [
"MIT"
] | 4 | 2019-07-04T15:39:46.000Z | 2021-08-17T04:59:25.000Z | from . import utils
import numpy as np
def test_one_hot():
hot = utils.one_hot(2, 10)
assert (hot == np.array([0,0,1,0,0,0,0,0,0,0])).all()
def test_subset_dict():
d1 = {'a':1, 'b':2, 'c':3, 'd': 4}
d2 = utils.subset_dict(d1, ['b', 'c', 'e'])
assert d2['b'] == d1['b']
assert d2['c'] == d2['c']
assert 'e' not in d2
d3 = utils.subset_dict(d1, ['f'])
assert d3 == {}
| 21.421053 | 57 | 0.518428 |
e20a3d9ae867a8e4b24a1cc401095f36334af586 | 151,598 | py | Python | awlify/utils/awl_full_results.py | lpmi-13/awlify-python | e1a5826d94ceff1e586e1785fcd1967691ba2758 | [
"MIT"
] | 1 | 2019-09-14T11:38:48.000Z | 2019-09-14T11:38:48.000Z | awlify/utils/awl_full_results.py | lpmi-13/awlify-python | e1a5826d94ceff1e586e1785fcd1967691ba2758 | [
"MIT"
] | 1 | 2019-02-09T14:24:02.000Z | 2020-10-25T05:25:55.000Z | awlify/utils/awl_full_results.py | lpmi-13/awlify-python | e1a5826d94ceff1e586e1785fcd1967691ba2758 | [
"MIT"
] | null | null | null | awl_list = {"adjacent": {"head": "adjacent", "sublist": 10}, "albeit": {"head": "albeit", "sublist": 10}, "assemble": {"head": "assemble", "sublist": 10}, "assembled": {"head": "assemble", "sublist": 10}, "assembles": {"head": "assemble", "sublist": 10}, "assemblies": {"head": "assemble", "sublist": 10}, "assembling": {"head": "assemble", "sublist": 10}, "assembly": {"head": "assemble", "sublist": 10}, "collapse": {"head": "collapse", "sublist": 10}, "collapsed": {"head": "collapse", "sublist": 10}, "collapses": {"head": "collapse", "sublist": 10}, "collapsible": {"head": "collapse", "sublist": 10}, "collapsing": {"head": "collapse", "sublist": 10}, "colleague": {"head": "colleague", "sublist": 10}, "colleagues": {"head": "colleague", "sublist": 10}, "compile": {"head": "compile", "sublist": 10}, "compilation": {"head": "compile", "sublist": 10}, "compilations": {"head": "compile", "sublist": 10}, "compiled": {"head": "compile", "sublist": 10}, "compiles": {"head": "compile", "sublist": 10}, "compiling": {"head": "compile", "sublist": 10}, "conceive": {"head": "conceive", "sublist": 10}, "conceivable": {"head": "conceive", "sublist": 10}, "conceivably": {"head": "conceive", "sublist": 10}, "conceived": {"head": "conceive", "sublist": 10}, "conceives": {"head": "conceive", "sublist": 10}, "conceiving": {"head": "conceive", "sublist": 10}, "inconceivable": {"head": "conceive", "sublist": 10}, "inconceivably": {"head": "conceive", "sublist": 10}, "convince": {"head": "convince", "sublist": 10}, "convinced": {"head": "convince", "sublist": 10}, "convinces": {"head": "convince", "sublist": 10}, "convincing": {"head": "convince", "sublist": 10}, "convincingly": {"head": "convince", "sublist": 10}, "unconvinced": {"head": "convince", "sublist": 10}, "depress": {"head": "depress", "sublist": 10}, "depressed": {"head": "depress", "sublist": 10}, "depresses": {"head": "depress", "sublist": 10}, "depressing": {"head": "depress", "sublist": 10}, 
"depression": {"head": "depress", "sublist": 10}, "encounter": {"head": "encounter", "sublist": 10}, "encountered": {"head": "encounter", "sublist": 10}, "encountering": {"head": "encounter", "sublist": 10}, "encounters": {"head": "encounter", "sublist": 10}, "enormous": {"head": "enormous", "sublist": 10}, "enormity": {"head": "enormous", "sublist": 10}, "enormously": {"head": "enormous", "sublist": 10}, "forthcoming": {"head": "forthcoming", "sublist": 10}, "incline": {"head": "incline", "sublist": 10}, "inclination": {"head": "incline", "sublist": 10}, "inclinations": {"head": "incline", "sublist": 10}, "inclined": {"head": "incline", "sublist": 10}, "inclines": {"head": "incline", "sublist": 10}, "inclining": {"head": "incline", "sublist": 10}, "integrity": {"head": "integrity", "sublist": 10}, "intrinsic": {"head": "intrinsic", "sublist": 10}, "intrinsically": {"head": "intrinsic", "sublist": 10}, "invoke": {"head": "invoke", "sublist": 10}, "invoked": {"head": "invoke", "sublist": 10}, "invokes": {"head": "invoke", "sublist": 10}, "invoking": {"head": "invoke", "sublist": 10}, "levy": {"head": "levy", "sublist": 10}, "levies": {"head": "levy", "sublist": 10}, "likewise": {"head": "likewise", "sublist": 10}, "nonetheless": {"head": "nonetheless", "sublist": 10}, "notwithstanding": {"head": "notwithstanding", "sublist": 10}, "odd": {"head": "odd", "sublist": 10}, "odds": {"head": "odd", "sublist": 10}, "ongoing": {"head": "ongoing", "sublist": 10}, "panel": {"head": "panel", "sublist": 10}, "panelled": {"head": "panel", "sublist": 10}, "panelling": {"head": "panel", "sublist": 10}, "panels": {"head": "panel", "sublist": 10}, "persist": {"head": "persist", "sublist": 10}, "persisted": {"head": "persist", "sublist": 10}, "persistence": {"head": "persist", "sublist": 10}, "persistent": {"head": "persist", "sublist": 10}, "persistently": {"head": "persist", "sublist": 10}, "persisting": {"head": "persist", "sublist": 10}, "persists": {"head": "persist", "sublist": 
10}, "pose": {"head": "pose", "sublist": 10}, "posed": {"head": "pose", "sublist": 10}, "poses": {"head": "pose", "sublist": 10}, "posing": {"head": "pose", "sublist": 10}, "reluctance": {"head": "reluctance", "sublist": 10}, "reluctant": {"head": "reluctance", "sublist": 10}, "reluctantly": {"head": "reluctance", "sublist": 10}, "so\u2011called": {"head": "so\u2011called", "sublist": 10}, "straightforward": {"head": "straightforward", "sublist": 10}, "undergo": {"head": "undergo", "sublist": 10}, "undergoes": {"head": "undergo", "sublist": 10}, "undergoing": {"head": "undergo", "sublist": 10}, "undergone": {"head": "undergo", "sublist": 10}, "underwent": {"head": "undergo", "sublist": 10}, "whereby": {"head": "whereby", "sublist": 10}, "academy": {"head": "academy", "sublist": 5}, "academia": {"head": "academy", "sublist": 5}, "academic": {"head": "academy", "sublist": 5}, "academically": {"head": "academy", "sublist": 5}, "academics": {"head": "academy", "sublist": 5}, "academies": {"head": "academy", "sublist": 5}, "adjust": {"head": "adjust", "sublist": 5}, "adjusted": {"head": "adjust", "sublist": 5}, "adjusting": {"head": "adjust", "sublist": 5}, "adjustment": {"head": "adjust", "sublist": 5}, "adjustments": {"head": "adjust", "sublist": 5}, "adjusts": {"head": "adjust", "sublist": 5}, "readjust": {"head": "adjust", "sublist": 5}, "readjusted": {"head": "adjust", "sublist": 5}, "readjusting": {"head": "adjust", "sublist": 5}, "readjustment": {"head": "adjust", "sublist": 5}, "readjustments": {"head": "adjust", "sublist": 5}, "readjusts": {"head": "adjust", "sublist": 5}, "alter": {"head": "alter", "sublist": 5}, "alterable": {"head": "alter", "sublist": 5}, "alteration": {"head": "alter", "sublist": 5}, "alterations": {"head": "alter", "sublist": 5}, "altered": {"head": "alter", "sublist": 5}, "altering": {"head": "alter", "sublist": 5}, "alternate": {"head": "alter", "sublist": 5}, "alternating": {"head": "alter", "sublist": 5}, "alters": {"head": "alter", 
"sublist": 5}, "unalterable": {"head": "alter", "sublist": 5}, "unaltered": {"head": "alter", "sublist": 5}, "amend": {"head": "amend", "sublist": 5}, "amended": {"head": "amend", "sublist": 5}, "amending": {"head": "amend", "sublist": 5}, "amendment": {"head": "amend", "sublist": 5}, "amendments": {"head": "amend", "sublist": 5}, "amends": {"head": "amend", "sublist": 5}, "aware": {"head": "aware", "sublist": 5}, "awareness": {"head": "aware", "sublist": 5}, "unaware": {"head": "aware", "sublist": 5}, "capacity": {"head": "capacity", "sublist": 5}, "capacities": {"head": "capacity", "sublist": 5}, "incapacitate": {"head": "capacity", "sublist": 5}, "incapacitated": {"head": "capacity", "sublist": 5}, "challenge": {"head": "challenge", "sublist": 5}, "challenged": {"head": "challenge", "sublist": 5}, "challenger": {"head": "challenge", "sublist": 5}, "challengers": {"head": "challenge", "sublist": 5}, "challenges": {"head": "challenge", "sublist": 5}, "challenging": {"head": "challenge", "sublist": 5}, "clause": {"head": "clause", "sublist": 5}, "clauses": {"head": "clause", "sublist": 5}, "compound": {"head": "compound", "sublist": 5}, "compounded": {"head": "compound", "sublist": 5}, "compounding": {"head": "compound", "sublist": 5}, "compounds": {"head": "compound", "sublist": 5}, "conflict": {"head": "conflict", "sublist": 5}, "conflicted": {"head": "conflict", "sublist": 5}, "conflicting": {"head": "conflict", "sublist": 5}, "conflicts": {"head": "conflict", "sublist": 5}, "consult": {"head": "consult", "sublist": 5}, "consultancy": {"head": "consult", "sublist": 5}, "consultant": {"head": "consult", "sublist": 5}, "consultants": {"head": "consult", "sublist": 5}, "consultation": {"head": "consult", "sublist": 5}, "consultations": {"head": "consult", "sublist": 5}, "consultative": {"head": "consult", "sublist": 5}, "consulted": {"head": "consult", "sublist": 5}, "consults": {"head": "consult", "sublist": 5}, "consulting": {"head": "consult", "sublist": 5}, 
"contact": {"head": "contact", "sublist": 5}, "contactable": {"head": "contact", "sublist": 5}, "contacted": {"head": "contact", "sublist": 5}, "contacting": {"head": "contact", "sublist": 5}, "contacts": {"head": "contact", "sublist": 5}, "decline": {"head": "decline", "sublist": 5}, "declined": {"head": "decline", "sublist": 5}, "declines": {"head": "decline", "sublist": 5}, "declining": {"head": "decline", "sublist": 5}, "discrete": {"head": "discrete", "sublist": 5}, "discretely": {"head": "discrete", "sublist": 5}, "discretion": {"head": "discrete", "sublist": 5}, "discretionary": {"head": "discrete", "sublist": 5}, "indiscrete": {"head": "discrete", "sublist": 5}, "indiscretion": {"head": "discrete", "sublist": 5}, "draft": {"head": "draft", "sublist": 5}, "drafted": {"head": "draft", "sublist": 5}, "drafting": {"head": "draft", "sublist": 5}, "drafts": {"head": "draft", "sublist": 5}, "redraft": {"head": "draft", "sublist": 5}, "redrafted": {"head": "draft", "sublist": 5}, "redrafting": {"head": "draft", "sublist": 5}, "redrafts": {"head": "draft", "sublist": 5}, "enable": {"head": "enable", "sublist": 5}, "enabled": {"head": "enable", "sublist": 5}, "enables": {"head": "enable", "sublist": 5}, "enabling": {"head": "enable", "sublist": 5}, "energy": {"head": "energy", "sublist": 5}, "energetic": {"head": "energy", "sublist": 5}, "energetically": {"head": "energy", "sublist": 5}, "energies": {"head": "energy", "sublist": 5}, "enforce": {"head": "enforce", "sublist": 5}, "enforced": {"head": "enforce", "sublist": 5}, "enforcement": {"head": "enforce", "sublist": 5}, "enforces": {"head": "enforce", "sublist": 5}, "enforcing": {"head": "enforce", "sublist": 5}, "entity": {"head": "entity", "sublist": 5}, "entities": {"head": "entity", "sublist": 5}, "equivalent": {"head": "equivalent", "sublist": 5}, "equivalence": {"head": "equivalent", "sublist": 5}, "evolve": {"head": "evolve", "sublist": 5}, "evolution": {"head": "evolve", "sublist": 5}, "evolved": {"head": 
"evolve", "sublist": 5}, "evolving": {"head": "evolve", "sublist": 5}, "evolves": {"head": "evolve", "sublist": 5}, "evolutionary": {"head": "evolve", "sublist": 5}, "evolutionist": {"head": "evolve", "sublist": 5}, "evolutionists": {"head": "evolve", "sublist": 5}, "expand": {"head": "expand", "sublist": 5}, "expanded": {"head": "expand", "sublist": 5}, "expanding": {"head": "expand", "sublist": 5}, "expands": {"head": "expand", "sublist": 5}, "expansion": {"head": "expand", "sublist": 5}, "expansionism": {"head": "expand", "sublist": 5}, "expansive": {"head": "expand", "sublist": 5}, "expose": {"head": "expose", "sublist": 5}, "exposed": {"head": "expose", "sublist": 5}, "exposes": {"head": "expose", "sublist": 5}, "exposing": {"head": "expose", "sublist": 5}, "exposure": {"head": "expose", "sublist": 5}, "exposures": {"head": "expose", "sublist": 5}, "external": {"head": "external", "sublist": 5}, "externalisation": {"head": "external", "sublist": 5}, "externalise": {"head": "external", "sublist": 5}, "externalised": {"head": "external", "sublist": 5}, "externalises": {"head": "external", "sublist": 5}, "externalising": {"head": "external", "sublist": 5}, "externality": {"head": "external", "sublist": 5}, "externalization": {"head": "external", "sublist": 5}, "externalize": {"head": "external", "sublist": 5}, "externalized": {"head": "external", "sublist": 5}, "externalizes": {"head": "external", "sublist": 5}, "externalizing": {"head": "external", "sublist": 5}, "externally": {"head": "external", "sublist": 5}, "facilitate": {"head": "facilitate", "sublist": 5}, "facilitated": {"head": "facilitate", "sublist": 5}, "facilitates": {"head": "facilitate", "sublist": 5}, "facilities": {"head": "facilitate", "sublist": 5}, "facilitating": {"head": "facilitate", "sublist": 5}, "facilitation": {"head": "facilitate", "sublist": 5}, "facilitator": {"head": "facilitate", "sublist": 5}, "facilitators": {"head": "facilitate", "sublist": 5}, "facility": {"head": 
"facilitate", "sublist": 5}, "fundamental": {"head": "fundamental", "sublist": 5}, "fundamentally": {"head": "fundamental", "sublist": 5}, "generate": {"head": "generate", "sublist": 5}, "generated": {"head": "generate", "sublist": 5}, "generates": {"head": "generate", "sublist": 5}, "generating": {"head": "generate", "sublist": 5}, "generation": {"head": "generation", "sublist": 5}, "generations": {"head": "generation", "sublist": 5}, "image": {"head": "image", "sublist": 5}, "imagery": {"head": "image", "sublist": 5}, "images": {"head": "image", "sublist": 5}, "liberal": {"head": "liberal", "sublist": 5}, "liberalise": {"head": "liberal", "sublist": 5}, "liberalism": {"head": "liberal", "sublist": 5}, "liberalisation": {"head": "liberal", "sublist": 5}, "liberalised": {"head": "liberal", "sublist": 5}, "liberalises": {"head": "liberal", "sublist": 5}, "liberalising": {"head": "liberal", "sublist": 5}, "liberalization": {"head": "liberal", "sublist": 5}, "liberalize": {"head": "liberal", "sublist": 5}, "liberalized": {"head": "liberal", "sublist": 5}, "liberalizes": {"head": "liberal", "sublist": 5}, "liberalizing": {"head": "liberal", "sublist": 5}, "liberate": {"head": "liberal", "sublist": 5}, "liberated": {"head": "liberal", "sublist": 5}, "liberates": {"head": "liberal", "sublist": 5}, "liberation": {"head": "liberal", "sublist": 5}, "liberations": {"head": "liberal", "sublist": 5}, "liberating": {"head": "liberal", "sublist": 5}, "liberator": {"head": "liberal", "sublist": 5}, "liberators": {"head": "liberal", "sublist": 5}, "liberally": {"head": "liberal", "sublist": 5}, "liberals": {"head": "liberal", "sublist": 5}, "licence": {"head": "licence", "sublist": 5}, "licences": {"head": "licence", "sublist": 5}, "license": {"head": "licence", "sublist": 5}, "licensed": {"head": "licence", "sublist": 5}, "licensing": {"head": "licence", "sublist": 5}, "licenses": {"head": "licence", "sublist": 5}, "unlicensed": {"head": "licence", "sublist": 5}, "logic": 
{"head": "logic", "sublist": 5}, "illogical": {"head": "logic", "sublist": 5}, "illogically": {"head": "logic", "sublist": 5}, "logical": {"head": "logic", "sublist": 5}, "logically": {"head": "logic", "sublist": 5}, "logician": {"head": "logic", "sublist": 5}, "logicians": {"head": "logic", "sublist": 5}, "margin": {"head": "margin", "sublist": 5}, "marginal": {"head": "margin", "sublist": 5}, "marginally": {"head": "margin", "sublist": 5}, "margins": {"head": "margin", "sublist": 5}, "medical": {"head": "medical", "sublist": 5}, "medically": {"head": "medical", "sublist": 5}, "mental": {"head": "mental", "sublist": 5}, "mentality": {"head": "mental", "sublist": 5}, "mentally": {"head": "mental", "sublist": 5}, "modify": {"head": "modify", "sublist": 5}, "modification": {"head": "modify", "sublist": 5}, "modifications": {"head": "modify", "sublist": 5}, "modified": {"head": "modify", "sublist": 5}, "modifies": {"head": "modify", "sublist": 5}, "modifying": {"head": "modify", "sublist": 5}, "unmodified": {"head": "modify", "sublist": 5}, "monitor": {"head": "monitor", "sublist": 5}, "monitored": {"head": "monitor", "sublist": 5}, "monitoring": {"head": "monitor", "sublist": 5}, "monitors": {"head": "monitor", "sublist": 5}, "unmonitored": {"head": "monitor", "sublist": 5}, "network": {"head": "network", "sublist": 5}, "networked": {"head": "network", "sublist": 5}, "networking": {"head": "network", "sublist": 5}, "networks": {"head": "network", "sublist": 5}, "notion": {"head": "notion", "sublist": 5}, "notions": {"head": "notion", "sublist": 5}, "objective": {"head": "objective", "sublist": 5}, "objectively": {"head": "objective", "sublist": 5}, "objectivity": {"head": "objective", "sublist": 5}, "orient": {"head": "orient", "sublist": 5}, "orientate": {"head": "orient", "sublist": 5}, "orientated": {"head": "orient", "sublist": 5}, "orientates": {"head": "orient", "sublist": 5}, "orientation": {"head": "orient", "sublist": 5}, "orientating": {"head": "orient", 
"sublist": 5}, "oriented": {"head": "orient", "sublist": 5}, "orienting": {"head": "orient", "sublist": 5}, "orients": {"head": "orient", "sublist": 5}, "reorient": {"head": "orient", "sublist": 5}, "reorientation": {"head": "orient", "sublist": 5}, "perspective": {"head": "perspective", "sublist": 5}, "perspectives": {"head": "perspective", "sublist": 5}, "precise": {"head": "precise", "sublist": 5}, "imprecise": {"head": "precise", "sublist": 5}, "precisely": {"head": "precise", "sublist": 5}, "precision": {"head": "precise", "sublist": 5}, "prime": {"head": "prime", "sublist": 5}, "primacy": {"head": "prime", "sublist": 5}, "psychology": {"head": "psychology", "sublist": 5}, "psychological": {"head": "psychology", "sublist": 5}, "psychologically": {"head": "psychology", "sublist": 5}, "psychologist": {"head": "psychology", "sublist": 5}, "psychologists": {"head": "psychology", "sublist": 5}, "pursue": {"head": "pursue", "sublist": 5}, "pursued": {"head": "pursue", "sublist": 5}, "pursues": {"head": "pursue", "sublist": 5}, "pursuing": {"head": "pursue", "sublist": 5}, "pursuit": {"head": "pursue", "sublist": 5}, "pursuits": {"head": "pursue", "sublist": 5}, "ratio": {"head": "ratio", "sublist": 5}, "ratios": {"head": "ratio", "sublist": 5}, "reject": {"head": "reject", "sublist": 5}, "rejected": {"head": "reject", "sublist": 5}, "rejecting": {"head": "reject", "sublist": 5}, "rejection": {"head": "reject", "sublist": 5}, "rejects": {"head": "reject", "sublist": 5}, "rejections": {"head": "reject", "sublist": 5}, "revenue": {"head": "revenue", "sublist": 5}, "revenues": {"head": "revenue", "sublist": 5}, "stable": {"head": "stable", "sublist": 5}, "instability": {"head": "stable", "sublist": 5}, "stabilisation": {"head": "stable", "sublist": 5}, "stabilise": {"head": "stable", "sublist": 5}, "stabilised": {"head": "stable", "sublist": 5}, "stabilises": {"head": "stable", "sublist": 5}, "stabilising": {"head": "stable", "sublist": 5}, "stabilization": {"head": 
"stable", "sublist": 5}, "stabilize": {"head": "stable", "sublist": 5}, "stabilized": {"head": "stable", "sublist": 5}, "stabilizes": {"head": "stable", "sublist": 5}, "stabilizing": {"head": "stable", "sublist": 5}, "stability": {"head": "stable", "sublist": 5}, "unstable": {"head": "stable", "sublist": 5}, "style": {"head": "style", "sublist": 5}, "styled": {"head": "style", "sublist": 5}, "styles": {"head": "style", "sublist": 5}, "styling": {"head": "style", "sublist": 5}, "stylish": {"head": "style", "sublist": 5}, "stylise": {"head": "style", "sublist": 5}, "stylised": {"head": "style", "sublist": 5}, "stylises": {"head": "style", "sublist": 5}, "stylising": {"head": "style", "sublist": 5}, "stylize": {"head": "style", "sublist": 5}, "stylized": {"head": "style", "sublist": 5}, "stylizes": {"head": "style", "sublist": 5}, "stylizing": {"head": "style", "sublist": 5}, "substitute": {"head": "substitute", "sublist": 5}, "substituted": {"head": "substitute", "sublist": 5}, "substitutes": {"head": "substitute", "sublist": 5}, "substituting": {"head": "substitute", "sublist": 5}, "substitution": {"head": "substitute", "sublist": 5}, "sustain": {"head": "sustain", "sublist": 5}, "sustainable": {"head": "sustain", "sublist": 5}, "sustainability": {"head": "sustain", "sublist": 5}, "sustained": {"head": "sustain", "sublist": 5}, "sustaining": {"head": "sustain", "sublist": 5}, "sustains": {"head": "sustain", "sublist": 5}, "sustenance": {"head": "sustain", "sublist": 5}, "unsustainable": {"head": "sustain", "sublist": 5}, "symbol": {"head": "symbol", "sublist": 5}, "symbolic": {"head": "symbol", "sublist": 5}, "symbolically": {"head": "symbol", "sublist": 5}, "symbolise": {"head": "symbol", "sublist": 5}, "symbolises": {"head": "symbol", "sublist": 5}, "symbolised": {"head": "symbol", "sublist": 5}, "symbolising": {"head": "symbol", "sublist": 5}, "symbolism": {"head": "symbol", "sublist": 5}, "symbolize": {"head": "symbol", "sublist": 5}, "symbolized": {"head": 
"symbol", "sublist": 5}, "symbolizes": {"head": "symbol", "sublist": 5}, "symbolizing": {"head": "symbol", "sublist": 5}, "symbols": {"head": "symbol", "sublist": 5}, "target": {"head": "target", "sublist": 5}, "targeted": {"head": "target", "sublist": 5}, "targeting": {"head": "target", "sublist": 5}, "targets": {"head": "target", "sublist": 5}, "transit": {"head": "transit", "sublist": 5}, "transited": {"head": "transit", "sublist": 5}, "transiting": {"head": "transit", "sublist": 5}, "transition": {"head": "transit", "sublist": 5}, "transitional": {"head": "transit", "sublist": 5}, "transitions": {"head": "transit", "sublist": 5}, "transitory": {"head": "transit", "sublist": 5}, "transits": {"head": "transit", "sublist": 5}, "trend": {"head": "trend", "sublist": 5}, "trends": {"head": "trend", "sublist": 5}, "version": {"head": "version", "sublist": 5}, "versions": {"head": "version", "sublist": 5}, "welfare": {"head": "welfare", "sublist": 5}, "whereas": {"head": "whereas", "sublist": 5}, "adapt": {"head": "adapt", "sublist": 7}, "adaptability": {"head": "adapt", "sublist": 7}, "adaptable": {"head": "adapt", "sublist": 7}, "adaptation": {"head": "adapt", "sublist": 7}, "adaptations": {"head": "adapt", "sublist": 7}, "adapted": {"head": "adapt", "sublist": 7}, "adapting": {"head": "adapt", "sublist": 7}, "adaptive": {"head": "adapt", "sublist": 7}, "adapts": {"head": "adapt", "sublist": 7}, "adult": {"head": "adult", "sublist": 7}, "adulthood": {"head": "adult", "sublist": 7}, "adults": {"head": "adult", "sublist": 7}, "advocate": {"head": "advocate", "sublist": 7}, "advocacy": {"head": "advocate", "sublist": 7}, "advocated": {"head": "advocate", "sublist": 7}, "advocates": {"head": "advocate", "sublist": 7}, "advocating": {"head": "advocate", "sublist": 7}, "aid": {"head": "aid", "sublist": 7}, "aided": {"head": "aid", "sublist": 7}, "aiding": {"head": "aid", "sublist": 7}, "aids": {"head": "aid", "sublist": 7}, "unaided": {"head": "aid", "sublist": 7}, 
"channel": {"head": "channel", "sublist": 7}, "channelled": {"head": "channel", "sublist": 7}, "channelling": {"head": "channel", "sublist": 7}, "channels": {"head": "channel", "sublist": 7}, "chemical": {"head": "chemical", "sublist": 7}, "chemically": {"head": "chemical", "sublist": 7}, "chemicals": {"head": "chemical", "sublist": 7}, "classic": {"head": "classic", "sublist": 7}, "classical": {"head": "classic", "sublist": 7}, "classics": {"head": "classic", "sublist": 7}, "comprehensive": {"head": "comprehensive", "sublist": 7}, "comprehensively": {"head": "comprehensive", "sublist": 7}, "comprise": {"head": "comprise", "sublist": 7}, "comprised": {"head": "comprise", "sublist": 7}, "comprises": {"head": "comprise", "sublist": 7}, "comprising": {"head": "comprise", "sublist": 7}, "confirm": {"head": "confirm", "sublist": 7}, "confirmation": {"head": "confirm", "sublist": 7}, "confirmed": {"head": "confirm", "sublist": 7}, "confirming": {"head": "confirm", "sublist": 7}, "confirms": {"head": "confirm", "sublist": 7}, "contrary": {"head": "contrary", "sublist": 7}, "contrarily": {"head": "contrary", "sublist": 7}, "convert": {"head": "convert", "sublist": 7}, "conversion": {"head": "convert", "sublist": 7}, "conversions": {"head": "convert", "sublist": 7}, "converted": {"head": "convert", "sublist": 7}, "convertible": {"head": "convert", "sublist": 7}, "converting": {"head": "convert", "sublist": 7}, "converts": {"head": "convert", "sublist": 7}, "couple": {"head": "couple", "sublist": 7}, "coupled": {"head": "couple", "sublist": 7}, "coupling": {"head": "couple", "sublist": 7}, "couples": {"head": "couple", "sublist": 7}, "decade": {"head": "decade", "sublist": 7}, "decades": {"head": "decade", "sublist": 7}, "definite": {"head": "definite", "sublist": 7}, "definitely": {"head": "definite", "sublist": 7}, "definitive": {"head": "definite", "sublist": 7}, "indefinite": {"head": "definite", "sublist": 7}, "indefinitely": {"head": "definite", "sublist": 7}, "deny": 
{"head": "deny", "sublist": 7}, "deniable": {"head": "deny", "sublist": 7}, "denial": {"head": "deny", "sublist": 7}, "denials": {"head": "deny", "sublist": 7}, "denied": {"head": "deny", "sublist": 7}, "denies": {"head": "deny", "sublist": 7}, "denying": {"head": "deny", "sublist": 7}, "undeniable": {"head": "deny", "sublist": 7}, "differentiate": {"head": "differentiate", "sublist": 7}, "differentiated": {"head": "differentiate", "sublist": 7}, "differentiates": {"head": "differentiate", "sublist": 7}, "differentiating": {"head": "differentiate", "sublist": 7}, "differentiation": {"head": "differentiate", "sublist": 7}, "dispose": {"head": "dispose", "sublist": 7}, "disposable": {"head": "dispose", "sublist": 7}, "disposal": {"head": "dispose", "sublist": 7}, "disposed": {"head": "dispose", "sublist": 7}, "disposes": {"head": "dispose", "sublist": 7}, "disposing": {"head": "dispose", "sublist": 7}, "dynamic": {"head": "dynamic", "sublist": 7}, "dynamically": {"head": "dynamic", "sublist": 7}, "dynamics": {"head": "dynamic", "sublist": 7}, "eliminate": {"head": "eliminate", "sublist": 7}, "eliminated": {"head": "eliminate", "sublist": 7}, "eliminates": {"head": "eliminate", "sublist": 7}, "eliminating": {"head": "eliminate", "sublist": 7}, "elimination": {"head": "eliminate", "sublist": 7}, "empirical": {"head": "empirical", "sublist": 7}, "empirically": {"head": "empirical", "sublist": 7}, "empiricism": {"head": "empirical", "sublist": 7}, "equip": {"head": "equip", "sublist": 7}, "equipment": {"head": "equip", "sublist": 7}, "equipped": {"head": "equip", "sublist": 7}, "equipping": {"head": "equip", "sublist": 7}, "equips": {"head": "equip", "sublist": 7}, "extract": {"head": "extract", "sublist": 7}, "extracted": {"head": "extract", "sublist": 7}, "extracting": {"head": "extract", "sublist": 7}, "extraction": {"head": "extract", "sublist": 7}, "extracts": {"head": "extract", "sublist": 7}, "file": {"head": "file", "sublist": 7}, "filed": {"head": "file", 
"sublist": 7}, "files": {"head": "file", "sublist": 7}, "filing": {"head": "file", "sublist": 7}, "finite": {"head": "finite", "sublist": 7}, "infinite": {"head": "finite", "sublist": 7}, "infinitely": {"head": "finite", "sublist": 7}, "foundation": {"head": "foundation", "sublist": 7}, "foundations": {"head": "foundation", "sublist": 7}, "globe": {"head": "globe", "sublist": 7}, "global": {"head": "globe", "sublist": 7}, "globally": {"head": "globe", "sublist": 7}, "globalisation": {"head": "globe", "sublist": 7}, "globalization": {"head": "globe", "sublist": 7}, "grade": {"head": "grade", "sublist": 7}, "graded": {"head": "grade", "sublist": 7}, "grades": {"head": "grade", "sublist": 7}, "grading": {"head": "grade", "sublist": 7}, "guarantee": {"head": "guarantee", "sublist": 7}, "guaranteed": {"head": "guarantee", "sublist": 7}, "guaranteeing": {"head": "guarantee", "sublist": 7}, "guarantees": {"head": "guarantee", "sublist": 7}, "hierarchy": {"head": "hierarchy", "sublist": 7}, "hierarchical": {"head": "hierarchy", "sublist": 7}, "hierarchies": {"head": "hierarchy", "sublist": 7}, "identical": {"head": "identical", "sublist": 7}, "identically": {"head": "identical", "sublist": 7}, "ideology": {"head": "ideology", "sublist": 7}, "ideological": {"head": "ideology", "sublist": 7}, "ideologically": {"head": "ideology", "sublist": 7}, "ideologies": {"head": "ideology", "sublist": 7}, "infer": {"head": "infer", "sublist": 7}, "inference": {"head": "infer", "sublist": 7}, "inferences": {"head": "infer", "sublist": 7}, "inferred": {"head": "infer", "sublist": 7}, "inferring": {"head": "infer", "sublist": 7}, "infers": {"head": "infer", "sublist": 7}, "innovate": {"head": "innovate", "sublist": 7}, "innovation": {"head": "innovate", "sublist": 7}, "innovated": {"head": "innovate", "sublist": 7}, "innovates": {"head": "innovate", "sublist": 7}, "innovating": {"head": "innovate", "sublist": 7}, "innovations": {"head": "innovate", "sublist": 7}, "innovative": {"head": 
"innovate", "sublist": 7}, "innovator": {"head": "innovate", "sublist": 7}, "innovators": {"head": "innovate", "sublist": 7}, "insert": {"head": "insert", "sublist": 7}, "inserted": {"head": "insert", "sublist": 7}, "inserting": {"head": "insert", "sublist": 7}, "insertion": {"head": "insert", "sublist": 7}, "inserts": {"head": "insert", "sublist": 7}, "intervene": {"head": "intervene", "sublist": 7}, "intervened": {"head": "intervene", "sublist": 7}, "intervenes": {"head": "intervene", "sublist": 7}, "intervening": {"head": "intervene", "sublist": 7}, "intervention": {"head": "intervene", "sublist": 7}, "interventions": {"head": "intervene", "sublist": 7}, "isolate": {"head": "isolate", "sublist": 7}, "isolated": {"head": "isolate", "sublist": 7}, "isolates": {"head": "isolate", "sublist": 7}, "isolating": {"head": "isolate", "sublist": 7}, "isolation": {"head": "isolate", "sublist": 7}, "isolationism": {"head": "isolate", "sublist": 7}, "media": {"head": "media", "sublist": 7}, "mode": {"head": "mode", "sublist": 7}, "modes": {"head": "mode", "sublist": 7}, "paradigm": {"head": "paradigm", "sublist": 7}, "paradigms": {"head": "paradigm", "sublist": 7}, "phenomenon": {"head": "phenomenon", "sublist": 7}, "phenomena": {"head": "phenomenon", "sublist": 7}, "phenomenal": {"head": "phenomenon", "sublist": 7}, "priority": {"head": "priority", "sublist": 7}, "priorities": {"head": "priority", "sublist": 7}, "prioritisation": {"head": "priority", "sublist": 7}, "prioritise": {"head": "priority", "sublist": 7}, "prioritised": {"head": "priority", "sublist": 7}, "prioritises": {"head": "priority", "sublist": 7}, "prioritising": {"head": "priority", "sublist": 7}, "prioritization": {"head": "priority", "sublist": 7}, "prioritize": {"head": "priority", "sublist": 7}, "prioritized": {"head": "priority", "sublist": 7}, "prioritizes": {"head": "priority", "sublist": 7}, "prioritizing": {"head": "priority", "sublist": 7}, "prohibit": {"head": "prohibit", "sublist": 7}, 
"prohibited": {"head": "prohibit", "sublist": 7}, "prohibiting": {"head": "prohibit", "sublist": 7}, "prohibition": {"head": "prohibit", "sublist": 7}, "prohibitions": {"head": "prohibit", "sublist": 7}, "prohibitive": {"head": "prohibit", "sublist": 7}, "prohibits": {"head": "prohibit", "sublist": 7}, "publication": {"head": "publication", "sublist": 7}, "publications": {"head": "publication", "sublist": 7}, "quote": {"head": "quote", "sublist": 7}, "quotation": {"head": "quote", "sublist": 7}, "quotations": {"head": "quote", "sublist": 7}, "quoted": {"head": "quote", "sublist": 7}, "quotes": {"head": "quote", "sublist": 7}, "quoting": {"head": "quote", "sublist": 7}, "release": {"head": "release", "sublist": 7}, "released": {"head": "release", "sublist": 7}, "releases": {"head": "release", "sublist": 7}, "releasing": {"head": "release", "sublist": 7}, "reverse": {"head": "reverse", "sublist": 7}, "reversal": {"head": "reverse", "sublist": 7}, "reversed": {"head": "reverse", "sublist": 7}, "reverses": {"head": "reverse", "sublist": 7}, "reversible": {"head": "reverse", "sublist": 7}, "reversing": {"head": "reverse", "sublist": 7}, "reversals": {"head": "reverse", "sublist": 7}, "irreversible": {"head": "reverse", "sublist": 7}, "simulate": {"head": "simulate", "sublist": 7}, "simulated": {"head": "simulate", "sublist": 7}, "simulates": {"head": "simulate", "sublist": 7}, "simulating": {"head": "simulate", "sublist": 7}, "simulation": {"head": "simulate", "sublist": 7}, "sole": {"head": "sole", "sublist": 7}, "solely": {"head": "sole", "sublist": 7}, "somewhat": {"head": "somewhat", "sublist": 7}, "submit": {"head": "submit", "sublist": 7}, "submission": {"head": "submit", "sublist": 7}, "submissions": {"head": "submit", "sublist": 7}, "submits": {"head": "submit", "sublist": 7}, "submitted": {"head": "submit", "sublist": 7}, "submitting": {"head": "submit", "sublist": 7}, "successor": {"head": "successor", "sublist": 7}, "succession": {"head": "successor", 
"sublist": 7}, "successions": {"head": "successor", "sublist": 7}, "successive": {"head": "successor", "sublist": 7}, "successively": {"head": "successor", "sublist": 7}, "successors": {"head": "successor", "sublist": 7}, "survive": {"head": "survive", "sublist": 7}, "survival": {"head": "survive", "sublist": 7}, "survived": {"head": "survive", "sublist": 7}, "survives": {"head": "survive", "sublist": 7}, "surviving": {"head": "survive", "sublist": 7}, "survivor": {"head": "survive", "sublist": 7}, "survivors": {"head": "survive", "sublist": 7}, "thesis": {"head": "thesis", "sublist": 7}, "theses": {"head": "thesis", "sublist": 7}, "topic": {"head": "topic", "sublist": 7}, "topical": {"head": "topic", "sublist": 7}, "topics": {"head": "topic", "sublist": 7}, "transmit": {"head": "transmit", "sublist": 7}, "transmission": {"head": "transmit", "sublist": 7}, "transmissions": {"head": "transmit", "sublist": 7}, "transmitted": {"head": "transmit", "sublist": 7}, "transmitting": {"head": "transmit", "sublist": 7}, "transmits": {"head": "transmit", "sublist": 7}, "ultimate": {"head": "ultimate", "sublist": 7}, "ultimately": {"head": "ultimate", "sublist": 7}, "unique": {"head": "unique", "sublist": 7}, "uniquely": {"head": "unique", "sublist": 7}, "uniqueness": {"head": "unique", "sublist": 7}, "visible": {"head": "visible", "sublist": 7}, "visibility": {"head": "visible", "sublist": 7}, "visibly": {"head": "visible", "sublist": 7}, "invisible": {"head": "visible", "sublist": 7}, "invisibility": {"head": "visible", "sublist": 7}, "voluntary": {"head": "voluntary", "sublist": 7}, "voluntarily": {"head": "voluntary", "sublist": 7}, "volunteer": {"head": "voluntary", "sublist": 7}, "volunteering": {"head": "voluntary", "sublist": 7}, "volunteered": {"head": "voluntary", "sublist": 7}, "volunteers": {"head": "voluntary", "sublist": 7}, "access": {"head": "access", "sublist": 4}, "accessed": {"head": "access", "sublist": 4}, "accesses": {"head": "access", "sublist": 4}, 
"accessibility": {"head": "access", "sublist": 4}, "accessible": {"head": "access", "sublist": 4}, "accessing": {"head": "access", "sublist": 4}, "inaccessible": {"head": "access", "sublist": 4}, "adequate": {"head": "adequate", "sublist": 4}, "adequacy": {"head": "adequate", "sublist": 4}, "adequately": {"head": "adequate", "sublist": 4}, "inadequacies": {"head": "adequate", "sublist": 4}, "inadequacy": {"head": "adequate", "sublist": 4}, "inadequate": {"head": "adequate", "sublist": 4}, "inadequately": {"head": "adequate", "sublist": 4}, "annual": {"head": "annual", "sublist": 4}, "annually": {"head": "annual", "sublist": 4}, "apparent": {"head": "apparent", "sublist": 4}, "apparently": {"head": "apparent", "sublist": 4}, "approximate": {"head": "approximate", "sublist": 4}, "approximated": {"head": "approximate", "sublist": 4}, "approximately": {"head": "approximate", "sublist": 4}, "approximates": {"head": "approximate", "sublist": 4}, "approximating": {"head": "approximate", "sublist": 4}, "approximation": {"head": "approximate", "sublist": 4}, "approximations": {"head": "approximate", "sublist": 4}, "attitude": {"head": "attitude", "sublist": 4}, "attitudes": {"head": "attitude", "sublist": 4}, "attribute": {"head": "attribute", "sublist": 4}, "attributable": {"head": "attribute", "sublist": 4}, "attributed": {"head": "attribute", "sublist": 4}, "attributes": {"head": "attribute", "sublist": 4}, "attributing": {"head": "attribute", "sublist": 4}, "attribution": {"head": "attribute", "sublist": 4}, "civil": {"head": "civil", "sublist": 4}, "code": {"head": "code", "sublist": 4}, "coded": {"head": "code", "sublist": 4}, "codes": {"head": "code", "sublist": 4}, "coding": {"head": "code", "sublist": 4}, "commit": {"head": "commit", "sublist": 4}, "commitment": {"head": "commit", "sublist": 4}, "commitments": {"head": "commit", "sublist": 4}, "commits": {"head": "commit", "sublist": 4}, "committed": {"head": "commit", "sublist": 4}, "committing": {"head": 
"commit", "sublist": 4}, "communicate": {"head": "communicate", "sublist": 4}, "communicable": {"head": "communicate", "sublist": 4}, "communicated": {"head": "communicate", "sublist": 4}, "communicates": {"head": "communicate", "sublist": 4}, "communicating": {"head": "communicate", "sublist": 4}, "communication": {"head": "communicate", "sublist": 4}, "communications": {"head": "communicate", "sublist": 4}, "communicative": {"head": "communicate", "sublist": 4}, "communicatively": {"head": "communicate", "sublist": 4}, "uncommunicative": {"head": "communicate", "sublist": 4}, "concentrate": {"head": "concentrate", "sublist": 4}, "concentrated": {"head": "concentrate", "sublist": 4}, "concentrates": {"head": "concentrate", "sublist": 4}, "concentrating": {"head": "concentrate", "sublist": 4}, "concentration": {"head": "concentrate", "sublist": 4}, "confer": {"head": "confer", "sublist": 4}, "conference": {"head": "confer", "sublist": 4}, "conferences": {"head": "confer", "sublist": 4}, "conferred": {"head": "confer", "sublist": 4}, "conferring": {"head": "confer", "sublist": 4}, "confers": {"head": "confer", "sublist": 4}, "contrast": {"head": "contrast", "sublist": 4}, "contrasted": {"head": "contrast", "sublist": 4}, "contrasting": {"head": "contrast", "sublist": 4}, "contrastive": {"head": "contrast", "sublist": 4}, "contrasts": {"head": "contrast", "sublist": 4}, "cycle": {"head": "cycle", "sublist": 4}, "cycled": {"head": "cycle", "sublist": 4}, "cycles": {"head": "cycle", "sublist": 4}, "cyclic": {"head": "cycle", "sublist": 4}, "cyclical": {"head": "cycle", "sublist": 4}, "cycling": {"head": "cycle", "sublist": 4}, "debate": {"head": "debate", "sublist": 4}, "debatable": {"head": "debate", "sublist": 4}, "debated": {"head": "debate", "sublist": 4}, "debates": {"head": "debate", "sublist": 4}, "debating": {"head": "debate", "sublist": 4}, "despite": {"head": "despite", "sublist": 4}, "dimension": {"head": "dimension", "sublist": 4}, "dimensional": {"head": 
"dimension", "sublist": 4}, "dimensions": {"head": "dimension", "sublist": 4}, "multidimensional": {"head": "dimension", "sublist": 4}, "domestic": {"head": "domestic", "sublist": 4}, "domestically": {"head": "domestic", "sublist": 4}, "domesticate": {"head": "domestic", "sublist": 4}, "domesticated": {"head": "domestic", "sublist": 4}, "domesticating": {"head": "domestic", "sublist": 4}, "domestics": {"head": "domestic", "sublist": 4}, "emerge": {"head": "emerge", "sublist": 4}, "emerged": {"head": "emerge", "sublist": 4}, "emergence": {"head": "emerge", "sublist": 4}, "emergent": {"head": "emerge", "sublist": 4}, "emerges": {"head": "emerge", "sublist": 4}, "emerging": {"head": "emerge", "sublist": 4}, "error": {"head": "error", "sublist": 4}, "erroneous": {"head": "error", "sublist": 4}, "erroneously": {"head": "error", "sublist": 4}, "errors": {"head": "error", "sublist": 4}, "ethnic": {"head": "ethnic", "sublist": 4}, "ethnicity": {"head": "ethnic", "sublist": 4}, "goal": {"head": "goal", "sublist": 4}, "goals": {"head": "goal", "sublist": 4}, "grant": {"head": "grant", "sublist": 4}, "granted": {"head": "grant", "sublist": 4}, "granting": {"head": "grant", "sublist": 4}, "grants": {"head": "grant", "sublist": 4}, "hence": {"head": "hence", "sublist": 4}, "hypothesis": {"head": "hypothesis", "sublist": 4}, "hypotheses": {"head": "hypothesis", "sublist": 4}, "hypothesise": {"head": "hypothesis", "sublist": 4}, "hypothesised": {"head": "hypothesis", "sublist": 4}, "hypothesises": {"head": "hypothesis", "sublist": 4}, "hypothesising": {"head": "hypothesis", "sublist": 4}, "hypothesize": {"head": "hypothesis", "sublist": 4}, "hypothesized": {"head": "hypothesis", "sublist": 4}, "hypothesizes": {"head": "hypothesis", "sublist": 4}, "hypothesizing": {"head": "hypothesis", "sublist": 4}, "hypothetical": {"head": "hypothesis", "sublist": 4}, "hypothetically": {"head": "hypothesis", "sublist": 4}, "implement": {"head": "implement", "sublist": 4}, "implementation": 
{"head": "implement", "sublist": 4}, "implemented": {"head": "implement", "sublist": 4}, "implementing": {"head": "implement", "sublist": 4}, "implements": {"head": "implement", "sublist": 4}, "implicate": {"head": "implicate", "sublist": 4}, "implicated": {"head": "implicate", "sublist": 4}, "implicates": {"head": "implicate", "sublist": 4}, "implicating": {"head": "implicate", "sublist": 4}, "implication": {"head": "implicate", "sublist": 4}, "implications": {"head": "implicate", "sublist": 4}, "impose": {"head": "impose", "sublist": 4}, "imposed": {"head": "impose", "sublist": 4}, "imposes": {"head": "impose", "sublist": 4}, "imposing": {"head": "impose", "sublist": 4}, "imposition": {"head": "impose", "sublist": 4}, "integrate": {"head": "integrate", "sublist": 4}, "integrated": {"head": "integrate", "sublist": 4}, "integrates": {"head": "integrate", "sublist": 4}, "integrating": {"head": "integrate", "sublist": 4}, "integration": {"head": "integrate", "sublist": 4}, "internal": {"head": "internal", "sublist": 4}, "internalise": {"head": "internal", "sublist": 4}, "internalised": {"head": "internal", "sublist": 4}, "internalises": {"head": "internal", "sublist": 4}, "internalising": {"head": "internal", "sublist": 4}, "internalize": {"head": "internal", "sublist": 4}, "internalized": {"head": "internal", "sublist": 4}, "internalizes": {"head": "internal", "sublist": 4}, "internalizing": {"head": "internal", "sublist": 4}, "internally": {"head": "internal", "sublist": 4}, "investigate": {"head": "investigate", "sublist": 4}, "investigated": {"head": "investigate", "sublist": 4}, "investigates": {"head": "investigate", "sublist": 4}, "investigating": {"head": "investigate", "sublist": 4}, "investigation": {"head": "investigate", "sublist": 4}, "investigations": {"head": "investigate", "sublist": 4}, "investigative": {"head": "investigate", "sublist": 4}, "investigator": {"head": "investigate", "sublist": 4}, "investigators": {"head": "investigate", "sublist": 4}, 
"job": {"head": "job", "sublist": 4}, "jobs": {"head": "job", "sublist": 4}, "label": {"head": "label", "sublist": 4}, "labeled": {"head": "label", "sublist": 4}, "labeling": {"head": "label", "sublist": 4}, "labelled": {"head": "label", "sublist": 4}, "labelling": {"head": "label", "sublist": 4}, "labels": {"head": "label", "sublist": 4}, "mechanism": {"head": "mechanism", "sublist": 4}, "mechanisms": {"head": "mechanism", "sublist": 4}, "obvious": {"head": "obvious", "sublist": 4}, "obviously": {"head": "obvious", "sublist": 4}, "occupy": {"head": "occupy", "sublist": 4}, "occupancy": {"head": "occupy", "sublist": 4}, "occupant": {"head": "occupy", "sublist": 4}, "occupants": {"head": "occupy", "sublist": 4}, "occupation": {"head": "occupy", "sublist": 4}, "occupational": {"head": "occupy", "sublist": 4}, "occupations": {"head": "occupy", "sublist": 4}, "occupied": {"head": "occupy", "sublist": 4}, "occupier": {"head": "occupy", "sublist": 4}, "occupiers": {"head": "occupy", "sublist": 4}, "occupies": {"head": "occupy", "sublist": 4}, "occupying": {"head": "occupy", "sublist": 4}, "option": {"head": "option", "sublist": 4}, "optional": {"head": "option", "sublist": 4}, "options": {"head": "option", "sublist": 4}, "output": {"head": "output", "sublist": 4}, "outputs": {"head": "output", "sublist": 4}, "overall": {"head": "overall", "sublist": 4}, "parallel": {"head": "parallel", "sublist": 4}, "paralleled": {"head": "parallel", "sublist": 4}, "parallelled": {"head": "parallel", "sublist": 4}, "parallelling": {"head": "parallel", "sublist": 4}, "parallels": {"head": "parallel", "sublist": 4}, "unparalleled": {"head": "parallel", "sublist": 4}, "parameter": {"head": "parameter", "sublist": 4}, "parameters": {"head": "parameter", "sublist": 4}, "phase": {"head": "phase", "sublist": 4}, "phased": {"head": "phase", "sublist": 4}, "phases": {"head": "phase", "sublist": 4}, "phasing": {"head": "phase", "sublist": 4}, "predict": {"head": "predict", "sublist": 4}, 
"predictability": {"head": "predict", "sublist": 4}, "predictable": {"head": "predict", "sublist": 4}, "predictably": {"head": "predict", "sublist": 4}, "predicted": {"head": "predict", "sublist": 4}, "predicting": {"head": "predict", "sublist": 4}, "prediction": {"head": "predict", "sublist": 4}, "predictions": {"head": "predict", "sublist": 4}, "predicts": {"head": "predict", "sublist": 4}, "unpredictability": {"head": "predict", "sublist": 4}, "unpredictable": {"head": "predict", "sublist": 4}, "principal": {"head": "principal", "sublist": 4}, "principally": {"head": "principal", "sublist": 4}, "prior": {"head": "prior", "sublist": 4}, "professional": {"head": "professional", "sublist": 4}, "professionally": {"head": "professional", "sublist": 4}, "professionals": {"head": "professional", "sublist": 4}, "professionalism": {"head": "professional", "sublist": 4}, "project": {"head": "project", "sublist": 4}, "projected": {"head": "project", "sublist": 4}, "projecting": {"head": "project", "sublist": 4}, "projection": {"head": "project", "sublist": 4}, "projections": {"head": "project", "sublist": 4}, "projects": {"head": "project", "sublist": 4}, "promote": {"head": "promote", "sublist": 4}, "promoted": {"head": "promote", "sublist": 4}, "promoter": {"head": "promote", "sublist": 4}, "promoters": {"head": "promote", "sublist": 4}, "promotes": {"head": "promote", "sublist": 4}, "promoting": {"head": "promote", "sublist": 4}, "promotion": {"head": "promote", "sublist": 4}, "promotions": {"head": "promote", "sublist": 4}, "regime": {"head": "regime", "sublist": 4}, "regimes": {"head": "regime", "sublist": 4}, "resolve": {"head": "resolve", "sublist": 4}, "resolution": {"head": "resolve", "sublist": 4}, "resolved": {"head": "resolve", "sublist": 4}, "resolves": {"head": "resolve", "sublist": 4}, "resolving": {"head": "resolve", "sublist": 4}, "unresolved": {"head": "resolve", "sublist": 4}, "retain": {"head": "retain", "sublist": 4}, "retained": {"head": "retain", 
"sublist": 4}, "retaining": {"head": "retain", "sublist": 4}, "retainer": {"head": "retain", "sublist": 4}, "retainers": {"head": "retain", "sublist": 4}, "retains": {"head": "retain", "sublist": 4}, "retention": {"head": "retain", "sublist": 4}, "retentive": {"head": "retain", "sublist": 4}, "series": {"head": "series", "sublist": 4}, "statistic": {"head": "statistic", "sublist": 4}, "statistician": {"head": "statistic", "sublist": 4}, "statisticians": {"head": "statistic", "sublist": 4}, "statistical": {"head": "statistic", "sublist": 4}, "statistically": {"head": "statistic", "sublist": 4}, "statistics": {"head": "statistic", "sublist": 4}, "status": {"head": "status", "sublist": 4}, "stress": {"head": "stress", "sublist": 4}, "stressed": {"head": "stress", "sublist": 4}, "stresses": {"head": "stress", "sublist": 4}, "stressful": {"head": "stress", "sublist": 4}, "stressing": {"head": "stress", "sublist": 4}, "unstressed": {"head": "stress", "sublist": 4}, "subsequent": {"head": "subsequent", "sublist": 4}, "subsequently": {"head": "subsequent", "sublist": 4}, "sum": {"head": "sum", "sublist": 4}, "summation": {"head": "sum", "sublist": 4}, "summed": {"head": "sum", "sublist": 4}, "summing": {"head": "sum", "sublist": 4}, "sums": {"head": "sum", "sublist": 4}, "summary": {"head": "summary", "sublist": 4}, "summaries": {"head": "summary", "sublist": 4}, "summarise": {"head": "summary", "sublist": 4}, "summarised": {"head": "summary", "sublist": 4}, "summarises": {"head": "summary", "sublist": 4}, "summarising": {"head": "summary", "sublist": 4}, "summarisation": {"head": "summary", "sublist": 4}, "summarisations": {"head": "summary", "sublist": 4}, "summarization": {"head": "summary", "sublist": 4}, "summarizations": {"head": "summary", "sublist": 4}, "summarize": {"head": "summary", "sublist": 4}, "summarized": {"head": "summary", "sublist": 4}, "summarizes": {"head": "summary", "sublist": 4}, "summarizing": {"head": "summary", "sublist": 4}, "undertake": 
{"head": "undertake", "sublist": 4}, "undertaken": {"head": "undertake", "sublist": 4}, "undertakes": {"head": "undertake", "sublist": 4}, "undertaking": {"head": "undertake", "sublist": 4}, "undertook": {"head": "undertake", "sublist": 4}, "analyse": {"head": "analyse", "sublist": 1}, "analysed": {"head": "analyse", "sublist": 1}, "analyser": {"head": "analyse", "sublist": 1}, "analysing": {"head": "analyse", "sublist": 1}, "analysis": {"head": "analyse", "sublist": 1}, "analyst": {"head": "analyse", "sublist": 1}, "analytic": {"head": "analyse", "sublist": 1}, "analytical": {"head": "analyse", "sublist": 1}, "analytically": {"head": "analyse", "sublist": 1}, "analyze": {"head": "analyse", "sublist": 1}, "analyzed": {"head": "analyse", "sublist": 1}, "analyzing": {"head": "analyse", "sublist": 1}, "approach": {"head": "approach", "sublist": 1}, "approachable": {"head": "approach", "sublist": 1}, "approached": {"head": "approach", "sublist": 1}, "approaches": {"head": "approach", "sublist": 1}, "approaching": {"head": "approach", "sublist": 1}, "unapproachable": {"head": "approach", "sublist": 1}, "area": {"head": "area", "sublist": 1}, "areas": {"head": "area", "sublist": 1}, "assess": {"head": "assess", "sublist": 1}, "assessable": {"head": "assess", "sublist": 1}, "assessed": {"head": "assess", "sublist": 1}, "assesses": {"head": "assess", "sublist": 1}, "assessing": {"head": "assess", "sublist": 1}, "assessment": {"head": "assess", "sublist": 1}, "assessments": {"head": "assess", "sublist": 1}, "reassess": {"head": "assess", "sublist": 1}, "reassessed": {"head": "assess", "sublist": 1}, "reassessing": {"head": "assess", "sublist": 1}, "reassessment": {"head": "assess", "sublist": 1}, "unassessed": {"head": "assess", "sublist": 1}, "assume": {"head": "assume", "sublist": 1}, "assumed": {"head": "assume", "sublist": 1}, "assumes": {"head": "assume", "sublist": 1}, "assuming": {"head": "assume", "sublist": 1}, "assumption": {"head": "assume", "sublist": 1}, 
"assumptions": {"head": "assume", "sublist": 1}, "authority": {"head": "authority", "sublist": 1}, "authoritative": {"head": "authority", "sublist": 1}, "authorities": {"head": "authority", "sublist": 1}, "available": {"head": "available", "sublist": 1}, "availability": {"head": "available", "sublist": 1}, "unavailable": {"head": "available", "sublist": 1}, "benefit": {"head": "benefit", "sublist": 1}, "beneficial": {"head": "benefit", "sublist": 1}, "beneficiary": {"head": "benefit", "sublist": 1}, "beneficiaries": {"head": "benefit", "sublist": 1}, "benefited": {"head": "benefit", "sublist": 1}, "benefiting": {"head": "benefit", "sublist": 1}, "benefits": {"head": "benefit", "sublist": 1}, "concept": {"head": "concept", "sublist": 1}, "conception": {"head": "concept", "sublist": 1}, "concepts": {"head": "concept", "sublist": 1}, "conceptual": {"head": "concept", "sublist": 1}, "conceptualisation": {"head": "concept", "sublist": 1}, "conceptualise": {"head": "concept", "sublist": 1}, "conceptualised": {"head": "concept", "sublist": 1}, "conceptualises": {"head": "concept", "sublist": 1}, "conceptualising": {"head": "concept", "sublist": 1}, "conceptually": {"head": "concept", "sublist": 1}, "consist": {"head": "consist", "sublist": 1}, "consisted": {"head": "consist", "sublist": 1}, "consistency": {"head": "consist", "sublist": 1}, "consistent": {"head": "consist", "sublist": 1}, "consistently": {"head": "consist", "sublist": 1}, "consisting": {"head": "consist", "sublist": 1}, "consists": {"head": "consist", "sublist": 1}, "inconsistencies": {"head": "consist", "sublist": 1}, "inconsistency": {"head": "consist", "sublist": 1}, "inconsistent": {"head": "consist", "sublist": 1}, "constitute": {"head": "constitute", "sublist": 1}, "constituencies": {"head": "constitute", "sublist": 1}, "constituency": {"head": "constitute", "sublist": 1}, "constituent": {"head": "constitute", "sublist": 1}, "constituents": {"head": "constitute", "sublist": 1}, "constituted": 
{"head": "constitute", "sublist": 1}, "constitutes": {"head": "constitute", "sublist": 1}, "constituting": {"head": "constitute", "sublist": 1}, "constitution": {"head": "constitute", "sublist": 1}, "constitutions": {"head": "constitute", "sublist": 1}, "constitutional": {"head": "constitute", "sublist": 1}, "constitutionally": {"head": "constitute", "sublist": 1}, "constitutive": {"head": "constitute", "sublist": 1}, "unconstitutional": {"head": "constitute", "sublist": 1}, "context": {"head": "context", "sublist": 1}, "contexts": {"head": "context", "sublist": 1}, "contextual": {"head": "context", "sublist": 1}, "contextualise": {"head": "context", "sublist": 1}, "contextualised": {"head": "context", "sublist": 1}, "contextualising": {"head": "context", "sublist": 1}, "uncontextualised": {"head": "context", "sublist": 1}, "contextualize": {"head": "context", "sublist": 1}, "contextualized": {"head": "context", "sublist": 1}, "contextualizing": {"head": "context", "sublist": 1}, "uncontextualized": {"head": "context", "sublist": 1}, "contract": {"head": "contract", "sublist": 1}, "contracted": {"head": "contract", "sublist": 1}, "contracting": {"head": "contract", "sublist": 1}, "contractor": {"head": "contract", "sublist": 1}, "contractors": {"head": "contract", "sublist": 1}, "contracts": {"head": "contract", "sublist": 1}, "create": {"head": "create", "sublist": 1}, "created": {"head": "create", "sublist": 1}, "creates": {"head": "create", "sublist": 1}, "creating": {"head": "create", "sublist": 1}, "creation": {"head": "create", "sublist": 1}, "creations": {"head": "create", "sublist": 1}, "creative": {"head": "create", "sublist": 1}, "creatively": {"head": "create", "sublist": 1}, "creativity": {"head": "create", "sublist": 1}, "creator": {"head": "create", "sublist": 1}, "creators": {"head": "create", "sublist": 1}, "recreate": {"head": "create", "sublist": 1}, "recreated": {"head": "create", "sublist": 1}, "recreates": {"head": "create", "sublist": 1}, 
"recreating": {"head": "create", "sublist": 1}, "data": {"head": "data", "sublist": 1}, "define": {"head": "define", "sublist": 1}, "definable": {"head": "define", "sublist": 1}, "defined": {"head": "define", "sublist": 1}, "defines": {"head": "define", "sublist": 1}, "defining": {"head": "define", "sublist": 1}, "definition": {"head": "define", "sublist": 1}, "definitions": {"head": "define", "sublist": 1}, "redefine": {"head": "define", "sublist": 1}, "redefined": {"head": "define", "sublist": 1}, "redefines": {"head": "define", "sublist": 1}, "redefining": {"head": "define", "sublist": 1}, "undefined": {"head": "define", "sublist": 1}, "derive": {"head": "derive", "sublist": 1}, "derivation": {"head": "derive", "sublist": 1}, "derivations": {"head": "derive", "sublist": 1}, "derivative": {"head": "derive", "sublist": 1}, "derivatives": {"head": "derive", "sublist": 1}, "derived": {"head": "derive", "sublist": 1}, "derives": {"head": "derive", "sublist": 1}, "deriving": {"head": "derive", "sublist": 1}, "distribute": {"head": "distribute", "sublist": 1}, "distributed": {"head": "distribute", "sublist": 1}, "distributing": {"head": "distribute", "sublist": 1}, "distribution": {"head": "distribute", "sublist": 1}, "distributional": {"head": "distribute", "sublist": 1}, "distributions": {"head": "distribute", "sublist": 1}, "distributive": {"head": "distribute", "sublist": 1}, "distributor": {"head": "distribute", "sublist": 1}, "distributors": {"head": "distribute", "sublist": 1}, "redistribute": {"head": "distribute", "sublist": 1}, "redistributed": {"head": "distribute", "sublist": 1}, "redistributes": {"head": "distribute", "sublist": 1}, "redistributing": {"head": "distribute", "sublist": 1}, "redistribution": {"head": "distribute", "sublist": 1}, "economy": {"head": "economy", "sublist": 1}, "economic": {"head": "economy", "sublist": 1}, "economical": {"head": "economy", "sublist": 1}, "economically": {"head": "economy", "sublist": 1}, "economics": {"head": 
"economy", "sublist": 1}, "economies": {"head": "economy", "sublist": 1}, "economist": {"head": "economy", "sublist": 1}, "economists": {"head": "economy", "sublist": 1}, "uneconomical": {"head": "economy", "sublist": 1}, "environment": {"head": "environment", "sublist": 1}, "environmental": {"head": "environment", "sublist": 1}, "environmentalist": {"head": "environment", "sublist": 1}, "environmentalists": {"head": "environment", "sublist": 1}, "environmentally": {"head": "environment", "sublist": 1}, "environments": {"head": "environment", "sublist": 1}, "establish": {"head": "establish", "sublist": 1}, "disestablish": {"head": "establish", "sublist": 1}, "disestablished": {"head": "establish", "sublist": 1}, "disestablishes": {"head": "establish", "sublist": 1}, "disestablishing": {"head": "establish", "sublist": 1}, "disestablishment": {"head": "establish", "sublist": 1}, "established": {"head": "establish", "sublist": 1}, "establishes": {"head": "establish", "sublist": 1}, "establishing": {"head": "establish", "sublist": 1}, "establishment": {"head": "establish", "sublist": 1}, "establishments": {"head": "establish", "sublist": 1}, "estimate": {"head": "estimate", "sublist": 1}, "estimated": {"head": "estimate", "sublist": 1}, "estimates": {"head": "estimate", "sublist": 1}, "estimating": {"head": "estimate", "sublist": 1}, "estimation": {"head": "estimate", "sublist": 1}, "estimations": {"head": "estimate", "sublist": 1}, "over-estimate": {"head": "estimate", "sublist": 1}, "overestimate": {"head": "estimate", "sublist": 1}, "overestimated": {"head": "estimate", "sublist": 1}, "overestimates": {"head": "estimate", "sublist": 1}, "overestimating": {"head": "estimate", "sublist": 1}, "underestimate": {"head": "estimate", "sublist": 1}, "underestimated": {"head": "estimate", "sublist": 1}, "underestimates": {"head": "estimate", "sublist": 1}, "underestimating": {"head": "estimate", "sublist": 1}, "evident": {"head": "evident", "sublist": 1}, "evidenced": 
{"head": "evident", "sublist": 1}, "evidence": {"head": "evident", "sublist": 1}, "evidential": {"head": "evident", "sublist": 1}, "evidently": {"head": "evident", "sublist": 1}, "export": {"head": "export", "sublist": 1}, "exported": {"head": "export", "sublist": 1}, "exporter": {"head": "export", "sublist": 1}, "exporters": {"head": "export", "sublist": 1}, "exporting": {"head": "export", "sublist": 1}, "exports": {"head": "export", "sublist": 1}, "factor": {"head": "factor", "sublist": 1}, "factored": {"head": "factor", "sublist": 1}, "factoring": {"head": "factor", "sublist": 1}, "factors": {"head": "factor", "sublist": 1}, "finance": {"head": "finance", "sublist": 1}, "financed": {"head": "finance", "sublist": 1}, "finances": {"head": "finance", "sublist": 1}, "financial": {"head": "finance", "sublist": 1}, "financially": {"head": "finance", "sublist": 1}, "financier": {"head": "finance", "sublist": 1}, "financiers": {"head": "finance", "sublist": 1}, "financing": {"head": "finance", "sublist": 1}, "formula": {"head": "formula", "sublist": 1}, "formulae": {"head": "formula", "sublist": 1}, "formulas": {"head": "formula", "sublist": 1}, "formulate": {"head": "formula", "sublist": 1}, "formulated": {"head": "formula", "sublist": 1}, "formulating": {"head": "formula", "sublist": 1}, "formulation": {"head": "formula", "sublist": 1}, "formulations": {"head": "formula", "sublist": 1}, "reformulate": {"head": "formula", "sublist": 1}, "reformulated": {"head": "formula", "sublist": 1}, "reformulating": {"head": "formula", "sublist": 1}, "reformulation": {"head": "formula", "sublist": 1}, "reformulations": {"head": "formula", "sublist": 1}, "function": {"head": "function", "sublist": 1}, "functional": {"head": "function", "sublist": 1}, "functionally": {"head": "function", "sublist": 1}, "functioned": {"head": "function", "sublist": 1}, "functioning": {"head": "function", "sublist": 1}, "functions": {"head": "function", "sublist": 1}, "identify": {"head": "identify", 
"sublist": 1}, "identifiable": {"head": "identify", "sublist": 1}, "identification": {"head": "identify", "sublist": 1}, "identified": {"head": "identify", "sublist": 1}, "identifies": {"head": "identify", "sublist": 1}, "identifying": {"head": "identify", "sublist": 1}, "identities": {"head": "identify", "sublist": 1}, "identity": {"head": "identify", "sublist": 1}, "unidentifiable": {"head": "identify", "sublist": 1}, "income": {"head": "income", "sublist": 1}, "incomes": {"head": "income", "sublist": 1}, "indicate": {"head": "indicate", "sublist": 1}, "indicated": {"head": "indicate", "sublist": 1}, "indicates": {"head": "indicate", "sublist": 1}, "indicating": {"head": "indicate", "sublist": 1}, "indication": {"head": "indicate", "sublist": 1}, "indications": {"head": "indicate", "sublist": 1}, "indicative": {"head": "indicate", "sublist": 1}, "indicator": {"head": "indicate", "sublist": 1}, "indicators": {"head": "indicate", "sublist": 1}, "individual": {"head": "individual", "sublist": 1}, "individualised": {"head": "individual", "sublist": 1}, "individuality": {"head": "individual", "sublist": 1}, "individualism": {"head": "individual", "sublist": 1}, "individualist": {"head": "individual", "sublist": 1}, "individualists": {"head": "individual", "sublist": 1}, "individualistic": {"head": "individual", "sublist": 1}, "individually": {"head": "individual", "sublist": 1}, "individuals": {"head": "individual", "sublist": 1}, "interpret": {"head": "interpret", "sublist": 1}, "interpretation": {"head": "interpret", "sublist": 1}, "interpretations": {"head": "interpret", "sublist": 1}, "interpretative": {"head": "interpret", "sublist": 1}, "interpreted": {"head": "interpret", "sublist": 1}, "interpreting": {"head": "interpret", "sublist": 1}, "interpretive": {"head": "interpret", "sublist": 1}, "interprets": {"head": "interpret", "sublist": 1}, "misinterpret": {"head": "interpret", "sublist": 1}, "misinterpretation": {"head": "interpret", "sublist": 1}, 
"misinterpretations": {"head": "interpret", "sublist": 1}, "misinterpreted": {"head": "interpret", "sublist": 1}, "misinterpreting": {"head": "interpret", "sublist": 1}, "misinterprets": {"head": "interpret", "sublist": 1}, "reinterpret": {"head": "interpret", "sublist": 1}, "reinterpreted": {"head": "interpret", "sublist": 1}, "reinterprets": {"head": "interpret", "sublist": 1}, "reinterpreting": {"head": "interpret", "sublist": 1}, "reinterpretation": {"head": "interpret", "sublist": 1}, "reinterpretations": {"head": "interpret", "sublist": 1}, "involve": {"head": "involve", "sublist": 1}, "involved": {"head": "involve", "sublist": 1}, "involvement": {"head": "involve", "sublist": 1}, "involves": {"head": "involve", "sublist": 1}, "involving": {"head": "involve", "sublist": 1}, "uninvolved": {"head": "involve", "sublist": 1}, "issue": {"head": "issue", "sublist": 1}, "issued": {"head": "issue", "sublist": 1}, "issues": {"head": "issue", "sublist": 1}, "issuing": {"head": "issue", "sublist": 1}, "labour": {"head": "labour", "sublist": 1}, "labor": {"head": "labour", "sublist": 1}, "labored": {"head": "labour", "sublist": 1}, "labors": {"head": "labour", "sublist": 1}, "laboured": {"head": "labour", "sublist": 1}, "labouring": {"head": "labour", "sublist": 1}, "labours": {"head": "labour", "sublist": 1}, "legal": {"head": "legal", "sublist": 1}, "illegal": {"head": "legal", "sublist": 1}, "illegality": {"head": "legal", "sublist": 1}, "illegally": {"head": "legal", "sublist": 1}, "legality": {"head": "legal", "sublist": 1}, "legally": {"head": "legal", "sublist": 1}, "legislate": {"head": "legislate", "sublist": 1}, "legislated": {"head": "legislate", "sublist": 1}, "legislates": {"head": "legislate", "sublist": 1}, "legislating": {"head": "legislate", "sublist": 1}, "legislation": {"head": "legislate", "sublist": 1}, "legislative": {"head": "legislate", "sublist": 1}, "legislator": {"head": "legislate", "sublist": 1}, "legislators": {"head": "legislate", 
"sublist": 1}, "legislature": {"head": "legislate", "sublist": 1}, "major": {"head": "major", "sublist": 1}, "majorities": {"head": "major", "sublist": 1}, "majority": {"head": "major", "sublist": 1}, "method": {"head": "method", "sublist": 1}, "methodical": {"head": "method", "sublist": 1}, "methodological": {"head": "method", "sublist": 1}, "methodologies": {"head": "method", "sublist": 1}, "methodology": {"head": "method", "sublist": 1}, "methods": {"head": "method", "sublist": 1}, "occur": {"head": "occur", "sublist": 1}, "occurred": {"head": "occur", "sublist": 1}, "occurrence": {"head": "occur", "sublist": 1}, "occurrences": {"head": "occur", "sublist": 1}, "occurring": {"head": "occur", "sublist": 1}, "occurs": {"head": "occur", "sublist": 1}, "reoccur": {"head": "occur", "sublist": 1}, "reoccurred": {"head": "occur", "sublist": 1}, "reoccurring": {"head": "occur", "sublist": 1}, "reoccurs": {"head": "occur", "sublist": 1}, "percent": {"head": "percent", "sublist": 1}, "percentage": {"head": "percent", "sublist": 1}, "percentages": {"head": "percent", "sublist": 1}, "period": {"head": "period", "sublist": 1}, "periodic": {"head": "period", "sublist": 1}, "periodical": {"head": "period", "sublist": 1}, "periodically": {"head": "period", "sublist": 1}, "periodicals": {"head": "period", "sublist": 1}, "periods": {"head": "period", "sublist": 1}, "policy": {"head": "policy", "sublist": 1}, "policies": {"head": "policy", "sublist": 1}, "principle": {"head": "principle", "sublist": 1}, "principled": {"head": "principle", "sublist": 1}, "principles": {"head": "principle", "sublist": 1}, "unprincipled": {"head": "principle", "sublist": 1}, "proceed": {"head": "proceed", "sublist": 1}, "procedural": {"head": "proceed", "sublist": 1}, "procedure": {"head": "proceed", "sublist": 1}, "procedures": {"head": "proceed", "sublist": 1}, "proceeded": {"head": "proceed", "sublist": 1}, "proceeding": {"head": "proceed", "sublist": 1}, "proceedings": {"head": "proceed", 
"sublist": 1}, "proceeds": {"head": "proceed", "sublist": 1}, "process": {"head": "process", "sublist": 1}, "processed": {"head": "process", "sublist": 1}, "processes": {"head": "process", "sublist": 1}, "processing": {"head": "process", "sublist": 1}, "require": {"head": "require", "sublist": 1}, "required": {"head": "require", "sublist": 1}, "requirement": {"head": "require", "sublist": 1}, "requirements": {"head": "require", "sublist": 1}, "requires": {"head": "require", "sublist": 1}, "requiring": {"head": "require", "sublist": 1}, "research": {"head": "research", "sublist": 1}, "researched": {"head": "research", "sublist": 1}, "researcher": {"head": "research", "sublist": 1}, "researchers": {"head": "research", "sublist": 1}, "researches": {"head": "research", "sublist": 1}, "researching": {"head": "research", "sublist": 1}, "respond": {"head": "respond", "sublist": 1}, "responded": {"head": "respond", "sublist": 1}, "respondent": {"head": "respond", "sublist": 1}, "respondents": {"head": "respond", "sublist": 1}, "responding": {"head": "respond", "sublist": 1}, "responds": {"head": "respond", "sublist": 1}, "response": {"head": "respond", "sublist": 1}, "responses": {"head": "respond", "sublist": 1}, "responsive": {"head": "respond", "sublist": 1}, "responsiveness": {"head": "respond", "sublist": 1}, "unresponsive": {"head": "respond", "sublist": 1}, "role": {"head": "role", "sublist": 1}, "roles": {"head": "role", "sublist": 1}, "section": {"head": "section", "sublist": 1}, "sectioned": {"head": "section", "sublist": 1}, "sectioning": {"head": "section", "sublist": 1}, "sections": {"head": "section", "sublist": 1}, "sector": {"head": "sector", "sublist": 1}, "sectors": {"head": "sector", "sublist": 1}, "significant": {"head": "significant", "sublist": 1}, "insignificant": {"head": "significant", "sublist": 1}, "insignificantly": {"head": "significant", "sublist": 1}, "significance": {"head": "significant", "sublist": 1}, "significantly": {"head": 
"significant", "sublist": 1}, "signified": {"head": "significant", "sublist": 1}, "signifies": {"head": "significant", "sublist": 1}, "signify": {"head": "significant", "sublist": 1}, "signifying": {"head": "significant", "sublist": 1}, "similar": {"head": "similar", "sublist": 1}, "dissimilar": {"head": "similar", "sublist": 1}, "similarities": {"head": "similar", "sublist": 1}, "similarity": {"head": "similar", "sublist": 1}, "similarly": {"head": "similar", "sublist": 1}, "source": {"head": "source", "sublist": 1}, "sourced": {"head": "source", "sublist": 1}, "sources": {"head": "source", "sublist": 1}, "sourcing": {"head": "source", "sublist": 1}, "specific": {"head": "specific", "sublist": 1}, "specifically": {"head": "specific", "sublist": 1}, "specification": {"head": "specific", "sublist": 1}, "specifications": {"head": "specific", "sublist": 1}, "specificity": {"head": "specific", "sublist": 1}, "specifics": {"head": "specific", "sublist": 1}, "structure": {"head": "structure", "sublist": 1}, "restructure": {"head": "structure", "sublist": 1}, "restructured": {"head": "structure", "sublist": 1}, "restructures": {"head": "structure", "sublist": 1}, "restructuring": {"head": "structure", "sublist": 1}, "structural": {"head": "structure", "sublist": 1}, "structurally": {"head": "structure", "sublist": 1}, "structured": {"head": "structure", "sublist": 1}, "structures": {"head": "structure", "sublist": 1}, "structuring": {"head": "structure", "sublist": 1}, "unstructured": {"head": "structure", "sublist": 1}, "theory": {"head": "theory", "sublist": 1}, "theoretical": {"head": "theory", "sublist": 1}, "theoretically": {"head": "theory", "sublist": 1}, "theories": {"head": "theory", "sublist": 1}, "theorist": {"head": "theory", "sublist": 1}, "theorists": {"head": "theory", "sublist": 1}, "vary": {"head": "vary", "sublist": 1}, "invariable": {"head": "vary", "sublist": 1}, "invariably": {"head": "vary", "sublist": 1}, "variability": {"head": "vary", "sublist": 
1}, "variable": {"head": "vary", "sublist": 1}, "variables": {"head": "vary", "sublist": 1}, "variably": {"head": "vary", "sublist": 1}, "variance": {"head": "vary", "sublist": 1}, "variant": {"head": "vary", "sublist": 1}, "variants": {"head": "vary", "sublist": 1}, "variation": {"head": "vary", "sublist": 1}, "variations": {"head": "vary", "sublist": 1}, "varied": {"head": "vary", "sublist": 1}, "varies": {"head": "vary", "sublist": 1}, "varying": {"head": "vary", "sublist": 1}, "alternative": {"head": "alternative", "sublist": 3}, "alternatively": {"head": "alternative", "sublist": 3}, "alternatives": {"head": "alternative", "sublist": 3}, "circumstance": {"head": "circumstance", "sublist": 3}, "circumstances": {"head": "circumstance", "sublist": 3}, "comment": {"head": "comment", "sublist": 3}, "commentaries": {"head": "comment", "sublist": 3}, "commentary": {"head": "comment", "sublist": 3}, "commentator": {"head": "comment", "sublist": 3}, "commentators": {"head": "comment", "sublist": 3}, "commented": {"head": "comment", "sublist": 3}, "commenting": {"head": "comment", "sublist": 3}, "comments": {"head": "comment", "sublist": 3}, "compensate": {"head": "compensate", "sublist": 3}, "compensated": {"head": "compensate", "sublist": 3}, "compensates": {"head": "compensate", "sublist": 3}, "compensating": {"head": "compensate", "sublist": 3}, "compensation": {"head": "compensate", "sublist": 3}, "compensations": {"head": "compensate", "sublist": 3}, "compensatory": {"head": "compensate", "sublist": 3}, "component": {"head": "component", "sublist": 3}, "componentry": {"head": "component", "sublist": 3}, "components": {"head": "component", "sublist": 3}, "consent": {"head": "consent", "sublist": 3}, "consensus": {"head": "consent", "sublist": 3}, "consented": {"head": "consent", "sublist": 3}, "consenting": {"head": "consent", "sublist": 3}, "consents": {"head": "consent", "sublist": 3}, "considerable": {"head": "considerable", "sublist": 3}, "considerably": 
{"head": "considerable", "sublist": 3}, "constant": {"head": "constant", "sublist": 3}, "constancy": {"head": "constant", "sublist": 3}, "constantly": {"head": "constant", "sublist": 3}, "constants": {"head": "constant", "sublist": 3}, "inconstancy": {"head": "constant", "sublist": 3}, "inconstantly": {"head": "constant", "sublist": 3}, "constrain": {"head": "constrain", "sublist": 3}, "constrained": {"head": "constrain", "sublist": 3}, "constraining": {"head": "constrain", "sublist": 3}, "constrains": {"head": "constrain", "sublist": 3}, "constraint": {"head": "constrain", "sublist": 3}, "constraints": {"head": "constrain", "sublist": 3}, "unconstrained": {"head": "constrain", "sublist": 3}, "contribute": {"head": "contribute", "sublist": 3}, "contributed": {"head": "contribute", "sublist": 3}, "contributes": {"head": "contribute", "sublist": 3}, "contributing": {"head": "contribute", "sublist": 3}, "contribution": {"head": "contribute", "sublist": 3}, "contributions": {"head": "contribute", "sublist": 3}, "contributor": {"head": "contribute", "sublist": 3}, "contributors": {"head": "contribute", "sublist": 3}, "convene": {"head": "convene", "sublist": 3}, "convention": {"head": "convene", "sublist": 3}, "convenes": {"head": "convene", "sublist": 3}, "convened": {"head": "convene", "sublist": 3}, "convening": {"head": "convene", "sublist": 3}, "conventional": {"head": "convene", "sublist": 3}, "conventionally": {"head": "convene", "sublist": 3}, "conventions": {"head": "convene", "sublist": 3}, "unconventional": {"head": "convene", "sublist": 3}, "coordinate": {"head": "coordinate", "sublist": 3}, "coordinated": {"head": "coordinate", "sublist": 3}, "coordinates": {"head": "coordinate", "sublist": 3}, "coordinating": {"head": "coordinate", "sublist": 3}, "coordination": {"head": "coordinate", "sublist": 3}, "coordinator": {"head": "coordinate", "sublist": 3}, "coordinators": {"head": "coordinate", "sublist": 3}, "co-ordinate": {"head": "coordinate", "sublist": 3}, 
"co-ordinated": {"head": "coordinate", "sublist": 3}, "co-ordinates": {"head": "coordinate", "sublist": 3}, "co-ordinating": {"head": "coordinate", "sublist": 3}, "co-ordination": {"head": "coordinate", "sublist": 3}, "co-ordinator": {"head": "coordinate", "sublist": 3}, "co-ordinators": {"head": "coordinate", "sublist": 3}, "core": {"head": "core", "sublist": 3}, "cores": {"head": "core", "sublist": 3}, "coring": {"head": "core", "sublist": 3}, "cored": {"head": "core", "sublist": 3}, "corporate": {"head": "corporate", "sublist": 3}, "corporates": {"head": "corporate", "sublist": 3}, "corporation": {"head": "corporate", "sublist": 3}, "corporations": {"head": "corporate", "sublist": 3}, "correspond": {"head": "correspond", "sublist": 3}, "corresponded": {"head": "correspond", "sublist": 3}, "correspondence": {"head": "correspond", "sublist": 3}, "corresponding": {"head": "correspond", "sublist": 3}, "correspondingly": {"head": "correspond", "sublist": 3}, "corresponds": {"head": "correspond", "sublist": 3}, "criteria": {"head": "criteria", "sublist": 3}, "criterion": {"head": "criteria", "sublist": 3}, "deduce": {"head": "deduce", "sublist": 3}, "deduced": {"head": "deduce", "sublist": 3}, "deduces": {"head": "deduce", "sublist": 3}, "deducing": {"head": "deduce", "sublist": 3}, "deduction": {"head": "deduce", "sublist": 3}, "deductions": {"head": "deduce", "sublist": 3}, "demonstrate": {"head": "demonstrate", "sublist": 3}, "demonstrable": {"head": "demonstrate", "sublist": 3}, "demonstrably": {"head": "demonstrate", "sublist": 3}, "demonstrated": {"head": "demonstrate", "sublist": 3}, "demonstrates": {"head": "demonstrate", "sublist": 3}, "demonstrating": {"head": "demonstrate", "sublist": 3}, "demonstration": {"head": "demonstrate", "sublist": 3}, "demonstrations": {"head": "demonstrate", "sublist": 3}, "demonstrative": {"head": "demonstrate", "sublist": 3}, "demonstratively": {"head": "demonstrate", "sublist": 3}, "demonstrator": 
{"head": "demonstrate", "sublist": 3}, "demonstrators": {"head": "demonstrate", "sublist": 3}, "document": {"head": "document", "sublist": 3}, "documentation": {"head": "document", "sublist": 3}, "documented": {"head": "document", "sublist": 3}, "documenting": {"head": "document", "sublist": 3}, "documents": {"head": "document", "sublist": 3}, "dominate": {"head": "dominate", "sublist": 3}, "dominance": {"head": "dominate", "sublist": 3}, "dominant": {"head": "dominate", "sublist": 3}, "dominated": {"head": "dominate", "sublist": 3}, "dominates": {"head": "dominate", "sublist": 3}, "dominating": {"head": "dominate", "sublist": 3}, "domination": {"head": "dominate", "sublist": 3}, "emphasis": {"head": "emphasis", "sublist": 3}, "emphasise": {"head": "emphasis", "sublist": 3}, "emphasised": {"head": "emphasis", "sublist": 3}, "emphasising": {"head": "emphasis", "sublist": 3}, "emphasize": {"head": "emphasis", "sublist": 3}, "emphasized": {"head": "emphasis", "sublist": 3}, "emphasizes": {"head": "emphasis", "sublist": 3}, "emphasizing": {"head": "emphasis", "sublist": 3}, "emphatic": {"head": "emphasis", "sublist": 3}, "emphatically": {"head": "emphasis", "sublist": 3}, "ensure": {"head": "ensure", "sublist": 3}, "ensured": {"head": "ensure", "sublist": 3}, "ensures": {"head": "ensure", "sublist": 3}, "ensuring": {"head": "ensure", "sublist": 3}, "exclude": {"head": "exclude", "sublist": 3}, "excluded": {"head": "exclude", "sublist": 3}, "excludes": {"head": "exclude", "sublist": 3}, "excluding": {"head": "exclude", "sublist": 3}, "exclusion": {"head": "exclude", "sublist": 3}, "exclusionary": {"head": "exclude", "sublist": 3}, "exclusionist": {"head": "exclude", "sublist": 3}, "exclusions": {"head": "exclude", "sublist": 3}, "exclusive": {"head": "exclude", "sublist": 3}, "exclusively": {"head": "exclude", "sublist": 3}, "framework": {"head": "framework", "sublist": 3}, "frameworks": {"head": "framework", "sublist": 3}, "fund": {"head": "fund", "sublist": 3}, 
"funded": {"head": "fund", "sublist": 3}, "funder": {"head": "fund", "sublist": 3}, "funders": {"head": "fund", "sublist": 3}, "funding": {"head": "fund", "sublist": 3}, "funds": {"head": "fund", "sublist": 3}, "illustrate": {"head": "illustrate", "sublist": 3}, "illustrated": {"head": "illustrate", "sublist": 3}, "illustrates": {"head": "illustrate", "sublist": 3}, "illustrating": {"head": "illustrate", "sublist": 3}, "illustration": {"head": "illustrate", "sublist": 3}, "illustrations": {"head": "illustrate", "sublist": 3}, "illustrative": {"head": "illustrate", "sublist": 3}, "immigrate": {"head": "immigrate", "sublist": 3}, "immigrant": {"head": "immigrate", "sublist": 3}, "immigrants": {"head": "immigrate", "sublist": 3}, "immigrated": {"head": "immigrate", "sublist": 3}, "immigrates": {"head": "immigrate", "sublist": 3}, "immigrating": {"head": "immigrate", "sublist": 3}, "immigration": {"head": "immigrate", "sublist": 3}, "imply": {"head": "imply", "sublist": 3}, "implied": {"head": "imply", "sublist": 3}, "implies": {"head": "imply", "sublist": 3}, "implying": {"head": "imply", "sublist": 3}, "initial": {"head": "initial", "sublist": 3}, "initially": {"head": "initial", "sublist": 3}, "instance": {"head": "instance", "sublist": 3}, "instances": {"head": "instance", "sublist": 3}, "interact": {"head": "interact", "sublist": 3}, "interacted": {"head": "interact", "sublist": 3}, "interacting": {"head": "interact", "sublist": 3}, "interaction": {"head": "interact", "sublist": 3}, "interactions": {"head": "interact", "sublist": 3}, "interactive": {"head": "interact", "sublist": 3}, "interactively": {"head": "interact", "sublist": 3}, "interacts": {"head": "interact", "sublist": 3}, "justify": {"head": "justify", "sublist": 3}, "justifiable": {"head": "justify", "sublist": 3}, "justifiably": {"head": "justify", "sublist": 3}, "justification": {"head": "justify", "sublist": 3}, "justifications": {"head": "justify", "sublist": 3}, "justified": {"head": "justify", 
"sublist": 3}, "justifies": {"head": "justify", "sublist": 3}, "justifying": {"head": "justify", "sublist": 3}, "unjustified": {"head": "justify", "sublist": 3}, "layer": {"head": "layer", "sublist": 3}, "layered": {"head": "layer", "sublist": 3}, "layering": {"head": "layer", "sublist": 3}, "layers": {"head": "layer", "sublist": 3}, "link": {"head": "link", "sublist": 3}, "linkage": {"head": "link", "sublist": 3}, "linkages": {"head": "link", "sublist": 3}, "linked": {"head": "link", "sublist": 3}, "linking": {"head": "link", "sublist": 3}, "links": {"head": "link", "sublist": 3}, "locate": {"head": "locate", "sublist": 3}, "located": {"head": "locate", "sublist": 3}, "locating": {"head": "locate", "sublist": 3}, "location": {"head": "locate", "sublist": 3}, "locations": {"head": "locate", "sublist": 3}, "relocate": {"head": "locate", "sublist": 3}, "relocated": {"head": "locate", "sublist": 3}, "relocates": {"head": "locate", "sublist": 3}, "relocating": {"head": "locate", "sublist": 3}, "relocation": {"head": "locate", "sublist": 3}, "maximise": {"head": "maximise", "sublist": 3}, "max": {"head": "maximise", "sublist": 3}, "maximised": {"head": "maximise", "sublist": 3}, "maximises": {"head": "maximise", "sublist": 3}, "maximising": {"head": "maximise", "sublist": 3}, "maximisation": {"head": "maximise", "sublist": 3}, "maximize": {"head": "maximise", "sublist": 3}, "maximized": {"head": "maximise", "sublist": 3}, "maximizes": {"head": "maximise", "sublist": 3}, "maximizing": {"head": "maximise", "sublist": 3}, "maximization": {"head": "maximise", "sublist": 3}, "maximum": {"head": "maximise", "sublist": 3}, "minor": {"head": "minor", "sublist": 3}, "minorities": {"head": "minor", "sublist": 3}, "minority": {"head": "minor", "sublist": 3}, "minors": {"head": "minor", "sublist": 3}, "negate": {"head": "negate", "sublist": 3}, "negative": {"head": "negate", "sublist": 3}, "negated": {"head": "negate", "sublist": 3}, "negates": {"head": "negate", "sublist": 3}, 
"negating": {"head": "negate", "sublist": 3}, "negatively": {"head": "negate", "sublist": 3}, "negatives": {"head": "negate", "sublist": 3}, "outcome": {"head": "outcome", "sublist": 3}, "outcomes": {"head": "outcome", "sublist": 3}, "partner": {"head": "partner", "sublist": 3}, "partners": {"head": "partner", "sublist": 3}, "partnership": {"head": "partner", "sublist": 3}, "partnerships": {"head": "partner", "sublist": 3}, "philosophy": {"head": "philosophy", "sublist": 3}, "philosopher": {"head": "philosophy", "sublist": 3}, "philosophers": {"head": "philosophy", "sublist": 3}, "philosophical": {"head": "philosophy", "sublist": 3}, "philosophically": {"head": "philosophy", "sublist": 3}, "philosophies": {"head": "philosophy", "sublist": 3}, "philosophise": {"head": "philosophy", "sublist": 3}, "philosophised": {"head": "philosophy", "sublist": 3}, "philosophises": {"head": "philosophy", "sublist": 3}, "philosophising": {"head": "philosophy", "sublist": 3}, "philosophize": {"head": "philosophy", "sublist": 3}, "philosophized": {"head": "philosophy", "sublist": 3}, "philosophizes": {"head": "philosophy", "sublist": 3}, "philosophizing": {"head": "philosophy", "sublist": 3}, "physical": {"head": "physical", "sublist": 3}, "physically": {"head": "physical", "sublist": 3}, "proportion": {"head": "proportion", "sublist": 3}, "disproportion": {"head": "proportion", "sublist": 3}, "disproportionate": {"head": "proportion", "sublist": 3}, "disproportionately": {"head": "proportion", "sublist": 3}, "proportional": {"head": "proportion", "sublist": 3}, "proportionally": {"head": "proportion", "sublist": 3}, "proportionate": {"head": "proportion", "sublist": 3}, "proportionately": {"head": "proportion", "sublist": 3}, "proportions": {"head": "proportion", "sublist": 3}, "publish": {"head": "publish", "sublist": 3}, "published": {"head": "publish", "sublist": 3}, "publisher": {"head": "publish", "sublist": 3}, "publishers": {"head": "publish", "sublist": 3}, "publishes": 
{"head": "publish", "sublist": 3}, "publishing": {"head": "publish", "sublist": 3}, "unpublished": {"head": "publish", "sublist": 3}, "react": {"head": "react", "sublist": 3}, "reacted": {"head": "react", "sublist": 3}, "reacts": {"head": "react", "sublist": 3}, "reacting": {"head": "react", "sublist": 3}, "reaction": {"head": "react", "sublist": 3}, "reactionaries": {"head": "react", "sublist": 3}, "reactionary": {"head": "react", "sublist": 3}, "reactions": {"head": "react", "sublist": 3}, "reactive": {"head": "react", "sublist": 3}, "reactivate": {"head": "react", "sublist": 3}, "reactivation": {"head": "react", "sublist": 3}, "reactor": {"head": "react", "sublist": 3}, "reactors": {"head": "react", "sublist": 3}, "register": {"head": "register", "sublist": 3}, "deregister": {"head": "register", "sublist": 3}, "deregistered": {"head": "register", "sublist": 3}, "deregistering": {"head": "register", "sublist": 3}, "deregisters": {"head": "register", "sublist": 3}, "deregistration": {"head": "register", "sublist": 3}, "registered": {"head": "register", "sublist": 3}, "registering": {"head": "register", "sublist": 3}, "registers": {"head": "register", "sublist": 3}, "registration": {"head": "register", "sublist": 3}, "rely": {"head": "rely", "sublist": 3}, "reliability": {"head": "rely", "sublist": 3}, "reliable": {"head": "rely", "sublist": 3}, "reliably": {"head": "rely", "sublist": 3}, "reliance": {"head": "rely", "sublist": 3}, "reliant": {"head": "rely", "sublist": 3}, "relied": {"head": "rely", "sublist": 3}, "relies": {"head": "rely", "sublist": 3}, "relying": {"head": "rely", "sublist": 3}, "unreliable": {"head": "rely", "sublist": 3}, "remove": {"head": "remove", "sublist": 3}, "removable": {"head": "remove", "sublist": 3}, "removal": {"head": "remove", "sublist": 3}, "removals": {"head": "remove", "sublist": 3}, "removed": {"head": "remove", "sublist": 3}, "removes": {"head": "remove", "sublist": 3}, "removing": {"head": "remove", "sublist": 3}, "scheme": 
{"head": "scheme", "sublist": 3}, "schematic": {"head": "scheme", "sublist": 3}, "schematically": {"head": "scheme", "sublist": 3}, "schemed": {"head": "scheme", "sublist": 3}, "schemes": {"head": "scheme", "sublist": 3}, "scheming": {"head": "scheme", "sublist": 3}, "sequence": {"head": "sequence", "sublist": 3}, "sequenced": {"head": "sequence", "sublist": 3}, "sequences": {"head": "sequence", "sublist": 3}, "sequencing": {"head": "sequence", "sublist": 3}, "sequential": {"head": "sequence", "sublist": 3}, "sequentially": {"head": "sequence", "sublist": 3}, "sex": {"head": "sex", "sublist": 3}, "sexes": {"head": "sex", "sublist": 3}, "sexism": {"head": "sex", "sublist": 3}, "sexual": {"head": "sex", "sublist": 3}, "sexuality": {"head": "sex", "sublist": 3}, "sexually": {"head": "sex", "sublist": 3}, "shift": {"head": "shift", "sublist": 3}, "shifted": {"head": "shift", "sublist": 3}, "shifting": {"head": "shift", "sublist": 3}, "shifts": {"head": "shift", "sublist": 3}, "specify": {"head": "specify", "sublist": 3}, "specifiable": {"head": "specify", "sublist": 3}, "specified": {"head": "specify", "sublist": 3}, "specifies": {"head": "specify", "sublist": 3}, "specifying": {"head": "specify", "sublist": 3}, "unspecified": {"head": "specify", "sublist": 3}, "sufficient": {"head": "sufficient", "sublist": 3}, "sufficiency": {"head": "sufficient", "sublist": 3}, "insufficient": {"head": "sufficient", "sublist": 3}, "insufficiently": {"head": "sufficient", "sublist": 3}, "sufficiently": {"head": "sufficient", "sublist": 3}, "task": {"head": "task", "sublist": 3}, "tasks": {"head": "task", "sublist": 3}, "technical": {"head": "technical", "sublist": 3}, "technically": {"head": "technical", "sublist": 3}, "technique": {"head": "technique", "sublist": 3}, "techniques": {"head": "technique", "sublist": 3}, "technology": {"head": "technology", "sublist": 3}, "technological": {"head": "technology", "sublist": 3}, "technologically": {"head": "technology", "sublist": 3}, 
"valid": {"head": "valid", "sublist": 3}, "invalidate": {"head": "valid", "sublist": 3}, "invalidity": {"head": "valid", "sublist": 3}, "validate": {"head": "valid", "sublist": 3}, "validated": {"head": "valid", "sublist": 3}, "validating": {"head": "valid", "sublist": 3}, "validation": {"head": "valid", "sublist": 3}, "validity": {"head": "valid", "sublist": 3}, "validly": {"head": "valid", "sublist": 3}, "volume": {"head": "volume", "sublist": 3}, "volumes": {"head": "volume", "sublist": 3}, "vol": {"head": "volume", "sublist": 3}, "abandon": {"head": "abandon", "sublist": 8}, "abandoned": {"head": "abandon", "sublist": 8}, "abandoning": {"head": "abandon", "sublist": 8}, "abandonment": {"head": "abandon", "sublist": 8}, "abandons": {"head": "abandon", "sublist": 8}, "accompany": {"head": "accompany", "sublist": 8}, "accompanied": {"head": "accompany", "sublist": 8}, "accompanies": {"head": "accompany", "sublist": 8}, "accompaniment": {"head": "accompany", "sublist": 8}, "accompanying": {"head": "accompany", "sublist": 8}, "unaccompanied": {"head": "accompany", "sublist": 8}, "accumulate": {"head": "accumulate", "sublist": 8}, "accumulated": {"head": "accumulate", "sublist": 8}, "accumulating": {"head": "accumulate", "sublist": 8}, "accumulation": {"head": "accumulate", "sublist": 8}, "accumulates": {"head": "accumulate", "sublist": 8}, "ambiguous": {"head": "ambiguous", "sublist": 8}, "ambiguities": {"head": "ambiguous", "sublist": 8}, "ambiguity": {"head": "ambiguous", "sublist": 8}, "unambiguous": {"head": "ambiguous", "sublist": 8}, "unambiguously": {"head": "ambiguous", "sublist": 8}, "append": {"head": "append", "sublist": 8}, "appendix": {"head": "append", "sublist": 8}, "appended": {"head": "append", "sublist": 8}, "appends": {"head": "append", "sublist": 8}, "appending": {"head": "append", "sublist": 8}, "appendices": {"head": "append", "sublist": 8}, "appendixes": {"head": "append", "sublist": 8}, "appreciate": {"head": "appreciate", "sublist": 8}, 
"appreciable": {"head": "appreciate", "sublist": 8}, "appreciably": {"head": "appreciate", "sublist": 8}, "appreciated": {"head": "appreciate", "sublist": 8}, "appreciates": {"head": "appreciate", "sublist": 8}, "appreciating": {"head": "appreciate", "sublist": 8}, "appreciation": {"head": "appreciate", "sublist": 8}, "unappreciated": {"head": "appreciate", "sublist": 8}, "arbitrary": {"head": "arbitrary", "sublist": 8}, "arbitrariness": {"head": "arbitrary", "sublist": 8}, "arbitrarily": {"head": "arbitrary", "sublist": 8}, "automate": {"head": "automate", "sublist": 8}, "automatic": {"head": "automate", "sublist": 8}, "automated": {"head": "automate", "sublist": 8}, "automates": {"head": "automate", "sublist": 8}, "automating": {"head": "automate", "sublist": 8}, "automatically": {"head": "automate", "sublist": 8}, "automation": {"head": "automate", "sublist": 8}, "bias": {"head": "bias", "sublist": 8}, "biased": {"head": "bias", "sublist": 8}, "biases": {"head": "bias", "sublist": 8}, "biasing": {"head": "bias", "sublist": 8}, "unbiased": {"head": "bias", "sublist": 8}, "chart": {"head": "chart", "sublist": 8}, "charted": {"head": "chart", "sublist": 8}, "charting": {"head": "chart", "sublist": 8}, "charts": {"head": "chart", "sublist": 8}, "uncharted": {"head": "chart", "sublist": 8}, "clarify": {"head": "clarify", "sublist": 8}, "clarification": {"head": "clarify", "sublist": 8}, "clarified": {"head": "clarify", "sublist": 8}, "clarifies": {"head": "clarify", "sublist": 8}, "clarifying": {"head": "clarify", "sublist": 8}, "clarity": {"head": "clarify", "sublist": 8}, "commodity": {"head": "commodity", "sublist": 8}, "commodities": {"head": "commodity", "sublist": 8}, "complement": {"head": "complement", "sublist": 8}, "complementary": {"head": "complement", "sublist": 8}, "complemented": {"head": "complement", "sublist": 8}, "complementing": {"head": "complement", "sublist": 8}, "complements": {"head": "complement", "sublist": 8}, "conform": {"head": 
"conform", "sublist": 8}, "conformable": {"head": "conform", "sublist": 8}, "conformability": {"head": "conform", "sublist": 8}, "conformance": {"head": "conform", "sublist": 8}, "conformation": {"head": "conform", "sublist": 8}, "conformed": {"head": "conform", "sublist": 8}, "conforming": {"head": "conform", "sublist": 8}, "conformist": {"head": "conform", "sublist": 8}, "conformists": {"head": "conform", "sublist": 8}, "conformity": {"head": "conform", "sublist": 8}, "conforms": {"head": "conform", "sublist": 8}, "nonconformist": {"head": "conform", "sublist": 8}, "nonconformists": {"head": "conform", "sublist": 8}, "nonconformity": {"head": "conform", "sublist": 8}, "non-conformist": {"head": "conform", "sublist": 8}, "non-conformists": {"head": "conform", "sublist": 8}, "non-conformity": {"head": "conform", "sublist": 8}, "contemporary": {"head": "contemporary", "sublist": 8}, "contemporaries": {"head": "contemporary", "sublist": 8}, "contradict": {"head": "contradict", "sublist": 8}, "contradicted": {"head": "contradict", "sublist": 8}, "contradicting": {"head": "contradict", "sublist": 8}, "contradiction": {"head": "contradict", "sublist": 8}, "contradictions": {"head": "contradict", "sublist": 8}, "contradictory": {"head": "contradict", "sublist": 8}, "contradicts": {"head": "contradict", "sublist": 8}, "crucial": {"head": "crucial", "sublist": 8}, "crucially": {"head": "crucial", "sublist": 8}, "currency": {"head": "currency", "sublist": 8}, "currencies": {"head": "currency", "sublist": 8}, "denote": {"head": "denote", "sublist": 8}, "denotation": {"head": "denote", "sublist": 8}, "denotations": {"head": "denote", "sublist": 8}, "denoted": {"head": "denote", "sublist": 8}, "denotes": {"head": "denote", "sublist": 8}, "denoting": {"head": "denote", "sublist": 8}, "detect": {"head": "detect", "sublist": 8}, "detectable": {"head": "detect", "sublist": 8}, "detected": {"head": "detect", "sublist": 8}, "detecting": {"head": "detect", "sublist": 8}, "detection": 
{"head": "detect", "sublist": 8}, "detective": {"head": "detect", "sublist": 8}, "detectives": {"head": "detect", "sublist": 8}, "detector": {"head": "detect", "sublist": 8}, "detectors": {"head": "detect", "sublist": 8}, "detects": {"head": "detect", "sublist": 8}, "deviate": {"head": "deviate", "sublist": 8}, "deviated": {"head": "deviate", "sublist": 8}, "deviates": {"head": "deviate", "sublist": 8}, "deviating": {"head": "deviate", "sublist": 8}, "deviation": {"head": "deviate", "sublist": 8}, "deviations": {"head": "deviate", "sublist": 8}, "displace": {"head": "displace", "sublist": 8}, "displaced": {"head": "displace", "sublist": 8}, "displacement": {"head": "displace", "sublist": 8}, "displaces": {"head": "displace", "sublist": 8}, "displacing": {"head": "displace", "sublist": 8}, "drama": {"head": "drama", "sublist": 8}, "dramas": {"head": "drama", "sublist": 8}, "dramatic": {"head": "drama", "sublist": 8}, "dramatically": {"head": "drama", "sublist": 8}, "dramatise": {"head": "drama", "sublist": 8}, "dramatised": {"head": "drama", "sublist": 8}, "dramatising": {"head": "drama", "sublist": 8}, "dramatises": {"head": "drama", "sublist": 8}, "dramatisation": {"head": "drama", "sublist": 8}, "dramatisations": {"head": "drama", "sublist": 8}, "dramatist": {"head": "drama", "sublist": 8}, "dramatists": {"head": "drama", "sublist": 8}, "dramatization": {"head": "drama", "sublist": 8}, "dramatizations": {"head": "drama", "sublist": 8}, "dramatize": {"head": "drama", "sublist": 8}, "dramatized": {"head": "drama", "sublist": 8}, "dramatizes": {"head": "drama", "sublist": 8}, "dramatizing": {"head": "drama", "sublist": 8}, "eventual": {"head": "eventual", "sublist": 8}, "eventuality": {"head": "eventual", "sublist": 8}, "eventually": {"head": "eventual", "sublist": 8}, "exhibit": {"head": "exhibit", "sublist": 8}, "exhibited": {"head": "exhibit", "sublist": 8}, "exhibiting": {"head": "exhibit", "sublist": 8}, "exhibition": {"head": "exhibit", "sublist": 8}, 
"exhibitions": {"head": "exhibit", "sublist": 8}, "exhibits": {"head": "exhibit", "sublist": 8}, "exploit": {"head": "exploit", "sublist": 8}, "exploitation": {"head": "exploit", "sublist": 8}, "exploited": {"head": "exploit", "sublist": 8}, "exploiting": {"head": "exploit", "sublist": 8}, "exploits": {"head": "exploit", "sublist": 8}, "fluctuate": {"head": "fluctuate", "sublist": 8}, "fluctuated": {"head": "fluctuate", "sublist": 8}, "fluctuates": {"head": "fluctuate", "sublist": 8}, "fluctuating": {"head": "fluctuate", "sublist": 8}, "fluctuation": {"head": "fluctuate", "sublist": 8}, "fluctuations": {"head": "fluctuate", "sublist": 8}, "guideline": {"head": "guideline", "sublist": 8}, "guidelines": {"head": "guideline", "sublist": 8}, "highlight": {"head": "highlight", "sublist": 8}, "highlighted": {"head": "highlight", "sublist": 8}, "highlighting": {"head": "highlight", "sublist": 8}, "highlights": {"head": "highlight", "sublist": 8}, "implicit": {"head": "implicit", "sublist": 8}, "implicitly": {"head": "implicit", "sublist": 8}, "induce": {"head": "induce", "sublist": 8}, "induced": {"head": "induce", "sublist": 8}, "induces": {"head": "induce", "sublist": 8}, "inducing": {"head": "induce", "sublist": 8}, "induction": {"head": "induce", "sublist": 8}, "inevitable": {"head": "inevitable", "sublist": 8}, "inevitability": {"head": "inevitable", "sublist": 8}, "inevitably": {"head": "inevitable", "sublist": 8}, "infrastructure": {"head": "infrastructure", "sublist": 8}, "infrastructures": {"head": "infrastructure", "sublist": 8}, "inspect": {"head": "inspect", "sublist": 8}, "inspected": {"head": "inspect", "sublist": 8}, "inspecting": {"head": "inspect", "sublist": 8}, "inspection": {"head": "inspect", "sublist": 8}, "inspections": {"head": "inspect", "sublist": 8}, "inspector": {"head": "inspect", "sublist": 8}, "inspectors": {"head": "inspect", "sublist": 8}, "inspects": {"head": "inspect", "sublist": 8}, "intense": {"head": "intense", "sublist": 8}, 
"intensely": {"head": "intense", "sublist": 8}, "intenseness": {"head": "intense", "sublist": 8}, "intensification": {"head": "intense", "sublist": 8}, "intensified": {"head": "intense", "sublist": 8}, "intensifies": {"head": "intense", "sublist": 8}, "intensify": {"head": "intense", "sublist": 8}, "intensifying": {"head": "intense", "sublist": 8}, "intension": {"head": "intense", "sublist": 8}, "intensity": {"head": "intense", "sublist": 8}, "intensive": {"head": "intense", "sublist": 8}, "intensively": {"head": "intense", "sublist": 8}, "manipulate": {"head": "manipulate", "sublist": 8}, "manipulated": {"head": "manipulate", "sublist": 8}, "manipulates": {"head": "manipulate", "sublist": 8}, "manipulating": {"head": "manipulate", "sublist": 8}, "manipulation": {"head": "manipulate", "sublist": 8}, "manipulations": {"head": "manipulate", "sublist": 8}, "manipulative": {"head": "manipulate", "sublist": 8}, "minimise": {"head": "minimise", "sublist": 8}, "minimised": {"head": "minimise", "sublist": 8}, "minimises": {"head": "minimise", "sublist": 8}, "minimising": {"head": "minimise", "sublist": 8}, "minimize": {"head": "minimise", "sublist": 8}, "minimized": {"head": "minimise", "sublist": 8}, "minimizes": {"head": "minimise", "sublist": 8}, "minimizing": {"head": "minimise", "sublist": 8}, "nuclear": {"head": "nuclear", "sublist": 8}, "offset": {"head": "offset", "sublist": 8}, "offsets": {"head": "offset", "sublist": 8}, "offsetting": {"head": "offset", "sublist": 8}, "paragraph": {"head": "paragraph", "sublist": 8}, "paragraphing": {"head": "paragraph", "sublist": 8}, "paragraphs": {"head": "paragraph", "sublist": 8}, "plus": {"head": "plus", "sublist": 8}, "pluses": {"head": "plus", "sublist": 8}, "practitioner": {"head": "practitioner", "sublist": 8}, "practitioners": {"head": "practitioner", "sublist": 8}, "predominant": {"head": "predominant", "sublist": 8}, "predominance": {"head": "predominant", "sublist": 8}, "predominantly": {"head": "predominant", 
"sublist": 8}, "predominate": {"head": "predominant", "sublist": 8}, "predominated": {"head": "predominant", "sublist": 8}, "predominates": {"head": "predominant", "sublist": 8}, "predominating": {"head": "predominant", "sublist": 8}, "prospect": {"head": "prospect", "sublist": 8}, "prospective": {"head": "prospect", "sublist": 8}, "prospects": {"head": "prospect", "sublist": 8}, "radical": {"head": "radical", "sublist": 8}, "radically": {"head": "radical", "sublist": 8}, "radicals": {"head": "radical", "sublist": 8}, "random": {"head": "random", "sublist": 8}, "randomly": {"head": "random", "sublist": 8}, "randomness": {"head": "random", "sublist": 8}, "reinforce": {"head": "reinforce", "sublist": 8}, "reinforced": {"head": "reinforce", "sublist": 8}, "reinforcement": {"head": "reinforce", "sublist": 8}, "reinforcements": {"head": "reinforce", "sublist": 8}, "reinforces": {"head": "reinforce", "sublist": 8}, "reinforcing": {"head": "reinforce", "sublist": 8}, "restore": {"head": "restore", "sublist": 8}, "restoration": {"head": "restore", "sublist": 8}, "restored": {"head": "restore", "sublist": 8}, "restores": {"head": "restore", "sublist": 8}, "restoring": {"head": "restore", "sublist": 8}, "revise": {"head": "revise", "sublist": 8}, "revised": {"head": "revise", "sublist": 8}, "revises": {"head": "revise", "sublist": 8}, "revising": {"head": "revise", "sublist": 8}, "revision": {"head": "revise", "sublist": 8}, "revisions": {"head": "revise", "sublist": 8}, "schedule": {"head": "schedule", "sublist": 8}, "reschedule": {"head": "schedule", "sublist": 8}, "rescheduled": {"head": "schedule", "sublist": 8}, "reschedules": {"head": "schedule", "sublist": 8}, "rescheduling": {"head": "schedule", "sublist": 8}, "scheduled": {"head": "schedule", "sublist": 8}, "schedules": {"head": "schedule", "sublist": 8}, "scheduling": {"head": "schedule", "sublist": 8}, "unscheduled": {"head": "schedule", "sublist": 8}, "tense": {"head": "tense", "sublist": 8}, "tension": {"head": 
"tense", "sublist": 8}, "tensely": {"head": "tense", "sublist": 8}, "tenser": {"head": "tense", "sublist": 8}, "tensest": {"head": "tense", "sublist": 8}, "tensions": {"head": "tense", "sublist": 8}, "terminate": {"head": "terminate", "sublist": 8}, "terminal": {"head": "terminate", "sublist": 8}, "terminals": {"head": "terminate", "sublist": 8}, "terminated": {"head": "terminate", "sublist": 8}, "terminates": {"head": "terminate", "sublist": 8}, "terminating": {"head": "terminate", "sublist": 8}, "termination": {"head": "terminate", "sublist": 8}, "terminations": {"head": "terminate", "sublist": 8}, "theme": {"head": "theme", "sublist": 8}, "themes": {"head": "theme", "sublist": 8}, "thematic": {"head": "theme", "sublist": 8}, "thematically": {"head": "theme", "sublist": 8}, "thereby": {"head": "thereby", "sublist": 8}, "uniform": {"head": "uniform", "sublist": 8}, "uniformity": {"head": "uniform", "sublist": 8}, "uniformly": {"head": "uniform", "sublist": 8}, "vehicle": {"head": "vehicle", "sublist": 8}, "vehicles": {"head": "vehicle", "sublist": 8}, "via": {"head": "via", "sublist": 8}, "virtual": {"head": "virtual", "sublist": 8}, "virtually": {"head": "virtual", "sublist": 8}, "visual": {"head": "visual", "sublist": 8}, "visualise": {"head": "visual", "sublist": 8}, "visualised": {"head": "visual", "sublist": 8}, "visualising": {"head": "visual", "sublist": 8}, "visualisation": {"head": "visual", "sublist": 8}, "visualize": {"head": "visual", "sublist": 8}, "visualized": {"head": "visual", "sublist": 8}, "visualizing": {"head": "visual", "sublist": 8}, "visualization": {"head": "visual", "sublist": 8}, "visually": {"head": "visual", "sublist": 8}, "widespread": {"head": "widespread", "sublist": 8}, "accommodate": {"head": "accommodate", "sublist": 9}, "accommodated": {"head": "accommodate", "sublist": 9}, "accommodates": {"head": "accommodate", "sublist": 9}, "accommodating": {"head": "accommodate", "sublist": 9}, "accommodation": {"head": "accommodate", 
"sublist": 9}, "analogy": {"head": "analogy", "sublist": 9}, "analogies": {"head": "analogy", "sublist": 9}, "analogous": {"head": "analogy", "sublist": 9}, "anticipate": {"head": "anticipate", "sublist": 9}, "anticipated": {"head": "anticipate", "sublist": 9}, "anticipates": {"head": "anticipate", "sublist": 9}, "anticipating": {"head": "anticipate", "sublist": 9}, "anticipation": {"head": "anticipate", "sublist": 9}, "unanticipated": {"head": "anticipate", "sublist": 9}, "assure": {"head": "assure", "sublist": 9}, "assurance": {"head": "assure", "sublist": 9}, "assurances": {"head": "assure", "sublist": 9}, "assured": {"head": "assure", "sublist": 9}, "assuredly": {"head": "assure", "sublist": 9}, "assures": {"head": "assure", "sublist": 9}, "assuring": {"head": "assure", "sublist": 9}, "attain": {"head": "attain", "sublist": 9}, "attainable": {"head": "attain", "sublist": 9}, "attained": {"head": "attain", "sublist": 9}, "attaining": {"head": "attain", "sublist": 9}, "attainment": {"head": "attain", "sublist": 9}, "attainments": {"head": "attain", "sublist": 9}, "attains": {"head": "attain", "sublist": 9}, "unattainable": {"head": "attain", "sublist": 9}, "behalf": {"head": "behalf", "sublist": 9}, "bulk": {"head": "bulk", "sublist": 9}, "bulky": {"head": "bulk", "sublist": 9}, "cease": {"head": "cease", "sublist": 9}, "ceased": {"head": "cease", "sublist": 9}, "ceaseless": {"head": "cease", "sublist": 9}, "ceases": {"head": "cease", "sublist": 9}, "ceasing": {"head": "cease", "sublist": 9}, "coherent": {"head": "coherent", "sublist": 9}, "coherence": {"head": "coherent", "sublist": 9}, "coherently": {"head": "coherent", "sublist": 9}, "incoherent": {"head": "coherent", "sublist": 9}, "incoherently": {"head": "coherent", "sublist": 9}, "coincide": {"head": "coincide", "sublist": 9}, "coincided": {"head": "coincide", "sublist": 9}, "coincides": {"head": "coincide", "sublist": 9}, "coinciding": {"head": "coincide", "sublist": 9}, "coincidence": {"head": 
"coincide", "sublist": 9}, "coincidences": {"head": "coincide", "sublist": 9}, "coincident": {"head": "coincide", "sublist": 9}, "coincidental": {"head": "coincide", "sublist": 9}, "commence": {"head": "commence", "sublist": 9}, "commenced": {"head": "commence", "sublist": 9}, "commences": {"head": "commence", "sublist": 9}, "commencement": {"head": "commence", "sublist": 9}, "commencing": {"head": "commence", "sublist": 9}, "recommences": {"head": "commence", "sublist": 9}, "recommenced": {"head": "commence", "sublist": 9}, "recommencing": {"head": "commence", "sublist": 9}, "compatible": {"head": "compatible", "sublist": 9}, "compatibility": {"head": "compatible", "sublist": 9}, "incompatibility": {"head": "compatible", "sublist": 9}, "incompatible": {"head": "compatible", "sublist": 9}, "concurrent": {"head": "concurrent", "sublist": 9}, "concurrently": {"head": "concurrent", "sublist": 9}, "confine": {"head": "confine", "sublist": 9}, "confined": {"head": "confine", "sublist": 9}, "confines": {"head": "confine", "sublist": 9}, "confining": {"head": "confine", "sublist": 9}, "unconfined": {"head": "confine", "sublist": 9}, "controversy": {"head": "controversy", "sublist": 9}, "controversies": {"head": "controversy", "sublist": 9}, "controversial": {"head": "controversy", "sublist": 9}, "controversially": {"head": "controversy", "sublist": 9}, "uncontroversial": {"head": "controversy", "sublist": 9}, "converse": {"head": "converse", "sublist": 9}, "conversely": {"head": "converse", "sublist": 9}, "device": {"head": "device", "sublist": 9}, "devices": {"head": "device", "sublist": 9}, "devote": {"head": "devote", "sublist": 9}, "devoted": {"head": "devote", "sublist": 9}, "devotedly": {"head": "devote", "sublist": 9}, "devotes": {"head": "devote", "sublist": 9}, "devoting": {"head": "devote", "sublist": 9}, "devotion": {"head": "devote", "sublist": 9}, "devotions": {"head": "devote", "sublist": 9}, "diminish": {"head": "diminish", "sublist": 9}, "diminished": 
{"head": "diminish", "sublist": 9}, "diminishes": {"head": "diminish", "sublist": 9}, "diminishing": {"head": "diminish", "sublist": 9}, "diminution": {"head": "diminish", "sublist": 9}, "undiminished": {"head": "diminish", "sublist": 9}, "distort": {"head": "distort", "sublist": 9}, "distorted": {"head": "distort", "sublist": 9}, "distorting": {"head": "distort", "sublist": 9}, "distortion": {"head": "distort", "sublist": 9}, "distortions": {"head": "distort", "sublist": 9}, "distorts": {"head": "distort", "sublist": 9}, "duration": {"head": "duration", "sublist": 9}, "erode": {"head": "erode", "sublist": 9}, "eroded": {"head": "erode", "sublist": 9}, "erodes": {"head": "erode", "sublist": 9}, "eroding": {"head": "erode", "sublist": 9}, "erosion": {"head": "erode", "sublist": 9}, "ethic": {"head": "ethic", "sublist": 9}, "ethical": {"head": "ethic", "sublist": 9}, "ethically": {"head": "ethic", "sublist": 9}, "ethics": {"head": "ethic", "sublist": 9}, "unethical": {"head": "ethic", "sublist": 9}, "format": {"head": "format", "sublist": 9}, "formatted": {"head": "format", "sublist": 9}, "formatting": {"head": "format", "sublist": 9}, "formats": {"head": "format", "sublist": 9}, "found": {"head": "found", "sublist": 9}, "founded": {"head": "found", "sublist": 9}, "founder": {"head": "found", "sublist": 9}, "founders": {"head": "found", "sublist": 9}, "founding": {"head": "found", "sublist": 9}, "unfounded": {"head": "found", "sublist": 9}, "inherent": {"head": "inherent", "sublist": 9}, "inherently": {"head": "inherent", "sublist": 9}, "insight": {"head": "insight", "sublist": 9}, "insightful": {"head": "insight", "sublist": 9}, "insights": {"head": "insight", "sublist": 9}, "integral": {"head": "integral", "sublist": 9}, "intermediate": {"head": "intermediate", "sublist": 9}, "manual": {"head": "manual", "sublist": 9}, "manually": {"head": "manual", "sublist": 9}, "manuals": {"head": "manual", "sublist": 9}, "mature": {"head": "mature", "sublist": 9}, "immature": 
{"head": "mature", "sublist": 9}, "immaturity": {"head": "mature", "sublist": 9}, "maturation": {"head": "mature", "sublist": 9}, "maturational": {"head": "mature", "sublist": 9}, "matured": {"head": "mature", "sublist": 9}, "matures": {"head": "mature", "sublist": 9}, "maturing": {"head": "mature", "sublist": 9}, "maturity": {"head": "mature", "sublist": 9}, "mediate": {"head": "mediate", "sublist": 9}, "mediated": {"head": "mediate", "sublist": 9}, "mediates": {"head": "mediate", "sublist": 9}, "mediating": {"head": "mediate", "sublist": 9}, "mediation": {"head": "mediate", "sublist": 9}, "medium": {"head": "medium", "sublist": 9}, "military": {"head": "military", "sublist": 9}, "minimal": {"head": "minimal", "sublist": 9}, "minimalisation": {"head": "minimal", "sublist": 9}, "minimalise": {"head": "minimal", "sublist": 9}, "minimalises": {"head": "minimal", "sublist": 9}, "minimalised": {"head": "minimal", "sublist": 9}, "minimalising": {"head": "minimal", "sublist": 9}, "minimalist": {"head": "minimal", "sublist": 9}, "minimalists": {"head": "minimal", "sublist": 9}, "minimalistic": {"head": "minimal", "sublist": 9}, "minimalization": {"head": "minimal", "sublist": 9}, "minimalize": {"head": "minimal", "sublist": 9}, "minimalized": {"head": "minimal", "sublist": 9}, "minimalizes": {"head": "minimal", "sublist": 9}, "minimalizing": {"head": "minimal", "sublist": 9}, "minimally": {"head": "minimal", "sublist": 9}, "mutual": {"head": "mutual", "sublist": 9}, "mutually": {"head": "mutual", "sublist": 9}, "norm": {"head": "norm", "sublist": 9}, "norms": {"head": "norm", "sublist": 9}, "overlap": {"head": "overlap", "sublist": 9}, "overlapped": {"head": "overlap", "sublist": 9}, "overlapping": {"head": "overlap", "sublist": 9}, "overlaps": {"head": "overlap", "sublist": 9}, "passive": {"head": "passive", "sublist": 9}, "passively": {"head": "passive", "sublist": 9}, "passivity": {"head": "passive", "sublist": 9}, "portion": {"head": "portion", "sublist": 9}, 
"portions": {"head": "portion", "sublist": 9}, "preliminary": {"head": "preliminary", "sublist": 9}, "preliminaries": {"head": "preliminary", "sublist": 9}, "protocol": {"head": "protocol", "sublist": 9}, "protocols": {"head": "protocol", "sublist": 9}, "qualitative": {"head": "qualitative", "sublist": 9}, "qualitatively": {"head": "qualitative", "sublist": 9}, "refine": {"head": "refine", "sublist": 9}, "refined": {"head": "refine", "sublist": 9}, "refinement": {"head": "refine", "sublist": 9}, "refinements": {"head": "refine", "sublist": 9}, "refines": {"head": "refine", "sublist": 9}, "refining": {"head": "refine", "sublist": 9}, "relax": {"head": "relax", "sublist": 9}, "relaxation": {"head": "relax", "sublist": 9}, "relaxed": {"head": "relax", "sublist": 9}, "relaxes": {"head": "relax", "sublist": 9}, "relaxing": {"head": "relax", "sublist": 9}, "restrain": {"head": "restrain", "sublist": 9}, "restrained": {"head": "restrain", "sublist": 9}, "restraining": {"head": "restrain", "sublist": 9}, "restrains": {"head": "restrain", "sublist": 9}, "restraint": {"head": "restrain", "sublist": 9}, "restraints": {"head": "restrain", "sublist": 9}, "unrestrained": {"head": "restrain", "sublist": 9}, "revolution": {"head": "revolution", "sublist": 9}, "revolutionary": {"head": "revolution", "sublist": 9}, "revolutionaries": {"head": "revolution", "sublist": 9}, "revolutionise": {"head": "revolution", "sublist": 9}, "revolutionised": {"head": "revolution", "sublist": 9}, "revolutionises": {"head": "revolution", "sublist": 9}, "revolutionising": {"head": "revolution", "sublist": 9}, "revolutionist": {"head": "revolution", "sublist": 9}, "revolutionists": {"head": "revolution", "sublist": 9}, "revolutionize": {"head": "revolution", "sublist": 9}, "revolutionized": {"head": "revolution", "sublist": 9}, "revolutionizes": {"head": "revolution", "sublist": 9}, "revolutionizing": {"head": "revolution", "sublist": 9}, "revolutions": {"head": "revolution", "sublist": 9}, "rigid": 
{"head": "rigid", "sublist": 9}, "rigidities": {"head": "rigid", "sublist": 9}, "rigidity": {"head": "rigid", "sublist": 9}, "rigidly": {"head": "rigid", "sublist": 9}, "route": {"head": "route", "sublist": 9}, "routed": {"head": "route", "sublist": 9}, "routes": {"head": "route", "sublist": 9}, "routing": {"head": "route", "sublist": 9}, "scenario": {"head": "scenario", "sublist": 9}, "scenarios": {"head": "scenario", "sublist": 9}, "sphere": {"head": "sphere", "sublist": 9}, "spheres": {"head": "sphere", "sublist": 9}, "spherical": {"head": "sphere", "sublist": 9}, "spherically": {"head": "sphere", "sublist": 9}, "subordinate": {"head": "subordinate", "sublist": 9}, "subordinates": {"head": "subordinate", "sublist": 9}, "subordination": {"head": "subordinate", "sublist": 9}, "supplement": {"head": "supplement", "sublist": 9}, "supplementary": {"head": "supplement", "sublist": 9}, "supplemented": {"head": "supplement", "sublist": 9}, "supplementing": {"head": "supplement", "sublist": 9}, "supplements": {"head": "supplement", "sublist": 9}, "suspend": {"head": "suspend", "sublist": 9}, "suspended": {"head": "suspend", "sublist": 9}, "suspending": {"head": "suspend", "sublist": 9}, "suspends": {"head": "suspend", "sublist": 9}, "suspension": {"head": "suspend", "sublist": 9}, "team": {"head": "team", "sublist": 9}, "teamed": {"head": "team", "sublist": 9}, "teaming": {"head": "team", "sublist": 9}, "teams": {"head": "team", "sublist": 9}, "temporary": {"head": "temporary", "sublist": 9}, "temporarily": {"head": "temporary", "sublist": 9}, "trigger": {"head": "trigger", "sublist": 9}, "triggered": {"head": "trigger", "sublist": 9}, "triggering": {"head": "trigger", "sublist": 9}, "triggers": {"head": "trigger", "sublist": 9}, "unify": {"head": "unify", "sublist": 9}, "unification": {"head": "unify", "sublist": 9}, "unified": {"head": "unify", "sublist": 9}, "unifies": {"head": "unify", "sublist": 9}, "unifying": {"head": "unify", "sublist": 9}, "violate": {"head": 
"violate", "sublist": 9}, "violated": {"head": "violate", "sublist": 9}, "violates": {"head": "violate", "sublist": 9}, "violating": {"head": "violate", "sublist": 9}, "violation": {"head": "violate", "sublist": 9}, "violations": {"head": "violate", "sublist": 9}, "vision": {"head": "vision", "sublist": 9}, "visions": {"head": "vision", "sublist": 9}, "achieve": {"head": "achieve", "sublist": 2}, "achievable": {"head": "achieve", "sublist": 2}, "achieved": {"head": "achieve", "sublist": 2}, "achievement": {"head": "achieve", "sublist": 2}, "achievements": {"head": "achieve", "sublist": 2}, "achieves": {"head": "achieve", "sublist": 2}, "achieving": {"head": "achieve", "sublist": 2}, "acquire": {"head": "acquire", "sublist": 2}, "acquired": {"head": "acquire", "sublist": 2}, "acquires": {"head": "acquire", "sublist": 2}, "acquiring": {"head": "acquire", "sublist": 2}, "acquisition": {"head": "acquire", "sublist": 2}, "acquisitions": {"head": "acquire", "sublist": 2}, "administrate": {"head": "administrate", "sublist": 2}, "administrates": {"head": "administrate", "sublist": 2}, "administration": {"head": "administrate", "sublist": 2}, "administrations": {"head": "administrate", "sublist": 2}, "administrative": {"head": "administrate", "sublist": 2}, "administratively": {"head": "administrate", "sublist": 2}, "administrator": {"head": "administrate", "sublist": 2}, "administrators": {"head": "administrate", "sublist": 2}, "affect": {"head": "affect", "sublist": 2}, "affected": {"head": "affect", "sublist": 2}, "affecting": {"head": "affect", "sublist": 2}, "affective": {"head": "affect", "sublist": 2}, "affectively": {"head": "affect", "sublist": 2}, "affects": {"head": "affect", "sublist": 2}, "unaffected": {"head": "affect", "sublist": 2}, "appropriate": {"head": "appropriate", "sublist": 2}, "appropriacy": {"head": "appropriate", "sublist": 2}, "appropriately": {"head": "appropriate", "sublist": 2}, "appropriateness": {"head": "appropriate", "sublist": 2}, 
"inappropriacy": {"head": "appropriate", "sublist": 2}, "inappropriate": {"head": "appropriate", "sublist": 2}, "inappropriately": {"head": "appropriate", "sublist": 2}, "aspect": {"head": "aspect", "sublist": 2}, "aspects": {"head": "aspect", "sublist": 2}, "assist": {"head": "assist", "sublist": 2}, "assistance": {"head": "assist", "sublist": 2}, "assistant": {"head": "assist", "sublist": 2}, "assistants": {"head": "assist", "sublist": 2}, "assisted": {"head": "assist", "sublist": 2}, "assisting": {"head": "assist", "sublist": 2}, "assists": {"head": "assist", "sublist": 2}, "unassisted": {"head": "assist", "sublist": 2}, "category": {"head": "category", "sublist": 2}, "categories": {"head": "category", "sublist": 2}, "categorisation": {"head": "category", "sublist": 2}, "categorise": {"head": "category", "sublist": 2}, "categorised": {"head": "category", "sublist": 2}, "categorises": {"head": "category", "sublist": 2}, "categorising": {"head": "category", "sublist": 2}, "categorization": {"head": "category", "sublist": 2}, "categorized": {"head": "category", "sublist": 2}, "categorizes": {"head": "category", "sublist": 2}, "categorizing": {"head": "category", "sublist": 2}, "chapter": {"head": "chapter", "sublist": 2}, "chapters": {"head": "chapter", "sublist": 2}, "commission": {"head": "commission", "sublist": 2}, "commissioned": {"head": "commission", "sublist": 2}, "commissioner": {"head": "commission", "sublist": 2}, "commissioners": {"head": "commission", "sublist": 2}, "commissioning": {"head": "commission", "sublist": 2}, "commissions": {"head": "commission", "sublist": 2}, "community": {"head": "community", "sublist": 2}, "communities": {"head": "community", "sublist": 2}, "complex": {"head": "complex", "sublist": 2}, "complexities": {"head": "complex", "sublist": 2}, "complexity": {"head": "complex", "sublist": 2}, "compute": {"head": "compute", "sublist": 2}, "computation": {"head": "compute", "sublist": 2}, "computational": {"head": "compute", 
"sublist": 2}, "computations": {"head": "compute", "sublist": 2}, "computable": {"head": "compute", "sublist": 2}, "computer": {"head": "compute", "sublist": 2}, "computed": {"head": "compute", "sublist": 2}, "computerised": {"head": "compute", "sublist": 2}, "computers": {"head": "compute", "sublist": 2}, "computing": {"head": "compute", "sublist": 2}, "conclude": {"head": "conclude", "sublist": 2}, "concluded": {"head": "conclude", "sublist": 2}, "concludes": {"head": "conclude", "sublist": 2}, "concluding": {"head": "conclude", "sublist": 2}, "conclusion": {"head": "conclude", "sublist": 2}, "conclusions": {"head": "conclude", "sublist": 2}, "conclusive": {"head": "conclude", "sublist": 2}, "conclusively": {"head": "conclude", "sublist": 2}, "inconclusive": {"head": "conclude", "sublist": 2}, "inconclusively": {"head": "conclude", "sublist": 2}, "conduct": {"head": "conduct", "sublist": 2}, "conducted": {"head": "conduct", "sublist": 2}, "conducting": {"head": "conduct", "sublist": 2}, "conducts": {"head": "conduct", "sublist": 2}, "consequent": {"head": "consequent", "sublist": 2}, "consequence": {"head": "consequent", "sublist": 2}, "consequences": {"head": "consequent", "sublist": 2}, "consequently": {"head": "consequent", "sublist": 2}, "construct": {"head": "construct", "sublist": 2}, "constructed": {"head": "construct", "sublist": 2}, "constructing": {"head": "construct", "sublist": 2}, "construction": {"head": "construct", "sublist": 2}, "constructions": {"head": "construct", "sublist": 2}, "constructive": {"head": "construct", "sublist": 2}, "constructs": {"head": "construct", "sublist": 2}, "reconstruct": {"head": "construct", "sublist": 2}, "reconstructed": {"head": "construct", "sublist": 2}, "reconstructing": {"head": "construct", "sublist": 2}, "reconstruction": {"head": "construct", "sublist": 2}, "reconstructs": {"head": "construct", "sublist": 2}, "consume": {"head": "consume", "sublist": 2}, "consumed": {"head": "consume", "sublist": 2}, 
"consumer": {"head": "consume", "sublist": 2}, "consumers": {"head": "consume", "sublist": 2}, "consumes": {"head": "consume", "sublist": 2}, "consuming": {"head": "consume", "sublist": 2}, "consumption": {"head": "consume", "sublist": 2}, "credit": {"head": "credit", "sublist": 2}, "credited": {"head": "credit", "sublist": 2}, "crediting": {"head": "credit", "sublist": 2}, "creditor": {"head": "credit", "sublist": 2}, "creditors": {"head": "credit", "sublist": 2}, "credits": {"head": "credit", "sublist": 2}, "culture": {"head": "culture", "sublist": 2}, "cultural": {"head": "culture", "sublist": 2}, "culturally": {"head": "culture", "sublist": 2}, "cultured": {"head": "culture", "sublist": 2}, "cultures": {"head": "culture", "sublist": 2}, "uncultured": {"head": "culture", "sublist": 2}, "design": {"head": "design", "sublist": 2}, "designed": {"head": "design", "sublist": 2}, "designer": {"head": "design", "sublist": 2}, "designers": {"head": "design", "sublist": 2}, "designing": {"head": "design", "sublist": 2}, "designs": {"head": "design", "sublist": 2}, "distinct": {"head": "distinct", "sublist": 2}, "distinction": {"head": "distinct", "sublist": 2}, "distinctions": {"head": "distinct", "sublist": 2}, "distinctive": {"head": "distinct", "sublist": 2}, "distinctively": {"head": "distinct", "sublist": 2}, "distinctly": {"head": "distinct", "sublist": 2}, "indistinct": {"head": "distinct", "sublist": 2}, "indistinctly": {"head": "distinct", "sublist": 2}, "element": {"head": "element", "sublist": 2}, "elements": {"head": "element", "sublist": 2}, "equate": {"head": "equate", "sublist": 2}, "equated": {"head": "equate", "sublist": 2}, "equates": {"head": "equate", "sublist": 2}, "equating": {"head": "equate", "sublist": 2}, "equation": {"head": "equate", "sublist": 2}, "equations": {"head": "equate", "sublist": 2}, "evaluate": {"head": "evaluate", "sublist": 2}, "evaluated": {"head": "evaluate", "sublist": 2}, "evaluates": {"head": "evaluate", "sublist": 2}, 
"evaluating": {"head": "evaluate", "sublist": 2}, "evaluation": {"head": "evaluate", "sublist": 2}, "evaluations": {"head": "evaluate", "sublist": 2}, "evaluative": {"head": "evaluate", "sublist": 2}, "re-evaluate": {"head": "evaluate", "sublist": 2}, "re-evaluated": {"head": "evaluate", "sublist": 2}, "re-evaluates": {"head": "evaluate", "sublist": 2}, "re-evaluating": {"head": "evaluate", "sublist": 2}, "re-evaluation": {"head": "evaluate", "sublist": 2}, "feature": {"head": "feature", "sublist": 2}, "featured": {"head": "feature", "sublist": 2}, "features": {"head": "feature", "sublist": 2}, "featuring": {"head": "feature", "sublist": 2}, "final": {"head": "final", "sublist": 2}, "finalise": {"head": "final", "sublist": 2}, "finalised": {"head": "final", "sublist": 2}, "finalises": {"head": "final", "sublist": 2}, "finalising": {"head": "final", "sublist": 2}, "finalize": {"head": "final", "sublist": 2}, "finalized": {"head": "final", "sublist": 2}, "finalizes": {"head": "final", "sublist": 2}, "finalizing": {"head": "final", "sublist": 2}, "finality": {"head": "final", "sublist": 2}, "finally": {"head": "final", "sublist": 2}, "finals": {"head": "final", "sublist": 2}, "focus": {"head": "focus", "sublist": 2}, "focused": {"head": "focus", "sublist": 2}, "focuses": {"head": "focus", "sublist": 2}, "focusing": {"head": "focus", "sublist": 2}, "focussed": {"head": "focus", "sublist": 2}, "focussing": {"head": "focus", "sublist": 2}, "refocus": {"head": "focus", "sublist": 2}, "refocused": {"head": "focus", "sublist": 2}, "refocuses": {"head": "focus", "sublist": 2}, "refocusing": {"head": "focus", "sublist": 2}, "refocussed": {"head": "focus", "sublist": 2}, "refocusses": {"head": "focus", "sublist": 2}, "refocussing": {"head": "focus", "sublist": 2}, "impact": {"head": "impact", "sublist": 2}, "impacted": {"head": "impact", "sublist": 2}, "impacting": {"head": "impact", "sublist": 2}, "impacts": {"head": "impact", "sublist": 2}, "injure": {"head": "injure", 
"sublist": 2}, "injured": {"head": "injure", "sublist": 2}, "injures": {"head": "injure", "sublist": 2}, "injuries": {"head": "injure", "sublist": 2}, "injuring": {"head": "injure", "sublist": 2}, "injury": {"head": "injure", "sublist": 2}, "uninjured": {"head": "injure", "sublist": 2}, "institute": {"head": "institute", "sublist": 2}, "instituted": {"head": "institute", "sublist": 2}, "institutes": {"head": "institute", "sublist": 2}, "instituting": {"head": "institute", "sublist": 2}, "institution": {"head": "institute", "sublist": 2}, "institutional": {"head": "institute", "sublist": 2}, "institutionalise": {"head": "institute", "sublist": 2}, "institutionalised": {"head": "institute", "sublist": 2}, "institutionalises": {"head": "institute", "sublist": 2}, "institutionalising": {"head": "institute", "sublist": 2}, "institutionalized": {"head": "institute", "sublist": 2}, "institutionalizes": {"head": "institute", "sublist": 2}, "institutionalizing": {"head": "institute", "sublist": 2}, "institutionally": {"head": "institute", "sublist": 2}, "institutions": {"head": "institute", "sublist": 2}, "invest": {"head": "invest", "sublist": 2}, "invested": {"head": "invest", "sublist": 2}, "investing": {"head": "invest", "sublist": 2}, "investment": {"head": "invest", "sublist": 2}, "investments": {"head": "invest", "sublist": 2}, "investor": {"head": "invest", "sublist": 2}, "investors": {"head": "invest", "sublist": 2}, "invests": {"head": "invest", "sublist": 2}, "reinvest": {"head": "invest", "sublist": 2}, "reinvested": {"head": "invest", "sublist": 2}, "reinvesting": {"head": "invest", "sublist": 2}, "reinvestment": {"head": "invest", "sublist": 2}, "reinvests": {"head": "invest", "sublist": 2}, "item": {"head": "item", "sublist": 2}, "itemisation": {"head": "item", "sublist": 2}, "itemise": {"head": "item", "sublist": 2}, "itemised": {"head": "item", "sublist": 2}, "itemises": {"head": "item", "sublist": 2}, "itemising": {"head": "item", "sublist": 2}, "items": 
{"head": "item", "sublist": 2}, "journal": {"head": "journal", "sublist": 2}, "journals": {"head": "journal", "sublist": 2}, "maintain": {"head": "maintain", "sublist": 2}, "maintained": {"head": "maintain", "sublist": 2}, "maintaining": {"head": "maintain", "sublist": 2}, "maintains": {"head": "maintain", "sublist": 2}, "maintenance": {"head": "maintain", "sublist": 2}, "normal": {"head": "normal", "sublist": 2}, "abnormal": {"head": "normal", "sublist": 2}, "abnormally": {"head": "normal", "sublist": 2}, "normalisation": {"head": "normal", "sublist": 2}, "normalise": {"head": "normal", "sublist": 2}, "normalised": {"head": "normal", "sublist": 2}, "normalises": {"head": "normal", "sublist": 2}, "normalising": {"head": "normal", "sublist": 2}, "normalization": {"head": "normal", "sublist": 2}, "normalize": {"head": "normal", "sublist": 2}, "normalized": {"head": "normal", "sublist": 2}, "normalizes": {"head": "normal", "sublist": 2}, "normalizing": {"head": "normal", "sublist": 2}, "normality": {"head": "normal", "sublist": 2}, "normally": {"head": "normal", "sublist": 2}, "obtain": {"head": "obtain", "sublist": 2}, "obtainable": {"head": "obtain", "sublist": 2}, "obtained": {"head": "obtain", "sublist": 2}, "obtaining": {"head": "obtain", "sublist": 2}, "obtains": {"head": "obtain", "sublist": 2}, "unobtainable": {"head": "obtain", "sublist": 2}, "participate": {"head": "participate", "sublist": 2}, "participant": {"head": "participate", "sublist": 2}, "participants": {"head": "participate", "sublist": 2}, "participated": {"head": "participate", "sublist": 2}, "participates": {"head": "participate", "sublist": 2}, "participating": {"head": "participate", "sublist": 2}, "participation": {"head": "participate", "sublist": 2}, "participatory": {"head": "participate", "sublist": 2}, "perceive": {"head": "perceive", "sublist": 2}, "perceived": {"head": "perceive", "sublist": 2}, "perceives": {"head": "perceive", "sublist": 2}, "perceiving": {"head": "perceive", 
"sublist": 2}, "perception": {"head": "perceive", "sublist": 2}, "perceptions": {"head": "perceive", "sublist": 2}, "positive": {"head": "positive", "sublist": 2}, "positively": {"head": "positive", "sublist": 2}, "potential": {"head": "potential", "sublist": 2}, "potentially": {"head": "potential", "sublist": 2}, "previous": {"head": "previous", "sublist": 2}, "previously": {"head": "previous", "sublist": 2}, "primary": {"head": "primary", "sublist": 2}, "primarily": {"head": "primary", "sublist": 2}, "purchase": {"head": "purchase", "sublist": 2}, "purchased": {"head": "purchase", "sublist": 2}, "purchaser": {"head": "purchase", "sublist": 2}, "purchasers": {"head": "purchase", "sublist": 2}, "purchases": {"head": "purchase", "sublist": 2}, "purchasing": {"head": "purchase", "sublist": 2}, "range": {"head": "range", "sublist": 2}, "ranged": {"head": "range", "sublist": 2}, "ranges": {"head": "range", "sublist": 2}, "ranging": {"head": "range", "sublist": 2}, "region": {"head": "region", "sublist": 2}, "regional": {"head": "region", "sublist": 2}, "regionally": {"head": "region", "sublist": 2}, "regions": {"head": "region", "sublist": 2}, "regulate": {"head": "regulate", "sublist": 2}, "deregulated": {"head": "regulate", "sublist": 2}, "deregulates": {"head": "regulate", "sublist": 2}, "deregulating": {"head": "regulate", "sublist": 2}, "deregulation": {"head": "regulate", "sublist": 2}, "regulated": {"head": "regulate", "sublist": 2}, "regulates": {"head": "regulate", "sublist": 2}, "regulating": {"head": "regulate", "sublist": 2}, "regulation": {"head": "regulate", "sublist": 2}, "regulations": {"head": "regulate", "sublist": 2}, "regulator": {"head": "regulate", "sublist": 2}, "regulators": {"head": "regulate", "sublist": 2}, "regulatory": {"head": "regulate", "sublist": 2}, "unregulated": {"head": "regulate", "sublist": 2}, "relevant": {"head": "relevant", "sublist": 2}, "irrelevance": {"head": "relevant", "sublist": 2}, "irrelevant": {"head": "relevant", 
"sublist": 2}, "relevance": {"head": "relevant", "sublist": 2}, "reside": {"head": "reside", "sublist": 2}, "resided": {"head": "reside", "sublist": 2}, "residence": {"head": "reside", "sublist": 2}, "resident": {"head": "reside", "sublist": 2}, "residential": {"head": "reside", "sublist": 2}, "residents": {"head": "reside", "sublist": 2}, "resides": {"head": "reside", "sublist": 2}, "residing": {"head": "reside", "sublist": 2}, "resource": {"head": "resource", "sublist": 2}, "resourced": {"head": "resource", "sublist": 2}, "resourceful": {"head": "resource", "sublist": 2}, "resources": {"head": "resource", "sublist": 2}, "resourcing": {"head": "resource", "sublist": 2}, "unresourceful": {"head": "resource", "sublist": 2}, "under-resourced": {"head": "resource", "sublist": 2}, "restrict": {"head": "restrict", "sublist": 2}, "restricted": {"head": "restrict", "sublist": 2}, "restricting": {"head": "restrict", "sublist": 2}, "restriction": {"head": "restrict", "sublist": 2}, "restrictions": {"head": "restrict", "sublist": 2}, "restrictive": {"head": "restrict", "sublist": 2}, "restrictively": {"head": "restrict", "sublist": 2}, "restricts": {"head": "restrict", "sublist": 2}, "unrestricted": {"head": "restrict", "sublist": 2}, "unrestrictive": {"head": "restrict", "sublist": 2}, "secure": {"head": "secure", "sublist": 2}, "insecure": {"head": "secure", "sublist": 2}, "insecurities": {"head": "secure", "sublist": 2}, "insecurity": {"head": "secure", "sublist": 2}, "secured": {"head": "secure", "sublist": 2}, "securely": {"head": "secure", "sublist": 2}, "secures": {"head": "secure", "sublist": 2}, "securing": {"head": "secure", "sublist": 2}, "securities": {"head": "secure", "sublist": 2}, "security": {"head": "secure", "sublist": 2}, "seek": {"head": "seek", "sublist": 2}, "seeking": {"head": "seek", "sublist": 2}, "seeks": {"head": "seek", "sublist": 2}, "sought": {"head": "seek", "sublist": 2}, "select": {"head": "select", "sublist": 2}, "selected": {"head": 
"select", "sublist": 2}, "selecting": {"head": "select", "sublist": 2}, "selection": {"head": "select", "sublist": 2}, "selections": {"head": "select", "sublist": 2}, "selective": {"head": "select", "sublist": 2}, "selectively": {"head": "select", "sublist": 2}, "selector": {"head": "select", "sublist": 2}, "selectors": {"head": "select", "sublist": 2}, "selects": {"head": "select", "sublist": 2}, "site": {"head": "site", "sublist": 2}, "sites": {"head": "site", "sublist": 2}, "strategy": {"head": "strategy", "sublist": 2}, "strategic": {"head": "strategy", "sublist": 2}, "strategies": {"head": "strategy", "sublist": 2}, "strategically": {"head": "strategy", "sublist": 2}, "strategist": {"head": "strategy", "sublist": 2}, "strategists": {"head": "strategy", "sublist": 2}, "survey": {"head": "survey", "sublist": 2}, "surveyed": {"head": "survey", "sublist": 2}, "surveying": {"head": "survey", "sublist": 2}, "surveys": {"head": "survey", "sublist": 2}, "text": {"head": "text", "sublist": 2}, "texts": {"head": "text", "sublist": 2}, "textual": {"head": "text", "sublist": 2}, "tradition": {"head": "tradition", "sublist": 2}, "non-traditional": {"head": "tradition", "sublist": 2}, "traditional": {"head": "tradition", "sublist": 2}, "traditionalist": {"head": "tradition", "sublist": 2}, "traditionally": {"head": "tradition", "sublist": 2}, "traditions": {"head": "tradition", "sublist": 2}, "transfer": {"head": "transfer", "sublist": 2}, "transferable": {"head": "transfer", "sublist": 2}, "transference": {"head": "transfer", "sublist": 2}, "transferred": {"head": "transfer", "sublist": 2}, "transferring": {"head": "transfer", "sublist": 2}, "transfers": {"head": "transfer", "sublist": 2}, "abstract": {"head": "abstract", "sublist": 6}, "abstraction": {"head": "abstract", "sublist": 6}, "abstractions": {"head": "abstract", "sublist": 6}, "abstractly": {"head": "abstract", "sublist": 6}, "abstracts": {"head": "abstract", "sublist": 6}, "accurate": {"head": "accurate", 
"sublist": 6}, "accuracy": {"head": "accurate", "sublist": 6}, "accurately": {"head": "accurate", "sublist": 6}, "inaccuracy": {"head": "accurate", "sublist": 6}, "inaccuracies": {"head": "accurate", "sublist": 6}, "inaccurate": {"head": "accurate", "sublist": 6}, "acknowledge": {"head": "acknowledge", "sublist": 6}, "acknowledged": {"head": "acknowledge", "sublist": 6}, "acknowledges": {"head": "acknowledge", "sublist": 6}, "acknowledging": {"head": "acknowledge", "sublist": 6}, "acknowledgement": {"head": "acknowledge", "sublist": 6}, "acknowledgemens": {"head": "acknowledge", "sublist": 6}, "aggregate": {"head": "aggregate", "sublist": 6}, "aggregated": {"head": "aggregate", "sublist": 6}, "aggregates": {"head": "aggregate", "sublist": 6}, "aggregating": {"head": "aggregate", "sublist": 6}, "aggregation": {"head": "aggregate", "sublist": 6}, "allocate": {"head": "allocate", "sublist": 6}, "allocated": {"head": "allocate", "sublist": 6}, "allocates": {"head": "allocate", "sublist": 6}, "allocating": {"head": "allocate", "sublist": 6}, "allocation": {"head": "allocate", "sublist": 6}, "allocations": {"head": "allocate", "sublist": 6}, "assign": {"head": "assign", "sublist": 6}, "assigned": {"head": "assign", "sublist": 6}, "assigning": {"head": "assign", "sublist": 6}, "assignment": {"head": "assign", "sublist": 6}, "assignments": {"head": "assign", "sublist": 6}, "assigns": {"head": "assign", "sublist": 6}, "reassign": {"head": "assign", "sublist": 6}, "reassigned": {"head": "assign", "sublist": 6}, "reassigning": {"head": "assign", "sublist": 6}, "reassigns": {"head": "assign", "sublist": 6}, "unassigned": {"head": "assign", "sublist": 6}, "attach": {"head": "attach", "sublist": 6}, "attached": {"head": "attach", "sublist": 6}, "attaches": {"head": "attach", "sublist": 6}, "attaching": {"head": "attach", "sublist": 6}, "attachment": {"head": "attach", "sublist": 6}, "attachments": {"head": "attach", "sublist": 6}, "unattached": {"head": "attach", "sublist": 6}, 
"author": {"head": "author", "sublist": 6}, "authored": {"head": "author", "sublist": 6}, "authoring": {"head": "author", "sublist": 6}, "authors": {"head": "author", "sublist": 6}, "authorship": {"head": "author", "sublist": 6}, "bond": {"head": "bond", "sublist": 6}, "bonded": {"head": "bond", "sublist": 6}, "bonding": {"head": "bond", "sublist": 6}, "bonds": {"head": "bond", "sublist": 6}, "brief": {"head": "brief", "sublist": 6}, "brevity": {"head": "brief", "sublist": 6}, "briefed": {"head": "brief", "sublist": 6}, "briefing": {"head": "brief", "sublist": 6}, "briefly": {"head": "brief", "sublist": 6}, "briefs": {"head": "brief", "sublist": 6}, "capable": {"head": "capable", "sublist": 6}, "capabilities": {"head": "capable", "sublist": 6}, "capability": {"head": "capable", "sublist": 6}, "incapable": {"head": "capable", "sublist": 6}, "cite": {"head": "cite", "sublist": 6}, "citation": {"head": "cite", "sublist": 6}, "citations": {"head": "cite", "sublist": 6}, "cited": {"head": "cite", "sublist": 6}, "citing": {"head": "cite", "sublist": 6}, "cites": {"head": "cite", "sublist": 6}, "cooperate": {"head": "cooperate", "sublist": 6}, "cooperated": {"head": "cooperate", "sublist": 6}, "cooperates": {"head": "cooperate", "sublist": 6}, "cooperating": {"head": "cooperate", "sublist": 6}, "cooperation": {"head": "cooperate", "sublist": 6}, "cooperative": {"head": "cooperate", "sublist": 6}, "cooperatively": {"head": "cooperate", "sublist": 6}, "co-operate": {"head": "cooperate", "sublist": 6}, "co\u2011operated": {"head": "cooperate", "sublist": 6}, "co\u2011operates": {"head": "cooperate", "sublist": 6}, "co\u2011operation": {"head": "cooperate", "sublist": 6}, "co\u2011operative": {"head": "cooperate", "sublist": 6}, "co\u2011operatively": {"head": "cooperate", "sublist": 6}, "discriminate": {"head": "discriminate", "sublist": 6}, "discriminated": {"head": "discriminate", "sublist": 6}, "discriminates": {"head": "discriminate", "sublist": 6}, "discriminating": 
{"head": "discriminate", "sublist": 6}, "discrimination": {"head": "discriminate", "sublist": 6}, "display": {"head": "display", "sublist": 6}, "displayed": {"head": "display", "sublist": 6}, "displaying": {"head": "display", "sublist": 6}, "displays": {"head": "display", "sublist": 6}, "diverse": {"head": "diverse", "sublist": 6}, "diversely": {"head": "diverse", "sublist": 6}, "diversification": {"head": "diverse", "sublist": 6}, "diversified": {"head": "diverse", "sublist": 6}, "diversifies": {"head": "diverse", "sublist": 6}, "diversify": {"head": "diverse", "sublist": 6}, "diversifying": {"head": "diverse", "sublist": 6}, "diversity": {"head": "diverse", "sublist": 6}, "domain": {"head": "domain", "sublist": 6}, "domains": {"head": "domain", "sublist": 6}, "edit": {"head": "edit", "sublist": 6}, "edited": {"head": "edit", "sublist": 6}, "editing": {"head": "edit", "sublist": 6}, "edition": {"head": "edit", "sublist": 6}, "editions": {"head": "edit", "sublist": 6}, "editor": {"head": "edit", "sublist": 6}, "editorial": {"head": "edit", "sublist": 6}, "editorials": {"head": "edit", "sublist": 6}, "editors": {"head": "edit", "sublist": 6}, "edits": {"head": "edit", "sublist": 6}, "enhance": {"head": "enhance", "sublist": 6}, "enhanced": {"head": "enhance", "sublist": 6}, "enhancement": {"head": "enhance", "sublist": 6}, "enhances": {"head": "enhance", "sublist": 6}, "enhancing": {"head": "enhance", "sublist": 6}, "estate": {"head": "estate", "sublist": 6}, "estates": {"head": "estate", "sublist": 6}, "exceed": {"head": "exceed", "sublist": 6}, "exceeded": {"head": "exceed", "sublist": 6}, "exceeding": {"head": "exceed", "sublist": 6}, "exceeds": {"head": "exceed", "sublist": 6}, "expert": {"head": "expert", "sublist": 6}, "expertise": {"head": "expert", "sublist": 6}, "expertly": {"head": "expert", "sublist": 6}, "experts": {"head": "expert", "sublist": 6}, "explicit": {"head": "explicit", "sublist": 6}, "explicitly": {"head": "explicit", "sublist": 6}, 
"federal": {"head": "federal", "sublist": 6}, "federation": {"head": "federal", "sublist": 6}, "federations": {"head": "federal", "sublist": 6}, "fee": {"head": "fee", "sublist": 6}, "fees": {"head": "fee", "sublist": 6}, "flexible": {"head": "flexible", "sublist": 6}, "flexibility": {"head": "flexible", "sublist": 6}, "inflexible": {"head": "flexible", "sublist": 6}, "inflexibility": {"head": "flexible", "sublist": 6}, "furthermore": {"head": "furthermore", "sublist": 6}, "gender": {"head": "gender", "sublist": 6}, "genders": {"head": "gender", "sublist": 6}, "ignorant": {"head": "ignorant", "sublist": 6}, "ignorance": {"head": "ignorant", "sublist": 6}, "ignore": {"head": "ignorant", "sublist": 6}, "ignored": {"head": "ignorant", "sublist": 6}, "ignores": {"head": "ignorant", "sublist": 6}, "ignoring": {"head": "ignorant", "sublist": 6}, "incentive": {"head": "incentive", "sublist": 6}, "incentives": {"head": "incentive", "sublist": 6}, "incidence": {"head": "incidence", "sublist": 6}, "incident": {"head": "incidence", "sublist": 6}, "incidentally": {"head": "incidence", "sublist": 6}, "incidents": {"head": "incidence", "sublist": 6}, "incorporate": {"head": "incorporate", "sublist": 6}, "incorporated": {"head": "incorporate", "sublist": 6}, "incorporates": {"head": "incorporate", "sublist": 6}, "incorporating": {"head": "incorporate", "sublist": 6}, "incorporation": {"head": "incorporate", "sublist": 6}, "index": {"head": "index", "sublist": 6}, "indexed": {"head": "index", "sublist": 6}, "indexes": {"head": "index", "sublist": 6}, "indexing": {"head": "index", "sublist": 6}, "inhibit": {"head": "inhibit", "sublist": 6}, "inhibited": {"head": "inhibit", "sublist": 6}, "inhibiting": {"head": "inhibit", "sublist": 6}, "inhibition": {"head": "inhibit", "sublist": 6}, "inhibitions": {"head": "inhibit", "sublist": 6}, "inhibits": {"head": "inhibit", "sublist": 6}, "initiate": {"head": "initiate", "sublist": 6}, "initiated": {"head": "initiate", "sublist": 6}, 
"initiates": {"head": "initiate", "sublist": 6}, "initiating": {"head": "initiate", "sublist": 6}, "initiation": {"head": "initiate", "sublist": 6}, "initiations": {"head": "initiate", "sublist": 6}, "initiative": {"head": "initiate", "sublist": 6}, "initiatives": {"head": "initiate", "sublist": 6}, "initiator": {"head": "initiate", "sublist": 6}, "initiators": {"head": "initiate", "sublist": 6}, "input": {"head": "input", "sublist": 6}, "inputs": {"head": "input", "sublist": 6}, "instruct": {"head": "instruct", "sublist": 6}, "instruction": {"head": "instruct", "sublist": 6}, "instructed": {"head": "instruct", "sublist": 6}, "instructing": {"head": "instruct", "sublist": 6}, "instructions": {"head": "instruct", "sublist": 6}, "instructive": {"head": "instruct", "sublist": 6}, "instructor": {"head": "instruct", "sublist": 6}, "instructors": {"head": "instruct", "sublist": 6}, "instructs": {"head": "instruct", "sublist": 6}, "intelligent": {"head": "intelligent", "sublist": 6}, "intelligence": {"head": "intelligent", "sublist": 6}, "intelligently": {"head": "intelligent", "sublist": 6}, "unintelligent": {"head": "intelligent", "sublist": 6}, "interval": {"head": "interval", "sublist": 6}, "intervals": {"head": "interval", "sublist": 6}, "lecture": {"head": "lecture", "sublist": 6}, "lectured": {"head": "lecture", "sublist": 6}, "lecturer": {"head": "lecture", "sublist": 6}, "lecturers": {"head": "lecture", "sublist": 6}, "lectures": {"head": "lecture", "sublist": 6}, "lecturing": {"head": "lecture", "sublist": 6}, "migrate": {"head": "migrate", "sublist": 6}, "migrant": {"head": "migrate", "sublist": 6}, "migrants": {"head": "migrate", "sublist": 6}, "migrated": {"head": "migrate", "sublist": 6}, "migrates": {"head": "migrate", "sublist": 6}, "migrating": {"head": "migrate", "sublist": 6}, "migration": {"head": "migrate", "sublist": 6}, "migrations": {"head": "migrate", "sublist": 6}, "migratory": {"head": "migrate", "sublist": 6}, "minimum": {"head": "minimum", 
"sublist": 6}, "ministry": {"head": "ministry", "sublist": 6}, "ministered": {"head": "ministry", "sublist": 6}, "ministering": {"head": "ministry", "sublist": 6}, "ministerial": {"head": "ministry", "sublist": 6}, "ministries": {"head": "ministry", "sublist": 6}, "motive": {"head": "motive", "sublist": 6}, "motivate": {"head": "motive", "sublist": 6}, "motivated": {"head": "motive", "sublist": 6}, "motivates": {"head": "motive", "sublist": 6}, "motivating": {"head": "motive", "sublist": 6}, "motivation": {"head": "motive", "sublist": 6}, "motivations": {"head": "motive", "sublist": 6}, "motives": {"head": "motive", "sublist": 6}, "unmotivated": {"head": "motive", "sublist": 6}, "neutral": {"head": "neutral", "sublist": 6}, "neutralisation": {"head": "neutral", "sublist": 6}, "neutralise": {"head": "neutral", "sublist": 6}, "neutralised": {"head": "neutral", "sublist": 6}, "neutralises": {"head": "neutral", "sublist": 6}, "neutralising": {"head": "neutral", "sublist": 6}, "neutrality": {"head": "neutral", "sublist": 6}, "neutralization": {"head": "neutral", "sublist": 6}, "neutralize": {"head": "neutral", "sublist": 6}, "neutralized": {"head": "neutral", "sublist": 6}, "neutralizes": {"head": "neutral", "sublist": 6}, "neutralizing": {"head": "neutral", "sublist": 6}, "nevertheless": {"head": "nevertheless", "sublist": 6}, "overseas": {"head": "overseas", "sublist": 6}, "precede": {"head": "precede", "sublist": 6}, "preceded": {"head": "precede", "sublist": 6}, "precedence": {"head": "precede", "sublist": 6}, "precedent": {"head": "precede", "sublist": 6}, "precedes": {"head": "precede", "sublist": 6}, "preceding": {"head": "precede", "sublist": 6}, "unprecedented": {"head": "precede", "sublist": 6}, "presume": {"head": "presume", "sublist": 6}, "presumably": {"head": "presume", "sublist": 6}, "presumed": {"head": "presume", "sublist": 6}, "presumes": {"head": "presume", "sublist": 6}, "presuming": {"head": "presume", "sublist": 6}, "presumption": {"head": 
"presume", "sublist": 6}, "presumptions": {"head": "presume", "sublist": 6}, "presumptuous": {"head": "presume", "sublist": 6}, "rational": {"head": "rational", "sublist": 6}, "irrational": {"head": "rational", "sublist": 6}, "rationalisation": {"head": "rational", "sublist": 6}, "rationalisations": {"head": "rational", "sublist": 6}, "rationalise": {"head": "rational", "sublist": 6}, "rationalised": {"head": "rational", "sublist": 6}, "rationalises": {"head": "rational", "sublist": 6}, "rationalising": {"head": "rational", "sublist": 6}, "rationalism": {"head": "rational", "sublist": 6}, "rationality": {"head": "rational", "sublist": 6}, "rationalization": {"head": "rational", "sublist": 6}, "rationalizations": {"head": "rational", "sublist": 6}, "rationalize": {"head": "rational", "sublist": 6}, "rationalized": {"head": "rational", "sublist": 6}, "rationalizes": {"head": "rational", "sublist": 6}, "rationally": {"head": "rational", "sublist": 6}, "recover": {"head": "recover", "sublist": 6}, "recoverable": {"head": "recover", "sublist": 6}, "recovered": {"head": "recover", "sublist": 6}, "recovering": {"head": "recover", "sublist": 6}, "recovers": {"head": "recover", "sublist": 6}, "recovery": {"head": "recover", "sublist": 6}, "reveal": {"head": "reveal", "sublist": 6}, "revealed": {"head": "reveal", "sublist": 6}, "revealing": {"head": "reveal", "sublist": 6}, "reveals": {"head": "reveal", "sublist": 6}, "revelation": {"head": "reveal", "sublist": 6}, "revelations": {"head": "reveal", "sublist": 6}, "scope": {"head": "scope", "sublist": 6}, "subsidy": {"head": "subsidy", "sublist": 6}, "subsidiary": {"head": "subsidy", "sublist": 6}, "subsidies": {"head": "subsidy", "sublist": 6}, "subsidise": {"head": "subsidy", "sublist": 6}, "subsidised": {"head": "subsidy", "sublist": 6}, "subsidises": {"head": "subsidy", "sublist": 6}, "subsidising": {"head": "subsidy", "sublist": 6}, "subsidize": {"head": "subsidy", "sublist": 6}, "subsidized": {"head": "subsidy", 
"sublist": 6}, "subsidizes": {"head": "subsidy", "sublist": 6}, "subsidizing": {"head": "subsidy", "sublist": 6}, "tape": {"head": "tape", "sublist": 6}, "taped": {"head": "tape", "sublist": 6}, "tapes": {"head": "tape", "sublist": 6}, "taping": {"head": "tape", "sublist": 6}, "trace": {"head": "trace", "sublist": 6}, "traceable": {"head": "trace", "sublist": 6}, "traced": {"head": "trace", "sublist": 6}, "traces": {"head": "trace", "sublist": 6}, "tracing": {"head": "trace", "sublist": 6}, "transform": {"head": "transform", "sublist": 6}, "transformation": {"head": "transform", "sublist": 6}, "transformations": {"head": "transform", "sublist": 6}, "transformed": {"head": "transform", "sublist": 6}, "transforming": {"head": "transform", "sublist": 6}, "transforms": {"head": "transform", "sublist": 6}, "transport": {"head": "transport", "sublist": 6}, "transportation": {"head": "transport", "sublist": 6}, "transported": {"head": "transport", "sublist": 6}, "transporter": {"head": "transport", "sublist": 6}, "transporters": {"head": "transport", "sublist": 6}, "transporting": {"head": "transport", "sublist": 6}, "transports": {"head": "transport", "sublist": 6}, "underlie": {"head": "underlie", "sublist": 6}, "underlay": {"head": "underlie", "sublist": 6}, "underlies": {"head": "underlie", "sublist": 6}, "underlying": {"head": "underlie", "sublist": 6}, "utilise": {"head": "utilise", "sublist": 6}, "utilisation": {"head": "utilise", "sublist": 6}, "utilised": {"head": "utilise", "sublist": 6}, "utilises": {"head": "utilise", "sublist": 6}, "utilising": {"head": "utilise", "sublist": 6}, "utiliser": {"head": "utilise", "sublist": 6}, "utilisers": {"head": "utilise", "sublist": 6}, "utility": {"head": "utilise", "sublist": 6}, "utilities": {"head": "utilise", "sublist": 6}, "utilization": {"head": "utilise", "sublist": 6}, "utilize": {"head": "utilise", "sublist": 6}, "utilized": {"head": "utilise", "sublist": 6}, "utilizes": {"head": "utilise", "sublist": 6}, 
"utilizing": {"head": "utilise", "sublist": 6}}
| 75,799 | 151,597 | 0.589902 |
fb2ac49313b125c5ec20102506096a8575274ec2 | 305 | py | Python | game.py | keerthana1502/python_practice | 8c0499e014826af78f9a88730551ace3fa79686d | [
"bzip2-1.0.6"
] | null | null | null | game.py | keerthana1502/python_practice | 8c0499e014826af78f9a88730551ace3fa79686d | [
"bzip2-1.0.6"
] | null | null | null | game.py | keerthana1502/python_practice | 8c0499e014826af78f9a88730551ace3fa79686d | [
"bzip2-1.0.6"
] | null | null | null | a=input("left or right?")
# Tiny branching text adventure driven by the answer read into ``a`` above.
# Unrecognized answers simply fall through and end the script with no
# output, matching the original control flow.
if a == "right":
    second_choice = input("swim or wait? ")
    if second_choice == "swim":
        colour = input("colour = ")
        if colour == "yellow":
            print("win")
        else:
            print("Game over")
    elif second_choice == "wait":
        print("Game over")
elif a == "left":
    print("Game over")
| 19.0625 | 30 | 0.452459 |
41ac45d9b86b6e40b318dd19dca22ee36e3582b7 | 73,824 | py | Python | Lib/xml/etree/ElementTree.py | masamitsu-murase/single_binary_stackless_python | 1a601683882bc0a024b1bef3e35719554e44cce1 | [
"CNRI-Python-GPL-Compatible"
] | 5 | 2018-07-02T19:10:39.000Z | 2021-09-27T04:05:10.000Z | Lib/xml/etree/ElementTree.py | masamitsu-murase/single_binary_stackless_python | 1a601683882bc0a024b1bef3e35719554e44cce1 | [
"CNRI-Python-GPL-Compatible"
] | 3 | 2018-06-10T06:28:06.000Z | 2021-09-24T13:54:19.000Z | Lib/xml/etree/ElementTree.py | masamitsu-murase/single_binary_stackless_python | 1a601683882bc0a024b1bef3e35719554e44cce1 | [
"CNRI-Python-GPL-Compatible"
] | null | null | null | """Lightweight XML support for Python.
XML is an inherently hierarchical data format, and the most natural way to
represent it is with a tree. This module has two classes for this purpose:
1. ElementTree represents the whole XML document as a tree and
2. Element represents a single node in this tree.
Interactions with the whole document (reading and writing to/from files) are
usually done on the ElementTree level. Interactions with a single XML element
and its sub-elements are done on the Element level.
Element is a flexible container object designed to store hierarchical data
structures in memory. It can be described as a cross between a list and a
dictionary. Each Element has a number of properties associated with it:
'tag' - a string containing the element's name.
'attributes' - a Python dictionary storing the element's attributes.
'text' - a string containing the element's text content.
'tail' - an optional string containing text after the element's end tag.
And a number of child elements stored in a Python sequence.
To create an element instance, use the Element constructor,
or the SubElement factory function.
You can also use the ElementTree class to wrap an element structure
and convert it to and from XML.
"""
#---------------------------------------------------------------------
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/psf/license for licensing details.
#
# ElementTree
# Copyright (c) 1999-2008 by Fredrik Lundh. All rights reserved.
#
# fredrik@pythonware.com
# http://www.pythonware.com
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2008 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
# Names exported by "from xml.etree.ElementTree import *".
__all__ = [
    # public symbols
    "Comment",
    "dump",
    "Element", "ElementTree",
    "fromstring", "fromstringlist",
    "iselement", "iterparse",
    "parse", "ParseError",
    "PI", "ProcessingInstruction",
    "QName",
    "SubElement",
    "tostring", "tostringlist",
    "TreeBuilder",
    "VERSION",
    "XML", "XMLID",
    "XMLParser", "XMLPullParser",
    "register_namespace",
    "canonicalize", "C14NWriterTarget",
    ]

# Version of the ElementTree toolkit (not of any XML standard); kept as a
# module constant for backward compatibility.
VERSION = "1.3.0"
import sys
import re
import warnings
import io
import collections
import collections.abc
import contextlib
from . import ElementPath
class ParseError(SyntaxError):
    """An error when parsing an XML document.

    In addition to its exception value, a ParseError contains
    two extra attributes:
        'code'     - the specific exception code
        'position' - the line and column of the error
    """
    # The extra attributes are attached by the parser wrapper when the
    # underlying parser error is translated; no behavior is added here.
    pass
# --------------------------------------------------------------------
def iselement(element):
    """Return True if *element* appears to be an Element."""
    # Duck-typed probe: anything exposing a 'tag' attribute qualifies.
    try:
        element.tag
    except AttributeError:
        return False
    return True
class Element:
    """An XML element.

    This class is the reference implementation of the Element interface.

    An element's length is its number of subelements. That means if you
    want to check if an element is truly empty, you should check BOTH
    its length AND its text attribute.

    The element tag, attribute names, and attribute values can be either
    bytes or strings.

    *tag* is the element name. *attrib* is an optional dictionary containing
    element attributes. *extra* are additional element attributes given as
    keyword arguments.

    Example form:
        <tag attrib>text<child/>...</tag>tail
    """

    tag = None
    """The element's name."""

    attrib = None
    """Dictionary of the element's attributes."""

    text = None
    """
    Text before first subelement. This is either a string or the value None.
    Note that if there is no text, this attribute may be either
    None or the empty string, depending on the parser.
    """

    tail = None
    """
    Text after this element's end tag, but before the next sibling element's
    start tag. This is either a string or the value None. Note that if there
    was no text, this attribute may be either None or an empty string,
    depending on the parser.
    """

    def __init__(self, tag, attrib={}, **extra):
        # NOTE: the shared {} default is never mutated -- a fresh dict is
        # always built below -- so the mutable-default pitfall does not apply.
        if not isinstance(attrib, dict):
            raise TypeError("attrib must be dict, not %s" % (
                attrib.__class__.__name__,))
        self.tag = tag
        # Keyword extras override same-named keys from *attrib*.
        self.attrib = {**attrib, **extra}
        self._children = []

    def __repr__(self):
        return "<%s %r at %#x>" % (self.__class__.__name__, self.tag, id(self))

    def makeelement(self, tag, attrib):
        """Create a new element with the same type.

        *tag* is a string containing the element name.
        *attrib* is a dictionary containing the element attributes.

        Do not call this method, use the SubElement factory function instead.
        """
        return self.__class__(tag, attrib)

    def copy(self):
        """Return copy of current element.

        This creates a shallow copy. Subelements will be shared with the
        original tree.
        """
        elem = self.makeelement(self.tag, self.attrib)
        elem.text = self.text
        elem.tail = self.tail
        # Slice assignment shares (does not copy) the child elements.
        elem[:] = self
        return elem

    def __len__(self):
        return len(self._children)

    def __bool__(self):
        # Truth-testing is deprecated: an empty-but-existing element is too
        # easy to confuse with None, so callers should use len() explicitly.
        warnings.warn(
            "The behavior of this method will change in future versions. "
            "Use specific 'len(elem)' or 'elem is not None' test instead.",
            FutureWarning, stacklevel=2
            )
        return len(self._children) != 0 # emulate old behaviour, for now

    def __getitem__(self, index):
        return self._children[index]

    def __setitem__(self, index, element):
        # Validate everything that is about to be stored: a slice assigns a
        # sequence of elements, a plain index assigns a single element.
        if isinstance(index, slice):
            for elt in element:
                self._assert_is_element(elt)
        else:
            self._assert_is_element(element)
        self._children[index] = element

    def __delitem__(self, index):
        del self._children[index]

    def append(self, subelement):
        """Add *subelement* to the end of this element.

        The new element will appear in document order after the last existing
        subelement (or directly after the text, if it's the first subelement),
        but before the end tag for this element.
        """
        self._assert_is_element(subelement)
        self._children.append(subelement)

    def extend(self, elements):
        """Append subelements from a sequence.

        *elements* is a sequence with zero or more elements.
        """
        for element in elements:
            self._assert_is_element(element)
        self._children.extend(elements)

    def insert(self, index, subelement):
        """Insert *subelement* at position *index*."""
        self._assert_is_element(subelement)
        self._children.insert(index, subelement)

    def _assert_is_element(self, e):
        # Need to refer to the actual Python implementation, not the
        # shadowing C implementation.
        # (_Element_Py is presumably bound to this class elsewhere in the
        # module, outside this chunk -- TODO confirm against full file.)
        if not isinstance(e, _Element_Py):
            raise TypeError('expected an Element, not %s' % type(e).__name__)

    def remove(self, subelement):
        """Remove matching subelement.

        Unlike the find methods, this method compares elements based on
        identity, NOT ON tag value or contents. To remove subelements by
        other means, the easiest way is to use a list comprehension to
        select what elements to keep, and then use slice assignment to update
        the parent element.

        ValueError is raised if a matching element could not be found.
        """
        # assert iselement(element)
        self._children.remove(subelement)

    def getchildren(self):
        """(Deprecated) Return all subelements.

        Elements are returned in document order.
        """
        warnings.warn(
            "This method will be removed in future versions. "
            "Use 'list(elem)' or iteration over elem instead.",
            DeprecationWarning, stacklevel=2
            )
        return self._children

    def find(self, path, namespaces=None):
        """Find first matching element by tag name or path.

        *path* is a string having either an element tag or an XPath,
        *namespaces* is an optional mapping from namespace prefix to full name.

        Return the first matching element, or None if no element was found.
        """
        return ElementPath.find(self, path, namespaces)

    def findtext(self, path, default=None, namespaces=None):
        """Find text for first matching element by tag name or path.

        *path* is a string having either an element tag or an XPath,
        *default* is the value to return if the element was not found,
        *namespaces* is an optional mapping from namespace prefix to full name.

        Return text content of first matching element, or default value if
        none was found. Note that if an element is found having no text
        content, the empty string is returned.
        """
        return ElementPath.findtext(self, path, default, namespaces)

    def findall(self, path, namespaces=None):
        """Find all matching subelements by tag name or path.

        *path* is a string having either an element tag or an XPath,
        *namespaces* is an optional mapping from namespace prefix to full name.

        Returns list containing all matching elements in document order.
        """
        return ElementPath.findall(self, path, namespaces)

    def iterfind(self, path, namespaces=None):
        """Find all matching subelements by tag name or path.

        *path* is a string having either an element tag or an XPath,
        *namespaces* is an optional mapping from namespace prefix to full name.

        Return an iterable yielding all matching elements in document order.
        """
        return ElementPath.iterfind(self, path, namespaces)

    def clear(self):
        """Reset element.

        This function removes all subelements, clears all attributes, and sets
        the text and tail attributes to None.
        """
        self.attrib.clear()
        self._children = []
        self.text = self.tail = None

    def get(self, key, default=None):
        """Get element attribute.

        Equivalent to attrib.get, but some implementations may handle this a
        bit more efficiently. *key* is what attribute to look for, and
        *default* is what to return if the attribute was not found.

        Returns a string containing the attribute value, or the default if
        attribute was not found.
        """
        return self.attrib.get(key, default)

    def set(self, key, value):
        """Set element attribute.

        Equivalent to attrib[key] = value, but some implementations may handle
        this a bit more efficiently. *key* is what attribute to set, and
        *value* is the attribute value to set it to.
        """
        self.attrib[key] = value

    def keys(self):
        """Get list of attribute names.

        Names are returned in an arbitrary order, just like an ordinary
        Python dict. Equivalent to attrib.keys()
        """
        return self.attrib.keys()

    def items(self):
        """Get element attributes as a sequence.

        The attributes are returned in arbitrary order. Equivalent to
        attrib.items().

        Return a list of (name, value) tuples.
        """
        return self.attrib.items()

    def iter(self, tag=None):
        """Create tree iterator.

        The iterator loops over the element and all subelements in document
        order, returning all elements with a matching tag.

        If the tree structure is modified during iteration, new or removed
        elements may or may not be included. To get a stable set, use the
        list() function on the iterator, and loop over the resulting list.

        *tag* is what tags to look for (default is to return all elements)

        Return an iterator containing all the matching elements.
        """
        if tag == "*":
            tag = None
        if tag is None or self.tag == tag:
            yield self
        for e in self._children:
            # Recurse into each subtree, preserving document order.
            yield from e.iter(tag)

    # compatibility
    def getiterator(self, tag=None):
        # Deprecated alias for iter(); unlike iter() it eagerly materializes
        # the matches into a list.
        warnings.warn(
            "This method will be removed in future versions. "
            "Use 'elem.iter()' or 'list(elem.iter())' instead.",
            DeprecationWarning, stacklevel=2
            )
        return list(self.iter(tag))

    def itertext(self):
        """Create text iterator.

        The iterator loops over the element and all subelements in document
        order, returning all inner text.
        """
        tag = self.tag
        # Comment/PI nodes carry a non-string tag; their text is not
        # document text, so skip the whole node.
        if not isinstance(tag, str) and tag is not None:
            return
        t = self.text
        if t:
            yield t
        for e in self:
            yield from e.itertext()
            t = e.tail
            if t:
                yield t
def SubElement(parent, tag, attrib={}, **extra):
    """Create a subelement, append it to *parent*, and return it.

    The element tag, attribute names, and attribute values can be either
    bytes or Unicode strings.

    *parent* is the parent element, *tag* is the subelement's name, *attrib*
    is an optional dictionary of element attributes, and *extra* are
    additional attributes given as keyword arguments (overriding same-named
    keys from *attrib*).
    """
    merged = {**attrib, **extra}
    subelement = parent.makeelement(tag, merged)
    parent.append(subelement)
    return subelement
def Comment(text=None):
    """Comment element factory.

    Creates a special element that the standard serializer emits as an
    XML comment.

    *text* is a string containing the comment string.
    """
    comment = Element(Comment)
    comment.text = text
    return comment
def ProcessingInstruction(target, text=None):
    """Processing Instruction element factory.

    Creates a special element that the standard serializer emits as an
    XML processing instruction.

    *target* is a string containing the processing instruction, *text* is
    a string containing the processing instruction contents, if any.
    """
    pi = Element(ProcessingInstruction)
    if text:
        pi.text = target + " " + text
    else:
        pi.text = target
    return pi
# Common shorthand alias.
PI = ProcessingInstruction
class QName:
    """Qualified name wrapper.

    Wraps a QName attribute value so that proper namespace handling can
    be applied on output.

    *text_or_uri* is a string containing the QName value either in the
    form {uri}local, or, if the *tag* argument is given, the URI part of
    a QName.

    *tag* is an optional argument which, if given, makes the first
    argument (text_or_uri) be interpreted as a URI and this argument be
    interpreted as a local name.
    """
    def __init__(self, text_or_uri, tag=None):
        # With a tag, combine URI and local name into Clark notation.
        self.text = "{%s}%s" % (text_or_uri, tag) if tag else text_or_uri
    def __str__(self):
        return self.text
    def __repr__(self):
        return '<%s %r>' % (type(self).__name__, self.text)
    def __hash__(self):
        return hash(self.text)
    # All comparisons delegate to the wrapped text; a QName operand is
    # unwrapped first so that QName-vs-QName and QName-vs-str both work.
    @staticmethod
    def _text_of(value):
        return value.text if isinstance(value, QName) else value
    def __le__(self, other):
        return self.text <= QName._text_of(other)
    def __lt__(self, other):
        return self.text < QName._text_of(other)
    def __ge__(self, other):
        return self.text >= QName._text_of(other)
    def __gt__(self, other):
        return self.text > QName._text_of(other)
    def __eq__(self, other):
        return self.text == QName._text_of(other)
# --------------------------------------------------------------------
class ElementTree:
    """An XML element hierarchy.

    This class also provides support for serialization to and from
    standard XML.

    *element* is an optional root element node,
    *file* is an optional file handle or file name of an XML file whose
    contents will be used to initialize the tree with.
    """
    def __init__(self, element=None, file=None):
        # assert element is None or iselement(element)
        self._root = element # first node
        if file:
            self.parse(file)
    def getroot(self):
        """Return root element of this tree."""
        return self._root
    def _setroot(self, element):
        """Replace root element of this tree.

        This will discard the current contents of the tree and replace it
        with the given element.  Use with care!
        """
        # assert iselement(element)
        self._root = element
    def parse(self, source, parser=None):
        """Load external XML document into element tree.

        *source* is a file name or file object, *parser* is an optional parser
        instance that defaults to XMLParser.

        ParseError is raised if the parser fails to parse the document.

        Returns the root element of the given source document.
        """
        close_source = False
        if not hasattr(source, "read"):
            # A path was given rather than a file object, so we own
            # (and must close) the handle we open here.
            source = open(source, "rb")
            close_source = True
        try:
            if parser is None:
                # If no parser was specified, create a default XMLParser
                parser = XMLParser()
                if hasattr(parser, '_parse_whole'):
                    # The default XMLParser, when it comes from an accelerator,
                    # can define an internal _parse_whole API for efficiency.
                    # It can be used to parse the whole source without feeding
                    # it with chunks.
                    self._root = parser._parse_whole(source)
                    return self._root
            # Fall back to incremental feeding in fixed-size chunks.
            while True:
                data = source.read(65536)
                if not data:
                    break
                parser.feed(data)
            self._root = parser.close()
            return self._root
        finally:
            if close_source:
                source.close()
    def iter(self, tag=None):
        """Create and return tree iterator for the root element.

        The iterator loops over all elements in this tree, in document order.

        *tag* is a string with the tag name to iterate over
        (default is to return all elements).
        """
        # assert self._root is not None
        return self._root.iter(tag)
    # compatibility
    def getiterator(self, tag=None):
        # Deprecated alias for iter(); kept for backward compatibility.
        warnings.warn(
            "This method will be removed in future versions. "
            "Use 'tree.iter()' or 'list(tree.iter())' instead.",
            DeprecationWarning, stacklevel=2
        )
        return list(self.iter(tag))
    def find(self, path, namespaces=None):
        """Find first matching element by tag name or path.

        Same as getroot().find(path), which is Element.find()

        *path* is a string having either an element tag or an XPath,
        *namespaces* is an optional mapping from namespace prefix to full name.

        Return the first matching element, or None if no element was found.
        """
        # assert self._root is not None
        if path[:1] == "/":
            # Absolute paths are not properly supported; rewrite to a
            # relative path and warn instead of searching silently.
            path = "." + path
            warnings.warn(
                "This search is broken in 1.3 and earlier, and will be "
                "fixed in a future version. If you rely on the current "
                "behaviour, change it to %r" % path,
                FutureWarning, stacklevel=2
            )
        return self._root.find(path, namespaces)
    def findtext(self, path, default=None, namespaces=None):
        """Find first matching element by tag name or path.

        Same as getroot().findtext(path), which is Element.findtext()

        *path* is a string having either an element tag or an XPath,
        *namespaces* is an optional mapping from namespace prefix to full name.

        Return the first matching element, or None if no element was found.
        """
        # assert self._root is not None
        if path[:1] == "/":
            path = "." + path
            warnings.warn(
                "This search is broken in 1.3 and earlier, and will be "
                "fixed in a future version. If you rely on the current "
                "behaviour, change it to %r" % path,
                FutureWarning, stacklevel=2
            )
        return self._root.findtext(path, default, namespaces)
    def findall(self, path, namespaces=None):
        """Find all matching subelements by tag name or path.

        Same as getroot().findall(path), which is Element.findall().

        *path* is a string having either an element tag or an XPath,
        *namespaces* is an optional mapping from namespace prefix to full name.

        Return list containing all matching elements in document order.
        """
        # assert self._root is not None
        if path[:1] == "/":
            path = "." + path
            warnings.warn(
                "This search is broken in 1.3 and earlier, and will be "
                "fixed in a future version. If you rely on the current "
                "behaviour, change it to %r" % path,
                FutureWarning, stacklevel=2
            )
        return self._root.findall(path, namespaces)
    def iterfind(self, path, namespaces=None):
        """Find all matching subelements by tag name or path.

        Same as getroot().iterfind(path), which is element.iterfind()

        *path* is a string having either an element tag or an XPath,
        *namespaces* is an optional mapping from namespace prefix to full name.

        Return an iterable yielding all matching elements in document order.
        """
        # assert self._root is not None
        if path[:1] == "/":
            path = "." + path
            warnings.warn(
                "This search is broken in 1.3 and earlier, and will be "
                "fixed in a future version. If you rely on the current "
                "behaviour, change it to %r" % path,
                FutureWarning, stacklevel=2
            )
        return self._root.iterfind(path, namespaces)
    def write(self, file_or_filename,
              encoding=None,
              xml_declaration=None,
              default_namespace=None,
              method=None, *,
              short_empty_elements=True,
              sort_attrib=False):
        """Write element tree to a file as XML.

        Arguments:
          *file_or_filename* -- file name or a file object opened for writing

          *encoding* -- the output encoding (default: US-ASCII)

          *xml_declaration* -- bool indicating if an XML declaration should be
                               added to the output. If None, an XML declaration
                               is added if encoding IS NOT either of:
                               US-ASCII, UTF-8, or Unicode

          *default_namespace* -- sets the default XML namespace (for "xmlns")

          *method* -- either "xml" (default), "html, "text", or "c14n"

          *short_empty_elements* -- controls the formatting of elements
                                    that contain no content. If True (default)
                                    they are emitted as a single self-closed
                                    tag, otherwise they are emitted as a pair
                                    of start/end tags

          *sort_attrib* -- if True, sort attributes.
        """
        if not method:
            method = "xml"
        elif method not in _serialize:
            raise ValueError("unknown method %r" % method)
        if not encoding:
            if method == "c14n":
                encoding = "utf-8"
            else:
                encoding = "us-ascii"
        enc_lower = encoding.lower()
        with _get_writer(file_or_filename, enc_lower) as write:
            if method == "xml" and (xml_declaration or
                    (xml_declaration is None and
                     enc_lower not in ("utf-8", "us-ascii", "unicode"))):
                declared_encoding = encoding
                if enc_lower == "unicode":
                    # Retrieve the default encoding for the xml declaration
                    import locale
                    declared_encoding = locale.getpreferredencoding()
                write("<?xml version='1.0' encoding='%s'?>\n" % (
                    declared_encoding,))
            if method == "text":
                _serialize_text(write, self._root)
            else:
                # Compute the qname/prefix tables once for the whole tree,
                # then dispatch to the serializer selected by *method*.
                qnames, namespaces = _namespaces(self._root, default_namespace)
                serialize = _serialize[method]
                serialize(write, self._root, qnames, namespaces,
                          short_empty_elements=short_empty_elements, sort_attrib=sort_attrib)
    def write_c14n(self, file):
        # lxml.etree compatibility. use output method instead
        return self.write(file, method="c14n")
# --------------------------------------------------------------------
# serialization support
@contextlib.contextmanager
def _get_writer(file_or_filename, encoding):
    # Yield a text write callable for the destination and release all
    # resources once the caller is done with it.
    try:
        write = file_or_filename.write
    except AttributeError:
        # file_or_filename is a file name
        if encoding == "unicode":
            file = open(file_or_filename, "w")
        else:
            file = open(file_or_filename, "w", encoding=encoding,
                        errors="xmlcharrefreplace")
        with file:
            yield file.write
    else:
        # file_or_filename is a file-like object
        # encoding determines if it is a text or binary writer
        if encoding == "unicode":
            # use a text writer as is
            yield write
        else:
            # wrap a binary writer with TextIOWrapper
            with contextlib.ExitStack() as stack:
                if isinstance(file_or_filename, io.BufferedIOBase):
                    file = file_or_filename
                elif isinstance(file_or_filename, io.RawIOBase):
                    file = io.BufferedWriter(file_or_filename)
                    # Keep the original file open when the BufferedWriter is
                    # destroyed
                    stack.callback(file.detach)
                else:
                    # This is to handle passed objects that aren't in the
                    # IOBase hierarchy, but just have a write method
                    file = io.BufferedIOBase()
                    file.writable = lambda: True
                    file.write = write
                    try:
                        # TextIOWrapper uses this methods to determine
                        # if BOM (for UTF-16, etc) should be added
                        file.seekable = file_or_filename.seekable
                        file.tell = file_or_filename.tell
                    except AttributeError:
                        pass
                file = io.TextIOWrapper(file,
                                        encoding=encoding,
                                        errors="xmlcharrefreplace",
                                        newline="\n")
                # Keep the original file open when the TextIOWrapper is
                # destroyed
                stack.callback(file.detach)
                yield file.write
def _namespaces(elem, default_namespace=None):
    # identify namespaces used in this tree
    # maps qnames to *encoded* prefix:local names
    qnames = {None: None}
    # maps uri:s to prefixes
    namespaces = {}
    if default_namespace:
        namespaces[default_namespace] = ""
    def add_qname(qname):
        # calculate serialized qname representation
        try:
            if qname[:1] == "{":
                uri, tag = qname[1:].rsplit("}", 1)
                prefix = namespaces.get(uri)
                if prefix is None:
                    # First occurrence of this URI: reuse a well-known
                    # prefix if registered, otherwise generate "ns<N>".
                    prefix = _namespace_map.get(uri)
                    if prefix is None:
                        prefix = "ns%d" % len(namespaces)
                    if prefix != "xml":
                        namespaces[uri] = prefix
                if prefix:
                    qnames[qname] = "%s:%s" % (prefix, tag)
                else:
                    qnames[qname] = tag # default element
            else:
                if default_namespace:
                    # FIXME: can this be handled in XML 1.0?
                    raise ValueError(
                        "cannot use non-qualified names with "
                        "default_namespace option"
                    )
                qnames[qname] = qname
        except TypeError:
            _raise_serialization_error(qname)
    # populate qname and namespaces table
    for elem in elem.iter():
        tag = elem.tag
        if isinstance(tag, QName):
            if tag.text not in qnames:
                add_qname(tag.text)
        elif isinstance(tag, str):
            if tag not in qnames:
                add_qname(tag)
        elif tag is not None and tag is not Comment and tag is not PI:
            _raise_serialization_error(tag)
        for key, value in elem.items():
            if isinstance(key, QName):
                key = key.text
            if key not in qnames:
                add_qname(key)
            # Attribute *values* may also be QNames (e.g. xsi:type).
            if isinstance(value, QName) and value.text not in qnames:
                add_qname(value.text)
        text = elem.text
        if isinstance(text, QName) and text.text not in qnames:
            add_qname(text.text)
    return qnames, namespaces
def _serialize_xml(write, elem, qnames, namespaces,
                   short_empty_elements, sort_attrib=False, **kwargs):
    # Recursive serializer for method="xml".  *namespaces* is passed only
    # for the root invocation; recursive calls receive None so the xmlns
    # declarations are written once, on the root element.
    tag = elem.tag
    text = elem.text
    if tag is Comment:
        write("<!--%s-->" % text)
    elif tag is ProcessingInstruction:
        write("<?%s?>" % text)
    else:
        tag = qnames[tag]
        if tag is None:
            # Element with a None tag: emit only its text and children.
            if text:
                write(_escape_cdata(text))
            for e in elem:
                _serialize_xml(write, e, qnames, None,
                               short_empty_elements=short_empty_elements,
                               sort_attrib=sort_attrib)
        else:
            write("<" + tag)
            items = list(elem.items())
            if items or namespaces:
                if namespaces:
                    ordered_namespaces = namespaces.items()
                    if sort_attrib:
                        ordered_namespaces = sorted(namespaces.items(),
                                                    key=lambda x: x[1]) # sort on prefix
                    for v, k in ordered_namespaces:
                        if k:
                            k = ":" + k
                        write(" xmlns%s=\"%s\"" % (
                            k,
                            _escape_attrib(v)
                            ))
                orderd_items = items
                if sort_attrib:
                    orderd_items = sorted(items) # lexical order
                for k, v in orderd_items:
                    if isinstance(k, QName):
                        k = k.text
                    if isinstance(v, QName):
                        v = qnames[v.text]
                    else:
                        v = _escape_attrib(v)
                    write(" %s=\"%s\"" % (qnames[k], v))
            if text or len(elem) or not short_empty_elements:
                write(">")
                if text:
                    write(_escape_cdata(text))
                for e in elem:
                    _serialize_xml(write, e, qnames, None,
                                   short_empty_elements=short_empty_elements,
                                   sort_attrib=sort_attrib)
                write("</" + tag + ">")
            else:
                # No content: emit a self-closed tag.
                write(" />")
    if elem.tail:
        write(_escape_cdata(elem.tail))
HTML_EMPTY = ("area", "base", "basefont", "br", "col", "frame", "hr",
"img", "input", "isindex", "link", "meta", "param")
try:
HTML_EMPTY = set(HTML_EMPTY)
except NameError:
pass
def _serialize_html(write, elem, qnames, namespaces, sort_attrib=False, **kwargs):
    # Recursive serializer for method="html": void elements get no end
    # tag, and <script>/<style> text is written unescaped.
    tag = elem.tag
    text = elem.text
    if tag is Comment:
        write("<!--%s-->" % _escape_cdata(text))
    elif tag is ProcessingInstruction:
        write("<?%s?>" % _escape_cdata(text))
    else:
        tag = qnames[tag]
        if tag is None:
            # Element with a None tag: emit only its text and children.
            if text:
                write(_escape_cdata(text))
            for e in elem:
                _serialize_html(write, e, qnames, None, sort_attrib=sort_attrib)
        else:
            write("<" + tag)
            items = list(elem.items())
            if items or namespaces:
                if namespaces:
                    ordered_namespaces = namespaces.items()
                    if sort_attrib:
                        ordered_namespaces = sorted(namespaces.items(),
                                                    key=lambda x: x[1]) # sort on prefix
                    for v, k in ordered_namespaces:
                        if k:
                            k = ":" + k
                        write(" xmlns%s=\"%s\"" % (
                            k,
                            _escape_attrib(v)
                            ))
                orderd_items = items
                if sort_attrib:
                    orderd_items = sorted(items) # lexical order
                for k, v in orderd_items:
                    if isinstance(k, QName):
                        k = k.text
                    if isinstance(v, QName):
                        v = qnames[v.text]
                    else:
                        v = _escape_attrib_html(v)
                    # FIXME: handle boolean attributes
                    write(" %s=\"%s\"" % (qnames[k], v))
            write(">")
            ltag = tag.lower()
            if text:
                if ltag == "script" or ltag == "style":
                    # Script/style contents must not be entity-escaped.
                    write(text)
                else:
                    write(_escape_cdata(text))
            for e in elem:
                _serialize_html(write, e, qnames, None, sort_attrib=sort_attrib)
            if ltag not in HTML_EMPTY:
                write("</" + tag + ">")
    if elem.tail:
        write(_escape_cdata(elem.tail))
def _serialize_text(write, elem):
for part in elem.itertext():
write(part)
if elem.tail:
write(elem.tail)
# Dispatch table mapping the *method* argument of ElementTree.write()
# to the corresponding serializer function.
_serialize = {
    "xml": _serialize_xml,
    "html": _serialize_html,
    "text": _serialize_text,
    # this optional method is imported at the end of the module
    # "c14n": _serialize_c14n,
}
def register_namespace(prefix, uri):
    """Register a namespace prefix.

    The registry is global; any existing mapping for either the given
    prefix or the namespace URI is removed first, so each prefix and
    each URI appears at most once.

    *prefix* is the namespace prefix, *uri* is a namespace uri.  Tags and
    attributes in this namespace will be serialized with prefix if possible.

    ValueError is raised if the prefix matches the "ns<number>" format
    reserved for automatically generated prefixes.
    """
    if re.match(r"ns\d+$", prefix):
        raise ValueError("Prefix format reserved for internal use")
    stale = [k for k, v in _namespace_map.items() if k == uri or v == prefix]
    for k in stale:
        del _namespace_map[k]
    _namespace_map[uri] = prefix
# Well-known namespace URIs mapped to their conventional prefixes; used
# by the serializer (via _namespaces) when generating xmlns declarations.
_namespace_map = {
    # "well-known" namespace prefixes
    "http://www.w3.org/XML/1998/namespace": "xml",
    "http://www.w3.org/1999/xhtml": "html",
    "http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
    "http://schemas.xmlsoap.org/wsdl/": "wsdl",
    # xml schema
    "http://www.w3.org/2001/XMLSchema": "xs",
    "http://www.w3.org/2001/XMLSchema-instance": "xsi",
    # dublin core
    "http://purl.org/dc/elements/1.1/": "dc",
}
# For tests and troubleshooting
register_namespace._namespace_map = _namespace_map
def _raise_serialization_error(text):
raise TypeError(
"cannot serialize %r (type %s)" % (text, type(text).__name__)
)
def _escape_cdata(text):
# escape character data
try:
# it's worth avoiding do-nothing calls for strings that are
# shorter than 500 characters, or so. assume that's, by far,
# the most common case in most applications.
if "&" in text:
text = text.replace("&", "&")
if "<" in text:
text = text.replace("<", "<")
if ">" in text:
text = text.replace(">", ">")
return text
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_attrib(text):
# escape attribute value
try:
if "&" in text:
text = text.replace("&", "&")
if "<" in text:
text = text.replace("<", "<")
if ">" in text:
text = text.replace(">", ">")
if "\"" in text:
text = text.replace("\"", """)
# The following business with carriage returns is to satisfy
# Section 2.11 of the XML specification, stating that
# CR or CR LN should be replaced with just LN
# http://www.w3.org/TR/REC-xml/#sec-line-ends
if "\r\n" in text:
text = text.replace("\r\n", "\n")
if "\r" in text:
text = text.replace("\r", "\n")
#The following four lines are issue 17582
if "\n" in text:
text = text.replace("\n", " ")
if "\t" in text:
text = text.replace("\t", "	")
return text
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_attrib_html(text):
# escape attribute value
try:
if "&" in text:
text = text.replace("&", "&")
if ">" in text:
text = text.replace(">", ">")
if "\"" in text:
text = text.replace("\"", """)
return text
except (TypeError, AttributeError):
_raise_serialization_error(text)
# --------------------------------------------------------------------
def tostring(element, encoding=None, method=None, *,
             xml_declaration=None, default_namespace=None,
             short_empty_elements=True, sort_attrib=False):
    """Generate string representation of XML element.

    All subelements are included.  If encoding is "unicode", a string
    is returned; otherwise a bytestring is returned.

    *element* is an Element instance, *encoding* is an optional output
    encoding defaulting to US-ASCII, *method* is an optional output which can
    be one of "xml" (default), "html", "text" or "c14n", *default_namespace*
    sets the default XML namespace (for "xmlns").

    Returns an (optionally) encoded string containing the XML data.
    """
    if encoding == 'unicode':
        buffer = io.StringIO()
    else:
        buffer = io.BytesIO()
    ElementTree(element).write(buffer, encoding,
                               xml_declaration=xml_declaration,
                               default_namespace=default_namespace,
                               method=method,
                               short_empty_elements=short_empty_elements,
                               sort_attrib=sort_attrib)
    return buffer.getvalue()
class _ListDataStream(io.BufferedIOBase):
"""An auxiliary stream accumulating into a list reference."""
def __init__(self, lst):
self.lst = lst
def writable(self):
return True
def seekable(self):
return True
def write(self, b):
self.lst.append(b)
def tell(self):
return len(self.lst)
def tostringlist(element, encoding=None, method=None, *,
                 xml_declaration=None, default_namespace=None,
                 short_empty_elements=True, sort_attrib=False):
    """Serialize *element* and return the raw output chunks as a list.

    Joining the returned list yields the same result as tostring()
    called with the same arguments.
    """
    chunks = []
    ElementTree(element).write(_ListDataStream(chunks), encoding,
                               xml_declaration=xml_declaration,
                               default_namespace=default_namespace,
                               method=method,
                               short_empty_elements=short_empty_elements,
                               sort_attrib=sort_attrib)
    return chunks
def dump(elem):
    """Write element tree or element structure to sys.stdout.

    This function should be used for debugging only.

    *elem* is either an ElementTree, or a single Element.  The exact output
    format is implementation dependent.  In this version, it's written as an
    ordinary XML file.
    """
    # debugging
    if not isinstance(elem, ElementTree):
        elem = ElementTree(elem)
    elem.write(sys.stdout, encoding="unicode")
    # Ensure the dump ends with a newline even if the root has no tail.
    tail = elem.getroot().tail
    if not tail or tail[-1] != "\n":
        sys.stdout.write("\n")
# --------------------------------------------------------------------
# parsing
def parse(source, parser=None):
    """Parse an XML document into an element tree.

    *source* is a filename or file object containing XML data,
    *parser* is an optional parser instance defaulting to XMLParser.

    Return an ElementTree instance.
    """
    et = ElementTree()
    et.parse(source, parser)
    return et
def iterparse(source, events=None, parser=None):
    """Incrementally parse XML document into ElementTree.

    This class also reports what's going on to the user based on the
    *events* it is initialized with.  The supported events are the strings
    "start", "end", "start-ns" and "end-ns" (the "ns" events are used to get
    detailed namespace information).  If *events* is omitted, only
    "end" events are reported.

    *source* is a filename or file object containing XML data, *events* is
    a list of events to report back, *parser* is an optional parser instance.

    Returns an iterator providing (event, elem) pairs.
    """
    # Use the internal, undocumented _parser argument for now; When the
    # parser argument of iterparse is removed, this can be killed.
    pullparser = XMLPullParser(events=events, _parser=parser)
    def iterator():
        # Closes over *source*, *close_source* and *it*, which are bound
        # below before the generator is first advanced.
        try:
            while True:
                yield from pullparser.read_events()
                # load event buffer
                data = source.read(16 * 1024)
                if not data:
                    break
                pullparser.feed(data)
            root = pullparser._close_and_return_root()
            yield from pullparser.read_events()
            it.root = root
        finally:
            if close_source:
                source.close()
    class IterParseIterator(collections.abc.Iterator):
        # Bind the generator's __next__ as the iterator protocol method,
        # giving an object that can also carry the .root attribute.
        __next__ = iterator().__next__
    it = IterParseIterator()
    it.root = None
    del iterator, IterParseIterator
    close_source = False
    if not hasattr(source, "read"):
        # A path was given; we own (and must close) the handle.
        source = open(source, "rb")
        close_source = True
    return it
class XMLPullParser:
    """Non-blocking pull parser: feed() raw data, then read_events()."""
    def __init__(self, events=None, *, _parser=None):
        # The _parser argument is for internal use only and must not be relied
        # upon in user code. It will be removed in a future release.
        # See http://bugs.python.org/issue17741 for more details.
        self._events_queue = collections.deque()
        self._parser = _parser or XMLParser(target=TreeBuilder())
        # wire up the parser for event reporting
        if events is None:
            events = ("end",)
        self._parser._setevents(self._events_queue, events)
    def feed(self, data):
        """Feed encoded data to parser."""
        if self._parser is None:
            raise ValueError("feed() called after end of stream")
        if data:
            try:
                self._parser.feed(data)
            except SyntaxError as exc:
                # Deliver the error through read_events() rather than
                # raising here, so event consumption stays uniform.
                self._events_queue.append(exc)
    def _close_and_return_root(self):
        # iterparse needs this to set its root attribute properly :(
        root = self._parser.close()
        self._parser = None
        return root
    def close(self):
        """Finish feeding data to parser.

        Unlike XMLParser, does not return the root element. Use
        read_events() to consume elements from XMLPullParser.
        """
        self._close_and_return_root()
    def read_events(self):
        """Return an iterator over currently available (event, elem) pairs.

        Events are consumed from the internal event queue as they are
        retrieved from the iterator.
        """
        events = self._events_queue
        while events:
            event = events.popleft()
            if isinstance(event, Exception):
                # A parse error queued by feed(); surface it now.
                raise event
            else:
                yield event
def XML(text, parser=None):
    """Parse an XML document from a string constant.

    This function can be used to embed "XML Literals" in Python code.

    *text* is a string containing XML data, *parser* is an
    optional parser instance, defaulting to the standard XMLParser.

    Returns an Element instance.
    """
    p = parser or XMLParser(target=TreeBuilder())
    p.feed(text)
    return p.close()
def XMLID(text, parser=None):
    """Parse an XML document from a string constant, collecting ids.

    *text* is a string containing XML data, *parser* is an
    optional parser instance, defaulting to the standard XMLParser.

    Returns an (Element, dict) tuple, in which the
    dict maps element id:s to elements.
    """
    p = parser or XMLParser(target=TreeBuilder())
    p.feed(text)
    root = p.close()
    ids = {}
    for node in root.iter():
        node_id = node.get("id")
        if node_id:
            ids[node_id] = node
    return root, ids
# Parse XML document from string constant.  Alias for XML().
fromstring = XML
def fromstringlist(sequence, parser=None):
    """Parse an XML document from a sequence of string fragments.

    *sequence* is a list or other sequence of string fragments, *parser*
    is an optional parser instance, defaulting to the standard XMLParser.

    Returns an Element instance.
    """
    p = parser or XMLParser(target=TreeBuilder())
    for fragment in sequence:
        p.feed(fragment)
    return p.close()
# --------------------------------------------------------------------
class TreeBuilder:
    """Generic element structure builder.

    This builder converts a sequence of start, data, and end method
    calls to a well-formed element structure.

    You can use this class to build an element structure using a custom XML
    parser, or a parser for some other XML-like format.

    *element_factory* is an optional element factory which is called
    to create new Element instances, as necessary.

    *comment_factory* is a factory to create comments to be used instead of
    the standard factory.  If *insert_comments* is false (the default),
    comments will not be inserted into the tree.

    *pi_factory* is a factory to create processing instructions to be used
    instead of the standard factory.  If *insert_pis* is false (the default),
    processing instructions will not be inserted into the tree.
    """
    def __init__(self, element_factory=None, *,
                 comment_factory=None, pi_factory=None,
                 insert_comments=False, insert_pis=False):
        self._data = [] # data collector
        self._elem = [] # element stack
        self._last = None # last element
        self._root = None # root element
        self._tail = None # true if we're after an end tag
        if comment_factory is None:
            comment_factory = Comment
        self._comment_factory = comment_factory
        self.insert_comments = insert_comments
        if pi_factory is None:
            pi_factory = ProcessingInstruction
        self._pi_factory = pi_factory
        self.insert_pis = insert_pis
        if element_factory is None:
            element_factory = Element
        self._factory = element_factory
    def close(self):
        """Flush builder buffers and return toplevel document Element."""
        assert len(self._elem) == 0, "missing end tags"
        assert self._root is not None, "missing toplevel element"
        return self._root
    def _flush(self):
        # Attach accumulated character data to the text or tail of the
        # most recently processed element.
        if self._data:
            if self._last is not None:
                text = "".join(self._data)
                if self._tail:
                    assert self._last.tail is None, "internal error (tail)"
                    self._last.tail = text
                else:
                    assert self._last.text is None, "internal error (text)"
                    self._last.text = text
            self._data = []
    def data(self, data):
        """Add text to current element."""
        self._data.append(data)
    def start(self, tag, attrs):
        """Open new element and return it.

        *tag* is the element name, *attrs* is a dict containing element
        attributes.
        """
        self._flush()
        self._last = elem = self._factory(tag, attrs)
        if self._elem:
            self._elem[-1].append(elem)
        elif self._root is None:
            # First element opened becomes the document root.
            self._root = elem
        self._elem.append(elem)
        self._tail = 0
        return elem
    def end(self, tag):
        """Close and return current Element.

        *tag* is the element name.
        """
        self._flush()
        self._last = self._elem.pop()
        assert self._last.tag == tag,\
               "end tag mismatch (expected %s, got %s)" % (
                   self._last.tag, tag)
        self._tail = 1
        return self._last
    def comment(self, text):
        """Create a comment using the comment_factory.

        *text* is the text of the comment.
        """
        return self._handle_single(
            self._comment_factory, self.insert_comments, text)
    def pi(self, target, text=None):
        """Create a processing instruction using the pi_factory.

        *target* is the target name of the processing instruction.
        *text* is the data of the processing instruction, or ''.
        """
        return self._handle_single(
            self._pi_factory, self.insert_pis, target, text)
    def _handle_single(self, factory, insert, *args):
        # Build a comment/PI node; splice it into the tree at the
        # current position only when *insert* is true.
        elem = factory(*args)
        if insert:
            self._flush()
            self._last = elem
            if self._elem:
                self._elem[-1].append(elem)
            self._tail = 1
        return elem
# also see ElementTree and TreeBuilder
class XMLParser:
"""Element structure builder for XML source data based on the expat parser.
*target* is an optional target object which defaults to an instance of the
standard TreeBuilder class, *encoding* is an optional encoding string
which if given, overrides the encoding specified in the XML file:
http://www.iana.org/assignments/character-sets
"""
    def __init__(self, *, target=None, encoding=None):
        try:
            from xml.parsers import expat
        except ImportError:
            try:
                import pyexpat as expat
            except ImportError:
                raise ImportError(
                    "No module named expat; use SimpleXMLTreeBuilder instead"
                )
        # The "}" separator makes expat report namespaced names in the
        # "uri}local" form consumed by _fixname().
        parser = expat.ParserCreate(encoding, "}")
        if target is None:
            target = TreeBuilder()
        # underscored names are provided for compatibility only
        self.parser = self._parser = parser
        self.target = self._target = target
        self._error = expat.error
        self._names = {} # name memo cache
        # main callbacks
        parser.DefaultHandlerExpand = self._default
        if hasattr(target, 'start'):
            parser.StartElementHandler = self._start
        if hasattr(target, 'end'):
            parser.EndElementHandler = self._end
        if hasattr(target, 'start_ns'):
            parser.StartNamespaceDeclHandler = self._start_ns
        if hasattr(target, 'end_ns'):
            parser.EndNamespaceDeclHandler = self._end_ns
        if hasattr(target, 'data'):
            parser.CharacterDataHandler = target.data
        # miscellaneous callbacks
        if hasattr(target, 'comment'):
            parser.CommentHandler = target.comment
        if hasattr(target, 'pi'):
            parser.ProcessingInstructionHandler = target.pi
        # Configure pyexpat: buffering, new-style attribute handling.
        parser.buffer_text = 1
        parser.ordered_attributes = 1
        parser.specified_attributes = 1
        self._doctype = None
        self.entity = {}
        try:
            self.version = "Expat %d.%d.%d" % expat.version_info
        except AttributeError:
            pass # unknown
    def _setevents(self, events_queue, events_to_report):
        # Internal API for XMLPullParser
        # events_to_report: a list of events to report during parsing (same as
        # the *events* of XMLPullParser's constructor.
        # events_queue: a list of actual parsing events that will be populated
        # by the underlying parser.
        #
        # Each handler binds its loop values (event name, append, bound
        # methods) as default arguments so the closures are self-contained.
        parser = self._parser
        append = events_queue.append
        for event_name in events_to_report:
            if event_name == "start":
                parser.ordered_attributes = 1
                parser.specified_attributes = 1
                def handler(tag, attrib_in, event=event_name, append=append,
                            start=self._start):
                    append((event, start(tag, attrib_in)))
                parser.StartElementHandler = handler
            elif event_name == "end":
                def handler(tag, event=event_name, append=append,
                            end=self._end):
                    append((event, end(tag)))
                parser.EndElementHandler = handler
            elif event_name == "start-ns":
                # TreeBuilder does not implement .start_ns()
                if hasattr(self.target, "start_ns"):
                    def handler(prefix, uri, event=event_name, append=append,
                                start_ns=self._start_ns):
                        append((event, start_ns(prefix, uri)))
                else:
                    def handler(prefix, uri, event=event_name, append=append):
                        append((event, (prefix or '', uri or '')))
                parser.StartNamespaceDeclHandler = handler
            elif event_name == "end-ns":
                # TreeBuilder does not implement .end_ns()
                if hasattr(self.target, "end_ns"):
                    def handler(prefix, event=event_name, append=append,
                                end_ns=self._end_ns):
                        append((event, end_ns(prefix)))
                else:
                    def handler(prefix, event=event_name, append=append):
                        append((event, None))
                parser.EndNamespaceDeclHandler = handler
            elif event_name == 'comment':
                def handler(text, event=event_name, append=append, self=self):
                    append((event, self.target.comment(text)))
                parser.CommentHandler = handler
            elif event_name == 'pi':
                def handler(pi_target, data, event=event_name, append=append,
                            self=self):
                    append((event, self.target.pi(pi_target, data)))
                parser.ProcessingInstructionHandler = handler
            else:
                raise ValueError("unknown event %r" % event_name)
def _raiseerror(self, value):
err = ParseError(value)
err.code = value.code
err.position = value.lineno, value.offset
raise err
def _fixname(self, key):
# expand qname, and convert name string to ascii, if possible
try:
name = self._names[key]
except KeyError:
name = key
if "}" in name:
name = "{" + name
self._names[key] = name
return name
    def _start_ns(self, prefix, uri):
        # Normalize expat's None prefix/uri (default namespace) to ''.
        return self.target.start_ns(prefix or '', uri or '')
def _end_ns(self, prefix):
return self.target.end_ns(prefix or '')
def _start(self, tag, attr_list):
# Handler for expat's StartElementHandler. Since ordered_attributes
# is set, the attributes are reported as a list of alternating
# attribute name,value.
fixname = self._fixname
tag = fixname(tag)
attrib = {}
if attr_list:
for i in range(0, len(attr_list), 2):
attrib[fixname(attr_list[i])] = attr_list[i+1]
return self.target.start(tag, attrib)
def _end(self, tag):
return self.target.end(self._fixname(tag))
def _default(self, text):
prefix = text[:1]
if prefix == "&":
# deal with undefined entities
try:
data_handler = self.target.data
except AttributeError:
return
try:
data_handler(self.entity[text[1:-1]])
except KeyError:
from xml.parsers import expat
err = expat.error(
"undefined entity %s: line %d, column %d" %
(text, self.parser.ErrorLineNumber,
self.parser.ErrorColumnNumber)
)
err.code = 11 # XML_ERROR_UNDEFINED_ENTITY
err.lineno = self.parser.ErrorLineNumber
err.offset = self.parser.ErrorColumnNumber
raise err
elif prefix == "<" and text[:9] == "<!DOCTYPE":
self._doctype = [] # inside a doctype declaration
elif self._doctype is not None:
# parse doctype contents
if prefix == ">":
self._doctype = None
return
text = text.strip()
if not text:
return
self._doctype.append(text)
n = len(self._doctype)
if n > 2:
type = self._doctype[1]
if type == "PUBLIC" and n == 4:
name, type, pubid, system = self._doctype
if pubid:
pubid = pubid[1:-1]
elif type == "SYSTEM" and n == 3:
name, type, system = self._doctype
pubid = None
else:
return
if hasattr(self.target, "doctype"):
self.target.doctype(name, pubid, system[1:-1])
elif hasattr(self, "doctype"):
warnings.warn(
"The doctype() method of XMLParser is ignored. "
"Define doctype() method on the TreeBuilder target.",
RuntimeWarning)
self._doctype = None
def feed(self, data):
    """Feed encoded data to parser.

    Expat errors are translated to ParseError via ``_raiseerror``.
    """
    try:
        self.parser.Parse(data, 0)
    except self._error as v:
        self._raiseerror(v)
def close(self):
    """Finish feeding data to parser and return element structure.

    Always breaks the parser<->target reference cycles, even when the
    final parse or the target's ``close()`` raises.
    """
    try:
        self.parser.Parse("", 1)  # end of data
    except self._error as v:
        self._raiseerror(v)
    try:
        close_handler = self.target.close
    except AttributeError:
        # target without close(): nothing to return
        pass
    else:
        return close_handler()
    finally:
        # get rid of circular references
        del self.parser, self._parser
        del self.target, self._target
# --------------------------------------------------------------------
# C14N 2.0
def canonicalize(xml_data=None, *, out=None, from_file=None, **options):
    """Convert XML to its C14N 2.0 serialised form.

    If *out* is provided, it must be a file or file-like object that receives
    the serialised canonical XML output (text, not bytes) through its
    ``.write()`` method.  To write to a file, open it in text mode with
    encoding "utf-8".  If *out* is not provided, this function returns the
    output as text string.

    Either *xml_data* (an XML string) or *from_file* (a file path or
    file-like object) must be provided as input.

    The configuration options are the same as for the ``C14NWriterTarget``.
    """
    if xml_data is None and from_file is None:
        raise ValueError("Either 'xml_data' or 'from_file' must be provided as input")
    sio = None
    if out is None:
        # no sink given: collect the output and return it as a string
        sio = out = io.StringIO()

    parser = XMLParser(target=C14NWriterTarget(out.write, **options))

    if xml_data is not None:
        parser.feed(xml_data)
        parser.close()
    elif from_file is not None:
        parse(from_file, parser=parser)

    return sio.getvalue() if sio is not None else None
# Matches "prefix:name" QNames in text content; used by the C14N writer to
# decide whether qname-aware text needs prefix rewriting.
_looks_like_prefix_name = re.compile(r'^\w+:\w+$', re.UNICODE).match
class C14NWriterTarget:
"""
Canonicalization writer target for the XMLParser.
Serialises parse events to XML C14N 2.0.
The *write* function is used for writing out the resulting data stream
as text (not bytes). To write to a file, open it in text mode with encoding
"utf-8" and pass its ``.write`` method.
Configuration options:
- *with_comments*: set to true to include comments
- *strip_text*: set to true to strip whitespace before and after text content
- *rewrite_prefixes*: set to true to replace namespace prefixes by "n{number}"
- *qname_aware_tags*: a set of qname aware tag names in which prefixes
should be replaced in text content
- *qname_aware_attrs*: a set of qname aware attribute names in which prefixes
should be replaced in text content
- *exclude_attrs*: a set of attribute names that should not be serialised
- *exclude_tags*: a set of tag names that should not be serialised
"""
def __init__(self, write, *,
with_comments=False, strip_text=False, rewrite_prefixes=False,
qname_aware_tags=None, qname_aware_attrs=None,
exclude_attrs=None, exclude_tags=None):
self._write = write
self._data = []
self._with_comments = with_comments
self._strip_text = strip_text
self._exclude_attrs = set(exclude_attrs) if exclude_attrs else None
self._exclude_tags = set(exclude_tags) if exclude_tags else None
self._rewrite_prefixes = rewrite_prefixes
if qname_aware_tags:
self._qname_aware_tags = set(qname_aware_tags)
else:
self._qname_aware_tags = None
if qname_aware_attrs:
self._find_qname_aware_attrs = set(qname_aware_attrs).intersection
else:
self._find_qname_aware_attrs = None
# Stack with globally and newly declared namespaces as (uri, prefix) pairs.
self._declared_ns_stack = [[
("http://www.w3.org/XML/1998/namespace", "xml"),
]]
# Stack with user declared namespace prefixes as (uri, prefix) pairs.
self._ns_stack = []
if not rewrite_prefixes:
self._ns_stack.append(list(_namespace_map.items()))
self._ns_stack.append([])
self._prefix_map = {}
self._preserve_space = [False]
self._pending_start = None
self._root_seen = False
self._root_done = False
self._ignored_depth = 0
def _iter_namespaces(self, ns_stack, _reversed=reversed):
for namespaces in _reversed(ns_stack):
if namespaces: # almost no element declares new namespaces
yield from namespaces
def _resolve_prefix_name(self, prefixed_name):
prefix, name = prefixed_name.split(':', 1)
for uri, p in self._iter_namespaces(self._ns_stack):
if p == prefix:
return f'{{{uri}}}{name}'
raise ValueError(f'Prefix {prefix} of QName "{prefixed_name}" is not declared in scope')
def _qname(self, qname, uri=None):
if uri is None:
uri, tag = qname[1:].rsplit('}', 1) if qname[:1] == '{' else ('', qname)
else:
tag = qname
prefixes_seen = set()
for u, prefix in self._iter_namespaces(self._declared_ns_stack):
if u == uri and prefix not in prefixes_seen:
return f'{prefix}:{tag}' if prefix else tag, tag, uri
prefixes_seen.add(prefix)
# Not declared yet => add new declaration.
if self._rewrite_prefixes:
if uri in self._prefix_map:
prefix = self._prefix_map[uri]
else:
prefix = self._prefix_map[uri] = f'n{len(self._prefix_map)}'
self._declared_ns_stack[-1].append((uri, prefix))
return f'{prefix}:{tag}', tag, uri
if not uri and '' not in prefixes_seen:
# No default namespace declared => no prefix needed.
return tag, tag, uri
for u, prefix in self._iter_namespaces(self._ns_stack):
if u == uri:
self._declared_ns_stack[-1].append((uri, prefix))
return f'{prefix}:{tag}' if prefix else tag, tag, uri
raise ValueError(f'Namespace "{uri}" is not declared in scope')
def data(self, data):
    """Buffer character data.

    Text is flushed lazily so that qname-aware content can be inspected
    before the enclosing start tag is written; inside an excluded tag
    (``_ignored_depth`` > 0) data is dropped.
    """
    if not self._ignored_depth:
        self._data.append(data)
def _flush(self, _join_text=''.join):
data = _join_text(self._data)
del self._data[:]
if self._strip_text and not self._preserve_space[-1]:
data = data.strip()
if self._pending_start is not None:
args, self._pending_start = self._pending_start, None
qname_text = data if data and _looks_like_prefix_name(data) else None
self._start(*args, qname_text)
if qname_text is not None:
return
if data and self._root_seen:
self._write(_escape_cdata_c14n(data))
def start_ns(self, prefix, uri):
if self._ignored_depth:
return
# we may have to resolve qnames in text content
if self._data:
self._flush()
self._ns_stack[-1].append((uri, prefix))
def start(self, tag, attrs):
if self._exclude_tags is not None and (
self._ignored_depth or tag in self._exclude_tags):
self._ignored_depth += 1
return
if self._data:
self._flush()
new_namespaces = []
self._declared_ns_stack.append(new_namespaces)
if self._qname_aware_tags is not None and tag in self._qname_aware_tags:
# Need to parse text first to see if it requires a prefix declaration.
self._pending_start = (tag, attrs, new_namespaces)
return
self._start(tag, attrs, new_namespaces)
def _start(self, tag, attrs, new_namespaces, qname_text=None):
if self._exclude_attrs is not None and attrs:
attrs = {k: v for k, v in attrs.items() if k not in self._exclude_attrs}
qnames = {tag, *attrs}
resolved_names = {}
# Resolve prefixes in attribute and tag text.
if qname_text is not None:
qname = resolved_names[qname_text] = self._resolve_prefix_name(qname_text)
qnames.add(qname)
if self._find_qname_aware_attrs is not None and attrs:
qattrs = self._find_qname_aware_attrs(attrs)
if qattrs:
for attr_name in qattrs:
value = attrs[attr_name]
if _looks_like_prefix_name(value):
qname = resolved_names[value] = self._resolve_prefix_name(value)
qnames.add(qname)
else:
qattrs = None
else:
qattrs = None
# Assign prefixes in lexicographical order of used URIs.
parse_qname = self._qname
parsed_qnames = {n: parse_qname(n) for n in sorted(
qnames, key=lambda n: n.split('}', 1))}
# Write namespace declarations in prefix order ...
if new_namespaces:
attr_list = [
('xmlns:' + prefix if prefix else 'xmlns', uri)
for uri, prefix in new_namespaces
]
attr_list.sort()
else:
# almost always empty
attr_list = []
# ... followed by attributes in URI+name order
if attrs:
for k, v in sorted(attrs.items()):
if qattrs is not None and k in qattrs and v in resolved_names:
v = parsed_qnames[resolved_names[v]][0]
attr_qname, attr_name, uri = parsed_qnames[k]
# No prefix for attributes in default ('') namespace.
attr_list.append((attr_qname if uri else attr_name, v))
# Honour xml:space attributes.
space_behaviour = attrs.get('{http://www.w3.org/XML/1998/namespace}space')
self._preserve_space.append(
space_behaviour == 'preserve' if space_behaviour
else self._preserve_space[-1])
# Write the tag.
write = self._write
write('<' + parsed_qnames[tag][0])
if attr_list:
write(''.join([f' {k}="{_escape_attrib_c14n(v)}"' for k, v in attr_list]))
write('>')
# Write the resolved qname text content.
if qname_text is not None:
write(_escape_cdata_c14n(parsed_qnames[resolved_names[qname_text]][0]))
self._root_seen = True
self._ns_stack.append([])
def end(self, tag):
if self._ignored_depth:
self._ignored_depth -= 1
return
if self._data:
self._flush()
self._write(f'</{self._qname(tag)[0]}>')
self._preserve_space.pop()
self._root_done = len(self._preserve_space) == 1
self._declared_ns_stack.pop()
self._ns_stack.pop()
def comment(self, text):
if not self._with_comments:
return
if self._ignored_depth:
return
if self._root_done:
self._write('\n')
elif self._root_seen and self._data:
self._flush()
self._write(f'<!--{_escape_cdata_c14n(text)}-->')
if not self._root_seen:
self._write('\n')
def pi(self, target, data):
if self._ignored_depth:
return
if self._root_done:
self._write('\n')
elif self._root_seen and self._data:
self._flush()
self._write(
f'<?{target} {_escape_cdata_c14n(data)}?>' if data else f'<?{target}?>')
if not self._root_seen:
self._write('\n')
def _escape_cdata_c14n(text):
# escape character data
try:
# it's worth avoiding do-nothing calls for strings that are
# shorter than 500 character, or so. assume that's, by far,
# the most common case in most applications.
if '&' in text:
text = text.replace('&', '&')
if '<' in text:
text = text.replace('<', '<')
if '>' in text:
text = text.replace('>', '>')
if '\r' in text:
text = text.replace('\r', '
')
return text
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_attrib_c14n(text):
# escape attribute value
try:
if '&' in text:
text = text.replace('&', '&')
if '<' in text:
text = text.replace('<', '<')
if '"' in text:
text = text.replace('"', '"')
if '\t' in text:
text = text.replace('\t', '	')
if '\n' in text:
text = text.replace('\n', '
')
if '\r' in text:
text = text.replace('\r', '
')
return text
except (TypeError, AttributeError):
_raise_serialization_error(text)
# --------------------------------------------------------------------
# Import the C accelerators
try:
    # Element is going to be shadowed by the C implementation.  We need to
    # keep the Python version of it accessible for some "creative" uses by
    # external code (see tests).
    _Element_Py = Element

    # Element, SubElement, ParseError, TreeBuilder, XMLParser, _set_factories
    from _elementtree import *
    from _elementtree import _set_factories
except ImportError:
    # no C extension available: keep the pure-Python implementations
    pass
else:
    _set_factories(Comment, ProcessingInstruction)
| 35.526468 | 96 | 0.57996 |
bdfe72787217fbeed7e0767f901365fb98bd66f1 | 73 | py | Python | start.py | ronligt/workshop_myrr | f898014301c3bc00179b71b6326803cc32847c6b | [
"MIT"
] | null | null | null | start.py | ronligt/workshop_myrr | f898014301c3bc00179b71b6326803cc32847c6b | [
"MIT"
] | null | null | null | start.py | ronligt/workshop_myrr | f898014301c3bc00179b71b6326803cc32847c6b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from workshop import workshop
workshop.example()
| 12.166667 | 29 | 0.767123 |
215405000f5844fd8744253b46c0ec248a43a6d0 | 31,882 | py | Python | pyqode/core/panels/folding.py | haesleinhuepf/pyqode.core | 88b9bab081fd580d4de86f3d926a9f0b19146d28 | [
"MIT"
] | null | null | null | pyqode/core/panels/folding.py | haesleinhuepf/pyqode.core | 88b9bab081fd580d4de86f3d926a9f0b19146d28 | [
"MIT"
] | null | null | null | pyqode/core/panels/folding.py | haesleinhuepf/pyqode.core | 88b9bab081fd580d4de86f3d926a9f0b19146d28 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
This module contains the marker panel
"""
import logging
import os
import sys
from pyqode.core.api import TextBlockHelper, folding, TextDecoration, \
DelayJobRunner
from pyqode.core.api.folding import FoldScope
from pyqode.core.api.panel import Panel
from qtpy import QtCore, QtWidgets, QtGui, PYQT5_API
from pyqode.core.api.utils import TextHelper, drift_color, keep_tc_pos
def _logger():
""" Gets module's logger """
return logging.getLogger(__name__)
class FoldingPanel(Panel):
""" Displays the document outline and lets the user collapse/expand blocks.
The data represented by the panel come from the text block user state and
is set by the SyntaxHighlighter mode.
The panel does not expose any function that you can use directly. To
interact with the fold tree, you need to modify text block fold level or
trigger state using :class:`pyqode.core.api.utils.TextBlockHelper` or
:mod:`pyqode.core.api.folding`
"""
#: signal emitted when a fold trigger state has changed, parameters are
#: the concerned text block and the new state (collapsed or not).
trigger_state_changed = QtCore.Signal(QtGui.QTextBlock, bool)
collapse_all_triggered = QtCore.Signal()
expand_all_triggered = QtCore.Signal()
@property
def native_look(self):
"""
Defines whether the panel will use native indicator icons and color or
use custom one.
If you want to use custom indicator icons and color, you must first
set this flag to False.
"""
return self._native
@native_look.setter
def native_look(self, value):
self._native = value
# propagate changes to every clone
if self.editor:
for clone in self.editor.clones:
try:
clone.modes.get(self.__class__).native_look = value
except KeyError:
# this should never happen since we're working with clones
pass
@property
def custom_indicators_icons(self):
"""
Gets/sets the custom icon for the fold indicators.
The list of indicators is interpreted as follow::
(COLLAPSED_OFF, COLLAPSED_ON, EXPANDED_OFF, EXPANDED_ON)
To use this property you must first set `native_look` to False.
:returns: tuple(str, str, str, str)
"""
return self._custom_indicators
@custom_indicators_icons.setter
def custom_indicators_icons(self, value):
if len(value) != 4:
raise ValueError('The list of custom indicators must contains 4 '
'strings')
self._custom_indicators = value
if self.editor:
# propagate changes to every clone
for clone in self.editor.clones:
try:
clone.modes.get(
self.__class__).custom_indicators_icons = value
except KeyError:
# this should never happen since we're working with clones
pass
@property
def custom_fold_region_background(self):
"""
Custom base color for the fold region background
:return: QColor
"""
return self._custom_color
@custom_fold_region_background.setter
def custom_fold_region_background(self, value):
self._custom_color = value
# propagate changes to every clone
if self.editor:
for clone in self.editor.clones:
try:
clone.modes.get(
self.__class__).custom_fold_region_background = value
except KeyError:
# this should never happen since we're working with clones
pass
@property
def highlight_caret_scope(self):
"""
True to highlight the caret scope automatically.
(Similar to the ``Highlight blocks in Qt Creator``.
Default is False.
"""
return self._highlight_caret
@highlight_caret_scope.setter
def highlight_caret_scope(self, value):
if value != self._highlight_caret:
self._highlight_caret = value
if self.editor:
if value:
self._block_nbr = -1
self.editor.cursorPositionChanged.connect(
self._highlight_caret_scope)
else:
self._block_nbr = -1
self.editor.cursorPositionChanged.disconnect(
self._highlight_caret_scope)
for clone in self.editor.clones:
try:
clone.modes.get(
self.__class__).highlight_caret_scope = value
except KeyError:
# this should never happen since we're working with
# clones
pass
def __init__(self, highlight_caret_scope=False):
Panel.__init__(self)
self._native = True
self._custom_indicators = (
':/pyqode-icons/rc/arrow_right_off.png',
':/pyqode-icons/rc/arrow_right_on.png',
':/pyqode-icons/rc/arrow_down_off.png',
':/pyqode-icons/rc/arrow_down_on.png'
)
self._custom_color = QtGui.QColor('gray')
self._block_nbr = -1
self._highlight_caret = False
self.highlight_caret_scope = highlight_caret_scope
self._indic_size = 16
#: the list of deco used to highlight the current fold region (
#: surrounding regions are darker)
self._scope_decos = []
#: the list of folded blocs decorations
self._block_decos = []
self.setMouseTracking(True)
self.scrollable = True
self._mouse_over_line = None
self._current_scope = None
self._prev_cursor = None
self.context_menu = None
self.action_collapse = None
self.action_expand = None
self.action_collapse_all = None
self.action_expand_all = None
self._original_background = None
self._highlight_runner = DelayJobRunner(delay=250)
def on_install(self, editor):
"""
Add the folding menu to the editor, on install.
:param editor: editor instance on which the mode has been installed to.
"""
super(FoldingPanel, self).on_install(editor)
self.context_menu = QtWidgets.QMenu(_('Folding'), self.editor)
action = self.action_collapse = QtWidgets.QAction(
_('Collapse'), self.context_menu)
action.setShortcut('Shift+-')
action.triggered.connect(self._on_action_toggle)
self.context_menu.addAction(action)
action = self.action_expand = QtWidgets.QAction(_('Expand'),
self.context_menu)
action.setShortcut('Shift++')
action.triggered.connect(self._on_action_toggle)
self.context_menu.addAction(action)
self.context_menu.addSeparator()
action = self.action_collapse_all = QtWidgets.QAction(
_('Collapse all'), self.context_menu)
action.setShortcut('Ctrl+Shift+-')
action.triggered.connect(self._on_action_collapse_all_triggered)
self.context_menu.addAction(action)
action = self.action_expand_all = QtWidgets.QAction(
_('Expand all'), self.context_menu)
action.setShortcut('Ctrl+Shift++')
action.triggered.connect(self._on_action_expand_all_triggered)
self.context_menu.addAction(action)
self.editor.add_menu(self.context_menu)
def sizeHint(self):
    """Return the widget size hint based on the editor font height,
    capping the width at 16 px."""
    fm = QtGui.QFontMetricsF(self.editor.font())
    size_hint = QtCore.QSize(int(fm.height()), int(fm.height()))
    if size_hint.width() > 16:
        size_hint.setWidth(16)
    return size_hint
def paintEvent(self, event):
# Paints the fold indicators and the possible fold region background
# on the folding panel.
super(FoldingPanel, self).paintEvent(event)
painter = QtGui.QPainter(self)
# Draw background over the selected non collapsed fold region
if self._mouse_over_line is not None:
block = self.editor.document().findBlockByNumber(
self._mouse_over_line)
try:
self._draw_fold_region_background(block, painter)
except ValueError:
pass
# Draw fold triggers
for top_position, line_number, block in self.editor.visible_blocks:
if TextBlockHelper.is_fold_trigger(block):
collapsed = TextBlockHelper.is_collapsed(block)
mouse_over = self._mouse_over_line == line_number
self._draw_fold_indicator(
top_position, mouse_over, collapsed, painter)
if collapsed:
# check if the block already has a decoration, it might
# have been folded by the parent editor/document in the
# case of cloned editor
for deco in self._block_decos:
if deco.block == block:
# no need to add a deco, just go to the next block
break
else:
self._add_fold_decoration(block, FoldScope(block))
else:
for deco in self._block_decos:
# check if the block decoration has been removed, it
# might have been unfolded by the parent
# editor/document in the case of cloned editor
if deco.block == block:
# remove it and
self._block_decos.remove(deco)
self.editor.decorations.remove(deco)
del deco
break
def _draw_fold_region_background(self, block, painter):
"""
Draw the fold region when the mouse is over and non collapsed
indicator.
:param top: Top position
:param block: Current block.
:param painter: QPainter
"""
r = folding.FoldScope(block)
th = TextHelper(self.editor)
start, end = r.get_range(ignore_blank_lines=True)
if start > 0:
top = th.line_pos_from_number(start)
else:
top = 0
bottom = th.line_pos_from_number(end + 1)
h = bottom - top
if h == 0:
h = self.sizeHint().height()
w = self.sizeHint().width()
self._draw_rect(QtCore.QRectF(0, top, w, h), painter)
def _draw_rect(self, rect, painter):
"""
Draw the background rectangle using the current style primitive color
or foldIndicatorBackground if nativeFoldingIndicator is true.
:param rect: The fold zone rect to draw
:param painter: The widget's painter.
"""
c = self._custom_color
if self._native:
c = self.get_system_bck_color()
grad = QtGui.QLinearGradient(rect.topLeft(),
rect.topRight())
if sys.platform == 'darwin':
grad.setColorAt(0, c.lighter(100))
grad.setColorAt(1, c.lighter(110))
outline = c.darker(110)
else:
grad.setColorAt(0, c.lighter(110))
grad.setColorAt(1, c.lighter(130))
outline = c.darker(100)
painter.fillRect(rect, grad)
painter.setPen(QtGui.QPen(outline))
painter.drawLine(rect.topLeft() +
QtCore.QPointF(1, 0),
rect.topRight() -
QtCore.QPointF(1, 0))
painter.drawLine(rect.bottomLeft() +
QtCore.QPointF(1, 0),
rect.bottomRight() -
QtCore.QPointF(1, 0))
painter.drawLine(rect.topRight() +
QtCore.QPointF(0, 1),
rect.bottomRight() -
QtCore.QPointF(0, 1))
painter.drawLine(rect.topLeft() +
QtCore.QPointF(0, 1),
rect.bottomLeft() -
QtCore.QPointF(0, 1))
@staticmethod
def get_system_bck_color():
    """Get a system color for drawing the fold scope background.

    Blends the palette window color with the highlight color 50/50.
    """
    def merged_colors(color_a, color_b, factor):
        # Percentage blend: *factor*% of color_a, the rest of color_b.
        max_factor = 100
        color_a = QtGui.QColor(color_a)
        color_b = QtGui.QColor(color_b)
        tmp = color_a
        # int(): Python 3 "/" yields floats, while QColor channel setters
        # expect ints — presumably PyQt rejects floats here; TODO confirm
        # against the Qt binding in use.
        tmp.setRed(int((tmp.red() * factor) / max_factor +
                       (color_b.red() * (max_factor - factor)) / max_factor))
        tmp.setGreen(int((tmp.green() * factor) / max_factor +
                         (color_b.green() * (max_factor - factor)) / max_factor))
        tmp.setBlue(int((tmp.blue() * factor) / max_factor +
                        (color_b.blue() * (max_factor - factor)) / max_factor))
        return tmp

    pal = QtWidgets.QApplication.instance().palette()
    b = pal.window().color()
    h = pal.highlight().color()
    return merged_colors(b, h, 50)
def _draw_fold_indicator(self, top, mouse_over, collapsed, painter):
"""
Draw the fold indicator/trigger (arrow).
:param top: Top position
:param mouse_over: Whether the mouse is over the indicator
:param collapsed: Whether the trigger is collapsed or not.
:param painter: QPainter
"""
rect = QtCore.QRect(0, top, self.sizeHint().width(),
self.sizeHint().height())
if self._native:
opt = QtWidgets.QStyleOptionViewItem()
opt.rect = rect
opt.state = (QtWidgets.QStyle.State_Active |
QtWidgets.QStyle.State_Item |
QtWidgets.QStyle.State_Children)
if not collapsed:
opt.state |= QtWidgets.QStyle.State_Open
if mouse_over:
opt.state |= (QtWidgets.QStyle.State_MouseOver |
QtWidgets.QStyle.State_Enabled |
QtWidgets.QStyle.State_Selected)
opt.palette.setBrush(QtGui.QPalette.Window,
self.palette().highlight())
opt.rect.translate(-2, 0)
self.style().drawPrimitive(QtWidgets.QStyle.PE_IndicatorBranch,
opt, painter, self)
else:
index = 0
if not collapsed:
index = 2
if mouse_over:
index += 1
QtGui.QIcon(self._custom_indicators[index]).paint(painter, rect)
@staticmethod
def find_parent_scope(block):
    """Find the parent fold-trigger block of *block*.

    If *block* is not itself a fold trigger, walk forward past blank
    lines to find the reference fold level, then walk backwards from the
    original block to the closest trigger with a lower fold level.
    """
    original = block
    if not TextBlockHelper.is_fold_trigger(block):
        # search level of next non blank line
        while block.text().strip() == '' and block.isValid():
            block = block.next()
        ref_lvl = TextBlockHelper.get_fold_lvl(block) - 1
        block = original
        while (block.blockNumber() and
               (not TextBlockHelper.is_fold_trigger(block) or
                TextBlockHelper.get_fold_lvl(block) > ref_lvl)):
            block = block.previous()
    return block
def _clear_scope_decos(self):
"""
Clear scope decorations (on the editor)
"""
for deco in self._scope_decos:
self.editor.decorations.remove(deco)
self._scope_decos[:] = []
def _get_scope_highlight_color(self):
    """Return the base scope-highlight color, derived from the editor
    background (dark backgrounds are drifted more strongly)."""
    color = self.editor.background
    if color.lightness() < 128:
        color = drift_color(color, 130)
    else:
        color = drift_color(color, 105)
    return color
def _add_scope_deco(self, start, end, parent_start, parent_end, base_color,
factor):
"""
Adds a scope decoration that enclose the current scope
:param start: Start of the current scope
:param end: End of the current scope
:param parent_start: Start of the parent scope
:param parent_end: End of the parent scope
:param base_color: base color for scope decoration
:param factor: color factor to apply on the base color (to make it
darker).
"""
color = drift_color(base_color, factor=factor)
# upper part
if start > 0:
d = TextDecoration(self.editor.document(),
start_line=parent_start, end_line=start)
d.set_full_width(True, clear=False)
d.draw_order = 2
d.set_background(color)
self.editor.decorations.append(d)
self._scope_decos.append(d)
# lower part
if end <= self.editor.document().blockCount():
d = TextDecoration(self.editor.document(),
start_line=end, end_line=parent_end + 1)
d.set_full_width(True, clear=False)
d.draw_order = 2
d.set_background(color)
self.editor.decorations.append(d)
self._scope_decos.append(d)
def _add_scope_decorations(self, block, start, end):
"""
Show a scope decoration on the editor widget
:param start: Start line
:param end: End line
"""
try:
parent = FoldScope(block).parent()
except ValueError:
parent = None
if TextBlockHelper.is_fold_trigger(block):
base_color = self._get_scope_highlight_color()
factor_step = 5
if base_color.lightness() < 128:
factor_step = 10
factor = 70
else:
factor = 100
while parent:
# highlight parent scope
parent_start, parent_end = parent.get_range()
self._add_scope_deco(
start, end + 1, parent_start, parent_end,
base_color, factor)
# next parent scope
start = parent_start
end = parent_end
parent = parent.parent()
factor += factor_step
# global scope
parent_start = 0
parent_end = self.editor.document().blockCount()
self._add_scope_deco(
start, end + 1, parent_start, parent_end, base_color,
factor + factor_step)
else:
self._clear_scope_decos()
def _highlight_surrounding_scopes(self, block):
"""
Highlights the scopes surrounding the current fold scope.
:param block: Block that starts the current fold scope.
"""
scope = FoldScope(block)
if (self._current_scope is None or
self._current_scope.get_range() != scope.get_range()):
self._current_scope = scope
self._clear_scope_decos()
# highlight surrounding parent scopes with a darker color
start, end = scope.get_range()
if not TextBlockHelper.is_collapsed(block):
self._add_scope_decorations(block, start, end)
def mouseMoveEvent(self, event):
"""
Detect mouser over indicator and highlight the current scope in the
editor (up and down decoration arround the foldable text when the mouse
is over an indicator).
:param event: event
"""
super(FoldingPanel, self).mouseMoveEvent(event)
th = TextHelper(self.editor)
line = th.line_nbr_from_position(event.pos().y())
if line >= 0:
block = FoldScope.find_parent_scope(
self.editor.document().findBlockByNumber(line))
if TextBlockHelper.is_fold_trigger(block):
if self._mouse_over_line is None:
# mouse enter fold scope
QtWidgets.QApplication.setOverrideCursor(
QtGui.QCursor(QtCore.Qt.PointingHandCursor))
if self._mouse_over_line != block.blockNumber() and \
self._mouse_over_line is not None:
# fold scope changed, a previous block was highlighter so
# we quickly update our highlighting
self._mouse_over_line = block.blockNumber()
self._highlight_surrounding_scopes(block)
else:
# same fold scope, request highlight
self._mouse_over_line = block.blockNumber()
self._highlight_runner.request_job(
self._highlight_surrounding_scopes, block)
self._highight_block = block
else:
# no fold scope to highlight, cancel any pending requests
self._highlight_runner.cancel_requests()
self._mouse_over_line = None
QtWidgets.QApplication.restoreOverrideCursor()
self.repaint()
def leaveEvent(self, event):
"""
Removes scope decorations and background from the editor and the panel
if highlight_caret_scope, else simply update the scope decorations to
match the caret scope.
"""
super(FoldingPanel, self).leaveEvent(event)
QtWidgets.QApplication.restoreOverrideCursor()
self._highlight_runner.cancel_requests()
if not self.highlight_caret_scope:
self._clear_scope_decos()
self._mouse_over_line = None
self._current_scope = None
else:
self._block_nbr = -1
self._highlight_caret_scope()
self.editor.repaint()
def _add_fold_decoration(self, block, region):
"""
Add fold decorations (boxes arround a folded block in the editor
widget).
"""
deco = TextDecoration(block)
deco.signals.clicked.connect(self._on_fold_deco_clicked)
deco.tooltip = region.text(max_lines=25)
deco.draw_order = 1
deco.block = block
deco.select_line()
deco.set_outline(drift_color(
self._get_scope_highlight_color(), 110))
deco.set_background(self._get_scope_highlight_color())
deco.set_foreground(QtGui.QColor('#808080'))
self._block_decos.append(deco)
self.editor.decorations.append(deco)
def toggle_fold_trigger(self, block):
"""
Toggle a fold trigger block (expand or collapse it).
:param block: The QTextBlock to expand/collapse
"""
if not TextBlockHelper.is_fold_trigger(block):
return
region = FoldScope(block)
if region.collapsed:
region.unfold()
if self._mouse_over_line is not None:
self._add_scope_decorations(
region._trigger, *region.get_range())
else:
region.fold()
self._clear_scope_decos()
self._refresh_editor_and_scrollbars()
self.trigger_state_changed.emit(region._trigger, region.collapsed)
def mousePressEvent(self, event):
    """Fold/unfold the indicator under the mouse cursor, if any."""
    if self._mouse_over_line is not None:
        block = self.editor.document().findBlockByNumber(
            self._mouse_over_line)
        self.toggle_fold_trigger(block)
def _on_fold_deco_clicked(self, deco):
"""
Unfold a folded block that has just been clicked by the user
"""
self.toggle_fold_trigger(deco.block)
def on_state_changed(self, state):
"""
On state changed we (dis)connect to the cursorPositionChanged signal
"""
if state:
self.editor.key_pressed.connect(self._on_key_pressed)
if self._highlight_caret:
self.editor.cursorPositionChanged.connect(
self._highlight_caret_scope)
self._block_nbr = -1
self.editor.new_text_set.connect(self._clear_block_deco)
else:
self.editor.key_pressed.disconnect(self._on_key_pressed)
if self._highlight_caret:
self.editor.cursorPositionChanged.disconnect(
self._highlight_caret_scope)
self._block_nbr = -1
self.editor.new_text_set.disconnect(self._clear_block_deco)
    def _on_key_pressed(self, event):
        """
        Override key press to select the current scope if the user wants
        to delete a folded scope (without selecting it).

        Any printable key or delete/backspace over a collapsed region first
        expands the region; for a delete over a selection, the selection is
        grown to cover the complete folded scope before the edit applies.
        """
        delete_request = event.key() in [QtCore.Qt.Key_Backspace,
                                         QtCore.Qt.Key_Delete]
        if event.text() or delete_request:
            cursor = self.editor.textCursor()
            if cursor.hasSelection():
                # change selection to encompass the whole scope.
                positions_to_check = cursor.selectionStart(), cursor.selectionEnd()
            else:
                positions_to_check = (cursor.position(), )
            for pos in positions_to_check:
                block = self.editor.document().findBlock(pos)
                th = TextBlockHelper()
                if th.is_fold_trigger(block) and th.is_collapsed(block):
                    # Expand before editing so no hidden text is modified.
                    self.toggle_fold_trigger(block)
                    if delete_request and cursor.hasSelection():
                        # Grow the selection to the union of the user's
                        # selection and the full folded scope.
                        scope = FoldScope(self.find_parent_scope(block))
                        tc = TextHelper(self.editor).select_lines(*scope.get_range())
                        if tc.selectionStart() > cursor.selectionStart():
                            start = cursor.selectionStart()
                        else:
                            start = tc.selectionStart()
                        if tc.selectionEnd() < cursor.selectionEnd():
                            end = cursor.selectionEnd()
                        else:
                            end = tc.selectionEnd()
                        tc.setPosition(start)
                        tc.setPosition(end, tc.KeepAnchor)
                        self.editor.setTextCursor(tc)
@staticmethod
def _show_previous_blank_lines(block):
"""
Show the block previous blank lines
"""
# set previous blank lines visibles
pblock = block.previous()
while (pblock.text().strip() == '' and
pblock.blockNumber() >= 0):
pblock.setVisible(True)
pblock = pblock.previous()
    def refresh_decorations(self, force=False):
        """
        Refresh decorations colors. This function is called by the syntax
        highlighter when the style changed so that we may update our
        decorations colors according to the new style.

        :param force: refresh even if the cursor did not change block.
        """
        cursor = self.editor.textCursor()
        # Only refresh when the cursor moved to another block (or forced):
        # removing/re-adding decorations is relatively expensive.
        if (self._prev_cursor is None or force or
                self._prev_cursor.blockNumber() != cursor.blockNumber()):
            for deco in self._block_decos:
                self.editor.decorations.remove(deco)
            # Re-add each decoration with colors from the current style.
            for deco in self._block_decos:
                deco.set_outline(drift_color(
                    self._get_scope_highlight_color(), 110))
                deco.set_background(self._get_scope_highlight_color())
                self.editor.decorations.append(deco)
        self._prev_cursor = cursor
    def _refresh_editor_and_scrollbars(self):
        """
        Refreshes editor content and scrollbars.

        We generate a fake resize event to refresh scroll bar.
        We have the same problem as described here:
        http://www.qtcentre.org/threads/44803 and we apply the same solution
        (don't worry, there is no visual effect, the editor does not grow up
        at all, even with a value = 500)
        """
        TextHelper(self.editor).mark_whole_doc_dirty()
        self.editor.repaint()
        # Fake a 1px-wider resize to force a scrollbar geometry update.
        s = self.editor.size()
        s.setWidth(s.width() + 1)
        self.editor.resizeEvent(QtGui.QResizeEvent(self.editor.size(), s))
    def collapse_all(self):
        """
        Collapses all triggers and makes all blocks with fold level > 0
        invisible. Afterwards the text cursor is moved to the start of the
        document and ``collapse_all_triggered`` is emitted.
        """
        self._clear_block_deco()
        block = self.editor.document().firstBlock()
        last = self.editor.document().lastBlock()
        while block.isValid():
            lvl = TextBlockHelper.get_fold_lvl(block)
            trigger = TextBlockHelper.is_fold_trigger(block)
            if trigger:
                if lvl == 0:
                    self._show_previous_blank_lines(block)
                TextBlockHelper.set_collapsed(block, True)
            # Only top-level (fold level 0) blocks remain visible.
            block.setVisible(lvl == 0)
            # Keep a trailing blank last block visible so the document does
            # not look truncated.
            if block == last and block.text().strip() == '':
                block.setVisible(True)
                self._show_previous_blank_lines(block)
            block = block.next()
        self._refresh_editor_and_scrollbars()
        tc = self.editor.textCursor()
        tc.movePosition(tc.Start)
        self.editor.setTextCursor(tc)
        self.collapse_all_triggered.emit()
def _clear_block_deco(self):
"""
Clear the folded block decorations.
"""
for deco in self._block_decos:
self.editor.decorations.remove(deco)
self._block_decos[:] = []
def expand_all(self):
"""
Expands all fold triggers.
"""
block = self.editor.document().firstBlock()
while block.isValid():
TextBlockHelper.set_collapsed(block, False)
block.setVisible(True)
block = block.next()
self._clear_block_deco()
self._refresh_editor_and_scrollbars()
self.expand_all_triggered.emit()
def _on_action_toggle(self):
"""
Toggle the current fold trigger.
"""
block = FoldScope.find_parent_scope(self.editor.textCursor().block())
self.toggle_fold_trigger(block)
def _on_action_collapse_all_triggered(self):
"""
Closes all top levels fold triggers recursively
"""
self.collapse_all()
def _on_action_expand_all_triggered(self):
"""
Expands all fold triggers
:return:
"""
self.expand_all()
    def _highlight_caret_scope(self):
        """
        Highlight the scope surrounding the current caret position.

        This get called only if :attr:`
        pyqode.core.panels.FoldingPanel.highlight_care_scope` is True.
        """
        cursor = self.editor.textCursor()
        block_nbr = cursor.blockNumber()
        # Skip the work entirely while the caret stays on the same line.
        if self._block_nbr != block_nbr:
            block = FoldScope.find_parent_scope(
                self.editor.textCursor().block())
            try:
                s = FoldScope(block)
            except ValueError:
                # Caret is not inside any fold scope: drop old decorations.
                self._clear_scope_decos()
            else:
                self._mouse_over_line = block.blockNumber()
                if TextBlockHelper.is_fold_trigger(block):
                    self._highlight_surrounding_scopes(block)
        self._block_nbr = block_nbr
def clone_settings(self, original):
self.native_look = original.native_look
self.custom_indicators_icons = original.custom_indicators_icons
self.highlight_caret_scope = original.highlight_caret_scope
self.custom_fold_region_background = \
original.custom_fold_region_background
| 39.071078 | 85 | 0.577128 |
ea27b221c1d6ab1169212f62abde30df11413c7a | 88 | py | Python | vaskitsa/python/classifiers/__init__.py | hile/vaskitsa | acebf6cc970e9c342f6b8dc15f0c8d37b272aa5b | [
"PSF-2.0"
] | null | null | null | vaskitsa/python/classifiers/__init__.py | hile/vaskitsa | acebf6cc970e9c342f6b8dc15f0c8d37b272aa5b | [
"PSF-2.0"
] | null | null | null | vaskitsa/python/classifiers/__init__.py | hile/vaskitsa | acebf6cc970e9c342f6b8dc15f0c8d37b272aa5b | [
"PSF-2.0"
] | null | null | null | """
Python trove classifiers
"""
# flake8: noqa: F401
from .lookup import Classifiers
| 11 | 31 | 0.715909 |
3b5c19ef61dded7d2f804eae04e8bb6a25420b69 | 2,532 | py | Python | ceilometer/objectstore/notifications.py | gitter-badger/ceilometer | 21bde474f4836b4a933caf41baf4e20e5ff25135 | [
"Apache-2.0"
] | null | null | null | ceilometer/objectstore/notifications.py | gitter-badger/ceilometer | 21bde474f4836b4a933caf41baf4e20e5ff25135 | [
"Apache-2.0"
] | null | null | null | ceilometer/objectstore/notifications.py | gitter-badger/ceilometer | 21bde474f4836b4a933caf41baf4e20e5ff25135 | [
"Apache-2.0"
] | 3 | 2015-10-08T20:03:36.000Z | 2020-02-05T10:45:50.000Z | #
# Copyright 2015 Red Hat. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import oslo.messaging
from oslo_config import cfg
from ceilometer.agent import plugin_base
from ceilometer import sample
OPTS = [
cfg.StrOpt('swift_control_exchange',
default='swift',
help="Exchange name for Swift notifications."),
]
cfg.CONF.register_opts(OPTS)
class _Base(plugin_base.NotificationBase):
    """Convert objectstore notification into Samples."""
    @staticmethod
    def get_targets(conf):
        """Return a sequence of oslo.messaging.Target

        Sequence defining the exchange and topics to be connected for this
        plugin.
        """
        exchange = conf.swift_control_exchange
        return [oslo.messaging.Target(topic=topic, exchange=exchange)
                for topic in conf.notification_topics]
class SwiftWsgiMiddleware(_Base):
    # Converts a Swift WSGI middleware notification into one Sample per
    # measurement plus one 'storage.api.request' counter Sample.
    def event_types(self):
        """Return the notification event types this plugin handles."""
        return ['objectstore.http.request']
    def process_notification(self, message):
        """Yield Samples built from a single objectstore notification.

        :param message: notification dict; the payload is expected to carry
            'measurements', 'target' and 'initiator' entries (CADF-style).
        """
        if message['payload']['measurements']:
            # One delta Sample per reported measurement.
            for meter in message['payload']['measurements']:
                yield sample.Sample.from_notification(
                    name=meter['metric']['name'],
                    type=sample.TYPE_DELTA,
                    unit=meter['metric']['unit'],
                    volume=meter['result'],
                    resource_id=message['payload']['target']['id'],
                    user_id=message['payload']['initiator']['id'],
                    project_id=message['payload']['initiator']['project_id'],
                    message=message)
        # Always count the API request itself (volume 1 per notification).
        yield sample.Sample.from_notification(
            name='storage.api.request',
            type=sample.TYPE_DELTA,
            unit='request',
            volume=1,
            resource_id=message['payload']['target']['id'],
            user_id=message['payload']['initiator']['id'],
            project_id=message['payload']['initiator']['project_id'],
            message=message)
0bdfb86d43765d32c855bad56f72694632dbbce1 | 327 | py | Python | main.py | HiggsHydra/permian-frac-exchange | 1dd1e409e5389373590c7d3780a54cd9f12d1166 | [
"MIT"
] | null | null | null | main.py | HiggsHydra/permian-frac-exchange | 1dd1e409e5389373590c7d3780a54cd9f12d1166 | [
"MIT"
] | null | null | null | main.py | HiggsHydra/permian-frac-exchange | 1dd1e409e5389373590c7d3780a54cd9f12d1166 | [
"MIT"
] | null | null | null | """ Entry point"""
import logging
import app as app
import loggers
logger = logging.getLogger(__name__)
def main():
    """Configure logging and run the application.

    Any exception raised by the app is logged with its traceback instead of
    propagating, so the entry point never dies with an unlogged error.
    """
    loggers.standard_config()
    try:
        app.run()
    except Exception:
        # Intentionally broad: this is the top-level boundary; the unused
        # ``as e`` binding was dropped since logger.exception already
        # records the active exception and traceback.
        logger.exception("Process failed.")
if __name__ == "__main__":
    main()
# TODO: Check ./data folder exists
c515990dfecce7c502c22d4b99eecc65a8dca123 | 24 | py | Python | hello_world.py | angrymarcos/profiles-rest-api | 54c04f03871c94a12832ec5d5850a162d74f3adf | [
"MIT"
] | null | null | null | hello_world.py | angrymarcos/profiles-rest-api | 54c04f03871c94a12832ec5d5850a162d74f3adf | [
"MIT"
] | 5 | 2021-03-19T11:25:39.000Z | 2022-02-10T13:15:47.000Z | hello_world.py | angrymarcos/profiles-rest-api | 54c04f03871c94a12832ec5d5850a162d74f3adf | [
"MIT"
] | null | null | null | print("hello World!!!")
| 12 | 23 | 0.625 |
9254e8ef8243e1eac39a8b2f722b7af6217535b9 | 12,757 | py | Python | zunclient/common/apiclient/exceptions.py | wkite/python-zunclient | f94c2838d33bfe2f5d440ea642d6ce1bd0fd47de | [
"Apache-2.0"
] | null | null | null | zunclient/common/apiclient/exceptions.py | wkite/python-zunclient | f94c2838d33bfe2f5d440ea642d6ce1bd0fd47de | [
"Apache-2.0"
] | null | null | null | zunclient/common/apiclient/exceptions.py | wkite/python-zunclient | f94c2838d33bfe2f5d440ea642d6ce1bd0fd47de | [
"Apache-2.0"
] | null | null | null | # Copyright 2010 Jacob Kaplan-Moss
# Copyright 2011 Nebula, Inc.
# Copyright 2013 Alessio Ababilov
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Exception definitions.
"""
import inspect
import sys
import six
from zunclient.i18n import _
class VersionNotFoundForAPIMethod(Exception):
    """Raised when a method does not support the requested API version."""

    msg_fmt = "API version '%(vers)s' is not supported on '%(method)s' method."

    def __init__(self, version, method):
        self.version = version
        self.method = method

    def __str__(self):
        # Render the class-level template with the stored values.
        return self.msg_fmt % dict(vers=self.version, method=self.method)
class ClientException(Exception):
"""The base exception class for all exceptions this library raises."""
pass
class UnsupportedVersion(ClientException):
"""User is trying to use an unsupported version of the API."""
pass
class CommandError(ClientException):
"""Error in CLI tool."""
pass
class AuthorizationFailure(ClientException):
"""Cannot authorize API client."""
pass
class ConnectionError(ClientException):
"""Cannot connect to API service."""
pass
class ConnectionRefused(ConnectionError):
"""Connection refused while trying to connect to API service."""
pass
class AuthPluginOptionsMissing(AuthorizationFailure):
"""Auth plugin misses some options."""
def __init__(self, opt_names):
super(AuthPluginOptionsMissing, self).__init__(
_("Authentication failed. Missing options: %s") %
", ".join(opt_names))
self.opt_names = opt_names
class AuthSystemNotFound(AuthorizationFailure):
"""User has specified an AuthSystem that is not installed."""
def __init__(self, auth_system):
super(AuthSystemNotFound, self).__init__(
_("AuthSystemNotFound: %r") % auth_system)
self.auth_system = auth_system
class NoUniqueMatch(ClientException):
"""Multiple entities found instead of one."""
pass
class EndpointException(ClientException):
"""Something is rotten in Service Catalog."""
pass
class EndpointNotFound(EndpointException):
"""Could not find requested endpoint in Service Catalog."""
pass
class AmbiguousEndpoints(EndpointException):
"""Found more than one matching endpoint in Service Catalog."""
def __init__(self, endpoints=None):
super(AmbiguousEndpoints, self).__init__(
_("AmbiguousEndpoints: %r") % endpoints)
self.endpoints = endpoints
class HttpError(ClientException):
    """The base exception class for all HTTP exceptions."""
    # Subclasses override these with their specific code/message.
    http_status = 0
    message = _("HTTP Error")
    def __init__(self, message=None, details=None,
                 response=None, request_id=None,
                 url=None, method=None, http_status=None):
        """Store request/response context and build a readable summary.

        :param message: human readable error; falls back to the class-level
            default when falsy.
        :param details: extra detail text (e.g. server fault details).
        :param response: the underlying ``requests.Response``, if any.
        :param request_id: the OpenStack request id, if present.
        :param url: URL of the failed request.
        :param method: HTTP method of the failed request.
        :param http_status: status code; falls back to the class default.
        """
        self.http_status = http_status or self.http_status
        self.message = message or self.message
        self.details = details
        self.request_id = request_id
        self.response = response
        self.url = url
        self.method = method
        # e.g. "Not Found (HTTP 404) (Request-ID: req-...)"
        formatted_string = "%s (HTTP %s)" % (self.message, self.http_status)
        if request_id:
            formatted_string += " (Request-ID: %s)" % request_id
        super(HttpError, self).__init__(formatted_string)
class HTTPRedirection(HttpError):
"""HTTP Redirection."""
message = _("HTTP Redirection")
class HTTPClientError(HttpError):
"""Client-side HTTP error.
Exception for cases in which the client seems to have erred.
"""
message = _("HTTP Client Error")
class HttpServerError(HttpError):
"""Server-side HTTP error.
Exception for cases in which the server is aware that it has
erred or is incapable of performing the request.
"""
message = _("HTTP Server Error")
class MultipleChoices(HTTPRedirection):
"""HTTP 300 - Multiple Choices.
Indicates multiple options for the resource that the client may follow.
"""
http_status = 300
message = _("Multiple Choices")
class BadRequest(HTTPClientError):
"""HTTP 400 - Bad Request.
The request cannot be fulfilled due to bad syntax.
"""
http_status = 400
message = _("Bad Request")
class Unauthorized(HTTPClientError):
"""HTTP 401 - Unauthorized.
Similar to 403 Forbidden, but specifically for use when authentication
is required and has failed or has not yet been provided.
"""
http_status = 401
message = _("Unauthorized")
class PaymentRequired(HTTPClientError):
"""HTTP 402 - Payment Required.
Reserved for future use.
"""
http_status = 402
message = _("Payment Required")
class Forbidden(HTTPClientError):
"""HTTP 403 - Forbidden.
The request was a valid request, but the server is refusing to respond
to it.
"""
http_status = 403
message = _("Forbidden")
class NotFound(HTTPClientError):
"""HTTP 404 - Not Found.
The requested resource could not be found but may be available again
in the future.
"""
http_status = 404
message = _("Not Found")
class MethodNotAllowed(HTTPClientError):
"""HTTP 405 - Method Not Allowed.
A request was made of a resource using a request method not supported
by that resource.
"""
http_status = 405
message = _("Method Not Allowed")
class NotAcceptable(HTTPClientError):
"""HTTP 406 - Not Acceptable.
The requested resource is only capable of generating content not
acceptable according to the Accept headers sent in the request.
"""
http_status = 406
message = _("Not Acceptable")
class ProxyAuthenticationRequired(HTTPClientError):
"""HTTP 407 - Proxy Authentication Required.
The client must first authenticate itself with the proxy.
"""
http_status = 407
message = _("Proxy Authentication Required")
class RequestTimeout(HTTPClientError):
"""HTTP 408 - Request Timeout.
The server timed out waiting for the request.
"""
http_status = 408
message = _("Request Timeout")
class Conflict(HTTPClientError):
"""HTTP 409 - Conflict.
Indicates that the request could not be processed because of conflict
in the request, such as an edit conflict.
"""
http_status = 409
message = _("Conflict")
class Gone(HTTPClientError):
"""HTTP 410 - Gone.
Indicates that the resource requested is no longer available and will
not be available again.
"""
http_status = 410
message = _("Gone")
class LengthRequired(HTTPClientError):
"""HTTP 411 - Length Required.
The request did not specify the length of its content, which is
required by the requested resource.
"""
http_status = 411
message = _("Length Required")
class PreconditionFailed(HTTPClientError):
"""HTTP 412 - Precondition Failed.
The server does not meet one of the preconditions that the requester
put on the request.
"""
http_status = 412
message = _("Precondition Failed")
class RequestEntityTooLarge(HTTPClientError):
    """HTTP 413 - Request Entity Too Large.

    The request is larger than the server is willing or able to process.
    """
    http_status = 413
    message = _("Request Entity Too Large")
    def __init__(self, *args, **kwargs):
        """Extract the optional ``retry_after`` kwarg (seconds to wait).

        Defaults to 0 when the keyword is absent or not an integer.
        """
        try:
            self.retry_after = int(kwargs.pop('retry_after'))
        except (KeyError, ValueError):
            self.retry_after = 0
        super(RequestEntityTooLarge, self).__init__(*args, **kwargs)
class RequestUriTooLong(HTTPClientError):
"""HTTP 414 - Request-URI Too Long.
The URI provided was too long for the server to process.
"""
http_status = 414
message = _("Request-URI Too Long")
class UnsupportedMediaType(HTTPClientError):
"""HTTP 415 - Unsupported Media Type.
The request entity has a media type which the server or resource does
not support.
"""
http_status = 415
message = _("Unsupported Media Type")
class RequestedRangeNotSatisfiable(HTTPClientError):
"""HTTP 416 - Requested Range Not Satisfiable.
The client has asked for a portion of the file, but the server cannot
supply that portion.
"""
http_status = 416
message = _("Requested Range Not Satisfiable")
class ExpectationFailed(HTTPClientError):
"""HTTP 417 - Expectation Failed.
The server cannot meet the requirements of the Expect request-header field.
"""
http_status = 417
message = _("Expectation Failed")
class UnprocessableEntity(HTTPClientError):
"""HTTP 422 - Unprocessable Entity.
The request was well-formed but was unable to be followed due to semantic
errors.
"""
http_status = 422
message = _("Unprocessable Entity")
class InternalServerError(HttpServerError):
"""HTTP 500 - Internal Server Error.
A generic error message, given when no more specific message is suitable.
"""
http_status = 500
message = _("Internal Server Error")
# NotImplemented is a python keyword.
class HttpNotImplemented(HttpServerError):
"""HTTP 501 - Not Implemented.
The server either does not recognize the request method, or it lacks
the ability to fulfill the request.
"""
http_status = 501
message = _("Not Implemented")
class BadGateway(HttpServerError):
"""HTTP 502 - Bad Gateway.
The server was acting as a gateway or proxy and received an invalid
response from the upstream server.
"""
http_status = 502
message = _("Bad Gateway")
class ServiceUnavailable(HttpServerError):
"""HTTP 503 - Service Unavailable.
The server is currently unavailable.
"""
http_status = 503
message = _("Service Unavailable")
class GatewayTimeout(HttpServerError):
"""HTTP 504 - Gateway Timeout.
The server was acting as a gateway or proxy and did not receive a timely
response from the upstream server.
"""
http_status = 504
message = _("Gateway Timeout")
class HttpVersionNotSupported(HttpServerError):
"""HTTP 505 - HttpVersion Not Supported.
The server does not support the HTTP protocol version used in the request.
"""
http_status = 505
message = _("HTTP Version Not Supported")
# _code_map contains all the classes that have http_status attribute.
# It maps an int status code -> exception class, built by introspecting
# this module for classes defining a truthy ``http_status`` (the base
# HttpError, with http_status = 0, is therefore excluded).
_code_map = dict(
    (getattr(obj, 'http_status', None), obj)
    for name, obj in vars(sys.modules[__name__]).items()
    if inspect.isclass(obj) and getattr(obj, 'http_status', False)
)
def from_response(response, method, url):
    """Returns an instance of :class:`HttpError` or subclass based on response.

    :param response: instance of `requests.Response` class
    :param method: HTTP method used for request
    :param url: URL used for request
    """
    req_id = response.headers.get("x-openstack-request-id")
    # NOTE(hdd) true for older versions of nova and cinder
    if not req_id:
        req_id = response.headers.get("x-compute-request-id")
    kwargs = {
        "http_status": response.status_code,
        "response": response,
        "method": method,
        "url": url,
        "request_id": req_id,
    }
    if "retry-after" in response.headers:
        kwargs["retry_after"] = response.headers["retry-after"]
    content_type = response.headers.get("Content-Type", "")
    if content_type.startswith("application/json"):
        try:
            body = response.json()
        except ValueError:
            # Malformed JSON body: fall back to the bare status mapping.
            pass
        else:
            if isinstance(body, dict):
                # OpenStack error bodies wrap the fault under a single
                # top-level key; pull message/details out of it.
                error = body.get(list(body)[0])
                if isinstance(error, dict):
                    kwargs["message"] = (error.get("message") or
                                         error.get("faultstring"))
                    kwargs["details"] = (error.get("details") or
                                         six.text_type(body))
    elif content_type.startswith("text/"):
        kwargs["details"] = getattr(response, 'text', '')
    try:
        cls = _code_map[response.status_code]
    except KeyError:
        # Unknown status code: pick the generic class for its range.
        if 500 <= response.status_code < 600:
            cls = HttpServerError
        elif 400 <= response.status_code < 500:
            cls = HTTPClientError
        else:
            cls = HttpError
    return cls(**kwargs)
| 27.084926 | 79 | 0.670142 |
855194cc6b89604a3abfe46429e345503f2a27f1 | 96 | py | Python | venv/lib/python3.8/site-packages/numpy/core/numerictypes.py | Retraces/UkraineBot | 3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71 | [
"MIT"
] | 2 | 2022-03-13T01:58:52.000Z | 2022-03-31T06:07:54.000Z | venv/lib/python3.8/site-packages/numpy/core/numerictypes.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | [
"MIT"
] | 19 | 2021-11-20T04:09:18.000Z | 2022-03-23T15:05:55.000Z | venv/lib/python3.8/site-packages/numpy/core/numerictypes.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | [
"MIT"
] | null | null | null | /home/runner/.cache/pip/pool/8e/b1/37/7b57173356f7e78e62222f530083e033283e8bc4a15917acf6fea1faa4 | 96 | 96 | 0.895833 |
2f73d0ca868873168cb38f38457a2ea4422df4b4 | 2,454 | py | Python | src/lessons/watermark_lesson.py | vicktech/Face-Recognition | 5122cd73f664000ec6611b4d72aca1f4c57939cd | [
"Apache-2.0"
] | null | null | null | src/lessons/watermark_lesson.py | vicktech/Face-Recognition | 5122cd73f664000ec6611b4d72aca1f4c57939cd | [
"Apache-2.0"
] | null | null | null | src/lessons/watermark_lesson.py | vicktech/Face-Recognition | 5122cd73f664000ec6611b4d72aca1f4c57939cd | [
"Apache-2.0"
] | 1 | 2020-06-11T21:55:59.000Z | 2020-06-11T21:55:59.000Z | import numpy as np
import cv2
from utils import CFEVideoConf, image_resize
# Capture webcam frames, composite a semi-transparent logo watermark into
# the bottom-right corner, record to an mp4 and preview live ('q' quits).
cap = cv2.VideoCapture(0)
save_path = 'saved-media/watermark.mp4'
frames_per_seconds = 24
config = CFEVideoConf(cap, filepath=save_path, res='720p')
out = cv2.VideoWriter(save_path, config.video_type, frames_per_seconds, config.dims)
img_path = 'images/logo/cfe-coffee.png'
logo = cv2.imread(img_path, -1)  # -1 preserves the alpha channel
watermark = image_resize(logo, height=50)
watermark = cv2.cvtColor(watermark, cv2.COLOR_BGR2BGRA)
# Loop-invariant values hoisted out of the capture loop.
watermark_h, watermark_w, watermark_c = watermark.shape
offset = 10
# Boolean mask of watermark pixels that are not fully transparent.
visible = watermark[:, :, 3] != 0
while True:
    ret, frame = cap.read()
    if not ret:
        # Camera returned no frame (unplugged / end of stream): stop
        # instead of crashing on a None frame.
        break
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2BGRA)
    frame_h, frame_w, frame_c = frame.shape
    # BGRA overlay the size of the frame; watermark pasted bottom-right.
    overlay = np.zeros((frame_h, frame_w, 4), dtype='uint8')
    h_offset = frame_h - watermark_h - offset
    w_offset = frame_w - watermark_w - offset
    # Vectorized replacement for the original per-pixel Python loop:
    # copy every non-transparent watermark pixel into the overlay region.
    region = overlay[h_offset:h_offset + watermark_h,
                     w_offset:w_offset + watermark_w]
    region[visible] = watermark[visible]
    # Blend the overlay onto the frame at 25% opacity (in place).
    cv2.addWeighted(overlay, 0.25, frame, 1.0, 0, frame)
    frame = cv2.cvtColor(frame, cv2.COLOR_BGRA2BGR)
    out.write(frame)
    cv2.imshow('frame', frame)
    if cv2.waitKey(20) & 0xFF == ord('q'):
        break
# Release the capture and writer, close the preview window.
cap.release()
out.release()
cv2.destroyAllWindows()
05a7fcdca2213ee07978dcb27fd46fd6a32f0cdb | 3,130 | py | Python | dmhyorg.py | simonkq1/DMHY_qBittorrent_search_plugin | 7c6c1c230e7d3ee5a7c80f987224c8c41cb28024 | [
"MIT"
] | null | null | null | dmhyorg.py | simonkq1/DMHY_qBittorrent_search_plugin | 7c6c1c230e7d3ee5a7c80f987224c8c41cb28024 | [
"MIT"
] | null | null | null | dmhyorg.py | simonkq1/DMHY_qBittorrent_search_plugin | 7c6c1c230e7d3ee5a7c80f987224c8c41cb28024 | [
"MIT"
] | null | null | null |
#VERSION: 1.02
#AUTHORS: xyau (xyauhideto@gmail.com)
# MIT License
#
# Copyright (c) 2018 xyau
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the right
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software i
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# qBT
from helpers import download_file, retrieve_url
from novaprinter import prettyPrinter
# parser
from re import compile as re_compile
class dmhyorg(object):
    """qBittorrent search engine plugin for share.dmhy.org."""
    url = "https://share.dmhy.org"
    name = "DMHY"
    supported_categories = {"all": 0, "anime": 2, "pictures": 3,
                            "music": 4, "tv": 6, "games": 9}

    def download_torrent(self, info):
        """ Downloader """
        print(download_file(info))

    # DO NOT CHANGE the name and parameters of this function
    # This function will be the one called by nova2.py
    def search(self, what, cat="all"):
        """ Performs search """
        def get_data(url):
            # Regexes: strip keyword highlighting, locate the "next page"
            # link, and capture (link, title, magnet, size value, size
            # unit, seeds, leeches) for every result row.
            highlight = re_compile('<span class="keyword">([^<]+)</span>')
            get_next = re_compile('(?s)"fl".+href="([^"]+)">下一')
            get_item = re_compile('(?m)<a href="(/topics/view/[^"]+)"[^>]+>\s*(.+[^<]*)</a>(?:\s*.*){3}(magnet:[^"]+)"(?:.*\s*.*>){3}([\d\.]+)(\w+)</td[^/]+btl_1">([\d-]+)</span></td>\s*[^/]+bts_1">([\d-]+)<')
            html = retrieve_url(url)
            next_page = get_next.search(html)
            # clear highlighting
            return [get_item.findall(highlight.sub(r"\1", html)),
                    next_page and self.url + next_page.group(1)]

        # BUG FIX: the fallback category id must be the int 0, not the
        # str "0" -- the old string default made the "%d" format raise
        # TypeError for any category missing from supported_categories.
        query = "%s/topics/list/?keyword=%s&sort_id=%d" % (
            self.url, what, self.supported_categories.get(cat, 0))
        while query:
            [data, query] = get_data(query)
            for item in data:
                prettyPrinter({
                    "desc_link": self.url + item[0],
                    "name": item[1],
                    "link": item[2],
                    # "<value><unit>" -> bytes ('k'=2**10, 'm'=2**20, ...).
                    "size": str(int(float(item[3]) * 2 ** (10 * (1 + 'kmgtpezy'.find(item[4][0].lower()))))),
                    "seeds": 0 if "-" == item[5] else int(item[5]),
                    "leech": 0 if "-" == item[6] else int(item[6]),
                    "engine_url": self.url
                })
if __name__ == "__main__":
engine = dmhyorg()
engine.search('conan')
| 40.649351 | 209 | 0.608626 |
a2a362cc2efb66f56fc41c410a44b81153be2736 | 1,538 | py | Python | phrase_extract.py | neerajvashistha/blogAggregation | e73eb530ea946a0e278f22cf7b031b56b7ab264d | [
"MIT"
] | null | null | null | phrase_extract.py | neerajvashistha/blogAggregation | e73eb530ea946a0e278f22cf7b031b56b7ab264d | [
"MIT"
] | null | null | null | phrase_extract.py | neerajvashistha/blogAggregation | e73eb530ea946a0e278f22cf7b031b56b7ab264d | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import print_function
import six
import sys
from modules import rake
import operator
import io
# from modules import spellcheck
# from spellchecker import SpellChecker
from nltk.corpus import stopwords
stopword_list = stopwords.words('english')
stopword_list.extend(['google','facebook','twitter','linkedin','whatsapp'])
# def spellcorector(sentence):
# spell = SpellChecker()
# for tokens in list(spell.unknown(spell.split_words(sentence))):
# correct = spell.correction(tokens)
# sentence = sentence.replace(tokens,correct)
# return sentence
def custom_ner(sentence):
    """Extract candidate key phrases from *sentence* using RAKE.

    :param sentence: input text, assumed free of slang and spelling errors.
    :returns: list of phrases that contain no '.' and are either longer
        than 6 characters or made of more than two words.
    """
    # 1. Path to the stopword list used to delimit candidate keywords.
    stoppath = "modules/SmartStoplist_mod.txt"
    # (The original also built an unused rake.Rake(stoppath) instance
    # here; it was dead code and has been removed.)
    # 2. Split text into sentences.
    sentenceList = rake.split_sentences(sentence)
    # 3. Generate candidate keywords from stopword-delimited chunks.
    stopwordpattern = rake.build_stop_word_regex(stoppath)
    phraseList = rake.generate_candidate_keywords(sentenceList, stopwordpattern)
    # Keep only reasonably long or multi-word phrases without dots.
    phraseList = [t for t in phraseList
                  if '.' not in t and (len(t) > 6 or len(t.split()) > 2)]
    return phraseList
if __name__ == "__main__":
    # BUG FIX: the module defines custom_ner; the previous call to the
    # nonexistent extract_phrase raised NameError.
    print(custom_ner("Samsung Phone"))
de4ce1ca5c7463fdf2f0c5385aaab8ba3ffd8512 | 8,989 | py | Python | src/poetry/masonry/builders/editable.py | nikmolnar/poetry | ea335b569502cd03647a9df9d6a6aad71acde93f | [
"MIT"
] | null | null | null | src/poetry/masonry/builders/editable.py | nikmolnar/poetry | ea335b569502cd03647a9df9d6a6aad71acde93f | [
"MIT"
] | null | null | null | src/poetry/masonry/builders/editable.py | nikmolnar/poetry | ea335b569502cd03647a9df9d6a6aad71acde93f | [
"MIT"
] | null | null | null | from __future__ import annotations
import hashlib
import os
import shutil
from base64 import urlsafe_b64encode
from pathlib import Path
from typing import TYPE_CHECKING
from poetry.core.masonry.builders.builder import Builder
from poetry.core.masonry.builders.sdist import SdistBuilder
from poetry.core.masonry.utils.package_include import PackageInclude
from poetry.core.semver.version import Version
from poetry.utils._compat import WINDOWS
from poetry.utils._compat import decode
from poetry.utils.env import build_environment
from poetry.utils.helpers import is_dir_writable
from poetry.utils.pip import pip_install
if TYPE_CHECKING:
from cleo.io.io import IO
from poetry.poetry import Poetry
from poetry.utils.env import Env
SCRIPT_TEMPLATE = """\
#!{python}
import sys
from {module} import {callable_holder}
if __name__ == '__main__':
sys.exit({callable_}())
"""
WINDOWS_CMD_TEMPLATE = """\
@echo off\r\n"{python}" "%~dp0\\{script}" %*\r\n
"""
class EditableBuilder(Builder): # type: ignore[misc]
    def __init__(self, poetry: Poetry, env: Env, io: IO) -> None:
        """Create a builder that installs *poetry*'s package into *env*
        in editable mode, reporting progress through *io*."""
        super().__init__(poetry)
        self._env = env
        self._io = io
    def build(self) -> None:
        """Install the current package into the environment in editable mode.

        Runs the build script if one is declared (falling back to a
        generated setup.py when required), removes any previously installed
        distribution files, then writes the .pth file, the console scripts
        and the dist-info metadata.
        """
        self._debug(
            f" - Building package <c1>{self._package.name}</c1> in"
            " <info>editable</info> mode"
        )
        if self._package.build_script:
            if self._package.build_should_generate_setup():
                # Legacy path: a setup.py based editable install.
                self._debug(
                    " - <warning>Falling back on using a <b>setup.py</b></warning>"
                )
                return self._setup_build()
            self._run_build_script(self._package.build_script)
        # Clean out files from any previous install of this distribution.
        for removed in self._env.site_packages.remove_distribution_files(
            distribution_name=self._package.name
        ):
            self._debug(
                f" - Removed <c2>{removed.name}</c2> directory from"
                f" <b>{removed.parent}</b>"
            )
        added_files = []
        added_files += self._add_pth()
        added_files += self._add_scripts()
        self._add_dist_info(added_files)
def _run_build_script(self, build_script: str) -> None:
with build_environment(poetry=self._poetry, env=self._env, io=self._io) as env:
self._debug(f" - Executing build script: <b>{build_script}</b>")
env.run("python", str(self._path.joinpath(build_script)), call=True)
def _setup_build(self) -> None:
builder = SdistBuilder(self._poetry)
setup = self._path / "setup.py"
has_setup = setup.exists()
if has_setup:
self._io.write_error_line(
"<warning>A setup.py file already exists. Using it.</warning>"
)
else:
with setup.open("w", encoding="utf-8") as f:
f.write(decode(builder.build_setup()))
try:
if self._env.pip_version < Version.from_parts(19, 0):
pip_install(self._path, self._env, upgrade=True, editable=True)
else:
# Temporarily rename pyproject.toml
shutil.move(
str(self._poetry.file), str(self._poetry.file.with_suffix(".tmp"))
)
try:
pip_install(self._path, self._env, upgrade=True, editable=True)
finally:
shutil.move(
str(self._poetry.file.with_suffix(".tmp")),
str(self._poetry.file),
)
finally:
if not has_setup:
os.remove(str(setup))
def _add_pth(self) -> list[Path]:
paths = {
include.base.resolve().as_posix()
for include in self._module.includes
if isinstance(include, PackageInclude)
and (include.is_module() or include.is_package())
}
content = "".join(decode(path + os.linesep) for path in paths)
pth_file = Path(self._module.name).with_suffix(".pth")
# remove any pre-existing pth files for this package
for file in self._env.site_packages.find(path=pth_file, writable_only=True):
self._debug(
f" - Removing existing <c2>{file.name}</c2> from <b>{file.parent}</b>"
f" for {self._poetry.file.parent}"
)
# We can't use unlink(missing_ok=True) because it's not always available
if file.exists():
file.unlink()
try:
pth_file = self._env.site_packages.write_text(
pth_file, content, encoding="utf-8"
)
self._debug(
f" - Adding <c2>{pth_file.name}</c2> to <b>{pth_file.parent}</b> for"
f" {self._poetry.file.parent}"
)
return [pth_file]
except OSError:
# TODO: Replace with PermissionError
self._io.write_error_line(
f" - Failed to create <c2>{pth_file.name}</c2> for"
f" {self._poetry.file.parent}"
)
return []
def _add_scripts(self) -> list[Path]:
added = []
entry_points = self.convert_entry_points()
for scripts_path in self._env.script_dirs:
if is_dir_writable(path=scripts_path, create=True):
break
else:
self._io.write_error_line(
" - Failed to find a suitable script installation directory for"
f" {self._poetry.file.parent}"
)
return []
scripts = entry_points.get("console_scripts", [])
for script in scripts:
name, script = script.split(" = ")
module, callable_ = script.split(":")
callable_holder = callable_.split(".", 1)[0]
script_file = scripts_path.joinpath(name)
self._debug(
f" - Adding the <c2>{name}</c2> script to <b>{scripts_path}</b>"
)
with script_file.open("w", encoding="utf-8") as f:
f.write(
decode(
SCRIPT_TEMPLATE.format(
python=self._env.python,
module=module,
callable_holder=callable_holder,
callable_=callable_,
)
)
)
script_file.chmod(0o755)
added.append(script_file)
if WINDOWS:
cmd_script = script_file.with_suffix(".cmd")
cmd = WINDOWS_CMD_TEMPLATE.format(python=self._env.python, script=name)
self._debug(
f" - Adding the <c2>{cmd_script.name}</c2> script wrapper to"
f" <b>{scripts_path}</b>"
)
with cmd_script.open("w", encoding="utf-8") as f:
f.write(decode(cmd))
added.append(cmd_script)
return added
def _add_dist_info(self, added_files: list[Path]) -> None:
from poetry.core.masonry.builders.wheel import WheelBuilder
added_files = added_files[:]
builder = WheelBuilder(self._poetry)
dist_info = self._env.site_packages.mkdir(Path(builder.dist_info))
self._debug(
f" - Adding the <c2>{dist_info.name}</c2> directory to"
f" <b>{dist_info.parent}</b>"
)
with dist_info.joinpath("METADATA").open("w", encoding="utf-8") as f:
builder._write_metadata_file(f)
added_files.append(dist_info.joinpath("METADATA"))
with dist_info.joinpath("INSTALLER").open("w", encoding="utf-8") as f:
f.write("poetry")
added_files.append(dist_info.joinpath("INSTALLER"))
if self.convert_entry_points():
with dist_info.joinpath("entry_points.txt").open(
"w", encoding="utf-8"
) as f:
builder._write_entry_points(f)
added_files.append(dist_info.joinpath("entry_points.txt"))
record = dist_info.joinpath("RECORD")
with record.open("w", encoding="utf-8") as f:
for path in added_files:
hash = self._get_file_hash(path)
size = path.stat().st_size
f.write(f"{path!s},sha256={hash},{size}\n")
# RECORD itself is recorded with no hash or size
f.write(f"{record},,\n")
def _get_file_hash(self, filepath: Path) -> str:
hashsum = hashlib.sha256()
with filepath.open("rb") as src:
while True:
buf = src.read(1024 * 8)
if not buf:
break
hashsum.update(buf)
src.seek(0)
return urlsafe_b64encode(hashsum.digest()).decode("ascii").rstrip("=")
def _debug(self, msg: str) -> None:
if self._io.is_debug():
self._io.write_line(msg)
| 33.920755 | 87 | 0.557014 |
e18d6b482fd319c58448829fbf9b905f4275a4e2 | 3,773 | py | Python | proj_flask/web_app.py | markwkiehl/flask_proj1 | d065ca1b3f567800b5aca6e27379368e0e3346e9 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | proj_flask/web_app.py | markwkiehl/flask_proj1 | d065ca1b3f567800b5aca6e27379368e0e3346e9 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | proj_flask/web_app.py | markwkiehl/flask_proj1 | d065ca1b3f567800b5aca6e27379368e0e3346e9 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | #web_app.py
from flask import jsonify, request, render_template, redirect, url_for
from proj_flask import app
# define a navigation bar
#pip install Flask-Navigation
#https://flask-navigation.readthedocs.io/en/latest/
from flask_navigation import Navigation
nav = Navigation()
nav.init_app(app)
nav.Bar('top', [
nav.Item('Home', 'index'),
nav.Item('Posts', 'url_posts'),
nav.Item('Redirect', 'url_redirect'),
nav.Item('URL Query', 'url_query', {'scope': 'USA'}),
nav.Item('JSON', 'url_json'),
nav.Item('Static', 'url_static'),
nav.Item('XML', 'url_xml'),
])
# Get the content from topics.xml file
from proj_flask.class_xml_topics import XmlTopics
import os
# Absolute path of this module's directory, used to locate bundled assets.
basedir = os.path.abspath(os.path.dirname(__file__))
# Path to the XML topics file served by the /xml endpoint.
class_file_path = os.path.join(basedir, 'static/xml/topics.xml')
# Demo payload for the chart endpoints: weekday labels plus one data series.
data_json = {
    "chartData": {
        "labels": [
            "sunday",
            "monday",
            "tuesday",
            "wednesday",
            "thursday",
            "friday",
            "saturday"
        ],
        "thisWeek": [
            20000,
            14000,
            12000,
            15000,
            18000,
            19000,
            22000
        ]
    }
}
# Site-wide template context passed to most render_template calls.
site = {'sitename': 'SavvyFlaskSolutions'}
@app.route('/')
def index():
    """Render the landing page."""
    context = {'site': site, 'menu': 'home'}
    return render_template('index.html', **context)
@app.route('/xml', methods=['GET'])
def url_xml():
    """Render the XML topics page, optionally filtered by a ?topic= query arg."""
    c_topics = XmlTopics(class_file_path)
    topics_all = c_topics.get()
    # Fix: read the query parameter once and compare with `is None` (PEP 8)
    # instead of calling request.args.get() twice and comparing with `== None`.
    topic = request.args.get('topic')
    topics = None if topic is None else c_topics.get(topic)
    return render_template('xml.html', site=site, topics=topics, topics_all=topics_all)
@app.route('/posts')
def url_posts():
    """Render the demo posts page with a hard-coded user and two posts."""
    current_user = {'username': 'Mark'}
    post_entries = [
        {
            'author': {'username': 'John'},
            'month': 'Jan',
            'year': '2020',
            'body': 'Beautiful day in Portland!',
        },
        {
            'author': {'username': 'Susan'},
            'month': 'Dec',
            'year': '2019',
            'body': 'The Avengers movie was so cool!',
        },
    ]
    return render_template(
        'posts.html', site=site, menu='posts', user=current_user, posts=post_entries)
@app.route('/redirect')
def url_redirect():
    """Redirect the client to the /redirected page."""
    # Build the target URL from the endpoint name instead of hard-coding the
    # path, so the link keeps working if the route rule ever changes.
    return redirect(url_for('redirected'))
@app.route('/redirected')
def redirected():
    """Landing page shown after the /redirect endpoint bounces here."""
    template_name = 'redirect.html'
    return render_template(template_name)
@app.route('/url_query', methods=['GET'])
def url_query():
    """Echo the 'scope' query parameter back as plain text."""
    scope = request.args.get('scope', 'unspecified')
    # Bug fix: the original string literal lacked the f-prefix, so the literal
    # text '{scope}' was returned instead of the parameter's value.
    msg = f'url_query scope = {scope}'
    return msg, 200, {'Content-Type': 'text/plain; charset=utf-8'}
@app.route('/json')
def url_json():
    """Render the demo chart payload on an HTML page."""
    payload = data_json
    return render_template('json.html', name=payload)
@app.route('/static')
def url_static():
    """Render a page that includes a static HTML fragment."""
    fragment = 'sample-html-elements.html'
    return render_template('static.html', file_include=fragment)
# ----------- under development ..
@app.route("/chart")
def chart_form():
    """Serve the AJAX-based chart page."""
    return render_template('chart-ajax.html')
@app.route("/chart-handler", methods=['POST'])
def chart_post():
    """Return the demo chart data as JSON for the AJAX chart page."""
    payload = data_json
    return jsonify(payload)
@app.route('/hello', methods=['GET', 'POST'])
def hello():
    """Demo endpoint: log POSTed JSON to the console; answer GETs with JSON."""
    if request.method != 'POST':
        # GET request: serialize a greeting and send JSON headers.
        return jsonify({'greeting': 'GET from ' + __name__})
    # POST request: parse and print the JSON body.
    print('Incoming..')
    print(request.get_json())
    return 'OK', 200
# test_request_context() tells Flask to behave as though it's handling a
# request even while we use a Python shell, e.g.:
# with app.test_request_context():
#     print('url_for(json) = ', url_for('json'))
# Run the built-in development server; debug=True enables auto-reload.
# Binds to all interfaces on port 8000.
if __name__ == "__main__":
    app.run(host="0.0.0.0",port=8000,debug=True)
| 24.986755 | 113 | 0.61463 |
a89e356e80b92e676fc8dcea12766fbda812d8f4 | 15,270 | py | Python | baselines/bigcn/model/Twitter/BiGCN_Twitter-ORG-Attn-0331.py | JihoChoi/dynamic-gcn-deprecated-TBU | ff315206811b757f2f61f0776917e6a7d43c9379 | [
"MIT"
] | 1 | 2022-02-24T09:17:31.000Z | 2022-02-24T09:17:31.000Z | baselines/bigcn/model/Twitter/BiGCN_Twitter-ORG-Attn-0331.py | JihoChoi/dynamic-gcn-deprecated-TBU | ff315206811b757f2f61f0776917e6a7d43c9379 | [
"MIT"
] | null | null | null | baselines/bigcn/model/Twitter/BiGCN_Twitter-ORG-Attn-0331.py | JihoChoi/dynamic-gcn-deprecated-TBU | ff315206811b757f2f61f0776917e6a7d43c9379 | [
"MIT"
] | null | null | null | import sys,os
sys.path.append(os.getcwd())
# from Process.process import *
from Process.process import *
import torch as th
from torch_scatter import scatter_mean
import torch.nn.functional as F
import numpy as np
from tools.earlystopping import EarlyStopping
from torch_geometric.data import DataLoader
from tqdm import tqdm
from Process.rand5fold import *
from tools.evaluate import *
from torch_geometric.nn import GCNConv
import copy
class TDrumorGCN(th.nn.Module):
    """Top-down rumor GCN: two GCNConv layers over data.edge_index.

    Before each layer's output is used, the corresponding root-post features
    are concatenated to every node of the same graph; per-graph node states
    are finally mean-pooled into one embedding per graph.
    """

    def __init__(self, in_feats, hid_feats, out_feats):
        super(TDrumorGCN, self).__init__()
        self.conv1 = GCNConv(in_feats, hid_feats)
        # Second layer consumes hidden features concatenated with the raw
        # root features, hence hid_feats + in_feats input channels.
        self.conv2 = GCNConv(hid_feats+in_feats, out_feats)

    def forward(self, data):
        x, edge_index = data.x, data.edge_index
        x1 = copy.copy(x.float())  # raw input features, kept for root concat
        x = self.conv1(x, edge_index)
        x2 = copy.copy(x)  # first-layer outputs, kept for the second concat
        rootindex = data.rootindex
        # Broadcast each graph's root-node raw features to all of its nodes.
        # NOTE: `device` is a module-level global defined in the main script.
        root_extend = th.zeros(len(data.batch), x1.size(1)).to(device)
        batch_size = max(data.batch) + 1
        for num_batch in range(batch_size):
            index = (th.eq(data.batch, num_batch))
            root_extend[index] = x1[rootindex[num_batch]]
        x = th.cat((x, root_extend), 1)
        x = F.relu(x)
        x = F.dropout(x, training=self.training)
        x = self.conv2(x, edge_index)
        x = F.relu(x)
        # Second pass: concatenate each graph's first-layer root features.
        root_extend = th.zeros(len(data.batch), x2.size(1)).to(device)
        for num_batch in range(batch_size):
            index = (th.eq(data.batch, num_batch))
            root_extend[index] = x2[rootindex[num_batch]]
        x = th.cat((x, root_extend), 1)
        # Mean-pool node states into one vector per graph in the batch.
        x = scatter_mean(x, data.batch, dim=0)
        return x
class BUrumorGCN(th.nn.Module):
    """Bottom-up rumor GCN: identical to TDrumorGCN except that it message-
    passes over data.BU_edge_index (the bottom-up edge list) instead of
    data.edge_index.
    """

    def __init__(self, in_feats, hid_feats, out_feats):
        super(BUrumorGCN, self).__init__()
        self.conv1 = GCNConv(in_feats, hid_feats)
        # hid_feats + in_feats: hidden features concatenated with raw root
        # features feed the second layer.
        self.conv2 = GCNConv(hid_feats+in_feats, out_feats)

    def forward(self, data):
        x, edge_index = data.x, data.BU_edge_index
        x1 = copy.copy(x.float())  # raw input features, kept for root concat
        x = self.conv1(x, edge_index)
        x2 = copy.copy(x)  # first-layer outputs, kept for the second concat
        rootindex = data.rootindex
        # Broadcast each graph's root-node raw features to all of its nodes.
        root_extend = th.zeros(len(data.batch), x1.size(1)).to(device)
        batch_size = max(data.batch) + 1
        for num_batch in range(batch_size):
            index = (th.eq(data.batch, num_batch))
            root_extend[index] = x1[rootindex[num_batch]]
        x = th.cat((x, root_extend), 1)
        x = F.relu(x)
        x = F.dropout(x, training=self.training)
        x = self.conv2(x, edge_index)
        x = F.relu(x)
        # Second pass: concatenate each graph's first-layer root features.
        root_extend = th.zeros(len(data.batch), x2.size(1)).to(device)
        for num_batch in range(batch_size):
            index = (th.eq(data.batch, num_batch))
            root_extend[index] = x2[rootindex[num_batch]]
        x = th.cat((x, root_extend), 1)
        # Mean-pool node states into one vector per graph in the batch.
        x = scatter_mean(x, data.batch, dim=0)
        return x
class Network(th.nn.Module):
    """Snapshot BiGCN: encodes 5 temporal snapshots with shared TD/BU GCNs,
    fuses them with a learned softmax attention over snapshots, and
    classifies the sum into 4 rumor classes (log-probabilities).
    """

    def __init__(self, in_feats, hid_feats, out_feats):
        super(Network, self).__init__()
        # The same TD/BU encoders are applied to every snapshot
        # (weight sharing across time; see the TODO in forward()).
        self.TDrumorGCN = TDrumorGCN(in_feats, hid_feats, out_feats)
        self.BUrumorGCN = BUrumorGCN(in_feats, hid_feats, out_feats)
        # Attention Module: two-layer scorer mapping a snapshot embedding
        # to a scalar attention logit.
        self.W_s1 = th.nn.Linear(out_feats * 2 * 2, 64)
        self.W_s2 = th.nn.Linear(64, 1)
        self.fc = th.nn.Linear((out_feats+hid_feats)*2, 4)
        # self.fc = th.nn.Linear((out_feats+hid_feats) * 2 * 5, 4)

    def attention_module(self, x0, x1, x2, x3, x4):
        """Scale each snapshot embedding by its softmax attention weight.

        Scores all five embeddings with the shared W_s1/W_s2 scorer,
        softmaxes across snapshots, and multiplies each embedding by its
        weight via batched matmul.
        """
        attn_w_1 = self.W_s2(F.relu(self.W_s1(x0)))
        attn_w_2 = self.W_s2(F.relu(self.W_s1(x1)))
        attn_w_3 = self.W_s2(F.relu(self.W_s1(x2)))
        attn_w_4 = self.W_s2(F.relu(self.W_s1(x3)))
        attn_w_5 = self.W_s2(F.relu(self.W_s1(x4)))
        attn_weights = th.cat((attn_w_1, attn_w_2, attn_w_3, attn_w_4, attn_w_5), 1) # B x 5
        attn_weights = F.softmax(attn_weights, dim=1) # TODO: confirmed
        # Each result is (B, D, 1): embedding times its scalar weight.
        x0 = th.bmm(x0.unsqueeze(2), attn_weights[:, 0].unsqueeze(1).unsqueeze(2))
        x1 = th.bmm(x1.unsqueeze(2), attn_weights[:, 1].unsqueeze(1).unsqueeze(2))
        x2 = th.bmm(x2.unsqueeze(2), attn_weights[:, 2].unsqueeze(1).unsqueeze(2))
        x3 = th.bmm(x3.unsqueeze(2), attn_weights[:, 3].unsqueeze(1).unsqueeze(2))
        x4 = th.bmm(x4.unsqueeze(2), attn_weights[:, 4].unsqueeze(1).unsqueeze(2))
        return x0, x1, x2, x3, x4

    def forward(self, s0, s1, s2, s3, s4):
        """Classify five graph snapshots; returns log-softmax over 4 classes."""
        # TD_x = self.TDrumorGCN(data)
        # BU_x = self.BUrumorGCN(data)
        # TODO:
        # 1) share gcn weights for the snapshots (current)
        # 2) separate gcn weights for the snapshots
        # 3) temporal + sequence
        TD_x0 = self.TDrumorGCN(s0)
        BU_x0 = self.BUrumorGCN(s0)
        TD_x1 = self.TDrumorGCN(s1)
        BU_x1 = self.BUrumorGCN(s1)
        TD_x2 = self.TDrumorGCN(s2)
        BU_x2 = self.BUrumorGCN(s2)
        TD_x3 = self.TDrumorGCN(s3)
        BU_x3 = self.BUrumorGCN(s3)
        TD_x4 = self.TDrumorGCN(s4)
        BU_x4 = self.BUrumorGCN(s4)
        # Per-snapshot embedding: concat of top-down and bottom-up encodings.
        x0 = th.cat((TD_x0, BU_x0), 1)
        x1 = th.cat((TD_x1, BU_x1), 1)
        x2 = th.cat((TD_x2, BU_x2), 1)
        x3 = th.cat((TD_x3, BU_x3), 1)
        x4 = th.cat((TD_x4, BU_x4), 1)
        # x = th.cat((BU_x,TD_x), 1)
        x0, x1, x2, x3, x4 = self.attention_module(x0, x1, x2, x3, x4)
        # x = th.cat((x0, x1, x2, x3, x4), 1).squeeze(2) # MobaXterm - 2.
        # Sum the attention-weighted snapshot embeddings.
        x = th.stack([x0, x1, x2, x3, x4], 1).squeeze(2) # MobaXterm - 16.
        x = x.sum(dim=1).squeeze(2)
        x = self.fc(x)
        x = F.log_softmax(x, dim=1)
        return x
def train_GCN(treeDic, x_test, x_train, TDdroprate, BUdroprate, lr, weight_decay, patience, n_epochs, batchsize, dataname, iter):
    """Train the snapshot BiGCN on one train/test fold with early stopping.

    Returns (train_losses, val_losses, train_accs, val_accs, accs,
    F1, F2, F3, F4) where the last five are the fold's final (or
    early-stopped best) accuracy and per-class F1 scores.
    """
    model = Network(5000, 64, 64).to(device)
    # BU_params = list(map(id, model.BUrumorGCN.conv1.parameters()))
    # BU_params += list(map(id, model.BUrumorGCN.conv2.parameters()))
    # print(filter(lambda p: id(p) not in BU_params, model.parameters()))
    # BU_params += list(map(id, model.BUrumorGCN.conv2.parameters()))
    # base_params = filter(lambda p: id(p) not in BU_params, model.parameters())
    # optimizer = th.optim.Adam([
    #     {'params': base_params},
    #     {'params': model.BUrumorGCN.conv1.parameters(), 'lr': lr/5},
    #     {'params': model.BUrumorGCN.conv2.parameters(), 'lr': lr/5}
    # ], lr=lr, weight_decay=weight_decay)
    optimizer = th.optim.Adam([
        {'params': model.parameters()},
    ], lr=lr, weight_decay=weight_decay)
    model.train()
    train_losses = []
    val_losses = []
    train_accs = []
    val_accs = []
    early_stopping = EarlyStopping(patience=patience, verbose=True)
    for epoch in range(n_epochs):
        # Data is re-loaded every epoch so edge dropout is re-sampled.
        # traindata_list, testdata_list = loadBiData(dataname, treeDic, x_train, x_test, TDdroprate,BUdroprate)
        traindata_list, testdata_list = loadSnapshotData(
            dataname, treeDic, x_train, x_test, TDdroprate, BUdroprate)
        train_loader = DataLoader(
            traindata_list, batch_size=batchsize, shuffle=True, num_workers=5)
        test_loader = DataLoader(
            testdata_list, batch_size=batchsize, shuffle=True, num_workers=5)
        avg_loss = []
        avg_acc = []
        batch_idx = 0
        # tqdm_train_loader = tqdm(train_loader) # JIHO
        tqdm_train_loader = train_loader
        for Batch_data in tqdm_train_loader:
            # Batch_data is a list of 5 snapshot batches for the same trees;
            # labels are shared, so Batch_data[0].y is used throughout.
            # Batch_data.to(device)
            # out_labels= model(Batch_data)
            s0 = Batch_data[0].to(device)
            s1 = Batch_data[1].to(device)
            s2 = Batch_data[2].to(device)
            s3 = Batch_data[3].to(device)
            s4 = Batch_data[4].to(device)
            out_labels = model(s0, s1, s2, s3, s4)
            # finalloss = F.nll_loss(out_labels, Batch_data.y)
            finalloss = F.nll_loss(out_labels, Batch_data[0].y)
            loss = finalloss
            optimizer.zero_grad()
            loss.backward()
            avg_loss.append(loss.item())
            optimizer.step()
            _, pred = out_labels.max(dim=-1)
            # correct = pred.eq(Batch_data.y).sum().item()
            # train_acc = correct / len(Batch_data.y)
            correct = pred.eq(Batch_data[0].y).sum().item()
            train_acc = correct / len(Batch_data[0].y)
            avg_acc.append(train_acc)
            print("Iter {:03d} | Epoch {:05d} | Batch{:02d} | Train_Loss {:.4f}| Train_Accuracy {:.4f}".format(
                iter, epoch, batch_idx, loss.item(), train_acc))
            batch_idx = batch_idx + 1
        train_losses.append(np.mean(avg_loss))
        train_accs.append(np.mean(avg_acc))
        # ---- validation pass over the fold's test split ----
        temp_val_losses = []
        temp_val_accs = []
        temp_val_Acc_all, temp_val_Acc1, temp_val_Prec1, temp_val_Recll1, temp_val_F1, \
        temp_val_Acc2, temp_val_Prec2, temp_val_Recll2, temp_val_F2, \
        temp_val_Acc3, temp_val_Prec3, temp_val_Recll3, temp_val_F3, \
        temp_val_Acc4, temp_val_Prec4, temp_val_Recll4, temp_val_F4 = [], [
        ], [], [], [], [], [], [], [], [], [], [], [], [], [], [], []
        model.eval()
        # tqdm_test_loader = tqdm(test_loader) # JIHO
        tqdm_test_loader = test_loader
        for Batch_data in tqdm_test_loader:
            # Batch_data.to(device)
            # val_out = model(Batch_data)
            s0 = Batch_data[0].to(device)
            s1 = Batch_data[1].to(device)
            s2 = Batch_data[2].to(device)
            s3 = Batch_data[3].to(device)
            s4 = Batch_data[4].to(device)
            val_out = model(s0, s1, s2, s3, s4)
            val_loss = F.nll_loss(val_out, Batch_data[0].y)
            temp_val_losses.append(val_loss.item())
            _, val_pred = val_out.max(dim=1)
            correct = val_pred.eq(Batch_data[0].y).sum().item()
            val_acc = correct / len(Batch_data[0].y)
            # Per-class accuracy/precision/recall/F1 for the 4 rumor classes.
            Acc_all, Acc1, Prec1, Recll1, F1, Acc2, Prec2, Recll2, F2, Acc3, Prec3, Recll3, F3, Acc4, Prec4, Recll4, F4 = evaluation4class(
                val_pred, Batch_data[0].y)
            temp_val_Acc_all.append(Acc_all), temp_val_Acc1.append(Acc1), temp_val_Prec1.append(
                Prec1), temp_val_Recll1.append(Recll1), temp_val_F1.append(F1), \
                temp_val_Acc2.append(Acc2), temp_val_Prec2.append(Prec2), temp_val_Recll2.append(
                Recll2), temp_val_F2.append(F2), \
                temp_val_Acc3.append(Acc3), temp_val_Prec3.append(Prec3), temp_val_Recll3.append(
                Recll3), temp_val_F3.append(F3), \
                temp_val_Acc4.append(Acc4), temp_val_Prec4.append(Prec4), temp_val_Recll4.append(
                Recll4), temp_val_F4.append(F4)
            temp_val_accs.append(val_acc)
        val_losses.append(np.mean(temp_val_losses))
        val_accs.append(np.mean(temp_val_accs))
        print("Epoch {:05d} | Val_Loss {:.4f}| Val_Accuracy {:.4f}".format(epoch, np.mean(temp_val_losses),
                                                                           np.mean(temp_val_accs)))
        res = ['acc:{:.4f}'.format(np.mean(temp_val_Acc_all)),
               'C1:{:.4f},{:.4f},{:.4f},{:.4f}'.format(np.mean(temp_val_Acc1), np.mean(temp_val_Prec1),
                                                       np.mean(temp_val_Recll1), np.mean(temp_val_F1)),
               'C2:{:.4f},{:.4f},{:.4f},{:.4f}'.format(np.mean(temp_val_Acc2), np.mean(temp_val_Prec2),
                                                       np.mean(temp_val_Recll2), np.mean(temp_val_F2)),
               'C3:{:.4f},{:.4f},{:.4f},{:.4f}'.format(np.mean(temp_val_Acc3), np.mean(temp_val_Prec3),
                                                       np.mean(temp_val_Recll3), np.mean(temp_val_F3)),
               'C4:{:.4f},{:.4f},{:.4f},{:.4f}'.format(np.mean(temp_val_Acc4), np.mean(temp_val_Prec4),
                                                       np.mean(temp_val_Recll4), np.mean(temp_val_F4))]
        print('results:', res)
        # EarlyStopping tracks the best validation metrics and checkpoints
        # the model; it flips early_stop after `patience` stale epochs.
        early_stopping(np.mean(temp_val_losses), np.mean(temp_val_accs), np.mean(temp_val_F1), np.mean(temp_val_F2),
                       np.mean(temp_val_F3), np.mean(temp_val_F4), model, 'BiGCN', dataname)
        accs = np.mean(temp_val_accs)
        F1 = np.mean(temp_val_F1)
        F2 = np.mean(temp_val_F2)
        F3 = np.mean(temp_val_F3)
        F4 = np.mean(temp_val_F4)
        if early_stopping.early_stop:
            print("Early stopping")
            # Report the best metrics recorded by the early stopper.
            accs = early_stopping.accs
            F1 = early_stopping.F1
            F2 = early_stopping.F2
            F3 = early_stopping.F3
            F4 = early_stopping.F4
            break
    return train_losses, val_losses, train_accs, val_accs, accs, F1, F2, F3, F4
# =========================
# MAIN
# =========================
# Hyperparameters for 5-fold cross-validated training.
lr = 0.0005
weight_decay = 1e-4
patience = 10
# n_epochs=200 # JIHO
n_epochs = 100
batchsize = 128
TDdroprate = 0.2
BUdroprate = 0.2
datasetname = sys.argv[1] # "Twitter15", "Twitter16"
iterations = int(sys.argv[2])
model = "GCN"
# Module-level device also used inside the GCN classes above.
device = th.device('cuda:1' if th.cuda.is_available() else 'cpu')
# Accumulators: overall accuracy and F1 per class (NR/FR/TR/UR), one
# 5-fold average per iteration.
test_accs = []
NR_F1 = []
FR_F1 = []
TR_F1 = []
UR_F1 = []
for iter in range(iterations):
    fold0_x_test, fold0_x_train, fold1_x_test, fold1_x_train, fold2_x_test, fold2_x_train, fold3_x_test, fold3_x_train, fold4_x_test, fold4_x_train = load5foldData(
        datasetname)
    treeDic = loadTree(datasetname)
    # Train/evaluate each of the five folds independently.
    train_losses, val_losses, train_accs, val_accs0, accs0, F1_0, F2_0, F3_0, F4_0 = train_GCN(
        treeDic, fold0_x_test, fold0_x_train, TDdroprate, BUdroprate, lr, weight_decay, patience, n_epochs, batchsize, datasetname, iter)
    train_losses, val_losses, train_accs, val_accs1, accs1, F1_1, F2_1, F3_1, F4_1 = train_GCN(
        treeDic, fold1_x_test, fold1_x_train, TDdroprate, BUdroprate, lr, weight_decay, patience, n_epochs, batchsize, datasetname, iter)
    train_losses, val_losses, train_accs, val_accs2, accs2, F1_2, F2_2, F3_2, F4_2 = train_GCN(
        treeDic, fold2_x_test, fold2_x_train, TDdroprate, BUdroprate, lr, weight_decay, patience, n_epochs, batchsize, datasetname, iter)
    train_losses, val_losses, train_accs, val_accs3, accs3, F1_3, F2_3, F3_3, F4_3 = train_GCN(
        treeDic, fold3_x_test, fold3_x_train, TDdroprate, BUdroprate, lr, weight_decay, patience, n_epochs, batchsize, datasetname, iter)
    train_losses, val_losses, train_accs, val_accs4, accs4, F1_4, F2_4, F3_4, F4_4 = train_GCN(
        treeDic, fold4_x_test, fold4_x_train, TDdroprate, BUdroprate, lr, weight_decay, patience, n_epochs, batchsize, datasetname, iter)
    # Average across the five folds for this iteration.
    test_accs.append((accs0+accs1+accs2+accs3+accs4)/5)
    NR_F1.append((F1_0+F1_1+F1_2+F1_3+F1_4)/5)
    FR_F1.append((F2_0 + F2_1 + F2_2 + F2_3 + F2_4) / 5)
    TR_F1.append((F3_0 + F3_1 + F3_2 + F3_3 + F3_4) / 5)
    UR_F1.append((F4_0 + F4_1 + F4_2 + F4_3 + F4_4) / 5)
print("Total_Test_Accuracy: {:.4f}|NR F1: {:.4f}|FR F1: {:.4f}|TR F1: {:.4f}|UR F1: {:.4f}".format(
    sum(test_accs) / iterations, sum(NR_F1) / iterations, sum(FR_F1) / iterations, sum(TR_F1) / iterations, sum(UR_F1) / iterations))
| 44.132948 | 164 | 0.602685 |
927f4207e2f063dd3ac4b358411fb003df78fa14 | 20,018 | py | Python | disarm/binary/experiment_launcher_singlelayer.py | pedersor/google-research | 6fa751dd261b3f6d918fd2cd35efef5d8bf3eea6 | [
"Apache-2.0"
] | null | null | null | disarm/binary/experiment_launcher_singlelayer.py | pedersor/google-research | 6fa751dd261b3f6d918fd2cd35efef5d8bf3eea6 | [
"Apache-2.0"
] | null | null | null | disarm/binary/experiment_launcher_singlelayer.py | pedersor/google-research | 6fa751dd261b3f6d918fd2cd35efef5d8bf3eea6 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Binary for the DisARM experiments on VAE with a single stochastic layer."""
import os
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
import tensorflow_probability as tfp
from disarm import dataset
from disarm import networks
tfd = tfp.distributions
layers = tf.keras.layers
# Command-line flags: dataset choice, optimizer learning rates, training
# schedule, and the gradient-estimator configuration.
flags.DEFINE_enum('dataset', 'dynamic_mnist',
                  ['static_mnist', 'dynamic_mnist',
                   'fashion_mnist', 'omniglot'],
                  'Dataset to use.')
flags.DEFINE_float('genmo_lr', 1e-4,
                   'Learning rate for decoder, Generation network.')
flags.DEFINE_float('infnet_lr', 1e-4,
                   'Learning rate for encoder, Inference network.')
flags.DEFINE_float('prior_lr', 1e-2,
                   'Learning rate for prior variables.')
flags.DEFINE_integer('batch_size', 50, 'Training batch size.')
flags.DEFINE_integer('num_pairs', 1,
                     ('Number of samples pairs used gradient estimators.'
                      'For VIMCO, there are 2 x num_pairs independent '
                      'samples. For ARM++, there are num_pairs of '
                      'antithetic pairs.'))
flags.DEFINE_integer('num_steps', int(1e6), 'Number of training steps.')
flags.DEFINE_enum('grad_type', 'disarm',
                  ['arm', 'disarm', 'reinforce_loo', 'relax',
                   'vimco', 'local-disarm'],
                  'Choice of gradient estimator.')
flags.DEFINE_string('encoder_type', 'linear',
                    'Choice supported: linear, nonlinear')
flags.DEFINE_string('logdir', '/tmp/disarm',
                    'Directory for storing logs.')
flags.DEFINE_bool('verbose', False,
                  'Whether to turn on training result logging.')
# For RELAX: when temperature/scaling_factor are left as None, the model
# learns them (see the theta update in train_one_step).
flags.DEFINE_float('temperature', None,
                   'Temperature for RELAX estimator.')
flags.DEFINE_float('scaling_factor', None,
                   'Scaling factor for RELAX estimator.')
flags.DEFINE_bool('bias_check', False,
                  'Carry out bias check for RELAX and baseline')
flags.DEFINE_bool('demean_input', False,
                  'Demean for encoder and decoder inputs.')
flags.DEFINE_bool('initialize_with_bias', False,
                  'Initialize the final layer bias of decoder '
                  'with dataset mean.')
flags.DEFINE_integer('seed', 1, 'Global random seed.')
flags.DEFINE_bool('symmetrized', False,
                  'Symmetrize the training objective for b and b_tilde.')
flags.DEFINE_bool('estimate_grad_basket', False,
                  'Estimate gradients for multiple estimators.')
flags.DEFINE_integer('num_eval_samples', 100,
                     'Number of samples for evaluation.')
flags.DEFINE_integer('num_train_samples', 1,
                     'Number of samples for evaluation.')
flags.DEFINE_bool('debug', False, 'Turn on debugging mode.')
FLAGS = flags.FLAGS
def process_batch_input(input_batch):
  """Flatten a batch of images to [batch, features] and cast to float32."""
  flattened = tf.reshape(input_batch, [tf.shape(input_batch)[0], -1])
  return tf.cast(flattened, tf.float32)
def initialize_grad_variables(target_variable_list):
  """Create zero-initialized tf.Variables mirroring the given shapes."""
  zero_vars = []
  for var in target_variable_list:
    zero_vars.append(tf.Variable(tf.zeros(shape=var.shape)))
  return zero_vars
def estimate_gradients(input_batch, bvae_model, gradient_type, sample_size=1):
  """Estimate gradient for inference and generation networks.

  Args:
    input_batch: flattened float32 batch (see process_batch_input).
    bvae_model: binary VAE exposing decoder/prior/encoder variable lists
      and per-estimator loss methods.
    gradient_type: one of 'vimco', 'local-disarm', 'multisample', 'relax';
      any other value falls through to the score-function path using
      get_layer_grad_estimation (e.g. 'arm', 'disarm', 'reinforce_loo').
    sample_size: number of samples (or antithetic pairs) per example.

  Returns:
    (genmo_grads, prior_grads, infnet_grads, genmo_loss).
  """
  if gradient_type == 'vimco':
    with tf.GradientTape(persistent=True) as tape:
      genmo_loss, infnet_loss = bvae_model.get_vimco_losses(
          input_batch, sample_size)
    genmo_grads = tape.gradient(genmo_loss, bvae_model.decoder_vars)
    prior_grads = tape.gradient(genmo_loss, bvae_model.prior_vars)
    infnet_grads = tape.gradient(infnet_loss, bvae_model.encoder_vars)
  elif gradient_type == 'local-disarm':
    # num_samples indicates the number of antithetic pairs
    with tf.GradientTape(persistent=True) as tape:
      genmo_loss, infnet_loss = (
          bvae_model.get_local_disarm_losses(input_batch, sample_size,
                                             symmetrized=FLAGS.symmetrized))
    genmo_grads = tape.gradient(genmo_loss, bvae_model.decoder_vars)
    prior_grads = tape.gradient(genmo_loss, bvae_model.prior_vars)
    infnet_vars = bvae_model.encoder_vars
    # Encoder gradient combines the pathwise part (through genmo_loss)
    # and the score-function surrogate (infnet_loss).
    infnet_grads_1 = tape.gradient(genmo_loss, infnet_vars)
    infnet_grads_2 = tape.gradient(infnet_loss, infnet_vars)
    # infnet_grads_1/2 are list of tf.Tensors.
    infnet_grads = [infnet_grads_1[i] + infnet_grads_2[i]
                    for i in range(len(infnet_vars))]
  elif gradient_type == 'multisample':
    with tf.GradientTape(persistent=True) as tape:
      genmo_loss, infnet_loss = bvae_model.get_multisample_baseline_loss(
          input_batch, sample_size)
    genmo_grads = tape.gradient(genmo_loss, bvae_model.decoder_vars)
    prior_grads = tape.gradient(genmo_loss, bvae_model.prior_vars)
    infnet_grads = tape.gradient(infnet_loss, bvae_model.encoder_vars)
  elif gradient_type == 'relax':
    if sample_size > 1:
      raise ValueError('Relax only supports 1 sample case.')
    with tf.GradientTape(persistent=True) as tape:
      genmo_loss, reparam_loss, learning_signal, log_q = (
          bvae_model.get_relax_loss(
              input_batch,
              temperature=FLAGS.temperature,
              scaling_factor=FLAGS.scaling_factor))
    genmo_grads = tape.gradient(genmo_loss, bvae_model.decoder_vars)
    prior_grads = tape.gradient(genmo_loss, bvae_model.prior_vars)
    infnet_vars = bvae_model.encoder_vars
    # REINFORCE term: d log q * learning signal, plus the reparameterized
    # control-variate correction.
    infnet_grads_1 = tape.gradient(log_q, infnet_vars,
                                   output_gradients=learning_signal)
    infnet_grads_2 = tape.gradient(reparam_loss, infnet_vars)
    # infnet_grads_1/2 are list of tf.Tensors.
    infnet_grads = [infnet_grads_1[i] + infnet_grads_2[i]
                    for i in range(len(infnet_vars))]
  else:
    # Generic score-function path: backprop the per-layer estimator weights
    # through the encoder logits.
    with tf.GradientTape(persistent=True) as tape:
      elbo, _, infnet_logits, _ = bvae_model(input_batch)
      genmo_loss = -1. * tf.reduce_mean(elbo)
    genmo_grads = tape.gradient(genmo_loss, bvae_model.decoder_vars)
    prior_grads = tape.gradient(genmo_loss, bvae_model.prior_vars)
    infnet_grad_multiplier = -1. * bvae_model.get_layer_grad_estimation(
        input_batch, grad_type=gradient_type)
    infnet_grads = tape.gradient(
        infnet_logits,
        bvae_model.encoder_vars,
        output_gradients=infnet_grad_multiplier)
  # Persistent tapes must be released explicitly.
  del tape
  return (genmo_grads, prior_grads, infnet_grads, genmo_loss)
@tf.function
def train_one_step(
    train_batch_i,
    bvae_model,
    genmo_optimizer,
    infnet_optimizer,
    prior_optimizer,
    theta_optimizer,
    encoder_grad_variable,
    encoder_grad_sq_variable,
    grad_variable_dict,
    grad_sq_variable_dict):
  """Train Discrete VAE for 1 step.

  Applies one optimizer update to decoder, prior and encoder (and, for
  RELAX, to the control-variate parameters theta). Also tracks the running
  encoder-gradient variance, optionally for a basket of estimators.

  Returns:
    (encoder_grad_var, variance_dict, genmo_loss, metrics).
  """
  metrics = {}
  input_batch = process_batch_input(train_batch_i)
  # These estimators use independent samples, so a "pair" counts as 2.
  if FLAGS.grad_type in ['vimco', 'multisample']:
    num_samples = FLAGS.num_pairs * 2
  else:
    num_samples = FLAGS.num_pairs
  if FLAGS.grad_type == 'relax':
    # The theta tape wraps the whole update so that the variance of the
    # encoder gradients can be differentiated w.r.t. the RELAX parameters.
    with tf.GradientTape(persistent=True) as theta_tape:
      (genmo_grads, prior_grads, infnet_grads, genmo_loss) = estimate_gradients(
          input_batch, bvae_model, FLAGS.grad_type, sample_size=1)
      # Update generative model
      genmo_vars = bvae_model.decoder_vars
      genmo_optimizer.apply_gradients(list(zip(genmo_grads, genmo_vars)))
      prior_vars = bvae_model.prior_vars
      prior_optimizer.apply_gradients(list(zip(prior_grads, prior_vars)))
      infnet_vars = bvae_model.encoder_vars
      infnet_optimizer.apply_gradients(list(zip(infnet_grads, infnet_vars)))
      infnet_grads_sq = [tf.square(grad_i) for grad_i in infnet_grads]
    # Control-variate parameters: optional control network, plus the
    # temperature/scaling variables when they are learned (flag is None).
    theta_vars = []
    if bvae_model.control_nn:
      theta_vars.extend(bvae_model.control_nn.trainable_variables)
    if FLAGS.temperature is None:
      theta_vars.append(bvae_model.log_temperature_variable)
    if FLAGS.scaling_factor is None:
      theta_vars.append(bvae_model.scaling_variable)
    # Minimize the squared encoder gradients (a variance proxy).
    theta_grads = theta_tape.gradient(infnet_grads_sq, theta_vars)
    theta_optimizer.apply_gradients(zip(theta_grads, theta_vars))
    del theta_tape
    metrics['learning_signal'] = bvae_model.mean_learning_signal
  else:
    (genmo_grads, prior_grads, infnet_grads, genmo_loss) = estimate_gradients(
        input_batch, bvae_model, FLAGS.grad_type, num_samples)
    genmo_vars = bvae_model.decoder_vars
    genmo_optimizer.apply_gradients(list(zip(genmo_grads, genmo_vars)))
    prior_vars = bvae_model.prior_vars
    prior_optimizer.apply_gradients(list(zip(prior_grads, prior_vars)))
    infnet_vars = bvae_model.encoder_vars
    infnet_optimizer.apply_gradients(list(zip(infnet_grads, infnet_vars)))
  # Normalize running gradient variance by batch_size^2.
  batch_size_sq = tf.cast(FLAGS.batch_size * FLAGS.batch_size, tf.float32)
  encoder_grad_var = bvae_model.compute_grad_variance(
      encoder_grad_variable, encoder_grad_sq_variable,
      infnet_grads) / batch_size_sq
  if grad_variable_dict is not None:
    # Also track gradient variance for every estimator in the basket,
    # without applying their updates.
    variance_dict = dict()
    for k in grad_variable_dict.keys():
      if k in ['vimco', 'local-disarm']:
        sample_size = 2 * FLAGS.num_pairs
      else:
        sample_size = 1
      encoder_grads = estimate_gradients(
          input_batch, bvae_model,
          gradient_type=k, sample_size=sample_size)[2]
      variance_dict['var/' + k] = bvae_model.compute_grad_variance(
          grad_variable_dict[k], grad_sq_variable_dict[k],
          encoder_grads) / batch_size_sq
  else:
    variance_dict = None
  return (encoder_grad_var, variance_dict, genmo_loss, metrics)
@tf.function
def evaluate(model, tf_dataset, max_step=1000, num_eval_samples=None):
  """Evaluate the model.

  Returns the average negative multi-sample bound over the dataset: with
  K > 1 samples it is logsumexp(elbo)/K (IWAE-style), otherwise the ELBO.
  """
  # Precedence for the sample count: estimator requirement, explicit
  # argument, the --num_eval_samples flag, then --num_pairs.
  if FLAGS.grad_type in ['vimco', 'local-disarm']:
    num_samples = FLAGS.num_pairs * 2
  elif num_eval_samples:
    num_samples = num_eval_samples
  elif FLAGS.num_eval_samples:
    num_samples = FLAGS.num_eval_samples
  else:
    num_samples = FLAGS.num_pairs
  tf.print('Evaluate with samples: ', num_samples)
  loss = 0.
  n = 0.
  for batch in tf_dataset.map(process_batch_input):
    if n >= max_step: # used for train_ds, which is a `repeat` dataset.
      break
    if num_samples > 1:
      # Tile the batch so each example gets num_samples posterior samples,
      # then compute the multi-sample bound logmeanexp(elbo).
      batch_size = tf.shape(batch)[0]
      input_batch = tf.tile(batch, [num_samples, 1])
      elbo = tf.reshape(model(input_batch)[0], [num_samples, batch_size])
      objectives = (tf.reduce_logsumexp(elbo, axis=0, keepdims=False) -
                    tf.math.log(tf.cast(tf.shape(elbo)[0], tf.float32)))
    else:
      objectives = model(batch)[0]
    loss -= tf.reduce_mean(objectives)
    n += 1.
  return loss / n
def run_bias_check(model, batch, target_type, baseline_type):
  """Compare two gradient estimators for bias with a running z-score.

  Repeatedly estimates encoder gradients with both estimators on the same
  batch and accumulates the mean difference using Welford's online
  mean/variance algorithm.  Every 1000 iterations the z-score of the mean
  difference is printed; values near zero are consistent with the two
  estimators sharing the same expectation.
  """
  tf.print(f'Running a bias check comparing {target_type} and {baseline_type}.')
  running_mean = 0.
  sum_sq_dev = 0.
  for step in range(1, int(1e6) + 1):
    diff = run_bias_check_step(
        batch,
        model,
        target_type=target_type,
        baseline_type=baseline_type)
    # Welford's update: the order of these three statements matters.
    previous_mean = running_mean
    running_mean = running_mean + (diff - running_mean) / step
    sum_sq_dev = sum_sq_dev + (diff - running_mean) * (diff - previous_mean)
    if step % 1000 == 0:
      sigma = tf.math.sqrt(sum_sq_dev / step)
      z_score = running_mean / (sigma / tf.math.sqrt(float(step)))
      tf.print(step, 'z_score: ', z_score, 'sigma: ', sigma)
@tf.function
def run_bias_check_step(
    train_batch_i,
    bvae_model,
    target_type='disarm',
    baseline_type='reinforce_loo'):
  """Run bias check for 1 batch.

  Estimates the encoder gradients with both the target and the baseline
  estimator on the same (binarized) batch and returns the mean elementwise
  difference across all encoder variables.
  """
  input_batch = process_batch_input(train_batch_i)
  sample_size = FLAGS.num_pairs
  # Index [2] selects the inference-network (encoder) gradients, matching
  # how estimate_gradients(...)[2] is used elsewhere in this file.
  infnet_grads = estimate_gradients(
      input_batch, bvae_model, target_type, sample_size)[2]
  baseline_infnet_grads = estimate_gradients(
      input_batch, bvae_model, baseline_type, sample_size)[2]
  # Flatten the per-variable differences into one vector before averaging.
  diff = tf.concat([tf.reshape(x - y, [-1])
                    for x, y in zip(infnet_grads, baseline_infnet_grads)],
                   axis=0)
  return tf.reduce_mean(diff)
def main(_):
  """Train a binary VAE and log gradient-variance diagnostics.

  Builds the dataset, encoder/decoder networks and optimizers from FLAGS,
  restores the latest checkpoint when one exists, then runs the training
  loop with periodic evaluation, TensorBoard summaries and checkpointing.
  Optionally finishes with a bias check between gradient estimators.
  """
  tf.random.set_seed(FLAGS.seed)

  logdir = FLAGS.logdir
  os.makedirs(logdir, exist_ok=True)

  genmo_lr = tf.constant(FLAGS.genmo_lr)
  infnet_lr = tf.constant(FLAGS.infnet_lr)
  prior_lr = tf.constant(FLAGS.prior_lr)

  genmo_optimizer = tf.keras.optimizers.Adam(learning_rate=genmo_lr)
  infnet_optimizer = tf.keras.optimizers.Adam(learning_rate=infnet_lr)
  prior_optimizer = tf.keras.optimizers.SGD(learning_rate=prior_lr)
  # Optimizer for the RELAX control-variate parameters.
  theta_optimizer = tf.keras.optimizers.Adam(learning_rate=infnet_lr,
                                             beta_1=0.999)

  batch_size = FLAGS.batch_size
  if FLAGS.dataset == 'static_mnist':
    train_ds, valid_ds, test_ds = dataset.get_static_mnist_batch(batch_size)
    train_size = 50000
  elif FLAGS.dataset == 'dynamic_mnist':
    train_ds, valid_ds, test_ds = dataset.get_dynamic_mnist_batch(batch_size)
    train_size = 50000
  elif FLAGS.dataset == 'fashion_mnist':
    train_ds, valid_ds, test_ds = dataset.get_dynamic_mnist_batch(
        batch_size, fashion_mnist=True)
    train_size = 50000
  elif FLAGS.dataset == 'omniglot':
    train_ds, valid_ds, test_ds = dataset.get_omniglot_batch(batch_size)
    train_size = 23000
  else:
    # BUGFIX: fail fast on unknown dataset flags instead of hitting a
    # confusing NameError on train_ds/train_size below.
    raise NotImplementedError('Unsupported dataset: %s' % FLAGS.dataset)
  num_steps_per_epoch = int(train_size / batch_size)

  train_ds_mean = dataset.get_mean_from_iterator(
      train_ds, dataset_size=train_size, batch_size=batch_size)

  if FLAGS.initialize_with_bias:
    # Start the decoder output bias at the logit of the per-pixel means,
    # clipping away from 0/1 so the logit stays finite.
    bias_value = -tf.math.log(
        1./tf.clip_by_value(train_ds_mean, 0.001, 0.999) - 1.)
    bias_initializer = tf.keras.initializers.Constant(bias_value)
  else:
    bias_initializer = 'zeros'

  if FLAGS.encoder_type == 'linear':
    encoder_hidden_sizes = [200]
    encoder_activations = ['linear']
    decoder_hidden_sizes = [784]
    decoder_activations = ['linear']
  elif FLAGS.encoder_type == 'nonlinear':
    encoder_hidden_sizes = [200, 200, 200]
    encoder_activations = [
        layers.LeakyReLU(alpha=0.3),
        layers.LeakyReLU(alpha=0.3),
        'linear']
    decoder_hidden_sizes = [200, 200, 784]
    decoder_activations = [
        layers.LeakyReLU(alpha=0.3),
        layers.LeakyReLU(alpha=0.3),
        'linear']
  else:
    raise NotImplementedError

  encoder = [networks.BinaryNetwork(
      encoder_hidden_sizes,
      encoder_activations,
      mean_xs=train_ds_mean,
      demean_input=FLAGS.demean_input,
      name='bvae_encoder')]
  decoder = [networks.BinaryNetwork(
      decoder_hidden_sizes,
      decoder_activations,
      demean_input=FLAGS.demean_input,
      final_layer_bias_initializer=bias_initializer,
      name='bvae_decoder')]

  prior_logit = tf.Variable(tf.zeros([200], tf.float32))

  if FLAGS.grad_type == 'relax':
    control_network = tf.keras.Sequential()
    control_network.add(
        layers.Dense(137, activation=layers.LeakyReLU(alpha=0.3)))
    control_network.add(
        layers.Dense(1))
  else:
    control_network = None

  bvae_model = networks.DiscreteVAE(
      encoder,
      decoder,
      prior_logit,
      grad_type=FLAGS.grad_type,
      control_nn=control_network)
  bvae_model.build(input_shape=(None, 784))

  tensorboard_file_writer = tf.summary.create_file_writer(logdir)

  # In order to use `tf.train.ExponentialMovingAverage`, one has to
  # use `tf.Variable`.
  encoder_grad_variable = initialize_grad_variables(bvae_model.encoder_vars)
  encoder_grad_sq_variable = initialize_grad_variables(bvae_model.encoder_vars)

  if FLAGS.estimate_grad_basket:
    # Track a basket of additional estimators so their gradient variances
    # can be compared against the one used for training.
    if FLAGS.grad_type in ['vimco', 'local-disarm']:
      grad_basket = ['vimco', 'local-disarm']
    elif FLAGS.grad_type == 'reinforce_loo':
      grad_basket = ['arm', 'disarm', 'reinforce_loo', 'relax']
    else:
      raise NotImplementedError

    grad_variable_dict = {
        k: initialize_grad_variables(bvae_model.encoder_vars)
        for k in grad_basket}
    grad_sq_variable_dict = {
        k: initialize_grad_variables(bvae_model.encoder_vars)
        for k in grad_basket}
    ckpt = tf.train.Checkpoint(
        genmo_optimizer=genmo_optimizer,
        infnet_optimizer=infnet_optimizer,
        theta_optimizer=theta_optimizer,
        encoder_grad_variable=encoder_grad_variable,
        encoder_grad_sq_variable=encoder_grad_sq_variable,
        grad_variable_dict=grad_variable_dict,
        grad_sq_variable_dict=grad_sq_variable_dict,
        bvae_model=bvae_model)
  else:
    grad_variable_dict = None
    grad_sq_variable_dict = None
    ckpt = tf.train.Checkpoint(
        genmo_optimizer=genmo_optimizer,
        infnet_optimizer=infnet_optimizer,
        theta_optimizer=theta_optimizer,
        encoder_grad_variable=encoder_grad_variable,
        encoder_grad_sq_variable=encoder_grad_sq_variable,
        bvae_model=bvae_model)

  ckpt_manager = tf.train.CheckpointManager(
      ckpt, logdir, max_to_keep=5)
  if not FLAGS.debug and ckpt_manager.latest_checkpoint:
    ckpt.restore(ckpt_manager.latest_checkpoint)
    logging.info(
        'Last checkpoint was restored: %s.', ckpt_manager.latest_checkpoint)
  else:
    tf.print('No checkpoint to load.')
    logging.info('No checkpoint to load.')

  # The optimizer's iteration counter survives checkpoint restore, so it
  # doubles as the global step for resuming training.
  start_step = infnet_optimizer.iterations.numpy()
  logging.info('Training start from step: %s', start_step)

  train_iter = iter(train_ds)  # idiomatic builtin instead of __iter__()
  for step_i in range(start_step, FLAGS.num_steps):
    (encoder_grad_var, variance_dict, genmo_loss, metrics) = train_one_step(
        next(train_iter),
        bvae_model,
        genmo_optimizer,
        infnet_optimizer,
        prior_optimizer,
        theta_optimizer,
        encoder_grad_variable,
        encoder_grad_sq_variable,
        grad_variable_dict,
        grad_sq_variable_dict)
    train_loss = tf.reduce_mean(genmo_loss)

    # Summarize
    if step_i % 1000 == 0:
      metrics.update({
          'train_objective': train_loss,
          'eval_metric/train': evaluate(
              bvae_model, train_ds,
              max_step=num_steps_per_epoch,
              num_eval_samples=FLAGS.num_train_samples),
          'eval_metric/valid': evaluate(
              bvae_model, valid_ds,
              num_eval_samples=FLAGS.num_eval_samples),
          'eval_metric/test': evaluate(
              bvae_model, test_ds,
              num_eval_samples=FLAGS.num_eval_samples),
          'var/grad': encoder_grad_var
      })
      if FLAGS.grad_type == 'relax':
        # When not fixed by flags, RELAX temperature/scaling are learned
        # variables; log their current values.
        if FLAGS.temperature is None:
          metrics['relax/temperature'] = tf.math.exp(
              bvae_model.log_temperature_variable)
        if FLAGS.scaling_factor is None:
          metrics['relax/scaling'] = bvae_model.scaling_variable
      tf.print(step_i, metrics)

      with tensorboard_file_writer.as_default():
        for k, v in metrics.items():
          tf.summary.scalar(k, v, step=step_i)
        if variance_dict is not None:
          tf.print(variance_dict)
          for k, v in variance_dict.items():
            tf.summary.scalar(k, v, step=step_i)

    # Checkpoint
    if step_i % 10000 == 0:
      ckpt_save_path = ckpt_manager.save()
      logging.info('Saving checkpoint for step %d at %s.',
                   step_i, ckpt_save_path)

  if FLAGS.bias_check:
    # Pair the trained estimator with a baseline expected to target the
    # same expectation, so their gradient difference should have mean zero.
    if FLAGS.grad_type == 'local-disarm':
      baseline_type = 'vimco'
    elif FLAGS.grad_type == 'vimco':
      baseline_type = 'multisample'
    elif FLAGS.grad_type == 'reinforce_loo':
      baseline_type = 'disarm'
    else:
      baseline_type = 'reinforce_loo'
    run_bias_check(bvae_model,
                   next(train_iter),
                   FLAGS.grad_type,
                   baseline_type)
if __name__ == '__main__':
app.run(main)
| 36.264493 | 80 | 0.689729 |
41ac730759846752db8e713332d94474e5861bee | 3,451 | py | Python | examples/submission/calculation_tests/test_submit_fleur_mpi.py | Tseplyaev/aiida-fleur | 5de36d8b235e4b4edd908e7a0cb594112a461eb5 | [
"MIT"
] | null | null | null | examples/submission/calculation_tests/test_submit_fleur_mpi.py | Tseplyaev/aiida-fleur | 5de36d8b235e4b4edd908e7a0cb594112a461eb5 | [
"MIT"
] | null | null | null | examples/submission/calculation_tests/test_submit_fleur_mpi.py | Tseplyaev/aiida-fleur | 5de36d8b235e4b4edd908e7a0cb594112a461eb5 | [
"MIT"
] | null | null | null | #!/usr/bin/env runaiida
# -*- coding: utf-8 -*-
# NOTE: this is a Python 2 AiiDA example script (print statements,
# `print >> sys.stderr`); it submits a Fleur MPI test calculation.
__copyright__ = (u"Copyright (c), 2016, Forschungszentrum Jülich GmbH, "
                 "IAS-1/PGI-1, Germany. All rights reserved.")
__license__ = "MIT license, see LICENSE.txt file"
__version__ = "0.27"
__contributors__ = "Jens Broeder"

# The AiiDA database environment must be loaded before any ORM imports.
from aiida import load_dbenv, is_dbenv_loaded
if not is_dbenv_loaded():
    load_dbenv()
import sys
import os

from aiida.common.example_helpers import test_and_get_code
from aiida.orm import DataFactory

# If set to True, will ask AiiDA to run in serial mode (i.e., AiiDA will not
# invoke the mpirun command in the submission script)
run_in_serial_mode = False
################################################################

# Resolve AiiDA data classes via their registered entry points.
ParameterData = DataFactory('parameter')
StructureData = DataFactory('structure')
FleurinpData = DataFactory('fleur.fleurinp')

# CLI: first argument must be --send (really submit) or --dont-send (dry run).
try:
    dontsend = sys.argv[1]
    if dontsend == "--dont-send":
        submit_test = True
    elif dontsend == "--send":
        submit_test = False
    else:
        raise IndexError
except IndexError:
    print >> sys.stderr, ("The first parameter can only be either "
                          "--send or --dont-send")
    sys.exit(1)

# Optional second argument: the label of the Fleur code to use.
try:
    codename = sys.argv[2]
except IndexError:
    codename = None

# Scheduler queue selection; the second assignment intentionally overrides
# the first (hard-coded for this example machine).
queue = None
queue = 'th1'#'th123_node'
# queue = "th1_small"
settings = None

#####

code = test_and_get_code(codename, expected_code_type='fleur.fleur')

#TODO: how to make smart path?
# get where tests folder is, then relative path
#inpxmlfile = '/usr/users/iff_th1/broeder/aiida/github/aiida-fleur/tests/inp_xml_files/W/inp.xml'
# NOTE: absolute developer-machine path; adapt before running elsewhere.
inpxmlfile = '/Users/broeder/aiida/github/aiida-fleur/tests/inp_xml_files/W/inp.xml'
fleurinp = FleurinpData(files = [inpxmlfile])
## For remote codes, it is not necessary to manually set the computer,
## since it is set automatically by new_calc
#computer = code.get_remote_computer()
#calc = code.new_calc(computer=computer)

calc = code.new_calc()
calc.label = "Test Fleur fleur_MPI"
calc.description = "Test calculation of the Fleur code"
calc.set_max_wallclock_seconds(3600)  # 60 min

# Valid only for Slurm and PBS (using default values for the
# number_cpus_per_machine), change for SGE-like schedulers
#calc.set_resources({"num_machines": 1})
if run_in_serial_mode:
    calc.set_withmpi(False)

## Otherwise, to specify a given # of cpus per machine, uncomment the following:
calc.set_resources({"num_machines": 2, "num_mpiprocs_per_machine": 12})
#calc.set_resources({"tot_num_mpiprocs" : 8})

# unfortunately this does not work
#calc.set_custom_scheduler_commands("#PBS -l :ib1")
#calc.set_custon_scheduler_commands(#BSUB -P jara0043 \n#BSUB -a intelmpi")
#calc.set_custom_scheduler_commands("#BSUB -P test-clx \n#BSUB -a intelmpi")

if queue is not None:
    calc.set_queue_name(queue)

# Attach the Fleur input file node created above.
calc.use_fleurinpdata(fleurinp)
#calc.use_code(code)

if settings is not None:
    calc.use_settings(settings)

if submit_test:
    # Dry run: only write the submission script locally for inspection.
    subfolder, script_filename = calc.submit_test()
    print "Test_submit for calculation (uuid='{}')".format(
        calc.uuid)
    print "Submit file in {}".format(os.path.join(
        os.path.relpath(subfolder.abspath),
        script_filename
    ))
else:
    # Real run: persist the calculation node, then submit it to the daemon.
    calc.store_all()
    print "created calculation; calc=Calculation(uuid='{}') # ID={}".format(
        calc.uuid, calc.dbnode.pk)
    calc.submit()
    print "submitted calculation; calc=Calculation(uuid='{}') # ID={}".format(
        calc.uuid, calc.dbnode.pk)
| 31.09009 | 97 | 0.701246 |
3a9f95fd53587dc34876913e802f50141b0e2ded | 15,755 | py | Python | myven/lib/python3.8/site-packages/ansible/modules/cloud/amazon/ec2_vpc_dhcp_option.py | baltham/dne-dna-code | 4a13309a790a670d2f07e635c9264a0c29976c6a | [
"MIT"
] | 1 | 2021-04-02T08:08:39.000Z | 2021-04-02T08:08:39.000Z | myven/lib/python3.8/site-packages/ansible/modules/cloud/amazon/ec2_vpc_dhcp_option.py | baltham/dne-dna-code | 4a13309a790a670d2f07e635c9264a0c29976c6a | [
"MIT"
] | null | null | null | myven/lib/python3.8/site-packages/ansible/modules/cloud/amazon/ec2_vpc_dhcp_option.py | baltham/dne-dna-code | 4a13309a790a670d2f07e635c9264a0c29976c6a | [
"MIT"
] | 1 | 2020-05-03T01:13:16.000Z | 2020-05-03T01:13:16.000Z | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'certified'}
DOCUMENTATION = """
---
module: ec2_vpc_dhcp_option
short_description: Manages DHCP Options, and can ensure the DHCP options for the given VPC match what's
requested
description:
- This module removes, or creates DHCP option sets, and can associate them to a VPC.
Optionally, a new DHCP Options set can be created that converges a VPC's existing
DHCP option set with values provided.
When dhcp_options_id is provided, the module will
1. remove (with state='absent')
2. ensure tags are applied (if state='present' and tags are provided
3. attach it to a VPC (if state='present' and a vpc_id is provided.
If any of the optional values are missing, they will either be treated
as a no-op (i.e., inherit what already exists for the VPC)
To remove existing options while inheriting, supply an empty value
(e.g. set ntp_servers to [] if you want to remove them from the VPC's options)
Most of the options should be self-explanatory.
author: "Joel Thompson (@joelthompson)"
version_added: 2.1
options:
domain_name:
description:
- The domain name to set in the DHCP option sets
required: false
default: None
dns_servers:
description:
- A list of hosts to set the DNS servers for the VPC to. (Should be a
list of IP addresses rather than host names.)
required: false
default: None
ntp_servers:
description:
- List of hosts to advertise as NTP servers for the VPC.
required: false
default: None
netbios_name_servers:
description:
- List of hosts to advertise as NetBIOS servers.
required: false
default: None
netbios_node_type:
description:
- NetBIOS node type to advertise in the DHCP options.
The AWS recommendation is to use 2 (when using netbios name services)
http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_DHCP_Options.html
required: false
default: None
vpc_id:
description:
- VPC ID to associate with the requested DHCP option set.
If no vpc id is provided, and no matching option set is found then a new
DHCP option set is created.
required: false
default: None
delete_old:
description:
- Whether to delete the old VPC DHCP option set when associating a new one.
This is primarily useful for debugging/development purposes when you
want to quickly roll back to the old option set. Note that this setting
will be ignored, and the old DHCP option set will be preserved, if it
is in use by any other VPC. (Otherwise, AWS will return an error.)
required: false
default: true
inherit_existing:
description:
- For any DHCP options not specified in these parameters, whether to
inherit them from the options set already applied to vpc_id, or to
reset them to be empty.
required: false
default: false
tags:
description:
- Tags to be applied to a VPC options set if a new one is created, or
if the resource_id is provided. (options must match)
required: False
default: None
aliases: [ 'resource_tags']
version_added: "2.1"
dhcp_options_id:
description:
- The resource_id of an existing DHCP options set.
If this is specified, then it will override other settings, except tags
(which will be updated to match)
required: False
default: None
version_added: "2.1"
state:
description:
- create/assign or remove the DHCP options.
If state is set to absent, then a DHCP options set matched either
by id, or tags and options will be removed if possible.
required: False
default: present
choices: [ 'absent', 'present' ]
version_added: "2.1"
extends_documentation_fragment:
- aws
- ec2
requirements:
- boto
"""
RETURN = """
new_options:
description: The DHCP options created, associated or found
returned: when appropriate
type: dict
sample:
domain-name-servers:
- 10.0.0.1
- 10.0.1.1
netbois-name-servers:
- 10.0.0.1
- 10.0.1.1
netbios-node-type: 2
domain-name: "my.example.com"
dhcp_options_id:
description: The aws resource id of the primary DCHP options set created, found or removed
type: string
returned: when available
changed:
description: Whether the dhcp options were changed
type: bool
returned: always
"""
EXAMPLES = """
# Completely overrides the VPC DHCP options associated with VPC vpc-123456 and deletes any existing
# DHCP option set that may have been attached to that VPC.
- ec2_vpc_dhcp_option:
domain_name: "foo.example.com"
region: us-east-1
dns_servers:
- 10.0.0.1
- 10.0.1.1
ntp_servers:
- 10.0.0.2
- 10.0.1.2
netbios_name_servers:
- 10.0.0.1
- 10.0.1.1
netbios_node_type: 2
vpc_id: vpc-123456
delete_old: True
inherit_existing: False
# Ensure the DHCP option set for the VPC has 10.0.0.4 and 10.0.1.4 as the specified DNS servers, but
# keep any other existing settings. Also, keep the old DHCP option set around.
- ec2_vpc_dhcp_option:
region: us-east-1
dns_servers:
- "{{groups['dns-primary']}}"
- "{{groups['dns-secondary']}}"
vpc_id: vpc-123456
inherit_existing: True
delete_old: False
## Create a DHCP option set with 4.4.4.4 and 8.8.8.8 as the specified DNS servers, with tags
## but do not assign to a VPC
- ec2_vpc_dhcp_option:
region: us-east-1
dns_servers:
- 4.4.4.4
- 8.8.8.8
tags:
Name: google servers
Environment: Test
## Delete a DHCP options set that matches the tags and options specified
- ec2_vpc_dhcp_option:
region: us-east-1
dns_servers:
- 4.4.4.4
- 8.8.8.8
tags:
Name: google servers
Environment: Test
state: absent
## Associate a DHCP options set with a VPC by ID
- ec2_vpc_dhcp_option:
region: us-east-1
dhcp_options_id: dopt-12345678
vpc_id: vpc-123456
"""
import collections
import traceback
from time import sleep, time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import HAS_BOTO, connect_to_aws, ec2_argument_spec, get_aws_connection_info
if HAS_BOTO:
import boto.vpc
import boto.ec2
from boto.exception import EC2ResponseError
def get_resource_tags(vpc_conn, resource_id):
    """Return the AWS tags on *resource_id* as a plain ``{name: value}`` dict."""
    tag_objects = vpc_conn.get_all_tags(filters={'resource-id': resource_id})
    return {tag.name: tag.value for tag in tag_objects}
def ensure_tags(module, vpc_conn, resource_id, tags, add_only, check_mode):
    """Reconcile the tags on *resource_id* with the requested *tags* mapping.

    Tags absent from the request are removed unless *add_only* is true;
    missing tags are created.  Returns ``{'changed': bool, 'tags': dict}``
    with the tags observed after reconciliation.  Any EC2 API failure is
    reported via ``module.fail_json``.
    """
    try:
        current = get_resource_tags(vpc_conn, resource_id)
        if tags == current:
            return {'changed': False, 'tags': current}
        stale = {key: value for key, value in current.items() if key not in tags}
        if stale and not add_only:
            vpc_conn.delete_tags(resource_id, stale, dry_run=check_mode)
        missing = {key: value for key, value in tags.items() if key not in current}
        if missing:
            vpc_conn.create_tags(resource_id, missing, dry_run=check_mode)
        return {'changed': True, 'tags': get_resource_tags(vpc_conn, resource_id)}
    except EC2ResponseError as e:
        module.fail_json(msg="Failed to modify tags: %s" % e.message, exception=traceback.format_exc())
def fetch_dhcp_options_for_vpc(vpc_conn, vpc_id):
    """Return the non-default DHCP options set attached to *vpc_id*.

    Returns ``None`` when the VPC id does not resolve to exactly one VPC,
    when the VPC uses the account's ``default`` option set, or when the
    option-set id does not resolve to exactly one object.
    """
    matching_vpcs = vpc_conn.get_all_vpcs(vpc_ids=[vpc_id])
    if len(matching_vpcs) != 1:
        return None
    options_id = matching_vpcs[0].dhcp_options_id
    if options_id == "default":
        return None
    option_sets = vpc_conn.get_all_dhcp_options(dhcp_options_ids=[options_id])
    return option_sets[0] if len(option_sets) == 1 else None
def match_dhcp_options(vpc_conn, tags=None, options=None):
    """Find the first DHCP options set matching *tags* and *options*.

    Either filter may be ``None``/empty, in which case it is ignored.
    Returns ``(True, option_set)`` for the first match, ``(False, None)``
    otherwise.
    """
    for candidate in vpc_conn.get_all_dhcp_options():
        # Only look tags up when a tag filter was actually requested.
        if not tags or get_resource_tags(vpc_conn, candidate.id) == tags:
            if not options or candidate.options == options:
                return (True, candidate)
    return (False, None)
def remove_dhcp_options_by_id(vpc_conn, dhcp_options_id):
    """Delete a DHCP options set unless it is still attached to some VPC.

    Returns ``True`` if the set was deleted, ``False`` when at least one
    VPC still references it (AWS would reject the delete in that case).
    """
    still_in_use = vpc_conn.get_all_vpcs(filters={'dhcpOptionsId': dhcp_options_id})
    if still_in_use:
        return False
    vpc_conn.delete_dhcp_options(dhcp_options_id)
    return True
def main():
    """Entry point: create/associate/remove an EC2 DHCP options set.

    Parses the module arguments, converges the requested DHCP options with
    any existing option set (optionally inheriting from the VPC's current
    one), handles check mode, and exits via ``module.exit_json`` /
    ``module.fail_json``.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        dhcp_options_id=dict(type='str', default=None),
        domain_name=dict(type='str', default=None),
        dns_servers=dict(type='list', default=None),
        ntp_servers=dict(type='list', default=None),
        netbios_name_servers=dict(type='list', default=None),
        netbios_node_type=dict(type='int', default=None),
        vpc_id=dict(type='str', default=None),
        delete_old=dict(type='bool', default=True),
        inherit_existing=dict(type='bool', default=False),
        tags=dict(type='dict', default=None, aliases=['resource_tags']),
        state=dict(type='str', default='present', choices=['present', 'absent'])
    )
    )

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
    params = module.params
    found = False
    changed = False
    # defaultdict(None) lets option keys be compared/read without KeyErrors.
    new_options = collections.defaultdict(lambda: None)

    if not HAS_BOTO:
        module.fail_json(msg='boto is required for this module')

    region, ec2_url, boto_params = get_aws_connection_info(module)
    connection = connect_to_aws(boto.vpc, region, **boto_params)

    existing_options = None

    # First check if we were given a dhcp_options_id
    if not params['dhcp_options_id']:
        # No, so create new_options from the parameters
        if params['dns_servers'] is not None:
            new_options['domain-name-servers'] = params['dns_servers']
        if params['netbios_name_servers'] is not None:
            new_options['netbios-name-servers'] = params['netbios_name_servers']
        if params['ntp_servers'] is not None:
            new_options['ntp-servers'] = params['ntp_servers']
        if params['domain_name'] is not None:
            # needs to be a list for comparison with boto objects later
            new_options['domain-name'] = [params['domain_name']]
        if params['netbios_node_type'] is not None:
            # needs to be a list for comparison with boto objects later
            new_options['netbios-node-type'] = [str(params['netbios_node_type'])]
        # If we were given a vpc_id then we need to look at the options on that
        if params['vpc_id']:
            existing_options = fetch_dhcp_options_for_vpc(connection, params['vpc_id'])
            # if we've been asked to inherit existing options, do that now
            if params['inherit_existing']:
                if existing_options:
                    for option in ['domain-name-servers', 'netbios-name-servers', 'ntp-servers', 'domain-name', 'netbios-node-type']:
                        if existing_options.options.get(option) and new_options[option] != [] and (not new_options[option] or [''] == new_options[option]):
                            new_options[option] = existing_options.options.get(option)

            # Do the vpc's dhcp options already match what we're asked for? if so we are done
            if existing_options and new_options == existing_options.options:
                module.exit_json(changed=changed, new_options=new_options, dhcp_options_id=existing_options.id)

        # If no vpc_id was given, or the options don't match then look for an existing set using tags
        found, dhcp_option = match_dhcp_options(connection, params['tags'], new_options)

    # Now let's cover the case where there are existing options that we were told about by id
    # If a dhcp_options_id was supplied we don't look at options inside, just set tags (if given)
    else:
        supplied_options = connection.get_all_dhcp_options(filters={'dhcp-options-id': params['dhcp_options_id']})
        if len(supplied_options) != 1:
            if params['state'] != 'absent':
                module.fail_json(msg=" a dhcp_options_id was supplied, but does not exist")
        else:
            found = True
            dhcp_option = supplied_options[0]
            if params['state'] != 'absent' and params['tags']:
                ensure_tags(module, connection, dhcp_option.id, params['tags'], False, module.check_mode)

    # Now we have the dhcp options set, let's do the necessary

    # if we found options we were asked to remove then try to do so
    if params['state'] == 'absent':
        if not module.check_mode:
            if found:
                changed = remove_dhcp_options_by_id(connection, dhcp_option.id)
        module.exit_json(changed=changed, new_options={})

    # otherwise if we haven't found the required options we have something to do
    elif not module.check_mode and not found:

        # create some dhcp options if we weren't able to use existing ones
        if not found:

            # Convert netbios-node-type and domain-name back to strings
            if new_options['netbios-node-type']:
                new_options['netbios-node-type'] = new_options['netbios-node-type'][0]
            if new_options['domain-name']:
                new_options['domain-name'] = new_options['domain-name'][0]

            # create the new dhcp options set requested
            dhcp_option = connection.create_dhcp_options(
                new_options['domain-name'],
                new_options['domain-name-servers'],
                new_options['ntp-servers'],
                new_options['netbios-name-servers'],
                new_options['netbios-node-type'])

            # wait for dhcp option to be accessible
            found_dhcp_opt = False
            start_time = time()
            while time() < start_time + 300:
                try:
                    found_dhcp_opt = connection.get_all_dhcp_options(dhcp_options_ids=[dhcp_option.id])
                except EC2ResponseError as e:
                    if e.error_code == 'InvalidDhcpOptionID.NotFound':
                        sleep(3)
                    else:
                        # BUGFIX: format_exc must be called; passing the bare
                        # function object produced a useless repr in output.
                        module.fail_json(msg="Failed to describe DHCP options", exception=traceback.format_exc())
                else:
                    break
            if not found_dhcp_opt:
                module.fail_json(msg="Failed to wait for {0} to be available.".format(dhcp_option.id))

            changed = True
            if params['tags']:
                ensure_tags(module, connection, dhcp_option.id, params['tags'], False, module.check_mode)

    # If we were given a vpc_id, then attach the options we now have to that before we finish
    if params['vpc_id'] and not module.check_mode:
        changed = True
        connection.associate_dhcp_options(dhcp_option.id, params['vpc_id'])
        # and remove old ones if that was requested
        if params['delete_old'] and existing_options:
            remove_dhcp_options_by_id(connection, existing_options.id)

    module.exit_json(changed=changed, new_options=new_options, dhcp_options_id=dhcp_option.id)
if __name__ == "__main__":
main()
| 38.1477 | 155 | 0.663916 |
a6e282d54f4d271d6693c8193c613f5582f7e1cb | 88 | py | Python | app_sys/apps.py | sivarki/hjarnuc | 4acc9437af0f0fdc44d68dd0d6923e1039a4911b | [
"Apache-2.0"
] | null | null | null | app_sys/apps.py | sivarki/hjarnuc | 4acc9437af0f0fdc44d68dd0d6923e1039a4911b | [
"Apache-2.0"
] | null | null | null | app_sys/apps.py | sivarki/hjarnuc | 4acc9437af0f0fdc44d68dd0d6923e1039a4911b | [
"Apache-2.0"
] | null | null | null | from django.apps import AppConfig
class AppSysConfig(AppConfig):
name = 'app_sys'
| 14.666667 | 33 | 0.75 |
a7f990dc05022a06f8d40fd2988dab796307c572 | 548 | py | Python | pypy/module/token/test/test_token.py | nanjekyejoannah/pypy | e80079fe13c29eda7b2a6b4cd4557051f975a2d9 | [
"Apache-2.0",
"OpenSSL"
] | 381 | 2018-08-18T03:37:22.000Z | 2022-02-06T23:57:36.000Z | pypy/module/token/test/test_token.py | nanjekyejoannah/pypy | e80079fe13c29eda7b2a6b4cd4557051f975a2d9 | [
"Apache-2.0",
"OpenSSL"
] | 16 | 2018-09-22T18:12:47.000Z | 2022-02-22T20:03:59.000Z | pypy/module/token/test/test_token.py | nanjekyejoannah/pypy | e80079fe13c29eda7b2a6b4cd4557051f975a2d9 | [
"Apache-2.0",
"OpenSSL"
] | 55 | 2015-08-16T02:41:30.000Z | 2022-03-20T20:33:35.000Z | class AppTestToken:
    def setup_class(cls):
        # Run applevel code once per class: import the pure-Python `token`
        # module inside the object space under test and keep the wrapped
        # module for the app-level test methods below.
        cls.w_token = cls.space.appexec([], """():
            import token
            return token""")
    def test_isterminal(self):
        # ENDMARKER is a terminal token; 300 lies outside the terminal range.
        assert self.token.ISTERMINAL(self.token.ENDMARKER)
        assert not self.token.ISTERMINAL(300)
    def test_isnonterminal(self):
        # 300 is a non-terminal value; NAME is a terminal token.
        assert self.token.ISNONTERMINAL(300)
        assert not self.token.ISNONTERMINAL(self.token.NAME)
    def test_iseof(self):
        # Only ENDMARKER counts as end-of-file.
        assert self.token.ISEOF(self.token.ENDMARKER)
        assert not self.token.ISEOF(self.token.NAME)
| 28.842105 | 60 | 0.671533 |
c27745178567440d72f336463f2c1cc7ab50e1c8 | 3,520 | py | Python | cacheops/conf.py | ymguerra/django-cacheops | 962fdf53b4b1f3a38320164f2f769809612cec59 | [
"BSD-3-Clause"
] | 1,369 | 2015-01-02T19:15:23.000Z | 2022-03-30T12:31:46.000Z | cacheops/conf.py | ymguerra/django-cacheops | 962fdf53b4b1f3a38320164f2f769809612cec59 | [
"BSD-3-Clause"
] | 285 | 2015-01-05T02:50:04.000Z | 2022-03-29T08:32:56.000Z | cacheops/conf.py | ymguerra/django-cacheops | 962fdf53b4b1f3a38320164f2f769809612cec59 | [
"BSD-3-Clause"
] | 178 | 2015-01-31T09:51:23.000Z | 2022-03-31T15:25:06.000Z | from importlib import import_module
from funcy import memoize, merge
from django.conf import settings as base_settings
from django.core.exceptions import ImproperlyConfigured
from django.core.signals import setting_changed
ALL_OPS = {'get', 'fetch', 'count', 'aggregate', 'exists'}
class Defaults:
    """Fallback values used when a setting is absent from Django settings."""
    CACHEOPS_ENABLED = True
    CACHEOPS_REDIS = {}
    CACHEOPS_DEFAULTS = {}
    CACHEOPS = {}
    CACHEOPS_PREFIX = lambda query: ''
    CACHEOPS_LRU = False
    CACHEOPS_CLIENT_CLASS = None
    CACHEOPS_DEGRADE_ON_FAILURE = False
    CACHEOPS_SENTINEL = {}
    # NOTE: we don't use these fields in invalidator conditions since their values could be very long
    # and one should not filter by their equality anyway.
    CACHEOPS_SKIP_FIELDS = "FileField", "TextField", "BinaryField", "JSONField"
    CACHEOPS_LONG_DISJUNCTION = 8
    CACHEOPS_SERIALIZER = 'pickle'
    FILE_CACHE_DIR = '/tmp/cacheops_file_cache'
    FILE_CACHE_TIMEOUT = 60*60*24*30  # 30 days
class Settings(object):
    """Lazy accessor for cacheops settings with ``Defaults`` fallbacks.

    Attribute access resolves against Django settings first, then the
    ``Defaults`` class, normalizes a few values, and memoizes the result
    in the instance ``__dict__`` so ``__getattr__`` runs once per name.
    """
    def __getattr__(self, name):
        res = getattr(base_settings, name, getattr(Defaults, name))
        if name in ['CACHEOPS_PREFIX', 'CACHEOPS_SERIALIZER']:
            # These may be given as dotted import paths; resolve them.
            res = import_string(res) if isinstance(res, str) else res
        # Convert old list of classes to list of strings
        if name == 'CACHEOPS_SKIP_FIELDS':
            # BUGFIX: Field.get_internal_type() takes no arguments; the old
            # code passed `res` to it, raising TypeError for class entries.
            res = [f if isinstance(f, str) else f.get_internal_type() for f in res]
        # Save to dict to speed up next access, __getattr__ won't be called
        self.__dict__[name] = res
        return res
settings = Settings()
setting_changed.connect(lambda setting, **kw: settings.__dict__.pop(setting, None), weak=False)
def import_string(path):
    """Import a dotted path: ``'pkg.mod.attr'`` -> attribute, ``'pkg.mod'`` -> module."""
    if "." not in path:
        return import_module(path)
    module_path, _, attr_name = path.rpartition(".")
    return getattr(import_module(module_path), attr_name)
@memoize
def prepare_profiles():
    """Build and cache the ``'app.model' -> profile`` mapping.

    Merges ``CACHEOPS_DEFAULTS`` over built-in profile defaults, then each
    per-model profile from ``CACHEOPS`` over that.  ``None`` profiles are
    kept as explicit "do not cache" markers.  Used by ``model_profile()``.
    """
    base = {
        'ops': (),
        'local_get': False,
        'db_agnostic': True,
        'lock': False,
    }
    base.update(settings.CACHEOPS_DEFAULTS)

    prepared = {}
    for app_model, profile in settings.CACHEOPS.items():
        key = app_model.lower()
        if profile is None:
            prepared[key] = None
            continue

        mp = merge(base, profile)
        prepared[key] = mp

        ops = mp['ops']
        if ops == 'all':
            ops = ALL_OPS
        # People will do that anyway :)
        elif isinstance(ops, str):
            ops = {ops}
        mp['ops'] = set(ops)

        if 'timeout' not in mp:
            raise ImproperlyConfigured(
                'You must specify "timeout" option in "%s" CACHEOPS profile' % app_model)
        if not isinstance(mp['timeout'], int):
            raise ImproperlyConfigured(
                '"timeout" option in "%s" CACHEOPS profile should be an integer' % app_model)

    return prepared
def model_profile(model):
    """Return the cacheops profile for *model*, or ``None`` if uncached.

    Lookup order: exact ``app.model`` key, then ``app.*``, then ``*.*``.
    """
    # Django migrations use fake models; those are never cached.
    if model.__module__ == '__fake__':
        return None

    profiles = prepare_profiles()

    app_label = model._meta.app_label.lower()
    model_name = model._meta.model_name
    lookups = ('%s.%s' % (app_label, model_name), '%s.*' % app_label, '*.*')
    return next((profiles[key] for key in lookups if key in profiles), None)
| 31.428571 | 100 | 0.641761 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.