hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acf2f26c4d1442cc66adbcc63f4cccd985cd95f3 | 3,784 | py | Python | tensorflow_compression/python/util/packed_tensors.py | alexhepburn/compression | 9c27efc59bba2570e766c7c633a6f22dd2478739 | [
"Apache-2.0"
] | 1 | 2020-12-30T04:33:36.000Z | 2020-12-30T04:33:36.000Z | tensorflow_compression/python/util/packed_tensors.py | wwxn/compression | 734ff910119bd24bbfdb08b6f6da906c789a57a1 | [
"Apache-2.0"
] | null | null | null | tensorflow_compression/python/util/packed_tensors.py | wwxn/compression | 734ff910119bd24bbfdb08b6f6da906c789a57a1 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Packed tensors in bit sequences."""
import numpy as np
import tensorflow.compat.v1 as tf
__all__ = [
"PackedTensors",
]
class PackedTensors(object):
  """Packed representation of compressed tensors.

  Packs several rank-1 tensor values (integer or string typed) into a single
  serialized string, optionally tagged with a model identifier. Internally the
  data lives in a `tf.train.Example` protocol buffer: the identifier is stored
  under the feature key "MD" and the i-th tensor under the key `chr(i + 1)`.
  """

  def __init__(self, string=None):
    self._example = tf.train.Example()
    if string:
      self.string = string

  @property
  def model(self):
    """A model identifier."""
    return self._example.features.feature["MD"].bytes_list.value[0].decode(
        "ascii")

  @model.setter
  def model(self, value):
    self._example.features.feature["MD"].bytes_list.value[:] = [
        value.encode("ascii")]

  @model.deleter
  def model(self):
    del self._example.features.feature["MD"]

  @property
  def string(self):
    """The string representation of this object."""
    return self._example.SerializeToString()

  @string.setter
  def string(self, value):
    self._example.ParseFromString(value)

  def pack(self, tensors, arrays):
    """Packs `Tensor` values into this object."""
    if len(tensors) != len(arrays):
      raise ValueError("`tensors` and `arrays` must have same length.")
    # Feature keys are chr(1), chr(2), ... in tensor order.
    for key, (tensor, array) in enumerate(zip(tensors, arrays), start=1):
      feature = self._example.features.feature[chr(key)]
      feature.Clear()
      if array.ndim != 1:
        raise RuntimeError("Unexpected tensor rank: {}.".format(array.ndim))
      if tensor.dtype.is_integer:
        feature.int64_list.value[:] = array
      elif tensor.dtype == tf.string:
        feature.bytes_list.value[:] = array
      else:
        raise RuntimeError(
            "Unexpected tensor dtype: '{}'.".format(tensor.dtype))
    # Delete any remaining, previously set arrays.
    key = len(tensors) + 1
    while chr(key) in self._example.features.feature:
      del self._example.features.feature[chr(key)]
      key += 1

  def unpack(self, tensors):
    """Unpacks `Tensor` values from this object."""
    # Check tensor dtype first for a more informative error message.
    for tensor in tensors:
      if not tensor.dtype.is_integer and tensor.dtype != tf.string:
        raise RuntimeError(
            "Unexpected tensor dtype: '{}'.".format(tensor.dtype))
    # Extract numpy dtypes and delegate to the type-based API.
    return self.unpack_from_np_dtypes(
        [tensor.dtype.as_numpy_dtype for tensor in tensors])

  def unpack_from_np_dtypes(self, np_dtypes):
    """Unpacks values from this object based on numpy dtypes."""
    arrays = []
    for key, np_dtype in enumerate(np_dtypes, start=1):
      feature = self._example.features.feature[chr(key)]
      if np.issubdtype(np_dtype, np.integer):
        arrays.append(np.array(feature.int64_list.value, dtype=np_dtype))
      elif np_dtype == np.dtype(object) or np.issubdtype(np_dtype, np.bytes_):
        arrays.append(np.array(feature.bytes_list.value, dtype=np_dtype))
      else:
        raise RuntimeError("Unexpected numpy dtype: '{}'.".format(np_dtype))
    return arrays
| 34.09009 | 80 | 0.67204 |
acf2f2cf95e449ddd595bc93851509542e3e07c1 | 8,207 | py | Python | training/hydration_fe.py | schmolly/timemachine | 7d13a0406dc2d09ac67892988641ba4965bfb206 | [
"Apache-2.0"
] | null | null | null | training/hydration_fe.py | schmolly/timemachine | 7d13a0406dc2d09ac67892988641ba4965bfb206 | [
"Apache-2.0"
] | null | null | null | training/hydration_fe.py | schmolly/timemachine | 7d13a0406dc2d09ac67892988641ba4965bfb206 | [
"Apache-2.0"
] | null | null | null | import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from jax.config import config as jax_config
jax_config.update("jax_enable_x64", True)
import argparse
import time
import datetime
import numpy as np
import os
import sys
from ff import handlers
from ff.handlers.serialize import serialize_handlers
from ff.handlers.deserialize import deserialize_handlers
from rdkit import Chem
import configparser
import grpc
from training import dataset
from training import hydration_model, hydration_setup
from training import simulation
from training import service_pb2_grpc
from timemachine.lib import LangevinIntegrator
from training import water_box
# used during visualization to bring everything back to home box
def recenter(conf, box):
    """Wrap each coordinate in `conf` back into the home periodic box.

    `box` is a 3x3 matrix of (lower-triangular) box vectors. Axes are handled
    in z, y, x order so each projection accounts for the tilt contributions
    already removed along the previously processed axes.
    """
    wrapped = []
    for atom in conf:
        shift = np.zeros(3)
        for axis in (2, 1, 0):
            shift += box[axis] * np.floor((atom[axis] - shift[axis]) / box[axis][axis])
        wrapped.append(atom - shift)
    return np.array(wrapped)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Absolute Hydration Free Energy Script')
    parser.add_argument('--config_file', type=str, required=True, help='Location of config file.')
    args = parser.parse_args()
    config = configparser.ConfigParser()
    config.read(args.config_file)
    print("Config Settings:")
    config.write(sys.stdout)
    general_cfg = config['general']
    # basic gist of workflow:
    # 1. configure learning rates for the optimizer
    # 2. load freesolv dataset from SDF file
    # 3. split dataset into train/test
    # 4. connect to workers
    # 5. deserialize off smirnoff parameters
    # 6. prepare water box
    # 7. for each epoch, first run on test set then shuffled training set
    # 8. save parameters after each molecule
    # set up learning rates
    # Per-handler-type clipping bounds for the parameter updates below;
    # only am1ccc (charges) and lj (Lennard-Jones) are trainable here.
    learning_rates = {}
    for k, v in config['learning_rates'].items():
        vals = [float(x) for x in v.split(',')]
        if k == 'am1ccc':
            learning_rates[handlers.AM1CCCHandler] = np.array(vals)
        elif k == 'lj':
            learning_rates[handlers.LennardJonesHandler] = np.array(vals)
    intg_cfg = config['integrator']
    suppl = Chem.SDMolSupplier(general_cfg['ligand_sdf'], removeHs=False)
    data = []
    for guest_idx, mol in enumerate(suppl):
        # 4.184 converts kcal/mol -> kJ/mol; the sign flip on the label is a
        # convention of the downstream model (NOTE(review): confirm).
        label_dG = -4.184*float(mol.GetProp(general_cfg['dG'])) # in kcal/mol
        label_err = 4.184*float(mol.GetProp(general_cfg['dG_err'])) # errs are positive!
        data.append((mol, label_dG, label_err))
    full_dataset = dataset.Dataset(data)
    train_frac = float(general_cfg['train_frac'])
    train_dataset, test_dataset = full_dataset.split(train_frac)
    forcefield = general_cfg['forcefield']
    # Open one gRPC channel per simulation worker; large message limits are
    # needed because trajectories/gradients can be hundreds of MB.
    stubs = []
    worker_address_list = []
    for address in config['workers']['hosts'].split(','):
        worker_address_list.append(address)
    for address in worker_address_list:
        print("connecting to", address)
        channel = grpc.insecure_channel(address,
            options = [
                ('grpc.max_send_message_length', 500 * 1024 * 1024),
                ('grpc.max_receive_message_length', 500 * 1024 * 1024)
            ]
        )
        stub = service_pb2_grpc.WorkerStub(channel)
        stubs.append(stub)
    ff_raw = open(forcefield, "r").read()
    ff_handlers = deserialize_handlers(ff_raw)
    box_width = 3.0
    host_system, host_coords, box, _ = water_box.prep_system(box_width)
    lambda_schedule = np.array([float(x) for x in general_cfg['lambda_schedule'].split(',')])
    num_steps = int(general_cfg['n_steps'])
    for epoch in range(100):
        print("Starting Epoch", epoch, datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S"))
        epoch_dir = os.path.join(general_cfg["out_dir"], "epoch_"+str(epoch))
        if not os.path.exists(epoch_dir):
            os.makedirs(epoch_dir)
        # Snapshot the force-field parameters at the start of every epoch.
        epoch_params = serialize_handlers(ff_handlers)
        with open(os.path.join(epoch_dir, "start_epoch_params.py"), 'w') as fh:
            fh.write(epoch_params)
        # Test set first (inference only), then the shuffled training set.
        all_data = []
        test_items = [(x, True) for x in test_dataset.data]
        train_dataset.shuffle()
        train_items = [(x, False) for x in train_dataset.data]
        all_data.extend(test_items)
        all_data.extend(train_items)
        for idx, ((mol, label_dG, label_err), inference) in enumerate(all_data):
            if inference:
                prefix = "test"
            else:
                prefix = "train"
            start_time = time.time()
            # out_dir = os.path.join(epoch_dir, "mol_"+mol.GetProp("_Name"))\
            # if not os.path.exists(out_dir):
            # os.makedirs(out_dir)
            # safety guard
            try:
                potentials, masses, vjp_fns = hydration_setup.combine_potentials(
                    ff_handlers,
                    mol,
                    host_system,
                    precision=np.float32
                )
                coords = hydration_setup.combine_coordinates(
                    host_coords,
                    mol
                )
                seed = np.random.randint(0, np.iinfo(np.int32).max)
                intg = LangevinIntegrator(
                    float(intg_cfg['temperature']),
                    float(intg_cfg['dt']),
                    float(intg_cfg['friction']),
                    masses,
                    seed
                )
                sim = simulation.Simulation(
                    coords,
                    np.zeros_like(coords),
                    box,
                    potentials,
                    intg
                )
                # Thermodynamic-integration estimate of the hydration dG and
                # its gradient w.r.t. force-field parameters, fanned out to
                # the remote workers.
                (pred_dG, pred_err), grad_dG, du_dls = hydration_model.simulate(
                    sim,
                    num_steps,
                    lambda_schedule,
                    stubs
                )
                # Save the du/dlambda curve for visual inspection of the TI.
                plt.plot(lambda_schedule, du_dls)
                plt.ylabel("du_dlambda")
                plt.xlabel("lambda")
                plt.savefig(os.path.join(epoch_dir, "ti_mol_"+mol.GetProp("_Name")))
                plt.clf()
                # L1 loss on the free energy.
                loss = np.abs(pred_dG - label_dG)
                # (ytz) bootstrap CI on TI is super janky
                # error CIs are wrong "95% CI [{:.2f}, {:.2f}, {:.2f}]".format(pred_err.lower_bound, pred_err.value, pred_err.upper_bound),
                print(prefix, "mol", mol.GetProp("_Name"), "loss {:.2f}".format(loss), "pred_dG {:.2f}".format(pred_dG), "label_dG {:.2f}".format(label_dG), "label err {:.2f}".format(label_err), "time {:.2f}".format(time.time() - start_time), "smiles:", Chem.MolToSmiles(mol))
                # update ff parameters
                if not inference:
                    # Sign-SGD style update: the gradient of |pred - label| is
                    # sign(pred - label) times d(pred)/d(params), with the
                    # step clipped element-wise to the configured bounds.
                    loss_grad = np.sign(pred_dG - label_dG)
                    assert len(grad_dG) == len(vjp_fns)
                    for grad, handle_and_vjp_fns in zip(grad_dG, vjp_fns):
                        for handle, vjp_fn in handle_and_vjp_fns:
                            if type(handle) in learning_rates:
                                bounds = learning_rates[type(handle)]
                                dL_dp = loss_grad*vjp_fn(grad)[0]
                                dL_dp = np.clip(dL_dp, -bounds, bounds)
                                handle.params -= dL_dp
                    epoch_params = serialize_handlers(ff_handlers)
                    # write parameters after each traning molecule
                    with open(os.path.join(epoch_dir, "checkpoint_params_idx_"+str(idx)+"_mol_"+mol.GetProp("_Name")+".py"), 'w') as fh:
                        fh.write(epoch_params)
            except Exception as e:
                # Best-effort: a failure on one molecule should not abort the
                # whole epoch, so log the traceback and continue.
                import traceback
                print("Exception in mol", mol.GetProp("_Name"), Chem.MolToSmiles(mol), e)
                traceback.print_exc()
        # epoch_params = serialize_handlers(ff_handlers)
        # with open(os.path.join(epoch_dir, "end_epoch_params.py"), 'w') as fh:
        #     fh.write(epoch_params)
acf2f3e2eed8160ac020e628561a2b006e68953c | 20,873 | py | Python | src/simple_pressio_aria_component.py | jtencer/PyNetUQ | ce8eeca81cd4eb0ecb17e957984979a37a676f2d | [
"MIT"
] | null | null | null | src/simple_pressio_aria_component.py | jtencer/PyNetUQ | ce8eeca81cd4eb0ecb17e957984979a37a676f2d | [
"MIT"
] | null | null | null | src/simple_pressio_aria_component.py | jtencer/PyNetUQ | ce8eeca81cd4eb0ecb17e957984979a37a676f2d | [
"MIT"
] | null | null | null | import abstract_component
import os
import subprocess
import numpy as np
import exodus
from output_suppression import Suppressor
from output_suppression import suppress_stdout_stderr
import sync_times
import sys
sys.path.append("/gpfs1/jtencer/UQTk_v3.0.4-install")
import PyUQTk.pce as uqtkpce
import PyUQTk.uqtkarray as uqtkarray
class exogenous_port:
    """
    Uncertain input parameter whose value is injected via aprepro.

    Records the aprepro variable name together with the parameter's nominal
    value (`mean`), its uncertainty (`std`), and the current working `value`
    (initialized to the nominal value).
    """
    def __init__(self, varname, nominal_value, uncertainty):
        self.varname = varname
        self.mean = nominal_value
        self.std = uncertainty
        # Working value used for the next forward run; starts at nominal.
        self.value = nominal_value
    def __str__(self):
        return "%s: %s" % (self.__class__, self.__dict__)
    def __repr__(self):
        return "exogenous_port({}, {}, {})".format(self.varname, self.mean, self.std)
class endogenous_port:
    """
    Connection to another component in the network.

    An endogenous port names the nodal field (`field_name`) exchanged across
    a mesh side set (`sideset_name`) with a neighboring component.
    """
    def __init__(self, sideset_name, field_name):
        self.ssname = sideset_name
        self.varname = field_name
    def __str__(self):
        # ": " (with a space) for consistency with exogenous_port.__str__.
        return str(self.__class__) + ": " + str(self.__dict__)
    def __repr__(self):
        return "endogenous_port(%s, %s)" % (self.ssname, self.varname)
class simple_pressio_aria_component(abstract_component.Component):
    """
    Single component in a DDUQ network whose forward problem is an Aria
    (Sierra) model.

    The component wraps an Aria input deck: exogenous (uncertain) parameters
    are injected into the deck with aprepro, the simulation is run with
    sierra/aria, and results are read back from the Exodus output file.
    Endogenous ports exchange nodal field values with neighboring components
    through side sets; polynomial chaos expansions (PCE, via UQTk) represent
    the uncertain interface values.

    NOTE(review): several methods use `dict.keys().index(...)` and implicit
    integer division, which are Python 2 idioms -- under Python 3 `dict_keys`
    has no `.index()`. This class appears to target Python 2.
    """
    def __init__(self, inputdeckfilename, outputfilename, np=1, output_times=None):
        """
        Initialize component object.
        Run initial simulation to make sure output exodus file exists.

        inputdeckfilename: Aria input deck (aprepro template).
        outputfilename: Exodus results file the deck produces.
        np: number of MPI ranks for sierra (shadows the usual numpy alias
            inside this method only).
        output_times: optional shared timeline to interpolate results onto.
        """
        self.inputdeck = inputdeckfilename
        self.outfile = outputfilename
        # NOTE(review): split('.')[:-1][0] keeps only the text before the
        # FIRST dot, so "a.b.e" becomes "a.pce" -- assumes single-extension
        # filenames; confirm.
        self.pce_file = "%s.pce" % outputfilename.split('.')[:-1][0]
        self.endogenous_output_ports = []
        self.exogenous_ports = []
        self.QoIs = []
        self.num_procs = np
        self.required_files = []
        self.pc_model = None
        self.num_endogenous_nodes = 0
        self.num_timesteps = 0
        self.output_times = output_times
        # Run the model once (aprepro "initialize" flag set) if no output
        # file exists yet, so later calls can read mesh/side-set metadata.
        if not os.path.isfile(self.outfile):
            print("initializing component: %s" % inputdeckfilename)
            aprfile = "%s.apr" % self.inputdeck.split('.')[:-1][0]
            with open(os.devnull, 'w') as devnull:
                subprocess.check_output(["aprepro", "initialize=1", self.inputdeck, aprfile], stderr=devnull)
            subprocess.check_output(["sierra", "--pre", "-n", str(self.num_procs), "aria", "-i", aprfile])
            subprocess.check_output(["sierra", "--run", "-n", str(self.num_procs), "aria", "-i", aprfile])
            subprocess.check_output(["rm", aprfile])
        self.required_files.append(inputdeckfilename)
        self.required_files.append(outputfilename)
    def __str__(self):
        return str(self.__class__) + ": " + str(self.__dict__)
    def __call__(self):
        return self.execute()
    def execute(self):
        """
        Perform forward problem.

        Renders the aprepro template with the current exogenous values, runs
        sierra/aria asynchronously, and returns the Popen handle so callers
        can wait on (or poll) the run.
        """
        aprfile = "%s.apr" % self.inputdeck.split('.')[:-1][0]
        # Pass each exogenous parameter to aprepro as name=value.
        apr_command=["aprepro"]
        for port in self.exogenous_ports:
            apr_command.append("%s=%f" % (port.varname, port.value))
        apr_command.append(self.inputdeck)
        apr_command.append(aprfile)
        with open(os.devnull, 'w') as devnull:
            subprocess.check_output(apr_command, stderr=devnull)
        # Decomposition step is only needed for parallel runs.
        if self.num_procs > 1:
            subprocess.check_output(["sierra", "--pre", "-n", str(self.num_procs), "aria", "-i", aprfile])
        with open(os.devnull, 'w') as devnull:
            p = subprocess.Popen(["sierra", "--run", "-n", str(self.num_procs), "aria", "-i", aprfile], stdout=devnull)
        return p
    def add_endogenous_port(self, ssname, varname):
        """
        Add an endogenous port between two aria_component instances
        Port is specified on the *sending* component
        """
        # Validate the side set name against the existing output file.
        with Suppressor():
            e = exodus.exodus(self.outfile, mode='r')
            ssnames = e.get_side_set_names()
            e.close()
        assert ssname in ssnames, "%s not a sideset in %s." % (ssname, self.outfile)
        my_port = endogenous_port(ssname,varname)
        self.endogenous_output_ports.append(my_port)
    def add_exogenous_port(self, varname, nominal_value, uncertainty):
        """
        Specify aprepro variable that will be used as an exogenous input
        """
        self.exogenous_ports.append(exogenous_port(varname, nominal_value, uncertainty))
    def add_QoI(self, varname):
        """
        Specify global variables from exodus output to be treated at QoIs
        """
        # Validate the global variable name against the existing output file.
        with Suppressor():
            e = exodus.exodus(self.outfile, mode='r')
            gnames = e.get_global_variable_names()
            e.close()
        assert varname in gnames, "%s not a global variable in %s." % (varname, self.outfile)
        self.QoIs.append(varname)
    def get_num_endogenous_ports(self):
        """Total endogenous unknowns: interface nodes times timesteps."""
        nodes = self.get_num_endogenous_nodes()
        steps = self.get_num_timesteps()
        return nodes * steps
    def get_num_endogenous_nodes(self):
        """Number of interface nodes, computed lazily on first use."""
        # get_endogenous_data() populates self.num_endogenous_nodes.
        if (self.num_endogenous_nodes == 0) and (len(self.endogenous_output_ports) > 0):
            self.get_endogenous_data()
        return self.num_endogenous_nodes
    def get_num_timesteps(self):
        """Number of output timesteps, computed lazily on first use."""
        if self.num_timesteps == 0:
            self.num_timesteps = len(self.get_solution_times())
        return self.num_timesteps
    def set_solution_times(self, times):
        """Impose a shared output timeline (used to sync components)."""
        self.output_times = times
        self.num_timesteps = len(times)
    def get_solution_times(self):
        """Return the shared timeline if set, else the file's timesteps."""
        if self.output_times:
            print("using self.output_times")
            times = self.output_times
        else:
            print("no output times found, reading from exodus")
            with Suppressor():
                e = exodus.exodus(self.outfile, mode='r')
                times = e.get_times()
                e.close()
        return times
    def get_num_exogenous_ports(self):
        """Number of uncertain (aprepro) input parameters."""
        return len(self.exogenous_ports)
    def get_output_filename(self):
        """Path of the Exodus results file."""
        return self.outfile
    def get_exogenous_ports(self):
        """List of exogenous_port objects."""
        return self.exogenous_ports
    def get_num_QoI(self):
        """Number of registered quantities of interest."""
        return len(self.QoIs)
    def get_QoIs(self):
        """Names of registered quantities of interest."""
        return self.QoIs
    def get_num_pc_terms(self):
        """Number of terms in the PC basis (requires generate_pc_model)."""
        return self.pc_model.GetNumberPCTerms()
    def get_pc_model(self):
        """The UQTk PCSet object, or None before generate_pc_model()."""
        return self.pc_model
    def get_num_pc_coeffs(self):
        """Total PC coefficients across all endogenous unknowns."""
        return self.get_num_pc_terms()*self.get_num_endogenous_ports()
    def get_endogenous_data(self):
        """
        Retrieve my output data at endogenous nodes.

        Returns a list with one {node_id: value} dict per (timestep, port),
        ordered timestep-major. Also refreshes self.num_timesteps and
        self.num_endogenous_nodes as a side effect.
        """
        # Re-interpolate the output onto the shared timeline first, if any.
        if self.output_times:
            sync_times.interpolate_to_timeline(self.outfile, self.outfile+".tmp", self.output_times)
            os.rename(self.outfile+".tmp", self.outfile)
        with Suppressor():
            e = exodus.exodus(self.outfile, mode='r')
        ss_ids = e.get_side_set_ids()
        ss_names = e.get_side_set_names()
        dictionary = dict(zip(ss_names, ss_ids))
        # Get list of time steps for which to provide data
        times = e.get_times()
        self.num_timesteps = len(times)
        vals=[]
        for timestep in range(self.num_timesteps):
            self.num_endogenous_nodes = 0
            for port in self.endogenous_output_ports:
                endogenous_vals = {}
                ssid = e.get_side_set_node_list(dictionary[port.ssname])[1]
                # Exodus node variables are 1-based; timestep+1 likewise.
                nodal_values = e.get_node_variable_values(port.varname,timestep+1)
                side_set_unique_node_ids = set(ssid)
                for nid in side_set_unique_node_ids:
                    endogenous_vals[nid] = nodal_values[nid-1]
                vals.append(endogenous_vals)
                self.num_endogenous_nodes += len(endogenous_vals)
        with Suppressor():
            e.close()
        return vals
    def get_all_data(self, varname, filename=None):
        """
        Retrieve my output data at all nodes.

        Returns one array of nodal values per timestep for `varname`, read
        from `filename` (defaults to this component's output file).
        """
        if filename==None:
            filename = self.outfile
        with Suppressor():
            e = exodus.exodus(filename, mode='r')
        # Get list of time steps for which to provide data
        times = e.get_times()
        self.num_timesteps = len(times)
        vals=[]
        for timestep in range(self.num_timesteps):
            nodal_values = e.get_node_variable_values(varname,timestep+1)
            vals.append(nodal_values)
        e.close()
        return vals
    def get_QoI_data(self):
        """Return {QoI name: time history of the global variable}."""
        with Suppressor():
            e = exodus.exodus(self.outfile, mode='r')
        QoI_vals = {}
        for QoI in self.QoIs:
            QoI_vals[QoI] = e.get_global_variable_values(QoI)
        with Suppressor():
            e.close()
        return QoI_vals
    def get_required_files(self):
        """Files this component needs to run (deck, output, extras)."""
        return self.required_files
    def add_required_files(self, files):
        """Register additional required files, skipping duplicates."""
        for f in files:
            if f not in self.required_files:
                self.required_files.append(f)
    def generate_pc_model(self, pce_dim, nord=3, pc_type="HG", pc_alpha=0.0, pc_beta=1.0, quadtype='full'):
        """
        Wrapper for uqtk PCSet with default values

        pce_dim: stochastic dimension; nord: polynomial order; pc_type:
        basis family (e.g. "HG" Hermite-Gauss); quadtype: quadrature rule.
        """
        param = nord+1 # Parameter for quadrature point generation. Equal to number of quad points per dimension for full quadrature
        self.pc_model = uqtkpce.PCSet("NISP", nord,pce_dim,pc_type, pc_alpha,pc_beta)
        self.pc_model.SetQuadRule(pc_type, quadtype, param)
    def initialize_PCE(self):
        """
        Build the initial (num_endogenous_ports x num_pc_terms) coefficient
        matrix: restart from self.pce_file when it exists, otherwise use the
        deterministic solution as the mean (column 0) with zero higher modes.
        """
        if os.path.isfile(self.pce_file):
            # Read initial PCE values from exodus file
            my_endo_pce_coeffs = np.zeros(( self.get_num_endogenous_ports(), self.get_num_pc_terms() ))
            varnames = []
            for coeff_idx in range(self.get_num_pc_terms()):
                varnames.append('PCE_%d' % coeff_idx)
            e = exodus.exodus(self.pce_file, mode='r')
            ss_ids = e.get_side_set_ids()
            ss_names = e.get_side_set_names()
            dictionary = dict(zip(ss_names, ss_ids))
            # Get list of nodes for which to provide data
            #TODO: This likely broken from port change
            all_side_set_node_ids = []
            for port in self.endogenous_output_ports:
                side_set_node_ids = e.get_side_set_node_list(dictionary[port.ssname])[1]
                all_side_set_node_ids.append(side_set_node_ids)
            endo_map = self.get_endogenous_data()
            for timestep, node_map in enumerate(endo_map):
                print("timestep: %d" % timestep)
                for coeff_idx in range(self.get_num_pc_terms()):
                    varname = varnames[coeff_idx]
                    nodal_values = e.get_node_variable_values(varname,1)
                    for ssid in all_side_set_node_ids:
                        side_set_unique_node_ids = set(ssid)
                        for nid in side_set_unique_node_ids:
                            # NOTE(review): keys().index() is Python-2-only.
                            index = timestep*self.num_endogenous_nodes + node_map.keys().index(nid)
                            my_endo_pce_coeffs[index,coeff_idx] = nodal_values[nid-1]
            e.close()
        else:
            # Cold start: mean column only, taken from the current solution.
            endo_init = self.get_endogenous_data()
            my_endo_pce_coeffs = np.zeros(( self.get_num_endogenous_ports(), self.get_num_pc_terms() ))
            index = 0
            for timestep in range(self.num_timesteps):
                for portid,port in enumerate(self.endogenous_output_ports):
                    nodal_data = endo_init[timestep*len(self.endogenous_output_ports) + portid]
                    for nid in nodal_data:
                        my_endo_pce_coeffs[index,0] = nodal_data[nid]
                        index += 1
        return my_endo_pce_coeffs
    def GalerkinProjection(self,f_evaluations):
        """
        Obtain PC coefficients by Galerkin Projection
        Input:
            f_evaluations: 1D numpy array (vector) with function to be projected,
                           evaluated at the quadrature points
        Output:
            Numpy array with PC coefficients
        """
        # Get parameters
        if len(f_evaluations.shape) > 1:
            print("This function can only project single variables for now")
            exit(1)
        npce = self.pc_model.GetNumberPCTerms()
        nqp = f_evaluations.shape[0] # Number of quadrature points
        # UQTk array for PC coefficients for one variable
        c_k_1d_uqtk = uqtkarray.dblArray1D(npce,0.0)
        # UQTk array for function evaluations at quadrature points for that variable
        f_uqtk = uqtkarray.dblArray1D(nqp,0.0)
        for ipt in range(nqp):
            f_uqtk[ipt]=f_evaluations[ipt]
        # Galerkin Projection
        self.pc_model.GalerkProjection(f_uqtk,c_k_1d_uqtk)
        # Put PC coefficients in numpy array
        c_k = np.zeros(npce)
        for ip in range(npce):
            c_k[ip] = c_k_1d_uqtk[ip]
        # Return numpy array of PC coefficients
        return c_k
    def evaluate_pce(self, pc_coeffs,germ_samples):
        """
        Evaluate PCE at a set of samples of the germ of this PCE
        Input:
            pc_coeffs: 1D numpy array with PC coefficients of the RVs to be evaluated.
                       Each column corresponds to one RV.
            germ_samples: numpy array with samples of the PCE germ at which the RVs
                          are to be evaluated. Each line is one sample. The number
                          of columns is the number of RVs.
        Output:
            Numpy array with PCE evaluations
        """
        # Get data set dimensions etc.
        n_test_samples = germ_samples.shape[0]
        ndim = germ_samples.shape[1]
        npce = self.pc_model.GetNumberPCTerms()
        # Put PC germ samples in a UQTk array
        std_samples_uqtk = uqtkarray.dblArray2D(n_test_samples, ndim)
        std_samples_uqtk.setnpdblArray(np.asfortranarray(germ_samples))
        # Numpy array to store all RVs evaluated from sampled PCEs
        rvs_sampled = np.zeros(n_test_samples)
        # Evaluate PCE for RVs in each dimension
        # Create and fill UQTk array for PC coefficients
        c_k_1d_uqtk = uqtkarray.dblArray1D(npce,0.0)
        for ip in range(npce):
            c_k_1d_uqtk[ip] = pc_coeffs[ip]
        # Create UQTk array to store outputs in
        rv_from_pce_uqtk = uqtkarray.dblArray1D(n_test_samples,0.0)
        # Evaluate the PCEs for each input RV at those random samples
        self.pc_model.EvalPCAtCustPoints(rv_from_pce_uqtk,std_samples_uqtk,c_k_1d_uqtk)
        # Put evaluated samples in numpy array
        for isamp in range(n_test_samples):
            rvs_sampled[isamp] = rv_from_pce_uqtk[isamp]
        # Return numpy array of PCE evaluations
        return rvs_sampled
    def save_nodal_pce(self, pc_coeffs, meshfilename, outputfilename):
        """
        Write the PC coefficients as nodal variables PCE_0..PCE_{n-1} on a
        copy of `meshfilename`, one record per output timestep.
        """
        if os.path.isfile(outputfilename): os.remove(outputfilename)
        print("Save nodal PCE %s" % outputfilename)
        times = self.get_solution_times()
        varnames = []
        for coeff_idx in range(pc_coeffs.shape[1]):
            varnames.append('PCE_%d' % coeff_idx)
        e = exodus.copy_mesh(meshfilename, outputfilename)
        e.close()
        e = exodus.exodus(outputfilename, mode='a')
        exodus.add_variables(e, nodal_vars=varnames)
        # NOTE(review): relies on Python 2 integer division; under Python 3
        # this is a float and range(numnodes) below would raise.
        numnodes = pc_coeffs.shape[0]/self.num_timesteps
        for timestep in range(self.num_timesteps):
            for coeff_idx in range(pc_coeffs.shape[1]):
                varname = varnames[coeff_idx]
                nodal_values = e.get_node_variable_values(varname,1)
                for nidx in range(numnodes):
                    # Rows of pc_coeffs are timestep-major over nodes.
                    index = timestep*numnodes + nidx
                    nodal_values[nidx] = pc_coeffs[index,coeff_idx]
                e.put_node_variable_values(varname,timestep+1,nodal_values)
            e.put_time(timestep+1,times[timestep])
        e.close()
    def set_all_endogenous_values(self, pc_coeffs, germ):
        """
        Sample polynomial chaos expansion for endogenous values at germ
        Assign those values to the nodes on the exodus mesh
        """
        endo_map = self.get_endogenous_data()
        with Suppressor():
            e = exodus.exodus(self.outfile, mode='a')
        ss_ids = e.get_side_set_ids()
        ss_names = e.get_side_set_names()
        dictionary = dict(zip(ss_names, ss_ids))
        index = 0
        for timestep in range(self.num_timesteps):
            for portid,port in enumerate(self.endogenous_output_ports):
                node_map = endo_map[timestep*len(self.endogenous_output_ports) + portid]
                nodal_values = e.get_node_variable_values(port.varname,timestep+1)
                ssid = e.get_side_set_node_list(dictionary[port.ssname])[1]
                side_set_unique_node_ids = set(ssid)
                for nid in side_set_unique_node_ids:
                    # NOTE(review): keys().index() is Python-2-only.
                    idx = index + node_map.keys().index(nid)
                    endo_val = self.evaluate_pce(pc_coeffs[idx,...],germ)
                    nodal_values[nid-1] = endo_val
                index += len(side_set_unique_node_ids)
                e.put_node_variable_values(port.varname,timestep+1,nodal_values)
        with Suppressor():
            e.close()
    def set_port_endogenous_values(self, pc_coeffs, germ, portid):
        """
        Sample polynomial chaos expansion for endogenous values at germ
        Assign those values to the nodes on the exodus mesh

        Only the single endogenous port selected by `portid` is updated.
        """
        endo_map = self.get_endogenous_data()
        with Suppressor():
            e = exodus.exodus(self.outfile, mode='a')
        ss_ids = e.get_side_set_ids()
        ss_names = e.get_side_set_names()
        dictionary = dict(zip(ss_names, ss_ids))
        # Get list of nodes
        ssname = self.endogenous_output_ports[portid].ssname
        varname = self.endogenous_output_ports[portid].varname
        side_set_node_ids = e.get_side_set_node_list(dictionary[ssname])[1]
        for timestep, node_map in enumerate(endo_map):
            nodal_values = e.get_node_variable_values(varname,timestep+1)
            ssid = e.get_side_set_node_list(dictionary[ssname])[1]
            side_set_unique_node_ids = set(ssid)
            for nid in side_set_unique_node_ids:
                # NOTE(review): keys().index() is Python-2-only.
                index = timestep*self.num_endogenous_nodes + node_map.keys().index(nid)
                endo_val = self.evaluate_pce(pc_coeffs[index,...],germ)
                nodal_values[nid-1] = endo_val
            e.put_node_variable_values(varname,timestep+1,nodal_values)
        with Suppressor():
            e.close()
    def set_port_endogenous_values_experimental(self, pc_coeffs, germ, portid):
        """
        Sample polynomial chaos expansion for endogenous values at germ
        Assign those values to the nodes on the exodus mesh

        WARNING(review): this experimental variant is currently broken:
          * it reads `self.output_variable`, which is never assigned in
            this class (AttributeError when called);
          * the loop binds `nindix` but indexes with `nindex` (NameError).
        Prefer set_port_endogenous_values() until this is repaired.
        """
        endo_map = self.get_endogenous_data()
        with Suppressor():
            e = exodus.exodus(self.outfile, mode='a')
        ss_ids = e.get_side_set_ids()
        ss_names = e.get_side_set_names()
        dictionary = dict(zip(ss_names, ss_ids))
        # Get list of nodes
        ssname = self.endogenous_output_ports[portid].ssname
        side_set_node_ids = e.get_side_set_node_list(dictionary[ssname])[1]
        for timestep, node_map in enumerate(endo_map):
            print(e.get_side_set_variable_names())
            #nodal_values = e.get_node_variable_values(self.output_variable,timestep+1)
            nodal_values = e.get_side_set_variable_values(dictionary[ssname], self.output_variable,timestep+1)
            side_set_unique_node_ids = set(side_set_node_ids)
            for nindix, nid in enumerate(side_set_unique_node_ids):
                # NOTE(review): keys().index() is Python-2-only.
                index = timestep*self.num_endogenous_nodes + node_map.keys().index(nid)
                endo_val = self.evaluate_pce(pc_coeffs[index,...],germ)
                # BUG(review): `nindex` is undefined; loop variable is `nindix`.
                nodal_values[nindex] = endo_val
            #e.put_node_variable_values(self.output_variable,timestep+1,nodal_values)
            e.put_side_set_variable_values(dictionary[ssname], self.output_variable, timestep+1, nodal_values)
        with Suppressor():
            e.close()
    def get_quadpts(self,ndim):
        """
        Generates quadrature points
        Input:
            ndim: number of dimensions of the PCE
        Output:
            qdpts: numpy array of quadrature points
            totquat: total number of quadrature points
        """
        # Get the quadrature points
        qdpts_uqtk = uqtkarray.dblArray2D()
        self.pc_model.GetQuadPoints(qdpts_uqtk)
        totquat = self.pc_model.GetNQuadPoints() # Total number of quadrature points
        # Convert quad points to a numpy array
        qdpts = np.zeros((totquat,ndim))
        qdpts_uqtk.getnpdblArray(qdpts)
        return qdpts, totquat
| 37.541367 | 132 | 0.620802 |
acf2f4249906de1c5ce433cc26a43892f9b2eeb4 | 4,676 | py | Python | src/kobo/main.py | rchiechi/FeedstoKobo | 003a08cf253dabf6f0cc795e5109863d9c4830a1 | [
"MIT"
] | 1 | 2021-04-20T07:28:15.000Z | 2021-04-20T07:28:15.000Z | src/kobo/main.py | rchiechi/FeedstoKobo | 003a08cf253dabf6f0cc795e5109863d9c4830a1 | [
"MIT"
] | null | null | null | src/kobo/main.py | rchiechi/FeedstoKobo | 003a08cf253dabf6f0cc795e5109863d9c4830a1 | [
"MIT"
] | null | null | null | '''
Crawl RSS feeds and either send them to Pocket or render simple
html views, create PDFs of those views and upload them to Dropbox.
The PDFs are formatted to read on a Kobo e-reader.
'''
import sys
import os
import time
import logging
import logging.config
from .options import parseopts
from .dosubstack import DoSubstack
from .cache import Cache
from .pocket import DoPocket
from .dropbox import DoDropbox
from .util import cleancache, configtodict
try:
import colorama as cm
except ImportError as msg:
print("Error loading pacakge %s" , str(msg))
sys.exit()
opts, config = parseopts()
# Decide how we were launched: an attached TTY means a developer is running
# the script by hand; no TTY means we were started by cron.
if os.isatty(sys.stdin.fileno()):
    # Debug mode.
    CRONMODE=False
else:
    # Cron mode.
    CRONMODE=True
if CRONMODE:
    # CRONMODE is going to write to a log file, so no color
    opts.nocolor = True
if not opts.nocolor:
    cm.init(autoreset=True)
# Set up terminal logging. Set LOG to a file for cronmode, otherwise
# colorful terminal output.
logger = logging.getLogger(__package__)
logger.setLevel(getattr(logging, opts.logging.upper()))
if CRONMODE:
    # Log file name is derived from the script name, e.g. "feeds.txt".
    loghandler = logging.FileHandler(os.path.join(opts.logdir,
                                     os.path.basename(sys.argv[0])
                                     .split('.')[0]+'.txt'))
else:
    loghandler = logging.StreamHandler()
if opts.nocolor:
    loghandler.setFormatter(logging
                            .Formatter('%(asctime)s %(process)d %(levelname)s %(message)s'))
else:
    loghandler.setFormatter(logging
                            .Formatter(cm.Fore.CYAN+'%(levelname)s '
                                       +cm.Fore.YELLOW+'%(message)s'
                                       +cm.Style.RESET_ALL))
logger.addHandler(loghandler)
############################################################
# Now we can use the logger #
############################################################
if config is None:
    logger.error("No config file found.")
    logger.info("Default file created at %s", opts.configdir)
    sys.exit()
cache = Cache(opts)
# Optional cache maintenance driven by command-line flags.
if opts.clean:
    cleancache(cache, config)
if opts.reset:
    if not opts.cacheonly:
        # Give the user a chance to Ctrl-C before wiping the link cache.
        logger.warning('Reseting cache without --cacheonly is dangerous.')
        time.sleep(5)
    cache.reset('links')
if opts.dedupe:
    cache.dedupe()
logger.info("%s links in cache", len(cache.get('links')))
try:
    # For pdfkit?
    os.environ['XDG_RUNTIME_DIR'] = os.path.join('/tmp',
                                                 'runtime-%s' % os.environ['USER'])
except KeyError as msg:
    logger.warning("Couldn't set XDG_RUNTIME_DIR. %s" , str(msg))
# Our three main classes for pocket, dropbox and substack
pocket = DoPocket(cache, opts, config)
dropbox = DoDropbox(opts, config)
substack = DoSubstack(opts, config, cache)
def pocketloop():
    """Crawl the configured RSS feeds and cache their links to Pocket."""
    logger.info("Starting RSS run.")
    results = pocket.rsstopocket(list(config['RSS FEEDS']))
    logger.info("Cached %s urls to pocket.", results.count(True))
    # Persist the cache when at least one URL was added, or when we are
    # running in cache-only mode.
    anything_new = True in results
    if anything_new or opts.cacheonly:
        cache.save()
def substackloop():
    '''Substacks rendered to html on Morty
    send to Pocket rendered to PDFs
    and uploaded to Dropbox'''
    logger.info("Starting Substack run.")
    # Merge the user's [PDFOPTIONS] config section over the DoDropbox defaults.
    pdfopts = configtodict(config['PDFOPTIONS'],
                           DoDropbox.PDFOPTIONS)
    ss_cached = []
    for _ss in config['SUBSTACKS']:
        # Per-substack settings, seeded from global USEROPTS / PDF options.
        _f = {'domain': config['SUBSTACKS'][_ss],
              'fontsize': pdfopts['minimum-font-size'],
              'login': config['USEROPTS']['SSLOGIN'],
              'password': config['USEROPTS']['SSPASS'],
              'subdir': _ss
              }
        # A config section named after the substack overrides the defaults
        # for these three keys only.
        if _ss in config.sections():
            for _key in ('fontsize', 'login', 'password'):
                if _key in config[_ss]:
                    _f[_key] = config[_ss][_key]
        pdfs = substack.parse_ss_entry(_f)
        for _uri, _pdf_uri, _title in pdfs:
            # A None _pdf_uri means this entry produced no PDF; skip it.
            if _pdf_uri is not None:
                pocket.savetopocket(_f['domain'], _uri, _title)
            if not opts.cacheonly and _pdf_uri is not None:
                logger.debug("Attempting to upload %s from %s to dropbox."
                             , _pdf_uri, _f['domain'])
                # pdftodropbox returns a truthiness flag collected below.
                ss_cached.append(dropbox.pdftodropbox(_pdf_uri,
                                 pdfopts, _f['fontsize']))
    logger.info("Cached %s substacks to Dropbox.", ss_cached.count(True))
    # Save when anything uploaded successfully, or in cache-only mode.
    if True in ss_cached or opts.cacheonly:
        cache.save()
    if False in ss_cached:
        logger.warning("There were errors uploading PDFs to dropbox.")
    substack.cleanup()
    # Optionally prune old PDFs from Dropbox, then tidy up and finish.
    if opts.prunedropbox:
        dropbox.prunedropbox(opts.prunedropbox)
    dropbox.cleanup()
    logger.info("#### Done ####")
| 32.248276 | 80 | 0.610992 |
acf2f42d15c245057f9b688583714ad8ff0ce928 | 186 | py | Python | ITP449_HW01_XU_YANYU/ITP449_HW01_Q1_Xu_Yanyu.py | chloexu310/ITP | 7e5b842f14249ac80333b8c1ad1ecd002d72bd3f | [
"MIT"
] | null | null | null | ITP449_HW01_XU_YANYU/ITP449_HW01_Q1_Xu_Yanyu.py | chloexu310/ITP | 7e5b842f14249ac80333b8c1ad1ecd002d72bd3f | [
"MIT"
] | null | null | null | ITP449_HW01_XU_YANYU/ITP449_HW01_Q1_Xu_Yanyu.py | chloexu310/ITP | 7e5b842f14249ac80333b8c1ad1ecd002d72bd3f | [
"MIT"
] | null | null | null | #Yanyu Xu
#ITP_449, Spring 2020
#HW01
#Question 1
def main():
    """Prompt for a password and report how many characters it contains."""
    password = input("Enter your password:")
    print("Your password is", len(password), "characters long")
main()
acf2f609e786cc1436d7902aae5c0e8de8233630 | 4,650 | py | Python | frappe/integrations/doctype/webhook/test_webhook.py | maheshghadage/frappe-praman | 276df54479ec0bd9a665924ef94120864fa0931b | [
"MIT"
] | 1 | 2020-11-13T23:19:25.000Z | 2020-11-13T23:19:25.000Z | frappe/integrations/doctype/webhook/test_webhook.py | maheshghadage/frappe-praman | 276df54479ec0bd9a665924ef94120864fa0931b | [
"MIT"
] | 5 | 2021-04-28T06:55:26.000Z | 2022-02-10T07:59:06.000Z | frappe/integrations/doctype/webhook/test_webhook.py | maheshghadage/frappe-praman | 276df54479ec0bd9a665924ef94120864fa0931b | [
"MIT"
] | 2 | 2021-05-06T06:14:40.000Z | 2021-05-06T10:05:29.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies and Contributors
# See license.txt
from __future__ import unicode_literals
import unittest
import frappe
from frappe.integrations.doctype.webhook.webhook import get_webhook_headers, get_webhook_data
class TestWebhook(unittest.TestCase):
    """Integration tests for the Webhook DocType: trigger queueing plus
    validation of doc events, URLs, headers, and request bodies."""
    @classmethod
    def setUpClass(cls):
        # delete any existing webhooks
        frappe.db.sql("DELETE FROM tabWebhook")
        # create test webhooks
        cls.create_sample_webhooks()
    @classmethod
    def create_sample_webhooks(cls):
        # One enabled and one disabled User/after_insert webhook, so the
        # trigger test can verify that only the enabled hook is queued.
        samples_webhooks_data = [
            {
                "webhook_doctype": "User",
                "webhook_docevent": "after_insert",
                "request_url": "https://httpbin.org/post",
                "condition": "doc.email",
                "enabled": True
            },
            {
                "webhook_doctype": "User",
                "webhook_docevent": "after_insert",
                "request_url": "https://httpbin.org/post",
                "condition": "doc.first_name",
                "enabled": False
            }
        ]
        cls.sample_webhooks = []
        for wh_fields in samples_webhooks_data:
            wh = frappe.new_doc("Webhook")
            wh.update(wh_fields)
            wh.insert()
            cls.sample_webhooks.append(wh)
    @classmethod
    def tearDownClass(cls):
        # delete any existing webhooks
        frappe.db.sql("DELETE FROM tabWebhook")
    def setUp(self):
        # retrieve or create a User webhook for `after_insert`
        webhook_fields = {
            "webhook_doctype": "User",
            "webhook_docevent": "after_insert",
            "request_url": "https://httpbin.org/post"
        }
        if frappe.db.exists("Webhook", webhook_fields):
            self.webhook = frappe.get_doc("Webhook", webhook_fields)
        else:
            self.webhook = frappe.new_doc("Webhook")
            self.webhook.update(webhook_fields)
        # create a User document
        self.user = frappe.new_doc("User")
        self.user.first_name = frappe.mock("name")
        self.user.email = frappe.mock("email")
        self.user.save()
        # Create another test user specific to this test
        # (not saved here; the trigger test inserts it itself).
        self.test_user = frappe.new_doc("User")
        self.test_user.email = "user1@integration.webhooks.test.com"
        self.test_user.first_name = "user1"
    def tearDown(self) -> None:
        self.user.delete()
        self.test_user.delete()
        super().tearDown()
    def test_webhook_trigger_with_enabled_webhooks(self):
        """Test webhook trigger for enabled webhooks"""
        # Clear cached webhook registry so the insert below rebuilds it.
        frappe.cache().delete_value('webhooks')
        frappe.flags.webhooks = None
        # Insert the user to db
        self.test_user.insert()
        self.assertTrue("User" in frappe.flags.webhooks)
        # only 1 hook (enabled) must be queued
        self.assertEqual(
            len(frappe.flags.webhooks.get("User")),
            1
        )
        self.assertTrue(self.test_user.email in frappe.flags.webhooks_executed)
        self.assertEqual(
            frappe.flags.webhooks_executed.get(self.test_user.email)[0],
            self.sample_webhooks[0].name
        )
    def test_validate_doc_events(self):
        "Test creating a submit-related webhook for a non-submittable DocType"
        self.webhook.webhook_docevent = "on_submit"
        self.assertRaises(frappe.ValidationError, self.webhook.save)
    def test_validate_request_url(self):
        "Test validation for the webhook request URL"
        # Missing scheme -- must be rejected.
        self.webhook.request_url = "httpbin.org?post"
        self.assertRaises(frappe.ValidationError, self.webhook.save)
    def test_validate_headers(self):
        "Test validation for request headers"
        # test incomplete headers
        self.webhook.set("webhook_headers", [{
            "key": "Content-Type"
        }])
        self.webhook.save()
        headers = get_webhook_headers(doc=None, webhook=self.webhook)
        self.assertEqual(headers, {})
        # test complete headers
        self.webhook.set("webhook_headers", [{
            "key": "Content-Type",
            "value": "application/json"
        }])
        self.webhook.save()
        headers = get_webhook_headers(doc=None, webhook=self.webhook)
        self.assertEqual(headers, {"Content-Type": "application/json"})
    def test_validate_request_body_form(self):
        "Test validation of Form URL-Encoded request body"
        self.webhook.request_structure = "Form URL-Encoded"
        self.webhook.set("webhook_data", [{
            "fieldname": "name",
            "key": "name"
        }])
        self.webhook.webhook_json = """{
        "name": "{{ doc.name }}"
        }"""
        self.webhook.save()
        # Saving a Form URL-Encoded webhook must discard the JSON template.
        self.assertEqual(self.webhook.webhook_json, None)
        data = get_webhook_data(doc=self.user, webhook=self.webhook)
        self.assertEqual(data, {"name": self.user.name})
    def test_validate_request_body_json(self):
        "Test validation of JSON request body"
        self.webhook.request_structure = "JSON"
        self.webhook.set("webhook_data", [{
            "fieldname": "name",
            "key": "name"
        }])
        self.webhook.webhook_json = """{
        "name": "{{ doc.name }}"
        }"""
        self.webhook.save()
        # Saving a JSON webhook must discard the form-style webhook_data rows.
        self.assertEqual(self.webhook.webhook_data, [])
        data = get_webhook_data(doc=self.user, webhook=self.webhook)
        self.assertEqual(data, {"name": self.user.name})
| 27.844311 | 93 | 0.714409 |
acf2f63aa44f1123e83a5f43f07a002e059a4117 | 3,955 | py | Python | lib/interface.py | yuri20198/webcam-pulse-detector | 0a12d18ccd7a9948f9b75848869a14dbe0b9f9c0 | [
"Apache-2.0"
] | 1 | 2022-01-06T04:47:43.000Z | 2022-01-06T04:47:43.000Z | lib/interface.py | LivingInABubble/webcam-pulse-detector | 0a12d18ccd7a9948f9b75848869a14dbe0b9f9c0 | [
"Apache-2.0"
] | null | null | null | lib/interface.py | LivingInABubble/webcam-pulse-detector | 0a12d18ccd7a9948f9b75848869a14dbe0b9f9c0 | [
"Apache-2.0"
] | null | null | null | import cv2
import numpy as np
"""
Wraps up some interfaces to opencv user interface methods (displaying
image frames, event handling, etc).
If desired, an alternative UI could be built and imported into get_pulse.py instead.
Opencv is used to perform much of the data analysis, but there is no reason it has to be used to handle the UI as well.
It just happens to be very effective for our purposes.
The rest of this file defines some GUI plotting functionality. There are plenty
of other ways to do simple x-y data plots in python, but this application uses
cv2.imshow to do real-time data plotting and handle user interaction.
This is entirely independent of the data calculation functions, so it can be
replaced in the get_pulse.py application easily.
"""
def combine(left, right):
"""Stack images horizontally.
"""
h = max(left.shape[0], right.shape[0])
w = left.shape[1] + right.shape[1]
# hoff = left.shape[0]
shape = list(left.shape)
shape[0] = h
shape[1] = w
comb = np.zeros(tuple(shape), left.dtype)
# left will be on left, aligned top, with right on right
comb[:left.shape[0], :left.shape[1]] = left
comb[:right.shape[0], left.shape[1]:] = right
return comb
def plotXY(data, size=(280, 640), margin=25, name="data", labels=None, skip=None,
showmax=None, bg=None, label_ndigits=None, showmax_digits=None):
if showmax_digits is None:
showmax_digits = []
if showmax is None:
showmax = []
if skip is None:
skip = []
if labels is None:
labels = []
if label_ndigits is None:
label_ndigits = []
for x, y in data:
if len(x) < 2 or len(y) < 2:
return
n_plots = len(data)
w = float(size[1])
h = size[0] / float(n_plots)
z = np.zeros((size[0], size[1], 3))
if isinstance(bg, np.ndarray):
wd = int(bg.shape[1] / bg.shape[0] * h)
bg = cv2.resize(bg, (wd, int(h)))
if len(bg.shape) == 3:
r = combine(bg[:, :, 0], z[:, :, 0])
g = combine(bg[:, :, 1], z[:, :, 1])
b = combine(bg[:, :, 2], z[:, :, 2])
else:
r = combine(bg, z[:, :, 0])
g = combine(bg, z[:, :, 1])
b = combine(bg, z[:, :, 2])
z = cv2.merge([r, g, b])[:, :-wd, ]
i = 0
P = []
for x, y in data:
x = np.array(x)
y = -np.array(y)
xx = (w - 2 * margin) * (x - x.min()) / (x.max() - x.min()) + margin
yy = (h - 2 * margin) * (y - y.min()) / (y.max() - y.min()) + margin + i * h
mx = max(yy)
if labels:
if labels[i]:
for ii in range(len(x)):
if ii % skip[i] == 0:
col = (255, 255, 255)
ss = '{0:.%sf}' % label_ndigits[i]
ss = ss.format(x[ii])
cv2.putText(z, ss, (int(xx[ii]), int((i + 1) * h)),
cv2.FONT_HERSHEY_PLAIN, 1, col)
if showmax:
if showmax[i]:
col = (0, 255, 0)
ii = np.argmax(-y)
ss = '{0:.%sf} %s' % (showmax_digits[i], showmax[i])
ss = ss.format(x[ii])
# "%0.0f %s" % (x[ii], showmax[i])
cv2.putText(z, ss, (int(xx[ii]), int((yy[ii]))),
cv2.FONT_HERSHEY_PLAIN, 2, col)
try:
pts = np.array([[x_, y_] for x_, y_ in zip(xx, yy)], np.int32)
i += 1
P.append(pts)
except ValueError:
pass # temporary
"""
#Polylines seems to have some trouble rendering multiple polys for some people
for p in P:
cv2.polylines(z, [p], False, (255,255,255),1)
"""
# hack-y alternative:
for p in P:
for i in range(len(p) - 1):
cv2.line(z, tuple(p[i]), tuple(p[i + 1]), (255, 255, 255), 1)
cv2.imshow(name, z)
| 32.958333 | 120 | 0.51378 |
acf2f6aa4dc848b2c7b4241c22e49b56af28e6c4 | 6,357 | py | Python | tests/build_/test_pod_response.py | ssalatsk/osbs-client | 592e5d9bbf22dedbe6cde4f45a678f0de96f8475 | [
"BSD-3-Clause"
] | null | null | null | tests/build_/test_pod_response.py | ssalatsk/osbs-client | 592e5d9bbf22dedbe6cde4f45a678f0de96f8475 | [
"BSD-3-Clause"
] | null | null | null | tests/build_/test_pod_response.py | ssalatsk/osbs-client | 592e5d9bbf22dedbe6cde4f45a678f0de96f8475 | [
"BSD-3-Clause"
] | 1 | 2020-09-23T16:19:33.000Z | 2020-09-23T16:19:33.000Z | """
Copyright (c) 2017 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import unicode_literals, absolute_import
from copy import deepcopy
import pytest
from osbs.build.pod_response import PodResponse
class TestPodResponse(object):
    """Unit tests for PodResponse: extracting container image IDs and
    deriving a human-readable failure reason from pod JSON."""
    # Minimal pod JSON used as the base fixture; tests deep-copy it and
    # overlay their own 'status' content.
    # NOTE(review): 'phase' here sits beside 'status' rather than inside it;
    # in Kubernetes pod objects phase normally lives under status -- confirm
    # this placement is intentional for these tests.
    GENERIC_POD_JSON = {
        'apiVersion': 'v1',
        'kind': 'Pod',
        'metadata': {
            'name': 'foo',
        },
        'spec': {
            'containers': [
                {
                    'image': 'foo',
                    'name': 'custom-build',
                },
            ],
        },
        'status': {},
        'phase': 'Succeeded',
    }
    @pytest.mark.parametrize('expect_image_ids,container_statuses', [
        # No containerStatuses
        ({}, None),
        # Empty containerStatuses
        ({}, []),
        # No prefix
        ({'image': 'no-prefix'},
         [{
             'image': 'image',
             'imageID': 'no-prefix',
         }]),
        # Normal case
        ({'image1': 'imageID1', 'image2': 'imageID2'},
         [
             {
                 'image': 'image1',
                 'imageID': 'docker://imageID1',
             },
             {
                 'image': 'image2',
                 'imageID': 'docker://imageID2',
             },
         ]),
        # New normal case
        ({'image3': 'imageID3', 'image4': 'imageID4'},
         [
             {
                 'image': 'image3',
                 'imageID': 'docker-pullable://imageID3',
             },
             {
                 'image': 'image4',
                 'imageID': 'docker-pullable://imageID4',
             },
         ]),
    ])
    def test_container_image_ids(self, expect_image_ids, container_statuses):
        # image -> imageID mapping, with any docker:// / docker-pullable://
        # prefix stripped by get_container_image_ids().
        pod_json = deepcopy(self.GENERIC_POD_JSON)
        if container_statuses is not None:
            pod_json['status']['containerStatuses'] = container_statuses
        pod_response = PodResponse(pod_json)
        image_ids = pod_response.get_container_image_ids()
        assert image_ids == expect_image_ids
    @pytest.mark.parametrize('expected_reason,pod_status', [
        # No container statuses but a pod message
        ({'reason': 'too cold'},
         {
             'message': 'too cold',
             'reason': 'too hot',
             'phase': 'Failed',
             'containerStatuses': [],
         }),
        # No non-zero exit code container statuses but a pod reason
        ({'reason': 'too hot'},
         {
             'reason': 'too hot',
             'phase': 'Failed',
             'containerStatuses': [
                 {
                     'state': {
                         'terminated': {
                             'exitCode': 0
                         },
                     },
                 },
             ],
         }),
        # No container statuses, only pod phase available
        ({'reason': 'Failed'}, {'phase': 'Failed'}),
        # Non-zero exit code with message
        (
            {
                'reason': 'Container cannot run',
                'exitCode': 1,
            },
            {
                'message': 'too cold',
                'reason': 'too hot',
                'phase': 'Failed',
                'containerStatuses': [
                    {
                        'state': {
                            'terminated': {
                                # Should ignore this one
                                'exitCode': 0,
                            },
                        },
                    },
                    {
                        'state': {
                            'terminated': {
                                'exitCode': 1,
                                'message': 'Container cannot run',
                                'reason': 'ContainerCannotRun',
                            },
                        },
                    },
                ],
            }
        ),
        # Non-zero exit code with reason
        (
            {
                'reason': 'ContainerCannotRun',
                'exitCode': 1,
            },
            {
                'message': 'too cold',
                'reason': 'too hot',
                'phase': 'Failed',
                'containerStatuses': [
                    {
                        'state': {
                            'terminated': {
                                'exitCode': 1,
                                'reason': 'ContainerCannotRun',
                            },
                        },
                    },
                    {
                        'state': {
                            'terminated': {
                                # Should ignore this one
                                'exitCode': 2,
                                'message': 'on fire',
                                'reason': 'FanFailure',
                            },
                        },
                    },
                ],
            }
        ),
        # Non-zero exit code, no explanation
        (
            {
                'reason': 'Exit code 1',
                'exitCode': 1,
                'containerID': 'docker://abcde',
            },
            {
                'message': 'too cold',
                'reason': 'too hot',
                'phase': 'Failed',
                'containerStatuses': [
                    {
                        'state': {
                            # Should ignore this one
                            'running': {},
                        },
                    },
                    {
                        'state': {
                            'terminated': {
                                'containerID': 'docker://abcde',
                                'exitCode': 1,
                            },
                        },
                    },
                ],
            },
        ),
    ])
    def test_failure_reason(self, expected_reason,
                            pod_status):
        # get_failure_reason() must prefer the first non-zero-exit container
        # status, then pod message/reason, then plain phase (per cases above).
        pod_json = deepcopy(self.GENERIC_POD_JSON)
        pod_json['status'].update(pod_status)
        pod_response = PodResponse(pod_json)
        fail_reason = pod_response.get_failure_reason()
        assert fail_reason == expected_reason
| 29.430556 | 77 | 0.356615 |
acf2f79558f4888b1bd1576fdcb28484b4f76337 | 2,916 | py | Python | src/syft/proto/lib/zksk/secret_pb2.py | dnabanita7/PySyft | ce2510e65f5bad382e88806bcde30fa38c3c76c4 | [
"Apache-2.0"
] | 2 | 2018-07-23T20:34:10.000Z | 2020-08-01T09:09:09.000Z | packages/syft/src/syft/proto/lib/zksk/secret_pb2.py | Metrix1010/PySyft | 6477f64b63dc285059c3766deab3993653cead2e | [
"Apache-2.0"
] | 5 | 2020-09-11T05:47:12.000Z | 2020-10-13T08:36:17.000Z | packages/syft/src/syft/proto/lib/zksk/secret_pb2.py | Metrix1010/PySyft | 6477f64b63dc285059c3766deab3993653cead2e | [
"Apache-2.0"
] | 1 | 2021-08-19T12:23:01.000Z | 2021-08-19T12:23:01.000Z | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: proto/lib/zksk/secret.proto
"""Generated protocol buffer code."""
# third party
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
# Generated protobuf plumbing for the Secret message -- do not edit by hand;
# regenerate from proto/lib/zksk/secret.proto instead.
_sym_db = _symbol_database.Default()
# File-level descriptor carrying the serialized .proto definition.
DESCRIPTOR = _descriptor.FileDescriptor(
    name="proto/lib/zksk/secret.proto",
    package="syft.lib.zksk",
    syntax="proto3",
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
    serialized_pb=b'\n\x1bproto/lib/zksk/secret.proto\x12\rsyft.lib.zksk"%\n\x06Secret\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x0c\x62\x06proto3',
)
# Message descriptor: Secret { string name = 1; bytes value = 2; }
_SECRET = _descriptor.Descriptor(
    name="Secret",
    full_name="syft.lib.zksk.Secret",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    create_key=_descriptor._internal_create_key,
    fields=[
        _descriptor.FieldDescriptor(
            name="name",
            full_name="syft.lib.zksk.Secret.name",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=b"".decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="value",
            full_name="syft.lib.zksk.Secret.value",
            index=1,
            number=2,
            type=12,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=b"",
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=46,
    serialized_end=83,
)
DESCRIPTOR.message_types_by_name["Secret"] = _SECRET
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Concrete message class built at import time from the descriptor above.
Secret = _reflection.GeneratedProtocolMessageType(
    "Secret",
    (_message.Message,),
    {
        "DESCRIPTOR": _SECRET,
        "__module__": "proto.lib.zksk.secret_pb2"
        # @@protoc_insertion_point(class_scope:syft.lib.zksk.Secret)
    },
)
_sym_db.RegisterMessage(Secret)
# @@protoc_insertion_point(module_scope)
| 28.871287 | 171 | 0.638203 |
acf2f7b940a7c21a74efb2c14af9dadf739745b1 | 2,599 | py | Python | tests/nhpp_test.py | Kylexi/nhpp | 00607ca62f3e09c7f97ccb87f7888a2fcbb488d3 | [
"MIT"
] | null | null | null | tests/nhpp_test.py | Kylexi/nhpp | 00607ca62f3e09c7f97ccb87f7888a2fcbb488d3 | [
"MIT"
] | null | null | null | tests/nhpp_test.py | Kylexi/nhpp | 00607ca62f3e09c7f97ccb87f7888a2fcbb488d3 | [
"MIT"
] | null | null | null | import nhpp
import math
import numpy as np
import pandas as pd
import pytest
@pytest.mark.parametrize("knots,expected", [
    ({0: 1, 2: 1, 1: 0}, ([0, 1, 2], [1, 0, 1])),
    ({0: 1, 3: 1, 2: 2}, ([0, 2, 3], [1, 2, 1])),
])
def test_sorting(knots, expected):
    """_get_sorted_pairs returns knot times and values ordered by time."""
    result = nhpp.nhpp._get_sorted_pairs(knots)
    assert result == expected
@pytest.mark.parametrize("t,expected", [
    (0, 0),
    (1, 5),
    (0.5, 2.5),
    (3.5, 2.5)
])
def test_piecewise_interp(t, expected):
    """Piecewise-linear interpolation hits knots exactly and midpoints linearly."""
    knots = {0: 0, 1: 5, 2: 1, 3: 2, 5: 4}
    assert nhpp.nhpp._get_piecewise_val(knots, t) == expected
def test_non_dict_error_catch():
    """get_arrivals rejects a non-dict knots argument with TypeError."""
    bad_knots = [0, 1, 2]
    with pytest.raises(TypeError):
        nhpp.get_arrivals(bad_knots)
def test_negative_rate_error_catch():
    """A negative rate value anywhere in the knots raises ValueError."""
    with pytest.raises(ValueError):
        nhpp.get_arrivals({0: 0, 1: -1, 2: 2, 3: 0})
def test_rate_slopes_error_catch():
    """These knot times/values make _get_rate_slopes raise ValueError."""
    times, vals = [0, 1, 2, 3, 4], [0, 0, 1, 2, 3]
    with pytest.raises(ValueError):
        nhpp.nhpp._get_rate_slopes(times, vals)
def get_epsilon(knots, bins, func=None, *args, **kwargs):
    """Monte-Carlo deviation between simulated NHPP arrivals and the
    expected rate curve; callers assert the returned sum is small.

    NOTE(review): the two lines below overwrite the *knots* and *bins*
    parameters with hard-coded values, so every caller's arguments are
    silently ignored.  The sine/parabola tests below only pass because of
    this override (their knot fixtures do not match their rate functions),
    so removing it requires fixing those fixtures at the same time.
    """
    knots = {0: 1, 1: 0, 2: 2}
    bins = 10
    data = []
    # NOTE(review): max_knot_val is never used.
    max_knot_val = max(knots.values())
    min_knot_dom = min(knots.keys())
    max_knot_dom = max(knots.keys())
    for i in range(100000):
        arrivals = nhpp.get_arrivals(knots)
        data.append(np.histogram(arrivals, bins, (min_knot_dom, max_knot_dom))[0])
        # NOTE(review): this second histogram call is redundant; bin_measure
        # is never read after the loop's last iteration.
        _, bin_measure = np.histogram(arrivals, bins, (min_knot_dom, max_knot_dom))
    df = pd.DataFrame(data)
    if func:
        check_against = [func(measure, *args, **kwargs) for measure in np.linspace(min_knot_dom, max_knot_dom, bins)]
    else:
        # NOTE(review): domain (0, 2) is hard-coded here; should presumably
        # be (min_knot_dom, max_knot_dom) -- identical only for these knots.
        check_against = [nhpp.nhpp. _get_piecewise_val(knots, measure) for measure in np.linspace(0, 2, bins)]
    # Rescale the empirical histogram so its peak matches the expected curve's
    # peak, then return the summed (signed) deviation.
    max_val = max(check_against)
    check = max_val*df.sum()/df.sum().max()
    check, check_against = np.array(check), np.array(check_against)
    return np.sum(check - check_against)
def test_eps_no_func_1():
    """Simulated arrivals should closely track the piecewise knot rates."""
    assert get_epsilon({0: 1, 1: 0, 2: 2}, 10) < 1
def test_eps_with_func_1():
    """Compare simulated arrivals against an explicit sine rate function."""
    # NOTE(review): the knot at pi/2 is 9, but 3*sin(pi/2)+3 == 6; this is
    # masked by get_epsilon currently hard-coding its own knots.
    knots = {0: 3, math.pi / 2: 9, math.pi: 3, 3 * math.pi / 2: 0, 2 * math.pi: 3}
    def sine_rate(t):
        return 3 * np.sin(t) + 3
    assert get_epsilon(knots, 10, sine_rate) < 1
def test_eps_with_func_2():
    """Compare simulated arrivals against a parabolic rate function."""
    # NOTE(review): the knot at 2.5 is 8, but 2.5*(5-2.5) == 6.25; this is
    # masked by get_epsilon currently hard-coding its own knots.
    def parabola(t):
        return t * (5 - t)
    assert get_epsilon({0: 0, 2.5: 8, 5: 0}, 10, parabola) < 1
def test_non_dominating_piecewise():
    """get_arrivals raises ValueError for this knot/function combination
    (presumably because the piecewise knots do not strictly dominate func)."""
    def parabola(t):
        return t * (5 - t)
    with pytest.raises(ValueError):
        nhpp.get_arrivals({0: 0, 2.5: 6.25, 5: 0}, parabola)
| 26.252525 | 111 | 0.677568 |
acf2f81f4ea5fefb3c7f6018c3e4222f870de791 | 5,411 | py | Python | app.py | TwilioDevEd/sdk-starter-python | 2ff9a78c3ebadc640c1632f38295237584d0c34b | [
"MIT"
] | 21 | 2016-11-21T17:08:36.000Z | 2022-03-19T20:16:55.000Z | app.py | TwilioDevEd/sdk-starter-python | 2ff9a78c3ebadc640c1632f38295237584d0c34b | [
"MIT"
] | 15 | 2017-03-31T01:20:40.000Z | 2022-01-04T21:35:20.000Z | app.py | TwilioDevEd/sdk-starter-python | 2ff9a78c3ebadc640c1632f38295237584d0c34b | [
"MIT"
] | 32 | 2016-12-22T17:33:21.000Z | 2021-06-03T02:15:58.000Z | import os
from flask import Flask, jsonify, request
from faker import Faker
from twilio.rest import Client
from twilio.jwt.access_token import AccessToken
from twilio.jwt.access_token.grants import (
SyncGrant,
VideoGrant,
ChatGrant
)
from dotenv import load_dotenv
from os.path import join, dirname
from inflection import underscore
# Convert keys to snake_case to conform with the twilio-python api definition contract
def snake_case_keys(somedict):
    """Return a copy of *somedict* with every key converted to snake_case."""
    return {underscore(key): value for key, value in somedict.items()}
# Flask application and a Faker instance used to invent identities.
app = Flask(__name__)
fake = Faker()
# Load Twilio credentials from the .env file next to this module.
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)
@app.route('/')
def index():
    """Serve the static landing page."""
    return app.send_static_file('index.html')
@app.route('/video/')
def video():
    """Serve the static Video demo page."""
    return app.send_static_file('video/index.html')
@app.route('/sync/')
def sync():
    """Serve the static Sync demo page."""
    return app.send_static_file('sync/index.html')
@app.route('/notify/')
def notify():
    """Serve the static Notify demo page."""
    return app.send_static_file('notify/index.html')
@app.route('/chat/')
def chat():
    """Serve the static Chat demo page."""
    return app.send_static_file('chat/index.html')
# Basic health check - check environment variables have been configured
# correctly
@app.route('/config')
def config():
    """Report which Twilio environment variables are configured (the API
    secret is reported only as a boolean, never echoed back)."""
    env = os.environ
    payload = {
        'TWILIO_ACCOUNT_SID': env['TWILIO_ACCOUNT_SID'],
        'TWILIO_NOTIFICATION_SERVICE_SID': env.get('TWILIO_NOTIFICATION_SERVICE_SID', None),
        'TWILIO_API_KEY': env['TWILIO_API_KEY'],
        'TWILIO_API_SECRET': bool(env['TWILIO_API_SECRET']),
        'TWILIO_CHAT_SERVICE_SID': env.get('TWILIO_CHAT_SERVICE_SID', None),
        'TWILIO_SYNC_SERVICE_SID': env.get('TWILIO_SYNC_SERVICE_SID', 'default'),
    }
    return jsonify(**payload)
@app.route('/token', methods=['GET'])
def randomToken():
    """Issue an access token for a randomly generated username."""
    return generateToken(fake.user_name())
@app.route('/token', methods=['POST'])
def createToken():
    """Issue an access token for the identity in the POST body (JSON or
    form); a random username is generated when none is provided."""
    payload = request.get_json() or request.form
    identity = payload.get('identity', fake.user_name())
    return generateToken(identity)
@app.route('/token/<identity>', methods=['POST', 'GET'])
def token(identity):
    """Issue an access token for the identity given in the URL path."""
    return generateToken(identity)
def generateToken(identity):
    """Build a Twilio access token for *identity* with Sync, Video, and
    (when configured) Chat grants, and return it as a JSON response."""
    env = os.environ
    # Credentials come from the environment; see /config for a health check.
    token = AccessToken(
        env['TWILIO_ACCOUNT_SID'],
        env['TWILIO_API_KEY'],
        env['TWILIO_API_SECRET'],
        identity=identity,
    )
    # Sync grant (service sid defaults to 'default').
    sync_sid = env.get('TWILIO_SYNC_SERVICE_SID', 'default')
    if sync_sid:
        token.add_grant(SyncGrant(service_sid=sync_sid))
    # Video grant needs no service sid.
    token.add_grant(VideoGrant())
    # Chat grant only when a chat service is configured.
    chat_sid = env.get('TWILIO_CHAT_SERVICE_SID', None)
    if chat_sid:
        token.add_grant(ChatGrant(service_sid=chat_sid))
    return jsonify(identity=identity, token=token.to_jwt())
# Notify - create a device binding from a POST HTTP request
@app.route('/register', methods=['POST'])
def register():
    """Create a Twilio Notify device binding from the POSTed JSON body.

    Fix: removed a leftover ``print(binding)`` debug statement that dumped
    the binding object to stdout on every registration.
    """
    # get credentials for environment variables
    account_sid = os.environ['TWILIO_ACCOUNT_SID']
    api_key = os.environ['TWILIO_API_KEY']
    api_secret = os.environ['TWILIO_API_SECRET']
    service_sid = os.environ['TWILIO_NOTIFICATION_SERVICE_SID']
    # Initialize the Twilio client
    client = Client(api_key, api_secret, account_sid)
    # Body content; keys converted to snake_case for the twilio-python API.
    content = snake_case_keys(request.get_json())
    # Get a reference to the notification service
    service = client.notify.services(service_sid)
    # Create the binding
    service.bindings.create(**content)
    # Return success message
    return jsonify(message="Binding created!")
# Notify - send a notification from a POST HTTP request
@app.route('/send-notification', methods=['POST'])
def send_notification():
    """Send a notification through Twilio Notify from POSTed JSON or form data.

    Fix: the original evaluated ``request.get_json()`` twice inside a
    conditional expression; use the same ``get_json() or form`` fallback
    pattern as createToken for consistency.
    """
    # get credentials for environment variables
    account_sid = os.environ['TWILIO_ACCOUNT_SID']
    api_key = os.environ['TWILIO_API_KEY']
    api_secret = os.environ['TWILIO_API_SECRET']
    service_sid = os.environ['TWILIO_NOTIFICATION_SERVICE_SID']
    # Initialize the Twilio client
    client = Client(api_key, api_secret, account_sid)
    service = client.notify.services(service_sid)
    # Get the request json or form data (keys snake_cased for twilio-python).
    content = snake_case_keys(request.get_json() or request.form)
    # Create a notification with the given form data
    service.notifications.create(**content)
    return jsonify(message="Notification created!")
@app.route('/<path:path>')
def static_file(path):
    """Catch-all: serve any other path from the static folder."""
    return app.send_static_file(path)
# Ensure that the Sync Default Service is provisioned
def provision_sync_default_service():
    """Fetch the 'default' Sync service as a startup provisioning check."""
    api_key = os.environ['TWILIO_API_KEY']
    api_secret = os.environ['TWILIO_API_SECRET']
    account_sid = os.environ['TWILIO_ACCOUNT_SID']
    twilio = Client(api_key, api_secret, account_sid)
    twilio.sync.services('default').fetch()
if __name__ == '__main__':
    provision_sync_default_service()
    # Fix: os.environ values are strings, so any non-empty DEBUG value
    # (even "False") used to enable debug mode; parse it into a real boolean.
    debug = os.environ['DEBUG'].strip().lower() in ('1', 'true', 'yes', 'on')
    app.run(debug=debug, host='0.0.0.0')
| 31.459302 | 116 | 0.727407 |
acf2f8247449e007636025c0a87653140e27eb07 | 1,190 | py | Python | armulator/armv6/opcodes/thumb_instruction_set/thumb_instruction_set_encoding_32_bit/thumb_load_store_dual_load_store_exclusive_table_branch/strd_immediate_t1.py | matan1008/armulator | 04d24dcec6ab42326018f5e09331e5b4738d6b52 | [
"MIT"
] | 16 | 2018-01-22T14:36:49.000Z | 2021-12-17T15:39:52.000Z | armulator/armv6/opcodes/thumb_instruction_set/thumb_instruction_set_encoding_32_bit/thumb_load_store_dual_load_store_exclusive_table_branch/strd_immediate_t1.py | AhmedMounir/armulator | 04d24dcec6ab42326018f5e09331e5b4738d6b52 | [
"MIT"
] | 3 | 2019-02-19T17:51:47.000Z | 2022-03-31T20:45:21.000Z | armulator/armv6/opcodes/thumb_instruction_set/thumb_instruction_set_encoding_32_bit/thumb_load_store_dual_load_store_exclusive_table_branch/strd_immediate_t1.py | AhmedMounir/armulator | 04d24dcec6ab42326018f5e09331e5b4738d6b52 | [
"MIT"
] | 4 | 2020-06-18T23:51:03.000Z | 2022-02-09T17:43:13.000Z | from armulator.armv6.opcodes.abstract_opcodes.strd_immediate import StrdImmediate
from armulator.armv6.opcodes.opcode import Opcode
from armulator.armv6.bits_ops import zero_extend
class StrdImmediateT1(StrdImmediate, Opcode):
    """STRD (immediate), Thumb encoding T1: store two registers (Rt, Rt2)
    to memory at Rn +/- imm32, with optional index/write-back."""
    def __init__(self, instruction, add, wback, index, imm32, t, t2, n):
        Opcode.__init__(self, instruction)
        StrdImmediate.__init__(self, add, wback, index, imm32, t, t2, n)
    def is_pc_changing_opcode(self):
        # Plain store; never writes the PC.
        return False
    @staticmethod
    def from_bitarray(instr, processor):
        # Decode fields from the 32-bit Thumb instruction bit-array.
        imm8 = instr[24:32]
        rt2 = instr[20:24]
        rt = instr[16:20]
        rn = instr[12:16]
        index = instr[7]
        add = instr[8]
        wback = instr[10]
        # imm32 = ZeroExtend(imm8:'00', 32): byte offset scaled by 4.
        imm32 = zero_extend(imm8 + "0b00", 32)
        # UNPREDICTABLE cases: write-back to a stored register, Rn == PC,
        # or Rt/Rt2 being SP or PC.  NOTE(review): this branch uses the
        # Python-2 print statement and implicitly returns None.
        if ((wback and (rn.uint == rt.uint or rn.uint == rt2.uint)) or
                rn.uint == 15 or
                rt.uint in (13, 15) or
                rt2.uint in (13, 15)):
            print "unpredictable"
        else:
            return StrdImmediateT1(instr, **{"add": add, "wback": wback, "index": index, "imm32": imm32, "t": rt.uint,
                                             "t2": rt2.uint, "n": rn.uint})
| 37.1875 | 118 | 0.581513 |
acf2f8b37e76ff877812db523abf0b7e84461680 | 3,785 | py | Python | Myblog/blog/migrations/0001_initial.py | Family-TreeSY/MyBlog | 52e74f18d37abc6e937d7cb5c752bc9dfd6ed662 | [
"MIT"
] | 5 | 2018-04-17T10:32:24.000Z | 2019-09-25T14:00:19.000Z | Myblog/blog/migrations/0001_initial.py | Family-TreeSY/MyBlog | 52e74f18d37abc6e937d7cb5c752bc9dfd6ed662 | [
"MIT"
] | 7 | 2020-06-05T18:15:00.000Z | 2022-03-11T23:20:33.000Z | Myblog/blog/migrations/0001_initial.py | Family-TreeSY/MyBlog | 52e74f18d37abc6e937d7cb5c752bc9dfd6ed662 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-04-17 08:27
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
# Auto-generated Django migration: creates the Category, Post and Tag models
# and Post's many-to-many 'tag' field.  Verbose names are Chinese (escaped).
# Generated code -- do not hand-edit; create a new migration instead.
class Migration(migrations.Migration):
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # Category: name/status/is_nav flags plus author FK.
        # NOTE(review): verbose_name '\u540d\u57ce' differs from the
        # '\u540d\u79f0' used by Tag.name -- possibly a typo in the model.
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, verbose_name='\u540d\u57ce')),
                ('status', models.PositiveIntegerField(choices=[(1, '\u6b63\u5e38'), (2, '\u5220\u9664')], default=1, verbose_name='\u72b6\u6001')),
                ('is_nav', models.BooleanField(default=False, verbose_name='\u662f\u5426\u4e3a\u5bfc\u822a')),
                ('created_time', models.DateTimeField(auto_now_add=True, verbose_name='\u521b\u5efa\u65f6\u95f4')),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='\u4f5c\u8005')),
            ],
            options={
                'verbose_name': '\u5206\u7c7b',
                'verbose_name_plural': '\u5206\u7c7b',
            },
        ),
        # Post: title/desc/markdown content, status, timestamps, author and
        # category FKs.
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=50, verbose_name='\u6807\u9898')),
                ('desc', models.CharField(blank=True, max_length=255, verbose_name='\u6458\u8981')),
                ('content', models.TextField(help_text='\u6b63\u6587\u5fc5\u987b\u4e3amarkdown', verbose_name='\u6b63\u6587\u5185\u5bb9')),
                ('status', models.PositiveIntegerField(choices=[(1, '\u4e0a\u7ebf'), (2, '\u5220\u9664')], default=1, verbose_name='\u72b6\u6001')),
                ('created_time', models.DateTimeField(auto_now_add=True, verbose_name='\u521b\u5efa\u65f6\u95f4')),
                ('last_update_time', models.DateTimeField(auto_now=True, verbose_name='\u66f4\u65b0\u65f6\u95f4')),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='\u4f5c\u8005')),
                ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Category', verbose_name='\u5206\u7c7b')),
            ],
            options={
                'verbose_name': '\u6587\u7ae0',
                'verbose_name_plural': '\u6587\u7ae0',
            },
        ),
        # Tag: name/status plus author FK.
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, verbose_name='\u540d\u79f0')),
                ('status', models.PositiveIntegerField(choices=[(1, '\u6b63\u5e38'), (2, '\u5220\u9664')], default=1, verbose_name='\u72b6\u6001')),
                ('created_time', models.DateTimeField(auto_now_add=True, verbose_name='\u521b\u5efa\u65f6\u95f4')),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='\u4f5c\u8005')),
            ],
            options={
                'verbose_name': '\u6807\u7b7e',
                'verbose_name_plural': '\u6807\u7b7e',
            },
        ),
        # Added after both models exist: Post.tag many-to-many to Tag.
        migrations.AddField(
            model_name='post',
            name='tag',
            field=models.ManyToManyField(to='blog.Tag', verbose_name='\u6807\u7b7e'),
        ),
    ]
| 52.569444 | 149 | 0.606605 |
acf2f9493bdd27a9c634aae9b739394d482a5404 | 1,492 | py | Python | setup.py | pbelskiy/quickbuild | d06cbe82e6a5a484603b8bc5d0cce27ae1b831a5 | [
"MIT"
] | 7 | 2021-02-08T21:31:38.000Z | 2022-03-22T00:28:40.000Z | setup.py | pbelskiy/quickbuild | d06cbe82e6a5a484603b8bc5d0cce27ae1b831a5 | [
"MIT"
] | 9 | 2021-02-09T06:43:29.000Z | 2022-03-04T21:18:58.000Z | setup.py | pbelskiy/quickbuild | d06cbe82e6a5a484603b8bc5d0cce27ae1b831a5 | [
"MIT"
] | 2 | 2021-02-08T12:08:54.000Z | 2022-02-28T10:46:04.000Z | import os
import re
from setuptools import setup, find_packages
# Resolve the path to quickbuild/__init__.py next to this setup script.
init_file_path = os.path.join(
    os.path.dirname(__file__),
    'quickbuild/__init__.py'
)

# Single-source the package version: parse __version__ out of the package
# __init__ instead of duplicating it here.
with open(init_file_path) as f:
    try:
        version = re.findall(r"__version__ = '(.*)'", f.read())[0]
    except IndexError:
        raise RuntimeError('Unable to get package version')

# The PyPI long description comes straight from the README.
with open('README.rst') as readme_file:
    README = readme_file.read()

setup_args = dict(
    name='quickbuild',
    version=version,
    description='Python client for PMEase QuickBuild',
    long_description_content_type='text/x-rst',
    long_description=README,
    classifiers=[
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
    ],
    license='MIT',
    packages=find_packages(),
    package_data={'quickbuild': ['*']},
    author='Petr Belskiy',
    keywords=['PMEase', 'quickbuild'],
    url='https://github.com/pbelskiy/quickbuild',
    download_url='https://pypi.org/project/quickbuild'
)

# Runtime dependencies: aiohttp for the async client, requests for the sync
# client, xmltodict for parsing XML responses.
install_requires = [
    'aiohttp>3.6.2,<4.0.0',
    'requests>=2.24.0,<3.0.0',
    'xmltodict==0.12',
]

setup(
    install_requires=install_requires,
    python_requires='>=3.5',
    **setup_args
)
| 26.642857 | 66 | 0.635389 |
acf2f966fcdf3fbee33e810c0a06bfbe95c9cfb7 | 2,327 | py | Python | problem_0493.py | techrabbit58/ProjectEuler | b23a2bc87acf5ee470ee62d94d43d7f6e6c1725a | [
"Unlicense"
] | null | null | null | problem_0493.py | techrabbit58/ProjectEuler | b23a2bc87acf5ee470ee62d94d43d7f6e6c1725a | [
"Unlicense"
] | null | null | null | problem_0493.py | techrabbit58/ProjectEuler | b23a2bc87acf5ee470ee62d94d43d7f6e6c1725a | [
"Unlicense"
] | null | null | null | """This solves problem #493 of Project Euler (https://projecteuler.net).
Under The Rainbow
Problem 493
70 coloured balls are placed in an urn, 10 for each of the seven rainbow colours.
What is the expected number of distinct colours in 20 randomly picked balls?
Give your answer with nine digits after the decimal point (a.bcdefghij).
"""
import random
from collections import Counter, defaultdict
from helpers import chronometric
from mathext import binomial
def initialize():
    """Fill the urn: ten balls of each of the seven colours, numbered 1..7."""
    return [colour for colour in range(1, 8) for _ in range(10)]
def draw(urn, takes):
    """Randomly remove ``takes`` balls from ``urn`` (mutating it) and return them."""
    picked = []
    while len(picked) < takes:
        random.shuffle(urn)
        picked.append(urn.pop())
    return list(picked)
def monte_carlo(rounds):
    """Estimate by simulation the expected number of distinct colours drawn.

    Performs ``rounds`` independent draws of 20 balls from a fresh copy of the
    urn, prints the empirical distribution of distinct-colour counts and the
    resulting expectation estimate.
    """
    urn = initialize()
    tally = defaultdict(int)
    for _ in range(rounds):
        distinct = len(Counter(draw(urn[:], 20)))
        tally[distinct] += 1
    distribution = {count: freq / rounds for count, freq in tally.items()}
    print(distribution)
    expectation = sum(count * share for count, share in distribution.items())
    print('Approximate expectation =', expectation)
@chronometric
def under_the_rainbow():
    """Expected number of distinct colours among 20 balls drawn from 70.

    For any single colour, the probability that it appears in the draw is
    p = 1 - C(60, 20) / C(70, 20): the colour is absent exactly when all 20
    balls come from the other 60.  By linearity of expectation the answer is
    7 * p.  A short Monte Carlo run (which suggested a value around 6.82) is
    kept as a sanity check before returning the exact value.
    """
    monte_carlo(1000)
    p_colour_present = 1 - binomial(60, 20) / binomial(70, 20)
    return 7 * p_colour_present
def run_application():
    """Solve the problem and report the solution and elapsed time."""
    solution, elapsed = under_the_rainbow()
    print('Solution =', solution)
    print('Runtime =', elapsed, 'seconds')
# Run the solver only when executed as a script, not when imported.
if __name__ == '__main__':
    run_application()
# last line of code
| 28.378049 | 91 | 0.683283 |
acf2fa00b3ec968f408b88c07c5e4af0d23b7e8f | 14,019 | py | Python | pso_mechanism_parameter_condition1.py | turbohiro/mechanism-parameter-optimization_PSO | d2639679a1441e715eb1766df1786dcc7fe39545 | [
"MIT"
] | 5 | 2019-04-22T08:12:11.000Z | 2020-11-24T09:03:20.000Z | pso_mechanism_parameter_condition1.py | turbohiro/mechanism-parameter-optimization_PSO | d2639679a1441e715eb1766df1786dcc7fe39545 | [
"MIT"
] | null | null | null | pso_mechanism_parameter_condition1.py | turbohiro/mechanism-parameter-optimization_PSO | d2639679a1441e715eb1766df1786dcc7fe39545 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 10 16:16:09 2018
1. 0.28410760178591277 3190.0751749837323 767276.0387534217
"""
import pandas as pd
from All_data import combustion_time
import matplotlib.pyplot as plt
import numpy as np
import subprocess
from math import sqrt
import os
class PSO:
    """Particle-swarm optimisation of two Arrhenius parameters of a reduced
    n-dodecane (NC12) mechanism.

    Each particle is a 2-vector of normalised coordinates in [-1, 1] mapped
    onto the (activation energy E, pre-exponential factor A) pair of the
    ``NC12-OOQOOH => NC12-OQOOH + OH`` reaction (line 62 of the mechanism
    template).  A particle is scored by writing its parameters into the
    mechanism, running a CHEMKIN shock-tube batch over 13 temperatures
    (20 atm, equivalence ratio 0.5 per the inline reference data) and
    comparing predicted ignition-delay times against the detailed-mechanism
    reference (lower fitness is better).
    """

    # CHEMKIN working directory (Windows-specific, as in the original script).
    _WORKDIR = r"C:\E_Disk\Ansys_software\ANSYS Chemkin Pro 17.0 Release 15151 Win\workplace\C12_simplified"
    # Initial temperatures (K) of the 13 shock-tube cases.
    _TEMPERATURES = [700, 750, 800, 850, 900, 950, 1000, 1050,
                     1100, 1150, 1200, 1300, 1400]
    # Reference ignition-delay times (microseconds) from the detailed
    # mechanism at 20 atm, equivalence ratio 0.5.
    _TIME_DETAILED = [4994.202, 1956.661, 1520.135, 1754.401, 2130.096,
                      2729.143, 2261.967, 1323.475, 686.5373, 343.5532,
                      173.2118, 4.70E+01, 1.45E+01]

    def __init__(self):
        """Cache the PSO hyper-parameters on the instance."""
        self.w = self.getweight()
        self.lr = self.getlearningrate()
        self.maxgen = self.getmaxgen()
        self.sizepop = self.getsizepop()
        self.rangepop_x, self.rangepop_y = self.getrangepop()
        self.rangespeed_x, self.rangespeed_y = self.getrangespeed()

    def getweight(self):
        """Inertia weight applied to the previous velocity."""
        return 0.8

    def getlearningrate(self):
        """(cognitive, social) acceleration constants."""
        return (0.495, 1.2)

    def getmaxgen(self):
        """Maximum number of generations."""
        return 10

    def getsizepop(self):
        """Number of particles in the swarm."""
        return 40

    def getrangepop(self):
        """Position bounds for the two normalised coordinates."""
        return (-1, 1), (-1, 1)

    def getrangespeed(self):
        """Velocity bounds for the two normalised coordinates."""
        return (-0.2, 0.2), (-0.2, 0.2)

    def error(self, a, b):
        """Mean relative error of predictions ``a`` against references ``b``."""
        deviations = [abs((x - ref) / ref) for x, ref in zip(a, b)]
        return sum(deviations) / len(deviations)

    def Mean_squared_error(self, a, b):
        """Root-mean-square error (RMSE, despite the name) of ``a`` vs ``b``."""
        mse = sum((x - ref) ** 2 for x, ref in zip(a, b)) / len(a)
        return sqrt(mse)

    def _denormalize(self, particle):
        """Map a particle's [-1, 1] coordinates onto the physical (E, A) pair.

        E spans [0, 10000] linearly; A spans [1.2e4, 1.2e10] on a log scale.
        """
        e_value = 5000 * particle[0] + 5000
        a_value = 1000 ** particle[1] * 1.2e7
        return e_value, a_value

    def mechanism_computation(self, path):
        """Return the 13 ignition-delay times from ``CKSoln_solution_no_<n>.csv``.

        One file per temperature case; ``combustion_time`` locates the
        ignition row index within each solution frame.
        """
        times = []
        for case in range(1, 14):
            frame = pd.read_csv('%s/CKSoln_solution_no_%d.csv' % (path, case))
            _, row = combustion_time(frame, case)
            times.append(frame['Time_Soln#%d_(sec)' % case][row])
        return times

    def _evaluate(self, particle):
        """Run one CHEMKIN shock-tube batch for ``particle``.

        Splices the particle's (E, A) into reaction line 62 of the mechanism
        template, runs the batch script, collects the 13 ignition-delay
        times, deletes the solution files and returns a DataFrame with
        columns Temp / simplified / detailed.
        """
        e_value, a_value = self._denormalize(particle)
        with open(self._WORKDIR + '\\NC12.inp') as template:
            lines = template.readlines()
        # BUG FIX: the original assigned the replacement line without a
        # trailing newline, gluing line 62 onto line 63 in the output file.
        lines[62] = ('NC12-OOQOOH=>NC12-OQOOH+OH ' + str(a_value)
                     + ' 0.0 ' + str(e_value) + '\n')
        with open(self._WORKDIR + '\\C12_optimized.inp', 'w') as mechanism:
            mechanism.writelines(lines)
        # Launch the shock-tube batch and block until CHEMKIN finishes.
        process = subprocess.Popen(self._WORKDIR + r'\ST.bat', shell=True,
                                   stdout=subprocess.PIPE)
        process.communicate()
        simulated = self.mechanism_computation(self._WORKDIR)
        table = pd.DataFrame({'Temp': self._TEMPERATURES,
                              'simplified': simulated,
                              'detailed': self._TIME_DETAILED})
        # Remove the solution files so the next run cannot read stale results.
        for case in range(1, 14):
            os.remove('%s/CKSoln_solution_no_%d.csv' % (self._WORKDIR, case))
        return table

    def fitness_func(self, data, label=False):
        """Score one particle's ignition-delay table (lower is better).

        Adds a 'simplified2' column (simulated seconds converted to
        microseconds) and compares it against 'detailed': RMSE when
        ``label`` is True, mean relative error otherwise.
        """
        data["simplified2"] = data["simplified"].map(lambda x: x * 1000000)
        if label:
            return self.Mean_squared_error(list(data["simplified2"]),
                                           list(data["detailed"]))
        return self.error(list(data["simplified2"]), list(data["detailed"]))

    def init(self, sizepop):
        """Create and score an initial swarm of ``sizepop`` particles.

        Positions and velocities are drawn uniformly within their bounds;
        scoring each particle requires one CHEMKIN run.
        """
        pop = np.zeros((sizepop, 2))
        v = np.zeros((sizepop, 2))
        fitness = np.zeros(sizepop)
        for i in range(sizepop):
            pop[i] = [(np.random.uniform() - 0.5) * 2 * self.rangepop_x[1],
                      (np.random.uniform() - 0.5) * 2 * self.rangepop_y[1]]
            v[i] = [(np.random.uniform() - 0.5) * 2 * self.rangespeed_x[1],
                    (np.random.uniform() - 0.5) * 2 * self.rangespeed_y[1]]
            table = self._evaluate(pop[i])
            print("第%d个粒子初始化数据完成." % (i))
            fitness[i] = self.fitness_func(table)
        return pop, v, fitness

    def getinitbest(self, fitness, pop):
        """Return the global and per-particle bests of the initial swarm."""
        gbestpop, gbestfitness = pop[fitness.argmin()].copy(), fitness.min()
        pbestpop, pbestfitness = pop.copy(), fitness.copy()
        return gbestpop, gbestfitness, pbestpop, pbestfitness

    def run(self):
        """Iterate the swarm; return the best fitness after each generation."""
        pop, v, fitness = self.init(self.sizepop)
        gbestpop, gbestfitness, pbestpop, pbestfitness = \
            self.getinitbest(fitness, pop)
        result = np.zeros(self.maxgen)
        for gen in range(self.maxgen):
            # Velocity update: inertia plus randomly weighted pulls towards
            # the personal and global bests, clamped to the speed box.
            for j in range(self.sizepop):
                v[j] = (v[j] * self.w
                        + self.lr[0] * np.random.rand() * (pbestpop[j] - pop[j])
                        + self.lr[1] * np.random.rand() * (gbestpop - pop[j]))
                v[j][0] = min(max(v[j][0], self.rangespeed_x[0]), self.rangespeed_x[1])
                v[j][1] = min(max(v[j][1], self.rangespeed_y[0]), self.rangespeed_y[1])
            # Position update, clamped to the search box (the original's
            # upper y clamp mistakenly used rangepop_x; same value, fixed).
            for j in range(self.sizepop):
                pop[j] += v[j]
                pop[j][0] = min(max(pop[j][0], self.rangepop_x[0]), self.rangepop_x[1])
                pop[j][1] = min(max(pop[j][1], self.rangepop_y[0]), self.rangepop_y[1])
            # Re-score every particle (one CHEMKIN run each).  BUG FIX: the
            # original recomputed the pre-exponential factor here from
            # pop[j][0] instead of pop[j][1], disagreeing with init() and
            # with the report below; both paths now share _evaluate().
            for j in range(self.sizepop):
                table = self._evaluate(pop[j])
                print('第%d次迭代第%d个粒子更新数据完成.' % (gen + 1, j))
                fitness[j] = self.fitness_func(table)
            # Personal-best and global-best bookkeeping.
            for j in range(self.sizepop):
                if fitness[j] < pbestfitness[j]:
                    pbestfitness[j] = fitness[j]
                    pbestpop[j] = pop[j].copy()
            if pbestfitness.min() < gbestfitness:
                gbestfitness = pbestfitness.min()
                gbestpop = pbestpop[pbestfitness.argmin()].copy()
            e_best, a_best = self._denormalize(gbestpop)
            print(gbestfitness, e_best, a_best)
            result[gen] = gbestfitness
        return result
# Run the optimisation and plot the best fitness achieved per generation.
pso = PSO()
result = pso.run()
#%%
plt.figure()
plt.plot(result)
plt.show()
| 40.991228 | 159 | 0.537128 |
acf2fa398fd55b3f82f4d1b5a2fe30163c6836f4 | 30,852 | py | Python | influxdb_client/service/sources_service.py | wasted925/influxdb-client-python | afee531fd1dc244b3d9d270e262b0a1865a7c89d | [
"MIT"
] | 380 | 2019-09-19T20:20:10.000Z | 2022-03-31T12:59:33.000Z | influxdb_client/service/sources_service.py | wasted925/influxdb-client-python | afee531fd1dc244b3d9d270e262b0a1865a7c89d | [
"MIT"
] | 362 | 2019-09-16T11:53:29.000Z | 2022-03-29T03:11:59.000Z | influxdb_client/service/sources_service.py | wasted925/influxdb-client-python | afee531fd1dc244b3d9d270e262b0a1865a7c89d | [
"MIT"
] | 130 | 2019-09-20T08:02:35.000Z | 2022-03-30T16:44:45.000Z | # coding: utf-8
"""
Influx OSS API Service.
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
class SourcesService(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None): # noqa: E501,D401,D403
"""SourcesService - a operation defined in OpenAPI."""
if api_client is None:
raise ValueError("Invalid value for `api_client`, must be defined.")
self.api_client = api_client
def delete_sources_id(self, source_id, **kwargs): # noqa: E501,D401,D403
"""Delete a source.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_sources_id(source_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str source_id: The source ID. (required)
:param str zap_trace_span: OpenTracing span context
:return: None
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_sources_id_with_http_info(source_id, **kwargs) # noqa: E501
else:
(data) = self.delete_sources_id_with_http_info(source_id, **kwargs) # noqa: E501
return data
def delete_sources_id_with_http_info(self, source_id, **kwargs): # noqa: E501,D401,D403
"""Delete a source.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_sources_id_with_http_info(source_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str source_id: The source ID. (required)
:param str zap_trace_span: OpenTracing span context
:return: None
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
local_var_params = locals()
all_params = ['source_id', 'zap_trace_span'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
all_params.append('urlopen_kw')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_sources_id" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'source_id' is set
if ('source_id' not in local_var_params or
local_var_params['source_id'] is None):
raise ValueError("Missing the required parameter `source_id` when calling `delete_sources_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'source_id' in local_var_params:
path_params['sourceID'] = local_var_params['source_id'] # noqa: E501
query_params = []
header_params = {}
if 'zap_trace_span' in local_var_params:
header_params['Zap-Trace-Span'] = local_var_params['zap_trace_span'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
# urlopen optional setting
urlopen_kw = None
if 'urlopen_kw' in kwargs:
urlopen_kw = kwargs['urlopen_kw']
return self.api_client.call_api(
'/api/v2/sources/{sourceID}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
urlopen_kw=urlopen_kw)
def get_sources(self, **kwargs): # noqa: E501,D401,D403
"""Get all sources.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_sources(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str zap_trace_span: OpenTracing span context
:param str org: The organization name.
:return: Sources
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_sources_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_sources_with_http_info(**kwargs) # noqa: E501
return data
def get_sources_with_http_info(self, **kwargs): # noqa: E501,D401,D403
"""Get all sources.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_sources_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str zap_trace_span: OpenTracing span context
:param str org: The organization name.
:return: Sources
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
local_var_params = locals()
all_params = ['zap_trace_span', 'org'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
all_params.append('urlopen_kw')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_sources" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'org' in local_var_params:
query_params.append(('org', local_var_params['org'])) # noqa: E501
header_params = {}
if 'zap_trace_span' in local_var_params:
header_params['Zap-Trace-Span'] = local_var_params['zap_trace_span'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
# urlopen optional setting
urlopen_kw = None
if 'urlopen_kw' in kwargs:
urlopen_kw = kwargs['urlopen_kw']
return self.api_client.call_api(
'/api/v2/sources', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Sources', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
urlopen_kw=urlopen_kw)
def get_sources_id(self, source_id, **kwargs): # noqa: E501,D401,D403
"""Get a source.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_sources_id(source_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str source_id: The source ID. (required)
:param str zap_trace_span: OpenTracing span context
:return: Source
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_sources_id_with_http_info(source_id, **kwargs) # noqa: E501
else:
(data) = self.get_sources_id_with_http_info(source_id, **kwargs) # noqa: E501
return data
def get_sources_id_with_http_info(self, source_id, **kwargs): # noqa: E501,D401,D403
"""Get a source.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_sources_id_with_http_info(source_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str source_id: The source ID. (required)
:param str zap_trace_span: OpenTracing span context
:return: Source
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
local_var_params = locals()
all_params = ['source_id', 'zap_trace_span'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
all_params.append('urlopen_kw')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_sources_id" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'source_id' is set
if ('source_id' not in local_var_params or
local_var_params['source_id'] is None):
raise ValueError("Missing the required parameter `source_id` when calling `get_sources_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'source_id' in local_var_params:
path_params['sourceID'] = local_var_params['source_id'] # noqa: E501
query_params = []
header_params = {}
if 'zap_trace_span' in local_var_params:
header_params['Zap-Trace-Span'] = local_var_params['zap_trace_span'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
# urlopen optional setting
urlopen_kw = None
if 'urlopen_kw' in kwargs:
urlopen_kw = kwargs['urlopen_kw']
return self.api_client.call_api(
'/api/v2/sources/{sourceID}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Source', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
urlopen_kw=urlopen_kw)
def get_sources_id_buckets(self, source_id, **kwargs): # noqa: E501,D401,D403
"""Get buckets in a source.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_sources_id_buckets(source_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str source_id: The source ID. (required)
:param str zap_trace_span: OpenTracing span context
:param str org: The organization name.
:return: Buckets
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_sources_id_buckets_with_http_info(source_id, **kwargs) # noqa: E501
else:
(data) = self.get_sources_id_buckets_with_http_info(source_id, **kwargs) # noqa: E501
return data
def get_sources_id_buckets_with_http_info(self, source_id, **kwargs): # noqa: E501,D401,D403
"""Get buckets in a source.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_sources_id_buckets_with_http_info(source_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str source_id: The source ID. (required)
:param str zap_trace_span: OpenTracing span context
:param str org: The organization name.
:return: Buckets
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
local_var_params = locals()
all_params = ['source_id', 'zap_trace_span', 'org'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
all_params.append('urlopen_kw')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_sources_id_buckets" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'source_id' is set
if ('source_id' not in local_var_params or
local_var_params['source_id'] is None):
raise ValueError("Missing the required parameter `source_id` when calling `get_sources_id_buckets`") # noqa: E501
collection_formats = {}
path_params = {}
if 'source_id' in local_var_params:
path_params['sourceID'] = local_var_params['source_id'] # noqa: E501
query_params = []
if 'org' in local_var_params:
query_params.append(('org', local_var_params['org'])) # noqa: E501
header_params = {}
if 'zap_trace_span' in local_var_params:
header_params['Zap-Trace-Span'] = local_var_params['zap_trace_span'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
# urlopen optional setting
urlopen_kw = None
if 'urlopen_kw' in kwargs:
urlopen_kw = kwargs['urlopen_kw']
return self.api_client.call_api(
'/api/v2/sources/{sourceID}/buckets', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Buckets', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
urlopen_kw=urlopen_kw)
def get_sources_id_health(self, source_id, **kwargs): # noqa: E501,D401,D403
"""Get the health of a source.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_sources_id_health(source_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str source_id: The source ID. (required)
:param str zap_trace_span: OpenTracing span context
:return: HealthCheck
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_sources_id_health_with_http_info(source_id, **kwargs) # noqa: E501
else:
(data) = self.get_sources_id_health_with_http_info(source_id, **kwargs) # noqa: E501
return data
def get_sources_id_health_with_http_info(self, source_id, **kwargs): # noqa: E501,D401,D403
"""Get the health of a source.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_sources_id_health_with_http_info(source_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str source_id: The source ID. (required)
:param str zap_trace_span: OpenTracing span context
:return: HealthCheck
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
local_var_params = locals()
all_params = ['source_id', 'zap_trace_span'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
all_params.append('urlopen_kw')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_sources_id_health" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'source_id' is set
if ('source_id' not in local_var_params or
local_var_params['source_id'] is None):
raise ValueError("Missing the required parameter `source_id` when calling `get_sources_id_health`") # noqa: E501
collection_formats = {}
path_params = {}
if 'source_id' in local_var_params:
path_params['sourceID'] = local_var_params['source_id'] # noqa: E501
query_params = []
header_params = {}
if 'zap_trace_span' in local_var_params:
header_params['Zap-Trace-Span'] = local_var_params['zap_trace_span'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
# urlopen optional setting
urlopen_kw = None
if 'urlopen_kw' in kwargs:
urlopen_kw = kwargs['urlopen_kw']
return self.api_client.call_api(
'/api/v2/sources/{sourceID}/health', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='HealthCheck', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
urlopen_kw=urlopen_kw)
def patch_sources_id(self, source_id, source, **kwargs): # noqa: E501,D401,D403
"""Update a Source.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_sources_id(source_id, source, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str source_id: The source ID. (required)
:param Source source: Source update (required)
:param str zap_trace_span: OpenTracing span context
:return: Source
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_sources_id_with_http_info(source_id, source, **kwargs) # noqa: E501
else:
(data) = self.patch_sources_id_with_http_info(source_id, source, **kwargs) # noqa: E501
return data
    def patch_sources_id_with_http_info(self, source_id, source, **kwargs):  # noqa: E501,D401,D403
        """Update a Source.

        Issues ``PATCH /api/v2/sources/{sourceID}``.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.patch_sources_id_with_http_info(source_id, source, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str source_id: The source ID. (required)
        :param Source source: Source update (required)
        :param str zap_trace_span: OpenTracing span context
        :return: Source
                 If the method is called asynchronously,
                 returns the request thread.
        """  # noqa: E501
        # Snapshot the named arguments; locals() must run before any other
        # local variable is bound in this frame.
        local_var_params = locals()
        all_params = ['source_id', 'source', 'zap_trace_span']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        all_params.append('urlopen_kw')
        # Fold **kwargs into the snapshot, rejecting unknown option names.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method patch_sources_id" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'source_id' is set
        if ('source_id' not in local_var_params or
                local_var_params['source_id'] is None):
            raise ValueError("Missing the required parameter `source_id` when calling `patch_sources_id`")  # noqa: E501
        # verify the required parameter 'source' is set
        if ('source' not in local_var_params or
                local_var_params['source'] is None):
            raise ValueError("Missing the required parameter `source` when calling `patch_sources_id`")  # noqa: E501
        collection_formats = {}
        # 'sourceID' is interpolated into the URL path template below.
        path_params = {}
        if 'source_id' in local_var_params:
            path_params['sourceID'] = local_var_params['source_id']  # noqa: E501
        query_params = []
        header_params = {}
        if 'zap_trace_span' in local_var_params:
            header_params['Zap-Trace-Span'] = local_var_params['zap_trace_span']  # noqa: E501
        form_params = []
        local_var_files = {}
        # The Source object is serialized as the JSON request body.
        body_params = None
        if 'source' in local_var_params:
            body_params = local_var_params['source']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = []  # noqa: E501
        # urlopen optional setting
        urlopen_kw = None
        if 'urlopen_kw' in kwargs:
            urlopen_kw = kwargs['urlopen_kw']
        # Delegate the HTTP round trip (and optional async dispatch) to the
        # shared API client.
        return self.api_client.call_api(
            '/api/v2/sources/{sourceID}', 'PATCH',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='Source',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats,
            urlopen_kw=urlopen_kw)
def post_sources(self, source, **kwargs): # noqa: E501,D401,D403
"""Create a source.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_sources(source, async_req=True)
>>> result = thread.get()
:param async_req bool
:param Source source: Source to create (required)
:param str zap_trace_span: OpenTracing span context
:return: Source
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.post_sources_with_http_info(source, **kwargs) # noqa: E501
else:
(data) = self.post_sources_with_http_info(source, **kwargs) # noqa: E501
return data
    def post_sources_with_http_info(self, source, **kwargs):  # noqa: E501,D401,D403
        """Create a source.

        Issues ``POST /api/v2/sources``.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.post_sources_with_http_info(source, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param Source source: Source to create (required)
        :param str zap_trace_span: OpenTracing span context
        :return: Source
                 If the method is called asynchronously,
                 returns the request thread.
        """  # noqa: E501
        # Snapshot the named arguments; locals() must run before any other
        # local variable is bound in this frame.
        local_var_params = locals()
        all_params = ['source', 'zap_trace_span']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        all_params.append('urlopen_kw')
        # Fold **kwargs into the snapshot, rejecting unknown option names.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method post_sources" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'source' is set
        if ('source' not in local_var_params or
                local_var_params['source'] is None):
            raise ValueError("Missing the required parameter `source` when calling `post_sources`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        if 'zap_trace_span' in local_var_params:
            header_params['Zap-Trace-Span'] = local_var_params['zap_trace_span']  # noqa: E501
        form_params = []
        local_var_files = {}
        # The Source object is serialized as the JSON request body.
        body_params = None
        if 'source' in local_var_params:
            body_params = local_var_params['source']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = []  # noqa: E501
        # urlopen optional setting
        urlopen_kw = None
        if 'urlopen_kw' in kwargs:
            urlopen_kw = kwargs['urlopen_kw']
        # Delegate the HTTP round trip (and optional async dispatch) to the
        # shared API client.
        return self.api_client.call_api(
            '/api/v2/sources', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='Source',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats,
            urlopen_kw=urlopen_kw)
| 39.655527 | 126 | 0.618274 |
acf2fa57038ab136c01ee93d0654b21ab4852ad6 | 1,731 | py | Python | packet.py | qiujiangkun/COMP4621-Protocol | c43b497bab1ccbeff3a2c69d740bac6c16425272 | [
"MIT"
] | null | null | null | packet.py | qiujiangkun/COMP4621-Protocol | c43b497bab1ccbeff3a2c69d740bac6c16425272 | [
"MIT"
] | null | null | null | packet.py | qiujiangkun/COMP4621-Protocol | c43b497bab1ccbeff3a2c69d740bac6c16425272 | [
"MIT"
] | null | null | null | from tools import ByteBuf
class Packet:
def __init__(self, payload=b"", seq_num=0, ack_num=0):
"""
Constructor of class Packet
:param payload: Data carried in this packet
:param seq_num: Sequence number of this packet
:param ack_num: ACK number of this packet
"""
self.seq_num = seq_num
self.ack_num = ack_num
self.payload = payload
self.chk_sum = 0
def encode(self, get_bytes=True):
"""
Encode a packet into bytes
:return: A byte stream
"""
self.compute_checksum()
buf = ByteBuf()
buf.write_int(self.seq_num)
buf.write_int(self.ack_num)
buf.write_int(self.chk_sum)
buf.write_data(self.payload)
if get_bytes:
return buf.as_bytes()
else:
return buf
def decode(self, packet):
"""
Decode a packet from bytes
:param packet: A packet in bytes
:return: A Packet object
"""
buf = ByteBuf(packet)
self.seq_num = buf.read_int()
self.ack_num = buf.read_int()
self.chk_sum = buf.read_int()
self.payload = buf.read_data()
return self
def compute_checksum(self):
"""
Compute the checksum of a packet
:return: The checksum of this packet
"""
buf = ByteBuf()
buf.write_int(self.seq_num)
buf.write_int(self.ack_num)
buf.write_int(0)
buf.write_data(self.payload)
self.chk_sum = buf.checksum()
return self.chk_sum
def __str__(self):
return f"Packet seq={self.seq_num} ack={self.ack_num} chk={self.chk_sum} payload={self.payload[:20]}"
| 26.630769 | 109 | 0.575968 |
acf2fafb6e95817fa21a43958b96ebfef864bde7 | 8,396 | py | Python | trackpy.py | GJHanna/TrackPy | 0cccd6ed014ac823f8b24898d3888d88e66dc3a3 | [
"Apache-2.0"
] | null | null | null | trackpy.py | GJHanna/TrackPy | 0cccd6ed014ac823f8b24898d3888d88e66dc3a3 | [
"Apache-2.0"
] | null | null | null | trackpy.py | GJHanna/TrackPy | 0cccd6ed014ac823f8b24898d3888d88e66dc3a3 | [
"Apache-2.0"
] | null | null | null | from csv import reader, writer, DictReader, DictWriter
from argparse import ArgumentParser
from os import chdir, path, getcwd
from time import time, localtime, asctime, strftime
from datetime import datetime
from dateutil.relativedelta import relativedelta
parser = ArgumentParser()
parser.add_argument('dir', help='add task')
parser.add_argument('-a', '--add', dest='task', nargs="+", action='store', help='add task')
parser.add_argument('-l', '--list', action='store_true', help='list all tasks')
parser.add_argument('-ld', '--done', action='store_true', help='list all done tasks')
parser.add_argument('-lnd', '--notdone', action='store_true', help='list all not done tasks')
parser.add_argument('-co', '--checkout', type=int, dest='taskd', action='store', help='mark task as done')
class CSVManager(object):
fieldnames = ['task', 'status', 'added', 'finished', 'time']
csv_file = '.task.csv'
def __init__(self, row, l, ld, lnd, *args, **kwargs):
if ((not path.isfile(self.csv_file)) or (path.isfile(self.csv_file) == 0)):
self.__write()
self.row = row
self.l = l
self.ld = ld
self.lnd = lnd
def read(self):
try:
self.headers()
with open(self.csv_file, 'r') as f:
reader = DictReader(f, delimiter='\t')
i = 1
for row in reader:
number = str(i) + ') '
task, status, added, finished, time = row[self.fieldnames[0]], \
row[self.fieldnames[1]], \
asctime(localtime(float(row[self.fieldnames[2]]))), \
row[self.fieldnames[3]], \
row[self.fieldnames[4]]
if (finished != ''):
finished = asctime(localtime(float(finished)))
if (status == 'D'):
color = '\033[92m'
if (status == 'ND'):
color = '\033[91m'
if (self.ld and status == 'D'):
print(
number +
task.ljust(55 - len(number)) +
color + status.ljust(10) +
'\033[0m'
)
if (self.lnd and status == 'ND'):
print(
number +
task.ljust(55 - len(number)) +
color + status.ljust(10) +
'\033[0m'
)
if (self.l):
print(
number +
task.ljust(55 - len(number)) +
color + status.ljust(10) +
'\033[0m' + added.ljust(30) +
finished.ljust(30) +
'\033[93m' + time.ljust(5) +
'\033[0m'
)
i += 1
print('\n')
except ValueError as err:
print(err)
except OSError as err:
print(err)
except Exception as err:
print(err)
def __write(self):
with open(self.csv_file, 'w') as f:
writer = DictWriter(f, fieldnames=self.fieldnames , delimiter='\t')
writer.writeheader()
def append(self):
with open(self.csv_file, 'a') as f:
writer = DictWriter(f, fieldnames=self.fieldnames , delimiter='\t')
writer.writerow(self.row)
def headers(self):
if (self.ld or self.lnd ):
print(
'\n\033[94m' +
self.fieldnames[0].upper().ljust(55) +
self.fieldnames[1].upper().ljust(10) +
'\033[0m\n'
)
else:
print(
'\n\033[94m' +
self.fieldnames[0].upper().ljust(55) +
self.fieldnames[1].upper().ljust(10) +
self.fieldnames[2].upper().ljust(30) +
self.fieldnames[3].upper().ljust(30) +
self.fieldnames[4].upper().ljust(5) +
'\033[0m\n'
)
def append_rows(self):
with open(self.csv_file, 'a') as f:
rows = writer(f, delimiter='\t')
for datum in self.csv_data:
rows.writerow(datum)
def get_csv_data(self):
self.csv_data = []
with open(self.csv_file, 'r') as f:
rows = reader(f, delimiter='\t')
next(rows)
for row in rows:
self.csv_data.append(row)
def check_out(self, n):
try:
self.get_csv_data()
data_to_update = self.csv_data[n]
if (data_to_update[1] != 'D'):
data_to_update[1] = 'D'
data_to_update[3] = time()
start = datetime.fromtimestamp(float(data_to_update[2]))
end = datetime.fromtimestamp(float(data_to_update[3]))
duration = relativedelta(end, start)
output = ''
if (duration.years):
output += '{}y '.format(duration.years)
if (duration.months):
output += '{}m '.format(duration.months)
if (duration.days):
if (duration.days < 9):
output += '0{}d '.format(duration.days)
else:
output += '{}d '.format(duration.days)
if (duration.hours):
if (duration.hours < 9):
output += '0{}H '.format(duration.hours)
else:
output += '{}H '.format(duration.hours)
if (duration.minutes):
if (duration.minutes < 9):
output += '0{}M '.format(duration.minutes)
else:
output += '{}M '.format(duration.minutes)
data_to_update[4] = output
self.csv_data[n] = data_to_update
self.__write()
self.append_rows()
else:
raise TaskCheckedOutException
except ValueError as err:
exit('\033[91mFailed to check out\033[0m')
except IndexError as err:
exit("\033[91mTask doesn't exist in task file\033[0m")
except TaskCheckedOutException as err:
exit('\033[91mTask already checked-out\033[0m')
class TaskCheckedOutException(Exception):
def __init__(self):
pass
class Task(object):
def __init__(self, task, l, ld, lnd, co, *args, **kwargs):
self.task = task
self.co = co
self.__csv_manager = CSVManager(row=self.tokenize(), l=l, ld=ld, lnd=lnd)
def append(self):
self.__csv_manager.append()
def read(self):
self.__csv_manager.read()
def check_out(self):
if (self.co):
self.__csv_manager.check_out(self.co - 1)
def tokenize(self):
return {
'task' : self.task,
'status' : 'ND',
'added' : time(),
}
if __name__ == "__main__":
    try:
        args = parser.parse_args()
        # All task-file operations happen in the user-supplied directory.
        chdir(args.dir)
        # BUG FIX: the original compared the argument *list* to an int
        # (``args.task > 0``), which raises TypeError on Python 3 and made
        # every --add fail (the error was swallowed by the broad except
        # below). ``nargs="+"`` guarantees a non-empty list, so joining it
        # covers both the multi-word and the single-word case (a one-element
        # list joins to that element unchanged).
        task_append = ''
        if args.task:
            task_append = ' '.join(args.task)
        task = Task(task=task_append, l=args.list, ld=args.done,
                    lnd=args.notdone, co=args.taskd)
        if args.task:
            task.append()
            exit()
        if args.taskd:
            task.check_out()
            exit()
        if (args.list or args.done or args.notdone):
            task.read()
            exit()
    except Exception as err:
        # Best-effort CLI: report the error instead of a traceback.
        print(err)
acf2fb32fa776e4ac8d0c8c8883e0a5a4e6a5a8d | 4,186 | py | Python | benchmark/startQiskit_noisy1356.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit_noisy1356.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit_noisy1356.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=5
# total number=44
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
    """Construct the phase oracle O_f^± as an n-qubit circuit named "Zf".

    For every basis state |x> with f(x) == "1" the phase is flipped by
    conjugating a multi-controlled Z (mcu1 with lambda = pi) with X gates
    on the qubits where x has a 0 bit.
    """
    controls = QuantumRegister(n, "ofc")
    circuit = QuantumCircuit(controls, name="Zf")
    for state in range(2 ** n):
        bits = np.binary_repr(state, n)
        if f(bits) != "1":
            continue
        # Positions of zero bits: X-conjugation turns this basis state
        # into the all-ones control pattern and back again afterwards.
        zero_positions = [idx for idx, bit in enumerate(bits) if bit == "0"]
        for idx in zero_positions:
            circuit.x(controls[idx])
        if n >= 2:
            # mcu1(pi) on controls[1:] targeting controls[0] acts as a
            # multi-controlled Z gate.
            circuit.mcu1(pi, controls[1:], controls[0])
        for idx in zero_positions:
            circuit.x(controls[idx])
    return circuit
def make_circuit(n:int,f) -> QuantumCircuit:
    """Build a Grover-style search circuit over n qubits for oracle f.

    The ``# number=NN`` trailing comments are generator-assigned gate IDs
    and are kept for traceability. The gate sequence itself is generated
    output; its exact order is significant and must not be rearranged.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    # Initial layer: Hadamards put the register into uniform superposition.
    prog.h(input_qubit[0]) # number=3
    prog.h(input_qubit[1]) # number=4
    prog.h(input_qubit[2]) # number=5
    prog.h(input_qubit[3]) # number=6
    prog.h(input_qubit[4]) # number=21
    prog.cx(input_qubit[3],input_qubit[0]) # number=32
    prog.z(input_qubit[3]) # number=33
    prog.cx(input_qubit[3],input_qubit[0]) # number=34
    prog.rx(0.11938052083641225,input_qubit[1]) # number=36
    Zf = build_oracle(n, f)

    # Apply the oracle floor(sqrt(2^n) * pi / 4) times -- the standard
    # Grover iteration count for a single marked item.
    repeat = floor(sqrt(2 ** n) * pi / 4)
    for i in range(repeat):
        prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
        prog.h(input_qubit[0]) # number=1
        prog.rx(1.4765485471872026,input_qubit[2]) # number=35
        prog.h(input_qubit[1]) # number=2
        prog.h(input_qubit[2]) # number=7
        prog.h(input_qubit[3]) # number=8
        prog.cx(input_qubit[1],input_qubit[0]) # number=41
        prog.x(input_qubit[0]) # number=42
        prog.cx(input_qubit[1],input_qubit[0]) # number=43
        prog.x(input_qubit[4]) # number=30
        prog.x(input_qubit[1]) # number=10
        prog.x(input_qubit[2]) # number=11
        prog.rx(0.45238934211692994,input_qubit[3]) # number=38
        prog.y(input_qubit[1]) # number=39
        prog.rx(-2.5258404934861938,input_qubit[1]) # number=25
        prog.h(input_qubit[3]) # number=29
        prog.cx(input_qubit[0],input_qubit[3]) # number=22
        prog.x(input_qubit[3]) # number=23
        prog.cx(input_qubit[0],input_qubit[3]) # number=24

        if n>=2:
            # Multi-controlled phase flip used by the diffusion step.
            prog.mcu1(pi,input_qubit[1:],input_qubit[0])

        prog.x(input_qubit[0]) # number=13
        prog.rx(-0.0722566310325653,input_qubit[4]) # number=37
        prog.x(input_qubit[1]) # number=14
        prog.cx(input_qubit[0],input_qubit[2]) # number=26
        prog.x(input_qubit[2]) # number=27
        prog.h(input_qubit[4]) # number=40
        prog.cx(input_qubit[0],input_qubit[2]) # number=28
        prog.x(input_qubit[3]) # number=16
        prog.h(input_qubit[0]) # number=17
        prog.h(input_qubit[1]) # number=18
        prog.h(input_qubit[2]) # number=19
        prog.h(input_qubit[3]) # number=20
    # circuit end

    # Measure every qubit into its matching classical bit.
    for i in range(n):
        prog.measure(input_qubit[i], classical[i])

    return prog
if __name__ == '__main__':
    key = "00000"
    # Oracle marks exactly the all-zeros bitstring.
    f = lambda rep: str(int(rep == key))
    prog = make_circuit(5,f)
    # One simulated backend serves both execution and transpilation; the
    # original constructed two identical FakeVigo instances.
    backend = FakeVigo()

    sample_shot =7924
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    circuit1 = transpile(prog,backend,optimization_level=2)

    # Context manager guarantees the file is closed even if a print raises
    # (the original used open()/close() with no try/finally).
    with open("../data/startQiskit_noisy1356.csv","w") as writefile:
        print(info,file=writefile)
        print("results end", file=writefile)
        print(circuit1.depth(),file=writefile)
        print(circuit1,file=writefile)
acf2fbee4b216cb9f2a0b73993fd1c7042e2248d | 274 | py | Python | dnnlib/submission/__init__.py | thwin097/saby | 1f3d97a484788d6af703ed52fbee4cbb92290791 | [
"BSD-Source-Code"
] | 9,683 | 2019-11-26T20:55:38.000Z | 2022-03-31T22:02:27.000Z | dnnlib/submission/__init__.py | thwin097/saby | 1f3d97a484788d6af703ed52fbee4cbb92290791 | [
"BSD-Source-Code"
] | 88 | 2020-06-20T01:57:42.000Z | 2022-03-31T23:45:04.000Z | dnnlib/submission/__init__.py | thwin097/saby | 1f3d97a484788d6af703ed52fbee4cbb92290791 | [
"BSD-Source-Code"
] | 2,023 | 2019-12-12T05:39:51.000Z | 2022-03-31T02:26:09.000Z | # Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://nvlabs.github.io/stylegan2/license.html
from . import run_context
from . import submit
| 30.444444 | 70 | 0.762774 |
acf2fc5484a87e369221158dfd7ab55663a6e92f | 31,506 | py | Python | Lib/distutils/command/build_ext.py | dbrgn/RustPython | 6d371cea8a62d84dbbeec5a53cfd040f45899211 | [
"CC-BY-4.0",
"MIT"
] | 11,058 | 2018-05-29T07:40:06.000Z | 2022-03-31T11:38:42.000Z | Lib/distutils/command/build_ext.py | dbrgn/RustPython | 6d371cea8a62d84dbbeec5a53cfd040f45899211 | [
"CC-BY-4.0",
"MIT"
] | 2,105 | 2018-06-01T10:07:16.000Z | 2022-03-31T14:56:42.000Z | Lib/distutils/command/build_ext.py | dbrgn/RustPython | 6d371cea8a62d84dbbeec5a53cfd040f45899211 | [
"CC-BY-4.0",
"MIT"
] | 914 | 2018-07-27T09:36:14.000Z | 2022-03-31T19:56:34.000Z | """distutils.command.build_ext
Implements the Distutils 'build_ext' command, for building extension
modules (currently limited to C extensions, should accommodate C++
extensions ASAP)."""
import contextlib
import os
import re
import sys
from distutils.core import Command
from distutils.errors import *
from distutils.sysconfig import customize_compiler, get_python_version
from distutils.sysconfig import get_config_h_filename
from distutils.dep_util import newer_group
from distutils.extension import Extension
from distutils.util import get_platform
from distutils import log
from site import USER_BASE
# An extension name is just a dot-separated list of Python NAMEs (ie.
# the same as a fully-qualified module name).
# Matches e.g. "foo" and "foo.bar.baz"; rejects leading digits, empty
# components, and anything outside [a-zA-Z0-9_].
extension_name_re = re.compile \
    (r'^[a-zA-Z_][a-zA-Z_0-9]*(\.[a-zA-Z_][a-zA-Z_0-9]*)*$')
def show_compilers():
    """Print the list of available compiler types (for --help-compiler)."""
    # Imported lazily to avoid importing distutils.ccompiler at module load;
    # aliased so the real function is not shadowed by this wrapper's name.
    from distutils.ccompiler import show_compilers as _show_compilers
    _show_compilers()
class build_ext(Command):
    """Distutils command that compiles/links C/C++ extension modules."""

    description = "build C/C++ extensions (compile/link to build directory)"

    # XXX thoughts on how to deal with complex command-line options like
    # these, i.e. how to make it so fancy_getopt can suck them off the
    # command line and make it look like setup.py defined the appropriate
    # lists of tuples of what-have-you.
    # - each command needs a callback to process its command-line options
    # - Command.__init__() needs access to its share of the whole
    #   command line (must ultimately come from
    #   Distribution.parse_command_line())
    # - it then calls the current command class' option-parsing
    #   callback to deal with weird options like -D, which have to
    #   parse the option text and churn out some custom data
    #   structure
    # - that data structure (in this case, a list of 2-tuples)
    #   will then be present in the command object by the time
    #   we get to finalize_options() (i.e. the constructor
    #   takes care of both command-line and client options
    #   in between initialize_options() and finalize_options())

    # Suffix appended to help strings of list-valued options below.
    sep_by = " (separated by '%s')" % os.pathsep
    # (long-option, short-option, help-text) triples consumed by fancy_getopt.
    user_options = [
        ('build-lib=', 'b',
         "directory for compiled extension modules"),
        ('build-temp=', 't',
         "directory for temporary files (build by-products)"),
        ('plat-name=', 'p',
         "platform name to cross-compile for, if supported "
         "(default: %s)" % get_platform()),
        ('inplace', 'i',
         "ignore build-lib and put compiled extensions into the source " +
         "directory alongside your pure Python modules"),
        ('include-dirs=', 'I',
         "list of directories to search for header files" + sep_by),
        ('define=', 'D',
         "C preprocessor macros to define"),
        ('undef=', 'U',
         "C preprocessor macros to undefine"),
        ('libraries=', 'l',
         "external C libraries to link with"),
        ('library-dirs=', 'L',
         "directories to search for external C libraries" + sep_by),
        ('rpath=', 'R',
         "directories to search for shared C libraries at runtime"),
        ('link-objects=', 'O',
         "extra explicit link objects to include in the link"),
        ('debug', 'g',
         "compile/link with debugging information"),
        ('force', 'f',
         "forcibly build everything (ignore file timestamps)"),
        ('compiler=', 'c',
         "specify the compiler type"),
        ('parallel=', 'j',
         "number of parallel build jobs"),
        ('swig-cpp', None,
         "make SWIG create C++ files (default is C)"),
        ('swig-opts=', None,
         "list of SWIG command line options"),
        ('swig=', None,
         "path to the SWIG executable"),
        ('user', None,
         "add user include, library and rpath")
        ]

    # Flag options (no argument); everything else above takes a value.
    boolean_options = ['inplace', 'debug', 'force', 'swig-cpp', 'user']

    help_options = [
        ('help-compiler', None,
         "list available compilers", show_compilers),
        ]
    def initialize_options(self):
        """Set every command option to its pre-parse default.

        Called by distutils before option parsing; finalize_options()
        later fills in anything still unset.
        """
        self.extensions = None
        self.build_lib = None
        self.plat_name = None
        self.build_temp = None
        self.inplace = 0
        self.package = None

        self.include_dirs = None
        self.define = None
        self.undef = None
        self.libraries = None
        self.library_dirs = None
        self.rpath = None
        self.link_objects = None
        self.debug = None
        self.force = None
        self.compiler = None
        self.swig = None
        self.swig_cpp = None
        self.swig_opts = None
        self.user = None
        self.parallel = None
    def finalize_options(self):
        """Resolve all options: inherit from 'build', normalize string
        options into lists, and add platform-specific include/library dirs.
        """
        from distutils import sysconfig

        # Inherit unset options from the parent 'build' command.
        self.set_undefined_options('build',
                                   ('build_lib', 'build_lib'),
                                   ('build_temp', 'build_temp'),
                                   ('compiler', 'compiler'),
                                   ('debug', 'debug'),
                                   ('force', 'force'),
                                   ('parallel', 'parallel'),
                                   ('plat_name', 'plat_name'),
                                   )

        if self.package is None:
            self.package = self.distribution.ext_package

        self.extensions = self.distribution.ext_modules

        # Make sure Python's include directories (for Python.h, pyconfig.h,
        # etc.) are in the include search path.
        py_include = sysconfig.get_python_inc()
        plat_py_include = sysconfig.get_python_inc(plat_specific=1)
        if self.include_dirs is None:
            self.include_dirs = self.distribution.include_dirs or []
        if isinstance(self.include_dirs, str):
            self.include_dirs = self.include_dirs.split(os.pathsep)

        # If in a virtualenv, add its include directory
        # Issue 16116
        if sys.exec_prefix != sys.base_exec_prefix:
            self.include_dirs.append(os.path.join(sys.exec_prefix, 'include'))

        # Put the Python "system" include dir at the end, so that
        # any local include dirs take precedence.
        self.include_dirs.append(py_include)
        if plat_py_include != py_include:
            self.include_dirs.append(plat_py_include)

        self.ensure_string_list('libraries')
        self.ensure_string_list('link_objects')

        # Life is easier if we're not forever checking for None, so
        # simplify these options to empty lists if unset
        if self.libraries is None:
            self.libraries = []
        if self.library_dirs is None:
            self.library_dirs = []
        elif isinstance(self.library_dirs, str):
            self.library_dirs = self.library_dirs.split(os.pathsep)

        if self.rpath is None:
            self.rpath = []
        elif isinstance(self.rpath, str):
            self.rpath = self.rpath.split(os.pathsep)

        # for extensions under windows use different directories
        # for Release and Debug builds.
        # also Python's library directory must be appended to library_dirs
        if os.name == 'nt':
            # the 'libs' directory is for binary installs - we assume that
            # must be the *native* platform.  But we don't really support
            # cross-compiling via a binary install anyway, so we let it go.
            self.library_dirs.append(os.path.join(sys.exec_prefix, 'libs'))
            if sys.base_exec_prefix != sys.prefix:  # Issue 16116
                self.library_dirs.append(os.path.join(sys.base_exec_prefix, 'libs'))
            if self.debug:
                self.build_temp = os.path.join(self.build_temp, "Debug")
            else:
                self.build_temp = os.path.join(self.build_temp, "Release")

            # Append the source distribution include and library directories,
            # this allows distutils on windows to work in the source tree
            self.include_dirs.append(os.path.dirname(get_config_h_filename()))
            _sys_home = getattr(sys, '_home', None)
            if _sys_home:
                self.library_dirs.append(_sys_home)

            # Use the .lib files for the correct architecture
            if self.plat_name == 'win32':
                suffix = 'win32'
            else:
                # win-amd64 or win-ia64
                suffix = self.plat_name[4:]
            new_lib = os.path.join(sys.exec_prefix, 'PCbuild')
            if suffix:
                new_lib = os.path.join(new_lib, suffix)
            self.library_dirs.append(new_lib)

        # for extensions under Cygwin and AtheOS Python's library directory must be
        # appended to library_dirs
        if sys.platform[:6] == 'cygwin' or sys.platform[:6] == 'atheos':
            if sys.executable.startswith(os.path.join(sys.exec_prefix, "bin")):
                # building third party extensions
                self.library_dirs.append(os.path.join(sys.prefix, "lib",
                                                      "python" + get_python_version(),
                                                      "config"))
            else:
                # building python standard extensions
                self.library_dirs.append('.')

        # For building extensions with a shared Python library,
        # Python's library directory must be appended to library_dirs
        # See Issues: #1600860, #4366
        # NOTE(review): this branch is deliberately disabled via `False and`,
        # presumably a local patch for this Python port -- confirm intent
        # before re-enabling.
        if False and (sysconfig.get_config_var('Py_ENABLE_SHARED')):
            if not sysconfig.python_build:
                # building third party extensions
                self.library_dirs.append(sysconfig.get_config_var('LIBDIR'))
            else:
                # building python standard extensions
                self.library_dirs.append('.')

        # The argument parsing will result in self.define being a string, but
        # it has to be a list of 2-tuples.  All the preprocessor symbols
        # specified by the 'define' option will be set to '1'.  Multiple
        # symbols can be separated with commas.

        if self.define:
            defines = self.define.split(',')
            self.define = [(symbol, '1') for symbol in defines]

        # The option for macros to undefine is also a string from the
        # option parsing, but has to be a list.  Multiple symbols can also
        # be separated with commas here.
        if self.undef:
            self.undef = self.undef.split(',')

        if self.swig_opts is None:
            self.swig_opts = []
        else:
            self.swig_opts = self.swig_opts.split(' ')

        # Finally add the user include and library directories if requested
        if self.user:
            user_include = os.path.join(USER_BASE, "include")
            user_lib = os.path.join(USER_BASE, "lib")
            if os.path.isdir(user_include):
                self.include_dirs.append(user_include)
            if os.path.isdir(user_lib):
                self.library_dirs.append(user_lib)
                self.rpath.append(user_lib)

        if isinstance(self.parallel, str):
            try:
                self.parallel = int(self.parallel)
            except ValueError:
                raise DistutilsOptionError("parallel should be an integer")
    def run(self):
        """Build all configured extensions: set up and configure the
        CCompiler, then compile and link via build_extensions().
        """
        from distutils.ccompiler import new_compiler

        # 'self.extensions', as supplied by setup.py, is a list of
        # Extension instances.  See the documentation for Extension (in
        # distutils.extension) for details.
        #
        # For backwards compatibility with Distutils 0.8.2 and earlier, we
        # also allow the 'extensions' list to be a list of tuples:
        #    (ext_name, build_info)
        # where build_info is a dictionary containing everything that
        # Extension instances do except the name, with a few things being
        # differently named.  We convert these 2-tuples to Extension
        # instances as needed.

        if not self.extensions:
            return

        # If we were asked to build any C/C++ libraries, make sure that the
        # directory where we put them is in the library search path for
        # linking extensions.
        if self.distribution.has_c_libraries():
            build_clib = self.get_finalized_command('build_clib')
            self.libraries.extend(build_clib.get_library_names() or [])
            self.library_dirs.append(build_clib.build_clib)

        # Setup the CCompiler object that we'll use to do all the
        # compiling and linking
        self.compiler = new_compiler(compiler=self.compiler,
                                     verbose=self.verbose,
                                     dry_run=self.dry_run,
                                     force=self.force)
        customize_compiler(self.compiler)
        # If we are cross-compiling, init the compiler now (if we are not
        # cross-compiling, init would not hurt, but people may rely on
        # late initialization of compiler even if they shouldn't...)
        if os.name == 'nt' and self.plat_name != get_platform():
            self.compiler.initialize(self.plat_name)

        # And make sure that any compile/link-related options (which might
        # come from the command-line or from the setup script) are set in
        # that CCompiler object -- that way, they automatically apply to
        # all compiling and linking done here.
        if self.include_dirs is not None:
            self.compiler.set_include_dirs(self.include_dirs)
        if self.define is not None:
            # 'define' option is a list of (name,value) tuples
            for (name, value) in self.define:
                self.compiler.define_macro(name, value)
        if self.undef is not None:
            for macro in self.undef:
                self.compiler.undefine_macro(macro)
        if self.libraries is not None:
            self.compiler.set_libraries(self.libraries)
        if self.library_dirs is not None:
            self.compiler.set_library_dirs(self.library_dirs)
        if self.rpath is not None:
            self.compiler.set_runtime_library_dirs(self.rpath)
        if self.link_objects is not None:
            self.compiler.set_link_objects(self.link_objects)

        # Now actually compile and link everything.
        self.build_extensions()
def check_extensions_list(self, extensions):
    """Validate the 'extensions' list and upgrade legacy entries in place.

    Each element must be an Extension instance or an old-style
    (ext_name, build_info) 2-tuple; tuples are converted to Extension
    instances here.

    Raise DistutilsSetupError if the structure is invalid anywhere;
    just returns otherwise.
    """
    if not isinstance(extensions, list):
        raise DistutilsSetupError(
            "'ext_modules' option must be a list of Extension instances")

    for pos, entry in enumerate(extensions):
        if isinstance(entry, Extension):
            # Already the modern form; the Extension constructor is
            # assumed to have type-checked its own arguments.
            continue

        if not isinstance(entry, tuple) or len(entry) != 2:
            raise DistutilsSetupError(
                "each element of 'ext_modules' option must be an "
                "Extension instance or 2-tuple")

        ext_name, build_info = entry
        log.warn("old-style (ext_name, build_info) tuple found in "
                 "ext_modules for extension '%s' "
                 "-- please convert to Extension instance", ext_name)

        if not (isinstance(ext_name, str) and
                extension_name_re.match(ext_name)):
            raise DistutilsSetupError(
                "first element of each tuple in 'ext_modules' "
                "must be the extension name (a string)")

        if not isinstance(build_info, dict):
            raise DistutilsSetupError(
                "second element of each tuple in 'ext_modules' "
                "must be a dictionary (build info)")

        # The (ext_name, build_info) tuple checked out: build an
        # Extension instance from it.
        converted = Extension(ext_name, build_info['sources'])

        # Keys whose values map one-to-one onto Extension attributes.
        for key in ('include_dirs', 'library_dirs', 'libraries',
                    'extra_objects', 'extra_compile_args',
                    'extra_link_args'):
            value = build_info.get(key)
            if value is not None:
                setattr(converted, key, value)

        # Same semantics as an Extension attribute, different dict key.
        converted.runtime_library_dirs = build_info.get('rpath')
        if 'def_file' in build_info:
            log.warn("'def_file' element of build info dict "
                     "no longer supported")

        # 'macros' is split between define_macros (2-tuples) and
        # undef_macros (1-tuples).
        macros = build_info.get('macros')
        if macros:
            converted.define_macros = []
            converted.undef_macros = []
            for macro in macros:
                if not (isinstance(macro, tuple) and len(macro) in (1, 2)):
                    raise DistutilsSetupError(
                        "'macros' element of build info dict "
                        "must be 1- or 2-tuple")
                if len(macro) == 1:
                    converted.undef_macros.append(macro[0])
                else:
                    converted.define_macros.append(macro)

        extensions[pos] = converted
def get_source_files(self):
    """Return a flat list of the source files of every extension."""
    self.check_extensions_list(self.extensions)
    # Header files are not tracked here -- only the listed sources.
    return [src for extension in self.extensions for src in extension.sources]
def get_outputs(self):
    """Return the full paths of all extension modules this command builds.

    The 'inplace' flag is ignored: paths are reported as if everything
    goes into the "build" tree.
    """
    # Sanity-check 'extensions' -- this may run in a different pass
    # than build_extensions(), so the list may not be validated yet.
    self.check_extensions_list(self.extensions)
    return [self.get_ext_fullpath(ext.name) for ext in self.extensions]
def build_extensions(self):
    """Compile and link every configured extension, serially or in parallel."""
    # Validate (and possibly convert) the 'extensions' list first.
    self.check_extensions_list(self.extensions)
    builder = (self._build_extensions_parallel if self.parallel
               else self._build_extensions_serial)
    builder()
def _build_extensions_parallel(self):
workers = self.parallel
if self.parallel is True:
workers = os.cpu_count() # may return None
try:
from concurrent.futures import ThreadPoolExecutor
except ImportError:
workers = None
if workers is None:
self._build_extensions_serial()
return
with ThreadPoolExecutor(max_workers=workers) as executor:
futures = [executor.submit(self.build_extension, ext)
for ext in self.extensions]
for ext, fut in zip(self.extensions, futures):
with self._filter_build_errors(ext):
fut.result()
def _build_extensions_serial(self):
for ext in self.extensions:
with self._filter_build_errors(ext):
self.build_extension(ext)
@contextlib.contextmanager
def _filter_build_errors(self, ext):
    """Context manager that downgrades build failures of optional extensions.

    Compiler and distutils errors are re-raised unless 'ext.optional'
    is set, in which case they are reported as a warning and swallowed.
    """
    try:
        yield
    except (CCompilerError, DistutilsError, CompileError) as exc:
        if not ext.optional:
            raise
        self.warn('building extension "%s" failed: %s' % (ext.name, exc))
def build_extension(self, ext):
    """Compile and link a single Extension into its target shared object.

    Raises DistutilsSetupError if 'ext.sources' is missing or malformed.
    The build is skipped entirely when the target file is newer than
    every source/dependency (unless self.force is set).
    """
    sources = ext.sources
    if sources is None or not isinstance(sources, (list, tuple)):
        raise DistutilsSetupError(
            "in 'ext_modules' option (extension '%s'), "
            "'sources' must be present and must be "
            "a list of source filenames" % ext.name)
    # Copy so later mutation (SWIG substitution) does not touch ext.sources.
    sources = list(sources)

    ext_path = self.get_ext_fullpath(ext.name)
    depends = sources + ext.depends
    if not (self.force or newer_group(depends, ext_path, 'newer')):
        log.debug("skipping '%s' extension (up-to-date)", ext.name)
        return
    else:
        log.info("building '%s' extension", ext.name)

    # First, scan the sources for SWIG definition files (.i), run
    # SWIG on 'em to create .c files, and modify the sources list
    # accordingly.
    sources = self.swig_sources(sources, ext)

    # Next, compile the source code to object files.

    # XXX not honouring 'define_macros' or 'undef_macros' -- the
    # CCompiler API needs to change to accommodate this, and I
    # want to do one thing at a time!

    # Two possible sources for extra compiler arguments:
    #   - 'extra_compile_args' in Extension object
    #   - CFLAGS environment variable (not particularly
    #     elegant, but people seem to expect it and I
    #     guess it's useful)
    # The environment variable should take precedence, and
    # any sensible compiler will give precedence to later
    # command line args.  Hence we combine them in order:
    extra_args = ext.extra_compile_args or []

    # undef macros are represented as 1-tuples alongside the
    # (name, value) define 2-tuples.
    macros = ext.define_macros[:]
    for undef in ext.undef_macros:
        macros.append((undef,))

    objects = self.compiler.compile(sources,
                                    output_dir=self.build_temp,
                                    macros=macros,
                                    include_dirs=ext.include_dirs,
                                    debug=self.debug,
                                    extra_postargs=extra_args,
                                    depends=ext.depends)

    # XXX outdated variable, kept here in case third-part code
    # needs it.
    self._built_objects = objects[:]

    # Now link the object files together into a "shared object" --
    # of course, first we have to figure out all the other things
    # that go into the mix.
    if ext.extra_objects:
        objects.extend(ext.extra_objects)
    extra_args = ext.extra_link_args or []

    # Detect target language, if not provided
    language = ext.language or self.compiler.detect_language(sources)

    self.compiler.link_shared_object(
        objects, ext_path,
        libraries=self.get_libraries(ext),
        library_dirs=ext.library_dirs,
        runtime_library_dirs=ext.runtime_library_dirs,
        extra_postargs=extra_args,
        export_symbols=self.get_export_symbols(ext),
        debug=self.debug,
        build_temp=self.build_temp,
        target_lang=language)
def swig_sources(self, sources, extension):
    """Walk the list of source files in 'sources', looking for SWIG
    interface (.i) files.  Run SWIG on all that are found, and
    return a modified 'sources' list with SWIG source files replaced
    by the generated C (or C++) files.
    """
    new_sources = []
    swig_sources = []
    swig_targets = {}

    # XXX this drops generated C/C++ files into the source tree, which
    # is fine for developers who want to distribute the generated
    # source -- but there should be an option to put SWIG output in
    # the temp dir.

    if self.swig_cpp:
        log.warn("--swig-cpp is deprecated - use --swig-opts=-c++")

    wants_cpp = (self.swig_cpp or ('-c++' in self.swig_opts)
                 or ('-c++' in extension.swig_opts))
    target_ext = '.cpp' if wants_cpp else '.c'

    for source in sources:
        base, ext = os.path.splitext(source)
        if ext == ".i":  # SWIG interface file
            wrapped = base + '_wrap' + target_ext
            new_sources.append(wrapped)
            swig_sources.append(source)
            swig_targets[source] = wrapped
        else:
            new_sources.append(source)

    if not swig_sources:
        return new_sources

    swig_cmd = [self.swig or self.find_swig(), "-python"]
    swig_cmd.extend(self.swig_opts)
    if self.swig_cpp:
        swig_cmd.append("-c++")

    # Extension-level options must not override command-line arguments.
    if not self.swig_opts:
        swig_cmd.extend(extension.swig_opts)

    for source in swig_sources:
        target = swig_targets[source]
        log.info("swigging %s to %s", source, target)
        self.spawn(swig_cmd + ["-o", target, source])

    return new_sources
def find_swig(self):
    """Return the name of the SWIG executable.  On Unix, this is
    just "swig" -- it should be in the PATH.  Tries a bit harder on
    Windows.
    """
    if os.name == "posix":
        return "swig"
    if os.name == "nt":
        # Probe SWIG's conventional install directories; if none match,
        # fall back to assuming it is on the PATH, as on Unix.
        for version in ("1.3", "1.2", "1.1"):
            candidate = os.path.join("c:\\swig%s" % version, "swig.exe")
            if os.path.isfile(candidate):
                return candidate
        return "swig.exe"
    raise DistutilsPlatformError(
        "I don't know how to find (much less run) SWIG "
        "on platform '%s'" % os.name)
# -- Name generators -----------------------------------------------
# (extension names, filenames, whatever)
def get_ext_fullpath(self, ext_name):
    """Returns the path of the filename for a given extension.

    The file is located in `build_lib` or directly in the package
    (inplace option).
    """
    fullname = self.get_ext_fullname(ext_name)
    parts = fullname.split('.')
    filename = self.get_ext_filename(parts[-1])

    if not self.inplace:
        # Regular build: build_dir/package/path/filename
        relative = os.path.join(*parts[:-1] + [filename])
        return os.path.join(self.build_lib, relative)

    # Inplace build: locate the package directory through the build_py
    # command and drop the module straight into it.
    package = '.'.join(parts[:-1])
    build_py = self.get_finalized_command('build_py')
    package_dir = os.path.abspath(build_py.get_package_dir(package))
    return os.path.join(package_dir, filename)
def get_ext_fullname(self, ext_name):
    """Returns the fullname of a given extension name.

    Adds the `package.` prefix"""
    return ext_name if self.package is None else self.package + '.' + ext_name
def get_ext_filename(self, ext_name):
    r"""Convert the name of an extension (eg. "foo.bar") into the name
    of the file from which it will be loaded (eg. "foo/bar.so", or
    "foo\bar.pyd").
    """
    from distutils.sysconfig import get_config_var
    suffix = get_config_var('EXT_SUFFIX')
    return os.path.join(*ext_name.split('.')) + suffix
def get_export_symbols(self, ext):
    """Return the list of symbols that a shared extension has to export.

    This either uses 'ext.export_symbols' or, if it's not provided,
    "PyInit_" + module_name.  Only relevant on Windows, where the .pyd
    file (DLL) must export the module "PyInit_" function.
    """
    # rpartition yields the last dotted component even when no dot exists.
    initfunc_name = "PyInit_" + ext.name.rpartition('.')[2]
    if initfunc_name not in ext.export_symbols:
        ext.export_symbols.append(initfunc_name)
    return ext.export_symbols
def get_libraries(self, ext):
    """Return the list of libraries to link against when building a
    shared extension.  On most platforms, this is just 'ext.libraries';
    on Windows, we add the Python library (eg. python20.dll).
    """
    # The python library is always needed on Windows.  For MSVC, this
    # is redundant, since the library is mentioned in a pragma in
    # pyconfig.h that MSVC groks.  The other Windows compilers all seem
    # to need it mentioned explicitly, though, so that's what we do.
    # Append '_d' to the python import library on debug builds.
    if sys.platform == "win32":
        from distutils._msvccompiler import MSVCCompiler
        if not isinstance(self.compiler, MSVCCompiler):
            template = "python%d%d"
            if self.debug:
                template = template + '_d'
            # major/minor version extracted from sys.hexversion's top bytes.
            pythonlib = (template %
                   (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
            # don't extend ext.libraries, it may be shared with other
            # extensions, it is a reference to the original list
            return ext.libraries + [pythonlib]
        else:
            return ext.libraries
    elif sys.platform[:6] == "cygwin":
        template = "python%d.%d"
        pythonlib = (template %
               (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
        # don't extend ext.libraries, it may be shared with other
        # extensions, it is a reference to the original list
        return ext.libraries + [pythonlib]
    elif sys.platform[:6] == "atheos":
        from distutils import sysconfig
        template = "python%d.%d"
        pythonlib = (template %
               (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
        # Get SHLIBS from Makefile
        extra = []
        for lib in sysconfig.get_config_var('SHLIBS').split():
            if lib.startswith('-l'):
                extra.append(lib[2:])
            else:
                extra.append(lib)
        # don't extend ext.libraries, it may be shared with other
        # extensions, it is a reference to the original list
        return ext.libraries + [pythonlib, "m"] + extra
    elif sys.platform == 'darwin':
        # Don't use the default code below
        return ext.libraries
    elif sys.platform[:3] == 'aix':
        # Don't use the default code below
        return ext.libraries
    else:
        from distutils import sysconfig
        # NOTE(review): this branch is deliberately dead ('False and ...'):
        # extensions are not linked against libpython here even on shared
        # builds -- presumably intentional; confirm upstream rationale
        # before "fixing" the condition.
        if False and sysconfig.get_config_var('Py_ENABLE_SHARED'):
            pythonlib = 'python{}.{}{}'.format(
                sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff,
                sysconfig.get_config_var('ABIFLAGS'))
            return ext.libraries + [pythonlib]
        else:
            return ext.libraries
| 41.674603 | 86 | 0.581604 |
acf2fc9b6837f609c1388cf8a5cada5cc6f6e353 | 1,924 | py | Python | kite-go/navigation/offline/experiments/quip-docs/docs.py | kiteco/kiteco-public | 74aaf5b9b0592153b92f7ed982d65e15eea885e3 | [
"BSD-3-Clause"
] | 17 | 2022-01-10T11:01:50.000Z | 2022-03-25T03:21:08.000Z | kite-go/navigation/offline/experiments/quip-docs/docs.py | kiteco/kiteco-public | 74aaf5b9b0592153b92f7ed982d65e15eea885e3 | [
"BSD-3-Clause"
] | 1 | 2022-01-13T14:28:47.000Z | 2022-01-13T14:28:47.000Z | kite-go/navigation/offline/experiments/quip-docs/docs.py | kiteco/kiteco-public | 74aaf5b9b0592153b92f7ed982d65e15eea885e3 | [
"BSD-3-Clause"
] | 7 | 2022-01-07T03:58:10.000Z | 2022-03-24T07:38:20.000Z | import argparse
import json
import logging
import os
import time
from typing import Dict, NamedTuple
import requests
def main() -> None:
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
args = parse_args()
with open(args.relevant, "r") as fp:
relevant = json.load(fp)
quip = Quip(os.environ["QUIP_AUTH_TOKEN"])
suffixes = {v for vs in relevant.values() for v in vs}
logging.info(f"Found {len(suffixes)} suffixes")
titles: Dict[str, str] = {}
for suffix in suffixes:
logging.info(f"Getting {suffix}")
time.sleep(1)
try:
doc = quip.get(suffix)
titles[suffix] = doc.title
except requests.exceptions.HTTPError as e:
logging.warn(f"Skipping {suffix} because of error: {e}")
continue
with open(f"{args.docs}/{suffix}.py", "w") as fp:
fp.write(f'"""\n{doc.contents}\n"""')
logging.info(f"Retrieved {len(titles)} documents")
with open(args.titles, "w") as fp:
json.dump(titles, fp, indent=2)
def parse_args() -> argparse.Namespace:
    """Parse the --relevant / --docs / --titles command-line options."""
    parser = argparse.ArgumentParser()
    for option in ("--relevant", "--docs", "--titles"):
        parser.add_argument(option, type=str)
    return parser.parse_args()
class Doc(NamedTuple):
    """A Quip document: its title plus its contents (the API's 'html' field)."""
    title: str
    contents: str
class Quip:
    """Minimal authenticated client for the Quip REST API."""

    def __init__(self, token: str) -> None:
        self.base = "https://platform.quip.com/1"
        self.token = token

    def get(self, thread_id: str) -> Doc:
        """Fetch one thread and return it as a Doc; raises on HTTP errors."""
        url = f"{self.base}/threads/{thread_id}"
        headers = {"Authorization": f"Bearer {self.token}"}
        resp = requests.get(url, headers=headers)
        resp.raise_for_status()
        payload = resp.json()
        return Doc(payload["thread"]["title"], payload["html"])
if __name__ == "__main__":
main()
| 26.356164 | 68 | 0.596674 |
acf2fdb4af72efe56cd8071ec1edc0be2b42ddb7 | 283 | py | Python | Others/code_festival/code-festival-2017-qualc/b.py | KATO-Hiro/AtCoder | cbbdb18e95110b604728a54aed83a6ed6b993fde | [
"CC0-1.0"
] | 2 | 2020-06-12T09:54:23.000Z | 2021-05-04T01:34:07.000Z | Others/code_festival/code-festival-2017-qualc/b.py | KATO-Hiro/AtCoder | cbbdb18e95110b604728a54aed83a6ed6b993fde | [
"CC0-1.0"
] | 961 | 2020-06-23T07:26:22.000Z | 2022-03-31T21:34:52.000Z | Others/code_festival/code-festival-2017-qualc/b.py | KATO-Hiro/AtCoder | cbbdb18e95110b604728a54aed83a6ed6b993fde | [
"CC0-1.0"
] | null | null | null | # -*- coding: utf-8 -*-
def main():
    """Read n and a list of ints; print 3**n minus 2**(number of even values)."""
    n = int(input())
    values = list(map(int, input().split()))
    # Each even element doubles the count of excluded combinations.
    even_count = sum(1 for v in values if v % 2 == 0)
    print(3 ** n - 2 ** even_count)
if __name__ == '__main__':
main()
| 15.722222 | 40 | 0.452297 |
acf2fe7dec2139601c0a5e3bf221f530699eebcd | 7,526 | py | Python | tests/protein/test_graphs.py | jadechip/graphein | 2a69b9d1b39c48b7e3b2c806dafcf4c469d5ff21 | [
"MIT"
] | 342 | 2020-03-11T18:29:46.000Z | 2022-03-31T14:30:16.000Z | tests/protein/test_graphs.py | jadechip/graphein | 2a69b9d1b39c48b7e3b2c806dafcf4c469d5ff21 | [
"MIT"
] | 126 | 2020-05-22T19:54:23.000Z | 2022-03-31T17:16:49.000Z | tests/protein/test_graphs.py | jadechip/graphein | 2a69b9d1b39c48b7e3b2c806dafcf4c469d5ff21 | [
"MIT"
] | 51 | 2020-04-25T06:45:25.000Z | 2022-03-09T15:22:34.000Z | """Tests for graphein.protein.graphs"""
from functools import partial
from pathlib import Path
import networkx as nx
import pytest
from graphein.protein.config import ProteinGraphConfig
from graphein.protein.edges.distance import (
add_aromatic_interactions,
add_aromatic_sulphur_interactions,
add_cation_pi_interactions,
add_delaunay_triangulation,
add_distance_threshold,
add_disulfide_interactions,
add_hydrogen_bond_interactions,
add_hydrophobic_interactions,
add_ionic_interactions,
add_k_nn_edges,
add_peptide_bonds,
)
from graphein.protein.features.nodes.aaindex import aaindex1
from graphein.protein.features.nodes.amino_acid import (
expasy_protein_scale,
meiler_embedding,
)
from graphein.protein.features.nodes.dssp import (
asa,
phi,
psi,
rsa,
secondary_structure,
)
from graphein.protein.features.sequence.embeddings import (
biovec_sequence_embedding,
esm_residue_embedding,
esm_sequence_embedding,
)
from graphein.protein.features.sequence.sequence import molecular_weight
from graphein.protein.graphs import construct_graph, read_pdb_to_dataframe
DATA_PATH = Path(__file__).resolve().parent / "test_data" / "4hhb.pdb"
def generate_graph():
    """Generate PDB network.

    This is a helper function.
    """
    pdb_file = str(DATA_PATH)
    return construct_graph(pdb_path=pdb_file)
@pytest.fixture(scope="module")
def net():
    """Protein graph fixture built once per module from 4hhb.pdb (via DATA_PATH)."""
    return generate_graph()
@pytest.fixture()
def pdb_df():
    """DataFrame fixture parsed from 4hhb.pdb (via DATA_PATH)."""
    return read_pdb_to_dataframe(DATA_PATH)
def test_nodes_are_strings(net):
    """
    Checks to make sure that the nodes are a string.

    Note: despite the original claim of sampling, this iterates over
    every node in the graph.
    """
    for n in net.nodes():
        assert isinstance(n, str)
# Example-based Graph Construction test
def test_construct_graph():
    """Example-based test that graph construction works correctly.

    Uses 4hhb PDB file as an example test case.
    """
    file_path = Path(__file__).parent / "test_data/4hhb.pdb"
    G = construct_graph(pdb_path=str(file_path))
    assert isinstance(G, nx.Graph)
    assert len(G) == 574
    # Count edges flagged as peptide bonds via their 'kind' annotation.
    peptide_bond_count = sum(
        1 for _, _, d in G.edges(data=True) if d["kind"] == {"peptide_bond"}
    )
    assert peptide_bond_count == 570
def test_chain_selection():
    """Example-based test that chain selection works correctly.

    Uses 4hhb PDB file as an example test case.
    """
    file_path = Path(__file__).parent / "test_data/4hhb.pdb"

    # Default construction keeps all four chains, in metadata and nodes.
    G = construct_graph(pdb_path=str(file_path))
    assert G.graph["chain_ids"] == ["A", "B", "C", "D"]
    for _, d in G.nodes(data=True):
        assert d["chain_id"] in ["A", "B", "C", "D"]

    # Restricting the selection must restrict both metadata and nodes.
    G = construct_graph(pdb_path=str(file_path), chain_selection="AD")
    assert G.graph["chain_ids"] == ["A", "D"]
    for _, d in G.nodes(data=True):
        assert d["chain_id"] in ["A", "D"]
# Edge construction tests
# Removed - testing with GetContacts as a dependency is not a priority right now
"""
def test_intramolecular_edges():
Example-based test that intramolecualr edge construction using GetContacts works correctly.
Uses 4hhb PDB file as an example test case.
file_path = Path(__file__).parent / "test_data/4hhb.pdb"
edge_functions = {
"edge_construction_functions": [
hydrogen_bond,
hydrophobic,
peptide_bonds,
pi_cation,
pi_stacking,
salt_bridge,
t_stacking,
van_der_waals,
]
}
config = ProteinGraphConfig(**edge_functions)
G = construct_graph(pdb_path=str(file_path), config=config)
# Todo complete
"""
def test_distance_edges():
    """Example-based test that distance-based edge construction works correctly

    Uses 4hhb PDB file as an example test case.
    Currently only checks that construction with these edge functions
    does not raise; no edge assertions yet.
    """
    file_path = Path(__file__).parent / "test_data/4hhb.pdb"
    edge_functions = {
        "edge_construction_functions": [
            partial(add_k_nn_edges, k=5, long_interaction_threshold=10),
            add_hydrophobic_interactions,
            # add_aromatic_interactions, # Todo removed for now as ring centroids require precomputing
            add_aromatic_sulphur_interactions,
            add_delaunay_triangulation,
            add_cation_pi_interactions,
            add_peptide_bonds,
            add_hydrogen_bond_interactions,
            add_disulfide_interactions,
            add_ionic_interactions,
            partial(
                add_distance_threshold,
                threshold=12,
                long_interaction_threshold=10,
            ),
        ]
    }
    config = ProteinGraphConfig(**edge_functions)
    G = construct_graph(pdb_path=str(file_path), config=config)
    # Todo complete
# Featurisation tests
def test_node_features():
    """Smoke test for node featurisers on a residue graph.

    Only graph construction is exercised; the per-node feature assertions
    below are commented out, so the loop body is a no-op.
    """
    # Todo this test requires attention
    # Tests node featurisers for a residue graph:
    # Amino acid features, ESM embedding, DSSP features, aaindex features
    file_path = Path(__file__).parent / "test_data/4hhb.pdb"
    node_feature_functions = {
        "node_metadata_functions": [
            expasy_protein_scale,  # Todo we need to refactor node data assignment flow
            meiler_embedding,
            # rsa,
            # asa,
            # phi,
            # psi,
            # secondary_structure,
            # partial(aaindex1, accession="FAUJ880111"),
        ]
    }
    config = ProteinGraphConfig(**node_feature_functions)
    G = construct_graph(pdb_path=str(file_path), config=config)

    # Check for existence of features
    for n, d in G.nodes(data=True):
        # assert "meiler_embedding" in d  # Todo these functions return pd.Series, rather than adding to the node
        # assert expasy_protein_scale in d
        # assert "rsa" in d
        # assert "asa" in d
        # assert "phi" in d
        # assert "psi" in d
        # assert "secondary_structure" in d
        continue
@pytest.mark.skip(reason="Pretrained model download is large.")
def test_sequence_features():
    """Test sequence featurisers (BioVec embedding, molecular weight) on a graph.

    Skipped by default because the pretrained model download is large.
    """
    # Tests sequence featurisers for a residue graph:
    # ESM and BioVec embeddings, propy and sequence descriptors
    file_path = Path(__file__).parent / "test_data/4hhb.pdb"
    sequence_feature_functions = {
        "graph_metadata_functions": [
            # esm_sequence_embedding,
            # esm_residue_embedding,
            biovec_sequence_embedding,
            molecular_weight,
        ]
    }
    config = ProteinGraphConfig(**sequence_feature_functions)
    G = construct_graph(pdb_path=str(file_path), config=config)

    # Check for existence on sequence-based features as node-level features
    # for n, d in G.nodes(data=True):
    # Todo this can probably be improved.
    # This only checks for the existence and shape of the esm_embedding for each node
    # assert "esm_embedding" in d
    # assert len(d["esm_embedding"]) == 1280

    # Check for existence of sequence-based features as Graph-level features
    for chain in G.graph["chain_ids"]:
        assert f"sequence_{chain}" in G.graph
        # assert f"esm_embedding_{chain}" in G.graph
        assert f"biovec_embedding_{chain}" in G.graph
        assert f"molecular_weight_{chain}" in G.graph
| 31.358333 | 112 | 0.675658 |
acf2ff027d36adc7cdd95b4ce6a97c6fc9bc68fe | 79,265 | py | Python | python/ccxt/ftx.py | gpearson100/ccxt | ba7850b45eb6d5c98defbacb6bdf35bb92678eef | [
"MIT"
] | 2 | 2019-07-15T22:39:54.000Z | 2021-05-15T16:13:00.000Z | python/ccxt/ftx.py | gpearson100/ccxt | ba7850b45eb6d5c98defbacb6bdf35bb92678eef | [
"MIT"
] | 1 | 2021-08-23T16:27:34.000Z | 2021-08-23T16:27:34.000Z | python/ccxt/ftx.py | gpearson100/ccxt | ba7850b45eb6d5c98defbacb6bdf35bb92678eef | [
"MIT"
] | 2 | 2020-09-08T01:41:24.000Z | 2021-04-30T00:07:59.000Z | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
# -----------------------------------------------------------------------------
try:
basestring # Python 3
except NameError:
basestring = str # Python 2
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import CancelPending
from ccxt.base.errors import DuplicateOrderId
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class ftx(Exchange):
def describe(self):
return self.deep_extend(super(ftx, self).describe(), {
'id': 'ftx',
'name': 'FTX',
'countries': ['HK'],
'rateLimit': 50,
'certified': True,
'pro': True,
'hostname': 'ftx.com', # or ftx.us
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/67149189-df896480-f2b0-11e9-8816-41593e17f9ec.jpg',
'www': 'https://ftx.com',
'api': {
'public': 'https://{hostname}',
'private': 'https://{hostname}',
},
'doc': 'https://github.com/ftexchange/ftx',
'fees': 'https://ftexchange.zendesk.com/hc/en-us/articles/360024479432-Fees',
'referral': {
'url': 'https://ftx.com/#a=ccxt',
'discount': 0.05,
},
},
'has': {
'cancelAllOrders': True,
'cancelOrder': True,
'createOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchClosedOrders': False,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchFundingFees': False,
'fetchMarkets': True,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchPositions': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchTrades': True,
'fetchTradingFees': True,
'fetchWithdrawals': True,
'setLeverage': True,
'withdraw': True,
},
'timeframes': {
'15s': '15',
'1m': '60',
'5m': '300',
'15m': '900',
'1h': '3600',
'4h': '14400',
'1d': '86400',
},
'api': {
'public': {
'get': [
'coins',
# markets
'markets',
'markets/{market_name}',
'markets/{market_name}/orderbook', # ?depth={depth}
'markets/{market_name}/trades', # ?limit={limit}&start_time={start_time}&end_time={end_time}
'markets/{market_name}/candles', # ?resolution={resolution}&limit={limit}&start_time={start_time}&end_time={end_time}
# futures
'futures',
'futures/{future_name}',
'futures/{future_name}/stats',
'funding_rates',
'indexes/{index_name}/weights',
'expired_futures',
'indexes/{market_name}/candles', # ?resolution={resolution}&limit={limit}&start_time={start_time}&end_time={end_time}
# wallet
'wallet/coins',
# leverage tokens
'lt/tokens',
'lt/{token_name}',
# etfs
'etfs/rebalance_info',
# options
'options/requests',
'options/trades',
'options/historical_volumes/BTC',
'stats/24h_options_volume',
'options/open_interest/BTC',
'options/historical_open_interest/BTC',
# spot margin
'spot_margin/history',
'spot_margin/borrow_summary',
# nfts
'nft/nfts',
'nft/{nft_id}',
'nft/{nft_id}/trades',
'nft/all_trades',
'nft/{nft_id}/account_info',
'nft/collections',
# ftx pay
'ftxpay/apps/{user_specific_id}/details',
'stats/latency_stats',
],
'post': [
'ftxpay/apps/{user_specific_id}/orders',
],
},
'private': {
'get': [
# subaccounts
'subaccounts',
'subaccounts/{nickname}/balances',
# account
'account',
'positions',
# wallet
'wallet/balances',
'wallet/all_balances',
'wallet/deposit_address/{coin}', # ?method={method}
'wallet/deposits',
'wallet/withdrawals',
'wallet/airdrops',
'wallet/withdrawal_fee',
'wallet/saved_addresses',
# orders
'orders', # ?market={market}
'orders/history', # ?market={market}
'orders/{order_id}',
'orders/by_client_id/{client_order_id}',
# conditional orders
'conditional_orders', # ?market={market}
'conditional_orders/{conditional_order_id}/triggers',
'conditional_orders/history', # ?market={market}
'fills', # ?market={market}
'funding_payments',
# leverage tokens
'lt/balances',
'lt/creations',
'lt/redemptions',
# options
'options/my_requests',
'options/requests/{request_id}/quotes',
'options/my_quotes',
'options/account_info',
'options/positions',
'options/fills',
# staking
'staking/stakes',
'staking/unstake_requests',
'staking/balances',
'staking/staking_rewards',
# otc
'otc/quotes/{quoteId}',
# spot margin
'spot_margin/borrow_rates',
'spot_margin/lending_rates',
'spot_margin/market_info', # ?market={market}
'spot_margin/borrow_history',
'spot_margin/lending_history',
'spot_margin/offers',
'spot_margin/lending_info',
# nfts
'nft/balances',
'nft/bids',
'nft/deposits',
'nft/withdrawals',
'nft/fills',
'nft/gallery/{gallery_id}',
'nft/gallery_settings',
],
'post': [
# subaccounts
'subaccounts',
'subaccounts/update_name',
'subaccounts/transfer',
# account
'account/leverage',
# wallet
'wallet/withdrawals',
'wallet/saved_addresses',
# orders
'orders',
'conditional_orders',
'orders/{order_id}/modify',
'orders/by_client_id/{client_order_id}/modify',
'conditional_orders/{order_id}/modify',
# leverage tokens
'lt/{token_name}/create',
'lt/{token_name}/redeem',
# options
'options/requests',
'options/requests/{request_id}/quotes',
'options/quotes/{quote_id}/accept',
# staking
'staking/unstake_requests',
'srm_stakes/stakes',
# otc
'otc/quotes/{quote_id}/accept',
'otc/quotes',
# spot margin
'spot_margin/offers',
# nfts
'nft/offer',
'nft/buy',
'nft/auction',
'nft/edit_auction',
'nft/cancel_auction',
'nft/bids',
'nft/redeem',
'nft/gallery_settings',
# ftx pay
'ftxpay/apps/{user_specific_id}/orders',
],
'delete': [
# subaccounts
'subaccounts',
# wallet
'wallet/saved_addresses/{saved_address_id}',
# orders
'orders/{order_id}',
'orders/by_client_id/{client_order_id}',
'orders',
'conditional_orders/{order_id}',
# options
'options/requests/{request_id}',
'options/quotes/{quote_id}',
# staking
'staking/unstake_requests/{request_id}',
],
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'maker': self.parse_number('0.0002'),
'taker': self.parse_number('0.0007'),
'tiers': {
'taker': [
[self.parse_number('0'), self.parse_number('0.0007')],
[self.parse_number('2000000'), self.parse_number('0.0006')],
[self.parse_number('5000000'), self.parse_number('0.00055')],
[self.parse_number('10000000'), self.parse_number('0.0005')],
[self.parse_number('25000000'), self.parse_number('0.045')],
[self.parse_number('50000000'), self.parse_number('0.0004')],
],
'maker': [
[self.parse_number('0'), self.parse_number('0.0002')],
[self.parse_number('2000000'), self.parse_number('0.00015')],
[self.parse_number('5000000'), self.parse_number('0.0001')],
[self.parse_number('10000000'), self.parse_number('0.00005')],
[self.parse_number('25000000'), self.parse_number('0')],
[self.parse_number('50000000'), self.parse_number('0')],
],
},
},
'funding': {
'withdraw': {},
},
},
'exceptions': {
'exact': {
'Please slow down': RateLimitExceeded, # {"error":"Please slow down","success":false}
'Size too small for provide': InvalidOrder, # {"error":"Size too small for provide","success":false}
'Not logged in': AuthenticationError, # {"error":"Not logged in","success":false}
'Not enough balances': InsufficientFunds, # {"error":"Not enough balances","success":false}
'InvalidPrice': InvalidOrder, # {"error":"Invalid price","success":false}
'Size too small': InvalidOrder, # {"error":"Size too small","success":false}
'Size too large': InvalidOrder, # {"error":"Size too large","success":false}
'Missing parameter price': InvalidOrder, # {"error":"Missing parameter price","success":false}
'Order not found': OrderNotFound, # {"error":"Order not found","success":false}
'Order already closed': InvalidOrder, # {"error":"Order already closed","success":false}
'Trigger price too high': InvalidOrder, # {"error":"Trigger price too high","success":false}
'Trigger price too low': InvalidOrder, # {"error":"Trigger price too low","success":false}
'Order already queued for cancellation': CancelPending, # {"error":"Order already queued for cancellation","success":false}
'Duplicate client order ID': DuplicateOrderId, # {"error":"Duplicate client order ID","success":false}
'Spot orders cannot be reduce-only': InvalidOrder, # {"error":"Spot orders cannot be reduce-only","success":false}
'Invalid reduce-only order': InvalidOrder, # {"error":"Invalid reduce-only order","success":false}
'Account does not have enough balances': InsufficientFunds, # {"success":false,"error":"Account does not have enough balances"}
},
'broad': {
'Account does not have enough margin for order': InsufficientFunds,
'Invalid parameter': BadRequest, # {"error":"Invalid parameter start_time","success":false}
'The requested URL was not found on the server': BadRequest,
'No such coin': BadRequest,
'No such subaccount': BadRequest,
'No such future': BadSymbol,
'No such market': BadSymbol,
'Do not send more than': RateLimitExceeded,
'An unexpected error occurred': ExchangeNotAvailable, # {"error":"An unexpected error occurred, please try again later(58BC21C795).","success":false}
'Please retry request': ExchangeNotAvailable, # {"error":"Please retry request","success":false}
'Please try again': ExchangeNotAvailable, # {"error":"Please try again","success":false}
'Try again': ExchangeNotAvailable, # {"error":"Try again","success":false}
'Only have permissions for subaccount': PermissionDenied, # {"success":false,"error":"Only have permissions for subaccount *sub_name*"}
},
},
'precisionMode': TICK_SIZE,
'options': {
# support for canceling conditional orders
# https://github.com/ccxt/ccxt/issues/6669
'cancelOrder': {
'method': 'privateDeleteOrdersOrderId', # privateDeleteConditionalOrdersOrderId
},
'fetchOpenOrders': {
'method': 'privateGetOrders', # privateGetConditionalOrders
},
'fetchOrders': {
'method': 'privateGetOrdersHistory', # privateGetConditionalOrdersHistory
},
'sign': {
'ftx.com': 'FTX',
'ftx.us': 'FTXUS',
},
},
})
def fetch_currencies(self, params={}):
    """Fetch all currencies supported by the exchange from GET /coins.

    :param dict params: extra parameters passed through to the API
    :returns dict: unified currency structures indexed by currency code
    """
    response = self.publicGetCoins(params)
    #
    # {
    #     "success":true,
    #     "result": [
    #         {"id":"BTC","name":"Bitcoin"},
    #         {"id":"ETHMOON","name":"10X Long Ethereum Token","underlying":"ETH"},
    #     ],
    # }
    #
    currencies = self.safe_value(response, 'result', [])
    result = {}
    for entry in currencies:
        currencyId = self.safe_string(entry, 'id')
        code = self.safe_currency_code(currencyId)
        # the endpoint does not expose fees, precision or limits
        result[code] = {
            'id': currencyId,
            'code': code,
            'info': entry,
            'type': None,
            'name': self.safe_string(entry, 'name'),
            'active': None,
            'fee': None,
            'precision': None,
            'limits': {
                'withdraw': {'min': None, 'max': None},
                'amount': {'min': None, 'max': None},
            },
        }
    return result
def fetch_markets(self, params={}):
    """Fetch all spot and futures markets from GET /markets.

    :param dict params: extra parameters passed through to the API
    :returns list: unified market structures
    """
    response = self.publicGetMarkets(params)
    #
    # each entry in "result" looks like:
    #
    #     {
    #         "ask":170.37, "bid":170.31, "last":170.37, "enabled":true,
    #         "name":"ETH-PERP", "priceIncrement":0.01, "sizeIncrement":0.001,
    #         "baseCurrency":null, "quoteCurrency":null,  # null for futures
    #         "type":"future", "underlying":"ETH",        # "spot" markets have baseCurrency/quoteCurrency instead
    #         "quoteVolume24h":7742164.59889, "volumeUsd24h":7742164.59889
    #     }
    #
    rawMarkets = self.safe_value(response, 'result', [])
    result = []
    for rawMarket in rawMarkets:
        id = self.safe_string(rawMarket, 'name')
        baseId = self.safe_string_2(rawMarket, 'baseCurrency', 'underlying')
        quoteId = self.safe_string(rawMarket, 'quoteCurrency', 'USD')  # futures are quoted in USD
        marketType = self.safe_string(rawMarket, 'type')
        base = self.safe_currency_code(baseId)
        quote = self.safe_currency_code(quoteId)
        isFuture = (marketType == 'future')
        # futures keep the raw name(e.g. "ETH-PERP"), spot becomes BASE/QUOTE
        symbol = self.safe_string(rawMarket, 'name') if isFuture else (base + '/' + quote)
        sizeIncrement = self.safe_number(rawMarket, 'sizeIncrement')
        priceIncrement = self.safe_number(rawMarket, 'priceIncrement')
        result.append({
            'id': id,
            'symbol': symbol,
            'base': base,
            'quote': quote,
            'baseId': baseId,
            'quoteId': quoteId,
            'type': marketType,
            'future': isFuture,
            'spot': (marketType == 'spot'),
            'active': self.safe_value(rawMarket, 'enabled'),
            'precision': {
                # precisionMode is TICK_SIZE, so these are tick sizes, not digit counts
                'amount': sizeIncrement,
                'price': priceIncrement,
            },
            'limits': {
                'amount': {'min': sizeIncrement, 'max': None},
                'price': {'min': priceIncrement, 'max': None},
                'cost': {'min': None, 'max': None},
            },
            'info': rawMarket,
        })
    return result
def parse_ticker(self, ticker, market=None):
    """Parse a raw FTX market/ticker structure into a unified ticker.

    Raw structures carry bid/ask/last/change24h/quoteVolume24h; market
    list entries have no 'time' field, so the timestamp falls back to
    the current time.

    :param dict ticker: the raw structure, e.g.
        {"ask":171.29,"bid":171.24,"last":171.44,"name":"ETH-PERP",
         "baseCurrency":null,"quoteCurrency":null,"type":"future",...}
    :param dict market: an optional unified market used as a symbol fallback
    """
    symbol = None
    marketId = self.safe_string(ticker, 'name')
    if marketId in self.markets_by_id:
        # known market: symbol is resolved from the market below
        market = self.markets_by_id[marketId]
    else:
        tickerType = self.safe_string(ticker, 'type')
        if tickerType == 'future':
            symbol = marketId  # futures use the raw id as the symbol
        else:
            base = self.safe_currency_code(self.safe_string(ticker, 'baseCurrency'))
            quote = self.safe_currency_code(self.safe_string(ticker, 'quoteCurrency'))
            if (base is not None) and (quote is not None):
                symbol = base + '/' + quote
    if (symbol is None) and (market is not None):
        symbol = market['symbol']
    last = self.safe_number(ticker, 'last')
    timestamp = self.safe_timestamp(ticker, 'time', self.milliseconds())
    return {
        'symbol': symbol,
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'high': self.safe_number(ticker, 'high'),
        'low': self.safe_number(ticker, 'low'),
        'bid': self.safe_number(ticker, 'bid'),
        'bidVolume': self.safe_number(ticker, 'bidSize'),
        'ask': self.safe_number(ticker, 'ask'),
        'askVolume': self.safe_number(ticker, 'askSize'),
        'vwap': None,
        'open': None,
        'close': last,
        'last': last,
        'previousClose': None,
        'change': None,
        'percentage': self.safe_number(ticker, 'change24h'),
        'average': None,
        'baseVolume': None,
        'quoteVolume': self.safe_number(ticker, 'quoteVolume24h'),
        'info': ticker,
    }
def fetch_ticker(self, symbol, params={}):
    """Fetch a single ticker from GET /markets/{market_name}.

    :param str symbol: unified market symbol
    :param dict params: extra parameters passed through to the API
    :returns dict: a unified ticker structure
    """
    self.load_markets()
    market = self.market(symbol)
    request = {'market_name': market['id']}
    response = self.publicGetMarketsMarketName(self.extend(request, params))
    #
    # {
    #     "success":true,
    #     "result":{
    #         "ask":171.29, "bid":171.24, "last":171.44,
    #         "name":"ETH-PERP", "type":"future",
    #         "baseCurrency":null, "quoteCurrency":null,  # set for spot markets
    #         "priceIncrement":0.01, "sizeIncrement":0.001,
    #         "quoteVolume24h":8570651.12113, "volumeUsd24h":8570651.12113,
    #     }
    # }
    #
    rawTicker = self.safe_value(response, 'result', {})
    return self.parse_ticker(rawTicker, market)
def fetch_tickers(self, symbols=None, params={}):
    """Fetch tickers for all markets in one call via GET /markets.

    :param list symbols: optional unified symbols to filter the result by
    :param dict params: extra parameters passed through to the API
    :returns dict: unified tickers indexed by symbol
    """
    self.load_markets()
    response = self.publicGetMarkets(params)
    #
    # {
    #     "success": true,
    #     "result": [
    #         {
    #             "ask":170.44, "bid":170.41, "last":172.72,
    #             "name":"ETH/USD", "type":"spot",
    #             "baseCurrency":"ETH", "quoteCurrency":"USD",
    #             "quoteVolume24h":382802.0252, "volumeUsd24h":382802.0252,
    #         },
    #     ],
    # }
    #
    rawTickers = self.safe_value(response, 'result', [])
    return self.parse_tickers(rawTickers, symbols)
def fetch_order_book(self, symbol, limit=None, params={}):
    """Fetch the order book via GET /markets/{market_name}/orderbook.

    :param str symbol: unified market symbol
    :param int limit: requested depth, max 100, API default 20
    :param dict params: extra parameters passed through to the API
    :returns dict: a unified order-book structure
    """
    self.load_markets()
    market = self.market(symbol)
    request = {'market_name': market['id']}
    if limit is not None:
        request['depth'] = limit  # max 100, default 20
    response = self.publicGetMarketsMarketNameOrderbook(self.extend(request, params))
    #
    # {
    #     "success":true,
    #     "result":{
    #         "asks":[[171.95,279.865],[171.98,102.42]],
    #         "bids":[[171.93,69.749],[171.9,288.325]],
    #     }
    # }
    #
    rawBook = self.safe_value(response, 'result', {})
    return self.parse_order_book(rawBook, symbol)
def parse_ohlcv(self, ohlcv, market=None):
    """Convert one raw candle into [timestamp, open, high, low, close, volume].

    :param dict ohlcv: e.g.
        {"close":177.23,"high":177.45,"low":177.2,"open":177.43,
         "startTime":"2019-10-17T13:27:00+00:00","time":1571318820000.0,"volume":0.0}
    """
    # "time" is in milliseconds already, so no scaling is required
    return [
        self.safe_integer(ohlcv, 'time'),
        self.safe_number(ohlcv, 'open'),
        self.safe_number(ohlcv, 'high'),
        self.safe_number(ohlcv, 'low'),
        self.safe_number(ohlcv, 'close'),
        self.safe_number(ohlcv, 'volume'),
    ]
def get_market_id(self, symbol, key, params={}):
    """Resolve the raw market id for *symbol*, falling back to params[key] or the symbol itself."""
    marketAndId = self.get_market_params(symbol, key, params)
    # index 1 of the [market, marketId] pair; default to the symbol when absent
    return self.safe_string(marketAndId, 1, symbol)
def get_market_params(self, symbol, key, params={}):
    """Return a [market, marketId] pair for *symbol*.

    When the symbol is unknown, the market is None and the id is read
    from params[key](falling back to the symbol itself), which allows
    callers to pass raw exchange ids directly.
    """
    if symbol in self.markets:
        market = self.market(symbol)
        return [market, market['id']]
    return [None, self.safe_string(params, key, symbol)]
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
    """Fetch candles via GET /markets/{market_name}/candles.

    :param str symbol: unified symbol(or a raw market id via params['market_name'])
    :param str timeframe: a key of self.timeframes
    :param int since: start time in milliseconds; when omitted the window
        ends at the current time and looks back *limit* candles
    :param int limit: number of candles, max 1501 including the current one
    :param dict params: extra parameters passed through to the API
    :returns list: a list of [timestamp, o, h, l, c, v] candles
    """
    self.load_markets()
    market, marketId = self.get_market_params(symbol, 'market_name', params)
    # max 1501 candles, including the current candle when since is not specified
    limit = 1501 if (limit is None) else limit
    request = {
        'resolution': self.timeframes[timeframe],
        'market_name': marketId,
        'limit': limit,
    }
    duration = self.parse_timeframe(timeframe)  # seconds per candle
    if since is None:
        # anchor the window at "now" and look back limit candles
        request['end_time'] = self.seconds()
        request['start_time'] = request['end_time'] - limit * duration
    else:
        # anchor at since(ms → s) and look forward limit candles
        request['start_time'] = int(since / 1000)
        request['end_time'] = self.sum(request['start_time'], limit * duration)
    response = self.publicGetMarketsMarketNameCandles(self.extend(request, params))
    #
    # {
    #     "success": true,
    #     "result":[
    #         {"close":177.23,"high":177.45,"low":177.2,"open":177.43,
    #          "startTime":"2019-10-17T13:27:00+00:00","time":1571318820000.0,"volume":0.0},
    #     ],
    # }
    #
    rawCandles = self.safe_value(response, 'result', [])
    return self.parse_ohlcvs(rawCandles, market, timeframe, since, limit)
def parse_trade(self, trade, market=None):
    """Parse a public trade or a private fill into a unified trade.

    Public trades(fetchTrades) carry id/price/side/size/time only.
    Private fills(fetchMyTrades) add fee/feeRate/feeCurrency/orderId and
    "liquidity"("maker"/"taker"); OTC conversions have a null "market",
    in which case the symbol is rebuilt from baseCurrency/quoteCurrency.
    Fees may be negative(maker rebates).
    """
    id = self.safe_string(trade, 'id')
    takerOrMaker = self.safe_string(trade, 'liquidity')
    marketId = self.safe_string(trade, 'market')
    if marketId in self.markets_by_id:
        market = self.markets_by_id[marketId]
        symbol = market['symbol']
    else:
        # unknown or null market id - rebuild the symbol from the currencies
        base = self.safe_currency_code(self.safe_string(trade, 'baseCurrency'))
        quote = self.safe_currency_code(self.safe_string(trade, 'quoteCurrency'))
        if (base is not None) and (quote is not None):
            symbol = base + '/' + quote
        else:
            symbol = marketId
    timestamp = self.parse8601(self.safe_string(trade, 'time'))
    priceString = self.safe_string(trade, 'price')
    amountString = self.safe_string(trade, 'size')
    price = self.parse_number(priceString)
    amount = self.parse_number(amountString)
    # cost is computed with string math to avoid float precision artifacts
    cost = self.parse_number(Precise.string_mul(priceString, amountString))
    if (symbol is None) and (market is not None):
        symbol = market['symbol']
    fee = None
    feeCost = self.safe_number(trade, 'fee')
    if feeCost is not None:
        feeCurrencyCode = self.safe_currency_code(self.safe_string(trade, 'feeCurrency'))
        fee = {
            'cost': feeCost,
            'currency': feeCurrencyCode,
            'rate': self.safe_number(trade, 'feeRate'),
        }
    return {
        'info': trade,
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'symbol': symbol,
        'id': id,
        'order': self.safe_string(trade, 'orderId'),
        'type': None,
        'takerOrMaker': takerOrMaker,
        'side': self.safe_string(trade, 'side'),
        'price': price,
        'amount': amount,
        'cost': cost,
        'fee': fee,
    }
def fetch_trades(self, symbol, since=None, limit=None, params={}):
    """Fetch public trades via GET /markets/{market_name}/trades.

    :param str symbol: unified symbol(or a raw id via params['market_name'])
    :param int since: earliest trade time in milliseconds
    :param int limit: maximum number of trades to return
    :param dict params: extra parameters passed through to the API
    :returns list: unified trade structures
    """
    self.load_markets()
    market, marketId = self.get_market_params(symbol, 'market_name', params)
    request = {'market_name': marketId}
    if since is not None:
        request['start_time'] = int(since / 1000)
        # start_time doesn't work without end_time
        request['end_time'] = self.seconds()
    if limit is not None:
        request['limit'] = limit
    response = self.publicGetMarketsMarketNameTrades(self.extend(request, params))
    #
    # {
    #     "success":true,
    #     "result":[
    #         {"id":1715826,"liquidation":false,"price":171.62,"side":"buy",
    #          "size":2.095,"time":"2019-10-18T12:59:54.288166+00:00"},
    #     ],
    # }
    #
    rawTrades = self.safe_value(response, 'result', [])
    return self.parse_trades(rawTrades, market, since, limit)
def fetch_trading_fees(self, params={}):
    """Fetch the account-wide maker/taker fee rates from GET /account.

    :param dict params: extra parameters passed through to the API
    :returns dict: {'info': ..., 'maker': float, 'taker': float}
    """
    self.load_markets()
    response = self.privateGetAccount(params)
    #
    # {
    #     "success": true,
    #     "result": {
    #         "makerFee": 0.0002,
    #         "takerFee": 0.0005,
    #         "collateral": 3568181.02691129,
    #         "freeCollateral": 1786071.456884368,
    #         "username": "user@domain.com",
    #         "positions": [...],
    #         ...
    #     },
    # }
    #
    account = self.safe_value(response, 'result', {})
    return {
        'info': response,
        'maker': self.safe_number(account, 'makerFee'),
        'taker': self.safe_number(account, 'takerFee'),
    }
def fetch_balance(self, params={}):
    """Fetch wallet balances from GET /wallet/balances.

    :param dict params: extra parameters passed through to the API
    :returns dict: a unified balance structure indexed by currency code
    """
    self.load_markets()
    response = self.privateGetWalletBalances(params)
    #
    # {
    #     "success": true,
    #     "result": [
    #         {"coin": "USDTBEAR", "free": 2320.2, "total": 2340.2},
    #     ],
    # }
    #
    result = {'info': response}
    rawBalances = self.safe_value(response, 'result', [])
    for rawBalance in rawBalances:
        code = self.safe_currency_code(self.safe_string(rawBalance, 'coin'))
        account = self.account()
        # prefer availableWithoutBorrow when margin is enabled, else plain free
        account['free'] = self.safe_string_2(rawBalance, 'availableWithoutBorrow', 'free')
        account['total'] = self.safe_string(rawBalance, 'total')
        result[code] = account
    return self.parse_balance(result)
def parse_order_status(self, status):
    """Map an FTX order status to a unified one; unknown values pass through unchanged."""
    mapping = {
        'new': 'open',
        'open': 'open',
        'closed': 'closed',  # "closed" covers both filled and canceled orders
        'triggered': 'closed',
    }
    return self.safe_string(mapping, status, status)
def parse_order(self, order, market=None):
    """Parse a regular or conditional(trigger) order into the unified format.

    Regular limit/market orders carry price/filledSize/remainingSize;
    conditional orders("stop", "takeProfit", "trailingStop") carry
    triggerPrice/orderPrice/triggeredAt instead. FTX reports canceled
    orders with status "closed" and remainingSize 0, so a closed order
    whose size exceeds filledSize is remapped to status 'canceled'.
    """
    id = self.safe_string(order, 'id')
    timestamp = self.parse8601(self.safe_string(order, 'createdAt'))
    status = self.parse_order_status(self.safe_string(order, 'status'))
    amount = self.safe_number(order, 'size')
    filled = self.safe_number(order, 'filledSize')
    remaining = self.safe_number(order, 'remainingSize')
    if (remaining == 0.0) and (amount is not None) and (filled is not None):
        # the API zeroes remainingSize on closed orders - recompute it
        remaining = max(amount - filled, 0)
        if remaining > 0:
            status = 'canceled'  # closed but not fully filled → canceled
    symbol = None
    marketId = self.safe_string(order, 'market')
    if marketId is not None:
        if marketId in self.markets_by_id:
            market = self.markets_by_id[marketId]
            symbol = market['symbol']
        else:
            # support for delisted market ids
            # https://github.com/ccxt/ccxt/issues/7113
            symbol = marketId
    if (symbol is None) and (market is not None):
        symbol = market['symbol']
    side = self.safe_string(order, 'side')
    type = self.safe_string(order, 'type')
    average = self.safe_number(order, 'avgFillPrice')
    # conditional orders have no 'price' - fall back to triggerPrice, then average
    price = self.safe_number_2(order, 'price', 'triggerPrice', average)
    cost = (filled * price) if (filled is not None and price is not None) else None
    lastTradeTimestamp = self.parse8601(self.safe_string(order, 'triggeredAt'))
    return {
        'info': order,
        'id': id,
        'clientOrderId': self.safe_string(order, 'clientId'),
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'lastTradeTimestamp': lastTradeTimestamp,
        'symbol': symbol,
        'type': type,
        'timeInForce': None,
        'postOnly': self.safe_value(order, 'postOnly'),
        'side': side,
        'price': price,
        'stopPrice': self.safe_number(order, 'triggerPrice'),
        'amount': amount,
        'cost': cost,
        'average': average,
        'filled': filled,
        'remaining': remaining,
        'status': status,
        'fee': None,
        'trades': None,
    }
def create_order(self, symbol, type, side, amount, price=None, params={}):
    """Create an order.

    "limit" and "market" orders go to POST /orders; "stop", "takeProfit"
    and "trailingStop" go to POST /conditional_orders. Stop/takeProfit
    orders require params['stopPrice'] or params['triggerPrice'], and
    treat the *price* argument as an optional limit(orderPrice). For
    trailingStop orders the *price* argument is the trail value
    (negative for sells, positive for buys).

    :param str symbol: unified market symbol
    :param str type: "limit", "market", "stop", "trailingStop", or "takeProfit"
    :param str side: "buy" or "sell"
    :param float amount: order size in base currency
    :param float price: see above; ignored for plain market orders
    :param dict params: extra parameters, e.g. reduceOnly/ioc/postOnly/clientId
    :returns dict: a unified order structure
    :raises ArgumentsRequired: for stop/takeProfit without a trigger price
    :raises InvalidOrder: for an unsupported order type
    """
    self.load_markets()
    market = self.market(symbol)
    request = {
        'market': market['id'],
        'side': side,
        'type': type,
        'size': float(self.amount_to_precision(symbol, amount)),
        # 'reduceOnly': False,  # optional, default is False
        # 'ioc': False,  # optional, limit or market orders only
        # 'postOnly': False,  # optional, limit or market orders only
        # 'clientId': 'abcdef0123456789',  # optional, limit or market orders only
    }
    clientOrderId = self.safe_string_2(params, 'clientId', 'clientOrderId')
    if clientOrderId is not None:
        request['clientId'] = clientOrderId
        params = self.omit(params, ['clientId', 'clientOrderId'])
    if (type == 'limit') or (type == 'market'):
        method = 'privatePostOrders'
        if type == 'limit':
            request['price'] = float(self.price_to_precision(symbol, price))
        else:
            request['price'] = None  # the API expects an explicit null price for market orders
    elif (type == 'stop') or (type == 'takeProfit'):
        method = 'privatePostConditionalOrders'
        stopPrice = self.safe_number_2(params, 'stopPrice', 'triggerPrice')
        if stopPrice is None:
            raise ArgumentsRequired(self.id + ' createOrder() requires a stopPrice parameter or a triggerPrice parameter for ' + type + ' orders')
        params = self.omit(params, ['stopPrice', 'triggerPrice'])
        request['triggerPrice'] = float(self.price_to_precision(symbol, stopPrice))
        if price is not None:
            # optional - the order type is limit if orderPrice is specified, otherwise market
            request['orderPrice'] = float(self.price_to_precision(symbol, price))
    elif type == 'trailingStop':
        method = 'privatePostConditionalOrders'
        # negative for "sell", positive for "buy"
        request['trailValue'] = float(self.price_to_precision(symbol, price))
    else:
        raise InvalidOrder(self.id + ' createOrder() does not support order type ' + type + ', only limit, market, stop, trailingStop, or takeProfit orders are supported')
    response = getattr(self, method)(self.extend(request, params))
    #
    # {
    #     "success": true,
    #     "result": [
    #         {"createdAt": "2019-03-05T09:56:55.728933+00:00", "id": 9596912,
    #          "market": "XRP-PERP", "price": 0.306525, "side": "sell",
    #          "size": 31431, "status": "open", "type": "limit", ...}
    #     ]
    # }
    #
    result = self.safe_value(response, 'result', [])
    return self.parse_order(result, market)
def edit_order(self, id, symbol, type, side, amount, price=None, params={}):
    """Modify an existing order in place.

    When any of triggerPrice/orderPrice/trailValue is present in params
    the order is treated as conditional and modified via
    POST /conditional_orders/{order_id}/modify; otherwise regular orders
    are modified by id or, if params carries a client order id, by
    POST /orders/by_client_id/{client_order_id}/modify.

    :param str id: the exchange order id
    :param str symbol: unified market symbol(used for precision rounding)
    :param str type: unused, kept for interface compatibility
    :param str side: unused, kept for interface compatibility
    :param float amount: new order size, optional
    :param float price: new order price, optional(regular orders only)
    :param dict params: may carry clientOrderId/triggerPrice/orderPrice/trailValue
    :returns dict: a unified order structure
    """
    self.load_markets()
    market = self.market(symbol)
    clientOrderId = self.safe_string_2(params, 'client_order_id', 'clientOrderId')
    triggerPrice = self.safe_number(params, 'triggerPrice')
    orderPrice = self.safe_number(params, 'orderPrice')
    trailValue = self.safe_number(params, 'trailValue')
    params = self.omit(params, ['client_order_id', 'clientOrderId', 'triggerPrice', 'orderPrice', 'trailValue'])
    request = {}
    isConditional = (triggerPrice is not None) or (orderPrice is not None) or (trailValue is not None)
    if isConditional:
        method = 'privatePostConditionalOrdersOrderIdModify'
        request['order_id'] = id
        if triggerPrice is not None:
            request['triggerPrice'] = float(self.price_to_precision(symbol, triggerPrice))
        if orderPrice is not None:
            # only for stop limit or take profit limit orders
            request['orderPrice'] = float(self.price_to_precision(symbol, orderPrice))
        if trailValue is not None:
            # negative for sell orders, positive for buy orders
            request['trailValue'] = float(self.price_to_precision(symbol, trailValue))
    else:
        if clientOrderId is None:
            method = 'privatePostOrdersOrderIdModify'
            request['order_id'] = id
        else:
            method = 'privatePostOrdersByClientIdClientOrderIdModify'
            request['client_order_id'] = clientOrderId
            # request['clientId'] = clientOrderId
        if price is not None:
            request['price'] = float(self.price_to_precision(symbol, price))
    if amount is not None:
        request['size'] = float(self.amount_to_precision(symbol, amount))
    response = getattr(self, method)(self.extend(request, params))
    #
    # regular order response:
    #
    # {
    #     "success": true,
    #     "result": {"createdAt": "2019-03-05T11:56:55.728933+00:00",
    #                "id": 9596932, "market": "XRP-PERP", "price": 0.326525,
    #                "side": "sell", "size": 31431, "status": "open",
    #                "type": "limit", ...}
    # }
    #
    # conditional order responses additionally carry triggerPrice,
    # orderPrice, triggeredAt and orderType
    #
    result = self.safe_value(response, 'result', {})
    return self.parse_order(result, market)
def cancel_order(self, id, symbol=None, params={}):
    """Cancel an order by id or client order id.

    Conditional orders(params['type'] of "stop", "trailingStop" or
    "takeProfit") are canceled via DELETE /conditional_orders/{order_id};
    see https://github.com/ccxt/ccxt/issues/6669. A 'method' override may
    also be supplied via params or self.options['cancelOrder'].
    """
    self.load_markets()
    # support for canceling conditional orders
    # https://github.com/ccxt/ccxt/issues/6669
    options = self.safe_value(self.options, 'cancelOrder', {})
    defaultMethod = self.safe_string(options, 'method', 'privateDeleteOrdersOrderId')
    method = self.safe_string(params, 'method', defaultMethod)
    type = self.safe_value(params, 'type')
    clientOrderId = self.safe_value_2(params, 'client_order_id', 'clientOrderId')
    request = {}
    if clientOrderId is None:
        request['order_id'] = int(id)
        if (type == 'stop') or (type == 'trailingStop') or (type == 'takeProfit'):
            method = 'privateDeleteConditionalOrdersOrderId'
    else:
        request['client_order_id'] = clientOrderId
        method = 'privateDeleteOrdersByClientIdClientOrderId'
    query = self.omit(params, ['method', 'type', 'client_order_id', 'clientOrderId'])
    response = getattr(self, method)(self.extend(request, query))
    #
    # {
    #     "success": true,
    #     "result": "Order queued for cancelation"
    # }
    #
    return self.safe_value(response, 'result', {})
def cancel_all_orders(self, symbol=None, params={}):
    """Cancel all open orders via DELETE /orders, optionally limited to one market.

    :param str symbol: optional unified symbol to restrict the cancelation to
    :param dict params: may carry conditionalOrdersOnly / limitOrdersOnly flags
    :returns: the raw API confirmation string
    """
    self.load_markets()
    request = {
        # 'market': market['id'],  # optional
        # 'conditionalOrdersOnly': False,  # cancel conditional orders only
        # 'limitOrdersOnly': False,  # cancel existing limit orders(non-conditional orders) only
    }
    marketId = self.get_market_id(symbol, 'market', params)
    if marketId is not None:
        request['market'] = marketId
    response = self.privateDeleteOrders(self.extend(request, params))
    #
    # {
    #     "success": true,
    #     "result": "Orders queued for cancelation"
    # }
    #
    return self.safe_value(response, 'result', {})
def fetch_order(self, id, symbol=None, params={}):
    """Fetch one order by id, or by client order id when params carries one.

    :param str id: the exchange order id(ignored when a client id is given)
    :param str symbol: unused, kept for interface compatibility
    :param dict params: may carry 'client_order_id' or 'clientOrderId'
    :returns dict: a unified order structure
    """
    self.load_markets()
    clientOrderId = self.safe_value_2(params, 'client_order_id', 'clientOrderId')
    request = {}
    if clientOrderId is None:
        method = 'privateGetOrdersOrderId'
        request['order_id'] = id
    else:
        method = 'privateGetOrdersByClientIdClientOrderId'
        request['client_order_id'] = clientOrderId
        params = self.omit(params, ['client_order_id', 'clientOrderId'])
    response = getattr(self, method)(self.extend(request, params))
    #
    # {
    #     "success": true,
    #     "result": {"createdAt": "2019-03-05T09:56:55.728933+00:00",
    #                "filledSize": 10, "id": 9596912, "market": "XRP-PERP",
    #                "price": 0.306525, "avgFillPrice": 0.306526,
    #                "remainingSize": 31421, "side": "sell", "size": 31431,
    #                "status": "open", "type": "limit", ...}
    # }
    #
    result = self.safe_value(response, 'result', {})
    return self.parse_order(result)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
    """Fetch open orders, optionally filtered by market.

    Conditional orders(params['type'] of "stop", "trailingStop" or
    "takeProfit") are fetched from GET /conditional_orders; see
    https://github.com/ccxt/ccxt/issues/6669. The endpoint can also be
    overridden via params['method'] or self.options['fetchOpenOrders'].
    """
    self.load_markets()
    market, marketId = self.get_market_params(symbol, 'market', params)
    request = {}
    if marketId is not None:
        request['market'] = marketId
    # support for canceling conditional orders
    # https://github.com/ccxt/ccxt/issues/6669
    options = self.safe_value(self.options, 'fetchOpenOrders', {})
    defaultMethod = self.safe_string(options, 'method', 'privateGetOrders')
    method = self.safe_string(params, 'method', defaultMethod)
    type = self.safe_value(params, 'type')
    if (type == 'stop') or (type == 'trailingStop') or (type == 'takeProfit'):
        method = 'privateGetConditionalOrders'
    query = self.omit(params, ['method', 'type'])
    response = getattr(self, method)(self.extend(request, query))
    #
    # {
    #     "success": true,
    #     "result": [
    #         {"createdAt": "2019-03-05T09:56:55.728933+00:00",
    #          "filledSize": 10, "id": 9596912, "market": "XRP-PERP",
    #          "price": 0.306525, "remainingSize": 31421, "side": "sell",
    #          "size": 31431, "status": "open", "type": "limit", ...}
    #     ]
    # }
    #
    result = self.safe_value(response, 'result', [])
    return self.parse_orders(result, market, since, limit)
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
    """Fetch order history, optionally filtered by market and start time.

    Conditional orders(params['type'] of "stop", "trailingStop" or
    "takeProfit") come from GET /conditional_orders/history; see
    https://github.com/ccxt/ccxt/issues/6669. The endpoint can also be
    overridden via params['method'] or self.options['fetchOrders'].

    :param int limit: default 100, max 100
    """
    self.load_markets()
    market, marketId = self.get_market_params(symbol, 'market', params)
    request = {}
    if marketId is not None:
        request['market'] = marketId
    if limit is not None:
        request['limit'] = limit  # default 100, max 100
    if since is not None:
        request['start_time'] = int(since / 1000)
    # support for canceling conditional orders
    # https://github.com/ccxt/ccxt/issues/6669
    options = self.safe_value(self.options, 'fetchOrders', {})
    defaultMethod = self.safe_string(options, 'method', 'privateGetOrdersHistory')
    method = self.safe_string(params, 'method', defaultMethod)
    type = self.safe_value(params, 'type')
    if (type == 'stop') or (type == 'trailingStop') or (type == 'takeProfit'):
        method = 'privateGetConditionalOrdersHistory'
    query = self.omit(params, ['method', 'type'])
    response = getattr(self, method)(self.extend(request, query))
    #
    # {
    #     "success": true,
    #     "result": [
    #         {"createdAt": "2019-03-05T09:56:55.728933+00:00",
    #          "filledSize": 10, "id": 9596912, "market": "XRP-PERP",
    #          "price": 0.306525, "remainingSize": 31421, "side": "sell",
    #          "size": 31431, "status": "open", "type": "limit", ...}
    #     ]
    # }
    #
    result = self.safe_value(response, 'result', [])
    return self.parse_orders(result, market, since, limit)
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
    """Fetch the account's fills from GET /fills.

    :param str symbol: optional unified symbol to restrict the query to
    :param int since: earliest fill time in milliseconds; when set, an
        end_time of "now" is also sent because the API requires both
    :param int limit: unused by this endpoint, kept for interface compatibility
    :param dict params: extra parameters passed through to the API
    :returns list: unified trade structures
    """
    self.load_markets()
    market, marketId = self.get_market_params(symbol, 'market', params)
    request = {}
    if marketId is not None:
        request['market'] = marketId
    if since is not None:
        request['start_time'] = int(since / 1000)
        request['end_time'] = self.seconds()
    response = self.privateGetFills(self.extend(request, params))
    #
    # {
    #     "success": true,
    #     "result": [
    #         {"fee": 20.1374935, "feeRate": 0.0005, "future": "EOS-0329",
    #          "id": 11215, "liquidity": "taker", "market": "EOS-0329",
    #          "orderId": 8436981, "price": 4.201, "side": "buy",
    #          "size": 9587, "time": "2019-03-27T19:15:10.204619+00:00",
    #          "type": "order"}
    #     ]
    # }
    #
    fills = self.safe_value(response, 'result', [])
    return self.parse_trades(fills, market, since, limit)
def withdraw(self, code, amount, address, tag=None, params={}):
    """Request a withdrawal via POST /wallet/withdrawals.

    :param str code: unified currency code
    :param float amount: amount to withdraw
    :param str address: destination address(validated before sending)
    :param str tag: optional address tag/memo
    :param dict params: may carry 'password'(withdrawal password) and 'code'(2fa)
    :returns dict: a unified transaction structure
    """
    self.load_markets()
    self.check_address(address)
    currency = self.currency(code)
    request = {
        'coin': currency['id'],
        'size': amount,
        'address': address,
        # 'password': 'string',  # optional withdrawal password if it is required for your account
        # 'code': '192837',  # optional 2fa code if it is required for your account
    }
    if self.password is not None:
        # forward the configured withdrawal password automatically
        request['password'] = self.password
    if tag is not None:
        request['tag'] = tag
    response = self.privatePostWalletWithdrawals(self.extend(request, params))
    #
    # {
    #     "success": true,
    #     "result": {"coin": "USDTBEAR", "address": "0x83a1...", "tag": "null",
    #                "fee": 0, "id": 1, "size": "20.2", "status": "requested",
    #                "time": "2019-03-05T09:56:55.728933+00:00", "txid": "null"}
    # }
    #
    result = self.safe_value(response, 'result', {})
    return self.parse_transaction(result, currency)
def fetch_positions(self, symbols=None, params={}):
self.load_markets()
request = {
# 'showAvgPrice': False,
}
response = self.privateGetPositions(self.extend(request, params))
#
# {
# "success": True,
# "result": [
# {
# "cost": -31.7906,
# "entryPrice": 138.22,
# "estimatedLiquidationPrice": 152.1,
# "future": "ETH-PERP",
# "initialMarginRequirement": 0.1,
# "longOrderSize": 1744.55,
# "maintenanceMarginRequirement": 0.04,
# "netSize": -0.23,
# "openSize": 1744.32,
# "realizedPnl": 3.39441714,
# "shortOrderSize": 1732.09,
# "side": "sell",
# "size": 0.23,
# "unrealizedPnl": 0,
# "collateralUsed": 3.17906
# }
# ]
# }
#
# todo unify parsePosition/parsePositions
return self.safe_value(response, 'result', [])
def fetch_account_positions(self, symbols=None, params={}):
self.load_markets()
response = self.privateGetAccount(params)
#
# {
# "result":{
# "backstopProvider":false,
# "chargeInterestOnNegativeUsd":false,
# "collateral":2830.2567913677476,
# "freeCollateral":2829.670741867416,
# "initialMarginRequirement":0.05,
# "leverage":20.0,
# "liquidating":false,
# "maintenanceMarginRequirement":0.03,
# "makerFee":0.0,
# "marginFraction":null,
# "openMarginFraction":null,
# "positionLimit":null,
# "positionLimitUsed":null,
# "positions":[
# {
# "collateralUsed":0.0,
# "cost":0.0,
# "entryPrice":null,
# "estimatedLiquidationPrice":null,
# "future":"XRP-PERP",
# "initialMarginRequirement":0.05,
# "longOrderSize":0.0,
# "maintenanceMarginRequirement":0.03,
# "netSize":0.0,
# "openSize":0.0,
# "realizedPnl":0.016,
# "shortOrderSize":0.0,
# "side":"buy",
# "size":0.0,
# "unrealizedPnl":0.0,
# }
# ],
# "spotLendingEnabled":false,
# "spotMarginEnabled":false,
# "takerFee":0.0007,
# "totalAccountValue":2830.2567913677476,
# "totalPositionSize":0.0,
# "useFttCollateral":true,
# "username":"igor.kroitor@gmail.com"
# },
# "success":true
# }
#
result = self.safe_value(response, 'result', {})
# todo unify parsePosition/parsePositions
return self.safe_value(result, 'positions', [])
def fetch_deposit_address(self, code, params={}):
self.load_markets()
currency = self.currency(code)
request = {
'coin': currency['id'],
}
response = self.privateGetWalletDepositAddressCoin(self.extend(request, params))
#
# {
# "success": True,
# "result": {
# "address": "0x83a127952d266A6eA306c40Ac62A4a70668FE3BE",
# "tag": "null"
# }
# }
#
result = self.safe_value(response, 'result', {})
address = self.safe_string(result, 'address')
tag = self.safe_string(result, 'tag')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'info': response,
}
def parse_transaction_status(self, status):
statuses = {
# what are other statuses here?
'confirmed': 'ok', # deposits
'complete': 'ok', # withdrawals
}
return self.safe_string(statuses, status, status)
    def parse_transaction(self, transaction, currency=None):
        """Convert a raw deposit/withdrawal dict into a unified transaction.

        Handles three raw shapes (examples below): airdrops and regular
        deposits from fetchDeposits, and withdrawals (external or internal
        transfers) from fetchWithdrawals.
        """
        #
        # fetchDeposits
        #
        #     airdrop
        #
        #     {
        #         "id": 9147072,
        #         "coin": "SRM_LOCKED",
        #         "size": 3.12,
        #         "time": "2021-04-27T23:59:03.565983+00:00",
        #         "notes": "SRM Airdrop for FTT holdings",
        #         "status": "complete"
        #     }
        #
        #     regular deposits
        #
        #     {
        #         "coin": "TUSD",
        #         "confirmations": 64,
        #         "confirmedTime": "2019-03-05T09:56:55.728933+00:00",
        #         "fee": 0,
        #         "id": 1,
        #         "sentTime": "2019-03-05T09:56:55.735929+00:00",
        #         "size": "99.0",
        #         "status": "confirmed",
        #         "time": "2019-03-05T09:56:55.728933+00:00",
        #         "txid": "0x8078356ae4b06a036d64747546c274af19581f1c78c510b60505798a7ffcaf1"
        #     }
        #
        # fetchWithdrawals
        #
        #     {
        #         "coin": "TUSD",
        #         "address": "0x83a127952d266A6eA306c40Ac62A4a70668FE3BE",
        #         "tag": "null",
        #         "fee": 0,
        #         "id": 1,
        #         "size": "99.0",
        #         "status": "complete",
        #         "time": "2019-03-05T09:56:55.728933+00:00",
        #         "txid": "0x8078356ae4b06a036d64747546c274af19581f1c78c510b60505798a7ffcaf1"
        #     }
        #
        #     internal transfer (no address, destination encoded in "notes")
        #
        #     {
        #         "coin": 'BTC',
        #         "id": 1969806,
        #         "notes": 'Transfer to Dd6gi7m2Eg4zzBbPAxuwfEaHs6tYvyUX5hbPpsTcNPXo',
        #         "size": 0.003,
        #         "status": 'complete',
        #         "time": '2021-02-03T20:28:54.918146+00:00'
        #     }
        #
        code = self.safe_currency_code(self.safe_string(transaction, 'coin'))
        id = self.safe_string(transaction, 'id')
        amount = self.safe_number(transaction, 'size')
        status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
        timestamp = self.parse8601(self.safe_string(transaction, 'time'))
        txid = self.safe_string(transaction, 'txid')
        tag = None
        address = self.safe_value(transaction, 'address')
        # 'address' can be either a plain string or a nested dict with
        # 'address'/'tag' keys; unpack the dict form here.
        # NOTE(review): 'basestring' is a Python 2 builtin -- presumably a
        # py2/py3 compat shim defines it elsewhere in this module; confirm.
        if not isinstance(address, basestring):
            tag = self.safe_string(address, 'tag')
            address = self.safe_string(address, 'address')
        if address is None:
            # parse address from internal transfer
            # NOTE(review): the magic 12 is len('Transfer to '); the slice
            # assumes the notes string *starts* with that prefix even though
            # find() would match it anywhere -- verify against real payloads
            notes = self.safe_string(transaction, 'notes')
            if (notes is not None) and (notes.find('Transfer to') >= 0):
                address = notes[12:]
        fee = self.safe_number(transaction, 'fee')
        return {
            'info': transaction,
            'id': id,
            'txid': txid,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'addressFrom': None,
            'address': address,
            'addressTo': address,
            'tagFrom': None,
            'tag': tag,
            'tagTo': tag,
            'type': None,
            'amount': amount,
            'currency': code,
            'status': status,
            'updated': None,
            'fee': {
                'currency': code,
                'cost': fee,
                'rate': None,
            },
        }
def fetch_deposits(self, code=None, since=None, limit=None, params={}):
self.load_markets()
response = self.privateGetWalletDeposits(params)
#
# {
# "success": True,
# "result": {
# "coin": "TUSD",
# "confirmations": 64,
# "confirmedTime": "2019-03-05T09:56:55.728933+00:00",
# "fee": 0,
# "id": 1,
# "sentTime": "2019-03-05T09:56:55.735929+00:00",
# "size": "99.0",
# "status": "confirmed",
# "time": "2019-03-05T09:56:55.728933+00:00",
# "txid": "0x8078356ae4b06a036d64747546c274af19581f1c78c510b60505798a7ffcaf1"
# }
# }
#
result = self.safe_value(response, 'result', [])
currency = None
if code is not None:
currency = self.currency(code)
return self.parse_transactions(result, currency, since, limit, {'type': 'deposit'})
def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
self.load_markets()
response = self.privateGetWalletWithdrawals(params)
#
# {
# "success": True,
# "result": {
# "coin": "TUSD",
# "address": "0x83a127952d266A6eA306c40Ac62A4a70668FE3BE",
# "tag": "null",
# "fee": 0,
# "id": 1,
# "size": "99.0",
# "status": "complete",
# "time": "2019-03-05T09:56:55.728933+00:00",
# "txid": "0x8078356ae4b06a036d64747546c274af19581f1c78c510b60505798a7ffcaf1"
# }
# }
#
result = self.safe_value(response, 'result', [])
currency = None
if code is not None:
currency = self.currency(code)
return self.parse_transactions(result, currency, since, limit, {'type': 'withdrawal'})
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the final URL, body and headers for a request.

        For private endpoints, signs the request with
        HMAC-SHA256(secret, timestamp + method + path[+body]) and attaches
        the <PREFIX>-KEY / <PREFIX>-TS / <PREFIX>-SIGN headers, where PREFIX
        is looked up in self.options['sign'] by hostname (default 'FTX').
        """
        request = '/api/' + self.implode_params(path, params)
        # leftover params (not consumed by the path template) become the query
        query = self.omit(params, self.extract_params(path))
        baseUrl = self.implode_hostname(self.urls['api'][api])
        url = baseUrl + request
        if method != 'POST':
            if query:
                # the query string is part of the signed request path
                suffix = '?' + self.urlencode(query)
                url += suffix
                request += suffix
        if api == 'private':
            self.check_required_credentials()
            timestamp = str(self.milliseconds())
            # signature payload: timestamp + HTTP method + request path
            auth = timestamp + method + request
            headers = {}
            if (method == 'POST') or (method == 'DELETE'):
                # POST/DELETE carry the params as a JSON body, which is
                # appended to the signature payload instead of the query string
                body = self.json(query)
                auth += body
                headers['Content-Type'] = 'application/json'
            signature = self.hmac(self.encode(auth), self.encode(self.secret), hashlib.sha256)
            # header name prefix is configurable per hostname (e.g. 'FTX')
            options = self.safe_value(self.options, 'sign', {})
            headerPrefix = self.safe_string(options, self.hostname, 'FTX')
            keyField = headerPrefix + '-KEY'
            tsField = headerPrefix + '-TS'
            signField = headerPrefix + '-SIGN'
            headers[keyField] = self.apiKey
            headers[tsField] = timestamp
            headers[signField] = signature
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to the default error handler
#
# {"error":"Invalid parameter start_time","success":false}
# {"error":"Not enough balances","success":false}
#
success = self.safe_value(response, 'success')
if not success:
feedback = self.id + ' ' + body
error = self.safe_string(response, 'error')
self.throw_exactly_matched_exception(self.exceptions['exact'], error, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], error, feedback)
raise ExchangeError(feedback) # unknown message
def set_leverage(self, leverage, symbol=None, params={}):
# WARNING: THIS WILL INCREASE LIQUIDATION PRICE FOR OPEN ISOLATED LONG POSITIONS
# AND DECREASE LIQUIDATION PRICE FOR OPEN ISOLATED SHORT POSITIONS
if (leverage < 1) or (leverage > 20):
raise BadRequest(self.id + ' leverage should be between 1 and 20')
request = {
'leverage': leverage,
}
return self.privatePostAccountLeverage(self.extend(request, params))
| 41.630777 | 175 | 0.443664 |
acf2fff610dc681ec84d27091959193510cc9abb | 1,270 | py | Python | packages/pyre/constraints/Or.py | lijun99/pyre | 004dfd4c06489b4ba5b32877338ca6440f2d523b | [
"BSD-3-Clause"
] | 3 | 2019-08-02T21:02:47.000Z | 2021-09-08T13:59:43.000Z | packages/pyre/constraints/Or.py | lijun99/pyre | 004dfd4c06489b4ba5b32877338ca6440f2d523b | [
"BSD-3-Clause"
] | null | null | null | packages/pyre/constraints/Or.py | lijun99/pyre | 004dfd4c06489b4ba5b32877338ca6440f2d523b | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2019 all rights reserved
#
# superclass
from .Constraint import Constraint
# declaration
class Or(Constraint):
    """
    Given a set of constraints, a candidate satisfies this iff it satisfies any of the constraints
    """

    # interface
    def validate(self, value, **kwds):
        """
        Check whether {value} satisfies this constraint
        """
        # return the verdict of the first constituent constraint that accepts {value}
        for candidate in self.constraints:
            try:
                return candidate.validate(value=value, **kwds)
            except candidate.ConstraintViolationError:
                # this one rejected the value; try the next one
                pass
        # none of my constraints were satisfied; let the superclass raise
        return super().validate(value=value, **kwds)

    # meta-methods
    def __init__(self, *constraints, **kwds):
        # chain up
        super().__init__(**kwds)
        # record the constituent constraints
        self.constraints = constraints

    def __str__(self):
        # render each constituent parenthesized, joined by "or"
        clauses = ("({})".format(constraint) for constraint in self.constraints)
        return " or ".join(clauses)
# end of file
| 23.518519 | 98 | 0.584252 |
acf302ccb7cdacb3d9f43220d4a45c5b8073f969 | 5,281 | py | Python | my/emfit/__init__.py | almereyda/HPI | c83bfbd21ce94a96f7af01ab0a82f20535f4aefb | [
"MIT"
] | 1 | 2021-08-04T18:54:52.000Z | 2021-08-04T18:54:52.000Z | my/emfit/__init__.py | almereyda/HPI | c83bfbd21ce94a96f7af01ab0a82f20535f4aefb | [
"MIT"
] | null | null | null | my/emfit/__init__.py | almereyda/HPI | c83bfbd21ce94a96f7af01ab0a82f20535f4aefb | [
"MIT"
] | 1 | 2021-03-01T13:12:33.000Z | 2021-03-01T13:12:33.000Z | #!/usr/bin/env python3
"""
[[https://shop-eu.emfit.com/products/emfit-qs][Emfit QS]] sleep tracker
Consumes data exported by https://github.com/karlicoss/emfitexport
"""
from datetime import date
from pathlib import Path
from typing import Dict, List, Iterable, Any, Optional
from ..core import get_files
from ..core.common import mcachew
from ..core.cachew import cache_dir
from ..core.error import Res, set_error_datetime, extract_error_datetime
from ..core.pandas import DataFrameT
from my.config import emfit as config
import emfitexport.dal as dal
# TODO: settle on "log" vs "logger" naming; "logger" seems to make more sense
logger = dal.log
# re-export the DAL's Emfit record type as this module's public type
Emfit = dal.Emfit
# TODO move to common?
def dir_hash(path: Path):
mtimes = tuple(p.stat().st_mtime for p in get_files(path, glob='*.json'))
return mtimes
# TODO take __file__ into account somehow?
@mcachew(cache_path=cache_dir() / 'emfit.cache', hashf=lambda: dir_hash(config.export_path), logger=dal.log)
def datas() -> Iterable[Res[Emfit]]:
    """Yield sleeps (or errors) from the export, localized to the configured timezone."""
    import dataclasses

    # Emfit timestamps arrive in UTC; there is no way (I think?) to recover
    # the 'real' timezone, and local times matter more for sleep analysis.
    # TODO actually this is wrong?? check this..
    tz = config.timezone
    for sleep in dal.sleeps(config.export_path):
        if isinstance(sleep, Exception):
            yield sleep
            continue
        if sleep.sid in config.excluded_sids:
            # TODO should be responsibility of export_path (similar to HPI?)
            continue
        # TODO maybe have a helper to 'patch up' all datetimes in a namedtuple/dataclass?
        # TODO do the same for jawbone data?
        yield dataclasses.replace(
            sleep,
            start=sleep.start.astimezone(tz),
            end=sleep.end.astimezone(tz),
            sleep_start=sleep.sleep_start.astimezone(tz),
            sleep_end=sleep.sleep_end.astimezone(tz),
        )
# TODO should be used for jawbone data as well?
def pre_dataframe() -> Iterable[Res[Emfit]]:
    """Yield one sleep per night, turning multi-sleep nights into errors."""
    # TODO shit. I need some sort of interrupted sleep detection?
    pending: List[Emfit] = []

    def flush() -> Iterable[Res[Emfit]]:
        # emit whatever accumulated for the current night and reset the buffer
        if not pending:
            return
        if len(pending) == 1:
            only = pending.pop()
            yield only
            return
        err = RuntimeError(f'Multiple sleeps per night, not supported yet: {pending}')
        set_error_datetime(err, dt=pending[0].date)
        pending.clear()
        yield err

    for item in datas():
        if isinstance(item, Exception):
            yield item
            continue
        # a new date means the previous night is complete
        if len(pending) != 0 and pending[-1].date != item.date:
            yield from flush()
        pending.append(item)
    yield from flush()
def dataframe() -> DataFrameT:
    """Build a pandas DataFrame with one row per night (or per error).

    Error rows carry only 'date' and 'error'; regular rows carry the sleep
    metrics plus 'hrv_change', the evening-vs-previous-morning HRV delta
    (only computed when the previous row is exactly the preceding day).
    """
    from datetime import timedelta
    dicts: List[Dict[str, Any]] = []
    # previous successfully parsed night, used for the hrv_change delta
    # NOTE(review): 'last' is also assigned when s is an Exception (see the
    # '# meh' line below), so 'last.date' on the next iteration would raise
    # AttributeError if an error row is ever followed by a regular row --
    # confirm whether that can happen in practice
    last: Optional[Emfit] = None
    for s in pre_dataframe():
        d: Dict[str, Any]
        if isinstance(s, Exception):
            edt = extract_error_datetime(s)
            d = {
                'date' : edt,
                'error': str(s),
            }
        else:
            dd = s.date
            pday = dd - timedelta(days=1)
            # hrv_change only makes sense when we have the directly preceding night
            if last is None or last.date != pday:
                hrv_change = None
            else:
                # todo it's change during the day?? dunno if reasonable metric
                hrv_change = s.hrv_evening - last.hrv_morning
            # todo maybe changes need to be handled in a more generic way?
            # todo ugh. get rid of hardcoding, just generate the schema automatically
            # TODO use 'workdays' provider....
            d = {
                'date'       : dd,
                'sleep_start': s.sleep_start,
                'sleep_end'  : s.sleep_end,
                'bed_time'   : s.time_in_bed, # eh, this is derived from sleep start / end. should we compute it on spot??
                # these are emfit specific
                'coverage'   : s.sleep_hr_coverage,
                'avg_hr'     : s.measured_hr_avg,
                'hrv_evening': s.hrv_evening,
                'hrv_morning': s.hrv_morning,
                'recovery'   : s.recovery,
                'hrv_change' : hrv_change,
                'respiratory_rate_avg': s.respiratory_rate_avg,
            }
        last = s # meh
        dicts.append(d)
    # imported lazily to keep pandas out of the module import path
    import pandas # type: ignore
    return pandas.DataFrame(dicts)
from ..core import stat, Stats
def stats() -> Stats:
    """Return HPI summary statistics over the pre_dataframe() stream."""
    return stat(pre_dataframe)
from contextlib import contextmanager
from typing import Iterator
@contextmanager
def fake_data(nights: int = 500) -> Iterator[None]:
    """Temporarily point config.export_path at a directory of generated data.

    While the context is active, this module serves `nights` nights of fake
    emfit data; the temporary directory is removed on exit.
    """
    from ..core.cfg import override_config
    from tempfile import TemporaryDirectory
    with override_config(config) as cfg, TemporaryDirectory() as tmp:
        export_dir = Path(tmp)
        cfg.export_path = export_dir
        dal.FakeData().fill(export_dir, count=nights)
        yield
# TODO remove/deprecate it? I think used by timeline
def get_datas() -> List[Emfit]:
    """Return all sleeps sorted by start time (legacy helper, apparently
    still used by timeline)."""
    # sorted() already returns a new list, so the previous list(sorted(...))
    # wrapper was redundant; behavior is unchanged.
    # todo ugh. run lint properly
    return sorted(datas(), key=lambda e: e.start)  # type: ignore
# TODO move away old entries if there is a diff??
| 32.598765 | 142 | 0.593448 |
acf302e4fd2b172d8978eabdc1cbeaee18bb8ff1 | 242 | py | Python | test_teste.py | DiegooRibeiroo/TDE3 | e89bea12c9b5edb70eb5efdb5d22ee8806315808 | [
"Apache-2.0"
] | null | null | null | test_teste.py | DiegooRibeiroo/TDE3 | e89bea12c9b5edb70eb5efdb5d22ee8806315808 | [
"Apache-2.0"
] | null | null | null | test_teste.py | DiegooRibeiroo/TDE3 | e89bea12c9b5edb70eb5efdb5d22ee8806315808 | [
"Apache-2.0"
] | null | null | null | import pytest
from calc import soma,sub,multi,div
def test_app():
assert soma(1,1) == 2
assert soma(-1,1) == 0
assert soma('-1',1) == 0
assert sub(1.5,1) == 0.5
assert multi(-1,1) == -1
assert multi(3e2,2) == 600
assert div(6,2) == 3
| 20.166667 | 35 | 0.615702 |
acf30314ea5e8f810a380a9e26562584b88e517f | 3,374 | py | Python | build/lib/particledist/MassDistribution.py | nickchak21/particledist | 59b788a894655273ec177a3a6bb4cf9526f8c402 | [
"MIT"
] | 1 | 2020-03-01T19:47:13.000Z | 2020-03-01T19:47:13.000Z | build/lib/particledist/MassDistribution.py | nickchak21/particledist | 59b788a894655273ec177a3a6bb4cf9526f8c402 | [
"MIT"
] | null | null | null | build/lib/particledist/MassDistribution.py | nickchak21/particledist | 59b788a894655273ec177a3a6bb4cf9526f8c402 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from __future__ import absolute_import, division, print_function
from time import process_time
import energyflow as ef
import numpy as np
import matplotlib.pyplot as plt
class MassDistribution:
    """Bin collider events by mass and expose their jets as padded arrays.

    ``event_list`` holds one entry per event; each event is a list of jets,
    each jet is a list of particles, and each particle is a sequence of 6
    features.  ``mass_list`` holds the corresponding per-event masses,
    parallel to ``event_list``.
    """

    def __init__(self, mass_list, event_list):
        self.event_list = event_list
        self.mass_list = mass_list

    def divide_mass_bins(self, mass_bin_size, cutoff_jets):
        """Partition events into contiguous mass bins of width ``mass_bin_size``.

        Bins containing fewer than ``cutoff_jets`` events are discarded
        (despite the name, the cutoff counts events, matching the original
        behavior).  Results are stored in ``self.mass_ranges`` and
        ``self.event_mass_bins``.
        """
        max_mass = max(self.mass_list)
        min_mass = min(self.mass_list)
        # Build [low, high] edges; np.arange stops before max_mass, so events
        # above the last upper edge (and events exactly at min_mass, which
        # fail the strict lower-bound test below) fall into no bin.
        mass_ranges = []
        previous_mass = min_mass
        for current_mass in np.arange(min_mass + mass_bin_size, max_mass, mass_bin_size):
            mass_ranges.append([previous_mass, current_mass])
            previous_mass = current_mass
        self.mass_ranges = mass_ranges

        # Sort event indices into the ranges (low-exclusive, high-inclusive).
        index_bins = [[] for _ in mass_ranges]
        for index, event_mass in enumerate(self.mass_list):
            for mass_bin, (low, high) in enumerate(mass_ranges):
                if low < event_mass <= high:
                    index_bins[mass_bin].append(index)
                    break

        # Drop bins that do not have enough entries, together with their ranges.
        keep = [i for i, indices in enumerate(index_bins) if len(indices) >= cutoff_jets]
        index_bins = [index_bins[i] for i in keep]
        self.mass_ranges = [self.mass_ranges[i] for i in keep]

        # Gather the actual events for each surviving bin.
        self.event_mass_bins = [
            [self.event_list[index] for index in indices]
            for indices in index_bins
        ]

    def get_mass_ranges(self):
        """Return the surviving [low, high] mass ranges, one per bin."""
        return self.mass_ranges

    def extract_jets_into_mass_bins(self):
        """Flatten each event bin into a flat list of jets (``self.jet_mass_bins``)."""
        self.jet_mass_bins = []
        for event_bin in self.event_mass_bins:
            jets = []
            for event in event_bin:
                jets.extend(event)
            self.jet_mass_bins.append(jets)

    def max_particles_per_jet(self):
        """Return, for each mass bin, the largest particle count of any jet.

        Bug fix: the original implementation referenced an undefined name
        ``jet_mass_bin1`` and therefore raised NameError on every call.
        """
        return [max(len(jet) for jet in jet_bin) for jet_bin in self.jet_mass_bins]

    def pad_jet_arrays(self, num_particles):
        """Pack each bin's jets into a (n_jets, num_particles, 6) float array.

        Missing particles/features are zero-padded and extra particles beyond
        ``num_particles`` are truncated; results go to ``self.padded_jet_arrays``.
        """
        self.padded_jet_arrays = []
        for jet_bin in self.jet_mass_bins:
            jet_array = np.zeros((len(jet_bin), num_particles, 6))
            for i, jet in enumerate(jet_bin):
                for j in range(num_particles):
                    for k in range(6):
                        try:
                            jet_array[i, j, k] = jet[j][k]
                        except IndexError:
                            # missing particle or feature -> keep the zero pad
                            jet_array[i, j, k] = 0
            self.padded_jet_arrays.append(jet_array)
acf303223404d5f1b056f67c900f14e4a37c71b7 | 570 | py | Python | nsot/migrations/0030_add_circuit_name_slug.py | comerford/nsot | 941b11f84f5c0d210f638654a6ed34a5610af22a | [
"Apache-2.0"
] | 387 | 2015-01-08T01:41:45.000Z | 2022-03-15T09:46:32.000Z | nsot/migrations/0030_add_circuit_name_slug.py | comerford/nsot | 941b11f84f5c0d210f638654a6ed34a5610af22a | [
"Apache-2.0"
] | 215 | 2015-01-08T19:23:10.000Z | 2021-04-10T16:59:58.000Z | nsot/migrations/0030_add_circuit_name_slug.py | comerford/nsot | 941b11f84f5c0d210f638654a6ed34a5610af22a | [
"Apache-2.0"
] | 81 | 2015-01-08T19:48:34.000Z | 2021-09-28T09:20:46.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import absolute_import
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the non-editable ``name_slug`` natural-key field to Circuit."""
    dependencies = [
        # must run after the migration that created the Circuit model
        ('nsot', '0029_auto__add_circuit'),
    ]
    operations = [
        migrations.AddField(
            model_name='circuit',
            name='name_slug',
            # indexed and unique; nullable so that pre-existing rows migrate cleanly
            field=models.CharField(db_index=True, editable=False, max_length=255, help_text='Slugified version of the name field, used for the natural key', null=True, unique=True),
        ),
    ]
acf303704e665286184208b9616c9615719d18f6 | 482 | py | Python | project/apps/certification/migrations/0022_auto_20151114_2104.py | kostik/vrs | a347c2d901e1a6b60a85480c9d2b247157881fce | [
"BSD-3-Clause"
] | 1 | 2016-11-09T18:57:23.000Z | 2016-11-09T18:57:23.000Z | project/apps/certification/migrations/0022_auto_20151114_2104.py | kostik/vrs | a347c2d901e1a6b60a85480c9d2b247157881fce | [
"BSD-3-Clause"
] | null | null | null | project/apps/certification/migrations/0022_auto_20151114_2104.py | kostik/vrs | a347c2d901e1a6b60a85480c9d2b247157881fce | [
"BSD-3-Clause"
] | 4 | 2016-09-30T08:24:09.000Z | 2019-02-28T14:09:19.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import birth_registration.fields
class Migration(migrations.Migration):
    """Switch f203.E_CODE to the custom E_CODEField from birth_registration."""
    dependencies = [
        # must run after the previous certification migration
        ('certification', '0021_auto_20151109_2145'),
    ]
    operations = [
        migrations.AlterField(
            model_name='f203',
            name='E_CODE',
            # custom field type; optional, capped at 6 characters
            field=birth_registration.fields.E_CODEField(max_length=6, null=True, blank=True),
        ),
    ]
acf303a61316d21b567ef87b8bc9843aab141b96 | 1,308 | py | Python | pyroomacoustics/tests/test_rake_filters.py | HemaZ/pyroomacoustics | c401f829c71ff03a947f68f9b6b2f48346ae84b2 | [
"MIT"
] | 1 | 2019-08-04T07:34:02.000Z | 2019-08-04T07:34:02.000Z | pyroomacoustics/tests/test_rake_filters.py | HemaZ/pyroomacoustics | c401f829c71ff03a947f68f9b6b2f48346ae84b2 | [
"MIT"
] | null | null | null | pyroomacoustics/tests/test_rake_filters.py | HemaZ/pyroomacoustics | c401f829c71ff03a947f68f9b6b2f48346ae84b2 | [
"MIT"
] | 1 | 2019-05-01T21:53:52.000Z | 2019-05-01T21:53:52.000Z |
import numpy as np
import pyroomacoustics as pra
# a 4 x 6 shoebox room at fs=16000, simulating only first-order reflections
room = pra.ShoeBox([4,6], fs=16000, max_order=1)
# add sources in the room
room.add_source([2, 1.5]) # nice source (the desired one)
room.add_source([2,4.5]) # interferer
# add a circular beamforming array: 8 mics of radius 0.15 centered at (2.5, 3)
shape = pra.circular_2D_array([2.5,3], 8, 0., 0.15)
bf = pra.Beamformer(shape, room.fs, Lg=500)
room.add_microphone_array(bf)
# run the ISM (image source model) to compute the source images
room.image_source_model()
# the noise matrix, note that the size is the number of
# sensors multiplied by the filter size
Rn = np.eye(bf.M * bf.Lg) * 1e-5
def test_rake_max_udr_filters():
    """Smoke-test rake_max_udr_filters with and without an interferer."""
    source_images = room.sources[0][:4]
    interferer_images = room.sources[1][:4]
    # design the beamformer without interference suppression
    bf.rake_max_udr_filters(source_images, R_n=Rn, delay=0.015, epsilon=1e-2)
    # and again, this time also suppressing the interferer
    bf.rake_max_udr_filters(source_images, interferer=interferer_images, R_n=Rn, delay=0.015, epsilon=1e-2)
def test_perceptual_filters():
    """Smoke-test rake_perceptual_filters with and without an interferer.

    The dead trailing `pass` statement from the original was removed; it had
    no effect after the two calls.
    """
    # no interferer
    bf.rake_perceptual_filters(room.sources[0][:4], R_n=Rn)
    # with interferer
    bf.rake_perceptual_filters(room.sources[0][:4], interferer=room.sources[1][:4], R_n=Rn)
if __name__ == '__main__':
    # demo mode: design perceptual rake filters and plot the result
    import matplotlib.pyplot as plt
    bf.rake_perceptual_filters(room.sources[0][:4], interferer=room.sources[1][:4], R_n=Rn, epsilon=0.1)
    bf.plot()
    # show the room layout with first-order images and beam patterns at three frequencies
    room.plot(img_order=1, freq=[700., 1000., 2000.])
    plt.show()
acf304ce2d2cdde4d015b073a90f5a58565499d3 | 22 | py | Python | runuwsgi/management/__init__.py | silentsokolov/django-runuwsgi | 24b9ec4308113cd5eaef35b4c5866deebaa1c132 | [
"MIT"
] | 1 | 2017-07-27T10:01:06.000Z | 2017-07-27T10:01:06.000Z | runuwsgi/management/commands/__init__.py | silentsokolov/django-runuwsgi | 24b9ec4308113cd5eaef35b4c5866deebaa1c132 | [
"MIT"
] | null | null | null | runuwsgi/management/commands/__init__.py | silentsokolov/django-runuwsgi | 24b9ec4308113cd5eaef35b4c5866deebaa1c132 | [
"MIT"
] | null | null | null | __author__ = 'silent'
| 11 | 21 | 0.727273 |
acf304face3ca4405f706ff001bd3ca856e00027 | 9,052 | py | Python | budget/budgetManager.py | deep4788/budgetManager | f7a20c9458315cf608c3ab5fdb4f2fad998d73f8 | [
"MIT"
] | 1 | 2016-10-17T16:26:33.000Z | 2016-10-17T16:26:33.000Z | budget/budgetManager.py | deep4788/budgetManager | f7a20c9458315cf608c3ab5fdb4f2fad998d73f8 | [
"MIT"
] | null | null | null | budget/budgetManager.py | deep4788/budgetManager | f7a20c9458315cf608c3ab5fdb4f2fad998d73f8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import sys
from PyQt4 import QtCore
from PyQt4 import QtGui
from csvgenerator import *
from database import *
class BudgetWidget(QtGui.QWidget):
    """This is the main Budget widget class which encapsulates the whole app."""
    # First get the handle to Datastore (all the instances of BudgetWidget share dbHandle; it is a class member)
    dbHandle = Datastore()
    dbHandle.connect()
    def __init__(self):
        # First construct QWidget
        # NOTE(review): super(self.__class__, ...) breaks under subclassing
        # (infinite recursion); harmless while BudgetWidget is never subclassed
        super(self.__class__, self).__init__()
        # Dictionary mapping months to combo-box indices
        hashMapOfMonths = {'January': 0, 'February': 1, 'March': 2, 'April': 3, 'May': 4, 'June': 5, 'July': 6, 'August': 7, 'September': 8, 'October': 9, 'November': 10, 'December': 11}
        # Create labels
        self.header = QtGui.QLabel("Budget Manager", self)
        now = datetime.datetime.now()
        # '%B' is the full month name, used to look up the combo-box index
        hashMapCurrentMonthIndex = hashMapOfMonths[now.strftime('%B')]
        self.currentTotal = QtGui.QLabel("Current Total", self)
        self.foodLabel = QtGui.QLabel("Food", self)
        self.miscLabel = QtGui.QLabel("Miscellaneous", self)
        self.foodLabelTotalValue = QtGui.QLabel("000000000000000000", self) # Hack to make label long
        self.miscLabelTotalValue = QtGui.QLabel("000000000000000000", self) # Hack to make label long
        # Set drop-down list for months
        self.comboBoxMonth = QtGui.QComboBox(self)
        self.comboBoxMonth.addItem("January")
        self.comboBoxMonth.addItem("February")
        self.comboBoxMonth.addItem("March")
        self.comboBoxMonth.addItem("April")
        self.comboBoxMonth.addItem("May")
        self.comboBoxMonth.addItem("June")
        self.comboBoxMonth.addItem("July")
        self.comboBoxMonth.addItem("August")
        self.comboBoxMonth.addItem("September")
        self.comboBoxMonth.addItem("October")
        self.comboBoxMonth.addItem("November")
        self.comboBoxMonth.addItem("December")
        self.comboBoxMonth.setCurrentIndex(hashMapCurrentMonthIndex) # Set current display month
        # Set drop-down list for years
        listOfThreeYear = [] # Will hold previous, current, next year
        listOfThreeYear.append(str(int(now.strftime('%Y'))-1))
        listOfThreeYear.append(str(int(now.strftime('%Y'))))
        listOfThreeYear.append(str(int(now.strftime('%Y'))+1))
        self.comboBoxYear = QtGui.QComboBox(self)
        for year in listOfThreeYear:
            self.comboBoxYear.addItem(year)
        self.comboBoxYear.setCurrentIndex(listOfThreeYear.index(now.strftime('%Y'))) # Set current display year
        # Set action method to be called when month/year comboBox is changed
        self.comboBoxMonth.activated[str].connect(self.updateTotalValuesMethod)
        self.comboBoxYear.activated[str].connect(self.updateTotalValuesMethod)
        # Create line-edits
        self.foodLineedit = QtGui.QLineEdit(self)
        self.miscLineedit = QtGui.QLineEdit(self)
        # Create push-buttons
        self.submitButton = QtGui.QPushButton('Submit', self)
        self.csvButton = QtGui.QPushButton('Generate CSV', self)
        # Set labels positions (all positions/sizes are hard-coded pixels)
        self.header.move(130, 20)
        self.header.setStyleSheet('font-size: 18px')
        self.comboBoxMonth.move(95, 43)
        self.comboBoxYear.move(213, 43)
        self.currentTotal.move(280, 75)
        self.foodLabel.move(5, 95)
        self.foodLabel.setStyleSheet('font-weight: bold')
        self.miscLabel.move(5, 115)
        self.miscLabel.setStyleSheet('font-weight: bold')
        self.foodLabelTotalValue.move(300, 95)
        self.miscLabelTotalValue.move(300, 115)
        # Set line-edits positions and size
        self.foodLineedit.move(100, 90)
        self.foodLineedit.resize(90, 20)
        self.miscLineedit.move(100, 110)
        self.miscLineedit.resize(90, 20)
        # Set position of submitButton and the action associated with it
        self.submitButton.move(117, 160)
        self.submitButton.clicked.connect(self.submitButtonClicked)
        self.submitButton.setStyleSheet("background-color: #FFB90F; border-style: outset; border-width: 2px; border-radius: 10px; border-color: beige; font: bold 14px; min-width: 10em; padding: 6px")
        # Set position of csvButton and the action associated with it
        self.csvButton.move(117, 190)
        self.csvButton.clicked.connect(self.csvButtonClicked)
        self.csvButton.setStyleSheet("background-color: green; border-style: outset; border-width: 2px; border-radius: 10px; border-color: beige; font: bold 14px; min-width: 10em; padding: 6px")
        # Set app window size, title, background color and center the widget on the screen
        widgetWidth = 380
        widgetHeight = 230
        desktopSize = QtGui.QApplication.desktop().availableGeometry(self)
        desktopWidth = desktopSize.getRect()[2]
        desktopHeight = desktopSize.getRect()[3]
        # NOTE(review): '/' yields floats under Python 3; setGeometry expects
        # ints -- presumably this app targets Python 2 (PyQt4); confirm
        widgetX = (desktopWidth - widgetWidth) / 2
        widgetY = (desktopHeight - widgetHeight) / 2
        self.setGeometry(widgetX, widgetY, widgetWidth, widgetHeight)
        self.setWindowTitle('Budget Manager')
        colorPalette = self.palette()
        colorPalette.setColor(QtGui.QPalette.Background, QtGui.QColor(67, 205, 128))
        self.setPalette(colorPalette)
        self.show()
        # Fill the food label total value with the current total amount for food and misc
        currentMonth = now.strftime('%B')
        currentYear = now.strftime('%Y')
        currFoodTotal = BudgetWidget.dbHandle.fetchFoodAccount(currentMonth, currentYear)
        currMiscTotal = BudgetWidget.dbHandle.fetchMiscAccount(currentMonth, currentYear)
        self.foodLabelTotalValue.setText(str(currFoodTotal))
        self.miscLabelTotalValue.setText(str(currMiscTotal))
    def updateTotalValuesMethod(self, text):
        """This method gets called when user changes combobox values for month/year.
        It updates the total value of food/misc display on the widget.
        The `text` argument (the new combo-box text, supplied by Qt) is unused;
        the current selection is re-read from both combo boxes instead.
        """
        # Get the current selected month and year
        currentSelectedMonth = str(self.comboBoxMonth.currentText())
        currentSelectedYear = str(self.comboBoxYear.currentText())
        # Set the value labels from the stored totals for that month/year
        currFoodTotal = BudgetWidget.dbHandle.fetchFoodAccount(currentSelectedMonth, currentSelectedYear)
        currMiscTotal = BudgetWidget.dbHandle.fetchMiscAccount(currentSelectedMonth, currentSelectedYear)
        self.foodLabelTotalValue.setText(str(currFoodTotal))
        self.miscLabelTotalValue.setText(str(currMiscTotal))
    def submitButtonClicked(self):
        """This method gets called when the user presses the submit button.
        It updates the database based on the user entered values and also
        updates the display on the widget.
        """
        # Get the user entered values (empty fields count as 0.0)
        # NOTE(review): float() raises ValueError on non-numeric input; there
        # is no validation here -- confirm whether that is acceptable
        foodValueEnteredByUser = self.foodLineedit.text()
        miscValueEnteredByUser = self.miscLineedit.text()
        if not foodValueEnteredByUser:
            foodValueEnteredByUser = 0.0
        else:
            foodValueEnteredByUser = float(foodValueEnteredByUser)
        if not miscValueEnteredByUser:
            miscValueEnteredByUser = 0.0
        else:
            miscValueEnteredByUser = float(miscValueEnteredByUser)
        # Get the current selected month and year
        currentSelectedMonth = str(self.comboBoxMonth.currentText())
        currentSelectedYear = str(self.comboBoxYear.currentText())
        # Set the value labels to the stored totals plus the new amounts
        currFoodTotal = BudgetWidget.dbHandle.fetchFoodAccount(currentSelectedMonth, currentSelectedYear) + foodValueEnteredByUser
        currMiscTotal = BudgetWidget.dbHandle.fetchMiscAccount(currentSelectedMonth, currentSelectedYear) + miscValueEnteredByUser
        self.foodLabelTotalValue.setText(str(currFoodTotal))
        self.miscLabelTotalValue.setText(str(currMiscTotal))
        # Update the database with the entered values
        BudgetWidget.dbHandle.insertFoodAccount(currentSelectedMonth, currentSelectedYear, foodValueEnteredByUser)
        BudgetWidget.dbHandle.insertMiscAccount(currentSelectedMonth, currentSelectedYear, miscValueEnteredByUser)
        # Clear the line-edits
        self.foodLineedit.clear()
        self.miscLineedit.clear()
    def csvButtonClicked(self):
        """This method gets called when the user presses the CSV generator button.
        It asks for a target directory and calls genCSV() to generate the CSV file.
        """
        # First get the location where file needs to be saved
        fLocation = QtGui.QFileDialog.getExistingDirectory(self, 'File Location', '/home', QtGui.QFileDialog.ShowDirsOnly)
        # Now call the generate csv function to generate the csv file
        genCSV(BudgetWidget.dbHandle, fLocation)
def main():
    """Entry point: start the Qt event loop with a BudgetWidget on screen."""
    app = QtGui.QApplication(sys.argv)
    # keep a reference so the widget is not garbage-collected while running
    widget = BudgetWidget()
    sys.exit(app.exec_())
# Run the GUI only when this file is executed directly (not when imported)
if __name__ == "__main__":
    main()
| 44.156098 | 199 | 0.691891 |
acf3058aa7a700ee860e212e2d016dfa7e9df344 | 2,641 | py | Python | setup.py | bluetech/django-otp | f14a0cb2c518bd093f99ebb151015049ac7dc6ae | [
"BSD-2-Clause"
] | null | null | null | setup.py | bluetech/django-otp | f14a0cb2c518bd093f99ebb151015049ac7dc6ae | [
"BSD-2-Clause"
] | null | null | null | setup.py | bluetech/django-otp | f14a0cb2c518bd093f99ebb151015049ac7dc6ae | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
import os
import os.path
import re
from setuptools import find_packages, setup
def gen_package_data(pkg_root, paths, prune=()):
    """
    Generates a value for package_data.

    pkg_root is the path to the Python package we're generating package_data
    for. paths is a list of paths relative to pkg_root to add. We'll search
    these directories recursively, yielding a sequence of '<sub-path>/*'
    strings to select every nested file for inclusion.

    The optional third argument is a collection of directory names to prune
    from the traversal.
    """
    # NOTE: the default used to be a mutable list; an immutable tuple avoids
    # the shared-mutable-default pitfall while remaining backward compatible.
    pkg_root = os.path.abspath(pkg_root)

    # For stripping pkg_root from results.
    root_re = re.compile(r'^' + re.escape(pkg_root) + r'/*')

    for path in paths:
        for dirpath, dirnames, _ in os.walk(os.path.join(pkg_root, path)):
            # Rewrite the absolute walk path as a package-relative glob.
            dirpath = root_re.sub('', dirpath)
            yield os.path.join(dirpath, '*')

            if prune:
                # Mutating dirnames in place tells os.walk (topdown mode)
                # not to descend into the pruned subdirectories.
                dirnames[:] = [d for d in dirnames if d not in prune]
def find_package_data(*args, **kwargs):
    """Convenience wrapper: materialize gen_package_data() into a list."""
    entries = gen_package_data(*args, **kwargs)
    return list(entries)
# Packaging metadata; code lives under src/ ("src layout"), and each plugin
# ships its templates/ directory as package data.
setup(
    name='django-otp',
    version='0.7.2',
    description="A pluggable framework for adding two-factor authentication to Django using one-time passwords.",
    license='BSD',
    author="Peter Sagerson",
    author_email='psagers@ignorare.net',
    url='https://github.com/django-otp/django-otp',
    project_urls={
        "Documentation": 'https://django-otp-official.readthedocs.io/',
        "Source": 'https://github.com/django-otp/django-otp',
    },
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Framework :: Django",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: BSD License",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 3",
        "Topic :: Security",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
    # Tell setuptools that importable packages are rooted at src/.
    package_dir={'': 'src'},
    packages=find_packages(where='src'),
    # Non-Python files (templates) to include with each package.
    package_data={
        'django_otp': find_package_data('src/django_otp', ['templates']),
        'django_otp.plugins.otp_email': find_package_data('src/django_otp/plugins/otp_email', ['templates']),
        'django_otp.plugins.otp_hotp': find_package_data('src/django_otp/plugins/otp_hotp', ['templates']),
        'django_otp.plugins.otp_totp': find_package_data('src/django_otp/plugins/otp_totp', ['templates']),
    },
    # Package data must be readable from the filesystem, so no zipped eggs.
    zip_safe=False,
    install_requires=[
        'django >= 1.11',
        'six >= 1.10.0'
    ],
    extras_require={
        'qrcode': ['qrcode'],
    },
)
| 32.604938 | 113 | 0.64256 |
acf3061a3d3808bb9c6309ea47d59599cec87edb | 2,342 | py | Python | backend/api/migrations/0003_organizationaddress.py | kuanfan99/zeva | 57b506a108fe57438506569d5503c90c52216b2f | [
"Apache-2.0"
] | 3 | 2020-03-25T03:06:20.000Z | 2021-01-20T23:36:03.000Z | backend/api/migrations/0003_organizationaddress.py | kuanfan99/zeva | 57b506a108fe57438506569d5503c90c52216b2f | [
"Apache-2.0"
] | 740 | 2019-12-16T15:53:39.000Z | 2022-03-26T08:25:10.000Z | backend/api/migrations/0003_organizationaddress.py | kuanfan99/zeva | 57b506a108fe57438506569d5503c90c52216b2f | [
"Apache-2.0"
] | 11 | 2019-11-28T20:39:15.000Z | 2022-01-31T17:53:31.000Z | # Generated by Django 3.0.2 on 2020-01-08 22:22
import db_comments.model_mixins
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django (see header); creates the organization_address
    # table with audit columns, validity dates and free-form address fields.
    # Do not hand-edit applied migrations -- add a new migration instead.

    dependencies = [
        ('api', '0002_auto_20200108_2135'),
    ]

    operations = [
        migrations.CreateModel(
            name='OrganizationAddress',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Audit timestamps maintained automatically by Django.
                ('create_timestamp', models.DateTimeField(auto_now_add=True, null=True)),
                ('update_timestamp', models.DateTimeField(auto_now=True, null=True)),
                # Validity window for the address record.
                ('effective_date', models.DateField(blank=True, null=True)),
                ('expiration_date', models.DateField(blank=True, null=True)),
                ('address_line_1', models.CharField(blank=True, max_length=500, null=True)),
                ('address_line_2', models.CharField(blank=True, max_length=100, null=True)),
                ('address_line_3', models.CharField(blank=True, max_length=100, null=True)),
                ('city', models.CharField(blank=True, max_length=100, null=True)),
                ('postal_code', models.CharField(blank=True, max_length=10, null=True)),
                ('state', models.CharField(blank=True, max_length=50, null=True)),
                ('county', models.CharField(blank=True, max_length=50, null=True)),
                ('country', models.CharField(blank=True, max_length=100, null=True)),
                ('other', models.CharField(blank=True, max_length=100, null=True)),
                ('create_user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='api_organizationaddress_CREATE_USER', to='api.UserProfile')),
                ('organization', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='addresses', to='api.Organization')),
                ('update_user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='api_organizationaddress_UPDATE_USER', to='api.UserProfile')),
            ],
            options={
                'db_table': 'organization_address',
            },
            bases=(models.Model, db_comments.model_mixins.DBComments),
        ),
    ]
acf30675e952563ec7b743aa2d28f1438b45d200 | 8,251 | py | Python | tests/chainer_tests/links_tests/model_tests/test_vision.py | mingxiaoh/chainer-v3 | 815ff00f5eaf7944d6e8a75662ff64a2fe046a4d | [
"BSD-3-Clause"
] | 7 | 2017-05-08T07:02:40.000Z | 2018-12-02T18:35:39.000Z | tests/chainer_tests/links_tests/model_tests/test_vision.py | mingxiaoh/chainer-v3 | 815ff00f5eaf7944d6e8a75662ff64a2fe046a4d | [
"BSD-3-Clause"
] | null | null | null | tests/chainer_tests/links_tests/model_tests/test_vision.py | mingxiaoh/chainer-v3 | 815ff00f5eaf7944d6e8a75662ff64a2fe046a4d | [
"BSD-3-Clause"
] | null | null | null | import unittest
import numpy
from chainer import cuda
from chainer.links.model.vision import resnet
from chainer.links.model.vision import vgg
from chainer import testing
from chainer.testing import attr
from chainer.variable import Variable
@unittest.skipUnless(resnet.available, 'Pillow is required')
@attr.slow
class TestResNet50Layers(unittest.TestCase):
    # Shape/dtype smoke tests for the ResNet-50 wrapper.  Using
    # pretrained_model=None avoids downloading weights; the asserts only
    # check output shapes and dtypes, never numeric values.

    def setUp(self):
        self.link = resnet.ResNet50Layers(pretrained_model=None)

    def test_available_layers(self):
        # The wrapper exposes exactly 9 retrievable layer names.
        result = self.link.available_layers
        self.assertIsInstance(result, list)
        self.assertEqual(len(result), 9)

    def check_call(self):
        xp = self.link.xp
        # Suppress warning that arises from zero division in BatchNormalization
        with numpy.errstate(divide='ignore'):
            # Standard 224x224 input: full forward pass to the softmax output.
            x1 = Variable(xp.asarray(numpy.random.uniform(
                -1, 1, (1, 3, 224, 224)).astype(numpy.float32)))
            y1 = cuda.to_cpu(self.link(x1)['prob'].data)
            self.assertEqual(y1.shape, (1, 1000))
            # Smaller input, stopping at the global-pooling feature layer.
            x2 = Variable(xp.asarray(numpy.random.uniform(
                -1, 1, (1, 3, 128, 128)).astype(numpy.float32)))
            y2 = cuda.to_cpu(self.link(x2, layers=['pool5'])['pool5'].data)
            self.assertEqual(y2.shape, (1, 2048))

    def test_call_cpu(self):
        self.check_call()

    @attr.gpu
    def test_call_gpu(self):
        self.link.to_gpu()
        self.check_call()

    def test_prepare(self):
        # prepare() should accept HWC uint8/float images, grayscale images and
        # CHW arrays, resizing to 224x224 unless size=None is given.
        x1 = numpy.random.uniform(0, 255, (320, 240, 3)).astype(numpy.uint8)
        x2 = numpy.random.uniform(0, 255, (320, 240)).astype(numpy.uint8)
        x3 = numpy.random.uniform(0, 255, (160, 120, 3)).astype(numpy.float32)
        x4 = numpy.random.uniform(0, 255, (1, 160, 120)).astype(numpy.float32)
        x5 = numpy.random.uniform(0, 255, (3, 160, 120)).astype(numpy.uint8)
        y1 = resnet.prepare(x1)
        self.assertEqual(y1.shape, (3, 224, 224))
        self.assertEqual(y1.dtype, numpy.float32)
        y2 = resnet.prepare(x2)
        self.assertEqual(y2.shape, (3, 224, 224))
        self.assertEqual(y2.dtype, numpy.float32)
        y3 = resnet.prepare(x3, size=None)
        self.assertEqual(y3.shape, (3, 160, 120))
        self.assertEqual(y3.dtype, numpy.float32)
        y4 = resnet.prepare(x4)
        self.assertEqual(y4.shape, (3, 224, 224))
        self.assertEqual(y4.dtype, numpy.float32)
        y5 = resnet.prepare(x5, size=None)
        self.assertEqual(y5.shape, (3, 160, 120))
        self.assertEqual(y5.dtype, numpy.float32)

    def check_extract(self):
        x1 = numpy.random.uniform(0, 255, (320, 240, 3)).astype(numpy.uint8)
        x2 = numpy.random.uniform(0, 255, (320, 240)).astype(numpy.uint8)
        # errstate: see check_call -- untrained BatchNormalization divides by
        # a zero variance, which would otherwise emit warnings.
        with numpy.errstate(divide='ignore'):
            result = self.link.extract([x1, x2], layers=['res3', 'pool5'])
            self.assertEqual(len(result), 2)
            y1 = cuda.to_cpu(result['res3'].data)
            self.assertEqual(y1.shape, (2, 512, 28, 28))
            self.assertEqual(y1.dtype, numpy.float32)
            y2 = cuda.to_cpu(result['pool5'].data)
            self.assertEqual(y2.shape, (2, 2048))
            self.assertEqual(y2.dtype, numpy.float32)

            # size=None keeps the caller's spatial resolution.
            x3 = numpy.random.uniform(0, 255, (80, 60)).astype(numpy.uint8)
            result = self.link.extract([x3], layers=['res2'], size=None)
            self.assertEqual(len(result), 1)
            y3 = cuda.to_cpu(result['res2'].data)
            self.assertEqual(y3.shape, (1, 256, 20, 15))
            self.assertEqual(y3.dtype, numpy.float32)

    def test_extract_cpu(self):
        self.check_extract()

    @attr.gpu
    def test_extract_gpu(self):
        self.link.to_gpu()
        self.check_extract()

    def check_predict(self):
        x1 = numpy.random.uniform(0, 255, (320, 240, 3)).astype(numpy.uint8)
        x2 = numpy.random.uniform(0, 255, (320, 240)).astype(numpy.uint8)
        with numpy.errstate(divide='ignore'):
            # Both with and without ten-crop oversampling, the prediction is a
            # (batch, 1000) float32 class-probability array.
            result = self.link.predict([x1, x2], oversample=False)
            y = cuda.to_cpu(result.data)
            self.assertEqual(y.shape, (2, 1000))
            self.assertEqual(y.dtype, numpy.float32)
            result = self.link.predict([x1, x2], oversample=True)
            y = cuda.to_cpu(result.data)
            self.assertEqual(y.shape, (2, 1000))
            self.assertEqual(y.dtype, numpy.float32)

    def test_predict_cpu(self):
        self.check_predict()

    @attr.gpu
    def test_predict_gpu(self):
        self.link.to_gpu()
        self.check_predict()
# Fixed: this class previously reused the guard from the ResNet tests
# (resnet.available); it should be keyed on the vgg module's own flag.
@unittest.skipUnless(vgg.available, 'Pillow is required')
@attr.slow
class TestVGG16Layers(unittest.TestCase):
    """Shape/dtype smoke tests for the VGG-16 wrapper.

    pretrained_model=None avoids downloading weights; asserts only check
    output shapes and dtypes, never numeric values.
    """

    def setUp(self):
        self.link = vgg.VGG16Layers(pretrained_model=None)

    def test_available_layers(self):
        # The wrapper exposes exactly 22 retrievable layer names.
        result = self.link.available_layers
        self.assertIsInstance(result, list)
        self.assertEqual(len(result), 22)

    def check_call(self):
        # Full forward pass on a 224x224 input yields (1, 1000) probabilities.
        xp = self.link.xp
        x1 = Variable(xp.asarray(numpy.random.uniform(
            -1, 1, (1, 3, 224, 224)).astype(numpy.float32)))
        y1 = cuda.to_cpu(self.link(x1)['prob'].data)
        self.assertEqual(y1.shape, (1, 1000))

    def test_call_cpu(self):
        self.check_call()

    @attr.gpu
    def test_call_gpu(self):
        self.link.to_gpu()
        self.check_call()

    def test_prepare(self):
        # prepare() should accept HWC uint8/float images, grayscale images and
        # CHW arrays, resizing to 224x224 unless size=None is given.
        x1 = numpy.random.uniform(0, 255, (320, 240, 3)).astype(numpy.uint8)
        x2 = numpy.random.uniform(0, 255, (320, 240)).astype(numpy.uint8)
        x3 = numpy.random.uniform(0, 255, (160, 120, 3)).astype(numpy.float32)
        x4 = numpy.random.uniform(0, 255, (1, 160, 120)).astype(numpy.float32)
        x5 = numpy.random.uniform(0, 255, (3, 160, 120)).astype(numpy.uint8)
        y1 = vgg.prepare(x1)
        self.assertEqual(y1.shape, (3, 224, 224))
        self.assertEqual(y1.dtype, numpy.float32)
        y2 = vgg.prepare(x2)
        self.assertEqual(y2.shape, (3, 224, 224))
        self.assertEqual(y2.dtype, numpy.float32)
        y3 = vgg.prepare(x3, size=None)
        self.assertEqual(y3.shape, (3, 160, 120))
        self.assertEqual(y3.dtype, numpy.float32)
        y4 = vgg.prepare(x4)
        self.assertEqual(y4.shape, (3, 224, 224))
        self.assertEqual(y4.dtype, numpy.float32)
        y5 = vgg.prepare(x5, size=None)
        self.assertEqual(y5.shape, (3, 160, 120))
        self.assertEqual(y5.dtype, numpy.float32)

    def check_extract(self):
        x1 = numpy.random.uniform(0, 255, (320, 240, 3)).astype(numpy.uint8)
        x2 = numpy.random.uniform(0, 255, (320, 240)).astype(numpy.uint8)
        result = self.link.extract([x1, x2], layers=['pool3', 'fc7'])
        self.assertEqual(len(result), 2)
        y1 = cuda.to_cpu(result['pool3'].data)
        self.assertEqual(y1.shape, (2, 256, 28, 28))
        self.assertEqual(y1.dtype, numpy.float32)
        y2 = cuda.to_cpu(result['fc7'].data)
        self.assertEqual(y2.shape, (2, 4096))
        self.assertEqual(y2.dtype, numpy.float32)

        # size=None keeps the caller's spatial resolution.
        x3 = numpy.random.uniform(0, 255, (80, 60)).astype(numpy.uint8)
        result = self.link.extract([x3], layers=['pool1'], size=None)
        self.assertEqual(len(result), 1)
        y3 = cuda.to_cpu(result['pool1'].data)
        self.assertEqual(y3.shape, (1, 64, 40, 30))
        self.assertEqual(y3.dtype, numpy.float32)

    def test_extract_cpu(self):
        self.check_extract()

    @attr.gpu
    def test_extract_gpu(self):
        self.link.to_gpu()
        self.check_extract()

    def check_predict(self):
        # Both with and without ten-crop oversampling, the prediction is a
        # (batch, 1000) float32 class-probability array.
        x1 = numpy.random.uniform(0, 255, (320, 240, 3)).astype(numpy.uint8)
        x2 = numpy.random.uniform(0, 255, (320, 240)).astype(numpy.uint8)
        result = self.link.predict([x1, x2], oversample=False)
        y = cuda.to_cpu(result.data)
        self.assertEqual(y.shape, (2, 1000))
        self.assertEqual(y.dtype, numpy.float32)
        result = self.link.predict([x1, x2], oversample=True)
        y = cuda.to_cpu(result.data)
        self.assertEqual(y.shape, (2, 1000))
        self.assertEqual(y.dtype, numpy.float32)

    def test_predict_cpu(self):
        self.check_predict()

    @attr.gpu
    def test_predict_gpu(self):
        self.link.to_gpu()
        self.check_predict()
| 37.166667 | 79 | 0.615804 |
acf306a0ae39636d11f1eed13a96b77ab647d991 | 644 | py | Python | venv/bin/rst2html.py | metu-sparg/higrid | ebee0f35ea1712a01f3fdbaae132127ce4833baf | [
"BSD-3-Clause"
] | 8 | 2019-04-27T01:19:45.000Z | 2020-09-21T03:31:01.000Z | venv/bin/rst2html.py | metu-sparg/higrid | ebee0f35ea1712a01f3fdbaae132127ce4833baf | [
"BSD-3-Clause"
] | null | null | null | venv/bin/rst2html.py | metu-sparg/higrid | ebee0f35ea1712a01f3fdbaae132127ce4833baf | [
"BSD-3-Clause"
] | 5 | 2019-04-27T01:19:47.000Z | 2020-09-20T15:15:19.000Z | #!/Users/huseyinhacihabiboglu/PycharmProjects/higrid/venv/bin/python
# $Id: rst2html.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing HTML.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates (X)HTML documents from standalone reStructuredText '
'sources. ' + default_description)
publish_cmdline(writer_name='html', description=description)
| 26.833333 | 78 | 0.746894 |
acf307d4fb2909b8cc5786e113beffe8aa3121ea | 322 | py | Python | undp-transparency-portal-be/master_tables/management/commands/upload_master_data.py | undp/transparencyportal | 244fbb82c05d119f0acbe7f5efbb44572d9150a0 | [
"CC-BY-3.0"
] | 5 | 2019-09-10T15:05:18.000Z | 2022-02-02T02:53:32.000Z | undp-transparency-portal-be/master_tables/management/commands/upload_master_data.py | undp/transparencyportal | 244fbb82c05d119f0acbe7f5efbb44572d9150a0 | [
"CC-BY-3.0"
] | 4 | 2019-04-02T15:02:20.000Z | 2021-11-09T10:55:32.000Z | undp-transparency-portal-be/master_tables/management/commands/upload_master_data.py | undp/transparencyportal | 244fbb82c05d119f0acbe7f5efbb44572d9150a0 | [
"CC-BY-3.0"
] | 2 | 2021-09-01T14:30:29.000Z | 2021-09-01T14:32:57.000Z | from django.core.management.base import BaseCommand
from master_tables.cron import MasterTables
class Command(BaseCommand):
help = 'Runs script to upload master data from csv'
def handle(self, *args, **options):
MasterTables().do()
self.stdout.write(self.style.SUCCESS('Successfully run cron'))
| 29.272727 | 70 | 0.726708 |
acf307f2e53ad3800f20fb8c956f99a3fa87c07d | 721 | py | Python | ecommerce/serializers.py | Wassaf-Shahzad/micromasters | b1340a8c233499b1d8d22872a6bc1fe7f49fd323 | [
"BSD-3-Clause"
] | 32 | 2016-03-25T01:03:13.000Z | 2022-01-15T19:35:42.000Z | ecommerce/serializers.py | Wassaf-Shahzad/micromasters | b1340a8c233499b1d8d22872a6bc1fe7f49fd323 | [
"BSD-3-Clause"
] | 4,858 | 2016-03-03T13:48:30.000Z | 2022-03-29T22:09:51.000Z | ecommerce/serializers.py | umarmughal824/micromasters | ea92d3bcea9be4601150fc497302ddacc1161622 | [
"BSD-3-Clause"
] | 20 | 2016-08-18T22:07:44.000Z | 2021-11-15T13:35:35.000Z | """Serializers for ecommerce REST APIs"""
from rest_framework import serializers
from ecommerce.models import Coupon
class CouponSerializer(serializers.ModelSerializer):
"""Serializer for Coupon"""
program_id = serializers.SerializerMethodField()
content_type = serializers.SerializerMethodField()
class Meta:
model = Coupon
fields = ('coupon_code', 'coupon_type', 'content_type', 'amount_type', 'amount', 'program_id', 'object_id',)
def get_program_id(self, coupon):
"""Get program id from coupon program"""
return coupon.program.id
def get_content_type(self, coupon):
"""Get the content type as a string"""
return coupon.content_type.model
| 30.041667 | 116 | 0.701803 |
acf3088dd91ee449827441676a81a59617301cdf | 19,954 | py | Python | tests/unittests/core/evc/test_resolutions.py | aimar1986bupt/orion | 6d217af1f9002aa671f8a3260a687c540ca5336d | [
"BSD-3-Clause"
] | 4 | 2020-03-25T17:44:40.000Z | 2020-04-10T13:53:13.000Z | tests/unittests/core/evc/test_resolutions.py | aimar1986bupt/orion | 6d217af1f9002aa671f8a3260a687c540ca5336d | [
"BSD-3-Clause"
] | 2 | 2018-06-26T19:17:09.000Z | 2022-02-23T13:40:04.000Z | tests/unittests/core/evc/test_resolutions.py | aimar1986bupt/orion | 6d217af1f9002aa671f8a3260a687c540ca5336d | [
"BSD-3-Clause"
] | 2 | 2019-08-26T11:36:47.000Z | 2020-04-07T13:05:48.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Collection of tests resolutions in :mod:`orion.core.evc.conflicts`."""
import pytest
from orion.algo.space import Dimension
from orion.core.evc import adapters
from orion.core.evc import conflicts
@pytest.fixture
def add_dimension_resolution(new_dimension_conflict):
    """Build an ``AddDimensionResolution`` for the new-dimension conflict."""
    conflict = new_dimension_conflict
    return conflict.AddDimensionResolution(conflict)
@pytest.fixture
def change_dimension_resolution(changed_dimension_conflict):
    """Build a ``ChangeDimensionResolution`` for the changed-prior conflict."""
    conflict = changed_dimension_conflict
    return conflict.ChangeDimensionResolution(conflict)
@pytest.fixture
def remove_dimension_resolution(missing_dimension_conflict):
    """Build a ``RemoveDimensionResolution`` with a default value of 0."""
    conflict = missing_dimension_conflict
    return conflict.RemoveDimensionResolution(conflict, default_value=0)
@pytest.fixture
def rename_dimension_resolution(missing_dimension_conflict, new_dimension_conflict):
    """Build a resolution renaming the missing dimension to the new one."""
    conflict = missing_dimension_conflict
    return conflict.RenameDimensionResolution(conflict, new_dimension_conflict)
@pytest.fixture
def algorithm_resolution(algorithm_conflict):
    """Build an ``AlgorithmResolution`` for the algorithm-config conflict."""
    conflict = algorithm_conflict
    return conflict.AlgorithmResolution(conflict)
@pytest.fixture
def code_resolution(code_conflict):
    """Build a ``CodeResolution`` flagging a breaking code change."""
    conflict = code_conflict
    return conflict.CodeResolution(conflict, adapters.CodeChange.BREAK)
@pytest.fixture
def experiment_name_resolution(create_db_instance, experiment_name_conflict):
    """Build an ``ExperimentNameResolution`` renaming the experiment."""
    conflict = experiment_name_conflict
    return conflict.ExperimentNameResolution(conflict, new_name="new-exp-name")
class TestAddDimensionResolution(object):
    """Test methods for resolution of new dimension"""
    # Covers default-value handling (none / from dimension / user-supplied),
    # the rendered prior string, the '~+' marker, adapters and revert().

    def test_init_no_default(self, new_dimension_conflict):
        """Verify instantiation of resolution without default value"""
        resolution = new_dimension_conflict.AddDimensionResolution(new_dimension_conflict)

        assert resolution.default_value is Dimension.NO_DEFAULT_VALUE

    def test_init_no_default_but_in_dim(self, new_dimension_with_default_conflict):
        """Verify instantiation of resolution with default value in dimension"""
        resolution = new_dimension_with_default_conflict.AddDimensionResolution(
            new_dimension_with_default_conflict)

        # Falls back to the dimension's own default when none is supplied.
        assert (resolution.default_value ==
                new_dimension_with_default_conflict.dimension.default_value)

    def test_init_with_default(self, new_dimension_conflict,
                               new_dimension_with_default_conflict):
        """Verify instantiation of resolution with default value given by user"""
        default_value = 1.1

        resolution = new_dimension_conflict.AddDimensionResolution(
            new_dimension_conflict, default_value=default_value)

        assert resolution.default_value == default_value

        # A user-supplied default overrides the dimension's own default.
        assert new_dimension_with_default_conflict.dimension.default_value != default_value

        resolution = new_dimension_conflict.AddDimensionResolution(
            new_dimension_with_default_conflict, default_value=default_value)

        assert resolution.default_value == default_value

    def test_init_bad_default(self, new_dimension_conflict):
        """Verify instantiation of resolution with invalid default value"""
        default_value = 'bad'

        # The default is coerced with float(); a non-numeric string raises.
        with pytest.raises(ValueError) as exc:
            new_dimension_conflict.AddDimensionResolution(new_dimension_conflict,
                                                          default_value=default_value)

        assert "could not convert string to float: 'bad'" in str(exc.value)

    def test_new_prior_no_default(self, new_dimension_conflict):
        """Verify prior string without default value"""
        resolution = new_dimension_conflict.AddDimensionResolution(new_dimension_conflict)

        assert resolution.new_prior == 'norm(0, 2)'

    def test_new_prior_default_from_dim(self, new_dimension_with_default_conflict):
        """Verify prior string with default value in dimension"""
        resolution = new_dimension_with_default_conflict.AddDimensionResolution(
            new_dimension_with_default_conflict)

        assert resolution.new_prior == 'norm(0, 2, default_value=0.001)'

    def test_new_prior_default(self, new_dimension_conflict,
                               new_dimension_with_default_conflict):
        """Verify prior string with new default value given by user"""
        default_value = 1.2

        resolution = new_dimension_with_default_conflict.AddDimensionResolution(
            new_dimension_with_default_conflict, default_value=default_value)

        assert resolution.new_prior == 'norm(0, 2, default_value={})'.format(default_value)

        resolution = new_dimension_conflict.AddDimensionResolution(new_dimension_conflict,
                                                                   default_value=default_value)

        assert resolution.new_prior == 'norm(0, 2, default_value={})'.format(default_value)

    def test_prefix(self, add_dimension_resolution):
        """Verify prefix of resolution with corresponding marker"""
        assert add_dimension_resolution.prefix == "new~+"

    def test_repr_without_default(self, add_dimension_resolution):
        """Verify resolution representation for user interface, without default value"""
        assert repr(add_dimension_resolution) == "new~+norm(0, 2)"

    def test_repr_default_from_dim(self, new_dimension_with_default_conflict):
        """Verify resolution representation for user interface, without default value"""
        resolution = new_dimension_with_default_conflict.AddDimensionResolution(
            new_dimension_with_default_conflict)

        assert repr(resolution) == "new~+norm(0, 2, default_value=0.001)"

    def test_adapters_without_default(self, new_dimension_conflict):
        """Verify adapters without default values (filter everything out)"""
        param = {'name': 'new', 'type': 'real', 'value': Dimension.NO_DEFAULT_VALUE}

        resolution = new_dimension_conflict.AddDimensionResolution(new_dimension_conflict)

        resolution_adapters = resolution.get_adapters()
        assert len(resolution_adapters) == 1
        assert (resolution_adapters[0].configuration ==
                adapters.DimensionAddition(param).configuration)

    def test_adapters_with_default(self, new_dimension_conflict):
        """Verify adapters with default values"""
        param = {'name': 'new', 'type': 'real', 'value': 1.1}

        resolution = new_dimension_conflict.AddDimensionResolution(new_dimension_conflict,
                                                                   default_value=1.1)

        resolution_adapters = resolution.get_adapters()
        assert len(resolution_adapters) == 1
        assert (resolution_adapters[0].configuration ==
                adapters.DimensionAddition(param).configuration)

    def test_revert(self, new_dimension_conflict, add_dimension_resolution):
        """Verify reverting resolution set conflict to unresolved"""
        # Creating the resolution (via the fixture) resolves the conflict;
        # revert() must undo that and detach the resolution.
        assert new_dimension_conflict.is_resolved
        assert add_dimension_resolution.revert() == []
        assert not new_dimension_conflict.is_resolved
        assert new_dimension_conflict.resolution is None
class TestChangeDimensionResolution(object):
    """Test methods for resolution of changed dimensions"""
    # Covers the 'changed~+' marker, repr, the prior-change adapter and revert().

    def test_prefix(self, change_dimension_resolution):
        """Verify prefix of resolution with corresponding marker"""
        assert change_dimension_resolution.prefix == 'changed~+'

    def test_repr(self, change_dimension_resolution):
        """Verify resolution representation for user interface"""
        assert repr(change_dimension_resolution) == 'changed~+normal(0, 2)'

    def test_adapters(self, change_dimension_resolution):
        """Verify adapters with old and new priors"""
        name = "changed"
        old_prior = "uniform(-10, 10)"
        new_prior = "normal(0, 2)"

        resolution_adapters = change_dimension_resolution.get_adapters()
        assert len(resolution_adapters) == 1
        assert (resolution_adapters[0].configuration ==
                adapters.DimensionPriorChange(name, old_prior, new_prior).configuration)

    def test_revert(self, changed_dimension_conflict, change_dimension_resolution):
        """Verify reverting resolution set conflict to unresolved"""
        assert changed_dimension_conflict.is_resolved
        assert change_dimension_resolution.revert() == []
        assert not changed_dimension_conflict.is_resolved
        assert changed_dimension_conflict.resolution is None
class TestRemoveDimensionResolution(object):
    """Test methods for resolution of missing dimensions"""
    # Covers the 'missing~-' marker, repr with/without default, adapters
    # and revert().

    def test_prefix(self, missing_dimension_conflict):
        """Verify prefix of resolution with corresponding marker"""
        resolution = missing_dimension_conflict.RemoveDimensionResolution(
            missing_dimension_conflict)
        assert resolution.prefix == 'missing~-'

    def test_repr_no_default(self, missing_dimension_conflict):
        """Verify resolution representation for user interface, without default value"""
        resolution = missing_dimension_conflict.RemoveDimensionResolution(
            missing_dimension_conflict)

        assert repr(resolution) == 'missing~-'

    def test_repr_default_from_dim(self, missing_dimension_with_default_conflict):
        """Verify resolution representation for user interface, with default value from dimension"""
        resolution = missing_dimension_with_default_conflict.RemoveDimensionResolution(
            missing_dimension_with_default_conflict)

        assert repr(resolution) == 'missing~-0.0'

    def test_repr_default(self, missing_dimension_conflict,
                          missing_dimension_with_default_conflict):
        """Verify resolution representation for user interface, with default provided by user"""
        default_value = 1.2

        # A user-supplied default is rendered regardless of the dimension's own.
        resolution = missing_dimension_conflict.RemoveDimensionResolution(
            missing_dimension_with_default_conflict, default_value=default_value)

        assert repr(resolution) == 'missing~-{}'.format(default_value)

        resolution = missing_dimension_conflict.RemoveDimensionResolution(
            missing_dimension_conflict, default_value=default_value)

        assert repr(resolution) == 'missing~-{}'.format(default_value)

    def test_adapters_without_default(self, missing_dimension_conflict):
        """Verify adapters without default value"""
        param = {'name': 'missing', 'type': 'real', 'value': Dimension.NO_DEFAULT_VALUE}

        resolution = missing_dimension_conflict.RemoveDimensionResolution(
            missing_dimension_conflict)

        resolution_adapters = resolution.get_adapters()
        assert len(resolution_adapters) == 1
        assert (resolution_adapters[0].configuration ==
                adapters.DimensionDeletion(param).configuration)

    def test_adapters_with_default(self, missing_dimension_conflict):
        """Verify adapters with default value"""
        param = {'name': 'missing', 'type': 'real', 'value': 1.2}

        resolution = missing_dimension_conflict.RemoveDimensionResolution(
            missing_dimension_conflict, default_value=1.2)

        resolution_adapters = resolution.get_adapters()
        assert len(resolution_adapters) == 1
        assert (resolution_adapters[0].configuration ==
                adapters.DimensionDeletion(param).configuration)

    def test_revert(self, missing_dimension_conflict, remove_dimension_resolution):
        """Verify reverting resolution set conflict to unresolved"""
        assert missing_dimension_conflict.is_resolved
        assert remove_dimension_resolution.revert() == []
        assert not missing_dimension_conflict.is_resolved
        assert missing_dimension_conflict.resolution is None
class TestRenameDimensionResolution(object):
    """Test methods for renaming of missing dimensions"""
    # Renaming resolves BOTH conflicts at once (the missing dimension and the
    # new one).  If the priors differ, a side-effect ChangedDimensionConflict
    # is spawned; revert() must also deprecate that side-effect conflict.

    def test_init_same_prior(self, missing_dimension_conflict, new_dimension_same_prior_conflict):
        """Verify initialisation with identical priors generates no side-effect conflicts"""
        assert not missing_dimension_conflict.is_resolved
        assert not new_dimension_same_prior_conflict.is_resolved

        resolution = missing_dimension_conflict.RenameDimensionResolution(
            missing_dimension_conflict, new_dimension_same_prior_conflict)

        assert missing_dimension_conflict.is_resolved
        assert new_dimension_same_prior_conflict.is_resolved
        assert resolution.new_conflicts == []

    def test_init_different_prior(self, missing_dimension_conflict, new_dimension_conflict):
        """Verify initialisation with different priors generates a side-effect conflict"""
        assert not missing_dimension_conflict.is_resolved
        assert not new_dimension_conflict.is_resolved

        resolution = missing_dimension_conflict.RenameDimensionResolution(
            missing_dimension_conflict, new_dimension_conflict)

        assert missing_dimension_conflict.is_resolved
        assert new_dimension_conflict.is_resolved

        # The prior change must still be resolved separately.
        assert len(resolution.new_conflicts) == 1
        assert isinstance(resolution.new_conflicts[0], conflicts.ChangedDimensionConflict)
        assert resolution.new_conflicts[0].old_prior == missing_dimension_conflict.prior
        assert resolution.new_conflicts[0].new_prior == new_dimension_conflict.prior

    def test_prefix(self, rename_dimension_resolution):
        """Verify prefix of resolution with corresponding marker"""
        assert rename_dimension_resolution.prefix == 'missing~>'

    def test_repr(self, rename_dimension_resolution):
        """Verify resolution representation for user interface"""
        assert repr(rename_dimension_resolution) == 'missing~>new'

    def test_adapters(self, rename_dimension_resolution):
        """Verify adapters with old and new names"""
        old_name = "missing"
        new_name = "new"

        resolution_adapters = rename_dimension_resolution.get_adapters()
        assert len(resolution_adapters) == 1
        assert (resolution_adapters[0].configuration ==
                adapters.DimensionRenaming(old_name, new_name).configuration)

    def test_revert_same_prior(self, missing_dimension_conflict, new_dimension_same_prior_conflict):
        """Verify reverting resolution set conflict to unresolved"""
        resolution = missing_dimension_conflict.RenameDimensionResolution(
            missing_dimension_conflict, new_dimension_same_prior_conflict)

        assert missing_dimension_conflict.is_resolved
        assert new_dimension_same_prior_conflict.is_resolved
        assert len(resolution.new_conflicts) == 0

        assert resolution.revert() == []

        assert len(resolution.new_conflicts) == 0
        assert not missing_dimension_conflict.is_resolved
        assert not new_dimension_same_prior_conflict.is_resolved
        assert missing_dimension_conflict.resolution is None
        assert new_dimension_same_prior_conflict.resolution is None

    def test_revert_different_prior(self, missing_dimension_conflict, new_dimension_conflict,
                                    rename_dimension_resolution):
        """Verify reverting resolution set conflict to unresolved and deprecate the side-effect
        conflict
        """
        assert missing_dimension_conflict.is_resolved
        assert new_dimension_conflict.is_resolved
        assert len(rename_dimension_resolution.new_conflicts) == 1

        # revert() returns the side-effect conflicts so the caller can
        # deprecate them; they stay marked resolved.
        new_conflicts = rename_dimension_resolution.new_conflicts
        assert rename_dimension_resolution.revert() == new_conflicts
        assert len(rename_dimension_resolution.new_conflicts) == 0
        assert new_conflicts[0].is_resolved

        assert not missing_dimension_conflict.is_resolved
        assert not new_dimension_conflict.is_resolved
        assert missing_dimension_conflict.resolution is None
        assert new_dimension_conflict.resolution is None
class TestAlgorithmResolution(object):
    """Test methods for resolution of algorithm changes"""

    def test_adapters(self, algorithm_resolution):
        """Verify shallow adapters for algorithm change"""
        # AlgorithmChange takes no arguments -- the adapter only marks that
        # the algorithm configuration differs.
        resolution_adapters = algorithm_resolution.get_adapters()
        assert len(resolution_adapters) == 1
        assert resolution_adapters[0].configuration == adapters.AlgorithmChange().configuration

    def test_repr(self, algorithm_resolution):
        """Verify resolution representation for user interface"""
        assert repr(algorithm_resolution) == '--algorithm-change'

    def test_revert(self, algorithm_conflict, algorithm_resolution):
        """Verify reverting resolution set conflict to unresolved"""
        assert algorithm_conflict.is_resolved
        assert algorithm_resolution.revert() == []
        assert not algorithm_conflict.is_resolved
        assert algorithm_conflict.resolution is None
class TestCodeResolution(object):
    """Test methods for resolution of code conflict"""

    def test_wrong_input(self, code_conflict):
        """Verify initialization fails with invalid change type"""
        with pytest.raises(ValueError) as exc:
            code_conflict.CodeResolution(code_conflict, "yabadabadoo")
        assert "Invalid code change type" in str(exc.value)

    def test_adapters(self, code_conflict):
        """Verify adapters with code change types"""
        # Every declared change type must round-trip through the adapter.
        for change_type in adapters.CodeChange.types:
            code_resolution = code_conflict.CodeResolution(code_conflict, change_type)
            resolution_adapters = code_resolution.get_adapters()
            assert len(resolution_adapters) == 1
            assert (resolution_adapters[0].configuration ==
                    adapters.CodeChange(change_type).configuration)

    def test_repr(self, code_resolution):
        """Verify resolution representation for user interface"""
        assert repr(code_resolution) == '--code-change-type break'

    def test_revert(self, code_conflict, code_resolution):
        """Verify reverting resolution set conflict to unresolved"""
        assert code_conflict.is_resolved
        assert code_resolution.revert() == []
        assert not code_conflict.is_resolved
        assert code_conflict.resolution is None
class TestExperimentNameResolution(object):
    """Tests for the resolution of an experiment name conflict."""

    def test_adapters(self, experiment_name_resolution):
        """Verify there is no adapters for experiment name resolution"""
        assert experiment_name_resolution.get_adapters() == []

    def test_repr(self, experiment_name_resolution):
        """Verify resolution representation for user interface"""
        assert repr(experiment_name_resolution) == '--branch new-exp-name'

    def test_revert(self, old_config, new_config,
                    experiment_name_conflict, experiment_name_resolution):
        """Verify reverting resolution set conflict to unresolved and reset name in config"""
        conflict = experiment_name_conflict
        resolution = experiment_name_resolution
        # While resolved, the new config carries the renamed identity.
        assert conflict.is_resolved
        assert new_config['name'] == resolution.new_name
        assert new_config['version'] == resolution.new_version
        # Reverting restores the old identity and un-resolves the conflict.
        assert resolution.revert() == []
        assert new_config['name'] == old_config['name']
        assert new_config['version'] == old_config['version']
        assert not conflict.is_resolved
        assert conflict.resolution is None
| 50.261965 | 100 | 0.727373 |
acf30930ca1c2496e5cefd194fcc673c6e95e900 | 163 | py | Python | devdata/views.py | BuildForSDG/PARTNERSHIP-FOR-THE-GOALS | fcb84df42dc6f381a87570663e13a682e44607ba | [
"MIT"
] | 1 | 2020-06-03T11:22:06.000Z | 2020-06-03T11:22:06.000Z | devdata/views.py | BuildForSDG/PARTNERSHIP-FOR-THE-GOALS | fcb84df42dc6f381a87570663e13a682e44607ba | [
"MIT"
] | 7 | 2020-06-16T09:14:02.000Z | 2021-09-22T19:24:45.000Z | devdata/views.py | BuildForSDG/PARTNERSHIP-FOR-THE-GOALS | fcb84df42dc6f381a87570663e13a682e44607ba | [
"MIT"
] | 1 | 2020-06-15T14:08:16.000Z | 2020-06-15T14:08:16.000Z | from django.shortcuts import render
# from django.urls import reverse_lazy
# Create your views here.
def index(request):
    """Render the site landing page."""
    return render(request, 'index.html')
| 20.375 | 39 | 0.766871 |
acf309aeffa4f3426f54ed6447d3500a660dc7d0 | 24,090 | py | Python | a2t/tasks/base.py | zhuowenzheng/Ask2Transformers | f22fd980ff97351acc7e6117aaf272e5946581d4 | [
"Apache-2.0"
] | null | null | null | a2t/tasks/base.py | zhuowenzheng/Ask2Transformers | f22fd980ff97351acc7e6117aaf272e5946581d4 | [
"Apache-2.0"
] | null | null | null | a2t/tasks/base.py | zhuowenzheng/Ask2Transformers | f22fd980ff97351acc7e6117aaf272e5946581d4 | [
"Apache-2.0"
] | null | null | null | from collections import defaultdict
import inspect
import warnings
from dataclasses import dataclass, field, fields
from typing import Callable, List, Dict, Union
import re
import json
import os
import numpy as np
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
from torch import negative
from a2t.utils import find_optimal_threshold, apply_threshold
@dataclass
class Features:
    """A simple class to handle the features information.

    Args:
        context (str): The context sentence.
        label (str, optional): The label of the instance.
        inst_type (str, optional): The type of the instance. This information is used for the `valid_conditions` constraints.
    """

    # NOTE: `label` and `inst_type` are optional in practice (default None)
    # even though they are annotated as plain `str`.
    context: str
    label: str = None
    inst_type: str = None
class IncorrectFeatureTypeError(Exception):
    """Raised when a feature is not an instance of the task's `features_class`."""

    pass
@dataclass
class Task:
    """Abstract class for Tasks definition.

    The method `_assert_constraints()` must be overrided.

    Args:
        name (str, optional): A name for the task that may be used for to differentiate task when saving. Defaults to None.
        required_variables (List[str], optional): The variables required to perform the task and must be implemented by the `Features` class. Defaults to empty list.
        additional_variables (List[str], optional): The variables not required to perform the task and must be implemented by the `Features` class. Defaults to empty list.
        labels (List[str], optional): The labels for the task. Defaults to empty list.
        templates (Dict[str, List[str]], optional): The templates/verbalizations for the task. Defaults to empty dict.
        valid_conditions (Dict[str, List[str]], optional): The valid conditions or constraints for the task. Defaults to None.
        negative_label_id (int, optional): The index of the negative label or -1 if no negative label exist. A negative label is for example the class `Other` on NER, that means that the specific token is not a named entity. Defaults to -1.
        multi_label (bool, optional): Whether the task must be treated as multi-label or not. You should treat as multi-label task a task that contains a negative label. Defaults to False.
        features_class (type, optional): The `Features` class related to the task. Default to `Features`.
    """

    name: str = None
    required_variables: List[str] = field(default_factory=list)
    additional_variables: List[str] = field(default_factory=list)
    labels: List[str] = field(default_factory=list)
    templates: Dict[str, List[str]] = field(default_factory=dict)
    valid_conditions: Dict[str, List[str]] = None
    negative_label_id: int = -1  # -1 for no negative class
    multi_label: bool = False
    features_class: type = Features

    def __post_init__(self):
        self._assert_minimal_constraints()
        self._assert_constraints()
        self.label2id = {label: i for i, label in enumerate(self.labels)}
        self.n_labels = len(self.labels)
        if not self.templates:
            self.templates = {}

        # Create the templates to label mapping (one template may verbalize
        # several labels).
        self.template2label = defaultdict(list)
        for label, templates in self.templates.items():
            for template in templates:
                self.template2label[template].append(label)

        self.template_list = list(self.template2label.keys())
        template2id = {template: i for i, template in enumerate(self.template_list)}
        self.label2templateid = defaultdict(list)
        for label, templates in self.templates.items():
            self.label2templateid[label].extend([template2id[template] for template in templates])

        # Create the valid_conditions matrix: one binary row per instance
        # type; "*" marks a label as valid for every instance type.
        if self.valid_conditions:
            self._valid_conditions = {}
            self._always_valid_labels = np.zeros(self.n_labels)
            if self.negative_label_id >= 0:
                self._always_valid_labels[self.negative_label_id] = 1.0
            for label, conditions in self.valid_conditions.items():
                if label not in self.labels:
                    continue
                for condition in conditions:
                    if condition == "*":
                        self._always_valid_labels[self.label2id[label]] = 1.0
                        continue
                    if condition not in self._valid_conditions:
                        self._valid_conditions[condition] = np.zeros(self.n_labels)
                        if self.negative_label_id >= 0:
                            self._valid_conditions[condition][self.negative_label_id] = 1.0
                    self._valid_conditions[condition][self.label2id[label]] = 1.0
        else:
            self._valid_conditions = None

        def idx2label(idx):
            return self.labels[idx]

        # Vectorized index -> label-name lookup used by inference code.
        self.idx2label = np.vectorize(idx2label)

    def __repr__(self) -> str:
        class_name = self.name if self.name else str(self.__class__)
        labels_repr = repr(self.labels)
        if len(labels_repr) > 89:
            # Abbreviate long label lists so the repr stays one-screen wide.
            labels_repr = repr(self.labels[:3]).replace("]", ", ...]")
        templates_repr = len(self.template2label)
        feature_class_repr = str(self.features_class)
        return (
            f"{class_name} ("
            f"\n\tLabels: {labels_repr}"
            f"\n\tTemplates: {templates_repr}"
            f"\n\tFeatures: {feature_class_repr}"
            "\n)"
        )

    def _assert_constraints(self):
        raise NotImplementedError(f"{self.__class__} is an abstract class. This method should be implemented.")

    def _assert_minimal_constraints(self):
        """Validate the invariants shared by every task subclass."""
        assert len(self.labels) > 0, "The number of labels should be greather than 0."
        assert self.negative_label_id < len(
            self.labels
        ), "The id for the negative label should be lower than the amount of labels."
        if self.negative_label_id >= 0:
            assert self.templates is not None and len(
                [value for values in self.templates.values() for value in values]
            ), "`templates` parameter must not be None nor empty."
            assert all(
                key in self.labels for key in self.templates.keys()
            ), "All the keys of templates dicts must be defined on labels."
        if self.valid_conditions:
            assert all(
                key in self.labels for key in self.valid_conditions.keys()
            ), "All the keys of valid_conditions dict must be defined on labels."
        assert all(
            var in self.features_class.__dict__["__dataclass_fields__"]
            for var in self.required_variables + self.additional_variables
        ), "All variables should be defined on the features_class."
        # Every {placeholder} used in a template must be a declared variable.
        assert all(
            var.strip("{").strip("}") in [*self.required_variables, *self.additional_variables]
            for templates in self.templates.values()
            for template in templates
            for var in re.findall(r"{\w+}", template)
        ), "All template placeholders must be declared on required or additional variables."

    def assert_features_class(self, features: List[Features]) -> None:
        """Assert that all features are instance of the task specific `Features` class.

        Args:
            features (List[Features]): The list of features to check.

        Raises:
            IncorrectFeatureTypeError: Raised when any feature is not an instance of the task specific `Features` class.
        """
        for feature in features:
            if not isinstance(feature, self.features_class):
                raise IncorrectFeatureTypeError(
                    f"Incorrect feature type given. Expected {self.features_class} but obtained {type(feature)}."
                )

    def generate_premise_hypotheses_pairs(self, features: List[Features], sep_token: str = "</s>") -> List[str]:
        """Generate premise-hypothesis pairs based on the `Task` templates.

        Args:
            features (List[Features]): The list of features.
            sep_token (str, optional): The model specific separator token. Defaults to "</s>".

        Returns:
            List[str]: The list of premise-hypothesis pairs generated from the features and templates.
        """
        if not isinstance(features, list):
            features = [features]
        # Cartesian product: every feature is paired with every template.
        sentence_pairs = [
            f"{feature.context} {sep_token} {template.format(**feature.__dict__)}"
            for feature in features
            for template in self.template_list
        ]
        return sentence_pairs

    def reverse_to_labels(self, template_probs: np.ndarray, collate_fn: Callable = np.max) -> np.ndarray:
        """A function that maps template probabilities to label probabilites. By default, the maximum probabilities among
        label related templates is used.

        Args:
            template_probs (np.ndarray): (batch_size, n_templates) The templates probabilites.
            collate_fn (Callable, optional): The probabilites collate function. Defaults to np.max.

        Returns:
            np.ndarray: (batch_size, n_labels) The labels probabilities.
        """
        outputs = np.hstack(
            [
                collate_fn(template_probs[:, self.label2templateid[label]], axis=-1, keepdims=True)
                if label in self.label2templateid
                else np.zeros((template_probs.shape[0], 1))
                for label in self.labels
            ]
        )
        return outputs

    def apply_valid_conditions(self, features: List[Features], probs: np.ndarray) -> np.ndarray:
        """Applies the valid conditions to the labels probabilities. If a constraint is not satisfied the probability is set to 0.

        Args:
            features (List[Features]): (batch_size,) The list of features.
            probs (np.ndarray): (batch_size, n_labels) The labels probabilities.

        Returns:
            np.ndarray: (batch_size, n_labels) The labels probabilities.
        """
        if self._valid_conditions:
            # Hoisted default row for instance types without declared conditions.
            no_match = np.zeros(self.n_labels)
            mask_matrix = np.stack(
                [self._valid_conditions.get(feature.inst_type, no_match) for feature in features],
                axis=0,
            )
            probs = probs * np.logical_or(mask_matrix, self._always_valid_labels)  # TODO: Need a test
        return probs

    def compute_metrics(
        self, labels: np.ndarray, output: np.ndarray, threshold: Union[str, float] = "optimize"
    ) -> Dict[str, float]:
        """Compute the metrics for the given task. This method is abstract and needs to be overrided.

        Args:
            labels (np.ndarray): (batch_size,) The correct labels.
            output (np.ndarray): (batch_size, n_labels) The labels probabilities.
            threshold (Union[str, float], optional): The threshold to use on the evaluation. Options:

                * **"default"**: The threshold is set to 0.5.
                * **"optimize"**: Optimize the threshold with the `labels`. Intended to be used on the development split.
                * **`float`**: A specific float value for the threshold.

                Defaults to "optimize".

        Raises:
            NotImplementedError: Raise if not overrided.

        Returns:
            Dict[str, float]: Dict with the resulting metrics.
        """
        # TODO: Unittest
        raise NotImplementedError("This method must be implemented.")

    @classmethod
    def from_config(cls, file_path: str) -> object:
        """Loads the Task instance from a configuration file.

        Args:
            file_path (str): The path to the configuration file.

        Returns:
            Task: A `Task` instance based on the configuration file.
        """
        with open(file_path, "rt") as f:
            config = json.load(f)

        # Resolve the dotted path of the features class back into a type.
        if "features_class" in config:
            components = config["features_class"].split(".")
            mod = __import__(components[0])
            for comp in components[1:]:
                mod = getattr(mod, comp)
            config["features_class"] = mod

        # Only keep keys that the constructor actually accepts, so configs
        # written by older/newer versions still load.
        params = set([p.name for p in fields(cls)]) | set(inspect.signature(cls).parameters.keys())
        params = {key: config[key] for key in params if key in config}
        return cls(**params)

    def to_config(self, file_path: str) -> None:
        """Saves the task instance to a configuration file.

        Args:
            file_path (str): The path to the configuration file.
        """
        os.makedirs(os.path.dirname(file_path), exist_ok=True)
        with open(file_path, "wt") as f:
            values = dict(vars(self))
            values["features_class"] = values["features_class"].__module__ + "." + values["features_class"].__name__
            # Drop the attributes derived on __post_init__: they are rebuilt on
            # load and some are not JSON serializable (numpy arrays, the
            # np.vectorize object). Previously `_always_valid_labels` (an
            # ndarray created whenever `valid_conditions` is set) was not
            # removed, which made json.dump raise TypeError. pop(..., None) is
            # used because it only exists when `valid_conditions` is set.
            for key in [
                "label2id",
                "idx2label",
                "n_labels",
                "template2label",
                "label2templateid",
                "_valid_conditions",
                "_always_valid_labels",
            ]:
                values.pop(key, None)
            json.dump(values, f, indent=4)
class ZeroaryFeatures(Features):
    """A features class for `ZeroaryTask`. It only requires a `context` argument.

    No span/argument variables are needed: text classification works on the
    sentence alone.
    """

    pass
@dataclass
class ZeroaryTask(Task):
    """A `Task` implementation for Text Classification like tasks.

    A zero-ary task classifies the context sentence on its own: the templates
    may not reference any span variable. See `Task` for the meaning of the
    inherited constructor arguments; the only specialization here is that
    `features_class` defaults to `ZeroaryFeatures`.
    """

    features_class: type = ZeroaryFeatures

    def _assert_constraints(self):
        # Text classification uses the context alone, so no variables allowed.
        assert len(self.required_variables) == 0, "Zero-ary tasks like Text classifiation do not require any variable."

    def compute_metrics(self, labels: np.ndarray, output: np.ndarray, threshold: Union[str, float] = None) -> Dict[str, float]:
        """Compute the metrics for the given task. By default on `ZeroaryTask` the Accuracy is computed.

        Args:
            labels (np.ndarray): (batch_size,) The correct labels.
            output (np.ndarray): (batch_size, n_labels) The labels probabilities.
            threshold (Union[str, float], optional): Unused; `ZeroaryTask` needs no threshold.

        Returns:
            Dict[str, float]: Dict with the resulting metrics.
        """
        # TODO: Unittest
        if threshold:
            warnings.warn(f"{self.__class__} do not require 'threshold', ignored.")
        predictions = output.argmax(-1)
        return {"accuracy_score": accuracy_score(labels, predictions)}
@dataclass
class UnaryFeatures(Features):
    """A features class for `UnaryTask`. It requires `context` and `X` arguments."""

    # `X` is the single span/argument classified by the task; optional here
    # only so the dataclass keeps a default, the task itself requires it.
    X: str = None
@dataclass
class UnaryTask(Task):
    """A `Task` implementation for Span Classification like tasks.

    Args:
        name (str, optional): A name for the task that may be used for to differentiate task when saving. Defaults to None.
        required_variables (List[str], optional): The variables required to perform the task and must be implemented by the `UnaryFeatures` class. Defaults `["X"]`.
        additional_variables (List[str], optional): The variables not required to perform the task and must be implemented by the `UnaryFeatures` class. Defaults to empty list.
        labels (List[str], optional): The labels for the task. Defaults to empty list.
        templates (Dict[str, List[str]], optional): The templates/verbalizations for the task. Defaults to empty dict.
        valid_conditions (Dict[str, List[str]], optional): The valid conditions or constraints for the task. Defaults to None.
        multi_label (bool, optional): Whether the task must be treated as multi-label or not. You should treat as multi-label task a task that contains a negative label. Defaults to False.
        features_class (type, optional): The `Features` class related to the task. Default to `UnaryFeatures`.
        negative_label_id (int, optional): The index of the negative label or -1 if no negative label exist. A negative label is for example the class `Other` on NER, that means that the specific token is not a named entity. Defaults to -1.
    """

    required_variables: List[str] = field(default_factory=lambda: ["X"])
    features_class: type = UnaryFeatures

    def _assert_constraints(self):
        # Assert the number of required variables to be 1
        assert len(self.required_variables) == 1, "Unary-ary tasks like Span classifiation requires 1 variable."

    def compute_metrics(self, labels: np.ndarray, output: np.ndarray, threshold: Union[str, float] = "optimize"):
        """Compute the metrics for the given task. By default on `UnaryTask` the Accuracy is computed if
        the `negative_label_id` is < 0, otherwise the Precision, Recall, F1-Score and positive Accuracy are
        computed.

        Args:
            labels (np.ndarray): (batch_size,) The correct labels.
            output (np.ndarray): (batch_size, n_labels) The labels probabilities.
            threshold (Union[str, float], optional): The threshold to use on the evaluation. Options:

                * "default": The threshold is set to 0.5.
                * "optimize": Optimize the threshold with the `labels`. Intended to be used on the development split.
                * `float`: A specific float value for the threshold.

                Defaults to "optimize".

        Returns:
            Dict[str, float]: Dict with the resulting metrics.
        """
        # TODO: Unittest
        # NOTE(review): integer thresholds (e.g. `1`) are rejected by the
        # isinstance(..., float) check — confirm whether that is intended.
        if threshold not in ["default", "optimize"] and not isinstance(threshold, float):
            raise ValueError("Threshold must be either 'default', 'optimize' or a float value.")

        if threshold == "default":
            threshold = 0.5

        if threshold == "optimize":
            threshold, _ = find_optimal_threshold(labels, output, negative_label_id=self.negative_label_id)

        results = {"optimal_threshold": threshold}

        if self.negative_label_id < 0:
            # No negative class: plain argmax accuracy over all labels.
            results["accuracy_score"] = accuracy_score(labels, output.argmax(-1))
        else:
            # With a negative class, apply the threshold and report micro
            # precision/recall/F1 over the positive labels only.
            output_ = apply_threshold(output, threshold=threshold, negative_label_id=self.negative_label_id)
            positive_labels = list(set(range(len(self.labels))) - set([self.negative_label_id]))
            # Positive accuracy: argmax restricted to gold-positive instances,
            # with the negative class zeroed out of the predictions.
            output_pos = output.copy()
            output_pos[:, self.negative_label_id] = 0.0
            results["positive_accuracy"] = accuracy_score(
                labels[labels != self.negative_label_id], output_pos[labels != self.negative_label_id, :].argmax(-1)
            )

            pre, rec, f1, _ = precision_recall_fscore_support(labels, output_, labels=positive_labels, average="micro")
            results["precision"] = pre
            results["recall"] = rec
            results["f1-score"] = f1

        return results
@dataclass
class BinaryFeatures(Features):
    """A features class for `BinaryTask`. It requires `context`, `X` and `Y` arguments."""

    # `X` and `Y` are the two spans/arguments related by the task (e.g. the
    # head and tail entities in relation classification).
    X: str = None
    Y: str = None
@dataclass
class BinaryTask(Task):
    """A `Task` implementation for Relation Classification like tasks.

    Args:
        name (str, optional): A name for the task that may be used for to differentiate task when saving. Defaults to None.
        required_variables (List[str], optional): The variables required to perform the task and must be implemented by the `BinaryFeatures` class. Defaults `["X", "Y"]`.
        additional_variables (List[str], optional): The variables not required to perform the task and must be implemented by the `BinaryFeatures` class. Defaults to empty list.
        labels (List[str], optional): The labels for the task. Defaults to empty list.
        templates (Dict[str, List[str]], optional): The templates/verbalizations for the task. Defaults to empty dict.
        valid_conditions (Dict[str, List[str]], optional): The valid conditions or constraints for the task. Defaults to None.
        multi_label (bool, optional): Whether the task must be treated as multi-label or not. You should treat as multi-label task a task that contains a negative label. Defaults to False.
        features_class (type, optional): The `Features` class related to the task. Default to `BinaryFeatures`.
        negative_label_id (int, optional): The index of the negative label or -1 if no negative label exist. A negative label is for example the class `Other` on NER, that means that the specific token is not a named entity. Defaults to -1.
    """

    required_variables: List[str] = field(default_factory=lambda: ["X", "Y"])
    features_class: type = BinaryFeatures

    def _assert_constraints(self):
        # Assert the number of required variables to be 2
        assert len(self.required_variables) == 2, "Binary-ary tasks like Tuple classifiation require 2 variable."

    def compute_metrics(self, labels: np.ndarray, output: np.ndarray, threshold: Union[str, float] = "optimize"):
        """Compute the metrics for the given task. By default on `BinaryTask` the Accuracy is computed if
        the `negative_label_id` is < 0, otherwise the Precision, Recall, F1-Score and positive Accuracy are
        computed.

        Args:
            labels (np.ndarray): (batch_size,) The correct labels.
            output (np.ndarray): (batch_size, n_labels) The labels probabilities.
            threshold (Union[str, float], optional): The threshold to use on the evaluation. Options:

                * "default": The threshold is set to 0.5.
                * "optimize": Optimize the threshold with the `labels`. Intended to be used on the development split.
                * `float`: A specific float value for the threshold.

                Defaults to "optimize".

        Returns:
            Dict[str, float]: Dict with the resulting metrics.
        """
        # TODO: Unittest + documentation
        # NOTE(review): this body duplicates UnaryTask.compute_metrics —
        # consider sharing the implementation on the Task base class.
        if threshold not in ["default", "optimize"] and not isinstance(threshold, float):
            raise ValueError("Threshold must be either 'default', 'optimize' or a float value.")

        if threshold == "default":
            threshold = 0.5

        if threshold == "optimize":
            threshold, _ = find_optimal_threshold(labels, output, negative_label_id=self.negative_label_id)

        results = {"optimal_threshold": threshold}

        if self.negative_label_id < 0:
            # No negative class: plain argmax accuracy over all labels.
            results["accuracy_score"] = accuracy_score(labels, output.argmax(-1))
        else:
            # With a negative class, apply the threshold and report micro
            # precision/recall/F1 over the positive labels only.
            output_ = apply_threshold(output, threshold=threshold, negative_label_id=self.negative_label_id)
            positive_labels = list(set(range(len(self.labels))) - set([self.negative_label_id]))
            # Positive accuracy: argmax restricted to gold-positive instances,
            # with the negative class zeroed out of the predictions.
            output_pos = output.copy()
            output_pos[:, self.negative_label_id] = 0.0
            results["positive_accuracy"] = accuracy_score(
                labels[labels != self.negative_label_id], output_pos[labels != self.negative_label_id, :].argmax(-1)
            )

            pre, rec, f1, _ = precision_recall_fscore_support(labels, output_, labels=positive_labels, average="micro")
            results["precision"] = pre
            results["recall"] = rec
            results["f1-score"] = f1

        return results
| 47.142857 | 240 | 0.654628 |
acf309ed594b4e93e11b0b397032c01d706edc5d | 652 | py | Python | sort/bubble_sort/python/capt-doki_bubble_sort.py | avi-pal/al-go-rithms | 5167a20f1db7b366ff19f2962c1746a02e4f5067 | [
"CC0-1.0"
] | 1,253 | 2017-06-06T07:19:25.000Z | 2022-03-30T17:07:58.000Z | sort/bubble_sort/python/capt-doki_bubble_sort.py | rishabh99-rc/al-go-rithms | 4df20d7ef7598fda4bc89101f9a99aac94cdd794 | [
"CC0-1.0"
] | 554 | 2017-09-29T18:56:01.000Z | 2022-02-21T15:48:13.000Z | sort/bubble_sort/python/capt-doki_bubble_sort.py | rishabh99-rc/al-go-rithms | 4df20d7ef7598fda4bc89101f9a99aac94cdd794 | [
"CC0-1.0"
] | 2,226 | 2017-09-29T19:59:59.000Z | 2022-03-25T08:59:55.000Z | # Python program for implementation of Bubble Sort
# TIME COMPLEXITY -- O(N^2)
# SPACE COMPLEXITY -- O(1)
def bubbleSort(arr):
n = len(arr)
# Traverse through all array elements
for i in range(n):
# Last i elements are already in place
for j in range(0, n-i-1):
# traverse the array from 0 to n-i-1
# Swap if the element found is greater
# than the next element
if arr[j] > arr[j+1] :
arr[j], arr[j+1] = arr[j+1], arr[j]
arr = [64, 34, 25, 12, 22, 11, 90]
bubbleSort(arr)
print ("Sorted array is:")
for i in range(len(arr)):
print (arr[i]) | 22.482759 | 51 | 0.558282 |
acf30a587fd7fdeb9d26160b9b816b1a491b4aa8 | 1,542 | py | Python | test_mark_0001/__init__.py | markregine/test_mark_0001 | ee697deb9786bdd90c255ae06b603eb79ec0afe9 | [
"MIT"
] | null | null | null | test_mark_0001/__init__.py | markregine/test_mark_0001 | ee697deb9786bdd90c255ae06b603eb79ec0afe9 | [
"MIT"
] | null | null | null | test_mark_0001/__init__.py | markregine/test_mark_0001 | ee697deb9786bdd90c255ae06b603eb79ec0afe9 | [
"MIT"
] | null | null | null | """
_____________________________________________________________________________________________
| |
| test_mark_0001 package: makes healthcare data analysis easy |
| |
| This package is to provide easy to use tools to aid in healthcare data cleaning and |
| analyses. A full applied guide to the entire package may soon be available at: |
| https://github.com/markregine/test_mark_0001 |
| |
| Current contents include: |
| - Raw data cleaning tools |
| - |
| - |
| - |
|___________________________________________________________________________________________|
CONTENTS
module1
description goes here
module2
description goes here
See http://some_website for a full guide to all the package features
"""
from .base import (print_hello, get_df)
from .version import __version__
| 51.4 | 93 | 0.402724 |
acf30accd36dbc5f7e3668ba38a746dbf4fae9ef | 2,148 | py | Python | src/app/user/schemas.py | jamshidyerzakov/fastapi-blog | 26828556482b1b5824781b321558776b5cd882ea | [
"MIT"
] | null | null | null | src/app/user/schemas.py | jamshidyerzakov/fastapi-blog | 26828556482b1b5824781b321558776b5cd882ea | [
"MIT"
] | null | null | null | src/app/user/schemas.py | jamshidyerzakov/fastapi-blog | 26828556482b1b5824781b321558776b5cd882ea | [
"MIT"
] | null | null | null | from typing import Optional
from fastapi import Body, Form
from pydantic import BaseModel, EmailStr
from tortoise.contrib.pydantic import pydantic_model_creator
from .models import User
class UserBase(BaseModel):
    """Shared base properties common to every user schema."""

    first_name: Optional[str] = None
class UserBaseInDB(UserBase):
    """Base properties of a user as stored in the database."""

    # NOTE(review): annotated `int` but defaults to None — presumably meant
    # to be Optional[int]; confirm before tightening.
    id: int = None
    username: Optional[str] = None
    email: Optional[str] = None
    is_active: Optional[bool] = True
    is_superuser: Optional[bool] = False

    class Config:
        # Allow building this schema directly from ORM model instances.
        orm_mode = True
class UserCreate(UserBaseInDB):
    """Properties to receive via API when creating a user from the admin panel."""

    username: str
    email: EmailStr
    password: str
    first_name: str
    avatar: str = None
class UserCreateInRegistration(BaseModel):
    """Properties to receive via API on user registration."""

    username: str
    email: EmailStr
    password: str
    first_name: str
    avatar: str = None

    class Config:
        # Allow building this schema directly from ORM model instances.
        orm_mode = True
class UserUpdate(UserBaseInDB):
    """ Properties to receive via API on update
    """
    # NOTE(review): Form(...) makes this field *required* despite the
    # Optional annotation, and mixes form handling into a schema class —
    # confirm whether `Optional[str] = None` was intended.
    password: Optional[str] = Form(...)
# class User(UserBaseInDB):
# """ Additional properties to return via API
# """
# pass
class UserInDB(UserBaseInDB):
    """Additional properties stored in the database (hashed password)."""

    password: str
class SocialAccount(BaseModel):
    """Schema for a social (OAuth) account, including the linked user payload."""

    account_id: int
    account_url: str
    account_login: str
    account_name: str
    provider: str
    user: UserCreateInRegistration

    class Config:
        # Allow building this schema directly from ORM model instances.
        orm_mode = True
class SocialAccountGet(BaseModel):
    """Schema for reading a social account (avatar URL instead of nested user)."""

    account_id: int
    account_url: str
    account_login: str
    account_name: str
    provider: str
    avatar_url: str

    class Config:
        # Allow building this schema directly from ORM model instances.
        orm_mode = True
class UserPublic(UserBase):
    """Public (non-sensitive) user profile representation."""

    id: int
    # social_account: SocialAccount

    class Config:
        # Allow building this schema directly from ORM model instances.
        orm_mode = True
# Tortoise-ORM auto-generated pydantic schemas for the User model.
# Creation schema: read-only fields are dropped and the permission/state
# flags are excluded so they cannot be set through the API.
User_C_Pydantic = pydantic_model_creator(
    User, name='create_user', exclude_readonly=True, exclude=('is_active', 'is_staff', 'is_superuser'))
# Read schema exposing the full model.
User_G_Pydantic = pydantic_model_creator(User, name='user')
acf30b8a683b31fb4c7ed0cb63e1d4616527e808 | 166 | py | Python | braulio/__init__.py | mbarakaja/braulio | 6f7ae52af1623c3cf51fa090f0e65aa393dc6e70 | [
"MIT"
] | 1 | 2018-08-23T16:44:59.000Z | 2018-08-23T16:44:59.000Z | braulio/__init__.py | mbarakaja/braulio | 6f7ae52af1623c3cf51fa090f0e65aa393dc6e70 | [
"MIT"
] | 14 | 2018-07-19T22:22:24.000Z | 2018-08-20T16:09:40.000Z | braulio/__init__.py | mbarakaja/braulio | 6f7ae52af1623c3cf51fa090f0e65aa393dc6e70 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Top-level package for braulio."""
__author__ = """José María Domínguez Moreno"""
__email__ = "miso.0b11@gmail.com"
__version__ = "0.3.0"
| 20.75 | 46 | 0.650602 |
acf30bf32acc715425b2e9c0bffa5c4258860c13 | 990 | py | Python | aifeynman/S_add_sym_on_pareto.py | pranjukn/AI-Feynman | 92e67b01fc2b00ed6ebcacc67edf6122b4219ac7 | [
"MIT"
] | 470 | 2019-11-14T16:04:38.000Z | 2022-03-31T13:22:18.000Z | aifeynman/S_add_sym_on_pareto.py | pranjukn/AI-Feynman | 92e67b01fc2b00ed6ebcacc67edf6122b4219ac7 | [
"MIT"
] | 55 | 2020-04-18T03:10:40.000Z | 2022-03-31T18:40:32.000Z | aifeynman/S_add_sym_on_pareto.py | pranjukn/AI-Feynman | 92e67b01fc2b00ed6ebcacc67edf6122b4219ac7 | [
"MIT"
] | 117 | 2019-11-21T09:26:00.000Z | 2022-03-20T05:42:02.000Z | # Combines 2 pareto fromtier obtained from the separability test into a new one.
from .get_pareto import Point, ParetoSet
from sympy.parsing.sympy_parser import parse_expr
import numpy as np
import matplotlib.pyplot as plt
import os
from os import path
from sympy import Symbol, lambdify, N
from .get_pareto import Point, ParetoSet
from .S_get_expr_complexity import get_expr_complexity
def add_sym_on_pareto(pathdir,filename,PA1,idx1,idx2,PA,sym_typ):
possible_vars = ["x%s" %i for i in np.arange(0,30,1)]
PA1 = np.array(PA1.get_pareto_points()).astype('str')
for i in range(len(PA1)):
exp1 = PA1[i][2]
for j in range(len(possible_vars)-2,idx2-1,-1):
exp1 = exp1.replace(possible_vars[j],possible_vars[j+1])
exp1 = exp1.replace(possible_vars[idx1],"(" + possible_vars[idx1] + sym_typ + possible_vars[idx2] + ")")
compl = get_expr_complexity(exp1)
PA.add(Point(x=compl,y=float(PA1[i][1]),data=str(exp1)))
return PA
| 34.137931 | 112 | 0.709091 |
acf30c1ed988f25857bc73144ee7c341e6661157 | 2,906 | py | Python | test/espnet2/enh/separator/test_dccrn_separator.py | texpomru13/espnet | 7ef005e832e2fb033f356c16f54e0f08762fb4b0 | [
"Apache-2.0"
] | 2 | 2022-02-24T09:22:57.000Z | 2022-02-24T09:38:02.000Z | test/espnet2/enh/separator/test_dccrn_separator.py | texpomru13/espnet | 7ef005e832e2fb033f356c16f54e0f08762fb4b0 | [
"Apache-2.0"
] | 2 | 2019-04-23T04:43:33.000Z | 2019-05-13T13:06:52.000Z | test/espnet2/enh/separator/test_dccrn_separator.py | texpomru13/espnet | 7ef005e832e2fb033f356c16f54e0f08762fb4b0 | [
"Apache-2.0"
] | 1 | 2022-03-18T21:02:16.000Z | 2022-03-18T21:02:16.000Z | from distutils.version import LooseVersion
import pytest
import torch
from torch_complex import ComplexTensor
from espnet2.enh.separator.dccrn_separator import DCCRNSeparator
is_torch_1_9_plus = LooseVersion(torch.__version__) >= LooseVersion("1.9.0")
@pytest.mark.parametrize("input_dim", [9])
@pytest.mark.parametrize("num_spk", [1, 2])
@pytest.mark.parametrize("rnn_layer", [2, 3])
@pytest.mark.parametrize("rnn_units", [256])
@pytest.mark.parametrize("masking_mode", ["E", "C", "R"])
@pytest.mark.parametrize("use_clstm", [True, False])
@pytest.mark.parametrize("bidirectional", [True, False])
@pytest.mark.parametrize("use_cbn", [True, False])
@pytest.mark.parametrize("kernel_size", [5])
@pytest.mark.parametrize("use_builtin_complex", [True, False])
@pytest.mark.parametrize("use_noise_mask", [True, False])
def test_dccrn_separator_forward_backward_complex(
    input_dim,
    num_spk,
    rnn_layer,
    rnn_units,
    masking_mode,
    use_clstm,
    bidirectional,
    use_cbn,
    kernel_size,
    use_builtin_complex,
    use_noise_mask,
):
    """Train-mode forward + backward smoke test over the DCCRN option grid."""
    separator = DCCRNSeparator(
        input_dim=input_dim,
        num_spk=num_spk,
        rnn_layer=rnn_layer,
        rnn_units=rnn_units,
        masking_mode=masking_mode,
        use_clstm=use_clstm,
        bidirectional=bidirectional,
        use_cbn=use_cbn,
        kernel_size=kernel_size,
        kernel_num=[32, 64, 128],
        use_builtin_complex=use_builtin_complex,
        use_noise_mask=use_noise_mask,
    )
    separator.train()

    spec = ComplexTensor(torch.rand(2, 10, input_dim), torch.rand(2, 10, input_dim))
    ilens = torch.tensor([10, 8], dtype=torch.long)
    separated, _, _ = separator(spec, ilens=ilens)

    # Built-in complex tensors are only produced on torch >= 1.9.
    if use_builtin_complex and is_torch_1_9_plus:
        assert isinstance(separated[0], torch.Tensor)
    else:
        assert isinstance(separated[0], ComplexTensor)
    assert len(separated) == num_spk

    # Gradients must flow back through the separated output.
    separated[0].abs().mean().backward()
def test_dccrn_separator_invalid_type():
    """Constructing DCCRNSeparator with an unknown masking_mode must fail."""
    with pytest.raises(ValueError):
        DCCRNSeparator(input_dim=10, masking_mode="fff")
def test_rnn_separator_output():
    """Eval-mode output contract: a list of per-speaker spectra plus one
    mask entry per speaker, each shaped like its spectrum.

    NOTE(review): the name says "rnn" but the body exercises DCCRNSeparator
    -- presumably a copy-paste leftover from the RNN separator test; renaming
    would change the collected pytest id, so it is only flagged here.
    """
    real = torch.rand(2, 10, 9)
    imag = torch.rand(2, 10, 9)
    x = ComplexTensor(real, imag)
    x_lens = torch.tensor([10, 8], dtype=torch.long)
    for num_spk in range(1, 3):
        model = DCCRNSeparator(
            input_dim=9,
            num_spk=num_spk,
            kernel_num=[
                32,
                64,
                128,
            ],
        )
        model.eval()
        specs, _, others = model(x, x_lens)
        assert isinstance(specs, list)
        assert isinstance(others, dict)
        for n in range(num_spk):
            # One mask per separated speaker, matching the spectrum's shape.
            assert "mask_spk{}".format(n + 1) in others
            assert specs[n].shape == others["mask_spk{}".format(n + 1)].shape
acf30cb103a5e52f091e0ca6d030dbeb3fc5974b | 10,471 | py | Python | homeassistant/components/directv/media_player.py | jgrob1/core | b1444ffefb47c99fddf2a25b96de2b4f0313faa0 | [
"Apache-2.0"
] | 1 | 2020-09-07T17:15:34.000Z | 2020-09-07T17:15:34.000Z | homeassistant/components/directv/media_player.py | jgrob1/core | b1444ffefb47c99fddf2a25b96de2b4f0313faa0 | [
"Apache-2.0"
] | 45 | 2020-07-23T07:13:34.000Z | 2022-03-31T06:01:55.000Z | homeassistant/components/directv/media_player.py | ajschmidt8/home-assistant | 75153dd4a3061f27674f4adbd9283e6c46534e66 | [
"Apache-2.0"
] | null | null | null | """Support for the DirecTV receivers."""
import logging
from typing import Callable, List
from directv import DIRECTV
from homeassistant.components.media_player import MediaPlayerEntity
from homeassistant.components.media_player.const import (
MEDIA_TYPE_CHANNEL,
MEDIA_TYPE_MOVIE,
MEDIA_TYPE_MUSIC,
MEDIA_TYPE_TVSHOW,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_STOP,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import STATE_OFF, STATE_PAUSED, STATE_PLAYING
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.util import dt as dt_util
from . import DIRECTVEntity
from .const import (
ATTR_MEDIA_CURRENTLY_RECORDING,
ATTR_MEDIA_RATING,
ATTR_MEDIA_RECORDED,
ATTR_MEDIA_START_TIME,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
KNOWN_MEDIA_TYPES = [MEDIA_TYPE_MOVIE, MEDIA_TYPE_MUSIC, MEDIA_TYPE_TVSHOW]
SUPPORT_DTV = (
SUPPORT_PAUSE
| SUPPORT_TURN_ON
| SUPPORT_TURN_OFF
| SUPPORT_PLAY_MEDIA
| SUPPORT_STOP
| SUPPORT_NEXT_TRACK
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_PLAY
)
SUPPORT_DTV_CLIENT = (
SUPPORT_PAUSE
| SUPPORT_PLAY_MEDIA
| SUPPORT_STOP
| SUPPORT_NEXT_TRACK
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_PLAY
)
async def async_setup_entry(
    hass: HomeAssistantType,
    entry: ConfigEntry,
    async_add_entities: Callable[[List, bool], None],
) -> None:
    """Set up the DirecTV config entry.

    Creates one media player entity per receiver location (the main box
    plus any client boxes) reported by the DirecTV device.

    Fix: the previous ``-> bool`` return annotation was wrong -- this
    coroutine returns nothing.
    """
    dtv = hass.data[DOMAIN][entry.entry_id]
    entities = [
        DIRECTVMediaPlayer(
            dtv=dtv,
            name=str.title(location.name),
            address=location.address,
        )
        for location in dtv.device.locations
    ]
    # True requests an initial state refresh before the entities are added.
    async_add_entities(entities, True)
class DIRECTVMediaPlayer(DIRECTVEntity, MediaPlayerEntity):
    """Representation of a DirecTV receiver on the network.

    Address "0" denotes the main receiver; any other address is a client
    box. NOTE(review): ``self._is_client``, ``self.dtv``, ``self._name``
    and ``self._address`` are presumably set by the DIRECTVEntity base
    class (not visible here) -- confirm against __init__.py.
    """

    def __init__(self, *, dtv: DIRECTV, name: str, address: str = "0") -> None:
        """Initialize DirecTV media player."""
        super().__init__(
            dtv=dtv,
            name=name,
            address=address,
        )

        # Snapshot of receiver state, refreshed on every async_update().
        self._assumed_state = None  # True while playing recorded media
        self._available = False
        self._is_recorded = None
        self._is_standby = True
        self._last_position = None  # media position seen at the last poll
        self._last_update = None  # time the last state snapshot was taken
        self._paused = None
        self._program = None
        self._state = None

    async def async_update(self):
        """Retrieve latest state."""
        self._state = await self.dtv.state(self._address)
        self._available = self._state.available
        self._is_standby = self._state.standby
        self._program = self._state.program

        if self._is_standby:
            # Receiver is off: clear all playback-derived state.
            self._assumed_state = False
            self._is_recorded = None
            self._last_position = None
            self._last_update = None
            self._paused = None
        elif self._program is not None:
            # Paused is inferred from the position not advancing between
            # polls; only meaningful for recorded media (see state property).
            self._paused = self._last_position == self._program.position
            self._is_recorded = self._program.recorded
            self._last_position = self._program.position
            self._last_update = self._state.at
            self._assumed_state = self._is_recorded

    @property
    def device_state_attributes(self):
        """Return device specific state attributes."""
        attributes = {}
        if not self._is_standby:
            attributes[ATTR_MEDIA_CURRENTLY_RECORDING] = self.media_currently_recording
            attributes[ATTR_MEDIA_RATING] = self.media_rating
            attributes[ATTR_MEDIA_RECORDED] = self.media_recorded
            attributes[ATTR_MEDIA_START_TIME] = self.media_start_time

        return attributes

    @property
    def name(self):
        """Return the name of the device."""
        return self._name

    @property
    def unique_id(self):
        """Return a unique ID to use for this media player."""
        if self._address == "0":
            # Main receiver: identified by the hardware receiver id.
            return self.dtv.device.info.receiver_id

        return self._address

    # MediaPlayerEntity properties and methods
    @property
    def state(self):
        """Return the state of the device."""
        if self._is_standby:
            return STATE_OFF

        # For recorded media we can determine if it is paused or not.
        # For live media we're unable to determine and will always return
        # playing instead.
        if self._paused:
            return STATE_PAUSED

        return STATE_PLAYING

    @property
    def available(self):
        """Return if able to retrieve information from DVR or not."""
        return self._available

    @property
    def assumed_state(self):
        """Return if we assume the state or not."""
        return self._assumed_state

    @property
    def media_content_id(self):
        """Return the content ID of current playing media."""
        if self._is_standby or self._program is None:
            return None

        return self._program.program_id

    @property
    def media_content_type(self):
        """Return the content type of current playing media."""
        if self._is_standby or self._program is None:
            return None

        # Anything the backend does not classify falls back to "movie".
        if self._program.program_type in KNOWN_MEDIA_TYPES:
            return self._program.program_type

        return MEDIA_TYPE_MOVIE

    @property
    def media_duration(self):
        """Return the duration of current playing media in seconds."""
        if self._is_standby or self._program is None:
            return None

        return self._program.duration

    @property
    def media_position(self):
        """Position of current playing media in seconds."""
        if self._is_standby:
            return None

        return self._last_position

    @property
    def media_position_updated_at(self):
        """When was the position of the current playing media valid."""
        if self._is_standby:
            return None

        return self._last_update

    @property
    def media_title(self):
        """Return the title of current playing media."""
        if self._is_standby or self._program is None:
            return None

        if self.media_content_type == MEDIA_TYPE_MUSIC:
            return self._program.music_title

        return self._program.title

    @property
    def media_artist(self):
        """Artist of current playing media, music track only."""
        if self._is_standby or self._program is None:
            return None

        return self._program.music_artist

    @property
    def media_album_name(self):
        """Album name of current playing media, music track only."""
        if self._is_standby or self._program is None:
            return None

        return self._program.music_album

    @property
    def media_series_title(self):
        """Return the title of current episode of TV show."""
        if self._is_standby or self._program is None:
            return None

        return self._program.episode_title

    @property
    def media_channel(self):
        """Return the channel current playing media."""
        if self._is_standby or self._program is None:
            return None

        return f"{self._program.channel_name} ({self._program.channel})"

    @property
    def source(self):
        """Name of the current input source."""
        if self._is_standby or self._program is None:
            return None

        return self._program.channel

    @property
    def supported_features(self):
        """Flag media player features that are supported."""
        # Client boxes cannot be powered on/off remotely.
        return SUPPORT_DTV_CLIENT if self._is_client else SUPPORT_DTV

    @property
    def media_currently_recording(self):
        """If the media is currently being recorded or not."""
        if self._is_standby or self._program is None:
            return None

        return self._program.recording

    @property
    def media_rating(self):
        """TV Rating of the current playing media."""
        if self._is_standby or self._program is None:
            return None

        return self._program.rating

    @property
    def media_recorded(self):
        """If the media was recorded or live."""
        if self._is_standby:
            return None

        return self._is_recorded

    @property
    def media_start_time(self):
        """Start time the program aired."""
        if self._is_standby or self._program is None:
            return None

        return dt_util.as_local(self._program.start_time)

    async def async_turn_on(self):
        """Turn on the receiver."""
        if self._is_client:
            # Client boxes have no power control.
            raise NotImplementedError()

        _LOGGER.debug("Turn on %s", self._name)
        await self.dtv.remote("poweron", self._address)

    async def async_turn_off(self):
        """Turn off the receiver."""
        if self._is_client:
            # Client boxes have no power control.
            raise NotImplementedError()

        _LOGGER.debug("Turn off %s", self._name)
        await self.dtv.remote("poweroff", self._address)

    async def async_media_play(self):
        """Send play command."""
        _LOGGER.debug("Play on %s", self._name)
        await self.dtv.remote("play", self._address)

    async def async_media_pause(self):
        """Send pause command."""
        _LOGGER.debug("Pause on %s", self._name)
        await self.dtv.remote("pause", self._address)

    async def async_media_stop(self):
        """Send stop command."""
        _LOGGER.debug("Stop on %s", self._name)
        await self.dtv.remote("stop", self._address)

    async def async_media_previous_track(self):
        """Send rewind command."""
        _LOGGER.debug("Rewind on %s", self._name)
        await self.dtv.remote("rew", self._address)

    async def async_media_next_track(self):
        """Send fast forward command."""
        _LOGGER.debug("Fast forward on %s", self._name)
        await self.dtv.remote("ffwd", self._address)

    async def async_play_media(self, media_type, media_id, **kwargs):
        """Select input source."""
        # Only channel tuning is supported as "play media".
        if media_type != MEDIA_TYPE_CHANNEL:
            _LOGGER.error(
                "Invalid media type %s. Only %s is supported",
                media_type,
                MEDIA_TYPE_CHANNEL,
            )
            return

        _LOGGER.debug("Changing channel on %s to %s", self._name, media_id)
        await self.dtv.tune(media_id, self._address)
| 29.330532 | 87 | 0.641008 |
acf30d88f3e61dd0a041f60e61b25648f74af64c | 3,312 | py | Python | alipay/aop/api/domain/CplifeResidentInfo.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/CplifeResidentInfo.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/CplifeResidentInfo.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class CplifeResidentInfo(object):
    """Alipay model object for a community-life resident record.

    Follows the generated-SDK pattern: private storage behind property
    accessors, plus dict (de)serialization helpers for the OpenAPI gateway.
    """

    # API field names, in serialization order.
    _FIELD_NAMES = (
        'entity_id',
        'idcard_no',
        'name',
        'out_entity_id',
        'out_resident_id',
        'type',
    )

    def __init__(self):
        self._entity_id = None
        self._idcard_no = None
        self._name = None
        self._out_entity_id = None
        self._out_resident_id = None
        self._type = None

    @property
    def entity_id(self):
        return self._entity_id

    @entity_id.setter
    def entity_id(self, value):
        self._entity_id = value

    @property
    def idcard_no(self):
        return self._idcard_no

    @idcard_no.setter
    def idcard_no(self, value):
        self._idcard_no = value

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, value):
        self._name = value

    @property
    def out_entity_id(self):
        return self._out_entity_id

    @out_entity_id.setter
    def out_entity_id(self, value):
        self._out_entity_id = value

    @property
    def out_resident_id(self):
        return self._out_resident_id

    @out_resident_id.setter
    def out_resident_id(self, value):
        self._out_resident_id = value

    @property
    def type(self):
        return self._type

    @type.setter
    def type(self, value):
        self._type = value

    def to_alipay_dict(self):
        """Serialize all truthy fields into a plain dict, recursing into
        values that expose their own to_alipay_dict()."""
        params = dict()
        for field in self._FIELD_NAMES:
            value = getattr(self, field)
            if value:
                if hasattr(value, 'to_alipay_dict'):
                    value = value.to_alipay_dict()
                params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an instance from a response dict.

        Returns None for a falsy input, mirroring the generated SDK.
        """
        if not d:
            return None
        o = CplifeResidentInfo()
        for field in CplifeResidentInfo._FIELD_NAMES:
            if field in d:
                setattr(o, field, d[field])
        return o
acf30daecc51f03892f4685b6553f2fccc0ec02a | 28,444 | py | Python | python/ccxt/cryptopia.py | ccmtx/ccxt | 6547eada2be724e6645a7afd22732822e7978936 | [
"MIT"
] | null | null | null | python/ccxt/cryptopia.py | ccmtx/ccxt | 6547eada2be724e6645a7afd22732822e7978936 | [
"MIT"
] | null | null | null | python/ccxt/cryptopia.py | ccmtx/ccxt | 6547eada2be724e6645a7afd22732822e7978936 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
# -----------------------------------------------------------------------------
try:
basestring # Python 3
except NameError:
basestring = str # Python 2
import base64
import hashlib
import math
import json
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import OrderNotCached
from ccxt.base.errors import InvalidNonce
class cryptopia (Exchange):
    def describe(self):
        """Return the static exchange descriptor merged over the ccxt base:
        identity, capability flags, endpoint routes, chart timeframes, and
        Cryptopia-specific currency-code aliases."""
        return self.deep_extend(super(cryptopia, self).describe(), {
            'id': 'cryptopia',
            'name': 'Cryptopia',
            'rateLimit': 1500,
            'countries': 'NZ',  # New Zealand
            'has': {
                'CORS': False,
                'createMarketOrder': False,
                'fetchClosedOrders': 'emulated',
                'fetchCurrencies': True,
                'fetchDepositAddress': True,
                'fetchMyTrades': True,
                'fetchOHLCV': True,
                'fetchOrder': 'emulated',
                'fetchOrderBooks': True,
                'fetchOrders': 'emulated',
                'fetchOpenOrders': True,
                'fetchTickers': True,
                'deposit': True,
                'withdraw': True,
            },
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/29484394-7b4ea6e2-84c6-11e7-83e5-1fccf4b2dc81.jpg',
                'api': {
                    'public': 'https://www.cryptopia.co.nz/api',
                    'private': 'https://www.cryptopia.co.nz/api',
                    # the undocumented website endpoint used for OHLCV charts
                    'web': 'https://www.cryptopia.co.nz',
                },
                'www': 'https://www.cryptopia.co.nz',
                'referral': 'https://www.cryptopia.co.nz/Register?referrer=kroitor',
                'doc': [
                    'https://www.cryptopia.co.nz/Forum/Category/45',
                    'https://www.cryptopia.co.nz/Forum/Thread/255',
                    'https://www.cryptopia.co.nz/Forum/Thread/256',
                ],
            },
            # Chart intervals in minutes, as expected by the web chart endpoint.
            'timeframes': {
                '15m': 15,
                '30m': 30,
                '1h': 60,
                '2h': 120,
                '4h': 240,
                '12h': 720,
                '1d': 1440,
                '1w': 10080,
            },
            'api': {
                'web': {
                    'get': [
                        'Exchange/GetTradePairChart',
                    ],
                },
                'public': {
                    'get': [
                        'GetCurrencies',
                        'GetTradePairs',
                        'GetMarkets',
                        'GetMarkets/{id}',
                        'GetMarkets/{hours}',
                        'GetMarkets/{id}/{hours}',
                        'GetMarket/{id}',
                        'GetMarket/{id}/{hours}',
                        'GetMarketHistory/{id}',
                        'GetMarketHistory/{id}/{hours}',
                        'GetMarketOrders/{id}',
                        'GetMarketOrders/{id}/{count}',
                        'GetMarketOrderGroups/{ids}',
                        'GetMarketOrderGroups/{ids}/{count}',
                    ],
                },
                'private': {
                    'post': [
                        'CancelTrade',
                        'GetBalance',
                        'GetDepositAddress',
                        'GetOpenOrders',
                        'GetTradeHistory',
                        'GetTransactions',
                        'SubmitTip',
                        'SubmitTrade',
                        'SubmitTransfer',
                        'SubmitWithdraw',
                    ],
                },
            },
            # Cryptopia uses non-standard tickers for some coins; map them
            # to unified ccxt currency codes.
            'commonCurrencies': {
                'ACC': 'AdCoin',
                'BAT': 'BatCoin',
                'BLZ': 'BlazeCoin',
                'BTG': 'Bitgem',
                'CC': 'CCX',
                'CMT': 'Comet',
                'EPC': 'ExperienceCoin',
                'FCN': 'Facilecoin',
                'FUEL': 'FC2',  # FuelCoin != FUEL
                'HAV': 'Havecoin',
                'LBTC': 'LiteBitcoin',
                'LDC': 'LADACoin',
                'MARKS': 'Bitmark',
                'NET': 'NetCoin',
                'QBT': 'Cubits',
                'WRC': 'WarCoin',
            },
            'options': {
                # when False, unknown pair ids in fetchTickers are skipped
                # silently instead of raising(see fetch_tickers)
                'fetchTickersErrors': True,
            },
        })
    def fetch_markets(self):
        """Fetch all trade pairs and build the unified markets list.

        Side effect: caches a label-indexed copy of the result in
        self.options['marketsByLabel'] for later lookups in parse_order().
        """
        response = self.publicGetGetTradePairs()
        result = []
        markets = response['Data']
        for i in range(0, len(markets)):
            market = markets[i]
            id = market['Id']
            symbol = market['Label']
            baseId = market['Symbol']
            quoteId = market['BaseSymbol']
            base = self.common_currency_code(baseId)
            quote = self.common_currency_code(quoteId)
            # rebuild the symbol from unified codes (overrides the raw Label)
            symbol = base + '/' + quote
            precision = {
                'amount': 8,
                'price': 8,
            }
            lot = market['MinimumTrade']
            priceLimits = {
                'min': market['MinimumPrice'],
                'max': market['MaximumPrice'],
            }
            amountLimits = {
                'min': lot,
                'max': market['MaximumTrade'],
            }
            limits = {
                'amount': amountLimits,
                'price': priceLimits,
                'cost': {
                    'min': market['MinimumBaseTrade'],
                    'max': None,
                },
            }
            active = market['Status'] == 'OK'
            result.append({
                'id': id,
                'symbol': symbol,
                'label': market['Label'],
                'base': base,
                'quote': quote,
                'baseId': baseId,
                'quoteId': quoteId,
                'info': market,
                # TradeFee arrives as a percentage; convert to a fraction
                'maker': market['TradeFee'] / 100,
                'taker': market['TradeFee'] / 100,
                'lot': limits['amount']['min'],
                'active': active,
                'precision': precision,
                'limits': limits,
            })
        self.options['marketsByLabel'] = self.index_by(result, 'label')
        return result
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
response = self.publicGetGetMarketOrdersId(self.extend({
'id': self.market_id(symbol),
}, params))
orderbook = response['Data']
return self.parse_order_book(orderbook, None, 'Buy', 'Sell', 'Price', 'Volume')
    def fetch_ohlcv(self, symbol, timeframe='15m', since=None, limit=None, params={}):
        """Fetch OHLCV candles via the undocumented web chart endpoint.

        The endpoint takes a coarse 'dataRange' bucket rather than an exact
        start time, so 'since' is mapped to the largest predefined range the
        elapsed time exceeds; extra leading candles may be returned.
        """
        dataRange = 0
        if since is not None:
            # bucket spans in seconds: 1d, 2d, 1w, 2w, 30d, 90d, 180d
            dataRanges = [
                86400,
                172800,
                604800,
                1209600,
                2592000,
                7776000,
                15552000,
            ]
            numDataRanges = len(dataRanges)
            now = self.seconds()
            sinceSeconds = int(since / 1000)
            for i in range(1, numDataRanges):
                if (now - sinceSeconds) > dataRanges[i]:
                    dataRange = i
        self.load_markets()
        market = self.market(symbol)
        request = {
            'tradePairId': market['id'],
            'dataRange': dataRange,
            'dataGroup': self.timeframes[timeframe],
        }
        response = self.webGetExchangeGetTradePairChart(self.extend(request, params))
        candles = response['Candle']
        volumes = response['Volume']
        # volumes come back in a parallel array -- append the base volume
        # to each candle to complete the OHLCV rows
        for i in range(0, len(candles)):
            candles[i].append(volumes[i]['basev'])
        return self.parse_ohlcvs(candles, market, timeframe, since, limit)
def join_market_ids(self, ids, glue='-'):
result = str(ids[0])
for i in range(1, len(ids)):
result += glue + str(ids[i])
return result
    def fetch_order_books(self, symbols=None, params={}):
        """Fetch order books for up to 5 symbols in one grouped request.

        Unlike most exchanges the symbols argument is mandatory here --
        the grouped endpoint requires explicit pair ids.
        """
        self.load_markets()
        if symbols is None:
            raise ExchangeError(self.id + ' fetchOrderBooks requires the symbols argument as of May 2018(up to 5 symbols at max)')
        numSymbols = len(symbols)
        if numSymbols > 5:
            raise ExchangeError(self.id + ' fetchOrderBooks accepts 5 symbols at max')
        ids = self.join_market_ids(self.market_ids(symbols))
        response = self.publicGetGetMarketOrderGroupsIds(self.extend({
            'ids': ids,
        }, params))
        orderbooks = response['Data']
        result = {}
        for i in range(0, len(orderbooks)):
            orderbook = orderbooks[i]
            id = self.safe_integer(orderbook, 'TradePairId')
            # fall back to the raw numeric id when the pair is unknown locally
            symbol = id
            if id in self.markets_by_id:
                market = self.markets_by_id[id]
                symbol = market['symbol']
            result[symbol] = self.parse_order_book(orderbook, None, 'Buy', 'Sell', 'Price', 'Volume')
        return result
    def parse_ticker(self, ticker, market=None):
        """Convert a raw Cryptopia ticker dict into the unified structure.

        vwap is derived as quoteVolume / baseVolume when both are present
        and base volume is positive; 'Change' is already a percentage.
        """
        # the API does not timestamp tickers, so stamp them locally
        timestamp = self.milliseconds()
        symbol = None
        if market:
            symbol = market['symbol']
        open = self.safe_float(ticker, 'Open')
        last = self.safe_float(ticker, 'LastPrice')
        change = last - open
        baseVolume = self.safe_float(ticker, 'Volume')
        quoteVolume = self.safe_float(ticker, 'BaseVolume')
        vwap = None
        if quoteVolume is not None:
            if baseVolume is not None:
                if baseVolume > 0:
                    vwap = quoteVolume / baseVolume
        return {
            'symbol': symbol,
            'info': ticker,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'high': self.safe_float(ticker, 'High'),
            'low': self.safe_float(ticker, 'Low'),
            'bid': self.safe_float(ticker, 'BidPrice'),
            'bidVolume': None,
            'ask': self.safe_float(ticker, 'AskPrice'),
            'askVolume': None,
            'vwap': vwap,
            'open': open,
            'close': last,
            'last': last,
            'previousClose': None,
            'change': change,
            'percentage': self.safe_float(ticker, 'Change'),
            'average': self.sum(last, open) / 2,
            'baseVolume': baseVolume,
            'quoteVolume': quoteVolume,
        }
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
response = self.publicGetGetMarketId(self.extend({
'id': market['id'],
}, params))
ticker = response['Data']
return self.parse_ticker(ticker, market)
    def fetch_tickers(self, symbols=None, params={}):
        """Fetch tickers for all markets.

        Pairs whose id is missing from the local markets cache either raise
        (when options['fetchTickersErrors'] is True, the default) or are
        silently skipped.
        """
        self.load_markets()
        response = self.publicGetGetMarkets(params)
        result = {}
        tickers = response['Data']
        for i in range(0, len(tickers)):
            ticker = tickers[i]
            id = ticker['TradePairId']
            recognized = (id in list(self.markets_by_id.keys()))
            if not recognized:
                if self.options['fetchTickersErrors']:
                    raise ExchangeError(self.id + ' fetchTickers() returned unrecognized pair id ' + str(id))
            else:
                market = self.markets_by_id[id]
                symbol = market['symbol']
                result[symbol] = self.parse_ticker(ticker, market)
        return self.filter_by_array(result, 'symbol', symbols)
    def parse_trade(self, trade, market=None):
        """Convert a raw trade (public history or private trade history)
        into the unified structure.

        Public trades carry a unix 'Timestamp' in seconds; private ones an
        ISO8601 'TimeStamp'. Price may arrive as 'Price' or 'Rate'; a fee
        is only present on private trades and is charged in quote currency.
        """
        timestamp = None
        if 'Timestamp' in trade:
            timestamp = trade['Timestamp'] * 1000
        elif 'TimeStamp' in trade:
            timestamp = self.parse8601(trade['TimeStamp'])
        price = self.safe_float(trade, 'Price')
        if not price:
            price = self.safe_float(trade, 'Rate')
        cost = self.safe_float(trade, 'Total')
        id = self.safe_string(trade, 'TradeId')
        if not market:
            if 'TradePairId' in trade:
                if trade['TradePairId'] in self.markets_by_id:
                    market = self.markets_by_id[trade['TradePairId']]
        symbol = None
        fee = None
        if market:
            symbol = market['symbol']
            if 'Fee' in trade:
                fee = {
                    'currency': market['quote'],
                    'cost': trade['Fee'],
                }
        return {
            'id': id,
            'info': trade,
            'order': None,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'symbol': symbol,
            'type': 'limit',  # the exchange supports limit orders only
            'side': trade['Type'].lower(),
            'price': price,
            'cost': cost,
            'amount': trade['Amount'],
            'fee': fee,
        }
    def fetch_trades(self, symbol, since=None, limit=None, params={}):
        """Fetch public trade history for a symbol.

        The endpoint filters by a trailing window in hours, so 'since' is
        converted to a rounded-up hour count (default: last 24 hours).
        """
        self.load_markets()
        market = self.market(symbol)
        hours = 24  # the default
        if since is not None:
            elapsed = self.milliseconds() - since
            hour = 1000 * 60 * 60
            hours = int(int(math.ceil(elapsed / hour)))
        request = {
            'id': market['id'],
            'hours': hours,
        }
        response = self.publicGetGetMarketHistoryIdHours(self.extend(request, params))
        trades = response['Data']
        return self.parse_trades(trades, market, since, limit)
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
request = {}
market = None
if symbol:
market = self.market(symbol)
request['TradePairId'] = market['id']
if limit is not None:
request['Count'] = limit # default 100
response = self.privatePostGetTradeHistory(self.extend(request, params))
return self.parse_trades(response['Data'], market, since, limit)
    def fetch_currencies(self, params={}):
        """Fetch all listed currencies with precisions, limits and the flat
        withdrawal fee.

        A currency is reported active only when it is listed AND its
        operational status is 'ok'.
        """
        response = self.publicGetGetCurrencies(params)
        currencies = response['Data']
        result = {}
        for i in range(0, len(currencies)):
            currency = currencies[i]
            id = currency['Symbol']
            # todo: will need to rethink the fees
            # to add support for multiple withdrawal/deposit methods and
            # differentiated fees for each particular method
            precision = 8  # default precision, todo: fix "magic constants"
            code = self.common_currency_code(id)
            active = (currency['ListingStatus'] == 'Active')
            status = currency['Status'].lower()
            if status != 'ok':
                active = False
            result[code] = {
                'id': id,
                'code': code,
                'info': currency,
                'name': currency['Name'],
                'active': active,
                'status': status,
                'fee': currency['WithdrawFee'],
                'precision': precision,
                'limits': {
                    'amount': {
                        'min': math.pow(10, -precision),
                        'max': math.pow(10, precision),
                    },
                    'price': {
                        'min': math.pow(10, -precision),
                        'max': math.pow(10, precision),
                    },
                    'cost': {
                        'min': currency['MinBaseTrade'],
                        'max': None,
                    },
                    'withdraw': {
                        'min': currency['MinWithdraw'],
                        'max': currency['MaxWithdraw'],
                    },
                },
            }
        return result
def fetch_balance(self, params={}):
self.load_markets()
response = self.privatePostGetBalance(params)
balances = response['Data']
result = {'info': response}
for i in range(0, len(balances)):
balance = balances[i]
code = balance['Symbol']
currency = self.common_currency_code(code)
account = {
'free': balance['Available'],
'used': 0.0,
'total': balance['Total'],
}
account['used'] = account['total'] - account['free']
result[currency] = account
return self.parse_balance(result)
    def create_order(self, symbol, type, side, amount, price=None, params={}):
        """Place a limit order (Cryptopia has no market orders).

        SubmitTrade may fill the order immediately, in which case no
        OrderId is returned -- the order is then reported as 'closed' and
        fully filled. Orders that stay open are cached in self.orders for
        the emulated fetchOrder/fetchOrders machinery.
        """
        if type == 'market':
            raise ExchangeError(self.id + ' allows limit orders only')
        self.load_markets()
        market = self.market(symbol)
        # price = float(price)
        # amount = float(amount)
        request = {
            'TradePairId': market['id'],
            'Type': self.capitalize(side),
            # 'Rate': self.price_to_precision(symbol, price),
            # 'Amount': self.amount_to_precision(symbol, amount),
            'Rate': price,
            'Amount': amount,
        }
        response = self.privatePostSubmitTrade(self.extend(request, params))
        if not response:
            raise ExchangeError(self.id + ' createOrder returned unknown error: ' + self.json(response))
        id = None
        filled = 0.0
        status = 'open'
        if 'Data' in response:
            if 'OrderId' in response['Data']:
                if response['Data']['OrderId']:
                    id = str(response['Data']['OrderId'])
                else:
                    # a missing OrderId means the order matched instantly
                    filled = amount
                    status = 'closed'
        timestamp = self.milliseconds()
        order = {
            'id': id,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'lastTradeTimestamp': None,
            'status': status,
            'symbol': symbol,
            'type': type,
            'side': side,
            'price': price,
            'cost': price * amount,
            'amount': amount,
            'remaining': amount - filled,
            'filled': filled,
            'fee': None,
            # 'trades': self.parse_trades(order['trades'], market),
        }
        if id:
            self.orders[id] = order
        return self.extend({'info': response}, order)
    def cancel_order(self, id, symbol=None, params={}):
        """Cancel an open order by id; marks the cached copy canceled.

        A 'does not exist' message in the API error response is mapped to
        OrderNotFound; any other failure is re-raised unchanged.
        """
        self.load_markets()
        response = None
        try:
            response = self.privatePostCancelTrade(self.extend({
                'Type': 'Trade',
                'OrderId': id,
            }, params))
            # We do not know if it is indeed canceled, but cryptopia lacks any
            # reasonable method to get information on executed or canceled order.
            if id in self.orders:
                self.orders[id]['status'] = 'canceled'
        except Exception as e:
            if self.last_json_response:
                message = self.safe_string(self.last_json_response, 'Error')
                if message:
                    if message.find('does not exist') >= 0:
                        raise OrderNotFound(self.id + ' cancelOrder() error: ' + self.last_http_response)
            raise e
        return self.parse_order(response)
    def parse_order(self, order, market=None):
        """Convert a raw or cached order into the unified structure.

        The market is resolved from the explicit argument, from the order's
        'Market' field via markets_by_id, or -- for label-keyed entries --
        via the marketsByLabel cache built in fetch_markets().
        """
        symbol = None
        if market:
            symbol = market['symbol']
        elif 'Market' in order:
            id = order['Market']
            if id in self.markets_by_id:
                market = self.markets_by_id[id]
                symbol = market['symbol']
            else:
                if id in self.options['marketsByLabel']:
                    market = self.options['marketsByLabel'][id]
                    symbol = market['symbol']
        timestamp = self.safe_integer(order, 'TimeStamp')
        datetime = None
        if timestamp:
            datetime = self.iso8601(timestamp)
        amount = self.safe_float(order, 'Amount')
        remaining = self.safe_float(order, 'Remaining')
        filled = None
        if amount is not None and remaining is not None:
            filled = amount - remaining
        id = self.safe_value(order, 'OrderId')
        if id is not None:
            id = str(id)
        side = self.safe_string(order, 'Type')
        if side is not None:
            side = side.lower()
        return {
            'id': id,
            # 'status' is a locally-added field, keep it out of raw info
            'info': self.omit(order, 'status'),
            'timestamp': timestamp,
            'datetime': datetime,
            'lastTradeTimestamp': None,
            'status': self.safe_string(order, 'status'),
            'symbol': symbol,
            'type': 'limit',
            'side': side,
            'price': self.safe_float(order, 'Rate'),
            'cost': self.safe_float(order, 'Total'),
            'amount': amount,
            'filled': filled,
            'remaining': remaining,
            'fee': None,
            # 'trades': self.parse_trades(order['trades'], market),
        }
    def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
        """Emulated order history built from a local cache.

        Live open orders are fetched and merged into self.orders; any cached
        order that is no longer reported open is flipped to closed / fully
        filled. Returns cached orders filtered by symbol, since and limit.
        """
        self.load_markets()
        market = None
        request = {
            # 'Market': market['id'],
            # 'TradePairId': market['id'], # Cryptopia identifier(not required if 'Market' supplied)
            # 'Count': 100, # default = 100
        }
        if symbol is not None:
            market = self.market(symbol)
            request['TradePairId'] = market['id']
        response = self.privatePostGetOpenOrders(self.extend(request, params))
        orders = []
        for i in range(0, len(response['Data'])):
            orders.append(self.extend(response['Data'][i], {'status': 'open'}))
        openOrders = self.parse_orders(orders, market)
        for j in range(0, len(openOrders)):
            self.orders[openOrders[j]['id']] = openOrders[j]
        openOrdersIndexedById = self.index_by(openOrders, 'id')
        cachedOrderIds = list(self.orders.keys())
        result = []
        for k in range(0, len(cachedOrderIds)):
            id = cachedOrderIds[k]
            if id in openOrdersIndexedById:
                # still open on the exchange -- refresh the cached copy
                self.orders[id] = self.extend(self.orders[id], openOrdersIndexedById[id])
            else:
                order = self.orders[id]
                if order['status'] == 'open':
                    # cached as open but gone from the exchange: assume filled
                    if (symbol is None) or (order['symbol'] == symbol):
                        self.orders[id] = self.extend(order, {
                            'status': 'closed',
                            'cost': order['amount'] * order['price'],
                            'filled': order['amount'],
                            'remaining': 0.0,
                        })
            order = self.orders[id]
            if (symbol is None) or (order['symbol'] == symbol):
                result.append(order)
        return self.filter_by_since_limit(result, since, limit)
def fetch_order(self, id, symbol=None, params={}):
id = str(id)
orders = self.fetch_orders(symbol, None, None, params)
for i in range(0, len(orders)):
if orders[i]['id'] == id:
return orders[i]
raise OrderNotCached(self.id + ' order ' + id + ' not found in cached .orders, fetchOrder requires .orders(de)serialization implemented for self method to work properly')
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
orders = self.fetch_orders(symbol, since, limit, params)
result = []
for i in range(0, len(orders)):
if orders[i]['status'] == 'open':
result.append(orders[i])
return result
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
orders = self.fetch_orders(symbol, since, limit, params)
result = []
for i in range(0, len(orders)):
if orders[i]['status'] == 'closed':
result.append(orders[i])
return result
def fetch_deposit_address(self, code, params={}):
self.load_markets()
currency = self.currency(code)
response = self.privatePostGetDepositAddress(self.extend({
'Currency': currency['id'],
}, params))
address = self.safe_string(response['Data'], 'BaseAddress')
if not address:
address = self.safe_string(response['Data'], 'Address')
self.check_address(address)
return {
'currency': code,
'address': address,
'status': 'ok',
'info': response,
}
def withdraw(self, code, amount, address, tag=None, params={}):
self.load_markets()
currency = self.currency(code)
self.check_address(address)
request = {
'Currency': currency['id'],
'Amount': amount,
'Address': address, # Address must exist in you AddressBook in security settings
}
if tag:
request['PaymentId'] = tag
response = self.privatePostSubmitWithdraw(self.extend(request, params))
return {
'info': response,
'id': response['Data'],
}
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build url, body and headers for a request.

        Private calls use the 'amx' HMAC-SHA256 scheme: the signed payload is
        apiKey + method + lowercased-encoded-uri + nonce + base64(md5(body)).
        """
        url = self.urls['api'][api] + '/' + self.implode_params(path, params)
        query = self.omit(params, self.extract_params(path))
        if api == 'private':
            self.check_required_credentials()
            nonce = str(self.nonce())
            body = self.json(query, {'convertArraysToObjects': True})
            # base64-encoded md5 digest of the JSON body, part of the payload.
            hash = self.hash(self.encode(body), 'md5', 'base64')
            secret = base64.b64decode(self.secret)
            uri = self.encode_uri_component(url)
            lowercase = uri.lower()
            hash = self.binary_to_string(hash)
            payload = self.apiKey + method + lowercase + nonce + hash
            signature = self.hmac(self.encode(payload), secret, hashlib.sha256, 'base64')
            auth = 'amx ' + self.apiKey + ':' + self.binary_to_string(signature) + ':' + nonce
            headers = {
                'Content-Type': 'application/json',
                'Authorization': auth,
            }
        else:
            # Public endpoints just append the remaining query string.
            if query:
                url += '?' + self.urlencode(query)
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
    def nonce(self):
        """Millisecond timestamp used as the per-request nonce."""
        return self.milliseconds()
    def handle_errors(self, code, reason, url, method, headers, body):
        """Map Cryptopia error payloads onto typed ccxt exceptions.

        Falls through (returns None) for non-string or too-short bodies so the
        default handler can take over; otherwise parses the (repaired) JSON
        and raises based on the 'Error' text when 'Success' is 'false'.
        """
        if not isinstance(body, basestring):
            return  # fallback to default error handler
        if len(body) < 2:
            return  # fallback to default error handler
        fixedJSONString = self.sanitize_broken_json_string(body)
        if fixedJSONString[0] == '{':
            response = json.loads(fixedJSONString)
            if 'Success' in response:
                success = self.safe_string(response, 'Success')
                if success == 'false':
                    error = self.safe_string(response, 'Error')
                    feedback = self.id
                    if isinstance(error, basestring):
                        feedback = feedback + ' ' + error
                        # Substring matching picks the most specific exception.
                        if error.find('does not exist') >= 0:
                            raise OrderNotFound(feedback)
                        if error.find('Insufficient Funds') >= 0:
                            raise InsufficientFunds(feedback)
                        if error.find('Nonce has already been used') >= 0:
                            raise InvalidNonce(feedback)
                    else:
                        # No usable error text: include the raw payload.
                        feedback = feedback + ' ' + fixedJSONString
                    raise ExchangeError(feedback)
def sanitize_broken_json_string(self, jsonString):
# sometimes cryptopia will return a unicode symbol before actual JSON string.
indexOfBracket = jsonString.find('{')
if indexOfBracket >= 0:
return jsonString[indexOfBracket:]
return jsonString
    def parse_json(self, response, responseBody, url, method):
        # Repair Cryptopia's occasionally malformed JSON before handing it to
        # the default parser.
        return super(cryptopia, self).parseJson(response, self.sanitize_broken_json_string(responseBody), url, method)
| 38.857923 | 178 | 0.497012 |
acf30db2a7450e62645642d79c36fcbc879d5a63 | 22,853 | py | Python | doc/paddle/api/gen_doc.py | cryoco/FluidDoc | cb5414b819e33f75b57d4cc3524cd176c02b5ecc | [
"Apache-2.0"
] | null | null | null | doc/paddle/api/gen_doc.py | cryoco/FluidDoc | cb5414b819e33f75b57d4cc3524cd176c02b5ecc | [
"Apache-2.0"
] | null | null | null | doc/paddle/api/gen_doc.py | cryoco/FluidDoc | cb5414b819e33f75b57d4cc3524cd176c02b5ecc | [
"Apache-2.0"
] | null | null | null | import paddle
import os
import shutil
import time
import pkgutil
import types
import contextlib
import argparse
import json
import sys
import inspect
import ast
import logging
import importlib
"""
generate api_info_dict.json to describe all info about the apis.
"""
# Filename suffixes of the generated English / Chinese API doc pages.
en_suffix = "_en.rst"
cn_suffix = "_cn.rst"
# key = id(api), value = dict of api_info{
#     "id": id,
#     "all_names": [],   # every full name (aliases included) bound to this api
#     "full_name": "",   # the canonical name; the others are alias names
#     "short_name": "",  # name without the module prefix
#     "alias_name": "",  # alias without the module prefix
#     "display": True/False,  # honours not_display_doc_list / display_doc_list
#     "module_name": "",      # the module the real api belongs to
#     "has_overwrited_doc": True/False,
#     "doc_filename": "",     # document filename without suffix
# }
api_info_dict = {}
# Modules already scanned by parse_module_file(), to avoid re-parsing.
parsed_mods = {}
fmt = logging.Formatter(
    "%(asctime)s - %(funcName)s:%(lineno)d - %(levelname)s - %(message)s")
logger = logging.getLogger()
console = logging.StreamHandler()
console.setFormatter(fmt)
logger.addHandler(console)
# console.setLevel(level)
# logger.setLevel(logging.DEBUG)
# step 1: walkthrough the paddle package to collect all the apis in api_set
def get_all_api(root_path='paddle', attr="__all__"):
    """
    walk through the paddle package to collect all the apis.

    Every name listed in each already-imported module's ``attr`` (``__all__``
    or ``__dict__``) is recorded into the global ``api_info_dict`` via
    ``process_module``.  ``root_path`` is currently unused; traversal always
    starts at the imported ``paddle`` package.
    """
    global api_info_dict
    api_counter = 0
    for filefinder, name, ispkg in pkgutil.walk_packages(
            path=paddle.__path__, prefix=paddle.__name__ + '.'):
        try:
            if name in sys.modules:
                m = sys.modules[name]
            else:
                # importlib.import_module(name)
                m = eval(name)
                # NOTE(review): the eval result above is discarded and the
                # module is skipped -- only already-imported modules are
                # processed; the eval merely probes that the dotted name
                # resolves (AttributeError is logged below). Confirm intended.
                continue
        except AttributeError:
            logger.warning("AttributeError occurred when `eval(%s)`", name)
            pass
        else:
            api_counter += process_module(m, attr)
    api_counter += process_module(paddle, attr)
    logger.info('collected %d apis, %d distinct apis.', api_counter,
                len(api_info_dict))
# step 1 fill fields: `id` & `all_names`, type
def process_module(m, attr="__all__"):
    """Register every public name listed in ``getattr(m, attr)``.

    Each name is resolved with ``eval`` and keyed by ``id()`` in the global
    ``api_info_dict`` so that aliases of the same object share one entry.
    Returns the number of names successfully resolved.
    """
    api_counter = 0
    if not hasattr(m, attr):
        return api_counter
    # ``set`` drops duplicates inside the attribute list itself.
    for api in set(getattr(m, attr)):
        # Skip private names.
        if api[0] == '_':
            continue
        # Exception occurred when `id(eval(paddle.dataset.conll05.test, get_dict))`
        if ',' in api:
            continue
        # api's fullname
        full_name = m.__name__ + "." + api
        try:
            obj = eval(full_name)
            fc_id = id(obj)
        except AttributeError:
            logger.warning("AttributeError occurred when `id(eval(%s))`",
                           full_name)
            pass
        except:
            logger.warning("Exception occurred when `id(eval(%s))`",
                           full_name)
        else:
            api_counter += 1
            logger.debug("adding %s to api_info_dict.", full_name)
            entry = api_info_dict.get(fc_id)
            if entry is not None:
                entry["all_names"].add(full_name)
            else:
                api_info_dict[fc_id] = {
                    "all_names": set([full_name]),
                    "id": fc_id,
                    "object": obj,
                    "type": type(obj).__name__,
                }
    return api_counter
# step 3 fill field : args, src_file, lineno, end_lineno, short_name, full_name, module_name, doc_filename
def set_source_code_attrs():
    """Attach source-location metadata to every entry in ``api_info_dict``.

    Modules are parsed directly; for other objects the defining module is
    located (via ``__module__``, ``__name__`` or the recorded alias names)
    and handed to ``parse_module_file``.  Run after the collection passes so
    every entry already has 'object' and 'type'.
    """
    src_file_start_ind = len(paddle.__path__[0]) - len('paddle/')
    # ast module has end_lineno attr after py 3.8
    for id_api in api_info_dict:
        item = api_info_dict[id_api]
        obj = item["object"]
        obj_type_name = item["type"]
        logger.debug("processing %s:%s:%s", obj_type_name, item["id"],
                     str(obj))
        if obj_type_name == "module":
            if hasattr(obj, '__file__') and obj.__file__ is not None and len(
                    obj.__file__) > src_file_start_ind:
                # Store the file path relative to the repository root.
                api_info_dict[id_api]["src_file"] = obj.__file__[
                    src_file_start_ind:]
                parse_module_file(obj)
            api_info_dict[id_api]["full_name"] = obj.__name__
            api_info_dict[id_api]["package"] = obj.__package__
            api_info_dict[id_api]["short_name"] = split_name(obj.__name__)[1]
        elif hasattr(obj, '__module__') and obj.__module__ in sys.modules:
            # Functions/classes: parse the module they were defined in.
            mod_name = obj.__module__
            mod = sys.modules[mod_name]
            parse_module_file(mod)
        else:
            if hasattr(obj, '__name__'):
                mod_name, short_name = split_name(obj.__name__)
                if mod_name in sys.modules:
                    mod = sys.modules[mod_name]
                    parse_module_file(mod)
                else:
                    logger.debug("{}, {}, {}".format(item["id"], item["type"],
                                                     item["all_names"]))
            else:
                # Last resort: try every recorded alias until one of them
                # resolves to an imported module.
                found = False
                for name in item["all_names"]:
                    mod_name, short_name = split_name(name)
                    if mod_name in sys.modules:
                        mod = sys.modules[mod_name]
                        parse_module_file(mod)
                        found = True
                if not found:
                    logger.debug("{}, {}, {}".format(item["id"], item["type"],
                                                     item["all_names"]))
def split_name(name):
    """Split a dotted name into ``[module_part, last_part]``.

    ``split_name('paddle.nn.Linear')`` -> ``['paddle.nn', 'Linear']``;
    a name without any dot yields ``['', name]``.
    """
    # str.rpartition replaces the original rindex + try/except: it splits on
    # the last '.' and degrades to ('', '', name) when no dot is present,
    # so no exception is needed for ordinary control flow.
    head, _sep, tail = name.rpartition('.')
    return [head, tail]
def parse_module_file(mod):
    """Parse one paddle module's source and record per-api location info.

    For .py files the module is parsed with ``ast`` and every public
    top-level class/function/assignment is matched (by object identity)
    against ``api_info_dict``, filling src_file, full_name, short_name,
    module_name, lineno, end_lineno (py>=3.8) and args.  Non-.py modules
    (pybind11 extensions) fall back to scanning ``mod.__dict__`` without
    line information.  Each module is processed at most once.
    """
    skip_this_mod = False
    if mod in parsed_mods:
        skip_this_mod = True
    if skip_this_mod:
        return
    else:
        parsed_mods[mod] = True
    src_file_start_ind = len(paddle.__path__[0]) - len('paddle/')
    has_end_lineno = sys.version_info > (3, 8)
    if hasattr(mod, '__name__') and hasattr(mod, '__file__'):
        src_file = mod.__file__
        mod_name = mod.__name__
        logger.debug("parsing %s:%s", mod_name, src_file)
        if len(mod_name) >= 6 and mod_name[:6] == 'paddle':
            if os.path.splitext(src_file)[1].lower() == '.py':
                mod_ast = ast.parse(open(src_file, "r").read())
                for node in mod_ast.body:
                    short_names = []
                    # Public classes/functions that really exist on the
                    # imported module; assignments contribute their targets.
                    if ((isinstance(node, ast.ClassDef) or
                         isinstance(node, ast.FunctionDef)) and
                            hasattr(node, 'name') and
                            hasattr(sys.modules[mod_name],
                                    node.name) and node.name[0] != '_'):
                        short_names.append(node.name)
                    elif isinstance(node, ast.Assign):
                        for target in node.targets:
                            if hasattr(target, 'id') and target.id[0] != '_':
                                short_names.append(target.id)
                    else:
                        pass
                    for short_name in short_names:
                        obj_full_name = mod_name + '.' + short_name
                        logger.debug("processing %s", obj_full_name)
                        try:
                            obj_this = eval(obj_full_name)
                            obj_id = id(obj_this)
                        except:
                            # Retry with the package name; some names are
                            # re-exported one level up.
                            logger.warning("%s maybe %s.%s", obj_full_name,
                                           mod.__package__, short_name)
                            obj_full_name = mod.__package__ + '.' + short_name
                            try:
                                obj_this = eval(obj_full_name)
                                obj_id = id(obj_this)
                            except:
                                continue
                        # Only fill each api once ("lineno" acts as the
                        # done-marker); later duplicates are ignored.
                        if obj_id in api_info_dict and "lineno" not in api_info_dict[
                                obj_id]:
                            api_info_dict[obj_id]["src_file"] = src_file[
                                src_file_start_ind:]
                            api_info_dict[obj_id][
                                "doc_filename"] = obj_full_name.replace('.',
                                                                        '/')
                            api_info_dict[obj_id]["full_name"] = obj_full_name
                            api_info_dict[obj_id]["short_name"] = short_name
                            api_info_dict[obj_id]["module_name"] = mod_name
                            api_info_dict[obj_id]["lineno"] = node.lineno
                            if has_end_lineno:
                                api_info_dict[obj_id][
                                    "end_lineno"] = node.end_lineno
                            # Signature string: the def itself, or the
                            # class's __init__ for classes.
                            if isinstance(node, ast.FunctionDef):
                                api_info_dict[obj_id][
                                    "args"] = gen_functions_args_str(node)
                            elif isinstance(node, ast.ClassDef):
                                for n in node.body:
                                    if hasattr(
                                            n,
                                            'name') and n.name == '__init__':
                                        api_info_dict[obj_id][
                                            "args"] = gen_functions_args_str(n)
                                        break
                        else:
                            logger.debug("%s omitted", obj_full_name)
            else:  # pybind11 ...
                for short_name in mod.__dict__:
                    if short_name[0] != '_':
                        obj_full_name = mod_name + '.' + short_name
                        logger.debug("processing %s", obj_full_name)
                        try:
                            obj_this = eval(obj_full_name)
                            obj_id = id(obj_this)
                        except:
                            logger.warning("%s eval error", obj_full_name)
                            continue
                        if obj_id in api_info_dict and "lineno" not in api_info_dict[
                                obj_id]:
                            api_info_dict[obj_id]["src_file"] = src_file[
                                src_file_start_ind:]
                            api_info_dict[obj_id]["full_name"] = obj_full_name
                            api_info_dict[obj_id]["short_name"] = short_name
                            api_info_dict[obj_id]["module_name"] = mod_name
                        else:
                            logger.debug("%s omitted", obj_full_name)
def gen_functions_args_str(node):
    """Render an ``ast.FunctionDef`` signature as ``"a, b=1, *args, **kw"``.

    ``self`` is dropped, and only ``ast.Name`` / ``ast.Constant`` default
    values are rendered (other default expressions are silently omitted,
    matching the original behaviour).  Non-FunctionDef nodes yield ''.
    """
    parts = []
    if isinstance(node, ast.FunctionDef):
        # 'args', 'defaults', 'kw_defaults', 'kwarg', 'kwonlyargs', 'vararg'
        parts.extend(a.arg for a in node.args.args if a.arg != 'self')
        # Defaults align with the tail of the positional parameters.
        first_default = len(parts) - len(node.args.defaults)
        for offset, default in enumerate(node.args.defaults):
            if isinstance(default, ast.Name):
                parts[first_default + offset] += '=' + str(default.id)
            elif isinstance(default, ast.Constant):
                parts[first_default + offset] += '=' + str(default.value)
        if node.args.vararg is not None:
            parts.append('*' + node.args.vararg.arg)
        if len(node.args.kwonlyargs) > 0:
            if node.args.vararg is None:
                # A bare '*' separates keyword-only parameters.
                parts.append('*')
            for kwo, default in zip(node.args.kwonlyargs, node.args.kw_defaults):
                if isinstance(default, ast.Constant):
                    parts.append("{}={}".format(kwo.arg, default.value))
                elif isinstance(default, ast.Name):
                    parts.append("{}={}".format(kwo.arg, default.id))
        if node.args.kwarg is not None:
            parts.append('**' + node.args.kwarg.arg)
    return ', '.join(parts)
# step 2 fill field : `display`
def set_display_attr_of_apis():
    """Set each api's 'display' flag from the two whitelist/blacklist files.

    An api is displayed if any of its names appears in ./display_doc_list;
    otherwise it is hidden when every check finds one of its names starting
    with a prefix from ./not_display_doc_list.
    """
    display_none_apis = set(
        [line.strip() for line in open("./not_display_doc_list", "r")])
    display_yes_apis = set(
        [line.strip() for line in open("./display_doc_list", "r")])
    logger.info(
        'display_none_apis has %d items, display_yes_apis has %d items',
        len(display_none_apis), len(display_yes_apis))
    # file the same apis
    for id_api in api_info_dict:
        all_names = api_info_dict[id_api]["all_names"]
        display_yes = False
        # Whitelist wins: any alias present in display_doc_list keeps it.
        for n in all_names:
            if n in display_yes_apis:
                display_yes = True
                break
        if display_yes:
            api_info_dict[id_api]["display"] = True
        else:
            display_yes = True
            # Blacklist is prefix-based: hide if any alias starts with a
            # not_display_doc_list entry.
            for n in all_names:
                for dn in display_none_apis:
                    if n.startswith(dn):
                        display_yes = False
                        break
                if not display_yes:
                    break
            if not display_yes:
                api_info_dict[id_api]["display"] = False
                logger.info("set {} display to False".format(id_api))
# step 4 fill field : alias_name
def set_real_api_alias_attr():
    """Apply ./alias_api_mapping: record each api's alias and fill gaps.

    Each mapping line is "<real_api> <alias>"; the real api is resolved by
    eval and, when found in api_info_dict, gets 'alias_name' plus any of
    doc_filename / module_name / short_name / full_name still missing.
    """
    for line in open("./alias_api_mapping", "r"):
        linecont = line.strip()
        lineparts = linecont.split()
        if len(lineparts) < 2:
            logger.warning('line "{}" splited to {}'.format(line, lineparts))
            continue
        try:
            real_api = lineparts[0].strip()
            m = eval(real_api)
        except AttributeError:
            logger.warning("AttributeError: %s", real_api)
        else:
            api_id = id(m)
            if api_id in api_info_dict:
                api_info_dict[api_id]["alias_name"] = lineparts[1]
                # Only fill fields that earlier passes left unset.
                if "doc_filename" not in api_info_dict[api_id]:
                    api_info_dict[api_id]["doc_filename"] = real_api.replace(
                        '.', '/')
                if "module_name" not in api_info_dict[
                        api_id] or "short_name" not in api_info_dict[api_id]:
                    mod_name, short_name = split_name(real_api)
                    api_info_dict[api_id]["module_name"] = mod_name
                    api_info_dict[api_id]["short_name"] = short_name
                if 'full_name' not in api_info_dict[api_id]:
                    api_info_dict[api_id]["full_name"] = real_api
def get_shortest_api(api_list):
    """Return the api name with the fewest dotted components.

    The shortest dotted path is treated as the canonical ("real") api name.
    Ties keep the earliest entry, matching the original scan order.
    """
    # min() with a key replaces the manual scan and is stable: the first
    # item with the minimal component count wins, exactly like the original
    # strict "<" comparison did.
    return min(api_list, key=lambda api: len(api.split(".")))
def remove_all_en_files(path="./paddle"):
    """Delete every previously generated English doc file under ``path``."""
    for dirpath, _dirnames, filenames in os.walk(path):
        for filename in filenames:
            if filename.endswith(en_suffix):
                os.remove(os.path.join(dirpath, filename))
# using `doc_filename`
def gen_en_files(api_label_file="api_label"):
    """Generate one English .rst stub per displayed api.

    Skips hidden apis, apis without a doc_filename, and files that already
    exist; also appends a sphinx label line per api to ``api_label_file``.
    """
    with open(api_label_file, 'w') as api_label:
        for id_api, api_info in api_info_dict.items():
            # api_info = api_info_dict[id_api]
            if "display" in api_info and not api_info["display"]:
                logger.debug("{} display False".format(id_api))
                continue
            if "doc_filename" not in api_info:
                logger.debug(
                    "{} does not have doc_filename field.".format(id_api))
                continue
            else:
                logger.debug(api_info["doc_filename"])
            path = os.path.dirname(api_info["doc_filename"])
            if not os.path.exists(path):
                os.makedirs(path)
            f = api_info["doc_filename"] + en_suffix
            if os.path.exists(f):
                # Keep manually curated files: never overwrite.
                continue
            gen = EnDocGenerator()
            with gen.guard(f):
                if 'full_name' in api_info:
                    mod_name, _, short_name = api_info['full_name'].rpartition(
                        '.')
                else:
                    # Fall back to the separately recorded parts.
                    mod_name = api_info['module_name']
                    short_name = api_info['short_name']
                    logger.warning("full_name not in api_info: %s.%s",
                                   mod_name, short_name)
                gen.module_name = mod_name
                gen.api = short_name
                gen.print_header_reminder()
                gen.print_item()
                api_label.write("{1}\t.. _api_{0}_{1}:\n".format("_".join(
                    mod_name.split(".")), short_name))
def check_cn_en_match(path="./paddle", diff_file="en_cn_files_diff"):
    """Report _en.rst / _cn.rst files lacking their counterpart.

    Walks ``path`` and writes one "existing<TAB>missing" line per unmatched
    doc file into ``diff_file``.
    """
    osp_join = os.path.join
    osp_exists = os.path.exists
    with open(diff_file, 'w') as fo:
        tmpl = "{}\t{}\n"
        # NOTE(review): "not_exits" is a typo in the emitted header, but it is
        # runtime output -- left unchanged here in case tooling greps for it.
        fo.write(tmpl.format("exist", "not_exits"))
        for root, dirs, files in os.walk(path):
            for file in files:
                if file.endswith(en_suffix):
                    cf = file.replace(en_suffix, cn_suffix)
                    if not osp_exists(osp_join(root, cf)):
                        fo.write(
                            tmpl.format(
                                osp_join(root, file), osp_join(root, cf)))
                elif file.endswith(cn_suffix):
                    ef = file.replace(cn_suffix, en_suffix)
                    if not osp_exists(osp_join(root, ef)):
                        fo.write(
                            tmpl.format(
                                osp_join(root, file), osp_join(root, ef)))
class EnDocGenerator(object):
    """
    Writes one api's English .rst page (sphinx label + autodoc directive).
    """
    def __init__(self, name=None, api=None):
        """
        Remember the module name and api short name; the output stream is
        opened later by ``guard``.
        """
        self.module_name = name
        self.api = api
        self.stream = None
    @contextlib.contextmanager
    def guard(self, filename):
        """
        Context manager that opens ``filename`` for writing and guarantees
        it is closed (and ``self.stream`` reset) afterwards.
        """
        assert self.stream is None, "stream must be None"
        self.stream = open(filename, 'w')
        yield
        self.stream.close()
        self.stream = None
    def print_item(self):
        """
        Emit the autodoc block: autoclass for classes, autofunction for
        plain functions; anything else is silently skipped.
        """
        try:
            m = eval(self.module_name + "." + self.api)
        except AttributeError:
            logger.warning("attribute error: module_name=" + self.module_name +
                           ", api=" + self.api)
            pass
        else:
            if isinstance(eval(self.module_name + "." + self.api), type):
                self.print_class()
            elif isinstance(
                    eval(self.module_name + "." + self.api),
                    types.FunctionType):
                self.print_function()
    def print_header_reminder(self):
        """
        Write the do-not-edit banner at the top of the generated file.
        """
        self.stream.write('''.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}`
    !DO NOT EDIT THIS FILE MANUALLY!
''')
    def _print_ref_(self):
        """
        Write the sphinx cross-reference label for this api.
        """
        self.stream.write(".. _api_{0}_{1}:\n\n".format("_".join(
            self.module_name.split(".")), self.api))
    def _print_header_(self, name, dot, is_title):
        """
        Write ``name`` underlined with ``dot``; titles are overlined too.
        """
        dot_line = dot * len(name)
        if is_title:
            self.stream.write(dot_line)
            self.stream.write('\n')
        self.stream.write(name)
        self.stream.write('\n')
        self.stream.write(dot_line)
        self.stream.write('\n')
        self.stream.write('\n')
    def print_class(self):
        """
        Write an ``autoclass`` directive, choosing the template by module:
        some packages exclude inherited members or optimizer internals.
        """
        self._print_ref_()
        self._print_header_(self.api, dot='-', is_title=False)
        cls_templates = {
            'default':
            '''.. autoclass:: {0}
    :members:
    :inherited-members:
    :noindex:
''',
            'no-inherited':
            '''.. autoclass:: {0}
    :members:
    :noindex:
''',
            'fluid.optimizer':
            '''.. autoclass:: {0}
    :members:
    :inherited-members:
    :exclude-members: apply_gradients, apply_optimize, backward, load
    :noindex:
'''
        }
        tmpl = 'default'
        if 'fluid.dygraph' in self.module_name or \
            'paddle.vision' in self.module_name or \
            'paddle.callbacks' in self.module_name or \
            'paddle.hapi.callbacks' in self.module_name or \
            'paddle.io' in self.module_name or \
            'paddle.nn' in self.module_name:
            tmpl = 'no-inherited'
        elif "paddle.optimizer" in self.module_name or \
            "fluid.optimizer" in self.module_name:
            tmpl = 'fluid.optimizer'
        else:
            tmpl = 'default'
        api_full_name = "{}.{}".format(self.module_name, self.api)
        self.stream.write(cls_templates[tmpl].format(api_full_name))
    def print_function(self):
        """
        Write an ``autofunction`` directive for this api.
        """
        self._print_ref_()
        self._print_header_(self.api, dot='-', is_title=False)
        self.stream.write('''.. autofunction:: {0}.{1}
    :noindex:
'''.format(self.module_name, self.api))
def filter_api_info_dict():
    """Make every entry of the global ``api_info_dict`` JSON-serializable.

    'all_names' sets become lists and the live 'object' references are
    dropped before the dict is dumped to disk.
    """
    for info in api_info_dict.values():
        if "all_names" in info:
            info["all_names"] = list(info["all_names"])
        info.pop("object", None)
def reset_api_info_dict():
    """Clear the module-level caches so a fresh collection pass can run."""
    global api_info_dict, parsed_mods
    api_info_dict = {}
    parsed_mods = {}
if __name__ == "__main__":
    # for api manager
    # Pass 1: scan via __dict__ (includes non-exported names) and dump the
    # full metadata for the api-manager tooling.
    reset_api_info_dict()
    get_all_api(attr="__dict__")
    set_display_attr_of_apis()
    set_source_code_attrs()
    set_real_api_alias_attr()
    filter_api_info_dict()
    json.dump(api_info_dict, open("api_info_dict.json", "w"), indent=4)
    # for api rst files
    # Pass 2: scan via __all__ (public api only) and generate the .rst pages.
    reset_api_info_dict()
    get_all_api(attr="__all__")
    set_display_attr_of_apis()
    set_source_code_attrs()
    set_real_api_alias_attr()
    filter_api_info_dict()
    json.dump(api_info_dict, open("api_info_all.json", "w"), indent=4)
    gen_en_files()
    check_cn_en_match()
acf30db405dc132cf6e848925687f79b587cfb27 | 214 | py | Python | hypernet/src/thermophysicalModels/chemistry/chemistrySolver/surrogate/__init__.py | christian-jacobsen/hypernet | 9f62e1531eb152cc08af0b0c6b09d6fde8d42400 | [
"Apache-2.0"
] | null | null | null | hypernet/src/thermophysicalModels/chemistry/chemistrySolver/surrogate/__init__.py | christian-jacobsen/hypernet | 9f62e1531eb152cc08af0b0c6b09d6fde8d42400 | [
"Apache-2.0"
] | null | null | null | hypernet/src/thermophysicalModels/chemistry/chemistrySolver/surrogate/__init__.py | christian-jacobsen/hypernet | 9f62e1531eb152cc08af0b0c6b09d6fde8d42400 | [
"Apache-2.0"
] | null | null | null | from hypernet.src.thermophysicalModels.chemistrySolver.surrogate.basic import basic
from hypernet.src.thermophysicalModels.chemistrySolver.surrogate.deepNet import DeepNet
__all__ = [
"Basic",
"DeepNet"
]
| 26.75 | 87 | 0.808411 |
acf30e926e811ff957d395e86d22b2a7d56c6158 | 965 | py | Python | Advent2018/3a.py | SSteve/AdventOfCode | aed16209381ccd292fc02008f1f2da5d16ff1a05 | [
"MIT"
] | null | null | null | Advent2018/3a.py | SSteve/AdventOfCode | aed16209381ccd292fc02008f1f2da5d16ff1a05 | [
"MIT"
] | null | null | null | Advent2018/3a.py | SSteve/AdventOfCode | aed16209381ccd292fc02008f1f2da5d16ff1a05 | [
"MIT"
] | null | null | null | import re
repattern = re.compile(r"#(\d*) @ (\d*),(\d*): (\d*)x(\d*)")
fabric = {}
overlapping_claims = set()
all_claims = set()
with open("3.txt", "r") as infile:
for line in infile:
print(line.strip())
result = repattern.match(line.strip())
claim_number, left_offset, top_offset, width, height = \
int(result[1]), int(result[2]), int(result[3]), int(result[4]), int(result[5])
all_claims.add(claim_number)
#print(claim_number, left_offset, right_offset, width, height)
for x in range(left_offset, left_offset + width):
for y in range(top_offset, top_offset + height):
coordinate = (x, y)
#print(coordinate)
if not coordinate in fabric:
fabric[coordinate] = [claim_number]
else:
fabric[coordinate].append(claim_number)
overlaps = 0
for fabric_square in fabric.values():
if len(fabric_square) > 1:
overlapping_claims.update(fabric_square)
overlaps += 1
print(overlaps)
print(all_claims - overlapping_claims)
| 31.129032 | 81 | 0.687047 |
acf30eec451212ede0174a819a9aa936f36d92d1 | 253 | py | Python | swtest/djoser_urls.py | alldevic/swtest | 9578d4da6b468c8d7072b5673c75b14bd95ef9fa | [
"MIT"
] | null | null | null | swtest/djoser_urls.py | alldevic/swtest | 9578d4da6b468c8d7072b5673c75b14bd95ef9fa | [
"MIT"
] | null | null | null | swtest/djoser_urls.py | alldevic/swtest | 9578d4da6b468c8d7072b5673c75b14bd95ef9fa | [
"MIT"
] | null | null | null | from django.contrib.auth import get_user_model
from rest_framework.routers import DefaultRouter
from . import djoser_views
router = DefaultRouter()
router.register("users", djoser_views.UserViewSet)
User = get_user_model()
urlpatterns = router.urls
| 21.083333 | 50 | 0.818182 |
acf3101eea0238df80b38b80f685c8578da0587a | 446 | py | Python | cfg/create_robot_positions.py | hamzaMahdi/sphero_formation | 71dd4a8097c578f9237ed1f65e3debdcc3a8cc5b | [
"MIT"
] | null | null | null | cfg/create_robot_positions.py | hamzaMahdi/sphero_formation | 71dd4a8097c578f9237ed1f65e3debdcc3a8cc5b | [
"MIT"
] | null | null | null | cfg/create_robot_positions.py | hamzaMahdi/sphero_formation | 71dd4a8097c578f9237ed1f65e3debdcc3a8cc5b | [
"MIT"
] | 1 | 2019-11-06T21:27:51.000Z | 2019-11-06T21:27:51.000Z | # note : this does not create the link between the map and the world. It only spawns the robots.
# Please make sure to go back and manually add the path to the bitmap file
file_name='my_empty_10x10_20.world'
f = open("../resources/sim/"+file_name,"w+")
x = -1.5
y = -0.1
for i in range(2):
for j in range(10):
f.write('sphero( pose [ %f %f 0.000 0.000 ] name "sphero_%d" color "blue")\n'%(x,y,i*10+j))
x -= 0.2
y += 0.2
x = -1.5
f.close()
| 31.857143 | 96 | 0.647982 |
acf310d52b792ec5ad6512296e561c0fa0bbc8d7 | 879 | py | Python | stubs/micropython-v1_9_3-esp8266/framebuf.py | mattytrentini/micropython-stubs | 4d596273823b69e9e5bcf5fa67f249c374ee0bbc | [
"MIT"
] | null | null | null | stubs/micropython-v1_9_3-esp8266/framebuf.py | mattytrentini/micropython-stubs | 4d596273823b69e9e5bcf5fa67f249c374ee0bbc | [
"MIT"
] | null | null | null | stubs/micropython-v1_9_3-esp8266/framebuf.py | mattytrentini/micropython-stubs | 4d596273823b69e9e5bcf5fa67f249c374ee0bbc | [
"MIT"
] | null | null | null | """
Module: 'framebuf' on esp8266 v1.9.3
"""
# MCU: (sysname='esp8266', nodename='esp8266', release='2.0.0(5a875ba)', version='v1.9.3-8-g63826ac5c on 2017-11-01', machine='ESP module with ESP8266')
# Stubber: 1.1.2 - updated
from typing import Any
class FrameBuffer:
    """Auto-generated stub of MicroPython's framebuf.FrameBuffer.

    All methods are signature-only placeholders captured from the device
    by micropython-stubber; they carry no behaviour.
    """
    def blit(self, *argv) -> Any:
        pass
    def fill(self, *argv) -> Any:
        pass
    def fill_rect(self, *argv) -> Any:
        pass
    def hline(self, *argv) -> Any:
        pass
    def line(self, *argv) -> Any:
        pass
    def pixel(self, *argv) -> Any:
        pass
    def rect(self, *argv) -> Any:
        pass
    def scroll(self, *argv) -> Any:
        pass
    def text(self, *argv) -> Any:
        pass
    def vline(self, *argv) -> Any:
        pass
def FrameBuffer1():
    """Signature-only stub of framebuf.FrameBuffer1 captured from the device."""
    pass
# Pixel-format constants captured from the device's framebuf module.
GS4_HMSB = 2
MONO_HLSB = 3
MONO_HMSB = 4
MONO_VLSB = 0
MVLSB = 0  # same value as MONO_VLSB
RGB565 = 1
acf311012d4419c0ebfadc02431e5d3fda4c0901 | 5,159 | py | Python | blockchain.py | Levi-Huynh/JS-INTERVIEW | 768e5577fd8f3f26c244154be9d9fd5a348f6171 | [
"MIT"
] | null | null | null | blockchain.py | Levi-Huynh/JS-INTERVIEW | 768e5577fd8f3f26c244154be9d9fd5a348f6171 | [
"MIT"
] | null | null | null | blockchain.py | Levi-Huynh/JS-INTERVIEW | 768e5577fd8f3f26c244154be9d9fd5a348f6171 | [
"MIT"
] | null | null | null | import hashlib
import json
from time import time
from uuid import uuid4
from flask import Flask, jsonify, request
class Blockchain(object):
    """A minimal proof-of-work blockchain.

    Holds the full chain plus a buffer of pending transactions.  Blocks are
    hashed as SHA-256 over their JSON serialization with sorted keys, and a
    proof is valid when the hash of ``block_string + proof`` starts with
    three zero hex digits.
    """

    def __init__(self):
        self.chain = []
        self.current_transactions = []
        # Every chain starts from the same hard-coded genesis block.
        self.new_block(previous_hash=1, proof=100)

    def new_block(self, proof, previous_hash=None):
        """
        Create a new Block in the Blockchain

        A block should have:
        * Index
        * Timestamp
        * List of current transactions
        * The proof used to mine this block
        * The hash of the previous block

        :param proof: <int> The proof given by the Proof of Work algorithm
        :param previous_hash: (Optional) <str> Hash of previous Block
        :return: <dict> New Block
        """
        block = {
            'index': len(self.chain) + 1,
            'timestamp': time(),
            'transactions': self.current_transactions,
            'proof': proof,
            # Fall back to hashing the current chain tip when no explicit
            # previous hash is supplied.
            'previous_hash': previous_hash or self.hash(self.last_block),
        }
        # Pending transactions are now sealed into this block.
        self.current_transactions = []
        self.chain.append(block)
        return block

    def hash(self, block):
        """
        Create a SHA-256 hash of a Block.

        :param block: <dict> Block
        :return: <str> hex digest
        """
        # sort_keys keeps the serialization (and therefore the hash)
        # independent of dict insertion order.
        block_string = json.dumps(block, sort_keys=True)
        return hashlib.sha256(block_string.encode()).hexdigest()

    @property
    def last_block(self):
        # Convenience accessor for the chain tip.
        return self.chain[-1]

    def proof_of_work(self, block):
        """
        Simple Proof of Work Algorithm.

        Stringify the block and count upward from zero until `valid_proof`
        accepts a candidate.

        :return: <int> A valid proof for the provided block
        """
        block_string = json.dumps(block, sort_keys=True)
        candidate = 0
        while not self.valid_proof(block_string, candidate):
            candidate += 1
        return candidate

    @staticmethod
    def valid_proof(block_string, proof):
        """
        Validate a proof: does sha256(block_string + proof) start with three
        leading zero hex digits?

        :param block_string: <str> the stringified block
        :param proof: <int> candidate proof value
        :return: <bool> True if the resulting hash is a valid proof
        """
        guess = f'{block_string}{proof}'.encode()
        return hashlib.sha256(guess).hexdigest()[:3] == "000"
# Instantiate our Node
app = Flask(__name__)
# Generate a globally unique address for this node (uuid4 with dashes removed)
node_identifier = str(uuid4()).replace('-', '')
# Instantiate the Blockchain
blockchain = Blockchain()
@app.route('/mine', methods=['GET'])
def mine():
    """Mine one block: solve proof-of-work for the chain tip and append it."""
    tip = blockchain.last_block
    # Solve the proof-of-work puzzle for the current chain tip.
    proof = blockchain.proof_of_work(tip)
    # Forge the new block, linked to its predecessor by hash.
    new_block = blockchain.new_block(proof, blockchain.hash(tip))
    return jsonify({'new_block': new_block}), 200
@app.route('/chain', methods=['GET'])
def full_chain():
    """Return the entire chain and its current length as JSON."""
    return jsonify({
        'chain': blockchain.chain,
        'length': len(blockchain.chain),
    }), 200
# Run the program on port 5000, bound to all interfaces so other nodes
# can reach this one.
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)
| 31.266667 | 119 | 0.632875 |
acf312375ebe394cf576d11007f3800f0cafc175 | 1,296 | py | Python | setup.py | hurbcom/python-mysql-replication | 7e97754b9e77dc7a75c0af05444c2f40dd2f6018 | [
"Apache-2.0"
] | null | null | null | setup.py | hurbcom/python-mysql-replication | 7e97754b9e77dc7a75c0af05444c2f40dd2f6018 | [
"Apache-2.0"
] | null | null | null | setup.py | hurbcom/python-mysql-replication | 7e97754b9e77dc7a75c0af05444c2f40dd2f6018 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from setuptools import setup, Command
except ImportError:
from distutils.core import setup, Command
import sys
tests_require = []
# add unittest2 to tests_require for python < 2.7 (unittest2 backports the
# newer unittest APIs to those interpreters)
if sys.version_info < (2, 7):
    tests_require.append("unittest2")
class TestCommand(Command):
    """``python setup.py test``: run the bundled pymysqlreplication tests."""
    # distutils Command requires this attribute even with no options.
    user_options = []
    def initialize_options(self):
        # No options to initialize.
        pass
    def finalize_options(self):
        # No options to validate.
        pass
    def run(self):
        """
        Finds all the tests modules in tests/, and runs them.
        """
        from pymysqlreplication import tests
        import unittest
        unittest.main(tests, argv=sys.argv[:1])
version = "0.57"
# Distribution metadata; the 'test' extra pulls in tests_require
# (unittest2 on python < 2.7).
setup(
    name="hurb-mysql-replication",
    version=version,
    url="https://github.com/noplay/python-mysql-replication",
    author="Julien Duponchelle",
    author_email="julien@duponchelle.info",
    description=("Pure Python Implementation of MySQL replication protocol "
                 "build on top of PyMYSQL."),
    license="Apache 2",
    packages=["pymysqlreplication",
              "pymysqlreplication.constants",
              "pymysqlreplication.tests"],
    cmdclass={"test": TestCommand},
    extras_require={'test': tests_require},
    install_requires=['pymysql>=0.6'],
)
| 23.563636 | 76 | 0.648148 |
acf31292cb2fb9cc09d889b261e6fef749f5b57a | 8,515 | py | Python | sdk/python/pulumi_aws/waf/rate_based_rule.py | mdop-wh/pulumi-aws | 05bb32e9d694dde1c3b76d440fd2cd0344d23376 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/waf/rate_based_rule.py | mdop-wh/pulumi-aws | 05bb32e9d694dde1c3b76d440fd2cd0344d23376 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/waf/rate_based_rule.py | mdop-wh/pulumi-aws | 05bb32e9d694dde1c3b76d440fd2cd0344d23376 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from .. import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['RateBasedRule']
class RateBasedRule(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
metric_name: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
predicates: Optional[pulumi.Input[List[pulumi.Input[pulumi.InputType['RateBasedRulePredicateArgs']]]]] = None,
rate_key: Optional[pulumi.Input[str]] = None,
rate_limit: Optional[pulumi.Input[float]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Provides a WAF Rate Based Rule Resource
## Example Usage
```python
import pulumi
import pulumi_aws as aws
ipset = aws.waf.IpSet("ipset", ip_set_descriptors=[aws.waf.IpSetIpSetDescriptorArgs(
type="IPV4",
value="192.0.7.0/24",
)])
wafrule = aws.waf.RateBasedRule("wafrule",
metric_name="tfWAFRule",
rate_key="IP",
rate_limit=100,
predicates=[aws.waf.RateBasedRulePredicateArgs(
data_id=ipset.id,
negated=False,
type="IPMatch",
)],
opts=ResourceOptions(depends_on=[ipset]))
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] metric_name: The name or description for the Amazon CloudWatch metric of this rule.
:param pulumi.Input[str] name: The name or description of the rule.
:param pulumi.Input[List[pulumi.Input[pulumi.InputType['RateBasedRulePredicateArgs']]]] predicates: The objects to include in a rule (documented below).
:param pulumi.Input[str] rate_key: Valid value is IP.
:param pulumi.Input[float] rate_limit: The maximum number of requests, which have an identical value in the field specified by the RateKey, allowed in a five-minute period. Minimum value is 100.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if metric_name is None:
raise TypeError("Missing required property 'metric_name'")
__props__['metric_name'] = metric_name
__props__['name'] = name
__props__['predicates'] = predicates
if rate_key is None:
raise TypeError("Missing required property 'rate_key'")
__props__['rate_key'] = rate_key
if rate_limit is None:
raise TypeError("Missing required property 'rate_limit'")
__props__['rate_limit'] = rate_limit
__props__['tags'] = tags
__props__['arn'] = None
super(RateBasedRule, __self__).__init__(
'aws:waf/rateBasedRule:RateBasedRule',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
arn: Optional[pulumi.Input[str]] = None,
metric_name: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
predicates: Optional[pulumi.Input[List[pulumi.Input[pulumi.InputType['RateBasedRulePredicateArgs']]]]] = None,
rate_key: Optional[pulumi.Input[str]] = None,
rate_limit: Optional[pulumi.Input[float]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'RateBasedRule':
"""
Get an existing RateBasedRule resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] arn: Amazon Resource Name (ARN)
:param pulumi.Input[str] metric_name: The name or description for the Amazon CloudWatch metric of this rule.
:param pulumi.Input[str] name: The name or description of the rule.
:param pulumi.Input[List[pulumi.Input[pulumi.InputType['RateBasedRulePredicateArgs']]]] predicates: The objects to include in a rule (documented below).
:param pulumi.Input[str] rate_key: Valid value is IP.
:param pulumi.Input[float] rate_limit: The maximum number of requests, which have an identical value in the field specified by the RateKey, allowed in a five-minute period. Minimum value is 100.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["arn"] = arn
__props__["metric_name"] = metric_name
__props__["name"] = name
__props__["predicates"] = predicates
__props__["rate_key"] = rate_key
__props__["rate_limit"] = rate_limit
__props__["tags"] = tags
return RateBasedRule(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
"""
Amazon Resource Name (ARN)
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="metricName")
def metric_name(self) -> pulumi.Output[str]:
"""
The name or description for the Amazon CloudWatch metric of this rule.
"""
return pulumi.get(self, "metric_name")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name or description of the rule.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def predicates(self) -> pulumi.Output[Optional[List['outputs.RateBasedRulePredicate']]]:
"""
The objects to include in a rule (documented below).
"""
return pulumi.get(self, "predicates")
@property
@pulumi.getter(name="rateKey")
def rate_key(self) -> pulumi.Output[str]:
"""
Valid value is IP.
"""
return pulumi.get(self, "rate_key")
@property
@pulumi.getter(name="rateLimit")
def rate_limit(self) -> pulumi.Output[float]:
"""
The maximum number of requests, which have an identical value in the field specified by the RateKey, allowed in a five-minute period. Minimum value is 100.
"""
return pulumi.get(self, "rate_limit")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Key-value map of resource tags
"""
return pulumi.get(self, "tags")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 42.575 | 202 | 0.630299 |
acf31316fcc0e9541bcaa8f1b8682eb05ad3d1ac | 2,435 | py | Python | libs/help.py | amzy-0/pck3r | ccbd9e10bdbfa239c955804910ffbd25e6e77fac | [
"Apache-2.0"
] | 44 | 2020-10-07T15:18:35.000Z | 2022-03-10T07:19:33.000Z | libs/help.py | amzy-0/pck3r | ccbd9e10bdbfa239c955804910ffbd25e6e77fac | [
"Apache-2.0"
] | 10 | 2020-10-25T22:55:44.000Z | 2021-03-26T19:38:14.000Z | libs/help.py | amzy-0/pck3r | ccbd9e10bdbfa239c955804910ffbd25e6e77fac | [
"Apache-2.0"
] | 9 | 2020-10-10T20:30:36.000Z | 2021-12-12T13:37:35.000Z | #!/usr/bin/python3
"""
Copyright [2020-2021] [M.Amin Azimi .K (amzy-0)]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__authors__ = ['M.Amin Azimi .K(amzy-0)', 'https://github.com/amzy-0/pck3r/graphs/contributors']
from os import system as syscall
from os import chdir, getcwd, getenv
if __name__ == "__main__":
print("""
This is a module not an executeable program
Alternative command :
$ python3 core_pck3r.py
OR
$ python3 installer.py
OR
$ chmod 755 core_pck3r.py ; ./core_pck3r.py
And for installing :
$ chmod 755 installer.py ; ./installer.py
""")
else:
from . import stuff
print("""%s
-----------------------------------------
| |
| pck3r : It is a versatile program and |
| |
| you avoid using useless commands and |
| |
| it is written for Ubuntu... |
| |
-----------------------------------------
\"install\" command :
$ pck3r install \"somthing\" :
{
nodejs,
wine,
ohmyzsh,
flstudio,
or ...
}
\"clear\" command :
$ pck3r clear:
{clear your terminal }
\"sys\" command :
$ pck3r sys update
(update your oprating system)
$ pck3r sys upgrade
(upgrade your oprating system)
$ pck3r sys updgr
(both, update and upgrade (full upgrade))
\"tilix\" command :
$ pck3r tilix
(tilix terminal ...)
\"dotnet\" command :
$ pck3r install dotnet
(installing .NET (dot net ) C0RE, ASP, MCS compiler , ...)
\"pkg\" command :
$ pck3r pkg <package name>
(search for packages ...)
\"update\" command :
$ pck3r update
(update to last release from github.com/amzy-0/pck3r)
"version" command :
$ pck3r version
(this command show pck3r version)
%s
""" % (stuff.YEL, stuff.NRM)
) | 22.757009 | 96 | 0.577002 |
acf3153f1c4bf8e07fe7fd8b963fc338f02f7fa1 | 477 | py | Python | django_otp/migrations/0002_otpsecrets_issuer_name.py | brucedockeray-projects/django-good-otp | 7328ff84c4f31947bc92a529edbfad613730a671 | [
"MIT"
] | 19 | 2018-09-04T12:16:16.000Z | 2021-11-04T18:07:37.000Z | django_otp/migrations/0002_otpsecrets_issuer_name.py | brucedockeray-projects/django-good-otp | 7328ff84c4f31947bc92a529edbfad613730a671 | [
"MIT"
] | 40 | 2016-12-31T05:01:11.000Z | 2022-01-29T02:51:52.000Z | django_otp/migrations/0002_otpsecrets_issuer_name.py | brucedockeray-projects/django-good-otp | 7328ff84c4f31947bc92a529edbfad613730a671 | [
"MIT"
] | 5 | 2019-04-04T21:38:31.000Z | 2021-03-09T05:33:45.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-29 03:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('django_otp', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='otpsecrets',
name='issuer_name',
field=models.CharField(blank=True, db_index=True, max_length=40),
),
]
| 22.714286 | 77 | 0.624738 |
acf31547bc0b398baef8dc0ccaa87cb689e317c2 | 1,224 | py | Python | horny.py | brussels-sprout/smeg-alt | 4a41d4501c134addb7674ae13646684af46d14d9 | [
"MIT"
] | null | null | null | horny.py | brussels-sprout/smeg-alt | 4a41d4501c134addb7674ae13646684af46d14d9 | [
"MIT"
] | null | null | null | horny.py | brussels-sprout/smeg-alt | 4a41d4501c134addb7674ae13646684af46d14d9 | [
"MIT"
] | null | null | null | import xml.etree.ElementTree as ET
import requests
import os
import re
from dotenv import load_dotenv
load_dotenv()
headers = {
"user-agent": "SmegAlt/1.0 (by Snekky on e621)",
}
def gelbooru(text):
if text.startswith("["):
amount = re.search("(?<=\[).+?(?=\])", text) # i still don't know regex and just rely on stackoverflow
amount = amount.group(0)
try:
amount = int(amount)
if amount > 100: amount = 100
except ValueError: # someone is trying to break the bot again
amount = 1
text = re.sub("\[.*?\]", "", text)
else:
amount = 1
text = text.replace(" ", "+")
GELBOORU_API = os.getenv('GELBOORU_API')
text = "https://gelbooru.com/index.php?page=dapi&s=post&q=index&limit=100" + GELBOORU_API + "&tags=" + text
r = requests.get(text)
xml = ET.fromstring(r.content)
returnvalue = {"posts": xml.findall("post"), "amount": amount}
return returnvalue
def e621(text):
# text = text.replace(" ", "+")
text = "https://e621.net/post/index.json" # nothing works lol unepic
r = requests.get(text, headers=headers)
return "this should be working but it is not, " + str(r.status_code)
| 31.384615 | 111 | 0.606209 |
acf315a4f2e0fa95d4edefb1bb9937b852c72783 | 77,950 | py | Python | Lib/test/test_http_cookiejar.py | Iridocyclitis562/cpython | ca7fe5063593958e5efdf90f068582837f07bd14 | [
"CNRI-Python-GPL-Compatible"
] | 1 | 2020-12-22T11:30:05.000Z | 2020-12-22T11:30:05.000Z | Lib/test/test_http_cookiejar.py | Iridocyclitis562/cpython | ca7fe5063593958e5efdf90f068582837f07bd14 | [
"CNRI-Python-GPL-Compatible"
] | null | null | null | Lib/test/test_http_cookiejar.py | Iridocyclitis562/cpython | ca7fe5063593958e5efdf90f068582837f07bd14 | [
"CNRI-Python-GPL-Compatible"
] | null | null | null | """Tests for http/cookiejar.py."""
import os
import re
import test.support
import time
import unittest
import urllib.request
import pathlib
from http.cookiejar import (time2isoz, http2time, iso2time, time2netscape,
parse_ns_headers, join_header_words, split_header_words, Cookie,
CookieJar, DefaultCookiePolicy, LWPCookieJar, MozillaCookieJar,
LoadError, lwp_cookie_str, DEFAULT_HTTP_PORT, escape_path,
reach, is_HDN, domain_match, user_domain_match, request_path,
request_port, request_host)
class DateTimeTests(unittest.TestCase):
def test_time2isoz(self):
base = 1019227000
day = 24*3600
self.assertEqual(time2isoz(base), "2002-04-19 14:36:40Z")
self.assertEqual(time2isoz(base+day), "2002-04-20 14:36:40Z")
self.assertEqual(time2isoz(base+2*day), "2002-04-21 14:36:40Z")
self.assertEqual(time2isoz(base+3*day), "2002-04-22 14:36:40Z")
az = time2isoz()
bz = time2isoz(500000)
for text in (az, bz):
self.assertRegex(text, r"^\d{4}-\d\d-\d\d \d\d:\d\d:\d\dZ$",
"bad time2isoz format: %s %s" % (az, bz))
def test_time2netscape(self):
base = 1019227000
day = 24*3600
self.assertEqual(time2netscape(base), "Fri, 19-Apr-2002 14:36:40 GMT")
self.assertEqual(time2netscape(base+day),
"Sat, 20-Apr-2002 14:36:40 GMT")
self.assertEqual(time2netscape(base+2*day),
"Sun, 21-Apr-2002 14:36:40 GMT")
self.assertEqual(time2netscape(base+3*day),
"Mon, 22-Apr-2002 14:36:40 GMT")
az = time2netscape()
bz = time2netscape(500000)
for text in (az, bz):
# Format "%s, %02d-%s-%04d %02d:%02d:%02d GMT"
self.assertRegex(
text,
r"[a-zA-Z]{3}, \d{2}-[a-zA-Z]{3}-\d{4} \d{2}:\d{2}:\d{2} GMT$",
"bad time2netscape format: %s %s" % (az, bz))
def test_http2time(self):
def parse_date(text):
return time.gmtime(http2time(text))[:6]
self.assertEqual(parse_date("01 Jan 2001"), (2001, 1, 1, 0, 0, 0.0))
# this test will break around year 2070
self.assertEqual(parse_date("03-Feb-20"), (2020, 2, 3, 0, 0, 0.0))
# this test will break around year 2048
self.assertEqual(parse_date("03-Feb-98"), (1998, 2, 3, 0, 0, 0.0))
def test_http2time_formats(self):
# test http2time for supported dates. Test cases with 2 digit year
# will probably break in year 2044.
tests = [
'Thu, 03 Feb 1994 00:00:00 GMT', # proposed new HTTP format
'Thursday, 03-Feb-94 00:00:00 GMT', # old rfc850 HTTP format
'Thursday, 03-Feb-1994 00:00:00 GMT', # broken rfc850 HTTP format
'03 Feb 1994 00:00:00 GMT', # HTTP format (no weekday)
'03-Feb-94 00:00:00 GMT', # old rfc850 (no weekday)
'03-Feb-1994 00:00:00 GMT', # broken rfc850 (no weekday)
'03-Feb-1994 00:00 GMT', # broken rfc850 (no weekday, no seconds)
'03-Feb-1994 00:00', # broken rfc850 (no weekday, no seconds, no tz)
'02-Feb-1994 24:00', # broken rfc850 (no weekday, no seconds,
# no tz) using hour 24 with yesterday date
'03-Feb-94', # old rfc850 HTTP format (no weekday, no time)
'03-Feb-1994', # broken rfc850 HTTP format (no weekday, no time)
'03 Feb 1994', # proposed new HTTP format (no weekday, no time)
# A few tests with extra space at various places
' 03 Feb 1994 0:00 ',
' 03-Feb-1994 ',
]
test_t = 760233600 # assume broken POSIX counting of seconds
result = time2isoz(test_t)
expected = "1994-02-03 00:00:00Z"
self.assertEqual(result, expected,
"%s => '%s' (%s)" % (test_t, result, expected))
for s in tests:
self.assertEqual(http2time(s), test_t, s)
self.assertEqual(http2time(s.lower()), test_t, s.lower())
self.assertEqual(http2time(s.upper()), test_t, s.upper())
def test_http2time_garbage(self):
for test in [
'',
'Garbage',
'Mandag 16. September 1996',
'01-00-1980',
'01-13-1980',
'00-01-1980',
'32-01-1980',
'01-01-1980 25:00:00',
'01-01-1980 00:61:00',
'01-01-1980 00:00:62',
'08-Oct-3697739',
'08-01-3697739',
'09 Feb 19942632 22:23:32 GMT',
'Wed, 09 Feb 1994834 22:23:32 GMT',
]:
self.assertIsNone(http2time(test),
"http2time(%s) is not None\n"
"http2time(test) %s" % (test, http2time(test)))
def test_iso2time(self):
def parse_date(text):
return time.gmtime(iso2time(text))[:6]
# ISO 8601 compact format
self.assertEqual(parse_date("19940203T141529Z"),
(1994, 2, 3, 14, 15, 29))
# ISO 8601 with time behind UTC
self.assertEqual(parse_date("1994-02-03 07:15:29 -0700"),
(1994, 2, 3, 14, 15, 29))
# ISO 8601 with time ahead of UTC
self.assertEqual(parse_date("1994-02-03 19:45:29 +0530"),
(1994, 2, 3, 14, 15, 29))
def test_iso2time_formats(self):
# test iso2time for supported dates.
tests = [
'1994-02-03 00:00:00 -0000', # ISO 8601 format
'1994-02-03 00:00:00 +0000', # ISO 8601 format
'1994-02-03 00:00:00', # zone is optional
'1994-02-03', # only date
'1994-02-03T00:00:00', # Use T as separator
'19940203', # only date
'1994-02-02 24:00:00', # using hour-24 yesterday date
'19940203T000000Z', # ISO 8601 compact format
# A few tests with extra space at various places
' 1994-02-03 ',
' 1994-02-03T00:00:00 ',
]
test_t = 760233600 # assume broken POSIX counting of seconds
for s in tests:
self.assertEqual(iso2time(s), test_t, s)
self.assertEqual(iso2time(s.lower()), test_t, s.lower())
self.assertEqual(iso2time(s.upper()), test_t, s.upper())
def test_iso2time_garbage(self):
for test in [
'',
'Garbage',
'Thursday, 03-Feb-94 00:00:00 GMT',
'1980-00-01',
'1980-13-01',
'1980-01-00',
'1980-01-32',
'1980-01-01 25:00:00',
'1980-01-01 00:61:00',
'01-01-1980 00:00:62',
'01-01-1980T00:00:62',
'19800101T250000Z',
]:
self.assertIsNone(iso2time(test),
"iso2time(%r)" % test)
class HeaderTests(unittest.TestCase):
def test_parse_ns_headers(self):
# quotes should be stripped
expected = [[('foo', 'bar'), ('expires', 2209069412), ('version', '0')]]
for hdr in [
'foo=bar; expires=01 Jan 2040 22:23:32 GMT',
'foo=bar; expires="01 Jan 2040 22:23:32 GMT"',
]:
self.assertEqual(parse_ns_headers([hdr]), expected)
def test_parse_ns_headers_version(self):
# quotes should be stripped
expected = [[('foo', 'bar'), ('version', '1')]]
for hdr in [
'foo=bar; version="1"',
'foo=bar; Version="1"',
]:
self.assertEqual(parse_ns_headers([hdr]), expected)
def test_parse_ns_headers_special_names(self):
# names such as 'expires' are not special in first name=value pair
# of Set-Cookie: header
# Cookie with name 'expires'
hdr = 'expires=01 Jan 2040 22:23:32 GMT'
expected = [[("expires", "01 Jan 2040 22:23:32 GMT"), ("version", "0")]]
self.assertEqual(parse_ns_headers([hdr]), expected)
def test_join_header_words(self):
joined = join_header_words([[("foo", None), ("bar", "baz")]])
self.assertEqual(joined, "foo; bar=baz")
self.assertEqual(join_header_words([[]]), "")
def test_split_header_words(self):
tests = [
("foo", [[("foo", None)]]),
("foo=bar", [[("foo", "bar")]]),
(" foo ", [[("foo", None)]]),
(" foo= ", [[("foo", "")]]),
(" foo=", [[("foo", "")]]),
(" foo= ; ", [[("foo", "")]]),
(" foo= ; bar= baz ", [[("foo", ""), ("bar", "baz")]]),
("foo=bar bar=baz", [[("foo", "bar"), ("bar", "baz")]]),
# doesn't really matter if this next fails, but it works ATM
("foo= bar=baz", [[("foo", "bar=baz")]]),
("foo=bar;bar=baz", [[("foo", "bar"), ("bar", "baz")]]),
('foo bar baz', [[("foo", None), ("bar", None), ("baz", None)]]),
("a, b, c", [[("a", None)], [("b", None)], [("c", None)]]),
(r'foo; bar=baz, spam=, foo="\,\;\"", bar= ',
[[("foo", None), ("bar", "baz")],
[("spam", "")], [("foo", ',;"')], [("bar", "")]]),
]
for arg, expect in tests:
try:
result = split_header_words([arg])
except:
import traceback, io
f = io.StringIO()
traceback.print_exc(None, f)
result = "(error -- traceback follows)\n\n%s" % f.getvalue()
self.assertEqual(result, expect, """
When parsing: '%s'
Expected: '%s'
Got: '%s'
""" % (arg, expect, result))
def test_roundtrip(self):
tests = [
("foo", "foo"),
("foo=bar", "foo=bar"),
(" foo ", "foo"),
("foo=", 'foo=""'),
("foo=bar bar=baz", "foo=bar; bar=baz"),
("foo=bar;bar=baz", "foo=bar; bar=baz"),
('foo bar baz', "foo; bar; baz"),
(r'foo="\"" bar="\\"', r'foo="\""; bar="\\"'),
('foo,,,bar', 'foo, bar'),
('foo=bar,bar=baz', 'foo=bar, bar=baz'),
('text/html; charset=iso-8859-1',
'text/html; charset="iso-8859-1"'),
('foo="bar"; port="80,81"; discard, bar=baz',
'foo=bar; port="80,81"; discard, bar=baz'),
(r'Basic realm="\"foo\\\\bar\""',
r'Basic; realm="\"foo\\\\bar\""')
]
for arg, expect in tests:
input = split_header_words([arg])
res = join_header_words(input)
self.assertEqual(res, expect, """
When parsing: '%s'
Expected: '%s'
Got: '%s'
Input was: '%s'
""" % (arg, expect, res, input))
class FakeResponse:
def __init__(self, headers=[], url=None):
"""
headers: list of RFC822-style 'Key: value' strings
"""
import email
self._headers = email.message_from_string("\n".join(headers))
self._url = url
def info(self): return self._headers
def interact_2965(cookiejar, url, *set_cookie_hdrs):
return _interact(cookiejar, url, set_cookie_hdrs, "Set-Cookie2")
def interact_netscape(cookiejar, url, *set_cookie_hdrs):
return _interact(cookiejar, url, set_cookie_hdrs, "Set-Cookie")
def _interact(cookiejar, url, set_cookie_hdrs, hdr_name):
"""Perform a single request / response cycle, returning Cookie: header."""
req = urllib.request.Request(url)
cookiejar.add_cookie_header(req)
cookie_hdr = req.get_header("Cookie", "")
headers = []
for hdr in set_cookie_hdrs:
headers.append("%s: %s" % (hdr_name, hdr))
res = FakeResponse(headers, url)
cookiejar.extract_cookies(res, req)
return cookie_hdr
class FileCookieJarTests(unittest.TestCase):
def test_constructor_with_str(self):
filename = test.support.TESTFN
c = LWPCookieJar(filename)
self.assertEqual(c.filename, filename)
def test_constructor_with_path_like(self):
filename = pathlib.Path(test.support.TESTFN)
c = LWPCookieJar(filename)
self.assertEqual(c.filename, os.fspath(filename))
def test_constructor_with_none(self):
c = LWPCookieJar(None)
self.assertIsNone(c.filename)
def test_constructor_with_other_types(self):
class A:
pass
for type_ in (int, float, A):
with self.subTest(filename=type_):
with self.assertRaises(TypeError):
instance = type_()
c = LWPCookieJar(filename=instance)
def test_lwp_valueless_cookie(self):
# cookies with no value should be saved and loaded consistently
filename = test.support.TESTFN
c = LWPCookieJar()
interact_netscape(c, "http://www.acme.com/", 'boo')
self.assertEqual(c._cookies["www.acme.com"]["/"]["boo"].value, None)
try:
c.save(filename, ignore_discard=True)
c = LWPCookieJar()
c.load(filename, ignore_discard=True)
finally:
try: os.unlink(filename)
except OSError: pass
self.assertEqual(c._cookies["www.acme.com"]["/"]["boo"].value, None)
def test_bad_magic(self):
# OSErrors (eg. file doesn't exist) are allowed to propagate
filename = test.support.TESTFN
for cookiejar_class in LWPCookieJar, MozillaCookieJar:
c = cookiejar_class()
try:
c.load(filename="for this test to work, a file with this "
"filename should not exist")
except OSError as exc:
# an OSError subclass (likely FileNotFoundError), but not
# LoadError
self.assertIsNot(exc.__class__, LoadError)
else:
self.fail("expected OSError for invalid filename")
# Invalid contents of cookies file (eg. bad magic string)
# causes a LoadError.
try:
with open(filename, "w") as f:
f.write("oops\n")
for cookiejar_class in LWPCookieJar, MozillaCookieJar:
c = cookiejar_class()
self.assertRaises(LoadError, c.load, filename)
finally:
try: os.unlink(filename)
except OSError: pass
class CookieTests(unittest.TestCase):
# XXX
# Get rid of string comparisons where not actually testing str / repr.
# .clear() etc.
# IP addresses like 50 (single number, no dot) and domain-matching
# functions (and is_HDN)? See draft RFC 2965 errata.
# Strictness switches
# is_third_party()
# unverifiability / third-party blocking
# Netscape cookies work the same as RFC 2965 with regard to port.
# Set-Cookie with negative max age.
# If turn RFC 2965 handling off, Set-Cookie2 cookies should not clobber
# Set-Cookie cookies.
# Cookie2 should be sent if *any* cookies are not V1 (ie. V0 OR V2 etc.).
# Cookies (V1 and V0) with no expiry date should be set to be discarded.
# RFC 2965 Quoting:
# Should accept unquoted cookie-attribute values? check errata draft.
# Which are required on the way in and out?
# Should always return quoted cookie-attribute values?
# Proper testing of when RFC 2965 clobbers Netscape (waiting for errata).
# Path-match on return (same for V0 and V1).
# RFC 2965 acceptance and returning rules
# Set-Cookie2 without version attribute is rejected.
# Netscape peculiarities list from Ronald Tschalar.
# The first two still need tests, the rest are covered.
## - Quoting: only quotes around the expires value are recognized as such
## (and yes, some folks quote the expires value); quotes around any other
## value are treated as part of the value.
## - White space: white space around names and values is ignored
## - Default path: if no path parameter is given, the path defaults to the
## path in the request-uri up to, but not including, the last '/'. Note
## that this is entirely different from what the spec says.
## - Commas and other delimiters: Netscape just parses until the next ';'.
## This means it will allow commas etc inside values (and yes, both
## commas and equals are commonly appear in the cookie value). This also
## means that if you fold multiple Set-Cookie header fields into one,
## comma-separated list, it'll be a headache to parse (at least my head
## starts hurting every time I think of that code).
## - Expires: You'll get all sorts of date formats in the expires,
## including empty expires attributes ("expires="). Be as flexible as you
## can, and certainly don't expect the weekday to be there; if you can't
## parse it, just ignore it and pretend it's a session cookie.
## - Domain-matching: Netscape uses the 2-dot rule for _all_ domains, not
## just the 7 special TLD's listed in their spec. And folks rely on
## that...
def test_domain_return_ok(self):
# test optimization: .domain_return_ok() should filter out most
# domains in the CookieJar before we try to access them (because that
# may require disk access -- in particular, with MSIECookieJar)
# This is only a rough check for performance reasons, so it's not too
# critical as long as it's sufficiently liberal.
pol = DefaultCookiePolicy()
for url, domain, ok in [
("http://foo.bar.com/", "blah.com", False),
("http://foo.bar.com/", "rhubarb.blah.com", False),
("http://foo.bar.com/", "rhubarb.foo.bar.com", False),
("http://foo.bar.com/", ".foo.bar.com", True),
("http://foo.bar.com/", "foo.bar.com", True),
("http://foo.bar.com/", ".bar.com", True),
("http://foo.bar.com/", "bar.com", True),
("http://foo.bar.com/", "com", True),
("http://foo.com/", "rhubarb.foo.com", False),
("http://foo.com/", ".foo.com", True),
("http://foo.com/", "foo.com", True),
("http://foo.com/", "com", True),
("http://foo/", "rhubarb.foo", False),
("http://foo/", ".foo", True),
("http://foo/", "foo", True),
("http://foo/", "foo.local", True),
("http://foo/", ".local", True),
("http://barfoo.com", ".foo.com", False),
("http://barfoo.com", "foo.com", False),
]:
request = urllib.request.Request(url)
r = pol.domain_return_ok(domain, request)
if ok: self.assertTrue(r)
else: self.assertFalse(r)
def test_missing_value(self):
# missing = sign in Cookie: header is regarded by Mozilla as a missing
# name, and by http.cookiejar as a missing value
filename = test.support.TESTFN
c = MozillaCookieJar(filename)
interact_netscape(c, "http://www.acme.com/", 'eggs')
interact_netscape(c, "http://www.acme.com/", '"spam"; path=/foo/')
cookie = c._cookies["www.acme.com"]["/"]["eggs"]
self.assertIsNone(cookie.value)
self.assertEqual(cookie.name, "eggs")
cookie = c._cookies["www.acme.com"]['/foo/']['"spam"']
self.assertIsNone(cookie.value)
self.assertEqual(cookie.name, '"spam"')
self.assertEqual(lwp_cookie_str(cookie), (
r'"spam"; path="/foo/"; domain="www.acme.com"; '
'path_spec; discard; version=0'))
old_str = repr(c)
c.save(ignore_expires=True, ignore_discard=True)
try:
c = MozillaCookieJar(filename)
c.revert(ignore_expires=True, ignore_discard=True)
finally:
os.unlink(c.filename)
# cookies unchanged apart from lost info re. whether path was specified
self.assertEqual(
repr(c),
re.sub("path_specified=%s" % True, "path_specified=%s" % False,
old_str)
)
self.assertEqual(interact_netscape(c, "http://www.acme.com/foo/"),
'"spam"; eggs')
def test_rfc2109_handling(self):
# RFC 2109 cookies are handled as RFC 2965 or Netscape cookies,
# dependent on policy settings
for rfc2109_as_netscape, rfc2965, version in [
# default according to rfc2965 if not explicitly specified
(None, False, 0),
(None, True, 1),
# explicit rfc2109_as_netscape
(False, False, None), # version None here means no cookie stored
(False, True, 1),
(True, False, 0),
(True, True, 0),
]:
policy = DefaultCookiePolicy(
rfc2109_as_netscape=rfc2109_as_netscape,
rfc2965=rfc2965)
c = CookieJar(policy)
interact_netscape(c, "http://www.example.com/", "ni=ni; Version=1")
try:
cookie = c._cookies["www.example.com"]["/"]["ni"]
except KeyError:
self.assertIsNone(version) # didn't expect a stored cookie
else:
self.assertEqual(cookie.version, version)
# 2965 cookies are unaffected
interact_2965(c, "http://www.example.com/",
"foo=bar; Version=1")
if rfc2965:
cookie2965 = c._cookies["www.example.com"]["/"]["foo"]
self.assertEqual(cookie2965.version, 1)
def test_ns_parser(self):
c = CookieJar()
interact_netscape(c, "http://www.acme.com/",
'spam=eggs; DoMain=.acme.com; port; blArgh="feep"')
interact_netscape(c, "http://www.acme.com/", 'ni=ni; port=80,8080')
interact_netscape(c, "http://www.acme.com:80/", 'nini=ni')
interact_netscape(c, "http://www.acme.com:80/", 'foo=bar; expires=')
interact_netscape(c, "http://www.acme.com:80/", 'spam=eggs; '
'expires="Foo Bar 25 33:22:11 3022"')
interact_netscape(c, 'http://www.acme.com/', 'fortytwo=')
interact_netscape(c, 'http://www.acme.com/', '=unladenswallow')
interact_netscape(c, 'http://www.acme.com/', 'holyhandgrenade')
cookie = c._cookies[".acme.com"]["/"]["spam"]
self.assertEqual(cookie.domain, ".acme.com")
self.assertTrue(cookie.domain_specified)
self.assertEqual(cookie.port, DEFAULT_HTTP_PORT)
self.assertFalse(cookie.port_specified)
# case is preserved
self.assertTrue(cookie.has_nonstandard_attr("blArgh"))
self.assertFalse(cookie.has_nonstandard_attr("blargh"))
cookie = c._cookies["www.acme.com"]["/"]["ni"]
self.assertEqual(cookie.domain, "www.acme.com")
self.assertFalse(cookie.domain_specified)
self.assertEqual(cookie.port, "80,8080")
self.assertTrue(cookie.port_specified)
cookie = c._cookies["www.acme.com"]["/"]["nini"]
self.assertIsNone(cookie.port)
self.assertFalse(cookie.port_specified)
# invalid expires should not cause cookie to be dropped
foo = c._cookies["www.acme.com"]["/"]["foo"]
spam = c._cookies["www.acme.com"]["/"]["foo"]
self.assertIsNone(foo.expires)
self.assertIsNone(spam.expires)
cookie = c._cookies['www.acme.com']['/']['fortytwo']
self.assertIsNotNone(cookie.value)
self.assertEqual(cookie.value, '')
# there should be a distinction between a present but empty value
# (above) and a value that's entirely missing (below)
cookie = c._cookies['www.acme.com']['/']['holyhandgrenade']
self.assertIsNone(cookie.value)
def test_ns_parser_special_names(self):
# names such as 'expires' are not special in first name=value pair
# of Set-Cookie: header
c = CookieJar()
interact_netscape(c, "http://www.acme.com/", 'expires=eggs')
interact_netscape(c, "http://www.acme.com/", 'version=eggs; spam=eggs')
cookies = c._cookies["www.acme.com"]["/"]
self.assertIn('expires', cookies)
self.assertIn('version', cookies)
def test_expires(self):
# if expires is in future, keep cookie...
c = CookieJar()
future = time2netscape(time.time()+3600)
interact_netscape(c, "http://www.acme.com/", 'spam="bar"; expires=%s' %
future)
self.assertEqual(len(c), 1)
now = time2netscape(time.time()-1)
# ... and if in past or present, discard it
interact_netscape(c, "http://www.acme.com/", 'foo="eggs"; expires=%s' %
now)
h = interact_netscape(c, "http://www.acme.com/")
self.assertEqual(len(c), 1)
self.assertIn('spam="bar"', h)
self.assertNotIn("foo", h)
# max-age takes precedence over expires, and zero max-age is request to
# delete both new cookie and any old matching cookie
interact_netscape(c, "http://www.acme.com/", 'eggs="bar"; expires=%s' %
future)
interact_netscape(c, "http://www.acme.com/", 'bar="bar"; expires=%s' %
future)
self.assertEqual(len(c), 3)
interact_netscape(c, "http://www.acme.com/", 'eggs="bar"; '
'expires=%s; max-age=0' % future)
interact_netscape(c, "http://www.acme.com/", 'bar="bar"; '
'max-age=0; expires=%s' % future)
h = interact_netscape(c, "http://www.acme.com/")
self.assertEqual(len(c), 1)
# test expiry at end of session for cookies with no expires attribute
interact_netscape(c, "http://www.rhubarb.net/", 'whum="fizz"')
self.assertEqual(len(c), 2)
c.clear_session_cookies()
self.assertEqual(len(c), 1)
self.assertIn('spam="bar"', h)
# test if fractional expiry is accepted
cookie = Cookie(0, "name", "value",
None, False, "www.python.org",
True, False, "/",
False, False, "1444312383.018307",
False, None, None,
{})
self.assertEqual(cookie.expires, 1444312383)
# XXX RFC 2965 expiry rules (some apply to V0 too)
def test_default_path(self):
    """With no Path attribute, the default path comes from the request
    URL: RFC 2965 keeps the trailing directory slash, Netscape drops
    everything from the rightmost '/' onwards."""
    # RFC 2965
    pol = DefaultCookiePolicy(rfc2965=True)
    c = CookieJar(pol)
    interact_2965(c, "http://www.acme.com/", 'spam="bar"; Version="1"')
    self.assertIn("/", c._cookies["www.acme.com"])
    c = CookieJar(pol)
    interact_2965(c, "http://www.acme.com/blah", 'eggs="bar"; Version="1"')
    self.assertIn("/", c._cookies["www.acme.com"])
    c = CookieJar(pol)
    interact_2965(c, "http://www.acme.com/blah/rhubarb",
                  'eggs="bar"; Version="1"')
    self.assertIn("/blah/", c._cookies["www.acme.com"])
    c = CookieJar(pol)
    interact_2965(c, "http://www.acme.com/blah/rhubarb/",
                  'eggs="bar"; Version="1"')
    self.assertIn("/blah/rhubarb/", c._cookies["www.acme.com"])
    # Netscape
    c = CookieJar()
    interact_netscape(c, "http://www.acme.com/", 'spam="bar"')
    self.assertIn("/", c._cookies["www.acme.com"])
    c = CookieJar()
    interact_netscape(c, "http://www.acme.com/blah", 'eggs="bar"')
    self.assertIn("/", c._cookies["www.acme.com"])
    c = CookieJar()
    interact_netscape(c, "http://www.acme.com/blah/rhubarb", 'eggs="bar"')
    self.assertIn("/blah", c._cookies["www.acme.com"])
    c = CookieJar()
    interact_netscape(c, "http://www.acme.com/blah/rhubarb/", 'eggs="bar"')
    self.assertIn("/blah/rhubarb", c._cookies["www.acme.com"])
def test_default_path_with_query(self):
    """The query string is not part of the default cookie path, but the
    cookie is still returned for the same URI (query included)."""
    cj = CookieJar()
    uri = "http://example.com/?spam/eggs"
    value = 'eggs="bar"'
    interact_netscape(cj, uri, value)
    # Default path does not include query, so is "/", not "/?spam".
    self.assertIn("/", cj._cookies["example.com"])
    # Cookie is sent back to the same URI.
    self.assertEqual(interact_netscape(cj, uri), value)
def test_escape_path(self):
cases = [
# quoted safe
("/foo%2f/bar", "/foo%2F/bar"),
("/foo%2F/bar", "/foo%2F/bar"),
# quoted %
("/foo%%/bar", "/foo%%/bar"),
# quoted unsafe
("/fo%19o/bar", "/fo%19o/bar"),
("/fo%7do/bar", "/fo%7Do/bar"),
# unquoted safe
("/foo/bar&", "/foo/bar&"),
("/foo//bar", "/foo//bar"),
("\176/foo/bar", "\176/foo/bar"),
# unquoted unsafe
("/foo\031/bar", "/foo%19/bar"),
("/\175foo/bar", "/%7Dfoo/bar"),
# unicode, latin-1 range
("/foo/bar\u00fc", "/foo/bar%C3%BC"), # UTF-8 encoded
# unicode
("/foo/bar\uabcd", "/foo/bar%EA%AF%8D"), # UTF-8 encoded
]
for arg, result in cases:
self.assertEqual(escape_path(arg), result)
def test_request_path(self):
# with parameters
req = urllib.request.Request(
"http://www.example.com/rheum/rhaponticum;"
"foo=bar;sing=song?apples=pears&spam=eggs#ni")
self.assertEqual(request_path(req),
"/rheum/rhaponticum;foo=bar;sing=song")
# without parameters
req = urllib.request.Request(
"http://www.example.com/rheum/rhaponticum?"
"apples=pears&spam=eggs#ni")
self.assertEqual(request_path(req), "/rheum/rhaponticum")
# missing final slash
req = urllib.request.Request("http://www.example.com")
self.assertEqual(request_path(req), "/")
def test_request_port(self):
req = urllib.request.Request("http://www.acme.com:1234/",
headers={"Host": "www.acme.com:4321"})
self.assertEqual(request_port(req), "1234")
req = urllib.request.Request("http://www.acme.com/",
headers={"Host": "www.acme.com:4321"})
self.assertEqual(request_port(req), DEFAULT_HTTP_PORT)
def test_request_host(self):
# this request is illegal (RFC2616, 14.2.3)
req = urllib.request.Request("http://1.1.1.1/",
headers={"Host": "www.acme.com:80"})
# libwww-perl wants this response, but that seems wrong (RFC 2616,
# section 5.2, point 1., and RFC 2965 section 1, paragraph 3)
#self.assertEqual(request_host(req), "www.acme.com")
self.assertEqual(request_host(req), "1.1.1.1")
req = urllib.request.Request("http://www.acme.com/",
headers={"Host": "irrelevant.com"})
self.assertEqual(request_host(req), "www.acme.com")
# port shouldn't be in request-host
req = urllib.request.Request("http://www.acme.com:2345/resource.html",
headers={"Host": "www.acme.com:5432"})
self.assertEqual(request_host(req), "www.acme.com")
def test_is_HDN(self):
self.assertTrue(is_HDN("foo.bar.com"))
self.assertTrue(is_HDN("1foo2.3bar4.5com"))
self.assertFalse(is_HDN("192.168.1.1"))
self.assertFalse(is_HDN(""))
self.assertFalse(is_HDN("."))
self.assertFalse(is_HDN(".foo.bar.com"))
self.assertFalse(is_HDN("..foo"))
self.assertFalse(is_HDN("foo."))
def test_reach(self):
self.assertEqual(reach("www.acme.com"), ".acme.com")
self.assertEqual(reach("acme.com"), "acme.com")
self.assertEqual(reach("acme.local"), ".local")
self.assertEqual(reach(".local"), ".local")
self.assertEqual(reach(".com"), ".com")
self.assertEqual(reach("."), ".")
self.assertEqual(reach(""), "")
self.assertEqual(reach("192.168.0.1"), "192.168.0.1")
def test_domain_match(self):
self.assertTrue(domain_match("192.168.1.1", "192.168.1.1"))
self.assertFalse(domain_match("192.168.1.1", ".168.1.1"))
self.assertTrue(domain_match("x.y.com", "x.Y.com"))
self.assertTrue(domain_match("x.y.com", ".Y.com"))
self.assertFalse(domain_match("x.y.com", "Y.com"))
self.assertTrue(domain_match("a.b.c.com", ".c.com"))
self.assertFalse(domain_match(".c.com", "a.b.c.com"))
self.assertTrue(domain_match("example.local", ".local"))
self.assertFalse(domain_match("blah.blah", ""))
self.assertFalse(domain_match("", ".rhubarb.rhubarb"))
self.assertTrue(domain_match("", ""))
self.assertTrue(user_domain_match("acme.com", "acme.com"))
self.assertFalse(user_domain_match("acme.com", ".acme.com"))
self.assertTrue(user_domain_match("rhubarb.acme.com", ".acme.com"))
self.assertTrue(user_domain_match("www.rhubarb.acme.com", ".acme.com"))
self.assertTrue(user_domain_match("x.y.com", "x.Y.com"))
self.assertTrue(user_domain_match("x.y.com", ".Y.com"))
self.assertFalse(user_domain_match("x.y.com", "Y.com"))
self.assertTrue(user_domain_match("y.com", "Y.com"))
self.assertFalse(user_domain_match(".y.com", "Y.com"))
self.assertTrue(user_domain_match(".y.com", ".Y.com"))
self.assertTrue(user_domain_match("x.y.com", ".com"))
self.assertFalse(user_domain_match("x.y.com", "com"))
self.assertFalse(user_domain_match("x.y.com", "m"))
self.assertFalse(user_domain_match("x.y.com", ".m"))
self.assertFalse(user_domain_match("x.y.com", ""))
self.assertFalse(user_domain_match("x.y.com", "."))
self.assertTrue(user_domain_match("192.168.1.1", "192.168.1.1"))
# not both HDNs, so must string-compare equal to match
self.assertFalse(user_domain_match("192.168.1.1", ".168.1.1"))
self.assertFalse(user_domain_match("192.168.1.1", "."))
# empty string is a special case
self.assertFalse(user_domain_match("192.168.1.1", ""))
def test_wrong_domain(self):
    """A cookie whose effective request-host does not domain-match its
    Domain attribute is rejected."""
    # Cookies whose effective request-host name does not domain-match the
    # domain are rejected.
    # XXX far from complete
    c = CookieJar()
    interact_2965(c, "http://www.nasty.com/",
                  'foo=bar; domain=friendly.org; Version="1"')
    self.assertEqual(len(c), 0)
def test_strict_domain(self):
    """With strict_domain=True, country-code pseudo-TLDs like .co.uk are
    refused as Domain values."""
    # Cookies whose domain is a country-code tld like .co.uk should
    # not be set if CookiePolicy.strict_domain is true.
    cp = DefaultCookiePolicy(strict_domain=True)
    cj = CookieJar(policy=cp)
    interact_netscape(cj, "http://example.co.uk/", 'no=problemo')
    interact_netscape(cj, "http://example.co.uk/",
                      'okey=dokey; Domain=.example.co.uk')
    self.assertEqual(len(cj), 2)
    for pseudo_tld in [".co.uk", ".org.za", ".tx.us", ".name.us"]:
        interact_netscape(cj, "http://example.%s/" % pseudo_tld,
                          'spam=eggs; Domain=.co.uk')
        # rejected, so the jar size is unchanged
        self.assertEqual(len(cj), 2)
def test_two_component_domain_ns(self):
    """Netscape rules for two-component domains: implied and explicit
    forms (with or without leading dot) are accepted, and the cookie is
    returned to subdomains unless DomainStrictNonDomain is set."""
    # Netscape: .www.bar.com, www.bar.com, .bar.com, bar.com, no domain
    # should all get accepted, as should .acme.com, acme.com and no domain
    # for 2-component domains like acme.com.
    c = CookieJar()
    # two-component V0 domain is OK
    interact_netscape(c, "http://foo.net/", 'ns=bar')
    self.assertEqual(len(c), 1)
    self.assertEqual(c._cookies["foo.net"]["/"]["ns"].value, "bar")
    self.assertEqual(interact_netscape(c, "http://foo.net/"), "ns=bar")
    # *will* be returned to any other domain (unlike RFC 2965)...
    self.assertEqual(interact_netscape(c, "http://www.foo.net/"),
                     "ns=bar")
    # ...unless requested otherwise
    pol = DefaultCookiePolicy(
        strict_ns_domain=DefaultCookiePolicy.DomainStrictNonDomain)
    c.set_policy(pol)
    self.assertEqual(interact_netscape(c, "http://www.foo.net/"), "")
    # unlike RFC 2965, even explicit two-component domain is OK,
    # because .foo.net matches foo.net
    interact_netscape(c, "http://foo.net/foo/",
                      'spam1=eggs; domain=foo.net')
    # even if starts with a dot -- in NS rules, .foo.net matches foo.net!
    interact_netscape(c, "http://foo.net/foo/bar/",
                      'spam2=eggs; domain=.foo.net')
    self.assertEqual(len(c), 3)
    self.assertEqual(c._cookies[".foo.net"]["/foo"]["spam1"].value,
                     "eggs")
    self.assertEqual(c._cookies[".foo.net"]["/foo/bar"]["spam2"].value,
                     "eggs")
    self.assertEqual(interact_netscape(c, "http://foo.net/foo/bar/"),
                     "spam2=eggs; spam1=eggs; ns=bar")
    # top-level domain is too general
    interact_netscape(c, "http://foo.net/", 'nini="ni"; domain=.net')
    self.assertEqual(len(c), 3)
    ## # Netscape protocol doesn't allow non-special top level domains (such
    ## # as co.uk) in the domain attribute unless there are at least three
    ## # dots in it.
    # Oh yes it does!  Real implementations don't check this, and real
    # cookies (of course) rely on that behaviour.
    interact_netscape(c, "http://foo.co.uk", 'nasty=trick; domain=.co.uk')
    ## self.assertEqual(len(c), 2)
    self.assertEqual(len(c), 4)
def test_two_component_domain_rfc2965(self):
    """RFC 2965 rules for two-component domains: an implied domain is
    host-only, an explicit Domain is dot-prefixed (so it may fail to
    match the setting host), and over-general TLD domains are refused."""
    pol = DefaultCookiePolicy(rfc2965=True)
    c = CookieJar(pol)
    # two-component V1 domain is OK
    interact_2965(c, "http://foo.net/", 'foo=bar; Version="1"')
    self.assertEqual(len(c), 1)
    self.assertEqual(c._cookies["foo.net"]["/"]["foo"].value, "bar")
    self.assertEqual(interact_2965(c, "http://foo.net/"),
                     "$Version=1; foo=bar")
    # won't be returned to any other domain (because domain was implied)
    self.assertEqual(interact_2965(c, "http://www.foo.net/"), "")
    # unless domain is given explicitly, because then it must be
    # rewritten to start with a dot: foo.net --> .foo.net, which does
    # not domain-match foo.net
    interact_2965(c, "http://foo.net/foo",
                  'spam=eggs; domain=foo.net; path=/foo; Version="1"')
    self.assertEqual(len(c), 1)
    self.assertEqual(interact_2965(c, "http://foo.net/foo"),
                     "$Version=1; foo=bar")
    # explicit foo.net from three-component domain www.foo.net *does* get
    # set, because .foo.net domain-matches .foo.net
    interact_2965(c, "http://www.foo.net/foo/",
                  'spam=eggs; domain=foo.net; Version="1"')
    self.assertEqual(c._cookies[".foo.net"]["/foo/"]["spam"].value,
                     "eggs")
    self.assertEqual(len(c), 2)
    self.assertEqual(interact_2965(c, "http://foo.net/foo/"),
                     "$Version=1; foo=bar")
    self.assertEqual(interact_2965(c, "http://www.foo.net/foo/"),
                     '$Version=1; spam=eggs; $Domain="foo.net"')
    # top-level domain is too general
    interact_2965(c, "http://foo.net/",
                  'ni="ni"; domain=".net"; Version="1"')
    self.assertEqual(len(c), 2)
    # RFC 2965 doesn't require blocking this
    interact_2965(c, "http://foo.co.uk/",
                  'nasty=trick; domain=.co.uk; Version="1"')
    self.assertEqual(len(c), 3)
def test_domain_allow(self):
    """An allowed_domains whitelist restricts which hosts may set
    cookies, and a directly-set cookie from a non-allowed domain is
    never returned."""
    c = CookieJar(policy=DefaultCookiePolicy(
        blocked_domains=["acme.com"],
        allowed_domains=["www.acme.com"]))
    # acme.com is blocked, so nothing is stored
    req = urllib.request.Request("http://acme.com/")
    headers = ["Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/"]
    res = FakeResponse(headers, "http://acme.com/")
    c.extract_cookies(res, req)
    self.assertEqual(len(c), 0)
    # www.acme.com is in the allow list
    req = urllib.request.Request("http://www.acme.com/")
    res = FakeResponse(headers, "http://www.acme.com/")
    c.extract_cookies(res, req)
    self.assertEqual(len(c), 1)
    # www.coyote.com is not allowed, so extract is a no-op
    req = urllib.request.Request("http://www.coyote.com/")
    res = FakeResponse(headers, "http://www.coyote.com/")
    c.extract_cookies(res, req)
    self.assertEqual(len(c), 1)
    # set a cookie with non-allowed domain...
    req = urllib.request.Request("http://www.coyote.com/")
    res = FakeResponse(headers, "http://www.coyote.com/")
    cookies = c.make_cookies(res, req)
    c.set_cookie(cookies[0])
    self.assertEqual(len(c), 2)
    # ... and check is doesn't get returned
    c.add_cookie_header(req)
    self.assertFalse(req.has_header("Cookie"))
def test_domain_block(self):
    """blocked_domains prevents both storing and returning cookies;
    dotted vs. undotted entries differ, and the list can be changed on a
    live policy."""
    pol = DefaultCookiePolicy(
        rfc2965=True, blocked_domains=[".acme.com"])
    c = CookieJar(policy=pol)
    headers = ["Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/"]
    req = urllib.request.Request("http://www.acme.com/")
    res = FakeResponse(headers, "http://www.acme.com/")
    c.extract_cookies(res, req)
    self.assertEqual(len(c), 0)
    # undotted "acme.com" does not block the www.acme.com host
    p = pol.set_blocked_domains(["acme.com"])
    c.extract_cookies(res, req)
    self.assertEqual(len(c), 1)
    c.clear()
    req = urllib.request.Request("http://www.roadrunner.net/")
    res = FakeResponse(headers, "http://www.roadrunner.net/")
    c.extract_cookies(res, req)
    self.assertEqual(len(c), 1)
    req = urllib.request.Request("http://www.roadrunner.net/")
    c.add_cookie_header(req)
    self.assertTrue(req.has_header("Cookie"))
    self.assertTrue(req.has_header("Cookie2"))
    c.clear()
    pol.set_blocked_domains([".acme.com"])
    c.extract_cookies(res, req)
    self.assertEqual(len(c), 1)
    # set a cookie with blocked domain...
    req = urllib.request.Request("http://www.acme.com/")
    res = FakeResponse(headers, "http://www.acme.com/")
    cookies = c.make_cookies(res, req)
    c.set_cookie(cookies[0])
    self.assertEqual(len(c), 2)
    # ... and check is doesn't get returned
    c.add_cookie_header(req)
    self.assertFalse(req.has_header("Cookie"))
    c.clear()
    pol.set_blocked_domains([])
    req = urllib.request.Request("http://acme.com/")
    res = FakeResponse(headers, "http://acme.com/")
    cookies = c.make_cookies(res, req)
    c.extract_cookies(res, req)
    self.assertEqual(len(c), 1)
    req = urllib.request.Request("http://acme.com/")
    c.add_cookie_header(req)
    self.assertTrue(req.has_header("Cookie"))
    # "badacme.com" must not match the "acme.com" cookie domain
    req = urllib.request.Request("http://badacme.com/")
    c.add_cookie_header(req)
    self.assertFalse(pol.return_ok(cookies[0], req))
    self.assertFalse(req.has_header("Cookie"))
    p = pol.set_blocked_domains(["acme.com"])
    req = urllib.request.Request("http://acme.com/")
    c.add_cookie_header(req)
    self.assertFalse(req.has_header("Cookie"))
    req = urllib.request.Request("http://badacme.com/")
    c.add_cookie_header(req)
    self.assertFalse(req.has_header("Cookie"))
def test_secure(self):
    """The 'secure' attribute is parsed correctly for both Netscape and
    RFC 2965 cookies, with or without trailing whitespace."""
    for ns in True, False:
        for whitespace in " ", "":
            c = CookieJar()
            if ns:
                pol = DefaultCookiePolicy(rfc2965=False)
                int = interact_netscape
                vs = ""
            else:
                pol = DefaultCookiePolicy(rfc2965=True)
                int = interact_2965
                vs = "; Version=1"
            c.set_policy(pol)
            url = "http://www.acme.com/"
            int(c, url, "foo1=bar%s%s" % (vs, whitespace))
            int(c, url, "foo2=bar%s; secure%s" % (vs, whitespace))
            self.assertFalse(
                c._cookies["www.acme.com"]["/"]["foo1"].secure,
                "non-secure cookie registered secure")
            self.assertTrue(
                c._cookies["www.acme.com"]["/"]["foo2"].secure,
                "secure cookie registered non-secure")
def test_secure_block(self):
    """A secure cookie is only returned over secure schemes (https/wss),
    never over plain http or ws."""
    pol = DefaultCookiePolicy()
    c = CookieJar(policy=pol)
    headers = ["Set-Cookie: session=narf; secure; path=/"]
    req = urllib.request.Request("https://www.acme.com/")
    res = FakeResponse(headers, "https://www.acme.com/")
    c.extract_cookies(res, req)
    self.assertEqual(len(c), 1)
    req = urllib.request.Request("https://www.acme.com/")
    c.add_cookie_header(req)
    self.assertTrue(req.has_header("Cookie"))
    req = urllib.request.Request("http://www.acme.com/")
    c.add_cookie_header(req)
    self.assertFalse(req.has_header("Cookie"))
    # secure websocket protocol
    req = urllib.request.Request("wss://www.acme.com/")
    c.add_cookie_header(req)
    self.assertTrue(req.has_header("Cookie"))
    # non-secure websocket protocol
    req = urllib.request.Request("ws://www.acme.com/")
    c.add_cookie_header(req)
    self.assertFalse(req.has_header("Cookie"))
def test_custom_secure_protocols(self):
    """secure_protocols fully replaces the default list: https stops
    counting as secure once a custom list omits it."""
    pol = DefaultCookiePolicy(secure_protocols=["foos"])
    c = CookieJar(policy=pol)
    headers = ["Set-Cookie: session=narf; secure; path=/"]
    req = urllib.request.Request("https://www.acme.com/")
    res = FakeResponse(headers, "https://www.acme.com/")
    c.extract_cookies(res, req)
    self.assertEqual(len(c), 1)
    # test https removed from secure protocol list
    req = urllib.request.Request("https://www.acme.com/")
    c.add_cookie_header(req)
    self.assertFalse(req.has_header("Cookie"))
    req = urllib.request.Request("http://www.acme.com/")
    c.add_cookie_header(req)
    self.assertFalse(req.has_header("Cookie"))
    req = urllib.request.Request("foos://www.acme.com/")
    c.add_cookie_header(req)
    self.assertTrue(req.has_header("Cookie"))
    req = urllib.request.Request("foo://www.acme.com/")
    c.add_cookie_header(req)
    self.assertFalse(req.has_header("Cookie"))
def test_quote_cookie_value(self):
    """Backslashes and quotes in a cookie value are re-escaped when the
    cookie is returned to the server."""
    c = CookieJar(policy=DefaultCookiePolicy(rfc2965=True))
    interact_2965(c, "http://www.acme.com/", r'foo=\b"a"r; Version=1')
    h = interact_2965(c, "http://www.acme.com/")
    self.assertEqual(h, r'$Version=1; foo=\\b\"a\"r')
def test_missing_final_slash(self):
    """A request URL with no abs_path is treated as if it ended in '/'."""
    # Missing slash from request URL's abs_path should be assumed present.
    url = "http://www.acme.com"
    c = CookieJar(DefaultCookiePolicy(rfc2965=True))
    interact_2965(c, url, "foo=bar; Version=1")
    req = urllib.request.Request(url)
    self.assertEqual(len(c), 1)
    c.add_cookie_header(req)
    self.assertTrue(req.has_header("Cookie"))
def test_domain_mirror(self):
    """$Domain is echoed back to the server exactly as it was received,
    and only when the Domain attribute was explicitly present."""
    pol = DefaultCookiePolicy(rfc2965=True)
    c = CookieJar(pol)
    url = "http://foo.bar.com/"
    interact_2965(c, url, "spam=eggs; Version=1")
    h = interact_2965(c, url)
    self.assertNotIn("Domain", h,
                     "absent domain returned with domain present")
    c = CookieJar(pol)
    url = "http://foo.bar.com/"
    interact_2965(c, url, 'spam=eggs; Version=1; Domain=.bar.com')
    h = interact_2965(c, url)
    self.assertIn('$Domain=".bar.com"', h, "domain not returned")
    c = CookieJar(pol)
    url = "http://foo.bar.com/"
    # note missing initial dot in Domain
    interact_2965(c, url, 'spam=eggs; Version=1; Domain=bar.com')
    h = interact_2965(c, url)
    self.assertIn('$Domain="bar.com"', h, "domain not returned")
def test_path_mirror(self):
    """$Path is echoed back only when the Path attribute was explicitly
    present in the Set-Cookie2 header."""
    pol = DefaultCookiePolicy(rfc2965=True)
    c = CookieJar(pol)
    url = "http://foo.bar.com/"
    interact_2965(c, url, "spam=eggs; Version=1")
    h = interact_2965(c, url)
    self.assertNotIn("Path", h, "absent path returned with path present")
    c = CookieJar(pol)
    url = "http://foo.bar.com/"
    interact_2965(c, url, 'spam=eggs; Version=1; Path=/')
    h = interact_2965(c, url)
    self.assertIn('$Path="/"', h, "path not returned")
def test_port_mirror(self):
    """$Port is echoed back with exactly the form it was received in:
    absent, bare, single-valued, or multi-valued."""
    pol = DefaultCookiePolicy(rfc2965=True)
    c = CookieJar(pol)
    url = "http://foo.bar.com/"
    interact_2965(c, url, "spam=eggs; Version=1")
    h = interact_2965(c, url)
    self.assertNotIn("Port", h, "absent port returned with port present")
    c = CookieJar(pol)
    url = "http://foo.bar.com/"
    interact_2965(c, url, "spam=eggs; Version=1; Port")
    h = interact_2965(c, url)
    self.assertRegex(h, r"\$Port([^=]|$)",
                     "port with no value not returned with no value")
    c = CookieJar(pol)
    url = "http://foo.bar.com/"
    interact_2965(c, url, 'spam=eggs; Version=1; Port="80"')
    h = interact_2965(c, url)
    self.assertIn('$Port="80"', h,
                  "port with single value not returned with single value")
    c = CookieJar(pol)
    url = "http://foo.bar.com/"
    interact_2965(c, url, 'spam=eggs; Version=1; Port="80,8080"')
    h = interact_2965(c, url)
    self.assertIn('$Port="80,8080"', h,
                  "port with multiple values not returned with multiple "
                  "values")
def test_no_return_comment(self):
    """Comment and CommentURL attributes are stored but never sent back
    to the server."""
    c = CookieJar(DefaultCookiePolicy(rfc2965=True))
    url = "http://foo.bar.com/"
    interact_2965(c, url, 'spam=eggs; Version=1; '
                  'Comment="does anybody read these?"; '
                  'CommentURL="http://foo.bar.net/comment.html"')
    h = interact_2965(c, url)
    self.assertNotIn("Comment", h,
        "Comment or CommentURL cookie-attributes returned to server")
def test_Cookie_iterator(self):
    """Iterating a CookieJar yields Cookie objects in a stable,
    path-specificity order, and iteration can be repeated."""
    cs = CookieJar(DefaultCookiePolicy(rfc2965=True))
    # add some random cookies
    interact_2965(cs, "http://blah.spam.org/", 'foo=eggs; Version=1; '
                  'Comment="does anybody read these?"; '
                  'CommentURL="http://foo.bar.net/comment.html"')
    interact_netscape(cs, "http://www.acme.com/blah/", "spam=bar; secure")
    interact_2965(cs, "http://www.acme.com/blah/",
                  "foo=bar; secure; Version=1")
    interact_2965(cs, "http://www.acme.com/blah/",
                  "foo=bar; path=/; Version=1")
    interact_2965(cs, "http://www.sol.no",
                  r'bang=wallop; version=1; domain=".sol.no"; '
                  r'port="90,100, 80,8080"; '
                  r'max-age=100; Comment = "Just kidding! (\"|\\\\) "')
    versions = [1, 1, 1, 0, 1]
    names = ["bang", "foo", "foo", "spam", "foo"]
    domains = [".sol.no", "blah.spam.org", "www.acme.com",
               "www.acme.com", "www.acme.com"]
    paths = ["/", "/", "/", "/blah", "/blah/"]
    # iterate several times to check iteration order is repeatable
    for i in range(4):
        i = 0
        for c in cs:
            self.assertIsInstance(c, Cookie)
            self.assertEqual(c.version, versions[i])
            self.assertEqual(c.name, names[i])
            self.assertEqual(c.domain, domains[i])
            self.assertEqual(c.path, paths[i])
            i = i + 1
def test_parse_ns_headers(self):
# missing domain value (invalid cookie)
self.assertEqual(
parse_ns_headers(["foo=bar; path=/; domain"]),
[[("foo", "bar"),
("path", "/"), ("domain", None), ("version", "0")]]
)
# invalid expires value
self.assertEqual(
parse_ns_headers(["foo=bar; expires=Foo Bar 12 33:22:11 2000"]),
[[("foo", "bar"), ("expires", None), ("version", "0")]]
)
# missing cookie value (valid cookie)
self.assertEqual(
parse_ns_headers(["foo"]),
[[("foo", None), ("version", "0")]]
)
# missing cookie values for parsed attributes
self.assertEqual(
parse_ns_headers(['foo=bar; expires']),
[[('foo', 'bar'), ('expires', None), ('version', '0')]])
self.assertEqual(
parse_ns_headers(['foo=bar; version']),
[[('foo', 'bar'), ('version', None)]])
# shouldn't add version if header is empty
self.assertEqual(parse_ns_headers([""]), [])
def test_bad_cookie_header(self):
    """Malformed Set-Cookie headers must be ignored without raising; an
    invalid expires value degrades the cookie to a session cookie."""

    def cookiejar_from_cookie_headers(headers):
        # build a jar from one response carrying the given headers
        c = CookieJar()
        req = urllib.request.Request("http://www.example.com/")
        r = FakeResponse(headers, "http://www.example.com/")
        c.extract_cookies(r, req)
        return c

    future = time2netscape(time.time()+3600)
    # none of these bad headers should cause an exception to be raised
    for headers in [
        ["Set-Cookie: "],  # actually, nothing wrong with this
        ["Set-Cookie2: "],  # ditto
        # missing domain value
        ["Set-Cookie2: a=foo; path=/; Version=1; domain"],
        # bad max-age
        ["Set-Cookie: b=foo; max-age=oops"],
        # bad version
        ["Set-Cookie: b=foo; version=spam"],
        ["Set-Cookie:; Expires=%s" % future],
        ]:
        c = cookiejar_from_cookie_headers(headers)
        # these bad cookies shouldn't be set
        self.assertEqual(len(c), 0)
    # cookie with invalid expires is treated as session cookie
    headers = ["Set-Cookie: c=foo; expires=Foo Bar 12 33:22:11 2000"]
    c = cookiejar_from_cookie_headers(headers)
    cookie = c._cookies["www.example.com"]["/"]["c"]
    self.assertIsNone(cookie.expires)
class LWPCookieTests(unittest.TestCase):
# Tests taken from libwww-perl, with a few modifications and additions.
def test_netscape_example_1(self):
    """Replays the original Netscape cookie-spec example transaction:
    cookies accumulate across responses and are returned according to
    their path, most specific first."""
    #-------------------------------------------------------------------
    # First we check that it works for the original example at
    # http://www.netscape.com/newsref/std/cookie_spec.html
    # Client requests a document, and receives in the response:
    #
    #       Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/; expires=Wednesday, 09-Nov-99 23:12:40 GMT
    #
    # When client requests a URL in path "/" on this server, it sends:
    #
    #       Cookie: CUSTOMER=WILE_E_COYOTE
    #
    # Client requests a document, and receives in the response:
    #
    #       Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/
    #
    # When client requests a URL in path "/" on this server, it sends:
    #
    #       Cookie: CUSTOMER=WILE_E_COYOTE; PART_NUMBER=ROCKET_LAUNCHER_0001
    #
    # Client receives:
    #
    #       Set-Cookie: SHIPPING=FEDEX; path=/fo
    #
    # When client requests a URL in path "/" on this server, it sends:
    #
    #       Cookie: CUSTOMER=WILE_E_COYOTE; PART_NUMBER=ROCKET_LAUNCHER_0001
    #
    # When client requests a URL in path "/foo" on this server, it sends:
    #
    #       Cookie: CUSTOMER=WILE_E_COYOTE; PART_NUMBER=ROCKET_LAUNCHER_0001; SHIPPING=FEDEX
    #
    # The last Cookie is buggy, because both specifications say that the
    # most specific cookie must be sent first.  SHIPPING=FEDEX is the
    # most specific and should thus be first.
    year_plus_one = time.localtime()[0] + 1
    headers = []
    c = CookieJar(DefaultCookiePolicy(rfc2965 = True))
    #req = urllib.request.Request("http://1.1.1.1/",
    #              headers={"Host": "www.acme.com:80"})
    req = urllib.request.Request("http://www.acme.com:80/",
                                 headers={"Host": "www.acme.com:80"})
    headers.append(
        "Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/ ; "
        "expires=Wednesday, 09-Nov-%d 23:12:40 GMT" % year_plus_one)
    res = FakeResponse(headers, "http://www.acme.com/")
    c.extract_cookies(res, req)
    req = urllib.request.Request("http://www.acme.com/")
    c.add_cookie_header(req)
    self.assertEqual(req.get_header("Cookie"), "CUSTOMER=WILE_E_COYOTE")
    self.assertEqual(req.get_header("Cookie2"), '$Version="1"')
    headers.append("Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/")
    res = FakeResponse(headers, "http://www.acme.com/")
    c.extract_cookies(res, req)
    req = urllib.request.Request("http://www.acme.com/foo/bar")
    c.add_cookie_header(req)
    h = req.get_header("Cookie")
    self.assertIn("PART_NUMBER=ROCKET_LAUNCHER_0001", h)
    self.assertIn("CUSTOMER=WILE_E_COYOTE", h)
    headers.append('Set-Cookie: SHIPPING=FEDEX; path=/foo')
    res = FakeResponse(headers, "http://www.acme.com")
    c.extract_cookies(res, req)
    req = urllib.request.Request("http://www.acme.com/")
    c.add_cookie_header(req)
    h = req.get_header("Cookie")
    self.assertIn("PART_NUMBER=ROCKET_LAUNCHER_0001", h)
    self.assertIn("CUSTOMER=WILE_E_COYOTE", h)
    self.assertNotIn("SHIPPING=FEDEX", h)
    req = urllib.request.Request("http://www.acme.com/foo/")
    c.add_cookie_header(req)
    h = req.get_header("Cookie")
    self.assertIn("PART_NUMBER=ROCKET_LAUNCHER_0001", h)
    self.assertIn("CUSTOMER=WILE_E_COYOTE", h)
    # most specific path ("/foo") must come first
    self.assertTrue(h.startswith("SHIPPING=FEDEX;"))
def test_netscape_example_2(self):
    """Replays the second Netscape cookie-spec example: two cookies with
    the same name but different paths are both sent, deepest path
    first."""
    # Second Example transaction sequence:
    #
    # Assume all mappings from above have been cleared.
    #
    # Client receives:
    #
    #       Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/
    #
    # When client requests a URL in path "/" on this server, it sends:
    #
    #       Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001
    #
    # Client receives:
    #
    #       Set-Cookie: PART_NUMBER=RIDING_ROCKET_0023; path=/ammo
    #
    # When client requests a URL in path "/ammo" on this server, it sends:
    #
    #       Cookie: PART_NUMBER=RIDING_ROCKET_0023; PART_NUMBER=ROCKET_LAUNCHER_0001
    #
    #       NOTE: There are two name/value pairs named "PART_NUMBER" due to
    #       the inheritance of the "/" mapping in addition to the "/ammo" mapping.
    c = CookieJar()
    headers = []
    req = urllib.request.Request("http://www.acme.com/")
    headers.append("Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/")
    res = FakeResponse(headers, "http://www.acme.com/")
    c.extract_cookies(res, req)
    req = urllib.request.Request("http://www.acme.com/")
    c.add_cookie_header(req)
    self.assertEqual(req.get_header("Cookie"),
                     "PART_NUMBER=ROCKET_LAUNCHER_0001")
    headers.append(
        "Set-Cookie: PART_NUMBER=RIDING_ROCKET_0023; path=/ammo")
    res = FakeResponse(headers, "http://www.acme.com/")
    c.extract_cookies(res, req)
    req = urllib.request.Request("http://www.acme.com/ammo")
    c.add_cookie_header(req)
    self.assertRegex(req.get_header("Cookie"),
                     r"PART_NUMBER=RIDING_ROCKET_0023;\s*"
                     "PART_NUMBER=ROCKET_LAUNCHER_0001")
def test_ietf_example_1(self):
    """Replays example 5.1 from draft-ietf-http-state-man-mec-03: each
    request under /acme carries all RFC 2965 cookies received so far,
    with their $Path attributes mirrored."""
    #-------------------------------------------------------------------
    # Then we test with the examples from draft-ietf-http-state-man-mec-03.txt
    #
    # 5.  EXAMPLES
    c = CookieJar(DefaultCookiePolicy(rfc2965=True))
    #
    # 5.1  Example 1
    #
    # Most detail of request and response headers has been omitted.  Assume
    # the user agent has no stored cookies.
    #
    #   1.  User Agent -> Server
    #
    #       POST /acme/login HTTP/1.1
    #       [form data]
    #
    #       User identifies self via a form.
    #
    #   2.  Server -> User Agent
    #
    #       HTTP/1.1 200 OK
    #       Set-Cookie2: Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"
    #
    #       Cookie reflects user's identity.
    cookie = interact_2965(
        c, 'http://www.acme.com/acme/login',
        'Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"')
    self.assertFalse(cookie)
    #
    #   3.  User Agent -> Server
    #
    #       POST /acme/pickitem HTTP/1.1
    #       Cookie: $Version="1"; Customer="WILE_E_COYOTE"; $Path="/acme"
    #       [form data]
    #
    #       User selects an item for ``shopping basket.''
    #
    #   4.  Server -> User Agent
    #
    #       HTTP/1.1 200 OK
    #       Set-Cookie2: Part_Number="Rocket_Launcher_0001"; Version="1";
    #               Path="/acme"
    #
    #       Shopping basket contains an item.
    cookie = interact_2965(c, 'http://www.acme.com/acme/pickitem',
                           'Part_Number="Rocket_Launcher_0001"; '
                           'Version="1"; Path="/acme"');
    self.assertRegex(cookie,
        r'^\$Version="?1"?; Customer="?WILE_E_COYOTE"?; \$Path="/acme"$')
    #
    #   5.  User Agent -> Server
    #
    #       POST /acme/shipping HTTP/1.1
    #       Cookie: $Version="1";
    #               Customer="WILE_E_COYOTE"; $Path="/acme";
    #               Part_Number="Rocket_Launcher_0001"; $Path="/acme"
    #       [form data]
    #
    #       User selects shipping method from form.
    #
    #   6.  Server -> User Agent
    #
    #       HTTP/1.1 200 OK
    #       Set-Cookie2: Shipping="FedEx"; Version="1"; Path="/acme"
    #
    #       New cookie reflects shipping method.
    cookie = interact_2965(c, "http://www.acme.com/acme/shipping",
                           'Shipping="FedEx"; Version="1"; Path="/acme"')
    self.assertRegex(cookie, r'^\$Version="?1"?;')
    self.assertRegex(cookie, r'Part_Number="?Rocket_Launcher_0001"?;'
                             r'\s*\$Path="\/acme"')
    self.assertRegex(cookie, r'Customer="?WILE_E_COYOTE"?;'
                             r'\s*\$Path="\/acme"')
    #
    #   7.  User Agent -> Server
    #
    #       POST /acme/process HTTP/1.1
    #       Cookie: $Version="1";
    #               Customer="WILE_E_COYOTE"; $Path="/acme";
    #               Part_Number="Rocket_Launcher_0001"; $Path="/acme";
    #               Shipping="FedEx"; $Path="/acme"
    #       [form data]
    #
    #       User chooses to process order.
    #
    #   8.  Server -> User Agent
    #
    #       HTTP/1.1 200 OK
    #
    #       Transaction is complete.
    cookie = interact_2965(c, "http://www.acme.com/acme/process")
    self.assertRegex(cookie, r'Shipping="?FedEx"?;\s*\$Path="\/acme"')
    self.assertIn("WILE_E_COYOTE", cookie)
    #
    # The user agent makes a series of requests on the origin server, after
    # each of which it receives a new cookie.  All the cookies have the same
    # Path attribute and (default) domain.  Because the request URLs all have
    # /acme as a prefix, and that matches the Path attribute, each request
    # contains all the cookies received so far.
def test_ietf_example_2(self):
    """Replays example 5.2 from draft-ietf-http-state-man-mec-03: the
    Path attribute controls which cookies are sent, more specific paths
    coming first."""
    # 5.2  Example 2
    #
    # This example illustrates the effect of the Path attribute.  All detail
    # of request and response headers has been omitted.  Assume the user agent
    # has no stored cookies.
    c = CookieJar(DefaultCookiePolicy(rfc2965=True))
    # Imagine the user agent has received, in response to earlier requests,
    # the response headers
    #
    # Set-Cookie2: Part_Number="Rocket_Launcher_0001"; Version="1";
    #         Path="/acme"
    #
    # and
    #
    # Set-Cookie2: Part_Number="Riding_Rocket_0023"; Version="1";
    #         Path="/acme/ammo"
    interact_2965(
        c, "http://www.acme.com/acme/ammo/specific",
        'Part_Number="Rocket_Launcher_0001"; Version="1"; Path="/acme"',
        'Part_Number="Riding_Rocket_0023"; Version="1"; Path="/acme/ammo"')
    # A subsequent request by the user agent to the (same) server for URLs of
    # the form /acme/ammo/...  would include the following request header:
    #
    # Cookie: $Version="1";
    #         Part_Number="Riding_Rocket_0023"; $Path="/acme/ammo";
    #         Part_Number="Rocket_Launcher_0001"; $Path="/acme"
    #
    # Note that the NAME=VALUE pair for the cookie with the more specific Path
    # attribute, /acme/ammo, comes before the one with the less specific Path
    # attribute, /acme.  Further note that the same cookie name appears more
    # than once.
    cookie = interact_2965(c, "http://www.acme.com/acme/ammo/...")
    self.assertRegex(cookie, r"Riding_Rocket_0023.*Rocket_Launcher_0001")
    # A subsequent request by the user agent to the (same) server for a URL of
    # the form /acme/parts/ would include the following request header:
    #
    # Cookie: $Version="1"; Part_Number="Rocket_Launcher_0001"; $Path="/acme"
    #
    # Here, the second cookie's Path attribute /acme/ammo is not a prefix of
    # the request URL, /acme/parts/, so the cookie does not get forwarded to
    # the server.
    cookie = interact_2965(c, "http://www.acme.com/acme/parts/")
    self.assertIn("Rocket_Launcher_0001", cookie)
    self.assertNotIn("Riding_Rocket_0023", cookie)
def test_rejection(self):
    """Rejection of Set-Cookie2 responses based on domain, path and port.

    Each illegal attribute combination must leave the jar's size
    unchanged; each legal one must add exactly one cookie.  Finally the
    jar is saved to disk and reloaded to verify the LWP round trip
    preserves everything.
    """
    # Test rejection of Set-Cookie2 responses based on domain, path, port.
    pol = DefaultCookiePolicy(rfc2965=True)

    c = LWPCookieJar(policy=pol)

    max_age = "max-age=3600"

    # illegal domain (no embedded dots)
    cookie = interact_2965(c, "http://www.acme.com",
                           'foo=bar; domain=".com"; version=1')
    self.assertFalse(c)

    # legal domain
    cookie = interact_2965(c, "http://www.acme.com",
                           'ping=pong; domain="acme.com"; version=1')
    self.assertEqual(len(c), 1)

    # illegal domain (host prefix "www.a" contains a dot)
    cookie = interact_2965(c, "http://www.a.acme.com",
                           'whiz=bang; domain="acme.com"; version=1')
    self.assertEqual(len(c), 1)

    # legal domain
    cookie = interact_2965(c, "http://www.a.acme.com",
                           'wow=flutter; domain=".a.acme.com"; version=1')
    self.assertEqual(len(c), 2)

    # can't partially match an IP-address
    cookie = interact_2965(c, "http://125.125.125.125",
                           'zzzz=ping; domain="125.125.125"; version=1')
    self.assertEqual(len(c), 2)

    # illegal path (must be prefix of request path)
    cookie = interact_2965(c, "http://www.sol.no",
                           'blah=rhubarb; domain=".sol.no"; path="/foo"; '
                           'version=1')
    self.assertEqual(len(c), 2)

    # legal path
    cookie = interact_2965(c, "http://www.sol.no/foo/bar",
                           'bing=bong; domain=".sol.no"; path="/foo"; '
                           'version=1')
    self.assertEqual(len(c), 3)

    # illegal port (request-port not in list)
    cookie = interact_2965(c, "http://www.sol.no",
                           'whiz=ffft; domain=".sol.no"; port="90,100"; '
                           'version=1')
    self.assertEqual(len(c), 3)

    # legal port
    cookie = interact_2965(
        c, "http://www.sol.no",
        r'bang=wallop; version=1; domain=".sol.no"; '
        r'port="90,100, 80,8080"; '
        r'max-age=100; Comment = "Just kidding! (\"|\\\\) "')
    self.assertEqual(len(c), 4)

    # port attribute without any value (current port)
    cookie = interact_2965(c, "http://www.sol.no",
                           'foo9=bar; version=1; domain=".sol.no"; port; '
                           'max-age=100;')
    self.assertEqual(len(c), 5)

    # encoded path
    # LWP has this test, but unescaping allowed path characters seems
    # like a bad idea, so I think this should fail:
##         cookie = interact_2965(c, "http://www.sol.no/foo/",
##                           r'foo8=bar; version=1; path="/%66oo"')
    # but this is OK, because '<' is not an allowed HTTP URL path
    # character:
    cookie = interact_2965(c, "http://www.sol.no/<oo/",
                           r'foo8=bar; version=1; path="/%3coo"')
    self.assertEqual(len(c), 6)

    # save and restore
    filename = test.support.TESTFN

    try:
        c.save(filename, ignore_discard=True)
        old = repr(c)

        c = LWPCookieJar(policy=pol)
        c.load(filename, ignore_discard=True)
    finally:
        # best-effort cleanup of the temp file; ignore if already gone
        try: os.unlink(filename)
        except OSError: pass

    self.assertEqual(old, repr(c))
def test_url_encoding(self):
    """Percent-encoded request paths still match cookie Path attributes.

    Behaviour here deliberately differs from libwww-perl; a unicode URL
    at the end must simply not raise.
    """
    jar = CookieJar(DefaultCookiePolicy(rfc2965=True))
    interact_2965(jar,
                  "http://www.acme.com/foo%2f%25/"
                  "%3c%3c%0Anew%C3%A5/%C3%A5",
                  "foo = bar; version = 1")
    header = interact_2965(
        jar, "http://www.acme.com/foo%2f%25/<<%0anew\345/\346\370\345",
        'bar=baz; path="/foo/"; version=1')
    self.assertIn("foo=bar", header)
    self.assertRegex(header, re.compile(r'^\$version=\"?1\"?', re.I))
    header = interact_2965(
        jar, "http://www.acme.com/foo/%25/<<%0anew\345/\346\370\345")
    self.assertFalse(header)
    # unicode URL doesn't raise exception
    interact_2965(jar, "http://www.acme.com/\xfc")
def test_mozilla(self):
    """Save / load Mozilla/Netscape cookie file format.

    Loads a mix of RFC 2965 and Netscape cookies (some Discard, some
    with explicit expiry), then round-trips them through a Mozilla-
    format file both with and without discardable cookies.
    """
    # Save / load Mozilla/Netscape cookie file format.
    year_plus_one = time.localtime()[0] + 1

    filename = test.support.TESTFN

    c = MozillaCookieJar(filename,
                         policy=DefaultCookiePolicy(rfc2965=True))
    interact_2965(c, "http://www.acme.com/",
                  "foo1=bar; max-age=100; Version=1")
    interact_2965(c, "http://www.acme.com/",
                  'foo2=bar; port="80"; max-age=100; Discard; Version=1')
    interact_2965(c, "http://www.acme.com/", "foo3=bar; secure; Version=1")

    expires = "expires=09-Nov-%d 23:12:40 GMT" % (year_plus_one,)
    interact_netscape(c, "http://www.foo.com/",
                      "fooa=bar; %s" % expires)
    interact_netscape(c, "http://www.foo.com/",
                      "foob=bar; Domain=.foo.com; %s" % expires)
    interact_netscape(c, "http://www.foo.com/",
                      "fooc=bar; Domain=www.foo.com; %s" % expires)

    # Helper: write the jar to disk, read it back into a fresh jar, and
    # always remove the temp file afterwards.
    def save_and_restore(cj, ignore_discard):
        try:
            cj.save(ignore_discard=ignore_discard)
            new_c = MozillaCookieJar(filename,
                                     DefaultCookiePolicy(rfc2965=True))
            new_c.load(ignore_discard=ignore_discard)
        finally:
            try: os.unlink(filename)
            except OSError: pass
        return new_c

    new_c = save_and_restore(c, True)
    self.assertEqual(len(new_c), 6)  # none discarded
    self.assertIn("name='foo1', value='bar'", repr(new_c))

    new_c = save_and_restore(c, False)
    self.assertEqual(len(new_c), 4)  # 2 of them discarded on save
    self.assertIn("name='foo1', value='bar'", repr(new_c))
def test_netscape_misc(self):
    """Additional Netscape-mode quirks.

    Netscape mode accepts a dotted host part as the cookie domain, and
    stores odd characters in cookie values without quoting them.
    """
    jar = CookieJar()
    hdrs = []
    req = urllib.request.Request("http://foo.bar.acme.com/foo")

    # Netscape allows a host part that contains dots
    hdrs.append("Set-Cookie: Customer=WILE_E_COYOTE; domain=.acme.com")
    jar.extract_cookies(
        FakeResponse(hdrs, "http://www.acme.com/foo"), req)

    # and that the domain is the same as the host without adding a leading
    # dot to the domain. Should not quote even if strange chars are used
    # in the cookie value.
    hdrs.append("Set-Cookie: PART_NUMBER=3,4; domain=foo.bar.acme.com")
    jar.extract_cookies(
        FakeResponse(hdrs, "http://www.acme.com/foo"), req)

    req = urllib.request.Request("http://foo.bar.acme.com/foo")
    jar.add_cookie_header(req)
    sent = req.get_header("Cookie")
    self.assertIn("PART_NUMBER=3,4", sent)
    self.assertIn("Customer=WILE_E_COYOTE", sent)
def test_intranet_domains_2965(self):
    """RFC 2965 cookies on dotless (intranet) hostnames.

    A bare hostname can receive cookies both directly and via the
    special ".local" pseudo-domain.
    """
    jar = CookieJar(DefaultCookiePolicy(rfc2965=True))
    interact_2965(jar, "http://example/",
                  "foo1=bar; PORT; Discard; Version=1;")
    hdr = interact_2965(jar, "http://example/",
                        'foo2=bar; domain=".local"; Version=1')
    self.assertIn("foo1=bar", hdr)
    interact_2965(jar, "http://example/", 'foo3=bar; Version=1')
    hdr = interact_2965(jar, "http://example/")
    self.assertIn("foo2=bar", hdr)
    self.assertEqual(len(jar), 3)
def test_intranet_domains_ns(self):
    """Netscape-mode cookies on dotless (intranet) hostnames."""
    jar = CookieJar(DefaultCookiePolicy(rfc2965=False))
    interact_netscape(jar, "http://example/", "foo1=bar")
    hdr = interact_netscape(jar, "http://example/",
                            'foo2=bar; domain=.local')
    self.assertEqual(len(jar), 2)
    self.assertIn("foo1=bar", hdr)
    hdr = interact_netscape(jar, "http://example/")
    self.assertIn("foo2=bar", hdr)
    self.assertEqual(len(jar), 2)
def test_empty_path(self):
    """An empty Path attribute must be treated as the root path "/".

    Broken web-server ORION/1.3.38 returns responses like

        Set-Cookie: JSESSIONID=ABCDERANDOM123; Path=

    i.e. with Path set to nothing; extract_cookies() must default the
    cookie path to / so the cookie is returned for any request path.
    """
    jar = CookieJar(DefaultCookiePolicy(rfc2965=True))
    response = FakeResponse(
        ["Set-Cookie: JSESSIONID=ABCDERANDOM123; Path="],
        "http://www.ants.com/")
    jar.extract_cookies(
        response, urllib.request.Request("http://www.ants.com/"))

    # Cookie must be sent for a normal request path and for a request
    # URI with no path component at all.
    for url in ("http://www.ants.com/",
                "http://www.ants.com:8080"):
        req = urllib.request.Request(url)
        jar.add_cookie_header(req)
        self.assertEqual(req.get_header("Cookie"),
                         "JSESSIONID=ABCDERANDOM123")
        self.assertEqual(req.get_header("Cookie2"), '$Version="1"')
def test_session_cookies(self):
    """CookieJar.clear_session_cookies removes only session cookies.

    Loads two permanent (explicit expiry) and three session cookies,
    clears the session cookies, and verifies that exactly the permanent
    ones survive.
    """
    next_year = time.localtime()[0] + 1
    req = urllib.request.Request('http://www.perlmeister.com/scripts')
    headers = [
        "Set-Cookie: s1=session;Path=/scripts",
        ("Set-Cookie: p1=perm; Domain=.perlmeister.com;"
         "Path=/;expires=Fri, 02-Feb-%d 23:24:20 GMT" % next_year),
        ("Set-Cookie: p2=perm;Path=/;expires=Fri, "
         "02-Feb-%d 23:24:20 GMT" % next_year),
        ("Set-Cookie: s2=session;Path=/scripts;"
         "Domain=.perlmeister.com"),
        'Set-Cookie2: s3=session;Version=1;Discard;Path="/"',
    ]
    jar = CookieJar()
    jar.extract_cookies(
        FakeResponse(headers, 'http://www.perlmeister.com/scripts'), req)

    # How many session/permanent cookies do we have, before and after?
    counter = {"session_after": 0,
               "perm_after": 0,
               "session_before": 0,
               "perm_before": 0}
    for cookie in jar:
        counter["%s_before" % cookie.value] += 1

    jar.clear_session_cookies()

    for cookie in jar:
        counter["%s_after" % cookie.value] += 1

    # a permanent cookie got lost accidentally
    self.assertEqual(counter["perm_after"], counter["perm_before"])
    # a session cookie hasn't been cleared
    self.assertEqual(counter["session_after"], 0)
    # we didn't have session cookies in the first place
    self.assertNotEqual(counter["session_before"], 0)
def test_main(verbose=None):
    """Run every cookie-related test case via test.support."""
    cases = (
        DateTimeTests,
        HeaderTests,
        CookieTests,
        FileCookieJarTests,
        LWPCookieTests,
    )
    test.support.run_unittest(*cases)
if __name__ == "__main__":
    # Allow running this test module directly, with verbose output.
    test_main(verbose=True)
| 41.440723 | 101 | 0.561411 |
acf3160a0fe38d1b86295ca64f506482b154f839 | 146,657 | py | Python | colour/colorimetry/tests/tests_blackbody.py | canavandl/colour | a453cd37b6135a9092d5ea5b2aafb8d19134bdff | [
"BSD-3-Clause"
] | 1 | 2019-06-27T11:32:48.000Z | 2019-06-27T11:32:48.000Z | colour/colorimetry/tests/tests_blackbody.py | canavandl/colour | a453cd37b6135a9092d5ea5b2aafb8d19134bdff | [
"BSD-3-Clause"
] | null | null | null | colour/colorimetry/tests/tests_blackbody.py | canavandl/colour | a453cd37b6135a9092d5ea5b2aafb8d19134bdff | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Defines unit tests for :mod:`colour.colorimetry.blackbody` module.
"""
from __future__ import division, unicode_literals
import numpy as np
import sys
# unittest2 backports the Python 2.7 unittest API (assertRegex & co.)
# to Python <= 2.6; on newer interpreters the stdlib module suffices.
if sys.version_info[:2] <= (2, 6):
    import unittest2 as unittest
else:
    import unittest
from colour.colorimetry import (
SpectralShape,
planck_law,
blackbody_spd)
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013 - 2014 - Colour Developers'
__license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-science@googlegroups.com'
__status__ = 'Production'
# Public API of this test module: reference data sets plus test cases.
__all__ = ['PLANCK_LAW_DATA',
           'BLACKBODY_SPD_DATA',
           'TestPlanckLaw',
           'TestBlackbodySpd']
PLANCK_LAW_DATA = {
1667: {
10: 0.0,
20: 1.4105596687642286e-165,
30: 5.530067904248269e-104,
40: 2.2643024143383377e-73,
50: 4.097537659366942e-55,
60: 5.144886317514676e-43,
70: 2.002026097230864e-34,
80: 5.071435708137485e-28,
90: 4.5239512888229996e-23,
100: 3.9052618037311606e-19,
110: 6.199380452902635e-16,
120: 2.774108564921223e-13,
130: 4.700577339170552e-11,
140: 3.7222321408108333e-09,
150: 1.606767053049753e-07,
160: 4.242815847371177e-06,
170: 7.483554302967325e-05,
180: 0.0009439877838579169,
190: 0.008986537508368567,
200: 0.06739672052016699,
210: 0.4122636644142145,
220: 2.115953500760159,
230: 9.32773792033636,
240: 36.00993182785636,
250: 123.74179663748401,
260: 383.7345544530101,
270: 1086.5329211269789,
280: 2837.2351129434164,
290: 6891.670775622278,
300: 15687.798375507615,
310: 33683.44844274908,
320: 68602.63896731117,
330: 133194.33701036545,
340: 247596.97900992312,
350: 442378.11199124635,
360: 762281.907435713,
370: 1270670.6183551583,
380: 2054593.363812708,
390: 3230361.862517226,
400: 4949462.9886321705,
410: 7404596.883354958,
420: 10835600.410377722,
430: 15535001.25153879,
440: 21852948.726484377,
450: 30201282.954720583,
460: 41056532.58435318,
470: 54961670.46908595,
480: 72526503.35111238,
490: 94426622.57829192,
500: 121400895.01296306,
510: 154247523.7468478,
520: 193818754.64929816,
530: 241014345.31522235,
540: 296773946.385242,
550: 362068570.77571,
560: 437891343.8688596,
570: 525247737.38369095,
580: 625145492.0206116,
590: 738584429.837095,
600: 866546347.6172808,
610: 1009985168.2795815,
620: 1169817509.6764226,
630: 1346913810.0005624,
640: 1542090127.3731594,
650: 1756100708.9020905,
660: 1989631402.3014297,
670: 2243293961.6693907,
680: 2517621278.723494,
690: 2813063552.061537,
700: 3129985390.118351,
710: 3468663828.588369,
720: 3829287230.2622943,
730: 4211955024.492659,
740: 4616678234.80892,
750: 5043380736.452318,
760: 5491901180.664997,
770: 5961995519.29445,
780: 6453340061.49302,
790: 6965534993.828248,
800: 7498108295.795781,
810: 8050519984.367113,
820: 8622166623.644691,
830: 9212386038.779451,
840: 9820462176.886133,
850: 10445630061.640814,
860: 11087080792.442425,
870: 11743966543.367159,
880: 12415405521.548801,
890: 13100486849.00554,
900: 13798275336.239716,
910: 14507816120.112028,
920: 15228139142.490662,
930: 15958263449.970768,
940: 16697201298.524448,
950: 17443962050.26111,
960: 18197555852.54279,
970: 18956997092.503765,
980: 19721307622.57071,
990: 20489519754.869236,
1000: 21260679024.44757,
1010: 22033846723.05022,
1020: 22808102206.753956,
1030: 23582544982.139328,
1040: 24356296576.83462,
1050: 25128502201.242085,
1060: 25898332209.061493,
1070: 26664983364.869366,
1080: 27427679927.513756,
1090: 28185674558.457287,
1100: 28938249064.45347,
1110: 29684714984.095753,
1120: 30424414027.83521,
1130: 31156718381.043312,
1140: 31881030879.605392,
1150: 32596785067.378365,
1160: 33303445144.645103,
1170: 34000505816.452465,
1180: 34687492049.439545,
1190: 35363958745.45504,
1200: 36029490339.92939,
1210: 36683700332.62206,
1220: 37326230758.00247,
1230: 37956751602.155235,
1240: 38574960172.731186,
1250: 39180580428.090706,
1260: 39773362271.41721,
1270: 40353080815.21521,
1280: 40919535621.24541,
1290: 41472549920.603485,
1300: 42011969818.30645,
1310: 42537663486.42292,
1320: 43049520349.46779,
1330: 43547450265.47595,
1340: 44031382705.880585,
1350: 44501265937.04368,
1360: 44957066206.02337,
1370: 45398766932.91232,
1380: 45826367911.84631,
1390: 46239884522.55986,
1400: 46639346954.15597,
1410: 47024799442.562164,
1420: 47396299522.962166,
1430: 47753917298.31928,
1440: 48097734724.95365,
1450: 48427844915.98214,
1460: 48744351463.29889,
1470: 49047367778.644356,
1480: 49337016454.19812,
1490: 49613428643.021576,
1500: 49876743459.58188,
1510: 50127107400.49855,
1520: 50364673785.572845,
1530: 50589602219.0885,
1540: 50802058071.30568,
1550: 51002211980.00954,
1560: 51190239371.9247,
1570: 51366320003.756256,
1580: 51530637522.579056,
1590: 51683379045.25827,
1600: 51824734756.5533,
1610: 51954897525.528915,
1620: 52074062539.874115,
1630: 52182426957.7086,
1640: 52280189576.440544,
1650: 52367550518.226364,
1660: 52444710931.57021,
1670: 52511872708.59633,
1680: 52569238217.518684,
1690: 52617010049.82972,
1700: 52655390781.72925,
1710: 52684582749.31279,
1720: 52704787837.04245,
1730: 52716207279.024796,
1740: 52719041472.62516,
1750: 52713489803.95352,
1760: 52699750484.76343,
1770: 52678020400.31157,
1780: 52648494967.73607,
1790: 52611368004.517494,
1800: 52566831606.59786,
1810: 52515076035.7416,
1820: 52456289615.732574,
1830: 52390658637.011696,
1840: 52318367269.371056,
1850: 52239597482.32978,
1860: 52154528972.82877,
1870: 52063339099.89277,
1880: 51966202825.917854,
1890: 51863292664.25549,
1900: 51754778632.772896,
1910: 51640828213.0829,
1920: 51521606315.14482,
1930: 51397275246.95105,
1940: 51267994689.0221,
1950: 51133921673.44526,
1960: 50995210567.20106,
1970: 50852013059.53231,
1980: 50704478153.1194,
1990: 50552752158.83679,
2000: 50396978693.872505,
2010: 50237298683.00405,
2020: 50073850362.83138,
2030: 49906769288.77661,
2040: 49736188344.66857,
2050: 49562237754.73885,
2060: 49385045097.86284,
2070: 49204735323.88732,
2080: 49021430771.89452,
2090: 48835251190.25805,
2100: 48646313758.35394,
2110: 48454733109.796394,
2120: 48260621357.074646,
2130: 48064088117.47288,
2140: 47865240540.16127,
2150: 47664183334.35247,
2160: 47461018798.42261,
2170: 47255846849.90132,
2180: 47048765056.24102,
2190: 46839868666.279495,
2200: 46629250642.315414,
2210: 46417001692.720345,
2220: 46203210305.01523,
2230: 45987962779.34356,
2240: 45771343262.27702,
2250: 45553433780.89394,
2260: 45334314277.07314,
2270: 45114062641.950584,
2280: 44892754750.48876,
2290: 44670464496.11164,
2300: 44447263825.36195,
2310: 44223222772.53919,
2320: 43998409494.28056,
2330: 43772890304.04888,
2340: 43546729706.49399,
2350: 43319990431.65728,
2360: 43092733468.99001,
2370: 42865018101.159515,
2380: 42636901937.61749,
2390: 42408440947.90929,
2400: 42179689494.701836,
2410: 41950700366.51184,
2420: 41721524810.11636,
2430: 41492212562.62977,
2440: 41262811883.23234,
2450: 41033369584.53776,
2460: 40803931063.58682,
2470: 40574540332.457306,
2480: 40345240048.48027,
2490: 40116071544.05379,
2500: 39887074856.04776,
2510: 39658288754.7923,
2520: 39429750772.644714,
2530: 39201497232.13037,
2540: 38973563273.65309,
2550: 38745982882.77274,
2560: 38518788917.04668,
2570: 38292013132.43421,
2580: 38065686209.26208,
2590: 37839837777.75121,
2600: 37614496443.10401,
2610: 37389689810.15314,
2620: 37165444507.57282,
2630: 36941786211.653656,
2640: 36718739669.643074,
2650: 36496328722.65366,
2660: 36274576328.14166,
2670: 36053504581.958916,
2680: 35833134739.98076,
2690: 35613487239.31411,
2700: 35394581719.08893,
2710: 35176437040.837036,
2720: 34959071308.462654,
2730: 34742501887.80864,
2740: 34526745425.8233,
2750: 34311817869.33225,
2760: 34097734483.420048,
2770: 33884509869.426662,
2780: 33672157982.563717,
2790: 33460692149.15582,
2800: 33250125083.511864,
2810: 33040468904.431908,
2820: 32831735151.354935,
2830: 32623934800.15261,
2840: 32417078278.57489,
2850: 32211175481.352673,
2860: 32006235784.963108,
2870: 31802268062.06314,
2880: 31599280695.59649,
2890: 31397281592.58018,
2900: 31196278197.575497,
2910: 30996277505.849323,
2920: 30797286076.231148,
2930: 30599310043.67133,
2940: 30402355131.50605,
2950: 30206426663.434055,
2960: 30011529575.21113,
2970: 29817668426.067226,
2980: 29624847409.851635,
2990: 29433070365.911415,
3000: 29242340789.708496,
3010: 29052661843.180267,
3020: 28864036364.848946,
3030: 28676466879.684868,
3040: 28489955608.728294,
3050: 28304504478.475124,
3060: 28120115130.031036,
3070: 27936788928.038925,
3080: 27754526969.384502,
3090: 27573330091.684414,
3100: 27393198881.56198,
3110: 27214133682.7145,
3120: 27036134603.776943,
3130: 26859201525.986412,
3140: 26683334110.65162,
3150: 26508531806.431408,
3160: 26334793856.42718,
3170: 26162119305.092438,
3180: 25990507004.964264,
3190: 25819955623.220276,
3200: 25650463648.065193,
3210: 25482029394.95056,
3220: 25314651012.631836,
3230: 25148326489.066196,
3240: 24983053657.15476,
3250: 24818830200.332966,
3260: 24655653658.012356,
3270: 24493521430.87742,
3280: 24332430786.040733,
3290: 24172378862.05984,
3300: 24013362673.818764,
3310: 23855379117.277863,
3320: 23698424974.094685,
3330: 23542496916.119118,
3340: 23387591509.765675,
3350: 23233705220.26598,
3360: 23080834415.804214,
3370: 22928975371.538395,
3380: 22778124273.510197,
3390: 22628277222.44607,
3400: 22479430237.45211,
3410: 22331579259.605606,
3420: 22184720155.445377,
3430: 22038848720.363705,
3440: 21893960681.902065,
3450: 21750051702.953144,
3460: 21607117384.871376,
3470: 21465153270.494354,
3480: 21324154847.077175,
3490: 21184117549.142086,
3500: 21045036761.245327,
3510: 20906907820.663525,
3520: 20769726020.00121,
3530: 20633486609.721977,
3540: 20498184800.604794,
3550: 20363815766.12757,
3560: 20230374644.77971,
3570: 20097856542.305557,
3580: 19966256533.880424,
3590: 19835569666.220882,
3600: 19705790959.631195,
3610: 19576915409.987217,
3620: 19448937990.65966,
3630: 19321853654.378178,
3640: 19195657335.03766,
3650: 19070343949.448544,
3660: 18945908399.032257,
3670: 18822345571.46349,
3680: 18699650342.26053,
3690: 18577817576.325027,
3700: 18456842129.43257,
3710: 18336718849.67542,
3720: 18217442578.85844,
3730: 18099008153.84974,
3740: 17981410407.88698,
3750: 17864644171.84068,
3760: 17748704275.435646,
3770: 17633585548.431595,
3780: 17519282821.76397,
3790: 17405790928.6463,
3800: 17293104705.63485,
3810: 17181218993.656662,
3820: 17070128639.002058,
3830: 16959828494.282494,
3840: 16850313419.35467,
3850: 16741578282.211908,
3860: 16633617959.843605,
3870: 16526427339.063656,
3880: 16420001317.308756,
3890: 16314334803.40733,
3900: 16209422718.31989,
3910: 16105259995.851728,
3920: 16001841583.338558,
3930: 15899162442.305977,
3940: 15797217549.103363,
3950: 15696001895.513037,
3960: 15595510489.335339,
3970: 15495738354.950218,
3980: 15396680533.85614,
3990: 15298332085.186806,
4000: 15200688086.206379,
4010: 15103743632.783978,
4020: 15007493839.847599,
4030: 14911933841.818628,
4040: 14817058793.02697,
4050: 14722863868.107773,
4060: 14629344262.379925,
4070: 14536495192.207136,
4080: 14444311895.341963,
4090: 14352789631.253242,
4100: 14261923681.43762,
4110: 14171709349.715284,
4120: 14082141962.510757,
4130: 13993216869.11899,
4140: 13904929441.95713,
4150: 13817275076.802544,
4160: 13730249193.017466,
4170: 13643847233.76052,
4180: 13558064666.185781,
4190: 13472896981.629543,
4200: 13388339695.78517,
4210: 13304388348.866556,
4220: 13221038505.760393,
4230: 13138285756.167614,
4240: 13056125714.734388,
4250: 12974554021.17303,
4260: 12893566340.372952,
4270: 12813158362.502214,
4280: 12733325803.099806,
4290: 12654064403.159,
4300: 12575369929.202068,
4310: 12497238173.346615,
4320: 12419664953.36383,
4330: 12342646112.728895,
4340: 12266177520.66381,
4350: 12190255072.17286,
4360: 12114874688.071022,
4370: 12040032315.005512,
4380: 11965723925.470646,
4390: 11891945517.816345,
4400: 11818693116.250448,
4410: 11745962770.834995,
4420: 11673750557.476772,
4430: 11602052577.912268,
4440: 11530864959.687214,
4450: 11460183856.130997,
4460: 11390005446.325933,
4470: 11320325935.071846,
4480: 11251141552.845882,
4490: 11182448555.757854,
4500: 11114243225.501272,
4510: 11046521869.300163,
4520: 10979280819.851904,
4530: 10912516435.26617,
4540: 10846225099.00016,
4550: 10780403219.790243,
4560: 10715047231.580147,
4570: 10650153593.445868,
4580: 10585718789.517366,
4590: 10521739328.89725,
4600: 10458211745.576466,
4610: 10395132598.347265,
4620: 10332498470.713398,
4630: 10270305970.797773,
4640: 10208551731.247625,
4650: 10147232409.137331,
4660: 10086344685.868944,
4670: 10025885267.070593,
4680: 9965850882.492807,
4690: 9906238285.902851,
4700: 9847044254.977211,
4710: 9788265591.192287,
4720: 9729899119.713387,
4730: 9671941689.282068,
4740: 9614390172.101978,
4750: 9557241463.723253,
4760: 9500492482.925478,
4770: 9444140171.599407,
4780: 9388181494.627407,
4790: 9332613439.762735,
4800: 9277433017.507757,
4810: 9222637260.991117,
4820: 9168223225.843908,
4830: 9114187990.075008,
4840: 9060528653.945503,
4850: 9007242339.842373,
4860: 8954326192.15143,
4870: 8901777377.129566,
4880: 8849593082.77641,
4890: 8797770518.705404,
4900: 8746306916.01434,
4910: 8695199527.155441,
4920: 8644445625.805023,
4930: 8594042506.732754,
4940: 8543987485.670583,
4950: 8494277899.181412,
4960: 8444911104.527425,
4970: 8395884479.538292,
4980: 8347195422.479163,
4990: 8298841351.918492,
5000: 8250819706.5958,
5010: 8203127945.289359,
5020: 8155763546.683814,
5030: 8108724009.237831,
5040: 8062006851.05176,
5050: 8015609609.735345,
5060: 7969529842.2755375,
5070: 7923765124.904409,
5080: 7878313052.967226,
5090: 7833171240.79065,
5100: 7788337321.551186,
5110: 7743808947.143786,
5120: 7699583788.050758,
5130: 7655659533.210873,
5140: 7612033889.888802,
5150: 7568704583.544822,
5160: 7525669357.7049055,
5170: 7482925973.831058,
5180: 7440472211.19211,
5190: 7398305866.734847,
5200: 7356424754.9555235,
5210: 7314826707.771835,
5220: 7273509574.395283,
5230: 7232471221.203981,
5240: 7191709531.615959,
5250: 7151222405.962903,
5260: 7111007761.3643875,
5270: 7071063531.602623,
5280: 7031387666.997709,
5290: 6991978134.283396,
5300: 6952832916.4833975,
5310: 6913950012.788244,
5320: 6875327438.43268,
5330: 6836963224.573657,
5340: 6798855418.168842,
5350: 6761002081.855781,
5360: 6723401293.8315935,
5370: 6686051147.733297,
5380: 6648949752.518734,
5390: 6612095232.348101,
5400: 6575485726.466108,
5410: 6539119389.084758,
5420: 6502994389.266767,
5430: 6467108910.809609,
5440: 6431461152.130217,
5450: 6396049326.150313,
5460: 6360871660.182423,
5470: 6325926395.816512,
5480: 6291211788.807308,
5490: 6256726108.96227,
5500: 6222467640.030232,
5510: 6188434679.590736,
5520: 6154625538.943997,
5530: 6121038543.001583,
5540: 6087672030.177756,
5550: 6054524352.281495,
5560: 6021593874.40921,
5570: 5988878974.838122,
5580: 5956378044.920356,
5590: 5924089488.977687,
5600: 5892011724.197018,
5610: 5860143180.526486,
5620: 5828482300.572332,
5630: 5797027539.496387,
5640: 5765777364.914302,
5650: 5734730256.794444,
5660: 5703884707.357479,
5670: 5673239220.976666,
5680: 5642792314.078818,
5690: 5612542515.045953,
5700: 5582488364.117655,
5710: 5552628413.294097,
5720: 5522961226.23977,
5730: 5493485378.187879,
5740: 5464199455.845434,
5750: 5435102057.299024,
5760: 5406191791.9212675,
5770: 5377467280.277957,
5780: 5348927154.035841,
5790: 5320570055.871142,
5800: 5292394639.378694,
5810: 5264399568.981788,
5820: 5236583519.842664,
5830: 5208945177.773695,
5840: 5181483239.14921,
5850: 5154196410.818002,
5860: 5127083410.016495,
5870: 5100142964.282542,
5880: 5073373811.369919,
5890: 5046774699.163447,
5900: 5020344385.594769,
5910: 4994081638.558781,
5920: 4967985235.8307,
5930: 4942053964.983788,
5940: 4916286623.307682,
5950: 4890682017.727416,
5960: 4865238964.723016,
5970: 4839956290.249767,
5980: 4814832829.659088,
5990: 4789867427.620043,
6000: 4765058938.041453,
6010: 4740406223.994655,
6020: 4715908157.63684,
6030: 4691563620.135029,
6040: 4667371501.59066,
6050: 4643330700.964742,
6060: 4619440126.003653,
6070: 4595698693.165505,
6080: 4572105327.547136,
6090: 4548658962.811638,
6100: 4525358541.116539,
6110: 4502203013.042508,
6120: 4479191337.522691,
6130: 4456322481.772575,
6140: 4433595421.220465,
6150: 4411009139.438509,
6160: 4388562628.07429,
6170: 4366254886.7829895,
6180: 4344084923.160104,
6190: 4322051752.674704,
6200: 4300154398.603272,
6210: 4278391891.9640684,
6220: 4256763271.4520264,
6230: 4235267583.374232,
6240: 4213903881.5858974,
6250: 4192671227.426893,
6260: 4171568689.658787,
6270: 4150595344.4024453,
6280: 4129750275.0761056,
6290: 4109032572.3340096,
6300: 4088441334.0055494,
6310: 4067975665.0348797,
6320: 4047634677.4210916,
6330: 4027417490.1588655,
6340: 4007323229.1796246,
6350: 3987351027.293179,
6360: 3967500024.1298876,
6370: 3947769366.083272,
6380: 3928158206.253152,
6390: 3908665704.389244,
6400: 3889291026.8352327,
6410: 3870033346.4733367,
6420: 3850891842.6693296,
6430: 3831865701.218036,
6440: 3812954114.2892914,
6450: 3794156280.37436,
6460: 3775471404.2328115,
6470: 3756898696.8398485,
6480: 3738437375.334096,
6490: 3720086662.965818,
6500: 3701845789.0455947,
6510: 3683713988.8934264,
6520: 3665690503.7882915,
6530: 3647774580.918111,
6540: 3629965473.3301706,
6550: 3612262439.8819456,
6560: 3594664745.192371,
6570: 3577171659.593507,
6580: 3559782459.082633,
6590: 3542496425.2747626,
6600: 3525312845.355553,
6610: 3508231012.034624,
6620: 3491250223.4992895,
6630: 3474369783.368674,
6640: 3457589000.648234,
6650: 3440907189.68468,
6660: 3424323670.121268,
6670: 3407837766.853492,
6680: 3391448809.985163,
6690: 3375156134.7848577,
6700: 3358959081.642748,
6710: 3342856996.02781,
6720: 3326849228.4453974,
6730: 3310935134.395184,
6740: 3295114074.32948,
6750: 3279385413.6118903,
6760: 3263748522.476351,
6770: 3248202775.986518,
6780: 3232747553.9954944,
6790: 3217382241.1059265,
6800: 3202106226.6304355,
6810: 3186918904.552398,
6820: 3171819673.487058,
6830: 3156807936.6429987,
6840: 3141883101.783921,
6850: 3127044581.1907773,
6860: 3112291791.6242247,
6870: 3097624154.287404,
6880: 3083041094.78905,
6890: 3068542043.106918,
6900: 3054126433.551525,
6910: 3039793704.7302175,
6920: 3025543299.51154,
6930: 3011374664.989925,
6940: 2997287252.4506803,
6950: 2983280517.3352904,
6960: 2969353919.207016,
6970: 2955506921.7167926,
6980: 2941738992.569427,
6990: 2928049603.490089,
7000: 2914438230.191099,
7010: 2900904352.338997,
7020: 2887447453.521899,
7030: 2874067021.2171516,
7040: 2860762546.7592473,
7050: 2847533525.3080435,
7060: 2834379455.817233,
7070: 2821299841.003111,
7080: 2808294187.313599,
7090: 2795362004.897555,
7100: 2782502807.574328,
7110: 2769716112.8036,
7120: 2757001441.6554785,
7130: 2744358318.780856,
7140: 2731786272.3820205,
7150: 2719284834.1835284,
7160: 2706853539.4033294,
7170: 2694491926.724141,
7180: 2682199538.265074,
7190: 2669975919.5535054,
7200: 2657820619.497195,
7210: 2645733190.3566437,
7220: 2633713187.717698,
7230: 2621760170.4643836,
7240: 2609873700.751985,
7250: 2598053343.980353,
7260: 2586298668.7674456,
7270: 2574609246.923101,
7280: 2562984653.423036,
7290: 2551424466.3830743,
7300: 2539928267.0335913,
7310: 2528495639.6941915,
7320: 2517126171.748596,
7330: 2505819453.6197567,
7340: 2494575078.7451744,
7350: 2483392643.552453,
7360: 2472271747.4350348,
7370: 2461211992.728177,
7380: 2450212984.6851163,
7390: 2439274331.4534483,
7400: 2428395644.0517135,
7410: 2417576536.3461795,
7420: 2406816625.0278277,
7430: 2396115529.5895433,
7440: 2385472872.3035,
7450: 2374888278.198735,
7460: 2364361375.038923,
7470: 2353891793.3003516,
7480: 2343479166.1500645,
7490: 2333123129.42422,
7500: 2322823321.6066175,
7510: 2312579383.8074145,
7520: 2302390959.742034,
7530: 2292257695.710249,
7540: 2282179240.5754433,
7550: 2272155245.744057,
7560: 2262185365.145211,
7570: 2252269255.210504,
7580: 2242406574.8539805,
7590: 2232596985.452279,
7600: 2222840150.824943,
7610: 2213135737.214912,
7620: 2203483413.2691684,
7630: 2193882850.01956,
7640: 2184333720.8637815,
7650: 2174835701.546527,
7660: 2165388470.140799,
7670: 2155991707.029373,
7680: 2146645094.8864355,
7690: 2137348318.6593654,
7700: 2128101065.5506806,
7710: 2118903025.0001376,
7720: 2109753888.6669805,
7730: 2100653350.4123495,
7740: 2091601106.2818317,
7750: 2082596854.4881697,
7760: 2073640295.39411,
7770: 2064731131.4954088,
7780: 2055869067.4039705,
7790: 2047053809.831138,
7800: 2038285067.5711288,
7810: 2029562551.484602,
7820: 2020885974.482379,
7830: 2012255051.5092869,
7840: 2003669499.5281606,
7850: 1995129037.5039594,
7860: 1986633386.3880334,
7870: 1978182269.1025212,
7880: 1969775410.524878,
7890: 1961412537.4725332,
7900: 1953093378.6876884,
7910: 1944817664.822232,
7920: 1936585128.4227922,
7930: 1928395503.91591,
7940: 1920248527.593343,
7950: 1912143937.5974925,
7960: 1904081473.9069514,
7970: 1896060878.3221788,
7980: 1888081894.451292,
7990: 1880144267.695984,
8000: 1872247745.2375557,
8010: 1864392076.0230708,
8020: 1856577010.7516174,
8030: 1848802301.8607001,
8040: 1841067703.5127382,
8050: 1833372971.5816817,
8060: 1825717863.6397338,
8070: 1818102138.9441948,
8080: 1810525558.4244092,
8090: 1802987884.6688309,
8100: 1795488881.912185,
8110: 1788028316.0227504,
8120: 1780605954.4897437,
8130: 1773221566.410806,
8140: 1765874922.4796042,
8150: 1758565794.9735253,
8160: 1751293957.7414834,
8170: 1744059186.1918213,
8180: 1736861257.2803211,
8190: 1729699949.498313,
8200: 1722575042.8608754,
8210: 1715486318.895146,
8220: 1708433560.6287253,
8230: 1701416552.5781753,
8240: 1694435080.73762,
8250: 1687488932.567433,
8260: 1680577896.9830265,
8270: 1673701764.3437316,
8280: 1666860326.4417727,
8290: 1660053376.4913275,
8300: 1653280709.1176896,
8310: 1646542120.3465087,
8320: 1639837407.593129,
8330: 1633166369.652017,
8340: 1626528806.6862679,
8350: 1619924520.2172098,
8360: 1613353313.1140862,
8370: 1606814989.5838344,
8380: 1600309355.160931,
8390: 1593836216.6973429,
8400: 1587395382.352544,
8410: 1580986661.5836267,
8420: 1574609865.1354856,
8430: 1568264805.0310926,
8440: 1561951294.5618398,
8450: 1555669148.277977,
8460: 1549418181.9791143,
8470: 1543198212.7048109,
8480: 1537009058.7252402,
8490: 1530850539.531933,
8500: 1524722475.8285925,
8510: 1518624689.5219884,
8520: 1512557003.7129245,
8530: 1506519242.6872847,
8540: 1500511231.9071462,
8550: 1494532798.001968,
8560: 1488583768.759856,
8570: 1482663973.1188912,
8580: 1476773241.1585383,
8590: 1470911404.091118,
8600: 1465078294.253352,
8610: 1459273745.0979764,
8620: 1453497591.1854227,
8630: 1447749668.1755724,
8640: 1442029812.819568,
8650: 1436337862.9517019,
8660: 1430673657.4813666,
8670: 1425037036.385069,
8680: 1419427840.6985118,
8690: 1413845912.5087411,
8700: 1408291094.9463544,
8710: 1402763232.1777725,
8720: 1397262169.3975794,
8730: 1391787752.820915,
8740: 1386339829.6759386,
8750: 1380918248.1963468,
8760: 1375522857.613959,
8770: 1370153508.1513536,
8780: 1364810051.014575,
8790: 1359492338.3858883,
8800: 1354200223.4166043,
8810: 1348933560.2199485,
8820: 1343692203.8640027,
8830: 1338476010.3646917,
8840: 1333284836.678834,
8850: 1328118540.6972442,
8860: 1322976981.2378917,
8870: 1317860018.0391185,
8880: 1312767511.7529042,
8890: 1307699323.9381938,
8900: 1302655317.05427,
8910: 1297635354.454185,
8920: 1292639300.3782473,
8930: 1287667019.9475496,
8940: 1282718379.1575615,
8950: 1277793244.8717644,
8960: 1272891484.8153434,
8970: 1268012967.5689256,
8980: 1263157562.5623684,
8990: 1258325140.0686018,
9000: 1253515571.1975148,
9010: 1248728727.8898933,
9020: 1243964482.9114063,
9030: 1239222709.8466387,
9040: 1234503283.093172,
9050: 1229806077.8557148,
9060: 1225130970.1402736,
9070: 1220477836.7483785,
9080: 1215846555.2713478,
9090: 1211237004.0846004,
9100: 1206649062.3420136,
9110: 1202082609.9703267,
9120: 1197537527.663587,
9130: 1193013696.87764,
9140: 1188510999.8246655,
9150: 1184029319.4677522,
9160: 1179568539.5155199,
9170: 1175128544.416784,
9180: 1170709219.3552573,
9190: 1166310450.2442973,
9200: 1161932123.7216964,
9210: 1157574127.144509,
9220: 1153236348.5839226,
9230: 1148918676.8201668,
9240: 1144621001.3374648,
9250: 1140343212.3190222,
9260: 1136085200.6420567,
9270: 1131846857.8728657,
9280: 1127628076.2619343,
9290: 1123428748.7390795,
9300: 1119248768.9086337,
9310: 1115088031.0446646,
9320: 1110946430.086236,
9330: 1106823861.6326993,
9340: 1102720221.93903,
9350: 1098635407.9111915,
9360: 1094569317.1015427,
9370: 1090521847.7042768,
9380: 1086492898.5508993,
9390: 1082482369.1057355,
9400: 1078490159.4614797,
9410: 1074516170.3347733,
9420: 1070560303.0618236,
9430: 1066622459.5940468,
9440: 1062702542.4937555,
9450: 1058800454.9298736,
9460: 1054916100.673683,
9470: 1051049384.0946108,
9480: 1047200210.1560388,
9490: 1043368484.4111552,
9500: 1039554112.9988303,
9510: 1035757002.6395295,
9520: 1031977060.6312561,
9530: 1028214194.845524,
9540: 1024468313.7233624,
9550: 1020739326.2713544,
9560: 1017027142.057698,
9570: 1013331671.2083074,
9580: 1009652824.402937,
9590: 1005990512.8713367,
9600: 1002344648.3894413,
9610: 998715143.27558,
9620: 995101910.3867251,
9630: 991504863.1147605,
9640: 987923915.3827869,
9650: 984358981.6414477,
9660: 980809976.8652887,
9670: 977276816.5491424,
9680: 973759416.7045414,
9690: 970257693.8561577,
9700: 966771565.0382711,
9710: 963300947.791263,
9720: 959845760.1581377,
9730: 956405920.6810682,
9740: 952981348.3979728,
9750: 949571962.8391118,
9760: 946177684.0237142,
9770: 942798432.45663,
9780: 939434129.1250037,
9790: 936084695.4949789,
9800: 932750053.5084231,
9810: 929430125.5796785,
9820: 926124834.592341,
9830: 922834103.8960564,
9840: 919557857.3033476,
9850: 916296019.0864621,
9860: 913048513.9742442,
9870: 909815267.1490294,
9880: 906596204.2435669,
9890: 903391251.3379576,
9900: 900200334.9566219,
9910: 897023382.0652865,
9920: 893860320.0679966,
9930: 890711076.8041475,
9940: 887575580.5455389,
9950: 884453759.9934568,
9960: 881345544.2757663,
9970: 878250862.9440376,
9980: 875169645.9706867,
9990: 872101823.7461381},
5000: {
10: 1.2686657620556471e-101,
20: 1.2147510901468522e-40,
30: 1.0784359438119513e-20,
40: 6.644806632888718e-11,
50: 3.8597687701104664e-05,
60: 0.22719968663713835,
70: 99.35995854036662,
80: 8687.701348124925,
90: 262344.8542223218,
100: 3790262.780185453,
110: 32197359.716756683,
120: 184348422.5644144,
130: 781511147.7798123,
140: 2622251176.8197403,
150: 7310813379.301067,
160: 17560673506.94481,
170: 37355495596.31305,
180: 71885904020.19241,
190: 127249248362.55402,
200: 209965815507.5334,
210: 326404472526.7038,
220: 482217548459.91327,
230: 681866434701.7426,
240: 928288522250.5859,
250: 1222724003873.0156,
260: 1564694926272.392,
270: 1952111619606.5203,
280: 2381473138872.0713,
290: 2848126857131.9497,
300: 3346555559183.1343,
310: 3870666158905.362,
320: 4414060841196.303,
330: 4970277890013.342,
340: 5532995066240.421,
350: 6096192864406.664,
360: 6654278270641.816,
370: 7202171873625.862,
380: 7735362522399.224,
390: 8249934375727.928,
400: 8742571332991.204,
410: 9210543636689.166,
420: 9651681021262.902,
430: 10064336251055.02,
440: 10447342313778.72,
450: 10799965964060.963,
460: 11121859775912.46,
470: 11413014381374.893,
480: 11673712153465.965,
490: 11904483236613.303,
500: 12106064534472.861,
510: 12279362028244.258,
520: 12425416611891.379,
530: 12545373487161.81,
540: 12640455054190.627,
550: 12711937156432.8,
560: 12761128485989.89,
570: 12789352922088.682,
580: 12797934557260.469,
590: 12788185159078.791,
600: 12761393817184.904,
610: 12718818533367.135,
620: 12661679524746.469,
630: 12591154025150.178,
640: 12508372386361.768,
650: 12414415298238.336,
660: 12310311964027.225,
670: 12197039084123.23,
680: 12075520517657.367,
690: 11946627506484.299,
700: 11811179360210.418,
710: 11669944513816.889,
720: 11523641881168.229,
730: 11372942438278.482,
740: 11218470979680.795,
750: 11060807999673.422,
760: 10900491657668.412,
770: 10738019793425.453,
780: 10573851963691.01,
790: 10408411476759.814,
800: 10242087405806.607,
810: 10075236565571.938,
820: 9908185440192.857,
830: 9741232052709.107,
840: 9574647769103.143,
850: 9408679031700.016,
860: 9243549018405.45,
870: 9079459225639.514,
880: 8916590973964.725,
890: 8755106836345.099,
900: 8595151989733.47,
910: 8436855491295.577,
920: 8280331481061.894,
930: 8125680313171.294,
940: 7972989618152.377,
950: 7822335298891.998,
960: 7673782463079.693,
970: 7527386295002.167,
980: 7383192869602.831,
990: 7241239911726.184,
1000: 7101557503441.967,
1010: 6964168742295.949,
1020: 6829090353267.201,
1030: 6696333257130.915,
1040: 6565903097833.766,
1050: 6437800731389.404,
1060: 6312022678696.728,
1070: 6188561544575.374,
1080: 6067406405203.46,
1090: 5948543166032.418,
1100: 5831954892145.251,
1110: 5717622112917.179,
1120: 5605523102733.317,
1130: 5495634139416.433,
1140: 5387929741919.919,
1150: 5282382888746.81,
1160: 5178965218465.419,
1170: 5077647213605.944,
1180: 4978398369140.238,
1190: 4881187346669.046,
1200: 4785982115367.064,
1210: 4692750080666.377,
1220: 4601458201592.762,
1230: 4512073097607.392,
1240: 4424561145747.846,
1250: 4338888568807.477,
1260: 4255021515240.621,
1270: 4172926131432.8413,
1280: 4092568626930.067,
1290: 4013915333178.263,
1300: 3936932756285.639,
1310: 3861587624282.4478,
1320: 3787846929318.995,
1330: 3715677965210.2603,
1340: 3645048360705.616,
1350: 3575926108834.145,
1360: 3508279592650.1562,
1370: 3442077607679.249,
1380: 3377289381342.781,
1390: 3313884589617.787,
1400: 3251833371169.8276,
1410: 3191106339178.3506,
1420: 3131674591057.2827,
1430: 3073509716258.0884,
1440: 3016583802328.0996,
1450: 2960869439383.516,
1460: 2906339723144.103,
1470: 2852968256665.0913,
1480: 2800729150891.1646,
1490: 2749597024147.482,
1500: 2699547000673.642,
1510: 2650554708297.928,
1520: 2602596275341.4136,
1530: 2555648326834.2305,
1540: 2509687980119.586,
1550: 2464692839914.93,
1560: 2420640992893.9395,
1570: 2377511001847.645,
1580: 2335281899478.197,
1590: 2293933181874.167,
1600: 2253444801712.1387,
1610: 2213797161225.487,
1620: 2174971104977.6362,
1630: 2136947912473.8599,
1640: 2099709290642.587,
1650: 2063237366214.4482,
1660: 2027514678024.622,
1670: 1992524169261.78,
1680: 1958249179684.6357,
1690: 1924673437825.1328,
1700: 1891781053195.4666,
1710: 1859556508514.3577,
1720: 1827984651966.524,
1730: 1797050689507.7334,
1740: 1766740177226.6138,
1750: 1737039013773.0713,
1760: 1707933432862.1335,
1770: 1679409995860.9387,
1780: 1651455584465.7104,
1790: 1624057393474.6475,
1800: 1597202923661.9087,
1810: 1570879974757.1235,
1820: 1545076638534.2153,
1830: 1519781292012.7205,
1840: 1494982590774.2278,
1850: 1470669462396.0896,
1860: 1446831100004.0623,
1870: 1423456955945.166,
1880: 1400536735581.6335,
1890: 1378060391206.5276,
1900: 1356018116081.24,
1910: 1334400338594.88,
1920: 1313197716545.2458,
1930: 1292401131540.889,
1940: 1272001683523.5566,
1950: 1251990685410.1516,
1960: 1232359657853.1377,
1970: 1213100324118.2393,
1980: 1194204605078.0886,
1990: 1175664614320.4429,
2000: 1157472653369.4233,
2010: 1139621207018.2,
2020: 1122102938771.4343,
2030: 1104910686395.7659,
2040: 1088037457576.5522,
2050: 1071476425679.037,
2060: 1055220925612.0895,
2070: 1039264449792.6287,
2080: 1023600644208.824,
2090: 1008223304580.1523,
2100: 993126372612.3845,
2110: 978303932345.5569,
2120: 963750206593.0055,
2130: 949459553469.5254,
2140: 935426463006.7385,
2150: 921645553853.7559,
2160: 908111570061.2391,
2170: 894819377946.984,
2180: 881763963041.1619,
2190: 868940427109.386,
2200: 856343985251.7738,
2210: 843969963076.2278,
2220: 831813793944.1587,
2230: 819871016286.9135,
2240: 808137270991.1938,
2250: 796608298851.7905,
2260: 785279938089.9662,
2270: 774148121935.8751,
2280: 763208876273.4172,
2290: 752458317345.967,
2300: 741892649521.4453,
2310: 731508163115.2296,
2320: 721301232269.4397,
2330: 711268312887.1467,
2340: 701405940620.1116,
2350: 691710728908.6665,
2360: 682179367072.3943,
2370: 672808618450.2952,
2380: 663595318589.142,
2390: 654536373478.7831,
2400: 645628757833.1561,
2410: 636869513415.8188,
2420: 628255747408.8237,
2430: 619784630823.8049,
2440: 611453396954.155,
2450: 603259339867.2135,
2460: 595199812935.4044,
2470: 587272227405.2936,
2480: 579474051003.5607,
2490: 571802806578.9021,
2500: 564256070778.9153,
2510: 556831472761.0271,
2520: 549526692936.5657,
2530: 542339461747.0902,
2540: 535267558472.11646,
2550: 528308810067.40466,
2560: 521461090032.9903,
2570: 514722317310.1672,
2580: 508090455206.6467,
2590: 501563510349.1434,
2600: 495139531662.6506,
2610: 488816609375.69855,
2620: 482592874050.8956,
2630: 476466495640.0836,
2640: 470435682563.44073,
2650: 464498680811.9054,
2660: 458653773072.28595,
2670: 452899277874.46295,
2680: 447233548760.0843,
2690: 441654973472.18823,
2700: 436161973165.1944,
2710: 430753001634.71954,
2720: 425426544566.6931,
2730: 420181118805.2557,
2740: 415015271638.9474,
2750: 409927580104.6956,
2760: 404916650309.13214,
2770: 399981116766.7811,
2780: 395119641754.66956,
2790: 390330914682.9283,
2800: 385613651480.9595,
2810: 380966593998.75977,
2820: 376388509422.9995,
2830: 371878189707.4722,
2840: 367434451017.5299,
2850: 363056133188.14465,
2860: 358742099195.2324,
2870: 354491234639.8949,
2880: 350302447245.23926,
2890: 346174666365.4502,
2900: 342106842506.7883,
2910: 338097946860.2099,
2920: 334146970845.30255,
2930: 330252925665.24115,
2940: 326414841872.4821,
2950: 322631768944.9105,
2960: 318902774872.1773,
2970: 315226945751.9562,
2980: 311603385395.86914,
2990: 308031214944.82666,
3000: 304509572493.5437,
3010: 301037612723.99365,
3020: 297614506547.5716,
3030: 294239440755.743,
3040: 290911617678.96014,
3050: 287630254853.6374,
3060: 284394584696.9755,
3070: 281203854189.4401,
3080: 278057324564.6945,
3090: 274954271006.801,
3100: 271893982354.50558,
3110: 268875760812.4266,
3120: 265898921668.97076,
3130: 262962793020.8118,
3140: 260066715503.76102,
3150: 257210042029.87204,
3160: 254392137530.6237,
3170: 251612378706.02545,
3180: 248870153779.50146,
3190: 246164862258.40512,
3200: 243495914700.02747,
3210: 240862732482.95795,
3220: 238264747583.66956,
3230: 235701402358.19556,
3240: 233172149328.77216,
3250: 230676450975.32635,
3260: 228213779531.68597,
3270: 225783616786.39868,
3280: 223385453888.0458,
3290: 221018791154.9399,
3300: 218683137889.09836,
3310: 216378012194.39075,
3320: 214102940798.75565,
3330: 211857458880.389,
3340: 209641109897.8069,
3350: 207453445423.689,
3360: 205294024982.41098,
3370: 203162415891.17676,
3380: 201058193104.66406,
3390: 198980939063.09628,
3400: 196930243543.66254,
3410: 194905703515.20123,
3420: 192906922996.07217,
3430: 190933512915.138,
3440: 188985090975.78333,
3450: 187061281522.898,
3460: 185161715412.75366,
3470: 183286029885.70557,
3480: 181433868441.65216,
3490: 179604880718.18805,
3500: 177798722371.38562,
3510: 176015054959.14432,
3520: 174253545827.04602,
3530: 172513867996.6598,
3540: 170795700056.23572,
3550: 169098726053.7347,
3560: 167422635392.13788,
3570: 165767122726.98288,
3580: 164131887866.0759,
3590: 162516635671.3286,
3600: 160921075962.66983,
3610: 159344923423.9856,
3620: 157787897511.03925,
3630: 156249722361.3274,
3640: 154730126705.82568,
3650: 153228843782.58246,
3660: 151745611252.11664,
3670: 150280171114.57993,
3680: 148832269628.64175,
3690: 147401657232.05865,
3700: 145988088463.88965,
3710: 144591321888.31934,
3720: 143211120020.05386,
3730: 141847249251.25342,
3740: 140499479779.9655,
3750: 139167585540.02707,
3760: 137851344132.40192,
3770: 136550536757.9202,
3780: 135264948151.38937,
3790: 133994366517.04663,
3800: 132738583465.32109,
3810: 131497393950.87877,
3820: 130270596211.91989,
3830: 129057991710.70149,
3840: 127859385075.25932,
3850: 126674584042.30023,
3860: 125503399401.24103,
3870: 124345644939.36835,
3880: 123201137388.0938,
3890: 122069696370.2821,
3900: 120951144348.62692,
3910: 119845306575.05255,
3920: 118752011041.11945,
3930: 117671088429.41063,
3940: 116602372065.87828,
3950: 115545697873.12955,
3960: 114500904324.632,
3970: 113467832399.81737,
3980: 112446325540.06548,
3990: 111436229605.54878,
4000: 110437392832.91953,
4010: 109449665793.82085,
4020: 108472901354.2045,
4030: 107506954634.43825,
4040: 106551682970.18565,
4050: 105606945874.04169,
4060: 104672604997.90916,
4070: 103748524096.09862,
4080: 102834568989.13812,
4090: 101930607528.2763,
4100: 101036509560.66588,
4110: 100152146895.2109,
4120: 99277393269.06654,
4130: 98412124314.77563,
4140: 97556217528.02962,
4150: 96709552236.04074,
4160: 95872009566.51228,
4170: 95043472417.19525,
4180: 94223825426.01828,
4190: 93412954941.78036,
4200: 92610748995.3926,
4210: 91817097271.65967,
4220: 91031891081.58914,
4230: 90255023335.2173,
4240: 89486388514.9414,
4250: 88725882649.34814,
4260: 87973403287.52753,
4270: 87228849473.86292,
4280: 86492121723.28716,
4290: 85763121996.99503,
4300: 85041753678.60371,
4310: 84327921550.75113,
4320: 83621531772.12389,
4330: 82922491854.906,
4340: 82230710642.63997,
4350: 81546098288.49165,
4360: 80868566233.91116,
4370: 80198027187.68208,
4380: 79534395105.34982,
4390: 78877585169.02364,
4400: 78227513767.54327,
4410: 77584098477.00354,
4420: 76947258041.62976,
4430: 76316912354.99683,
4440: 75692982441.58543,
4450: 75075390438.66876,
4460: 74464059578.52257,
4470: 73858914170.95303,
4480: 73259879586.13588,
4490: 72666882237.76039,
4500: 72079849566.47264,
4510: 71498710023.6118,
4520: 70923393055.23444,
4530: 70353829086.42033,
4540: 69789949505.8549,
4550: 69231686650.68289,
4560: 68678973791.62749,
4570: 68131745118.37061,
4580: 67589935725.18856,
4590: 67053481596.838524,
4600: 66522319594.69096,
4610: 65996387443.103134,
4620: 65475623716.029305,
4630: 64959967823.86305,
4640: 64449360000.5067,
4650: 63943741290.66477,
4660: 63443053537.35577,
4670: 62947239369.639244,
4680: 62456242190.55349,
4690: 61970006165.259926,
4700: 61488476209.39056,
4710: 61011597977.59426,
4720: 60539317852.2787,
4730: 60071582932.54338,
4740: 59608341023.301186,
4750: 59149540624.584114,
4760: 58695130921.030365,
4770: 58245061771.548706,
4780: 57799283699.15755,
4790: 57357747880.994965,
4800: 56920406138.49664,
4810: 56487210927.738846,
4820: 56058115329.94276,
4830: 55633073042.138016,
4840: 55212038367.981926,
4850: 54794966208.73153,
4860: 54381812054.366104,
4870: 53972531974.85687,
4880: 53567082611.58146,
4890: 53165421168.880554,
4900: 52767505405.753784,
4910: 52373293627.69282,
4920: 51982744678.648544,
4930: 51595817933.130684,
4940: 51212473288.436554,
4950: 50832671157.00749,
4960: 50456372458.90984,
4970: 50083538614.43902,
4980: 49714131536.84381,
4990: 49348113625.16898,
5000: 48985447757.21427,
5010: 48626097282.60742,
5020: 48270026015.98913,
5030: 47917198230.30836,
5040: 47567578650.22543,
5050: 47221132445.62167,
5060: 46877825225.21295,
5070: 46537623030.266045,
5080: 46200492328.41531,
5090: 45866400007.57851,
5100: 45535313369.96948,
5110: 45207200126.206375,
5120: 44882028389.513565,
5130: 44559766670.015564,
5140: 44240383869.1215,
5150: 43923849273.99839,
5160: 43610132552.13191,
5170: 43299203745.97253,
5180: 42991033267.66651,
5190: 42685591893.869286,
5200: 42382850760.640526,
5210: 42082781358.41902,
5220: 41785355527.076294,
5230: 41490545451.047264,
5240: 41198323654.53716,
5250: 40908662996.802605,
5260: 40621536667.50639,
5270: 40336918182.14405,
5280: 40054781377.54157,
5290: 39775100407.422356,
5300: 39497849738.04281,
5310: 39223004143.895294,
5320: 38950538703.476845,
5330: 38680428795.12345,
5340: 38412650092.907455,
5350: 38147178562.59851,
5360: 37883990457.68579,
5370: 37623062315.461105,
5380: 37364370953.161644,
5390: 37107893464.1714,
5400: 36853607214.280266,
5410: 36601489837.99988,
5420: 36351519234.9352,
5430: 36103673566.21095,
5440: 35857931250.9519,
5450: 35614270962.816345,
5460: 35372671626.58151,
5470: 35133112414.78037,
5480: 34895572744.38887,
5490: 34660032273.56262,
5500: 34426470898.422516,
5510: 34194868749.88825,
5520: 33965206190.558884,
5530: 33737463811.64001,
5540: 33511622429.916405,
5550: 33287663084.76958,
5560: 33065567035.23951,
5570: 32845315757.12979,
5580: 32626890940.15548,
5590: 32410274485.132885,
5600: 32195448501.21094,
5610: 31982395303.14282,
5620: 31771097408.59804,
5630: 31561537535.51344,
5640: 31353698599.48328,
5650: 31147563711.187176,
5660: 30943116173.855545,
5670: 30740339480.77204,
5680: 30539217312.812202,
5690: 30339733536.017624,
5700: 30141872199.205563,
5710: 29945617531.61292,
5720: 29750953940.574196,
5730: 29557866009.23305,
5740: 29366338494.286667,
5750: 29176356323.762573,
5760: 28987904594.82738,
5770: 28800968571.626926,
5780: 28615533683.157146,
5790: 28431585521.165615,
5800: 28249109838.08271,
5810: 28068092544.982513,
5820: 27888519709.572456,
5830: 27710377554.211685,
5840: 27533652453.957344,
5850: 27358330934.638615,
5860: 27184399670.95796,
5870: 27011845484.61909,
5880: 26840655342.48128,
5890: 26670816354.739784,
5900: 26502315773.13165,
5910: 26335140989.166763,
5920: 26169279532.383606,
5930: 26004719068.629498,
5940: 25841447398.364693,
5950: 25679452454.990303,
5960: 25518722303.19925,
5970: 25359245137.350346,
5980: 25201009279.86481,
5990: 25044003179.64506,
6000: 24888215410.515327,
6010: 24733634669.683846,
6020: 24580249776.226246,
6030: 24428049669.589764,
6040: 24277023408.118217,
6050: 24127160167.59693,
6060: 23978449239.817883,
6070: 23830880031.164345,
6080: 23684442061.214924,
6090: 23539124961.36667,
6100: 23394918473.476845,
6110: 23251812448.523354,
6120: 23109796845.283222,
6130: 22968861729.02907,
6140: 22828997270.243256,
6150: 22690193743.34938,
6160: 22552441525.460987,
6170: 22415731095.14706,
6180: 22280053031.214237,
6190: 22145398011.505363,
6200: 22011756811.71411,
6210: 21879120304.215694,
6220: 21747479456.91299,
6230: 21616825332.09829,
6240: 21487149085.33015,
6250: 21358441964.325264,
6260: 21230695307.865047,
6270: 21103900544.716824,
6280: 20978049192.569237,
6290: 20853132856.981873,
6300: 20729143230.3488,
6310: 20606072090.875645,
6320: 20483911301.57037,
6330: 20362652809.247257,
6340: 20242288643.544006,
6350: 20122810915.951782,
6360: 20004211818.857975,
6370: 19886483624.601467,
6380: 19769618684.540356,
6390: 19653609428.13182,
6400: 19538448362.023926,
6410: 19424128069.159405,
6420: 19310641207.890938,
6430: 19197980511.1081,
6440: 19086138785.375534,
6450: 18975108910.08229,
6460: 18864883836.602226,
6470: 18755456587.465206,
6480: 18646820255.5391,
6490: 18538968003.222134,
6500: 18431893061.645832,
6510: 18325588729.888092,
6520: 18220048374.19636,
6530: 18115265427.220837,
6540: 18011233387.257416,
6550: 17907945817.500298,
6560: 17805396345.304287,
6570: 17703578661.456223,
6580: 17602486519.455936,
6590: 17502113734.806225,
6600: 17402454184.311867,
6610: 17303501805.387497,
6620: 17205250595.374268,
6630: 17107694610.865137,
6640: 17010827967.038687,
6650: 16914644837.001316,
6660: 16819139451.137745,
6670: 16724306096.469625,
6680: 16630139116.022264,
6690: 16536632908.199291,
6700: 16443781926.165087,
6710: 16351580677.234982,
6720: 16260023722.273083,
6730: 16169105675.09755,
6740: 16078821201.893366,
6750: 15989165020.632305,
6760: 15900131900.50017,
6770: 15811716661.331139,
6780: 15723914173.04906,
6790: 15636719355.11573,
6800: 15550127175.985933,
6810: 15464132652.56921,
6820: 15378730849.69827,
6830: 15293916879.603968,
6840: 15209685901.396633,
6850: 15126033120.55392,
6860: 15042953788.414762,
6870: 14960443201.679636,
6880: 14878496701.916914,
6890: 14797109675.075182,
6900: 14716277551.001572,
6910: 14635995802.96592,
6920: 14556259947.19071,
6930: 14477065542.38673,
6940: 14398408189.294363,
6950: 14320283530.23042,
6960: 14242687248.64045,
6970: 14165615068.656477,
6980: 14089062754.66006,
6990: 14013026110.850576,
7000: 13937500980.818802,
7010: 13862483247.125475,
7020: 13787968830.885017,
7030: 13713953691.354204,
7040: 13640433825.525787,
7050: 13567405267.726915,
7060: 13494864089.222393,
7070: 13422806397.822674,
7080: 13351228337.496468,
7090: 13280126087.988024,
7100: 13209495864.4389,
7110: 13139333917.014244,
7120: 13069636530.533468,
7130: 13000400024.105392,
7140: 12931620750.767529,
7150: 12863295097.12977,
7160: 12795419483.022211,
7170: 12727990361.147121,
7180: 12661004216.734993,
7190: 12594457567.204674,
7200: 12528346961.827414,
7210: 12462668981.394894,
7220: 12397420237.891138,
7230: 12332597374.16822,
7240: 12268197063.625782,
7250: 12204216009.894276,
7260: 12140650946.521881,
7270: 12077498636.665075,
7280: 12014755872.782787,
7290: 11952419476.334084,
7300: 11890486297.479362,
7310: 11828953214.785019,
7320: 11767817134.93149,
7330: 11707074992.424673,
7340: 11646723749.310667,
7350: 11586760394.893837,
7360: 11527181945.458025,
7370: 11467985443.991049,
7380: 11409167959.91231,
7390: 11350726588.803518,
7400: 11292658452.14252,
7410: 11234960697.040155,
7420: 11177630495.98008,
7430: 11120665046.561586,
7440: 11064061571.24539,
7450: 11007817317.102167,
7460: 10951929555.564102,
7470: 10896395582.17915,
7480: 10841212716.368103,
7490: 10786378301.18442,
7500: 10731889703.076754,
7510: 10677744311.65416,
7520: 10623939539.453909,
7530: 10570472821.712017,
7540: 10517341616.136217,
7550: 10464543402.681568,
7560: 10412075683.328552,
7570: 10359935981.86366,
7580: 10308121843.662422,
7590: 10256630835.474844,
7600: 10205460545.213299,
7610: 10154608581.74268,
7620: 10104072574.673,
7630: 10053850174.154211,
7640: 10003939050.6733,
7650: 9954336894.85372,
7660: 9905041417.256962,
7670: 9856050348.186316,
7680: 9807361437.49285,
7690: 9758972454.383488,
7700: 9710881187.231241,
7710: 9663085443.387472,
7720: 9615583048.996273,
7730: 9568371848.81085,
7740: 9521449706.011946,
7750: 9474814502.028217,
7760: 9428464136.358603,
7770: 9382396526.396633,
7780: 9336609607.25663,
7790: 9291101331.601843,
7800: 9245869669.474398,
7810: 9200912608.127157,
7820: 9156228151.857363,
7830: 9111814321.84209,
7840: 9067669155.975517,
7850: 9023790708.707895,
7860: 8980177050.886307,
7870: 8936826269.597143,
7880: 8893736468.010225,
7890: 8850905765.224688,
7900: 8808332296.116455,
7910: 8766014211.187365,
7920: 8723949676.415956,
7930: 8682136873.109787,
7940: 8640573997.759382,
7950: 8599259261.893757,
7960: 8558190891.937402,
7970: 8517367129.068894,
7980: 8476786229.080955,
7990: 8436446462.242002,
8000: 8396346113.159188,
8010: 8356483480.642905,
8020: 8316856877.572674,
8030: 8277464630.764513,
8040: 8238305080.839688,
8050: 8199376582.094843,
8060: 8160677502.373482,
8070: 8122206222.93888,
8080: 8083961138.348247,
8090: 8045940656.328253,
8100: 8008143197.651885,
8110: 7970567196.01654,
8120: 7933211097.923448,
8130: 7896073362.55833,
8140: 7859152461.673321,
8150: 7822446879.470094,
8160: 7785955112.48426,
8170: 7749675669.470907,
8180: 7713607071.29138,
8190: 7677747850.801237,
8200: 7642096552.739325,
8210: 7606651733.618052,
8220: 7571411961.61477,
8230: 7536375816.464317,
8240: 7501541889.352614,
8250: 7466908782.81142,
8260: 7432475110.614135,
8270: 7398239497.672684,
8280: 7364200579.9354725,
8290: 7330357004.28639,
8300: 7296707428.444834,
8310: 7263250520.866789,
8320: 7229984960.6468725,
8330: 7196909437.421459,
8340: 7164022651.272709,
8350: 7131323312.633634,
8360: 7098810142.194112,
8370: 7066481870.807879,
8380: 7034337239.40042,
8390: 7002374998.877866,
8400: 6970593910.036729,
8410: 6938992743.474656,
8420: 6907570279.502005,
8430: 6876325308.05434,
8440: 6845256628.605839,
8450: 6814363050.08353,
8460: 6783643390.782424,
8470: 6753096478.281498,
8480: 6722721149.360525,
8490: 6692516249.91772,
8500: 6662480634.88825,
8510: 6632613168.163517,
8520: 6602912722.5113,
8530: 6573378179.496649,
8540: 6544008429.403609,
8550: 6514802371.157696,
8560: 6485758912.249165,
8570: 6456876968.657033,
8580: 6428155464.773866,
8590: 6399593333.331313,
8600: 6371189515.326377,
8610: 6342942959.948405,
8620: 6314852624.506841,
8630: 6286917474.359644,
8640: 6259136482.842451,
8650: 6231508631.198427,
8660: 6204032908.508806,
8670: 6176708311.624117,
8680: 6149533845.096093,
8690: 6122508521.110249,
8700: 6095631359.419105,
8710: 6068901387.276098,
8720: 6042317639.37012,
8730: 6015879157.760693,
8740: 5989584991.813781,
8750: 5963434198.13826,
8760: 5937425840.522955,
8770: 5911558989.874333,
8780: 5885832724.154794,
8790: 5860246128.321541,
8800: 5834798294.266084,
8810: 5809488320.75429,
8820: 5784315313.367036,
8830: 5759278384.441451,
8840: 5734376653.012693,
8850: 5709609244.756312,
8860: 5684975291.931168,
8870: 5660473933.322893,
8880: 5636104314.187895,
8890: 5611865586.197937,
8900: 5587756907.385184,
8910: 5563777442.087823,
8920: 5539926360.896215,
8930: 5516202840.599522,
8940: 5492606064.132888,
8950: 5469135220.525092,
8960: 5445789504.846707,
8970: 5422568118.158787,
8980: 5399470267.461996,
8990: 5376495165.646263,
9000: 5353642031.440869,
9010: 5330910089.36507,
9020: 5308298569.679136,
9030: 5285806708.335879,
9040: 5263433746.932641,
9050: 5241178932.663722,
9060: 5219041518.273275,
9070: 5197020762.00865,
9080: 5175115927.5741415,
9090: 5153326284.0852165,
9100: 5131651106.02316,
9110: 5110089673.1901245,
9120: 5088641270.664635,
9130: 5067305188.757497,
9140: 5046080722.968109,
9150: 5024967173.941204,
9160: 5003963847.423977,
9170: 4983070054.223636,
9180: 4962285110.165319,
9190: 4941608336.050437,
9200: 4921039057.615379,
9210: 4900576605.49061,
9220: 4880220315.160169,
9230: 4859969526.9215,
9240: 4839823585.845712,
9250: 4819781841.738148,
9260: 4799843649.099373,
9270: 4780008367.086485,
9280: 4760275359.474805,
9290: 4740643994.61991,
9300: 4721113645.420031,
9310: 4701683689.278769,
9320: 4682353508.068188,
9330: 4663122488.092229,
9340: 4643990020.050466,
9350: 4624955499.002175,
9360: 4606018324.330781,
9370: 4587177899.708575,
9380: 4568433633.061782,
9390: 4549784936.535959,
9400: 4531231226.461684,
9410: 4512771923.320572,
9420: 4494406451.711605,
9430: 4476134240.317753,
9440: 4457954721.87291,
9450: 4439867333.129134,
9460: 4421871514.82418,
9470: 4403966711.649315,
9480: 4386152372.217459,
9490: 4368427949.031562,
9500: 4350792898.453322,
9510: 4333246680.672157,
9520: 4315788759.674445,
9530: 4298418603.213075,
9540: 4281135682.777252,
9550: 4263939473.562565,
9560: 4246829454.4413495,
9570: 4229805107.9332924,
9580: 4212865920.176319,
9590: 4196011380.897724,
9600: 4179240983.3855844,
9610: 4162554224.460393,
9620: 4145950604.44698,
9630: 4129429627.1466727,
9640: 4112990799.8096914,
9650: 4096633633.107815,
9660: 4080357641.107264,
9670: 4064162341.2418523,
9680: 4048047254.286355,
9690: 4032011904.330116,
9700: 4016055818.7509093,
9710: 4000178528.1890006,
9720: 3984379566.5214653,
9730: 3968658470.836722,
9740: 3953014781.4092946,
9750: 3937448041.674784,
9760: 3921957798.205082,
9770: 3906543600.683795,
9780: 3891205001.881875,
9790: 3875941557.633469,
9800: 3860752826.811989,
9810: 3845638371.306396,
9820: 3830597755.9976554,
9830: 3815630548.735455,
9840: 3800736320.3150773,
9850: 3785914644.454505,
9860: 3771165097.771703,
9870: 3756487259.762127,
9880: 3741880712.776395,
9890: 3727345041.998192,
9900: 3712879835.422315,
9910: 3698484683.832967,
9920: 3684159180.7821965,
9930: 3669902922.5685453,
9940: 3655715508.215864,
9950: 3641596539.4523425,
9960: 3627545620.6896772,
9970: 3613562359.0024605,
9980: 3599646364.1077194,
9990: 3585797248.3446465},
10000: {
10: 3.887203488469927e-39,
20: 2.1263381225243896e-09,
30: 7.270389972388427,
40: 278006.4431399976,
50: 121288408.9659345,
60: 5899149522.061261,
70: 83912037658.23169,
80: 561941552222.2339,
90: 2300348928646.157,
100: 6718906096241.068,
110: 15430961550717.225,
120: 29705232712018.75,
130: 50070237640716.55,
140: 76207140443906.28,
150: 107089777893860.3,
160: 141249946918281.7,
170: 177055842119693.47,
180: 212936904660538.12,
190: 247531600716775.16,
200: 279762282655718.53,
210: 308853792680412.9,
220: 334314954040919.06,
230: 355899512829198.75,
240: 373558788910910.9,
250: 387394065103131.56,
260: 397613331580524.1,
270: 404494561734100.06,
280: 408356111551676.5,
290: 409533905832335.0,
300: 408364602149916.94,
310: 405173744791887.0,
320: 400267916363576.56,
330: 393929982848871.2,
340: 386416656565035.75,
350: 377957739526733.4,
360: 368756540197383.3,
370: 358991071349785.44,
380: 348815733045389.2,
390: 338363262838112.3,
400: 327746796985811.44,
410: 317061934086171.44,
420: 306388728611300.56,
430: 295793568588974.4,
440: 285330911166871.2,
450: 275044863671471.03,
460: 264970607393980.6,
470: 255135667780876.06,
480: 245561038818550.2,
490: 236262171827290.6,
500: 227249840110142.94,
510: 218530891304550.44,
520: 210108899130680.5,
530: 201984725718295.28,
540: 194157004966510.72,
550: 186622556548648.03,
560: 179376739287466.16,
570: 172413751741437.4,
580: 165726886990893.4,
590: 159308747812487.62,
600: 153151427691746.06,
610: 147246662450968.38,
620: 141585956663931.06,
630: 136160688487699.88,
640: 130962196061762.14,
650: 125981848201140.45,
660: 121211101738207.84,
670: 116641547542579.72,
680: 112264946964805.0,
690: 108073260202968.98,
700: 104058667877434.48,
710: 100213586913833.05,
720: 96530681674499.19,
730: 93002871140611.3,
740: 89623332828517.27,
750: 86385504021556.55,
760: 83283080810925.23,
770: 80310015363826.75,
780: 77460511772606.06,
790: 74729020783302.3,
800: 72110233654788.44,
810: 69599075359284.36,
820: 67190697300562.78,
830: 64880469696787.91,
840: 62663973749908.34,
850: 60536993702254.93,
860: 58495508862928.766,
870: 56535685671251.64,
880: 54653869851588.27,
890: 52846578702902.734,
900: 51110493557181.08,
910: 49442452433094.89,
920: 47839442904775.29,
930: 46298595200129.99,
940: 44817175538612.27,
950: 43392579714599.68,
960: 42022326929447.87,
970: 40704053872753.81,
980: 39435509051300.41,
990: 38214547362498.836,
1000: 37039124907821.55,
1010: 35907294040685.9,
1020: 34817198642449.67,
1030: 33767069619586.863,
1040: 32755220614682.367,
1050: 31780043923596.305,
1060: 30840006610976.176,
1070: 29933646816215.633,
1080: 29059570241959.652,
1090: 28216446817317.18,
1100: 27403007528056.76,
1110: 26618041406213.08,
1120: 25860392671718.203,
1130: 25128958018878.918,
1140: 24422684040749.35,
1150: 23740564784686.25,
1160: 23081639432622.07,
1170: 22444990099843.57,
1180: 21829739746316.617,
1190: 21235050194852.543,
1200: 20660120250660.996,
1210: 20104183917082.36,
1220: 19566508702532.96,
1230: 19046394013932.94,
1240: 18543169632113.94,
1250: 18056194264925.29,
1260: 17584854173970.102,
1270: 17128561871107.727,
1280: 16686754881055.69,
1290: 16258894566612.695,
1300: 15844465013204.818,
1310: 15442971969628.66,
1320: 15053941842030.104,
1330: 14676920738313.15,
1340: 14311473560322.697,
1350: 13957183141286.22,
1360: 13613649426134.18,
1370: 13280488692446.316,
1380: 12957332809892.229,
1390: 12643828536149.426,
1400: 12339636847390.703,
1410: 12044432301535.984,
1420: 11757902432560.957,
1430: 11479747174247.438,
1440: 11209678311847.656,
1450: 10947418960217.227,
1460: 10692703067049.682,
1470: 10445274939919.428,
1480: 10204888795909.674,
1490: 9971308332667.812,
1500: 9744306319793.398,
1510: 9523664209522.291,
1520: 9309171765726.725,
1530: 9100626710303.217,
1540: 8897834386070.225,
1550: 8700607435344.179,
1560: 8508765493407.064,
1570: 8322134896120.353,
1580: 8140548400979.958,
1590: 7963844920944.047,
1600: 7791869270400.879,
1610: 7624471922677.426,
1620: 7461508778520.709,
1630: 7302840945013.951,
1640: 7148334524417.466,
1650: 6997860412451.147,
1660: 6851294105560.181,
1670: 6708515516729.82,
1680: 6569408799437.151,
1690: 6433862179349.25,
1700: 6301767793397.201,
1710: 6173021535874.31,
1720: 6047522911225.066,
1730: 5925174893208.117,
1740: 5805883790132.869,
1750: 5689559115884.34,
1760: 5576113466465.473,
1770: 5465462401799.543,
1780: 5357524332548.412,
1790: 5252220411714.429,
1800: 5149474430805.435,
1810: 5049212720353.256,
1820: 4951364054586.422,
1830: 4855859560067.732,
1840: 4762632628116.536,
1850: 4671618830844.469,
1860: 4582755840641.724,
1870: 4495983352958.896,
1880: 4411243012236.861,
1890: 4328478340844.46,
1900: 4247634670890.275,
1910: 4168659078781.4365,
1920: 4091500322408.2983,
1930: 4016108780839.713,
1940: 3942436396419.0015,
1950: 3870436619156.0615,
1960: 3800064353315.8223,
1970: 3731275906108.105,
1980: 3664028938388.224,
1990: 3598282417282.095,
2000: 3533996570653.425,
2010: 3471132843334.544,
2020: 3409653855045.941,
2030: 3349523359933.0933,
2040: 3290706207652.42,
2050: 3233168305941.308,
2060: 3176876584610.1294,
2070: 3121798960896.9478,
2080: 3067904306128.393,
2090: 3015162413632.585,
2100: 2963543967852.563,
2110: 2913020514610.8525,
2120: 2863564432478.125,
2130: 2815148905200.881,
2140: 2767747895145.189,
2150: 2721336117715.319,
2160: 2675889016707.9644,
2170: 2631382740564.479,
2180: 2587794119485.157,
2190: 2545100643371.1943,
2200: 2503280440561.385,
2210: 2462312257332.1523,
2220: 2422175438130.734,
2230: 2382849906512.722,
2240: 2344316146756.348,
2250: 2306555186127.1245,
2260: 2269548577767.502,
2270: 2233278384187.357,
2280: 2197727161332.0796,
2290: 2162877943206.034,
2300: 2128714227030.1108,
2310: 2095219958912.9377,
2320: 2062379520016.2144,
2330: 2030177713195.3845,
2340: 1998599750097.7117,
2350: 1967631238700.4978,
2360: 1937258171272.9185,
2370: 1907466912745.638,
2380: 1878244189472.9717,
2390: 1849577078373.0415,
2400: 1821452996431.8926,
2410: 1793859690558.1682,
2420: 1766785227775.4216,
2430: 1740217985739.7153,
2440: 1714146643570.6067,
2450: 1688560172984.121,
2460: 1663447829716.7534,
2470: 1638799145229.9697,
2480: 1614603918685.115,
2490: 1590852209179.002,
2500: 1567534328230.8567,
2510: 1544640832511.652,
2520: 1522162516807.2139,
2530: 1500090407206.8145,
2540: 1478415754509.29,
2550: 1457130027839.0234,
2560: 1436224908464.434,
2570: 1415692283811.888,
2580: 1395524241668.2212,
2590: 1375713064565.3284,
2600: 1356251224340.4968,
2610: 1337131376866.4414,
2620: 1318346356945.1892,
2630: 1299889173360.2046,
2640: 1281753004081.3303,
2650: 1263931191617.3699,
2660: 1246417238511.259,
2670: 1229204802973.0386,
2680: 1212287694645.9453,
2690: 1195659870501.1726,
2700: 1179315430856.9685,
2710: 1163248615517.9265,
2720: 1147453800030.4607,
2730: 1131925492050.6084,
2740: 1116658327820.4438,
2750: 1101647068749.5205,
2760: 1086886598097.8843,
2770: 1072371917757.325,
2780: 1058098145127.6643,
2790: 1044060510084.9753,
2800: 1030254352038.7501,
2810: 1016675117075.1373,
2820: 1003318355183.465,
2830: 990179717563.3774,
2840: 977254954009.9905,
2850: 964539910374.578,
2860: 952030526098.3743,
2870: 939722831817.1733,
2880: 927612947034.4762,
2890: 915697077861.0287,
2900: 903971514818.6467,
2910: 892432630706.3186,
2920: 881076878526.635,
2930: 869900789470.6547,
2940: 858900970959.401,
2950: 848074104740.2128,
2960: 837416945036.2754,
2970: 826926316747.6702,
2980: 816599113702.376,
2990: 806432296955.674,
3000: 796422893136.4905,
3010: 786567992839.2399,
3020: 776864749059.783,
3030: 767310375674.1722,
3040: 757902145958.8776,
3050: 748637391151.259,
3060: 739513499049.0631,
3070: 730527912647.7842,
3080: 721678128814.7516,
3090: 712961696998.8556,
3100: 704376217974.8516,
3110: 695919342621.2157,
3120: 687588770730.5648,
3130: 679382249851.6815,
3140: 671297574162.2156,
3150: 663332583371.1616,
3160: 655485161650.2518,
3170: 647753236593.4082,
3180: 640134778203.4552,
3190: 632627797905.2914,
3200: 625230347584.7656,
3210: 617940518652.51,
3220: 610756441132.0171,
3230: 603676282771.2712,
3240: 596698248177.2512,
3250: 589820577972.6666,
3260: 583041547974.2836,
3270: 576359468392.2373,
3280: 569772683049.7372,
3290: 563279568622.59,
3300: 556878533897.9797,
3310: 550568019051.9766,
3320: 544346494945.238,
3330: 538212462436.40784,
3340: 532164451712.71246,
3350: 526201021637.2828,
3360: 520320759112.73865,
3370: 514522278460.5877,
3380: 508804220816.00543,
3390: 503165253537.56995,
3400: 497604069631.54987,
3410: 492119387190.34467,
3420: 486709948844.69226,
3430: 481374521229.2724,
3440: 476111894461.34125,
3450: 470920881632.0503,
3460: 465800318310.1027,
3470: 460749062057.4196,
3480: 455765991956.4944,
3490: 450850008149.12195,
3500: 446000031386.20166,
3510: 441215002588.32056,
3520: 436493882416.8252,
3530: 431835650855.1166,
3540: 427239306799.88696,
3550: 422703867662.0448,
3560: 418228368977.071,
3570: 413811864024.5609,
3580: 409453423456.71075,
3590: 405152134935.5198,
3600: 400907102778.47723,
3610: 396717447612.5164,
3620: 392582306036.025,
3630: 388500830288.7002,
3640: 384472187929.0492,
3650: 380495561519.3396,
3660: 376570148317.8077,
3670: 372695159977.94135,
3680: 368869822254.6568,
3690: 365093374717.1939,
3700: 361365070468.56067,
3710: 357684175871.36255,
3720: 354049970279.8525,
3730: 350461745778.0513,
3740: 346918806923.7793,
3750: 343420470498.4561,
3760: 339966065262.5251,
3770: 336554931716.35895,
3780: 333186421866.5137,
3790: 329859898997.1975,
3800: 326574737446.82556,
3810: 323330322389.536,
3820: 320126049621.5456,
3830: 316961325352.2228,
3840: 313835565999.7712,
3850: 310748197991.3997,
3860: 307698657567.88074,
3870: 304686390592.38226,
3880: 301710852363.4753,
3890: 298771507432.21216,
3900: 295867829423.1807,
3910: 292999300859.4356,
3920: 290165412991.21655,
3930: 287365665628.3618,
3940: 284599566976.3272,
3950: 281866633475.72656,
3960: 279166389645.3106,
3970: 276498367928.30023,
3980: 273862108541.99686,
3990: 271257159330.59055,
4000: 268683075621.09402,
4010: 266139420082.32556,
4020: 263625762586.87003,
4030: 261141680075.95142,
4040: 258686756427.1439,
4050: 256260582324.86032,
4060: 253862755133.55032,
4070: 251492878773.54626,
4080: 249150563599.4963,
4090: 246835426281.32416,
4100: 244547089687.6584,
4110: 242285182771.67264,
4120: 240049340459.28366,
4130: 237839203539.6523,
4140: 235654418557.93512,
4150: 233494637710.23468,
4160: 231359518740.69992,
4170: 229248724840.72668,
4180: 227161924550.21158,
4190: 225098791660.81345,
4200: 223059005121.17532,
4210: 221042248944.06415,
4220: 219048212115.38684,
4230: 217076588505.03757,
4240: 215127076779.5368,
4250: 213199380316.424,
4260: 211293207120.3616,
4270: 209408269740.9155,
4280: 207544285191.97327,
4290: 205700974872.7637,
4300: 203878064490.44406,
4310: 202075283984.2189,
4320: 200292367450.95844,
4330: 198529053072.2818,
4340: 196785083043.07498,
4350: 195060203501.41135,
4360: 193354164459.84384,
4370: 191666719738.041,
4380: 189997626896.73456,
4390: 188346647172.9534,
4400: 186713545416.51468,
4410: 185098090027.74515,
4420: 183500052896.4062,
4430: 181919209341.79736,
4440: 180355338054.01263,
4450: 178808221036.3259,
4460: 177277643548.6789,
4470: 175763394052.2524,
4480: 174265264155.09464,
4490: 172783048558.78387,
4500: 171316545006.10782,
4510: 169865554229.73236,
4520: 168429879901.8445,
4530: 167009328584.7448,
4540: 165603709682.37253,
4550: 164212835392.74112,
4560: 162836520661.2683,
4570: 161474583134.9795,
4580: 160126843117.56726,
4590: 158793123525.29022,
4600: 157473249843.69214,
4610: 156167050085.1254,
4620: 154874354747.06277,
4630: 153594996771.17993,
4640: 152328811503.19376,
4650: 151075636653.44098,
4660: 149835312258.182,
4670: 148607680641.6149,
4680: 147392586378.585,
4690: 146189876257.97702,
4700: 144999399246.7739,
4710: 143821006454.77136,
4720: 142654551099.9331,
4730: 141499888474.37375,
4740: 140356875910.95868,
4750: 139225372750.50632,
4760: 138105240309.58224,
4770: 136996341848.87207,
4780: 135898542542.12318,
4790: 134811709445.64221,
4800: 133735711468.3387,
4810: 132670419342.30336,
4820: 131615705593.91013,
4830: 130571444515.43245,
4840: 129537512137.16336,
4850: 128513786200.0291,
4860: 127500146128.68689,
4870: 126496473005.09743,
4880: 125502649542.56262,
4890: 124518560060.22,
4900: 123544090457.98364,
4910: 122579128191.92497,
4920: 121623562250.08247,
4930: 120677283128.69449,
4940: 119740182808.84464,
4950: 118812154733.51408,
4960: 117893093785.03029,
4970: 116982896262.90765,
4980: 116081459862.06967,
4990: 115188683651.44702,
5000: 114304468052.94405,
5010: 113428714820.76639,
5020: 112561327021.1031,
5030: 111702209012.156,
5040: 110851266424.51071,
5050: 110008406141.842,
5060: 109173536281.9472,
5070: 108346566178.10214,
5080: 107527406360.73296,
5090: 106715968539.39763,
5100: 105912165585.0727,
5110: 105115911512.7375,
5120: 104327121464.25215,
5130: 103545711691.52306,
5140: 102771599539.95009,
5150: 102004703432.15112,
5160: 101244942851.95879,
5170: 100492238328.68286,
5180: 99746511421.6358,
5190: 99007684704.91426,
5200: 98275681752.43373,
5210: 97550427123.21039,
5220: 96831846346.88611,
5230: 96119865909.4921,
5240: 95414413239.44753,
5250: 94715416693.78673,
5260: 94022805544.61386,
5270: 93336509965.77814,
5280: 92656461019.7678,
5290: 91982590644.81654,
5300: 91314831642.22116,
5310: 90653117663.86476,
5320: 89997383199.94186,
5330: 89347563566.88364,
5340: 88703594895.47685,
5350: 88065414119.17537,
5360: 87432958962.59982,
5370: 86806167930.22217,
5380: 86184980295.23163,
5390: 85569336088.5795,
5400: 84959176088.19858,
5410: 84354441808.39522,
5420: 83755075489.41048,
5430: 83161020087.14655,
5440: 82572219263.05756,
5450: 81988617374.19968,
5460: 81410159463.43915,
5470: 80836791249.81488,
5480: 80268459119.05313,
5490: 79705110114.23149,
5500: 79146691926.58928,
5510: 78593152886.48299,
5520: 78044441954.48228,
5530: 77500508712.60603,
5540: 76961303355.69478,
5550: 76426776682.91757,
5560: 75896880089.41133,
5570: 75371565558.04951,
5580: 74850785651.33876,
5590: 74334493503.4406,
5600: 73822642812.31688,
5610: 73315187831.99586,
5620: 72812083364.95775,
5630: 72313284754.6367,
5640: 71818747878.03879,
5650: 71328429138.47217,
5660: 70842285458.38896,
5670: 70360274272.3361,
5680: 69882353520.01385,
5690: 69408481639.43973,
5700: 68938617560.21645,
5710: 68472720696.90173,
5720: 68010750942.47879,
5730: 67552668661.92525,
5740: 67098434685.87926,
5750: 66648010304.40065,
5760: 66201357260.82667,
5770: 65758437745.71905,
5780: 65319214390.902534,
5790: 64883650263.59221,
5800: 64451708860.60851,
5810: 64023354102.67904,
5820: 63598550328.824486,
5830: 63177262290.82861,
5840: 62759455147.79003,
5850: 62345094460.7546,
5860: 61934146187.427605,
5870: 61526576676.963585,
5880: 61122352664.83326,
5890: 60721441267.76604,
5900: 60323809978.76653,
5910: 59929426662.20465,
5920: 59538259548.9773,
5930: 59150277231.74091,
5940: 58765448660.21358,
5950: 58383743136.54579,
5960: 58005130310.75833,
5970: 57629580176.24676,
5980: 57257063065.350746,
5990: 56887549644.98788,
6000: 56521010912.35056,
6010: 56157418190.66498,
6020: 55796743125.01118,
6030: 55438957678.20324,
6040: 55084034126.72911,
6050: 54731945056.7478,
6060: 54382663360.14508,
6070: 54036162230.64428,
6080: 53692415159.97393,
6090: 53351395934.08896,
6100: 53013078629.446335,
6110: 52677437609.33335,
6120: 52344447520.248146,
6130: 52014083288.33144,
6140: 51686320115.848595,
6150: 51361133477.72184,
6160: 51038499118.11059,
6170: 50718393047.040924,
6180: 50400791537.08144,
6190: 50085671120.06651,
6200: 49773008583.865036,
6210: 49462780969.194756,
6220: 49154965566.4805,
6230: 48849539912.756966,
6240: 48546481788.61403,
6250: 48245769215.18505,
6260: 47947380451.17647,
6270: 47651293989.939095,
6280: 47357488556.57937,
6290: 47065943105.11122,
6300: 46776636815.64672,
6310: 46489549091.625534,
6320: 46204659557.082565,
6330: 45921948053.95327,
6340: 45641394639.41566,
6350: 45362979583.268776,
6360: 45086683365.34728,
6370: 44812486672.9709,
6380: 44540370398.42897,
6390: 44270315636.49925,
6400: 44002303682.00012,
6410: 43736316027.37641,
6420: 43472334360.317566,
6430: 43210340561.40835,
6440: 42950316701.8113,
6450: 42692245040.98005,
6460: 42436108024.40383,
6470: 42181888281.382286,
6480: 41929568622.830025,
6490: 41679132039.11067,
6500: 41430561697.89993,
6510: 41183840942.07725,
6520: 40938953287.64561,
6530: 40695882421.679016,
6540: 40454612200.29741,
6550: 40215126646.66825,
6560: 39977409949.03512,
6570: 39741446458.771835,
6580: 39507220688.46271,
6590: 39274717310.0082,
6600: 39043921152.75535,
6610: 38814817201.65283,
6620: 38587390595.43071,
6630: 38361626624.80342,
6640: 38137510730.69704,
6650: 37915028502.49917,
6660: 37694165676.33221,
6670: 37474908133.34874,
6680: 37257241898.04953,
6690: 37041153136.62318,
6700: 36826628155.30753,
6710: 36613653398.772194,
6720: 36402215448.5222,
6730: 36192301021.322266,
6740: 35983896967.641464,
6750: 35776990270.11788,
6760: 35571568042.04306,
6770: 35367617525.86626,
6780: 35165126091.717545,
6790: 34964081235.950096,
6800: 34764470579.701126,
6810: 34566281867.47115,
6820: 34369502965.72152,
6830: 34174121861.489944,
6840: 33980126661.023388,
6850: 33787505588.428726,
6860: 33596246984.340107,
6870: 33406339304.60375,
6880: 33217771118.979008,
6890: 33030531109.85608,
6900: 32844608070.98982,
6910: 32659990906.2497,
6920: 32476668628.38527,
6930: 32294630357.80747,
6940: 32113865321.384937,
6950: 31934362851.255714,
6960: 31756112383.653675,
6970: 31579103457.749695,
6980: 31403325714.50738,
6990: 31228768895.552856,
7000: 31055422842.05897,
7010: 30883277493.643147,
7020: 30712322887.278984,
7030: 30542549156.221478,
7040: 30373946528.945415,
7050: 30206505328.097076,
7060: 30040215969.458633,
7070: 29875068960.925846,
7080: 29711054901.497585,
7090: 29548164480.278698,
7100: 29386388475.49436,
7110: 29225717753.517006,
7120: 29066143267.905006,
7130: 28907656058.45312,
7140: 28750247250.25473,
7150: 28593908052.775223,
7160: 28438629758.93695,
7170: 28284403744.215263,
7180: 28131221465.745422,
7190: 27979074461.440567,
7200: 27827954349.120094,
7210: 27677852825.648987,
7220: 27528761666.087208,
7230: 27380672722.849545,
7240: 27233577924.875492,
7250: 27087469276.80936,
7260: 26942338858.189964,
7270: 26798178822.65014,
7280: 26654981397.126106,
7290: 26512738881.075874,
7300: 26371443645.707447,
7310: 26231088133.21594,
7320: 26091664856.02987,
7330: 25953166396.066685,
7340: 25815585403.996696,
7350: 25678914598.51623,
7360: 25543146765.629112,
7370: 25408274757.93671,
7380: 25274291493.936584,
7390: 25141189957.329197,
7400: 25008963196.33294,
7410: 24877604323.007202,
7420: 24747106512.58348,
7430: 24617463002.80417,
7440: 24488667093.269485,
7450: 24360712144.791622,
7460: 24233591578.756794,
7470: 24107298876.494667,
7480: 23981827578.655006,
7490: 23857171284.591957,
7500: 23733323651.75505,
7510: 23610278395.087677,
7520: 23488029286.4324,
7530: 23366570153.94321,
7540: 23245894881.50448,
7550: 23125997408.156883,
7560: 23006871727.52974,
7570: 22888511887.280117,
7580: 22770911988.53817,
7590: 22654066185.359097,
7600: 22537968684.181282,
7610: 22422613743.29073,
7620: 22307995672.29159,
7630: 22194108831.5828,
7640: 22080947631.840702,
7650: 21968506533.507614,
7660: 21856780046.286186,
7670: 21745762728.63957,
7680: 21635449187.297287,
7690: 21525834076.766697,
7700: 21416912098.85003,
7710: 21308678002.166878,
7720: 21201126581.682213,
7730: 21094252678.23956,
7740: 20988051178.099644,
7750: 20882517012.48412,
7760: 20777645157.12443,
7770: 20673430631.81598,
7780: 20569868499.976936,
7790: 20466953868.21245,
7800: 20364681885.88328,
7810: 20263047744.679703,
7820: 20162046678.199917,
7830: 20061673961.533226,
7840: 19961924910.84793,
7850: 19862794882.98377,
7860: 19764279275.048866,
7870: 19666373524.02133,
7880: 19569073106.355015,
7890: 19472373537.58994,
7900: 19376270371.966846,
7910: 19280759202.045994,
7920: 19185835658.33045,
7930: 19091495408.893116,
7940: 18997734159.008266,
7950: 18904547650.786945,
7960: 18811931662.81636,
7970: 18719882009.803337,
7980: 18628394542.22162,
7990: 18537465145.963123,
8000: 18447089741.992844,
8010: 18357264286.007812,
8020: 18267984768.099472,
8030: 18179247212.419884,
8040: 18091047676.851665,
8050: 18003382252.68139,
8060: 17916247064.276493,
8070: 17829638268.76592,
8080: 17743552055.723915,
8090: 17657984646.85762,
8100: 17572932295.697628,
8110: 17488391287.29226,
8120: 17404357937.904903,
8130: 17320828594.7147,
8140: 17237799635.520485,
8150: 17155267468.447874,
8160: 17073228531.659462,
8170: 16991679293.06818,
8180: 16910616250.053774,
8190: 16830035929.182213,
8200: 16749934885.928112,
8210: 16670309704.400278,
8220: 16591156997.069937,
8230: 16512473404.502028,
8240: 16434255595.089334,
8250: 16356500264.789387,
8260: 16279204136.864246,
8270: 16202363961.622906,
8280: 16125976516.166597,
8290: 16050038604.136644,
8300: 15974547055.465088,
8310: 15899498726.127872,
8320: 15824890497.900648,
8330: 15750719278.117184,
8340: 15676981999.43031,
8350: 15603675619.575298,
8360: 15530797121.135885,
8370: 15458343511.312525,
8380: 15386311821.693295,
8390: 15314699108.027079,
8400: 15243502449.999077,
8410: 15172718951.008781,
8420: 15102345737.950182,
8430: 15032379960.9943,
8440: 14962818793.373896,
8450: 14893659431.170612,
8460: 14824899093.10404,
8470: 14756535020.32326,
8480: 14688564476.200317,
8490: 14620984746.126007,
8500: 14553793137.307585,
8510: 14486986978.568735,
8520: 14420563620.151533,
8530: 14354520433.520355,
8540: 14288854811.168041,
8550: 14223564166.423725,
8560: 14158645933.262972,
8570: 14094097566.119549,
8580: 14029916539.69941,
8590: 13966100348.796322,
8600: 13902646508.109571,
8610: 13839552552.063461,
8620: 13776816034.628536,
8630: 13714434529.144876,
8640: 13652405628.146952,
8650: 13590726943.190327,
8660: 13529396104.680225,
8670: 13468410761.701677,
8680: 13407768581.851439,
8690: 13347467251.071726,
8700: 13287504473.485435,
8710: 13227877971.233128,
8720: 13168585484.31169,
8730: 13109624770.414572,
8740: 13050993604.773561,
8750: 12992689780.002317,
8760: 12934711105.941305,
8770: 12877055409.50438,
8780: 12819720534.526873,
8790: 12762704341.61525,
8800: 12706004707.99815,
8810: 12649619527.378994,
8820: 12593546709.79013,
8830: 12537784181.448254,
8840: 12482329884.611446,
8850: 12427181777.437487,
8860: 12372337833.843683,
8870: 12317796043.367983,
8880: 12263554411.031563,
8890: 12209610957.20278,
8900: 12155963717.462286,
8910: 12102610742.469727,
8920: 12049550097.83155,
8930: 11996779863.970285,
8940: 11944298135.99491,
8950: 11892103023.572706,
8960: 11840192650.802204,
8970: 11788565156.087467,
8980: 11737218692.013527,
8990: 11686151425.223177,
9000: 11635361536.294767,
9010: 11584847219.621408,
9020: 11534606683.291145,
9030: 11484638148.968494,
9040: 11434939851.776972,
9050: 11385510040.182846,
9060: 11336346975.880033,
9070: 11287448933.676048,
9080: 11238814201.379063,
9090: 11190441079.686155,
9100: 11142327882.072502,
9110: 11094472934.681725,
9120: 11046874576.21729,
9130: 10999531157.834946,
9140: 10952441043.036144,
9150: 10905602607.562569,
9160: 10859014239.29161,
9170: 10812674338.13294,
9180: 10766581315.925888,
9190: 10720733596.338032,
9200: 10675129614.764597,
9210: 10629767818.22887,
9220: 10584646665.283548,
9230: 10539764625.913074,
9240: 10495120181.436811,
9250: 10450711824.413275,
9260: 10406538058.545101,
9270: 10362597398.585047,
9280: 10318888370.242897,
9290: 10275409510.093105,
9300: 10232159365.48347,
9310: 10189136494.444609,
9320: 10146339465.600206,
9330: 10103766858.078302,
9340: 10061417261.42322,
9350: 10019289275.50843,
9360: 9977381510.450212,
9370: 9935692586.522121,
9380: 9894221134.07022,
9390: 9852965793.429253,
9400: 9811925214.839365,
9410: 9771098058.363811,
9420: 9730482993.80733,
9430: 9690078700.635307,
9440: 9649883867.893711,
9450: 9609897194.129696,
9460: 9570117387.313078,
9470: 9530543164.758385,
9480: 9491173253.047764,
9490: 9452006387.954533,
9500: 9413041314.367455,
9510: 9374276786.215712,
9520: 9335711566.394573,
9530: 9297344426.691776,
9540: 9259174147.714525,
9550: 9221199518.817226,
9560: 9183419338.029861,
9570: 9145832411.98704,
9580: 9108437555.857624,
9590: 9071233593.27513,
9600: 9034219356.268673,
9610: 8997393685.194557,
9620: 8960755428.668531,
9630: 8924303443.498617,
9640: 8888036594.618572,
9650: 8851953755.021992,
9660: 8816053805.69696,
9670: 8780335635.561289,
9680: 8744798141.398413,
9690: 8709440227.793814,
9700: 8674260807.072035,
9710: 8639258799.234299,
9720: 8604433131.896572,
9730: 8569782740.22838,
9740: 8535306566.892041,
9750: 8501003561.982425,
9760: 8466872682.967408,
9770: 8432912894.628675,
9780: 8399123169.003227,
9790: 8365502485.3253,
9800: 8332049829.96881,
9810: 8298764196.39044,
9820: 8265644585.073038,
9830: 8232690003.469712,
9840: 8199899465.948283,
9850: 8167271993.736356,
9860: 8134806614.866777,
9870: 8102502364.123612,
9880: 8070358282.988652,
9890: 8038373419.588365,
9900: 8006546828.641256,
9910: 7974877571.4058,
9920: 7943364715.628804,
9930: 7912007335.494175,
9940: 7880804511.572181,
9950: 7849755330.769227,
9960: 7818858886.277912,
9970: 7788114277.527709,
9980: 7757520610.135957,
9990: 7727076995.859316},
100000: {
10: 6.71890609624107e+17,
20: 2.797622826557186e+19,
30: 4.083646021499168e+19,
40: 3.2774679698581156e+19,
50: 2.2724984011014296e+19,
60: 1.5315142769174602e+19,
70: 1.0405866787743447e+19,
80: 7.211023365478845e+18,
90: 5.111049355718106e+18,
100: 3.703912490782156e+18,
110: 2.7403007528056755e+18,
120: 2.0660120250660992e+18,
130: 1.5844465013204826e+18,
140: 1.2339636847390702e+18,
150: 9.744306319793395e+17,
160: 7.79186927040088e+17,
170: 6.3017677933972e+17,
180: 5.1494744308054336e+17,
190: 4.2476346708902746e+17,
200: 3.533996570653426e+17,
210: 2.963543967852565e+17,
220: 2.503280440561384e+17,
230: 2.1287142270301098e+17,
240: 1.8214529964318922e+17,
250: 1.567534328230856e+17,
260: 1.3562512243404973e+17,
270: 1.1793154308569685e+17,
280: 1.03025435203875e+17,
290: 9.039715148186467e+16,
300: 7.964228931364902e+16,
310: 7.0437621797485176e+16,
320: 6.2523034758476584e+16,
330: 5.5687853389797976e+16,
340: 4.976040696315497e+16,
350: 4.460000313862017e+16,
360: 4.009071027784771e+16,
370: 3.613650704685607e+16,
380: 3.2657473744682548e+16,
390: 2.9586782942318064e+16,
400: 2.6868307562109412e+16,
410: 2.4454708968765844e+16,
420: 2.2305900512117548e+16,
430: 2.038780644904441e+16,
440: 1.8671354541651464e+16,
450: 1.7131654500610778e+16,
460: 1.5747324984369202e+16,
470: 1.4499939924677388e+16,
480: 1.3373571146833868e+16,
490: 1.2354409045798368e+16,
500: 1.14304468052944e+16,
510: 1.0591216558507274e+16,
520: 9827568175243378.0,
530: 9131483164222116.0,
540: 8495917608819857.0,
550: 7914669192658926.0,
560: 7382264281231687.0,
570: 6893861756021644.0,
580: 6445170886060851.0,
590: 6032380997876651.0,
600: 5652101091235055.0,
610: 5301307862944630.0,
620: 4977300858386507.0,
630: 4677663681564674.0,
640: 4400230368200013.5,
650: 4143056169789993.5,
660: 3904392115275535.0,
670: 3682662815530752.5,
680: 3476447057970112.0,
690: 3284460807098982.5,
700: 3105542284205897.5,
710: 2938638847549436.5,
720: 2782795434912008.5,
730: 2637144364570745.5,
740: 2500896319633294.5,
750: 2373332365175505.0,
760: 2253796868418128.0,
770: 2141691209885001.5,
780: 2036468188588327.5,
790: 1937627037196684.5,
800: 1844708974199285.0,
810: 1757293229569762.2,
820: 1674993488592811.5,
830: 1597454705546508.2,
840: 1524350244999908.8,
850: 1455379313730758.2,
860: 1390264650810957.2,
870: 1328750447348543.2,
880: 1270600470799814.8,
890: 1215596371746229.0,
900: 1163536153629476.5,
910: 1114232788207250.2,
920: 1067512961476458.9,
930: 1023215936548347.0,
940: 981192521483935.9,
950: 941304131436745.6,
960: 903421935626867.1,
970: 867426080707203.5,
980: 833204982996880.9,
990: 800654682864125.5,
1000: 769678255254570.9,
1010: 740185270992819.2,
1020: 712091304044972.0,
1030: 685317480426837.8,
1040: 659790064883927.9,
1050: 635440081862038.8,
1060: 612202967636844.8,
1070: 590018250782637.4,
1080: 568829258438471.1,
1090: 548582846078490.7,
1100: 529229148715441.8,
1110: 510721351665287.3,
1120: 493015479179173.7,
1130: 476070199408861.94,
1140: 459846644315378.1,
1150: 444308243259650.4,
1160: 429420569130013.06,
1170: 415151195966009.8,
1180: 401469567132094.94,
1190: 388346873179877.9,
1200: 375755938614218.25,
1210: 363671116847872.06,
1220: 352068192692043.3,
1230: 340924291786989.0,
1240: 330217796428213.94,
1250: 319928267290413.4,
1260: 310036370593628.94,
1270: 300523810294453.2,
1280: 291373264920060.0,
1290: 282568328694544.4,
1300: 274093456635951.84,
1310: 265933913328697.12,
1320: 258075725100025.44,
1330: 250505635351068.78,
1340: 243211062812984.28,
1350: 236180062516913.9,
1360: 229401289283154.2,
1370: 222863963550173.22,
1380: 216557839378051.38,
1390: 210473174473705.56,
1400: 204600702096948.84,
1410: 198931604717174.3,
1420: 193457489300308.62,
1430: 188170364114697.2,
1440: 183062616952917.62,
1450: 178126994674123.97,
1460: 173356583978561.66,
1470: 168744793332348.12,
1480: 164285335966562.47,
1490: 159972213880169.88,
1500: 155799702781354.78,
1510: 151762337906502.34,
1520: 147854900660362.1,
1530: 144072406024899.22,
1540: 140410090688010.02,
1550: 136863401846659.27,
1560: 133427986642146.11,
1570: 130099682188085.9,
1580: 126874506154399.22,
1590: 123748647873075.97,
1600: 120718459933782.02,
1610: 117780450239525.53,
1620: 114931274494564.6,
1630: 112167729098590.0,
1640: 109486744422913.47,
1650: 106885378445982.08,
1660: 104360810727006.11,
1670: 101910336697867.83,
1680: 99531362254735.45,
1690: 97221398632005.8,
1700: 94978057542294.84,
1710: 92799046567218.81,
1720: 90682164784674.86,
1730: 88625298619210.62,
1740: 86626417902912.67,
1750: 84683572135008.53,
1760: 82794886929115.22,
1770: 80958560637723.77,
1780: 79172861144158.47,
1790: 77436122812823.78,
1800: 75746743589112.81,
1810: 74103182240857.9,
1820: 72503955733693.56,
1830: 70947636733144.14,
1840: 69432851226677.516,
1850: 67958276259354.38,
1860: 66522637777079.52,
1870: 65124708571798.99,
1880: 63763306323322.69,
1890: 62437291732745.805,
1900: 61145566742734.914,
1910: 59887072840211.58,
1920: 58660789437213.766,
1930: 57465732325957.93,
1940: 56300952204339.914,
1950: 55165533268327.42,
1960: 54058591867886.57,
1970: 52979275223273.52,
1980: 51926760198695.38,
1990: 50900252130501.71,
2000: 49898983707231.64,
2010: 48922213898973.98,
2020: 47969226933642.46,
2030: 47039331317893.484,
2040: 46131858900531.85,
2050: 45246163976367.16,
2060: 44381622428588.45,
2070: 43537630907826.914,
2080: 42713606046171.055,
2090: 41908983704487.055,
2100: 41123218251487.2,
2110: 40355781873061.016,
2120: 39606163910468.414,
2130: 38873870226058.0,
2140: 38158422595245.76,
2150: 37459358123551.375,
2160: 36776228687550.4,
2170: 36108600398656.42,
2180: 35456053088702.92,
2190: 34818179816343.46,
2200: 34194586393339.23,
2210: 33584890929847.594,
2220: 32988723397867.97,
2230: 32405725212043.5,
2240: 31835548827055.09,
2250: 31277857350881.332,
2260: 30732324173233.14,
2270: 30198632608502.992,
2280: 29676475552603.67,
2290: 29165555153096.8,
2300: 28665582492042.46,
2310: 28176277281028.16,
2320: 27697367567857.785,
2330: 27228589454408.883,
2340: 26769686825187.336,
2350: 26320411086130.0,
2360: 25880520913228.37,
2370: 25449782010563.703,
2380: 25027966877364.523,
2390: 24614854583713.492,
2400: 24210230554549.098,
2410: 23813886361621.605,
2420: 23425619523080.68,
2430: 23045233310383.168,
2440: 22672536562226.797,
2450: 22307343505225.53,
2460: 21949473581057.37,
2470: 21598751279824.895,
2480: 21255005979383.035,
2490: 20918071790395.664,
2500: 20587787406896.773,
2510: 20263995962138.527,
2520: 19946544889519.617,
2530: 19635285788396.28,
2540: 19330074294585.21,
2550: 19030769955377.64,
2560: 18737236108890.098,
2570: 18449339767586.34,
2580: 18166951505808.754,
2590: 17889945351169.266,
2600: 17618198679650.586,
2610: 17351592114278.848,
2620: 17090009427233.361,
2630: 16833337445262.887,
2640: 16581465958286.596,
2650: 16334287631059.766,
2660: 16091697917790.965,
2670: 15853594979601.48,
2680: 15619879604722.28,
2690: 15390455131327.828,
2700: 15165227372910.803,
2710: 14944104546104.139,
2720: 14726997200862.775,
2730: 14513818152918.56,
2740: 14304482418427.209,
2750: 14098907150728.072,
2760: 13897011579141.205,
2770: 13698716949729.547,
2780: 13503946467955.09,
2790: 13312625243163.746,
2800: 13124680234832.918,
2810: 12940040200520.383,
2820: 12758635645454.87,
2830: 12580398773710.945,
2840: 12405263440913.037,
2850: 12233165108415.678,
2860: 12064040798908.715,
2870: 11897829053398.727,
2880: 11734469889519.129,
2890: 11573904761123.846,
2900: 11416076519120.523,
2910: 11260929373500.97,
2920: 11108408856529.13,
2930: 10958461787046.506,
2940: 10811036235858.014,
2950: 10666081492162.441,
2960: 10523548030991.246,
2970: 10383387481623.852,
2980: 10245552596945.959,
2990: 10109997223719.629,
3000: 9976676273735.705,
3010: 9845545695819.326,
3020: 9716562448660.236,
3030: 9589684474441.564,
3040: 9464870673240.574,
3050: 9342080878176.78,
3060: 9221275831282.85,
3070: 9102417160075.428,
3080: 8985467354803.008,
3090: 8870389746349.592,
3100: 8757148484772.743,
3110: 8645708518456.251,
3120: 8536035573857.646,
3130: 8428096135832.14,
3140: 8321857428514.057,
3150: 8217287396739.326,
3160: 8114354687991.163,
3170: 8013028634853.089,
3180: 7913279237953.237,
3190: 7815077149385.13,
3200: 7718393656589.585,
3210: 7623200666684.126,
3220: 7529470691225.601,
3230: 7437176831393.412,
3240: 7346292763579.575,
3250: 7256792725374.212,
3260: 7168651501933.775,
3270: 7081844412720.704,
3280: 6996347298603.18,
3290: 6912136509304.378,
3300: 6829188891190.4375,
3310: 6747481775387.058,
3320: 6666992966215.409,
3330: 6587700729936.908,
3340: 6509583783798.747,
3350: 6432621285370.585,
3360: 6356792822163.951,
3370: 6282078401526.433,
3380: 6208458440802.092,
3390: 6135913757750.542,
3400: 6064425561217.242,
3410: 5993975442047.647,
3420: 5924545364238.199,
3430: 5856117656316.932,
3440: 5788675002947.848,
3450: 5722200436751.911,
3460: 5656677330338.943,
3470: 5592089388543.884,
3480: 5528420640862.425,
3490: 5465655434079.36,
3500: 5403778425085.023,
3510: 5342774573874.018,
3520: 5282629136721.349,
3530: 5223327659530.821,
3540: 5164855971351.129,
3550: 5107200178054.552,
3560: 5050346656174.055,
3570: 4994282046894.508,
3580: 4938993250193.272,
3590: 4884467419126.502,
3600: 4830691954256.869,
3610: 4777654498219.097,
3620: 4725342930419.064,
3630: 4673745361863.556,
3640: 4622850130116.29,
3650: 4572645794377.416,
3660: 4523121130682.81,
3670: 4474265127220.076,
3680: 4426066979758.171,
3690: 4378516087187.197,
3700: 4331602047166.0625,
3710: 4285314651874.602,
3720: 4239643883867.4946,
3730: 4194579912027.525,
3740: 4150113087615.1924,
3750: 4106233940412.318,
3760: 4062933174957.2803,
3770: 4020201666869.1294,
3780: 3978030459258.5747,
3790: 3936410759223.488,
3800: 3895333934426.615,
3810: 3854791509753.3784,
3820: 3814775164047.902,
3830: 3775276726924.8184,
3840: 3736288175655.5376,
3850: 3697801632126.229,
3860: 3659809359866.3984,
3870: 3622303761146.024,
3880: 3585277374139.207,
3890: 3548722870153.0674,
3900: 3512633050920.0366,
3910: 3477000845951.869,
3920: 3441819309953.8896,
3930: 3407081620298.1562,
3940: 3372781074553.4653,
3950: 3338911088071.614,
3960: 3305465191627.6924,
3970: 3272437029113.641,
3980: 3239820355283.478,
3990: 3207609033548.876,
4000: 3175797033824.0825,
4010: 3144378430418.6167,
4020: 3113347399977.013,
4030: 3082698219463.9043,
4040: 3052425264193.8467,
4050: 3022523005904.4507,
4060: 2992986010871.9854,
4070: 2963808938068.1055,
4080: 2934986537357.1665,
4090: 2906513647732.756,
4100: 2878385195592.525,
4110: 2850596193050.724,
4120: 2823141736287.0996,
4130: 2796017003931.6074,
4140: 2769217255483.8506,
4150: 2742737829766.679,
4160: 2716574143412.7847,
4170: 2690721689383.833,
4180: 2665176035521.2383,
4190: 2639932823127.7026,
4200: 2614987765579.017,
4210: 2590336646965.295,
4220: 2565975320760.8755,
4230: 2541899708522.4116,
4240: 2518105798614.3164,
4250: 2494589644960.9663,
4260: 2471347365825.1445,
4270: 2448375142611.9565,
4280: 2425669218697.741,
4290: 2403225898283.3237,
4300: 2381041545271.119,
4310: 2359112582165.5244,
4320: 2337435488995.964,
4330: 2316006802262.227,
4340: 2294823113901.4663,
4350: 2273881070276.4233,
4360: 2253177371184.39,
4370: 2232708768886.348,
4380: 2212472067156.0845,
4390: 2192464120348.4316,
4400: 2172681832486.568,
4410: 2153122156367.7686,
4420: 2133782092687.2695,
4430: 2114658689179.6765,
4440: 2095749039777.7551,
4450: 2077050283788.138,
4460: 2058559605083.3933,
4470: 2040274231310.416,
4480: 2022191433114.404,
4490: 2004308523378.5461,
4500: 1986622856478.5042,
4510: 1969131827551.8745,
4520: 1951832871782.029,
4530: 1934723463696.0164,
4540: 1917801116476.4087,
4550: 1901063381286.5327,
4560: 1884507846608.9795,
4570: 1868132137597.0935,
4580: 1851933915439.008,
4590: 1835910876734.126,
4600: 1820060752881.735,
4610: 1804381309481.4226,
4620: 1788870345745.0815,
4630: 1773525693920.3213,
4640: 1758345218724.9421,
4650: 1743326816792.2583,
4660: 1728468416127.122,
4670: 1713767975572.2915,
4680: 1699223484285.0105,
4690: 1684832961223.5886,
4700: 1670594454643.7065,
4710: 1656506041604.286,
4720: 1642565827482.7593,
4730: 1628771945499.3826,
4740: 1615122556250.6946,
4750: 1601615847251.562,
4760: 1588250032485.9958,
4770: 1575023351966.2256,
4780: 1561934071300.1697,
4790: 1548980481266.8562,
4800: 1536160897399.8162,
4810: 1523473659578.1877,
4820: 1510917131625.423,
4830: 1498489700915.4001,
4840: 1486189777985.852,
4850: 1474015796158.8215,
4860: 1461966211168.2622,
4870: 1450039500794.284,
4880: 1438234164504.2488,
4890: 1426548723100.3413,
4900: 1414981718373.626,
4910: 1403531712764.352,
4920: 1392197289028.423,
4930: 1380977049910.033,
4940: 1369869617820.0374,
4950: 1358873634520.2827,
4960: 1347987760813.5928,
4970: 1337210676239.2769,
4980: 1326541078774.1477,
4990: 1315977684538.9148,
5000: 1305519227509.7815,
5010: 1295164459235.205,
5020: 1284912148557.7336,
5030: 1274761081340.7432,
5040: 1264710060200.0144,
5050: 1254757904240.1487,
5060: 1244903448795.5132,
5070: 1235145545175.88,
5080: 1225483060416.4197,
5090: 1215914877032.2175,
5100: 1206439892776.9695,
5110: 1197057020405.999,
5120: 1187765187443.352,
5130: 1178563335952.9712,
5140: 1169450422313.8413,
5150: 1160425416999.0703,
5160: 1151487304358.7625,
5170: 1142635082406.6785,
5180: 1133867762610.526,
5190: 1125184369685.9434,
5200: 1116583941393.9238,
5210: 1108065528341.856,
5220: 1099628193787.8003,
5230: 1091271013448.2942,
5240: 1082993075309.2667,
5250: 1074793479440.3116,
5260: 1066671337812.0234,
5270: 1058625774116.5092,
5280: 1050655923590.8562,
5290: 1042760932843.6814,
5300: 1034939959684.5034,
5310: 1027192172956.0835,
5320: 1019516752369.5392,
5330: 1011912888342.207,
5340: 1004379781838.2548,
5350: 996916644211.955,
5360: 989522697053.5531,
5370: 982197172037.7372,
5380: 974939310774.5638,
5390: 967748364662.9548,
5400: 960623594746.5071,
5410: 953564271571.7982,
5420: 946569675048.9287,
5430: 939639094314.4648,
5440: 932771827596.5293,
5450: 925967182082.1647,
5460: 919224473786.8965,
5470: 912543027426.3756,
5480: 905922176290.1104,
5490: 899361262117.3301,
5500: 892859634974.7788,
5510: 886416653136.5468,
5520: 880031682965.8585,
5530: 873704098798.7098,
5540: 867433282829.4485,
5550: 861218624998.1802,
5560: 855059522879.9738,
5570: 848955381575.8392,
5580: 842905613605.5132,
5590: 836909638801.8594,
5600: 830966884207.0535,
5610: 825076783970.3597,
5620: 819238779247.5779,
5630: 813452318102.0471,
5640: 807716855407.2653,
5650: 802031852750.9854,
5660: 796396778340.922,
5670: 790811106911.8524,
5680: 785274319634.2522,
5690: 779785904024.2954,
5700: 774345353855.3517,
5710: 768952169070.8043,
5720: 763605855698.2571,
5730: 758305925765.0981,
5740: 753051897215.3187,
5750: 747843293827.7258,
5760: 742679645135.3091,
5770: 737560486345.9745,
5780: 732485358264.398,
5790: 727453807215.1644,
5800: 722465384967.0712,
5810: 717519648658.5706,
5820: 712616160724.3851,
5830: 707754488823.252,
5840: 702934205766.7695,
5850: 698154889449.3145,
5860: 693416122779.0521,
5870: 688717493609.9824,
5880: 684058594675.0568,
5890: 679439023520.2299,
5900: 674858382439.6288,
5910: 670316278411.5775,
5920: 665812323035.7026,
5930: 661346132470.8939,
5940: 656917327374.2812,
5950: 652525532841.0446,
5960: 648170378345.2207,
5970: 643851497681.3223,
5980: 639568528906.8608,
5990: 635321114285.7306,
6000: 631108900232.4326,
6010: 626931537257.1333,
6020: 622788679911.5234,
6030: 618679986735.5045,
6040: 614605120204.6539,
6050: 610563746678.4557,
6060: 606555536349.3281,
6070: 602580163192.3584,
6080: 598637304915.823,
6090: 594726642912.3918,
6100: 590847862211.0939,
6110: 587000651429.9434,
6120: 583184702729.298,
6130: 579399711765.8593,
6140: 575645377647.3625,
6150: 571921402887.9585,
6160: 568227493364.1606,
6170: 564563358271.5327,
6180: 560928710081.9141,
6190: 557323264501.3353,
6200: 553746740428.4817,
6210: 550198859913.8053,
6220: 546679348119.18427,
6230: 543187933278.19653,
6240: 539724346656.9305,
6250: 536288322515.40356,
6260: 532879598069.4681,
6270: 529497913453.31604,
6280: 526143011682.5014,
6290: 522814638617.47345,
6300: 519512542927.6403,
6310: 516236476055.96906,
6320: 512986192184.0247,
6330: 509761448197.5526,
6340: 506562003652.5615,
6350: 503387620741.80493,
6360: 500238064261.83685,
6370: 497113101580.45337,
6380: 494012502604.618,
6390: 490936039748.8657,
6400: 487883487904.07117,
6410: 484854624406.7478,
6420: 481849229008.7025,
6430: 478867083847.15753,
6440: 475907973415.2536,
6450: 472971684532.9882,
6460: 470058006318.55225,
6470: 467166730160.0346,
6480: 464297649687.5811,
6490: 461450560745.86926,
6500: 458625261366.98956,
6510: 455821551743.7201,
6520: 453039234203.138,
6530: 450278113180.60095,
6540: 447537995194.09985,
6550: 444818688818.93726,
6560: 442120004662.7777,
6570: 439441755341.0156,
6580: 436783755452.486,
6590: 434145821555.52203,
6600: 431527772144.301,
6610: 428929427625.55145,
6620: 426350610295.5433,
6630: 423791144317.4105,
6640: 421250855698.76636,
6650: 418729572269.62103,
6660: 416227123660.6094,
6670: 413743341281.4925,
6680: 411278058299.95447,
6690: 408831109620.7031,
6700: 406402331864.7988,
6710: 403991563349.3309,
6720: 401598644067.29614,
6730: 399223415667.7825,
6740: 396865721436.4204,
6750: 394525406276.0693,
6760: 392202316687.7782,
6770: 389896300751.98474,
6780: 387607208109.98065,
6790: 385334889945.6019,
6800: 383079198967.1831,
6810: 380839989389.715,
6820: 378617116917.2766,
6830: 376410438725.6653,
6840: 374219813445.2664,
6850: 372045101144.1651,
6860: 369886163311.4356,
6870: 367742862840.68774,
6880: 365615064013.811,
6890: 363502632484.943,
6900: 361405435264.6154,
6910: 359323340704.1387,
6920: 357256218480.17053,
6930: 355203939579.4969,
6940: 353166376283.99414,
6950: 351143402155.7893,
6960: 349134892022.6241,
6970: 347140721963.3948,
6980: 345160769293.8799,
6990: 343194912552.6347,
7000: 341243031487.1208,
7010: 339305007039.9289,
7020: 337380721335.26587,
7030: 335470057665.5314,
7040: 333572900478.1436,
7050: 331689135362.46295,
7060: 329818649036.9293,
7070: 327961329336.3518,
7080: 326117065199.32367,
7090: 324285746655.8646,
7100: 322467264815.1513,
7110: 320661511853.4449,
7120: 318868381002.15063,
7130: 317087766536.0439,
7140: 315319563761.61725,
7150: 313563669005.6041,
7160: 311819979603.6328,
7170: 310088393889.0082,
7180: 308368811181.655,
7190: 306661131777.2007,
7200: 304965256936.1651,
7210: 303281088873.3107,
7220: 301608530747.12604,
7230: 299947486649.41724,
7240: 298297861595.0548,
7250: 296659561511.82666,
7260: 295032493230.42664,
7270: 293416564474.564,
7280: 291811683851.192,
7290: 290217760840.8693,
7300: 288634705788.2106,
7310: 287062429892.4927,
7320: 285500845198.3438,
7330: 283949864586.5712,
7340: 282409401765.0661,
7350: 280879371259.8707,
7360: 279359688406.3015,
7370: 277850269340.21655,
7380: 276351030989.37213,
7390: 274861891064.8902,
7400: 273382768052.82962,
7410: 271913581205.8511,
7420: 270454250534.9884,
7430: 269004696801.53046,
7440: 267564841508.9743,
7450: 266134606895.10367,
7460: 264713915924.14493,
7470: 263302692279.0116,
7480: 261900860353.67242,
7490: 260508345245.57382,
7500: 259125072748.17145,
7510: 257750969343.55508,
7520: 256385962195.1474,
7530: 255029979140.50665,
7540: 253682948684.19623,
7550: 252344799990.7478,
7560: 251015462877.71957,
7570: 249694867808.80902,
7580: 248382945887.0739,
7590: 247079628848.2287,
7600: 245784849053.98935,
7610: 244498539485.55206,
7620: 243220633737.0956,
7630: 241951066009.39136,
7640: 240689771103.47284,
7650: 239436684414.39792,
7660: 238191741925.05316,
7670: 236954880200.07034,
7680: 235726036379.77396,
7690: 234505148174.23032,
7700: 233292153857.34448,
7710: 232086992261.04077,
7720: 230889602769.50864,
7730: 229699925313.49994,
7740: 228517900364.71463,
7750: 227343468930.23093,
7760: 226176572547.02142,
7770: 225017153276.51,
7780: 223865153699.205,
7790: 222720516909.39767,
7800: 221583186509.90405,
7810: 220453106606.88052,
7820: 219330221804.7083,
7830: 218214477200.9046,
7840: 217105818381.13138,
7850: 216004191414.2245,
7860: 214909542847.31235,
7870: 213821819700.96033,
7880: 212740969464.39493,
7890: 211666940090.76736,
7900: 210599679992.4742,
7910: 209539138036.53156,
7920: 208485263540.0051,
7930: 207438006265.49194,
7940: 206397316416.64197,
7950: 205363144633.74268,
7960: 204335441989.34433,
7970: 203314159983.95496,
7980: 202299250541.73953,
7990: 201290666006.33,
8000: 200288359136.61334,
8010: 199292283102.63022,
8020: 198302391481.47336,
8030: 197318638253.25455,
8040: 196340977797.11057,
8050: 195369364887.25497,
8060: 194403754689.069,
8070: 193444102755.2442,
8080: 192490365021.96295,
8090: 191542497805.11542,
8100: 190600457796.5795,
8110: 189664202060.50287,
8120: 188733688029.6804,
8130: 187808873501.91092,
8140: 186889716636.44672,
8150: 185976175950.45346,
8160: 185068210315.51062,
8170: 184165778954.16174,
8180: 183268841436.48676,
8190: 182377357676.7341,
8200: 181491287929.96954,
8210: 180610592788.7599,
8220: 179735233179.92136,
8230: 178865170361.26593,
8240: 178000365918.406,
8250: 177140781761.59402,
8260: 176286380122.58374,
8270: 175437123551.53436,
8280: 174592974913.94925,
8290: 173753897387.64096,
8300: 172919854459.73853,
8310: 172090809923.71442,
8320: 171266727876.45435,
8330: 170447572715.35757,
8340: 169633309135.46158,
8350: 168823902126.60062,
8360: 168019316970.60107,
8370: 167219519238.49762,
8380: 166424474787.78842,
8390: 165634149759.70255,
8400: 164848510576.52142,
8410: 164067523938.91068,
8420: 163291156823.28455,
8430: 162519376479.20108,
8440: 161752150426.78098,
8450: 160989446454.15643,
8460: 160231232614.95108,
8470: 159477477225.7773,
8480: 158728148863.76593,
8490: 157983216364.1204,
8500: 157242648817.70145,
8510: 156506415568.6289,
8520: 155774486211.91153,
8530: 155046830591.10788,
8540: 154323418796.00436,
8550: 153604221160.31897,
8560: 152889208259.4327,
8570: 152178350908.144,
8580: 151471620158.4394,
8590: 150768987297.29776,
8600: 150070423844.51126,
8610: 149375901550.5281,
8620: 148685392394.325,
8630: 147998868581.29306,
8640: 147316302541.15298,
8650: 146637666925.88092,
8660: 145962934607.67148,
8670: 145292078676.91214,
8680: 144625072440.1697,
8690: 143961889418.22614,
8700: 143302503344.097,
8710: 142646888161.10425,
8720: 141995018020.94568,
8730: 141346867281.79776,
8740: 140702410506.42462,
8750: 140061622460.32663,
8760: 139424478109.8859,
8770: 138790952620.54755,
8780: 138161021355.00537,
8790: 137534659871.4204,
8800: 136911843921.64748,
8810: 136292549449.48303,
8820: 135676752588.93163,
8830: 135064429662.48808,
8840: 134455557179.43721,
8850: 133850111834.17313,
8860: 133248070504.53537,
8870: 132649410250.15875,
8880: 132054108310.83987,
8890: 131462142104.92636,
8900: 130873489227.71214,
8910: 130288127449.86124,
8920: 129706034715.83461,
8930: 129127189142.34335,
8940: 128551569016.81108,
8950: 127979152795.85304,
8960: 127409919103.7749,
8970: 126843846731.07376,
8980: 126280914632.97566,
8990: 125721101927.96347,
9000: 125164387896.33864,
9010: 124610751978.78694,
9020: 124060173774.95737,
9030: 123512633042.06848,
9040: 122968109693.51207,
9050: 122426583797.4757,
9060: 121888035575.58597,
9070: 121352445401.5622,
9080: 120819793799.87114,
9090: 120290061444.41579,
9100: 119763229157.21886,
9110: 119239277907.13426,
9120: 118718188808.55466,
9130: 118199943120.14711,
9140: 117684522243.59444,
9150: 117171907722.34898,
9160: 116662081240.39424,
9170: 116155024621.03296,
9180: 115650719825.66463,
9190: 115149148952.59961,
9200: 114650294235.86678,
9210: 114154138044.04117,
9220: 113660662879.07535,
9230: 113169851375.15779,
9240: 112681686297.5593,
9250: 112196150541.51665,
9260: 111713227131.10152,
9270: 111232899218.12318,
9280: 110755150081.02371,
9290: 110279963123.79134,
9300: 109807321874.89426,
9310: 109337209986.20094,
9320: 108869611231.93327,
9330: 108404509507.62003,
9340: 107941888829.0611,
9350: 107481733331.29971,
9360: 107024027267.608,
9370: 106568755008.48541,
9380: 106115901040.65602,
9390: 105665449966.08484,
9400: 105217386501.00201,
9410: 104771695474.93022,
9420: 104328361829.73412,
9430: 103887370618.65692,
9440: 103448707005.39705,
9450: 103012356263.16054,
9460: 102578303773.75063,
9470: 102146535026.64262,
9480: 101717035618.08817,
9490: 101289791250.21182,
9500: 100864787730.12686,
9510: 100442010969.04875,
9520: 100021446981.43231,
9530: 99603081884.10187,
9540: 99186901895.39465,
9550: 98772893334.32066,
9560: 98361042619.71613,
9570: 97951336269.4137,
9580: 97543760899.41917,
9590: 97138303223.09851,
9600: 96734950050.36034,
9610: 96333688286.86812,
9620: 95934504933.23207,
9630: 95537387084.23524,
9640: 95142321928.05011,
9650: 94749296745.461,
9660: 94358298909.11217,
9670: 93969315882.74048,
9680: 93582335220.42659,
9690: 93197344565.85986,
9700: 92814331651.58675,
9710: 92433284298.29672,
9720: 92054190414.08836,
9730: 91677037993.75917,
9740: 91301815118.09486,
9750: 90928509953.16125,
9760: 90557110749.61377,
9770: 90187605842.00795,
9780: 89819983648.10767,
9790: 89454232668.2142,
9800: 89090341484.49277,
9810: 88728298760.3068,
9820: 88368093239.55977,
9830: 88009713746.0432,
9840: 87653149182.78104,
9850: 87298388531.40172,
9860: 86945420851.49179,
9870: 86594235279.97044,
9880: 86244821030.46413,
9890: 85897167392.691,
9900: 85551263731.84332,
9910: 85207099487.98438,
9920: 84864664175.4432,
9930: 84523947382.22423,
9940: 84184938769.4087,
9950: 83847628070.57414,
9960: 83512005091.21025,
9970: 83178059708.1488,
9980: 82845781868.98933,
9990: 82515161591.5363}}
BLACKBODY_SPD_DATA = np.array([
6654278270641.8164,
6709605279251.8242,
6764825121520.0215,
6819933078643.3184,
6874924489829.6582,
6929794752622.3604,
6984539323198.4238,
7039153716641.1973,
7093633507187.9199,
7147974328452.79,
7202171873625.8623,
7256221895648.418,
7310120207365.2773,
7363862681654.5146,
7417445251535.1982,
7470863910253.4277,
7524114711347.416,
7577193768691.916,
7630097256522.6279,
7682821409440.8594,
7735362522399.2236,
7787716950668.5537,
7839881109786.6299,
7891851475489.291,
7943624583624.1924,
7995197030047.749,
8046565470505.8623,
8097726620498.582,
8148677255129.4268,
8199414208939.6592,
8249934375727.9277,
8300234708355.8584,
8350312218539.7832,
8400163976629.2471,
8449787111372.5605,
8499178809669.8564,
8548336316314.084,
8597256933720.2305,
8645938021643.2539,
8694376996885.1318,
8742571332991.2041,
8790518559936.5361,
8838216263802.2441,
8885662086442.5137,
8932853725142.3965,
8979788932266.9473,
9026465514901.832,
9072881334485.918,
9119034306436.0684,
9164922399764.5254,
9210543636689.166,
9255896092236.9062,
9300977893840.7051,
9345787220930.2656,
9390322304516.8867,
9434581426772.6758,
9478562920604.4199,
9522265169222.4082,
9565686605704.5176,
9608825712555.6133,
9651681021262.9023,
9694251111847.0938,
9736534612409.9258,
9778530198678.0938,
9820236593543.9746,
9861652566603.2188,
9902776933689.582,
9943608556407.1777,
9984146341660.2715,
10024389241180.996,
10064336251055.02,
10103986411245.57,
10143338805115.729,
10182392558949.537,
10221146841471.77,
10259600863366.846,
10297753876796.676,
10335605174918.225,
10373154091400.221,
10410399999939.768,
10447342313778.721,
10483980485220.086,
10520314005144.52,
10556342402527.148,
10592065243954.838,
10627482133143.963,
10662592710459.016,
10697396652431.82,
10731893671281.994,
10766083514438.223,
10799965964060.963,
10833540836566.229,
10866807982151.029,
10899767284320.166,
10932418659414.785,
10964762056142.584,
10996797455109.844,
11028524868355.518,
11059944338887.141,
11091055940219.014,
11121859775912.461,
11152355979118.445,
11182544712122.428,
11212426165891.754,
11242000559625.406,
11271268140306.48,
11300229182257.111,
11328883986696.256,
11357232881300.062,
11385276219765.18,
11413014381374.893,
11440447770568.145,
11467576816511.492,
11494401972674.252,
11520923716406.49,
11547142548520.236,
11573058992873.834,
11598673595959.461,
11623986926493.828,
11648999575012.27,
11673712153465.965,
11698125294822.561,
11722239652670.162,
11746055900824.623,
11769574732940.322,
11792796862124.297,
11815723020553.906,
11838353959097.84,
11860690446940.725,
11882733271211.16,
11904483236613.303,
11925941165062.002,
11947107895321.389,
11967984282647.137,
11988571198432.213,
12008869529856.135,
12028880179537.986,
12048604065192.846,
12068042119291.859,
12087195288725.926,
12106064534472.861,
12124650831268.346,
12142955167280.287,
12160978543786.838,
12178721974857.982,
12196186487040.689,
12213373119047.705,
12230282921449.855,
12246916956371.861,
12263276297191.846,
12279362028244.258,
12295175244526.406,
12310717051408.449,
12325988564347.025,
12340990908602.223,
12355725218958.162,
12370192639447.006,
12384394323076.43,
12398331431560.562,
12412005135054.371,
12425416611891.379,
12438567048324.936,
12451457638272.658,
12464089583064.539,
12476464091194.037,
12488582378072.854,
12500445665788.748,
12512055182866.818,
12523412164033.971,
12534517849986.645,
12545373487161.811,
12555980327511.164,
12566339628278.518,
12576452651780.256,
12586320665189.135,
12595944940321.068,
12605326753425.016,
12614467384975.994,
12623368119471.16,
12632030245228.828,
12640455054190.627,
12648643841726.588,
12656597906443.111,
12664318549994.041,
12671807076894.48,
12679064794337.594,
12686093012014.309,
12692893041935.688,
12699466198258.367,
12705813797112.535,
12711937156432.801,
12717837595791.857,
12723516436236.711,
12728975000127.754,
12734214610980.357,
12739236593309.203,
12744042272475.174,
12748632974534.93,
12753010026092.844,
12757174754155.734,
12761128485989.891,
12764872548980.688,
12768408270494.654,
12771736977743.973,
12774859997653.355,
12777778656729.365,
12780494280932.047,
12783008195548.846,
12785321725071.0,
12787436193072.057,
12789352922088.682,
12791073233503.762,
12792598447431.654,
12793929882605.611,
12795068856267.463,
12796016684059.291,
12796774679917.309,
12797344155967.82,
12797726422425.18,
12797922787491.859,
12797934557260.469,
12797763035617.822,
12797409524150.9,
12796875322054.816,
12796161726042.639,
12795270030257.166,
12794201526184.549,
12792957502569.717,
12791539245333.676,
12789948037492.641,
12788185159078.791,
12786251887063.008,
12784149495279.119,
12781879254350.061,
12779442431615.486,
12776840291061.33,
12774074093250.713,
12771145095256.709,
12768054550596.617,
12764803709167.744,
12761393817184.904,
12757826117119.26,
12754101847638.871,
12750222243550.609,
12746188535743.562,
12742001951133.975,
12737663712611.49,
12733175038986.91,
12728537144941.33,
12723751240976.582,
12718818533367.135,
12713740224113.178,
12708517510895.238,
12703151587029.873,
12697643641426.754,
12691994858547.029,
12686206418362.818,
12680279496318.066,
12674215263290.443,
12668014885554.613,
12661679524746.469,
12655210337828.74,
12648608477057.459,
12641875089949.822,
12635011319252.975,
12628018302913.916,
12620897174050.449,
12613649060923.275,
12606275086908.967,
12598776370474.07,
12591154025150.178,
12583409159509.971,
12575542877144.205,
12567556276639.73,
12559450451558.285,
12551226490416.475,
12542885476666.35,
12534428488677.109,
12525856599717.566,
12517170877939.469,
12508372386361.768,
12499462182855.59,
12490441320130.119,
12481310845719.219,
12472071801968.932,
12462725226025.639,
12453272149825.078,
12443713600082.1,
12434050598281.088,
12424284160667.234,
12414415298238.336,
12404445016737.533,
12394374316646.484,
12384204193179.453,
12373935636277.811,
12363569630605.447,
12353107155544.566,
12342549185192.32,
12331896688357.947,
12321150628560.488,
12310311964027.225,
12299381647692.541,
12288360627197.451,
12277249844889.68,
12266050237824.26,
12254762737764.674,
12243388271184.57,
12231927759269.893,
12220382117921.643,
12208752257759.043,
12197039084123.23,
12185243497081.41,
12173366391431.496,
12161408656707.199,
12149371177183.518,
12137254831882.795,
12125060494581.031,
12112789033814.807,
12100441312888.402,
12088018189881.537,
12075520517657.367,
12062949143870.912,
12050304910977.828,
12037588656243.65,
12024801211753.225,
12011943404420.666,
11999016055999.574,
11986019983093.574,
11972955997167.262,
11959824904557.355,
11946627506484.299,
11933364599064.09,
11920036973320.385,
11906645415196.973,
11893190705570.496,
11879673620263.432,
11866094930057.385,
11852455400706.646,
11838755792951.932,
11824996862534.492,
11811179360210.418,
11797304031765.102,
11783371618028.107,
11769382854888.08,
11755338473308.053,
11741239199340.797,
11727085754144.553,
11712878853998.805,
11698619210320.354,
11684307529679.598,
11669944513816.889,
11655530859659.211,
11641067259336.898,
11626554400200.639,
11611992964838.582,
11597383631093.629,
11582727072080.848,
11568023956205.076,
11553274947178.664,
11538480704039.338,
11523641881168.229,
11508759128307.998,
11493833090581.143,
11478864408508.326,
11463853718026.965,
11448801650509.822,
11433708832783.754,
11418575887148.539,
11403403431395.852,
11388192078828.281,
11372942438278.482,
11357655114128.41,
11342330706328.627,
11326969810417.693,
11311573017541.658,
11296140914473.592,
11280674083633.223,
11265173103106.639,
11249638546666.002,
11234070983789.426,
11218470979680.795,
11202839095289.715,
11187175887331.518,
11171481908307.24,
11155757706523.762,
11140003826113.857,
11124220807056.41,
11108409185196.574,
11092569492266.006,
11076702255903.129,
11060807999673.422,
11044887243089.707,
11028940501632.523,
11012968286770.445,
10996971105980.475,
10980949462768.436,
10964903856689.342,
10948834783367.83,
10932742734518.596,
10916628197966.777,
10900491657668.412,
10884333593730.85,
10868154482433.188,
10851954796246.691,
10835735003855.213,
10819495570175.609,
10803236956378.148,
10786959619906.879,
10770664014500.018,
10754350590210.328,
10738019793425.453,
10721672066888.24,
10705307849717.057,
10688927577426.07,
10672531681945.504,
10656120591641.879,
10639694731338.24,
10623254522334.295,
10606800382426.604,
10590332725928.654,
10573851963691.01,
10557358503121.295,
10540852748204.236,
10524335099521.66,
10507805954272.381,
10491265706292.148,
10474714746073.469,
10458153460785.447,
10441582234293.516,
10425001447179.211,
10408411476759.814,
10391812697108.0,
10375205479071.404,
10358590190292.176,
10341967195226.443,
10325336855163.762,
10308699528246.461,
10292055569489.031,
10275405330797.311,
10258749160987.756,
10242087405806.607,
10225420407948.955,
10208748507077.795,
10192072039843.021,
10175391339900.34,
10158706737930.152,
10142018561656.332,
10125327135864.982,
10108632782423.104,
10091935820297.221,
10075236565571.938,
10058535331468.398,
10041832428362.729,
10025128163804.373,
10008422842534.434,
9991716766503.8105,
9975010234891.4004,
9958303544122.207,
9941596987885.3184,
9924890857151.8496,
9908185440192.8574,
9891481022597.1055,
9874777887288.8418,
9858076314545.4297,
9841376582014.9648,
9824678964733.791,
9807983735143.9473,
9791291163110.5352,
9774601515939.0684,
9757915058392.6328,
9741232052709.1074])
class TestPlanckLaw(unittest.TestCase):
"""
Defines :func:`colour.colorimetry.blackbody.planck_law` definition units
tests methods.
"""
def test_planck_law(self):
"""
Tests :func:`colour.colorimetry.blackbody.planck_law` definition.
"""
for temperature, wavelengths in sorted(PLANCK_LAW_DATA.items()):
for wavelength, radiance in sorted(wavelengths.items()):
np.testing.assert_almost_equal(
planck_law(wavelength * 1e-9, temperature),
radiance,
# Lower precision for Linux *Travis-ci* tests.
decimal=0)
class TestBlackbodySpd(unittest.TestCase):
"""
Defines
:func:`colour.colorimetry.blackbody.blackbody_spd`
definition unit tests methods.
"""
def test_blackbody_spd(self):
"""
Tests
:func:`colour.colorimetry.blackbody.blackbody_spd`
definition.
"""
np.testing.assert_almost_equal(
blackbody_spd(5000, SpectralShape(360, 830, 1)).values,
BLACKBODY_SPD_DATA,
# Lower precision for Linux *Travis-ci* tests.
decimal=0)
if __name__ == '__main__':
unittest.main()
| 32.203996 | 77 | 0.617448 |
acf3161f564f2de6184fe47bb225aa10c33da4ba | 3,325 | py | Python | code-everyday-challenge/n217_longest_palindrom.py | ved93/deliberate-practice-challenges | 2fccdbb9d2baaa16f888055c081a8d04804c0045 | [
"MIT"
] | null | null | null | code-everyday-challenge/n217_longest_palindrom.py | ved93/deliberate-practice-challenges | 2fccdbb9d2baaa16f888055c081a8d04804c0045 | [
"MIT"
] | null | null | null | code-everyday-challenge/n217_longest_palindrom.py | ved93/deliberate-practice-challenges | 2fccdbb9d2baaa16f888055c081a8d04804c0045 | [
"MIT"
] | null | null | null |
# https://leetcode.com/problems/longest-palindromic-substring/discuss/900639/Python-Solution-%3A-with-detailed-explanation-%3A-using-DP
# https://leetcode.com/problems/longest-palindromic-substring/discuss/2954/Python-easy-to-understand-solution-with-comments-(from-middle-to-two-ends).
def longestPalindrome( s):
longest_palindrom = ''
dp = [[0]*len(s) for _ in range(len(s))]
#filling out the diagonal by 1
for i in range(len(s)):
dp[i][i] = True
if i ==0:
longest_palindrom = s[i]
# filling the dp table
for i in range(len(s)-1,-1,-1):
# j starts from the i location : to only work on the upper side of the diagonal
for j in range(i+1,len(s)):
if s[i] == s[j]: #if the chars mathces
# if len slicied sub_string is just one letter if the characters are equal, we can say they are palindomr dp[i][j] =True
#if the slicied sub_string is longer than 1, then we should check if the inner string is also palindrom (check dp[i+1][j-1] is True)
if j-i ==1 or dp[i+1][j-1] is True:
dp[i][j] = True
# we also need to keep track of the maximum palindrom sequence
if len(longest_palindrom) <= len(s[i:j+1]):
# print("Maximum palindrom ", longest_palindrom)
longest_palindrom = s[i:j+1]
return longest_palindrom
def longestPalSubstr(st) :
n = len(st) # get length of input string
# table[i][j] will be false if substring
# str[i..j] is not palindrome. Else
# table[i][j] will be true
table = [[0 for x in range(n)] for y
in range(n)]
# All substrings of length 1 are
# palindromes
maxLength = 1
i = 0
while (i < n) :
table[i][i] = True
i = i + 1
# check for sub-string of length 2.
start = 0
i = 0
while i < n - 1 :
if (st[i] == st[i + 1]) :
table[i][i + 1] = True
start = i
maxLength = 2
i = i + 1
# Check for lengths greater than 2.
# k is length of substring
k = 3
while k <= n :
# Fix the starting index
i = 0
while i < (n - k + 1) :
# Get the ending index of
# substring from starting
# index i and length k
j = i + k - 1
# checking for sub-string from
# ith index to jth index iff
# st[i + 1] to st[(j-1)] is a
# palindrome
if (table[i + 1][j - 1] and
st[i] == st[j]) :
table[i][j] = True
if (k > maxLength) :
start = i
maxLength = k
i = i + 1
k = k + 1
# print "Longest palindrome substring is: ", printSubStr(st, start,
# start + maxLength - 1)
# return maxLength # return length of LPS
return st[start:start + maxLength]
if __name__ == "__main__":
s = "kjqlrzzfmlvyoshiktodnsjjp"
# print(longestPalSubstr(s))
print(longestPalindrome(s))
| 32.598039 | 152 | 0.503158 |
acf3165136a89b0b2bae98b46547a0931a3e1430 | 2,639 | py | Python | data_providers/augment.py | jeffreyzpan/micronet-submission | 377d34e4a4a715d2fbac189117d12e8cdc270548 | [
"Apache-2.0"
] | 2 | 2019-10-12T02:43:33.000Z | 2021-02-20T06:47:08.000Z | data_providers/augment.py | jeffreyzpan/micronet-submission | 377d34e4a4a715d2fbac189117d12e8cdc270548 | [
"Apache-2.0"
] | null | null | null | data_providers/augment.py | jeffreyzpan/micronet-submission | 377d34e4a4a715d2fbac189117d12e8cdc270548 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import torch
class Cutout(object):
"""Randomly mask out one or more patches from an image.
please refer to https://github.com/uoguelph-mlrg/Cutout/blob/master/util/cutout.py
Args:
n_holes (int): Number of patches to cut out of each image.
length (int): The length (in pixels) of each square patch.
"""
def __init__(self, n_holes, length):
self.n_holes = n_holes
self.length = length
def __call__(self, img):
"""
Args:
img (Tensor): Tensor image of size (C, H, W).
Returns:
Tensor: Image with n_holes of dimension length x length cut out of it.
"""
if isinstance(img, np.ndarray):
h = img.shape[1]
w = img.shape[2]
else:
h = img.size(1)
w = img.size(2)
mask = np.ones((h, w), np.float32)
for n in range(self.n_holes):
# center point of the cutout region
y = np.random.randint(h)
x = np.random.randint(w)
width = int(self.length / 2)
y1 = np.clip(y - width, 0, h)
y2 = np.clip(y + width, 0, h)
x1 = np.clip(x - width, 0, w)
x2 = np.clip(x + width, 0, w)
mask[y1: y2, x1: x2] = 0.0
if isinstance(img, np.ndarray):
mask = np.expand_dims(mask, axis=0)
else:
mask = torch.from_numpy(mask)
mask = mask.expand_as(img)
return img * mask
class PostNormRandomHorizontalFlip(object):
""" Random horizontal flip after normalization """
def __init__(self, flip_prob=0.5):
self.flip_prob = flip_prob
def __call__(self, img):
"""
Args:
img (Tensor): Tensor image of size (C, H, W).
Returns:
Tensor: Image after random horizontal flip.
"""
if np.random.random_sample() < self.flip_prob:
np_img = img.numpy() # C, H, W
np_img = np_img[:, :, ::-1].copy()
img = torch.from_numpy(np_img).float()
return img
class PostNormRandomCrop(object):
""" Random crop after normalization """
def __init__(self, pad=4):
self.pad = pad
def __call__(self, img):
"""
Args:
img (Tensor): Tensor image of size (C, H, W).
Returns:
Tensor: Image after random horizontal flip.
"""
np_img = img.numpy() # C, H, W
init_shape = np_img.shape
new_shape = [init_shape[0],
init_shape[1] + self.pad * 2,
init_shape[2] + self.pad * 2]
zeros_padded = np.zeros(new_shape)
zeros_padded[:, self.pad:init_shape[1] + self.pad, self.pad:init_shape[2] + self.pad] = np_img
# randomly crop to original size
init_x = np.random.randint(0, self.pad * 2)
init_y = np.random.randint(0, self.pad * 2)
cropped = zeros_padded[:,
init_x: init_x + init_shape[1],
init_y: init_y + init_shape[2]]
img = torch.from_numpy(cropped).float()
return img
| 24.211009 | 96 | 0.638878 |
acf3174728521758dcdb400190e8b8bd40d49a7a | 2,129 | py | Python | var/spack/repos/builtin/packages/libkml/package.py | RemoteConnectionManager/spack | f2967b6c16effd26ce007cf86cadbb645c574f50 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2 | 2020-10-15T01:08:42.000Z | 2021-10-18T01:28:18.000Z | var/spack/repos/builtin/packages/libkml/package.py | RemoteConnectionManager/spack | f2967b6c16effd26ce007cf86cadbb645c574f50 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2 | 2019-07-30T10:12:28.000Z | 2019-12-17T09:02:27.000Z | var/spack/repos/builtin/packages/libkml/package.py | RemoteConnectionManager/spack | f2967b6c16effd26ce007cf86cadbb645c574f50 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 5 | 2019-07-30T09:42:14.000Z | 2021-01-25T05:39:20.000Z | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Libkml(CMakePackage):
    """Reference implementation of OGC KML 2.2."""

    # NOTE: The original libkml repo is https://github.com/google/libkml,
    # but this project is dead. See https://github.com/google/libkml/issues/4
    homepage = "https://github.com/libkml/libkml"
    url = "https://github.com/libkml/libkml/archive/1.3.0.tar.gz"

    version('1.3.0', sha256='8892439e5570091965aaffe30b08631fdf7ca7f81f6495b4648f0950d7ea7963')

    variant('java', default=False, description='Build java bindings')
    variant('python', default=False, description='Build python bindings')

    extends('jdk', when='+java')
    extends('python', when='+python')

    # See DEPENDENCIES
    depends_on('cmake@2.8:', type='build')
    depends_on('boost@1.44.0:')
    depends_on('expat@2.1.0:')
    depends_on('minizip@1.2.8:')
    depends_on('uriparser')
    depends_on('zlib@1.2.8:')
    depends_on('googletest@1.7.0:', type='link')
    depends_on('swig', when='+java', type='build')
    depends_on('swig', when='+python', type='build')

    def cmake_args(self):
        """Assemble CMake flags for the selected variants and test mode."""
        spec = self.spec

        def onoff(enabled):
            # CMake-style boolean literal.
            return 'ON' if enabled else 'OFF'

        args = [
            '-DWITH_JAVA:BOOL={0}'.format(onoff('+java' in spec)),
            '-DWITH_PYTHON:BOOL={0}'.format(onoff('+python' in spec)),
        ]

        if self.run_tests:
            args.append('-DBUILD_TESTING:BOOL=ON')
            args.append('-DGTEST_INCLUDE_DIR:PATH={0}'.format(
                spec['googletest'].prefix.include))
        else:
            args.append('-DBUILD_TESTING:BOOL=OFF')

        return args

    @run_after('install')
    def darwin_fix(self):
        """Repair the shared-library install name on macOS."""
        # The shared library is not installed correctly on Darwin; fix this
        if self.spec.satisfies('platform=darwin'):
            fix_darwin_install_name(self.prefix.lib)
| 32.257576 | 95 | 0.637388 |
acf31788e3ef1ea76562a5666517a54207bcaeb9 | 685 | py | Python | setup.py | Ganesha2282882/Typo | 904311c5ee6de4478ea5277e463c46a4fada00ec | [
"MIT"
] | null | null | null | setup.py | Ganesha2282882/Typo | 904311c5ee6de4478ea5277e463c46a4fada00ec | [
"MIT"
] | null | null | null | setup.py | Ganesha2282882/Typo | 904311c5ee6de4478ea5277e463c46a4fada00ec | [
"MIT"
] | null | null | null | import setuptools
# Load the long description from the README so PyPI renders it on the
# project page.
with open("README.md", "r") as fh:
    long_description = fh.read()

# Package metadata for setuptools / PyPI.
setuptools.setup(
    name="typopoo-Ganesha-Sharma", # Ganesha Sharma
    version="0.0.1",
    author="Ganesha Sharma",
    author_email="sharmaganesha2@gmail.com",
    description="Typo - A typo helper for python.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/Ganesha2282882/Typo",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.1',
)
acf31873aac88e97b65c53068dfb7a2a118677a9 | 246 | py | Python | trick_sims/SIM_lander/Modified_data/realtime.py | iamthad/trick | 88ac5b5990228e42a653347c9d7a103acea4d137 | [
"NASA-1.3"
] | 647 | 2015-05-07T16:08:16.000Z | 2022-03-30T02:33:21.000Z | trick_sims/SIM_lander/Modified_data/realtime.py | tanglemontree/trick | f182c723495185708434e67789457eb29d52ad58 | [
"NASA-1.3"
] | 995 | 2015-04-30T19:44:31.000Z | 2022-03-31T20:14:44.000Z | trick_sims/SIM_lander/Modified_data/realtime.py | tanglemontree/trick | f182c723495185708434e67789457eb29d52ad58 | [
"NASA-1.3"
] | 251 | 2015-05-15T09:24:34.000Z | 2022-03-22T20:39:05.000Z |
# Run the simulation in soft real time.
trick.real_time_enable()
# Schedule jobs on a 0.1 s software frame.
trick.exec_set_software_frame(0.1)
# Use interval timers to pace real-time execution.
trick.itimer_enable()
# Allow freezing, and start the sim in freeze mode.
trick.exec_set_enable_freeze(True)
trick.exec_set_freeze_command(True)
# Attach the Trick simulation control panel GUI.
simControlPanel = trick.SimControlPanel()
trick.add_external_application(simControlPanel)
| 22.363636 | 47 | 0.857724 |
acf318b566738c9a231db361baa6c2ab52f7cfff | 2,357 | py | Python | src/OTLMOW/PostenMapping/Model/Post060430010.py | davidvlaminck/OTLClassPython | 71330afeb37c3ea6d9981f521ff8f4a3f8b946fc | [
"MIT"
] | 2 | 2022-02-01T08:58:11.000Z | 2022-02-08T13:35:17.000Z | src/OTLMOW/PostenMapping/Model/Post060430010.py | davidvlaminck/OTLMOW | 71330afeb37c3ea6d9981f521ff8f4a3f8b946fc | [
"MIT"
] | null | null | null | src/OTLMOW/PostenMapping/Model/Post060430010.py | davidvlaminck/OTLMOW | 71330afeb37c3ea6d9981f521ff8f4a3f8b946fc | [
"MIT"
] | null | null | null | # coding=utf-8
from OTLMOW.PostenMapping.StandaardPost import StandaardPost
from OTLMOW.PostenMapping.StandaardPostMapping import StandaardPostMapping
# Generated with PostenCreator. To modify: extend, do not edit
class Post060430010(StandaardPost):
    """Standard tender item 0604.30010: ternary-mix pavement, 10 cm thick.

    Maps the measurement-sheet entry onto OTL `Ternairmengselverharding`
    attributes (laagRol, dikte, oppervlakte). Generated code -- extend
    rather than edit.
    """
    def __init__(self):
        super().__init__(
            nummer='0604.30010',
            beschrijving='Verharding van ternair mengsel volgens 6-4.3, dikte 10 cm',
            meetstaateenheid='M2',
            mappings=[StandaardPostMapping(
                typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Ternairmengselverharding',
                attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#Laag.laagRol',
                dotnotatie='laagRol',
                defaultWaarde='verharding',
                range='',
                usagenote='',
                isMeetstaatAttr=0,
                isAltijdInTeVullen=0,
                isBasisMapping=1,
                mappingStatus='gemapt 2.0',
                mappingOpmerking='',
                standaardpostnummer='0604.30010')
                , StandaardPostMapping(
                typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Ternairmengselverharding',
                attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#LaagDikte.dikte',
                dotnotatie='dikte',
                defaultWaarde='10',
                range='',
                usagenote='cm^^cdt:ucumunit',
                isMeetstaatAttr=0,
                isAltijdInTeVullen=0,
                isBasisMapping=1,
                mappingStatus='gemapt 2.0',
                mappingOpmerking='',
                standaardpostnummer='0604.30010')
                , StandaardPostMapping(
                typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Ternairmengselverharding',
                attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#Laag.oppervlakte',
                dotnotatie='oppervlakte',
                defaultWaarde='',
                range='',
                usagenote='m2^^cdt:ucumunit',
                isMeetstaatAttr=1,
                isAltijdInTeVullen=1,
                isBasisMapping=1,
                mappingStatus='gemapt 2.0',
                mappingOpmerking='',
                standaardpostnummer='0604.30010')])
acf3191e049f275b22fa8d3955da155afa5eb3a4 | 3,316 | py | Python | examples/get_started_tensorflow_v2.py | minaremeli/adversarial-robustness-toolbox | 3454f7f11c3ade9317d11637c8c8621c9f44e8fd | [
"MIT"
] | 1,350 | 2020-07-14T08:06:55.000Z | 2022-03-31T19:22:25.000Z | examples/get_started_tensorflow_v2.py | minaremeli/adversarial-robustness-toolbox | 3454f7f11c3ade9317d11637c8c8621c9f44e8fd | [
"MIT"
] | 936 | 2020-07-14T03:33:00.000Z | 2022-03-31T23:05:29.000Z | examples/get_started_tensorflow_v2.py | minaremeli/adversarial-robustness-toolbox | 3454f7f11c3ade9317d11637c8c8621c9f44e8fd | [
"MIT"
] | 413 | 2020-07-16T16:00:16.000Z | 2022-03-29T10:31:12.000Z | """
The script demonstrates a simple example of using ART with TensorFlow v1.x. The example train a small model on the MNIST
dataset and creates adversarial examples using the Fast Gradient Sign Method. Here we use the ART classifier to train
the model, it would also be possible to provide a pretrained model to the ART classifier.
The parameters are chosen for reduced computational requirements of the script and not optimised for accuracy.
"""
import numpy as np
from art.attacks.evasion import FastGradientMethod
from art.estimators.classification import TensorFlowV2Classifier
from art.utils import load_mnist
# Step 1: Load the MNIST dataset
(x_train, y_train), (x_test, y_test), min_pixel_value, max_pixel_value = load_mnist()
# Step 2: Create the model
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import Dense, Flatten, Conv2D, MaxPool2D
class TensorFlowModel(Model):
    """
    Standard TensorFlow model for unit testing.

    Two small conv/pool blocks followed by a dense head; sized for fast
    demo runs on MNIST, not for accuracy.
    """
    def __init__(self):
        super(TensorFlowModel, self).__init__()
        self.conv1 = Conv2D(filters=4, kernel_size=5, activation="relu")
        self.conv2 = Conv2D(filters=10, kernel_size=5, activation="relu")
        # Shared 2x2 max-pool layer, reused after each conv.
        self.maxpool = MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding="valid", data_format=None)
        self.flatten = Flatten()
        self.dense1 = Dense(100, activation="relu")
        # Linear output: raw logits (loss is built with from_logits=True).
        self.logits = Dense(10, activation="linear")
    def call(self, x):
        """
        Call function to evaluate the model.

        :param x: Input to the model
        :return: Prediction of the model (unnormalized logits)
        """
        x = self.conv1(x)
        x = self.maxpool(x)
        x = self.conv2(x)
        x = self.maxpool(x)
        x = self.flatten(x)
        x = self.dense1(x)
        x = self.logits(x)
        return x
optimizer = tf.keras.optimizers.Adam(learning_rate=0.01)
def train_step(model, images, labels):
    """Run one gradient-descent step on a single batch.

    Uses the module-level `loss_object` and `optimizer`. Passed to the ART
    classifier below as its training callback.

    :param model: the Keras model being trained
    :param images: batch of input images
    :param labels: one-hot labels for the batch
    """
    with tf.GradientTape() as tape:
        predictions = model(images, training=True)
        loss = loss_object(labels, predictions)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
model = TensorFlowModel()
loss_object = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
# Step 3: Create the ART classifier
# clip_values matches the [0, 1] pixel range produced by load_mnist().
classifier = TensorFlowV2Classifier(
    model=model,
    loss_object=loss_object,
    train_step=train_step,
    nb_classes=10,
    input_shape=(28, 28, 1),
    clip_values=(0, 1),
)
# Step 4: Train the ART classifier
classifier.fit(x_train, y_train, batch_size=64, nb_epochs=3)
# Step 5: Evaluate the ART classifier on benign test examples
predictions = classifier.predict(x_test)
accuracy = np.sum(np.argmax(predictions, axis=1) == np.argmax(y_test, axis=1)) / len(y_test)
print("Accuracy on benign test examples: {}%".format(accuracy * 100))
# Step 6: Generate adversarial test examples
# eps is the FGSM L-infinity perturbation budget.
attack = FastGradientMethod(estimator=classifier, eps=0.2)
x_test_adv = attack.generate(x=x_test)
# Step 7: Evaluate the ART classifier on adversarial test examples
predictions = classifier.predict(x_test_adv)
accuracy = np.sum(np.argmax(predictions, axis=1) == np.argmax(y_test, axis=1)) / len(y_test)
print("Accuracy on adversarial test examples: {}%".format(accuracy * 100))
| 33.494949 | 120 | 0.715923 |
acf319784f31c1758ee12a490b4717fb78f5ce34 | 963 | py | Python | core/models/customers.py | mthooyavan/layman-erp | 87925567ae1bd6fab1fba46a2ef4751de9ab4478 | [
"Unlicense"
] | null | null | null | core/models/customers.py | mthooyavan/layman-erp | 87925567ae1bd6fab1fba46a2ef4751de9ab4478 | [
"Unlicense"
] | null | null | null | core/models/customers.py | mthooyavan/layman-erp | 87925567ae1bd6fab1fba46a2ef4751de9ab4478 | [
"Unlicense"
] | null | null | null | from django.db import models
from localflavor.in_.models import INStateField
class Customer(models.Model):
    """A customer with contact details and a geocodable Indian address."""

    def __str__(self):
        return f"{self.name} | {self.email}"

    # Contact details; email and phone are indexed for lookup and may be
    # absent (default None).
    name = models.CharField(max_length=128, blank=True, null=True)
    email = models.EmailField(max_length=128, db_index=True, blank=True, null=True, default=None)
    phone = models.CharField(max_length=10, db_index=True, blank=True, null=True, default=None)
    # Postal address; address_2 is optional, state uses localflavor's
    # Indian state choices.
    address_1 = models.CharField(max_length=128)
    address_2 = models.CharField(max_length=128, blank=True)
    city = models.CharField(max_length=64, )
    state = INStateField()
    zip_code = models.CharField(max_length=6, )
    # Optional geocoded coordinates for the address.
    latitude = models.DecimalField(max_digits=9, decimal_places=6, null=True, blank=True)
    longitude = models.DecimalField(max_digits=9, decimal_places=6, null=True, blank=True)
    # Creation / last-modification timestamps, maintained automatically.
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
acf319ae43961e6abed93a193a6b40025003b3e0 | 1,170 | py | Python | tests/storage/cases/test_KT1AYZ5nGTLxC6VSn9EXD96aCsYbWGr2uzY7_babylon.py | juztin/pytezos-1 | 7e608ff599d934bdcf129e47db43dbdb8fef9027 | [
"MIT"
] | 1 | 2021-05-20T16:52:08.000Z | 2021-05-20T16:52:08.000Z | tests/storage/cases/test_KT1AYZ5nGTLxC6VSn9EXD96aCsYbWGr2uzY7_babylon.py | juztin/pytezos-1 | 7e608ff599d934bdcf129e47db43dbdb8fef9027 | [
"MIT"
] | 1 | 2020-12-30T16:44:56.000Z | 2020-12-30T16:44:56.000Z | tests/storage/cases/test_KT1AYZ5nGTLxC6VSn9EXD96aCsYbWGr2uzY7_babylon.py | juztin/pytezos-1 | 7e608ff599d934bdcf129e47db43dbdb8fef9027 | [
"MIT"
] | 1 | 2022-03-20T19:01:00.000Z | 2022-03-20T19:01:00.000Z | from unittest import TestCase
from tests import get_data
from pytezos.michelson.converter import build_schema, decode_micheline, encode_micheline, micheline_to_michelson
class StorageTestKT1AYZ5nGTLxC6VSn9EXD96aCsYbWGr2uzY7_babylon(TestCase):
    """Regression tests for storage (de)serialization of mainnet contract
    KT1AYZ5nGTLxC6VSn9EXD96aCsYbWGr2uzY7 under the Babylon protocol."""

    @classmethod
    def setUpClass(cls):
        cls.maxDiff = None  # show full diffs on assertion failures
        cls.contract = get_data('storage/mainnet/KT1AYZ5nGTLxC6VSn9EXD96aCsYbWGr2uzY7_babylon.json')

    def test_storage_encoding_KT1AYZ5nGTLxC6VSn9EXD96aCsYbWGr2uzY7_babylon(self):
        # decode -> encode must round-trip to the original Micheline value.
        type_expr = self.contract['script']['code'][1]
        val_expr = self.contract['script']['storage']
        schema = build_schema(type_expr)
        decoded = decode_micheline(val_expr, type_expr, schema)
        actual = encode_micheline(decoded, schema)
        self.assertEqual(val_expr, actual)

    def test_storage_schema_KT1AYZ5nGTLxC6VSn9EXD96aCsYbWGr2uzY7_babylon(self):
        # Schema construction from the parameter/storage section must not raise.
        _ = build_schema(self.contract['script']['code'][0])

    def test_storage_format_KT1AYZ5nGTLxC6VSn9EXD96aCsYbWGr2uzY7_babylon(self):
        # Micheline -> Michelson pretty-printing must not raise.
        _ = micheline_to_michelson(self.contract['script']['code'])
        _ = micheline_to_michelson(self.contract['script']['storage'])
acf31a08b54307857792217db32ceb138f462aab | 1,064 | py | Python | test/run_test.py | srinivasgutta7/nlpaug | e6f14a7f0ae20f2450a1397a395b811ca37b7272 | [
"MIT"
] | 1 | 2019-11-11T06:47:43.000Z | 2019-11-11T06:47:43.000Z | test/run_test.py | srinivasgutta7/nlpaug | e6f14a7f0ae20f2450a1397a395b811ca37b7272 | [
"MIT"
] | null | null | null | test/run_test.py | srinivasgutta7/nlpaug | e6f14a7f0ae20f2450a1397a395b811ca37b7272 | [
"MIT"
] | null | null | null | import unittest
import sys
import logging
if __name__ == '__main__':
    # Make the nlpaug package importable when running from the test dir.
    sys.path.append('../nlpaug')

    # disable transformer's info logging
    for file_name in ['tokenization_utils', 'file_utils', 'modeling_utils', 'modeling_xlnet',
                      'configuration_utils']:
        logging.getLogger('transformers.' + file_name).setLevel(logging.ERROR)

    # Directories whose test modules will be discovered and run.
    test_dirs = [
        'test/augmenter/char/',
        'test/augmenter/word/',
        'test/augmenter/sentence/',
        'test/augmenter/audio/',
        'test/augmenter/spectrogram/',
        'test/model/char/',
        'test/util/selection/',
        'test/flow/'
    ]

    runner = unittest.TextTestRunner()
    # Discover and run each directory with a fresh loader.
    for test_dir in test_dirs:
        loader = unittest.TestLoader()
        suite = loader.discover(test_dir)
        runner.run(suite)

    # Kept for ad-hoc single-suite runs during development:
    # suite = unittest.TestLoader().loadTestsFromName('augmenter.sentence.test_context_word_embs_sentence')
    # runner.run(suite)
    #
    # suite = unittest.TestLoader().loadTestsFromName('model.char.test_keyboard')
    # runner.run(suite)
acf31ab437e5702db1c019ba21fb3258bbd6916e | 13,723 | py | Python | pype/modules/ftrack/ftrack_server/lib.py | kalisp/pype | 28bbffaf2d12ccee48313cd9985e8dfa05e81a5c | [
"MIT"
] | null | null | null | pype/modules/ftrack/ftrack_server/lib.py | kalisp/pype | 28bbffaf2d12ccee48313cd9985e8dfa05e81a5c | [
"MIT"
] | null | null | null | pype/modules/ftrack/ftrack_server/lib.py | kalisp/pype | 28bbffaf2d12ccee48313cd9985e8dfa05e81a5c | [
"MIT"
] | null | null | null | import os
import sys
import logging
import getpass
import atexit
import tempfile
import threading
import datetime
import time
import queue
import pymongo
import requests
import ftrack_api
import ftrack_api.session
import ftrack_api.cache
import ftrack_api.operation
import ftrack_api._centralized_storage_scenario
import ftrack_api.event
from ftrack_api.logging import LazyLogMessage as L
from pype.api import (
Logger,
get_default_components,
decompose_url,
compose_url
)
from pype.modules.ftrack.lib.custom_db_connector import CustomDbConnector
TOPIC_STATUS_SERVER = "pype.event.server.status"
TOPIC_STATUS_SERVER_RESULT = "pype.event.server.status.result"
def get_ftrack_event_mongo_info():
    """Resolve the MongoDB connection info used for storing ftrack events.

    Optional environment overrides:
        FTRACK_EVENTS_MONGO_URL  full mongo URL
        FTRACK_EVENTS_MONGO_DB   database name (default "pype")
        FTRACK_EVENTS_MONGO_COL  collection name (default "ftrack_events")

    Returns:
        tuple: (uri, port, database_name, collection_name)
    """
    database_name = os.environ.get("FTRACK_EVENTS_MONGO_DB") or "pype"
    collection_name = (
        os.environ.get("FTRACK_EVENTS_MONGO_COL") or "ftrack_events"
    )

    mongo_url = os.environ.get("FTRACK_EVENTS_MONGO_URL")
    components = (
        decompose_url(mongo_url)
        if mongo_url is not None
        else get_default_components()
    )

    return (
        compose_url(**components),
        components["port"],
        database_name,
        collection_name,
    )
def check_ftrack_url(url, log_errors=True):
    """Check that an ftrack server at `url` is responding.

    Returns the normalized URL on success, ``False`` when the server is
    unreachable or does not look like ftrack, and ``None`` when no URL
    was given at all.
    """
    if not url:
        print('ERROR: Ftrack URL is not set!')
        return None

    url = url.strip('/ ')
    if 'http' not in url:
        # Bare hostname given - expand to a full https URL.
        if url.endswith('ftrackapp.com'):
            url = 'https://' + url
        else:
            url = 'https://{0}.ftrackapp.com'.format(url)

    try:
        response = requests.get(url, allow_redirects=False)
    except requests.exceptions.RequestException:
        if log_errors:
            print('ERROR: Entered Ftrack URL is not accesible!')
        return False

    looks_like_ftrack = (
        response.status_code == 200
        and 'FTRACK_VERSION' in response.headers
    )
    if not looks_like_ftrack:
        if log_errors:
            print('ERROR: Entered Ftrack URL is not accesible!')
        return False

    print('DEBUG: Ftrack server {} is accessible.'.format(url))
    return url
class SocketBaseEventHub(ftrack_api.event.hub.EventHub):
    """Event hub that mirrors ftrack heartbeats to a raw socket.

    The socket is supplied by the parent process; echoing each heartbeat
    through it lets that process monitor this hub's liveness.
    """

    # Payload written to the socket on every heartbeat. NOTE(review): the
    # misspelling ("hearbeat") is kept as-is -- the reader on the other end
    # presumably matches this exact byte string; confirm before fixing.
    hearbeat_msg = b"hearbeat"
    # Callables invoked on every heartbeat packet (shared, class-level list).
    heartbeat_callbacks = []

    def __init__(self, *args, **kwargs):
        # `sock` is required; pop it before the base EventHub sees kwargs.
        self.sock = kwargs.pop("sock")
        super(SocketBaseEventHub, self).__init__(*args, **kwargs)

    def _handle_packet(self, code, packet_identifier, path, data):
        """Extend heartbeat handling: run callbacks and echo to the socket."""
        code_name = self._code_name_mapping[code]
        if code_name == "heartbeat":
            # Reply with heartbeat.
            for callback in self.heartbeat_callbacks:
                callback()
            self.sock.sendall(self.hearbeat_msg)
            return self._send_packet(self._code_name_mapping["heartbeat"])
        return super(SocketBaseEventHub, self)._handle_packet(
            code, packet_identifier, path, data
        )
class StatusEventHub(SocketBaseEventHub):
    """Event hub that announces itself when the connection is established."""

    def _handle_packet(self, code, packet_identifier, path, data):
        """On "connect", queue a `pype.status.started` event, then defer to
        base handling (which also mirrors heartbeats to the socket)."""
        code_name = self._code_name_mapping[code]
        if code_name == "connect":
            event = ftrack_api.event.base.Event(
                topic="pype.status.started",
                data={},
                source={
                    "id": self.id,
                    "user": {"username": self._api_user}
                }
            )
            self._event_queue.put(event)
        return super(StatusEventHub, self)._handle_packet(
            code, packet_identifier, path, data
        )
class StorerEventHub(SocketBaseEventHub):
    """Event hub for the event-storer process; identifies itself with a
    "storer" heartbeat payload and announces startup on connect."""

    hearbeat_msg = b"storer"

    def _handle_packet(self, code, packet_identifier, path, data):
        """On "connect", queue a `pype.storer.started` event, then defer to
        base handling."""
        code_name = self._code_name_mapping[code]
        if code_name == "connect":
            event = ftrack_api.event.base.Event(
                topic="pype.storer.started",
                data={},
                source={
                    "id": self.id,
                    "user": {"username": self._api_user}
                }
            )
            self._event_queue.put(event)
        return super(StorerEventHub, self)._handle_packet(
            code, packet_identifier, path, data
        )
class ProcessEventHub(SocketBaseEventHub):
    """Event hub for the event-processor process.

    Instead of consuming events straight from the ftrack socket, events are
    read from a MongoDB collection (filled by the storer process) and marked
    as processed there after handling. Incoming "event" packets from the
    socket are therefore ignored entirely.
    """

    hearbeat_msg = b"processor"
    # Mongo connection info resolved once at class-definition time.
    uri, port, database, table_name = get_ftrack_event_mongo_info()
    is_table_created = False
    pypelog = Logger().get_logger("Session Processor")

    def __init__(self, *args, **kwargs):
        self.dbcon = CustomDbConnector(
            self.uri,
            self.database,
            self.port,
            self.table_name
        )
        super(ProcessEventHub, self).__init__(*args, **kwargs)

    def prepare_dbcon(self):
        """Connect to Mongo and verify access; exit the process on failure."""
        try:
            self.dbcon.install()
            # Cheap call that forces authentication/permission checks.
            self.dbcon._database.list_collection_names()
        except pymongo.errors.AutoReconnect:
            self.pypelog.error(
                "Mongo server \"{}\" is not responding, exiting.".format(
                    os.environ["AVALON_MONGO"]
                )
            )
            sys.exit(0)
        except pymongo.errors.OperationFailure:
            self.pypelog.error((
                "Error with Mongo access, probably permissions."
                "Check if exist database with name \"{}\""
                " and collection \"{}\" inside."
            ).format(self.database, self.table_name))
            # Tell the supervising process why we are going down.
            self.sock.sendall(b"MongoError")
            sys.exit(0)

    def wait(self, duration=None):
        """Overridden wait loop.

        Events are loaded from MongoDB when the local queue is empty, and
        each handled event is flagged as processed in MongoDB.
        """
        started = time.time()
        self.prepare_dbcon()
        while True:
            try:
                event = self._event_queue.get(timeout=0.1)
            except queue.Empty:
                # Nothing queued: try to refill from Mongo; back off briefly
                # if there was nothing to load either.
                if not self.load_events():
                    time.sleep(0.5)
            else:
                try:
                    self._handle(event)
                    self.dbcon.update_one(
                        {"id": event["id"]},
                        {"$set": {"pype_data.is_processed": True}}
                    )
                except pymongo.errors.AutoReconnect:
                    self.pypelog.error((
                        "Mongo server \"{}\" is not responding, exiting."
                    ).format(os.environ["AVALON_MONGO"]))
                    sys.exit(0)
                # Additional special processing of events.
                if event['topic'] == 'ftrack.meta.disconnected':
                    break
                if duration is not None:
                    if (time.time() - started) > duration:
                        break

    def load_events(self):
        """Queue unprocessed events from Mongo, oldest first.

        Also prunes processed events older than three days. Returns True if
        at least one event was queued.
        """
        ago_date = datetime.datetime.now() - datetime.timedelta(days=3)
        self.dbcon.delete_many({
            "pype_data.stored": {"$lte": ago_date},
            "pype_data.is_processed": True
        })
        not_processed_events = self.dbcon.find(
            {"pype_data.is_processed": False}
        ).sort(
            [("pype_data.stored", pymongo.ASCENDING)]
        )
        found = False
        for event_data in not_processed_events:
            # Strip Mongo bookkeeping keys before rebuilding the Event.
            new_event_data = {
                k: v for k, v in event_data.items()
                if k not in ["_id", "pype_data"]
            }
            try:
                event = ftrack_api.event.base.Event(**new_event_data)
            except Exception:
                self.logger.exception(L(
                    'Failed to convert payload into event: {0}',
                    event_data
                ))
                continue
            found = True
            self._event_queue.put(event)
        return found

    def _handle_packet(self, code, packet_identifier, path, data):
        """Skip live "event" packets (events come from Mongo instead) and
        keep the extended heartbeat behavior of the base class."""
        code_name = self._code_name_mapping[code]
        if code_name == "event":
            return
        return super()._handle_packet(code, packet_identifier, path, data)
class SocketSession(ftrack_api.session.Session):
    '''An isolated session for interaction with an ftrack server.

    NOTE(review): this constructor mirrors ``ftrack_api.session.Session``'s
    with two additions: a pluggable ``Eventhub`` class and a ``sock`` handed
    to it. Keep in sync with the upstream constructor when upgrading the
    ftrack API package.
    '''
    def __init__(
        self, server_url=None, api_key=None, api_user=None, auto_populate=True,
        plugin_paths=None, cache=None, cache_key_maker=None,
        auto_connect_event_hub=None, schema_cache_path=None,
        plugin_arguments=None, sock=None, Eventhub=None
    ):
        # Deliberately skip Session.__init__ (this resolves to
        # object.__init__) because this method re-implements it.
        super(ftrack_api.session.Session, self).__init__()
        self.logger = logging.getLogger(
            __name__ + '.' + self.__class__.__name__
        )
        self._closed = False

        # Server URL: argument takes precedence over the environment.
        if server_url is None:
            server_url = os.environ.get('FTRACK_SERVER')
        if not server_url:
            raise TypeError(
                'Required "server_url" not specified. Pass as argument or set '
                'in environment variable FTRACK_SERVER.'
            )
        self._server_url = server_url

        # API key: argument, then either of two env var spellings.
        if api_key is None:
            api_key = os.environ.get(
                'FTRACK_API_KEY',
                # Backwards compatibility
                os.environ.get('FTRACK_APIKEY')
            )
        if not api_key:
            raise TypeError(
                'Required "api_key" not specified. Pass as argument or set in '
                'environment variable FTRACK_API_KEY.'
            )
        self._api_key = api_key

        # API user: argument, env var, then the local OS username.
        if api_user is None:
            api_user = os.environ.get('FTRACK_API_USER')
        if not api_user:
            try:
                api_user = getpass.getuser()
            except Exception:
                pass
        if not api_user:
            raise TypeError(
                'Required "api_user" not specified. Pass as argument, set in '
                'environment variable FTRACK_API_USER or one of the standard '
                'environment variables used by Python\'s getpass module.'
            )
        self._api_user = api_user

        # Currently pending operations.
        self.recorded_operations = ftrack_api.operation.Operations()
        self.record_operations = True

        self.cache_key_maker = cache_key_maker
        if self.cache_key_maker is None:
            self.cache_key_maker = ftrack_api.cache.StringKeyMaker()

        # Enforce always having a memory cache at top level so that the same
        # in-memory instance is returned from session.
        self.cache = ftrack_api.cache.LayeredCache([
            ftrack_api.cache.MemoryCache()
        ])
        if cache is not None:
            if callable(cache):
                cache = cache(self)
            if cache is not None:
                self.cache.caches.append(cache)

        self._managed_request = None
        self._request = requests.Session()
        self._request.auth = ftrack_api.session.SessionAuthentication(
            self._api_key, self._api_user
        )
        self.auto_populate = auto_populate

        # Fetch server information and in doing so also check credentials.
        self._server_information = self._fetch_server_information()

        # Now check compatibility of server based on retrieved information.
        self.check_server_compatibility()

        # Construct event hub and load plugins. The custom hub class (if
        # given) receives the raw monitoring socket.
        if Eventhub is None:
            Eventhub = ftrack_api.event.hub.EventHub
        self._event_hub = Eventhub(
            self._server_url,
            self._api_user,
            self._api_key,
            sock=sock
        )

        self._auto_connect_event_hub_thread = None
        if auto_connect_event_hub in (None, True):
            # Connect to event hub in background thread so as not to block main
            # session usage waiting for event hub connection.
            self._auto_connect_event_hub_thread = threading.Thread(
                target=self._event_hub.connect
            )
            self._auto_connect_event_hub_thread.daemon = True
            self._auto_connect_event_hub_thread.start()

        # To help with migration from auto_connect_event_hub default changing
        # from True to False.
        self._event_hub._deprecation_warning_auto_connect = (
            auto_connect_event_hub is None
        )

        # Register to auto-close session on exit.
        atexit.register(self.close)

        self._plugin_paths = plugin_paths
        if self._plugin_paths is None:
            self._plugin_paths = os.environ.get(
                'FTRACK_EVENT_PLUGIN_PATH', ''
            ).split(os.pathsep)
        self._discover_plugins(plugin_arguments=plugin_arguments)

        # TODO: Make schemas read-only and non-mutable (or at least without
        # rebuilding types)?
        if schema_cache_path is not False:
            if schema_cache_path is None:
                schema_cache_path = os.environ.get(
                    'FTRACK_API_SCHEMA_CACHE_PATH', tempfile.gettempdir()
                )
            schema_cache_path = os.path.join(
                schema_cache_path, 'ftrack_api_schema_cache.json'
            )
        self.schemas = self._load_schemas(schema_cache_path)
        self.types = self._build_entity_type_classes(self.schemas)
        ftrack_api._centralized_storage_scenario.register(self)
        self._configure_locations()

        # Announce that the session is ready for use.
        self.event_hub.publish(
            ftrack_api.event.base.Event(
                topic='ftrack.api.session.ready',
                data=dict(
                    session=self
                )
            ),
            synchronous=True
        )
acf31c56c514365c336d7dae5e3f6b72286ab6d6 | 393 | py | Python | django/vermillion/vermillion/wsgi.py | cultbepis/vermillion | 9ff537bc3ed5bd06d41fe8b4b2a5455821efea4a | [
"MIT"
] | 1 | 2021-05-28T01:34:45.000Z | 2021-05-28T01:34:45.000Z | django/vermillion/vermillion/wsgi.py | cultbepis/vermillion | 9ff537bc3ed5bd06d41fe8b4b2a5455821efea4a | [
"MIT"
] | 9 | 2021-05-28T05:38:12.000Z | 2022-02-10T10:05:56.000Z | django/vermillion/vermillion/wsgi.py | cultbepis/vermillion | 9ff537bc3ed5bd06d41fe8b4b2a5455821efea4a | [
"MIT"
] | null | null | null | """
WSGI config for config project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'vermillion.settings')
application = get_wsgi_application()
| 23.117647 | 78 | 0.78626 |
acf31d5240affe5a0c9202a7ce6ee8bd3fc14e19 | 2,092 | py | Python | finance/helpers.py | alpq654321/cs50-finance | bf710848f2a7d9d4082db0ac2722eecbb63d6786 | [
"FSFAP"
] | null | null | null | finance/helpers.py | alpq654321/cs50-finance | bf710848f2a7d9d4082db0ac2722eecbb63d6786 | [
"FSFAP"
] | null | null | null | finance/helpers.py | alpq654321/cs50-finance | bf710848f2a7d9d4082db0ac2722eecbb63d6786 | [
"FSFAP"
] | null | null | null | import csv
import os
import urllib.request
from flask import redirect, render_template, request, session
from functools import wraps
def apology(message, code=400):
    """Render an apology page for *message* with HTTP status *code*."""

    def escape(text):
        """
        Escape special characters for the memegen URL.
        https://github.com/jacebrowning/memegen#special-characters
        """
        replacements = (
            ("-", "--"), (" ", "-"), ("_", "__"), ("?", "~q"),
            ("%", "~p"), ("#", "~h"), ("/", "~s"), ("\"", "''"),
        )
        for old, new in replacements:
            text = text.replace(old, new)
        return text

    rendered = render_template("apology.html", top=code, bottom=escape(message))
    return rendered, code
def login_required(f):
    """
    Decorate routes to require login.
    http://flask.pocoo.org/docs/0.12/patterns/viewdecorators/
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        # Logged-in users fall through to the view; anyone else is
        # redirected to the login page.
        if session.get("user_id") is not None:
            return f(*args, **kwargs)
        return redirect("/login")
    return wrapper
def lookup(symbol):
    """Look up an intraday quote for *symbol* via Alpha Vantage.

    Returns a dict with keys ``name``, ``price``, ``symbol`` (uppercased)
    and ``pic`` (a list of up to 100 recent 1-minute closing prices,
    newest first, used for charting), or ``None`` when the symbol is
    invalid or the lookup fails.
    """
    # Reject symbol if it starts with caret
    if symbol.startswith("^"):
        return None

    # Reject symbol if it contains comma
    if "," in symbol:
        return None

    # Query Alpha Vantage for quote
    # https://www.alphavantage.co/documentation/
    try:
        # GET CSV
        url = f"https://www.alphavantage.co/query?apikey={os.getenv('API_KEY')}&datatype=csv&function=TIME_SERIES_INTRADAY&interval=1min&symbol={symbol}"
        webpage = urllib.request.urlopen(url)

        # Parse CSV
        datareader = csv.reader(webpage.read().decode("utf-8").splitlines())

        # Ignore header row
        next(datareader)

        # Parse the most recent data row
        row = next(datareader)

        # Ensure stock exists
        try:
            price = float(row[4])
        except (IndexError, ValueError):
            return None

        # Collect up to 100 closing prices for the chart (newest first).
        # Unlike the previous `next(datareader)` loop, this does not raise
        # StopIteration -- and thus silently return None for a valid quote --
        # when the response holds fewer than 100 data rows.
        pic = [price]
        for i, later_row in enumerate(datareader):
            if i >= 99:
                break
            pic.append(float(later_row[4]))

        # Return stock's name (as a str), price (as a float), and (uppercased) symbol (as a str)
        return {
            "price": price,
            "symbol": symbol.upper(),
            "name": symbol,
            "pic": pic
        }
    except Exception:
        # Network failure, malformed response, etc. -- treat as "not found".
        return None
def usd(value):
    """Format *value* as US dollars, e.g. 1234.5 -> "$1,234.50"."""
    return "${:,.2f}".format(value)
| 22.021053 | 147 | 0.648662 |
acf31d5a291493d82649cd3bf9b328a5ce39f5a2 | 3,534 | py | Python | bocadillo/recipes.py | schnitzelbub/bocadillo | ecfe23710853817dd025aa67b176d6e9a8263c91 | [
"MIT"
] | null | null | null | bocadillo/recipes.py | schnitzelbub/bocadillo | ecfe23710853817dd025aa67b176d6e9a8263c91 | [
"MIT"
] | null | null | null | bocadillo/recipes.py | schnitzelbub/bocadillo | ecfe23710853817dd025aa67b176d6e9a8263c91 | [
"MIT"
] | null | null | null | from typing import List, Sequence, Tuple, Any
from .meta import DocsMeta
from .templates import TemplatesMixin
from .websockets import WebSocketView
class RecipeBase:
    """Definition of the recipe interface."""

    def __init__(self, prefix: str):
        # A recipe is always mounted under an absolute URL path.
        assert prefix.startswith("/"), "recipe prefix must start with '/'"
        self.prefix = prefix

    def __call__(self, api, root: str = ""):
        """Apply the recipe to an API object.

        Subclasses must override this.

        # Parameters
        api (API): an API object.
        root (str): a root URL path.
        """
        raise NotImplementedError
class Recipe(TemplatesMixin, RecipeBase, metaclass=DocsMeta):
    """A grouping of capabilities that can be merged back into an API.

    # Parameters
    name (str):
        A name for the recipe.
    prefix (str):
        The path prefix where the recipe will be mounted.
        Defaults to `"/" + name`.
    templates_dir (str):
        See #API.
    """

    def __init__(self, name: str, prefix: str = None, **kwargs):
        mount_prefix = f"/{name}" if prefix is None else prefix
        super().__init__(prefix=mount_prefix, **kwargs)
        self.name = name
        # Route registrations are recorded here and replayed onto the API
        # when the recipe is applied.
        self._http_routes: List[Tuple[str, dict, Any]] = []
        self._ws_routes: List[Tuple[str, dict, WebSocketView]] = []

    def route(self, pattern: str, **kwargs):
        """Register a route on the recipe.

        Accepts the same arguments as `API.route()`, except `namespace` which
        will be given the value of the recipe's `name`.

        # See Also
        - [API.route()](./api.md#route)
        """

        def decorate(view):
            self._http_routes.append((pattern, kwargs, view))
            return view

        return decorate

    def websocket_route(self, pattern: str, **kwargs):
        """Register a WebSocket route on the recipe.

        Accepts the same arguments as `API.websocket_route()`.

        # See Also
        - [API.websocket_route()](./api.md#websocket-route)
        """

        def decorate(view):
            self._ws_routes.append((pattern, kwargs, view))
            return view

        return decorate

    def __call__(self, api, root: str = ""):
        """Apply the recipe to an API object."""
        base = root + self.prefix
        # Replay recorded routes on the API under this recipe's prefix.
        for pattern, kwargs, view in self._http_routes:
            kwargs["namespace"] = self.name
            api.route(base + pattern, **kwargs)(view)
        for pattern, kwargs, view in self._ws_routes:
            api.websocket_route(base + pattern, **kwargs)(view)
        # Look for templates where the API does, if not specified.
        if self.templates_dir is None:
            self.templates_dir = api.templates_dir

    @classmethod
    def book(cls, *recipes: "Recipe", prefix: str) -> "RecipeBook":
        """Build a book of recipes.

        Shortcut for `RecipeBook(recipes, prefix)`.
        """
        return RecipeBook(recipes, prefix)
class RecipeBook(RecipeBase):
    """A composition of multiple recipes.

    # Parameters
    recipes (list): a list of `Recipe` objects.
    prefix (str):
        A prefix that will be prepended to all of the recipes' prefixes.
    """

    def __init__(self, recipes: Sequence[Recipe], prefix: str):
        super().__init__(prefix)
        self.recipes = recipes

    def __call__(self, api, root: str = ""):
        """Apply each contained recipe under this book's prefix."""
        base = root + self.prefix
        for recipe in self.recipes:
            recipe(api, root=base)
acf31dabc913e9ec516499828f44847ab8436a46 | 3,239 | py | Python | test/functional/wallet_create_tx.py | neoncoin-project/neon | c9c2d22c5598e1ea39e6e6db6e5ea0ef342b561b | [
"MIT"
] | null | null | null | test/functional/wallet_create_tx.py | neoncoin-project/neon | c9c2d22c5598e1ea39e6e6db6e5ea0ef342b561b | [
"MIT"
] | null | null | null | test/functional/wallet_create_tx.py | neoncoin-project/neon | c9c2d22c5598e1ea39e6e6db6e5ea0ef342b561b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2018-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import NeonTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
from test_framework.blocktools import (
TIME_GENESIS_BLOCK,
)
class CreateTxWalletTest(NeonTestFramework):
    # Wallet functional test: anti-fee-sniping locktime selection and the
    # -maxtxfee limit in combination with high fee-rate settings.

    def set_test_params(self):
        # One node on a fresh (empty) chain.
        self.setup_clean_chain = True
        self.num_nodes = 1

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def run_test(self):
        self.log.info('Create some old blocks')
        # Mine 200 blocks with the mock clock pinned to the genesis
        # timestamp so the chain tip looks far in the past.
        self.nodes[0].setmocktime(TIME_GENESIS_BLOCK)
        self.nodes[0].generate(200)
        self.nodes[0].setmocktime(0)
        self.test_anti_fee_sniping()
        self.test_tx_size_too_large()

    def test_anti_fee_sniping(self):
        self.log.info('Check that we have some (old) blocks and that anti-fee-sniping is disabled')
        assert_equal(self.nodes[0].getblockchaininfo()['blocks'], 200)
        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
        tx = self.nodes[0].decoderawtransaction(self.nodes[0].gettransaction(txid)['hex'])
        # Old tip: the wallet does not apply an anti-fee-sniping locktime.
        assert_equal(tx['locktime'], 0)
        self.log.info('Check that anti-fee-sniping is enabled when we mine a recent block')
        self.nodes[0].generate(1)
        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
        tx = self.nodes[0].decoderawtransaction(self.nodes[0].gettransaction(txid)['hex'])
        # Recent tip: locktime is set to (at most) the current height, 201.
        assert 0 < tx['locktime'] <= 201

    def test_tx_size_too_large(self):
        # More than 10kB of outputs, so that we hit -maxtxfee with a high feerate
        outputs = {self.nodes[0].getnewaddress(address_type='bech32'): 0.000025 for i in range(400)}
        raw_tx = self.nodes[0].createrawtransaction(inputs=[], outputs=outputs)
        # Each high-feerate option must trip the -maxtxfee cap, both for
        # wallet-built (sendmany, RPC error -6) and funded raw
        # transactions (fundrawtransaction, RPC error -4).
        for fee_setting in ['-minrelaytxfee=0.01', '-mintxfee=0.01', '-paytxfee=0.01']:
            self.log.info('Check maxtxfee in combination with {}'.format(fee_setting))
            self.restart_node(0, extra_args=[fee_setting])
            assert_raises_rpc_error(
                -6,
                "Fee exceeds maximum configured by -maxtxfee",
                lambda: self.nodes[0].sendmany(dummy="", amounts=outputs),
            )
            assert_raises_rpc_error(
                -4,
                "Fee exceeds maximum configured by -maxtxfee",
                lambda: self.nodes[0].fundrawtransaction(hexstring=raw_tx),
            )
        self.log.info('Check maxtxfee in combination with settxfee')
        self.restart_node(0)
        self.nodes[0].settxfee(0.01)
        assert_raises_rpc_error(
            -6,
            "Fee exceeds maximum configured by -maxtxfee",
            lambda: self.nodes[0].sendmany(dummy="", amounts=outputs),
        )
        assert_raises_rpc_error(
            -4,
            "Fee exceeds maximum configured by -maxtxfee",
            lambda: self.nodes[0].fundrawtransaction(hexstring=raw_tx),
        )
        # Reset to automatic fee selection.
        self.nodes[0].settxfee(0)
if __name__ == '__main__':
    # Standard test-framework entry point.
    CreateTxWalletTest().main()
| 39.024096 | 100 | 0.650201 |
acf31e6c1f5b5b8b5f0f89c645ecfa2c06107d4d | 2,903 | py | Python | pilitgui2/api/routers/animation_types.py | skypanther/clc | c2a6b8586f0cae1143ffada8c68e24b82753ba47 | [
"MIT"
] | 2 | 2017-02-18T23:08:52.000Z | 2017-03-30T14:23:19.000Z | pilitgui2/api/routers/animation_types.py | skypanther/clc | c2a6b8586f0cae1143ffada8c68e24b82753ba47 | [
"MIT"
] | null | null | null | pilitgui2/api/routers/animation_types.py | skypanther/clc | c2a6b8586f0cae1143ffada8c68e24b82753ba47 | [
"MIT"
] | null | null | null | """
Endpoint definitions and model operations for: AnimationTypes
The prefix "animation_types" will be added to all endpoints thanks to the parent router
"""
from typing import Any, List, Optional
from fastapi import APIRouter, Depends, HTTPException
from sqlalchemy.orm import Session
from schemas.animation_types import (
AnimationType,
AnimationTypeCreate,
AnimationTypeUpdate,
)
from crud.crud_animation_type import crud_animation_type
from database import get_db
router = APIRouter()  # mounted by the parent router, which adds the "animation_types" prefix
@router.get("/", response_model=List[AnimationType])
def get_animation_types(db: Session = Depends(get_db)) -> List[Optional[AnimationType]]:
    """List every stored animation type."""
    return crud_animation_type.get_animation_types(db)
@router.get("/{animation_type_id}", response_model=AnimationType)
def get_animation_type(
    db: Session = Depends(get_db), *, animation_type_id: int
) -> Optional[AnimationType]:
    """Retrieve the animation type with the given ID.

    Raises an HTTP 404 when no such record exists. This matches the
    behaviour of the PUT/DELETE endpoints in this module; previously a
    missing record leaked ``None`` through the non-optional
    ``response_model`` instead of reporting not-found.
    """
    animation_type = crud_animation_type.get_animation_type_by_id(
        db, animation_type_id=animation_type_id
    )
    if not animation_type:
        # Same error shape as update_animation_type / delete_animation_type
        raise HTTPException(status_code=404, detail="Animation Type not found")
    return animation_type
@router.put("/{animation_type_id}", response_model=AnimationType)
def update_animation_type(
    db: Session = Depends(get_db),
    *,
    animation_type_id: int,
    updated_animation_type: AnimationTypeUpdate
) -> Optional[AnimationType]:
    # Update the animation_type with the given ID.
    # First fetch the existing row so a missing ID becomes a 404 rather
    # than an update of nothing.
    animation_type = crud_animation_type.get_animation_type_by_id(
        db, animation_type_id=animation_type_id
    )
    if not animation_type:
        raise HTTPException(status_code=404, detail="Animation Type not found")
    # Apply the incoming changes to the fetched ORM object.
    animation_type = crud_animation_type.update_animation_type(
        db,
        animation_type_obj=animation_type,
        updated_animation_type_obj=updated_animation_type,
    )
    return animation_type
@router.post("/{animation_type_id}", response_model=AnimationType)
def create_animation_type(
    db: Session = Depends(get_db), *, new_animation_type: AnimationTypeCreate
) -> Optional[AnimationType]:
    """Create a new animation type from the submitted payload."""
    return crud_animation_type.create_animation_type(
        db, animation_type_to_create=new_animation_type
    )
@router.delete("/{animation_type_id}", response_model=AnimationType)
def delete_animation_type(
    db: Session = Depends(get_db), *, animation_type_id: int
) -> Optional[AnimationType]:
    """Delete the animation type with the given ID; 404 if it does not exist."""
    existing = crud_animation_type.get_animation_type_by_id(
        db, animation_type_id=animation_type_id
    )
    if not existing:
        raise HTTPException(status_code=404, detail="Animation Type not found")
    # The removed record is returned to the caller.
    return crud_animation_type.remove_animation_type(
        db, animation_type_id=animation_type_id
    )
| 34.152941 | 88 | 0.766793 |
acf31fae980dfe6c7d322278630922ed70b99f0f | 7,722 | py | Python | tests/test_theclevercarrot.py | mathiazom/recipe-scrapers | 7052bb5725e2a53d3770396a40c133ca6a56af64 | [
"MIT"
] | 811 | 2017-11-05T02:15:47.000Z | 2022-03-31T08:01:19.000Z | tests/test_theclevercarrot.py | mathiazom/recipe-scrapers | 7052bb5725e2a53d3770396a40c133ca6a56af64 | [
"MIT"
] | 409 | 2018-01-21T02:08:21.000Z | 2022-03-30T08:44:02.000Z | tests/test_theclevercarrot.py | mathiazom/recipe-scrapers | 7052bb5725e2a53d3770396a40c133ca6a56af64 | [
"MIT"
] | 298 | 2017-11-07T17:55:33.000Z | 2022-03-31T18:43:11.000Z | from recipe_scrapers.theclevercarrot import TheCleverCarrot
from tests import ScraperTest
class TestTheCleverCarrotScraper(ScraperTest):
    # Regression test for the TheCleverCarrot scraper: each assertion pins
    # the exact value extracted from a stored copy of the sourdough
    # cinnamon-roll recipe page.

    scraper_class = TheCleverCarrot

    def test_host(self):
        self.assertEqual("theclevercarrot.com", self.harvester_class.host())

    def test_canonical_url(self):
        self.assertEqual(
            "https://www.theclevercarrot.com/2017/12/how-to-make-sourdough-cinnamon-rolls-step-by-step-guide/",
            self.harvester_class.canonical_url(),
        )

    def test_title(self):
        self.assertEqual(self.harvester_class.title(), "Soft Sourdough Cinnamon Rolls")

    def test_yields(self):
        self.assertEqual("8 serving(s)", self.harvester_class.yields())

    def test_image(self):
        self.assertEqual(
            "https://www.theclevercarrot.com/wp-content/uploads/2017/12/How-to-Make-Sourdough-Cinnamon-Rolls-a-step-by-step-guide-13-225x225.jpg",
            self.harvester_class.image(),
        )

    def test_ingredients(self):
        # Full ingredient list, including the recipe's trailing notes,
        # exactly as the scraper returns them.
        self.assertEqual(
            [
                "160 g (2/3 cup) milk, whole or 2%",
                "28 g (2 tbsp) unsalted butter, melted (see note below)",
                "1 large egg",
                "100 g (1/2 cup) bubbly, active sourdough starter",
                "24 g (2 tbsp) granulated sugar",
                "300 g (2½ cups) all-purpose flour (I use King Arthur)",
                "5 g (1 tsp) fine sea salt",
                "cooking spray or oil, for coating",
                "28 g (2 tbsp) unsalted butter",
                "100 g (1/2 cup) granulated sugar",
                "3 tsp. ground cinnamon",
                "1 level tbsp. flour",
                "2 tbsp unsalted butter, softened",
                "⅓ cup whipped cream cheese, room temperature",
                "¼- 1/2 cup powdered sugar, sifted (add more if you like it sweet!)",
                "1-2 tbsp milk",
                "For a richer dough, increase the butter to 115 (8 tbsp) and use 360 g (3 cups) flour total. A reader recommended this tip, and I have to say, it’s my preferred method.",
                "Make sure the melted butter and milk mixture has cooled slightly before making the dough. If it’s too hot, the dough will become incredibly sticky like cake batter (I’ve experienced this many times). If this happens to you, don’t worry- wait for the dough to cool down before adding more flour, if needed.",
                "Recent recipe update: flour has been added to the cinnamon-sugar filling as a binder to prevent the butter from leaking out of the rolls.",
            ],
            self.harvester_class.ingredients(),
        )

    def test_instructions(self):
        # NOTE(review): the stray `return` before assertEqual is kept for
        # byte-identity; unittest ignores test-method return values.
        return self.assertEqual(
            "Baker's Schedule\nOvernight Option\nMake the dough in the evening and let rise overnight. The following morning, roll, cut and shape the dough. Rest for 1-2 hours (second rise) before baking.\nAs an alternative, after resting for 1 hour, cover the dough and chill until ready to use. Rest at room temperature before baking. The dough should be plump and puffy before baking.\nMake-Ahead Option (Freeze): Place the cut & shaped cinnamon rolls into a parchment lined 9-inch springform pan. Cover with two layers of plastic wrap. Freeze until ready to use. The night before baking, remove the old plastic wrap and replace with fresh wrap (this prevents any condensation from dripping onto the rolls). Defrost overnight, about 10-12 hrs. at room temperature, approximately 67 F. Bake the following morning as directed.\nMake the Dough\nIn the evening: Combine the melted butter and milk in a small bowl. Cool slightly before using.\nAdd the egg, sourdough starter, and sugar to the bowl of a stand mixer fitted with the paddle attachment. Mix to combine. With the machine running, slowly pour in the milk mixture. Add the flour and salt. Continue mixing until a rough, sticky dough forms, about 1 minute. Scrape down the sides of the bowl. Cover with a damp towel and let rest for 30 minutes.\nAfter the dough has rested, switch to the dough hook. Knead on medium-low speed for 6-8 minutes (I use #2 or #3 on my stand mixer). The dough should feel soft, supple and pull away from the sides of the bowl when ready. If it’s too sticky add a small bit of flour.\nBulk Rise\nTransfer the dough to a medium-size bowl coated in butter. Cover with plastic wrap. Let rise overnight until double in size, about 8-12 + hrs. @ 67-68 F, depending on temperature.\nStretch and Fold the Dough (optional step): about 30 minutes- 1 hr. into the bulk rise stretch and fold the dough: grab a portion of the dough and stretch it upward. Fold it over toward the center of the bowl. 
Give the bowl a 1/4 turn; stretch and fold the dough again. Continue this technique until you’ve come full circle around the bowl (4 folds total). For video guidance, click here. This optional step will increase the overall volume of the rolls and aerate the dough.\nRoll the Dough\nIn the morning: Line a 9-inch springform pan with parchment paper. I like to scrunch the paper into a ball first, open it up, and then line the inside with enough excess to hang over the sides for easy removal. It tends to fit better this way.\nLightly oil and flour your countertop to prevent sticking. Coax the dough out of the bowl. Gently pat into a rough rectangle. Let rest for 10 minutes for easier rolling.\nDust the dough (and your rolling pin) with flour. Roll the dough into a 16 x 12-ish rectangle using a tape measure for accuracy. If the dough resists, let rest for 5-10 minutes and try again.\nMake the Cinnamon-Sugar Filling\nCombine the cinnamon, sugar and flour in a small bowl; set aside. Melt the 28 g (2 tbsp) of butter in a shallow pan or microwave. Once the butter has cooled slightly, brush the entire surface of the dough, including the top, bottom and sides. Sprinkle the dough with the cinnamon-sugar mixture leaving a 1/2-inch border around the edges. Smooth it out with your hands.\nShape & Cut the Dough\nStarting on the long side of the dough (16-inch), roll it into a log pressing down gently as you go. Take your time with this step. The log needs to be tight so the swirls stay in tact. You should end up seam side down. TIP: if the dough starts to get sticky from the heat of your hands, lightly oil or flour your fingertips, take a deep breath and try again.\nCut the dough into 2-inch sections using a oiled knife or bench scraper. I lightly “mark” the dough first to make sure each piece is roughly the same size.\nSecond Rise\nPlace the rolls into the lined pan and let rest for 1- 2 hours, or until the dough puffs up. 
Alternatively, if you’d like to chill or freeze the rolls, please refer to the “Make-Ahead” option in the Baker’s Schedule at the top of this recipe.\nBake the Cinnamon Rolls\nPreheat oven to 350 F. Bake the dough onto the center rack and bake for 35-40 minutes (check at the 30 minute mark). The tops should turn light golden brown when ready.\nRemove from the oven and cool in the pan for 15 minutes. This helps the butter to absorb back into the dough. Then lift up the rolls, while still on the parchment paper, and transfer to a wire rack.\nMake the Glaze\nWhile the rolls are baking or cooling make the glaze. Add softened butter, whipped cream cheese and sifted powdered sugar to the bowl of a stand mixer. Beat until smooth, thinning out the consistency with a little milk as needed. The ingredients must be soft and at room temperature for best results.\nTo serve, top the rolls with some of the glaze or lightly dust with powdered sugar. These rolls are best enjoyed slightly warm on the same day they are baked.",
            self.harvester_class.instructions(),
        )
| 126.590164 | 4,937 | 0.722611 |
acf32052b3dff74941b5fb4b5544a3b8c061445a | 344 | py | Python | src/sentry/features/handler.py | learninto/sentry | 4f9f564841498b3af49c1677d6b61f3e47b01923 | [
"BSD-3-Clause"
] | 1 | 2019-10-17T17:46:16.000Z | 2019-10-17T17:46:16.000Z | src/sentry/features/handler.py | learninto/sentry | 4f9f564841498b3af49c1677d6b61f3e47b01923 | [
"BSD-3-Clause"
] | null | null | null | src/sentry/features/handler.py | learninto/sentry | 4f9f564841498b3af49c1677d6b61f3e47b01923 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
__all__ = ["FeatureHandler"]  # explicit public API of this module
class FeatureHandler(object):
    """Base class for feature-flag handlers.

    Subclasses list the feature names they understand in ``features`` and
    implement :meth:`has`.
    """

    features = set()

    def __call__(self, feature, actor):
        # A feature this handler does not know about yields None, so
        # another handler may still decide it.
        if feature.name in self.features:
            return self.has(feature, actor)
        return None

    def has(self, feature, actor):
        raise NotImplementedError
| 20.235294 | 45 | 0.668605 |
acf320674af1fec7d7683c6e5ae33eba16cb2479 | 28,421 | py | Python | quantarhei/spectroscopy/labsetup.py | ohsu6072/quantarhei | 713dc77e0b99a8edca0989e0e3fe2d102516d486 | [
"MIT"
] | null | null | null | quantarhei/spectroscopy/labsetup.py | ohsu6072/quantarhei | 713dc77e0b99a8edca0989e0e3fe2d102516d486 | [
"MIT"
] | null | null | null | quantarhei/spectroscopy/labsetup.py | ohsu6072/quantarhei | 713dc77e0b99a8edca0989e0e3fe2d102516d486 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Laboratory set-up for non-linear spectroscopy
This class controls calculations of non-linear optical spectra, and
other experiments in which laboratory setting needs to be controlled.
Examples are pulse polarization setting, pulse shapes and spectra
in non-linear spectroscopy.
Class Details
-------------
"""
import numpy
from ..utils import Integer
from ..utils.vectors import X
from ..core.time import TimeAxis
from ..core.frequency import FrequencyAxis
from ..core.dfunction import DFunction
class LabSetup:
"""Laboratory set-up for non-linear spectroscopy
Class representing laboratory setup for non-linear spectroscopic
experiments. It holds information about pulse shapes and polarizations.
Pulses can be set in time- and/or frequency-domain. **Consistency between
the domains is not checked nor enforced**. Consistent conversion between
domains is provided by convenience routines [TO BE IMPLEMENTED]
Parameters
----------
nopulses : int
Number of pulses in the experiment. Default is 3.
"""
number_of_pulses = Integer("number_of_pulses")
def __init__(self, nopulses=3):
    """Initialize a lab setup for an experiment with `nopulses` pulses."""
    self.number_of_pulses = nopulses

    # Weight matrix used together with F4e in set_polarizations();
    # numerically equal to [[4,-1,-1],[-1,4,-1],[-1,-1,4]]/30,
    # written here as (5*I - J)/30 with J the all-ones matrix.
    self.M4 = (5.0*numpy.identity(3) - numpy.ones((3, 3)))/30.0

    # Axes on which pulse shapes/spectra are defined
    self.timeaxis = None
    self.freqaxis = None
    self.axis_type = None          # "time" or "frequency"

    # Polarization-related data
    self.F4eM4 = None
    self.e = None
    self.has_polarizations = False

    # Which domains have been populated so far
    self.has_freqdomain = False
    self.has_timedomain = False

    # Per-pulse envelopes (time domain) and spectra (frequency domain)
    self.pulse_t = [None]*nopulses
    self.pulse_f = [None]*nopulses

    # Pulse carrier frequencies
    self.omega = None
def set_pulse_shapes(self, axis, params):
    """Sets the pulse properties

    Pulse shapes or spectra are set in this routine. If axis is of
    `TimeAxis` type, the parameters are understood as time domain,
    if axis is of `FrequencyAxis` type, they are understood as frequency
    domain.

    Parameters
    ----------
    axis : TimeAxis or FrequencyAxis
        Quantarhei time axis object, which specifies the values for which
        pulse properties are defined. If `TimeAxis` is specified, the
        parameters are understood as time domain, if `FrequencyAxis`
        is specified, they are understood as frequency domain.

    params : dictionary
        Dictionary of pulse parameters. The parameters are the following:
        `ptype` is the pulse type with possible values `Gaussian` and
        `numeric`. Time domain pulses are specified with their center
        at t = 0.

        **Gaussian** pulse has further parameters `amplitude`, `FWHM`,
        and `frequency` with obvious meanings. `FWHM` is speficied in `fs`,
        `frequency` is specified in energy units, while `amplitude`
        is in units of [energy]/[transition dipole moment]. The formula
        for the lineshape is

        .. math::
            \\rm{shape}(\\omega) =
            \\frac{2}{\\Delta}\\sqrt{\\frac{4\\ln(2)}{\\pi}}
            \\exp\\left\\{-\\frac{4\\ln(2)\\omega^2}{\\Delta^2}\\right\\}

        The same formulae are used for time- and frequency domain
        definitions. For time domain, :math:`t` should be used in stead of
        :math:`\omega`.

        **numeric** pulse is specified by a second parameters `function`
        which should be of DFunction type and specifies line shape around
        zero frequency.

    Examples
    --------
    >>> import quantarhei as qr
    >>> import matplotlib.pyplot as plt
    >>> lab = LabSetup()
    ...
    >>> # Time axis around 0
    >>> time = qr.TimeAxis(-500.0, 1000, 1.0, atype="complete")

    Gaussian pulse shape in time domain

    >>> pulse2 = dict(ptype="Gaussian", FWHM=150, amplitude=1.0)
    >>> params = (pulse2, pulse2, pulse2)
    >>> lab.set_pulse_shapes(time, params)

    Testing the pulse shape

    >>> dfc = lab.get_pulse_envelop(1, time.data) # doctest: +SKIP
    >>> pl = plt.plot(time.data, dfc) # doctest: +SKIP
    >>> plt.show() # doctest: +SKIP

    .. plot::

        import quantarhei as qr
        import matplotlib.pyplot as plt

        lab = qr.LabSetup()
        time = qr.TimeAxis(-500.0, 1000, 1.0, atype="complete")

        pulse2 = dict(ptype="Gaussian", FWHM=150.0, amplitude=1.0)
        params = (pulse2, pulse2, pulse2)

        lab.set_pulse_shapes(time, params)

        dfc = lab.get_pulse_envelop(1, time.data)
        pl = plt.plot(time.data, dfc)
        plt.show()

    `numeric` pulse shape in time domain

    >>> # We take the DFunction for creation of `numeric`ly defined
    >>> # pulse shape from the previous example
    >>> pls = lab.pulse_t[2]

    >>> # new lab object
    >>> lab2 = LabSetup()
    >>> pulse1 = dict(ptype="numeric", function=pls)
    >>> params = (pulse1, pulse1, pulse1)
    >>> lab2.set_pulse_shapes(time, params)

    Testing the pulse shape

    >>> dfc = lab2.get_pulse_envelop(1, time.data) # doctest: +SKIP
    >>> pl = plt.plot(time.data, dfc) # doctest: +SKIP
    >>> plt.show() # we skip output here # doctest: +SKIP

    Gaussian pulse shape in frequency domain

    >>> lab = LabSetup()
    >>> # FrequencyAxis around 0
    >>> freq = qr.FrequencyAxis(-2500, 1000, 5.0)
    ...
    >>> pulse2 = dict(ptype="Gaussian", FWHM=800, amplitude=1.0)
    >>> params = (pulse2, pulse2, pulse2)
    >>> lab.set_pulse_shapes(freq, params)

    Testing the pulse shape

    >>> # getting differnt frequency axis
    >>> freq2 = qr.FrequencyAxis(-1003, 100, 20.0)
    >>> # and reading spectrum at two different sets of points
    >>> dfc1 = lab.get_pulse_spectrum(1, freq.data)
    >>> dfc2 = lab.get_pulse_spectrum(1, freq2.data)
    >>> pl1 = plt.plot(freq.data, dfc1) # doctest: +SKIP
    >>> pl2 = plt.plot(freq2.data, fdc2) # doctest: +SKIP
    >>> plt.show() # doctest: +SKIP

    We plot in two different sets of points.

    .. plot::

        import quantarhei as qr
        import matplotlib.pyplot as plt

        lab = qr.LabSetup()
        freq = qr.FrequencyAxis(-2500, 1000, 5.0)

        pulse2 = dict(ptype="Gaussian", FWHM=800.0, amplitude=1.0)
        params = (pulse2, pulse2, pulse2)

        lab.set_pulse_shapes(freq, params)

        freq2 = qr.FrequencyAxis(-1000, 100, 20.0)

        dfc1 = lab.get_pulse_spectrum(1, freq.data)
        dfc2 = lab.get_pulse_spectrum(1, freq2.data)

        pl1 = plt.plot(freq.data, dfc1)
        pl2 = plt.plot(freq2.data, dfc2)
        plt.show()

    `numeric` pulse shape in frequency domain

    >>> # We take the DFunction for creation of `numeric`ly defined
    >>> # pulse shape from the previous example
    >>> pls = lab.pulse_f[2]

    >>> # new lab object
    >>> lab2 = LabSetup()
    >>> pulse1 = dict(ptype="numeric", function=pls)
    >>> params = (pulse1, pulse1, pulse1)
    >>> lab2.set_pulse_shapes(freq, params)

    Testing the pulse shape

    >>> dfc = lab2.get_pulse_envelop(1, freq.data) # doctest: +SKIP
    >>> pl = plt.plot(freq.data, dfc) # doctest: +SKIP
    >>> plt.show() # we skip output here # doctest: +SKIP

    Situations in which Exceptions are thrown

    >>> pulse3 = dict(ptype="other", FWHM=10, amplitude=1.0)
    >>> params = (pulse3, pulse3, pulse3)
    >>> lab.set_pulse_shapes(time, params)
    Traceback (most recent call last):
        ...
    Exception: Unknown pulse type

    >>> params = (pulse2, pulse2)
    >>> lab.set_pulse_shapes(time, params)
    Traceback (most recent call last):
        ...
    Exception: set_pulses requires 3 parameter sets

    >>> params = (pulse2, pulse2)
    >>> lab.set_pulse_shapes(time.data, params)
    Traceback (most recent call last):
        ...
    Exception: Wrong axis paramater

    >>> time = qr.TimeAxis(0.0, 1000, 1.0)
    >>> lab.set_pulse_shapes(time, params)
    Traceback (most recent call last):
        ...
    Exception: TimeAxis has to be of 'complete' type use atype='complete' as a parameter of TimeAxis

    """

    def _gaussian(x, fwhm, amp):
        # Normalized Gaussian multiplied by amplitude. NOTE(review): the
        # original hard-coded approximation of pi (3.14159) is kept on
        # purpose -- the numerical doctest outputs elsewhere in this
        # class depend on it.
        return (2.0/fwhm)*numpy.sqrt(numpy.log(2.0)/3.14159) \
            *amp*numpy.exp(-4.0*numpy.log(2.0)*(x/fwhm)**2)

    def _sampled(fce, ax):
        # Sample a DFunction-like object point by point on the axis `ax`
        data = numpy.zeros(ax.length)
        for i_p, x_p in enumerate(ax.data):
            data[i_p] = fce.at(x_p)
        return data

    # Determine the working domain from the type of `axis`
    if isinstance(axis, TimeAxis):
        if axis.atype != "complete":
            raise Exception("TimeAxis has to be of 'complete' type"+
                            " use atype='complete' as a parameter"+
                            " of TimeAxis")
        self.timeaxis = axis
        self.axis_type = "time"
    elif isinstance(axis, FrequencyAxis):
        self.freqaxis = axis
        self.axis_type = "frequency"
    else:
        # NOTE: typo "paramater" is kept -- it is pinned by the doctest above
        raise Exception("Wrong axis paramater")

    if len(params) != self.number_of_pulses:
        raise Exception("set_pulses requires "
                        + str(self.number_of_pulses) + " parameter sets")

    # Both domains use identical construction; only the axis object and
    # the target pulse list differ. This replaces the previous verbatim
    # duplication of the Gaussian formula and the sampling loop.
    if self.axis_type == "time":
        ax = self.timeaxis
        pulses = self.pulse_t
    else:
        ax = self.freqaxis
        pulses = self.pulse_f

    for k_p, par in enumerate(params):
        if par["ptype"] == "Gaussian":
            pulses[k_p] = DFunction(ax, _gaussian(ax.data, par["FWHM"],
                                                  par["amplitude"]))
        elif par["ptype"] == "numeric":
            pulses[k_p] = DFunction(ax, _sampled(par["function"], ax))
        else:
            raise Exception("Unknown pulse type")

    if self.axis_type == "time":
        self.has_timedomain = True
    else:
        self.has_freqdomain = True
def set_polarizations(self, pulse_polarizations=(X, X, X),
                      detection_polarization=X):
    """Sets polarizations of the experimental pulses

    Parameters
    ----------
    pulse_polarization : tuple like
        Contains three vectors of polarization of the three pulses
        of the experiment. Currently we assume three pulse experiment
        per default.

    detection_polarization : array
        Vector of detection polarization

    Examples
    --------
    >>> import quantarhei as qr
    >>> lab = LabSetup()
    >>> lab.set_polarizations(pulse_polarizations=(qr.utils.vectors.X,
    ...                                            qr.utils.vectors.Y,
    ...                                            qr.utils.vectors.Z))
    >>> print(lab.e[0,:])
    [ 1. 0. 0.]
    >>> print(lab.e[3,:])
    [ 1. 0. 0.]
    >>> print(lab.e[2,:])
    [ 0. 0. 1.]

    >>> lab.set_polarizations(pulse_polarizations=(qr.utils.vectors.X,
    ...                                            qr.utils.vectors.Y))
    Traceback (most recent call last):
        ...
    Exception: pulse_polarizations requires 3 values
    """
    if len(pulse_polarizations) == self.number_of_pulses:
        # rows 0..2 hold the pulse polarizations, row 3 the detection
        # polarization
        self.e = numpy.zeros((4,3))
        # NOTE(review): range(3) and the (4,3) shape hard-code a
        # three-pulse experiment even though number_of_pulses is
        # configurable -- confirm before using nopulses != 3.
        for i in range(3):
            self.e[i,:] = pulse_polarizations[i]
        self.e[3,:] = detection_polarization
        e = self.e
        # Pairwise scalar products of the four polarization vectors,
        # contracted with the M4 matrix prepared in __init__
        F4e = numpy.zeros(3)
        F4e[0] = numpy.dot(e[3,:],e[2,:])*numpy.dot(e[1,:],e[0,:])
        F4e[1] = numpy.dot(e[3,:],e[1,:])*numpy.dot(e[2,:],e[0,:])
        F4e[2] = numpy.dot(e[3,:],e[0,:])*numpy.dot(e[2,:],e[1,:])
        self.F4eM4 = numpy.dot(F4e,self.M4)
    else:
        text = "pulse_polarizations requires "+ \
               str(self.number_of_pulses)+" values"
        raise Exception(text)
    # NOTE(review): self.has_polarizations is never set True here --
    # confirm whether that flag should be updated.
    self.detection_polarization = detection_polarization
def get_pulse_polarizations(self):
    """Return the polarization vectors of the laser pulses as a list.

    Examples
    --------
    >>> import quantarhei as qr
    >>> lab = LabSetup()
    >>> lab.set_polarizations(pulse_polarizations=(qr.utils.vectors.X,
    ...                                            qr.utils.vectors.Y,
    ...                                            qr.utils.vectors.Z))
    >>> pols = lab.get_pulse_polarizations()
    >>> print(len(pols))
    3
    """
    # one row of self.e per pulse
    return [self.e[i, :] for i in range(self.number_of_pulses)]
def get_detection_polarization(self):
    """Return the detection polarization vector.

    Examples
    --------
    >>> import quantarhei as qr
    >>> lab = LabSetup()
    >>> lab.set_polarizations(pulse_polarizations=(qr.utils.vectors.X,
    ...                                            qr.utils.vectors.Y,
    ...                                            qr.utils.vectors.Z))
    >>> detpol = lab.get_detection_polarization()
    >>> print(detpol)
    [ 1. 0. 0.]
    """
    # detection polarization is stored as the last row of self.e
    return self.e[3, :]
def convert_to_time(self):
    """Converts pulse information from frequency domain to time domain

    Fourier-transforms every pulse spectrum and stores the results as
    time-domain envelopes sharing a single TimeAxis. The frequency
    domain must have been set beforehand.

    Examples
    --------
    Back-and-forth conversion reproduces the original spectrum:

    >>> import quantarhei as qr
    >>> import numpy
    >>> lab = LabSetup()
    >>> freq = qr.FrequencyAxis(-100, 200, 1.0) # atype="complete" is default
    >>> pulse = dict(ptype="Gaussian", FWHM=20, amplitude=1.0)
    >>> params = (pulse, pulse, pulse)
    >>> lab.set_pulse_shapes(freq, params)
    >>> freq_vals_1 = lab.get_pulse_spectrum(2, freq.data)
    >>> lab.convert_to_time()
    >>> lab.convert_to_frequency()
    >>> freq_vals_2 = lab.get_pulse_spectrum(2, freq.data)
    >>> numpy.allclose(freq_vals_2, freq_vals_1)
    True

    Situations in which exceptions are thrown

    >>> lab = LabSetup()
    >>> lab.convert_to_time()
    Traceback (most recent call last):
        ...
    Exception: Cannot convert to time domain: frequency domain not set
    """
    if not self.has_freqdomain:
        raise Exception("Cannot convert to time domain: "+
                        "frequency domain not set")

    # time axis corresponding to the stored frequency axis
    time = self.freqaxis.get_TimeAxis()
    for k_p, pulse in enumerate(self.pulse_f):
        ft_pulse = pulse.get_Fourier_transform()
        # replace the transform's axis so that all pulses share the
        # single TimeAxis computed above
        ft_pulse.axis = time
        self.pulse_t[k_p] = ft_pulse
    self.timeaxis = time
    self.has_timedomain = True
def convert_to_frequency(self):
    """Converts pulse information from time domain to frequency domain

    Fourier-transforms every pulse envelope and stores the results as
    frequency-domain spectra sharing a single FrequencyAxis. The time
    domain must have been set beforehand.

    Examples
    --------
    >>> import quantarhei as qr
    >>> lab = LabSetup()
    >>> time = qr.TimeAxis(-100,200,1.0, atype="complete")
    >>> pulse = dict(ptype="Gaussian", FWHM=20, amplitude=1.0)
    >>> params = (pulse, pulse, pulse)
    >>> lab.set_pulse_shapes(time, params)
    >>> lab.convert_to_frequency()

    Situations in which exceptions are thrown

    >>> lab = LabSetup()
    >>> lab.convert_to_frequency()
    Traceback (most recent call last):
        ...
    Exception: Cannot convert to frequency domain: time domain not set
    """
    if not self.has_timedomain:
        raise Exception("Cannot convert to frequency domain: "+
                        "time domain not set")

    # frequency axis corresponding to the stored time axis
    freq = self.timeaxis.get_FrequencyAxis()
    for k_p, pulse in enumerate(self.pulse_t):
        ft_pulse = pulse.get_Fourier_transform()
        # all pulses share the single FrequencyAxis computed above
        ft_pulse.axis = freq
        self.pulse_f[k_p] = ft_pulse
    self.freqaxis = freq
    self.has_freqdomain = True
def get_pulse_envelop(self, k, t):
    """Return the time-domain envelope of pulse `k` evaluated at points `t`.

    Parameters
    ----------
    k : int
        Index of the pulse to be returned

    t : array like
        Array of time points at which the pulse is returned

    Examples
    --------
    >>> import quantarhei as qr
    >>> lab = LabSetup()
    >>> time = qr.TimeAxis(-100, 200, 1.0, atype="complete")
    >>> pulse2 = dict(ptype="Gaussian", FWHM=30.0, amplitude=1.0)
    >>> params = (pulse2, pulse2, pulse2)
    >>> lab.set_pulse_shapes(time, params)
    >>> dfc = lab.get_pulse_envelop(1, [-50.0, -30.0, 2.0, 30.0])
    >>> print(dfc)
    [ 1.41569269e-05 1.95716182e-03 3.09310793e-02 1.95716182e-03]
    """
    envelope = self.pulse_t[k]
    return envelope.at(t)
    def get_pulse_spectrum(self, k: int, omega):
        """Returns a numpy array with the pulse frequency-domain spectrum
        Parameters
        ----------
        k : int
            Index of the pulse to be returned
        omega : array like
            Array of frequency points at which the pulse is returned
        Examples
        --------
        >>> import quantarhei as qr
        >>> lab = LabSetup()
        >>> freq = qr.FrequencyAxis(-2500, 1000, 5.0)
        >>> pulse2 = dict(ptype="Gaussian", FWHM=800.0, amplitude=1.0)
        >>> params = (pulse2, pulse2, pulse2)
        >>> lab.set_pulse_shapes(freq, params)
        >>> dfc = lab.get_pulse_spectrum(1, [600.0, 700.0, 800.0, 900.0])
        >>> print(dfc)
        [ 2.46865554e-04 1.40563844e-04 7.33935684e-05 3.51409609e-05]
        Here is a complete example with setting, getting and plotting spectrum:
        .. plot::
            :include-source:
            import quantarhei as qr
            import matplotlib.pyplot as plt
            lab = qr.LabSetup()
            freq = qr.FrequencyAxis(-2500, 1000, 5.0)
            pulse2 = dict(ptype="Gaussian", FWHM=800.0, amplitude=1.0)
            params = (pulse2, pulse2, pulse2)
            lab.set_pulse_shapes(freq, params)
            pls = lab.pulse_f[2]
            lab2 = qr.LabSetup()
            pulse1 = dict(ptype="numeric", function=pls)
            params = (pulse1, pulse1, pulse1)
            lab2.set_pulse_shapes(freq, params)
            dfc = lab2.get_pulse_spectrum(1, freq.data)
            pl = plt.plot(freq.data, dfc)
            plt.show()
        """
        # evaluate the k-th stored frequency-domain pulse (a DFunction) at
        # the points omega; counterpart of get_pulse_envelop for self.pulse_f
        return self.pulse_f[k].at(omega)
def set_pulse_frequencies(self, omegas):
"""Sets pulse frequencies
Parameters
----------
omegas : array of floats
Frequencies of pulses
Examples
--------
>>> lab = LabSetup()
>>> lab.set_pulse_frequencies([1.0, 2.0, 1.0])
>>> print(lab.omega)
[1.0, 2.0, 1.0]
Situation which throws an exception
>>> lab = LabSetup()
>>> lab.set_pulse_frequencies([1.0, 2.0, 1.0, 6.0])
Traceback (most recent call last):
...
Exception: Wrong number of frequencies: 3 required
"""
# FIXME: energe unit control has to be in place
if len(omegas) == self.number_of_pulses:
self.omega = omegas
else:
raise Exception("Wrong number of frequencies: "+
str(self.number_of_pulses)+" required")
    def get_pulse_frequency(self, k: int):
        """Returns frequency of the pulse with index k
        Parameters
        ----------
        k : int
            Pulse index
        Examples
        --------
        >>> lab = LabSetup()
        >>> lab.set_pulse_frequencies([1.0, 2.0, 1.0])
        >>> print(lab.get_pulse_frequency(1))
        2.0
        """
        # self.omega is populated by set_pulse_frequencies()
        return self.omega[k]
class labsetup(LabSetup):
    """Lowercase alias of :class:`LabSetup`; identical behavior (empty subclass)."""
    pass
| 33.955795 | 104 | 0.48524 |
acf320854f82fee7aae501501c091073748419e2 | 599 | py | Python | theapp/urls.py | ruthjomo/To-Do-app | 31aa32861d34efca4ace2064b03d07b61768f3ca | [
"Unlicense",
"MIT"
] | null | null | null | theapp/urls.py | ruthjomo/To-Do-app | 31aa32861d34efca4ace2064b03d07b61768f3ca | [
"Unlicense",
"MIT"
] | null | null | null | theapp/urls.py | ruthjomo/To-Do-app | 31aa32861d34efca4ace2064b03d07b61768f3ca | [
"Unlicense",
"MIT"
] | null | null | null | from django.conf import settings
from django.conf.urls.static import static
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url('update_task/<str:pk>/', views.updateTask, name='update_task'),
url('delete/<str:pk>/', views.deleteTask, name='delete'),
url(r'^user/profile', views.profile, name='profile'),
url(r'^update/user/',views.update_profile, name='update_profile'),
url(r'^user/(?P<username>\w+)', views.user_profile, name='user_profile'),
# url(r'^new/profile$', views.add_profile, name='add_profile'),
] | 42.785714 | 77 | 0.689482 |
acf3215046f2e3b61cd5a89e07dfd038c4638245 | 25,430 | py | Python | onmt/tests/test_beam_search.py | Linohong/OpenNMT_dialog | 4a9e598afca780723d354d599815c320706af937 | [
"MIT"
] | null | null | null | onmt/tests/test_beam_search.py | Linohong/OpenNMT_dialog | 4a9e598afca780723d354d599815c320706af937 | [
"MIT"
] | null | null | null | onmt/tests/test_beam_search.py | Linohong/OpenNMT_dialog | 4a9e598afca780723d354d599815c320706af937 | [
"MIT"
] | null | null | null | import unittest
from onmt.translate.beam import GNMTGlobalScorer
from onmt.translate.beam_search import BeamSearch
from copy import deepcopy
import torch
class GlobalScorerStub(object):
    """Scorer stand-in for the beam tests: unit length penalty, zero
    coverage penalty, identity score, and no global-state updates."""

    alpha = 0
    beta = 0

    def __init__(self):
        # exposed as callables, mirroring GNMTGlobalScorer's interface
        self.length_penalty = self._unit_length_penalty
        self.cov_penalty = self._zero_cov_penalty
        self.has_cov_pen = False
        self.has_len_pen = False

    @staticmethod
    def _unit_length_penalty(x, alpha):
        # constant penalty: leaves scores unscaled
        return 1.

    @staticmethod
    def _zero_cov_penalty(cov, beta):
        # zero penalty shaped (1, batch-dim of cov)
        return torch.zeros(
            (1, cov.shape[-2]), device=cov.device, dtype=torch.float)

    def update_global_state(self, beam):
        pass

    def score(self, beam, scores):
        return scores
class TestBeamSearch(unittest.TestCase):
    """Unit tests for BeamSearch: n-gram repeat blocking, exclusion tokens,
    minimum-length enforcement, termination bookkeeping, and the shape of
    the attention returned for finished hypotheses."""
    # score the repeat blocker assigns to killed beams
    BLOCKED_SCORE = -10e20
    def test_advance_with_all_repeats_gets_blocked(self):
        """A beam that repeats one token must be blocked once the repeated
        n-gram exceeds ngram_repeat."""
        # all beams repeat (beam >= 1 repeat dummy scores)
        beam_sz = 5
        n_words = 100
        repeat_idx = 47
        ngram_repeat = 3
        for batch_sz in [1, 3]:
            beam = BeamSearch(
                beam_sz, batch_sz, 0, 1, 2, 2,
                torch.device("cpu"), GlobalScorerStub(), 0, 30,
                False, ngram_repeat, set(),
                torch.randint(0, 30, (batch_sz,)), False, 0.)
            for i in range(ngram_repeat + 4):
                # predict repeat_idx over and over again
                word_probs = torch.full(
                    (batch_sz * beam_sz, n_words), -float('inf'))
                word_probs[0::beam_sz, repeat_idx] = 0
                attns = torch.randn(1, batch_sz * beam_sz, 53)
                beam.advance(word_probs, attns)
                if i <= ngram_repeat:
                    expected_scores = torch.tensor(
                        [0] + [-float('inf')] * (beam_sz - 1))\
                        .repeat(batch_sz, 1)
                    self.assertTrue(beam.topk_log_probs.equal(expected_scores))
                else:
                    self.assertTrue(
                        beam.topk_log_probs.equal(
                            torch.tensor(self.BLOCKED_SCORE)
                            .repeat(batch_sz, beam_sz)))
    def test_advance_with_some_repeats_gets_blocked(self):
        """Only the repeating beam is blocked; a non-repeating beam
        survives and takes over slot 0."""
        # beam 0 and beam >=2 will repeat (beam >= 2 repeat dummy scores)
        beam_sz = 5
        n_words = 100
        repeat_idx = 47
        ngram_repeat = 3
        for batch_sz in [1, 3]:
            beam = BeamSearch(
                beam_sz, batch_sz, 0, 1, 2, 2,
                torch.device("cpu"), GlobalScorerStub(), 0, 30,
                False, ngram_repeat, set(),
                torch.randint(0, 30, (batch_sz,)), False, 0.)
            for i in range(ngram_repeat + 4):
                # non-interesting beams are going to get dummy values
                word_probs = torch.full(
                    (batch_sz * beam_sz, n_words), -float('inf'))
                if i == 0:
                    # on initial round, only predicted scores for beam 0
                    # matter. Make two predictions. Top one will be repeated
                    # in beam zero, second one will live on in beam 1.
                    word_probs[0::beam_sz, repeat_idx] = -0.1
                    word_probs[0::beam_sz, repeat_idx + i + 1] = -2.3
                else:
                    # predict the same thing in beam 0
                    word_probs[0::beam_sz, repeat_idx] = 0
                    # continue pushing around what beam 1 predicts
                    word_probs[1::beam_sz, repeat_idx + i + 1] = 0
                attns = torch.randn(1, batch_sz * beam_sz, 53)
                beam.advance(word_probs, attns)
                if i <= ngram_repeat:
                    self.assertFalse(
                        beam.topk_log_probs[0::beam_sz].eq(
                            self.BLOCKED_SCORE).any())
                    self.assertFalse(
                        beam.topk_log_probs[1::beam_sz].eq(
                            self.BLOCKED_SCORE).any())
                else:
                    # now beam 0 dies (along with the others), beam 1 -> beam 0
                    self.assertFalse(
                        beam.topk_log_probs[:, 0].eq(
                            self.BLOCKED_SCORE).any())
                    self.assertTrue(
                        beam.topk_log_probs[:, 1:].equal(
                            torch.tensor(self.BLOCKED_SCORE)
                            .repeat(batch_sz, beam_sz-1)))
    def test_repeating_excluded_index_does_not_die(self):
        """Tokens in the exclusion set may repeat without being blocked."""
        # beam 0 and beam >= 2 will repeat (beam 2 repeats excluded idx)
        beam_sz = 5
        n_words = 100
        repeat_idx = 47 # will be repeated and should be blocked
        repeat_idx_ignored = 7 # will be repeated and should not be blocked
        ngram_repeat = 3
        for batch_sz in [1, 3]:
            beam = BeamSearch(
                beam_sz, batch_sz, 0, 1, 2, 2,
                torch.device("cpu"), GlobalScorerStub(), 0, 30,
                False, ngram_repeat, {repeat_idx_ignored},
                torch.randint(0, 30, (batch_sz,)), False, 0.)
            for i in range(ngram_repeat + 4):
                # non-interesting beams are going to get dummy values
                word_probs = torch.full(
                    (batch_sz * beam_sz, n_words), -float('inf'))
                if i == 0:
                    word_probs[0::beam_sz, repeat_idx] = -0.1
                    word_probs[0::beam_sz, repeat_idx + i + 1] = -2.3
                    word_probs[0::beam_sz, repeat_idx_ignored] = -5.0
                else:
                    # predict the same thing in beam 0
                    word_probs[0::beam_sz, repeat_idx] = 0
                    # continue pushing around what beam 1 predicts
                    word_probs[1::beam_sz, repeat_idx + i + 1] = 0
                    # predict the allowed-repeat again in beam 2
                    word_probs[2::beam_sz, repeat_idx_ignored] = 0
                attns = torch.randn(1, batch_sz * beam_sz, 53)
                beam.advance(word_probs, attns)
                if i <= ngram_repeat:
                    self.assertFalse(beam.topk_log_probs[:, 0].eq(
                        self.BLOCKED_SCORE).any())
                    self.assertFalse(beam.topk_log_probs[:, 1].eq(
                        self.BLOCKED_SCORE).any())
                    self.assertFalse(beam.topk_log_probs[:, 2].eq(
                        self.BLOCKED_SCORE).any())
                else:
                    # now beam 0 dies, beam 1 -> beam 0, beam 2 -> beam 1
                    # and the rest die
                    self.assertFalse(beam.topk_log_probs[:, 0].eq(
                        self.BLOCKED_SCORE).any())
                    # since all preds after i=0 are 0, we can check
                    # that the beam is the correct idx by checking that
                    # the curr score is the initial score
                    self.assertTrue(beam.topk_log_probs[:, 0].eq(-2.3).all())
                    self.assertFalse(beam.topk_log_probs[:, 1].eq(
                        self.BLOCKED_SCORE).all())
                    self.assertTrue(beam.topk_log_probs[:, 1].eq(-5.0).all())
                    self.assertTrue(
                        beam.topk_log_probs[:, 2:].equal(
                            torch.tensor(self.BLOCKED_SCORE)
                            .repeat(batch_sz, beam_sz - 2)))
    def test_doesnt_predict_eos_if_shorter_than_min_len(self):
        """EOS must not win a slot until min_length steps have elapsed."""
        # beam 0 will always predict EOS. The other beams will predict
        # non-eos scores.
        for batch_sz in [1, 3]:
            beam_sz = 5
            n_words = 100
            _non_eos_idxs = [47, 51, 13, 88, 99]
            valid_score_dist = torch.log_softmax(torch.tensor(
                [6., 5., 4., 3., 2., 1.]), dim=0)
            min_length = 5
            eos_idx = 2
            lengths = torch.randint(0, 30, (batch_sz,))
            beam = BeamSearch(beam_sz, batch_sz, 0, 1, 2, 2,
                              torch.device("cpu"), GlobalScorerStub(),
                              min_length, 30, False, 0, set(),
                              lengths, False, 0.)
            all_attns = []
            for i in range(min_length + 4):
                # non-interesting beams are going to get dummy values
                word_probs = torch.full(
                    (batch_sz * beam_sz, n_words), -float('inf'))
                if i == 0:
                    # "best" prediction is eos - that should be blocked
                    word_probs[0::beam_sz, eos_idx] = valid_score_dist[0]
                    # include at least beam_sz predictions OTHER than EOS
                    # that are greater than -1e20
                    for j, score in zip(_non_eos_idxs, valid_score_dist[1:]):
                        word_probs[0::beam_sz, j] = score
                else:
                    # predict eos in beam 0
                    word_probs[0::beam_sz, eos_idx] = valid_score_dist[0]
                    # provide beam_sz other good predictions
                    for k, (j, score) in enumerate(
                            zip(_non_eos_idxs, valid_score_dist[1:])):
                        beam_idx = min(beam_sz-1, k)
                        word_probs[beam_idx::beam_sz, j] = score
                attns = torch.randn(1, batch_sz * beam_sz, 53)
                all_attns.append(attns)
                beam.advance(word_probs, attns)
                if i < min_length:
                    expected_score_dist = \
                        (i+1) * valid_score_dist[1:].unsqueeze(0)
                    self.assertTrue(
                        beam.topk_log_probs.allclose(
                            expected_score_dist))
                elif i == min_length:
                    # now the top beam has ended and no others have
                    self.assertTrue(beam.is_finished[:, 0].eq(1).all())
                    self.assertTrue(beam.is_finished[:, 1:].eq(0).all())
                else: # i > min_length
                    # not of interest, but want to make sure it keeps running
                    # since only beam 0 terminates and n_best = 2
                    pass
    def test_beam_is_done_when_n_best_beams_eos_using_min_length(self):
        """The search only reports done once n_best hypotheses have ended."""
        # this is also a test that when block_ngram_repeat=0,
        # repeating is acceptable
        beam_sz = 5
        batch_sz = 3
        n_words = 100
        _non_eos_idxs = [47, 51, 13, 88, 99]
        valid_score_dist = torch.log_softmax(torch.tensor(
            [6., 5., 4., 3., 2., 1.]), dim=0)
        min_length = 5
        eos_idx = 2
        beam = BeamSearch(
            beam_sz, batch_sz, 0, 1, 2, 2,
            torch.device("cpu"), GlobalScorerStub(),
            min_length, 30, False, 0, set(),
            torch.randint(0, 30, (batch_sz,)), False, 0.)
        for i in range(min_length + 4):
            # non-interesting beams are going to get dummy values
            word_probs = torch.full(
                (batch_sz * beam_sz, n_words), -float('inf'))
            if i == 0:
                # "best" prediction is eos - that should be blocked
                word_probs[0::beam_sz, eos_idx] = valid_score_dist[0]
                # include at least beam_sz predictions OTHER than EOS
                # that are greater than -1e20
                for j, score in zip(_non_eos_idxs, valid_score_dist[1:]):
                    word_probs[0::beam_sz, j] = score
            elif i <= min_length:
                # predict eos in beam 1
                word_probs[1::beam_sz, eos_idx] = valid_score_dist[0]
                # provide beam_sz other good predictions in other beams
                for k, (j, score) in enumerate(
                        zip(_non_eos_idxs, valid_score_dist[1:])):
                    beam_idx = min(beam_sz-1, k)
                    word_probs[beam_idx::beam_sz, j] = score
            else:
                word_probs[0::beam_sz, eos_idx] = valid_score_dist[0]
                word_probs[1::beam_sz, eos_idx] = valid_score_dist[0]
                # provide beam_sz other good predictions in other beams
                for k, (j, score) in enumerate(
                        zip(_non_eos_idxs, valid_score_dist[1:])):
                    beam_idx = min(beam_sz-1, k)
                    word_probs[beam_idx::beam_sz, j] = score
            attns = torch.randn(1, batch_sz * beam_sz, 53)
            beam.advance(word_probs, attns)
            if i < min_length:
                self.assertFalse(beam.done)
            elif i == min_length:
                # beam 1 dies on min_length
                self.assertTrue(beam.is_finished[:, 1].all())
                beam.update_finished()
                self.assertFalse(beam.done)
            else: # i > min_length
                # beam 0 dies on the step after beam 1 dies
                self.assertTrue(beam.is_finished[:, 0].all())
                beam.update_finished()
                self.assertTrue(beam.done)
    def test_beam_returns_attn_with_correct_length(self):
        """Finished hypotheses carry attention trimmed to the unpadded
        source length and to each hypothesis's time of death."""
        beam_sz = 5
        batch_sz = 3
        n_words = 100
        _non_eos_idxs = [47, 51, 13, 88, 99]
        valid_score_dist = torch.log_softmax(torch.tensor(
            [6., 5., 4., 3., 2., 1.]), dim=0)
        min_length = 5
        eos_idx = 2
        inp_lens = torch.randint(1, 30, (batch_sz,))
        beam = BeamSearch(
            beam_sz, batch_sz, 0, 1, 2, 2,
            torch.device("cpu"), GlobalScorerStub(),
            min_length, 30, True, 0, set(),
            inp_lens, False, 0.)
        for i in range(min_length + 2):
            # non-interesting beams are going to get dummy values
            word_probs = torch.full(
                (batch_sz * beam_sz, n_words), -float('inf'))
            if i == 0:
                # "best" prediction is eos - that should be blocked
                word_probs[0::beam_sz, eos_idx] = valid_score_dist[0]
                # include at least beam_sz predictions OTHER than EOS
                # that are greater than -1e20
                for j, score in zip(_non_eos_idxs, valid_score_dist[1:]):
                    word_probs[0::beam_sz, j] = score
            elif i <= min_length:
                # predict eos in beam 1
                word_probs[1::beam_sz, eos_idx] = valid_score_dist[0]
                # provide beam_sz other good predictions in other beams
                for k, (j, score) in enumerate(
                        zip(_non_eos_idxs, valid_score_dist[1:])):
                    beam_idx = min(beam_sz-1, k)
                    word_probs[beam_idx::beam_sz, j] = score
            else:
                word_probs[0::beam_sz, eos_idx] = valid_score_dist[0]
                word_probs[1::beam_sz, eos_idx] = valid_score_dist[0]
                # provide beam_sz other good predictions in other beams
                for k, (j, score) in enumerate(
                        zip(_non_eos_idxs, valid_score_dist[1:])):
                    beam_idx = min(beam_sz-1, k)
                    word_probs[beam_idx::beam_sz, j] = score
            attns = torch.randn(1, batch_sz * beam_sz, 53)
            beam.advance(word_probs, attns)
            if i < min_length:
                self.assertFalse(beam.done)
                # no top beams are finished yet
                for b in range(batch_sz):
                    self.assertEqual(beam.attention[b], [])
            elif i == min_length:
                # beam 1 dies on min_length
                self.assertTrue(beam.is_finished[:, 1].all())
                beam.update_finished()
                self.assertFalse(beam.done)
                # no top beams are finished yet
                for b in range(batch_sz):
                    self.assertEqual(beam.attention[b], [])
            else: # i > min_length
                # beam 0 dies on the step after beam 1 dies
                self.assertTrue(beam.is_finished[:, 0].all())
                beam.update_finished()
                self.assertTrue(beam.done)
                # top beam is finished now so there are attentions
                for b in range(batch_sz):
                    # two beams are finished in each batch
                    self.assertEqual(len(beam.attention[b]), 2)
                    for k in range(2):
                        # second dim is cut down to the non-padded src length
                        self.assertEqual(beam.attention[b][k].shape[-1],
                                         inp_lens[b])
                        # first dim is equal to the time of death
                        # (beam 0 died at current step - adjust for SOS)
                        self.assertEqual(beam.attention[b][0].shape[0], i+1)
                        # (beam 1 died at last step - adjust for SOS)
                        self.assertEqual(beam.attention[b][1].shape[0], i)
                # behavior gets weird when beam is already done so just stop
                break
class TestBeamSearchAgainstReferenceCase(unittest.TestCase):
    """Step-by-step comparison of BeamSearch against hand-computed
    reference scores, backpointers and predictions.

    Fix: backpointers are recovered from flattened top-k indices with
    integer floor division (//). The previous plain / relied on legacy
    integer true-division of int64 tensors; on modern PyTorch it yields
    float tensors, which makes .equal() against the integer topk_ids /
    current_backptr fail. // gives the same values on all versions.
    """
    # this is just test_beam.TestBeamAgainstReferenceCase repeated
    # in each batch.
    BEAM_SZ = 5
    EOS_IDX = 2 # don't change this - all the scores would need updated
    N_WORDS = 8 # also don't change for same reason
    N_BEST = 3
    DEAD_SCORE = -1e20
    BATCH_SZ = 3
    INP_SEQ_LEN = 53
    def random_attn(self):
        """Random attention of the shape advance() expects."""
        return torch.randn(1, self.BATCH_SZ * self.BEAM_SZ, self.INP_SEQ_LEN)
    def init_step(self, beam, expected_len_pen):
        """First advance: no hypothesis may finish."""
        # init_preds: [4, 3, 5, 6, 7] - no EOS's
        init_scores = torch.log_softmax(torch.tensor(
            [[0, 0, 0, 4, 5, 3, 2, 1]], dtype=torch.float), dim=1)
        init_scores = deepcopy(init_scores.repeat(
            self.BATCH_SZ * self.BEAM_SZ, 1))
        new_scores = init_scores + beam.topk_log_probs.view(-1).unsqueeze(1)
        expected_beam_scores, expected_preds_0 = new_scores \
            .view(self.BATCH_SZ, self.BEAM_SZ * self.N_WORDS) \
            .topk(self.BEAM_SZ, dim=-1)
        beam.advance(deepcopy(init_scores), self.random_attn())
        self.assertTrue(beam.topk_log_probs.allclose(expected_beam_scores))
        self.assertTrue(beam.topk_ids.equal(expected_preds_0))
        self.assertFalse(beam.is_finished.any())
        self.assertFalse(beam.done)
        return expected_beam_scores
    def first_step(self, beam, expected_beam_scores, expected_len_pen):
        """Second advance: beam 2 predicts EOS and finishes."""
        # no EOS's yet
        assert beam.is_finished.sum() == 0
        scores_1 = torch.log_softmax(torch.tensor(
            [[0, 0, 0, .3, 0, .51, .2, 0],
             [0, 0, 1.5, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, .49, .48, 0, 0],
             [0, 0, 0, .2, .2, .2, .2, .2],
             [0, 0, 0, .2, .2, .2, .2, .2]]
        ), dim=1)
        scores_1 = scores_1.repeat(self.BATCH_SZ, 1)
        beam.advance(deepcopy(scores_1), self.random_attn())
        new_scores = scores_1 + expected_beam_scores.view(-1).unsqueeze(1)
        expected_beam_scores, unreduced_preds = new_scores\
            .view(self.BATCH_SZ, self.BEAM_SZ * self.N_WORDS)\
            .topk(self.BEAM_SZ, -1)
        # integer floor division keeps the backpointers int64 (see class doc)
        expected_bptr_1 = unreduced_preds // self.N_WORDS
        # [5, 3, 2, 6, 0], so beam 2 predicts EOS!
        expected_preds_1 = unreduced_preds - expected_bptr_1 * self.N_WORDS
        self.assertTrue(beam.topk_log_probs.allclose(expected_beam_scores))
        self.assertTrue(beam.topk_scores.allclose(
            expected_beam_scores / expected_len_pen))
        self.assertTrue(beam.topk_ids.equal(expected_preds_1))
        self.assertTrue(beam.current_backptr.equal(expected_bptr_1))
        self.assertEqual(beam.is_finished.sum(), self.BATCH_SZ)
        self.assertTrue(beam.is_finished[:, 2].all()) # beam 2 finished
        beam.update_finished()
        self.assertFalse(beam.top_beam_finished.any())
        self.assertFalse(beam.done)
        return expected_beam_scores
    def second_step(self, beam, expected_beam_scores, expected_len_pen):
        """Third advance: old beam 3 moves to slot 0 and finishes."""
        # assumes beam 2 finished on last step
        scores_2 = torch.log_softmax(torch.tensor(
            [[0, 0, 0, .3, 0, .51, .2, 0],
             [0, 0, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 5000, .48, 0, 0], # beam 2 shouldn't continue
             [0, 0, 50, .2, .2, .2, .2, .2], # beam 3 -> beam 0 should die
             [0, 0, 0, .2, .2, .2, .2, .2]]
        ), dim=1)
        scores_2 = scores_2.repeat(self.BATCH_SZ, 1)
        beam.advance(deepcopy(scores_2), self.random_attn())
        # ended beam 2 shouldn't continue
        expected_beam_scores[:, 2::self.BEAM_SZ] = self.DEAD_SCORE
        new_scores = scores_2 + expected_beam_scores.view(-1).unsqueeze(1)
        expected_beam_scores, unreduced_preds = new_scores\
            .view(self.BATCH_SZ, self.BEAM_SZ * self.N_WORDS)\
            .topk(self.BEAM_SZ, -1)
        # integer floor division keeps the backpointers int64 (see class doc)
        expected_bptr_2 = unreduced_preds // self.N_WORDS
        # [2, 5, 3, 6, 0] repeat self.BATCH_SZ, so beam 0 predicts EOS!
        expected_preds_2 = unreduced_preds - expected_bptr_2 * self.N_WORDS
        # [-2.4879, -3.8910, -4.1010, -4.2010, -4.4010] repeat self.BATCH_SZ
        self.assertTrue(beam.topk_log_probs.allclose(expected_beam_scores))
        self.assertTrue(beam.topk_scores.allclose(
            expected_beam_scores / expected_len_pen))
        self.assertTrue(beam.topk_ids.equal(expected_preds_2))
        self.assertTrue(beam.current_backptr.equal(expected_bptr_2))
        # another beam is finished in all batches
        self.assertEqual(beam.is_finished.sum(), self.BATCH_SZ)
        # new beam 0 finished
        self.assertTrue(beam.is_finished[:, 0].all())
        # new beam 0 is old beam 3
        self.assertTrue(expected_bptr_2[:, 0].eq(3).all())
        beam.update_finished()
        self.assertTrue(beam.top_beam_finished.all())
        self.assertFalse(beam.done)
        return expected_beam_scores
    def third_step(self, beam, expected_beam_scores, expected_len_pen):
        """Fourth advance: old beam 4 moves to slot 1, finishes, and the
        search reports done (n_best reached)."""
        # assumes beam 0 finished on last step
        scores_3 = torch.log_softmax(torch.tensor(
            [[0, 0, 5000, 0, 5000, .51, .2, 0], # beam 0 shouldn't cont
             [0, 0, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0, 5000, 0, 0],
             [0, 0, 0, .2, .2, .2, .2, .2],
             [0, 0, 50, 0, .2, .2, .2, .2]] # beam 4 -> beam 1 should die
        ), dim=1)
        scores_3 = scores_3.repeat(self.BATCH_SZ, 1)
        beam.advance(deepcopy(scores_3), self.random_attn())
        expected_beam_scores[:, 0::self.BEAM_SZ] = self.DEAD_SCORE
        new_scores = scores_3 + expected_beam_scores.view(-1).unsqueeze(1)
        expected_beam_scores, unreduced_preds = new_scores\
            .view(self.BATCH_SZ, self.BEAM_SZ * self.N_WORDS)\
            .topk(self.BEAM_SZ, -1)
        # integer floor division keeps the backpointers int64 (see class doc)
        expected_bptr_3 = unreduced_preds // self.N_WORDS
        # [5, 2, 6, 1, 0] repeat self.BATCH_SZ, so beam 1 predicts EOS!
        expected_preds_3 = unreduced_preds - expected_bptr_3 * self.N_WORDS
        self.assertTrue(beam.topk_log_probs.allclose(
            expected_beam_scores))
        self.assertTrue(beam.topk_scores.allclose(
            expected_beam_scores / expected_len_pen))
        self.assertTrue(beam.topk_ids.equal(expected_preds_3))
        self.assertTrue(beam.current_backptr.equal(expected_bptr_3))
        self.assertEqual(beam.is_finished.sum(), self.BATCH_SZ)
        # new beam 1 finished
        self.assertTrue(beam.is_finished[:, 1].all())
        # new beam 1 is old beam 4
        self.assertTrue(expected_bptr_3[:, 1].eq(4).all())
        beam.update_finished()
        self.assertTrue(beam.top_beam_finished.all())
        self.assertTrue(beam.done)
        return expected_beam_scores
    def test_beam_advance_against_known_reference(self):
        """Run all four reference steps with the no-op scorer stub."""
        beam = BeamSearch(
            self.BEAM_SZ, self.BATCH_SZ, 0, 1, 2, self.N_BEST,
            torch.device("cpu"), GlobalScorerStub(),
            0, 30, False, 0, set(),
            torch.randint(0, 30, (self.BATCH_SZ,)), False, 0.)
        expected_beam_scores = self.init_step(beam, 1)
        expected_beam_scores = self.first_step(beam, expected_beam_scores, 1)
        expected_beam_scores = self.second_step(beam, expected_beam_scores, 1)
        self.third_step(beam, expected_beam_scores, 1)
class TestBeamWithLengthPenalty(TestBeamSearchAgainstReferenceCase):
    """Reference-case run with a real GNMT scorer (avg length penalty)."""
    # this could be considered an integration test because it tests
    # interactions between the GNMT scorer and the beam
    def test_beam_advance_against_known_reference(self):
        """Same reference steps, but with step-dependent length penalties
        (1., 3, 4, 5) expected from the "avg" penalty."""
        scorer = GNMTGlobalScorer(0.7, 0., "avg", "none")
        beam = BeamSearch(
            self.BEAM_SZ, self.BATCH_SZ, 0, 1, 2, self.N_BEST,
            torch.device("cpu"), scorer,
            0, 30, False, 0, set(),
            torch.randint(0, 30, (self.BATCH_SZ,)), False, 0.)
        expected_beam_scores = self.init_step(beam, 1.)
        expected_beam_scores = self.first_step(beam, expected_beam_scores, 3)
        expected_beam_scores = self.second_step(beam, expected_beam_scores, 4)
        self.third_step(beam, expected_beam_scores, 5)
| 48.623327 | 80 | 0.534998 |
acf3222bf377464a6768d695afb6a4f534693f24 | 3,119 | py | Python | openinfradays/admin.py | openstack-kr/openinfradays-2018 | 9eb0e284ab95e177dc4acca17d63ccbdaff67fb1 | [
"Apache-2.0"
] | null | null | null | openinfradays/admin.py | openstack-kr/openinfradays-2018 | 9eb0e284ab95e177dc4acca17d63ccbdaff67fb1 | [
"Apache-2.0"
] | 1 | 2018-06-17T02:21:41.000Z | 2018-06-17T02:21:41.000Z | openinfradays/admin.py | openstack-kr/openinfradays-2018 | 9eb0e284ab95e177dc4acca17d63ccbdaff67fb1 | [
"Apache-2.0"
] | 1 | 2018-05-31T11:39:02.000Z | 2018-05-31T11:39:02.000Z | from django.db import models
from django.contrib import admin
from django.contrib.flatpages.models import FlatPage
from django_summernote.admin import SummernoteModelAdmin
from django_summernote.widgets import SummernoteWidget
from modeltranslation.admin import TranslationAdmin
from .models import Program, Speaker, ProgramCategory, ProgramDate, \
ProgramTime, Room, Sponsor, SponsorLevel
class SummernoteWidgetWithCustomToolbar(SummernoteWidget):
    """Summernote editor widget with a fixed 960px editing width."""
    def template_contexts(self):
        # take the base contexts and only override the editor width
        contexts = super(SummernoteWidgetWithCustomToolbar, self).template_contexts()
        contexts['width'] = '960px'
        return contexts
class SponsorAdmin(SummernoteModelAdmin, TranslationAdmin):
    """Admin for Sponsor: Summernote rich-text fields plus translations."""
    formfield_overrides = {models.TextField: {'widget': SummernoteWidgetWithCustomToolbar}}
    list_display = ('id', 'name', 'slug', )
    ordering = ('name',)
    list_editable = ('name', 'slug',)
    search_fields = ('name', 'slug',)
admin.site.register(Sponsor, SponsorAdmin)
class SponsorLevelAdmin(SummernoteModelAdmin, TranslationAdmin):
    """Admin for SponsorLevel, ordered and editable by its 'order' field."""
    list_display = ('id', 'order', 'name',)
    list_editable = ('order', 'name',)
    ordering = ('order',)
    search_fields = ('name',)
admin.site.register(SponsorLevel, SponsorLevelAdmin)
class ProgramAdmin(SummernoteModelAdmin, TranslationAdmin):
    """Admin for Program with inline-editable, searchable names."""
    list_display = ('id', 'name', )
    list_editable = ('name', )
    ordering = ('id', )
    search_fields = ('name', )
admin.site.register(Program, ProgramAdmin)
class SpeakerAdmin(SummernoteModelAdmin, TranslationAdmin):
    """Admin for Speaker, searchable by name and email."""
    list_display = ('id', 'name', 'email', 'organization')
    list_editable = ('name', 'organization')
    ordering = ('id', )
    search_fields = ('name', 'email')
admin.site.register(Speaker, SpeakerAdmin)
class ProgramCategoryAdmin(SummernoteModelAdmin, TranslationAdmin):
    """Admin for ProgramCategory with editable name and slug."""
    list_display = ('id', 'name', 'slug', )
    list_editable = ('name', 'slug', )
    ordering = ('id', )
    search_fields = ('name', )
admin.site.register(ProgramCategory, ProgramCategoryAdmin)
class ProgramDateAdmin(SummernoteModelAdmin, TranslationAdmin):
    """Admin for ProgramDate, ordered chronologically by 'day'."""
    list_display = ('id', 'day', )
    list_editable = ('day', )
    ordering = ('day', )
admin.site.register(ProgramDate, ProgramDateAdmin)
class RoomAdmin(SummernoteModelAdmin, TranslationAdmin):
    """Admin for Room with inline-editable names."""
    list_display = ('id', 'name', )
    list_editable = ('name', )
    ordering = ('id', )
admin.site.register(Room, RoomAdmin)
class ProgramTimeAdmin(SummernoteModelAdmin, TranslationAdmin):
    """Admin for ProgramTime slots (name, begin/end, day) editable inline."""
    list_display = ('id', 'name', 'begin', 'end', 'day')
    list_editable = ('name', 'begin', 'end', 'day')
    ordering = ('id', )
admin.site.register(ProgramTime, ProgramTimeAdmin)
# NOTE(review): this class is defined a second time here, shadowing the
# identical definition earlier in this file - consider removing one copy.
class SummernoteWidgetWithCustomToolbar(SummernoteWidget):
    """Summernote editor widget with a fixed 960px editing width."""
    def template_contexts(self):
        contexts = super(SummernoteWidgetWithCustomToolbar, self)\
            .template_contexts()
        contexts['width'] = '960px'
        return contexts
class FlatPageAdmin(TranslationAdmin):
    """FlatPage admin using the widened Summernote editor for text fields."""
    formfield_overrides = {
        models.TextField: {'widget': SummernoteWidgetWithCustomToolbar}}
# swap Django's default FlatPage admin for the Summernote-enabled one
admin.site.unregister(FlatPage)
admin.site.register(FlatPage, FlatPageAdmin)
| 32.831579 | 91 | 0.717217 |
acf3228d88c8ab1cc4e475ec9f6a0265f13a07e5 | 272 | py | Python | app/conftest.py | ayong8/AI-exp-survey | 2a98f7175d8c7aa558e45231de1dc1997f1634a7 | [
"MIT"
] | null | null | null | app/conftest.py | ayong8/AI-exp-survey | 2a98f7175d8c7aa558e45231de1dc1997f1634a7 | [
"MIT"
] | null | null | null | app/conftest.py | ayong8/AI-exp-survey | 2a98f7175d8c7aa558e45231de1dc1997f1634a7 | [
"MIT"
] | null | null | null | import pytest
from app.users.models import User
from app.users.tests.factories import UserFactory
@pytest.fixture(autouse=True)
def media_storage(settings, tmpdir):
    """Point MEDIA_ROOT at a per-test temp dir so tests never write
    uploaded files into the real media folder (autouse: applies to all)."""
    settings.MEDIA_ROOT = tmpdir.strpath
@pytest.fixture
def user() -> User:
    """Provide a freshly created User instance built by its factory."""
    return UserFactory()
| 18.133333 | 49 | 0.768382 |
acf322bc09198173b0bbb5475faee9e10ebdb151 | 3,978 | py | Python | sdk/python/pulumi_azure_nextgen/dbforpostgresql/v20201105preview/get_database.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 31 | 2020-09-21T09:41:01.000Z | 2021-02-26T13:21:59.000Z | sdk/python/pulumi_azure_nextgen/dbforpostgresql/v20201105preview/get_database.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 231 | 2020-09-21T09:38:45.000Z | 2021-03-01T11:16:03.000Z | sdk/python/pulumi_azure_nextgen/dbforpostgresql/v20201105preview/get_database.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 4 | 2020-09-29T14:14:59.000Z | 2021-02-10T20:38:16.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetDatabaseResult',
'AwaitableGetDatabaseResult',
'get_database',
]
@pulumi.output_type
class GetDatabaseResult:
    """
    Represents a Database.
    """
    # NOTE: generated by the Pulumi SDK Generator (see file header);
    # regenerate instead of editing by hand.
    def __init__(__self__, charset=None, collation=None, id=None, name=None, type=None):
        # validate each argument's type before storing via pulumi.set
        if charset and not isinstance(charset, str):
            raise TypeError("Expected argument 'charset' to be a str")
        pulumi.set(__self__, "charset", charset)
        if collation and not isinstance(collation, str):
            raise TypeError("Expected argument 'collation' to be a str")
        pulumi.set(__self__, "collation", collation)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def charset(self) -> Optional[str]:
        """
        The charset of the database.
        """
        return pulumi.get(self, "charset")
    @property
    @pulumi.getter
    def collation(self) -> Optional[str]:
        """
        The collation of the database.
        """
        return pulumi.get(self, "collation")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")
class AwaitableGetDatabaseResult(GetDatabaseResult):
    """Awaitable wrapper: awaiting it yields a plain GetDatabaseResult."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # the unreachable yield makes this method a generator, which is
        # what the await protocol requires of __await__
        if False:
            yield self
        return GetDatabaseResult(
            charset=self.charset,
            collation=self.collation,
            id=self.id,
            name=self.name,
            type=self.type)
def get_database(database_name: Optional[str] = None,
                 resource_group_name: Optional[str] = None,
                 server_name: Optional[str] = None,
                 opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDatabaseResult:
    """
    Represents a Database.
    :param str database_name: The name of the database.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str server_name: The name of the server.
    """
    # build the invoke arguments expected by the Azure provider
    __args__ = dict()
    __args__['databaseName'] = database_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['serverName'] = server_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # synchronous invoke against the provider; .value is the typed result
    __ret__ = pulumi.runtime.invoke('azure-nextgen:dbforpostgresql/v20201105preview:getDatabase', __args__, opts=opts, typ=GetDatabaseResult).value
    return AwaitableGetDatabaseResult(
        charset=__ret__.charset,
        collation=__ret__.collation,
        id=__ret__.id,
        name=__ret__.name,
        type=__ret__.type)
acf322d53cf1da61a3d260b3d2e1052430cca13a | 15,774 | py | Python | custom_components/deeds/const.py | wRieDen/deeds | 64a97d8100a0c693534e4a2b85bd27ffbc4457d2 | [
"MIT"
] | null | null | null | custom_components/deeds/const.py | wRieDen/deeds | 64a97d8100a0c693534e4a2b85bd27ffbc4457d2 | [
"MIT"
] | null | null | null | custom_components/deeds/const.py | wRieDen/deeds | 64a97d8100a0c693534e4a2b85bd27ffbc4457d2 | [
"MIT"
] | null | null | null | """ Constants """
import voluptuous as vol
from datetime import datetime, timedelta
import dateutil.parser
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt
# from homeassistant.const import CONF_NAME, CONF_PATH
import re
class DeedsDate(dateutil.relativedelta.relativedelta):
"""date class"""
    def __init__(
        self,
        years=0,
        months=0,
        days=0,
        leapdays=0,
        weeks=0,
        hours=0,
        minutes=0,
        seconds=0,
        microseconds=0,
        year=None,
        month=None,
        day=None,
        weekday=None,
        yearday=None,
        nlyearday=None,
        hour=None,
        minute=None,
        second=None,
        microsecond=None,
        week=0,
        monday=0,
        tuesday=0,
        wednesday=0,
        thursday=0,
        friday=0,
        saturday=0,
        sunday=0,
        timezone=None,
    ):
        """Relativedelta extended with per-weekday counts and a timezone.

        Plural arguments (years, months, ...) are relative offsets; singular
        ones (year, month, ...) are absolute components, mirroring
        dateutil.relativedelta. monday..sunday are extra per-weekday counts
        stored on this object; timezone defaults to the local tzinfo.
        """
        # NOTE(review): `week` is accepted but neither forwarded to super()
        # nor stored on self - TODO confirm whether it is intentional.
        super().__init__(
            years=years,
            months=months,
            days=days,
            leapdays=leapdays,
            weeks=weeks,
            hours=hours,
            minutes=minutes,
            seconds=seconds,
            microseconds=microseconds,
            year=year,
            month=month,
            day=day,
            weekday=weekday,
            yearday=yearday,
            nlyearday=nlyearday,
            hour=hour,
            minute=minute,
            second=second,
            microsecond=microsecond,
        )
        # classify the instance: absolute components set vs. relative offsets
        self.has_absolute_values = any(x is not None for x in (self.year, self.month, self.day, self.hour, self.minute, self.second, self.microsecond))
        self.has_relative_values = any(x not in (None, 0) for x in (self.years, self.months, self.weeks, self.days, self.hours, self.minutes, self.seconds, self.microseconds))
        self.is_absolute = self.has_absolute_values and not self.has_relative_values
        self.is_relative = self.has_relative_values and not self.has_absolute_values
        self.monday = monday
        self.tuesday = tuesday
        self.wednesday = wednesday
        self.thursday = thursday
        self.friday = friday
        self.saturday = saturday
        self.sunday = sunday
        self.timezone = timezone
        # name -> count mapping duplicating the individual weekday attributes
        self.weekdays = {
            "monday": monday,
            "tuesday": tuesday,
            "wednesday": wednesday,
            "thursday": thursday,
            "friday": friday,
            "saturday": saturday,
            "sunday": sunday,
        }
        if self.timezone is None:
            self.timezone = dt.now().tzinfo
@classmethod
def from_string(cls, text):
date = None
if text == "now":
date = dt.now()
elif text == "today":
date = dt.now().replace(hour=0, minute=0, second=0, microsecond=0)
elif text == "min":
date = datetime.min.replace(tzinfo=dt.now().tzinfo)
# standard formats, preferred: 2021-5-23 21:33:12
isodate = dt.now()
isotext = re.sub(r"^\s*(\d{2}:\d{2}(:\d{2})?)\s*$", isodate.strftime("%Y-%m-%d") + r" \g<1>", text)
try:
date = dateutil.parser.isoparse(isotext)
if date.tzinfo is None:
date = date.replace(tzinfo=dt.now().tzinfo)
except:
pass
if date is not None:
return cls(
year=date.year,
month=date.month,
day=date.day,
hour=date.hour,
minute=date.minute,
second=date.second,
timezone=date.tzinfo,
)
# flexible format eg: "1m 2d 21h"
match = re.match(
r"^(?:"
r"(?P<year>\d*\.?\d+)\s*(y|year|years|yearly)|"
r"(?P<month>\d*\.?\d+)\s*(m|month|months|monthly)|"
r"(?P<day>\d*\.?\d+)\s*(d|day|days|daily)|"
r"(?P<hour>\d*\.?\d+)\s*(h|hour|hours|hourly)|"
r"(?P<minute>\d*\.?\d+)\s*(min|minute|minutes)|"
r"(?P<second>\d*\.?\d+)\s*(s|sec|second|seconds)|"
r"(?P<week>\d*\.?\d+)\s*(w|week|weeks|weekly)|"
r"(?P<monday>\d*)\s*(mo|mon|monday|mondays)|"
r"(?P<tuesday>\d*)\s*(tu|tue|tues|tuesday|tuesdays)|"
r"(?P<wednesday>\d*)\s*(we|wed|wednesday|wednesdays)|"
r"(?P<thursday>\d*)\s*(th|thu|thur|thurs|thursday|thursdays)|"
r"(?P<friday>\d*)\s*(fr|fri|friday|fridays)|"
r"(?P<saturday>\d*)\s*(sa|sat|saturday|saturdays)|"
r"(?P<sunday>\d*)\s*(su|sun|sunday|sundays)|"
r"[\s\-_:.])*$",
text,
)
if match is not None:
groups = {k: (1 if v == "" else v) for k, v in match.groupdict().items() if v is not None}
return cls(
years=int(groups.get("year", 0)),
months=int(groups.get("month", 0)),
days=int(groups.get("day", 0)),
hours=int(groups.get("hour", 0)),
minutes=int(groups.get("minute", 0)),
seconds=int(groups.get("second", 0)),
weeks=int(groups.get("week", 0)),
monday=int(groups.get("monday", 0)),
tuesday=int(groups.get("tuesday", 0)),
wednesday=int(groups.get("wednesday", 0)),
thursday=int(groups.get("thursday", 0)),
friday=int(groups.get("friday", 0)),
saturday=int(groups.get("saturday", 0)),
sunday=int(groups.get("sunday", 0)),
)
else:
return None
@classmethod
def from_datetime(cls, date):
return cls(
year=date.year,
month=date.month,
day=date.day,
hour=date.hour,
minute=date.minute,
second=date.second,
timezone=date.tzinfo,
)
def has_weekday_attribute(self):
return any({v > 0 for v in self.weekdays.values()})
def get_timedelta(self):
return timedelta(
weeks=self.weeks,
days=self.day,
hours=self.hour,
minutes=self.minute,
seconds=self.second,
)
def get_datetime(self):
return datetime(
year=self.year,
month=self.month,
day=self.day,
hour=self.hour,
minute=self.minute,
second=self.second,
tzinfo=self.timezone,
)
def is_valid_date(self):
try:
if self.is_absolute and self.get_datetime() is not None:
return True
return False
except ValueError as e:
print(f"value error: {e}")
print(f"invalid date: {self.print()}")
return False
def is_valid_period(self):
if self.is_relative:
return True
return False
def get_max_relative_unit(self):
for k, v in {
"years": self.years,
"months": self.months,
"weeks": self.weeks,
"days": self.days,
"hours": self.hours,
"minutes": self.minutes,
"seconds": self.seconds,
"microseconds": self.microseconds,
}.items():
if v is not None and v > 0:
return k
return None
def print(self):
print(f"year: {self.year}, month: {self.month}, day: {self.day}, hour: {self.hour}, minute: {self.minute}, second: {self.second}, weekdays: {self.weekdays}")
def check_date(value):
    """Voluptuous validator: accept only strings that parse to an absolute date."""
    parsed = DeedsDate.from_string(value)
    if parsed is None or not parsed.is_valid_date():
        raise vol.Invalid(f"Invalid Date: {value}")
    return parsed
def check_period(value):
    """Voluptuous validator: accept only strings that parse to a relative period."""
    parsed = DeedsDate.from_string(value)
    if parsed is None or not parsed.is_valid_period():
        raise vol.Invalid(f"Invalid Period: {value}")
    return parsed
def check_date_period(value):
    """Voluptuous validator: accept strings parsing to a date OR a period."""
    parsed = DeedsDate.from_string(value)
    if parsed is None:
        raise vol.Invalid(f"Invalid Date or Period: {value}")
    if parsed.is_valid_date() or parsed.is_valid_period():
        return parsed
    raise vol.Invalid(f"Invalid Date or Period: {value}")
def check_bool_int(value):
    """Voluptuous validator for repeat counts.

    Maps ``True`` -> -1 (unlimited), ``False`` -> 0, and passes
    non-negative integers through unchanged.
    """
    if value is True:
        return -1
    if value is False:
        return 0
    if isinstance(value, int) and value >= 0:
        return value
    raise vol.Invalid(f"Invalid Input: {value}")
def check_round_up(value):
    """Voluptuous validator for the round_up option: a boolean or a unit name."""
    allowed = (True, False, "years", "months", "weeks", "days", "hours", "minutes")
    if value in allowed:
        return value
    raise vol.Invalid(f"Invalid Input: {value}")
# Base component constants
DOMAIN = "deeds"
DOMAIN_DATA = f"{DOMAIN}_data"
VERSION = "1.0.0"
PLATFORM = "sensor"
# API call -- keys of the deeds.api_call service payload
API_SERVICE = "api_call"
API_NAME = "name"
API_ACTION = "action"
API_ARGS = "args"
# Schema validating the api_call service payload.
API_SCHEMA = vol.Schema(
    {
        vol.Required(API_NAME): cv.string,
        vol.Required(API_ACTION): cv.string,
        vol.Optional(API_ARGS): dict,
    }
)
# Attributes exposed on the sensor state
ATTRIBUTION = "Sensor data calculated by Deeds Integration"
ATTR_LAST_COMPLETION = "last_completion"
ATTR_NEXT_COMPLETION = "next_completion"
ATTR_NEXT_TIMESTAMP = "next_timestamp"
ATTR_NEXT_INTERVAL = "next_interval"
ATTR_RATING = "rating"
ATTR_SUCCESSFUL_COMPLETIONS = "successful_completions"
ATTR_MISSED_COMPLETIONS = "missed_completions"
ATTR_CURRENT_STREAK = "current_streak"
ATTR_LONGEST_STREAK = "longest_streak"
# ATTR_REMAINING_SECONDS = "remaining_seconds"
# ATTR_REMAINING_TIME = "remaining_time"
# ATTR_REMAINING_TIME_SHORT = "remaining_time_short"
ATTR_REMIND = "remind"
ATTR_VALID = "valid"
# Storage -- keys used when persisting state via the HA store
STORAGE_KEY = "deeds"
STORAGE_VERSION = 1
STORE_LAST_COMPLETION = "last_completion"
STORE_NEXT_COMPLETION = "next_completion"
STORE_NEXT_INTERVAL = "next_interval"
STORE_RATING = "rating"
STORE_SUCCESSFUL_COMPLETIONS = "successful_completions"
STORE_MISSED_COMPLETIONS = "missed_completions"
STORE_CURRENT_STREAK = "current_streak"
STORE_LONGEST_STREAK = "longest_streak"
# Device classes
BINARY_SENSOR_DEVICE_CLASS = "connectivity"
# Configuration
CONF_SENSOR = "sensor"
CONF_ENABLED = "enabled"
CONF_DATE = "date"
CONF_DATE_TEMPLATE = "date_template"
CONF_SENSORS = "sensors"
CONF_UNIT_OF_MEASUREMENT = "unit_of_measurement"
CONF_ID_PREFIX = "id_prefix"
# general config
CONF_NAME = "name"
CONF_COMMENT = "comment"
# icon config
CONF_ICON_NORMAL = "icon_normal"
CONF_ICON_TODAY = "icon_today"
CONF_ICON_SOON = "icon_soon"
# time config
CONF_REPEAT = "repeat" # number of times to repeat activity (True: unlimited or number)
CONF_START = "start" # date on which the activity should be completed the first time
CONF_MAX_INTERVAL = "max_interval" # maximum time between two subsequent activity completions
CONF_FIXED_INTERVAL = "fixed_interval" # time between two activity completions
CONF_ROUND_UP = "round_up" # rounding up the to the next month, day, hour etc...
CONF_ROUND_UP_OFFSET = "round_up_offset" # offset applied when rounding up -- presumably; TODO confirm against sensor.py
CONF_REMINDER_PERIOD = "reminder_period" # time window where reminders are activated for activity
CONF_VALID_PERIOD = "valid_period" # time window where activity completion is accepted
CONF_COUNT = "count" # number of completions expected per interval -- presumably; TODO confirm against sensor.py
CONF_RESCHEDULE_INTERVAL = "reschedule_interval"
# Defaults
DEFAULT_NAME = DOMAIN
DEFAULT_ICON_NORMAL = "mdi:calendar-blank"
DEFAULT_ICON_TODAY = "mdi:calendar-star"
DEFAULT_ICON_SOON = "mdi:calendar"
DEFAULT_UNIT_OF_MEASUREMENT = "Days"
DEFAULT_ID_PREFIX = "deeds_"
DEFAULT_COUNT = 1
DEFAULT_ROUND_UP = True
DEFAULT_ROUND_UP_OFFSET = "min"
DEFAULT_REPEAT = True
DEFAULT_START = "now"
DEFAULT_MAX_INTERVAL = "1d"
DEFAULT_FIXED_INTERVAL = "1d"
DEFAULT_REMINDER_PERIOD = "1d"
DEFAULT_VALID_PERIOD = "1y"
# INTERVAL_SCHEMA = vol.Schema(
# {vol.Required(vol.Any(CONF_MAX_INTERVAL, CONF_FIXED_INTERVAL, msg=CONF_DATE_REQD_ERROR)): object},
# extra=vol.ALLOW_EXTRA,
# )
# SENSOR_CONFIG_SCHEMA = vol.Schema(
# {
# vol.Required(CONF_NAME): cv.string,
# vol.Optional(CONF_COMMENT): cv.string,
# vol.Optional(CONF_ICON_NORMAL, default=DEFAULT_ICON_NORMAL): cv.icon,
# vol.Optional(CONF_ICON_TODAY, default=DEFAULT_ICON_TODAY): cv.icon,
# vol.Optional(CONF_ICON_SOON, default=DEFAULT_ICON_SOON): cv.icon,
# vol.Optional(CONF_UNIT_OF_MEASUREMENT, default=DEFAULT_UNIT_OF_MEASUREMENT): cv.string,
# vol.Optional(CONF_ID_PREFIX, default=DEFAULT_ID_PREFIX): cv.string,
# vol.Optional(CONF_REPEAT, default=DEFAULT_REPEAT): check_bool_int,
# vol.Optional(CONF_START, default=DEFAULT_START): check_date_period,
# vol.Optional(CONF_ROUND_UP, default=DEFAULT_ROUND_UP): check_round_up,
# vol.Exclusive(CONF_MAX_INTERVAL, "interval"): check_period,
# vol.Exclusive(CONF_FIXED_INTERVAL, "interval"): check_period,
# vol.Optional(CONF_REMINDER_PERIOD, default=DEFAULT_REMINDER_PERIOD): check_period,
# vol.Optional(CONF_VALID_PERIOD, default=DEFAULT_VALID_PERIOD): check_period,
# vol.Optional(CONF_COUNT, default=DEFAULT_COUNT): int,
# }
# )
# SENSOR_SCHEMA = vol.All(SENSOR_CONFIG_SCHEMA, INTERVAL_SCHEMA)
# CONFIG_SCHEMA = vol.Schema(
# {DOMAIN: vol.Schema({vol.Optional(CONF_SENSORS): vol.All(cv.ensure_list, [SENSOR_SCHEMA])})},
# extra=vol.ALLOW_EXTRA,
# )
# Top-level YAML configuration schema: deeds -> sensors -> per-sensor options.
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            {
                vol.Optional(CONF_SENSORS): vol.All(
                    cv.ensure_list,
                    [
                        vol.All(
                            # First schema: validate/normalize each option.
                            vol.Schema(
                                {
                                    vol.Required(CONF_NAME): cv.string,
                                    vol.Optional(CONF_COMMENT): cv.string,
                                    vol.Optional(CONF_ICON_NORMAL, default=DEFAULT_ICON_NORMAL): cv.icon,
                                    vol.Optional(CONF_ICON_TODAY, default=DEFAULT_ICON_TODAY): cv.icon,
                                    vol.Optional(CONF_ICON_SOON, default=DEFAULT_ICON_SOON): cv.icon,
                                    vol.Optional(CONF_UNIT_OF_MEASUREMENT, default=DEFAULT_UNIT_OF_MEASUREMENT): cv.string,
                                    vol.Optional(CONF_ID_PREFIX, default=DEFAULT_ID_PREFIX): cv.string,
                                    vol.Optional(CONF_REPEAT, default=DEFAULT_REPEAT): check_bool_int,
                                    vol.Optional(CONF_START): check_date_period,
                                    vol.Optional(CONF_ROUND_UP, default=DEFAULT_ROUND_UP): check_round_up,
                                    vol.Optional(CONF_ROUND_UP_OFFSET, default=DEFAULT_ROUND_UP_OFFSET): check_date,
                                    # max_interval and fixed_interval are mutually exclusive
                                    vol.Exclusive(CONF_MAX_INTERVAL, "interval"): check_period,
                                    vol.Exclusive(CONF_FIXED_INTERVAL, "interval"): check_period,
                                    vol.Optional(CONF_REMINDER_PERIOD, default=DEFAULT_REMINDER_PERIOD): check_period,
                                    vol.Optional(CONF_VALID_PERIOD, default=DEFAULT_VALID_PERIOD): check_period,
                                    vol.Optional(CONF_COUNT, default=DEFAULT_COUNT): int,
                                    vol.Optional(CONF_RESCHEDULE_INTERVAL): check_period,
                                }
                            ),
                            # Second schema: require that at least one of the
                            # two interval options is actually present.
                            vol.Schema(
                                {
                                    vol.Required(vol.Any(CONF_MAX_INTERVAL, CONF_FIXED_INTERVAL)): object,
                                },
                                extra=vol.ALLOW_EXTRA,
                            ),
                        )
                    ],
                )
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)
ICON = DEFAULT_ICON_NORMAL
| 33.777302 | 175 | 0.585457 |
acf3230d7d9085a3007fb77468a95bd5ce05b4bc | 2,664 | py | Python | docs/source/conf.py | kingtaurus/pytorch-examples | 2bf23f105237e03ee2501f29670fb6a9ca915096 | [
"BSD-3-Clause"
] | null | null | null | docs/source/conf.py | kingtaurus/pytorch-examples | 2bf23f105237e03ee2501f29670fb6a9ca915096 | [
"BSD-3-Clause"
] | null | null | null | docs/source/conf.py | kingtaurus/pytorch-examples | 2bf23f105237e03ee2501f29670fb6a9ca915096 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import pytorch_sphinx_theme

# Make the repository root (two levels above this conf.py) importable so
# autodoc can find the documented package.
current_dir = os.path.dirname(__file__)
target_dir = os.path.abspath(os.path.join(current_dir, "../.."))
sys.path.insert(0, target_dir)
print(target_dir)  # echoed during the Sphinx build for debugging
# -- Project information -----------------------------------------------------
project = "PyTorchExamples"
copyright = "2022, Meta"
author = "Meta"
# The full version, including alpha/beta/rc tags
release = "1.11"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.napoleon", "sphinx.ext.autodoc", 'sphinx_panels']
# The PyTorch theme provides its own styling, so skip sphinx-panels' CSS.
panels_add_bootstrap_css = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = "pytorch_sphinx_theme"
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
panels_add_fontawesome_latex = True
# Options consumed by pytorch_sphinx_theme.
html_theme_options = {
    'pytorch_project': 'examples',
    'collapse_navigation': False,
    'display_version': True,
    'logo_only': False,
    'analytics_id': 'UA-117752657-2',
}
| 34.153846 | 79 | 0.693694 |
acf3234751ee2485114bde9a59886e6f127908a5 | 2,195 | py | Python | cross-modal-search/app.py | saoc90/examples | 372ae19fed483394381306c505550195fe2dc3cc | [
"Apache-2.0"
] | null | null | null | cross-modal-search/app.py | saoc90/examples | 372ae19fed483394381306c505550195fe2dc3cc | [
"Apache-2.0"
] | 399 | 2021-03-24T07:46:28.000Z | 2022-03-31T07:11:14.000Z | cross-modal-search/app.py | saoc90/examples | 372ae19fed483394381306c505550195fe2dc3cc | [
"Apache-2.0"
] | null | null | null | __copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import click
from jina import Flow
from dataset import input_index_data
cur_dir = os.path.dirname(os.path.abspath(__file__))
def config(model_name):
    """Populate the JINA_* environment variables for the chosen encoder model.

    Existing values of the overridable variables are kept; unknown model
    names leave the encoder variables untouched.
    """
    for key, fallback in (('JINA_PARALLEL', '1'), ('JINA_SHARDS', '1')):
        os.environ[key] = os.environ.get(key, fallback)
    os.environ['JINA_PORT'] = '45678'
    os.environ['JINA_USE_REST_API'] = 'true'
    # (image encoder default, text encoder default, internal text-encoder yaml)
    encoder_settings = {
        'clip': (
            'docker://jinahub/pod.encoder.clipimageencoder:0.0.1-1.0.7',
            'docker://jinahub/pod.encoder.cliptextencoder:0.0.1-1.0.7',
            'yaml/clip/text-encoder.yml',
        ),
        'vse': (
            'docker://jinahub/pod.encoder.vseimageencoder:0.0.5-1.0.7',
            'docker://jinahub/pod.encoder.vsetextencoder:0.0.6-1.0.7',
            'yaml/vse/text-encoder.yml',
        ),
    }
    if model_name in encoder_settings:
        image_enc, text_enc, internal_yaml = encoder_settings[model_name]
        os.environ['JINA_IMAGE_ENCODER'] = os.environ.get('JINA_IMAGE_ENCODER', image_enc)
        os.environ['JINA_TEXT_ENCODER'] = os.environ.get('JINA_TEXT_ENCODER', text_enc)
        os.environ['JINA_TEXT_ENCODER_INTERNAL'] = internal_yaml
@click.command()
@click.option('--task', '-t', type=click.Choice(['index', 'query'], case_sensitive=False), default='query')
@click.option('--num_docs', '-n', default=50)
@click.option('--request_size', '-s', default=16)
@click.option('--data_set', '-d', type=click.Choice(['f30k', 'f8k'], case_sensitive=False), default='f8k')
@click.option('--model_name', '-m', type=click.Choice(['clip', 'vse'], case_sensitive=False), default='clip')
def main(task, num_docs, request_size, data_set, model_name):
    """CLI entry point: either index the chosen dataset or serve queries."""
    config(model_name)
    if task == 'index':
        # Feed the dataset through the indexing Flow in request_size batches.
        with Flow.load_config('flow-index.yml') as f:
            f.index(
                input_fn=input_index_data(num_docs, request_size, data_set),
                request_size=request_size
            )
    elif task == 'query':
        # Serve the query Flow over the REST gateway until interrupted.
        with Flow.load_config('flow-query.yml') as f:
            f.use_rest_gateway()
            f.block()
| 42.211538 | 140 | 0.670615 |
acf3239bd795206052c7f776748f307f56fe44fb | 292 | py | Python | aula010 - IF ELSE/ex031.py | miradouro/CursoEmVideo-Python | cc7b05a9a4aad8e6ef3b29453d83370094d75e41 | [
"MIT"
] | null | null | null | aula010 - IF ELSE/ex031.py | miradouro/CursoEmVideo-Python | cc7b05a9a4aad8e6ef3b29453d83370094d75e41 | [
"MIT"
] | null | null | null | aula010 - IF ELSE/ex031.py | miradouro/CursoEmVideo-Python | cc7b05a9a4aad8e6ef3b29453d83370094d75e41 | [
"MIT"
] | null | null | null | distancia = float(input('Qual é a distancia de sua viagem? '))
print('Você está prestes a começar uma viagem de {:.0f}km'.format(distancia))
if distancia <= 200:
preco = distancia * 0.50
else:
preco = distancia * 0.45
print('E o preço da sua passagem será de R${:.2f}'.format(preco))
| 36.5 | 77 | 0.684932 |
acf3246827714af022addbb5c06aa6e2d46f416b | 4,063 | py | Python | sdk/network/azure-mgmt-dns/azure/mgmt/dns/v2018_03_01_preview/aio/_dns_management_client.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/network/azure-mgmt-dns/azure/mgmt/dns/v2018_03_01_preview/aio/_dns_management_client.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/network/azure-mgmt-dns/azure/mgmt/dns/v2018_03_01_preview/aio/_dns_management_client.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration import DnsManagementClientConfiguration
from .operations import RecordSetsOperations
from .operations import ZonesOperations
from .. import models
class DnsManagementClient(object):
    """The DNS Management Client.

    :ivar record_sets: RecordSetsOperations operations
    :vartype record_sets: azure.mgmt.dns.v2018_03_01_preview.aio.operations.RecordSetsOperations
    :ivar zones: ZonesOperations operations
    :vartype zones: azure.mgmt.dns.v2018_03_01_preview.aio.operations.ZonesOperations
    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param subscription_id: The ID of the target subscription.
    :type subscription_id: str
    :param str base_url: Service URL
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    """

    def __init__(
        self,
        credential: "AsyncTokenCredential",
        subscription_id: str,
        base_url: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        # Fall back to the public Azure Resource Manager endpoint.
        if not base_url:
            base_url = 'https://management.azure.com'
        self._config = DnsManagementClientConfiguration(credential, subscription_id, **kwargs)
        self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)

        # Register every generated model class with the (de)serializer.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._serialize.client_side_validation = False
        self._deserialize = Deserializer(client_models)

        # Operation groups share the client's pipeline and serializers.
        self.record_sets = RecordSetsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.zones = ZonesOperations(
            self._client, self._config, self._serialize, self._deserialize)

    async def _send_request(self, http_request: HttpRequest, **kwargs: Any) -> AsyncHttpResponse:
        """Runs the network request through the client's chained policies.

        :param http_request: The network request you want to make. Required.
        :type http_request: ~azure.core.pipeline.transport.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to True.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.pipeline.transport.AsyncHttpResponse
        """
        # Substitute the {subscriptionId} placeholder before dispatching.
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        }
        http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
        stream = kwargs.pop("stream", True)
        pipeline_response = await self._client._pipeline.run(http_request, stream=stream, **kwargs)
        return pipeline_response.http_response

    async def close(self) -> None:
        # Release the underlying transport/session.
        await self._client.close()

    async def __aenter__(self) -> "DnsManagementClient":
        await self._client.__aenter__()
        return self

    async def __aexit__(self, *exc_details) -> None:
        await self._client.__aexit__(*exc_details)
| 46.170455 | 133 | 0.704898 |
acf3246d86969320844140d38a5a8cd4340f8f72 | 721 | py | Python | audio/audio.py | ronhandler/gitroot | beb81c4b826939f16e57a98ac5845d8acecf151d | [
"Unlicense"
] | null | null | null | audio/audio.py | ronhandler/gitroot | beb81c4b826939f16e57a98ac5845d8acecf151d | [
"Unlicense"
] | null | null | null | audio/audio.py | ronhandler/gitroot | beb81c4b826939f16e57a98ac5845d8acecf151d | [
"Unlicense"
] | null | null | null | #!/usr/bin/python3 -B
import sys
import pyaudio
import wave
import sys
if __name__ == "__main__":
    # Number of frames to read per chunk.
    CHUNK = 1024

    if len(sys.argv) < 2:
        print("Plays a wave file.")
        print("Usage: %s filename.wav" % sys.argv[0])
        sys.exit(-1)

    wf = wave.open(sys.argv[1], 'rb')

    # Open an output stream matching the wave file's sample format.
    p = pyaudio.PyAudio()
    stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                    channels=wf.getnchannels(),
                    rate=wf.getframerate(),
                    output=True)

    # Stream the file chunk by chunk.  Bug fix: readframes() returns
    # *bytes* on Python 3 (the shebang is python3), and b'' != '' is
    # always True, so the old `while data != '':` never terminated at
    # EOF.  Testing truthiness handles both bytes and str.
    data = wf.readframes(CHUNK)
    while data:
        stream.write(data)
        data = wf.readframes(CHUNK)

    stream.stop_stream()
    stream.close()
    wf.close()
    p.terminate()
    sys.exit(0)
acf3248822098270fcded5d9c6596082250a1ba3 | 254 | py | Python | OpenGLCffi/GLES2/EXT/OES/EGL_image.py | cydenix/OpenGLCffi | c78f51ae5e6b655eb2ea98f072771cf69e2197f3 | [
"MIT"
] | null | null | null | OpenGLCffi/GLES2/EXT/OES/EGL_image.py | cydenix/OpenGLCffi | c78f51ae5e6b655eb2ea98f072771cf69e2197f3 | [
"MIT"
] | null | null | null | OpenGLCffi/GLES2/EXT/OES/EGL_image.py | cydenix/OpenGLCffi | c78f51ae5e6b655eb2ea98f072771cf69e2197f3 | [
"MIT"
] | null | null | null | from OpenGLCffi.GLES2 import params
@params(api='gles2', prms=['target', 'image'])
def glEGLImageTargetTexture2DOES(target, image):
    # Stub for the GL_OES_EGL_image texture entry point; the @params
    # decorator wires the actual native call via cffi, so the body is empty.
    pass
@params(api='gles2', prms=['target', 'image'])
def glEGLImageTargetRenderbufferStorageOES(target, image):
    # Stub for the GL_OES_EGL_image renderbuffer entry point; dispatch is
    # handled by the @params cffi decorator.
    pass
| 21.166667 | 58 | 0.751969 |
acf324dd20aef7c3c9b143657a52c3bb3632cf26 | 14,399 | py | Python | util/pd-gem5/pd-gem5.py | alianmohammad/pd-gem5-latest | cfcf6aa004c168d6abdfedcd595de4b30f5e88ee | [
"BSD-3-Clause"
] | null | null | null | util/pd-gem5/pd-gem5.py | alianmohammad/pd-gem5-latest | cfcf6aa004c168d6abdfedcd595de4b30f5e88ee | [
"BSD-3-Clause"
] | null | null | null | util/pd-gem5/pd-gem5.py | alianmohammad/pd-gem5-latest | cfcf6aa004c168d6abdfedcd595de4b30f5e88ee | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python
# Copyright (c) 2015 The University of Wisconsin Madison
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Mohammad Alian
# This script launch pd-gem5
# usage: python pd-gem5.py <config file>
import sys, os, thread, commands
import socket
import time
import subprocess
import threading
import signal
import fcntl
import struct
import re
import collections
# Path of the pd-gem5 configuration file, passed as the first CLI argument.
configFile = sys.argv[1]
# A dictionary of parameters
params = {}
# "phy:sim" machine mapping strings parsed from the config file
machines = {}
# dictionary that maps each simulated node to a physical node
sim_to_phy = {}
# dictionary that maps each subprocess to a simulated node
gem5_processes = {}
# hostname of the launcher machine (the barrier runs here)
this = socket.gethostname()
run_dir = ""
ckpt_dir = ""
script_dir = ""
submit_script = ""
# barrier/sync endpoint
sync_ip = ''
sync_port = 5000
# ethertap endpoint of the simulated switch, discovered at runtime
sw_ip = ''
sw_port = ''
# cleared by cleanup() to stop helper threads
running = True
monitoring = True
num_nodes = 0
# kill all threads and gem5 processes, then exit the launcher
def cleanup():
    global running
    # kill barrier, monitor and main threads
    running = False
    time.sleep(1)
    # kill gem5 processes if they are still running
    for p in gem5_processes.keys():
        # string keys mark HTCondor jobs; remove them via condor_rm
        if type(p) == str:
            submission_line = subprocess.Popen("grep submitted %s/%s/pdgem5sim.log"\
                %(run_dir, gem5_processes[p]), stdout=subprocess.PIPE, stderr=subprocess.PIPE, \
                shell = True).communicate()[0].rstrip()
            if submission_line != '':
                # extract the cluster id from "... submitted to cluster NNN."
                job_id = re.split('\.|\(', submission_line)[1]
                os.system("condor_rm %s > /dev/null" %(job_id))
        else:
            # local/ssh jobs: kill the whole process group started by Popen
            try:
                os.killpg(p.pid, signal.SIGTERM)
            except Exception as ex:
                # process may have exited already; nothing left to kill
                pass
    sys.exit(0)
# submit a job to HTCondor
# MODIFY THIS FUNCTION IF YOU ARE USING OTHER SIMULATION POOL MANAGEMENT SOFTWARE
def submit_to_HTCondor(job_path):
    # Write an HTCondor submit description into job_path/condor.rcS and
    # submit it.  The job runs job_path/job.sh through /bin/sh and sends
    # stdout/stderr/log to the pdgem5sim.* files in the same directory.
    submit_script =("executable = /bin/sh\n"
                    "arguments = %s\n"
                    "initialdir = %s\n"
                    "output = %s\n"
                    "error = %s\n"
                    "log = %s\n"
                    "Rank = TARGET.Mips\n"
                    "Requirements =\n"
                    "universe = vanilla\n"
                    "getenv = true\n"
                    "queue\n")\
                    % (job_path + "/job.sh",
                       job_path,
                       job_path + "/pdgem5sim.out",
                       job_path + "/pdgem5sim.err",
                       job_path + "/pdgem5sim.log")
    f0 = open(job_path + '/condor.rcS', 'w')
    f0.write(submit_script)
    f0.close()
    command_line = 'condor_submit ' + job_path + '/condor.rcS'
    os.system(command_line)
# returns the ip address of a given network interface
def get_ip_address(ifname):
    """Return the IPv4 address bound to network interface `ifname`.

    Uses the SIOCGIFADDR ioctl (0x8915) on a throwaway datagram socket,
    the classic ifconfig trick; `ifname` is truncated to the 15-character
    interface-name limit before being packed into the request buffer.
    """
    probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    request = struct.pack('256s', ifname[:15])
    packed_addr = fcntl.ioctl(probe.fileno(), 0x8915, request)[20:24]
    return socket.inet_ntoa(packed_addr)
# get the ip address of this machine; we run the barrier on this machine
sync_ip = get_ip_address('eth0')  # assumes the primary NIC is eth0 -- TODO confirm on multi-NIC hosts
def launch_gem5_process(cmd, sim):
    # Launch one gem5 process for simulated node `sim`: either submit it
    # to HTCondor, or run it directly (possibly via ssh) on the physical
    # host that sim_to_phy maps the node to.
    global gem5_processes
    job_path = run_dir + '/' + sim
    if sim_to_phy[sim] == 'HTCondor':
        # wrap the command in a job.sh script for the batch system
        f0 = open(job_path + '/job.sh', 'w')
        f0.write('#!/bin/sh\n')
        f0.write(cmd)
        f0.close()
        # HTCondor jobs are tracked by a string key instead of a Popen handle
        key = 'HTCondor_' + sim
        gem5_processes[key] = sim
        submit_to_HTCondor(job_path)
    else:
        host_ip = socket.gethostbyname(sim_to_phy[sim])
        command_line = ''
        # if the physical host is localhost then we don't need to ssh
        if (host_ip == '127.0.0.1') or (host_ip == '127.0.1.1') or\
           host_ip == sync_ip:
            command_line = 'cd ' + job_path + ';' + cmd +\
                ' >pdgem5sim.out 2>pdgem5sim.err'
        # we should ssh to machines and launch cmd on them
        else:
            command_line = 'ssh ' + sim_to_phy[sim] + ' \'cd ' + job_path + ';'\
                + cmd + ' >pdgem5sim.out 2>pdgem5sim.err\''
        # setsid puts the job in its own process group so cleanup() can
        # kill the whole tree with os.killpg
        p = subprocess.Popen(command_line, shell = True, preexec_fn=os.setsid)
        gem5_processes[p] = sim
        # block until this node's ethertap reports it is listening
        tap_connected(sim)
def perpare_dir():
    # (sic: "perpare") Re-create the per-run directory tree: wipe the old
    # run directory, then create a run sub-directory and a checkpoint
    # sub-directory for every simulated node.
    # clean up the old files
    os.system("rm -rf %s > /dev/null" % (run_dir))
    # make new directories
    os.system("mkdir -p %s > /dev/null 2>&1" %(run_dir))
    # make sure that checkpoint directory exists
    os.system("mkdir -p %s > /dev/null 2>&1" %(params['pd-gem5_dir'] +'/ckptdir/'))
    # make run directory for each node, make sure that checkpoint dir exists for
    # each node
    for machine in machines:
        # each entry is "physical_host:simulated_node"
        (phy, sim) = machine.split(':')
        os.system("mkdir -p %s/%s > /dev/null 2>&1" %(run_dir, sim))
        os.system("mkdir -p %s/%s > /dev/null 2>&1" %((params['pd-gem5_dir'] +\
            '/ckptdir/' + params['ckpt_dir'] + '/', sim)))
# this function prepares the gem5 command line for each simulated node;
# `sims` is a comma-separated list of node names ("sw" for the switch,
# or up to four node names for a quad process)
def prepare_cmd(sims):
    cmd = ''
    # command prefix with gem5 debug tracing enabled
    cmd_debug = params['pd-gem5_dir'] + '/' + params['gem5_binary'] + ' ' + \
        '--debug-flags=' + params['debug_flags'] + ' '
    if params['debug_start'] != '0':
        cmd_debug += '--debug-start=' + params['debug_start'] + ' '
    # command prefix without tracing
    cmd_nodebug = params['pd-gem5_dir'] + '/' + params['gem5_binary'] + ' '
    # enable tracing when any of the requested nodes is in trace_on
    debug_flag = False
    for sim in sims.split(','):
        if sim in params['trace_on']:
            debug_flag = True
            break
    if debug_flag:
        cmd = cmd_debug + params['pd-gem5_dir'] + '/configs/example/'
    else:
        cmd = cmd_nodebug + params['pd-gem5_dir'] + '/configs/example/'
    # add switch specific options
    if sims.split(',')[0] == 'sw':
        cmd += 'sw.py '
    # add node specific options
    else:
        cmd += params['fs_script'] + ' '\
            + '--switch-tap-ip=' + sw_ip + ' '\
            + '--switch-tap-port=' + sw_port + ' '
        if len(sims.split(',')) == 1:
            cmd += '--pd-gem5 '
        # right now we just support quad or mono
        else:
            cmd += '--quad '
        # attach per-node boot scripts; tux0 optionally uses a dedicated one
        if params['script_dir'] != '0':
            if len(sims.split(',')) == 4:
                if sims.split(',')[0] != 'tux0':
                    cmd += '--script=' + script_dir + '/' + sims.split(',')[0] + '.sh '\
                        + '--script1=' + script_dir + '/' + sims.split(',')[1] + '.sh '\
                        + '--script2=' + script_dir + '/' + sims.split(',')[2] + '.sh '\
                        + '--script3=' + script_dir + '/' + sims.split(',')[3] + '.sh '
                else:
                    cmd += '--script=' + params['script_tux0'] + ' '\
                        + '--script1=' + script_dir + '/' + sims.split(',')[1] + '.sh '\
                        + '--script2=' + script_dir + '/' + sims.split(',')[2] + '.sh '\
                        + '--script3=' + script_dir + '/' + sims.split(',')[3] + '.sh '
            else:
                if sims.split(',')[0] != 'tux0':
                    cmd += '--script=' + script_dir + '/' + sims.split(',')[0] + '.sh '
                else:
                    cmd += '--script=' + params['script_tux0'] + ' '
    # add common options for both switch and nodes
    cmd += '--checkpoint-dir=' + ckpt_dir + '/' + sims + ' '\
        + params['other_command_line_options'] + ' '\
        '--sync-quantum=' + params['sync_period'] + ' '\
        + '--num-nodes=' + str(num_nodes - 1) + ' '\
        + '--nic-delay=' + params['nic_delay'] + ' '\
        + '--nic-speed=' + params['nic_speed'] + ' '\
        + '--etherlink-delay=' + params['link_delay'] + ' '\
        + '--etherlink-speed=' + params['link_speed'] + ' '\
        + '--sw-delay=' + params['sw_delay'] + ' '\
        + '--sw-speed=' + params['sw_speed'] + ' '
    # optionally capture ethernet traffic to a pcap file
    if 'etherdump' in params.keys():
        cmd += '--etherdump=' + run_dir + '/' + sims + '/etherdump.pcap '
    return cmd
def get_sw_ip_port():
    # Parse the switch gem5 process' stderr log to find the IP and port
    # its ethertap device listens on; the nodes connect their NICs there.
    tap_line = ''
    # wait till sw gem5 process updates the content of its log file
    time.sleep(1)
    if (os.path.isfile("%s/sw/pdgem5sim.err" %(run_dir)) == False):
        # switch never produced a log -- abort the whole run
        cleanup()
        sys.exit(0)
    # busy-wait until the "tap" line shows up in the switch log
    while tap_line == '':
        tap_line = subprocess.Popen("grep tap %s/sw/pdgem5sim.err"\
            %(run_dir), stdout=subprocess.PIPE,\
            shell = True).communicate()[0].rstrip()
    # fields 6 and 7 of the tap message hold the listen IP and port --
    # presumed stable gem5 log format; TODO confirm against the gem5 build
    sw_ip = tap_line.split(' ')[6]
    sw_port = tap_line.split(' ')[7].rstrip()
    return sw_ip, sw_port
def tap_connected(sim):
    """Block until gem5 node *sim* reports its tap device is listening.

    Polls the node's stderr log for a 'Listening' line; gives up early
    when the global `running` flag is cleared (e.g. during shutdown).
    """
    line = ''
    while line == '' and running:
        line = subprocess.Popen("grep Listening %s/%s/pdgem5sim.err"
                                % (run_dir, sim), stdout=subprocess.PIPE,
                                shell=True).communicate()[0].rstrip()
        # back off briefly so the grep poll does not busy-spin a core
        if line == '':
            time.sleep(0.1)
def check_nodes_status():
    """Return True if any launched gem5 process has finished or been killed.

    Handles both launch modes: HTCondor jobs (dict key is a string) are
    checked via condor_q; local processes are checked via Popen.poll().
    """
    flag = False
    for p in gem5_processes.keys():
        # type of p is "string" if gem5 process is launched using HTCondor
        if type(p) == str:
            submission_line = ''
            # wait till gem5 process updates the content of its log file
            while submission_line == '':
                submission_line = subprocess.Popen("grep submitted %s/%s/pdgem5sim.log"\
                    %(run_dir, gem5_processes[p]), stdout=subprocess.PIPE,\
                    stderr=subprocess.PIPE, shell = True).communicate()[0].rstrip()
                time.sleep(1)
            # the condor submit line looks like "... submitted to cluster <id>."
            # — extract the numeric job id between '(' / '.' delimiters
            job_id = re.split('\.|\(', submission_line)[1]
            job_status = subprocess.Popen("condor_q %s | grep %s" %(job_id, job_id),
                stdout=subprocess.PIPE, shell = True).communicate()[0].rstrip()
            # an empty condor_q result means the job left the queue
            if job_status == '':
                print ("gem5 process simulating %s is finished/killed. Please"
                    " check %s/%s/pdgem5sim.err(out) for more information")\
                    %(gem5_processes[p], run_dir, gem5_processes[p])
                flag = True
        else:
            # poll output is None if process is still running
            if p.poll() != None:
                print ("gem5 process simulating %s is finished/killed. Please"
                    " check %s/%s/pdgem5sim.err(out) for more information")\
                    %(gem5_processes[p], run_dir, gem5_processes[p])
                flag = True
    return flag
def monitor_cluster():
    """Poll node status once a second; tear everything down on failure."""
    while monitoring:
        time.sleep(1)
        if not check_nodes_status():
            continue
        # a node died or finished — stop the whole simulation
        cleanup()
        return
# Read the config file and extract the parameters into the params dict.
configCont = open(configFile,'r').readlines()
for i in configCont:
    # skip blank lines and '#' comment lines
    if len(i) > 1 and not i.strip()[0] == '#':
        # everything after the first '=' is the value (values may contain '=')
        iSplit = map(lambda x:x.strip(), i.split('=')[1:])
        params[i.split('=')[0]] = '='.join(iSplit)
print "preparing to start pd-gem5"
print "configuration file = " + params['run_name']
# derive working directories from the configured base paths
run_dir = params['run_dir'] + '/' + params['run_name']
ckpt_dir = params['pd-gem5_dir'] + '/ckptdir/' + params['ckpt_dir']
script_dir = params['pd-gem5_dir'] + '/scriptdir/' + params['script_dir']
# machine_names entries are "<physical-host>:<sim0,sim1,...>" pairs
machines = params['machine_names'].split(' ')
num_nodes = len(machines)
for machine in machines:
    (phy, sim) = machine.split(':')
    #num_nodes += len(sim.split(','))
    # right now we just support quad or mono
    if (len(sim.split(',')) != 1) and (len(sim.split(',')) != 4):
        print ("right now we just support quad or mono simulation!"\
            "please modify machine_names config\n")
        running = False
        monitoring = False
        sys.exit(0)
    sim_to_phy[sim] = phy
# clean up previous directories, create new ones
perpare_dir()
# take a copy of "config file" to run_dir
os.system("cp %s %s"%(configFile,run_dir))
# start monitoring the progress of launched gem5 processes and kill simulation
# if any of them encountered any problem
t_monitor = threading.Thread(name='monitor_thread', target = monitor_cluster)
t_monitor.daemon = True
#threads.append(t_monitor)
t_monitor.start()
cmds = {}
# prepare cmd for switch
cmds['sw'] = prepare_cmd('sw')
try:
    launch_gem5_process(cmds['sw'], 'sw')
    print "switch started"
    # get ip address of the physical machine that is simulating switch +
    # the port number of the switch tap device
    sw_ip, sw_port = get_sw_ip_port()
    # launch the node processes in a deterministic (sorted) order
    sim_to_phy_ordered = collections.OrderedDict(sorted(sim_to_phy.items()))
    # prepare cmd for nodes and start them
    for sim in sim_to_phy_ordered.keys():
        if sim != 'sw' and running:
            cmds[sim] = prepare_cmd(sim)
            launch_gem5_process(cmds[sim], sim)
            print "%s started" %(sim)
    # busy loop till simulation finishes OR keyboard interrupt received
    print "pd-gem5 started"
    while running:
        time.sleep(1)
except KeyboardInterrupt:
    print "CTRL+C pressed.\nLet's cleanup and exit pd-gem5 ..."
    cleanup()
| 39.666667 | 96 | 0.597819 |
acf32543930c45fbab18a819111f2395ceb8b07f | 5,075 | py | Python | src/ymslparser/__init__.py | xeonchen/ymsl-parser | 2ff099950b8fa5dd13694d02bef662bc70d89011 | [
"MIT"
] | null | null | null | src/ymslparser/__init__.py | xeonchen/ymsl-parser | 2ff099950b8fa5dd13694d02bef662bc70d89011 | [
"MIT"
] | null | null | null | src/ymslparser/__init__.py | xeonchen/ymsl-parser | 2ff099950b8fa5dd13694d02bef662bc70d89011 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import csv
import datetime
import logging
import operator
import re
from .utils import *
from .validator import invalidate
logger = logging.getLogger('ymsl')
class League(object):
    """Top-level container holding every Team and scheduling Week."""

    def __init__(self):
        # default name; overwritten once the schedule header is parsed
        self.name = 'YMSL'
        # team name -> Team
        self.teams = {}
        # Week objects in schedule order
        self.weeks = []

    def __del__(self):
        self.teams.clear()

    def add_team(self, team):
        """Register *team* under its name (overwrites a same-named entry)."""
        self.teams[team.name] = team

    def add_week(self, week):
        """Append *week* to the schedule."""
        self.weeks.append(week)

    def get_team(self, team):
        """Return the Team called *team*, creating it on first request."""
        if team not in self.teams:
            self.teams[team] = Team(team)
        return self.teams[team]
class Team(object):
    """A single team plus the time slots it is scheduled to play in."""

    def __init__(self, name):
        self.name = name
        # TimeSlot objects this team appears in
        self.slots = []

    def __str__(self):
        slot_strs = [str(s) for s in self.slots]
        return 'Team %s: %s' % (self.name, slot_strs)

    def clear(self):
        """Forget all scheduled slots."""
        del self.slots[:]

    def add_slot(self, time_slot):
        self.slots.append(time_slot)
class Week(object):
    """One week of the schedule: a label, a calendar date, and its slots."""

    def __init__(self, name, date):
        self.name = name
        self.date = date
        self.slots = []

    def __str__(self):
        return 'Week {}: {}'.format(self.name, self.date)

    def add_slot(self, time_slot):
        self.slots.append(time_slot)
class TimeSlot(object):
    """One scheduled game: two teams on a field at a start time."""

    def __init__(self, week, idx, start_time, field, team1, team2):
        self.week = week
        self.idx = idx
        self.start_time = start_time
        self.field = field
        self.team1 = team1
        self.team2 = team2

    def __str__(self):
        matchup = '%s vs %s' % (self.team1.name, self.team2.name)
        return '%s @%s %s' % (self.start_time, self.field, matchup)
class Parser(object):
    """Parses a YMSL schedule CSV into a League/Week/TimeSlot object tree."""

    @staticmethod
    def parse(filename, **kw):
        """Open *filename* (extra kwargs forwarded to open()) and return
        the fully populated League."""
        with open(filename, 'r', **kw) as f:
            parser = Parser(csv.reader(f))
            return parser.do_parse()

    def __init__(self, content):
        self._rows = [row for row in content]
        # NOTE(review): assert is stripped under `python -O`; an explicit
        # raise would make this validation unconditional.
        assert invalidate(self._rows[:])
        self.league = League()

    def do_parse(self):
        """Consume all rows and return the resulting League."""
        for week in self._parse_weeks():
            self.league.add_week(week)
        return self.league

    def has_rows(self):
        """True while unconsumed rows remain."""
        return bool(self._rows)

    def _pop_next_row(self):
        # returns None once the input is exhausted
        if not self.has_rows():
            return None
        return self._rows.pop(0)

    def _unpop_row(self, row):
        # push a row back so the next _pop_next_row() re-reads it
        self._rows.insert(0, row)

    def _parse_weeks(self):
        """Yield one Week per schedule section, populating teams and slots."""
        # raw strings: '\d' is an invalid escape sequence in a plain
        # string literal (SyntaxWarning since Python 3.12)
        date_pattern = re.compile(r'\d+')
        week_pattern = re.compile(r'第[一二三四五六七八九十]+週')
        while self.has_rows():
            row = self._pop_next_row()
            name, tournament, date = row[0].split()
            year_str = date_pattern.search(name)[0]
            month, day = map(int, date_pattern.findall(date))
            # spring warm-up games dated after October belong to the
            # previous ROC year; +1911 converts ROC year to Gregorian
            is_last_year = '春季熱身賽' == tournament and month > 10
            year = int(year_str) + (1910 if is_last_year else 1911)
            self.league.name = name[len(year_str):]
            self.league.year = year
            self.league.tournament = tournament
            week = Week(week_pattern.search(date)[
                0], datetime.date(year, month, day))
            logger.info(week)
            row = self._pop_next_row()
            if row is None:
                return
            upper_header = [remove_spaces(col) for col in row if col]
            upper_slots = list(self._parse_time_slot())
            if not upper_slots:
                return
            row = self._pop_next_row()
            if row is None:
                return
            lower_header = [remove_spaces(col) for col in row if col]
            lower_slots = list(self._parse_time_slot())
            assert len(lower_slots) == len(upper_slots)
            # field names live in both header rows after the first two
            # columns; entries wrapped in parentheses are annotations
            fields = list(filter(lambda col: not col.startswith(
                '(') and not col.endswith(')'), upper_header[2:] + lower_header[2:]))
            # merge each upper row with its lower row (dropping the lower
            # row's duplicate index/time pair) into one flat slot record
            timeslots = [list(a) + list(b)[1:]
                         for a, b in zip(upper_slots, lower_slots)]
            for slot in timeslots:
                (index, start_time), teams = slot[0], slot[1:]
                # 'HH:MM' string -> concrete datetime on the week's date
                start_time = datetime.datetime.strptime(start_time, '%H:%M')
                start_time = datetime.datetime(
                    year, month, day, start_time.hour, start_time.minute)
                for i, team_pair in enumerate(teams):
                    team1, team2 = map(lambda name: self.league.get_team(
                        remove_spaces(name)), team_pair)
                    slot = TimeSlot(week, int(index), start_time,
                                    fields[i], team1, team2)
                    logger.info(slot)
                    # register the slot on both teams and the week
                    team1.add_slot(slot)
                    team2.add_slot(slot)
                    week.add_slot(slot)
            yield week

    def _parse_time_slot(self):
        """Yield zipped (odd, even) column pairs for consecutive rows whose
        first cell is numeric (i.e. actual slot rows)."""
        while self.has_rows():
            row = self._pop_next_row()
            if not row[0].isdigit():
                # not a slot row: push back for the week-level parser
                self._unpop_row(row)
                return
            teams = [remove_spaces(col) for col in row if col]
            odd, even = teams[0:][::2], teams[1:][::2]
            yield zip(odd, even)
| 29.166667 | 98 | 0.54936 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.