| content (string, 27–928k chars) | path (string, 4–230 chars) | size (int64, 27–928k) | nl_text (string, 21–396k chars) | nl_size (int64, 21–396k) | nl_language (string, 2–3 chars) | nl_language_score (float64, 0.04–1) |
|---|---|---|---|---|---|---|
import math
import warnings
from functools import reduce
import numpy as np
import torch
from backpack import backpack, extend
from backpack.extensions import BatchGrad
from gym.utils import seeding
from torchvision import datasets, transforms
from dacbench import AbstractEnv
warnings.filterwarnings("ignore")
class SGDEnv(AbstractEnv):
    """
    Environment to control the learning rate of adam

    Each ``step`` receives a log10 learning-rate action, performs one
    Adam-style parameter update on the wrapped model (MNIST by default)
    and returns hand-crafted loss/update statistics as the state.
    """

    def __init__(self, config):
        """
        Initialize SGD Env

        Parameters
        -------
        config : objdict
            Environment configuration
        """
        super(SGDEnv, self).__init__(config)
        self.batch_size = config.training_batch_size
        self.validation_batch_size = config.validation_batch_size
        self.no_cuda = config.no_cuda
        self.current_batch_size = config.training_batch_size
        self.env_seed = config.seed
        self.seed(self.env_seed)
        self.use_cuda = not self.no_cuda and torch.cuda.is_available()
        self.device = torch.device("cuda" if self.use_cuda else "cpu")
        # Fraction of the official training set used for training; the rest
        # becomes the validation split (see reset()).
        self.training_validation_ratio = 0.8
        # self.test_dataset = None
        self.train_dataset = None
        self.validation_dataset = None
        self.train_loader = None
        # self.test_loader = None
        self.validation_loader = None
        self.train_loader_it = None
        self.validation_loader_it = None
        self.train_batch_index = 0
        self.epoch_index = 0
        self.current_training_loss = None
        # Fix: initialise here; previously only set inside _get_validation_loss_()
        # although get_default_state() reads it.
        self.current_validation_loss = None
        self.loss_batch = None
        self.model = None
        self.parameter_count = 0
        self.layer_sizes = []
        # Per-sample losses are required for the variance features, hence
        # reduction="none"; extend() registers the loss with backpack so the
        # BatchGrad extension can compute per-sample gradients.
        self.loss_function = torch.nn.NLLLoss(reduction="none")
        self.loss_function = extend(self.loss_function)
        self.initial_lr = config.lr * torch.ones(
            1, device=self.device, requires_grad=False
        )
        self.current_lr = config.lr * torch.ones(
            1, device=self.device, requires_grad=False
        )
        # Adam parameters
        self.beta1 = config.beta1
        self.beta2 = config.beta2
        self.m = 0
        self.v = 0
        self.epsilon = 1.0e-08
        self.t = 0
        self.step_count = torch.zeros(1, device=self.device, requires_grad=False)
        self.prev_descent = None
        self.learning_rate = 0.001
        # Discounted running averages and uncertainties of the state features.
        self.predictiveChangeVarDiscountedAverage = torch.zeros(
            1, device=self.device, requires_grad=False
        )
        self.predictiveChangeVarUncertainty = torch.zeros(
            1, device=self.device, requires_grad=False
        )
        self.lossVarDiscountedAverage = torch.zeros(
            1, device=self.device, requires_grad=False
        )
        self.lossVarUncertainty = torch.zeros(
            1, device=self.device, requires_grad=False
        )
        self.discount_factor = 0.9
        self.firstOrderMomentum = torch.zeros(
            1, device=self.device, requires_grad=False
        )
        self.secondOrderMomentum = torch.zeros(
            1, device=self.device, requires_grad=False
        )
        self.writer = None
        # Reward and state functions can be overridden via the config.
        if "reward_function" in config.keys():
            self.get_reward = config["reward_function"]
        else:
            self.get_reward = self.get_default_reward
        if "state_method" in config.keys():
            self.get_state = config["state_method"]
        else:
            self.get_state = self.get_default_state

    def seed(self, seed=None):
        """
        Set rng seed

        Parameters
        ----------
        seed:
            seed for rng
        """
        # gym's seeding helper validates the seed and draws one when None.
        _, seed = seeding.np_random(seed)
        if seed is not None:
            torch.manual_seed(seed)
            np.random.seed(seed)
        return [seed]

    def step(self, action):
        """
        Execute environment step

        Parameters
        ----------
        action : list
            action to execute

        Returns
        -------
        np.array, float, bool, dict
            state, reward, done, info
        """
        done = super(SGDEnv, self).step_()
        self.step_count += 1
        index = 0
        if not isinstance(action, float):
            action = action[0]
        action = torch.Tensor([action]).to(self.device)
        # The action is the negative exponent of the learning rate: lr = 10^-action.
        new_lr = 10 ** (-action)
        self.current_lr = new_lr
        # Adam-style update direction built from the bias-corrected momenta
        # computed during the previous get_state() call.
        delta_w = torch.mul(
            new_lr,
            self.firstOrderMomentum
            / (torch.sqrt(self.secondOrderMomentum) + self.epsilon),
        )
        # Apply the flat update vector slice-by-slice to each parameter tensor.
        for i, p in enumerate(self.model.parameters()):
            layer_size = self.layer_sizes[i]
            p.data = p.data - delta_w[index: index + layer_size].reshape(
                shape=p.data.shape
            )
            index += layer_size
        self._set_zero_grad()
        reward = self.get_reward(self)
        return self.get_state(self), reward, done, {}

    def reset(self):
        """
        Reset environment

        Returns
        -------
        np.array
            Environment state
        """
        super(SGDEnv, self).reset_()
        dataset = self.instance[0]
        instance_seed = self.instance[1]
        construct_model = self.instance[2]
        self.seed(instance_seed)
        self.model = construct_model().to(self.device)
        self.training_validation_ratio = 0.8
        train_dataloader_args = {"batch_size": self.batch_size}
        validation_dataloader_args = {"batch_size": self.validation_batch_size}
        if self.use_cuda:
            param = {"num_workers": 1, "pin_memory": True, "shuffle": True}
            train_dataloader_args.update(param)
            validation_dataloader_args.update(param)
        if dataset == "MNIST":
            transform = transforms.Compose(
                [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
            )
            train_dataset = datasets.MNIST(
                "../data", train=True, download=True, transform=transform
            )
            # self.test_dataset = datasets.MNIST('../data', train=False, transform=transform)
        else:
            raise NotImplementedError
        # Split the official training set into training and validation subsets.
        training_dataset_limit = math.floor(
            len(train_dataset) * self.training_validation_ratio
        )
        validation_dataset_limit = len(train_dataset)
        self.train_dataset = torch.utils.data.Subset(
            train_dataset, range(0, training_dataset_limit - 1)
        )
        self.validation_dataset = torch.utils.data.Subset(
            train_dataset, range(training_dataset_limit, validation_dataset_limit)
        )
        self.train_loader = torch.utils.data.DataLoader(
            self.train_dataset, **train_dataloader_args
        )
        # self.test_loader = torch.utils.data.DataLoader(self.test_dataset, **train_dataloader_args)
        self.validation_loader = torch.utils.data.DataLoader(
            self.validation_dataset, **validation_dataloader_args
        )
        self.train_batch_index = 0
        self.epoch_index = 0
        self.train_loader_it = iter(self.train_loader)
        self.validation_loader_it = iter(self.validation_loader)
        self.parameter_count = 0
        self.layer_sizes = []
        for p in self.model.parameters():
            layer_size = reduce(lambda x, y: x * y, p.shape)
            self.layer_sizes.append(layer_size)
            self.parameter_count += layer_size
        # extend() registers the model with backpack so per-sample gradients
        # (BatchGrad) are available after backward().
        self.model = extend(self.model)
        self._set_zero_grad()
        self.model.train()
        self.current_training_loss = None
        self.loss_batch = None
        # Adam parameters
        self.m = 0
        self.v = 0
        self.t = 0
        self.step_count = torch.zeros(1, device=self.device, requires_grad=False)
        self.current_lr = self.initial_lr
        self.prev_descent = torch.zeros(
            (self.parameter_count,), device=self.device, requires_grad=False
        )
        # Run one training batch so that gradients and losses exist before the
        # first state is computed.
        self.get_default_reward(self)
        return self.get_state(self)

    def set_writer(self, writer):
        # Store an (optional) summary writer; never used inside this class.
        self.writer = writer

    def close(self):
        """
        No additional cleanup necessary

        Returns
        -------
        bool
            Cleanup flag
        """
        return True

    def render(self, mode: str = "human"):
        """
        Render env in human mode

        Parameters
        ----------
        mode : str
            Execution mode
        """
        if mode != "human":
            raise NotImplementedError
        pass

    def get_default_state(self, _):
        """
        Gather state description

        Returns
        -------
        dict
            Environment state
        """
        # Refresh the Adam momenta from the current gradients; step() reuses
        # these momenta for its parameter update.
        gradients = self._get_gradients()
        self.firstOrderMomentum, self.secondOrderMomentum = self._get_momentum(
            gradients
        )
        (
            predictiveChangeVarDiscountedAverage,
            predictiveChangeVarUncertainty,
        ) = self._get_predictive_change_features(
            self.current_lr, self.firstOrderMomentum, self.secondOrderMomentum
        )
        lossVarDiscountedAverage, lossVarUncertainty = self._get_loss_features()
        state = {
            "predictiveChangeVarDiscountedAverage": predictiveChangeVarDiscountedAverage,
            "predictiveChangeVarUncertainty": predictiveChangeVarUncertainty,
            "lossVarDiscountedAverage": lossVarDiscountedAverage,
            "lossVarUncertainty": lossVarUncertainty,
            "currentLR": self.current_lr,
            "trainingLoss": self.current_training_loss,
            "validationLoss": self.current_validation_loss,
        }
        return state

    def _set_zero_grad(self):
        # Zero every parameter's gradient buffer in place (no-op before the
        # first backward pass, when p.grad is still None).
        index = 0
        for i, p in enumerate(self.model.parameters()):
            if p.grad is None:
                continue
            layer_size = self.layer_sizes[i]
            p.grad.zero_()
            index += layer_size

    def _train_batch_(self):
        """Run one training batch; returns the (validation-based) reward."""
        # Fix: use the builtin next() instead of the Python-2-era .next()
        # method, which modern DataLoader iterators no longer provide.
        (data, target) = next(self.train_loader_it)
        data, target = data.to(self.device), target.to(self.device)
        self.current_batch_size = data.size()[0]
        output = self.model(data)
        loss = self.loss_function(output, target)
        # backpack context makes per-sample gradients (grad_batch) available.
        with backpack(BatchGrad()):
            loss.mean().backward()
        loss_value = loss.mean()
        reward = self._get_validation_loss()
        self.loss_batch = loss
        self.current_training_loss = torch.unsqueeze(loss_value.detach(), dim=0)
        self.train_batch_index += 1
        return reward

    def get_default_reward(self, _):
        """Train on the next batch, restarting the loader at epoch end."""
        try:
            reward = self._train_batch_()
        except StopIteration:
            # End of epoch: restart the training iterator.
            self.train_batch_index = 0
            self.epoch_index += 1
            self.train_loader_it = iter(self.train_loader)
            reward = self._train_batch_()
        return reward

    def _get_val_loss(self):
        """Mean loss over the full validation set (currently unused)."""
        self.model.eval()
        validation_loss = torch.zeros(1, device=self.device, requires_grad=False)
        with torch.no_grad():
            for data, target in self.validation_loader:
                data, target = data.to(self.device), target.to(self.device)
                output = self.model(data)
                validation_loss += self.loss_function(output, target).mean()
        # NOTE(review): this sums per-batch means but divides by the number of
        # samples, not the number of batches — confirm the intended scaling.
        validation_loss /= len(self.validation_loader.dataset)
        self.model.train()
        return validation_loss

    def _get_validation_loss_(self):
        """Loss on a single validation batch; raises StopIteration at the end."""
        self.model.eval()
        # Fix: builtin next() instead of the Python-2-era .next() method.
        (data, target) = next(self.validation_loader_it)
        data, target = data.to(self.device), target.to(self.device)
        output = self.model(data)
        validation_loss = self.loss_function(output, target).mean()
        validation_loss = torch.unsqueeze(validation_loss.detach(), dim=0)
        self.current_validation_loss = validation_loss
        self.model.train()
        return -validation_loss.item()  # negative because it is the reward

    def _get_validation_loss(self):
        """Like _get_validation_loss_, restarting the loader when exhausted."""
        try:
            validation_loss = self._get_validation_loss_()
        except StopIteration:
            self.validation_loader_it = iter(self.validation_loader)
            validation_loss = self._get_validation_loss_()
        return validation_loss

    def _get_gradients(self):
        """Return all parameter gradients flattened into one 1-D tensor."""
        gradients = []
        for p in self.model.parameters():
            if p.grad is None:
                continue
            gradients.append(p.grad.flatten())
        gradients = torch.cat(gradients, dim=0)
        return gradients

    def _get_momentum(self, gradients):
        """Update and return the bias-corrected Adam momenta for `gradients`."""
        self.t += 1
        self.m = self.beta1 * self.m + (1 - self.beta1) * gradients
        self.v = self.beta2 * self.v + (1 - self.beta2) * torch.square(gradients)
        bias_corrected_m = self.m / (1 - self.beta1 ** self.t)
        bias_corrected_v = self.v / (1 - self.beta2 ** self.t)
        return bias_corrected_m, bias_corrected_v

    def _get_adam_feature(self, learning_rate, m, v):
        """The Adam update vector lr * m / (sqrt(v) + eps)."""
        epsilon = 1.0e-8
        return torch.mul(learning_rate, m / (torch.sqrt(v) + epsilon))

    def _get_loss_features(self):
        """Discounted average/uncertainty of the log per-sample loss variance."""
        with torch.no_grad():
            loss_var = torch.log(torch.var(self.loss_batch))
            self.lossVarDiscountedAverage = (
                self.discount_factor * self.lossVarDiscountedAverage
                + (1 - self.discount_factor) * loss_var
            )
            self.lossVarUncertainty = (
                self.discount_factor * self.lossVarUncertainty
                + (1 - self.discount_factor)
                * (loss_var - self.lossVarDiscountedAverage) ** 2
            )
        return self.lossVarDiscountedAverage, self.lossVarUncertainty

    def _get_predictive_change_features(self, lr, m, v):
        """Discounted average/uncertainty of the predicted per-sample loss change."""
        batch_gradients = []
        for i, (name, param) in enumerate(self.model.named_parameters()):
            # grad_batch is attached by backpack's BatchGrad extension.
            grad_batch = param.grad_batch.reshape(
                self.current_batch_size, self.layer_sizes[i]
            )
            batch_gradients.append(grad_batch)
        batch_gradients = torch.cat(batch_gradients, dim=1)
        update_value = self._get_adam_feature(lr, m, v)
        # First-order predicted change of each sample's loss under the update.
        predictive_change = torch.log(
            torch.var(-1 * torch.matmul(batch_gradients, update_value))
        )
        self.predictiveChangeVarDiscountedAverage = (
            self.discount_factor * self.predictiveChangeVarDiscountedAverage
            + (1 - self.discount_factor) * predictive_change
        )
        self.predictiveChangeVarUncertainty = (
            self.discount_factor * self.predictiveChangeVarUncertainty
            + (1 - self.discount_factor)
            * (predictive_change - self.predictiveChangeVarDiscountedAverage) ** 2
        )
        return (
            self.predictiveChangeVarDiscountedAverage,
            self.predictiveChangeVarUncertainty,
        )
| dacbench/envs/sgd.py | 14,795 | Environment to control the learning rate of adam
Initialize SGD Env
Parameters
-------
config : objdict
Environment configuration
No additional cleanup necessary
Returns
-------
bool
Cleanup flag
Gather state description
Returns
-------
dict
Environment state
Render env in human mode
Parameters
----------
mode : str
Execution mode
Reset environment
Returns
-------
np.array
Environment state
Set rng seed
Parameters
----------
seed:
seed for rng
Execute environment step
Parameters
----------
action : list
action to execute
Returns
-------
np.array, float, bool, dict
state, reward, done, info
self.test_dataset = None self.test_loader = None Adam parameters self.test_dataset = datasets.MNIST('../data', train=False, transform=transform) self.test_loader = torch.utils.data.DataLoader(self.test_dataset, **train_dataloader_args) Adam parameters negative because it is the reward | 924 | en | 0.297289 |
from __future__ import print_function
import sys
import os
import subprocess
from .simsalabim import __version__, __copyright__
from . import add_quant_info as quant
from . import helpers
def main(argv):
    """Entry point: print version/invocation info, parse args, run Dinosaur."""
    print('dinosaur-adapter version %s\n%s' % (__version__, __copyright__))
    invocation = " ".join(map(str, sys.argv[1:]))
    print('Issued command:', os.path.basename(__file__) + " " + invocation)
    args, params = parseArgs()
    run_dinosaur(args.dinosaur_jar_path, args.mzml_fns, args.output_folder,
                 args.spectrum_output_format, params)
def parseArgs():
    """Parse the command line arguments.

    Returns
    -------
    (argparse.Namespace, dict)
        The parsed arguments and a parameter dict consumed by run_dinosaur().

    Exits with an error message when no input files are given, or when both
    --mzml_fns and --file_list_file are specified.
    """
    import argparse
    import re  # fix: was used below but never imported anywhere in the module
    apars = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    requiredNamed = apars.add_argument_group('required arguments')
    requiredNamed.add_argument('--dinosaur_jar_path', metavar="JAR", required=True,
        help='''Path to the Dinosaur .jar file.
             ''')
    apars.add_argument('--mzml_fns', default=None, metavar="M", nargs='*',
        help='''mzML file(s). To easily specify multiple files one can use wildcards, e.g. my_spectrum_files/*.mzML
             ''')
    apars.add_argument('--file_list_file', default=None, metavar="L",
        help='''Text file with paths to mzML files, one per line.
             ''')
    apars.add_argument('--output_folder', default="./dinosaur/", metavar='O',
        help='''Output folder.
             ''')
    apars.add_argument('--dinosaur_mem', default=8.0, metavar='M', type=float,
        help='''Memory for allocated for Dinosaur in GB.
             ''')
    apars.add_argument('--dinosaur_flags', default="", metavar='O',
        help='''Extra command line flags to pass to Dinosaur, as indicated in Dinosaur's help text.
             ''')
    apars.add_argument('--spectrum_output_format', default=None, metavar='F',
        help='''If you want updated spectrum files with the new MS1 features assigned to the MS2 spectra, set this to the desired output format (ms2, mgf or mzML).
             ''')
    apars.add_argument('--split_precursors',
        help='''for .mzML or .ms2 output this creates a new spectrum for each precursor, e.g.
                if spectrum with scan number 132 matches two precursors, we generate two spectra
                with scan numbers 13201 and 13202. This can be useful if your downstream
                analysis includes tools that do not support multiple precursors per spectrum,
                such as MSGF+. For MGF output this flag is always set, as it does not support
                multiple precursors per spectrum.
             ''',
        action='store_true')
    # ------------------------------------------------
    args = apars.parse_args()
    if not args.mzml_fns:
        if args.file_list_file and len(args.file_list_file) > 0:
            with open(args.file_list_file, 'r') as f:
                # Strip all whitespace from each line and drop empty lines.
                args.mzml_fns = list(filter(lambda x: len(x) > 0, map(lambda x: re.sub(r"[\n\t\s]*", "", x), f.read().splitlines())))
        else:
            sys.exit("No input mzML files specified. Use either --mzml_fns or --file_list_file.")
    elif args.file_list_file and len(args.file_list_file) > 0:
        sys.exit("Ambiguous mzML input. Use either --mzml_fns or --file_list_file, not both.")
    params = dict()
    params['splitPrecursors'] = args.split_precursors
    params['dinosaurMemory'] = args.dinosaur_mem
    params['dinosaurFlags'] = args.dinosaur_flags
    return args, params
def run_dinosaur(dinosaur_jar_path, mzml_fns, output_folder, spectrum_output_format, params):
    """Run Dinosaur on each mzML file, then map its MS1 features to the spectra.

    Existing feature/mapping files are reused rather than recomputed; the user
    is told to delete them to force a re-run.
    """
    dinosaur_binary = "java -Xmx%dM -jar %s --seed=1" % (int(params['dinosaurMemory']*1000), dinosaur_jar_path)
    helpers.createDir(output_folder)
    for mzml_fn in mzml_fns:
        base_name = helpers.getBase(helpers.getFileName(mzml_fn))
        feature_fn = os.path.join(output_folder, base_name + ".features.tsv")
        if os.path.isfile(feature_fn):
            print("Found dinosaur output file at %s, remove this file to re-run Dinosaur on this file" % (feature_fn))
        else:
            cmd_dinosaur = "%s --force --outDir=%s %s %s;" % (dinosaur_binary, output_folder, params['dinosaurFlags'], mzml_fn)
            helpers.executeCmd(cmd_dinosaur)
        # A dummy output path is used when no spectrum output was requested;
        # the dummy file is removed again after the mapping step.
        out_fn = os.path.join(output_folder, base_name + ".dummy.txt")
        if spectrum_output_format:
            out_fn = os.path.join(output_folder, base_name + ".recalibrated." + spectrum_output_format)
        params['specPrecMapFile'] = os.path.join(output_folder, base_name + ".feature_map.tsv")
        if os.path.isfile(params['specPrecMapFile']):
            print("Found dinosaur mapping file at %s, remove this file to re-run Dinosaur on this file" % (params['specPrecMapFile']))
        else:
            quant.add_accurate_precursors(feature_fn, mzml_fn, out_fn, params)
            if out_fn.endswith(".dummy.txt"):
                os.remove(out_fn)
# Allow the module to be executed directly as a script.
if __name__ == '__main__':
    main(sys.argv[1:])
| simsalabim/dinosaur_adapter.py | 5,225 | ------------------------------------------------ | 48 | en | 0.115767 |
# _*_ coding: utf-8 _*_
"""
util_urlfilter.py by xianhu
"""
import re
import pybloom_live
from .util_config import CONFIG_URLPATTERN_ALL
class UrlFilter(object):
    """
    class of UrlFilter, to filter url by regexs and (bloomfilter or set)
    """

    def __init__(self, black_patterns=(CONFIG_URLPATTERN_ALL,), white_patterns=(r"^http",), capacity=None):
        """
        constructor, use variable of BloomFilter if capacity else variable of set
        """
        self._re_black_list = [re.compile(p, flags=re.IGNORECASE) for p in black_patterns] if black_patterns else []
        self._re_white_list = [re.compile(p, flags=re.IGNORECASE) for p in white_patterns] if white_patterns else []
        if capacity:
            # scalable bloom filter: constant memory, small false-positive rate
            self._url_set = None
            self._bloom_filter = pybloom_live.ScalableBloomFilter(capacity, error_rate=0.001)
        else:
            # exact membership tracking with a plain set
            self._url_set = set()
            self._bloom_filter = None
        return

    def update(self, url_list):
        """
        update this urlfilter using url_list
        """
        if self._url_set is None:
            for url in url_list:
                self._bloom_filter.add(url)
        else:
            self._url_set.update(url_list)
        return

    def check(self, url):
        """
        check the url based on self._re_black_list and self._re_white_list
        """
        # any black-list hit rejects the url outright
        if any(regex.search(url) for regex in self._re_black_list):
            return False
        # any white-list hit accepts it
        if any(regex.search(url) for regex in self._re_white_list):
            return True
        # with a non-empty white list, everything unmatched is rejected
        return not self._re_white_list

    def check_and_add(self, url):
        """
        check the url to make sure that the url hasn't been fetched, and add url to urlfilter
        """
        if not self.check(url):
            return False
        if self._url_set is None:
            # BloomFilter.add returns True when the item was (probably) present
            return not self._bloom_filter.add(url)
        already_seen = url in self._url_set
        self._url_set.add(url)
        return not already_seen
| spider/utilities/util_urlfilter.py | 2,125 | class of UrlFilter, to filter url by regexs and (bloomfilter or set)
constructor, use variable of BloomFilter if capacity else variable of set
check the url based on self._re_black_list and self._re_white_list
check the url to make sure that the url hasn't been fetched, and add url to urlfilter
update this urlfilter using url_list
util_urlfilter.py by xianhu
_*_ coding: utf-8 _*_ if url in black_list, return False if url in white_list, return True | 453 | en | 0.764258 |
#!/usr/bin/env python
'''command long'''
import threading
import time, os
import math
from pymavlink import mavutil
from MAVProxy.modules.lib import mp_module
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
from threading import Thread
import mpl_toolkits.mplot3d.axes3d as p3
import matplotlib.animation as animation
class CmdlongModule(mp_module.MPModule):
    def __init__(self, mpstate):
        """Register the cmdlong console commands and initialise telemetry/plot state."""
        super(CmdlongModule, self).__init__(mpstate, "cmdlong")
        # Console commands: MAVLink long commands, flight-mode switches and
        # offboard setpoint controls.
        # NOTE(review): 'h' and 'yaw' handlers (self.h / self.yaw) are not
        # visible in this file section — confirm they are defined elsewhere.
        self.add_command('setspeed', self.cmd_do_change_speed, "do_change_speed")
        self.add_command('setyaw', self.cmd_condition_yaw, "condition_yaw")
        self.add_command('offboard', self.offboard_mode, "offboard")
        self.add_command('p_mode', self.position_mode, "p_mode")
        self.add_command('m_mode', self.manual_mode, "m_mode")
        self.add_command('a_mode', self.altitude_mode, "a_mode")
        self.add_command('takeoff2', self.cmd_takeoff_2, "takeoff2")
        self.add_command('takeoff3', self.takeoff_3, "takeoff3")
        self.add_command('music',self.music,"music")
        self.add_command('land2', self.land_2, "land2")
        self.add_command('fly', self.fly, "fly")
        self.add_command('x', self.x, "x")
        self.add_command('y', self.y, "y")
        self.add_command('z', self.z, "z")
        self.add_command('h', self.h, "h")
        self.add_command('yaw', self.yaw, "yaw")
        self.add_command('takeoff', self.cmd_takeoff, "takeoff")
        self.add_command('velocity', self.cmd_velocity, "velocity")
        self.add_command('position', self.cmd_position, "position")
        self.add_command('st', self.start_position_thread, "start_position_thread")
        self.add_command('attitude', self.cmd_attitude, "attitude")
        self.add_command('cammsg', self.cmd_cammsg, "cammsg")
        self.add_command('camctrlmsg', self.cmd_camctrlmsg, "camctrlmsg")
        self.add_command('posvel', self.cmd_posvel, "posvel")
        self.add_command('parachute', self.cmd_parachute, "parachute",
                         ['<enable|disable|release>'])
        self.add_command('long', self.cmd_long, "execute mavlink long command",
                         self.cmd_long_commands())
        # Running min/max of the range sensor, updated in mavlink_packet().
        self.dis_max = 0
        self.dis_min = 100
        self.dis_diff = self.dis_max - self.dis_min
        # Running min/max of the SVO position estimates.
        self.svo_x_max = 0
        self.svo_x_min = 0
        self.svo_y_max = 0
        self.svo_y_min = 0
        self.x_diff = self.svo_x_max - self.svo_x_min
        self.y_diff = self.svo_y_max - self.svo_y_min
        # Position history buffers used by the plotting helpers.
        self.list_x = []
        self.list_y = []
        self.list_z = []
        # Latest local-position estimate (from LOCAL_POSITION_NED messages).
        self.svo_x = 0
        self.svo_y = 0
        self.svo_z = 0
        # Optional live plot threads (disabled by default).
        #thread_obj = Thread(target = self.show_svo_2d)
        #thread_obj = Thread(target = self.show_svo)
        #thread_obj.setDaemon(True)
        #thread_obj.start()
def cmd_long_commands(self):
atts = dir(mavutil.mavlink)
atts = filter( lambda x : x.lower().startswith("mav_cmd"), atts)
ret = []
for att in atts:
ret.append(att)
ret.append(str(att[8:]))
return ret
def cmd_takeoff(self, args):
'''take off'''
if ( len(args) != 1):
print("Usage: takeoff ALTITUDE_IN_METERS")
return
if (len(args) == 1):
altitude = float(args[0])
print("Take Off started")
self.master.mav.command_long_send(
self.settings.target_system, # target_system
mavutil.mavlink.MAV_COMP_ID_SYSTEM_CONTROL, # target_component
mavutil.mavlink.MAV_CMD_NAV_TAKEOFF, # command
0, # confirmation
0, # param1
0, # param2
0, # param3
0, # param4
0, # param5
0, # param6
altitude) # param7
def cmd_parachute(self, args):
'''parachute control'''
usage = "Usage: parachute <enable|disable|release>"
if len(args) != 1:
print(usage)
return
cmds = {
'enable' : mavutil.mavlink.PARACHUTE_ENABLE,
'disable' : mavutil.mavlink.PARACHUTE_DISABLE,
'release' : mavutil.mavlink.PARACHUTE_RELEASE
}
if not args[0] in cmds:
print(usage)
return
cmd = cmds[args[0]]
self.master.mav.command_long_send(
self.settings.target_system, # target_system
0, # target_component
mavutil.mavlink.MAV_CMD_DO_PARACHUTE,
0,
cmd,
0, 0, 0, 0, 0, 0)
def cmd_camctrlmsg(self, args):
'''camctrlmsg'''
print("Sent DIGICAM_CONFIGURE CMD_LONG")
self.master.mav.command_long_send(
self.settings.target_system, # target_system
0, # target_component
mavutil.mavlink.MAV_CMD_DO_DIGICAM_CONFIGURE, # command
0, # confirmation
10, # param1
20, # param2
30, # param3
40, # param4
50, # param5
60, # param6
70) # param7
def cmd_cammsg(self, args):
'''cammsg'''
print("Sent DIGICAM_CONTROL CMD_LONG")
self.master.mav.command_long_send(
self.settings.target_system, # target_system
0, # target_component
mavutil.mavlink.MAV_CMD_DO_DIGICAM_CONTROL, # command
0, # confirmation
10, # param1
20, # param2
30, # param3
40, # param4
50, # param5
60, # param6
70) # param7
def cmd_do_change_speed(self, args):
'''speed value'''
if ( len(args) != 1):
print("Usage: speed SPEED_VALUE")
return
if (len(args) == 1):
speed = float(args[0])
print("SPEED %s" % (str(speed)))
self.master.mav.command_long_send(
self.settings.target_system, # target_system
mavutil.mavlink.MAV_COMP_ID_SYSTEM_CONTROL, # target_component
mavutil.mavlink.MAV_CMD_DO_CHANGE_SPEED, # command
0, # confirmation
0, # param1
speed, # param2 (Speed value)
0, # param3
0, # param4
0, # param5
0, # param6
0) # param7
def cmd_condition_yaw(self, args):
'''yaw angle angular_speed angle_mode'''
if ( len(args) != 3):
print("Usage: yaw ANGLE ANGULAR_SPEED MODE:[0 absolute / 1 relative]")
return
if (len(args) == 3):
angle = float(args[0])
angular_speed = float(args[1])
angle_mode = float(args[2])
print("ANGLE %s" % (str(angle)))
self.master.mav.command_long_send(
self.settings.target_system, # target_system
mavutil.mavlink.MAV_COMP_ID_SYSTEM_CONTROL, # target_component
mavutil.mavlink.MAV_CMD_CONDITION_YAW, # command
0, # confirmation
angle, # param1 (angle value)
angular_speed, # param2 (angular speed value)
0, # param3
angle_mode, # param4 (mode: 0->absolute / 1->relative)
0, # param5
0, # param6
0) # param7
def cmd_velocity(self, args):
'''velocity x-ms y-ms z-ms'''
if (len(args) != 3):
print("Usage: velocity x y z (m/s)")
return
if (len(args) == 3):
x_mps = float(args[0])
y_mps = float(args[1])
z_mps = float(args[2])
print("x:%f, y:%f, z:%f" % (x_mps, y_mps, z_mps))
self.master.mav.set_position_target_local_ned_send(
0, # system time in milliseconds
1, # target system
0, # target component
8, # coordinate frame MAV_FRAME_BODY_NED
455, # type mask (vel only)
0, 0, 0, # position x,y,z
x_mps, y_mps, z_mps, # velocity x,y,z
0, 0, 0, # accel x,y,z
0, 0) # yaw, yaw rate
def mavlink_packet(self, msg):
type = msg.get_type()
if type == 'DISTANCE_SENSOR':
#print "distance find\n"
#print isinstance(msg,subclass)
#print msg.current_distance
#print msg.__class__
#self.console.set_status('distance','distance %s' % msg.current_distance)
#print msg.current_distance
if self.dis_max < msg.current_distance:
self.dis_max = msg.current_distance
if self.dis_min > msg.current_distance:
self.dis_min = msg.current_distance
self.dis_diff = self.dis_max - self.dis_min
#self.msg.current_distance =
if type == 'SVO_POSITION_RAW':
#self.svo_x = msg.position_x
#self.svo_y = msg.position_y
#self.svo_z = msg.position_z
if self.svo_x_max < msg.position_x:
self.svo_x_max = msg.position_x
if self.svo_x_min > msg.position_x:
self.svo_x_min = msg.position_x
if self.svo_y_max < msg.position_y:
self.svo_y_max = msg.position_y
if self.svo_y_min > msg.position_y:
self.svo_y_min = msg.position_y
self.x_diff = self.svo_x_max - self.svo_x_min
self.y_diff = self.svo_y_max - self.svo_y_min
#print self.dis_max
#print self.dis_min
elif type == 'LOCAL_POSITION_NED':
self.console.set_status('position_ned_x','position_x %s' % msg.x)
self.svo_x = msg.x
#print type(self.svo_x)
#self.console.set_status('position_ned_y','position_y %s' % msg.y)
self.svo_y = msg.y
#print (svo_y)
#self.console.set_status('position_ned_z','position_ned %s' % msg.z)
self.svo_z = msg.z
    def show_svo_2d(self):
        """Open a live 2D matplotlib plot of the vehicle x/y position (blocking)."""
        fig = plt.figure()
        #self.ax = p3.Axes3D(fig)
        self.ax = fig.add_subplot(1, 1, 1)
        num = 0
        self.ax.set_xlabel('X')
        self.ax.set_ylabel('Y')
        self.ax.set_title('2D Test')
        self.ax.set_xlim([-1, 1])
        self.ax.set_ylim([-1, 1])
        # frame counter used by update_lines_2d to rescale the axes periodically
        self.num = 0
        #self.lineData = self.ax.scatter(1, 1, c = 'b', marker = '.')
        self.lineData, = self.ax.plot([],[])
        # NOTE(review): the FuncAnimation handle is only a local; matplotlib can
        # garbage-collect it — kept alive here solely because plt.show() blocks.
        line_ani = animation.FuncAnimation(fig, self.update_lines_2d,self.Gen_RandLine_2d,
                                   interval=100, blit=False)
        plt.show()
    def show_svo(self):
        """Open a live 3D matplotlib scatter of the vehicle position (blocking)."""
        fig = plt.figure()
        #self.ax = p3.Axes3D(fig)
        self.ax = fig.add_subplot(1, 1, 1, projection="3d")
        num = 0
        self.ax.set_xlabel('X')
        # NOTE(review): the two lines above are repeated below; harmless but redundant.
        num = 0
        self.ax.set_xlabel('X')
        self.ax.set_xlim3d([-1.0, 1.0])
        self.ax.set_ylabel('Y')
        self.ax.set_ylim3d([-1.0, 1.0])
        self.ax.set_zlabel('Z')
        self.ax.set_zlim3d([-1.0, 1.0])
        self.ax.set_title('3D Test')
        # frame counter used by update_lines to refresh the labels periodically
        self.num = 0
        #line_ani = animation.FuncAnimation(fig, self.update_lines,self.Gen_RandLine,
        #                          interval=10, blit=False)
        self.lineData = self.ax.scatter([1], [1], [1], c = 'b', marker = '.')
        # NOTE(review): local animation handle, see show_svo_2d.
        line_ani = animation.FuncAnimation(fig, self.update_lines,self.Gen_RandLine,
                                   interval=10, blit=False)
        plt.show()
    def data_stream(self):
        # Placeholder: no data-stream handling implemented yet.
        pass
    def Gen_RandLine_2d(self):
        """Generator feeding update_lines_2d with [x-history, y-history] lists."""
        # Keep a rolling window of roughly the last 200 samples.
        if len(self.list_x)<200:
            self.list_x.append(self.svo_x)
            self.list_y.append(self.svo_y)
            self.list_z.append(self.svo_z)
        else:
            self.list_x.append(self.svo_x)
            self.list_x = self.list_x[1:]
            self.list_y.append(self.svo_y)
            self.list_y = self.list_y[1:]
            self.list_z.append(self.svo_z)
            self.list_z = self.list_z[1:]
        # NOTE(review): the current sample is appended a second time here, so
        # every position ends up in the history twice — confirm this is intended.
        self.list_x.append(float(self.svo_x))
        self.list_y.append(float(self.svo_y))
        #self.list_z.append(float(self.svo_z))
        lineData = [self.list_x,self.list_y]
        yield lineData
    def update_lines_2d(self,data):
        """Animation callback: push the new x/y history into the 2D line plot."""
        self.lineData.set_xdata(data[0])
        self.lineData.set_ydata(data[1])
        self.num = self.num + 1
        # Every 100 frames rescale the axes to the current data range.
        if self.num == 100:
            #self.ax.cla()
            self.num = 0
            self.ax.set_xlim(min(data[0])-1, max(data[0])+1)
            self.ax.set_ylim(min(data[1])-1, max(data[1])+1)
        return self.lineData,
    def Gen_RandLine(self):
        """Generator feeding update_lines with the latest [x, y, z] sample.

        An earlier rolling-window implementation (as in Gen_RandLine_2d) was
        removed; only the single most recent position is yielded now.
        """
        list_x = self.svo_x
        list_y = self.svo_y
        list_z = self.svo_z
        #self.list_x.append(float(self.svo_x))
        #self.list_y.append(float(self.svo_y))
        #self.list_z.append(float(self.svo_z))
        # One-element lists so the scatter artist receives sequences.
        lineData = [[list_x],[list_y],[list_z]]
        yield lineData
    def update_lines(self,data):
        """Animation callback: move the 3D scatter point to the newest position."""
        #print "data",data
        self.lineData.set_offsets([(data[0], data[1])])
        self.lineData.set_3d_properties([data[2]], "z")
        self.num = self.num + 1
        # Every 200 frames refresh the labels and report the observed drift.
        if self.num == 200:
            #self.ax.cla()
            self.num = 0
            self.ax.set_xlabel('X')
            #self.ax.set_xlim3d([-1.0, 1.0])
            self.ax.set_ylabel('Y')
            #self.ax.set_ylim3d([-1.0, 1.0])
            self.ax.set_zlabel('Z')
            #self.ax.set_zlim3d([-1.0, 1.0])
            self.ax.set_title('3D Test')
            # NOTE: Python 2 print statements — this module targets Python 2.
            print "xdiff",self.x_diff
            print "ydiff",self.y_diff
        return self.lineData
    def position_mode(self,args):
        """Switch to POSCTL (custom mode 3) and reset the telemetry statistics."""
        print "position mode!!!!!!!!!!!!!!!!!"
        # clear the plotting history
        self.list_x = []
        self.list_y = []
        self.list_z = []
        #self.start_position_thread(1)
        time.sleep(0.5)
        #self.master.set_mode(221,6,0)
        # base mode 129 (custom-mode enabled + armed flag), custom mode 3
        self.master.set_mode(129,3,0)
        # reset the running min/max trackers
        self.dis_max = 0
        self.dis_min = 100
        self.svo_x_max = 0
        self.svo_x_min = 0
        self.svo_y_max = 0
        self.svo_y_min = 0
        self.x_diff = self.svo_x_max - self.svo_x_min
        self.y_diff = self.svo_y_max - self.svo_y_min
    def manual_mode(self,args):
        """Switch to MANUAL (custom mode 1); args[0] sets the z velocity setpoint."""
        print "manual mode!!!!!!!!!!!!!!!!!"
        print self.master.__class__
        #self.start_position_thread(1)
        #time.sleep(0.5)
        #self.master.set_mode(221,6,0)
        self.master.set_mode(129,1,0)
        # NOTE(review): requires an argument — calling 'm_mode' without one
        # raises IndexError.
        self.v_z = float(args[0])
    def altitude_mode(self,args):
        """Switch to ALTCTL (custom mode 2)."""
        print "altitude mode!!!!!!!!!!!!!!!!!"
        #self.start_position_thread(1)
        #time.sleep(0.5)
        #self.master.set_mode(221,6,0)
        self.master.set_mode(129,2,0)
        #self.v_z = float(370)
        #self.dis_max = 0
        #self.dis_min = 100
    def offboard_mode(self,args):
        """Start the offboard setpoint thread, then switch to OFFBOARD mode."""
        print "offboard!!!!!!!!!!!!!!!!!"
        #self.cmd_position_2(1)
        # The setpoint stream must already be running before OFFBOARD engages.
        self.start_offboard_thread(1)
        time.sleep(0.5)
        # base mode 221, custom mode 6 — OFFBOARD (PX4)
        self.master.set_mode(221,6,0)
        #self.master.set_mode(1,3,0)
def cmd_takeoff_2(self, args):
'''position z-m'''
if (len(args) != 1):
print("Usage: position z (meters)")
return
if (len(args) == 1):
# x_m = float(0)
# y_m = float(0)
z_m = float(args[0])
# print("x:%f, y:%f, z:%f" % (x_m, y_m, z_m))
self.master.mav.set_position_target_local_ned_send(
0, # system time in milliseconds
1, # target system
0, # target component
8, # coordinate frame MAV_FRAME_BODY_NED
5571, # type mask (pos only)
0, 0, z_m, # position x,y,z
0, 0, 0, # velocity x,y,z
0, 0, 0, # accel x,y,z
0, 0) # yaw, yaw rate
def takeoff_3(self,args):
self.type_mask = 5571
#self.type_mask = 3576
self.x_m = float(0)
self.y_m = float(0)
self.z_m = float(1.5)
self.v_x = float(0)
self.v_y = float(0)
self.v_z = float(0)
#self.cmd_position([1,1,1])
    def music(self,args):
        """Send a COMMAND_LONG with command id 0, confirmation 1 and all
        params zero, then print the target system id.

        NOTE(review): command id 0 is not obviously meaningful; the name
        suggests a firmware-specific buzzer trigger -- confirm.
        """
        self.master.mav.command_long_send(
                self.settings.target_system,  # target_system
                1, # target_component
                0, # command
                1, # confirmation
                0, # param1
                0, # param2 (Speed value)
                0, # param3
                0, # param4
                0, # param5
                0, # param6
                0) # param7
        print self.settings.target_system
        print mavutil.mavlink.MAV_COMP_ID_SYSTEM_CONTROL
    def land_2(self,args):
        """Zero all velocity setpoints and select type mask 9671
        (presumably the land/idle mask consumed by the setpoint
        thread -- confirm)."""
        self.type_mask = 9671
        self.v_x = float(0)
        self.v_y = float(0)
        self.v_z = float(0)
    #def h(self,args):
    #    self.type_mask = 1479
    #    self.v_x = float(0)
    #    self.v_y = float(0)
    #    self.v_z = float(0)
def x(self,args):
#print self.master.flightmode
self.type_mask = 1479
if self.master.flightmode == "POSCTL":
#print self.master
self.v_x = float(args[0])*0.5
elif self.master.flightmode == "ALTCTL":
#print self.master
self.v_x = float(args[0])*1
elif self.master.flightmode == "MANUAL":
#print self.master
self.v_x = float(args[0])*1
#self.v_z = -4
self.button = 1
def y(self,args):
self.type_mask = 1479
if self.master.flightmode == "POSCTL":
self.v_y = float(args[0])*0.5
elif self.master.flightmode == "ALTCTL":
self.v_y = float(args[0])*1
elif self.master.flightmode == "MANUAL":
self.v_y = float(args[0])*1
#self.v_z = -4
self.button = 1
def z(self,args):
self.type_mask = 1479
#self.v_z = float(args[0])
if self.master.flightmode == "POSCTL":
self.v_z = self.v_z + int(args[0])
elif self.master.flightmode == "ALTCTL":
self.v_z = self.v_z + int(args[0])
elif self.master.flightmode == "MANUAL":
self.v_z = self.v_z + int(args[0])*0.1
self.button = 1
def yaw(self,args):
self.type_mask = 1479
#self.yaw_rate = float(float(args[0])*(math.pi/6.0))
self.yaw_rate = float(args[0])*1.5
self.button = 1
#time.sleep(0.5)
#self.yaw_rate = float(0)
def h(self,args):
self.type_mask = 1479
self.v_x = float(0)
self.v_y = float(0)
if self.master.flightmode == "POSCTL":
self.v_z = float(args[0])
elif self.master.flightmode == "ALTCTL":
self.v_z = float(args[0])
elif self.master.flightmode == "MANUAL":
pass
self.yaw_rate = float(0)
self.button = 0
    def fly(self,args):
        """Fly a square pattern by toggling the velocity setpoints that
        the background setpoint thread consumes, 2 s per leg.

        NOTE(review): blocks the calling thread for ~8 s.
        """
        self.type_mask = 1479
        self.v_x = float(1)
        time.sleep(2)
        self.v_x = float(0)
        self.v_y = float(1)
        time.sleep(2)
        self.v_y = float(0)
        self.v_x = float(-1)
        time.sleep(2)
        self.v_x = float(0)
        self.v_y = float(-1)
        time.sleep(2)
        self.v_y = float(0)
def start_position_thread(self,args):
thread_obj = threading.Thread(target=self._cmd_position_2)
thread_obj.setDaemon(True)
thread_obj.start()
#pass
    def start_offboard_thread(self,args):
        """Spawn the background offboard setpoint sender thread.

        NOTE(review): unlike start_position_thread this thread is not a
        daemon; its endless send loop will keep the process alive on
        shutdown -- confirm that is intended.
        """
        thread_obj = threading.Thread(target=self._cmd_position_2_offboard)
        thread_obj.start()
    def _cmd_position_2_offboard(self):
        """Endless 20 Hz loop sending SET_POSITION_TARGET_LOCAL_NED
        setpoints built from the current self.* setpoint attributes.
        Runs on the thread started by start_offboard_thread(); other
        methods steer the vehicle by mutating those attributes."""
        #if (len(args) != 3):
        #    print("Usage: position x y z (meters)")
        #    return
        #if (len(args) == 3):
        # Initialize to an idle setpoint before entering the loop.
        self.type_mask = 17863
        self.x_m = float(0)
        self.y_m = float(0)
        self.z_m = float(0)
        self.v_x = float(0)
        self.v_y = float(0)
        self.v_z = float(0)
        self.yaw_rate = float(0)
        #print("x:%f, y:%f, z:%f" % (x_m, y_m, z_m))
        while 1:
            time.sleep(0.05)
            #print "type_mask:%s\n" % self.type_mask
            #print "v_x:%s\n" % self.v_x
            #print "v_y:%s\n" % self.v_y
            #print "v_z:%s\n" % self.v_z
            #print "z_m:%s\n" % self.z_m
            #print "send idle"
            self.master.mav.set_position_target_local_ned_send(
                0, # system time in milliseconds
                1, # target system
                0, # target component
                8, # coordinate frame MAV_FRAME_BODY_NED
                self.type_mask, # type mask (pos only) 42707
                self.x_m, self.y_m, self.z_m, # position x,y,z
                self.v_x, self.v_y, self.v_z, # velocity x,y,z
                0, 0, 0, # accel x,y,z
                0, self.yaw_rate) # yaw, yaw rate
    def _cmd_position_2(self):
        print "position2"
        '''position x-m y-m z-m'''
        # Endless 20 Hz loop translating the self.v_* / yaw_rate / button
        # attributes into MANUAL_CONTROL (RC-stick style) messages.
        # Runs on the daemon thread started by start_position_thread().
        #self.type_mask = 17863
        #self.x_m = float(0)
        #self.y_m = float(0)
        #self.z_m = float(0)
        self.v_x = 0
        self.v_y = 0
        self.v_z = 0
        self.yaw_rate = 0
        self.button = 0
        #print("x:%f, y:%f, z:%f" % (x_m, y_m, z_m))
        i = 0
        while 1:
            time.sleep(0.05)
            #print "type_mask:%s\n" % self.type_mask
            #print "v_x:%s\n" % self.v_x
            #print "v_y:%s\n" % self.v_y
            #print "v_z:%s\n" % self.v_z
            #print "z_m:%s\n" % self.z_m
            #print "send idle"
            self.master.mav.manual_control_send(self.master.target_system,
                                self.v_x, self.v_y,
                                self.v_z, self.yaw_rate,
                                self.button)
            i = i + 1
            # Dead debug branch ('if 0:'), kept for reference.
            if 0:
            #if i == 100:
                print "x",(int(self.v_x))
                print "y",(int(self.v_y))
                print "z",(int(self.v_z))
                print "yaw",(int(self.yaw_rate))
                print "dis_diff",(self.dis_diff)
                print "x_diff",(self.x_diff)
                print "y_diff",(self.y_diff)
                print "button",self.button
                print "target",(self.master.target_system)
                i = 0
def cmd_position3(self, args):
'''position x-m y-m z-m'''
if (len(args) != 3):
print("Usage: position x y z (meters)")
return
if (len(args) == 3):
x_m = float(args[0])
y_m = float(args[1])
z_m = float(args[2])
print("x:%f, y:%f, z:%f" % (x_m, y_m, z_m))
self.master.mav.set_position_target_local_ned_send(
0, # system time in milliseconds
1, # target system
0, # target component
8, # coordinate frame MAV_FRAME_BODY_NED
1479, # type mask (pos only)
0, 0, 0,# position x,y,z
x_m, y_m, z_m, # velocity x,y,z
0, 0, 0, # accel x,y,z
0, 0) # yaw, yaw rate
def cmd_position(self, args):
'''position x-m y-m z-m'''
if (len(args) != 3):
print("Usage: position x y z (meters)")
return
if (len(args) == 3):
x_m = float(args[0])
y_m = float(args[1])
z_m = float(args[2])
print("x:%f, y:%f, z:%f" % (x_m, y_m, z_m))
self.master.mav.set_position_target_local_ned_send(
0, # system time in milliseconds
1, # target system
0, # target component
8, # coordinate frame MAV_FRAME_BODY_NED
3576, # type mask (pos only)
x_m, y_m, z_m, # position x,y,z
0, 0, 0, # velocity x,y,z
0, 0, 0, # accel x,y,z
0, 0) # yaw, yaw rate
    def cmd_attitude(self, args):
        '''attitude q0 q1 q2 q3 thrust'''
        # Send SET_ATTITUDE_TARGET: quaternion attitude plus thrust, with
        # body rates masked out (type mask 63).
        if len(args) != 5:
            print("Usage: attitude q0 q1 q2 q3 thrust (0~1)")
            return
        if len(args) == 5:
            q0 = float(args[0])
            q1 = float(args[1])
            q2 = float(args[2])
            q3 = float(args[3])
            thrust = float(args[4])
            att_target = [q0, q1, q2, q3]
            print("q0:%.3f, q1:%.3f, q2:%.3f q3:%.3f thrust:%.2f" % (q0, q1, q2, q3, thrust))
            self.master.mav.set_attitude_target_send(
                          0, # system time in milliseconds
                          1, # target system
                          0, # target component
                          63, # type mask (ignore all except attitude + thrust)
                          att_target, # quaternion attitude
                          0, # body roll rate
                          0, # body pich rate
                          0, # body yaw rate
                          thrust) # thrust
    def cmd_posvel(self, args):
        '''posvel mapclick vN vE vD'''
        # Send a global-int position/velocity setpoint. The target
        # lat/lon comes from the map module's last click, if available;
        # the ignore mask is narrowed as more fields become valid.
        ignoremask = 511
        latlon = None
        try:
            latlon = self.module('map').click_position
        except Exception:
            pass
        if latlon is None:
            print "set latlon to zeros"
            latlon = [0, 0]
        else:
            # Position fields are valid: clear their ignore bits.
            ignoremask = ignoremask & 504
            print "found latlon", ignoremask
        vN = 0
        vE = 0
        vD = 0
        if (len(args) == 3):
            # Velocity fields provided: clear their ignore bits too.
            vN = float(args[0])
            vE = float(args[1])
            vD = float(args[2])
            ignoremask = ignoremask & 455
        print "ignoremask",ignoremask
        print latlon
        self.master.mav.set_position_target_global_int_send(
            0, # system time in ms
            1, # target system
            0, # target component
            mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT_INT,
            ignoremask, # ignore
            int(latlon[0] * 1e7),
            int(latlon[1] * 1e7),
            10,
            vN, vE, vD, # velocity
            0, 0, 0, # accel x,y,z
            0, 0) # yaw, yaw rate
def cmd_long(self, args):
'''execute supplied command long'''
if len(args) < 1:
print("Usage: long <command> [arg1] [arg2]...")
return
command = None
if args[0].isdigit():
command = int(args[0])
else:
try:
command = eval("mavutil.mavlink." + args[0])
except AttributeError as e:
try:
command = eval("mavutil.mavlink.MAV_CMD_" + args[0])
except AttributeError as e:
pass
if command is None:
print("Unknown command long ({0})".format(args[0]))
return
floating_args = [ float(x) for x in args[1:] ]
while len(floating_args) < 7:
floating_args.append(float(0))
self.master.mav.command_long_send(self.settings.target_system,
self.settings.target_component,
command,
0,
*floating_args)
    def init(mpstate):
        '''initialise module'''
        # MAVProxy module entry point: build and return the module object.
        # NOTE(review): takes no `self` although written at method level;
        # presumably intended as a module-level function -- confirm indent.
        return CmdlongModule(mpstate)
| pycalc/MAVProxy/modules/mavproxy_cmdlong.py | 30,985 | !/usr/bin/env pythonthread_obj = Thread(target = self.show_svo_2d)thread_obj = Thread(target = self.show_svo)thread_obj.setDaemon(True)thread_obj.start() target_system target_component command confirmation param1 param2 param3 param4 param5 param6 param7 target_system target_component target_system target_component command confirmation param1 param2 param3 param4 param5 param6 param7 target_system target_component command confirmation param1 param2 param3 param4 param5 param6 param7 target_system target_component command confirmation param1 param2 (Speed value) param3 param4 param5 param6 param7 target_system target_component command confirmation param1 (angle value) param2 (angular speed value) param3 param4 (mode: 0->absolute / 1->relative) param5 param6 param7 system time in milliseconds target system target component coordinate frame MAV_FRAME_BODY_NED type mask (vel only) position x,y,z velocity x,y,z accel x,y,z yaw, yaw rateprint "distance find\n"print isinstance(msg,subclass)print msg.current_distanceprint msg.__class__self.console.set_status('distance','distance %s' % msg.current_distance)print msg.current_distanceself.msg.current_distance = self.svo_x = msg.position_xself.svo_y = msg.position_yself.svo_z = msg.position_zprint self.dis_maxprint self.dis_minprint type(self.svo_x)self.console.set_status('position_ned_y','position_y %s' % msg.y)print (svo_y)self.console.set_status('position_ned_z','position_ned %s' % msg.z)self.ax = p3.Axes3D(fig)self.lineData = self.ax.scatter(1, 1, c = 'b', marker = '.')self.ax = p3.Axes3D(fig)line_ani = animation.FuncAnimation(fig, self.update_lines,self.Gen_RandLine, interval=10, blit=False)for i in range(2):list_x = self.svo_xlist_y = self.svo_yself.list_z.append(float(self.svo_z))lineData = [list_x,list_y]print type(list_x)print lineDatatime.sleep(0.02)self.ax.set_zlim(min(data[2]), max(data[2]))lineData = [self.list_x,self.list_y,self.list_z]print 
"data",datalineData = self.ax.scatter(data[0], data[1], data[2], c = 'b', marker = '.')self.lineData.set_data([(data[0], data[1])])self.ax.set_xlim(min(data[0]), max(data[0]))self.ax.set_ylim(min(data[1]), max(data[1]))self.ax.cla()print self.numfor i in range(2):self.list_x.append(float(self.svo_x))self.list_y.append(float(self.svo_y))self.list_z.append(float(self.svo_z))lineData = [self.list_x,self.list_y,self.list_z]print type(list_x)print lineDataself.ax.set_xlim(min(data[0]), max(data[0]))self.ax.set_ylim(min(data[1]), max(data[1]))self.ax.set_zlim(min(data[2]), max(data[2]))lineData = [self.list_x,self.list_y,self.list_z]print "data",datalineData = self.ax.scatter(data[0], data[1], data[2], c = 'b', marker = '.')self.lineData.set_data([data[0], data[1]])self.ax.cla()print self.numself.ax.set_xlim3d([-1.0, 1.0])self.ax.set_ylim3d([-1.0, 1.0])self.ax.set_zlim3d([-1.0, 1.0])lineData = ax.scatter(data[0], data[1], data[2], c = 'b', marker = '.')plt.pause(0.01)ax = p3.Axes3D(fig)self.start_position_thread(1)self.master.set_mode(221,6,0)self.start_position_thread(1)time.sleep(0.5)self.master.set_mode(221,6,0)self.start_position_thread(1)time.sleep(0.5)self.master.set_mode(221,6,0)self.v_z = float(370)self.dis_max = 0self.dis_min = 100self.cmd_position_2(1)self.master.set_mode(1,3,0) x_m = float(0) y_m = float(0) print("x:%f, y:%f, z:%f" % (x_m, y_m, z_m)) system time in milliseconds target system target component coordinate frame MAV_FRAME_BODY_NED type mask (pos only) position x,y,z velocity x,y,z accel x,y,z yaw, yaw rateself.type_mask = 3576self.cmd_position([1,1,1]) target_system target_component command confirmation param1 param2 (Speed value) param3 param4 param5 param6 param7def h(self,args): self.type_mask = 1479 self.v_x = float(0) self.v_y = float(0) self.v_z = float(0)print self.master.flightmodeprint self.masterprint self.masterprint self.masterself.v_z = -4self.v_z = -4self.v_z = float(args[0])self.yaw_rate = 
float(float(args[0])*(math.pi/6.0))time.sleep(0.5)self.yaw_rate = float(0)passif (len(args) != 3): print("Usage: position x y z (meters)") returnif (len(args) == 3):print("x:%f, y:%f, z:%f" % (x_m, y_m, z_m))print "type_mask:%s\n" % self.type_maskprint "v_x:%s\n" % self.v_xprint "v_y:%s\n" % self.v_yprint "v_z:%s\n" % self.v_zprint "z_m:%s\n" % self.z_mprint "send idle" system time in milliseconds target system target component coordinate frame MAV_FRAME_BODY_NED type mask (pos only) 42707 position x,y,z velocity x,y,z accel x,y,z yaw, yaw rateif (len(args) != 3): print("Usage: position x y z (meters)") returnif (len(args) == 3):self.type_mask = 17863self.x_m = float(0)self.y_m = float(0)self.z_m = float(0)print("x:%f, y:%f, z:%f" % (x_m, y_m, z_m))print "type_mask:%s\n" % self.type_maskprint "v_x:%s\n" % self.v_xprint "v_y:%s\n" % self.v_yprint "v_z:%s\n" % self.v_zprint "z_m:%s\n" % self.z_mprint "send idle"if i == 100: system time in milliseconds target system target component coordinate frame MAV_FRAME_BODY_NED type mask (pos only) position x,y,z velocity x,y,z accel x,y,z yaw, yaw rate system time in milliseconds target system target component coordinate frame MAV_FRAME_BODY_NED type mask (pos only) position x,y,z velocity x,y,z accel x,y,z yaw, yaw rate system time in milliseconds target system target component type mask (ignore all except attitude + thrust) quaternion attitude body roll rate body pich rate body yaw rate thrust system time in ms target system target component ignore velocity accel x,y,z yaw, yaw rate | 5,499 | en | 0.260864 |
""" This module is intended to extend functionality of the code provided by original authors.
The process is as follows:
1. User has to provide source root path containing (possibly nested) folders with dicom files
2. The program will recreate the structure in the destination root path and anonymize all
dicom files.
"""
import argparse
import json
import logging
import logging.config
import random
from pathlib import Path
from typing import Optional
import pydicom
from dicomanonymizer.anonym_state import AnonState
from dicomanonymizer.dicom_utils import fix_exposure
from dicomanonymizer.simpledicomanonymizer import (
anonymize_dicom_file,
initialize_actions,
)
from dicomanonymizer.utils import (
LOGS_PATH,
PROJ_ROOT,
ActionsDict,
Path_Str,
get_dirs,
to_Path,
try_valid_dir,
)
# setup logging (create dirs, if it is first time)
LOGS_PATH.mkdir(parents=True, exist_ok=True)
# File-based logging config; the log file location is injected as an
# interpolation default into logging.ini.
logging.config.fileConfig(
    PROJ_ROOT / "dicomanonymizer/config/logging.ini",
    defaults={"logfilename": (LOGS_PATH / "file.log").as_posix()},
    disable_existing_loggers=False,
)
logger = logging.getLogger(__name__)
# Per-user cache directory holding the anonymizer's persistent state.
_STATE_PATH = Path.home() / ".dicomanonymizer/cache"
_STATE_PATH.mkdir(parents=True, exist_ok=True)
def get_extra_rules(
    use_extra: bool,
    extra_json_path: Path_Str,
) -> Optional[ActionsDict]:
    """Load custom (project level/user level) anonymization rules as a
    mapping of tags -> action function.

    Args:
        use_extra (bool): If use extra rules.
        extra_json_path (Path_Str): Path to extra rules json file.
        It should be flat json with action as a key and list of tags as value.
    Returns:
        Optional[ActionsDict]: extra rules mapping (tags -> action function)
    """
    if not use_extra:
        # Customization disabled: only the standard profile applies.
        return None
    # Default or user-provided path to the extra-rules json file.
    with open(extra_json_path, "r") as fin:
        raw_rules = json.load(fin)
    # json yields lists; the action machinery expects hashable tag tuples.
    converted = {
        action: [tuple(tag) for tag in tag_list]
        for action, tag_list in raw_rules.items()
    }
    return initialize_actions(converted)
def anonymize_dicom_folder(
    in_path: Path_Str, out_path: Path_Str, debug: bool = False, **kwargs
):
    """Anonymize dicom files in `in_path`, if `in_path` doesn't
    contain dicom files, will do nothing. Debug == True will do
    sort of dry run to check if all good for the large data storages
    Args:
        in_path (Path_Str): path to the folder containing dicom files
        out_path (Path_Str): path to the folder there anonymized copies
        will be saved
        debug (bool): if true, will do a "dry" run (a single random file)
        **kwargs: forwarded to anonymize_dicom_file (e.g. callbacks,
        extra anonymization rules)
    """
    # check and prepare
    in_path = to_Path(in_path)
    try_valid_dir(in_path)
    out_path = to_Path(out_path)
    out_path.mkdir(parents=True, exist_ok=True)
    logger.info(f"Processing: {in_path}")
    # work itself
    in_files = [p for p in in_path.iterdir() if p.is_file()]
    if not in_files:
        logger.info(f"Folder {in_path} doesn't have dicom files, skip.")
        return

    def _anonymize_one(f_in):
        # Log the offending file before re-raising so failures are traceable.
        f_out = out_path / f_in.name
        try:
            anonymize_dicom_file(f_in, f_out, **kwargs)
        except Exception as e:
            logger.info(f_in)
            logger.exception(e)
            raise

    if debug:
        # Dry run: anonymize just one randomly chosen file.
        # BUGFIX: kwargs are now forwarded on this path too; previously the
        # debug run silently dropped callbacks/extra rules.
        _anonymize_one(random.choice(in_files))
    else:
        for f_in in in_files:
            _anonymize_one(f_in)
def anonymize_root_folder(
    in_root: Path_Str,
    out_root: Path_Str,
    **kwargs,
):
    """The function will get all nested folders from `in_root`
    and perform anonymization of all folders containing dicom-files.
    Will recreate the `in_root` folders structure in the `out_root`.

    Progress is cached in `_STATE_PATH`; folders already present in the
    cache are skipped. Tag names seen during the run are compared with
    the previously cached tags, and any new tags are logged as warnings.

    Args:
        in_root (Path_Str): source root folder (presumably has
        some dicom-files inside, maybe nested)
        out_root (Path_Str): destination root folder, will create
        if not exists
        **kwargs: forwarded to anonymize_dicom_folder
    """
    in_root = to_Path(in_root)
    try_valid_dir(in_root)
    out_root = to_Path(out_root)
    out_root.mkdir(parents=True, exist_ok=True)
    in_dirs = get_dirs(in_root)
    state = AnonState(_STATE_PATH)
    state.init_state()
    state.load_state()

    def get_tags_callback(dataset: pydicom.Dataset):
        # Record every tag name seen, for the new-tag report below.
        state.tag_counter.update(dataset.dir())

    logger.info(
        "Processed paths will be added to the cache, if cache exist and has some paths included, they will be skipped"
    )
    logger.info(
        f"if, you need to process data again delete files {_STATE_PATH}, please"
    )
    # Process all folders; the finally-block persists state even if a
    # folder fails, so completed work is never lost.
    try:
        for in_d in in_dirs:
            rel_path = in_d.relative_to(in_root)
            if str(rel_path) in state.visited_folders:
                logger.info(f"{in_d} path is in cache, skipping")
                continue
            out_d = out_root / rel_path
            anonymize_dicom_folder(
                in_d, out_d, ds_callback=get_tags_callback, **kwargs
            )
            # update state
            state.visited_folders[str(rel_path)] = True
    finally:
        # before saving updated state let's flag tags not seen previously
        prev_state = AnonState(_STATE_PATH)
        prev_state.init_state()
        prev_state.load_state()
        new_tags = set(state.tag_counter.keys()).difference(
            prev_state.tag_counter.keys()
        )
        if new_tags:
            logger.warning(
                f"During the anonymization new tags: {new_tags} were present"
            )
        else:
            logger.info("No new tags were present")
        # now we can save the current state
        state.save_state()
# Add CLI args
parser = argparse.ArgumentParser(description="Batch dicom-anonymization CLI")
# Whole nested tree ("batch") or a single folder ("folder").
parser.add_argument(
    "--type",
    type=str,
    choices=["batch", "folder"],
    default="batch",
    help="Process only one folder - folder or all nested folders - batch, default = batch",
)
# Optional path to a custom rules file; empty means the bundled default.
parser.add_argument(
    "--extra-rules",
    default="",
    help="Path to json file defining extra rules for additional tags. Defalult in project.",
)
parser.add_argument(
    "--no-extra",
    action="store_true",
    help="Only use a rules from DICOM-standard basic de-id profile",
)
parser.add_argument(
    "--debug", action="store_true", help="Will do a dry run (one file per folder)"
)
# Positional source and destination roots.
parser.add_argument(
    "src",
    type=str,
    help="Absolute path to the folder containing dicom-files or nested folders with dicom-files",
)
parser.add_argument(
    "dst",
    type=str,
    help="Absolute path to the folder where to save anonymized copy of src",
)
def main():
    """CLI entry point: parse args, build the rule set, and run the
    requested anonymization job ("batch" tree or single "folder")."""
    # parse args
    args = parser.parse_args()
    in_path = Path(args.src)
    out_path = Path(args.dst)
    debug = args.debug
    path = args.extra_rules
    if not path:
        # Fall back to the project's bundled extra-rules file.
        path = PROJ_ROOT / "dicomanonymizer/resources/extra_rules.json"
    extra_rules = get_extra_rules(use_extra=not args.no_extra, extra_json_path=path)
    # fix known issue with dicom
    fix_exposure()
    msg = f"""
    Start a job: {args.type}, debug set to {args.debug}
    Will anonymize data at: {in_path} and save to {out_path}
    """
    logger.info(msg)
    # anonymize
    if args.type == "batch":
        anonymize_root_folder(
            in_path, out_path, debug=debug, extra_anonymization_rules=extra_rules
        )
    elif args.type == "folder":
        anonymize_dicom_folder(
            in_path, out_path, debug=debug, extra_anonymization_rules=extra_rules
        )
    logger.info("Well done!")
# Script entry point: only run when executed directly, not on import.
if __name__ == "__main__":
    main()
| dicomanonymizer/batch_anonymizer.py | 8,067 | Anonymize dicom files in `in_path`, if `in_path` doesn't
contain dicom files, will do nothing. Debug == True will do
sort of dry run to check if all good for the large data storages
Args:
in_path (Path_Str): path to the folder containing dicom files
out_path (Path_Str): path to the folder there anonymized copies
will be saved
debuf (bool): if true, will do a "dry" run
The fuction will get all nested folders from `in_root`
and perform anonymization of all folders containg dicom-files
Will recreate the `in_root` folders structure in the `out_root`
Args:
in_root (Path_Str): source root folder (presumably has
some dicom-files inide, maybe nested)
out_root (Path_Str): destination root folder, will create
if not exists
Helper to provide custom (project level/user level) anonymization
rules as a mapping of tags -> action function.
Args:
use_extra (bool): If use extra rules.
extra_json_path (Path_Str): Path to extra rules json file.
It should be flat json with action as a key and list of tags as value.
Returns:
Optional[ActionsDict]: extra rules mapping (tags -> action function)
This module is intended to extend functionality of the code provided by original authors.
The process is as follows:
1. User has to provide source root path containing (possibly nested) folders with dicom files
2. The program will recreate the structure in the destination root path and anonymize all
dicom files.
setup logging (create dirs, if it is first time) Define the actions dict for additional tags (customization) default or user provided path to extra rules json file check and prepare work itself anonymize just one file will try to process all folders, if exception will dump state before raising update state before saving updated state let's flag tags not seen previously now we can save the current state Add CLI args parse args fix known issue with dicom anonymize | 1,922 | en | 0.802947 |
import pickle
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
# Load the training data.
df = pd.read_csv('Train.csv')

# check for categorical attributes (object dtype), excluding identifiers
cat_col = [x for x in df.dtypes.index if df.dtypes[x] == 'object']
cat_col.remove('Item_Identifier')
cat_col.remove('Outlet_Identifier')

# Impute missing Item_Weight with the per-item mean, falling back to the
# global mean for items that never appear with a known weight.
item_weight_mean = df.pivot_table(values="Item_Weight", index='Item_Identifier')
miss_bool = df['Item_Weight'].isnull()
for i, item in enumerate(df['Item_Identifier']):
    if miss_bool[i]:
        # BUGFIX: membership must be tested against the pivot's *index*;
        # `item in item_weight_mean` checked column names and was always
        # False, so every missing weight got the global mean.
        if item in item_weight_mean.index:
            # BUGFIX: .loc assignment replaces chained indexing
            # (df['col'][i] = ...), which may write to a copy.
            df.loc[i, 'Item_Weight'] = item_weight_mean.loc[item]['Item_Weight']
        else:
            df.loc[i, 'Item_Weight'] = np.mean(df['Item_Weight'])

# Impute missing Outlet_Size with the per-Outlet_Type mode.
outlet_size_mode = df.pivot_table(values='Outlet_Size', columns='Outlet_Type', aggfunc=(lambda x: x.mode()[0]))
miss_bool = df['Outlet_Size'].isnull()
df.loc[miss_bool, 'Outlet_Size'] = df.loc[miss_bool, 'Outlet_Type'].apply(lambda x: outlet_size_mode[x])

# replace zero visibility (a placeholder) with the column mean
# BUGFIX: assign the result instead of inplace-replace on a .loc slice,
# which may operate on a copy and not write back.
df['Item_Visibility'] = df['Item_Visibility'].replace(0, df['Item_Visibility'].mean())

# combine inconsistent fat-content labels
df['Item_Fat_Content'] = df['Item_Fat_Content'].replace({'LF':'Low Fat', 'reg':'Regular', 'low fat':'Low Fat'})
df['Item_Fat_Content'].value_counts()

# Creation of new attributes: broad category from the identifier prefix.
df['New_Item_Type'] = df['Item_Identifier'].apply(lambda x: x[:2])
df['New_Item_Type'] = df['New_Item_Type'].map({'FD':'Food', 'NC':'Non-Consumable', 'DR':'Drinks'})
df.loc[df['New_Item_Type']=='Non-Consumable', 'Item_Fat_Content'] = 'Non-Edible'

# Outlet age relative to 2013 (dataset collection year).
df['Outlet_Years'] = 2013 - df['Outlet_Establishment_Year']

# Label-encode the outlet id and the categorical columns.
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
df['Outlet'] = le.fit_transform(df['Outlet_Identifier'])
cat_col = ['Item_Fat_Content', 'Item_Type', 'Outlet_Size', 'Outlet_Location_Type', 'Outlet_Type', 'New_Item_Type']
for col in cat_col:
    df[col] = le.fit_transform(df[col])
#Input Split
# Features: drop identifiers, the raw year, and the target column.
X = df.drop(columns=['Outlet_Establishment_Year', 'Item_Identifier', 'Outlet_Identifier', 'Item_Outlet_Sales'])
Y = df['Item_Outlet_Sales']
#Model Training
from sklearn.model_selection import cross_val_score
from sklearn.metrics import mean_squared_error
def train(model, X, Y):
    """Fit `model` on (X, Y) in place and return its cross-validated MSE.

    Returns:
        float: absolute mean of the 5-fold negative-MSE CV scores.
    """
    # train the model
    model.fit(X, Y)
    # perform cross-validation; sklearn reports negative MSE, so take |mean|
    cv_score = cross_val_score(model, X, Y, scoring='neg_mean_squared_error', cv=5)
    cv_score = np.abs(np.mean(cv_score))
    # BUGFIX: previously the score was computed and discarded (and an
    # unused model.predict(X) call wasted work); return it instead.
    return cv_score
# Train a random-forest regressor and rank feature importances.
from sklearn.ensemble import RandomForestRegressor
model = RandomForestRegressor()
train(model, X, Y)
coef = pd.Series(model.feature_importances_, X.columns).sort_values(ascending=False)
# Persist the fitted model. BUGFIX: use a context manager so the file
# handle is flushed and closed (it was previously left open).
with open('model.pkl', 'wb') as file:
    # dump information to that file
    pickle.dump(model, file)
"""State-Split Transformation
-----------------------------
(C) Frank-Rene Schaefer
The 'State-Split' is a procedure transforms a state machine that triggers on
some 'pure' values (e.g. Unicode Characters) into a state machine that triggers
on the code unit sequences (e.g. UTF8 Code Units) that correspond to the
original values. For example, a state transition on a Unicode Character
'0x1329D' as shown below,
[ A ]--->( 0x1329D )---->[ B ]
is translated into a sequence of UTF16 transitions with a new intermediate
state 'i' as follows.
[ A ]--( 0xD80C )-->[ i ]-->( 0xDE9E )-->[ B ]
This is so, since the character 0x1329D in Unicode is represented as the
sequence 0xD80C, 0xDE9E. The present algorithm exploits the fact that
translations of adjacent character result in sequences of adjacent intervals.
.----------------------------------------------------------------------------.
| This procedure is to be used for encodings of dynamic size, i.e. where the |
| number of code units to represent a 'pure' value changes depending on the |
| value itself (e.g. UTF8, UTF16). |
'----------------------------------------------------------------------------'
PRINCIPLE:
A state transition is described by a 'trigger set' and a target state. If an
input occurs that belongs to the 'trigger set' the state machine transits into
the specific target state. Trigger sets are composed of one or more intervals
of adjacent values. If the encoding has some type of continuity, it can be
assumed that an interval in the pure values can be represented by a sequence of
intervals in the transformed state machine. This is, indeed true for the
encodings UTF8 and UTF16.
The algorithm below considers intervals of pure values and translates them
into interval sequences. All interval sequences of a trigger set that
triggers to a target state are then combined into a set of state transitions.
A unicode transition from state A to state B:
[ A ]-->(x0, x1)-->[ B ]
is translated into a chain of utf8-byte sequence transitions that might look
like this
[ A ]-->(b0)-->[ 1 ]-->(c0,c1)-->[ B ]
\ /
`->(d1)-->[ 2 ]---(e0,e1)---'
That means that intermediate states may be introduced to reflect the different
byte sequences that represent the original interval.
IDEAS:
In a simple approach one would translate each element of a interval into an
utf8-byte sequence and generate state transitions between A and B. Such an
approach, however, produces a huge computational overhead and charges the later
Hopcroft Minimization with a huge state machine.
To avoid such a huge computational effort, the Hopcroft Minimization can be
prepared on the basis of transition intervals.
(A) Backwards: In somewhat greater intervals, the following might occur:
.-->(d1)-->[ 1 ]---(A3,BF)---.
/ \
/ ,->(d1)-->[ 2 ]---(80,BF)--. \
/ / \ \
[ A ]-->(b0)-->[ 3 ]-->(80,BF)-->[ B ]
\ /
`->(d1)-->[ 4 ]---(80,81)---'
That means, that for states 2 and 3 the last transition is on [80, BF]
to state B. Thus, the intermediate states 2 and 3 are equivalent. Both
can be replaced by a single state.
(B) Forwards: The first couple of bytes in the correspondent utf8 sequences
might be the same. Then, no branch is required until the first differing
byte.
PROCESS:
(1) The original interval translated into a list of interval sequence
that represent the values in the target encoding.
(2) The interval sequences are plugged in between the state A and B
of the state machine.
"""
from quex.engine.state_machine.state.core import DFA_State
import quex.engine.state_machine.transformation.base as base
import quex.engine.state_machine.index as state_machine_index
from quex.engine.misc.interval_handling import NumberSet
from quex.engine.misc.tools import flatten_list_of_lists
from collections import defaultdict
class EncodingTrafoBySplit(base.EncodingTrafo):
"""Transformation that takes a lexatom and produces a lexatom sequence.
"""
    def __init__(self, Name, ErrorRangeByCodeUnitDb):
        # Source set of pure values: the complete Unicode range
        # [0, 0x110000).
        base.EncodingTrafo.__init__(self, Name,
                                    NumberSet.from_range(0, 0x110000),
                                    ErrorRangeByCodeUnitDb)
    def do_transition(self, from_target_map, FromSi, ToSi, BadLexatomSi):
        """Translates to transition 'FromSi' --> 'ToSi' inside the state
        machine according to the specific coding (see derived class, i.e.
        UTF8 or UTF16).

        'BadLexatomSi' is None => no bad lexatom detection.
                       else, transitions to 'bad lexatom state' are added
                             on invalid code units.

        RETURNS: [0] True if complete, False else.
                 [1] StateDb of newly generated states.
        """
        number_set = from_target_map[ToSi]
        # Check whether a modification is necessary
        if number_set.least_greater_bound() <= self.UnchangedRange:
            # 'UnchangedRange' => No change to numerical values.
            return True, None
        if not self.cut_forbidden_range(number_set):
            # 'number_set' solely contains forbidden elements.
            del from_target_map[ToSi]
            return False, None
        # One interval of pure values may translate into several code
        # unit interval sequences.
        transformed_interval_sequence_list = flatten_list_of_lists(
            self.get_interval_sequences(interval)
            for interval in number_set.get_intervals(PromiseToTreatWellF=True)
        )
        # Second, enter the new transitions.
        new_target_map, \
        new_state_db = self.plug_interval_sequences(FromSi, ToSi,
                                                    transformed_interval_sequence_list,
                                                    BadLexatomSi)
        # Absorb new transitions into the target map of the 'from state'.
        del from_target_map[ToSi]
        from_target_map.update(new_target_map)
        return True, new_state_db
    def _do_single(self, Code):
        """Translate the single pure code point 'Code' into its sequence
        of code unit values (one integer per code unit).

        RETURNS: list of code unit values, or -1 if the set is empty
                 (defensive; a one-element range is normally non-empty).
        """
        number_set = NumberSet.from_range(Code, Code+1)
        if number_set.is_empty():
            return -1
        interval_list = number_set.get_intervals(PromiseToTreatWellF=True)
        assert len(interval_list) == 1
        interval_sequence_list = self.get_interval_sequences(interval_list[0])
        # A single code element can only produce a single interval sequence!
        assert len(interval_sequence_list) == 1
        assert all(x.size() == 1 for x in interval_sequence_list[0])
        return [x.begin for x in interval_sequence_list[0]]
    def variable_character_sizes_f(self):
        """True -- a character may occupy a variable number of code units."""
        return True
    def lexatom_n_per_character_in_state_machine(self, SM):
        """Number of code units per character, if it is the same for
        every transition of 'SM'; None if it varies or is unknown."""
        lexatom_n = None
        for state in SM.states.itervalues():
            for number_set in state.target_map.get_map().itervalues():
                candidate_lexatom_n = self.lexatom_n_per_character(number_set)
                if candidate_lexatom_n is None: return None
                elif lexatom_n is None: lexatom_n = candidate_lexatom_n
                elif lexatom_n != candidate_lexatom_n: return None
        return lexatom_n
def hopcroft_minimization_always_makes_sense(self):
    """Hopcroft minimization is always worthwhile for this transformation."""
    return True
def plug_interval_sequences(self, FromSi, ToSi, IntervalSequenceList,
                            BadLexatomSi):
    """Transform the list of interval sequences into intermediate state
    transitions.

    'BadLexatomSi' is None => no bad lexatom detection.
                    else, transitions to 'bad lexatom state' are added
                          on invalid code units.

    RETURN: [0] Target map update for the first state.
            [1] State Db update for intermediate states.
    """
    def simplify(tm_db, tm_end_inv, ToSi):
        """Those states which trigger on the same intervals to 'ToSi' are
        equivalent, i.e. can replaced by one state.
        """
        # Find the states that trigger on the same interval list to the
        # terminal 'ToSi'.
        equivalence_db = {}
        replacement_db = {}
        for from_si, interval_list in tm_end_inv.iteritems():
            # States with identical (sorted) interval lists into 'ToSi' are
            # interchangeable; the first one seen becomes the representative.
            key = tuple(sorted(interval_list))
            equivalent_si = equivalence_db.get(key)
            if equivalent_si is None: equivalence_db[key] = from_si
            else:                     replacement_db[from_si] = equivalent_si
        # Replace target states which are equivalent
        result = {}
        for from_si, tm in tm_db.iteritems():
            # Merge intervals per (possibly replaced) target state.
            new_tm = defaultdict(NumberSet)
            for target_si, interval in tm.iteritems():
                replacement_si = replacement_db.get(target_si)
                if replacement_si is not None: target_si = replacement_si
                new_tm[target_si].quick_append_interval(interval)
            # NOTE(review): debug leftover (Python-2 print statement) — dumps
            # the map if an empty NumberSet appears; candidate for removal.
            if any(number_set.is_empty() for si, number_set in new_tm.items()):
                for si, number_set in new_tm.iteritems():
                    print "#sim", si, number_set
            if from_si in tm_end_inv:
                # Re-attach this state's transitions into the terminal 'ToSi'.
                for interval in tm_end_inv[from_si]:
                    new_tm[ToSi].quick_append_interval(interval)
            result[from_si] = new_tm
        return result

    tm_db, \
    tm_end_inv, \
    position_db = _get_intermediate_transition_maps(FromSi, ToSi,
                                                    IntervalSequenceList)
    result_tm_db = simplify(tm_db, tm_end_inv, ToSi)
    if BadLexatomSi is not None:
        # Add 'bad lexatom' transitions for every intermediate state.
        for si, position in position_db.iteritems():
            # The 'positon 0' is done by 'do_state_machine'. It is concerned
            # with the first state's transition.
            assert position != 0
            self._add_transition_to_bad_lexatom_detector(result_tm_db[si],
                                                         BadLexatomSi,
                                                         position)
    # Sanity: no transition may trigger on an empty number set.
    for tm in result_tm_db.itervalues():
        assert not any(number_set.is_empty() for number_set in tm.itervalues())

    # Generate the target map to be inserted into state 'FromSi'.
    # Generate list of intermediate states that implement the sequence
    # of intervals.
    first_tm = result_tm_db.pop(FromSi)
    new_state_db = dict(
        (si, DFA_State.from_TargetMap(tm)) for si, tm in result_tm_db.iteritems()
    )
    return first_tm, new_state_db
def __bunch_iterable(IntervalSequenceList, Index):
    """Iterate over sub-bunches of sequence in 'IntervalSequenceList' which are
    the same at the given 'Position'. The 'IntervalSequenceList' must be sorted!
    That is, same intervals must be adjacent.

    EXAMPLE:
              Index = 1
              IntervalSequenceList = [
                 [ interval01, interval12, interval21, ],
                 [ interval01, interval12, interval21, ],
                 [ interval02, interval12, interval22, interval30 ],
                 [ interval02, interval13, interval22, interval30 ],
                 [ interval02, interval13, interval23, ] ]

    That is, the interval sequences are grouped according to groups where the
    second interval (Index=1) is equal, the yields are as follows:

         (1)  [ [ interval01, interval12, interval21, ],
                [ interval01, interval12, interval21, ] ]
         (2)  [ [ interval02, interval12, interval22, interval30 ] ]
         (3)  [ [ interval02, interval13, interval22, interval30 ],
                [ interval02, interval13, interval23, ] ]

    NOTE: Two sequences of different lengths are *never* grouped together
          -- by purpose.

    The index is provided in order to avoid the creation of shortened sub-
    sequences. Instead, the caller focusses on sub-sequences behind 'Index'.
    Obviously, this function only makes sense if the intervals before 'Index'
    are all the same.

    YIELDS: [0] Interval which is the same for group of sequences at 'Index'.
            [1] Group of sequences.
            [2] 'LastF' -- telling whether the interval is the last in the
                sequence.
    """
    prev_interval = None
    prev_i = -1
    prev_last_f = False
    for i, sequence in enumerate(IntervalSequenceList):
        interval = sequence[Index]
        # FIX: was a Python-2 debug statement 'print "#bu:", interval' followed
        # by 'assert False'. An empty interval is a caller error; assert with
        # a message instead of printing to stdout.
        assert not interval.is_empty(), "empty interval at index %s" % Index
        L = len(sequence)
        last_f = (L == Index + 1)
        if interval != prev_interval or last_f != prev_last_f:
            # A new group begins; emit the previous one (if any).
            if prev_i != -1:
                yield prev_interval, IntervalSequenceList[prev_i:i], prev_last_f
            prev_i = i
            prev_interval = interval
            prev_last_f = last_f
    # Emit the trailing group.
    yield prev_interval, IntervalSequenceList[prev_i:], prev_last_f
def _get_intermediate_transition_maps(FromSi, ToSi, interval_sequence_list):
    """Several transitions are to be inserted in between state 'FromSi' and
    'ToSi'. The transitions result from the list of sequences in
    'interval_sequence_list'. This function develops the transition maps
    of the states involved. Also, it notifies about the 'position' of each
    state in the code unit sequence. Thus, the caller may insert error-detectors
    on invalid code units.

    FORBIDDEN: There cannot be a sequence that starts with the exact intervals
    as a shorter sequence. Example:

          [ (0, 1), (0, 2), (0, 3) ]   #
          [ (0, 1), (0, 2) ]           # Bad, very bad!

    This would mean that after (0, 1), (0, 2) the 'ToSi' is reached, but then
    after (0, 3) again. The result is an *iteration* on 'ToSi'

          --(0, 1)-->( A )--(0, 2)-->( ToSi )---->
                                 |           |
                                 '-<-(0, 3)--'

    Consequently, such a list of interval sequences cannot represent a linear
    transition.

    RETURNS: [0] Transition Map DB:  state_index --> 'TransitionMap'

                 with TransitionMap: target_state_index --> Interval

                 That is 'TransitionMap[target_state_index]' tells through which
                 intervals the 'state_index' triggers to 'target_states'
                 The 'Transition Map DB' does not contain transitions to the
                 'ToSi'--the end state.

             [1] Inverse End Transition Map:

                 Transitions to the end state are stored inversely:

                        from_state_index --> list of Interval-s

                 The end state can be reached by more than one interval, so a
                 list of Interval-s is associated with the transition
                 'from_state_index' to 'ToSi'.

             [2] PositionDB: state_index --> position in code unit sequence.
    """
    # Sort the list of sequences, so that adjacent intervals are listed one
    # after the other. This is necessary for '__bunch_iterable()' to function.
    interval_sequence_list.sort()
    worklist = [
        # The state at 'BeginStateIndex' is concerned with the intervals
        # at position '0' in the 'interval_sequence_list'. The list needs to
        # be grouped according to the first interval, and for each distinct
        # interval a transition to another state must be generated.
        (FromSi, interval_sequence_list, 0)
    ]
    tm_db = defaultdict(dict)
    tm_end_inv = defaultdict(list)
    position_db = {}
    # Depth-first over (state, group of sequences, code-unit position).
    # NOTE(review): processing order presumably only affects which fresh state
    # indices are drawn from 'state_machine_index' -- confirm before reordering.
    while worklist:
        si, sequence_group, index = worklist.pop()
        # -- State 'si' triggers on intervals at 'index' in 'sequence_group'.
        tm = tm_db[si]
        # -- State 'si' comes at position 'index' in a sequence of code units.
        #    (position of 'FromSi' shall not appear in the 'position_db' since
        #     the error detection of the first state is done in the caller.)
        if si != FromSi: position_db[si] = index
        # Group the sequences according to the interval at position 'index'.
        for interval, sub_group, last_f in __bunch_iterable(sequence_group, index):
            # Transit to new state for the given sub-group of sequences.
            if not last_f:
                # For each 'interval' a deliberate target state is generated.
                # => each target state is only reached by a single Interval.
                new_si = state_machine_index.get()
                tm[new_si] = interval
                worklist.append((new_si, sub_group, index+1))
            else:
                # If the 'interval' is the last in the sequence, the 'ToSi' is
                # reached. Obviously this may/should happen more than once.
                tm_end_inv[si].append(interval)
    return tm_db, tm_end_inv, position_db
| quex/engine/state_machine/transformation/state_split.py | 17,246 | Check whether a modification is necessary 'UnchangedRange' => No change to numerical values. 'number_set' solely contains forbidden elements. Second, enter the new transitions. Absorb new transitions into the target map of the 'from state'. A single code element can only produce a single interval sequence! Find the states that trigger on the same interval list to the terminal 'ToSi'. Replace target states which are equivalent The 'positon 0' is done by 'do_state_machine'. It is concerned with the first state's transition. Generate the target map to be inserted into state 'FromSi'. Generate list of intermediate states that implement the sequence of intervals. Sort the list of sequences, so that adjacent intervals are listed one after the other. This is necessary for '__bunch_iterable()' to function. The state at 'BeginStateIndex' is concerned with the intervals at position '0' in the 'interval_sequence_list'. The list needs to be grouped according to the first interval, and for each distinct interval a transition to another state must be generated. -- State 'si' triggers on intervals at 'index' in 'sequence_group'. -- State 'si' comes at position 'index' in a sequence of code units. (position of 'FromSi' shall not appear in the 'position_db' since the error detection of the first state is done in the caller.) Group the sequences according to the interval at position 'index'. Transit to new state for the given sub-group of sequences. For each 'interval' a deliberate target state is generated. => each target state is only reached by a single Interval. If the 'interval' is the last in the sequence, the 'ToSi' is reached. Obviously this may/should happen more than once. | 1,696 | en | 0.857725 |
# small demo for listmode TOF MLEM without subsets
"""Benchmark: time one forward and one back projection (TOF or non-TOF)
for every permutation of the sinogram's spatial dimension order."""
import os
import matplotlib.pyplot as py
import pyparallelproj as ppp
from pyparallelproj.wrapper import joseph3d_fwd, joseph3d_fwd_tof, joseph3d_back, joseph3d_back_tof
import numpy as np
import argparse
import ctypes
from time import time

#---------------------------------------------------------------------------------
# parse the command line

parser = argparse.ArgumentParser()
parser.add_argument('--ngpus', help = 'number of GPUs to use', default = 0, type = int)
parser.add_argument('--nsubsets', help = 'number of subsets', default = 28, type = int)
parser.add_argument('--tpb', help = 'threads per block', default = 64, type = int)
parser.add_argument('--nontof', help = 'non-TOF instead of TOF', action = 'store_true')
parser.add_argument('--img_mem_order', help = 'memory layout for image', default = 'C',
                    choices = ['C','F'])
args = parser.parse_args()

#---------------------------------------------------------------------------------

ngpus = args.ngpus
nsubsets = args.nsubsets
tpb = args.tpb
tof = not args.nontof
img_mem_order = args.img_mem_order
subset = 0  # only subset 0 is projected/timed

if tof:
    ntofbins = 27
else:
    ntofbins = 1

np.random.seed(1)

#---------------------------------------------------------------------------------
# setup a scanner with one ring

scanner = ppp.RegularPolygonPETScanner(ncrystals_per_module = np.array([16,9]),
                                       nmodules = np.array([28,5]))

# setup a test image
voxsize = np.array([2.,2.,2.])
n0 = 250
n1 = 250
n2 = max(1,int((scanner.xc2.max() - scanner.xc2.min()) / voxsize[2]))

# setup a random image
img = np.zeros((n0,n1,n2), dtype = np.float32, order = img_mem_order)
img[(n0//6):(5*n0//6),(n1//6):(5*n1//6),:] = 1
img_origin = (-(np.array(img.shape) / 2) + 0.5) * voxsize

# generate sinogram parameters and the projector
# sd holds all 6 permutations of the 3 spatial sinogram axes
sd = np.array([[0,1,2],
               [0,2,1],
               [1,2,0],
               [1,0,2],
               [2,0,1],
               [2,1,0]])

for sdo in sd:
    sino_params = ppp.PETSinogramParameters(scanner, ntofbins = ntofbins, tofbin_width = 23.,
                                            spatial_dim_order = sdo)
    proj = ppp.SinogramProjector(scanner, sino_params, img.shape, nsubsets = nsubsets,
                                 voxsize = voxsize, img_origin = img_origin, ngpus = ngpus,
                                 tof = tof, sigma_tof = 60./2.35, n_sigmas = 3.,
                                 threadsperblock = tpb)

    # do a forward / back projection of subset 0 - same as img_fwd = proj.fwd_project(img, 0)
    # we just write out the single steps to time the python overhead separately

    #img_fwd = proj.fwd_project(img, 0)
    #ones_sino = np.ones(img_fwd.shape, dtype = np.float32)
    #back = proj.back_project(ones_sino, 0)

    subset_slice = proj.subset_slices[subset]

    # per-LOR TOF resolution and center offsets (flat float32 arrays)
    sigma_tof = np.full(proj.nLORs[subset], proj.sigma_tof, dtype = ctypes.c_float).ravel()
    tofcenter_offset = np.zeros(proj.nLORs[subset], dtype = ctypes.c_float).ravel()

    xstart = proj.xstart[subset_slice].ravel()
    xend = proj.xend[subset_slice].ravel()
    img_ravel = img.ravel(order = img_mem_order)
    subset_nLORs = proj.nLORs[subset]

    # pre-allocate output buffers for forward and back projection
    img_fwd = np.zeros(subset_nLORs*proj.ntofbins, dtype = ctypes.c_float)
    back_img = np.zeros(proj.nvox, dtype = ctypes.c_float)
    sino = np.ones(subset_nLORs*proj.ntofbins, dtype = ctypes.c_float)

    #--- time fwd projection
    t0 = time()
    if tof:
        ok = joseph3d_fwd_tof(xstart, xend, img_ravel, proj.img_origin, proj.voxsize,
                              img_fwd, subset_nLORs, proj.img_dim,
                              proj.tofbin_width, sigma_tof, tofcenter_offset,
                              proj.nsigmas, proj.ntofbins,
                              threadsperblock = proj.threadsperblock, ngpus = proj.ngpus, lm = False)
    else:
        ok = joseph3d_fwd(xstart, xend, img_ravel, proj.img_origin, proj.voxsize,
                          img_fwd, subset_nLORs, proj.img_dim,
                          threadsperblock = proj.threadsperblock, ngpus = proj.ngpus, lm = False)
    t1 = time()

    #--- time back projection
    t2 = time()
    if tof:
        ok = joseph3d_back_tof(xstart, xend, back_img, proj.img_origin, proj.voxsize,
                               sino, subset_nLORs, proj.img_dim,
                               proj.tofbin_width, sigma_tof, tofcenter_offset,
                               proj.nsigmas, proj.ntofbins,
                               threadsperblock = proj.threadsperblock, ngpus = proj.ngpus, lm = False)
    else:
        ok = joseph3d_back(xstart, xend, back_img, proj.img_origin, proj.voxsize,
                           sino, subset_nLORs, proj.img_dim,
                           threadsperblock = proj.threadsperblock, ngpus = proj.ngpus, lm = False)
    t3 = time()

    # report: axis order, forward time [s], back time [s]
    print(f'{sdo} {t1-t0} {t3-t2}')
| examples/projector_order_test.py | 4,951 | small demo for listmode TOF MLEM without subsets--------------------------------------------------------------------------------- parse the command line------------------------------------------------------------------------------------------------------------------------------------------------------------------ setup a scanner with one ring setup a test image setup a random image generate sinogram parameters and the projector do a forward / back projection of subset 0 - same as img_fwd = proj.fwd_project(img, 0) we just write out the single steps to time the python overhead separatelyimg_fwd = proj.fwd_project(img, 0)ones_sino = np.ones(img_fwd.shape, dtype = np.float32)back = proj.back_project(ones_sino, 0)--- time fwd projection--- time back projection | 766 | en | 0.375549 |
import asyncio
import random
import re
import textwrap
import discord
from .. import utils, errors, cmd
from ..servermodule import ServerModule, registered
from ..enums import PrivilegeLevel
@registered
class TruthGame(ServerModule):
    """Server module implementing the *Truth* party game.

    Participants are stored in the channel topic string, separated by
    _PARTICIPANT_DELIMITER. Only channels enabled via 'enablechannel'
    may host a game.
    """

    MODULE_NAME = "Truth Game"
    MODULE_SHORT_DESCRIPTION = "Tools to play *Truth*."
    RECOMMENDED_CMD_NAMES = ["truth", "troof", "trufe"]

    _SECRET_TOKEN = utils.SecretToken()
    _cmdd = {}

    _HELP_SUMMARY = """
`{modhelp}` - Truth game.
"""

    DEFAULT_SETTINGS = {
        "enabled channels": []
    }

    _PARTICIPANT_DELIMITER = " --> "

    _RULES_STRING = textwrap.dedent("""
        **Rules for a game of _Truth_**:
        idk, ask the people playing it.
        """).strip()

    async def _initialize(self, resources):
        self._client = resources.client
        self._res = resources

        self._enabled_channels = None
        self._load_settings()

        self._res.suppress_autokill(True)
        return

    def _load_settings(self):
        # Fall back to DEFAULT_SETTINGS if no settings were stored yet.
        settings = self._res.get_settings(default=self.DEFAULT_SETTINGS)

        self._enabled_channels = []
        try:
            self._enabled_channels = settings["enabled channels"]
            if self._enabled_channels is None:
                print("DEBUGGING: truthgame.py TruthGame._load_settings() enabled channels is None!")
                self._enabled_channels = []
        except KeyError:
            self._enabled_channels = settings["enabled channels"] = []
            self._res.save_settings(settings)
        return

    def _save_settings(self):
        settings = self._res.get_settings()
        settings["enabled channels"] = self._enabled_channels
        self._res.save_settings(settings)
        return

    # FIX: renamed from '_cmdf_enable' -- the same name was reused by the
    # 'enablechannel' handler below, so the later definition silently shadowed
    # this one as a class attribute. Dispatch uses the '_cmdd' registry filled
    # at decoration time, so the rename does not change command behavior.
    @cmd.add(_cmdd, "rules")
    async def _cmdf_rules(self, substr, msg, privilege_level):
        """`{cmd}` - View game rules."""
        await self._client.send_msg(msg, self._RULES_STRING)
        return

    @cmd.add(_cmdd, "newgame", top=True)
    @cmd.minimum_privilege(PrivilegeLevel.TRUSTED)
    async def _cmdf_newgame(self, substr, msg, privilege_level):
        """`{cmd}` - New game."""
        channel = msg.channel
        await self._abort_if_not_truth_channel(channel)
        await self._new_game(channel)
        await self._client.send_msg(channel, "Truth game cleared.")
        return

    @cmd.add(_cmdd, "in", top=True)
    async def _cmdf_in(self, substr, msg, privilege_level):
        """
        `{cmd}` - Adds you to the game.

        This command also allows moderators to add other users and arbitrary strings as participants.

        **Example:** `{cmd} an elephant` - Adds "an elephant" as a participant.
        """
        channel = msg.channel
        await self._abort_if_not_truth_channel(channel)

        new_participant = None
        # Non-moderators (or an empty argument) can only add themselves.
        if (privilege_level < PrivilegeLevel.MODERATOR) or (len(substr) == 0):
            new_participant = "<@" + msg.author.id + ">"
        else:
            new_participant = substr

        if self._PARTICIPANT_DELIMITER in new_participant:
            await self._client.send_msg(channel, "Error: Not allowed to use the delimiter characters.")
            raise errors.OperationAborted

        if new_participant in self._get_participants(channel):
            await self._client.send_msg(channel, "Error: {} is already a participant.".format(new_participant))
        else:
            await self._add_participant(channel, new_participant)
            await self._client.send_msg(channel, "Added {} to the game.".format(new_participant))
        return

    @cmd.add(_cmdd, "out", top=True)
    async def _cmdf_out(self, substr, msg, privilege_level):
        """
        `{cmd}` - Removes you from the game.

        This command also allows moderators to remove other users and arbitrary strings.

        **Example:** `{cmd} an elephant` - Removes "an elephant" as a participant.
        """
        channel = msg.channel
        await self._abort_if_not_truth_channel(channel)

        participant = None
        # Non-moderators (or an empty argument) can only remove themselves.
        if (privilege_level < PrivilegeLevel.MODERATOR) or (len(substr) == 0):
            participant = "<@" + msg.author.id + ">"
        else:
            participant = substr

        if participant in self._get_participants(channel):
            await self._remove_participant(channel, participant)
            await self._client.send_msg(channel, "Removed {} from the game.".format(participant))
        else:
            await self._client.send_msg(channel, "Error: {} is not already a participant.".format(participant))
        return

    # FIX: renamed from '_cmdf_enable' to a unique method name (see note on
    # '_cmdf_rules' above).
    @cmd.add(_cmdd, "enablechannel")
    @cmd.minimum_privilege(PrivilegeLevel.ADMIN)
    async def _cmdf_enablechannel(self, substr, msg, privilege_level):
        """`{cmd}` - Enable Truth in this channel."""
        channel = msg.channel
        if channel.id in self._enabled_channels:
            await self._client.send_msg(channel, "This channel is already a Truth game channel.")
        else:
            self._enabled_channels.append(channel.id)
            self._save_settings()
            await self._client.send_msg(channel, "This channel is now a Truth game channel.")
        return

    @cmd.add(_cmdd, "disablechannel")
    @cmd.minimum_privilege(PrivilegeLevel.ADMIN)
    async def _cmdf_disable(self, substr, msg, privilege_level):
        """`{cmd}` - Disable Truth in this channel."""
        channel = msg.channel
        if channel.id in self._enabled_channels:
            self._enabled_channels.remove(channel.id)
            self._save_settings()
            await self._client.send_msg(channel, "This channel is no longer a Truth game channel.")
        else:
            await self._client.send_msg(channel, "This channel is not a Truth game channel.")
        return

    @cmd.add(_cmdd, "viewenabled")
    async def _cmdf_viewenabled(self, substr, msg, privilege_level):
        """`{cmd}` - View all channels that are enabled as Truth channels."""
        buf = None
        if len(self._enabled_channels) == 0:
            buf = "No channels have Truth game enabled."
        else:
            buf = "**Truth game enabled channels:**"
            for channel_id in self._enabled_channels:
                buf += "\n<#{0}> (ID: {0})".format(channel_id)
        await self._client.send_msg(msg, buf)
        return

    # TODO: Edit this to use the topic string abstraction methods.
    #       Currently, it only consideres user mentions to be participants!
    @cmd.add(_cmdd, "choose", "random", "rand")
    async def _cmdf_choosetruth(self, substr, msg, privilege_level):
        """`{cmd}` - Pick a random participant other than yourself."""
        topic = msg.channel.topic
        if topic is None:
            await self._client.send_msg(msg, "There doesn't appear to be a truth game in here.")
            raise errors.OperationAborted

        mentions = utils.get_all_mentions(topic)
        if len(mentions) == 0:
            await self._client.send_msg(msg, "There doesn't appear to be a truth game in here.")
            raise errors.OperationAborted

        try:
            # Exclude the command issuer; if they were the only participant,
            # just echo them back.
            mentions.remove(msg.author.id)
            if len(mentions) == 0:
                await self._client.send_msg(msg, "<@{}>".format(msg.author.id))
                raise errors.OperationAborted
        except ValueError:
            pass

        choice = random.choice(mentions)
        buf = "<@{}>\n".format(choice)
        buf += "My choices were: "
        for mention in mentions:
            user = self._client.search_for_user(mention, enablenamesearch=False, serverrestriction=self._res.server)
            if user is None:
                buf += "<@{}>, ".format(mention)
            else:
                buf += "{}, ".format(user.name)
        buf = buf[:-2]
        await self._client.send_msg(msg, buf)
        return

    ################################
    ### TOPIC STRING ABSTRACTION ###
    ################################

    def _get_participants(self, channel):
        topic = channel.topic
        if topic is None:
            return []
        return topic.split(self._PARTICIPANT_DELIMITER)

    # PRECONDITION: participant_str contains printable characters.
    # PRECONDITION: participant_str does not contain the delimiter.
    async def _add_participant(self, channel, participant_str):
        topic = channel.topic
        new_topic = None
        if topic == "":
            new_topic = participant_str
        else:
            new_topic = topic + self._PARTICIPANT_DELIMITER + participant_str
        await self._client.edit_channel(channel, topic=new_topic)
        return

    # PRECONDITION: participant_str in self._get_participants(channel)
    async def _remove_participant(self, channel, participant_str):
        participants_list = self._get_participants(channel)
        participants_list.remove(participant_str)
        new_topic = self._PARTICIPANT_DELIMITER.join(participants_list)
        await self._client.edit_channel(channel, topic=new_topic)
        return

    async def _new_game(self, channel):
        await self._client.edit_channel(channel, topic="")
        return

    ########################
    ### GENERAL SERVICES ###
    ########################

    async def _abort_if_not_truth_channel(self, channel):
        if channel.id not in self._enabled_channels:
            await self._client.send_msg(channel, "Error: Truth isn't enabled on this channel.")
            raise errors.OperationAborted
        return
| mentionbot/servermodules/truthgame.py | 9,114 | TODO: Edit this to use the topic string abstraction methods. Currently, it only consideres user mentions to be participants! TOPIC STRING ABSTRACTION PRECONDITION: participant_str contains printable characters. PRECONDITION: participant_str does not contain the delimiter. PRECONDITION: participant_str in self._get_participants(channel) GENERAL SERVICES | 361 | en | 0.683052 |
"""
Module for all Form Tests.
"""
import pytest
from django.utils.translation import gettext_lazy as _
from my_blog.users.forms import UserCreationForm
from my_blog.users.models import User
pytestmark = pytest.mark.django_db
class TestUserCreationForm:
    """
    Test class for all tests related to the UserCreationForm
    """

    def test_username_validation_error_msg(self, user: User):
        """
        Tests UserCreation Form's unique validator functions correctly by testing:
            1) A new user with an existing username cannot be added.
            2) Only 1 error is raised by the UserCreation Form
            3) The desired error message is raised
        """
        # Build the form payload from an already-existing user, so the
        # unique-username validator must reject it.
        payload = {
            "username": user.username,
            "password1": user.password,
            "password2": user.password,
        }
        form = UserCreationForm(payload)

        assert not form.is_valid()
        assert len(form.errors) == 1
        assert "username" in form.errors
        expected_message = _("This username has already been taken.")
        assert form.errors["username"][0] == expected_message
| my_blog/users/tests/test_forms.py | 1,163 | Test class for all tests related to the UserCreationForm
Tests UserCreation Form's unique validator functions correctly by testing:
1) A new user with an existing username cannot be added.
2) Only 1 error is raised by the UserCreation Form
3) The desired error message is raised
Module for all Form Tests.
The user already exists, hence cannot be created. | 369 | en | 0.84332 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Base operator for SQL to GCS operators.
"""
import abc
import json
import warnings
from tempfile import NamedTemporaryFile
import unicodecsv as csv
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.utils.decorators import apply_defaults
class BaseSQLToGCSOperator(BaseOperator, metaclass=abc.ABCMeta):
"""
:param sql: The SQL to execute.
:type sql: str
:param bucket: The bucket to upload to.
:type bucket: str
:param filename: The filename to use as the object name when uploading
to Google Cloud Storage. A {} should be specified in the filename
to allow the operator to inject file numbers in cases where the
file is split due to size.
:type filename: str
:param schema_filename: If set, the filename to use as the object name
when uploading a .json file containing the BigQuery schema fields
for the table that was dumped from the database.
:type schema_filename: str
:param approx_max_file_size_bytes: This operator supports the ability
to split large table dumps into multiple files (see notes in the
filename param docs above). This param allows developers to specify the
file size of the splits. Check https://cloud.google.com/storage/quotas
to see the maximum allowed file size for a single object.
:type approx_max_file_size_bytes: long
:param export_format: Desired format of files to be exported.
:type export_format: str
:param field_delimiter: The delimiter to be used for CSV files.
:type field_delimiter: str
:param gzip: Option to compress file for upload (does not apply to schemas).
:type gzip: bool
:param schema: The schema to use, if any. Should be a list of dict or
a str. Pass a string if using Jinja template, otherwise, pass a list of
dict. Examples could be seen: https://cloud.google.com/bigquery/docs
/schemas#specifying_a_json_schema_file
:type schema: str or list
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: str
:param google_cloud_storage_conn_id: (Deprecated) The connection ID used to connect to Google Cloud
Platform. This parameter has been deprecated. You should pass the gcp_conn_id parameter instead.
:type google_cloud_storage_conn_id: str
:param delegate_to: The account to impersonate, if any. For this to
work, the service account making the request must have domain-wide
delegation enabled.
:param parameters: a parameters dict that is substituted at query runtime.
:type parameters: dict
"""
template_fields = ('sql', 'bucket', 'filename', 'schema_filename', 'schema', 'parameters')
template_ext = ('.sql',)
ui_color = '#a0e08c'
@apply_defaults
def __init__(self, # pylint: disable=too-many-arguments
sql,
bucket,
filename,
schema_filename=None,
approx_max_file_size_bytes=1900000000,
export_format='json',
field_delimiter=',',
gzip=False,
schema=None,
parameters=None,
gcp_conn_id='google_cloud_default',
google_cloud_storage_conn_id=None,
delegate_to=None,
*args,
**kwargs):
super().__init__(*args, **kwargs)
if google_cloud_storage_conn_id:
warnings.warn(
"The google_cloud_storage_conn_id parameter has been deprecated. You should pass "
"the gcp_conn_id parameter.", DeprecationWarning, stacklevel=3)
gcp_conn_id = google_cloud_storage_conn_id
self.sql = sql
self.bucket = bucket
self.filename = filename
self.schema_filename = schema_filename
self.approx_max_file_size_bytes = approx_max_file_size_bytes
self.export_format = export_format.lower()
self.field_delimiter = field_delimiter
self.gzip = gzip
self.schema = schema
self.parameters = parameters
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.parameters = parameters
def execute(self, context):
cursor = self.query()
files_to_upload = self._write_local_data_files(cursor)
# If a schema is set, create a BQ schema JSON file.
if self.schema_filename:
files_to_upload.append(self._write_local_schema_file(cursor))
# Flush all files before uploading
for tmp_file in files_to_upload:
tmp_file['file_handle'].flush()
self._upload_to_gcs(files_to_upload)
# Close all temp file handles.
for tmp_file in files_to_upload:
tmp_file['file_handle'].close()
def convert_types(self, schema, col_type_dict, row):
"""Convert values from DBAPI to output-friendly formats."""
return [
self.convert_type(value, col_type_dict.get(name))
for name, value in zip(schema, row)
]
def _write_local_data_files(self, cursor):
"""
Takes a cursor, and writes results to a local file.
:return: A dictionary where keys are filenames to be used as object
names in GCS, and values are file handles to local files that
contain the data for the GCS objects.
"""
schema = list(map(lambda schema_tuple: schema_tuple[0], cursor.description))
col_type_dict = self._get_col_type_dict()
file_no = 0
tmp_file_handle = NamedTemporaryFile(delete=True)
if self.export_format == 'csv':
file_mime_type = 'text/csv'
else:
file_mime_type = 'application/json'
files_to_upload = [{
'file_name': self.filename.format(file_no),
'file_handle': tmp_file_handle,
'file_mime_type': file_mime_type
}]
if self.export_format == 'csv':
csv_writer = self._configure_csv_file(tmp_file_handle, schema)
for row in cursor:
# Convert datetime objects to utc seconds, and decimals to floats.
# Convert binary type object to string encoded with base64.
row = self.convert_types(schema, col_type_dict, row)
if self.export_format == 'csv':
csv_writer.writerow(row)
else:
row_dict = dict(zip(schema, row))
# TODO validate that row isn't > 2MB. BQ enforces a hard row size of 2MB.
tmp_file_handle.write(json.dumps(row_dict, sort_keys=True).encode('utf-8'))
# Append newline to make dumps BigQuery compatible.
tmp_file_handle.write(b'\n')
# Stop if the file exceeds the file size limit.
if tmp_file_handle.tell() >= self.approx_max_file_size_bytes:
file_no += 1
tmp_file_handle = NamedTemporaryFile(delete=True)
files_to_upload.append({
'file_name': self.filename.format(file_no),
'file_handle': tmp_file_handle,
'file_mime_type': file_mime_type
})
if self.export_format == 'csv':
csv_writer = self._configure_csv_file(tmp_file_handle, schema)
return files_to_upload
def _configure_csv_file(self, file_handle, schema):
"""Configure a csv writer with the file_handle and write schema
as headers for the new file.
"""
csv_writer = csv.writer(file_handle, encoding='utf-8',
delimiter=self.field_delimiter)
csv_writer.writerow(schema)
return csv_writer
@abc.abstractmethod
def query(self):
"""Execute DBAPI query."""
@abc.abstractmethod
def field_to_bigquery(self, field):
"""Convert a DBAPI field to BigQuery schema format."""
@abc.abstractmethod
def convert_type(self, value, schema_type):
"""Convert a value from DBAPI to output-friendly formats."""
def _get_col_type_dict(self):
"""
Return a dict of column name and column type based on self.schema if not None.
"""
schema = []
if isinstance(self.schema, str):
schema = json.loads(self.schema)
elif isinstance(self.schema, list):
schema = self.schema
elif self.schema is not None:
self.log.warning('Using default schema due to unexpected type.'
'Should be a string or list.')
col_type_dict = {}
try:
col_type_dict = {col['name']: col['type'] for col in schema}
except KeyError:
self.log.warning('Using default schema due to missing name or type. Please '
'refer to: https://cloud.google.com/bigquery/docs/schemas'
'#specifying_a_json_schema_file')
return col_type_dict
def _write_local_schema_file(self, cursor):
"""
Takes a cursor, and writes the BigQuery schema for the results to a
local file system.
:return: A dictionary where key is a filename to be used as an object
name in GCS, and values are file handles to local files that
contains the BigQuery schema fields in .json format.
"""
schema = [self.field_to_bigquery(field) for field in cursor.description]
self.log.info('Using schema for %s: %s', self.schema_filename, schema)
tmp_schema_file_handle = NamedTemporaryFile(delete=True)
tmp_schema_file_handle.write(json.dumps(schema, sort_keys=True).encode('utf-8'))
schema_file_to_upload = {
'file_name': self.schema_filename,
'file_handle': tmp_schema_file_handle,
'file_mime_type': 'application/json',
}
return schema_file_to_upload
def _upload_to_gcs(self, files_to_upload):
    """
    Upload all of the file splits (and optionally the schema .json file) to
    Google Cloud Storage.

    :param files_to_upload: dicts with 'file_name', 'file_handle' and
        'file_mime_type' keys, as produced by the local-file writers.
    """
    hook = GCSHook(
        google_cloud_storage_conn_id=self.gcp_conn_id,
        delegate_to=self.delegate_to)
    for tmp_file in files_to_upload:
        is_schema_file = tmp_file.get('file_name') == self.schema_filename
        hook.upload(
            self.bucket,
            tmp_file.get('file_name'),
            tmp_file.get('file_handle').name,
            mime_type=tmp_file.get('file_mime_type'),
            # Fixed inverted condition: per the operator docs, ``gzip``
            # applies to the data splits and never to the schema file, but
            # the previous code compressed ONLY the schema file.
            gzip=self.gzip and not is_schema_file,
        )
| airflow/providers/google/cloud/operators/sql_to_gcs.py | 11,474 | :param sql: The SQL to execute.
:type sql: str
:param bucket: The bucket to upload to.
:type bucket: str
:param filename: The filename to use as the object name when uploading
to Google Cloud Storage. A {} should be specified in the filename
to allow the operator to inject file numbers in cases where the
file is split due to size.
:type filename: str
:param schema_filename: If set, the filename to use as the object name
when uploading a .json file containing the BigQuery schema fields
for the table that was dumped from the database.
:type schema_filename: str
:param approx_max_file_size_bytes: This operator supports the ability
to split large table dumps into multiple files (see notes in the
filename param docs above). This param allows developers to specify the
file size of the splits. Check https://cloud.google.com/storage/quotas
to see the maximum allowed file size for a single object.
:type approx_max_file_size_bytes: long
:param export_format: Desired format of files to be exported.
:type export_format: str
:param field_delimiter: The delimiter to be used for CSV files.
:type field_delimiter: str
:param gzip: Option to compress file for upload (does not apply to schemas).
:type gzip: bool
:param schema: The schema to use, if any. Should be a list of dict or
a str. Pass a string if using Jinja template, otherwise, pass a list of
dict. Examples can be seen at: https://cloud.google.com/bigquery/docs
/schemas#specifying_a_json_schema_file
:type schema: str or list
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: str
:param google_cloud_storage_conn_id: (Deprecated) The connection ID used to connect to Google Cloud
Platform. This parameter has been deprecated. You should pass the gcp_conn_id parameter instead.
:type google_cloud_storage_conn_id: str
:param delegate_to: The account to impersonate, if any. For this to
work, the service account making the request must have domain-wide
delegation enabled.
:param parameters: a parameters dict that is substituted at query runtime.
:type parameters: dict
Configure a csv writer with the file_handle and write schema
as headers for the new file.
Return a dict of column name and column type based on self.schema if not None.
Upload all of the file splits (and optionally the schema .json file) to
Google Cloud Storage.
Takes a cursor, and writes results to a local file.
:return: A dictionary where keys are filenames to be used as object
names in GCS, and values are file handles to local files that
contain the data for the GCS objects.
Takes a cursor, and writes the BigQuery schema for the results to a
local file system.
:return: A dictionary where key is a filename to be used as an object
name in GCS, and values are file handles to local files that
contains the BigQuery schema fields in .json format.
Convert a value from DBAPI to output-friendly formats.
Convert values from DBAPI to output-friendly formats.
Convert a DBAPI field to BigQuery schema format.
Execute DBAPI query.
Base operator for SQL to GCS operators.
Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. pylint: disable=too-many-arguments If a schema is set, create a BQ schema JSON file. Flush all files before uploading Close all temp file handles. Convert datetime objects to utc seconds, and decimals to floats. Convert binary type object to string encoded with base64. TODO validate that row isn't > 2MB. BQ enforces a hard row size of 2MB. Append newline to make dumps BigQuery compatible. Stop if the file exceeds the file size limit. | 4,336 | en | 0.782952 |
from shallowflow.api.source import AbstractListOutputSource
from shallowflow.api.config import Option
class ForLoop(AbstractListOutputSource):
    """
    Outputs an integer from the specified range.
    """

    def description(self):
        """
        Returns a description for the actor.

        :return: the actor description
        :rtype: str
        """
        return "Outputs an integer from the specified range."

    def _define_options(self):
        """
        For configuring the options.
        """
        super()._define_options()
        self._option_manager.add(Option(name="start", value_type=int, def_value=1,
                                        help="The starting value"))
        self._option_manager.add(Option(name="end", value_type=int, def_value=10,
                                        help="The last value (incl)"))
        self._option_manager.add(Option(name="step", value_type=int, def_value=1,
                                        help="The increment between values"))

    def _get_item_type(self):
        """
        Returns the type of the individual items that get generated, when not outputting a list.

        :return: the type that gets generated
        """
        return int

    def setup(self):
        """
        Prepares the actor for use.

        :return: None if successful, otherwise error message
        :rtype: str
        """
        result = super().setup()
        if result is None:
            if self.get("end") < self.get("start"):
                # Fixed: the message previously stated the relation backwards
                # ("End value must be smaller than start") and mixed %s/%d
                # for two integer values.
                result = "End value (%d) must not be smaller than start value (%d)!" % (self.get("end"), self.get("start"))
            elif self.get("step") < 1:
                # A zero or negative step would make _do_execute loop forever.
                result = "Step value (%d) must be at least 1!" % self.get("step")
        return result

    def _do_execute(self):
        """
        Performs the actual execution.

        :return: None if successful, otherwise error message
        :rtype: str
        """
        current = self.get("start")
        step = self.get("step")
        end = self.get("end")
        while current <= end:
            self._output.append(current)
            current += step
        return None
| base/src/shallowflow/base/sources/_ForLoop.py | 2,016 | Outputs an integer from the specified range.
For configuring the options.
Performs the actual execution.
:return: None if successful, otherwise error message
:rtype: str
Returns the type of the individual items that get generated, when not outputting a list.
:return: the type that gets generated
Returns a description for the actor.
:return: the actor description
:rtype: str
Prepares the actor for use.
:return: None if successful, otherwise error message
:rtype: str | 473 | en | 0.52556 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.vision_v1p3beta1.types import image_annotator
from google.longrunning import operations_pb2 # type: ignore
from .base import ImageAnnotatorTransport, DEFAULT_CLIENT_INFO
class ImageAnnotatorGrpcTransport(ImageAnnotatorTransport):
    """gRPC backend transport for ImageAnnotator.

    Service that performs Google Cloud Vision API detection tasks
    over client images, such as face, landmark, logo, label, and
    text detection. The ImageAnnotator service returns detected
    entities from the images.

    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.

    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """
    # Cache of gRPC stub callables, keyed by RPC method name.
    _stubs: Dict[str, Callable]

    def __init__(self, *,
            host: str = 'vision.googleapis.com',
            credentials: ga_credentials.Credentials = None,
            credentials_file: str = None,
            scopes: Sequence[str] = None,
            channel: grpc.Channel = None,
            api_mtls_endpoint: str = None,
            client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
            ssl_channel_credentials: grpc.ChannelCredentials = None,
            client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument is
                ignored if ``channel`` is provided.
            channel (Optional[grpc.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for the grpc channel. It is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure a mutual TLS channel. It is
                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.

        Raises:
          google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
              creation failed for any reason.
          google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
              and ``credentials_file`` are passed.
        """
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        self._stubs: Dict[str, Callable] = {}
        self._operations_client = None

        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)

        if channel:
            # Ignore credentials if a channel was passed.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None

        else:
            if api_mtls_endpoint:
                host = api_mtls_endpoint

                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials

            else:
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )

        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )

        if not self._grpc_channel:
            self._grpc_channel = type(self).create_channel(
                self._host,
                credentials=self._credentials,
                credentials_file=credentials_file,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )

        # Wrap messages. This must be done after self._grpc_channel exists
        self._prep_wrapped_messages(client_info)

    @classmethod
    def create_channel(cls,
                       host: str = 'vision.googleapis.com',
                       credentials: ga_credentials.Credentials = None,
                       credentials_file: str = None,
                       scopes: Optional[Sequence[str]] = None,
                       quota_project_id: Optional[str] = None,
                       **kwargs) -> grpc.Channel:
        """Create and return a gRPC channel object.
        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            grpc.Channel: A gRPC channel object.

        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
              and ``credentials_file`` are passed.
        """

        return grpc_helpers.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            quota_project_id=quota_project_id,
            default_scopes=cls.AUTH_SCOPES,
            scopes=scopes,
            default_host=cls.DEFAULT_HOST,
            **kwargs
        )

    @property
    def grpc_channel(self) -> grpc.Channel:
        """Return the channel designed to connect to this service.
        """
        return self._grpc_channel

    @property
    def operations_client(self) -> operations_v1.OperationsClient:
        """Create the client designed to process long-running operations.

        This property caches on the instance; repeated calls return the same
        client.
        """
        # Sanity check: Only create a new client if we do not already have one.
        if self._operations_client is None:
            self._operations_client = operations_v1.OperationsClient(
                self.grpc_channel
            )

        # Return the client from cache.
        return self._operations_client

    @property
    def batch_annotate_images(self) -> Callable[
            [image_annotator.BatchAnnotateImagesRequest],
            image_annotator.BatchAnnotateImagesResponse]:
        r"""Return a callable for the batch annotate images method over gRPC.

        Run image detection and annotation for a batch of
        images.

        Returns:
            Callable[[~.BatchAnnotateImagesRequest],
                    ~.BatchAnnotateImagesResponse]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'batch_annotate_images' not in self._stubs:
            self._stubs['batch_annotate_images'] = self.grpc_channel.unary_unary(
                '/google.cloud.vision.v1p3beta1.ImageAnnotator/BatchAnnotateImages',
                request_serializer=image_annotator.BatchAnnotateImagesRequest.serialize,
                response_deserializer=image_annotator.BatchAnnotateImagesResponse.deserialize,
            )
        return self._stubs['batch_annotate_images']

    @property
    def async_batch_annotate_files(self) -> Callable[
            [image_annotator.AsyncBatchAnnotateFilesRequest],
            operations_pb2.Operation]:
        r"""Return a callable for the async batch annotate files method over gRPC.

        Run asynchronous image detection and annotation for a list of
        generic files, such as PDF files, which may contain multiple
        pages and multiple images per page. Progress and results can be
        retrieved through the ``google.longrunning.Operations``
        interface. ``Operation.metadata`` contains ``OperationMetadata``
        (metadata). ``Operation.response`` contains
        ``AsyncBatchAnnotateFilesResponse`` (results).

        Returns:
            Callable[[~.AsyncBatchAnnotateFilesRequest],
                    ~.Operation]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'async_batch_annotate_files' not in self._stubs:
            self._stubs['async_batch_annotate_files'] = self.grpc_channel.unary_unary(
                '/google.cloud.vision.v1p3beta1.ImageAnnotator/AsyncBatchAnnotateFiles',
                request_serializer=image_annotator.AsyncBatchAnnotateFilesRequest.serialize,
                response_deserializer=operations_pb2.Operation.FromString,
            )
        return self._stubs['async_batch_annotate_files']

    def close(self):
        """Close the underlying gRPC channel, releasing its resources."""
        self.grpc_channel.close()
# Public API of this module.
__all__ = (
    'ImageAnnotatorGrpcTransport',
)
| google/cloud/vision/v1p3beta1/vision-v1p3beta1-py/google/cloud/vision_v1p3beta1/services/image_annotator/transports/grpc.py | 14,205 | gRPC backend transport for ImageAnnotator.
Service that performs Google Cloud Vision API detection tasks
over client images, such as face, landmark, logo, label, and
text detection. The ImageAnnotator service returns detected
entities from the images.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
Return a callable for the async batch annotate files method over gRPC.
Run asynchronous image detection and annotation for a list of
generic files, such as PDF files, which may contain multiple
pages and multiple images per page. Progress and results can be
retrieved through the ``google.longrunning.Operations``
interface. ``Operation.metadata`` contains ``OperationMetadata``
(metadata). ``Operation.response`` contains
``AsyncBatchAnnotateFilesResponse`` (results).
Returns:
Callable[[~.AsyncBatchAnnotateFilesRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
Return a callable for the batch annotate images method over gRPC.
Run image detection and annotation for a batch of
images.
Returns:
Callable[[~.BatchAnnotateImagesRequest],
~.BatchAnnotateImagesResponse]:
A function that, when called, will call the underlying RPC
on the server.
Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
Return the channel designed to connect to this service.
Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
-*- coding: utf-8 -*- Copyright 2020 Google LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. type: ignore type: ignore type: ignore type: ignore type: ignore type: ignore type: ignore type: ignore Ignore credentials if a channel was passed. If a channel was explicitly provided, set it. Create SSL credentials with client_cert_source or application default SSL credentials. The base transport sets the host, credentials and scopes Wrap messages. This must be done after self._grpc_channel exists Sanity check: Only create a new client if we do not already have one. Return the client from cache. Generate a "stub function" on-the-fly which will actually make the request. gRPC handles serialization and deserialization, so we just need to pass in the functions for each. Generate a "stub function" on-the-fly which will actually make the request. gRPC handles serialization and deserialization, so we just need to pass in the functions for each. | 6,927 | en | 0.804054 |
import io
import os
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from PIL import Image
def resize_axis(tensor, axis, new_size, fill_value=0, random_sampling=False):
    """Truncates or pads a tensor to new_size on a given axis.

    Truncate or extend tensor such that tensor.shape[axis] == new_size. If the
    size increases, the padding will be performed at the end, using fill_value.
    If the size decreases, a random contiguous clip of the axis is kept, or —
    when random_sampling is True — new_size indices are sampled uniformly with
    replacement over the whole axis.

    Args:
        tensor: The tensor to be resized (anything accepted by torch.Tensor).
        axis: An integer representing the dimension to be sliced.
        new_size: An integer or 0d tensor representing the new value for
            tensor.shape[axis].
        fill_value: Value to use to fill any new entries in the tensor. Will be
            cast to the type of tensor.
        random_sampling: If True and the axis must shrink, sample indices with
            replacement instead of taking a contiguous clip.

    Returns:
        The resized (float) tensor.
    """
    tensor = torch.Tensor(tensor)
    shape = list(tensor.shape)

    pad_shape = shape[:]
    pad_shape[axis] = max(0, new_size - shape[axis])

    # Random start offset of the clip; 0 when no truncation is needed.
    start = 0 if shape[axis] <= new_size else np.random.randint(
        shape[axis] - new_size)
    old_length = shape[axis]
    shape[axis] = min(shape[axis], new_size)

    if start > 0 and random_sampling:
        kept = torch.index_select(
            tensor, dim=axis, index=torch.randint(old_length, (new_size,)))
    else:
        kept = torch.narrow(tensor, dim=axis, start=start, length=shape[axis])

    # torch.full builds the padding with an explicit dtype instead of filling
    # an uninitialized torch.Tensor(*pad_shape) in place.
    padding = torch.full(pad_shape, fill_value, dtype=tensor.dtype)
    return torch.cat([kept, padding], dim=axis)
class CircleLoss(torch.nn.Module):
    """Circle loss (Sun et al., CVPR 2020) in its classification form.

    Rescales each logit with a per-entry weight ``alpha`` and margin
    ``delta`` before applying cross-entropy.

    Args:
        m: relaxation margin.
        gamma: scale factor applied to the adjusted logits.
    """

    def __init__(self, m=0.25, gamma=256):
        super(CircleLoss, self).__init__()
        self.m = m
        self.gamma = gamma
        self.loss = torch.nn.CrossEntropyLoss()

    def forward(self, logits, labels):
        """Compute the loss for ``logits`` of shape (B, C) and integer class
        ``labels`` of shape (B,)."""
        # Fixed: the previous code indexed ROWS with `alpha[labels]` /
        # `delta[labels]`, which mixes up samples and raises IndexError as
        # soon as a label value >= batch size. The target entries are the
        # per-sample positions (i, labels[i]).
        rows = torch.arange(logits.size(0), device=logits.device)
        alpha = torch.clamp_min(logits + self.m, min=0).detach()  # a_n
        alpha[rows, labels] = torch.clamp_min(
            -logits[rows, labels] + 1 + self.m, min=0).detach()  # a_p
        delta = torch.ones_like(logits) * self.m  # delta_n
        delta[rows, labels] = 1 - self.m  # delta_p
        return self.loss(alpha * (logits - delta) * self.gamma, labels)
Truncate or extend tensor such that tensor.shape[axis] == new_size. If the
size increases, the padding will be performed at the end, using fill_value.
Args:
tensor: The tensor to be resized.
axis: An integer representing the dimension to be sliced.
new_size: An integer or 0d tensor representing the new value for
tensor.shape[axis].
fill_value: Value to use to fill any new entries in the tensor. Will be
cast to the type of tensor.
Returns:
The resized tensor.
random clip an ap delta_n delta_p | 575 | en | 0.672781 |
from .infinity import INFINITY
import json
from typing import List, Tuple, Any, Type, Union, TypeVar, Generic, Optional, Dict, cast, Callable
# Value type stored in a PageProperty.
T = TypeVar('T')
class PageProperty(Generic[T]):
    """
    A class to represent a property that varies depending on the pages of a spectral sequence.
    This is the main helper class that encapsulates any property of a class, edge, or chart
    that varies depending on the page.

    Examples:
        >>> p = PageProperty(1)
        >>> p[4] = 7
        >>> p[2]
        1
        >>> p[4]
        7
    """
    def __init__(self,
        value : T,
        parent : Optional[Any] = None,
        callback : Optional[Callable[[], None]] = None,
    ):
        """ Initialize the PageProperty to always have value v."""
        # Sorted list of (page, value) breakpoints: entry (p, v) means the
        # property has value v from page p until the next breakpoint's page.
        self._values : List[Tuple[int, T]] = [(0, value)]
        self.set_parent(parent)
        self._callback = callback

    def set_parent(self, parent : Optional[Any]):
        """Set the owner notified (via _needs_update) when this property changes."""
        self._parent = parent

    def set_callback(self, callback : Callable[[], None]):
        """Set a callback invoked whenever this property changes."""
        self._callback = callback

    def _needs_update(self):
        # Propagate a change notification to the parent and/or the callback.
        if self._parent:
            self._parent._needs_update()
        if self._callback:
            self._callback()

    def _find_index(self, target_page : int) -> Tuple[int, bool]:
        """Return (index of the last breakpoint with page <= target_page,
        whether that breakpoint's page equals target_page exactly)."""
        result_idx = None
        for (idx, (page, _)) in enumerate(self._values):
            if page > target_page:
                break
            result_idx = idx
        # We need to help out the type checker here
        if result_idx is None:
            raise ValueError(f"Page Property indexed with negative index: {target_page}")
        return (result_idx, self._values[result_idx][0] == target_page)

    def __getitem__(self, x : Union[int, slice]) -> T:
        """Return the value on page ``x``.  A slice is accepted only when the
        value is constant across the whole slice."""
        stop = None
        if type(x) == slice:
            stop = x.stop or INFINITY
            x = x.start or 0
        if type(x) != int:
            raise TypeError(f"Expected integer, got {type(x).__name__}.")
        assert type(x) is int # Make type analysis thing happy
        (idx, _) = self._find_index(x)
        if stop:
            (idx2, _) = self._find_index(stop - 1)
            if idx != idx2:
                raise ValueError("Indexed with slice but value is inconsistent across slice.")
        return self._values[idx][1]

    def __setitem__(self, p : Union[int, slice], v : T) -> None:
        """Set the value on page ``p``, or on every page of a slice."""
        if hasattr(v, "set_parent"):
            v.set_parent(self)
        if type(p) is int:
            self._setitem_single(p, v)
            self._merge_redundant()
            self._needs_update()
            return
        if type(p) is not slice:
            raise TypeError("Excepted int or slice!")
        start = p.start or 0
        stop = p.stop or INFINITY
        # Remember the value just past the slice so it can be restored there.
        orig_value = self[stop]
        (start_idx, _) = self._setitem_single(start, v)
        (end_idx, hit_end) = self._find_index(stop)
        if not hit_end and stop < INFINITY:
            (end_idx, _) = self._setitem_single(stop, orig_value)
        if stop == INFINITY:
            end_idx += 1
        # Drop breakpoints strictly inside the slice; they were overwritten.
        del self._values[start_idx + 1 : end_idx]
        self._merge_redundant()
        self._needs_update()

    def _setitem_single(self, p : int, v : T):
        # Insert (or overwrite) the breakpoint at page p; returns (index, hit).
        (idx, hit) = self._find_index(p)
        if hit:
            self._values[idx] = (p, v)
        else:
            idx += 1
            self._values.insert(idx, (p, v))
        return (idx, hit)

    def _merge_redundant(self):
        # Remove breakpoints whose value repeats the previous breakpoint's.
        for i in range(len(self._values) - 1, 0, -1):
            if self._values[i][1] == self._values[i-1][1]:
                del self._values[i]

    def __repr__(self) -> str:
        values = ", ".join([f"{page}: {value}" for (page, value) in self._values])
        return f"PageProperty{{{values}}}"

    def __eq__(self, other):
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable — confirm that is intended.
        if type(other) != PageProperty:
            return False
        return self._values == other._values

    def map_values_in_place(self, f):
        """Apply ``f`` to every stored value, mutating this property."""
        for i in range(len(self._values)):
            (p, v) = self._values[i]
            self._values[i] = (p, f(v))

    def to_json(self) -> Dict[str, Any]:
        """Serialize; a property constant on all pages collapses to its bare value."""
        if len(self._values) == 1:
            return self._values[0][1]
        else:
            return {"type" : "PageProperty", "values" : self._values }

    @staticmethod
    def from_json(json_obj : Dict[str, Any]) -> "PageProperty[Any]":
        """Inverse of :meth:`to_json` for the dict form."""
        result : PageProperty[Any] = PageProperty(None)
        result._values = [cast(Tuple[int, Any], tuple(x)) for x in json_obj["values"]]
        return result
# Alias: either a bare value or an already-wrapped PageProperty of it.
S = TypeVar('S')
PagePropertyOrValue = Union[S, PageProperty[S]]
def ensure_page_property(v : PagePropertyOrValue[S], parent : Optional[Any] = None) -> PageProperty[S]:
    """Coerce *v* into a PageProperty (wrapping bare values), optionally
    attaching *parent* to the result."""
    result = v if type(v) is PageProperty else PageProperty(v)
    if parent:
        result.set_parent(parent)
    return result
This is the main helper class that encapsulates any property of a class, edge, or chart
that varies depending on the page.
Examples:
>>> p = PageProperty(1)
>>> p[4] = 7
>>> p[2]
1
>>> p[4]
7
Initialize the PageProperty to always have value v.
We need to help out the type checker here, to keep type-analysis tooling happy.
# Generated by Django 2.1.3 on 2018-12-08 05:56
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add a nullable ``process`` foreign key (-> staf.Process) to Dataset."""

    dependencies = [
        ('staf', '0008_auto_20181207_1525'),
    ]

    operations = [
        migrations.AddField(
            model_name='dataset',
            name='process',
            # Nullable/blank so existing Dataset rows remain valid; CASCADE
            # deletes datasets when their Process is deleted.
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='staf.Process'),
        ),
    ]
| src/staf/migrations/0009_dataset_process.py | 489 | Generated by Django 2.1.3 on 2018-12-08 05:56 | 45 | en | 0.597899 |
"""about command for osxphotos CLI"""
from textwrap import dedent
import click
from osxphotos._constants import OSXPHOTOS_URL
from osxphotos._version import __version__
MIT_LICENSE = """
MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
APACHE_2_0_LICENSE = """
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
BSD_3_CLAUSE_LICENSE = """
Redistribution and use in source and binary forms, with or without modification, are
permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list
of conditions and the following disclaimer in the documentation and/or other materials
provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be
used to endorse or promote products derived from this software without specific prior
written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER
OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
ISC_LICENSE = """
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.
"""
# Aggregate license text shown by `osxphotos about`: osxphotos' own MIT
# license followed by the licenses of bundled third-party software.
LICENSE = dedent(
    f"""
    osxphotos is copyright (c) 2019-2022 by Rhet Turnbull and is licensed under the MIT license:
    {MIT_LICENSE}
    osxphotos uses the following 3rd party software licensed under the BSD-3-Clause License:
    Click (Copyright 2014 Pallets), ptpython (Copyright (c) 2015, Jonathan Slenders)
    {BSD_3_CLAUSE_LICENSE}
    osxphotos uses the following 3rd party software licensed under the Apache 2.0 License:
    tenacity (Copyright Julien Danjou)
    {APACHE_2_0_LICENSE}
    osxphotos uses the following 3rd party software licensed under the ISC License:
    xdg (Copyright 2016-2021 Scott Stevenson <scott@stevenson.io>)
    {ISC_LICENSE}
    """
)
@click.command(name="about")
@click.pass_obj
@click.pass_context
def about(ctx, cli_obj):
    """Print information about osxphotos including license."""
    # Assemble the full text first, then page it (long license text).
    text = (
        f"osxphotos, version {__version__}\n\n"
        f"Source code available at: {OSXPHOTOS_URL}\n"
        f"{LICENSE}"
    )
    click.echo_via_pager(text)
| osxphotos/cli/about.py | 15,763 | Print information about osxphotos including license.
about command for osxphotos CLI | 84 | en | 0.840753 |
# -*- coding: utf-8 -*-
"""
Logging for the hubble daemon
"""
import logging
import time
import hubblestack.splunklogging
# These patterns will not be logged by "conf_publisher" and "emit_to_splunk"
PATTERNS_TO_FILTER = ["password", "token", "passphrase", "privkey",
                      "keyid", "s3.key", "splunk_token"]

# While hubble doesn't use these, salt modules can, so let's define them anyway.
# Custom levels are attached to the logging module itself so callers can use
# e.g. logging.SPLUNK exactly like the builtin level constants.
SPLUNK = logging.SPLUNK = 25
PROFILE = logging.PROFILE = 15
TRACE = logging.TRACE = 5
GARBAGE = logging.GARBAGE = 1
QUIET = logging.QUIET = 1000

# Map of level-name strings (as they appear in hubble config) to numeric levels.
LOG_LEVELS = {
    'all': logging.NOTSET,
    'debug': logging.DEBUG,
    'error': logging.ERROR,
    'critical': logging.CRITICAL,
    'garbage': GARBAGE,
    'info': logging.INFO,
    'profile': PROFILE,
    'quiet': QUIET,
    'trace': TRACE,
    'warning': logging.WARNING,
}

# Register readable names so records at the custom levels print nicely.
logging.addLevelName(SPLUNK, 'SPLUNK')
logging.addLevelName(QUIET, 'QUIET')
logging.addLevelName(PROFILE, 'PROFILE')
logging.addLevelName(TRACE, 'TRACE')
logging.addLevelName(GARBAGE, 'GARBAGE')
def _splunk(self, message, *args, **kwargs):
    """Logger method: emit *message* at the custom SPLUNK level."""
    if not self.isEnabledFor(logging.SPLUNK):
        return
    self._log(logging.SPLUNK, message, args, **kwargs)
def _quiet(self, message, *args, **kwargs):
    """Logger method: emit *message* at the custom QUIET level."""
    if not self.isEnabledFor(logging.QUIET):
        return
    self._log(logging.QUIET, message, args, **kwargs)
def _profile(self, message, *args, **kwargs):
    """Logger method: emit *message* at the custom PROFILE level."""
    if not self.isEnabledFor(logging.PROFILE):
        return
    self._log(logging.PROFILE, message, args, **kwargs)
def _trace(self, message, *args, **kwargs):
    """Logger method: emit *message* at the custom TRACE level."""
    if not self.isEnabledFor(logging.TRACE):
        return
    self._log(logging.TRACE, message, args, **kwargs)
def _garbage(self, message, *args, **kwargs):
    """Logger method: emit *message* at the custom GARBAGE level."""
    if not self.isEnabledFor(logging.GARBAGE):
        return
    self._log(logging.GARBAGE, message, args, **kwargs)
# Attach the helpers above as methods, so every Logger instance gains
# .splunk(), .quiet(), .profile(), .trace() and .garbage().
logging.Logger.splunk = _splunk
logging.Logger.quiet = _quiet
logging.Logger.profile = _profile
logging.Logger.trace = _trace
logging.Logger.garbage = _garbage

# Module-level handle on the splunk handler; set by setup_splunk_logger().
SPLUNK_HANDLER = None
class MockRecord(object):
    """ Fake record that mimicks a logging record """

    def __init__(self, message, levelname, asctime, name):
        # Only the attributes read by SplunkHandler.emit() are provided.
        self.name = name
        self.asctime = asctime
        self.levelname = levelname
        self.message = message
# Set up an early log handler for use while we're generating config.
# Will be removed when we set up the console or file logger.
TEMP_HANDLER = logging.StreamHandler()
TEMP_HANDLER.setLevel(logging.INFO)
TEMP_HANDLER.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(name)s: %(message)s'))
# Insert at the front so it takes effect before any pre-existing handlers.
logging.root.handlers.insert(0, TEMP_HANDLER)
def _remove_temp_handler():
    """
    Detach the early bootstrap handler from the root logger, if attached.
    """
    try:
        logging.root.handlers.remove(TEMP_HANDLER)
    except ValueError:
        # Not attached (or TEMP_HANDLER is None) -- nothing to do.
        pass
def setup_console_logger(log_level='error',
                         log_format='%(asctime)s [%(levelname)-5s] %(message)s',
                         date_format='%H:%M:%S'):
    """
    Sets up logging to STDERR, allowing for configurable level, format, and
    date format. Unknown level names fall back to ERROR.
    """
    _remove_temp_handler()
    handler = logging.StreamHandler()
    handler.setLevel(LOG_LEVELS.get(log_level, logging.ERROR))
    handler.setFormatter(logging.Formatter(log_format, date_format))
    logging.getLogger().addHandler(handler)
def setup_file_logger(log_file,
                      log_level='error',
                      log_format='%(asctime)s,%(msecs)03d [%(levelname)-5s] [%(name)s:%(lineno)d] '
                                 ' %(message)s',
                      date_format='%Y-%m-%d %H:%M:%S',
                      max_bytes=100000000,
                      backup_count=1):
    """
    Sets up logging to a file. By default will auto-rotate those logs every
    100MB and keep one backup.

    Unknown level names fall back to ERROR.
    """
    # BUG FIX: `import logging` does NOT import the logging.handlers
    # submodule, so `logging.handlers.RotatingFileHandler` raises
    # AttributeError unless some other module happened to import it first.
    from logging.handlers import RotatingFileHandler
    _remove_temp_handler()
    rootlogger = logging.getLogger()
    handler = RotatingFileHandler(log_file, maxBytes=max_bytes,
                                  backupCount=backup_count)
    handler.setLevel(LOG_LEVELS.get(log_level, logging.ERROR))
    formatter = logging.Formatter(log_format, date_format)
    handler.setFormatter(formatter)
    rootlogger.addHandler(handler)
def setup_splunk_logger():
    """
    Sets up logging to splunk and records the handler in SPLUNK_HANDLER.
    """
    global SPLUNK_HANDLER
    _remove_temp_handler()
    handler = hubblestack.splunklogging.SplunkHandler()
    handler.setLevel(logging.SPLUNK)
    logging.getLogger().addHandler(handler)
    SPLUNK_HANDLER = handler
def emit_to_splunk(message, level, name):
    """
    Emit a single message to splunk.

    Returns False when no splunk handler has been configured, True otherwise.
    Dict/list payloads are scrubbed of sensitive keys first.
    """
    if SPLUNK_HANDLER is None:
        return False
    payload = message
    if isinstance(payload, (list, dict)):
        payload = filter_logs(payload, remove_dots=False)
    SPLUNK_HANDLER.emit(MockRecord(payload, level, time.asctime(), name))
    return True
def workaround_salt_log_handler_queues():
    """
    Build a fake log handler and add it to LOGGING_STORE_HANDLER and
    LOGGING_NULL_HANDLER so salt's queued records get drained.
    """
    class _FakeLogHandler(object):
        # Looks enough like a logging handler for salt's queue handlers.
        level = 10
        count = 0

        def handle(self, _record):
            """ Receive a record and increase the count """
            self.count += 1

    import salt.log.setup as sls
    fake = _FakeLogHandler()
    sls.LOGGING_STORE_HANDLER.sync_with_handlers([fake])
    sls.LOGGING_NULL_HANDLER.sync_with_handlers([fake])
def filter_logs(opts_to_log, remove_dots=True):
    """
    Filters out keys containing certain patterns to avoid sensitive information being sent to logs
    Works on dictionaries and lists
    This function was located at extmods/modules/conf_publisher.py previously

    NOTE(review): when remove_dots is True the scrubbed result is assumed to
    be a dict (a list input would fail on .keys()) -- confirm against callers.
    """
    filtered_conf = _remove_sensitive_info(opts_to_log, PATTERNS_TO_FILTER)
    if remove_dots:
        # BUG FIX: iterate over a snapshot of the keys. Popping and
        # re-inserting while iterating the live .keys() view can skip or
        # re-visit entries (and risks RuntimeError).
        for key in list(filtered_conf.keys()):
            if '.' in key:
                filtered_conf[key.replace('.', '_')] = filtered_conf.pop(key)
    return filtered_conf
def _remove_sensitive_info(obj, patterns_to_filter):
    """
    Recursively drop dict entries whose key contains any filtered pattern;
    lists are filtered element-wise, everything else passes through unchanged.
    """
    if isinstance(obj, dict):
        clean = {}
        for key, value in obj.items():
            if any(patt in key for patt in patterns_to_filter):
                continue
            clean[key] = _remove_sensitive_info(value, patterns_to_filter)
        return clean
    if isinstance(obj, list):
        return [_remove_sensitive_info(item, patterns_to_filter)
                for item in obj]
    return obj
| hubblestack/log.py | 6,628 | Fake record that mimicks a logging record
Filter known sensitive info
Remove temporary handler if it exists
Emit a single message to splunk
Filters out keys containing certain patterns to avoid sensitive information being sent to logs
Works on dictionaries and lists
This function was located at extmods/modules/conf_publisher.py previously
Receive a record and increase the count
Sets up logging to STDERR, allowing for configurable level, format, and
date format.
Sets up logging to a file. By default will auto-rotate those logs every
100MB and keep one backup.
Sets up logging to splunk.
Build a fake log handler and add it to LOGGING_STORE_HANDLER and LOGGING_NULL_HANDLER
Logging for the hubble daemon
-*- coding: utf-8 -*- These patterns will not be logged by "conf_publisher" and "emit_to_splunk" While hubble doesn't use these, salt modules can, so let's define them anyway Set up an early log handler for use while we're generating config. Will be removed when we set up the console or file logger. if flh.count > 0: log.info("pretended to handle %d logging record(s) for salt.log.setup.LOGGING_*_HANDLER", flh.count) | 1,139 | en | 0.846832 |
import os
class ConfigParams:
    """Runtime configuration assembled from environment variables and defaults."""

    def __init__(self,configPath):
        # NOTE(review): configPath is currently unused -- loadConfig() is a stub.
        self.env_dist = os.environ
        # API key for authentication / permission checks (unset by default)
        self.api_key = ""
        # userID = ""
        # ip = "0.0.0.0"
        # Root directory where model files are stored
        self.modelPath = os.path.join(os.getcwd(),"model")
        # NOTE(review): the following are plain locals, not attributes -- they
        # are discarded when __init__ returns. Possibly meant to be self.X;
        # confirm before relying on them.
        cpuCores = 0
        threads = 2
        port = 33388
        batchSize = 10
        # Number of GPUs used by each algorithm
        self.GPUDevices = 1
        topK = 80
        featureSize = 512
        zmqthreads = 2
        self.CPU = 0
        # ZMQ endpoint: host from ZMQ_ADDR env var (port 5560), else localhost:5570
        self.zmqAddr = "tcp://{}:5560".format(self.env_dist["ZMQ_ADDR"]) if "ZMQ_ADDR" in self.env_dist else "tcp://127.0.0.1:5570"
        print(str(self.zmqAddr))
        # Per-feature camera/algorithm id lists, parsed from comma-separated env vars
        self.helmet_ids = self.parseAI("HELMET") if "HELMET" in self.env_dist else []
        self.pose_ids = self.parseAI("POSE") if "POSE" in self.env_dist else []
        self.track_coal_ids = self.parseAI("TRACK_COAL") if "TRACK_COAL" in self.env_dist else []
        self.smoke_phone_ids = self.parseAI("SMOKEPHONE") if "SMOKEPHONE" in self.env_dist else []
        # self.helmet_ids = [1,1,1]
        # self.pose_ids = []
        # self.track_coal_ids = []
        # self.smoke_phone_ids = []

    def loadConfig(self,configPath):
        # Stub: intended to load settings from configPath.
        pass

    def generateDefaultConfig(self,configPath):
        # Stub: intended to write a default config file to configPath.
        pass

    def initEasylogging(self,logConfig):
        # Stub: intended to initialise logging from logConfig.
        pass

    def printParams(self):
        print("run configParams function printParams")
        pass

    def parseAI(self,key):
        """Parse a comma-separated env var (e.g. "1,2,3") into a list of ints."""
        ai_ids = []
        for i in self.env_dist[key].split(','):
            ai_ids.append(int(i))
        return ai_ids
| common/configParams.py | 1,622 | 权限验证 userID = "" ip = "0.0.0.0"模型相关存放根目录每个算法使用的GPU数量 self.helmet_ids = [1,1,1] self.pose_ids = [] self.track_coal_ids = [] self.smoke_phone_ids = [] | 148 | en | 0.271399 |
import sys, gzip, logging
from .in_util import TimeReport, detectFileChrom, extendFileList, dumpReader
#========================================
# Schema for AStorage
#========================================
_TRASCRIPT_PROPERTIES = [
{"name": "Ensembl_geneid", "tp": "str", "opt": "repeat"},
{"name": "Ensembl_transcriptid", "tp": "str", "opt": "repeat"},
{"name": "Ensembl_proteinid", "tp": "str", "opt": "repeat"},
{"name": "refcodon", "tp": "str", "opt": "repeat"},
{"name": "codonpos", "tp": "str", "opt": "repeat"},
{"name": "FATHMM_score", "tp": "num"},
{"name": "FATHMM_pred", "tp": "str", "opt": "dict"},
{"name": "GENCODE_basic", "tp": "str"},
{"name": "HGVSc_ANNOVAR", "tp": "str"},
{"name": "HGVSp_ANNOVAR", "tp": "str"},
{"name": "HGVSc_snpEff", "tp": "str"},
{"name": "HGVSp_snpEff", "tp": "str"},
{"name": "MPC_score", "tp": "num"},
{"name": "MutationTaster_score", "tp": "num"},
{"name": "MutationAssessor_pred", "tp": "str", "opt": "dict"},
{"name": "Polyphen2_HDIV_score", "tp": "num"},
{"name": "Polyphen2_HDIV_pred", "tp": "str", "opt": "dict"},
{"name": "Polyphen2_HVAR_score", "tp": "num"},
{"name": "Polyphen2_HVAR_pred", "tp": "str", "opt": "dict"},
{"name": "SIFT_score", "tp": "num"},
{"name": "SIFT_pred", "tp": "str", "opt": "dict"},
{"name": "SIFT4G_score", "tp": "num"},
{"name": "SIFT4G_pred", "tp": "str", "opt": "dict"},
{"name": "Uniprot_acc", "tp": "str"}
]
#===============================================
_FACETS_PROPERTIES = [
{"name": "MetaLR_score", "tp": "num"},
{"name": "MetaLR_rankscore", "tp": "num"},
{"name": "MetaLR_pred", "opt": "dict", "tp": "str"},
{"name": "MutPred_score", "tp": "str"},
{"name": "MutPred_rankscore", "tp": "num"},
{"name": "MutPred_protID", "tp": "str"},
{"name": "MutPred_AAchange", "tp": "str"},
{"name": "MutPred_Top5features", "tp": "str"},
{"name": "MPC_rankscore", "tp": "num"},
{"name": "PrimateAI_score", "tp": "num"},
{"name": "PrimateAI_rankscore", "tp": "num"},
{"name": "REVEL_score", "tp": "num"},
{"name": "SIFT4G_converted_rankscore", "tp": "num"},
{
"name": "transcripts", "tp": "list",
"item": {
"tp": "dict", "items": _TRASCRIPT_PROPERTIES
}
}
]
#===============================================
_VARIANT_PROPERTIES = [
{"name": "ALT", "tp": "str", "opt": "gene"},
{"name": "REF", "tp": "str", "opt": "gene"},
{"name": "CADD_raw", "tp": "num"},
{"name": "CADD_phred", "tp": "num"},
{"name": "DANN_score", "tp": "num"},
{"name": "DANN_rankscore", "tp": "num"},
{"name": "Eigen_raw_coding", "tp": "num"},
{"name": "Eigen_raw_coding_rankscore", "tp": "num"},
{"name": "Eigen_phred_coding", "tp": "num"},
{"name": "Eigen_PC_raw_coding", "tp": "num"},
{"name": "Eigen_PC_raw_coding_rankscore", "tp": "num"},
{"name": "Eigen_PC_phred_coding", "tp": "num"},
{"name": "GTEx_V7_gene", "tp": "str", "opt": "repeat"},
{"name": "GTEx_V7_tissue", "tp": "str"},
{"name": "MutationTaster_score", "tp": "str"},
{"name": "MutationTaster_pred", "tp": "str"},
{"name": "PrimateAI_pred", "tp": "str", "opt": "dict"},
{"name": "Geuvadis_eQTL_target_gene", "tp": "str"},
{
"name": "facets",
"tp": "list",
"item": {
"tp": "dict",
"items": _FACETS_PROPERTIES
}
}
]
#===============================================
# Top-level AStorage schema for dbNSFP v4: records are keyed by hg38 position
# and hold a list of variants (see _VARIANT_PROPERTIES) per position.
SCHEMA_DBNSFP_4 = {
    "name": "DBNSFP",
    "key": "hg38",
    "io": {
        # Variants are clustered into pages, at most 50 per block
        "block-type": "page-cluster",
        "max-var-count": 50
    },
    "filter-list": {"ref": "REF", "alt": "ALT"},
    "top": {
        "tp": "list",
        "item": {
            "tp": "dict",
            "items": _VARIANT_PROPERTIES
        }
    }
}
#========================================
# Ingest logic
#========================================
VARIANT_TAB = [
["REF", str],
["ALT", str],
["MutationTaster_score", str],
["MutationTaster_pred", str],
["PrimateAI_pred", str],
["CADD_raw", float],
["CADD_phred", float],
["DANN_score", float],
["DANN_rankscore", float],
["Eigen_raw_coding", float],
["Eigen_raw_coding_rankscore", float],
["Eigen_phred_coding", float],
["Eigen_PC_raw_coding", float],
["Eigen_PC_raw_coding_rankscore", float],
["Eigen_PC_phred_coding", float],
["GTEx_V7_gene", str],
["GTEx_V7_tissue", str],
["Geuvadis_eQTL_target_gene", str]
]
#========================================
FACET_TAB = [
["refcodon", str],
["codonpos", str],
["SIFT4G_converted_rankscore", float],
["MetaLR_score", float],
["MetaLR_rankscore", float],
["MetaLR_pred", str],
["REVEL_score", float],
["MutPred_score", str],
["MutPred_rankscore", float],
["MutPred_protID", str],
["MutPred_AAchange", str],
["MutPred_Top5features", str],
["MPC_rankscore", float],
["PrimateAI_score", float],
["PrimateAI_rankscore", float]
]
#========================================
TRANSCRIPT_TAB = [
["Ensembl_geneid", str],
["Ensembl_transcriptid", str],
["Ensembl_proteinid", str],
["Uniprot_acc", str],
["HGVSc_ANNOVAR", str],
["HGVSp_ANNOVAR", str],
["HGVSc_snpEff", str],
["HGVSp_snpEff", str],
["GENCODE_basic", str],
["SIFT_score", float],
["SIFT_pred", str],
["SIFT4G_score", float],
["SIFT4G_pred", str],
["Polyphen2_HDIV_score", float],
["Polyphen2_HDIV_pred", str],
["Polyphen2_HVAR_score", float],
["Polyphen2_HVAR_pred", str],
["MutationAssessor_score", float],
["MutationAssessor_pred", str],
["FATHMM_score", float],
["FATHMM_pred", str],
["MPC_score", float]
]
ALL_TABS = [VARIANT_TAB, FACET_TAB, TRANSCRIPT_TAB]
#========================================
# Renames applied to raw dbNSFP header names (after '-' -> '_' normalization).
FLD_NAME_MAP = {
    "ref": "REF",
    "alt": "ALT",
    "Eigen_pred_coding": "Eigen_phred_coding"
}

def _normFieldName(name):
    """Map a raw dbNSFP column name onto the canonical name used in the tables."""
    canonical = name.replace('-', '_')
    return FLD_NAME_MAP.get(canonical, canonical)
#========================================
def setupFields(field_line):
    """Bind column indexes from the dbNSFP header line into the field tables.

    Each entry of every table in ALL_TABS gains (or updates) a third element:
    the column index of that field in the data lines. Asserts if any declared
    field is missing from the header.
    """
    assert field_line.startswith('#')
    field_names = field_line[1:].split()
    assert field_names[0].startswith("chr")
    assert field_names[1].startswith("pos")
    fields_idxs = {_normFieldName(name): idx
        for idx, name in enumerate(field_names)}
    missing = 0
    for tab in ALL_TABS:
        for field_info in tab:
            idx = fields_idxs.get(field_info[0])
            if idx is None:
                missing += 1
                logging.error("No field registered: %s" % field_info[0])
                continue
            # Append the index on first setup, overwrite on re-setup.
            if len(field_info) == 2:
                field_info.append(idx)
            else:
                field_info[2] = idx
    if missing > 0:
        logging.info("Available fields:\n=====\n"
            + "\n".join(sorted(fields_idxs.keys())))
    assert missing == 0
#========================================
def iterFields(fields, properties_tab):
    """Yield (name, typed value) pairs for single-valued columns.

    A '.' cell marks a missing value and yields None instead of tp('.').
    """
    for name, tp, idx in properties_tab:
        raw = fields[idx]
        yield (name, None) if raw == '.' else (name, tp(raw))
def iterDeepFields(fields, properties_tab):
    """Yield (name, [typed values]) for ';'-separated multi-value columns.

    Each '.' element in a column becomes None in the resulting list.
    """
    for name, tp, idx in properties_tab:
        vals = [None if chunk == '.' else tp(chunk)
                for chunk in fields[idx].split(';')]
        yield name, vals
#========================================
class DataCollector:
    """Folds consecutive dbNSFP lines into per-position records.

    Input is assumed sorted by (chrom, pos): lines for the same position and
    allele are merged into one record shaped as
    [(chrom, pos), [variant, ...]], where each variant carries a "facets" list
    and each facet a "transcripts" list.
    """

    def __init__(self):
        # Running totals: [variants, facets, transcripts]
        self.mCounts = [0, 0, 0]
        # Record currently being assembled, or None before the first line
        self.mCurRecord = None

    def getCounts(self):
        """Return the running [variant, facet, transcript] counters."""
        return self.mCounts

    def ingestLine(self, line):
        """Fold one raw tab-separated dbNSFP line into the current record.

        Returns the previously completed record when this line starts a new
        (chrom, pos) position, otherwise None.
        """
        global VARIANT_TAB, FACET_TAB, TRANSCRIPT_TAB
        if line.endswith('\n'):
            line = line[:-1]
        fields = line.split('\t')
        chrom = "chr" + str(fields[0])
        pos = int(fields[1])
        new_record = False
        if self.mCurRecord is None or (chrom, pos) != self.mCurRecord[0]:
            new_record = True
        new_variant = new_record
        var_data = dict()
        for name, val in iterFields(fields, VARIANT_TAB):
            var_data[name] = val
            # Any changed variant-level value means a new allele at this position
            if not new_variant and val != self.mCurRecord[1][-1][name]:
                new_variant = True
        facet_data = {name: val
            for name, val in iterFields(fields, FACET_TAB)}
        tr_data_seq = None
        # Transcript columns are parallel ';'-lists; zip them into per-transcript dicts
        for name, val_seq in iterDeepFields(fields, TRANSCRIPT_TAB):
            if tr_data_seq is None:
                tr_data_seq = [{name: val} for val in val_seq]
            else:
                for idx, val in enumerate(val_seq):
                    tr_data_seq[idx][name] = val
        if tr_data_seq is None:
            tr_data_seq = []
        facet_data["transcripts"] = tr_data_seq
        self.mCounts[2] += len(tr_data_seq)
        self.mCounts[1] += 1
        ret = None
        if new_record:
            self.mCounts[0] += 1
            var_data["facets"] = [facet_data]
            # Hand back the finished record and start a fresh one
            ret, self.mCurRecord = self.mCurRecord, [(chrom, pos), [var_data]]
        elif new_variant:
            self.mCounts[0] += 1
            var_data["facets"] = [facet_data]
            self.mCurRecord[1].append(var_data)
        else:
            # Same position and allele: just accumulate another facet
            self.mCurRecord[1][-1]["facets"].append(facet_data)
        return ret

    def finishUp(self):
        """Return the last, still-open record (None if nothing was ingested)."""
        return self.mCurRecord
#========================================
#========================================
class ReaderDBNSFP4:
    """Streams grouped variant records out of gzipped dbNSFP v4 chrom files."""
    def __init__(self, file_list, chrom_loc = "chr"):
        # extendFileList / detectFileChrom are helpers defined elsewhere
        # in this package.
        self.mFiles = extendFileList(file_list)
        self.mChromLoc = chrom_loc
    def read(self):
        """Generator yielding one grouped record at a time across all files.

        The first line of each file is treated as the header and used to
        (re)configure the column layout via setupFields().
        """
        # NOTE(review): this counter accumulates across *all* files, yet is
        # reported inside the per-file log line below — confirm intent.
        exceptions = 0
        for chrom_file in self.mFiles:
            chrom = detectFileChrom(chrom_file, self.mChromLoc)
            logging.info("Evaluation of %s in %s" % (chrom, chrom_file))
            with gzip.open(chrom_file, 'rt') as text_inp:
                time_rep = TimeReport("chr" + chrom)
                collector = DataCollector()
                for line_no, line in enumerate(text_inp):
                    if line_no == 0:
                        setupFields(line)
                        continue
                    try:
                        info = collector.ingestLine(line)
                        if info is not None:
                            yield info
                        # Periodic progress report every 10k lines.
                        if (line_no % 10000) == 0:
                            total_var, _, _ = collector.getCounts()
                            time_rep.portion(total_var)
                    except IndexError:
                        # Malformed/short line: count it and keep going.
                        exceptions += 1
                # Flush the final, still-open record for this file.
                info = collector.finishUp()
                if info:
                    yield info
                total_var, total_facets, total_tr = collector.getCounts()
                time_rep.done(total_var)
                logging.info("transcripts: %d, facets: %d, exceptions: %d"
                    % (total_tr, total_facets, exceptions))
#========================================
def reader_dbNSFP4(properties, schema_h = None):
    """Factory: build a ReaderDBNSFP4 from an ingest properties dict.

    schema_h is accepted for interface compatibility but unused.
    """
    file_list = properties["file_list"]
    chrom_loc = properties.get("chrom_loc", "chr")
    return ReaderDBNSFP4(file_list, chrom_loc)
#========================================
if __name__ == '__main__':
    # Manual smoke test: dump the records parsed from the file list given
    # as argv[1] (dumpReader is defined elsewhere in this package).
    logging.root.setLevel(logging.INFO)
    reader = reader_dbNSFP4({"file_list": sys.argv[1]})
    dumpReader(reader)
| a_storage/ingest/in_dbnsfp4.py | 12,656 | ======================================== Schema for AStorage============================================================================================================================================================================================================================= Ingest logic======================================================================================================================================================================================================================================================================================================================================================================================================================================================== | 734 | en | 0.292612 |
# Source: https://gist.github.com/redknightlois/c4023d393eb8f92bb44b2ab582d7ec20
from torch.optim.optimizer import Optimizer
import torch
import math
class Ralamb(Optimizer):
    """RAdam (rectified Adam) combined with LARS-style layer-wise trust ratios.

    Adapted from https://gist.github.com/redknightlois/c4023d393eb8f92bb44b2ab582d7ec20
    Updated to the keyword-argument forms of ``add_``/``addcmul_``/``addcdiv_``:
    the legacy positional ``(Number, Tensor)`` overloads used by the original
    were removed from PyTorch and now raise.
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-4):
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        # Cache of (step, N_sma, step_size) shared by all params at the same
        # step count; keyed by step % 10 so only 10 slots are needed.
        self.buffer = [[None, None, None] for _ in range(10)]
        super(Ralamb, self).__init__(params, defaults)

    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure: optional callable that re-evaluates the model and
                returns the loss; its return value is passed through.

        Raises:
            RuntimeError: if any parameter has a sparse gradient.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError('Ralamb does not support sparse gradients')

                p_data_fp32 = p.data.float()

                state = self.state[p]
                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']

                # Decay the first and second moment running averages.
                # m_t
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                # v_t
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)

                state['step'] += 1
                buffered = self.buffer[int(state['step'] % 10)]

                if state['step'] == buffered[0]:
                    N_sma, radam_step_size = buffered[1], buffered[2]
                else:
                    buffered[0] = state['step']
                    beta2_t = beta2 ** state['step']
                    N_sma_max = 2 / (1 - beta2) - 1
                    N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
                    buffered[1] = N_sma

                    # Variance rectification term; conservative because
                    # N_sma is an approximation (see the RAdam paper).
                    if N_sma >= 5:
                        radam_step_size = group['lr'] * math.sqrt(
                            (1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4)
                            * (N_sma - 2) / N_sma
                            * N_sma_max / (N_sma_max - 2)
                        ) / (1 - beta1 ** state['step'])
                    else:
                        # Too few samples to rectify: fall back to SGD-with-momentum
                        # style step with plain bias correction.
                        radam_step_size = group['lr'] / (1 - beta1 ** state['step'])
                    buffered[2] = radam_step_size

                if group['weight_decay'] != 0:
                    # Decoupled weight decay: x <- x * (1 - wd * lr),
                    # algebraically identical to the original add_ form.
                    p_data_fp32.mul_(1 - group['weight_decay'] * group['lr'])

                # Proposed RAdam step, used only to compute the trust ratio.
                radam_step = p_data_fp32.clone()
                if N_sma >= 5:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                    radam_step.addcdiv_(exp_avg, denom, value=-radam_step_size)
                else:
                    radam_step.add_(exp_avg, alpha=-radam_step_size)

                radam_norm = radam_step.pow(2).sum().sqrt()
                weight_norm = p.data.pow(2).sum().sqrt().clamp(0, 10)
                # LARS trust ratio; 1 when either norm degenerates to zero.
                if weight_norm == 0 or radam_norm == 0:
                    trust_ratio = 1.0
                else:
                    # .item(): addcdiv_/add_ below require a Python number,
                    # not a 0-dim tensor, for value=/alpha=.
                    trust_ratio = (weight_norm / radam_norm).item()

                state['weight_norm'] = weight_norm
                state['adam_norm'] = radam_norm
                state['trust_ratio'] = trust_ratio

                if N_sma >= 5:
                    p_data_fp32.addcdiv_(exp_avg, denom, value=-radam_step_size * trust_ratio)
                else:
                    p_data_fp32.add_(exp_avg, alpha=-radam_step_size * trust_ratio)

                p.data.copy_(p_data_fp32)

        return loss
import gc
import hashlib
import json
import os
import shlex
import sys
from operator import *

import numpy as np
from pyspark import StorageLevel
from pyspark.sql import SQLContext
from pyspark.sql.functions import *
from pyspark.sql.types import *

from subjectivity_clues import clues
def expect(name, var, expected, op=eq):
    """Checkpoint assertion: require op(var, expected), logging the outcome.

    Raises Exception(name) when the expectation fails.
    """
    # NOTE(review): both log lines print the *expected* value, not the
    # actual one -- preserved as-is.
    if not op(var, expected):
        log('[error] {} = {}'.format(name, expected))
        raise Exception(name)
    log('[checkpoint] {} = {}'.format(name, expected))
def log(message):
    """Append *message* to the run log file and echo it to stdout."""
    log_file = 'sample_subjectivity_tweets.log'
    # 'with' already flushes and closes the file on exit; the original
    # explicit flush()/close() calls inside the block were redundant.
    with open(log_file, 'a') as f:
        f.write(message)
        f.write('\n')
    # Parenthesized form prints identically under Python 2 and 3
    # (the original `print message` is a SyntaxError on Python 3).
    print(message)
def to_json(name, jsons):
    """Write each JSON string in *jsons* to '<name>.json', one per line."""
    with open('{}.json'.format(name), 'w') as out:
        out.writelines(doc + '\n' for doc in jsons)
def to_csv(name, jsons):
    """Write tweets to '<name>.csv' as: "id",verb,postedTime,"body".

    Newlines in the body become spaces; embedded quotes are CSV-doubled.
    """
    with open('{}.csv'.format(name), 'w') as out:
        for raw in jsons:
            tweet = json.loads(raw)
            body = tweet['body'].replace('\n', ' ').replace('\r', '').replace('"', '""')
            out.write('"{}",{},{},"{}"\n'.format(
                tweet['id'], tweet['verb'], tweet['postedTime'], body))
def sample(rdd, size, seed):
    """Draw *size* distinct items from *rdd* with a seeded NumPy RNG."""
    pool = rdd.collect()
    rng = np.random.RandomState(seed)
    chosen = rng.choice(pool, size=size, replace=False)
    # Sanity checkpoint: replace=False must yield all-distinct items.
    expect('sampled', len(set(chosen)), size)
    return chosen.tolist()
def sha(name, ext='json'):
    """Return the SHA-1 hex digest of '<name>.<ext>', read in 64 KiB chunks."""
    digest = hashlib.sha1()
    with open('{}.{}'.format(name, ext), 'rb') as f:
        for chunk in iter(lambda: f.read(65536), b''):
            digest.update(chunk)
    return digest.hexdigest()
def read_and_parse_clues():
    """Parse the MPQA subjectivity lexicon into a {word: clue-dict} mapping.

    Each lexicon line is a series of key=value tokens; the 'word1' value
    keys the entry. Blank lines are skipped (the original crashed on them).

    Note: requires 'import os' at module level -- the original used os
    without ever importing it (NameError at call time).
    """
    path = os.path.join(os.getcwd(), 'subjectivity_clues',
                        'subjclueslen1-HLTEMNLP05.tff')
    # Local name 'lexicon' (the original local 'clues' shadowed the
    # imported subjectivity_clues.clues module).
    lexicon = dict()
    with open(path, 'r') as f:
        for line in f:
            if not line.strip():
                continue
            clue = dict(token.split('=') for token in shlex.split(line))
            lexicon[clue['word1']] = clue
    return lexicon
def calculate_relevant(lexicons, sentence):
    """Score *sentence* by summing signed subjectivity weights of its words.

    Polarity sign (positive=+1, negative=-1, both/neutral=0) is scaled by
    clue strength (strongsubj=2, weaksubj=1). Words absent from the
    lexicon contribute 0.
    """
    PRIORPOLARITY = {
        'positive': 1,
        'negative': -1,
        'both': 0,
        'neutral': 0
    }
    TYPE = {
        'strongsubj': 2,
        'weaksubj': 1
    }
    total_score = 0
    for w in sentence.split(' '):
        # Direct dict lookup; the original `w not in lexicons.keys()` built
        # a list and scanned it per word under Python 2 (O(n) per token).
        clue = lexicons.get(w)
        if clue is None:
            continue
        total_score += PRIORPOLARITY[clue['priorpolarity']] * TYPE[clue['type']]
    return total_score
# Make sure Python uses UTF-8 as tweets contains emoticon and unicode
# NOTE: reload(sys)/setdefaultencoding exist only in Python 2 -- this whole
# script is Python 2 code.
reload(sys)
sys.setdefaultencoding('utf-8')
# Use SQLContext for better support
# NOTE(review): `sc` (SparkContext) is never defined in this file --
# presumably injected by the pyspark shell / spark-submit driver; confirm.
sqlContext = SQLContext(sc)
# Define storage level
# StorageLevel flags: (useDisk, useMemory, useOffHeap, deserialized, replication)
DISK_ONLY_2 = StorageLevel(True, False, False, False, 2)
MEMORY_AND_DISK = StorageLevel(True, True, False, False, 1)
# Read GNIP's JSON file
directory = "tweets"
datasets = sqlContext.read.json(directory)
log('# Completed reading JSON files')
# Check checksum count
# The expect() counts below are checkpoints pinned to one specific dataset
# snapshot; they will fail on any other input.
file_count = datasets.where(datasets['verb'].isNull()).count()
expect('file_count', file_count, 21888)
# Check post count
all_posts = datasets.where(datasets['verb'] == 'post')
all_posts_count = all_posts.count()
expect('all_posts_count', all_posts_count, 1570398)
# Check share count
all_shares = datasets.where(datasets['verb'] == 'share')
all_shares_count = all_shares.count()
expect('all_shares_count', all_shares_count, 1112590)
# Check dataset count
info_dataset = datasets.select('info')
info_dataset.registerTempTable('info')
all_tweets_count = info_dataset.select('info.activity_count').groupBy().sum('activity_count').collect()[0][0]
expect('all_tweets_count', all_tweets_count, 2682988)
expect('all_tweets_count', all_tweets_count, all_posts_count + all_shares_count)
log('# Completed validating tweets count')
# Remove post authored by @ChipotleTweet and news agencies
chipotle_tweet = 'id:twitter.com:141341662'
users_to_remove = [chipotle_tweet, 'id:twitter.com:759251', 'id:twitter.com:91478624', 'id:twitter.com:28785486',
                   'id:twitter.com:1652541', 'id:twitter.com:51241574', 'id:twitter.com:807095',
                   'id:twitter.com:34713362', 'id:twitter.com:3090733766', 'id:twitter.com:1367531',
                   'id:twitter.com:14293310', 'id:twitter.com:3108351', 'id:twitter.com:14173315',
                   'id:twitter.com:292777349', 'id:twitter.com:428333', 'id:twitter.com:624413',
                   'id:twitter.com:20562637', 'id:twitter.com:13918492', 'id:twitter.com:16184358',
                   'id:twitter.com:625697849', 'id:twitter.com:2467791', 'id:twitter.com:9763482',
                   'id:twitter.com:14511951', 'id:twitter.com:6017542', 'id:twitter.com:26574283',
                   'id:twitter.com:115754870']
all_posts_wo_specific_users = all_posts.where(~ col('actor.id').isin(users_to_remove))
all_posts_w_specific_users = all_posts.where(col('actor.id').isin(users_to_remove)).count()
expect('all_posts_wo_specific_users', all_posts_wo_specific_users.count(), all_posts_count - all_posts_w_specific_users)
# Remove share retweet of tweet by @ChipotleTweet and news agencies
all_shares_wo_specific_users = all_shares.where(~ col('object.actor.id').isin(users_to_remove))
all_shares_w_specific_users = all_shares.where(col('object.actor.id').isin(users_to_remove)).count()
expect('all_shares_wo_specific_users', all_shares_wo_specific_users.count(), all_shares_count - all_shares_w_specific_users)
# Generate tweets pool with only English tweet
tweets_pool = all_posts_wo_specific_users.unionAll(all_shares_wo_specific_users).filter("twitter_lang = 'en'")
tweets_pool.persist(MEMORY_AND_DISK)
tweets_pool_count = tweets_pool.count()
# Adding all post to all share will be greater than tweet pool because of non-English tweet
expected_tweets_pool_count = all_posts_count - all_posts_w_specific_users + \
                             all_shares_count - all_shares_w_specific_users
expect('tweets_pool_count', tweets_pool_count, expected_tweets_pool_count, op=lt)
log('# Completed constructing tweets pool')
# Check language of tweets
languages = tweets_pool.select('twitter_lang').distinct()
languages_count = languages.count()
language_check = languages.first()['twitter_lang']
expect('languages_count', languages_count, 1)
expect('language_check', language_check, 'en')
log('# Completed validating language variety')
# Take top 80% of tweets by length
tweets_pool_str_lengths = tweets_pool.select(length('body').alias('length')).rdd.map(lambda x: x.length).collect()
lengths_np = np.array(tweets_pool_str_lengths)
p = np.percentile(lengths_np, 20)
final_tweets_pool = tweets_pool.filter(length('body') >= p)
final_tweets_pool.persist(MEMORY_AND_DISK)
tweets_pool.unpersist(blocking=True)
final_tweets_pool_count = final_tweets_pool.count()
percentage_kept = float(final_tweets_pool_count) / tweets_pool_count
expect('percentage_kept', percentage_kept, 0.8, op=gt)
log('# Completed sampling top 80% of tweets by body length')
# Sampling
final_tweets_ids = final_tweets_pool.select(final_tweets_pool['id']).rdd.sortBy(lambda x: x.id).map(lambda x: x.id)
# Development tweets
dev_seed = 10102016
number_of_dev_samples = 3000
dev_posts = sample(final_tweets_ids, number_of_dev_samples, dev_seed)
dev_posts_count = len(dev_posts)
expect('dev_posts_count', dev_posts_count, number_of_dev_samples)
log('# Completed sampling dev tweets')
dev_posts_file = "dev_posts"
dev_posts_jsons = final_tweets_pool[final_tweets_pool['id'].isin(dev_posts)].toJSON().collect()
to_json(dev_posts_file, dev_posts_jsons)
to_csv(dev_posts_file, dev_posts_jsons)
# SHA-1 checkpoints pin the exported files byte-for-byte for reproducibility.
expect('dev_posts_file', sha(dev_posts_file), '74447296831c8e3061fc0ee739f549c5b08b85a3')
expect('dev_posts_file', sha(dev_posts_file, ext='csv'), '6acfd1f8d238bc5d25d97d2c9e6f6b177699389a')
log('Exporting dev post to {}'.format(dev_posts_file))
log('# Completed exporting dev tweets')
del dev_posts_jsons
gc.collect()
# Find distinct set of tweets (unique body text)
post_pool = final_tweets_pool.where(final_tweets_pool['verb'] == 'post')
post_pool.persist(MEMORY_AND_DISK)
post_pool_ids = post_pool.select(post_pool['id']).rdd.sortBy(lambda x: x.id).map(lambda x: x.id).collect()
expect('post_pool', post_pool.count(), 1124935)
share_pool = final_tweets_pool.where(final_tweets_pool['verb'] == 'share')
share_pool.persist(MEMORY_AND_DISK)
expect('share_pool', share_pool.count(), 846141)
broadcast_post_ids = sc.broadcast(set(post_pool_ids))
unique_share_ids = share_pool.select(share_pool['id'], share_pool['object.id'].alias('object_id')).rdd.filter(lambda row: row['object_id'] not in broadcast_post_ids.value).map(lambda row: row.id).collect()
expect('unique_share_pool', len(unique_share_ids), 193006)
log('# Completed finding unique share tweet')
# Constructing distinct tweet pool
broadcast_unique_share_ids = sc.broadcast(unique_share_ids)
distinct_tweets_pool = final_tweets_pool.\
    select(final_tweets_pool['id'], final_tweets_pool['body']).\
    rdd.\
    filter(lambda row: row['id'] in broadcast_post_ids.value or row['id'] in broadcast_unique_share_ids.value)
distinct_tweets_pool.persist(MEMORY_AND_DISK)
distinct_tweets_count = distinct_tweets_pool.count()
expect('distinct_tweets_pool', distinct_tweets_count, 1124935 + 193006)
# Exclude development tweets
tweets_unsampled = distinct_tweets_pool.toDF().where(~ col('id').isin(dev_posts))
tweets_unsampled.persist(MEMORY_AND_DISK)
tweets_unsampled_count = tweets_unsampled.count()
# no. of dev intersect post pool: 1718, no. of share dev intersect unique share pool: 293
expect('tweets_unsampled', tweets_unsampled_count, 1124935 + 193006 - 1718 - 293)
log('# Completed constructing unsampled tweets')
# Calculate subjectivity
lexicons = read_and_parse_clues()
udfBodyToRelevant = udf(lambda body: calculate_relevant(lexicons, body), IntegerType())
tweets_lexicon = tweets_unsampled.withColumn('score', udfBodyToRelevant('body'))
tweets_lexicon.persist(MEMORY_AND_DISK)
log('# Completed constructing tweet lexicon')
# Take top and bottom
number_of_tweets_each = 1500
positive_tweets = tweets_lexicon.orderBy(desc('score')).take(number_of_tweets_each)
negative_tweets = tweets_lexicon.orderBy(asc('score')).take(number_of_tweets_each)
# Cut top and bottom via score for more deterministic sampling
min_positive_score = positive_tweets[-1]['score']
min_negative_score = negative_tweets[-1]['score']
expect('min_positive_score', min_positive_score, 7)
expect('min_negative_score', min_negative_score, -5)
positive_tweets = tweets_lexicon.filter('score > {}'.format(min_positive_score - 1)).orderBy(desc('score')).collect()
expect('positive_tweets', len(positive_tweets), 2012)
negative_tweets = tweets_lexicon.filter('score < {}'.format(min_negative_score + 1)).orderBy(asc('score')).collect()
# NOTE(review): checkpoint label says 'positive_tweets' but it checks the
# negative set -- label typo (count is still validated correctly).
expect('positive_tweets', len(negative_tweets), 1715)
positive_tweet_file = "positive_tweets"
# py2 map() returns a list; under py3 this would need list(map(...)).
positive_tweets_ids = map(lambda t: t['id'], positive_tweets)
positive_tweet_jsons = final_tweets_pool[final_tweets_pool['id'].isin(positive_tweets_ids)].toJSON().collect()
to_json(positive_tweet_file, positive_tweet_jsons)
to_csv(positive_tweet_file, positive_tweet_jsons)
log('Exporting positive tweets to {}'.format(positive_tweet_file))
log('# Completed exporting positive tweets')
expect('positive_tweet_file', sha(positive_tweet_file), 'cb2f8b691ccf3eae9846c67735f413a49befea28')
expect('positive_tweet_file', sha(positive_tweet_file, ext='csv'), 'd3d43ab4e03fdf106b9191f4e0161cfcde3f040e')
negative_tweet_file = "negative_tweets"
negative_tweet_ids = map(lambda t: t['id'], negative_tweets)
negative_tweet_jsons = final_tweets_pool[final_tweets_pool['id'].isin(negative_tweet_ids)].toJSON().collect()
to_json(negative_tweet_file, negative_tweet_jsons)
to_csv(negative_tweet_file, negative_tweet_jsons)
log('Exporting negative tweets to {}'.format(negative_tweet_file))
log('# Completed exporting negative tweets')
expect('negative_tweet_file', sha(negative_tweet_file), '086c43427078092e538a779b8b06a71341b8da48')
expect('negative_tweet_file', sha(negative_tweet_file, ext='csv'), 'd10a1a95156c28d844e9c4e668d766963c0636a4')
| step_2/scripts/sample_subjectivity_tweets.py | 12,231 | Make sure Python uses UTF-8 as tweets contains emoticon and unicode Use SQLContext for better support Define storage level Read GNIP's JSON file Check checksum count Check post count Check share count Check dataset count Remove post authored by @ChipotleTweet and news agencies Remove share retweet of tweet by @ChipotleTweet and news agencies Generate tweets pool with only English tweet Adding all post to all share will be greater than tweet pool because of non-English tweet Check language of tweets Take top 80% of tweets by length Sampling Development tweets Find distinct set of tweets (unique body text) Constructing distinct tweet pool Exclude development tweets no. of dev intersect post pool: 1718, no. of share dev intersect unique share pool: 293 Calculate subjectivity Take top and bottom Cut top and bottom via score for more deterministic sampling | 863 | en | 0.750845 |
import os
import sys
from setuptools import find_packages, setup
# True when building docs on ReadTheDocs (where python-ldap cannot build).
IS_RTD = os.environ.get("READTHEDOCS", None)
version = "0.4.0b14.dev0"
long_description = open(os.path.join(os.path.dirname(__file__), "README.rst")).read()
# Runtime dependencies. The previous list contained many exact duplicates
# (celery x3, sqlsoup x2, and duplicated gunicorn, itsdangerous, pyyaml,
# passlib, jsonschema, more.transaction, zope.sqlalchemy, python-dateutil,
# more.cors); duplicates removed, first-occurrence order preserved.
install_requires = [
    "morepath==0.19",
    "alembic",
    "rulez>=0.1.4,<0.2.0",
    # was "inverter>=0.1.0<0.2.0" -- the missing comma made the version
    # specifier invalid (PEP 508); fixed to the obviously intended range.
    "inverter>=0.1.0,<0.2.0",
    "more.cors",
    "celery",
    "redis",
    "jsl",
    "pyyaml>=4.2b1",
    "more.jsonschema",
    "sqlalchemy",
    "sqlalchemy_utils",
    "more.signals",
    "DateTime",
    "transitions",
    "jsonpath_ng",
    "python-dateutil",
    "more.jwtauth",
    "more.itsdangerous",
    "sqlsoup",
    "gunicorn",
    "itsdangerous",
    "passlib",
    "jsonschema",
    "more.transaction",
    "zope.sqlalchemy",
    "more.basicauth",
    "cryptography",
    "elasticsearch>7.0.0,<8.0.0",
    "pamela",
    "click",
    "cookiecutter",
    "eventlet",
    "wsgigzip",
    "psycopg2",
    "colander",
    "deform",
    "more.chameleon",
    "more.static",
    "RestrictedPython",
    "beaker",
    "zstandard",
    "oauthlib[signedtoken]",
    "requests-oauthlib",
]
if IS_RTD is None:
    install_requires.append("python-ldap")
setup(
    name="morpfw",
    version=version,
    description="Web framework based on morepath",
    long_description=long_description,
    classifiers=[],  # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
    keywords="",
    author="Izhar Firdaus",
    author_email="izhar@kagesenshi.org",
    url="http://github.com/morpframework/morpfw",
    license="Apache-2.0",
    packages=find_packages(exclude=["ez_setup", "examples", "tests"]),
    include_package_data=True,
    zip_safe=False,
    install_requires=install_requires,
    extras_require={
        "test": [
            "nose",
            "webtest",
            "pytest",
            "pytest-html",
            "pytest_postgresql",
            "pytest_rabbitmq",
            "pytest-annotate",
            "pytest-cov",
            "pika",
            "mirakuru",
        ],
        "docs": ["sphinxcontrib-httpdomain", "sphinx-click"],
    },
    entry_points={
        "morepath": ["scan=morpfw"],
        "console_scripts": [
            "morpfw=morpfw.cli.main:main",
            "mfw-runmodule=morpfw.cli:run_module",
            "mfw-profilemodule=morpfw.cli:run_module_profile",
        ],
    },
)
| setup.py | 2,620 | Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers | 71 | en | 0.40275 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class UpdateHistoryProperty(Model):
    """An update history of the ImmutabilityPolicy of a blob container.
    Variables are only populated by the server, and will be ignored when
    sending a request.
    :ivar update: The ImmutabilityPolicy update type of a blob container,
     possible values include: put, lock and extend. Possible values include:
     'put', 'lock', 'extend'
    :vartype update: str or
     ~azure.mgmt.storage.v2018_11_01.models.ImmutabilityPolicyUpdateType
    :ivar immutability_period_since_creation_in_days: The immutability period
     for the blobs in the container since the policy creation, in days.
    :vartype immutability_period_since_creation_in_days: int
    :ivar timestamp: Returns the date and time the ImmutabilityPolicy was
     updated.
    :vartype timestamp: datetime
    :ivar object_identifier: Returns the Object ID of the user who updated the
     ImmutabilityPolicy.
    :vartype object_identifier: str
    :ivar tenant_id: Returns the Tenant ID that issued the token for the user
     who updated the ImmutabilityPolicy.
    :vartype tenant_id: str
    :ivar upn: Returns the User Principal Name of the user who updated the
     ImmutabilityPolicy.
    :vartype upn: str
    """

    # All properties are read-only: populated by the service during
    # deserialization and rejected by msrest if supplied by the client.
    _validation = {
        'update': {'readonly': True},
        'immutability_period_since_creation_in_days': {'readonly': True},
        'timestamp': {'readonly': True},
        'object_identifier': {'readonly': True},
        'tenant_id': {'readonly': True},
        'upn': {'readonly': True},
    }

    # Maps Python attribute names to REST API wire keys and msrest types.
    _attribute_map = {
        'update': {'key': 'update', 'type': 'str'},
        'immutability_period_since_creation_in_days': {'key': 'immutabilityPeriodSinceCreationInDays', 'type': 'int'},
        'timestamp': {'key': 'timestamp', 'type': 'iso-8601'},
        'object_identifier': {'key': 'objectIdentifier', 'type': 'str'},
        'tenant_id': {'key': 'tenantId', 'type': 'str'},
        'upn': {'key': 'upn', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        # AutoRest-generated model: server-populated fields start as None
        # and are filled in by the deserializer.
        super(UpdateHistoryProperty, self).__init__(**kwargs)
        self.update = None
        self.immutability_period_since_creation_in_days = None
        self.timestamp = None
        self.object_identifier = None
        self.tenant_id = None
        self.upn = None
| sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2018_11_01/models/update_history_property.py | 2,805 | An update history of the ImmutabilityPolicy of a blob container.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar update: The ImmutabilityPolicy update type of a blob container,
possible values include: put, lock and extend. Possible values include:
'put', 'lock', 'extend'
:vartype update: str or
~azure.mgmt.storage.v2018_11_01.models.ImmutabilityPolicyUpdateType
:ivar immutability_period_since_creation_in_days: The immutability period
for the blobs in the container since the policy creation, in days.
:vartype immutability_period_since_creation_in_days: int
:ivar timestamp: Returns the date and time the ImmutabilityPolicy was
updated.
:vartype timestamp: datetime
:ivar object_identifier: Returns the Object ID of the user who updated the
ImmutabilityPolicy.
:vartype object_identifier: str
:ivar tenant_id: Returns the Tenant ID that issued the token for the user
who updated the ImmutabilityPolicy.
:vartype tenant_id: str
:ivar upn: Returns the User Principal Name of the user who updated the
ImmutabilityPolicy.
:vartype upn: str
coding=utf-8 -------------------------------------------------------------------------- Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. See License.txt in the project root for license information. Code generated by Microsoft (R) AutoRest Code Generator. Changes may cause incorrect behavior and will be lost if the code is regenerated. -------------------------------------------------------------------------- | 1,551 | en | 0.650908 |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads/v6/resources/paid_organic_search_term_view.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
from google.api import resource_pb2 as google_dot_api_dot_resource__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads/v6/resources/paid_organic_search_term_view.proto',
package='google.ads.googleads.v6.resources',
syntax='proto3',
serialized_options=b'\n%com.google.ads.googleads.v6.resourcesB\036PaidOrganicSearchTermViewProtoP\001ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v6/resources;resources\242\002\003GAA\252\002!Google.Ads.GoogleAds.V6.Resources\312\002!Google\\Ads\\GoogleAds\\V6\\Resources\352\002%Google::Ads::GoogleAds::V6::Resources',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\nEgoogle/ads/googleads/v6/resources/paid_organic_search_term_view.proto\x12!google.ads.googleads.v6.resources\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x1cgoogle/api/annotations.proto\"\xbd\x02\n\x19PaidOrganicSearchTermView\x12Q\n\rresource_name\x18\x01 \x01(\tB:\xe0\x41\x03\xfa\x41\x34\n2googleads.googleapis.com/PaidOrganicSearchTermView\x12\x1d\n\x0bsearch_term\x18\x03 \x01(\tB\x03\xe0\x41\x03H\x00\x88\x01\x01:\x9d\x01\xea\x41\x99\x01\n2googleads.googleapis.com/PaidOrganicSearchTermView\x12\x63\x63ustomers/{customer_id}/paidOrganicSearchTermViews/{campaign_id}~{ad_group_id}~{base64_search_term}B\x0e\n\x0c_search_termB\x8b\x02\n%com.google.ads.googleads.v6.resourcesB\x1ePaidOrganicSearchTermViewProtoP\x01ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v6/resources;resources\xa2\x02\x03GAA\xaa\x02!Google.Ads.GoogleAds.V6.Resources\xca\x02!Google\\Ads\\GoogleAds\\V6\\Resources\xea\x02%Google::Ads::GoogleAds::V6::Resourcesb\x06proto3'
,
dependencies=[google_dot_api_dot_field__behavior__pb2.DESCRIPTOR,google_dot_api_dot_resource__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_PAIDORGANICSEARCHTERMVIEW = _descriptor.Descriptor(
name='PaidOrganicSearchTermView',
full_name='google.ads.googleads.v6.resources.PaidOrganicSearchTermView',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v6.resources.PaidOrganicSearchTermView.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003\372A4\n2googleads.googleapis.com/PaidOrganicSearchTermView', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='search_term', full_name='google.ads.googleads.v6.resources.PaidOrganicSearchTermView.search_term', index=1,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'\352A\231\001\n2googleads.googleapis.com/PaidOrganicSearchTermView\022ccustomers/{customer_id}/paidOrganicSearchTermViews/{campaign_id}~{ad_group_id}~{base64_search_term}',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='_search_term', full_name='google.ads.googleads.v6.resources.PaidOrganicSearchTermView._search_term',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=199,
serialized_end=516,
)
_PAIDORGANICSEARCHTERMVIEW.oneofs_by_name['_search_term'].fields.append(
_PAIDORGANICSEARCHTERMVIEW.fields_by_name['search_term'])
_PAIDORGANICSEARCHTERMVIEW.fields_by_name['search_term'].containing_oneof = _PAIDORGANICSEARCHTERMVIEW.oneofs_by_name['_search_term']
DESCRIPTOR.message_types_by_name['PaidOrganicSearchTermView'] = _PAIDORGANICSEARCHTERMVIEW
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
PaidOrganicSearchTermView = _reflection.GeneratedProtocolMessageType('PaidOrganicSearchTermView', (_message.Message,), {
'DESCRIPTOR' : _PAIDORGANICSEARCHTERMVIEW,
'__module__' : 'google.ads.googleads.v6.resources.paid_organic_search_term_view_pb2'
# @@protoc_insertion_point(class_scope:google.ads.googleads.v6.resources.PaidOrganicSearchTermView)
})
_sym_db.RegisterMessage(PaidOrganicSearchTermView)
DESCRIPTOR._options = None
_PAIDORGANICSEARCHTERMVIEW.fields_by_name['resource_name']._options = None
_PAIDORGANICSEARCHTERMVIEW.fields_by_name['search_term']._options = None
_PAIDORGANICSEARCHTERMVIEW._options = None
# @@protoc_insertion_point(module_scope)
| google/ads/google_ads/v6/proto/resources/paid_organic_search_term_view_pb2.py | 5,490 | Generated protocol buffer code.
-*- coding: utf-8 -*- Generated by the protocol buffer compiler. DO NOT EDIT! source: google/ads/googleads/v6/resources/paid_organic_search_term_view.proto @@protoc_insertion_point(imports) @@protoc_insertion_point(class_scope:google.ads.googleads.v6.resources.PaidOrganicSearchTermView) @@protoc_insertion_point(module_scope) | 361 | en | 0.580453 |
from itertools import product
from json import dumps
import logging
import nox # noqa
from pathlib import Path # noqa
import sys
# add parent folder to python path so that we can import noxfile_utils.py
# note that you need to "pip install -r noxfile-requiterements.txt" for this file to work.
sys.path.append(str(Path(__file__).parent / "ci_tools"))
from nox_utils import PY27, PY37, PY36, PY35, PY38, PY39, power_session, rm_folder, rm_file, PowerSession # noqa
# Project identity used by the sessions below (badges, GitHub pages, etc.).
pkg_name = "genbadge"
gh_org = "smarie"
gh_repo = "python-genbadge"
# One entry per python version: whether to collect coverage in that env and
# which pinned package versions to install.
ENVS = {
    PY39: {"coverage": False, "pkg_specs": {"pip": ">19"}},
    PY27: {"coverage": False, "pkg_specs": {"pip": ">10"}},
    PY35: {"coverage": False, "pkg_specs": {"pip": ">10"}},
    PY36: {"coverage": False, "pkg_specs": {"pip": ">19"}},
    PY38: {"coverage": False, "pkg_specs": {"pip": ">19"}},
    # IMPORTANT: this should be last so that the folder docs/reports is not deleted afterwards
    PY37: {"coverage": True, "pkg_specs": {"pip": ">19"}}, # , "pytest-html": "1.9.0"
}
# set the default activated sessions, minimal for CI
nox.options.sessions = ["tests", "flake8"] # , "docs", "gh_pages"
nox.options.reuse_existing_virtualenvs = True # this can be done using -r
# if platform.system() == "Windows": >> always use this for better control
nox.options.default_venv_backend = "conda"
# os.environ["NO_COLOR"] = "True" # nox.options.nocolor = True does not work
# nox.options.verbose = True
nox_logger = logging.getLogger("nox")
# nox_logger.setLevel(logging.INFO) NO !!!! this prevents the "verbose" nox flag to work !
class Folders:
    """Central registry of project paths used by the nox sessions."""
    root = Path(__file__).parent
    ci_tools = root / "ci_tools"
    # NOTE: mkdir executes at class-definition (import) time, so the
    # run-log directory is guaranteed to exist before any session runs.
    runlogs = root / Path(nox.options.envdir or ".nox") / "_runlogs"
    runlogs.mkdir(parents=True, exist_ok=True)
    dist = root / "dist"
    site = root / "site"
    site_reports = site / "reports"
    reports_root = root / "docs" / "reports"
    # Test (junit) report artifacts and badge.
    test_reports = reports_root / "junit"
    test_xml = test_reports / "junit.xml"
    test_html = test_reports / "report.html"
    test_badge = test_reports / "junit-badge.svg"
    # Coverage report artifacts and badge.
    coverage_reports = reports_root / "coverage"
    coverage_xml = coverage_reports / "coverage.xml"
    coverage_intermediate_file = root / ".coverage"
    coverage_badge = coverage_reports / "coverage-badge.svg"
    # Flake8 report artifacts and badge.
    flake8_reports = reports_root / "flake8"
    flake8_intermediate_file = root / "flake8stats.txt"
    flake8_badge = flake8_reports / "flake8-badge.svg"
@power_session(envs=ENVS, logsdir=Folders.runlogs)
def tests(session: PowerSession, coverage, pkg_specs):
    """Run the test suite, including test reports generation and coverage reports. """

    # As soon as this runs, we delete the target site and coverage files to avoid reporting wrong coverage/etc.
    rm_folder(Folders.site)
    rm_folder(Folders.reports_root)
    # delete the .coverage files if any (they are not supposed to be any, but just in case)
    rm_file(Folders.coverage_intermediate_file)
    rm_file(Folders.root / "coverage.xml")

    # CI-only dependencies
    # Did we receive a flag through positional arguments ? (nox -s tests -- <flag>)
    # install_ci_deps = False
    # if len(session.posargs) == 1:
    #     assert session.posargs[0] == "keyrings.alt"
    #     install_ci_deps = True
    # elif len(session.posargs) > 1:
    #     raise ValueError("Only a single positional argument is accepted, received: %r" % session.posargs)

    # uncomment and edit if you wish to uninstall something without deleting the whole env
    # session.run2("pip uninstall pytest-asyncio --yes")

    # install all requirements
    # session.install_reqs(phase="pip", phase_reqs=("pip",), versions_dct=pkg_specs)
    session.install_reqs(setup=True, install=True, tests=True, extras=("all",), versions_dct=pkg_specs)

    # install CI-only dependencies
    # if install_ci_deps:
    #     session.install2("keyrings.alt")

    # list all (conda list alone does not work correctly on github actions)
    # session.run2("conda list")
    # `session.bin` may point at <env>/bin or <env> depending on platform; normalize to the env root.
    conda_prefix = Path(session.bin)
    if conda_prefix.name == "bin":
        conda_prefix = conda_prefix.parent
    session.run2("conda list", env={"CONDA_PREFIX": str(conda_prefix), "CONDA_DEFAULT_ENV": session.get_session_id()})

    # Fail if the assumed python version is not the actual one
    session.run2("python ci_tools/check_python_version.py %s" % session.python)

    # install self so that it is recognized by pytest
    session.run2("pip install -e . --no-deps")

    # check that it can be imported even from a different folder
    session.run2(['python', '-c', '"import os; os.chdir(\'./docs/\'); import %s"' % pkg_name])

    # finally run all tests
    if not coverage:
        # simple: pytest only
        session.run2("python -m pytest --cache-clear -v %s/tests/" % pkg_name)
    else:
        # coverage + junit html reports + badge generation
        session.install_reqs(phase="coverage", phase_reqs=["coverage", "pytest-html", "requests"],
                             versions_dct=pkg_specs)

        # --coverage + junit html reports
        session.run2("coverage run --source {pkg_name} "
                     "-m pytest --cache-clear --junitxml={test_xml} --html={test_html} -v {pkg_name}/tests/"
                     "".format(pkg_name=pkg_name, test_xml=Folders.test_xml, test_html=Folders.test_html))
        session.run2("coverage report")
        session.run2("coverage xml -o {covxml}".format(covxml=Folders.coverage_xml))
        session.run2("coverage html -d {dst}".format(dst=Folders.coverage_reports))
        # delete this intermediate file, it is not needed anymore
        rm_file(Folders.coverage_intermediate_file)

        # --generates the badge for the test results and fail build if less than x% tests pass
        nox_logger.info("Generating badge for tests coverage")
        # Use our own package to generate the badge (dogfooding genbadge itself)
        session.run2("genbadge tests -i %s -o %s -t 100" % (Folders.test_xml, Folders.test_badge))
        session.run2("genbadge coverage -i %s -o %s" % (Folders.coverage_xml, Folders.coverage_badge))
@power_session(python=PY38, logsdir=Folders.runlogs)
def flake8(session: PowerSession):
    """Launch flake8 qualimetry."""
    session.install("-r", str(Folders.ci_tools / "flake8-requirements.txt"))
    session.run2("pip install -e .[flake8]")

    # Start from a clean slate so stale stats do not leak into the badge.
    rm_folder(Folders.flake8_reports)
    rm_file(Folders.flake8_intermediate_file)

    # Options are set in `setup.cfg` file
    session.run("flake8", pkg_name, "--exit-zero", "--format=html", "--htmldir", str(Folders.flake8_reports),
                "--statistics", "--tee", "--output-file", str(Folders.flake8_intermediate_file))

    # generate our badge from the stats file, then discard the intermediate file
    session.run2("genbadge flake8 -i %s -o %s" % (Folders.flake8_intermediate_file, Folders.flake8_badge))
    rm_file(Folders.flake8_intermediate_file)
@power_session(python=[PY37])
def docs(session: PowerSession):
    """Generates the doc and serves it on a local http server. Pass '-- build' to build statically instead."""
    session.install_reqs(phase="docs", phase_reqs=["mkdocs-material", "mkdocs", "pymdown-extensions", "pygments"])

    # Any positional args replace the default "serve" sub-command (e.g. `-- build`).
    if not session.posargs:
        session.run2("mkdocs serve -f ./docs/mkdocs.yml")
    else:
        extra_args = " ".join(session.posargs)
        session.run2("mkdocs -f ./docs/mkdocs.yml %s" % extra_args)
@power_session(python=[PY37])
def publish(session: PowerSession):
    """Deploy the docs+reports on github pages. Note: this rebuilds the docs"""
    session.install_reqs(phase="mkdocs", phase_reqs=["mkdocs-material", "mkdocs", "pymdown-extensions", "pygments"])

    # possibly rebuild the docs in a static way (mkdocs serve does not build locally)
    session.run2("mkdocs build -f ./docs/mkdocs.yml")

    # check that the doc has been generated with coverage reports before publishing
    if not Folders.site_reports.exists():
        raise ValueError("Test reports have not been built yet. Please run 'nox -s tests-3.7' first")

    # publish the docs
    session.run2("mkdocs gh-deploy -f ./docs/mkdocs.yml")

    # publish the coverage - now in github actions only
    # session.install_reqs(phase="codecov", phase_reqs=["codecov", "keyring"])
    # # keyring set https://app.codecov.io/gh/<org>/<repo> token
    # import keyring  # (note that this import is not from the session env but the main nox env)
    # codecov_token = keyring.get_password("https://app.codecov.io/gh/<org>/<repo>>", "token")
    # # note: do not use --root nor -f ! otherwise "There was an error processing coverage reports"
    # session.run2('codecov -t %s -f %s' % (codecov_token, Folders.coverage_xml))
@power_session(python=[PY37])
def release(session: PowerSession):
    """Create a release on github corresponding to the latest tag"""

    # Get current tag using setuptools_scm and make sure this is not a dirty/dev one
    from setuptools_scm import get_version  # (note that this import is not from the session env but the main nox env)
    from setuptools_scm.version import guess_next_dev_version
    version = []

    def my_scheme(version_):
        # Capture the raw ScmVersion so we can inspect .dirty / .exact below.
        version.append(version_)
        return guess_next_dev_version(version_)
    current_tag = get_version(".", version_scheme=my_scheme)

    # create the package
    session.install_reqs(phase="setup.py#dist", phase_reqs=["setuptools_scm"])
    rm_folder(Folders.dist)
    session.run2("python setup.py sdist bdist_wheel")

    if version[0].dirty or not version[0].exact:
        raise ValueError("You need to execute this action on a clean tag version with no local changes.")

    # Did we receive a token through positional arguments ? (nox -s release -- <token>)
    if len(session.posargs) == 1:
        # Run from within github actions - no need to publish on pypi
        gh_token = session.posargs[0]
        publish_on_pypi = False
    elif len(session.posargs) == 0:
        # Run from local commandline - assume we want to manually publish on PyPi
        publish_on_pypi = True
        # keyring set https://docs.github.com/en/rest token
        import keyring  # (note that this import is not from the session env but the main nox env)
        gh_token = keyring.get_password("https://docs.github.com/en/rest", "token")
        # Bug fix: this used to be `assert len(gh_token) > 0`, which is stripped
        # under `python -O` and raises a confusing TypeError when the keyring
        # entry is missing (get_password returns None in that case).
        if not gh_token:
            raise ValueError("No github token found in the keyring. Run "
                             "'keyring set https://docs.github.com/en/rest token' first.")
    else:
        raise ValueError("Only a single positional arg is allowed for now")

    # publish the package on PyPi
    if publish_on_pypi:
        # keyring set https://upload.pypi.org/legacy/ your-username
        # keyring set https://test.pypi.org/legacy/ your-username
        session.install_reqs(phase="PyPi", phase_reqs=["twine"])
        session.run2("twine upload dist/* -u smarie")  # -r testpypi

    # create the github release
    session.install_reqs(phase="release", phase_reqs=["click", "PyGithub"])
    session.run2("python ci_tools/github_release.py -s {gh_token} "
                 "--repo-slug {gh_org}/{gh_repo} -cf ./docs/changelog.md "
                 "-d https://{gh_org}.github.io/{gh_repo}/changelog.html {tag}"
                 "".format(gh_token=gh_token, gh_org=gh_org, gh_repo=gh_repo, tag=current_tag))
@nox.session(python=False)
def gha_list(session):
    """(mandatory arg: <base_session_name>) Prints all sessions available for <base_session_name>, for GithubActions."""
    # see https://stackoverflow.com/q/66747359/7262247

    # A single positional argument names the base session to expand.
    if len(session.posargs) != 1:
        raise ValueError("This session has a mandatory argument: <base_session_name>")
    base_session = globals()[session.posargs[0]]

    # Expand into one entry per python version, and per parameter if the
    # session is parametrized.
    if hasattr(base_session, "parametrize"):
        sessions_list = ["%s-%s(%s)" % (base_session.__name__, py, param)
                         for py, param in product(base_session.python, base_session.parametrize)]
    else:
        sessions_list = ["%s-%s" % (base_session.__name__, py) for py in base_session.python]

    # print the list so that it can be caught by GHA.
    # Note that json.dumps is optional since this is a list of string.
    # However it is to remind us that GHA expects a well-formatted json list of strings.
    print(dumps(sessions_list))
# if __name__ == '__main__':
# # allow this file to be executable for easy debugging in any IDE
# nox.run(globals())
| noxfile.py | 12,326 | Generates the doc and serves it on a local http server. Pass '-- build' to build statically instead.
Launch flake8 qualimetry.
(mandatory arg: <base_session_name>) Prints all sessions available for <base_session_name>, for GithubActions.
Deploy the docs+reports on github pages. Note: this rebuilds the docs
Create a release on github corresponding to the latest tag
Run the test suite, including test reports generation and coverage reports.
noqa noqa add parent folder to python path so that we can import noxfile_utils.py note that you need to "pip install -r noxfile-requiterements.txt" for this file to work. noqa IMPORTANT: this should be last so that the folder docs/reports is not deleted afterwards , "pytest-html": "1.9.0" set the default activated sessions, minimal for CI , "docs", "gh_pages" this can be done using -r if platform.system() == "Windows": >> always use this for better control os.environ["NO_COLOR"] = "True" nox.options.nocolor = True does not work nox.options.verbose = True nox_logger.setLevel(logging.INFO) NO !!!! this prevents the "verbose" nox flag to work ! As soon as this runs, we delete the target site and coverage files to avoid reporting wrong coverage/etc. delete the .coverage files if any (they are not supposed to be any, but just in case) CI-only dependencies Did we receive a flag through positional arguments ? (nox -s tests -- <flag>) install_ci_deps = False if len(session.posargs) == 1: assert session.posargs[0] == "keyrings.alt" install_ci_deps = True elif len(session.posargs) > 1: raise ValueError("Only a single positional argument is accepted, received: %r" % session.posargs) uncomment and edit if you wish to uninstall something without deleting the whole env session.run2("pip uninstall pytest-asyncio --yes") install all requirements session.install_reqs(phase="pip", phase_reqs=("pip",), versions_dct=pkg_specs) install CI-only dependencies if install_ci_deps: session.install2("keyrings.alt") list all (conda list alone does not work correctly on github actions) session.run2("conda list") Fail if the assumed python version is not the actual one install self so that it is recognized by pytest check that it can be imported even from a different folder finally run all tests simple: pytest only coverage + junit html reports + badge generation --coverage + junit html reports delete this intermediate file, it is not needed anymore --generates the badge for the test results and 
fail build if less than x% tests pass Use our own package to generate the badge Options are set in `setup.cfg` file generate our badge use posargs instead of "serve" possibly rebuild the docs in a static way (mkdocs serve does not build locally) check that the doc has been generated with coverage publish the docs publish the coverage - now in github actions only session.install_reqs(phase="codecov", phase_reqs=["codecov", "keyring"]) keyring set https://app.codecov.io/gh/<org>/<repo> token import keyring (note that this import is not from the session env but the main nox env) codecov_token = keyring.get_password("https://app.codecov.io/gh/<org>/<repo>>", "token") note: do not use --root nor -f ! otherwise "There was an error processing coverage reports" session.run2('codecov -t %s -f %s' % (codecov_token, Folders.coverage_xml)) Get current tag using setuptools_scm and make sure this is not a dirty/dev one (note that this import is not from the session env but the main nox env) create the package Did we receive a token through positional arguments ? (nox -s release -- <token>) Run from within github actions - no need to publish on pypi Run from local commandline - assume we want to manually publish on PyPi keyring set https://docs.github.com/en/rest token (note that this import is not from the session env but the main nox env) publish the package on PyPi keyring set https://upload.pypi.org/legacy/ your-username keyring set https://test.pypi.org/legacy/ your-username -r testpypi create the github release see https://stackoverflow.com/q/66747359/7262247 get the desired base session to generate the list for list all sessions for this base session print the list so that it can be caught by GHA. Note that json.dumps is optional since this is a list of string. However it is to remind us that GHA expects a well-formatted json list of strings. 
if __name__ == '__main__': allow this file to be executable for easy debugging in any IDE nox.run(globals()) | 4,454 | en | 0.764985 |
#!usr/bin/env python3
# -*- coding:utf-8 -*-
__author__ = 'yanqiong'

import random
import secrets

from bisect import bisect_right
from sgqlc.operation import Operation
from pandas.core.internals import BlockManager

from tqsdk.ins_schema import ins_schema, _add_all_frags

# Seed the RNG from a cryptographically random value so that users launching
# several strategy processes at the same time do not share the same seed.
RD = random.Random(secrets.randbits(128))
def _generate_uuid(prefix=''):
    """Return a 128-bit random hex id, optionally prefixed with ``<prefix>_``."""
    head = prefix + '_' if prefix else ''
    return "%s%032x" % (head, RD.getrandbits(128))
def _query_for_quote(symbol):
    """
    Build the ins_query request pack asking for the instrument info of *symbol*
    (a single symbol or a list of symbols).

    Only SDK-internal code should call this helper; user-initiated queries are
    prefixed with PYSDK_api and, in backtests, carry a timestamp parameter, so
    they must not go through this function.
    """
    symbols = symbol if isinstance(symbol, list) else [symbol]
    operation = Operation(ins_schema.rootQuery)
    _add_all_frags(operation.multi_symbol_info(instrument_id=symbols))
    return {
        "aid": "ins_query",
        "query_id": _generate_uuid(prefix='PYSDK_quote_'),
        "query": operation.__to_graphql__(),
    }
def _query_for_init():
    """
    Build the GraphQL query string requesting the instrument classes loaded at
    startup.

    todo: restricted to exchanges ["SHFE", "DCE", "CZCE", "INE", "CFFEX", "KQ"]
    for backward compatibility with the legacy api._data["quote"].items() usage.
    """
    operation = Operation(ins_schema.rootQuery)
    info = operation.multi_symbol_info(
        class_=["FUTURE", "INDEX", "OPTION", "COMBINE", "CONT"],
        exchange_id=["SHFE", "DCE", "CZCE", "INE", "CFFEX", "KQ"])
    _add_all_frags(info)
    return operation.__to_graphql__()
# Night trading session per product: "EXCHANGE.product_id" -> [start, end].
# End times past "24:00:00" denote the early morning of the next day
# (e.g. "25:00:00" is 01:00, "26:30:00" is 02:30).
night_trading_table = {
    "DCE.a": ["21:00:00", "23:00:00"],
    "DCE.b": ["21:00:00", "23:00:00"],
    "DCE.c": ["21:00:00", "23:00:00"],
    "DCE.cs": ["21:00:00", "23:00:00"],
    "DCE.m": ["21:00:00", "23:00:00"],
    "DCE.y": ["21:00:00", "23:00:00"],
    "DCE.p": ["21:00:00", "23:00:00"],
    "DCE.l": ["21:00:00", "23:00:00"],
    "DCE.v": ["21:00:00", "23:00:00"],
    "DCE.pp": ["21:00:00", "23:00:00"],
    "DCE.j": ["21:00:00", "23:00:00"],
    "DCE.jm": ["21:00:00", "23:00:00"],
    "DCE.i": ["21:00:00", "23:00:00"],
    "DCE.eg": ["21:00:00", "23:00:00"],
    "DCE.eb": ["21:00:00", "23:00:00"],
    "DCE.rr": ["21:00:00", "23:00:00"],
    "DCE.pg": ["21:00:00", "23:00:00"],
    "CZCE.CF": ["21:00:00", "23:00:00"],
    "CZCE.CY": ["21:00:00", "23:00:00"],
    "CZCE.SA": ["21:00:00", "23:00:00"],
    "CZCE.SR": ["21:00:00", "23:00:00"],
    "CZCE.TA": ["21:00:00", "23:00:00"],
    "CZCE.OI": ["21:00:00", "23:00:00"],
    "CZCE.MA": ["21:00:00", "23:00:00"],
    "CZCE.FG": ["21:00:00", "23:00:00"],
    "CZCE.RM": ["21:00:00", "23:00:00"],
    "CZCE.ZC": ["21:00:00", "23:00:00"],
    "CZCE.TC": ["21:00:00", "23:00:00"],
    "SHFE.rb": ["21:00:00", "23:00:00"],
    "SHFE.hc": ["21:00:00", "23:00:00"],
    "SHFE.fu": ["21:00:00", "23:00:00"],
    "SHFE.bu": ["21:00:00", "23:00:00"],
    "SHFE.ru": ["21:00:00", "23:00:00"],
    "SHFE.sp": ["21:00:00", "23:00:00"],
    "INE.nr": ["21:00:00", "23:00:00"],
    "SHFE.cu": ["21:00:00", "25:00:00"],
    "SHFE.al": ["21:00:00", "25:00:00"],
    "SHFE.zn": ["21:00:00", "25:00:00"],
    "SHFE.pb": ["21:00:00", "25:00:00"],
    "SHFE.ni": ["21:00:00", "25:00:00"],
    "SHFE.sn": ["21:00:00", "25:00:00"],
    "SHFE.ss": ["21:00:00", "25:00:00"],
    "SHFE.au": ["21:00:00", "26:30:00"],
    "SHFE.ag": ["21:00:00", "26:30:00"],
    "INE.sc": ["21:00:00", "26:30:00"],
}
def _quotes_add_night(quotes):
    """Fill in the night trading session for products that should have one but
    whose instrument file does not list it (per night_trading_table)."""
    for symbol, quote in quotes.items():
        product_id = quote.get("product_id")
        trading_time = quote.get("trading_time")
        if not (trading_time and product_id):
            continue
        key = f"{quote.get('exchange_id')}.{product_id}"
        if key in night_trading_table and not trading_time.get("night"):
            trading_time["night"] = [night_trading_table[key]]
def _bisect_value(a, x, priority="right"):
"""
返回 bisect_right() 取得下标对应的值,当插入点距离前后元素距离相等,priority 表示优先返回右边的值还是左边的值
a: 必须是已经排序好(升序排列)的 list
bisect_right : Return the index where to insert item x in list a, assuming a is sorted.
"""
assert priority in ['left', 'right']
insert_index = bisect_right(a, x)
if 0 < insert_index < len(a):
left_dis = x - a[insert_index - 1]
right_dis = a[insert_index] - x
if left_dis == right_dis:
mid_index = insert_index - 1 if priority == "left" else insert_index
elif left_dis < right_dis:
mid_index = insert_index - 1
else:
mid_index = insert_index
else:
assert insert_index == 0 or insert_index == len(a)
mid_index = 0 if insert_index == 0 else (len(a) - 1)
return a[mid_index]
class BlockManagerUnconsolidated(BlockManager):
    """Mock BlockManager that never consolidates its blocks.

    Prevents pandas from automatically merging same-dtype blocks, which would
    otherwise stop the kline DataFrame data from being updated in place.
    """

    def __init__(self, *args, **kwargs):
        BlockManager.__init__(self, *args, **kwargs)
        # Force pandas to believe consolidation has not (and cannot) happen.
        self._is_consolidated = False
        self._known_consolidated = False

    def _consolidate_inplace(self): pass  # deliberately a no-op
| tqsdk/utils.py | 5,470 | mock BlockManager for unconsolidated, 不会因为自动合并同类型的 blocks 而导致 k 线数据不更新
返回 bisect_right() 取得下标对应的值,当插入点距离前后元素距离相等,priority 表示优先返回右边的值还是左边的值
a: 必须是已经排序好(升序排列)的 list
bisect_right : Return the index where to insert item x in list a, assuming a is sorted.
返回某些类型合约的 query
todo: 为了兼容旧版提供给用户的 api._data["quote"].items() 类似用法,应该限制交易所 ["SHFE", "DCE", "CZCE", "INE", "CFFEX", "KQ"]
返回请求某个合约的合约信息的 query_pack
调用次函数应该全部都是sdk的代码主动请求合约信息
用户请求合约信息一定是 PYSDK_api 开头的请求,因为用户请求的合约信息在回测时带有 timestamp 参数,是不应该调用此函数的
为 quotes 中应该有夜盘但是市价合约文件中没有夜盘的品种,添加夜盘时间
!usr/bin/env python3 -*- coding:utf-8 -*- 初始化随机数引擎,使用随机数作为seed,防止用户同时拉起多个策略,产生同样的 seed | 620 | zh | 0.842235 |
# -*- coding: utf-8 -*-
# Copyright 2018 IBM.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from qiskit_aqua.algorithms.components.optimizers import Optimizer
from ._nloptimizer import minimize
import logging

try:
    import nlopt
except ImportError:
    # nlopt is a native optional dependency; fail the import of this module
    # with an explicit message when it is not installed.
    raise ImportWarning('nlopt cannot be imported')

logger = logging.getLogger(__name__)
class ESCH(Optimizer):
    """ESCH (evolutionary algorithm)

    NLopt global optimizer, derivative-free
    http://nlopt.readthedocs.io/en/latest/NLopt_Algorithms/#esch-evolutionary-algorithm
    """

    # Pluggable-component configuration: user-options JSON schema plus the
    # declared support levels for gradients / bounds / initial point.
    ESCH_CONFIGURATION = {
        'name': 'ESCH',
        'description': 'GN_ESCH Optimizer',
        'input_schema': {
            '$schema': 'http://json-schema.org/schema#',
            'id': 'esch_schema',
            'type': 'object',
            'properties': {
                'max_evals': {
                    'type': 'integer',
                    'default': 1000
                }
            },
            'additionalProperties': False
        },
        'support_level': {
            'gradient': Optimizer.SupportLevel.ignored,
            'bounds': Optimizer.SupportLevel.supported,
            'initial_point': Optimizer.SupportLevel.required
        },
        'options': ['max_evals'],
        'optimizer': ['global']
    }

    def __init__(self, configuration=None):
        """Initialize with *configuration*, or a copy of the default one."""
        super().__init__(configuration or self.ESCH_CONFIGURATION.copy())

    def init_args(self):
        # This optimizer takes no extra initialization arguments.
        pass

    def optimize(self, num_vars, objective_function, gradient_function=None, variable_bounds=None, initial_point=None):
        """Run NLopt's GN_ESCH via the shared ``minimize`` wrapper and return its result."""
        super().optimize(num_vars, objective_function, gradient_function, variable_bounds, initial_point)
        return minimize(nlopt.GN_ESCH, objective_function, variable_bounds, initial_point, **self._options)
| qiskit_aqua/algorithms/components/optimizers/nlopts/esch.py | 2,367 | ESCH (evolutionary algorithm)
NLopt global optimizer, derivative-free
http://nlopt.readthedocs.io/en/latest/NLopt_Algorithms/#esch-evolutionary-algorithm
-*- coding: utf-8 -*- Copyright 2018 IBM. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================= | 798 | en | 0.76759 |
# Generated by Django 3.2.11 on 2022-02-10 16:05
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated merge migration reconciling two divergent branches of the
    cabins app's migration history; it applies no schema operations itself."""

    dependencies = [
        ("cabins", "0020_auto_20211111_1825"),
        ("cabins", "0021_booking_is_declined"),
    ]

    operations = []
| backend/apps/cabins/migrations/0022_merge_20220210_1705.py | 268 | Generated by Django 3.2.11 on 2022-02-10 16:05 | 46 | en | 0.64043 |
#! /usr/bin/env python3
"""Unit tests for smartcard.readers.ReaderGroups
This test case can be executed individually, or with all other test cases
thru testsuite_framework.py.
__author__ = "http://www.gemalto.com"
Copyright 2001-2012 gemalto
Author: Jean-Daniel Aussel, mailto:jean-daniel.aussel@gemalto.com
This file is part of pyscard.
pyscard is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
pyscard is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with pyscard; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import platform
import unittest
from smartcard.System import readergroups
from smartcard.scard import resourceManager
# These tests mutate the system-wide smartcard reader groups, which only works
# with the Windows winscard resource manager and is skipped on Windows 7.
if 'winscard' == resourceManager and \
        -1 == platform.platform().find('Windows-7'):

    class testcase_readergroups(unittest.TestCase):
        """Test smartcard framework readersgroups."""
        pinpadgroup = 'Pinpad$Readers'
        biogroup = 'Biometric$Readers'

        def testcase_readergroup_add(self):
            """tests groups=groups+[newgroups]"""
            # take a snapshot of current groups
            groupssnapshot = list(readergroups())
            groups = readergroups()

            # add pinpad group
            groups = groups + [self.pinpadgroup]
            self.assertEqual(groups, groupssnapshot + [self.pinpadgroup])

            # add pinpad a second time and biometric once; duplicates must be ignored
            groups = groups + [self.biogroup, self.pinpadgroup]
            self.assertEqual(
                groups, groupssnapshot + [self.pinpadgroup, self.biogroup])

            # clean-up
            groups.remove(self.biogroup)
            groups.remove(self.pinpadgroup)

        def testcase_readergroup_iadd(self):
            """test groups+=[newgroups]"""
            # take a snapshot of current groups
            groupssnapshot = list(readergroups())
            groups = readergroups()

            # add pinpad group
            groups += [self.pinpadgroup]
            self.assertEqual(groups, groupssnapshot + [self.pinpadgroup])

            # add pinpad a second time and biometric once; duplicates must be ignored
            groups += [self.biogroup, self.pinpadgroup]
            self.assertEqual(
                groups, groupssnapshot + [self.pinpadgroup, self.biogroup])

            # clean-up
            groups.remove(self.biogroup)
            groups.remove(self.pinpadgroup)

        def testcase_readergroup_radd(self):
            """test groups=[newgroups]+groups"""
            # take a snapshot of current groups
            groupssnapshot = list(readergroups())
            groups = readergroups()

            # add pinpad group; right-add returns a plain list and must not
            # modify the original readergroups object
            zgroups = [self.pinpadgroup] + groups
            self.assertEqual(groups, groupssnapshot)
            self.assertEqual(zgroups, groupssnapshot + [self.pinpadgroup])
            self.assertTrue(isinstance(zgroups, type([])))
            self.assertTrue(isinstance(groups, type(readergroups())))

            # add pinpad twice and biometric once
            zgroups = \
                [self.pinpadgroup, self.biogroup, self.pinpadgroup] + groups
            self.assertEqual(groups, groupssnapshot)
            self.assertEqual(
                zgroups, groupssnapshot + [self.pinpadgroup, self.biogroup])
            self.assertTrue(isinstance(zgroups, type([])))
            self.assertTrue(isinstance(groups, type(readergroups())))

        def testcase_readergroup_append(self):
            """test groups.append(newgroups)"""
            # take a snapshot of current groups
            groupssnapshot = list(readergroups())
            groups = readergroups()

            # add pinpad group
            groups.append(self.pinpadgroup)
            self.assertEqual(groups, groupssnapshot + [self.pinpadgroup])

            # add pinpad a second time; duplicate must be ignored
            groups.append(self.pinpadgroup)
            self.assertEqual(groups, groupssnapshot + [self.pinpadgroup])

            # add biometric once
            groups.append(self.biogroup)
            self.assertEqual(
                groups, groupssnapshot + [self.pinpadgroup, self.biogroup])

            # clean-up
            groups.remove(self.biogroup)
            groups.remove(self.pinpadgroup)

        def testcase_readergroup_insert(self):
            """test groups.insert(i,newgroups)"""
            # take a snapshot of current groups
            groupssnapshot = list(readergroups())
            groups = readergroups()

            # add pinpad group
            groups.insert(0, self.pinpadgroup)
            self.assertEqual(groups, groupssnapshot + [self.pinpadgroup])

            # add pinpad a second time; duplicate must be ignored
            groups.insert(1, self.pinpadgroup)
            self.assertEqual(groups, groupssnapshot + [self.pinpadgroup])

            # add biometric once
            groups.insert(1, self.biogroup)
            self.assertEqual(
                groups, groupssnapshot + [self.pinpadgroup, self.biogroup])

            # clean-up
            groups.remove(self.biogroup)
            groups.remove(self.pinpadgroup)
def suite():
    """Build the test suite.

    Only valid when ``testcase_readergroups`` was defined above (i.e. on the
    winscard resource manager, excluding Windows 7).
    """
    # Bug fix: unittest.makeSuite() is deprecated and removed in Python 3.13;
    # use the default TestLoader instead. Also, the original passed
    # `(suite1)` (not a tuple) to TestSuite — works only because a TestSuite
    # is itself iterable; an explicit 1-tuple states the intent.
    suite1 = unittest.defaultTestLoader.loadTestsFromTestCase(testcase_readergroups)
    return unittest.TestSuite((suite1,))


if __name__ == '__main__':
    unittest.main()
| cacreader/pyscard-2.0.2/smartcard/test/framework/testcase_readergroups.py | 5,702 | Test smartcard framework readersgroups.
tests groups=groups+[newgroups]
test groups.append(newgroups)
test groups+=[newgroups]
test groups.insert(i,newgroups)
test groups=[newgroups]+groups
Unit tests for smartcard.readers.ReaderGroups
This test case can be executed individually, or with all other test cases
thru testsuite_framework.py.
__author__ = "http://www.gemalto.com"
Copyright 2001-2012 gemalto
Author: Jean-Daniel Aussel, mailto:jean-daniel.aussel@gemalto.com
This file is part of pyscard.
pyscard is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
pyscard is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with pyscard; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
! /usr/bin/env python3 take a snapshot of current groups add pinpad group add pinpad a second time and biometric once clean-up take a snapshot of current groups add pinpad group add pinpad a second time and biometric once clean-up take a snapshot of current groups add pinpad group add pinpad a tiwce and biometric once take a snapshot of current groups add pinpad group add pinpad a second time add biometric once clean-up take a snapshot of current groups add pinpad group add pinpad a second time add biometric once clean-up | 1,719 | en | 0.742088 |
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import logging as log
from io import IOBase
import networkx as nx
import numpy as np
from openvino.tools.mo.ops.elementwise import Mul
from openvino.tools.mo.ops.split import AttributedVariadicSplit
from openvino.tools.mo.front.common.partial_infer.utils import float_array, int64_array
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.front.extractor import add_outputs_identity
from openvino.tools.mo.front.kaldi.loader.utils import find_next_tag, read_placeholder, find_next_component, get_name_from_path, \
find_end_of_component, end_of_nnet_tag, read_binary_integer32_token, get_parameters, read_token_value, \
collect_until_token, collect_until_token_and_read, create_edge_attrs, get_args_for_specifier
from openvino.tools.mo.front.kaldi.utils import read_binary_vector
from openvino.tools.mo.graph.graph import Node, Graph
from openvino.tools.mo.ops.const import Const
from openvino.tools.mo.utils.error import Error
from openvino.tools.mo.utils.utils import refer_to_faq_msg
def load_parallel_component(file_descr, graph: Graph, prev_layer_id):
    """
    Load ParallelComponent of the Kaldi model.
    ParallelComponent contains parallel nested networks.
    VariadicSplit is inserted before nested networks.
    Outputs of nested networks concatenate with layer Concat.
    :param file_descr: descriptor of the model file
    :param graph: graph with the topology.
    :param prev_layer_id: id of the input layers for parallel component layer
    :return: id of the concat layer - last layer of the parallel component layers
    """
    nnet_count = read_token_value(file_descr, b'<NestedNnetCount>')
    log.debug('Model contains parallel component with {} nested networks'.format(nnet_count))

    split_points = []  # input widths of the subnetworks, in file order
    outputs = []       # last node of each subnetwork (feeds the final Concat)
    inputs = []        # first node of each subnetwork (fed by VariadicSplit)
    for i in range(nnet_count):
        read_token_value(file_descr, b'<NestedNnet>')
        collect_until_token(file_descr, b'<Nnet>')
        g = Graph()
        load_kalid_nnet1_model(g, file_descr, 'Nested_net_{}'.format(i))

        # input to nnet1 models is of a rank 1 but we also insert batch_size to 0th axis
        # 1st axis contains input_size of the nested subnetwork
        # we split input from the main network to subnetworks
        input_node = Node(g, 'Parameter')
        split_points.append(input_node['shape'][1])
        g.remove_node(input_node.id)

        # merge the nested graph into the main one, renaming ids that clash
        mapping = {node: graph.unique_id(node) for node in g.nodes(data=False) if node in graph}
        g = nx.relabel_nodes(g, mapping)
        for val in mapping.values():
            g.node[val]['name'] = val
        graph.add_nodes_from(g.nodes(data=True))
        graph.add_edges_from(g.edges(data=True))
        sorted_nodes = tuple(nx.topological_sort(g))

        # nnet1 subnetworks are chains, so first/last topological nodes are
        # the subnetwork's input/output
        outputs.append(Node(graph, sorted_nodes[-1]))
        inputs.append(Node(graph, sorted_nodes[0]))

    split_id = graph.unique_id(prefix='NestedNets/VariadicSplit')
    attrs = {'out_ports_count': nnet_count, 'size_splits': split_points, 'axis': 1, 'name': split_id}
    variadic_split_node = AttributedVariadicSplit(graph, attrs).create_node()
    prev_layer_node = Node(graph, prev_layer_id)
    prev_layer_node.add_output_port(0)
    graph.create_edge(prev_layer_node, variadic_split_node, 0, 0, create_edge_attrs(prev_layer_id, variadic_split_node.id, prev_layer_id))

    concat_id = graph.unique_id(prefix='Concat')
    graph.add_node(concat_id, parameters=None, op='concat', kind='op')
    concat_node = Node(graph, concat_id)

    # Connect each output of variadic_split_node to each subnetwork's inputs in ParallelComponent
    # and each subnetwork's output to concat_node
    for i, (input_node, output_node) in enumerate(zip(inputs, outputs)):
        output_node.add_output_port(0)
        concat_node.add_input_port(i)
        graph.create_edge(output_node, concat_node, 0, i, create_edge_attrs(output_node.id, concat_id, output_node.id, i, 0))
        graph.create_edge(variadic_split_node, input_node, i, 0, create_edge_attrs(variadic_split_node.id, input_node.id, variadic_split_node.id, 0, i))
    return concat_id
def load_kaldi_model(graph, nnet_path):
    """
    Structure of the file is the following:
    magic-number(16896)<Nnet> <Next Layer Name> weights etc.
    :param nnet_path:
    :return:
    """
    model_name = None
    if isinstance(nnet_path, str):
        descr = open(nnet_path, "rb")
        model_name = get_name_from_path(nnet_path)
    elif isinstance(nnet_path, IOBase):
        descr = nnet_path
    else:
        raise Error('Unsupported type of Kaldi model')

    # Dispatch on the first tag to select the loader for the nnet flavour.
    first_tag = find_next_tag(descr)
    if first_tag == '<Nnet>':
        loader = load_kalid_nnet1_model
    elif first_tag == '<TransitionModel>':
        # Skip past the transition model; the tag that follows tells the flavour.
        tag = first_tag
        while tag not in ('<Nnet>', '<Nnet3>'):
            tag = find_next_tag(descr)
        loader = load_kaldi_nnet3_model if tag == '<Nnet3>' else load_kalid_nnet2_model
    elif first_tag == '<Nnet3>':
        loader = load_kaldi_nnet3_model
    else:
        raise Error('Kaldi model should start with <Nnet> or <TransitionModel> tag. ',
                    refer_to_faq_msg(89))
    read_placeholder(descr, 1)
    return loader(graph, descr, model_name)
def load_kalid_nnet1_model(graph, file_descr, name):
    """Load a Kaldi nnet1 model: a linear chain of components.

    Creates one graph node per component and chains them with edges, starting
    from a synthetic 'Parameter' input node.

    :param graph: graph to populate with nodes and edges
    :param file_descr: binary file descriptor positioned at the nnet1 payload
    :param name: network name (unused here; kept for loader-signature uniformity)
    """
    prev_layer_id = 'Parameter'
    graph.add_node(prev_layer_id, name=prev_layer_id, kind='op', op='Parameter', parameters=None)

    # find out output layer, it can be only one due to chain structure of nnet1 model
    output_layer = None
    while True:
        component_type = find_next_component(file_descr)
        if component_type == end_of_nnet_tag.lower()[1:-1]:
            # End-of-network tag: no more components to read.
            break

        layer_o = read_binary_integer32_token(file_descr)  # output dimension
        layer_i = read_binary_integer32_token(file_descr)  # input dimension

        if component_type == 'parallelcomponent':
            # ParallelComponent has a dedicated loader (inserts split/concat).
            prev_layer_id = load_parallel_component(file_descr, graph, prev_layer_id)
            find_end_of_component(file_descr, component_type)
            continue

        start_index = file_descr.tell()
        end_tag, end_index = find_end_of_component(file_descr, component_type)
        # Exclude the closing tag itself from the parameter payload.
        end_index -= len(end_tag)
        layer_id = graph.unique_id(prefix=component_type)
        graph.add_node(layer_id,
                       parameters=get_parameters(file_descr, start_index, end_index),
                       op=component_type,
                       kind='op',
                       layer_i=layer_i,
                       layer_o=layer_o)
        if hasattr(graph, 'op_names_statistic'):
            graph.op_names_statistic[component_type] += 1

        prev_node = Node(graph, prev_layer_id)
        if prev_node.op == 'Parameter':
            # First real component fixes the input shape: [batch=1, input dim].
            prev_node['shape'] = int64_array([1, layer_i])
        prev_node.add_output_port(0)
        Node(graph, layer_id).add_input_port(0)
        graph.create_edge(prev_node, Node(graph, layer_id), 0, 0, create_edge_attrs(prev_layer_id, layer_id, prev_layer_id))
        prev_layer_id = layer_id
        output_layer = layer_id
        log.debug('{} (type is {}) was loaded'.format(prev_layer_id, component_type))

    # Tensor names information corresponding to a node is stored on outgoing edges.
    # As output nodes do not have outgoing edges, fake outputs are required. In the following code
    # for each output Identity node is added, and tensor name for the output is kept
    # on (output, fake output) edge. After Result nodes adding transformation fake outputs
    # are deleted from graph.
    assert output_layer is not None, "Output layer is not found in graph"
    add_outputs_identity(graph, [output_layer], lambda g, output, fake_output: g.create_edge(
        Node(g, output), Node(g, fake_output), 0, 0, create_edge_attrs(output, fake_output, output)))
def load_kalid_nnet2_model(graph, file_descr, nnet_name):
    """Load a Kaldi nnet2 model: a flat sequence of components wired in order.

    :param graph: graph to populate with nodes and edges
    :param file_descr: binary file descriptor positioned at the nnet2 payload
    :param nnet_name: network name (unused here; kept for loader-signature uniformity)
    """
    input_name = 'Input'
    graph.add_node(input_name, name=input_name, kind='op', op='Parameter', parameters=None, shape=None)
    prev_layer_id = input_name
    all_components = load_components(file_descr, graph)
    used_layers = set()
    for layer_id in all_components:
        prev_node = Node(graph, prev_layer_id)
        if prev_node.op == 'Parameter':
            # Derive the Parameter shape from the first component's <InputDim>.
            parameters = Node(graph, layer_id).parameters
            input_dim = read_token_value(parameters, b'<InputDim>')
            prev_node['shape'] = int64_array([1, input_dim])
        prev_node.add_output_port(0)
        Node(graph, layer_id).add_input_port(0)
        graph.create_edge(prev_node, Node(graph, layer_id), 0, 0, create_edge_attrs(prev_layer_id, layer_id, prev_layer_id))
        used_layers.add(prev_layer_id)
        # Log before advancing prev_layer_id; otherwise both placeholders would
        # print the same (new) layer id.
        log.debug('{} and {} were connected'.format(prev_layer_id, layer_id))
        prev_layer_id = layer_id

    # Tensor names information corresponding to a node is stored on outgoing edges.
    # As output nodes do not have outgoing edges, fake outputs are required. In the following code
    # for each output Identity node is added, and tensor name for the output is kept
    # on (output, fake output) edge. After Result nodes adding transformation fake outputs
    # are deleted from graph.
    output_layers = graph.nodes - used_layers
    add_outputs_identity(graph, output_layers, lambda g, output, fake_output: g.create_edge(
        Node(g, output), Node(g, fake_output), 0, 0, create_edge_attrs(output, fake_output, output)))
def load_kaldi_nnet3_model(graph, file_descr, nnet_name):
    """Load a Kaldi nnet3 model: topology section first, then the components."""
    file_descr.read(1)
    component_layer_map = load_topology_map(file_descr, graph)
    # add information for shape calculation for MemoryOffset
    # shape calculation for MemoryOffset can't be done through shape of previous layer because
    # it is separated in 2 parts to remove cycle from graph
    for param_node in graph.get_op_nodes(op='Parameter'):
        for consumer_name, consumer_attrs in param_node.get_outputs():
            consumer = Node(graph, consumer_name)
            if consumer['op'] == 'MemoryOffset':
                # don't take batch from Parameter, it will be overwritten
                # take only second dimension because we have only 2 dimensions
                consumer['parameters']['element_size'] = int64_array([1, param_node.shape[1]])
    load_components(file_descr, graph, component_layer_map)
    load_priors(file_descr, graph)
def load_priors(file_descr, graph):
    """Attach the <Priors> vector to the graph when present and counts were requested."""
    try:
        collect_until_token(file_descr, b'<Priors>')
    except Error:
        # just ignore if priors were not found
        return
    if graph.graph['cmd_params'].counts is None:
        log.error("Model contains Prior values, if you want to embed them into the generated IR add option --counts=\"\" to command line",
                  extra={'is_warning': True})
        return
    graph.graph['priors'] = read_binary_vector(file_descr)
def load_components(file_descr, graph, component_layer_map=None):
    """Read all components of an nnet2/nnet3 model and attach their parameters.

    For nnet3 (`component_layer_map` given) parameters are assigned to the nodes
    already created from the topology section; for nnet2 new nodes are created.

    :param file_descr: binary file descriptor
    :param graph: graph being built
    :param component_layer_map: nnet3 map component name -> list of node ids,
        or None for nnet2
    :return: list of node ids created (nnet2 only; empty for nnet3)
    :raises Error: when an nnet3 component name has no topology node
    """
    num_components = collect_until_token_and_read(file_descr, b'<NumComponents>')
    log.debug('Network contains {} components'.format(num_components))
    is_nnet3 = False if component_layer_map is None else True

    if not is_nnet3:
        collect_until_token(file_descr, b'<Components>')

    all_components = list()
    name = ""
    for _ in range(num_components):
        if is_nnet3:
            name = collect_until_token_and_read(file_descr, b'<ComponentName>', np.string_)

        component_type = find_next_component(file_descr)
        if component_type == end_of_nnet_tag.lower()[1:-1]:
            # End-of-network tag reached before all declared components were read.
            break

        start_index = file_descr.tell()
        end_tag, end_index = find_end_of_component(file_descr, component_type)

        # read dim info where possible to simplify shape calculation for MemoryOffset
        # shape calculation for MemoryOffset can't be done through shape of previous layer because
        # it is separated in 2 parts to remove cycle from graph
        file_descr.seek(start_index)
        dim = 0
        dim_words = {b'<Dim>', b'<InputDim>'}
        for dim_word in dim_words:
            try:
                collect_until_token(file_descr, dim_word, size_search_zone=end_index - start_index)
                cur_index = file_descr.tell()
                if start_index < cur_index < end_index:
                    # The dim token lies inside this component's byte range.
                    dim = read_binary_integer32_token(file_descr)
                    break
                else:
                    # Token found outside this component - rewind and try the next word.
                    file_descr.seek(start_index)
            except Error:
                file_descr.seek(start_index)

        if is_nnet3:
            if name in component_layer_map:
                layer_id = component_layer_map[name][0]
                for layer in component_layer_map[name]:
                    node = Node(graph, layer)
                    node['parameters'] = get_parameters(file_descr, start_index, end_index)
                    node['op'] = component_type
                    # Read dim info where possible to simplify shape calculation for MemoryOffset
                    for o_n_name, params in node.get_outputs():
                        o_n = Node(graph, o_n_name)
                        if o_n['op'] == 'MemoryOffset' and dim != 0:
                            o_n['parameters']['element_size'] = int64_array([1, dim])
            else:
                raise Error("Something wrong with layer {}".format(name))
        else:
            layer_id = graph.unique_id(prefix=component_type)
            graph.add_node(layer_id,
                           parameters=get_parameters(file_descr, start_index, end_index),
                           op=component_type,
                           kind='op')
            if hasattr(graph, 'op_names_statistic'):
                graph.op_names_statistic[component_type] += 1
            all_components.append(layer_id)
        log.debug('{} (type is {}) was loaded'.format(layer_id, component_type))

    return all_components
def load_topology_map(file_descr, graph):
    """Read topology config lines until the blank terminator line.

    :return: mapping component name -> list of node ids, filled by read_node
    """
    component_layer_map = {}
    layer_node_map = {}
    while read_node(file_descr, graph, component_layer_map, layer_node_map):
        pass
    return component_layer_map
def read_node(file_descr, graph, component_layer_map, layer_node_map):
    """Parse one nnet3 topology config line and add the node(s) it describes.

    Recognized specifiers: 'input-node', 'component-node', 'output-node' and
    'dim-range-node'. A blank line terminates the topology section.

    :param file_descr: binary descriptor positioned at the next config line
    :param graph: graph being built
    :param component_layer_map: component name -> list of node ids (updated here)
    :param layer_node_map: layer name -> node id (updated here)
    :return: False once the blank terminator line is read, True otherwise
    :raises Error: on unsupported node specifiers or objective types
    """
    s = file_descr.readline()
    if s == b'\n':
        return False
    tokens = s.split(b' ')
    if tokens[0] == b'input-node':
        in_name = s[s.find(b'name=') + len(b'name='):].split(b' ')[0]
        in_name = str(in_name).strip('b').replace('\'', "")
        # Shape is [batch=1, dim]. np.int was a deprecated alias of the builtin
        # int (removed in NumPy 1.24), so use int directly - same dtype.
        in_shape = mo_array([1, s[s.find(b'dim=') + len(b'dim='):].split(b' ')[0]], dtype=int)
        if in_name not in layer_node_map:
            graph.add_node(in_name, name=in_name, kind='op', op='Parameter', parameters=None, shape=in_shape)
            layer_node_map[in_name] = in_name
        else:
            # Node was created earlier as a forward reference: fill in its role.
            Node(graph, in_name)['op'] = 'Parameter'
            Node(graph, in_name)['shape'] = in_shape
    elif tokens[0] == b'component-node':
        layer_name = s[s.find(b'name=') + len(b'name='):].split(b' ')[0]
        layer_name = str(layer_name).strip('b').replace('\'', "")
        component_name = s[s.find(b'component=') + len(b'component='):].split(b' ')[0]
        if layer_name not in layer_node_map:
            node_name = graph.unique_id(prefix=layer_name)
            graph.add_node(node_name,
                           parameters=None,
                           op=None,
                           kind='op')
            layer_node_map[layer_name] = node_name
        else:
            node_name = layer_node_map[layer_name]
        # Remember which graph node(s) this component's parameters belong to.
        if component_name in component_layer_map:
            component_layer_map[component_name].append(node_name)
        else:
            component_layer_map[component_name] = [node_name]
        # parse input
        in_node_id = parse_input_for_node(s[s.find(b'input=') + len(b'input='):], graph, layer_node_map)
        # don't create cyclic edges node to itself to avoid removing later
        if in_node_id != node_name:
            out_port = len(Node(graph, in_node_id).out_nodes())
            in_port = len(Node(graph, node_name).in_nodes())
            Node(graph, node_name).add_input_port(in_port)
            Node(graph, in_node_id).add_output_port(out_port, skip_if_exist=True)
            graph.add_edge(in_node_id, node_name, **create_edge_attrs(in_node_id, node_name, in_node_id, in_port, out_port))
    elif tokens[0] == b'output-node':
        layer_name = s[s.find(b'name=') + len(b'name='):].split(b' ')[0]
        layer_name = str(layer_name).strip('b').replace('\'', "")
        node_name = graph.unique_id(prefix=layer_name)
        graph.add_node(node_name,
                       parameters=None,
                       op='Identity',
                       kind='op')
        # Pair the output with a fake Result node so the tensor name survives
        # on the (output, fake output) edge.
        out_name = graph.unique_id(prefix=node_name + "_out")
        graph.add_node(out_name,
                       parameters=None,
                       op='Result',
                       kind='op')
        Node(graph, node_name).add_input_port(0)
        Node(graph, node_name).add_output_port(0)
        Node(graph, out_name).add_input_port(0)
        graph.add_edge(node_name, out_name, **create_edge_attrs(node_name, out_name, node_name))
        # parse input
        in_node_id = parse_input_for_node(s[s.find(b'input=') + len(b'input='):], graph, layer_node_map)
        out_port = len(Node(graph, in_node_id).out_nodes())
        Node(graph, in_node_id).add_output_port(out_port)
        graph.create_edge(Node(graph, in_node_id), Node(graph, node_name), out_port, 0, create_edge_attrs(in_node_id, node_name, in_node_id, 0, out_port))
        objective_type = s[s.find(b'objective=') + len(b'objective='):].split(b' ')[0].split(b'\n')[0]
        if objective_type != b'linear':
            raise Error("Unsupported objective-type for output {}".format(node_name))
    elif tokens[0] == b'dim-range-node':
        layer_name = s[s.find(b'name=') + len(b'name='):].split(b' ')[0]
        layer_name = str(layer_name).strip('b').replace('\'', "")
        offset = int(s[s.find(b'dim-offset=') + len(b'dim-offset='):].split(b' ')[0])
        dim = int(s[s.find(b'dim=') + len(b'dim='):].split(b' ')[0])
        if layer_name in layer_node_map:
            node_name = layer_node_map[layer_name]
            node = Node(graph, node_name)
            node['parameters'] = {'offset': mo_array([offset]), 'dim': mo_array([dim]), 'axis': mo_array([1])}
            node['op'] = 'Crop'
        else:
            node_name = graph.unique_id(prefix=layer_name)
            graph.add_node(node_name,
                           parameters={'offset': mo_array([offset]), 'dim': mo_array([dim]), 'axis': mo_array([1])},
                           op='Crop',
                           kind='op')
            layer_node_map[layer_name] = node_name
            node = Node(graph, node_name)
        in_node_id = parse_input_for_node(s[s.find(b'input-node=') + len(b'input-node='):], graph, layer_node_map)
        out_port = len(Node(graph, in_node_id).out_nodes())
        in_port = len(Node(graph, node_name).in_nodes())
        node.add_input_port(in_port)
        Node(graph, in_node_id).add_output_port(out_port)
        graph.create_edge(Node(graph, in_node_id), node, out_port, in_port, create_edge_attrs(in_node_id, node_name, in_node_id, in_port, out_port))
        # read dim info where possible to simplify shape calculation for MemoryOffset
        # shape calculation for MemoryOffset can't be done through shape of previous layer because
        # it is separated in 2 parts to remove cycle from graph
        for o_n_name, params in node.get_outputs():
            o_n = Node(graph, o_n_name)
            if o_n['op'] == 'MemoryOffset':
                o_n['parameters']['element_size'] = int64_array([1, dim])
    else:
        raise Error("Unsupported node specifier {}".format(tokens[0]))
    return True
def parse_input_for_node(string, graph, component_layer_map):
    """Resolve the 'input=' descriptor of a topology line to a node id.

    Thin alias of parse_specifier; kept as a separate name for readability at
    call sites.
    """
    return parse_specifier(string, graph, component_layer_map)
def parse_specifier(string, graph, layer_node_map):
    """Recursively parse an nnet3 input descriptor, materializing graph nodes.

    Supported specifiers: plain node names, Append(...), Offset(...), Sum(...),
    IfDefined(...), ReplaceIndex(...) and Scale(...).

    :param string: descriptor bytes, e.g. b'Append(Offset(x, -1), x)'
    :param graph: graph being built
    :param layer_node_map: layer name -> node id (updated here)
    :return: id of the node representing the descriptor
    :raises Error: for Offset with more than 2 arguments

    NOTE(review): an unrecognized specifier falls through all branches and
    returns None implicitly - confirm this is intended.
    """
    pos = string.find(b'(')
    if pos == -1:
        # node name
        input_name = str(string.split(b' ')[0]).strip('b').replace("\'", '').replace('\\n', '')
        if input_name not in layer_node_map:
            # Forward reference: create a placeholder node to be filled later.
            node_name = graph.unique_id(prefix=input_name)
            graph.add_node(node_name, parameters=[], op="", kind='op')
            layer_node_map[input_name] = node_name
        else:
            node_name = layer_node_map[input_name]
        return node_name

    # 'Spec(arg, ...)': split into the specifier keyword and raw argument strings.
    spec = string[:pos]
    args = get_args_for_specifier(string[pos:])
    if spec == b'Append':
        # Append concatenates the outputs of all sub-descriptors via a 'concat' node.
        nodes = []
        for i in range(len(args)):
            nodes.append(parse_specifier(args[i], graph, layer_node_map))
        layer_name = 'Append_'
        for node in nodes:
            layer_name = layer_name + node + "_"

        if layer_name not in layer_node_map:
            concat_name = graph.unique_id(prefix=layer_name)
            graph.add_node(concat_name,
                           parameters=None,
                           op='concat',
                           kind='op')
            layer_node_map[layer_name] = concat_name
            i = 0
            Node(graph, concat_name).add_sequence_of_ports('in', range(len(nodes)))
            for node in nodes:
                out_port = len(Node(graph, node).out_nodes())
                Node(graph, node).add_output_port(out_port)
                graph.create_edge(Node(graph, node), Node(graph, concat_name), out_port, i, create_edge_attrs(node, concat_name, node, i, out_port))
                i = i + 1
        else:
            concat_name = layer_node_map[layer_name]
        return concat_name
    elif spec == b'Offset':
        # Offset(x, t): shift x by t time steps via a MemoryOffset node.
        node = parse_specifier(args[0], graph, layer_node_map)
        t = int(args[1])
        if len(args) > 2:
            raise Error("ModelOptimizer supports only 2 arguments for Offset")
        layer_name = 'Offset_' + node + '_'
        if t < 0:
            layer_name = layer_name + '_' + str(-t)
        else:
            layer_name = layer_name + str(t)

        if layer_name not in layer_node_map:
            memory_name = graph.unique_id(prefix=layer_name)
            layer_node_map[layer_name] = memory_name
            # pair_name names the second half the MemoryOffset is later split into.
            memory_name_2 = memory_name + '_out'
            graph.add_node(memory_name,
                           parameters=dict(t=t, pair_name=memory_name_2, has_default=False),
                           op='MemoryOffset',
                           kind='op')
            out_port = len(Node(graph, node).out_nodes())
            in_port = len(Node(graph, memory_name).in_nodes())
            Node(graph, memory_name).add_input_port(in_port)
            Node(graph, node).add_output_port(out_port, skip_if_exist=True)
            graph.create_edge(Node(graph, node), Node(graph, memory_name), out_port, in_port, create_edge_attrs(node, memory_name, node, in_port, out_port))
        else:
            memory_name = layer_node_map[layer_name]
        return memory_name
    elif spec == b'Sum':
        # Sum adds the outputs of all sub-descriptors via an 'Add' node.
        nodes = []
        for i in range(len(args)):
            nodes.append(parse_specifier(args[i], graph, layer_node_map))

        layer_name = 'Sum_'
        for node in nodes:
            layer_name = layer_name + node + "_"

        if layer_name not in layer_node_map:
            sum_name = graph.unique_id(prefix=layer_name)
            graph.add_node(sum_name, parameters=None, op='Add', kind='op')
            layer_node_map[layer_name] = sum_name
        else:
            sum_name = layer_node_map[layer_name]

        for i, node in enumerate(nodes):
            out_port = len(Node(graph, node).out_nodes())
            Node(graph, node).add_output_port(out_port, skip_if_exist=True)
            Node(graph, sum_name).add_input_port(i)
            graph.add_edge(node, sum_name, **create_edge_attrs(node, sum_name, node, i))
        return sum_name
    elif spec == b'IfDefined':
        # IfDefined(x): mark the underlying MemoryOffset as having a default value.
        node_id = parse_specifier(args[0], graph, layer_node_map)
        node = Node(graph, node_id)
        if node.op == 'MemoryOffset':
            node['parameters']['has_default'] = True
        return node_id
    elif spec == b'ReplaceIndex':
        # ReplaceIndex: the operand node is forwarded unchanged.
        node = parse_specifier(args[0], graph, layer_node_map)
        return node
    elif spec == b'Scale':
        # Scale(v, x): build Mul(Const(v), x).
        node_name = parse_specifier(args[1], graph, layer_node_map)
        scale_value = float(args[0])
        layer_name = '{}/Mul/{}'.format(node_name, scale_value)
        if layer_name not in layer_node_map:
            scale_name = graph.unique_id(prefix=layer_name)
            scale_node = Mul(graph, {'name': scale_name}).create_node()
            layer_node_map[layer_name] = scale_name
            scale_const_name = 'Const_{}'.format(scale_value)
            const_node = Const(graph, {'name': scale_const_name, 'value': float_array([scale_value])}).create_node()
            node = Node(graph, node_name)
            graph.create_edge(const_node, scale_node, 0, 0, create_edge_attrs(const_node.id, scale_node.id, const_node.id))
            out_port = len(node.out_nodes())
            graph.create_edge(node, scale_node, out_port, 1, create_edge_attrs(node_name, scale_node.id, node_name, 1, out_port))
        else:
            scale_name = layer_node_map[layer_name]
        return scale_name
| tools/mo/openvino/tools/mo/front/kaldi/loader/loader.py | 25,367 | Structure of the file is the following:
magic-number(16896)<Nnet> <Next Layer Name> weights etc.
:param nnet_path:
:return:
Load ParallelComponent of the Kaldi model.
ParallelComponent contains parallel nested networks.
VariadicSplit is inserted before nested networks.
Outputs of nested networks concatenate with layer Concat.
:param file_descr: descriptor of the model file
:param graph: graph with the topology.
:param prev_layer_id: id of the input layers for parallel component layer
:return: id of the concat layer - last layer of the parallel component layers
Copyright (C) 2018-2022 Intel Corporation SPDX-License-Identifier: Apache-2.0 input to nnet1 models is of a rank 1 but we also insert batch_size to 0th axis 1st axis contains input_size of the nested subnetwork we split input from the main network to subnetworks Connect each output of variadic_split_node to each subnetwork's inputs in ParallelComponent and each subnetwork's output to concat_node start new model / submodel find out output layer, it can be only one due to chain structure of nnet1 model Tensor names information corresponding to a node is stored on outgoing edges. As output nodes do not have outgoing edges, fake outputs are required. In the following code for each output Identity node is added, and tensor name for the output is kept on (output, fake output) edge. After Result nodes adding transformation fake outputs are deleted from graph. Tensor names information corresponding to a node is stored on outgoing edges. As output nodes do not have outgoing edges, fake outputs are required. In the following code for each output Identity node is added, and tensor name for the output is kept on (output, fake output) edge. After Result nodes adding transformation fake outputs are deleted from graph. 
add information for shape calculation for MemoryOffset shape calculation for MemoryOffset can't be done through shape of previous layer because it is separated in 2 parts to remove cycle from graph don't take batch from Parameter, it will be overwritten take only second dimension because we have only 2 dimensions just ignore if priors were not found read dim info where possible to simplify shape calculation for MemoryOffset shape calculation for MemoryOffset can't be done through shape of previous layer because it is separated in 2 parts to remove cycle from graph Read dim info where possible to simplify shape calculation for MemoryOffset parse input don't create cyclic edges node to itself to avoid removing later parse input read dim info where possible to simplify shape calculation for MemoryOffset shape calculation for MemoryOffset can't be done through shape of previous layer because it is separated in 2 parts to remove cycle from graph node name | 2,758 | en | 0.856036 |
#!/usr/bin/env python
"""
SHUTDOWN.PY
Shutdown Plugin
(C) 2015, rGunti
"""
import dot3k.lcd as lcd
import dot3k.backlight as backlight
import time, datetime, copy, math, psutil
import sys
import os
from dot3k.menu import Menu, MenuOption
class Shutdown(MenuOption):
    """Menu option that powers the system off via 'halt'."""

    def __init__(self):
        self.last = self.millis()
        MenuOption.__init__(self)

    def _farewell(self):
        # Show a goodbye message, then fade the green backlight down to black.
        lcd.clear()
        lcd.set_cursor_position(3, 1)
        lcd.write("Bye (^_^)/")
        for level in range(126, -1, -1):
            backlight.rgb(0, level * 2, 0)
        lcd.clear()

    def redraw(self, menu):
        self._farewell()
        os.system("halt")
        sys.exit(0)
class Reboot(MenuOption):
    """Menu option that restarts the system via 'reboot'."""

    def __init__(self):
        self.last = self.millis()
        MenuOption.__init__(self)

    def _farewell(self):
        # Show a goodbye message, then fade the green backlight down to black.
        lcd.clear()
        lcd.set_cursor_position(3, 1)
        lcd.write("Bye (^_^)/")
        for level in range(126, -1, -1):
            backlight.rgb(0, level * 2, 0)
        lcd.clear()

    def redraw(self, menu):
        self._farewell()
        os.system("reboot")
        sys.exit(0)
class QuitScript(MenuOption):
    """Menu option that exits the menu script without touching the OS."""

    def __init__(self):
        self.last = self.millis()
        MenuOption.__init__(self)

    def _farewell(self):
        # Show a goodbye message, then fade the green backlight down to black.
        lcd.clear()
        lcd.set_cursor_position(3, 1)
        lcd.write("Bye (^_^)/")
        for level in range(126, -1, -1):
            backlight.rgb(0, level * 2, 0)
        lcd.clear()

    def redraw(self, menu):
        self._farewell()
        sys.exit(0)
Shutdown Plugin
(C) 2015, rGunti
!/usr/bin/env python | 68 | en | 0.581802 |
# Iterable moved to collections.abc; the plain collections alias was removed
# in Python 3.10.
from collections.abc import Iterable
from itertools import combinations
from math import log, ceil

from mathsat import msat_term, msat_env
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_rational_type, msat_get_bool_type
from mathsat import msat_make_and, msat_make_not, msat_make_or, msat_make_iff
from mathsat import msat_make_leq, msat_make_equal, msat_make_true
from mathsat import msat_make_number, msat_make_plus, msat_make_times

from ltl.ltl import TermMap, LTLEncoder
from utils import name_next

# Number of stations in the token ring.
num_procs = 24
# Name of the time-elapse symbol shared by all modules.
delta_name = "delta"
def decl_consts(menv: msat_env, name: str, c_type):
    """Declare the (current, next) pair of constants for symbol `name`."""
    assert not name.startswith("_"), name
    curr = msat_make_constant(menv, msat_declare_function(menv, name, c_type))
    nxt = msat_make_constant(menv,
                             msat_declare_function(menv, name_next(name), c_type))
    return curr, nxt
def msat_make_minus(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Encode arg0 - arg1 as arg0 + (-1 * arg1)."""
    neg_arg1 = msat_make_times(menv, arg1, msat_make_number(menv, "-1"))
    return msat_make_plus(menv, arg0, neg_arg1)
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Encode arg0 < arg1 as not (arg0 >= arg1)."""
    return msat_make_not(menv, msat_make_geq(menv, arg0, arg1))
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Encode arg0 >= arg1 via the primitive leq with swapped operands."""
    return msat_make_leq(menv, arg1, arg0)
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Encode arg0 > arg1 as not (arg0 <= arg1)."""
    return msat_make_not(menv, msat_make_leq(menv, arg0, arg1))
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Encode arg0 -> arg1 as (not arg0) | arg1."""
    return msat_make_or(menv, msat_make_not(menv, arg0), arg1)
def diverging_symbs(menv: msat_env) -> frozenset:
    """Return the symbols required to diverge: the time-elapse `delta`."""
    rational = msat_get_rational_type(menv)
    delta = msat_make_constant(menv,
                               msat_declare_function(menv, delta_name, rational))
    return frozenset([delta])
def check_ltl(menv: msat_env, enc: LTLEncoder) -> (Iterable, msat_term,
                                                   msat_term, msat_term):
    """Build the token-ring transition system and the LTL property to verify.

    Composes one TokenManager with `num_procs` Stations, synchronizes their
    acquire/release events, and accumulates the total transmission time.

    :param menv: MathSAT environment used to build all terms
    :param enc: LTL encoder providing the temporal operators
    :return: (curr -> next symbol map, init formula, trans formula, LTL formula)
    """
    assert menv
    assert isinstance(menv, msat_env)
    assert enc
    assert isinstance(enc, LTLEncoder)
    real_type = msat_get_rational_type(menv)
    delta, x_delta = decl_consts(menv, delta_name, real_type)
    transm_time, x_transm_time = decl_consts(menv, "tot_transm_time",
                                             real_type)
    curr2next = {delta: x_delta, transm_time: x_transm_time}

    mgr = TokenManager("mgr", menv, enc, delta)
    stations = [Station("st{}".format(i), menv, enc, mgr, delta)
                for i in range(num_procs)]
    # Collect every module's (curr, next) symbol pairs; names must not clash.
    for s, x_s in mgr.symb2next.items():
        curr2next[s] = x_s
    for comp in stations:
        for s, x_s in comp.symb2next.items():
            assert s not in curr2next.keys()
            curr2next[s] = x_s

    zero = msat_make_number(menv, "0")
    # init: tot_transm_time = 0
    init = msat_make_equal(menv, transm_time, zero)
    # invar: delta >= 0
    init = msat_make_and(menv, init, msat_make_geq(menv, delta, zero))
    trans = msat_make_geq(menv, x_delta, zero)
    # only 1 station moves
    for s0, s1 in combinations(stations, 2):
        trans = msat_make_and(menv, trans,
                              msat_make_or(menv, s0.stutter, s1.stutter))
    # sync stations and mgr
    st_acquire = stations[0].acquire
    for st in stations[1:]:
        st_acquire = msat_make_or(menv, st_acquire, st.acquire)
    trans = msat_make_and(menv, trans,
                          msat_make_iff(menv, mgr.acquire, st_acquire))
    st_release = stations[0].release
    for st in stations[1:]:
        st_release = msat_make_or(menv, st_release, st.release)
    trans = msat_make_and(menv, trans,
                          msat_make_iff(menv, mgr.release, st_release))
    # (mgr.counting & mgr.idle') -> total_transm_time' = total_transm_time + mgr.c
    lhs = msat_make_and(menv, mgr.counting, mgr.x_idle)
    rhs = msat_make_equal(menv, x_transm_time,
                          msat_make_plus(menv, transm_time, mgr.c))
    trans = msat_make_and(menv, trans,
                          msat_make_impl(menv, lhs, rhs))
    # !(mgr.counting & mgr.idle') -> total_transm_time' = total_transm_time
    lhs = msat_make_not(menv, lhs)
    rhs = msat_make_equal(menv, x_transm_time, transm_time)
    trans = msat_make_and(menv, trans,
                          msat_make_impl(menv, lhs, rhs))
    # Conjoin every module's own init/trans constraints.
    init = msat_make_and(menv, init, mgr.init)
    trans = msat_make_and(menv, trans, mgr.trans)
    for s in stations:
        init = msat_make_and(menv, init, s.init)
        trans = msat_make_and(menv, trans, s.trans)
    # (G F (mgr.counting & mgr.idle')) -> G F total_transm_time < 10
    lhs = enc.make_G(enc.make_F(msat_make_and(menv, mgr.counting,
                                              enc.make_X(mgr.idle))))
    rhs = msat_make_lt(menv, transm_time, msat_make_number(menv, "10"))
    rhs = enc.make_G(enc.make_F(rhs))
    ltl = msat_make_impl(menv, lhs, rhs)
    return TermMap(curr2next), init, trans, ltl
class Module:
    """Synchronous component"""

    def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
                 *args, **kwargs):
        # name is used as a prefix for every symbol this module declares.
        self.name = name
        self.menv = menv
        self.enc = enc
        # Mapping current-state symbol -> next-state symbol.
        self.symb2next = {}
        true = msat_make_true(menv)
        # Subclasses conjoin their constraints onto these.
        self.init = true
        self.trans = true

    def _symb(self, v_name, v_type):
        """Declare a (curr, next) constant pair named '<module>_<v_name>'."""
        v_name = "{}_{}".format(self.name, v_name)
        return decl_consts(self.menv, v_name, v_type)

    def _enum(self, v_name: str, enum_size: int):
        """Binary-encode an enum of `enum_size` values over ceil(log2(n)) booleans.

        :return: (list of (curr, next) boolean symbol pairs,
                  current-state value predicates, next-state value predicates);
                  each predicate is the conjunction of bit literals matching
                  the value's binary representation.
        """
        bool_type = msat_get_bool_type(self.menv)
        num_bits = ceil(log(enum_size, 2))
        b_vars = []
        for idx in range(num_bits):
            c_name = "{}{}".format(v_name, idx)
            b_vars.append(tuple(self._symb(c_name, bool_type)))
        vals = []
        x_vals = []
        for enum_val in range(enum_size):
            bit_val = format(enum_val, '0{}b'.format(num_bits))
            assert len(bit_val) == num_bits
            assert all(c in {'0', '1'} for c in bit_val)
            # Bit '1' keeps the (curr, next) symbols; bit '0' negates both.
            assign = [b_vars[idx] if c == '1' else
                      (msat_make_not(self.menv, b_vars[idx][0]),
                       msat_make_not(self.menv, b_vars[idx][1]))
                      for idx, c in enumerate(reversed(bit_val))]
            pred = assign[0][0]
            x_pred = assign[0][1]
            for it in assign[1:]:
                pred = msat_make_and(self.menv, pred, it[0])
                x_pred = msat_make_and(self.menv, x_pred, it[1])
            vals.append(pred)
            x_vals.append(x_pred)
        assert len(vals) == enum_size
        assert len(x_vals) == enum_size
        return b_vars, vals, x_vals
class TokenManager(Module):
    """TokenManager module

    Two locations (idle / counting) with a clock `c` and a deadline `timeout`;
    it hands the token to a station on `acquire` and takes it back on `release`.
    """

    def __init__(self, name: str, menv: msat_env, enc: LTLEncoder, delta):
        super().__init__(name, menv, enc)
        real_type = msat_get_rational_type(menv)
        bool_type = msat_get_bool_type(menv)
        # Location bit: True = idle, False = counting.
        loc, x_loc = self._symb("l", bool_type)
        # Event enum: 0 = acquire, 1 = release, 2 = stutter.
        evt_symbs, evts, x_evts = self._enum("evt", 3)
        c, x_c = self._symb("c", real_type)
        timeout, x_timeout = self._symb("timeout", real_type)
        self.timeout = timeout
        self.x_timeout = x_timeout
        self.c = c
        self.idle = loc
        self.counting = msat_make_not(menv, loc)
        self.x_idle = x_loc
        self.x_counting = msat_make_not(menv, x_loc)
        self.acquire = evts[0]
        self.release = evts[1]
        self.stutter = evts[2]
        self.symb2next = {loc: x_loc, c: x_c, timeout: x_timeout}
        for s, x_s in evt_symbs:
            assert s not in self.symb2next
            self.symb2next[s] = x_s
        zero = msat_make_number(menv, "0")
        # bound evt
        bound_evt = evts[0]
        x_bound_evt = x_evts[0]
        for evt, x_evt in zip(evts[1:], x_evts[1:]):
            bound_evt = msat_make_or(menv, bound_evt, evt)
            x_bound_evt = msat_make_or(menv, x_bound_evt, x_evt)
        self.init = bound_evt
        self.trans = x_bound_evt
        # idle & c = 0 & timeout = 0
        self.init = msat_make_and(
            menv,
            msat_make_and(menv, self.init, self.idle),
            msat_make_and(menv,
                          msat_make_equal(menv, c, zero),
                          msat_make_equal(menv, timeout, zero)))
        # invar: counting -> c <= timeout
        rhs = msat_make_leq(menv, c, timeout)
        self.init = msat_make_and(menv, self.init,
                                  msat_make_impl(menv, self.counting, rhs))
        rhs = msat_make_leq(menv, x_c, x_timeout)
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, self.x_counting, rhs))
        # (delta > 0 | stutter) -> c' = c + delta & l' = l & timeout' = timeout
        lhs = msat_make_or(menv, self.stutter,
                           msat_make_gt(menv, delta, zero))
        rhs = msat_make_and(
            menv,
            msat_make_and(menv, msat_make_iff(menv, x_loc, loc),
                          msat_make_equal(menv, x_c,
                                          msat_make_plus(menv, c, delta))),
            msat_make_equal(menv, x_timeout, timeout))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # Discrete step: no time elapses and an acquire/release event fires.
        disc_t = msat_make_and(menv, msat_make_equal(menv, delta, zero),
                               msat_make_or(menv, self.acquire, self.release))
        # (idle) -> (acquire & counting' & c' = 0)
        lhs = msat_make_and(menv, disc_t, self.idle)
        rhs = msat_make_and(menv, self.acquire,
                            msat_make_and(menv, self.x_counting,
                                          msat_make_equal(menv, x_c, zero)))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (counting) -> (release & idle' & c' = 0 & timeout' = 0)
        lhs = msat_make_and(menv, disc_t, self.counting)
        rhs = msat_make_and(
            menv,
            msat_make_and(menv, self.x_idle, self.release),
            msat_make_and(menv,
                          msat_make_equal(menv, x_c, zero),
                          msat_make_equal(menv, x_timeout, zero)))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
class Station(Module):
    """Station module

    Two locations (idle / transm); on `acquire` it takes the token and sets the
    manager's timeout to its requested time, on `release` it returns the token.
    """

    def __init__(self, name: str, menv: msat_env, enc: LTLEncoder, mgr, delta):
        super().__init__(name, menv, enc)
        real_type = msat_get_rational_type(menv)
        bool_type = msat_get_bool_type(menv)
        # Location bit: True = idle, False = transmitting.
        loc, x_loc = self._symb("l", bool_type)
        # Event enum: 0 = acquire, 1 = release, 2 = stutter.
        evt_symbs, evts, x_evts = self._enum("evt", 3)
        req_time, x_req_time = self._symb("req_time", real_type)
        self.idle = loc
        self.transm = msat_make_not(menv, loc)
        self.x_idle = x_loc
        self.x_transm = msat_make_not(menv, x_loc)
        self.acquire = evts[0]
        self.release = evts[1]
        self.stutter = evts[2]
        self.symb2next = {loc: x_loc, req_time: x_req_time}
        for s, x_s in evt_symbs:
            assert s not in self.symb2next
            self.symb2next[s] = x_s
        zero = msat_make_number(menv, "0")
        # bound evt
        bound_evt = evts[0]
        x_bound_evt = x_evts[0]
        for evt, x_evt in zip(evts[1:], x_evts[1:]):
            bound_evt = msat_make_or(menv, bound_evt, evt)
            x_bound_evt = msat_make_or(menv, x_bound_evt, x_evt)
        self.init = bound_evt
        self.trans = x_bound_evt
        # idle
        self.init = msat_make_and(menv, self.init, self.idle)
        # invar: req_time > 0
        self.init = msat_make_and(menv, self.init,
                                  msat_make_gt(menv, req_time, zero))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_gt(menv, x_req_time, zero))
        # (delta > 0 | stutter) -> l' = l & req_time' = req_time
        lhs = msat_make_or(menv, self.stutter,
                           msat_make_gt(menv, delta, zero))
        rhs = msat_make_and(
            menv,
            msat_make_iff(menv, x_loc, loc),
            msat_make_equal(menv, x_req_time, req_time))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # Discrete step: no time elapses and an acquire/release event fires.
        disc_t = msat_make_and(menv, msat_make_equal(menv, delta, zero),
                               msat_make_or(menv, self.acquire, self.release))
        # (idle) -> (acquire & transm' & mgr.timeout' = req_time & req_time' = req_time)
        lhs = msat_make_and(menv, disc_t, self.idle)
        rhs = msat_make_and(
            menv,
            msat_make_and(menv, self.acquire, self.x_transm),
            msat_make_and(menv,
                          msat_make_equal(menv, mgr.x_timeout, req_time),
                          msat_make_equal(menv, x_req_time, req_time)))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (transm) -> (release & mgr.c > 0 & idle')
        lhs = msat_make_and(menv, disc_t, self.transm)
        rhs = msat_make_and(
            menv, self.release,
            msat_make_and(menv, msat_make_gt(menv, mgr.c, zero), self.x_idle))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
| benchmarks/ltl_timed_transition_system/token_ring/f3/token_ring_0024.py | 13,536 | Synchronous component
Station module
TokenManager module
init: tot_transm_time = 0 invar: delta >= 0 only 1 station moves sync stations and mgr (mgr.counting & mgr.idle') -> total_transm_time' = total_transm_time + mgr.c !(mgr.counting & mgr.idle') -> total_transm_time' = total_transm_time (G F (mgr.counting & mgr.idle')) -> G F total_transm_time < 10 bound evt idle & c = 0 & timeout = 0 invar: counting -> c <= timeout (delta > 0 | stutter) -> c' = c + delta & l' = l & timeout' = timeout (idle) -> (acquire & counting' & c' = 0) (counting) -> (release & idle' & c' = 0 & timeout' = 0) bound evt idle invar: req_time > 0 (delta > 0 | stutter) -> l' = l & req_time' = req_time (idle) -> (acquire & transm' & mgr.timeout' = req_time & req_time' = req_time) (transm) -> (release & mgr.c > 0 & idle') | 802 | en | 0.524222 |
# Print iterations progress
def printProgressBar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='█', printEnd="\r"):
    """
    Render a terminal progress bar; call once per loop iteration.
    @params:
        iteration   - Required  : current iteration (Int)
        total       - Required  : total iterations (Int)
        prefix      - Optional  : prefix string (Str)
        suffix      - Optional  : suffix string (Str)
        decimals    - Optional  : positive number of decimals in percent complete (Int)
        length      - Optional  : character length of bar (Int)
        fill        - Optional  : bar fill character (Str)
        printEnd    - Optional  : end character (e.g. "\r", "\r\n") (Str)
    From: https://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console
    """
    percent = f"{100 * (iteration / float(total)):.{decimals}f}"
    filled = int(length * iteration // total)
    bar = fill * filled + '-' * (length - filled)
    print(f'\r{prefix} |{bar}| {percent}% {suffix}', end=printEnd)
    # Emit a newline once the bar is complete so the shell prompt is not overwritten
    if iteration == total:
        print()
| src/utils/console_functions.py | 1,193 | Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
    printEnd    - Optional  : end character (e.g. "\r", "\r\n") (Str)
From: https://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console
Print iterations progress Print New Line on Complete | 716 | en | 0.357814 |
"""Julia set generator without optional PIL-based image drawing"""
import time
#from memory_profiler import profile
# area of complex space to investigate
x1, x2, y1, y2 = -1.8, 1.8, -1.8, 1.8
# real and imaginary parts of the constant c in the Julia update z' = z*z + c
c_real, c_imag = -0.62772, -.42193
@profile  # line-by-line decorator provided by memory_profiler (see commented import above)
def calculate_z_serial_purepython(maxiter, zs, cs):
    """Calculate output list using Julia update rule"""
    # 'profile.timestamp' labels a phase in memory_profiler's sampling timeline;
    # only meaningful when running under mprof/memory_profiler.
    with profile.timestamp("create_output_list"):
        output = [0] * len(zs)
        time.sleep(1)  # presumably to make this allocation phase visible in the memory plot — TODO confirm
    with profile.timestamp("create_range_of_zs"):
        iterations = range(len(zs))
    with profile.timestamp("calculate_output"):
        for i in iterations:
            n = 0
            z = zs[i]
            c = cs[i]
            # escape-time iteration: stop on divergence (|z| >= 2) or at maxiter
            while n < maxiter and abs(z) < 2:
                z = z * z + c
                n += 1
            # number of iterations until escape (== maxiter if the point never escaped)
            output[i] = n
    return output
@profile
def calc_pure_python(draw_output, desired_width, max_iterations):
    """Create a list of complex co-ordinates (zs) and complex parameters (cs), build Julia set and display"""
    # NOTE(review): Python 2 syntax below ('print' statements, 'func_name');
    # run under Python 2 or port before using with Python 3.
    # 'draw_output' is not used in this variant (no PIL drawing).
    x_step = (float(x2 - x1) / float(desired_width))
    # y_step is negative (y1 < y2): we walk downwards from y2 towards y1
    y_step = (float(y1 - y2) / float(desired_width))
    x = []
    y = []
    ycoord = y2
    while ycoord > y1:
        y.append(ycoord)
        ycoord += y_step
    xcoord = x1
    while xcoord < x2:
        x.append(xcoord)
        xcoord += x_step
    # set width and height to the generated pixel counts, rather than the
    # pre-rounding desired width and height
    # build a list of co-ordinates and the initial condition for each cell.
    # Note that our initial condition is a constant and could easily be removed,
    # we use it to simulate a real-world scenario with several inputs to our
    # function
    zs = []
    cs = []
    for ycoord in y:
        for xcoord in x:
            zs.append(complex(xcoord, ycoord))
            cs.append(complex(c_real, c_imag))
    print "Length of x:", len(x)
    print "Total elements:", len(zs)
    start_time = time.time()
    output = calculate_z_serial_purepython(max_iterations, zs, cs)
    end_time = time.time()
    secs = end_time - start_time
    print calculate_z_serial_purepython.func_name + " took", secs, "seconds"
    # this sum is expected for 1000^2 grid with 300 iterations
    assert sum(output) == 33219980
# Calculate the Julia set using a pure Python solution with
# reasonable defaults for a laptop
# set draw_output to True to use PIL to draw an image
# (draw_output is currently ignored by calc_pure_python above)
calc_pure_python(draw_output=False, desired_width=1000, max_iterations=300)
#calc_pure_python(draw_output=False, desired_width=10, max_iterations=300)
| codes/01_profiling/memory_profiler/julia1_memoryprofiler2.py | 2,580 | from memory_profiler import profile area of complex space to investigate set width and height to the generated pixel counts, rather than the pre-rounding desired width and height build a list of co-ordinates and the initial condition for each cell. Note that our initial condition is a constant and could easily be removed, we use it to simulate a real-world scenario with several inputs to our function this sum is expected for 1000^2 grid with 300 iterations Calculate the Julia set using a pure Python solution with reasonable defaults for a laptop set draw_output to True to use PIL to draw an imagecalc_pure_python(draw_output=False, desired_width=10, max_iterations=300) | 676 | en | 0.854869 |
import os
import csv
# File path
election_dataCSV = os.path.join('.', 'election_data.csv')

# Results to compute:
# - The total number of votes cast
# - A complete list of candidates who received votes
# - The percentage and total number of votes each candidate won
# - The winner of the election based on popular vote.

total_votes = 0
# Count votes per candidate in a dict instead of one hard-coded counter per
# name, so any candidate appearing in the data is handled automatically.
candidate_votes = {}

# Open file as read; 'with' guarantees the handle is closed
# (the original built 'election_dataCSV' but then opened a literal path,
# and never closed the handle: 'file.close' without parentheses).
with open(election_dataCSV, 'r') as csvfile:
    # Identifying CSV file with delimiter set
    csvreader = csv.reader(csvfile, delimiter=',')
    header = next(csvreader)
    for row in csvreader:
        # Adding total number of votes cast
        total_votes += 1
        # row[2] holds the candidate name
        name = row[2]
        candidate_votes[name] = candidate_votes.get(name, 0) + 1

# Find the winner by using the max function over the vote counts
winner = max(candidate_votes, key=candidate_votes.get)

# Build the report once so the console output and the exported txt file
# always agree (the original exported a hard-coded, stale string).
report_lines = [
    "Election Results",
    "----------------------------",
    f"Total Votes: {total_votes}",
    "----------------------------",
]
for name, votes in candidate_votes.items():
    percentage = votes / total_votes * 100
    report_lines.append(f"{name}: {percentage:.3f}% ({votes})")
report_lines += [
    "----------------------------",
    f"Winner: {winner}",
    "----------------------------",
]

report = "\n".join(report_lines)

# Print conclusion
print(report)

# Export results into txt file
with open('election_output.txt', 'w') as outfile:
    outfile.write(report)
| PyPoll/main.py | 2,695 | File path The total number of votes cast A complete list of candidates who received votes The percentage of votes each candidate won The total number of votes each candidate won The winner of the election based on popular vote. Declaring my variables percent_votes = 0 total_votes_candidate = 0 winner = 0 Open file as read Identifying CSV file with delimiter set firstRow = next(csvreader) total_votes += 1 previous_row = int(firstRow[0]) Add rows to listAdding total number of votes castCandidates that received votes Create a list of the candidates Pair candidates and votes together Find the winner by using the max function Calculating the percentage of votes per candidate Print conclusion Export results into txt file | 725 | en | 0.938027 |
'''
A collection of functions to perform portfolio analysis.
Max Gosselin, 2019
'''
import numpy as np
import pandas as pd
from scipy import optimize
def portfolio_metrics(weights, avg_xs_returns, covariance_matrix):
    ''' Compute basic portfolio metrics: return, stdv, sharpe ratio '''
    # Expected excess return and standard deviation of the weighted portfolio.
    expected_return = np.sum(weights * avg_xs_returns)
    stdv = np.sqrt(np.dot(weights.T, np.dot(weights, covariance_matrix)))

    metrics = {
        'return': expected_return,
        'stdv': stdv,
        'sharpe': expected_return / stdv,
        'weights': weights,
    }
    # Also expose each ticker's individual weight under its own key.
    for ticker, weight in zip(covariance_matrix.columns, weights):
        metrics[ticker] = weight
    return metrics
def simulate_portfolios(iters, xs_stats, covariance_matrix):
    ''' What we want here is to randomly generate portfolios that will sit
        inside the efficiency frontier for illustrative purposes

        Generates exactly 'iters' random portfolios. (Bug fix: the original
        'while iters > 1' loop produced only iters - 1 portfolios.)
    '''
    simulations = []
    for _ in range(iters):
        # Random weights, normalized to sum to 1.
        weights = np.random.random(len(xs_stats.columns))
        weights /= np.sum(weights)
        simulations.append(portfolio_metrics(weights, xs_stats.loc['Avg'], covariance_matrix))
    return simulations
def solve_minvar(xs_avg, covariance_matrix):
    ''' Solve for the weights of the minimum variance portfolio
        Constraints:
            sum of weights = 1,
            weights bound by [0, 0.2],
        Returns the weights and the jacobian used to generate the solution.
    '''
    def _stdv(weights, _avg, cov):
        ''' Objective: portfolio standard deviation. '''
        return np.sqrt(np.dot(weights.T, np.dot(weights, cov)))

    n_assets = len(xs_avg)
    return optimize.minimize(
        _stdv,
        np.zeros(n_assets),
        args=(xs_avg, covariance_matrix),
        method='SLSQP',
        bounds=[(0, 0.2)] * n_assets,
        constraints=[{'type': 'eq', 'fun': lambda w: np.sum(w) - 1}],
        options={'maxiter': 1000})
def solve_maxsharpe(xs_avg, covariance_matrix):
    ''' Solve for the weights of the maximum Sharpe ratio portfolio
        Constraints:
            sum of weights = 1,
            weights bound by [0, 0.2],
        Returns the weights and the jacobian used to generate the solution.
    '''
    def _neg_sharpe(weights, avg, cov):
        ''' Objective: scipy only minimizes, so return the negated Sharpe ratio. '''
        pm = portfolio_metrics(weights, avg, cov)
        return -pm['return'] / pm['stdv']

    n_assets = len(xs_avg)
    # Start from the equal-weight portfolio, which is feasible.
    start = np.ones(n_assets) / n_assets
    return optimize.minimize(
        _neg_sharpe,
        start,
        args=(xs_avg, covariance_matrix),
        method='SLSQP',
        bounds=[(0, 0.2)] * n_assets,
        constraints=[{'type': 'eq', 'fun': lambda w: np.sum(w) - 1}],
        options={'maxiter': 1000})
def solve_for_target_return(xs_avg, covariance_matrix, target):
    ''' Solve for the weights of the minimum variance portfolio which has
        a specific targeted return.
        Constraints:
            sum of weights = 1,
            weights bound by [0, 0.2],
            portfolio return = target return,
        Returns the weights and the jacobian used to generate the solution.
    '''
    def _stdv(weights, _avg, cov):
        ''' Objective: portfolio standard deviation. '''
        return np.sqrt(np.dot(weights.T, np.dot(weights, cov)))

    def _expected_return(weights):
        ''' Portfolio return for the equality constraint below. '''
        return np.sum(weights * xs_avg)

    n_assets = len(xs_avg)
    constraints = [
        {'type': 'eq', 'fun': lambda w: np.sum(w) - 1},
        {'type': 'eq', 'fun': lambda w: _expected_return(w) - target},
    ]
    return optimize.minimize(
        _stdv,
        np.ones(n_assets) / n_assets,
        args=(xs_avg, covariance_matrix),
        method='SLSQP',
        bounds=[(0, 0.2)] * n_assets,
        constraints=constraints,
        options={'maxiter': 1000})
def generate_efficient_frontier(targets, xs_avg, covariance_matrix):
    ''' Trace the efficient frontier: one minimum-variance portfolio
        (with full metrics) per targeted return. '''
    return [
        portfolio_metrics(
            solve_for_target_return(xs_avg, covariance_matrix, target)['x'],
            xs_avg,
            covariance_matrix)
        for target in targets
    ]
| portfolio_functions.py | 4,837 | Anonymous function to check equality with the target return
Anonymous function to compute sharpe ratio, note that since scipy only minimizes we go negative.
Anonymous function to compute stdv
Anonymous function to compute stdv
Compute basic portfolio metrics: return, stdv, sharpe ratio
What we want here is to randomly generate portfolios that will sit
inside the efficiency frontier for illustrative purposes
Solve for the weights of the minimum variance portfolio which has
a specific targeted return.
Constraints:
sum of weights = 1,
weights bound by [0, 0.2],
portfolio return = target return,
Returns the weights and the jacobian used to generate the solution.
Solve for the weights of the maximum Sharpe ratio portfolio
Constraints:
sum of weights = 1,
weights bound by [0, 0.2],
Returns the weights and the jacobian used to generate the solution.
Solve for the weights of the minimum variance portfolio
Constraints:
sum of weights = 1,
weights bound by [0, 0.2],
Returns the weights and the jacobian used to generate the solution.
A collection of functions to perform portfolio analysis.
Max Gosselin, 2019
Set up an empty array to store our generated portfolios | 1,217 | en | 0.854758 |
## @example pyfast_and_pyside2_custom_window.py
# This example demonstrates how to use FAST in an existing PySide2 application.
#
# @m_class{m-block m-warning} @par PySide2 Qt Version
# @parblock
# For this example you <b>must</b> use the same Qt version of PySide2 as used in FAST (5.14.0)
# Do this with: <b>pip install pyside2==5.14.0</b>
# @endparblock
#
# @image html images/examples/python/pyfast_and_pyside_custom_window.jpg width=350px;
from PySide2.QtWidgets import *
from PySide2.QtOpenGL import QGLWidget
from PySide2.QtCore import Slot
import PySide2.QtSvg # Must import this before fast due to conflicting symbols
from shiboken2 import wrapInstance
import fast
import threading
import sys
#fast.Reporter.setGlobalReportMethod(fast.Reporter.COUT)
# Create a simple window widget with pyside2
class Window(QWidget):
    """Qt main window embedding a FAST view above a pipeline-restart button."""
    def __init__(self):
        super(Window, self).__init__()
        self.setWindowTitle('pyFAST + PySide2')
        # Create button
        self.button = QPushButton("Restart FAST pipeline")
        # Create FAST view
        self.view = fast.View()
        # wrapInstance converts the raw C++ QGLWidget pointer owned by FAST
        # into a PySide2 widget object usable on the Qt side.
        self.installEventFilter(wrapInstance(int(self.view.asQGLWidget()), QGLWidget))
        self.view.set2DMode()
        # Create layout and add widgets
        layout = QVBoxLayout()
        layout.addWidget(wrapInstance(int(self.view.asQGLWidget()), QGLWidget))
        layout.addWidget(self.button)
        self.setLayout(layout)
        # Connect button click event
        self.button.clicked.connect(self.restartPipeline)
        self.resize(512, 512)
    @Slot()
    def restartPipeline(self):
        """(Re)build the FAST pipeline and restart the computation thread."""
        # Create FAST computation thread
        # This is needed to run computations smoothly in the background
        # The computation thread must live in the object to avoid being destroyed when this function is done.
        self.computationThread = fast.ComputationThread.create()
        self.computationThread.addView(self.view)
        # Setup a FAST pipeline: stream ultrasound images from the test data
        # directory ('#' is the frame-number placeholder) into a 2D renderer.
        streamer = fast.ImageFileStreamer \
            .create(fast.Config.getTestDataPath() + '/US/Heart/ApicalFourChamber/US-2D_#.mhd')
        renderer = fast.ImageRenderer.create() \
            .connect(streamer)
        self.view.removeAllRenderers()
        self.view.addRenderer(renderer)
        self.view.reinitialize()
        self.computationThread.start()
if __name__ == '__main__':
    # Create the Qt Application
    app = QApplication(sys.argv)
    # Create and show the window
    window = Window()
    window.show()
    # Run the main Qt loop; exit the process with Qt's return code
    sys.exit(app.exec_())
# Project Quex (http://quex.sourceforge.net); License: MIT;
# (C) 2005-2020 Frank-Rene Schaefer;
#_______________________________________________________________________________
from quex.input.setup import NotificationDB
from quex.input.regular_expression.pattern import Pattern_Prep
import quex.input.regular_expression.core as regular_expression
from quex.input.code.base import SourceRef, \
SourceRef_DEFAULT, \
SourceRefObject
from quex.engine.state_machine.core import DFA
import quex.engine.state_machine.construction.sequentialize as sequentialize
import quex.engine.state_machine.construction.repeat as repeat
import quex.engine.state_machine.algebra.difference as difference
import quex.engine.state_machine.algebra.intersection as intersection
import quex.engine.state_machine.algorithm.beautifier as beautifier
import quex.engine.state_machine.check.swallow as swallow
import quex.engine.state_machine.check.outrun as outrun
import quex.engine.state_machine.check.identity as identity
import quex.engine.state_machine.check.tail as tail
from quex.engine.misc.tools import typed
from quex.engine.misc.interval_handling import NumberSet
from quex.engine.counter import IndentationCount_Pre, \
cc_type_name_db, \
cc_type_db
from quex.engine.counter_builder import CountActionMap_Builder
import quex.engine.misc.error as error
import quex.engine.misc.error_check as error_check
from quex.engine.misc.file_in import check, \
check_or_die, \
skip_whitespace, \
read_identifier, \
read_integer
from quex.constants import E_CharacterCountType
from quex.blackboard import setup as Setup
def parse_CountActionMap(fh):
    """Parse a line/column-count section from 'fh' and return the finalized map."""
    builder = CountActionMapFromParser_Builder(fh)
    return _base_parse(fh, builder)
def parse_IndentationSetup(fh):
    """Parse an indentation-setup section from 'fh' and return the finalized setup."""
    builder = IndentationSetup_Builder(fh)
    return _base_parse(fh, builder)
def _base_parse(fh, builder, IndentationSetupF=False):
    """Parses pattern definitions of the form:

          [ \\t]                                       => grid 4;
          [:intersection([:alpha:], [\\X064-\\X066]):] => space 1;

    In other words the right hand side *must* be a character set.

    ADAPTS: 'builder' to contain the parsed information.
    RETURNS: builder.finalize() once the closing '>' is reached.
    """
    # NOTE: Catching of EOF happens in caller: parse_section(...)
    #
    while True:
        skip_whitespace(fh)
        # '>' terminates the section.
        if check(fh, ">"):
            break
        # A regular expression state machine
        pattern, identifier, sr = _parse_definition_head(fh, builder.identifier_list)
        if pattern is None and not builder.keyword_else_f:
            error.log("Keyword '\\else' cannot be used in indentation setup.", fh)
        # '_parse_definition_head()' ensures that only identifiers mentioned
        # in 'builder.identifier_list' are accepted.
        if builder.requires_count():
            count = _read_value_specifier(fh, identifier, 1)
            spec_args = (identifier, pattern, count, sr)
        else:
            spec_args = (identifier, pattern, sr)
        builder.specify(*spec_args)
        if not check(fh, ";"):
            error.log("Missing ';' after '%s' specification." % identifier, fh)
    return builder.finalize()
class CharacterSetVsAction_BuilderBase:
    """Shared base for builders that map character sets to actions.

    .identifier_list -- admissible right-hand-side identifiers.
    .keyword_else_f  -- whether the '\\else' keyword is admissible.
    """
    def __init__(self, IdentifierList, KeywordElseAdmissibleF):
        self.identifier_list = IdentifierList
        self.keyword_else_f = KeywordElseAdmissibleF
class CountActionMapFromParser_Builder(CharacterSetVsAction_BuilderBase):
    """Line/column number count specification.
    ___________________________________________________________________________
    The main result of the parsing is the Base's .count_command_map which is
    an instance of CountActionMap_Builder.
    ____________________________________________________________________________
    """
    @typed(sr=SourceRef)
    def __init__(self, fh):
        self.sr = SourceRef.from_FileHandle(fh)
        self.__fh = fh
        self._ca_map_builder = CountActionMap_Builder()
        CharacterSetVsAction_BuilderBase.__init__(self,
                                                  ("columns", "grid", "lines"),
                                                  KeywordElseAdmissibleF=True)
    def finalize(self):
        """Produce the final count-action map over the source character set.

        Also runs sanity checks: grid values should be integer multiples and
        a 'lines' count should be defined somewhere.
        """
        # Finalize / Produce 'LineColumnCount' object.
        #
        ca_map = self._ca_map_builder.finalize(
            Setup.buffer_encoding.source_set.minimum(),
            Setup.buffer_encoding.source_set.least_greater_bound(),
            self.sr)
        _check_grid_values_integer_multiples(ca_map)
        # Warn if no character was assigned a LINE count.
        check_defined(ca_map, self.sr, E_CharacterCountType.LINE)
        return ca_map
    def requires_count(self):
        # Each right-hand side carries a count value, e.g. 'grid 4'.
        return True
    @typed(sr=SourceRef, Identifier=(str,str))
    def specify(self, Identifier, Pattern, Count, sr):
        """Register 'Pattern => Identifier Count'; Pattern None means '\\else'."""
        if Pattern is None:
            self._ca_map_builder.define_else(cc_type_db[Identifier], Count, sr)
        else:
            trigger_set = _extract_trigger_set(sr, Identifier, Pattern)
            self._ca_map_builder.add(trigger_set, cc_type_db[Identifier], Count, sr)
class IndentationSetup_Builder(CharacterSetVsAction_BuilderBase):
    """Indentation counter specification.
    ____________________________________________________________________________
    The base's .count_command_map contains information about how to count the
    space at the beginning of the line. The count until the first non-whitespace
    is the 'indentation'.
    +bad:
    The spec contains information about what characters are not supposed to
    appear in indentation (bad characters). Depending on the philosophical
    basis, some might consider 'space' as evil, others consider 'tab' as evil.
    +newline:
    A detailed state machine can be defined for 'newline'. This might be
    '\\n|(\\r\\n)' or more complex things.
    +suppressor:
    A newline might be suppressed by '\\' for example. For that, it might be
    specified as 'newline suppressor'.
    ____________________________________________________________________________
    """
    @typed(sr=SourceRef)
    def __init__(self, fh):
        self.__fh = fh
        # Each slot is a SourceRefObject so duplicate definitions can be
        # reported together with the position of the earlier definition.
        self.sm_whitespace = SourceRefObject("whitespace", None)
        self.sm_badspace = SourceRefObject("bad", None)
        self.sm_newline = SourceRefObject("newline", None)
        self.sm_newline_suppressor = SourceRefObject("suppressor", None)
        self.sm_suspend_list = []
        # fh == -1 => no real file behind the spec (default setup).
        if fh == -1: self.sr = SourceRef_DEFAULT
        else: self.sr = SourceRef.from_FileHandle(self.__fh)
        CharacterSetVsAction_BuilderBase.__init__(self,
                                                  ("whitespace", "suspend", "newline", "suppressor", "bad"),
                                                  KeywordElseAdmissibleF=False)
    def finalize(self):
        """Produce an 'IndentationCount_Pre' from the collected specifications.

        Missing 'whitespace'/'newline' slots are filled with defaults before
        the consistency checks run.
        """
        # Finalize / Produce 'IndentationCount' object.
        #
        if self.sm_whitespace.get() is None:
            self.sm_whitespace.set(self.__sm_whitespace_default(), SourceRef_DEFAULT)
        if self.sm_newline.get() is None:
            self.sm_newline.set(self.__sm_newline_default(), SourceRef_DEFAULT)
        # -- consistency
        self._consistency_check()
        # Transform 'SourceRefObject' into 'Pattern_Prep' objects
        # (TODO: Why not use it in the first place?)
        def get_pattern(SRO):
            if SRO is None or SRO.get() is None: return None
            return Pattern_Prep(SRO.get(), PatternString="<indentation %s>" % SRO.name, Sr=SRO.sr)
        pattern_suspend_list = [ get_pattern(sro) for sro in self.sm_suspend_list ]
        pattern_suspend_list = [ x for x in pattern_suspend_list if x is not None ]
        if self.sm_newline_suppressor.set_f():
            # 'suppressed newline' = suppressor followed by newline, as one machine.
            sm_suppressed_newline = sequentialize.do([self.sm_newline_suppressor.get(),
                                                      self.sm_newline.get()])
            sm_suppressed_newline = beautifier.do(sm_suppressed_newline)
            pattern_suppressed_newline = Pattern_Prep(sm_suppressed_newline,
                                                      PatternString="<indentation suppressed newline>",
                                                      Sr=self.sm_newline_suppressor.sr)
        else:
            pattern_suppressed_newline = None
        return IndentationCount_Pre(self.sr,
                                    get_pattern(self.sm_whitespace),
                                    get_pattern(self.sm_badspace),
                                    get_pattern(self.sm_newline),
                                    pattern_suppressed_newline,
                                    pattern_suspend_list)
    def requires_count(self):
        # Indentation specs map character sets to roles, not to count values.
        return False
    def specify(self, identifier, pattern, sr):
        """Dispatch one parsed '<pattern> => <identifier>' line to its slot.

        RETURNS: True on success; False if 'identifier' is unknown.
        """
        sm = pattern.extract_sm()
        if identifier == "whitespace":
            self.__specify(self.sm_whitespace, sm, sr)
        elif identifier == "bad":
            self.__specify(self.sm_badspace, sm, sr)
        elif identifier == "newline":
            self.__specify(self.sm_newline, sm, sr)
        elif identifier == "suppressor":
            self.__specify(self.sm_newline_suppressor, sm , sr)
        elif identifier == "suspend":
            self.__specify_suspend(sm, sr)
        else:
            return False
        return True
    @typed(sr=SourceRef)
    def __specify(self, member_ref, Sm, sr):
        # Reject duplicate definitions, then store a DFA-compliant machine.
        assert Sm is not None
        _error_if_defined_before(member_ref, sr)
        if not Sm.is_DFA_compliant(): Sm = beautifier.do(Sm)
        member_ref.set(Sm, sr)
    @typed(sr=SourceRef)
    def __specify_suspend(self, Sm, sr):
        # 'suspend' may be given repeatedly -- but not twice with an
        # identical state machine.
        for before in self.sm_suspend_list:
            if not identity.do(before.get(), Sm): continue
            error.log("'suspend' has been defined before;", sr, DontExitF=True)
            error.log("at this place.", before.sr)
        sm_suspend = SourceRefObject("suspend", None)
        self.__specify(sm_suspend, Sm, sr)
        self.sm_suspend_list.append(sm_suspend)
    def __sm_newline_default(self):
        """Default newline: '(\n)|(\r\n)'
        """
        sm = DFA.from_character_set(NumberSet(ord('\n')))
        # Optionally also accept DOS-style '\r\n'.
        if Setup.dos_carriage_return_newline_f:
            sm.add_transition_sequence(sm.init_state_index, [ord('\r'), ord('\n')])
        return sm
    def __sm_whitespace_default(self):
        """Try to define default whitespace ' ' or '\t' if their positions
        are not yet occupied in the count_command_map.
        """
        sm_whitespace = DFA.from_character_set(NumberSet.from_integer_list([ord(' '), ord('\t')]))
        # One or more whitespace characters.
        sm_whitespace = beautifier.do(repeat.do(sm_whitespace, 1))
        if self.sm_badspace.get() is not None:
            # Whitespace must not overlap with 'bad' characters; subtract them.
            sm_whitespace = difference.do(sm_whitespace, self.sm_badspace.get())
            if sm_whitespace.is_Empty() \
               or outrun.do(self.sm_badspace.get(), sm_whitespace):
                error.log("Cannot define default 'whitespace' in the frame of the given\n"
                          "definition of 'bad'.", self.sm_badspace.sr)
        return sm_whitespace
    def _consistency_check(self):
        """
        Required definitions:
           -- WHITESPACE (Default done automatically) => Assert.
           -- NEWLINE    (Default done automatically) => Assert.
        Inadmissible 'eat-into'.
           -- SUPPRESSOR shall not eat into [NEWLINE]
           -- NEWLINE    shall not eat into [WHITESPACE, BADSPACE, SUSPEND, SUPPRESSOR]
           -- WHITESPACE shall not eat into [SUPPRESSOR, NEWLINE, SUSPEND].
           -- BADSPACE   shall not eat into [SUPPRESSOR, NEWLINE, SUSPEND].
        No common lexemes:
           -- WHITESPACE and BADSPACE may not have common lexemes.
        Outrun:
           -- NEWLINE    may not start with SUSPEND and vice versa
           -- NEWLINE    may not start with SUPPRESSOR and vice versa
           -- SUPPRESSOR may not start with SUSPEND and vice versa
           -- WHITESPACE shall not outrun BADSPACE, but the contrary is ok.
              (BADSPACE may outrun WHITESPACE (e.g: lexeme with 'tab' after whitespace')
        """
        # (1) Required definitions _____________________________________________
        assert self.sm_whitespace.set_f()
        assert self.sm_newline.set_f()
        whitespace = self.sm_whitespace
        newline = self.sm_newline
        badspace = self.sm_badspace
        suppressor = self.sm_newline_suppressor
        suspend_list = self.sm_suspend_list
        # (2) Inadmissible 'eat-into' __________________________________________
        #
        cmp_list = [
            (newline, badspace), (newline, whitespace), (newline, suppressor),
            (suppressor, newline),
            (whitespace, newline), (whitespace, suppressor),
            (badspace, newline), (badspace, suppressor),
        ] \
        + [ (whitespace, x) for x in suspend_list ] \
        + [ (newline, x) for x in suspend_list ] \
        + [ (badspace, x) for x in suspend_list ]
        def _error(FormatStr, Sro0, Sro1):
            # Report the pair together with both source positions.
            error.log(FormatStr % (Sro0.name, Sro1.name), Sro0.sr, DontExitF=True)
            error.log("'%s' defined here." % Sro1.name, Sro1.sr)
        def _iterate(SroPairList):
            # NOTE(review): iterates the enclosing 'cmp_list', not its
            # argument; works because 'cmp_list' is rebound before the second
            # call below -- confirm before refactoring.
            for first_sro, second_sro in cmp_list:
                first, second = first_sro.get(), second_sro.get()
                if first is None or second is None: continue
                yield first_sro, first, second_sro, second
        for first_sro, first, second_sro, second in _iterate(cmp_list):
            if swallow.ending_A_beginning_B(first, second):
                _error("'%s' may eat into beginning of '%s'.", first_sro, second_sro)
            elif swallow.inside_A_match_B(first, second):
                _error("'%s' may swallow something matched by '%s'.", first_sro, second_sro)
        for sm_suspend in self.sm_suspend_list:
            only_common_f, \
            common_f = tail.do(self.sm_newline.get(), sm_suspend.get())
            error_check.tail(only_common_f, common_f,
                             "indentation handler's newline", self.sm_newline.sr,
                             "suspend", sm_suspend.sr)
        # (3) Inadmissible common lexemes _____________________________________
        #
        if badspace.get() and not intersection.do([badspace.get(), whitespace.get()]).is_Empty():
            _error("'%s' and '%s' match on common lexemes.", whitespace, badspace)
        # (3) Inadmissible outruns ____________________________________________
        #
        cmp_list = [ (newline, suppressor), (suppressor, newline), (whitespace, badspace) ]
        for x in suspend_list:
            cmp_list.extend([
                (newline, x), (x, newline),
                (suppressor, x), (x, suppressor)
            ])
        for first_sro, first, second_sro, second in _iterate(cmp_list):
            if outrun.do(second, first):
                _error("'%s' may outrun '%s'.", first_sro, second_sro)
def _parse_definition_head(fh, IdentifierList):
    """Parse the head of one definition: '<pattern> => <identifier>'.

    RETURNS: (pattern, identifier, source reference);
             pattern is None if the '\\else' keyword was used.
    """
    if check(fh, "\\default"):
        error.log("'\\default' has been replaced by keyword '\\else' since quex 0.64.9!", fh)
    elif check(fh, "\\else"):
        pattern = None
    else:
        pattern = regular_expression.parse(fh, AllowPreContextF=False,
                                           AllowPostContextF=False)
    skip_whitespace(fh)
    check_or_die(fh, "=>", " after character set definition.")
    skip_whitespace(fh)
    identifier = read_identifier(fh, OnMissingStr="Missing identifier following '=>'.")
    # Only identifiers admissible for the calling builder are accepted.
    error.verify_word_in_list(identifier, IdentifierList,
                              "Unrecognized specifier '%s'." % identifier, fh)
    skip_whitespace(fh)
    return pattern, identifier, SourceRef.from_FileHandle(fh)
def _read_value_specifier(fh, Keyword, Default=None):
    """Read the count value after 'Keyword': an integer, a variable name,
    or 'Default'. Reports an error if none of the three is available."""
    skip_whitespace(fh)
    number = read_integer(fh)
    if number is not None:
        return number
    # Not a number -- accept a variable name instead.
    name = read_identifier(fh)
    if name:
        return name
    if Default is not None:
        return Default
    error.log("Missing integer or variable name after keyword '%s'." % Keyword, fh)
# Lazily built cache for 'LineColumnCount_Default()'.
__CountActionMap_DEFAULT = None
def LineColumnCount_Default():
    """Return the default count map: newline -> line 1, tab -> grid 4,
    everything else -> column 1. Built once and cached."""
    global __CountActionMap_DEFAULT
    if __CountActionMap_DEFAULT is not None:
        return __CountActionMap_DEFAULT
    builder = CountActionMap_Builder()
    builder.add(NumberSet(ord('\n')), E_CharacterCountType.LINE, 1, SourceRef_DEFAULT)
    builder.add(NumberSet(ord('\t')), E_CharacterCountType.GRID, 4, SourceRef_DEFAULT)
    builder.define_else(E_CharacterCountType.COLUMN, 1, SourceRef_DEFAULT)   # Define: "\else"
    __CountActionMap_DEFAULT = builder.finalize(
        Setup.buffer_encoding.source_set.minimum(),
        Setup.buffer_encoding.source_set.least_greater_bound(),              # Apply: "\else"
        SourceRef_DEFAULT)
    return __CountActionMap_DEFAULT
def _error_if_defined_before(Before, sr):
    """Report a duplicate definition, pointing at both source positions."""
    if Before.set_f():
        error.log("'%s' has been defined before;" % Before.name, sr,
                  DontExitF=True)
        error.log("at this place.", Before.sr)
def _extract_trigger_set(sr, Keyword, Pattern):
    """Extract the character set that triggers 'Pattern'.

    Patterns used here must be matchable by exactly one character
    (e.g. " " or [a-z]); anything else is reported as an error at 'sr'.

    RETURNS: NumberSet of triggering characters; None if Pattern is None.
    """
    if Pattern is None:
        return None
    elif isinstance(Pattern, NumberSet):
        # A plain character set requires no further inspection.
        return Pattern
    def check_can_be_matched_by_single_character(SM):
        # A single-character match means: a non-accepting init state with
        # exactly one transition into a single terminal state that itself
        # has no outgoing transitions.
        bad_f = False
        init_state = SM.get_init_state()
        if SM.get_init_state().is_acceptance():
            bad_f = True
        elif len(SM.states) != 2:
            bad_f = True
        # Init state MUST transit to second state. Second state MUST not have any transitions
        elif len(init_state.target_map.get_target_state_index_list()) != 1:
            bad_f = True
        else:
            tmp = set(SM.states.keys())
            tmp.remove(SM.init_state_index)
            other_state_index = next(iter(tmp))
            if len(SM.states[other_state_index].target_map.get_target_state_index_list()) != 0:
                bad_f = True
        if bad_f:
            # (typo fix: 'addmissible' -> 'admissible')
            error.log("For '%s' only patterns are admissible which\n" % Keyword + \
                      "can be matched by a single character, e.g. \" \" or [a-z].", sr)
    sm = Pattern.extract_sm()
    check_can_be_matched_by_single_character(sm)
    # The init state's sole transition carries the trigger set.
    transition_map = sm.get_init_state().target_map.get_map()
    assert len(transition_map) == 1
    return list(transition_map.values())[0]
def _check_grid_values_integer_multiples(CaMap):
    """If there are no spaces and the grid is on a homogeneous scale,
       => then the grid can be transformed into 'easy-to-compute' spaces.

    Emits a warning suggesting the equivalent space counts in that case.
    """
    grid_value_list = []
    min_info = None
    for character_set, info in CaMap:
        if info.cc_type == E_CharacterCountType.COLUMN:
            # A space count exists => no transformation possible.
            return
        elif info.cc_type != E_CharacterCountType.GRID:
            continue
        elif isinstance(info.value, str):
            # (fix: was 'type(info.value) in (str, str)', a Python-2
            # '(str, unicode)' leftover.) A single 'variable' grid value
            # means no assumptions can be made.
            return
        grid_value_list.append(info.value)
        if min_info is None or info.value < min_info.value:
            min_info = info
    if min_info is None:
        return
    # Are all grid values integer multiples of the minimum?
    if all(x % min_info.value == 0 for x in grid_value_list):
        # Integer division: every x is a multiple of min_info.value, so the
        # suggested space counts print as whole numbers (not '2.0').
        error.warning("Setup does not contain spaces, only grids (tabulators). All grid\n" \
                      "widths are multiples of %i. The grid setup %s is equivalent to\n" \
                      % (min_info.value, repr(sorted(grid_value_list))[1:-1]) + \
                      "a setup with space counts %s. Space counts are faster to compute.\n" \
                      % repr([x // min_info.value for x in sorted(grid_value_list)])[1:-1],
                      min_info.sr)
    return
def check_defined(CaMap, SourceReference, CCT):
    """Warn if character-count type 'CCT' has no entry in map 'CaMap'.

    Iterates the counter map; if no entry carries the requested count
    type, a suppressible warning is emitted via 'error.warning'.
    """
    if any(info.cc_type == CCT for character_set, info in CaMap):
        return
    error.warning("Setup does not define '%s'." % cc_type_name_db[CCT], SourceReference,
                  SuppressCode=NotificationDB.warning_counter_setup_without_newline)
| quex/input/files/specifier/counter.py | 21,323 | Line/column number count specification.
___________________________________________________________________________
The main result of the parsing is the Base's .count_command_map, which is
an instance of CountActionMap_Builder.
____________________________________________________________________________
Indentation counter specification.
____________________________________________________________________________
The base's .count_command_map contains information about how to count the
space at the beginning of the line. The count until the first non-whitespace
is the 'indentation'.
+bad:
The spec contains information about what characters are not supposed to
appear in indentation (bad characters). Depending on the philosophical
basis, some might consider 'space' as evil, others consider 'tab' as evil.
+newline:
A detailed state machine can be defined for 'newline'. This might be
'
|(
)' or more complex things.
+suppressor:
A newline might be suppressed by a preceding '\' (backslash), for example. For that, it might be
specified as 'newline suppressor'.
____________________________________________________________________________
Default newline: '(
)|(
)'
Try to define default whitespace ' ' or ' ' if their positions
are not yet occupied in the count_command_map.
Parses pattern definitions of the form:
[ ] => grid 4;
[:intersection([:alpha:], [\X064-\X066]):] => space 1;
In other words the right hand side *must* be a character set.
ADAPTS: result to contain parsing information.
If there are no spaces and the grid is on a homogeneous scale,
=> then the grid can be transformed into 'easy-to-compute' spaces.
Required definitions:
-- WHITESPACE (Default done automatically) => Assert.
-- NEWLINE (Default done automatically) => Assert.
Inadmissible 'eat-into'.
-- SUPPRESSOR shall not eat into [NEWLINE]
-- NEWLINE shall not eat into [WHITESPACE, BADSPACE, SUSPEND, SUPPRESSOR]
-- WHITESPACE shall not eat into [SUPPRESSOR, NEWLINE, SUSPEND].
-- BADSPACE shall not eat into [SUPPRESSOR, NEWLINE, SUSPEND].
No common lexemes:
-- WHITESPACE and BADSPACE may not have common lexemes.
Outrun:
-- NEWLINE may not start with SUSPEND and vice versa
-- NEWLINE may not start with SUPPRESSOR and vice versa
-- SUPPRESSOR may not start with SUSPEND and vice versa
-- WHITESPACE shall not outrun BADSPACE, but the contrary is ok.
(BADSPACE may outrun WHITESPACE (e.g: lexeme with 'tab' after whitespace')
Checks whether the character counter type has been defined in the
map.
THROWS: Error in case that is has not been defined.
Project Quex (http://quex.sourceforge.net); License: MIT; (C) 2005-2020 Frank-Rene Schaefer; _______________________________________________________________________________ NOTE: Catching of EOF happens in caller: parse_section(...) A regular expression state machine '_parse_definition_head()' ensures that only identifiers mentioned in 'result' are accepted. Finalize / Produce 'LineColumnCount' object. Finalize / Produce 'IndentationCount' object. -- consistency Transform 'SourceRefObject' into 'Pattern_Prep' objects (TODO: Why not use it in the first place?) (1) Required definitions _____________________________________________ (2) Inadmissible 'eat-into' __________________________________________ (3) Inadmissible common lexemes _____________________________________ (3) Inadmissible outruns ____________________________________________ not a number received, is it an identifier? Define: "\else" Apply: "\else" Init state MUST transit to second state. Second state MUST not have any transitions If there is one single 'variable' grid value, then no assumptions can be made. Are all grid values a multiple of the minimum? | 3,868 | en | 0.695558 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=2
# total number=20
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
    """Assemble the fixed demo circuit on the first two qubits of
    *input_qubit*, then measure all given qubits under key 'result'.
    The trailing 'number=' tags preserve the original op bookkeeping."""
    a, b = input_qubit[0], input_qubit[1]
    gate_sequence = [
        cirq.H(a),        # number=1
        cirq.CNOT(b, a),  # number=17
        cirq.Z(b),        # number=18
        cirq.CNOT(b, a),  # number=19
        cirq.Y(b),        # number=2
        cirq.Y(b),        # number=4
        cirq.Y(b),        # number=3
        cirq.H(a),        # number=13
        cirq.CZ(b, a),    # number=14
        cirq.H(a),        # number=15
        cirq.X(a),        # number=8
        cirq.CNOT(b, a),  # number=9
        cirq.CNOT(b, a),  # number=10
        cirq.X(a),        # number=11
        cirq.CNOT(b, a),  # number=12
    ]
    circuit = cirq.Circuit()
    for operation in gate_sequence:
        circuit.append(operation)
    # circuit end
    circuit.append(cirq.measure(*input_qubit, key='result'))
    return circuit
def bitstring(bits):
    """Concatenate measurement outcomes into a single digit string,
    e.g. [0, 1, 1] -> '011'. Each element is coerced via int() first."""
    return ''.join(map(str, map(int, bits)))
if __name__ == '__main__':
    # Build the circuit on 4 grid qubits (column 0 of a GridQubit lattice).
    qubit_count = 4
    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
    circuit = make_circuit(qubit_count,input_qubits)
    # Compile for the Sycamore device using the sqrt-iswap gate set.
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
    circuit_sample_count =2000
    # Simulate and histogram the measurement results as bit strings.
    simulator = cirq.Simulator()
    result = simulator.run(circuit, repetitions=circuit_sample_count)
    frequencies = result.histogram(key='result', fold_func=bitstring)
    # Dump frequencies, circuit length, and the circuit diagram to CSV.
    writefile = open("../data/startCirq347.csv","w+")
    print(format(frequencies),file=writefile)
    print("results end", file=writefile)
    print(circuit.__len__(), file=writefile)
    print(circuit,file=writefile)
writefile.close() | data/p2DJ/New/program/cirq/startCirq347.py | 2,182 | !/usr/bin/env python -*- coding: utf-8 -*- @Time : 5/15/20 4:49 PM @File : grover.py qubit number=2 total number=20thatsNoCode Symbols for the rotation angles in the QAOA circuit. circuit begin number=1 number=17 number=18 number=19 number=2 number=4 number=3 number=13 number=14 number=15 number=8 number=9 number=10 number=11 number=12 circuit end | 355 | en | 0.318577 |
import requests
import urllib.parse
import posixpath
import pandas as pd
def get_enrollment_dates(course):
    '''Takes a course object and returns student dates of enrollment.
    Useful for handling late registrations and modified deadlines.

    course: dict-like with 'hostname', 'course_id' and 'token' entries.
    Returns: dict mapping user id (str) to an enrollment timestamp string
    in 'YYYY-MM-DD-HH-MM' form.

    Example:
    course.get_enrollment_date()'''
    url_path = posixpath.join("api", "v1", "courses", course['course_id'], "enrollments")
    api_url = urllib.parse.urljoin(course['hostname'], url_path)
    token = course['token']
    resp = None
    students = []
    # Canvas paginates; follow the 'next' link until the page reported as
    # 'current' equals the page reported as 'last'.
    while resp is None or resp.links['current']['url'] != resp.links['last']['url']:
        resp = requests.get(
            url = api_url if resp is None else resp.links['next']['url'],
            headers = {
                "Authorization": f"Bearer {token}",
                "Accept": "application/json+canvas-string-ids"
            },
            # NOTE(review): sent as a JSON body on a GET request; Canvas
            # normally takes these as query parameters (params=) -- confirm.
            json={
                "type": ["StudentEnrollment"],
                "per_page":"100"
            }
        )
        students.extend(resp.json())
    enrollment_dates = {}
    for st in students:
        # 'created_at' is ISO-8601 (e.g. 2020-01-31T10:00:00Z): strip the
        # 'Z', replace 'T' and ':' with '-', keep 'YYYY-MM-DD-HH-MM'.
        enrollment_dates[str(st['user_id'])] = str(st['created_at']).strip('Z').replace('T','-').replace(':','-')[:16]
    return enrollment_dates
def get_assignments(course):
    '''Takes a course object and returns
    a Pandas data frame with all existing assignments and their attributes/data
    Example:
    course.get_assignments()'''
    endpoint = urllib.parse.urljoin(
        course['hostname'],
        posixpath.join("api", "v1", "courses", course['course_id'], "assignments"),
    )
    response = requests.get(
        url=endpoint,
        headers={
            "Authorization": f"Bearer {course['token']}",
            "Accept": "application/json+canvas-string-ids"
        },
        json={
            "per_page": "10000"
        },
    )
    return pd.DataFrame.from_dict(response.json())
def get_assignment_lock_date(course, assignment):
    '''Takes a course object and the name of a Canvas assignment and returns
    the lock date as 'YYYY-MM-DD-HH-MM'. Returns None if no lock date assigned.
    Example:
    get_assignment_lock_date(course, 'worksheet_01')'''
    # DOC FIX: the previous docstring was copied from the due-date helper
    # and wrongly described this function as returning the due date.
    assignments = get_assignments(course)
    assignments = assignments[['name', 'lock_at']].query('name == @assignment')
    lock_date = assignments['lock_at'].to_numpy()[0]
    if lock_date is None:
        return None
    # Canvas returns ISO-8601 (e.g. 2020-01-31T23:59:00Z); normalize the
    # separators to match the module's 'YYYY-MM-DD-HH-MM' convention.
    return lock_date.replace("T", "-").replace(":", "-")[:16]
def get_assignment_due_date(course, assignment):
    '''Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned.
    Example:
    course.get_assignment_due_date('worksheet_01')'''
    rows = get_assignments(course)[['name', 'due_at']].query('name == @assignment')
    due_date = rows['due_at'].to_numpy()[0]
    if due_date is None:
        return None
    # Normalize ISO-8601 separators and keep 'YYYY-MM-DD-HH-MM'.
    normalized = due_date.replace("T", "-").replace(":", "-")
    return normalized[:16]
def get_assignment_unlock_date(course, assignment):
    '''Takes a course object and the name of a Canvas assignment and returns
    the unlock date as 'YYYY-MM-DD-HH-MM'. Returns None if no unlock date assigned.
    Example:
    get_assignment_unlock_date(course, 'worksheet_01')'''
    # DOC FIX: the previous docstring said "due date" although this helper
    # reads the 'unlock_at' column.
    assignments = get_assignments(course)
    assignments = assignments[['name', 'unlock_at']].query('name == @assignment')
    unlock_date = assignments['unlock_at'].to_numpy()[0]
    if unlock_date is None:
        return None
    # Normalize ISO-8601 separators to the module's date convention.
    unlock_date = unlock_date.replace("T", "-").replace(':', '-')
    return unlock_date[:16]
def get_assignment_id(course, assignment):
    '''Takes a course object and the name of a Canvas assignment and returns the Canvas ID.
    Example:
    course.get_assignment_id('worksheet_01')'''
    matched = get_assignments(course)[['name', 'id']].query('name == @assignment')
    return matched['id'].values[0]
def get_grades(course, assignment):
    '''Takes a course object, an assignment name, and get the grades for that assignment from Canvas.

    Returns: dict mapping user id to the submission 'score' field
    (score may be None for ungraded submissions).

    Example:
    course.get_grades(course, 'worksheet_01')'''
    assignment_id = get_assignment_id(course, assignment)
    url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments", assignment_id, "submissions")
    api_url = urllib.parse.urljoin(course['hostname'], url_path)
    token = course['token']
    resp = None
    scores = {}
    # Canvas paginates; follow the 'next' link until the page reported as
    # 'current' equals the page reported as 'last'.
    while resp is None or resp.links['current']['url'] != resp.links['last']['url']:
        resp = requests.get(
            url = api_url if resp is None else resp.links['next']['url'],
            headers = {
                "Authorization": f"Bearer {token}",
                "Accept": "application/json+canvas-string-ids"
            },
            json={
                "per_page":"100"
            }
        )
        scores.update( {res['user_id'] : res['score'] for res in resp.json()} )
    return scores
def grades_need_posting(course, assignment):
    '''Takes a course object, an assignment name, and get the grades for that assignment from Canvas.

    Returns True if at least one real student's submission grade has not
    yet been posted ('posted_at' is null), False otherwise.

    Example:
    course.get_grades(course, 'worksheet_01')'''
    assignment_id = get_assignment_id(course, assignment)
    url_path = posixpath.join("api", "v1", "courses", course['course_id'], "assignments", assignment_id, "submissions")
    api_url = urllib.parse.urljoin(course['hostname'], url_path)
    token = course['token']
    # Restrict to genuinely enrolled students so the Canvas "Test Student"
    # submissions do not influence the result.
    real_stu_ids = list(get_enrollment_dates(course).keys())
    resp = None
    posted_flags = []
    # Canvas paginates; follow the 'next' link until the page reported as
    # 'current' equals the page reported as 'last'.
    while resp is None or resp.links['current']['url'] != resp.links['last']['url']:
        resp = requests.get(
            url = api_url if resp is None else resp.links['next']['url'],
            headers = {
                "Authorization": f"Bearer {token}",
                "Accept": "application/json+canvas-string-ids"
            },
            json={
                "per_page":"100"
            }
        )
        posted_flags.extend([ (subm_grd['posted_at'] is not None) for subm_grd in resp.json() if subm_grd['user_id'] in real_stu_ids])
    return not all(posted_flags)
def post_grade(course, assignment, student, score):
    '''Takes a course object, an assignment name, student id, and score to upload. Posts to Canvas.
    Example:
    post_grade(dsci100, 'worksheet_01', '23423', 10)'''
    assignment_id = get_assignment_id(course, assignment)
    url_post_path = posixpath.join("api", "v1", "courses", course['course_id'],
                                   "assignments", assignment_id, "submissions", student)
    api_url = urllib.parse.urljoin(course['hostname'], url_post_path)
    token = course['token']
    # FIX: previously the student id was urljoin-ed a second time onto a URL
    # that already ended with it; that only produced the right URL because
    # the path had no trailing slash. Use the fully built URL directly.
    requests.put(
        url=api_url,
        headers={
            "Authorization": f"Bearer {token}",
            "Accept": "application/json+canvas-string-ids"
        },
        json={
            "submission": {"posted_grade": score}
        },
    )
| scripts/canvas.py | 7,108 | Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned.
Example:
course.get_assignment_due_date('worksheet_01')
Takes a course object and the name of a Canvas assignment and returns the Canvas ID.
Example:
course.get_assignment_id('worksheet_01')
Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned.
Example:
course.get_assignment_due_date('worksheet_01')
Takes a course object and the name of a Canvas assignment and returns the due date. Returns None if no due date assigned.
Example:
course.get_assignment_unlock_date('worksheet_01')
Takes a course object and returns
a Pandas data frame with all existing assignments and their attributes/data
Example:
course.get_assignments()
Takes a course object and returns student dates of enrollment.
Useful for handling late registrations and modified deadlines.
Example:
course.get_enrollment_date()
Takes a course object, an assignment name, and gets the grades for that assignment from Canvas.
Example:
course.get_grades(course, 'worksheet_01')
Takes a course object, an assignment name, and get the grades for that assignment from Canvas.
Example:
course.get_grades(course, 'worksheet_01')
Takes a course object, an assignment name, student id, and score to upload. Posts to Canvas.
Example:
course.post_grades(dsci100, 'worksheet_01', '23423', 10)
get enrollments to avoid the test student's submissions | 1,496 | en | 0.731063 |
# -*- coding: utf-8 -*-
"""
pygments.lexers.graphics
~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for computer graphics and plotting related languages.
:copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, words, include, bygroups, using, \
this, default
from pygments.token import Text, Comment, Operator, Keyword, Name, \
Number, Punctuation, String
__all__ = ['GLShaderLexer', 'PostScriptLexer', 'AsymptoteLexer', 'GnuplotLexer',
'PovrayLexer', 'HLSLShaderLexer']
class GLShaderLexer(RegexLexer):
    """
    GLSL (OpenGL Shader) lexer.

    A single 'root' state: preprocessor lines, comments, operators and
    numeric literals are matched first, then keyword/type/builtin tables
    built with `words()`.

    .. versionadded:: 1.1
    """
    name = 'GLSL'
    aliases = ['glsl']
    filenames = ['*.vert', '*.frag', '*.geo']
    mimetypes = ['text/x-glslsrc']
    tokens = {
        'root': [
            (r'^#.*', Comment.Preproc),
            (r'//.*', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
            (r'\+|-|~|!=?|\*|/|%|<<|>>|<=?|>=?|==?|&&?|\^|\|\|?',
             Operator),
            (r'[?:]', Operator),  # quick hack for ternary
            (r'\bdefined\b', Operator),
            (r'[;{}(),\[\]]', Punctuation),
            # FIXME when e is present, no decimal point needed
            (r'[+-]?\d*\.\d+([eE][-+]?\d+)?', Number.Float),
            (r'[+-]?\d+\.\d*([eE][-+]?\d+)?', Number.Float),
            (r'0[xX][0-9a-fA-F]*', Number.Hex),
            (r'0[0-7]*', Number.Oct),
            (r'[1-9][0-9]*', Number.Integer),
            (words((
                # Storage qualifiers
                'attribute', 'const', 'uniform', 'varying',
                'buffer', 'shared', 'in', 'out',
                # Layout qualifiers
                'layout',
                # Interpolation qualifiers
                'flat', 'smooth', 'noperspective',
                # Auxiliary qualifiers
                'centroid', 'sample', 'patch',
                # Parameter qualifiers. Some double as Storage qualifiers
                'inout',
                # Precision qualifiers
                'lowp', 'mediump', 'highp', 'precision',
                # Invariance qualifiers
                'invariant',
                # Precise qualifiers
                'precise',
                # Memory qualifiers
                'coherent', 'volatile', 'restrict', 'readonly', 'writeonly',
                # Statements
                'break', 'continue', 'do', 'for', 'while', 'switch',
                'case', 'default', 'if', 'else', 'subroutine',
                'discard', 'return', 'struct'),
                prefix=r'\b', suffix=r'\b'),
             Keyword),
            (words((
                # Boolean values
                'true', 'false'),
                prefix=r'\b', suffix=r'\b'),
             Keyword.Constant),
            (words((
                # Miscellaneous types
                'void', 'atomic_uint',
                # Floating-point scalars and vectors
                'float', 'vec2', 'vec3', 'vec4',
                'double', 'dvec2', 'dvec3', 'dvec4',
                # Integer scalars and vectors
                'int', 'ivec2', 'ivec3', 'ivec4',
                'uint', 'uvec2', 'uvec3', 'uvec4',
                # Boolean scalars and vectors
                'bool', 'bvec2', 'bvec3', 'bvec4',
                # Matrices
                'mat2', 'mat3', 'mat4', 'dmat2', 'dmat3', 'dmat4',
                'mat2x2', 'mat2x3', 'mat2x4', 'dmat2x2', 'dmat2x3', 'dmat2x4',
                'mat3x2', 'mat3x3', 'mat3x4', 'dmat3x2', 'dmat3x3',
                'dmat3x4', 'mat4x2', 'mat4x3', 'mat4x4', 'dmat4x2', 'dmat4x3', 'dmat4x4',
                # Floating-point samplers
                'sampler1D', 'sampler2D', 'sampler3D', 'samplerCube',
                'sampler1DArray', 'sampler2DArray', 'samplerCubeArray',
                'sampler2DRect', 'samplerBuffer',
                'sampler2DMS', 'sampler2DMSArray',
                # Shadow samplers
                'sampler1DShadow', 'sampler2DShadow', 'samplerCubeShadow',
                'sampler1DArrayShadow', 'sampler2DArrayShadow',
                'samplerCubeArrayShadow', 'sampler2DRectShadow',
                # Signed integer samplers
                'isampler1D', 'isampler2D', 'isampler3D', 'isamplerCube',
                'isampler1DArray', 'isampler2DArray', 'isamplerCubeArray',
                'isampler2DRect', 'isamplerBuffer',
                'isampler2DMS', 'isampler2DMSArray',
                # Unsigned integer samplers
                'usampler1D', 'usampler2D', 'usampler3D', 'usamplerCube',
                'usampler1DArray', 'usampler2DArray', 'usamplerCubeArray',
                'usampler2DRect', 'usamplerBuffer',
                'usampler2DMS', 'usampler2DMSArray',
                # Floating-point image types
                'image1D', 'image2D', 'image3D', 'imageCube',
                'image1DArray', 'image2DArray', 'imageCubeArray',
                'image2DRect', 'imageBuffer',
                'image2DMS', 'image2DMSArray',
                # Signed integer image types
                'iimage1D', 'iimage2D', 'iimage3D', 'iimageCube',
                'iimage1DArray', 'iimage2DArray', 'iimageCubeArray',
                'iimage2DRect', 'iimageBuffer',
                'iimage2DMS', 'iimage2DMSArray',
                # Unsigned integer image types
                'uimage1D', 'uimage2D', 'uimage3D', 'uimageCube',
                'uimage1DArray', 'uimage2DArray', 'uimageCubeArray',
                'uimage2DRect', 'uimageBuffer',
                'uimage2DMS', 'uimage2DMSArray'),
                prefix=r'\b', suffix=r'\b'),
             Keyword.Type),
            (words((
                # Reserved for future use.
                'common', 'partition', 'active', 'asm', 'class',
                'union', 'enum', 'typedef', 'template', 'this',
                'resource', 'goto', 'inline', 'noinline', 'public',
                'static', 'extern', 'external', 'interface', 'long',
                'short', 'half', 'fixed', 'unsigned', 'superp', 'input',
                'output', 'hvec2', 'hvec3', 'hvec4', 'fvec2', 'fvec3',
                'fvec4', 'sampler3DRect', 'filter', 'sizeof', 'cast',
                'namespace', 'using'),
                prefix=r'\b', suffix=r'\b'),
             Keyword.Reserved),
            # All names beginning with "gl_" are reserved.
            (r'gl_\w*', Name.Builtin),
            (r'[a-zA-Z_]\w*', Name),
            (r'\.', Punctuation),
            (r'\s+', Text),
        ],
    }
class HLSLShaderLexer(RegexLexer):
    """
    HLSL (Microsoft Direct3D Shader) lexer.

    Like the GLSL lexer but with HLSL keyword/type/intrinsic tables and a
    separate 'string' state for double-quoted literals.

    .. versionadded:: 2.3
    """
    name = 'HLSL'
    aliases = ['hlsl']
    filenames = ['*.hlsl', '*.hlsli']
    mimetypes = ['text/x-hlsl']
    tokens = {
        'root': [
            (r'^#.*', Comment.Preproc),
            (r'//.*', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
            (r'\+|-|~|!=?|\*|/|%|<<|>>|<=?|>=?|==?|&&?|\^|\|\|?',
             Operator),
            (r'[?:]', Operator),  # quick hack for ternary
            (r'\bdefined\b', Operator),
            (r'[;{}(),.\[\]]', Punctuation),
            # FIXME when e is present, no decimal point needed
            (r'[+-]?\d*\.\d+([eE][-+]?\d+)?f?', Number.Float),
            (r'[+-]?\d+\.\d*([eE][-+]?\d+)?f?', Number.Float),
            (r'0[xX][0-9a-fA-F]*', Number.Hex),
            (r'0[0-7]*', Number.Oct),
            (r'[1-9][0-9]*', Number.Integer),
            (r'"', String, 'string'),
            (words((
                'asm','asm_fragment','break','case','cbuffer','centroid','class',
                'column_major','compile','compile_fragment','const','continue',
                'default','discard','do','else','export','extern','for','fxgroup',
                'globallycoherent','groupshared','if','in','inline','inout',
                'interface','line','lineadj','linear','namespace','nointerpolation',
                'noperspective','NULL','out','packoffset','pass','pixelfragment',
                'point','precise','return','register','row_major','sample',
                'sampler','shared','stateblock','stateblock_state','static',
                'struct','switch','tbuffer','technique','technique10',
                'technique11','texture','typedef','triangle','triangleadj',
                'uniform','vertexfragment','volatile','while'),
                prefix=r'\b', suffix=r'\b'),
             Keyword),
            (words(('true','false'), prefix=r'\b', suffix=r'\b'),
             Keyword.Constant),
            (words((
                'auto','catch','char','const_cast','delete','dynamic_cast','enum',
                'explicit','friend','goto','long','mutable','new','operator',
                'private','protected','public','reinterpret_cast','short','signed',
                'sizeof','static_cast','template','this','throw','try','typename',
                'union','unsigned','using','virtual'),
                prefix=r'\b', suffix=r'\b'),
             Keyword.Reserved),
            (words((
                'dword','matrix','snorm','string','unorm','unsigned','void','vector',
                'BlendState','Buffer','ByteAddressBuffer','ComputeShader',
                'DepthStencilState','DepthStencilView','DomainShader',
                'GeometryShader','HullShader','InputPatch','LineStream',
                'OutputPatch','PixelShader','PointStream','RasterizerState',
                'RenderTargetView','RasterizerOrderedBuffer',
                'RasterizerOrderedByteAddressBuffer',
                'RasterizerOrderedStructuredBuffer','RasterizerOrderedTexture1D',
                'RasterizerOrderedTexture1DArray','RasterizerOrderedTexture2D',
                'RasterizerOrderedTexture2DArray','RasterizerOrderedTexture3D',
                'RWBuffer','RWByteAddressBuffer','RWStructuredBuffer',
                'RWTexture1D','RWTexture1DArray','RWTexture2D','RWTexture2DArray',
                'RWTexture3D','SamplerState','SamplerComparisonState',
                'StructuredBuffer','Texture1D','Texture1DArray','Texture2D',
                'Texture2DArray','Texture2DMS','Texture2DMSArray','Texture3D',
                'TextureCube','TextureCubeArray','TriangleStream','VertexShader'),
                prefix=r'\b', suffix=r'\b'),
             Keyword.Type),
            (words((
                'bool','double','float','int','half','min16float','min10float',
                'min16int','min12int','min16uint','uint'),
                prefix=r'\b', suffix=r'([1-4](x[1-4])?)?\b'),
             Keyword.Type),     # vector and matrix types
            (words((
                'abort','abs','acos','all','AllMemoryBarrier',
                'AllMemoryBarrierWithGroupSync','any','AppendStructuredBuffer',
                'asdouble','asfloat','asin','asint','asuint','asuint','atan',
                'atan2','ceil','CheckAccessFullyMapped','clamp','clip',
                'CompileShader','ConsumeStructuredBuffer','cos','cosh','countbits',
                'cross','D3DCOLORtoUBYTE4','ddx','ddx_coarse','ddx_fine','ddy',
                'ddy_coarse','ddy_fine','degrees','determinant',
                'DeviceMemoryBarrier','DeviceMemoryBarrierWithGroupSync','distance',
                'dot','dst','errorf','EvaluateAttributeAtCentroid',
                'EvaluateAttributeAtSample','EvaluateAttributeSnapped','exp',
                'exp2','f16tof32','f32tof16','faceforward','firstbithigh',
                'firstbitlow','floor','fma','fmod','frac','frexp','fwidth',
                'GetRenderTargetSampleCount','GetRenderTargetSamplePosition',
                'GlobalOrderedCountIncrement','GroupMemoryBarrier',
                'GroupMemoryBarrierWithGroupSync','InterlockedAdd','InterlockedAnd',
                'InterlockedCompareExchange','InterlockedCompareStore',
                'InterlockedExchange','InterlockedMax','InterlockedMin',
                'InterlockedOr','InterlockedXor','isfinite','isinf','isnan',
                'ldexp','length','lerp','lit','log','log10','log2','mad','max',
                'min','modf','msad4','mul','noise','normalize','pow','printf',
                'Process2DQuadTessFactorsAvg','Process2DQuadTessFactorsMax',
                'Process2DQuadTessFactorsMin','ProcessIsolineTessFactors',
                'ProcessQuadTessFactorsAvg','ProcessQuadTessFactorsMax',
                'ProcessQuadTessFactorsMin','ProcessTriTessFactorsAvg',
                'ProcessTriTessFactorsMax','ProcessTriTessFactorsMin',
                'QuadReadLaneAt','QuadSwapX','QuadSwapY','radians','rcp',
                'reflect','refract','reversebits','round','rsqrt','saturate',
                'sign','sin','sincos','sinh','smoothstep','sqrt','step','tan',
                'tanh','tex1D','tex1D','tex1Dbias','tex1Dgrad','tex1Dlod',
                'tex1Dproj','tex2D','tex2D','tex2Dbias','tex2Dgrad','tex2Dlod',
                'tex2Dproj','tex3D','tex3D','tex3Dbias','tex3Dgrad','tex3Dlod',
                'tex3Dproj','texCUBE','texCUBE','texCUBEbias','texCUBEgrad',
                'texCUBElod','texCUBEproj','transpose','trunc','WaveAllBitAnd',
                'WaveAllMax','WaveAllMin','WaveAllBitOr','WaveAllBitXor',
                'WaveAllEqual','WaveAllProduct','WaveAllSum','WaveAllTrue',
                'WaveAnyTrue','WaveBallot','WaveGetLaneCount','WaveGetLaneIndex',
                'WaveGetOrderedIndex','WaveIsHelperLane','WaveOnce',
                'WavePrefixProduct','WavePrefixSum','WaveReadFirstLane',
                'WaveReadLaneAt'),
                prefix=r'\b', suffix=r'\b'),
             Name.Builtin),     # built-in functions
            (words((
                'SV_ClipDistance','SV_ClipDistance0','SV_ClipDistance1',
                'SV_Culldistance','SV_CullDistance0','SV_CullDistance1',
                'SV_Coverage','SV_Depth','SV_DepthGreaterEqual',
                'SV_DepthLessEqual','SV_DispatchThreadID','SV_DomainLocation',
                'SV_GroupID','SV_GroupIndex','SV_GroupThreadID','SV_GSInstanceID',
                'SV_InnerCoverage','SV_InsideTessFactor','SV_InstanceID',
                'SV_IsFrontFace','SV_OutputControlPointID','SV_Position',
                'SV_PrimitiveID','SV_RenderTargetArrayIndex','SV_SampleIndex',
                'SV_StencilRef','SV_TessFactor','SV_VertexID',
                'SV_ViewportArrayIndex'),
                prefix=r'\b', suffix=r'\b'),
             Name.Decorator),   # system-value semantics
            (r'\bSV_Target[0-7]?\b', Name.Decorator),
            (words((
                'allow_uav_condition','branch','call','domain','earlydepthstencil',
                'fastopt','flatten','forcecase','instance','loop','maxtessfactor',
                'numthreads','outputcontrolpoints','outputtopology','partitioning',
                'patchconstantfunc','unroll'),
                prefix=r'\b', suffix=r'\b'),
             Name.Decorator),   # attributes
            (r'[a-zA-Z_]\w*', Name),
            (r'\\$', Comment.Preproc),  # backslash at end of line -- usually macro continuation
            (r'\s+', Text),
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|'
             r'u[a-fA-F0-9]{4}|U[a-fA-F0-9]{8}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String),  # all other characters
            (r'\\\n', String),  # line continuation
            (r'\\', String),  # stray backslash
        ],
    }
class PostScriptLexer(RegexLexer):
    """
    Lexer for PostScript files.

    The PostScript Language Reference published by Adobe at
    <http://partners.adobe.com/public/developer/en/ps/PLRM.pdf>
    is the authority for this.

    .. versionadded:: 1.4
    """
    name = 'PostScript'
    aliases = ['postscript', 'postscr']
    filenames = ['*.ps', '*.eps']
    mimetypes = ['application/postscript']

    # Characters that end a PostScript name; 'valid_name' matches a run of
    # non-delimiter characters followed by a delimiter lookahead.
    delimiter = r'()<>\[\]{}/%\s'
    delimiter_end = r'(?=[%s])' % delimiter

    valid_name_chars = r'[^%s]' % delimiter
    valid_name = r"%s+%s" % (valid_name_chars, delimiter_end)

    tokens = {
        'root': [
            # All comment types
            (r'^%!.+\n', Comment.Preproc),
            (r'%%.*\n', Comment.Special),
            (r'(^%.*\n){2,}', Comment.Multiline),
            (r'%.*\n', Comment.Single),

            # String literals are awkward; enter separate state.
            (r'\(', String, 'stringliteral'),

            (r'[{}<>\[\]]', Punctuation),

            # Numbers
            (r'<[0-9A-Fa-f]+>' + delimiter_end, Number.Hex),
            # Slight abuse: use Oct to signify any explicit base system
            (r'[0-9]+\#(\-|\+)?([0-9]+\.?|[0-9]*\.[0-9]+|[0-9]+\.[0-9]*)'
             r'((e|E)[0-9]+)?' + delimiter_end, Number.Oct),
            (r'(\-|\+)?([0-9]+\.?|[0-9]*\.[0-9]+|[0-9]+\.[0-9]*)((e|E)[0-9]+)?'
             + delimiter_end, Number.Float),
            (r'(\-|\+)?[0-9]+' + delimiter_end, Number.Integer),

            # References
            (r'\/%s' % valid_name, Name.Variable),

            # Names
            (valid_name, Name.Function),      # Anything else is executed

            # These keywords taken from
            # <http://www.math.ubc.ca/~cass/graphics/manual/pdf/a1.pdf>
            # Is there an authoritative list anywhere that doesn't involve
            # trawling documentation?

            (r'(false|true)' + delimiter_end, Keyword.Constant),

            # Conditionals / flow control
            (r'(eq|ne|g[et]|l[et]|and|or|not|if(?:else)?|for(?:all)?)'
             + delimiter_end, Keyword.Reserved),

            (words((
                'abs', 'add', 'aload', 'arc', 'arcn', 'array', 'atan', 'begin',
                'bind', 'ceiling', 'charpath', 'clip', 'closepath', 'concat',
                'concatmatrix', 'copy', 'cos', 'currentlinewidth', 'currentmatrix',
                'currentpoint', 'curveto', 'cvi', 'cvs', 'def', 'defaultmatrix',
                'dict', 'dictstackoverflow', 'div', 'dtransform', 'dup', 'end',
                'exch', 'exec', 'exit', 'exp', 'fill', 'findfont', 'floor', 'get',
                'getinterval', 'grestore', 'gsave', 'gt', 'identmatrix', 'idiv',
                'idtransform', 'index', 'invertmatrix', 'itransform', 'length',
                'lineto', 'ln', 'load', 'log', 'loop', 'matrix', 'mod', 'moveto',
                'mul', 'neg', 'newpath', 'pathforall', 'pathbbox', 'pop', 'print',
                'pstack', 'put', 'quit', 'rand', 'rangecheck', 'rcurveto', 'repeat',
                'restore', 'rlineto', 'rmoveto', 'roll', 'rotate', 'round', 'run',
                'save', 'scale', 'scalefont', 'setdash', 'setfont', 'setgray',
                'setlinecap', 'setlinejoin', 'setlinewidth', 'setmatrix',
                'setrgbcolor', 'shfill', 'show', 'showpage', 'sin', 'sqrt',
                'stack', 'stringwidth', 'stroke', 'strokepath', 'sub', 'syntaxerror',
                'transform', 'translate', 'truncate', 'typecheck', 'undefined',
                'undefinedfilename', 'undefinedresult'), suffix=delimiter_end),
             Name.Builtin),

            (r'\s+', Text),
        ],

        'stringliteral': [
            (r'[^()\\]+', String),
            (r'\\', String.Escape, 'escape'),
            (r'\(', String, '#push'),
            (r'\)', String, '#pop'),
        ],

        'escape': [
            (r'[0-8]{3}|n|r|t|b|f|\\|\(|\)', String.Escape, '#pop'),
            default('#pop'),
        ],
    }
class AsymptoteLexer(RegexLexer):
    """
    For `Asymptote <http://asymptote.sf.net/>`_ source code.

    C-like tokenization; `get_tokens_unprocessed` post-processes plain
    Name tokens into builtin function/variable names.

    .. versionadded:: 1.2
    """
    name = 'Asymptote'
    aliases = ['asy', 'asymptote']
    filenames = ['*.asy']
    mimetypes = ['text/x-asymptote']

    #: optional Comment or Whitespace
    _ws = r'(?:\s|//.*?\n|/\*.*?\*/)+'

    tokens = {
        'whitespace': [
            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text),  # line continuation
            (r'//(\n|(.|\n)*?[^\\]\n)', Comment),
            (r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment),
        ],
        'statements': [
            # simple string (TeX friendly)
            (r'"(\\\\|\\"|[^"])*"', String),
            # C style string (with character escapes)
            (r"'", String, 'string'),
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
            (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
            (r'0x[0-9a-fA-F]+[Ll]?', Number.Hex),
            (r'0[0-7]+[Ll]?', Number.Oct),
            (r'\d+[Ll]?', Number.Integer),
            (r'[~!%^&*+=|?:<>/-]', Operator),
            (r'[()\[\],.]', Punctuation),
            (r'\b(case)(.+?)(:)', bygroups(Keyword, using(this), Text)),
            (r'(and|controls|tension|atleast|curl|if|else|while|for|do|'
             r'return|break|continue|struct|typedef|new|access|import|'
             r'unravel|from|include|quote|static|public|private|restricted|'
             r'this|explicit|true|false|null|cycle|newframe|operator)\b', Keyword),
            # Since an asy-type-name can be also an asy-function-name,
            # in the following we test if the string " [a-zA-Z]" follows
            # the Keyword.Type.
            # Of course it is not perfect !
            (r'(Braid|FitResult|Label|Legend|TreeNode|abscissa|arc|arrowhead|'
             r'binarytree|binarytreeNode|block|bool|bool3|bounds|bqe|circle|'
             r'conic|coord|coordsys|cputime|ellipse|file|filltype|frame|grid3|'
             r'guide|horner|hsv|hyperbola|indexedTransform|int|inversion|key|'
             r'light|line|linefit|marginT|marker|mass|object|pair|parabola|path|'
             r'path3|pen|picture|point|position|projection|real|revolution|'
             r'scaleT|scientific|segment|side|slice|splitface|string|surface|'
             r'tensionSpecifier|ticklocate|ticksgridT|tickvalues|transform|'
             r'transformation|tree|triangle|trilinear|triple|vector|'
             r'vertex|void)(?=\s+[a-zA-Z])', Keyword.Type),
            # Now the asy-type-name which are not asy-function-name
            # except yours !
            # Perhaps useless
            (r'(Braid|FitResult|TreeNode|abscissa|arrowhead|block|bool|bool3|'
             r'bounds|coord|frame|guide|horner|int|linefit|marginT|pair|pen|'
             r'picture|position|real|revolution|slice|splitface|ticksgridT|'
             r'tickvalues|tree|triple|vertex|void)\b', Keyword.Type),
            (r'[a-zA-Z_]\w*:(?!:)', Name.Label),
            (r'[a-zA-Z_]\w*', Name),
        ],
        'root': [
            include('whitespace'),
            # functions
            (r'((?:[\w*\s])+?(?:\s|\*))'  # return arguments
             r'([a-zA-Z_]\w*)'            # method name
             r'(\s*\([^;]*?\))'           # signature
             r'(' + _ws + r')(\{)',
             bygroups(using(this), Name.Function, using(this), using(this),
                      Punctuation),
             'function'),
            # function declarations
            (r'((?:[\w*\s])+?(?:\s|\*))'  # return arguments
             r'([a-zA-Z_]\w*)'            # method name
             r'(\s*\([^;]*?\))'           # signature
             r'(' + _ws + r')(;)',
             bygroups(using(this), Name.Function, using(this), using(this),
                      Punctuation)),
            default('statement'),
        ],
        'statement': [
            include('whitespace'),
            include('statements'),
            ('[{}]', Punctuation),
            (';', Punctuation, '#pop'),
        ],
        'function': [
            include('whitespace'),
            include('statements'),
            (';', Punctuation),
            (r'\{', Punctuation, '#push'),
            (r'\}', Punctuation, '#pop'),
        ],
        'string': [
            (r"'", String, '#pop'),
            (r'\\([\\abfnrtv"\'?]|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
            (r'\n', String),
            (r"[^\\'\n]+", String),  # all other characters
            (r'\\\n', String),  # line continuation
            # NOTE(review): a '\n' escape is already consumed by the escape
            # rule above, so this rule looks unreachable -- confirm.
            (r'\\n', String),
            (r'\\', String),  # stray backslash
        ],
    }

    def get_tokens_unprocessed(self, text):
        # Reclassify plain names that are known Asymptote builtins.
        from pygments.lexers._asy_builtins import ASYFUNCNAME, ASYVARNAME
        for index, token, value in \
                RegexLexer.get_tokens_unprocessed(self, text):
            if token is Name and value in ASYFUNCNAME:
                token = Name.Function
            elif token is Name and value in ASYVARNAME:
                token = Name.Variable
            yield index, token, value
def _shortened(word):
dpos = word.find('$')
return '|'.join(word[:dpos] + word[dpos+1:i] + r'\b'
for i in range(len(word), dpos, -1))
def _shortened_many(*words):
    """Combine several abbreviation patterns (see `_shortened`) into a
    single regex alternation."""
    return '|'.join(_shortened(word) for word in words)
class GnuplotLexer(RegexLexer):
    """
    For `Gnuplot <http://gnuplot.info/>`_ plotting scripts.
    .. versionadded:: 0.11
    """
    name = 'Gnuplot'
    aliases = ['gnuplot']
    filenames = ['*.plot', '*.plt']
    mimetypes = ['text/x-gnuplot']
    # Gnuplot allows commands to be abbreviated; `_shortened('bi$nd')`
    # expands to an alternation matching 'bind', 'bin' and 'bi'.
    # Most commands push a dedicated state that lexes their arguments.
    tokens = {
        'root': [
            include('whitespace'),
            (_shortened('bi$nd'), Keyword, 'bind'),
            (_shortened_many('ex$it', 'q$uit'), Keyword, 'quit'),
            (_shortened('f$it'), Keyword, 'fit'),
            (r'(if)(\s*)(\()', bygroups(Keyword, Text, Punctuation), 'if'),
            (r'else\b', Keyword),
            (_shortened('pa$use'), Keyword, 'pause'),
            (_shortened_many('p$lot', 'rep$lot', 'sp$lot'), Keyword, 'plot'),
            (_shortened('sa$ve'), Keyword, 'save'),
            # 'set' first lexes the option name, then generic arguments.
            (_shortened('se$t'), Keyword, ('genericargs', 'optionarg')),
            (_shortened_many('sh$ow', 'uns$et'),
             Keyword, ('noargs', 'optionarg')),
            (_shortened_many('low$er', 'ra$ise', 'ca$ll', 'cd$', 'cl$ear',
                             'h$elp', '\\?$', 'hi$story', 'l$oad', 'pr$int',
                             'pwd$', 're$read', 'res$et', 'scr$eendump',
                             'she$ll', 'sy$stem', 'up$date'),
             Keyword, 'genericargs'),
            (_shortened_many('pwd$', 're$read', 'res$et', 'scr$eendump',
                             'she$ll', 'test$'),
             Keyword, 'noargs'),
            # Variable assignment: name = ...
            (r'([a-zA-Z_]\w*)(\s*)(=)',
             bygroups(Name.Variable, Text, Operator), 'genericargs'),
            # Function definition: name(args) = ...
            (r'([a-zA-Z_]\w*)(\s*\(.*?\)\s*)(=)',
             bygroups(Name.Function, Text, Operator), 'genericargs'),
            (r'@[a-zA-Z_]\w*', Name.Constant),  # macros
            (r';', Keyword),
        ],
        'comment': [
            (r'[^\\\n]', Comment),
            (r'\\\n', Comment),
            (r'\\', Comment),
            # don't add the newline to the Comment token
            default('#pop'),
        ],
        'whitespace': [
            ('#', Comment, 'comment'),
            (r'[ \t\v\f]+', Text),
        ],
        'noargs': [
            include('whitespace'),
            # semicolon and newline end the argument list
            (r';', Punctuation, '#pop'),
            (r'\n', Text, '#pop'),
        ],
        # Double-quoted string; supports backslash escapes.
        'dqstring': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String),  # all other characters
            (r'\\\n', String),  # line continuation
            (r'\\', String),  # stray backslash
            (r'\n', String, '#pop'),  # newline ends the string too
        ],
        # Single-quoted string; '' is the only escape.
        'sqstring': [
            (r"''", String),  # escaped single quote
            (r"'", String, '#pop'),
            (r"[^\\'\n]+", String),  # all other characters
            (r'\\\n', String),  # line continuation
            (r'\\', String),  # normal backslash
            (r'\n', String, '#pop'),  # newline ends the string too
        ],
        # Generic argument lexing shared by most commands.
        'genericargs': [
            include('noargs'),
            (r'"', String, 'dqstring'),
            (r"'", String, 'sqstring'),
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float),
            (r'(\d+\.\d*|\.\d+)', Number.Float),
            (r'-?\d+', Number.Integer),
            ('[,.~!%^&*+=|?:<>/-]', Operator),
            (r'[{}()\[\]]', Punctuation),
            (r'(eq|ne)\b', Operator.Word),
            (r'([a-zA-Z_]\w*)(\s*)(\()',
             bygroups(Name.Function, Text, Punctuation)),
            (r'[a-zA-Z_]\w*', Name),
            (r'@[a-zA-Z_]\w*', Name.Constant),  # macros
            (r'\\\n', Text),
        ],
        # First word after set/show/unset: the (abbreviatable) option name.
        'optionarg': [
            include('whitespace'),
            (_shortened_many(
                "a$ll", "an$gles", "ar$row", "au$toscale", "b$ars", "bor$der",
                "box$width", "cl$abel", "c$lip", "cn$trparam", "co$ntour", "da$ta",
                "data$file", "dg$rid3d", "du$mmy", "enc$oding", "dec$imalsign",
                "fit$", "font$path", "fo$rmat", "fu$nction", "fu$nctions", "g$rid",
                "hid$den3d", "his$torysize", "is$osamples", "k$ey", "keyt$itle",
                "la$bel", "li$nestyle", "ls$", "loa$dpath", "loc$ale", "log$scale",
                "mac$ros", "map$ping", "map$ping3d", "mar$gin", "lmar$gin",
                "rmar$gin", "tmar$gin", "bmar$gin", "mo$use", "multi$plot",
                "mxt$ics", "nomxt$ics", "mx2t$ics", "nomx2t$ics", "myt$ics",
                "nomyt$ics", "my2t$ics", "nomy2t$ics", "mzt$ics", "nomzt$ics",
                "mcbt$ics", "nomcbt$ics", "of$fsets", "or$igin", "o$utput",
                "pa$rametric", "pm$3d", "pal$ette", "colorb$ox", "p$lot",
                "poi$ntsize", "pol$ar", "pr$int", "obj$ect", "sa$mples", "si$ze",
                "st$yle", "su$rface", "table$", "t$erminal", "termo$ptions", "ti$cs",
                "ticsc$ale", "ticsl$evel", "timef$mt", "tim$estamp", "tit$le",
                "v$ariables", "ve$rsion", "vi$ew", "xyp$lane", "xda$ta", "x2da$ta",
                "yda$ta", "y2da$ta", "zda$ta", "cbda$ta", "xl$abel", "x2l$abel",
                "yl$abel", "y2l$abel", "zl$abel", "cbl$abel", "xti$cs", "noxti$cs",
                "x2ti$cs", "nox2ti$cs", "yti$cs", "noyti$cs", "y2ti$cs", "noy2ti$cs",
                "zti$cs", "nozti$cs", "cbti$cs", "nocbti$cs", "xdti$cs", "noxdti$cs",
                "x2dti$cs", "nox2dti$cs", "ydti$cs", "noydti$cs", "y2dti$cs",
                "noy2dti$cs", "zdti$cs", "nozdti$cs", "cbdti$cs", "nocbdti$cs",
                "xmti$cs", "noxmti$cs", "x2mti$cs", "nox2mti$cs", "ymti$cs",
                "noymti$cs", "y2mti$cs", "noy2mti$cs", "zmti$cs", "nozmti$cs",
                "cbmti$cs", "nocbmti$cs", "xr$ange", "x2r$ange", "yr$ange",
                "y2r$ange", "zr$ange", "cbr$ange", "rr$ange", "tr$ange", "ur$ange",
                "vr$ange", "xzeroa$xis", "x2zeroa$xis", "yzeroa$xis", "y2zeroa$xis",
                "zzeroa$xis", "zeroa$xis", "z$ero"), Name.Builtin, '#pop'),
        ],
        'bind': [
            ('!', Keyword, '#pop'),
            (_shortened('all$windows'), Name.Builtin),
            include('genericargs'),
        ],
        'quit': [
            (r'gnuplot\b', Keyword),
            include('noargs'),
        ],
        'fit': [
            (r'via\b', Name.Builtin),
            include('plot'),
        ],
        'if': [
            (r'\)', Punctuation, '#pop'),
            include('genericargs'),
        ],
        'pause': [
            (r'(mouse|any|button1|button2|button3)\b', Name.Builtin),
            (_shortened('key$press'), Name.Builtin),
            include('genericargs'),
        ],
        'plot': [
            (_shortened_many('ax$es', 'axi$s', 'bin$ary', 'ev$ery', 'i$ndex',
                             'mat$rix', 's$mooth', 'thru$', 't$itle',
                             'not$itle', 'u$sing', 'w$ith'),
             Name.Builtin),
            include('genericargs'),
        ],
        'save': [
            (_shortened_many('f$unctions', 's$et', 't$erminal', 'v$ariables'),
             Name.Builtin),
            include('genericargs'),
        ],
    }
class PovrayLexer(RegexLexer):
    """
    For `Persistence of Vision Raytracer <http://www.povray.org/>`_ files.
    .. versionadded:: 0.11
    """
    name = 'POVRay'
    aliases = ['pov']
    filenames = ['*.pov', '*.inc']
    mimetypes = ['text/x-povray']
    tokens = {
        'root': [
            # Comments and string literals.
            (r'/\*[\w\W]*?\*/', Comment.Multiline),
            (r'//.*\n', Comment.Single),
            (r'(?s)"(?:\\.|[^"\\])+"', String.Double),
            # Language directives: #declare, #if, #macro, ...
            (words((
                'break', 'case', 'debug', 'declare', 'default', 'define', 'else',
                'elseif', 'end', 'error', 'fclose', 'fopen', 'for', 'if', 'ifdef',
                'ifndef', 'include', 'local', 'macro', 'range', 'read', 'render',
                'statistics', 'switch', 'undef', 'version', 'warning', 'while',
                'write'), prefix=r'#', suffix=r'\b'),
             Comment.Preproc),
            # Scene-description keywords (modifiers, functions, parameters).
            (words((
                'aa_level', 'aa_threshold', 'abs', 'acos', 'acosh', 'adaptive', 'adc_bailout',
                'agate', 'agate_turb', 'all', 'alpha', 'ambient', 'ambient_light', 'angle',
                'aperture', 'arc_angle', 'area_light', 'asc', 'asin', 'asinh', 'assumed_gamma',
                'atan', 'atan2', 'atanh', 'atmosphere', 'atmospheric_attenuation',
                'attenuating', 'average', 'background', 'black_hole', 'blue', 'blur_samples',
                'bounded_by', 'box_mapping', 'bozo', 'break', 'brick', 'brick_size',
                'brightness', 'brilliance', 'bumps', 'bumpy1', 'bumpy2', 'bumpy3', 'bump_map',
                'bump_size', 'case', 'caustics', 'ceil', 'checker', 'chr', 'clipped_by', 'clock',
                'color', 'color_map', 'colour', 'colour_map', 'component', 'composite', 'concat',
                'confidence', 'conic_sweep', 'constant', 'control0', 'control1', 'cos', 'cosh',
                'count', 'crackle', 'crand', 'cube', 'cubic_spline', 'cylindrical_mapping',
                'debug', 'declare', 'default', 'degrees', 'dents', 'diffuse', 'direction',
                'distance', 'distance_maximum', 'div', 'dust', 'dust_type', 'eccentricity',
                'else', 'emitting', 'end', 'error', 'error_bound', 'exp', 'exponent',
                'fade_distance', 'fade_power', 'falloff', 'falloff_angle', 'false',
                'file_exists', 'filter', 'finish', 'fisheye', 'flatness', 'flip', 'floor',
                'focal_point', 'fog', 'fog_alt', 'fog_offset', 'fog_type', 'frequency', 'gif',
                'global_settings', 'glowing', 'gradient', 'granite', 'gray_threshold',
                'green', 'halo', 'hexagon', 'hf_gray_16', 'hierarchy', 'hollow', 'hypercomplex',
                'if', 'ifdef', 'iff', 'image_map', 'incidence', 'include', 'int', 'interpolate',
                'inverse', 'ior', 'irid', 'irid_wavelength', 'jitter', 'lambda', 'leopard',
                'linear', 'linear_spline', 'linear_sweep', 'location', 'log', 'looks_like',
                'look_at', 'low_error_factor', 'mandel', 'map_type', 'marble', 'material_map',
                'matrix', 'max', 'max_intersections', 'max_iteration', 'max_trace_level',
                'max_value', 'metallic', 'min', 'minimum_reuse', 'mod', 'mortar',
                'nearest_count', 'no', 'normal', 'normal_map', 'no_shadow', 'number_of_waves',
                'octaves', 'off', 'offset', 'omega', 'omnimax', 'on', 'once', 'onion', 'open',
                'orthographic', 'panoramic', 'pattern1', 'pattern2', 'pattern3',
                'perspective', 'pgm', 'phase', 'phong', 'phong_size', 'pi', 'pigment',
                'pigment_map', 'planar_mapping', 'png', 'point_at', 'pot', 'pow', 'ppm',
                'precision', 'pwr', 'quadratic_spline', 'quaternion', 'quick_color',
                'quick_colour', 'quilted', 'radial', 'radians', 'radiosity', 'radius', 'rainbow',
                'ramp_wave', 'rand', 'range', 'reciprocal', 'recursion_limit', 'red',
                'reflection', 'refraction', 'render', 'repeat', 'rgb', 'rgbf', 'rgbft', 'rgbt',
                'right', 'ripples', 'rotate', 'roughness', 'samples', 'scale', 'scallop_wave',
                'scattering', 'seed', 'shadowless', 'sin', 'sine_wave', 'sinh', 'sky', 'sky_sphere',
                'slice', 'slope_map', 'smooth', 'specular', 'spherical_mapping', 'spiral',
                'spiral1', 'spiral2', 'spotlight', 'spotted', 'sqr', 'sqrt', 'statistics', 'str',
                'strcmp', 'strength', 'strlen', 'strlwr', 'strupr', 'sturm', 'substr', 'switch', 'sys',
                't', 'tan', 'tanh', 'test_camera_1', 'test_camera_2', 'test_camera_3',
                'test_camera_4', 'texture', 'texture_map', 'tga', 'thickness', 'threshold',
                'tightness', 'tile2', 'tiles', 'track', 'transform', 'translate', 'transmit',
                'triangle_wave', 'true', 'ttf', 'turbulence', 'turb_depth', 'type',
                'ultra_wide_angle', 'up', 'use_color', 'use_colour', 'use_index', 'u_steps',
                'val', 'variance', 'vaxis_rotate', 'vcross', 'vdot', 'version', 'vlength',
                'vnormalize', 'volume_object', 'volume_rendered', 'vol_with_light',
                'vrotate', 'v_steps', 'warning', 'warp', 'water_level', 'waves', 'while', 'width',
                'wood', 'wrinkles', 'yes'), prefix=r'\b', suffix=r'\b'),
             Keyword),
            # Geometric object / CSG primitives.
            (words((
                'bicubic_patch', 'blob', 'box', 'camera', 'cone', 'cubic', 'cylinder', 'difference',
                'disc', 'height_field', 'intersection', 'julia_fractal', 'lathe',
                'light_source', 'merge', 'mesh', 'object', 'plane', 'poly', 'polygon', 'prism',
                'quadric', 'quartic', 'smooth_triangle', 'sor', 'sphere', 'superellipsoid',
                'text', 'torus', 'triangle', 'union'), suffix=r'\b'),
             Name.Builtin),
            # TODO: <=, etc
            (r'[\[\](){}<>;,]', Punctuation),
            (r'[-+*/=]', Operator),
            # Predefined vector-component identifiers.
            (r'\b(x|y|z|u|v)\b', Name.Builtin.Pseudo),
            (r'[a-zA-Z_]\w*', Name),
            (r'[0-9]+\.[0-9]*', Number.Float),
            (r'\.[0-9]+', Number.Float),
            (r'[0-9]+', Number.Integer),
            (r'"(\\\\|\\"|[^"])*"', String),
            (r'\s+', Text),
        ]
    }
| env/lib/python3.7/site-packages/pygments/lexers/graphics.py | 39,040 | For `Asymptote <http://asymptote.sf.net/>`_ source code.
.. versionadded:: 1.2
GLSL (OpenGL Shader) lexer.
.. versionadded:: 1.1
For `Gnuplot <http://gnuplot.info/>`_ plotting scripts.
.. versionadded:: 0.11
HLSL (Microsoft Direct3D Shader) lexer.
.. versionadded:: 2.3
Lexer for PostScript files.
The PostScript Language Reference published by Adobe at
<http://partners.adobe.com/public/developer/en/ps/PLRM.pdf>
is the authority for this.
.. versionadded:: 1.4
For `Persistence of Vision Raytracer <http://www.povray.org/>`_ files.
.. versionadded:: 0.11
pygments.lexers.graphics
~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for computer graphics and plotting related languages.
:copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
-*- coding: utf-8 -*- quick hack for ternary FIXME when e is present, no decimal point needed Storage qualifiers Layout qualifiers Interpolation qualifiers Auxiliary qualifiers Parameter qualifiers. Some double as Storage qualifiers Precision qualifiers Invariance qualifiers Precise qualifiers Memory qualifiers Statements Boolean values Miscellaneous types Floating-point scalars and vectors Integer scalars and vectors Boolean scalars and vectors Matrices Floating-point samplers Shadow samplers Signed integer samplers Unsigned integer samplers Floating-point image types Signed integer image types Unsigned integer image types Reserved for future use. All names beginning with "gl_" are reserved. quick hack for ternary FIXME when e is present, no decimal point needed vector and matrix types built-in functions system-value semantics attributes backslash at end of line -- usually macro continuation all other characters line continuation stray backslash All comment types String literals are awkward; enter separate state. Numbers Slight abuse: use Oct to signify any explicit base system References Names Anything else is executed These keywords taken from <http://www.math.ubc.ca/~cass/graphics/manual/pdf/a1.pdf> Is there an authoritative list anywhere that doesn't involve trawling documentation? Conditionals / flow control: optional Comment or Whitespace line continuation simple string (TeX friendly) C style string (with character escapes) Since an asy-type-name can be also an asy-function-name, in the following we test if the string " [a-zA-Z]" follows the Keyword.Type. Of course it is not perfect ! Now the asy-type-name which are not asy-function-name except yours ! 
Perhaps useless functions return arguments method name signature function declarations return arguments method name signature all other characters line continuation stray backslash macros don't add the newline to the Comment token semicolon and newline end the argument list all other characters line continuation stray backslash newline ends the string too escaped single quote all other characters line continuation normal backslash newline ends the string too macros TODO: <=, etc | 2,958 | en | 0.726236 |
import os
import sys
from typing import Dict
from typing import List
from typing import Optional
import pkg_resources
from setuptools import find_packages
from setuptools import setup
def get_version() -> str:
    """Read the package version string out of optuna/version.py.

    The file is scanned textually rather than imported, so that setup.py
    works even before optuna's dependencies are installed.
    """
    path = os.path.join(os.path.dirname(__file__), "optuna", "version.py")
    with open(path) as handle:
        for raw_line in handle:
            if not raw_line.startswith("__version__"):
                continue
            # The last whitespace-separated token is the quoted version
            # literal; strip the surrounding quote characters.
            return raw_line.strip().split()[-1][1:-1]
    assert False
def get_long_description() -> str:
    """Return the contents of README.md, used as PyPI's long description."""
    readme_path = os.path.join(os.path.dirname(__file__), "README.md")
    with open(readme_path) as handle:
        return handle.read()
def get_install_requires() -> List[str]:
    """Mandatory runtime dependencies installed with ``pip install optuna``."""
    requirements = [
        "alembic",
        "cliff",
        "cmaes>=0.6.0",
        "colorlog",
        "joblib",
        "numpy",
        "packaging>=20.0",
        "scipy!=1.4.0",
        "sqlalchemy>=1.1.0",
        "tqdm",
    ]
    return requirements
def get_tests_require() -> List[str]:
    """Test-only dependencies; an alias for the "testing" extras group."""
    extras = get_extras_require()
    return extras["testing"]
def _pytorch_requires() -> List[str]:
    """Pinned PyTorch stack shared by the example/testing/integration extras.

    macOS has no "+cpu" wheels, so the plain builds are pinned there; on all
    other platforms the CPU-only wheels are used.
    """
    if sys.platform == "darwin":
        return ["torch==1.7.0", "torchvision==0.8.1", "torchaudio==0.7.0"]
    return ["torch==1.7.0+cpu", "torchvision==0.8.1+cpu", "torchaudio==0.7.0"]


def _py37_only_requires(packages: List[str]) -> List[str]:
    """Return *packages* on Python < 3.8 and an empty list otherwise.

    The pinned versions of these packages do not support Python 3.8+.
    """
    return packages if sys.version_info[:2] < (3, 8) else []


def get_extras_require() -> Dict[str, List[str]]:
    """Optional dependency groups keyed by extras name
    (``pip install optuna[name]``).

    The platform/Python-version dependent pins (PyTorch, AllenNLP, fastai)
    were previously duplicated across the "example", "testing" and
    "integration" groups; they are now computed once by the helpers above.
    """
    requirements = {
        "checking": ["black", "hacking", "isort", "mypy==0.782", "blackdoc"],
        "codecov": ["codecov", "pytest-cov"],
        "doctest": [
            "cma",
            "matplotlib>=3.0.0",
            "pandas",
            "plotly>=4.0.0",
            "scikit-learn>=0.19.0,<0.23.0",
            "scikit-optimize",
            "mlflow",
        ],
        "document": [
            # TODO(hvy): Unpin `sphinx` version after:
            # https://github.com/sphinx-doc/sphinx/issues/8105.
            "sphinx==3.0.4",
            # As reported in: https://github.com/readthedocs/sphinx_rtd_theme/issues/949,
            # `sphinx_rtd_theme` 0.5.0 is still not compatible with `sphinx` >= 3.0.
            "sphinx_rtd_theme<0.5.0",
            "sphinx-gallery",
            "sphinx-plotly-directive",
            "pillow",
            "matplotlib",
            "scikit-learn",
        ],
        "example": [
            "catboost",
            "chainer",
            "lightgbm",
            "mlflow",
            "mpi4py",
            "mxnet",
            "nbval",
            "scikit-image",
            "scikit-learn>=0.19.0,<0.23.0",  # optuna/visualization/param_importances.py.
            "xgboost",
            "keras",
            "tensorflow>=2.0.0",
            "tensorflow-datasets",
            "pytorch-ignite",
            "pytorch-lightning>=0.8.1",
            "thop",
            "skorch",
            "stable-baselines3>=0.7.0",
            "catalyst",
        ]
        + _pytorch_requires()
        + _py37_only_requires(
            [
                "allennlp==1.2.0",
                "fastai<2",
                "dask[dataframe]",
                "dask-ml",
            ]
        ),
        "experimental": ["redis"],
        "testing": [
            # TODO(toshihikoyanase): Remove the version constraint after resolving the issue
            # https://github.com/optuna/optuna/issues/1000.
            "bokeh<2.0.0",
            "chainer>=5.0.0",
            "cma",
            "fakeredis",
            "lightgbm",
            "matplotlib>=3.0.0",
            "mlflow",
            "mpi4py",
            "mxnet",
            "pandas",
            "plotly>=4.0.0",
            "pytest",
            "scikit-learn>=0.19.0,<0.23.0",
            "scikit-optimize",
            "xgboost",
            "keras",
            "tensorflow",
            "tensorflow-datasets",
            "pytorch-ignite",
            "pytorch-lightning>=0.8.1",
            "skorch",
            "catalyst",
        ]
        + _pytorch_requires()
        + _py37_only_requires(["allennlp==1.2.0", "fastai<2"]),
        "tests": ["fakeredis", "pytest"],
        "optional": [
            "bokeh<2.0.0",  # optuna/cli.py, optuna/dashboard.py.
            "matplotlib>=3.0.0",  # optuna/visualization/matplotlib
            "pandas",  # optuna/study.py
            "plotly>=4.0.0",  # optuna/visualization.
            "redis",  # optuna/storages/redis.py.
            "scikit-learn>=0.19.0,<0.23.0",  # optuna/visualization/param_importances.py.
        ],
        "integration": [
            # TODO(toshihikoyanase): Remove the version constraint after resolving the issue
            # https://github.com/optuna/optuna/issues/1000.
            "chainer>=5.0.0",
            "cma",
            "lightgbm",
            "mlflow",
            "mpi4py",
            "mxnet",
            "pandas",
            "scikit-learn>=0.19.0,<0.23.0",
            "scikit-optimize",
            "xgboost",
            "keras",
            "tensorflow",
            "tensorflow-datasets",
            "pytorch-ignite",
            "pytorch-lightning>=0.8.1",
            "skorch",
            "catalyst",
        ]
        + _pytorch_requires()
        + _py37_only_requires(["allennlp==1.2.0", "fastai<2"]),
    }
    return requirements
def find_any_distribution(pkgs: List[str]) -> Optional[pkg_resources.Distribution]:
    """Return the first installed distribution among *pkgs*, or None if
    none of them is installed."""
    for name in pkgs:
        try:
            return pkg_resources.get_distribution(name)
        except pkg_resources.DistributionNotFound:
            continue
    return None
# Package metadata, bundled data files and CLI entry points for optuna.
setup(
    name="optuna",
    version=get_version(),
    description="A hyperparameter optimization framework",
    long_description=get_long_description(),
    long_description_content_type="text/markdown",
    author="Takuya Akiba",
    author_email="akiba@preferred.jp",
    url="https://optuna.org/",
    packages=find_packages(),
    # Non-Python files shipped inside the wheel: alembic migration scripts
    # for the RDB storage backend, plus the PEP 561 typing marker.
    package_data={
        "optuna": [
            "storages/_rdb/alembic.ini",
            "storages/_rdb/alembic/*.*",
            "storages/_rdb/alembic/versions/*.*",
            "py.typed",
        ]
    },
    python_requires=">=3.6",
    install_requires=get_install_requires(),
    tests_require=get_tests_require(),
    extras_require=get_extras_require(),
    # `optuna` console script plus cliff subcommands dispatched to optuna.cli.
    entry_points={
        "console_scripts": ["optuna = optuna.cli:main"],
        "optuna.command": [
            "create-study = optuna.cli:_CreateStudy",
            "delete-study = optuna.cli:_DeleteStudy",
            "study set-user-attr = optuna.cli:_StudySetUserAttribute",
            "studies = optuna.cli:_Studies",
            "dashboard = optuna.cli:_Dashboard",
            "study optimize = optuna.cli:_StudyOptimize",
            "storage upgrade = optuna.cli:_StorageUpgrade",
        ],
    },
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Science/Research",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3 :: Only",
        "Topic :: Scientific/Engineering",
        "Topic :: Scientific/Engineering :: Mathematics",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
        "Topic :: Software Development",
        "Topic :: Software Development :: Libraries",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
)
| setup.py | 7,862 | TODO(hvy): Unpin `sphinx` version after: https://github.com/sphinx-doc/sphinx/issues/8105. As reported in: https://github.com/readthedocs/sphinx_rtd_theme/issues/949, `sphinx_rtd_theme` 0.5.0 is still not compatible with `sphinx` >= 3.0. optuna/visualization/param_importances.py. TODO(toshihikoyanase): Remove the version constraint after resolving the issue https://github.com/optuna/optuna/issues/1000. optuna/cli.py, optuna/dashboard.py. optuna/visualization/matplotlib optuna/study.py optuna/visualization. optuna/storages/redis.py. optuna/visualization/param_importances.py. TODO(toshihikoyanase): Remove the version constraint after resolving the issue https://github.com/optuna/optuna/issues/1000. | 705 | en | 0.700849 |
from ..coefficient_array import PwCoeffs
from scipy.sparse import dia_matrix
import numpy as np
def _kinetic_diag(gkvec, eps):
    """Diagonal entries 1 / (|G+k|^2 + eps) for one k-point.

    Uses Cartesian G+k vectors so the squared norm is the physical kinetic
    energy scale.
    """
    assert (gkvec.num_gvec() == gkvec.count())
    N = gkvec.count()
    return np.array([
        1 / (np.sum((np.array(gkvec.gkvec_cart(i)))**2) + eps)
        for i in range(N)
    ])


def make_kinetic_precond(kpointset, c0, eps=0.1, asPwCoeffs=True):
    """
    Preconditioner
    P = 1 / (||k|| + ε)

    Keyword Arguments:
    kpointset  -- set of k-points
    c0         -- constraint reference passed to DiagonalPreconditioner
    eps        -- regularization added to the squared norm (default 0.1)
    asPwCoeffs -- if False and there is a single k-point / single spin,
                  store the diagonal as a plain dia_matrix instead of PwCoeffs
    """
    nk = len(kpointset)
    nc = kpointset.ctx().num_spins()
    if nc == 1 and nk == 1 and not asPwCoeffs:
        # Fast path: single k-point, single spin -> one plain dia_matrix.
        # NOTE(review): this branch previously used gkvec.gkvec(i) (lattice
        # coordinates) while the general branch used gkvec.gkvec_cart(i);
        # both now use Cartesian coordinates, matching the documented
        # P = 1/(||k|| + eps). Confirm against SIRIUS conventions.
        gkvec = kpointset[0].gkvec()
        d = _kinetic_diag(gkvec, eps)
        N = gkvec.count()
        return DiagonalPreconditioner(
            D=dia_matrix((d, 0), shape=(N, N)), c0=c0)
    else:
        # General case: one diagonal block per (k-point, spin) pair.
        P = PwCoeffs(dtype=np.float64, ctype=dia_matrix)
        for k in range(nk):
            gkvec = kpointset[k].gkvec()
            d = _kinetic_diag(gkvec, eps)
            N = gkvec.count()
            for ispn in range(nc):
                P[k, ispn] = dia_matrix((d, 0), shape=(N, N))
        return DiagonalPreconditioner(P, c0)
class Preconditioner:
    """Marker base class for preconditioners; concrete behavior lives in
    subclasses (see DiagonalPreconditioner, IdentityPreconditioner)."""
    def __init__(self):
        # Stateless base; present so subclasses can call super().__init__().
        pass
class DiagonalPreconditioner(Preconditioner):
    """
    Apply diagonal preconditioner and project resulting gradient to satisfy the constraint.

    Attributes:
    D  -- diagonal blocks, a dict-like mapping of (k, spin) keys to dia_matrix
    c0 -- constraint reference used for the Lagrange-multiplier projection
    """
    def __init__(self, D, c0):
        super().__init__()
        self.c0 = c0
        self.D = D

    def __matmul__(self, other):
        """Apply the preconditioner to `other` (a CoefficientArray) and add
        the Lagrange-multiplier term projecting onto the constraint."""
        from ..coefficient_array import CoefficientArray
        from .ot_transformations import lagrangeMult
        out = type(other)(dtype=other.dtype)
        if isinstance(other, CoefficientArray):
            for key, Dl in self.D.items():
                out[key] = Dl * other[key]
        else:
            raise ValueError('wrong type given')
        ll = lagrangeMult(other, self.c0, self)
        return out + ll

    def __mul__(self, s):
        """Scale by a scalar (in place, returning self), or apply blockwise
        to a CoefficientArray (returning a new array)."""
        from ..coefficient_array import CoefficientArray
        import numpy as np
        if np.isscalar(s):
            for key, Dl in self.D.items():
                self.D[key] = s * Dl
            # BUG FIX: this branch previously fell through and returned None,
            # so expressions like `2 * P` evaluated to None. The in-place
            # scaling is kept for backward compatibility; returning self
            # makes the result usable.
            return self
        elif isinstance(s, CoefficientArray):
            out = type(s)(dtype=s.dtype)
            for key in s.keys():
                out[key] = self.D[key] * s[key]
            return out

    # NOTE(review): __lmul__ is not a Python special method (left
    # multiplication is handled by __mul__/__rmul__); the alias is kept in
    # case external code calls it directly.
    __lmul__ = __mul__
    __rmul__ = __mul__

    def __neg__(self):
        """Return a negated copy (new object; self is left untouched)."""
        from ..coefficient_array import CoefficientArray
        if isinstance(self.D, CoefficientArray):
            out_data = type(self.D)(dtype=self.D.dtype, ctype=self.D.ctype)
            out = DiagonalPreconditioner(out_data, self.c0)
            for k, v in self.D.items():
                out.D[k] = -v
            return out
        else:
            out = DiagonalPreconditioner(self.D, self.c0)
            out.D = -self.D
            return out

    def __getitem__(self, key):
        return self.D[key]
class IdentityPreconditioner(Preconditioner):
    """No-op preconditioner (optionally sign-flipped via `_f`) that still
    applies the Lagrange-multiplier constraint projection in `__matmul__`."""

    def __init__(self, c0, _f=1):
        super().__init__()
        self.c0 = c0
        self._f = _f

    def __matmul__(self, other):
        from .ot_transformations import lagrangeMult
        correction = lagrangeMult(other, self.c0, self)
        return self._f * other + correction

    def __mul__(self, s):
        return self._f * s

    __lmul__ = __mul__
    __rmul__ = __mul__

    def __neg__(self):
        return IdentityPreconditioner(self.c0, _f=-self._f)

    def __getitem__(self, key):
        return self._f
| python_module/sirius/ot/ot_precondition.py | 3,716 | Apply diagonal preconditioner and project resulting gradient to satisfy the constraint.
Preconditioner
P = 1 / (||k|| + ε)
Keyword Arguments:
kpointset --
return as np.matrix | 204 | en | 0.618913 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2011 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Sumit Naiksatam, Cisco Systems, Inc.
# @author: Edgar Magana, Cisco Systems, Inc.
#
"""
Configuration consolidation for the Nexus Driver
This module will export the configuration parameters
from the nexus.ini file
"""
from quantum.common.utils import find_config_file
from quantum.plugins.cisco.common import cisco_configparser as confp
# Parse the Nexus driver settings from the plugin's nexus.ini file.
CP = confp.CiscoConfigParser(find_config_file({'plugin': 'cisco'},
                                              "nexus.ini"))
# [SWITCH] section: connection parameters for the Nexus switch.
SECTION = CP['SWITCH']
NEXUS_IP_ADDRESS = SECTION['nexus_ip_address']
NEXUS_FIRST_PORT = SECTION['nexus_first_port']
NEXUS_SECOND_PORT = SECTION['nexus_second_port']
NEXUS_SSH_PORT = SECTION['nexus_ssh_port']
# [DRIVER] section: dotted path of the Nexus driver class to load.
SECTION = CP['DRIVER']
NEXUS_DRIVER = SECTION['name']
| quantum/plugins/cisco/nexus/cisco_nexus_configuration.py | 1,407 | Configuration consolidation for the Nexus Driver
This module will export the configuration parameters
from the nexus.ini file
vim: tabstop=4 shiftwidth=4 softtabstop=4 Copyright 2011 Cisco Systems, Inc. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. @author: Sumit Naiksatam, Cisco Systems, Inc. @author: Edgar Magana, Cisco Systems, Inc. | 865 | en | 0.7657 |
'''
Neuron simulator export for:
Components:
net1 (Type: network)
sim1 (Type: Simulation: length=1.0 (SI time) step=5.0E-5 (SI time))
hhcell (Type: cell)
passive (Type: ionChannelPassive: conductance=1.0E-11 (SI conductance))
na (Type: ionChannelHH: conductance=1.0E-11 (SI conductance))
k (Type: ionChannelHH: conductance=1.0E-11 (SI conductance))
pulseGen1 (Type: pulseGenerator: delay=0.0 (SI time) duration=1.0E8 (SI time) amplitude=8.000000000000001E-11 (SI current))
This NEURON file has been generated by org.neuroml.export (see https://github.com/NeuroML/org.neuroml.export)
org.neuroml.export v1.5.3
org.neuroml.model v1.5.3
jLEMS v0.9.9.0
'''
import neuron
import time
import hashlib
# `h` is the NEURON hoc interpreter; all model construction below is driven
# through it, either via attribute access or by evaluating hoc strings.
h = neuron.h
h.load_file("stdlib.hoc")
h.load_file("stdgui.hoc")
# Standard jNeuroML boilerplate: a hoc-side reference wrapping a Python
# object (presumably so hoc code can reach back into Python — not used
# further in this script).
h("objref p")
h("p = new PythonObject()")
class NeuronSimulation():
    """Driver for the NEURON simulation generated from the NeuroML2 model:
    builds the `hhpop` population, attaches the pulse-generator input,
    records time, and runs/saves the simulation."""

    def __init__(self, tstop, dt, seed=123456789):
        """Construct the network in the hoc interpreter.

        tstop -- simulation length in ms
        dt    -- integration step in ms
        seed  -- base seed for Random123 streams
        """
        print("\n    Starting simulation in NEURON of %sms generated from NeuroML2 model...\n"%tstop)
        self.seed = seed
        self.randoms = []
        self.next_global_id = 0  # Used in Random123 classes for elements using random(), etc.
        self.next_spiking_input_id = 0  # Used in Random123 classes for elements using random(), etc.
        '''
        Adding simulation Component(id=sim1 type=Simulation) of network/component: net1 (Type: network)
        '''
        # ###################### Population: hhpop
        print("Population hhpop contains 1 instance(s) of component: hhcell of type: cell")
        h.load_file("hhcell.hoc")
        a_hhpop = []
        h("{ n_hhpop = 1 }")
        h("objectvar a_hhpop[n_hhpop]")
        for i in range(int(h.n_hhpop)):
            h("a_hhpop[%i] = new hhcell()"%i)
            h("access a_hhpop[%i].soma"%i)
            self.next_global_id+=1
        # FInitializeHandlers set initial voltages / ion properties at t=0.
        h("proc initialiseV_hhpop() { for i = 0, n_hhpop-1 { a_hhpop[i].set_initial_v() } }")
        h("objref fih_hhpop")
        h('{fih_hhpop = new FInitializeHandler(0, "initialiseV_hhpop()")}')
        h("proc initialiseIons_hhpop() { for i = 0, n_hhpop-1 { a_hhpop[i].set_initial_ion_properties() } }")
        h("objref fih_ion_hhpop")
        h('{fih_ion_hhpop = new FInitializeHandler(1, "initialiseIons_hhpop()")}')
        # Adding single input: Component(id=null type=explicitInput)
        h("objref explicitInput_pulseGen1a_hhpop0_soma")
        h("a_hhpop[0].soma { explicitInput_pulseGen1a_hhpop0_soma = new pulseGen1(0.5) } ")
        # Record the time vector.
        trec = h.Vector()
        trec.record(h._ref_t)
        h.tstop = tstop
        h.dt = dt
        h.steps_per_ms = 1/h.dt
        # ###################### File to save: time.dat (time)
        # Column: time
        h(' objectvar v_time ')
        h(' { v_time = new Vector() } ')
        h(' { v_time.record(&t) } ')
        h.v_time.resize((h.tstop * h.steps_per_ms) + 1)
        self.initialized = False
        self.sim_end = -1 # will be overwritten

    def run(self):
        """Run the full simulation to h.tstop, then save results."""
        self.initialized = True
        sim_start = time.time()
        print("Running a simulation of %sms (dt = %sms; seed=%s)" % (h.tstop, h.dt, self.seed))
        h.run()
        self.sim_end = time.time()
        sim_time = self.sim_end - sim_start
        print("Finished NEURON simulation in %f seconds (%f mins)..."%(sim_time, sim_time/60.0))
        self.save_results()

    def advance(self):
        """Advance the simulation by a single time step (initializing lazily)."""
        if not self.initialized:
            h.finitialize()
            self.initialized = True
        h.fadvance()

    ###############################################################################
    # Hash function to use in generation of random value
    # This is copied from NetPyNE: https://github.com/Neurosim-lab/netpyne/blob/master/netpyne/simFuncs.py
    ###############################################################################
    def _id32 (self,obj):
        return int(hashlib.md5(obj).hexdigest()[0:8],16) # convert 8 first chars of md5 hash in base 16 to int

    ###############################################################################
    # Initialize the stim randomizer
    # This is copied from NetPyNE: https://github.com/Neurosim-lab/netpyne/blob/master/netpyne/simFuncs.py
    ###############################################################################
    def _init_stim_randomizer(self,rand, stimType, gid, seed):
        #print("INIT STIM  %s; %s; %s; %s"%(rand, stimType, gid, seed))
        rand.Random123(self._id32(stimType), gid, seed)

    def save_results(self):
        """Dump the recorded time vector to time.dat and terminate.

        NOTE(review): each value is written with '%f' and no separator, so
        time.dat is one unbroken run of digits — confirm against the
        org.neuroml.export template whether a newline was lost here.
        """
        print("Saving results at t=%s..."%h.t)
        if self.sim_end < 0: self.sim_end = time.time()
        # ###################### File to save: time.dat (time)
        py_v_time = [ t/1000 for t in h.v_time.to_python() ]  # Convert to Python list for speed...
        f_time_f2 = open('time.dat', 'w')
        num_points = len(py_v_time)  # Simulation may have been stopped before tstop...
        for i in range(num_points):
            f_time_f2.write('%f'% py_v_time[i]) # Save in SI units...
        f_time_f2.close()
        print("Saved data to: time.dat")
        save_end = time.time()
        save_time = save_end - self.sim_end
        print("Finished saving results in %f seconds"%(save_time))
        print("Done")
        # NOTE(review): quit() terminates the whole interpreter after saving,
        # which makes this class unusable from an interactive session.
        quit()
if __name__ == '__main__':
    # Build and run the generated simulation with the exported parameters.
    simulation = NeuronSimulation(tstop=1000.0, dt=0.049999997, seed=123456789)
    simulation.run()
| src/test/resources/expected/neuron/hhcell/main_script.py | 5,456 | Neuron simulator export for:
Components:
net1 (Type: network)
sim1 (Type: Simulation: length=1.0 (SI time) step=5.0E-5 (SI time))
hhcell (Type: cell)
passive (Type: ionChannelPassive: conductance=1.0E-11 (SI conductance))
na (Type: ionChannelHH: conductance=1.0E-11 (SI conductance))
k (Type: ionChannelHH: conductance=1.0E-11 (SI conductance))
pulseGen1 (Type: pulseGenerator: delay=0.0 (SI time) duration=1.0E8 (SI time) amplitude=8.000000000000001E-11 (SI current))
This NEURON file has been generated by org.neuroml.export (see https://github.com/NeuroML/org.neuroml.export)
org.neuroml.export v1.5.3
org.neuroml.model v1.5.3
jLEMS v0.9.9.0
Used in Random123 classes for elements using random(), etc. Used in Random123 classes for elements using random(), etc. Population: hhpop Adding single input: Component(id=null type=explicitInput) File to save: time.dat (time) Column: time will be overwritten Hash function to use in generation of random value This is copied from NetPyNE: https://github.com/Neurosim-lab/netpyne/blob/master/netpyne/simFuncs.py convert 8 first chars of md5 hash in base 16 to int Initialize the stim randomizer This is copied from NetPyNE: https://github.com/Neurosim-lab/netpyne/blob/master/netpyne/simFuncs.pyprint("INIT STIM %s; %s; %s; %s"%(rand, stimType, gid, seed)) File to save: time.dat (time) Convert to Python list for speed... Simulation may have been stopped before tstop... Save in SI units... | 1,534 | en | 0.585608 |
# Character field ID when accessed: 100000201
# ParentID: 32226
# ObjectID: 0
| scripts/quest/autogen_q32226s.py | 78 | Character field ID when accessed: 100000201 ParentID: 32226 ObjectID: 0 | 71 | en | 0.433372 |
# Josh Aaron Miller 2021
# VenntDB methods for Characters
import venntdb
from constants import *
# VenntDB Methods
def character_exists(self, username, char_id):
    """Return True when *username* owns a character with id *char_id*."""
    found = self.get_character(username, char_id)
    return found is not None
def get_character(self, username, char_id):
    """Look up one character for *username*; None when the id is unknown."""
    self.assert_valid("accounts", username, "characters")
    # Guard clause: bail out early when the character id is not present.
    if not self.is_valid("accounts", username, "characters", char_id):
        return None
    return self.db["accounts"][username]["characters"][char_id]
def create_character(self, username, character):
    """Store *character* under its own "id" key and persist the DB."""
    self.assert_valid("accounts", username, "characters")
    owned = self.db["accounts"][username]["characters"]
    owned[character["id"]] = character
    self.save_db()
def get_characters(self, username):
    """Return the mapping of character id -> character for *username*."""
    self.assert_valid("accounts", username, "characters")
    account = self.db["accounts"][username]
    return account["characters"]
def get_attr(self, username, char_id, attr):
    """Read a single attribute from one of *username*'s characters."""
    self.assert_valid("accounts", username, "characters", char_id)
    character = self.get_character(username, char_id)
    return character[attr]
def set_attr(self, username, char_id, attr, val):
    """Write one attribute on a character and persist the DB."""
    self.assert_valid("accounts", username, "characters", char_id)
    character = self.get_character(username, char_id)
    character[attr] = val
    self.save_db()
| db_characters.py | 1,239 | Josh Aaron Miller 2021 VenntDB methods for Characters VenntDB Methods | 69 | en | 0.472623 |
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from batchgenerators.utilities.file_and_folder_operations import *
def remove_trailing_slash(filename: str) -> str:
    """Strip every trailing '/' from *filename* (same as the old while-loop)."""
    return filename.rstrip('/')
def maybe_add_0000_to_all_niigz(folder):
    """Rename every '*.nii.gz' in *folder* that lacks the '_0000' modality
    suffix to '*_0000.nii.gz' (nnU-Net's single-modality naming convention)."""
    for fname in subfiles(folder, suffix='.nii.gz'):
        fname = remove_trailing_slash(fname)
        if fname.endswith('_0000.nii.gz'):
            continue
        # Drop the 7-char '.nii.gz' extension, insert the suffix, re-append.
        os.rename(fname, fname[:-7] + '_0000.nii.gz')
| nnunet/utilities/file_endings.py | 1,058 | Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 628 | en | 0.858488 |
from dataclasses import dataclass
from apple.util.streamable import Streamable, streamable
@dataclass(frozen=True)
@streamable
class BackupInitialized(Streamable):
    """
    Stores the user's decision regarding import of backup info.
    Frozen dataclass serialized via @streamable; all fields are plain bools.
    """
    user_initialized: bool  # Stores if user made a selection in UI. (Skip vs Import backup)
    user_skipped: bool  # Stores if user decided to skip import of backup info
    backup_info_imported: bool  # Stores if backup info has been imported
    new_wallet: bool  # Stores if this wallet is newly created / not restored from backup
| apple/wallet/settings/settings_objects.py | 577 | Stores user decision regarding import of backup info
Stores if user made a selection in UI. (Skip vs Import backup) Stores if user decided to skip import of backup info Stores if backup info has been imported Stores if this wallet is newly created / not restored from backup | 276 | en | 0.904552 |
""" This file holds all the chapter 2 areas of the game. """
from time import sleep
# from classes import Player, Difficulty
from chapters.chapter import Chapter
from chapters.chapter3 import Chapter3
from other.sounds_effects import GameSounds
from game import player1, sounds, Difficulty
from choices import _player_choice, error_message
from other.colors import print_green, print_yellow, print_red, print_sleep, print_blue
class Chapter2(Chapter):
    """Contains all the main chapter 2 areas of the game."""
    # Chapter number shown/saved by the base Chapter machinery.
    chapter_num = 2
    def checkpoints(self):
        """runs movement to levels -- checkpoint when leaving area"""
        # Maps a saved checkpoint id to the method that resumes play there.
        # 'bad'/'1' endings are handled by methods inherited from Chapter
        # (bad_ending / good_ending) — presumably; confirm in chapters.chapter.
        return {'0': self.game,
                '1': self.good_ending_and_continue,
                'bad': self.bad_ending,
                '3': self.woods_area,
                }
    def good_ending_and_continue(self):
        """Simply plays the good ending scene and then drops the player into chapter 2."""
        self.good_ending()
        # Hand control straight over to the next chapter.
        Chapter3().game()
    def game(self):
        """start of ch2"""
        self.start()
        # Scene-setting narration; the float argument is the pause in seconds.
        print_sleep(
            'Upon driving the car through the broken roads area, the sun is certainly dwindling and time in the car'
            'says 2:35 AM.\nYou continue to grow yourself tired and restless from everything that had led to this '
            'point\n', 2.5)
        # Valid answers are the strings '1' and '2'.
        choices = [str(x) for x in range(1, 3)]
        choice_options = [
            'Due to the car getting low on gas, you must make a tough decision. (1) Drive back to the local gas '
            'station in town (2) Turn off the car and set up a camp fire in the woods: ']
        choice = _player_choice(choices, choice_options)
        if choice == '1':
            sounds.zombie_attack_inside()
            print_sleep(
                'While attempting to put the car in reverse and head backwards to the local gas station in town, '
                'a swarm of zombies arise on the car while the car gets stuck into gear!\n', 2.5)
            # user_attack() returns falsy when the player dies; stop the scene.
            if not player1.user_attack():
                return
            player1.total_kills += 5
            print_green('You have successfully killed off the heaping swarm of zombies surrounding the car!\n', 1)
            self.continue_message()
        elif choice == '2':
            print_sleep(
                'You have parked the car near the closet woods area and now need to gather up some supplies for a camp '
                'fire.\n', 2)
            self.woods_area()
    def woods_area(self):
        """Checkpoint save 3"""
        # Persist progress so a reload resumes at this area (see checkpoints()).
        player1.checkpoint_save('3')
        print_sleep(
            'You have successfully gathered up some sticks and still need a source of flame to begin the campfire.\n',
            2)
        choices = [str(x) for x in range(1, 3)]
        choice_options = [
            'You can either test your luck in creating a fire by (1) Creating friction: Use sticks and rub against '
            'nearby wood chips (2) Search for other useful resources: ']
        choice = _player_choice(choices, choice_options)
        if choice == '1':
            sounds.flame_ignite()
            print_sleep('Whoosh! after a few minutes of trying to create friction, the birth of a small ash turns into '
                        'a flame!\n', 2.5)
            self.continue_message()
        elif choice == '2':
            sounds.zombie_attack_outside()
            print_red(
                'Whilst looking around for more resources, you begin hearing a group of 3 zombies running towards '
                'you!\n', 2)
            # Combat; abort the scene on player death.
            if not player1.user_attack():
                return
            player1.total_kills += 3
            print_green('You have successfully killed off the group of 3 zombies running towards you!\n', 1)
            self.continue_message()
| chapters/chapter2.py | 3,800 | Contains all the main chapter 2 areas of the game.
runs movement to levels -- checkpoint when leaving area
start of ch2
Simply plays the good ending scene and then drops the player into chapter 2.
Checkpoint save 3
This file holds all the chapter 2 areas of the game.
from classes import Player, Difficulty | 309 | en | 0.919212 |
import numpy
from fframework import asfunction, OpFunction
__all__ = ['Angle']
class Angle(OpFunction):
    """Transforms a mesh into the angle of the mesh to the x axis."""
    def __init__(self, mesh):
        """*mesh* is the mesh Function."""
        self.mesh = asfunction(mesh)
    def __call__(self, ps):
        """Return arctan2 of the mesh at *ps*; the (y, x) coordinate pair
        lives in the last dimension of the mesh output."""
        transposed = self.mesh(ps).T
        y_coords = transposed[0]
        x_coords = transposed[1]
        return numpy.arctan2(y_coords, x_coords).T
| moviemaker3/math/angle.py | 494 | Transforms a mesh into the angle of the mesh to the x axis.
Returns the arctan2. The (y, x) coordinate is in the last
dimension.
*mesh* is the mesh Function. | 159 | en | 0.776611 |
from pathlib import Path
import numba
import numpy as np
from det3d.core.bbox.geometry import (
points_count_convex_polygon_3d_jit,
points_in_convex_polygon_3d_jit,
)
try:
from spconv.utils import rbbox_intersection, rbbox_iou
except:
print("Import spconv fail, no support for sparse convolution!")
def points_count_rbbox(points, rbbox, z_axis=2, origin=(0.5, 0.5, 0.5)):
    """Count how many of *points* fall inside each rotated 3D box in *rbbox*."""
    corners = center_to_corner_box3d(
        rbbox[:, :3], rbbox[:, 3:6], rbbox[:, -1], origin=origin, axis=z_axis
    )
    faces = corner_to_surfaces_3d(corners)
    return points_count_convex_polygon_3d_jit(points[:, :3], faces)
def riou_cc(rbboxes, qrbboxes, standup_thresh=0.0):
    """Rotated-2D IoU on CPU (<50 ms single-threaded; ~10x slower than GPU)."""
    corners_a = center_to_corner_box2d(
        rbboxes[:, :2], rbboxes[:, 2:4], rbboxes[:, 4]
    )
    corners_b = center_to_corner_box2d(
        qrbboxes[:, :2], qrbboxes[:, 2:4], qrbboxes[:, 4]
    )
    standup_a = corner_to_standup_nd(corners_a)
    standup_b = corner_to_standup_nd(corners_b)
    # Axis-aligned pre-test: if the standup hulls do not overlap, the
    # rotated boxes cannot overlap either.
    standup_iou = iou_jit(standup_a, standup_b, eps=0.0)
    return rbbox_iou(corners_a, corners_b, standup_iou, standup_thresh)
def rinter_cc(rbboxes, qrbboxes, standup_thresh=0.0):
    """Rotated-2D intersection area on CPU (same pre-test scheme as riou_cc)."""
    corners_a = center_to_corner_box2d(
        rbboxes[:, :2], rbboxes[:, 2:4], rbboxes[:, 4]
    )
    corners_b = center_to_corner_box2d(
        qrbboxes[:, :2], qrbboxes[:, 2:4], qrbboxes[:, 4]
    )
    standup_a = corner_to_standup_nd(corners_a)
    standup_b = corner_to_standup_nd(corners_b)
    # Disjoint axis-aligned hulls imply a zero rotated intersection.
    standup_iou = iou_jit(standup_a, standup_b, eps=0.0)
    return rbbox_intersection(
        corners_a, corners_b, standup_iou, standup_thresh
    )
def corners_nd(dims, origin=0.5):
    """Relative corner offsets for N axis-aligned boxes.

    Args:
        dims (float array, shape=[N, ndim]): per-box extent along each axis.
        origin (list/array/float): anchor point inside the unit box.
    Returns:
        float array, shape=[N, 2**ndim, ndim]: corner offsets. 2D corners are
        clockwise starting at the minimum corner; 3D follows the matching
        x0y0z0, x0y0z1, x0y1z1, x0y1z0, x1y0z0, x1y0z1, x1y1z1, x1y1z0 order.
    """
    ndim = int(dims.shape[1])
    # Enumerate all 2**ndim binary corner tuples (each coordinate 0 or 1).
    corner_bits = np.unravel_index(np.arange(2 ** ndim), [2] * ndim)
    corners_norm = np.stack(corner_bits, axis=1).astype(dims.dtype)
    # Reorder into the conventions expected by the rest of this module.
    if ndim == 2:
        corners_norm = corners_norm[[0, 1, 3, 2]]
    elif ndim == 3:
        corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]]
    corners_norm = corners_norm - np.array(origin, dtype=dims.dtype)
    return dims.reshape([-1, 1, ndim]) * corners_norm.reshape([1, 2 ** ndim, ndim])
@numba.njit
def corners_2d_jit(dims, origin=0.5):
    # Numba (nopython) version of corners_nd for the 2D case.
    # dims: (N, 2) per-box sizes; returns (N, 4, 2) corner offsets around `origin`.
    ndim = 2
    # Clockwise unit square starting at the minimum corner (matches corners_nd).
    corners_norm = np.array([[0, 0], [0, 1], [1, 1], [1, 0]], dtype=dims.dtype)
    corners_norm = corners_norm - np.array(origin, dtype=dims.dtype)
    corners = dims.reshape((-1, 1, ndim)) * corners_norm.reshape((1, 2 ** ndim, ndim))
    return corners
@numba.njit
def corners_3d_jit(dims, origin=0.5):
    # Numba (nopython) version of corners_nd for the 3D case.
    # dims: (N, 3) per-box sizes; returns (N, 8, 3) corner offsets around `origin`.
    ndim = 3
    # The 8 binary corner tuples of the unit cube, flattened row-major.
    corners_norm = np.array(
        [0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1],
        dtype=dims.dtype,
    ).reshape((8, 3))
    # Reorder so the corner sequence matches corners_nd's 3D convention.
    corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]]
    corners_norm = corners_norm - np.array(origin, dtype=dims.dtype)
    corners = dims.reshape((-1, 1, ndim)) * corners_norm.reshape((1, 2 ** ndim, ndim))
    return corners
@numba.njit
def corner_to_standup_nd_jit(boxes_corner):
    # Axis-aligned ("standup") hull per box: the first ndim output columns are
    # the per-axis minima, the last ndim columns the maxima.
    # boxes_corner: (N, num_corners, ndim) -> result: (N, 2 * ndim).
    num_boxes = boxes_corner.shape[0]
    ndim = boxes_corner.shape[-1]
    result = np.zeros((num_boxes, ndim * 2), dtype=boxes_corner.dtype)
    for i in range(num_boxes):
        for j in range(ndim):
            result[i, j] = np.min(boxes_corner[i, :, j])
        for j in range(ndim):
            result[i, j + ndim] = np.max(boxes_corner[i, :, j])
    return result
def corner_to_standup_nd(boxes_corner):
    """Axis-aligned hull of each corner set: (N, C, ndim) -> (N, 2*ndim),
    laid out as [mins..., maxes...]."""
    assert len(boxes_corner.shape) == 3
    lower = np.min(boxes_corner, axis=1)
    upper = np.max(boxes_corner, axis=1)
    return np.concatenate([lower, upper], -1)
def rbbox2d_to_near_bbox(rbboxes):
    """convert rotated bbox to nearest 'standing' or 'lying' bbox.
    Args:
        rbboxes: [N, 5(x, y, xdim, ydim, rad)] rotated bboxes
    Returns:
        bboxes: [N, 4(xmin, ymin, xmax, ymax)] bboxes
    """
    rotations = rbboxes[..., -1]
    folded = np.abs(limit_period(rotations, 0.5, np.pi))
    # Boxes rotated closer to 90 degrees get xdim/ydim swapped.
    swap = (folded > np.pi / 4)[..., np.newaxis]
    centered = np.where(swap, rbboxes[:, [0, 1, 3, 2]], rbboxes[:, :4])
    return center_to_minmax_2d(centered[:, :2], centered[:, 2:])
def rotation_3d_in_axis(points, angles, axis=0):
    """Rotate batched point sets about one coordinate axis.

    Args:
        points: (N, point_size, 3) point sets.
        angles: (N,) per-batch rotation angles.
        axis: 0, 1, 2 or -1 (alias of 2).
    Returns:
        (N, point_size, 3) rotated points.
    Raises:
        ValueError: for any other axis value.
    """
    sin_t = np.sin(angles)
    cos_t = np.cos(angles)
    one = np.ones_like(cos_t)
    zero = np.zeros_like(cos_t)
    # Transposed rotation matrices, one per batch element (shape 3 x 3 x N).
    if axis == 1:
        rows = [[cos_t, zero, -sin_t], [zero, one, zero], [sin_t, zero, cos_t]]
    elif axis == 2 or axis == -1:
        rows = [[cos_t, -sin_t, zero], [sin_t, cos_t, zero], [zero, zero, one]]
    elif axis == 0:
        rows = [[zero, cos_t, -sin_t], [zero, sin_t, cos_t], [one, zero, zero]]
    else:
        raise ValueError("axis should in range")
    rot_mat_T = np.stack(rows)
    # Batched matmul: result[a, i, k] = sum_j points[a, i, j] * rot[j, k, a].
    return np.einsum("aij,jka->aik", points, rot_mat_T)
def rotation_points_single_angle(points, angle, axis=0):
    """Rotate an (N, 3) point set by one scalar *angle* about *axis*.

    Raises ValueError for an axis other than 0, 1, 2 or -1.
    """
    s = np.sin(angle)
    c = np.cos(angle)
    # Transposed rotation matrix for the requested axis.
    if axis == 1:
        mat = [[c, 0, -s], [0, 1, 0], [s, 0, c]]
    elif axis == 2 or axis == -1:
        mat = [[c, -s, 0], [s, c, 0], [0, 0, 1]]
    elif axis == 0:
        mat = [[1, 0, 0], [0, c, -s], [0, s, c]]
    else:
        raise ValueError("axis should in range")
    rot_mat_T = np.array(mat, dtype=points.dtype)
    return points @ rot_mat_T
def rotation_2d(points, angles):
    """rotation 2d points based on origin point clockwise when angle positive.
    Args:
        points (float array, shape=[N, point_size, 2]): points to be rotated.
        angles (float array, shape=[N]): rotation angle.
    Returns:
        float array: same shape as points
    """
    sin_t = np.sin(angles)
    cos_t = np.cos(angles)
    # Per-batch transposed rotation matrices, shape (2, 2, N).
    rot_mat_T = np.stack([[cos_t, -sin_t], [sin_t, cos_t]])
    return np.einsum("aij,jka->aik", points, rot_mat_T)
def rotation_box(box_corners, angle):
    """rotation 2d points based on origin point clockwise when angle positive.
    Args:
        points (float array, shape=[N, point_size, 2]): points to be rotated.
        angle (float): rotation angle.
    Returns:
        float array: same shape as points
    """
    s = np.sin(angle)
    c = np.cos(angle)
    rot_mat_T = np.array([[c, -s], [s, c]], dtype=box_corners.dtype)
    return box_corners @ rot_mat_T
def center_to_corner_box3d(centers, dims, angles=None, origin=(0.5, 0.5, 0.5), axis=2):
    """Convert center-format 3D boxes (KITTI-style) to their 8 corners.

    Args:
        centers (float array, shape=[N, 3]): box locations.
        dims (float array, shape=[N, 3]): box dimensions.
        angles (float array, shape=[N] or None): yaw (rotation_y in KITTI).
        origin (list/array/float): anchor inside the unit box —
            [0.5, 1.0, 0.5] in camera frame, [0.5, 0.5, 0] in lidar frame.
        axis (int): rotation axis — 1 for camera, 2 for lidar.
    Returns:
        float array, shape=[N, 8, 3]: box corners.
    """
    corners = corners_nd(dims, origin=origin)  # (N, 8, 3) offsets
    if angles is not None:
        corners = rotation_3d_in_axis(corners, angles, axis=axis)
    return corners + centers.reshape([-1, 1, 3])
def center_to_corner_box2d(centers, dims, angles=None, origin=0.5):
    """Convert center-format 2D boxes to their 4 corners.

    Format: center(xy), dims(xy), angles (clockwise when positive).
    Args:
        centers (float array, shape=[N, 2]): box locations.
        dims (float array, shape=[N, 2]): box dimensions.
        angles (float array, shape=[N] or None): rotation angles.
    Returns:
        float array, shape=[N, 4, 2]: box corners.
    """
    corners = corners_nd(dims, origin=origin)  # (N, 4, 2) offsets
    if angles is not None:
        corners = rotation_2d(corners, angles)
    return corners + centers.reshape([-1, 1, 2])
@numba.jit(nopython=True)
def box2d_to_corner_jit(boxes):
    # boxes: (N, 5) [x, y, xdim, ydim, angle] -> (N, 4, 2) rotated corners.
    num_box = boxes.shape[0]
    # Unit square corners (0,0), (0,1), (1,1), (1,0), then centered on origin.
    corners_norm = np.zeros((4, 2), dtype=boxes.dtype)
    corners_norm[1, 1] = 1.0
    corners_norm[2] = 1.0
    corners_norm[3, 0] = 1.0
    corners_norm -= np.array([0.5, 0.5], dtype=boxes.dtype)
    # Scale the unit corners by each box's (xdim, ydim).
    corners = boxes.reshape(num_box, 1, 5)[:, :, 2:4] * corners_norm.reshape(1, 4, 2)
    rot_mat_T = np.zeros((2, 2), dtype=boxes.dtype)
    box_corners = np.zeros((num_box, 4, 2), dtype=boxes.dtype)
    for i in range(num_box):
        # Build the transposed rotation matrix, then rotate and translate.
        rot_sin = np.sin(boxes[i, -1])
        rot_cos = np.cos(boxes[i, -1])
        rot_mat_T[0, 0] = rot_cos
        rot_mat_T[0, 1] = -rot_sin
        rot_mat_T[1, 0] = rot_sin
        rot_mat_T[1, 1] = rot_cos
        box_corners[i] = corners[i] @ rot_mat_T + boxes[i, :2]
    return box_corners
def rbbox3d_to_corners(rbboxes, origin=(0.5, 0.5, 0.5), axis=2):
    """Corners (N, 8, 3) of (N, 7) rotated boxes [x, y, z, xd, yd, zd, rot].

    Fix: the default `origin` was a mutable list; a tuple avoids the shared
    mutable-default pitfall and is consumed identically downstream (it is
    converted with np.array inside corners_nd).
    """
    return center_to_corner_box3d(
        rbboxes[..., :3], rbboxes[..., 3:6], rbboxes[..., 6], origin, axis=axis
    )
def rbbox3d_to_bev_corners(rbboxes, origin=0.5):
    """Bird's-eye-view (x, y) corners of (N, 7) rotated 3D boxes."""
    bev_centers = rbboxes[..., :2]
    bev_dims = rbboxes[..., 3:5]
    return center_to_corner_box2d(bev_centers, bev_dims, rbboxes[..., 6], origin)
def minmax_to_corner_2d(minmax_box):
    """[mins..., maxes...] 2D boxes -> 4 corners (no rotation)."""
    ndim = minmax_box.shape[-1] // 2
    lower = minmax_box[..., :ndim]
    extents = minmax_box[..., ndim:] - lower
    # origin=0.0 anchors the box at its minimum corner.
    return center_to_corner_box2d(lower, extents, origin=0.0)
def minmax_to_corner_2d_v2(minmax_box):
    """(N, 4) [xmin, ymin, xmax, ymax] -> (N, 4, 2) corners, no intermediates."""
    # Corner order: (x0,y0), (x0,y1), (x1,y1), (x1,y0).
    corner_cols = [0, 1, 0, 3, 2, 3, 2, 1]
    return minmax_box[..., corner_cols].reshape(-1, 4, 2)
def minmax_to_corner_3d(minmax_box):
    """[mins..., maxes...] 3D boxes -> 8 corners (no rotation)."""
    ndim = minmax_box.shape[-1] // 2
    lower = minmax_box[..., :ndim]
    extents = minmax_box[..., ndim:] - lower
    # origin=0.0 anchors the box at its minimum corner.
    return center_to_corner_box3d(lower, extents, origin=0.0)
def minmax_to_center_2d(minmax_box):
    """[mins..., maxes...] boxes -> [centers..., dims...] boxes."""
    ndim = minmax_box.shape[-1] // 2
    lower = minmax_box[..., :ndim]
    dims = minmax_box[..., ndim:] - lower
    centers = lower + 0.5 * dims
    return np.concatenate([centers, dims], axis=-1)
def center_to_minmax_2d_0_5(centers, dims):
    """Center-format boxes -> [mins..., maxes...], assuming a centered origin."""
    half = dims / 2
    return np.concatenate([centers - half, centers + half], axis=-1)
def center_to_minmax_2d(centers, dims, origin=0.5):
    """Center-format 2D boxes -> [xmin, ymin, xmax, ymax]."""
    if origin == 0.5:
        # Symmetric origin: no corner construction required.
        return center_to_minmax_2d_0_5(centers, dims)
    corners = center_to_corner_box2d(centers, dims, origin=origin)
    # Corners 0 and 2 are the minimum and maximum corners respectively.
    return corners[:, [0, 2]].reshape([-1, 4])
def limit_period(val, offset=0.5, period=np.pi):
    """Wrap *val* into the half-open range [-offset*period, (1-offset)*period)."""
    whole_turns = np.floor(val / period + offset)
    return val - whole_turns * period
def projection_matrix_to_CRT_kitti(proj):
    """Split a KITTI camera projection P = C @ [R|T] into (C, R, T).

    C is upper triangular, so a QR factorization of (C @ R)^-1 recovers an
    orthogonal R^-1 and triangular C^-1; this is numerically stable for every
    KITTI camera projection matrix.
    """
    CR = proj[0:3, 0:3]
    CT = proj[0:3, 3]
    Rinv, Cinv = np.linalg.qr(np.linalg.inv(CR))
    C = np.linalg.inv(Cinv)
    R = np.linalg.inv(Rinv)
    T = Cinv @ CT
    return C, R, T
def get_frustum(bbox_image, C, near_clip=0.001, far_clip=100):
    """Camera-frame frustum corners (8, 3) for one image-space bbox.

    The first four rows are the near-plane corners, the last four the
    far-plane corners; the last column holds the clip depths.
    """
    fku = C[0, 0]
    fkv = -C[1, 1]
    u0v0 = C[0:2, 2]
    b = bbox_image
    box_corners = np.array(
        [[b[0], b[1]], [b[0], b[3]], [b[2], b[3]], [b[2], b[1]]], dtype=C.dtype
    )
    # Back-project pixel corners to metric x/y at each clip plane.
    near_scale = np.array([fku / near_clip, -fkv / near_clip], dtype=C.dtype)
    far_scale = np.array([fku / far_clip, -fkv / far_clip], dtype=C.dtype)
    near_corners = (box_corners - u0v0) / near_scale
    far_corners = (box_corners - u0v0) / far_scale
    z_points = np.array([near_clip] * 4 + [far_clip] * 4, dtype=C.dtype)[:, np.newaxis]
    ret_xy = np.concatenate([near_corners, far_corners], axis=0)  # [8, 2]
    return np.concatenate([ret_xy, z_points], axis=1)
def get_frustum_v2(bboxes, C, near_clip=0.001, far_clip=100):
    """Batched camera-frame frustums, (num_box, 8, 3), for image bboxes."""
    fku = C[0, 0]
    fkv = -C[1, 1]
    u0v0 = C[0:2, 2]
    num_box = bboxes.shape[0]
    z_points = np.array([near_clip] * 4 + [far_clip] * 4, dtype=C.dtype)[
        np.newaxis, :, np.newaxis
    ]
    z_points = np.tile(z_points, [num_box, 1, 1])
    box_corners = minmax_to_corner_2d_v2(bboxes)
    # Back-project pixel corners to metric x/y at the two clip planes.
    near_scale = np.array([fku / near_clip, -fkv / near_clip], dtype=C.dtype)
    far_scale = np.array([fku / far_clip, -fkv / far_clip], dtype=C.dtype)
    near_corners = (box_corners - u0v0) / near_scale
    far_corners = (box_corners - u0v0) / far_scale
    ret_xy = np.concatenate([near_corners, far_corners], axis=1)  # [8, 2]
    return np.concatenate([ret_xy, z_points], axis=-1)
@numba.njit
def _add_rgb_to_points_kernel(points_2d, image, points_rgb):
    # For each projected 2D point that lands inside the image bounds, copy the
    # pixel at (floor(x), floor(y)) into points_rgb; out-of-bounds points keep
    # whatever points_rgb already holds (zeros from the caller).
    num_points = points_2d.shape[0]
    image_h, image_w = image.shape[:2]
    for i in range(num_points):
        img_pos = np.floor(points_2d[i]).astype(np.int32)
        if img_pos[0] >= 0 and img_pos[0] < image_w:
            if img_pos[1] >= 0 and img_pos[1] < image_h:
                # Image is indexed (row=y, col=x).
                points_rgb[i, :] = image[img_pos[1], img_pos[0], :]
                # image[img_pos[1], img_pos[0]] = 0
def add_rgb_to_points(points, image, rect, Trv2c, P2, mean_size=(5, 5)):
    """Sample per-point RGB values from *image* for lidar *points*.

    Points are moved to the camera frame, projected with P2, and the pixel
    under each projection is copied; points outside the image get zeros.

    Fix: `mean_size` was a mutable list default — now a tuple (np.ones and
    np.prod accept either, so behavior is unchanged).
    NOTE(review): `kernel` feeds the commented-out cv2.filter2D smoothing and
    is currently unused; retained so re-enabling the filter is a one-line edit.
    """
    kernel = np.ones(mean_size, np.float32) / np.prod(mean_size)
    # image = cv2.filter2D(image, -1, kernel)
    points_cam = lidar_to_camera(points[:, :3], rect, Trv2c)
    points_2d = project_to_image(points_cam, P2)
    points_rgb = np.zeros([points_cam.shape[0], 3], dtype=points.dtype)
    _add_rgb_to_points_kernel(points_2d, image, points_rgb)
    return points_rgb
def project_to_image(points_3d, proj_mat):
    """Project (..., 3) camera-frame points to (..., 2) pixels via *proj_mat*."""
    pad_shape = list(points_3d.shape)
    pad_shape[-1] = 1
    homogeneous = np.concatenate([points_3d, np.ones(pad_shape)], axis=-1)
    projected = homogeneous @ proj_mat.T
    # Perspective divide by the depth component.
    return projected[..., :2] / projected[..., 2:3]
def camera_to_lidar(points, r_rect, velo2cam):
    """Map camera-frame points to lidar frame by inverting rect @ velo2cam."""
    if points.shape[-1] == 3:
        # Pad to homogeneous coordinates.
        pad_shape = list(points.shape[0:-1]) + [1]
        points = np.concatenate([points, np.ones(pad_shape)], axis=-1)
    lidar_points = points @ np.linalg.inv((r_rect @ velo2cam).T)
    return lidar_points[..., :3]
def lidar_to_camera(points, r_rect, velo2cam):
    """Map lidar-frame points to camera frame via rect @ velo2cam."""
    if points.shape[-1] == 3:
        # Pad to homogeneous coordinates.
        pad_shape = list(points.shape[:-1]) + [1]
        points = np.concatenate([points, np.ones(pad_shape)], axis=-1)
    camera_points = points @ (r_rect @ velo2cam).T
    return camera_points[..., :3]
def box_camera_to_lidar(data, r_rect, velo2cam):
    """(N, 7) camera boxes [x, y, z, l, h, w, r] -> lidar [x, y, z, w, l, h, r]."""
    cam_xyz = data[:, 0:3]
    l, h, w = data[:, 3:4], data[:, 4:5], data[:, 5:6]
    r = data[:, 6:7]
    lidar_xyz = camera_to_lidar(cam_xyz, r_rect, velo2cam)
    # Dimension order changes with the frame convention.
    return np.concatenate([lidar_xyz, w, l, h, r], axis=1)
def box_lidar_to_camera(data, r_rect, velo2cam):
    """(N, 7) lidar boxes [x, y, z, w, l, h, r] -> camera [x, y, z, l, h, w, r]."""
    lidar_xyz = data[:, 0:3]
    w, l, h = data[:, 3:4], data[:, 4:5], data[:, 5:6]
    r = data[:, 6:7]
    cam_xyz = lidar_to_camera(lidar_xyz, r_rect, velo2cam)
    # Dimension order changes with the frame convention.
    return np.concatenate([cam_xyz, l, h, w, r], axis=1)
def remove_outside_points(points, rect, Trv2c, P2, image_shape):
    """Keep only lidar points whose projection lands inside the image.

    Builds the camera frustum of the full image, moves it into the lidar
    frame, and tests point containment (~5x faster than the v1 approach,
    2 ms vs 10 ms).
    """
    C, R, T = projection_matrix_to_CRT_kitti(P2)
    image_bbox = [0, 0, image_shape[1], image_shape[0]]
    frustum = get_frustum(image_bbox, C)
    # Undo the camera extrinsics, then map the frustum into the lidar frame.
    frustum -= T
    frustum = np.linalg.inv(R) @ frustum.T
    frustum = camera_to_lidar(frustum.T, rect, Trv2c)
    frustum_surfaces = corner_to_surfaces_3d_jit(frustum[np.newaxis, ...])
    keep = points_in_convex_polygon_3d_jit(points[:, :3], frustum_surfaces)
    return points[keep.reshape([-1])]
@numba.jit(nopython=True)
def iou_jit(boxes, query_boxes, eps=1.0):
    """calculate box iou. note that jit version runs 2x faster than cython in
    my machine!
    Parameters
    ----------
    boxes: (N, 4) ndarray of float
    query_boxes: (K, 4) ndarray of float
    eps: inclusive-extent offset (1.0 for pixel boxes, 0.0 for continuous
        coordinates — see the eps=0.0 callers in this module).
    Returns
    -------
    overlaps: (N, K) ndarray of overlap between boxes and query_boxes
    """
    N = boxes.shape[0]
    K = query_boxes.shape[0]
    overlaps = np.zeros((N, K), dtype=boxes.dtype)
    for k in range(K):
        # Area of query box k ([xmin, ymin, xmax, ymax] layout).
        box_area = (query_boxes[k, 2] - query_boxes[k, 0] + eps) * (
            query_boxes[k, 3] - query_boxes[k, 1] + eps
        )
        for n in range(N):
            # Intersection width; non-positive means no horizontal overlap.
            iw = (
                min(boxes[n, 2], query_boxes[k, 2])
                - max(boxes[n, 0], query_boxes[k, 0])
                + eps
            )
            if iw > 0:
                # Intersection height; non-positive means no vertical overlap.
                ih = (
                    min(boxes[n, 3], query_boxes[k, 3])
                    - max(boxes[n, 1], query_boxes[k, 1])
                    + eps
                )
                if ih > 0:
                    # Union area = areaA + areaB - intersection.
                    ua = (
                        (boxes[n, 2] - boxes[n, 0] + eps)
                        * (boxes[n, 3] - boxes[n, 1] + eps)
                        + box_area
                        - iw * ih
                    )
                    overlaps[n, k] = iw * ih / ua
    return overlaps
@numba.jit(nopython=True)
def iou_3d_jit(boxes, query_boxes, add1=True):
    """calculate box iou3d.
    Parameters
    ----------
    boxes: (N, 6) ndarray of float, [xmin, ymin, zmin, xmax, ymax, zmax]
    query_boxes: (K, 6) ndarray of float
    add1: when True, extents are made inclusive by adding 1 (pixel-style boxes)
    Returns
    -------
    overlaps: (N, K) ndarray of overlap between boxes and query_boxes
    """
    N = boxes.shape[0]
    K = query_boxes.shape[0]
    overlaps = np.zeros((N, K), dtype=boxes.dtype)
    # Reuse the flag variable as the numeric "+1" inclusive-extent offset.
    if add1:
        add1 = 1.0
    else:
        add1 = 0.0
    for k in range(K):
        # Volume of query box k.
        box_area = (
            (query_boxes[k, 3] - query_boxes[k, 0] + add1)
            * (query_boxes[k, 4] - query_boxes[k, 1] + add1)
            * (query_boxes[k, 5] - query_boxes[k, 2] + add1)
        )
        for n in range(N):
            # Overlap extent along x; non-positive means disjoint.
            iw = (
                min(boxes[n, 3], query_boxes[k, 3])
                - max(boxes[n, 0], query_boxes[k, 0])
                + add1
            )
            if iw > 0:
                # Overlap extent along y.
                ih = (
                    min(boxes[n, 4], query_boxes[k, 4])
                    - max(boxes[n, 1], query_boxes[k, 1])
                    + add1
                )
                if ih > 0:
                    # Overlap extent along z.
                    il = (
                        min(boxes[n, 5], query_boxes[k, 5])
                        - max(boxes[n, 2], query_boxes[k, 2])
                        + add1
                    )
                    if il > 0:
                        # Union volume = volA + volB - intersection.
                        ua = float(
                            (boxes[n, 3] - boxes[n, 0] + add1)
                            * (boxes[n, 4] - boxes[n, 1] + add1)
                            * (boxes[n, 5] - boxes[n, 2] + add1)
                            + box_area
                            - iw * ih * il
                        )
                        overlaps[n, k] = iw * ih * il / ua
    return overlaps
@numba.jit(nopython=True)
def iou_nd_jit(boxes, query_boxes, add1=True):
    """calculate box iou nd, 2x slower than iou_jit.
    Parameters
    ----------
    boxes: (N, ndim * 2) ndarray of float, [mins..., maxes...] layout
    query_boxes: (K, ndim * 2) ndarray of float
    add1: when True, extents are made inclusive by adding 1 (pixel-style boxes)
    Returns
    -------
    overlaps: (N, K) ndarray of overlap between boxes and query_boxes
    """
    N = boxes.shape[0]
    K = query_boxes.shape[0]
    ndim = boxes.shape[1] // 2
    overlaps = np.zeros((N, K), dtype=boxes.dtype)
    # Scratch buffer for per-dimension overlap extents.
    side_lengths = np.zeros((ndim,), dtype=boxes.dtype)
    # Reuse the flag variable as the numeric "+1" inclusive-extent offset.
    if add1:
        add1 = 1.0
    else:
        add1 = 0.0
    invalid = False
    for k in range(K):
        # N-dimensional volume of query box k.
        qbox_area = query_boxes[k, ndim] - query_boxes[k, 0] + add1
        for i in range(1, ndim):
            qbox_area *= query_boxes[k, ndim + i] - query_boxes[k, i] + add1
        for n in range(N):
            invalid = False
            for i in range(ndim):
                # Overlap extent along dimension i; any non-positive extent
                # means the two boxes are disjoint.
                side_length = (
                    min(boxes[n, i + ndim], query_boxes[k, i + ndim])
                    - max(boxes[n, i], query_boxes[k, i])
                    + add1
                )
                if side_length <= 0:
                    invalid = True
                    break
                side_lengths[i] = side_length
            if not invalid:
                box_area = boxes[n, ndim] - boxes[n, 0] + add1
                for i in range(1, ndim):
                    box_area *= boxes[n, ndim + i] - boxes[n, i] + add1
                inter = side_lengths[0]
                for i in range(1, ndim):
                    inter *= side_lengths[i]
                # inter = np.prod(side_lengths)
                ua = float(box_area + qbox_area - inter)
                overlaps[n, k] = inter / ua
    return overlaps
def points_in_rbbox(points, rbbox, z_axis=2, origin=(0.5, 0.5, 0.5)):
    """Boolean membership matrix of *points* in rotated 3D boxes *rbbox*."""
    corners = center_to_corner_box3d(
        rbbox[:, :3], rbbox[:, 3:6], rbbox[:, -1], origin=origin, axis=z_axis
    )
    faces = corner_to_surfaces_3d(corners)
    return points_in_convex_polygon_3d_jit(points[:, :3], faces)
def corner_to_surfaces_3d(corners):
    """convert 3d box corners from corner function above
    to surfaces that normal vectors all direct to internal.
    Args:
        corners (float array, [N, 8, 3]): 3d box corners.
    Returns:
        surfaces (float array, [N, 6, 4, 3]):
    """
    # box_corners must come from corner functions in this module.
    # 6 faces x 4 corner indices each, wound so face normals point inward
    # (same ordering as corner_to_surfaces_3d_jit).
    face_idx = np.array(
        [
            [0, 1, 2, 3],
            [7, 6, 5, 4],
            [0, 3, 7, 4],
            [1, 5, 6, 2],
            [0, 4, 5, 1],
            [3, 2, 6, 7],
        ]
    )
    # Fancy-indexing the corner axis yields (N, 6, 4, 3) directly.
    return corners[:, face_idx, :]
@numba.jit(nopython=True)
def corner_to_surfaces_3d_jit(corners):
    """convert 3d box corners from corner function above
    to surfaces that normal vectors all direct to internal.
    Args:
        corners (float array, [N, 8, 3]): 3d box corners.
    Returns:
        surfaces (float array, [N, 6, 4, 3]):
    """
    # box_corners: [N, 8, 3], must from corner functions in this module
    num_boxes = corners.shape[0]
    surfaces = np.zeros((num_boxes, 6, 4, 3), dtype=corners.dtype)
    # 6 faces x 4 corner indices each; same face ordering as the non-jit
    # corner_to_surfaces_3d above.
    corner_idxes = np.array(
        [0, 1, 2, 3, 7, 6, 5, 4, 0, 3, 7, 4, 1, 5, 6, 2, 0, 4, 5, 1, 3, 2, 6, 7]
    ).reshape(6, 4)
    for i in range(num_boxes):
        for j in range(6):
            for k in range(4):
                surfaces[i, j, k] = corners[i, corner_idxes[j, k]]
    return surfaces
def assign_label_to_voxel(gt_boxes, coors, voxel_size, coors_range):
    """assign a 0/1 label to each voxel based on whether
    the center of voxel is in gt_box. LIDAR.
    """
    voxel_size = np.array(voxel_size, dtype=gt_boxes.dtype)
    coors_range = np.array(coors_range, dtype=gt_boxes.dtype)
    shift = coors_range[:3]
    # coors columns are reversed before scaling (presumably stored (z, y, x)
    # while voxel_size is (x, y, z) — confirm against the voxelizer).
    voxel_origins = coors[:, ::-1] * voxel_size + shift
    voxel_centers = voxel_origins + voxel_size * 0.5
    # Grow each gt box by one voxel (half a voxel per side) so voxels whose
    # centers sit just outside the box boundary still count as inside.
    gt_box_corners = center_to_corner_box3d(
        gt_boxes[:, :3] - voxel_size * 0.5,
        gt_boxes[:, 3:6] + voxel_size,
        gt_boxes[:, 6],
        origin=[0.5, 0.5, 0.5],
        axis=2,
    )
    gt_surfaces = corner_to_surfaces_3d(gt_box_corners)
    # (num_voxels, num_boxes) membership; label 1 if inside any gt box.
    ret = points_in_convex_polygon_3d_jit(voxel_centers, gt_surfaces)
    return np.any(ret, axis=1).astype(np.int64)
def assign_label_to_voxel_v3(gt_boxes, coors, voxel_size, coors_range):
    """assign a 0/1 label to each voxel based on whether
    the center of voxel is in gt_box. LIDAR.
    """
    voxel_size = np.array(voxel_size, dtype=gt_boxes.dtype)
    coors_range = np.array(coors_range, dtype=gt_boxes.dtype)
    shift = coors_range[:3]
    # coors columns are reversed before scaling (presumably stored (z, y, x)
    # while voxel_size is (x, y, z) — confirm against the voxelizer).
    voxel_origins = coors[:, ::-1] * voxel_size + shift
    voxel_maxes = voxel_origins + voxel_size
    voxel_minmax = np.concatenate([voxel_origins, voxel_maxes], axis=-1)
    # Unlike assign_label_to_voxel, test all 8 voxel corners, not the center.
    voxel_corners = minmax_to_corner_3d(voxel_minmax)
    gt_box_corners = center_to_corner_box3d(
        gt_boxes[:, :3],
        gt_boxes[:, 3:6],
        gt_boxes[:, 6],
        origin=[0.5, 0.5, 0.5],
        axis=2,
    )
    gt_surfaces = corner_to_surfaces_3d(gt_box_corners)
    voxel_corners_flat = voxel_corners.reshape([-1, 3])
    ret = points_in_convex_polygon_3d_jit(voxel_corners_flat, gt_surfaces)
    # Back to (num_voxels, 8 corners, num_boxes); 1 if ANY corner is in ANY box.
    ret = ret.reshape([-1, 8, ret.shape[-1]])
    return ret.any(-1).any(-1).astype(np.int64)
def image_box_region_area(img_cumsum, bbox):
    """Sum of image values inside each 2D box via a summed-area table.

    Used to check whether a 2d voxel is contained by a box (filters empty
    anchors).
    Summed-area table algorithm:
        ==> W
        ------------------
        |         |      |
        |------A---------B
        |         |      |
        |         |      |
        |----- C---------D
    Iabcd = ID-IB-IC+IA
    Args:
        img_cumsum: [M, H, W](yx) image cumsum'd along both spatial axes.
        bbox: [N, 4](xyxy) integer bounding boxes (used as indices).
    Returns:
        [M, N] array of per-channel box sums.

    Fix: removed the dead `ret = np.zeros([N, M], ...)` preallocation — it was
    immediately overwritten, and its shape was the transpose of the actual
    [M, N] result.
    """
    ID = img_cumsum[:, bbox[:, 3], bbox[:, 2]]
    IA = img_cumsum[:, bbox[:, 1], bbox[:, 0]]
    IB = img_cumsum[:, bbox[:, 3], bbox[:, 0]]
    IC = img_cumsum[:, bbox[:, 1], bbox[:, 2]]
    return ID - IB - IC + IA
def get_minimum_bounding_box_bv(points, voxel_size, bound, downsample=8, margin=1.6):
    """Axis-aligned BEV extent of *points*, snapped outward to the downsampled
    voxel grid, padded by *margin*, and clamped to *bound* [x0, y0, x1, y1]."""
    cell_x = voxel_size[0] * downsample
    cell_y = voxel_size[1] * downsample
    px = points[:, 0]
    py = points[:, 1]
    # Snap maxima up and minima down to whole downsampled cells.
    max_x = np.floor(px.max() / cell_x + 1) * cell_x
    max_y = np.floor(py.max() / cell_y + 1) * cell_y
    min_x = np.floor(px.min() / cell_x) * cell_x
    min_y = np.floor(py.min() / cell_y) * cell_y
    # Pad by the margin, but never exceed the scene bound.
    max_x = np.minimum(max_x + margin, bound[2])
    max_y = np.minimum(max_y + margin, bound[3])
    min_x = np.maximum(min_x - margin, bound[0])
    min_y = np.maximum(min_y - margin, bound[1])
    return np.array([min_x, min_y, max_x, max_y])
def box3d_to_bbox(box3d, rect, Trv2c, P2):
    """Project (N, 7) lidar-frame 3D boxes to 2D image bboxes [xmin, ymin, xmax, ymax].

    Fix: the camera-frame conversion (`box3d_to_cam`) was computed but never
    used — corners were built from raw lidar-frame values while applying
    camera conventions (origin [0.5, 1.0, 0.5], rotation axis 1). The
    converted boxes are now actually used, matching the SECOND reference
    implementation this module derives from.
    """
    box3d_to_cam = box_lidar_to_camera(box3d, rect, Trv2c)
    box_corners = center_to_corner_box3d(
        box3d_to_cam[:, :3],
        box3d_to_cam[:, 3:6],
        box3d_to_cam[:, 6],
        [0.5, 1.0, 0.5],
        axis=1,
    )
    box_corners_in_image = project_to_image(box_corners, P2)
    # box_corners_in_image: [N, 8, 2]
    minxy = np.min(box_corners_in_image, axis=1)
    maxxy = np.max(box_corners_in_image, axis=1)
    bbox = np.concatenate([minxy, maxxy], axis=1)
    return bbox
def change_box3d_center_(box3d, src, dst):
    """In-place shift of box centers from anchor *src* to anchor *dst*
    (both expressed as fractions of the box dimensions)."""
    shift = np.array(dst, dtype=box3d.dtype) - np.array(src, dtype=box3d.dtype)
    box3d[..., :3] += box3d[..., 3:6] * shift
def encode_parts(relative_shifts):
    """Encode each 2D shift as a quadrant index.

    Quadrants (x = shift[:, 0], y = shift[:, 1]):
        0: x >= 0, y >= 0    1: x < 0, y >= 0
        2: x < 0,  y < 0     3: x >= 0, y < 0

    Args:
        relative_shifts: [N, 2] array of (x, y) shifts.

    Returns:
        [N] int32 array of quadrant labels.
    """
    x_nonneg = relative_shifts[:, 0] >= 0
    y_nonneg = relative_shifts[:, 1] >= 0
    # Zeros already encode quadrant 0; only the other three need setting.
    parts = np.zeros((len(relative_shifts),), dtype=np.int32)
    parts[np.logical_and(~x_nonneg, y_nonneg)] = 1
    parts[np.logical_and(~x_nonneg, ~y_nonneg)] = 2
    parts[np.logical_and(x_nonneg, ~y_nonneg)] = 3
    return parts
the center of voxel is in gt_box. LIDAR.
assign a 0/1 label to each voxel based on whether
the center of voxel is in gt_box. LIDAR.
convert kitti locations, dimensions and angles to corners.
format: center(xy), dims(xy), angles(clockwise when positive)
Args:
centers (float array, shape=[N, 2]): locations in kitti label file.
dims (float array, shape=[N, 2]): dimensions in kitti label file.
angles (float array, shape=[N]): rotation_y in kitti label file.
Returns:
[type]: [description]
convert kitti locations, dimensions and angles to corners
Args:
centers (float array, shape=[N, 3]): locations in kitti label file.
dims (float array, shape=[N, 3]): dimensions in kitti label file.
angles (float array, shape=[N]): rotation_y in kitti label file.
origin (list or array or float): origin point relate to smallest point.
use [0.5, 1.0, 0.5] in camera and [0.5, 0.5, 0] in lidar.
axis (int): rotation axis. 1 for camera and 2 for lidar.
Returns:
[type]: [description]
convert 3d box corners from corner function above
to surfaces that normal vectors all direct to internal.
Args:
corners (float array, [N, 8, 3]): 3d box corners.
Returns:
surfaces (float array, [N, 6, 4, 3]):
convert 3d box corners from corner function above
to surfaces that normal vectors all direct to internal.
Args:
corners (float array, [N, 8, 3]): 3d box corners.
Returns:
surfaces (float array, [N, 6, 4, 3]):
generate relative box corners based on length per dim and
origin point.
Args:
dims (float array, shape=[N, ndim]): array of length per dim
origin (list or array or float): origin point relate to smallest point.
Returns:
float array, shape=[N, 2 ** ndim, ndim]: returned corners.
point layout example: (2d) x0y0, x0y1, x1y0, x1y1;
(3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1
where x0 < x1, y0 < y1, z0 < z1
check a 2d voxel is contained by a box. used to filter empty
anchors.
Summed-area table algorithm:
==> W
------------------
| | |
|------A---------B
| | |
| | |
|----- C---------D
Iabcd = ID-IB-IC+IA
Args:
img_cumsum: [M, H, W](yx) cumsumed image.
bbox: [N, 4](xyxy) bounding box,
calculate box iou3d,
----------
boxes: (N, 6) ndarray of float
query_boxes: (K, 6) ndarray of float
Returns
-------
overlaps: (N, K) ndarray of overlap between boxes and query_boxes
calculate box iou. note that jit version runs 2x faster than cython in
my machine!
Parameters
----------
boxes: (N, 4) ndarray of float
query_boxes: (K, 4) ndarray of float
Returns
-------
overlaps: (N, K) ndarray of overlap between boxes and query_boxes
calculate box iou nd, 2x slower than iou_jit.
----------
boxes: (N, ndim * 2) ndarray of float
query_boxes: (K, ndim * 2) ndarray of float
Returns
-------
overlaps: (N, K) ndarray of overlap between boxes and query_boxes
convert rotated bbox to nearest 'standing' or 'lying' bbox.
Args:
rbboxes: [N, 5(x, y, xdim, ydim, rad)] rotated bboxes
Returns:
bboxes: [N, 4(xmin, ymin, xmax, ymax)] bboxes
rotation 2d points based on origin point clockwise when angle positive.
Args:
points (float array, shape=[N, point_size, 2]): points to be rotated.
angles (float array, shape=[N]): rotation angle.
Returns:
float array: same shape as points
rotation 2d points based on origin point clockwise when angle positive.
Args:
points (float array, shape=[N, point_size, 2]): points to be rotated.
angle (float): rotation angle.
Returns:
float array: same shape as points
less than 50ms when used in second one thread. 10x slower than gpu if standup box not overlapped, rbbox not overlapped too. less than 50ms when used in second one thread. 10x slower than gpu if standup box not overlapped, rbbox not overlapped too. now corners_norm has format: (2d) x0y0, x0y1, x1y0, x1y1 (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1 so need to convert to a format which is convenient to do other computing. for 2d boxes, format is clockwise start with minimum point for 3d boxes, please draw lines by your hand. generate clockwise box corners points: [N, point_size, 3] points: [N, 3] 'length' in kitti format is in x axis. yzx(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar) center in kitti format is [0.5, 1.0, 0.5] in xyz. corners: [N, 8, 3] 'length' in kitti format is in x axis. xyz(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar) center in kitti format is [0.5, 1.0, 0.5] in xyz. corners: [N, 4, 2] N, 4 -> N 4 2 P = C @ [R|T] C is upper triangular matrix, so we need to inverse CR and use QR stable for all kitti camera projection matrix [8, 2] [8, 2] image[img_pos[1], img_pos[0]] = 0 image = cv2.filter2D(image, -1, kernel) 5x faster than remove_outside_points_v1(2ms vs 10ms) inter = np.prod(side_lengths) box_corners: [N, 8, 3], must from corner functions in this module box_corners: [N, 8, 3], must from corner functions in this module box_corners_in_image: [N, 8, 2] | 5,087 | en | 0.722076 |
from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HContractUnitR03_ConnectedLHS(HimesisPreConditionPatternLHS):

    def __init__(self):
        """
        Build the himesis graph representing the AToM3 model
        HContractUnitR03_ConnectedLHS (generated pre-condition pattern).
        """
        # Mark this instance as compiled up front.
        self.is_compiled = True

        super(HContractUnitR03_ConnectedLHS, self).__init__(
            name='HContractUnitR03_ConnectedLHS', num_nodes=0, edges=[])

        # This pattern starts with no edges.
        self.add_edges([])

        # Graph-level attributes.
        self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
        self["MT_constraint__"] = """return True"""
        self["name"] = """"""
        self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS, 'HContractUnitR03_ConnectedLHS')
        self["equations"] = []

        # Single match-class node: Class(Class).
        self.add_node()
        node = self.vs[0]
        node["MT_pre__attr1"] = """return True"""
        node["MT_label__"] = """1"""
        node["mm__"] = """MT_pre__Class"""
        node["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS, 'Class')

        # Still no edges to add between the nodes.
        self.add_edges([
        ])

    # Attribute-evaluation method for the match class.
    def eval_attr11(self, attr_value, this):
        return True

    # Constraint evaluation for match associations / the whole pattern.
    def constraint(self, PreNode, graph):
        return True
| UML2ER/contracts/unit/HContractUnitR03_ConnectedLHS.py | 1,298 | Creates the himesis graph representing the AToM3 model HContractUnitR03_ConnectedLHS
Flag this instance as compiled now Add the edges Set the graph attributes Set the node attributes match class Class(Class) node Add the edges define evaluation methods for each match class. define evaluation methods for each match association. | 330 | en | 0.706277 |
import numpy as np
import networkx as nx
if __name__ == '__main__':
from ged4py.algorithm import graph_edit_dist
else:
from .ged4py.algorithm import graph_edit_dist
def rearrange_adj_matrix(matrix, ordering):
    """Permute the vertices of a square adjacency matrix.

    Applies `ordering` to both rows and columns so vertex connectivity
    is preserved under the relabelling.
    """
    assert matrix.ndim == 2
    n_rows, n_cols = matrix.shape
    # Adjacency matrices must be square.
    assert n_rows == n_cols
    assert len(ordering) == n_rows
    # Permute rows first, then columns, with the same ordering.
    permuted = matrix[ordering, :]
    permuted = permuted[:, ordering]
    return permuted
def rand_permute_adj_matrix(matrix):
    """Randomly permute the order of vertices in the adjacency matrix,
    keeping the connectivity between them intact."""
    order = np.arange(matrix.shape[0])
    # Shuffle in place so seeded RNG behavior matches the original code.
    np.random.shuffle(order)
    return rearrange_adj_matrix(matrix, order)
def ged_from_adj(adj_mat_1, adj_mat_2, directed=False, ged_function=graph_edit_dist.compare):
    """Compute the graph edit distance between two graphs given as
    adjacency matrices, delegating to `ged_function`."""
    graph_cls = nx.DiGraph if directed else nx.Graph
    first = nx.from_numpy_matrix(adj_mat_1, create_using=graph_cls())
    second = nx.from_numpy_matrix(adj_mat_2, create_using=graph_cls())
    return ged_function(first, second)
def ged_from_adj_nx(adj_mat_1, adj_mat_2, directed=False):
    """Graph edit distance between two adjacency matrices, computed with
    the networkx implementation."""
    return ged_from_adj(
        adj_mat_1, adj_mat_2, directed=directed, ged_function=nx.graph_edit_distance
    )
def ged_from_adj_ged4py(adj_mat_1, adj_mat_2, directed=False):
    """Graph edit distance between two adjacency matrices, computed with
    the ged4py implementation."""
    return ged_from_adj(
        adj_mat_1, adj_mat_2, directed=directed, ged_function=graph_edit_dist.compare
    )
def is_isomorphic_from_adj(adj_mat_1, adj_mat_2):
    """Return whether the two graphs described by the adjacency matrices
    are isomorphic (both treated as directed graphs)."""
    first = nx.from_numpy_matrix(adj_mat_1, create_using=nx.DiGraph())
    second = nx.from_numpy_matrix(adj_mat_2, create_using=nx.DiGraph())
    return nx.is_isomorphic(first, second)
def adj_matrix_to_edge_list(adj_matrix, directed=True, first_id=0, weighted=False):
    """Convert an adjacency matrix to an edge list.

    Fix: the edge count was `np.sum(adj_matrix)`, which sums the entry
    *values* — for weighted matrices (weights != 1) that over-allocates
    and leaves spurious all-zero rows in the result. Edges are now
    counted exactly as the fill loop below emits them (entries > 0, and
    only the strict upper triangle in the undirected case).

    Args:
        adj_matrix: [num_nodes, num_nodes] matrix; an entry > 0 is an edge.
        directed: if False, only node_out > node_in pairs are emitted
            (self-loops on the diagonal are ignored, as before).
        first_id: offset added to both node ids.
        weighted: if True each row is (in, out, weight), else (in, out).
            NOTE(review): output dtype stays int32 as in the original,
            so non-integer weights are truncated — confirm intended.

    Returns:
        [num_edges, 3] or [num_edges, 2] int32 array.
    """
    num_nodes = adj_matrix.shape[0]
    if directed:
        num_edges = int(np.count_nonzero(adj_matrix > 0))
    else:
        # Count only what the loop below will emit: the strict upper triangle.
        num_edges = int(np.count_nonzero(np.triu(adj_matrix, k=1) > 0))

    if weighted:
        edge_list = np.zeros([num_edges, 3], dtype=np.int32)
    else:
        edge_list = np.zeros([num_edges, 2], dtype=np.int32)

    i = 0
    for node_in in range(num_nodes):
        if directed:
            range_2 = range(num_nodes)
        else:
            range_2 = range(node_in + 1, num_nodes)
        for node_out in range_2:
            edge_val = adj_matrix[node_in, node_out]
            if edge_val > 0:
                # There is a connection between node_in and node_out.
                if weighted:
                    edge_list[i] = (node_in + first_id, node_out + first_id, edge_val)
                else:
                    edge_list[i] = (node_in + first_id, node_out + first_id)
                i += 1
    return edge_list
def edge_list_to_textfile(edge_list, filepath, weighted=False):
    """Write an edge list to a text file, one space-separated edge per line.

    Each line is "in out" or, when `weighted`, "in out weight".
    """
    with open(filepath, 'w') as out:
        for edge in edge_list:
            if weighted:
                src, dst, weight = edge
                out.write(f"{src} {dst} {weight}\n")
            else:
                src, dst = edge
                out.write(f"{src} {dst}\n")
    return
| utils/graph_utils.py | 3,483 | Calculate the graph edit distance between two graphs
Calculate the graph edit distance between two graphs using the ged4py implementation
Calculate the graph edit distance between two graphs using the networkx implementation
Checks whether two graphs are isomorphic taking adjacency matrices as inputs
Randomly permute the order of vertices in the adjacency matrix, while maintaining the connectivity
between them.
Check that matrix is square Swap rows into correct ordering Swap columns into correct ordering If there is a connection | 536 | en | 0.888002 |
import time
from typing import Optional, Dict
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from torch.nn.utils.rnn import pack_padded_sequence
from utils import TensorboardWriter, AverageMeter, save_checkpoint, accuracy, \
clip_gradient, adjust_learning_rate
from metrics import Metrics
class Trainer:
    """
    Encoder-decoder pipeline. Teacher Forcing is used during training and validation.

    Parameters
    ----------
    caption_model : str
        Type of the caption model

    epochs : int
        We should train the model for __ epochs

    device : torch.device
        Use GPU or not

    word_map : Dict[str, int]
        Word2id map

    rev_word_map : Dict[int, str]
        Id2word map

    start_epoch : int
        We should start training the model from __th epoch

    epochs_since_improvement : int
        Number of epochs since last improvement in BLEU-4 score

    best_bleu4 : float
        Best BLEU-4 score until now

    train_loader : DataLoader
        DataLoader for training data

    val_loader : DataLoader
        DataLoader for validation data

    encoder : nn.Module
        Encoder (based on CNN)

    decoder : nn.Module
        Decoder (based on LSTM)

    encoder_optimizer : optim.Optimizer
        Optimizer for encoder (Adam) (if fine-tune)

    decoder_optimizer : optim.Optimizer
        Optimizer for decoder (Adam)

    loss_function : nn.Module
        Loss function (cross entropy)

    grad_clip : float
        Gradient threshold in clip gradients

    tau : float
        Penalty term τ for doubly stochastic attention in paper: show, attend and tell

    fine_tune_encoder : bool
        Fine-tune encoder or not

    tensorboard : bool, optional, default=False
        Enable tensorboard or not?

    log_dir : str, optional
        Path to the folder to save logs for tensorboard
    """
    def __init__(
        self,
        caption_model: str,
        epochs: int,
        device: torch.device,
        word_map: Dict[str, int],
        rev_word_map: Dict[int, str],
        start_epoch: int,
        epochs_since_improvement: int,
        best_bleu4: float,
        train_loader: DataLoader,
        val_loader: DataLoader,
        encoder: nn.Module,
        decoder: nn.Module,
        encoder_optimizer: optim.Optimizer,
        decoder_optimizer: optim.Optimizer,
        loss_function: nn.Module,
        grad_clip: float,
        tau: float,
        fine_tune_encoder: bool,
        tensorboard: bool = False,
        log_dir: Optional[str] = None
    ) -> None:
        self.device = device  # GPU / CPU

        self.caption_model = caption_model
        self.epochs = epochs
        self.word_map = word_map
        self.rev_word_map = rev_word_map
        self.start_epoch = start_epoch
        self.epochs_since_improvement = epochs_since_improvement
        self.best_bleu4 = best_bleu4
        self.train_loader = train_loader
        self.val_loader = val_loader
        self.encoder = encoder
        self.decoder = decoder
        self.encoder_optimizer = encoder_optimizer
        self.decoder_optimizer = decoder_optimizer
        self.loss_function = loss_function

        self.tau = tau
        self.grad_clip = grad_clip
        self.fine_tune_encoder = fine_tune_encoder

        self.print_freq = 100  # print training/validation stats every __ batches

        # setup visualization writer instance (no-op when tensorboard=False)
        self.writer = TensorboardWriter(log_dir, tensorboard)
        self.len_epoch = len(self.train_loader)

    def train(self, epoch: int) -> None:
        """
        Train an epoch

        Parameters
        ----------
        epoch : int
            Current number of epoch
        """
        self.decoder.train()  # train mode (dropout and batchnorm is used)
        self.encoder.train()

        batch_time = AverageMeter()  # forward prop. + back prop. time
        data_time = AverageMeter()  # data loading time
        losses = AverageMeter(tag='loss', writer=self.writer)  # loss (per word decoded)
        top5accs = AverageMeter(tag='top5acc', writer=self.writer)  # top5 accuracy

        start = time.time()

        # batches
        for i, (imgs, caps, caplens) in enumerate(self.train_loader):
            data_time.update(time.time() - start)

            # Move to GPU, if available
            imgs = imgs.to(self.device)
            caps = caps.to(self.device)
            caplens = caplens.to(self.device)

            # forward encoder
            imgs = self.encoder(imgs)

            # forward decoder
            # NOTE: 'att2all' additionally returns attention weights (alphas)
            # needed for the doubly stochastic regularization below
            if self.caption_model == 'att2all':
                scores, caps_sorted, decode_lengths, alphas, sort_ind = self.decoder(imgs, caps, caplens)
            else:
                scores, caps_sorted, decode_lengths, sort_ind = self.decoder(imgs, caps, caplens)

            # since we decoded starting with <start>, the targets are all words after <start>, up to <end>
            targets = caps_sorted[:, 1:]

            # remove timesteps that we didn't decode at, or are pads
            # pack_padded_sequence is an easy trick to do this
            scores = pack_padded_sequence(scores, decode_lengths, batch_first=True)[0]
            targets = pack_padded_sequence(targets, decode_lengths, batch_first=True)[0]

            # calc loss
            loss = self.loss_function(scores, targets)

            # doubly stochastic attention regularization (in paper: show, attend and tell)
            if self.caption_model == 'att2all':
                loss += self.tau * ((1. - alphas.sum(dim = 1)) ** 2).mean()

            # clear gradient of last batch
            self.decoder_optimizer.zero_grad()
            if self.encoder_optimizer is not None:
                self.encoder_optimizer.zero_grad()

            # backward
            loss.backward()

            # clip gradients
            if self.grad_clip is not None:
                clip_gradient(self.decoder_optimizer, self.grad_clip)
                if self.encoder_optimizer is not None:
                    clip_gradient(self.encoder_optimizer, self.grad_clip)

            # update weights
            self.decoder_optimizer.step()
            if self.encoder_optimizer is not None:
                self.encoder_optimizer.step()

            # set step for tensorboard
            step = (epoch - 1) * self.len_epoch + i
            self.writer.set_step(step=step, mode='train')

            # keep track of metrics (weighted by the number of decoded words)
            top5 = accuracy(scores, targets, 5)
            losses.update(loss.item(), sum(decode_lengths))
            top5accs.update(top5, sum(decode_lengths))
            batch_time.update(time.time() - start)

            start = time.time()

            # print status
            if i % self.print_freq == 0:
                print(
                    'Epoch: [{0}][{1}/{2}]\t'
                    'Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                    'Data Load Time {data_time.val:.3f} ({data_time.avg:.3f})\t'
                    'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                    'Top-5 Accuracy {top5.val:.3f} ({top5.avg:.3f})'.format(
                        epoch, i, len(self.train_loader),
                        batch_time = batch_time,
                        data_time = data_time,
                        loss = losses,
                        top5 = top5accs
                    )
                )

    def validate(self) -> float:
        """
        Validate an epoch.

        Returns
        -------
        bleu4 : float
            BLEU-4 score
        """
        self.decoder.eval()  # eval mode (no dropout or batchnorm)
        if self.encoder is not None:
            self.encoder.eval()

        batch_time = AverageMeter()
        losses = AverageMeter()
        top5accs = AverageMeter()

        start = time.time()

        ground_truth = list()  # ground_truth (true captions) for calculating BLEU-4 score
        prediction = list()  # prediction (predicted captions)

        # explicitly disable gradient calculation to avoid CUDA memory error
        # solves the issue #57
        with torch.no_grad():
            # Batches
            for i, (imgs, caps, caplens, allcaps) in enumerate(self.val_loader):
                # move to device, if available
                imgs = imgs.to(self.device)
                caps = caps.to(self.device)
                caplens = caplens.to(self.device)

                # forward encoder
                if self.encoder is not None:
                    imgs = self.encoder(imgs)

                # forward decoder
                if self.caption_model == 'att2all':
                    scores, caps_sorted, decode_lengths, alphas, sort_ind = self.decoder(imgs, caps, caplens)
                else:
                    scores, caps_sorted, decode_lengths, sort_ind = self.decoder(imgs, caps, caplens)

                # since we decoded starting with <start>, the targets are all words after <start>, up to <end>
                targets = caps_sorted[:, 1:]

                # remove timesteps that we didn't decode at, or are pads
                # pack_padded_sequence is an easy trick to do this
                # keep an unpacked copy of the scores for decoding predictions below
                scores_copy = scores.clone()
                scores = pack_padded_sequence(scores, decode_lengths, batch_first = True)[0]
                targets = pack_padded_sequence(targets, decode_lengths, batch_first = True)[0]

                # calc loss
                loss = self.loss_function(scores, targets)

                # doubly stochastic attention regularization (in paper: show, attend and tell)
                if self.caption_model == 'att2all':
                    loss += self.tau * ((1. - alphas.sum(dim = 1)) ** 2).mean()

                # keep track of metrics
                losses.update(loss.item(), sum(decode_lengths))
                top5 = accuracy(scores, targets, 5)
                top5accs.update(top5, sum(decode_lengths))
                batch_time.update(time.time() - start)

                start = time.time()

                if i % self.print_freq == 0:
                    print('Validation: [{0}/{1}]\t'
                          'Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                          'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                          'Top-5 Accuracy {top5.val:.3f} ({top5.avg:.3f})\t'.format(i, len(self.val_loader),
                                                                                    batch_time = batch_time,
                                                                                    loss = losses,
                                                                                    top5 = top5accs)
                    )

                # store ground truth captions and predicted captions of each image
                # for n images, each of them has one prediction and multiple ground truths (a, b, c...):
                # prediction = [ [hyp1], [hyp2], ..., [hypn] ]
                # ground_truth = [ [ [ref1a], [ref1b], [ref1c] ], ..., [ [refna], [refnb] ] ]

                # ground truth
                allcaps = allcaps[sort_ind]  # because images were sorted in the decoder
                for j in range(allcaps.shape[0]):
                    img_caps = allcaps[j].tolist()
                    img_captions = list(
                        map(
                            lambda c: [w for w in c if w not in {self.word_map['<start>'], self.word_map['<pad>']}],
                            img_caps
                        )
                    )  # remove <start> and pads
                    ground_truth.append(img_captions)

                # prediction
                _, preds = torch.max(scores_copy, dim = 2)
                preds = preds.tolist()
                temp_preds = list()
                for j, p in enumerate(preds):
                    temp_preds.append(preds[j][:decode_lengths[j]])  # remove pads
                preds = temp_preds
                prediction.extend(preds)

            assert len(ground_truth) == len(prediction)

        # calc BLEU-4 and CIDEr score
        metrics = Metrics(ground_truth, prediction, self.rev_word_map)
        bleu4 = metrics.belu[3]  # BLEU-4 (NOTE(review): attribute is spelled "belu" in Metrics — keep in sync)
        cider = metrics.cider  # CIDEr

        print(
            '\n * LOSS - {loss.avg:.3f}, TOP-5 ACCURACY - {top5.avg:.3f}, BLEU-4 - {bleu}, CIDEr - {cider}\n'.format(
                loss = losses,
                top5 = top5accs,
                bleu = bleu4,
                cider = cider
            )
        )

        return bleu4

    def run_train(self) -> None:
        """Run the full training loop: per-epoch train/validate, LR decay on
        plateau, early stop, and checkpointing."""
        # epochs
        for epoch in range(self.start_epoch, self.epochs):
            # decay learning rate if there is no improvement for 8 consecutive epochs
            # terminate training if there is no improvement for 20 consecutive epochs
            if self.epochs_since_improvement == 20:
                break
            if self.epochs_since_improvement > 0 and self.epochs_since_improvement % 8 == 0:
                adjust_learning_rate(self.decoder_optimizer, 0.8)
                if self.fine_tune_encoder:
                    adjust_learning_rate(self.encoder_optimizer, 0.8)

            # train an epoch
            self.train(epoch = epoch)

            # validate an epoch
            recent_bleu4 = self.validate()

            # epochs num since last improvement
            is_best = recent_bleu4 > self.best_bleu4
            self.best_bleu4 = max(recent_bleu4, self.best_bleu4)
            if not is_best:
                self.epochs_since_improvement += 1
                print("\nEpochs since last improvement: %d\n" % (self.epochs_since_improvement,))
            else:
                self.epochs_since_improvement = 0

            # save checkpoint
            save_checkpoint(
                epoch = epoch,
                epochs_since_improvement = self.epochs_since_improvement,
                encoder = self.encoder,
                decoder = self.decoder,
                encoder_optimizer = self.encoder_optimizer,
                decoder_optimizer = self.decoder_optimizer,
                caption_model = self.caption_model,
                bleu4 = recent_bleu4,
                is_best = is_best
            )
| trainer/trainer.py | 14,308 | Encoder-decoder pipeline. Tearcher Forcing is used during training and validation.
Parameters
----------
caption_model : str
Type of the caption model
epochs : int
We should train the model for __ epochs
device : torch.device
Use GPU or not
word_map : Dict[str, int]
Word2id map
rev_word_map : Dict[int, str]
Id2word map
start_epoch : int
We should start training the model from __th epoch
epochs_since_improvement : int
Number of epochs since last improvement in BLEU-4 score
best_bleu4 : float
Best BLEU-4 score until now
train_loader : DataLoader
DataLoader for training data
val_loader : DataLoader
DataLoader for validation data
encoder : nn.Module
Encoder (based on CNN)
decoder : nn.Module
Decoder (based on LSTM)
encoder_optimizer : optim.Optimizer
Optimizer for encoder (Adam) (if fine-tune)
decoder_optimizer : optim.Optimizer
Optimizer for decoder (Adam)
loss_function : nn.Module
Loss function (cross entropy)
grad_clip : float
Gradient threshold in clip gradients
tau : float
Penalty term τ for doubly stochastic attention in paper: show, attend and tell
fine_tune_encoder : bool
Fine-tune encoder or not
tensorboard : bool, optional, default=False
Enable tensorboard or not?
log_dir : str, optional
Path to the folder to save logs for tensorboard
Train an epoch
Parameters
----------
epoch : int
Current number of epoch
Validate an epoch.
Returns
-------
bleu4 : float
BLEU-4 score
GPU / CPU print training/validation stats every __ batches setup visualization writer instance train mode (dropout and batchnorm is used) forward prop. + back prop. time data loading time loss (per word decoded) top5 accuracy batches Move to GPU, if available forward encoder forward decoder since we decoded starting with <start>, the targets are all words after <start>, up to <end> remove timesteps that we didn't decode at, or are pads pack_padded_sequence is an easy trick to do this calc loss doubly stochastic attention regularization (in paper: show, attend and tell) clear gradient of last batch backward clip gradients update weights set step for tensorboard keep track of metrics print status eval mode (no dropout or batchnorm) ground_truth (true captions) for calculating BLEU-4 score prediction (predicted captions) explicitly disable gradient calculation to avoid CUDA memory error solves the issue 57 Batches move to device, if available forward encoder forward decoder since we decoded starting with <start>, the targets are all words after <start>, up to <end> remove timesteps that we didn't decode at, or are pads pack_padded_sequence is an easy trick to do this calc loss doubly stochastic attention regularization (in paper: show, attend and tell) keep track of metrics store ground truth captions and predicted captions of each image for n images, each of them has one prediction and multiple ground truths (a, b, c...): prediction = [ [hyp1], [hyp2], ..., [hypn] ] ground_truth = [ [ [ref1a], [ref1b], [ref1c] ], ..., [ [refna], [refnb] ] ] ground truth because images were sorted in the decoder remove <start> and pads prediction remove pads calc BLEU-4 and CIDEr score BLEU-4 CIDEr epochs decay learning rate if there is no improvement for 8 consecutive epochs terminate training if there is no improvement for 20 consecutive epochs train an epoch validate an epoch epochs num since last improvement save checkpoint | 3,449 | en | 0.748056 |
# Return the substring between the first occurrence of the 'start' marker and
# the following 'end' marker; returns '' if either marker is missing.
def get_substring_or_empty(data, start, end=''):
    """Return the stripped text between the first `start` marker and the
    following `end` marker, or '' when a marker cannot be found.

    An empty `start` means "from the beginning"; an empty `end` (the
    default) means "to the end of the string".
    """
    # Cut away everything up to and including the first `start` occurrence.
    # (find('') == 0, so an empty start leaves `data` untouched.)
    begin = data.find(start)
    if begin == -1:
        return ''
    data = data[begin + len(start):]

    # An empty end marker keeps the whole remainder.
    if end == '':
        return data.strip()
    stop = data.find(end)
    if stop == -1:
        return ''
    return data[:stop].strip()
| utils.py | 501 | Get substring using 'start' and 'end' position. | 47 | en | 0.441224 |
# Steady-state analysis of a Y-connected 10-pole induction motor via its
# per-phase equivalent circuit (stator current, power factor, input power,
# rotor current, load power, shaft speed, torque and efficiency).
import cmath
import math
# Machine data -- presumably: cv = rated output in metric horsepower and
# cvconv = 736 W per cv (both unused below); t1 = line voltage [V] (unused);
# t2 = phase voltage [V] -- TODO confirm against the problem statement.
cv =150
cvconv = 736
t1 =440
t2 = 254
polos = 10  # number of poles
freq = 60  # supply frequency [Hz]
r1 = 0.012  # stator resistance [ohm]
R2L = 0.018  # rotor resistance referred to the stator [ohm]
X1 = 0.08  # stator leakage reactance [ohm]
X2L = X1  # rotor leakage reactance referred to the stator [ohm]
Rp = 58  # core-loss (parallel) resistance [ohm]
Xm = 54  # magnetizing reactance [ohm]
print("\nConsidere que o motor é alimentado com tensão de fase igual a 254 V, conexão Y e atinge escorregamento igual a 1,8%")
print("\nA - Corrente no estator\n")
s = 0.018  # slip
print("R2L_s = ", R2L/s, "Ohm")
print("(1-s)*(R2L_s) = ", (1-s)*(R2L/s), "Ohm")
# Per-phase impedances: stator branch, rotor branch, magnetizing branch.
Z1 = r1+complex(0,X1)
print("Z1 = ", Z1, "Ohm")
Z2 = R2L/s+complex(0,X2L)
print("Z2 = ", Z2, "Ohm")
Zn = Rp*complex(0,Xm)/complex(Rp,Xm)  # Rp parallel with jXm
print("Zn = ", Zn, "Ohm")
Zeq1 = Zn*Z2/(Zn+Z2)  # magnetizing branch parallel with the rotor branch
print("Zeq1 = ", Zeq1, "Ohm")
Zeq2 = Z1+Zeq1  # total per-phase impedance seen from the source
print("Zeq2 = ", Zeq2, "Ohm")
I1 = t2/Zeq2  # stator phase current (phasor)
print("I1 = ", I1, "A")
I1p = cmath.polar(I1)  # (magnitude, angle) form
print("\nB - Fator de pontecia\n")
FP = cmath.cos(I1p[1])  # power factor = cos of the current angle
FPreal = round(FP.real,5)
print("FP = ", FPreal)
print("\nC - Potencia de entrada\n")
Pe = t2*I1p[0]*cmath.cos(I1p[1])  # per-phase input power
pereal = round(Pe.real,3)
print("Pe = ", pereal, "W")
Pe3 = 3*pereal  # three-phase input power
print("Pe3 = ", Pe3, "W")
print("\nD - Corrente no rotor\n")
E1 = t2-Z1*I1  # air-gap (magnetizing-branch) voltage
E1p = cmath.polar(E1)
print("E1 = ", E1p, "V")
I2L = E1/Z2  # referred rotor current
I2Lp = cmath.polar(I2L)
print("I2L = ", I2Lp, "A")
print("\nE - Potencia na carga\n")
# NOTE: the instructor works with the polar (magnitude) quantities here.
Ps = ((R2L*(1-s))/s)*I2Lp[0]*I2Lp[0]  # per-phase converted (load) power
print("Ps = ", Ps, "W")
Ps3 = 3*Ps  # three-phase load power
print("Ps3 = ", Ps3, "W")
print("\nF - Velocidade do eixo\n")
ns = 120*freq/polos  # synchronous speed [rpm]
print("ns = ", ns, "rpm")
n = (1-s)*ns  # shaft speed [rpm]
print("n = ", n, "rpm")
w = 2*math.pi*n/60  # shaft speed [rad/s]
w = round(w,3)
print("w = ", w, "rad/s")
print("\nG - Torque na carga\n")
t = Ps3/w  # load torque
print("t = ", t, "Nm")
print("\nH - Rendimento do motor\n")
eni = Ps3/Pe3*100  # efficiency [%]
print("eni = ", eni, "%")
| P5/Brasilia/Q7 - BR.py | 1,667 | professor ultiliza dados polares | 32 | pt | 0.887097 |
# Jupytext-style notebook script: loads a terrain/shadow time series and
# visualizes it with holoviews. The '# +' / '# -' lines are cell markers.
# +
import numpy as np
import holoviews as hv
from holoviews import opts
import matplotlib.pyplot as plt
from plotsun import plot_sun
hv.extension('bokeh', 'matplotlib')
# -
# # Load data
data = np.load('npz_timeseries/subset.npz')
arr = data['arr']  # presumably a 2-D elevation grid -- TODO confirm shape
stack = data['stack']  # presumably (x, y, time) shadow masks -- TODO confirm
sun = data['sun']  # sun positions for plot_sun
print(arr.shape, stack.shape, sun.shape)
# Bare expression: rendered as notebook cell output.
stack[:,:,25]
plt.imshow(stack[:,:,25], cmap='binary')
# +
# Rebind `stack` as a holoviews Dataset; axis 2 of the raw array is Time.
stack = hv.Dataset((np.arange(stack.shape[2]),
                    np.arange(stack.shape[0]),
                    np.arange(stack.shape[1]),
                    stack),
                   ['Time', 'x', 'y'], 'Shadows')
stack
# -
# Wrap the elevation grid as a holoviews Dataset as well.
arr = hv.Dataset((np.arange(arr.shape[0]),
                  np.arange(arr.shape[1]),
                  arr),
                 ['x', 'y'], 'Elevation')
arr
# # View
# Default plot options for the element types used below.
opts.defaults(
    opts.GridSpace(shared_xaxis=True, shared_yaxis=True),
    opts.Image(cmap='viridis', invert_yaxis=True, width=400, height=400),
    opts.Labels(text_color='white', text_font_size='8pt',
                text_align='left', text_baseline='bottom'),
    opts.Path(color='white'),
    opts.Spread(width=600),
    opts.Overlay(show_legend=False))
elevation = arr.to(hv.Image, ['x', 'y'])
shadows = stack.to(hv.Image, ['x', 'y'])
elevation
dims = {'figsize':(4,5), 'top':1, 'bottom':0, 'left':0.2, 'right':0.95}
plot_sun(sunposition=sun, d=dims)
# Overlay the shadow layer on the elevation image.
elevation * shadows
stack[:,:,24]
| datashader_nb.py | 1,401 | + - Load data + - View | 24 | en | 0.390463 |
# -*- coding: utf-8 -*-
# URL configuration for the category app: a DRF router for the model
# viewsets plus two plain APIView endpoints.
from django.conf.urls import include
from django.conf.urls import url
from rest_framework.routers import DefaultRouter
from .views import *
# `register`'s optional argument `base_name` is used to generate the URL
# names; it is required when the viewset does not define a `queryset`.
router = DefaultRouter()
router.register(r'idcs', IdcViewSet)
router.register(r'racks', RackViewSet)
router.register(r'servers', ServerViewSet)
router.register(r'sshusers', SSHUserViewSet)
router.register(r'businesslines', BusinessLineViewSet)
router.register(r'projects', ProjectViewSet)
urlpatterns = [
    url(r'^', include(router.urls)),
    url(r'^api_dashboard/$', APIDashBoardView.as_view()),
    url(r'^api_local_ssh_user/$', APILocalSSHUserView.as_view()),
]
| backend/category/urls.py | 758 | -*- coding: utf-8 -*- register的可选参数 base_name: 用来生成urls名字,如果viewset中没有包含queryset, base_name一定要有 | 95 | zh | 0.737162 |
from django.contrib import admin
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext_lazy as _
from djangocms_versioning.constants import PUBLISHED, VERSION_STATES
from djangocms_versioning.versionables import _cms_extension
from polymorphic.utils import get_base_polymorphic_model
from rangefilter.filters import DateRangeFilter
from .helpers import get_rangefilter_expires_default
class SimpleListMultiselectFilter(admin.SimpleListFilter):
    """Admin list filter that keeps several selected values in one
    comma-separated query-string parameter."""

    def value_as_list(self):
        """Return the current selection as a list of strings ([] when unset)."""
        raw = self.value()
        return raw.split(',') if raw else []

    def _update_query(self, changelist, include=None, exclude=None):
        """Build a query string with `include` added to, and `exclude`
        removed from, the current selection."""
        selection = self.value_as_list()
        if include and include not in selection:
            selection.append(include)
        if exclude and exclude in selection:
            selection.remove(exclude)
        # An empty selection drops the parameter entirely.
        if not selection:
            return changelist.get_query_string(remove=[self.parameter_name])
        return changelist.get_query_string({self.parameter_name: ','.join(selection)})
class ContentTypeFilter(SimpleListMultiselectFilter):
    """Multi-select admin filter over the content types registered with
    djangocms-versioning."""
    title = _("Content Type")
    parameter_name = "content_type"
    template = 'djangocms_content_expiry/multiselect_filter.html'

    def lookups(self, request, model_admin):
        """Return (pk, ContentType) choices for every versionable content model."""
        lookup_list = []
        for content_model in _cms_extension().versionables_by_content:
            # Only add references to the inherited concrete model i.e. not referenced polymorphic models
            if hasattr(content_model, "polymorphic_ctype"):
                content_model = get_base_polymorphic_model(content_model)
            # Create an entry
            content_type = ContentType.objects.get_for_model(content_model)
            lookup_list_entry = (content_type.pk, content_type)
            # Only add unique entries (several models can share a base type)
            if lookup_list_entry not in lookup_list:
                lookup_list.append(lookup_list_entry)
        return lookup_list

    def queryset(self, request, queryset):
        """Filter by the selected content-type pks; no selection means no filtering."""
        content_types = self.value()
        if not content_types:
            return queryset
        return queryset.filter(version__content_type__in=content_types.split(','))

    def choices(self, changelist):
        """Yield template choice dicts: an 'All' entry plus one per content type,
        each carrying include/exclude query strings for multi-selection."""
        yield {
            'selected': self.value() is None,
            'query_string': changelist.get_query_string(remove=[self.parameter_name]),
            'display': 'All',
            'initial': True,
        }
        for lookup, title in self.lookup_choices:
            yield {
                'selected': str(lookup) in self.value_as_list(),
                'query_string': changelist.get_query_string({self.parameter_name: lookup}),
                'include_query_string': self._update_query(changelist, include=str(lookup)),
                'exclude_query_string': self._update_query(changelist, exclude=str(lookup)),
                'display': title,
            }
class VersionStateFilter(SimpleListMultiselectFilter):
    """Multi-select admin filter on version state, defaulting to PUBLISHED.

    Because a default is in effect when the parameter is absent, an explicit
    "_all_" sentinel value is needed to mean "no state filtering".
    """
    title = _("Version State")
    parameter_name = "state"
    default_filter_value = PUBLISHED
    show_all_param_value = "_all_"
    template = 'djangocms_content_expiry/multiselect_filter.html'

    def _is_default(self, filter_value):
        # The default choice counts as selected only while no explicit
        # parameter is present in the query string.
        if self.default_filter_value == filter_value and self.value() is None:
            return True
        return False

    def _get_all_query_string(self, changelist):
        """
        If there's a default value set the all parameter needs to be provided
        however, if a default is not set the all parameter is not required.
        """
        # Default setting in use
        if self.default_filter_value:
            return changelist.get_query_string(
                {self.parameter_name: self.show_all_param_value}
            )
        # Default setting not in use
        return changelist.get_query_string(remove=[self.parameter_name])

    def _is_all_selected(self):
        """Return whether the 'All' choice is the one currently in effect."""
        state = self.value()
        # Default setting in use
        if self.default_filter_value and state == self.show_all_param_value:
            return True
        # Default setting not in use
        elif not self.default_filter_value and not state:
            return True
        return False

    def _update_query(self, changelist, include=None, exclude=None):
        """Like the base implementation, but first drops the "_all_"
        sentinel so it never mixes with concrete state values."""
        selected_list = self.value_as_list()
        if self.show_all_param_value in selected_list:
            selected_list.remove(self.show_all_param_value)
        if include and include not in selected_list:
            selected_list.append(include)
        if exclude and exclude in selected_list:
            selected_list.remove(exclude)
        if selected_list:
            compiled_selection = ','.join(selected_list)
            return changelist.get_query_string({self.parameter_name: compiled_selection})
        else:
            return changelist.get_query_string(remove=[self.parameter_name])

    def lookups(self, request, model_admin):
        # Choices come straight from djangocms-versioning's state constants.
        return VERSION_STATES

    def queryset(self, request, queryset):
        """Filter by the selected states, falling back to the default state
        when no parameter is present and skipping filtering for "_all_"."""
        state = self.value()
        # Default setting in use
        if self.default_filter_value:
            if not state:
                return queryset.filter(version__state=self.default_filter_value)
            elif state != "_all_":
                return queryset.filter(version__state__in=state.split(','))
        # Default setting not in use
        elif not self.default_filter_value and state:
            return queryset.filter(version__state__in=state.split(','))
        return queryset

    def choices(self, changelist):
        """Yield template choice dicts: an 'All' entry plus one per state,
        each carrying include/exclude query strings for multi-selection."""
        yield {
            "selected": self._is_all_selected(),
            "query_string": self._get_all_query_string(changelist),
            "display": _("All"),
            'initial': True,
        }
        for lookup, title in self.lookup_choices:
            lookup_value = str(lookup)
            yield {
                "selected": str(lookup) in self.value_as_list() or self._is_default(lookup_value),
                "query_string": changelist.get_query_string(
                    {self.parameter_name: lookup}
                ),
                'include_query_string': self._update_query(changelist, include=str(lookup_value)),
                'exclude_query_string': self._update_query(changelist, exclude=str(lookup_value)),
                "display": title,
            }
class AuthorFilter(admin.SimpleListFilter):
    """
    An author filter limited to those users who have added expiration dates
    """
    title = _("Version Author")
    parameter_name = "created_by"

    def lookups(self, request, model_admin):
        # NOTE(review): force_text was removed in Django 4.0 (use force_str);
        # confirm the supported Django versions before upgrading.
        from django.utils.encoding import force_text
        User = get_user_model()
        options = []
        qs = model_admin.get_queryset(request)
        # Distinct pks of users that authored a related version.
        authors = qs.values_list('version__created_by', flat=True).distinct()
        users = User.objects.filter(pk__in=authors)
        for user in users:
            options.append(
                (force_text(user.pk), user.get_full_name() or user.get_username())
            )
        return options

    def queryset(self, request, queryset):
        # NOTE(review): lookups() collects authors via version__created_by but
        # filtering here uses created_by on the expiry record itself -- verify
        # the two fields are kept in sync, otherwise this looks inconsistent.
        if self.value():
            return queryset.filter(created_by=self.value()).distinct()
        return queryset
class ContentExpiryDateRangeFilter(DateRangeFilter):
    def queryset(self, request, queryset):
        """Apply the base date-range filter, falling back to a default window.

        When the request carries no explicit ``expires__range`` parameters the
        results are constrained to the configured default expiry range instead
        of showing every record.
        """
        queryset = super().queryset(request, queryset)
        has_explicit_range = any('expires__range' in key for key in request.GET)
        if not has_explicit_range:
            default_gte, default_lte = get_rangefilter_expires_default()
            queryset = queryset.filter(expires__range=(default_gte, default_lte))
        return queryset
| djangocms_content_expiry/filters.py | 7,876 | An author filter limited to those users who have added expiration dates
If there's a default value set the all parameter needs to be provided
however, if a default is not set the all parameter is not required.
Only add references to the inherited concrete model i.e. not referenced polymorphic models Create an entry Only add unique entries Default setting in use Default setting not in use Default setting in use Default setting not in use Default setting in use Default setting not in use By default the widget should default to show a default duration and not all content expiry records | 591 | en | 0.657196 |
import keras
from sklearn.metrics import roc_auc_score
from src.predictionAlgorithms.machineLearning.helpers.validation import Validation
import matplotlib
# Select the non-interactive Agg backend before pyplot is imported,
# so figures can be written to file on headless machines.
matplotlib.use('agg')
import matplotlib.pyplot as plt
import numpy as np
import os
import glob
class Callbacks(keras.callbacks.Callback):
    """Keras callback that periodically validates and plots training curves.

    Configure an instance via the fluent ``set_*`` methods, then pass it to
    ``model.fit(callbacks=[...])``. Every ``validation_frequency`` epochs the
    configured ``Validation`` helper is run against ``validationSequences``,
    and a loss/accuracy graph is written to ``../output/Epoch-<n>.png``.
    """

    # Class-level defaults; each is normally overridden via the setters below.
    validationSequences = []
    algorithm = None
    number = 1                  # epoch counter used for the validation cadence
    validation_frequency = 1    # validate every N epochs
    size = 64                   # dimension passed to Validation.set_dimensions
    step = 1
    base = 4

    def set_step(self, step):
        self.step = step
        return self

    def set_base(self, base):
        self.base = base
        # Bug fix: return self (the original returned the argument), so this
        # setter chains like every other set_* method.
        return self

    def set_size(self, size):
        self.size = size
        return self

    def set_validation_frequency(self, frequency):
        self.validation_frequency = frequency
        return self

    def set_validation_data(self, validation_data):
        self.validationSequences = validation_data
        return self

    def set_algorithm(self, algorithm):
        self.algorithm = algorithm
        return self

    def on_train_begin(self, logs={}):
        # Initialize the lists for holding the logs, losses and accuracies
        self.losses = []
        self.acc = []
        self.val_losses = []
        self.val_acc = []
        self.logs = []
        # Clear epoch graphs left over from any previous run.
        epoch_graphs = glob.glob('../output/*')
        for f in epoch_graphs:
            os.remove(f)

    def on_train_end(self, logs={}):
        return

    def on_epoch_begin(self, epoch, logs={}):
        return

    def on_epoch_end(self, epoch, logs={}):
        # Only run validation on every validation_frequency-th epoch.
        if self.number % self.validation_frequency != 0:
            self.number += 1
            return
        validation = Validation()
        validation.set_validation_data(self.validationSequences)\
            .set_dimensions(self.size)\
            .set_base(self.base)\
            .set_step(self.step)\
            .validate(self.algorithm)
        self.number += 1
        self.logs.append(logs)
        self.losses.append(logs.get('loss'))
        self.acc.append(logs.get('acc'))
        self.val_losses.append(logs.get('val_loss'))
        self.val_acc.append(logs.get('val_acc'))
        # Need at least two points to draw a meaningful curve.
        if len(self.losses) > 1:
            N = np.arange(0, len(self.losses))
            plt.figure()
            plt.plot(N, self.losses, label="train_loss")
            plt.plot(N, self.acc, label="train_acc")
            plt.plot(N, self.val_losses, label="val_loss")
            plt.plot(N, self.val_acc, label="val_acc")
            plt.title("Training Loss and Accuracy [Epoch {}]".format(epoch))
            plt.xlabel("Epoch #")
            plt.ylabel("Loss/Accuracy")
            plt.legend()
            plt.savefig('../output/Epoch-{}.png'.format(epoch))
            plt.close()
        return

    def on_batch_begin(self, batch, logs={}):
        return

    def on_batch_end(self, batch, logs={}):
        return
| src/predictionAlgorithms/machineLearning/helpers/callbacks.py | 2,828 | Initialize the lists for holding the logs, losses and accuracies | 64 | en | 0.871922 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import tokenize
from hacking import core
# Maps the NAME token to the exact source rendering that triggers H501.
LOCALS_TEXT_MAP = {
    'locals': 'locals()',
    'self': 'self.__dict__'
}
@core.flake8ext
def hacking_no_locals(logical_line, physical_line, tokens, noqa):
    """Do not use locals() or self.__dict__ for string formatting.

    Okay: 'locals()'
    Okay: 'locals'
    Okay: locals()
    Okay: print(locals())
    H501: print("%(something)" % locals())
    H501: LOG.info(_("%(something)") % self.__dict__)
    Okay: print("%(something)" % locals()) # noqa
    """
    if noqa:
        return
    # Once a '%' operator token has been seen, any following NAME token that
    # matches a LOCALS_TEXT_MAP key (with its rendering present in the line)
    # is flagged.
    seen_format_op = False
    for token_type, text, start, _, _ in tokens:
        if token_type == tokenize.OP and text == "%":
            seen_format_op = True
        if seen_format_op and token_type == tokenize.NAME:
            for name, rendering in LOCALS_TEXT_MAP.items():
                if text == name and rendering in logical_line:
                    yield (start[1],
                           "H501: Do not use %s for string formatting" % rendering)
| hacking/checks/dictlist.py | 1,533 | Do not use locals() or self.__dict__ for string formatting.
Okay: 'locals()'
Okay: 'locals'
Okay: locals()
Okay: print(locals())
H501: print("%(something)" % locals())
H501: LOG.info(_("%(something)") % self.__dict__)
Okay: print("%(something)" % locals()) # noqa
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 799 | en | 0.807915 |
# Source:https://github.com/Show-Me-the-Code/show-me-the-code
# Author:renzongxian
# Date:2014-11-30
# Python 3.4
"""
第 0001 题:做为 Apple Store App 独立开发者,你要搞限时促销,为你的应用生成激活码
(或者优惠券),使用 Python 如何生成 200 个激活码(或者优惠券)?
"""
import uuid
def generate_key(count=200):
    """Generate activation codes for the promotion.

    Each code is the uuid3 hash of a fresh uuid1, rendered as a
    32-character hex string (dashes stripped).

    :param int count: number of codes to generate (default 200, matching
        the original hard-coded amount).
    :return: list of 32-character hex strings.
    """
    key_list = []
    for _ in range(count):
        uuid_key = uuid.uuid3(uuid.NAMESPACE_DNS, str(uuid.uuid1()))
        key_list.append(str(uuid_key).replace('-', ''))
    return key_list
if __name__ == '__main__':
    # Demo: print 200 freshly generated activation codes.
    print(generate_key())
| renzongxian/0001/0001.py | 606 | 第 0001 题:做为 Apple Store App 独立开发者,你要搞限时促销,为你的应用生成激活码
(或者优惠券),使用 Python 如何生成 200 个激活码(或者优惠券)?
Source:https://github.com/Show-Me-the-Code/show-me-the-code Author:renzongxian Date:2014-11-30 Python 3.4 | 200 | zh | 0.811679 |
from art import logo_blackjack
from replit import clear
import random
def deal_card():
    """Deal one card: 11 is an ace, 10 appears four times (10/J/Q/K)."""
    deck = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]
    return random.choice(deck)
def calculate_score(cards):
    """Take a list of cards and return the score.

    A two-card 21 (blackjack) is reported as the sentinel score 0.
    Aces (11) are downgraded to 1 in place while the hand would
    otherwise bust; *cards* is mutated by design so the displayed
    hand reflects the downgraded aces.
    """
    if sum(cards) == 21 and len(cards) == 2:
        return 0
    # Fixed: downgrade as many aces as needed. The original handled only one
    # ace per call, so e.g. [11, 11, 11] scored 23 instead of 13.
    while 11 in cards and sum(cards) > 21:
        cards.remove(11)
        cards.append(1)
    return sum(cards)
def compare(current_score_of_user, current_score_of_computer):
    """Return the result message for a finished round.

    Scores follow the game's conventions: 0 means blackjack and anything
    above 21 is a bust. Checks run in the same priority order as the
    original rules (double bust, draw, blackjacks, busts, high score).
    """
    user, dealer = current_score_of_user, current_score_of_computer
    if user > 21 and dealer > 21:
        return "You went over. You lose"
    if user == dealer:
        return "DRAW"
    if dealer == 0:
        return "You lose. Opponent has a blackjack"
    if user == 0:
        return "You win with blackjack"
    if user > 21:
        return "You went over. You lose"
    if dealer > 21:
        return "Opponent went over. You win"
    if user > dealer:
        return "You win"
    return "You lose"
def play_game():
    """Play one full interactive round of blackjack against the dealer."""
    print(logo_blackjack)
    user_cards = []
    computer_cards = []
    is_game_over = False
    # Initial deal: two cards each.
    for i in range(2):
        user_cards.append(deal_card())
        computer_cards.append(deal_card())
    while not is_game_over:
        # Recompute every loop: calculate_score() may downgrade aces in place.
        current_score_of_user = calculate_score(user_cards)
        current_score_of_computer = calculate_score(computer_cards)
        print(f"Your cards: {user_cards} and current score of yours: {current_score_of_user}")
        print(f"Computer's first card: [{computer_cards[0]}]")
        # The round ends immediately on any blackjack (score 0) or a user bust.
        if current_score_of_user == 0 or current_score_of_computer == 0 or current_score_of_user > 21:
            is_game_over = True
        else:
            want_card = input("To get another card type 'y', to pass type 'n': ")
            if want_card == "y":
                user_cards.append(deal_card())
            else:
                is_game_over = True
    # Dealer draws until reaching at least 17, unless holding blackjack.
    while current_score_of_computer != 0 and current_score_of_computer < 17:
        computer_cards.append(deal_card())
        current_score_of_computer = calculate_score(computer_cards)
    print(f"Your final hand: {user_cards} and final score: {current_score_of_user}")
    print(f"Computer's final hand: {computer_cards}, final score: {current_score_of_computer}")
    print(compare(current_score_of_user, current_score_of_computer))
# Keep starting new rounds until the player declines.
while input("Do you want to play a game of blackjack? Type 'y' or 'n': ") == "y":
    clear()
    play_game()
| Programs/day_11_blackjack.py | 2,637 | Take a list of cards and return the score
Return random card | 60 | en | 0.761562 |
# Copyright 2021 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for flax.nn.linear."""
import functools
from absl.testing import absltest
from absl.testing import parameterized
from flax import linen as nn
import jax
from jax import random
from jax.nn import initializers
import jax.numpy as jnp
import numpy as np
# Parse absl flags test_srcdir and test_tmpdir.
# (Registers absl command-line flags with JAX before the tests run.)
jax.config.parse_flags_with_absl()
class LinearTest(parameterized.TestCase):
  """Unit tests for flax.linen linear layers: Dense, DenseGeneral, Conv,
  ConvTranspose and Embed.

  Most tests initialize a module with deterministic all-ones initializers
  so the expected outputs can be asserted exactly.
  """

  def test_dense(self):
    rng = dict(params=random.PRNGKey(0))
    x = jnp.ones((1, 3))
    dense_module = nn.Dense(
        features=4,
        kernel_init=initializers.ones,
        bias_init=initializers.ones,
    )
    y, _ = dense_module.init_with_output(rng, x)
    self.assertEqual(y.shape, (1, 4))
    # ones kernel + ones bias on an all-ones (1, 3) input -> 3 + 1 = 4.
    np.testing.assert_allclose(y, np.full((1, 4), 4.))

  def test_dense_extra_batch_dims(self):
    rng = dict(params=random.PRNGKey(0))
    x = jnp.ones((1, 2, 3))
    dense_module = nn.Dense(
        features=4,
        kernel_init=initializers.ones,
        bias_init=initializers.ones,
    )
    y, _ = dense_module.init_with_output(rng, x)
    np.testing.assert_allclose(y, np.full((1, 2, 4), 4.))

  def test_dense_no_bias(self):
    rng = dict(params=random.PRNGKey(0))
    x = jnp.ones((1, 3))
    dense_module = nn.Dense(
        features=4,
        use_bias=False,
        kernel_init=initializers.ones,
    )
    y, _ = dense_module.init_with_output(rng, x)
    np.testing.assert_allclose(y, np.full((1, 4), 3.))

  def test_dense_is_dense_general(self):
    # Dense and DenseGeneral must agree given identical rngs and inits.
    x = jax.random.normal(random.PRNGKey(0), (5, 3))
    dense_module = nn.Dense(
        features=4,
        use_bias=True,
        bias_init=initializers.normal(),
    )
    y1, _ = dense_module.init_with_output(dict(params=random.PRNGKey(1)), x)
    dg_module = nn.DenseGeneral(
        features=4,
        use_bias=True,
        bias_init=initializers.normal(),
    )
    y2, _ = dg_module.init_with_output(dict(params=random.PRNGKey(1)), x)
    np.testing.assert_allclose(y1, y2)

  def test_dense_general_batch_dim_raises(self):
    rng = dict(params=random.PRNGKey(0))
    x = jnp.ones((1, 3, 2, 5))
    # Non-contiguous leading batch dims are unsupported and must raise.
    with self.assertRaises(ValueError):
      dg_module = nn.DenseGeneral(
          features=4,
          batch_dims=(0, 2),
          kernel_init=initializers.ones,
          bias_init=initializers.ones,
      )
      dg_module.init_with_output(rng, x)

  def test_dense_general_two_out(self):
    rng = dict(params=random.PRNGKey(0))
    x = jnp.ones((1, 3))
    dg_module = nn.DenseGeneral(
        features=(2, 2),
        kernel_init=initializers.ones,
        bias_init=initializers.ones,
    )
    y, _ = dg_module.init_with_output(rng, x)
    np.testing.assert_allclose(y, np.full((1, 2, 2), 4.))

  def test_dense_general_two_in(self):
    rng = dict(params=random.PRNGKey(0))
    x = jnp.ones((1, 2, 2))
    dg_module = nn.DenseGeneral(
        features=3,
        axis=(-2, 2),
        kernel_init=initializers.ones,
        bias_init=initializers.ones,
    )
    y, _ = dg_module.init_with_output(rng, x)
    np.testing.assert_allclose(y, np.full((1, 3), 5.))

  def test_dense_general_batch_dim(self):
    rng = dict(params=random.PRNGKey(0))
    x = jnp.ones((2, 1, 3, 5))

    state = {'counter': 0.}
    # Stateful init fills each kernel with an increasing counter so the two
    # batch entries receive distinguishable kernels.
    def _counter_init(rng, shape, dtype, state):
      del rng, dtype
      state['counter'] += 1.
      return jnp.full(shape, state['counter'])
    counter_init = functools.partial(_counter_init, state=state)

    dg_module = nn.DenseGeneral(
        features=7,
        axis=(3, -2),
        batch_dims=0,
        bias_init=initializers.ones,
        kernel_init=counter_init,
    )
    y, _ = dg_module.init_with_output(rng, x)
    target = np.concatenate(
        [np.full((1, 1, 7), 16.), np.full((1, 1, 7), 31.)], axis=0)
    np.testing.assert_allclose(y, target)

  @parameterized.parameters([((-2, 3), (), 'bijk,jklm->bilm'),
                             ((3, -2), (), 'bijk,jklm->bilm'),
                             ((-2, 3), (0,), 'bijk,bjklm->bilm')])
  def test_dense_general_vs_numpy(self, axis, batch_dims, einsum_expr):
    # DenseGeneral must match a plain numpy einsum over the same kernel.
    rng = dict(params=random.PRNGKey(0))
    x = jnp.ones((16, 8, 9, 10))
    dg_module = nn.DenseGeneral(
        features=(11, 12),
        axis=axis,
        batch_dims=batch_dims,
        bias_init=initializers.ones,
        kernel_init=initializers.normal(),
    )
    y, initial_params = dg_module.init_with_output(rng, x)
    target = np.einsum(einsum_expr, x, initial_params['params']['kernel']) + 1.
    np.testing.assert_allclose(y, target, atol=1e-6)

  @parameterized.parameters([((3,),), (3,)])
  def test_conv(self, kernel_size):
    # kernel_size may be given as a tuple or a bare int.
    rng = dict(params=random.PRNGKey(0))
    x = jnp.ones((1, 8, 3))
    conv_module = nn.Conv(
        features=4,
        kernel_size=kernel_size,
        padding='VALID',
        kernel_init=initializers.ones,
        bias_init=initializers.ones,
    )
    y, initial_params = conv_module.init_with_output(rng, x)
    self.assertEqual(initial_params['params']['kernel'].shape, (3, 3, 4))
    np.testing.assert_allclose(y, np.full((1, 6, 4), 10.))

  @parameterized.parameters([((3,),), (3,)])
  def test_single_input_conv(self, kernel_size):
    # Unbatched input (no leading batch axis) must also work.
    rng = dict(params=random.PRNGKey(0))
    x = jnp.ones((8, 3))
    conv_module = nn.Conv(
        features=4,
        kernel_size=kernel_size,
        padding='VALID',
        kernel_init=initializers.ones,
        bias_init=initializers.ones,
    )
    y, initial_params = conv_module.init_with_output(rng, x)
    self.assertEqual(initial_params['params']['kernel'].shape, (3, 3, 4))
    np.testing.assert_allclose(y, np.full((6, 4), 10.))

  @parameterized.parameters([((3,),), (3,)])
  def test_group_conv(self, kernel_size):
    rng = dict(params=random.PRNGKey(0))
    x = jnp.ones((1, 8, 4))
    conv_module = nn.Conv(
        features=4,
        kernel_size=kernel_size,
        feature_group_count=2,
        padding='VALID',
        kernel_init=initializers.ones,
        bias_init=initializers.ones,
    )
    y, initial_params = conv_module.init_with_output(rng, x)
    self.assertEqual(initial_params['params']['kernel'].shape, (3, 2, 4))
    np.testing.assert_allclose(y, np.full((1, 6, 4), 7.))

  @parameterized.parameters([((3,),), (3,)])
  def test_conv_transpose(self, kernel_size):
    rng = dict(params=random.PRNGKey(0))
    x = jnp.ones((1, 8, 3))
    conv_transpose_module = nn.ConvTranspose(
        features=4,
        kernel_size=kernel_size,
        padding='VALID',
        kernel_init=initializers.ones,
        bias_init=initializers.ones,
    )
    y, initial_params = conv_transpose_module.init_with_output(rng, x)
    self.assertEqual(initial_params['params']['kernel'].shape, (3, 3, 4))
    correct_ans = np.array([[[ 4.,  4.,  4.,  4.],
                             [ 7.,  7.,  7.,  7.],
                             [10., 10., 10., 10.],
                             [10., 10., 10., 10.],
                             [10., 10., 10., 10.],
                             [10., 10., 10., 10.],
                             [10., 10., 10., 10.],
                             [10., 10., 10., 10.],
                             [ 7.,  7.,  7.,  7.],
                             [ 4.,  4.,  4.,  4.]]])
    np.testing.assert_allclose(y, correct_ans)

  @parameterized.parameters([((3,),), (3,)])
  def test_single_input_conv_transpose(self, kernel_size):
    rng = dict(params=random.PRNGKey(0))
    x = jnp.ones((8, 3))
    conv_transpose_module = nn.ConvTranspose(
        features=4,
        kernel_size=kernel_size,
        padding='VALID',
        kernel_init=initializers.ones,
        bias_init=initializers.ones,
    )
    y, initial_params = conv_transpose_module.init_with_output(rng, x)
    self.assertEqual(initial_params['params']['kernel'].shape, (3, 3, 4))
    correct_ans = np.array([[ 4.,  4.,  4.,  4.],
                            [ 7.,  7.,  7.,  7.],
                            [10., 10., 10., 10.],
                            [10., 10., 10., 10.],
                            [10., 10., 10., 10.],
                            [10., 10., 10., 10.],
                            [10., 10., 10., 10.],
                            [10., 10., 10., 10.],
                            [ 7.,  7.,  7.,  7.],
                            [ 4.,  4.,  4.,  4.]])
    np.testing.assert_allclose(y, correct_ans)

  def test_embed(self):
    rng = dict(params=random.PRNGKey(0))
    x = jnp.arange(4)[None]
    dummy_embedding = jnp.broadcast_to(
        jnp.arange(4)[..., None], (4, 3)).astype(jnp.float32)
    embed_module = nn.Embed(
        num_embeddings=4,
        features=3,
        embedding_init=lambda rng, shape, dtype: dummy_embedding,
    )
    y, initial_params = embed_module.init_with_output(rng, x)
    np.testing.assert_allclose(y, dummy_embedding[None])
    # attend() computes the dot product against the embedding table.
    z = embed_module.apply(initial_params, jnp.ones((3,)), method=embed_module.attend)
    np.testing.assert_allclose(z, 3. * jnp.arange(4))

  def test_non_final_axis(self):
    class Foo(nn.Module):
      @nn.compact
      def __call__(self, x):
        return nn.DenseGeneral(features=6, axis=1, name='dense')(x)

    x = jnp.ones((2, 4, 8))
    y, variables = Foo().init_with_output(random.PRNGKey(0), x)
    self.assertEqual(jax.tree_map(jnp.shape, variables['params']), {
        'dense': {'kernel': (4, 6), 'bias': (6,)}
    })
    self.assertEqual(y.shape, (2, 8, 6))

  def test_non_final_axes(self):
    class Foo(nn.Module):
      @nn.compact
      def __call__(self, x):
        return nn.DenseGeneral(features=6, axis=(0, 1), name='dense')(x)

    x = jnp.ones((2, 4, 8))
    y, variables = Foo().init_with_output(random.PRNGKey(0), x)
    self.assertEqual(jax.tree_map(jnp.shape, variables['params']), {
        'dense': {'kernel': (2, 4, 6), 'bias': (6,)}
    })
    self.assertEqual(y.shape, (8, 6))
if __name__ == '__main__':
  # Run all tests via absl's test runner.
  absltest.main()
| tests/linen/linen_linear_test.py | 10,432 | Tests for flax.nn.linear.
Copyright 2021 The Flax Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Parse absl flags test_srcdir and test_tmpdir. | 628 | en | 0.838535 |
import os, sys, shutil, glob, math
import regex as re
from doconce import globals
from .doconce import read_file, write_file, doconce2format, handle_index_and_bib, preprocess
from .misc import option, help_print_options, check_command_line_options, system, _abort, \
find_file_with_extensions, folder_checker, doconce_version, _rmdolog, errwarn, debugpr
from .common import INLINE_TAGS, remove_code_and_tex
import json
from .ipynb import img2ipynb
from .html import movie2html
# Usage/help string printed by `doconce jupyterbook`.
docstring_jupyterbook = ('Usage:\n'
                         '\033[1mdoconce jupyterbook <file>[.do.txt] [options]\033[0m\n'
                         'Create directories and files for Jupyter Book version: 0.8\n'
                         '\n'
                         'Example:\n'
                         'doconce jupyterbook filename.do.txt --sep=chapter --sep_section=subsection --show_titles\n')

# (option, help) pairs specific to the `doconce jupyterbook` subcommand.
_registered_cmdline_opts_jupyterbook = [
    ('-h', 'Show this help page'),
    ('--help', 'Show this help page'),
    ('--sep=', 'Specify separator for DocOnce file into jupyter-book chapters. [chapter|section|subsection]'),
    ('--sep_section=', 'Specify separator for DocOnce file into jupyter-book sections. '
                       '[chapter|section|subsection], optional'),
    ('--dest=', 'Destination folder for the content'),
    ('--dest_toc=', 'Destination folder for the _toc.yml file'),
    ('--show_titles', 'Print out the titles detected based on the separator headers. '
                      'This can be helpful for the file passed to the --titles option'),
    ('--titles=', 'File with page titles, i.e. titles in TOC on the left side of the page. Default is \'auto\': '
                  'assign titles based on the separator headers')
]
# Get the list of options for doconce jupyterbook
_legal_cmdline_opts_jupyterbook, _ = list(zip(*_registered_cmdline_opts_jupyterbook))
_legal_cmdline_opts_jupyterbook = list(_legal_cmdline_opts_jupyterbook)
# Get the list of options for doconce in general
_legal_command_line_options = [opt for opt, help in globals._registered_command_line_options]
def jupyterbook():
    """
    Create content and TOC for building a jupyter-book version 0.8: https://jupyterbook.org/intro

    This function is called directly from bin/doconce. It reads the DocOnce
    file named on the command line, splits it into jupyter-book chapters and
    (optionally) sections, converts each chunk to markdown or ipynb, writes
    the chunks to the destination folder, and produces a matching _toc.yml.
    """
    # Print help
    if len(sys.argv) < 2:
        doconce_version()
        print(docstring_jupyterbook)
        print("Try 'doconce jupyterbook --help' for more information.")
        sys.exit(1)
    if option('help') or '-h' in sys.argv:
        print_help_jupyterbook()
        sys.exit(1)
    # Check options
    # NB: _legal_command_line_options allows options defined in misc.py/global.py
    if not check_command_line_options(1, option_list=_legal_cmdline_opts_jupyterbook + _legal_command_line_options):
        _abort()

    # Destination directories
    dest = option('dest=', default='./', option_list=_legal_cmdline_opts_jupyterbook)
    dest = folder_checker(dest)
    dest_toc = option('dest_toc=', default='./', option_list=_legal_cmdline_opts_jupyterbook)
    dest_toc = folder_checker(dest_toc)

    # Get options
    sep = option('sep=', default='section', option_list=_legal_cmdline_opts_jupyterbook)
    sep_section = option('sep_section=', default='', option_list=_legal_cmdline_opts_jupyterbook)
    globals.encoding = option('encoding=', default='')
    titles_opt = option('titles=', default='auto', option_list=_legal_cmdline_opts_jupyterbook)
    show_titles_opt = option('show_titles', default=False, option_list=_legal_cmdline_opts_jupyterbook)

    # Check if the file exists, then read it in
    dirname, basename, ext, filename = find_file_with_extensions(sys.argv[1], allowed_extensions=['.do.txt'])
    if not filename:
        errwarn('*** error: file %s does not exist' % globals.filename)
        _abort()
    globals.dirname = dirname
    if dirname:
        # cd into the DocOnce file's directory, then fix dest and dest_toc
        os.chdir(dirname)
        errwarn('*** doconce format now works in directory %s' % dirname)
        # fix dest, dest_toc, and finally dirname
        dest = os.path.relpath(dest or '.', start=dirname) + '/'
        if dest.startswith('./'):
            dest = dest[2:]
        dest_toc = os.path.relpath(dest_toc or '.', start=dirname) + '/'
        if dest_toc.startswith('./'):
            dest_toc = dest_toc[2:]
        #dirname = ''
    globals.filename = filename
    globals.dofile_basename = basename

    # NOTE: The following is a reworking of code from doconce.py > format_driver
    _rmdolog()  # always start with clean log file with errors
    preprocessor_options = [arg for arg in sys.argv[1:]
                            if not arg.startswith('--')]
    format = 'pandoc'
    filename_preprocessed = preprocess(globals.filename, format, preprocessor_options)

    # Run parts of file2file code in format_driver.
    # Cannot use it directly because file2file writes to file. Consider to modularize file2file
    filestr = read_file(filename_preprocessed, _encoding=globals.encoding)

    # Remove pandoc's title/author/date metadata, which does not get rendered appropriately in
    # markdown/jupyter-book. Consider to write this metadata to the _config.yml file
    for tag in 'TITLE', 'AUTHOR', 'DATE':
        if re.search(r'^%s:.*' % tag, filestr, re.MULTILINE):
            errwarn('*** warning : Removing heading with %s. Consider to place it in _config.yml' % tag.lower())
            filestr = re.sub(r'^%s:.*' % tag, '', filestr, flags=re.MULTILINE)
    # Remove TOC tag
    tag = 'TOC'
    if re.search(r'^%s:.*' % tag, filestr, re.MULTILINE):
        errwarn('*** warning : Removing the %s tag' % tag.lower())
        filestr = re.sub(r'^%s:.*' % tag, '', filestr, flags=re.MULTILINE)

    # Format citations and add bibliography in DocOnce's html format
    pattern_tag = r'[\w _\-]*'
    pattern = r'cite(?:(\[' + pattern_tag + '\]))?\{(' + pattern_tag + ')\}'
    if re.search(pattern, filestr):
        filestr = handle_index_and_bib(filestr, 'html')

    # Delete any non-printing characters, commands, and comments
    # Using regex:
    m = re.search(r'\A\s*^(?:#.*\s*|!split\s*)*', filestr, re.MULTILINE)
    if m:
        filestr = filestr[m.end():]
    # No-regex method. This could be an alternative to the previous regex
    '''skip = ''
    for line in filestr.splitlines():
        if not line.strip():
            skip += line + '\n'
        elif not line.startswith('#') and not line.startswith('!'):
            break
        else:
            skip += line +'\n'
    filestr = filestr[len(skip):]
    '''

    # Description of relevant variables
    # sep : Divide the text in jupyter-book chapters, see --sep
    # chapters : ['whole chapter 1', 'whole chapter 2', 'summary']
    # chapter_titles : ['Chapter 1', 'Chapter 2', 'Summary']
    # chapter_titles_auto : ['Header 1', 'Header 2', 'Last Header in DocOnce file']
    # chapter_basenames : ['01_mybook', '02_mybook', '03_mybook']
    #
    # If sep_section is not empty, these variables become relevant
    # sep_section : Subdivide the jupyter-book chapters in sections, see --sep_section
    # sec_list : [['subsection1','subsection2], ['subsection1'] , []]
    # sec_title_list : [['Subsection 1.1', 'Subsection 1.2'], ['Subsection 2.1'], []]
    # sec_title_list_auto : [['Subheader 1.1', 'Subheader 1.2'], ['Subheader 2.1'], ['Last Subheader in DocOnce file']]
    # sec_basename_list : [['01_01_mybook', '01_02_mybook'], ['02_01_mybook'], []]

    # Split the DocOnce file in jupyter-book chapters
    chapters = split_file(filestr, INLINE_TAGS[sep])
    sec_list = [[]] * len(chapters)
    sec_title_list_auto = None
    # Extract all jupyter-book sections based on --sep_section
    if sep_section:
        for c, chap in enumerate(chapters):
            # Any text before the first jupyter-book section is part of a jupyter-book chapter,
            # the rest consists in jupyter-book sections
            m = re.search(INLINE_TAGS[sep_section], chap, flags=re.MULTILINE)
            if m:
                pos_sep_section = m.start() if m else 0
                # Write text before the first jupyter-book section as chapter
                chapters[c] = split_file(chap[:pos_sep_section:], INLINE_TAGS[sep_section])[0]
                # The text after the first match of sep_section are jupyter-book sections
                sec_list[c] = split_file(chap[pos_sep_section:], INLINE_TAGS[sep_section])

    # Get titles from title file in options
    chapter_titles, sec_title_list = read_title_file(titles_opt, chapters, sec_list)

    # Extract and write titles to each jupyter-book chapter/section.
    # Also get the basenames for the files to be created later
    def int_formatter(_list):
        # Zero-padded integer prefix wide enough for len(_list) entries.
        return '%0' + str(max(2, math.floor(math.log(len(_list) + 0.01, 10)) + 1)) + 'd_'
    chapter_formatter = int_formatter(chapters)
    chapters, chapter_titles, chapter_titles_auto = titles_to_chunks(chapters, chapter_titles, sep=sep,
                                                                     chapter_formatter=chapter_formatter, tags=INLINE_TAGS)
    chapter_basenames = [chapter_formatter % (i + 1) + basename for i in range(len(chapters))]
    sec_basename_list = [[]] * len(chapters)
    if sep_section:
        # The following contains section titles extracted automatically
        sec_title_list_auto = [[]] * len(sec_title_list)
        for c, sections in enumerate(sec_list):
            section_formatter = chapter_formatter % (c + 1) + int_formatter(sections)
            sec_list[c], section_titles, section_titles_auto = titles_to_chunks(sections, sec_title_list[c],
                                                                                sep=sep_section, sep2=sep,
                                                                                chapter_formatter=section_formatter, tags=INLINE_TAGS)
            sec_title_list[c] = section_titles
            sec_title_list_auto[c] = section_titles_auto
            sec_basename_list[c] = [section_formatter % (i + 1) + basename for i in range(len(sections))]

    # Print out the detected titles if --show_titles was used
    if show_titles_opt:
        if sep_section == '':
            print('\n===== Titles detected using the %s separator:' % sep)
        else:
            print('\n===== Titles detected using the %s and %s separators:' % (sep, sep_section))
        for c in range(len(chapter_titles_auto)):
            print(chapter_titles_auto[c])
            if sep_section:
                for s in range(len(sec_title_list_auto[c])):
                    print(sec_title_list_auto[c][s])
        print('=====')

    # Description of relevant variables
    # all_texts : ['====== Chapter 1 ======\n Some text', '====== Subsection 1.1 ======\n Some text', ..]
    # all_basenames : ['01_mybook','01_01_mybook','01_02_mybook','02_mybook']
    # all_suffix : ['.md','.md','.ipynb','.md']
    # all_fnames : ['01_mybook.md','01_01_mybook.md','01_02_mybook.ipynb','02_mybook.md']
    # all_titles : ['Chapter 1','Subsection 1.1', 'Subsection 1.2','Chapter 2']
    # all_nestings : [0, 1, 1, 0] # 0 or 1 for jupyter-book chapters or sections, respectively
    #
    # filestr_md : DocOnce input formatted to pandoc
    # filestr_ipynb : DocOnce input formatted to ipynb
    # all_texts_md : list of all chapters and sections from filestr_md
    # all_texts_ipynb : list of all chapters and sections from filestr_ipynb
    # all_texts_formatted : list of chapters and sections from filestr_ipynb

    # Flatten all texts, basenames, titles, etc for jupyter-book chapters and sections
    all_texts = []
    all_basenames = []
    all_titles = []
    all_nestings = []
    for c in range(len(chapters)):
        all_texts.append(chapters[c])
        all_basenames.append(chapter_basenames[c])
        all_titles.append(chapter_titles[c])
        all_nestings.append(0)
        for s in range(len(sec_list[c])):
            all_texts.append(sec_list[c][s])
            all_basenames.append(sec_basename_list[c][s])
            all_titles.append(sec_title_list[c][s])
            all_nestings.append(1)

    # Create markdown or ipynb filenames for each jupyter-book chapter section
    all_suffix = identify_format(all_texts)
    all_fnames = [b + s for b, s in zip(all_basenames,all_suffix)]
    # Mark the beginning of each jupyter-book chapter and section with its filename in a comment
    all_markings = list(map(lambda x: '!split\n<!-- jupyter-book %s -->\n' % x, all_fnames))
    all_texts = [m + t for m, t in zip(all_markings, all_texts)]

    # Merge all jupyter-book chapters and sections back to a single DocOnce text.
    # Then convert to pandoc and ipynb
    filestr = ''.join(all_texts)
    filestr_md, bg_session = doconce2format(filestr, 'pandoc')
    filestr_ipynb, bg_session = doconce2format(filestr, 'ipynb')

    # Split the texts (formatted to md and ipynb) to individual jupyter-book chapters/sections
    all_texts_md = split_file(filestr_md, '<!-- !split -->\n<!-- jupyter-book .* -->\n')
    all_texts_ipynb = split_ipynb(filestr_ipynb, all_fnames)
    if len(all_texts_md) != len(all_texts_ipynb):
        errwarn('*** error : the lengths of .md and .ipynb files should be the same')
        _abort()

    # Flatten the formatted texts, picking .ipynb output where the target
    # filename calls for a notebook, .md otherwise.
    all_texts_formatted = [[]] * len(all_fnames)
    for i in range(len(all_fnames)):
        all_texts_formatted[i] = all_texts_md[i]
        if all_fnames[i].endswith('.ipynb'):
            all_texts_formatted[i] = all_texts_ipynb[i]
    # Fix all links whose destination is in a different document
    # e.g. <a href="#Langtangen_2012"> to <a href="02_jupyterbook.html#Langtangen_2012">
    all_texts_formatted = resolve_links_destinations(all_texts_formatted, all_basenames)
    # Fix the path of FIGUREs and MOVIEs.
    # NB: at the time of writing (03-2021) movies are not supported by Jupyter Book
    all_texts_formatted = [fix_media_src(t, '', dest) for t in all_texts_formatted]
    # Write chapters and sections to file
    for i in range(len(all_texts_formatted)):
        write_file(all_texts_formatted[i], dest + all_fnames[i], _encoding=globals.encoding)

    # Create the _toc.yml file
    yml_text = create_toc_yml(all_basenames, titles=all_titles, nesting_levels=all_nestings, dest=dest, dest_toc=dest_toc)
    write_file(yml_text, dest_toc + '_toc.yml', _encoding=globals.encoding)
    print('\nWrote _toc.yml and %d chapter files to these folders:\n %s\n %s' %
          (len(all_fnames), os.path.realpath(dest_toc), os.path.realpath(dest)))
def split_file(filestr, separator):
    """Split the text of a doconce file by a regex string.

    Split the text of a doconce file by a separator regex (e.g. the values of
    the INLINE_TAGS dictionary from common.py) and return the chunks of text.
    Note that the first chunk contains any text before the first separator.

    :param str filestr: text string
    :param str separator: regex text, e.g. INLINE_TAGS['chapter'], see common.py
    :return: list of text chunks
    :rtype: list[str]
    """
    pattern = re.compile(separator, flags=re.MULTILINE)
    if pattern.search(filestr) is None:
        print('pattern of separator not found in file')
        return [filestr]
    pieces = []
    start = 0
    for match in pattern.finditer(filestr):
        pos = match.start()
        # A separator at the very beginning does not open a new chunk:
        # the text before the first separator belongs to the first chunk
        if pos == 0:
            continue
        # Skip separators used for illustration of doconce syntax inside
        # !bc and !ec directives
        head = filestr[:pos]
        if head.rfind('!bc') > head.rfind('!ec'):
            errwarn('*** warning : skipped a separator, '
                    'which appeared to be inside the !bc and !ec directives')
            continue
        pieces.append(filestr[start:pos])
        start = pos
    # The remainder after the last separator is the final chunk
    pieces.append(filestr[start:])
    return pieces
def split_ipynb(ipynb_text, filenames):
    """Split a Jupyter notebook based on filenames present in its blocks

    Given the text of a Jupyter notebook marked with the output filename
    in comments (e.g. <!-- jupyter-book 02_mybook.ipynb -->), return a list of
    Jupyter notebooks separated accordingly.

    :param str ipynb_text: ipynb code marked with individual filenames i.e. <!-- jupyter-book 02_mybook.ipynb -->
    :param list[str] filenames: filenames
    :return: ipynb_texts with the ipynb code for each block
    :rtype: list[str]
    """
    # An ipynb file is JSON, i.e. a python dictionary once parsed
    ipynb_dict = json.loads(ipynb_text)
    cells = ipynb_dict.pop('cells')
    # Find the cell index where each filename marking appears.
    # NB: fixed the format spec from '% s' to '%s' -- the space flag is
    # ignored for %s so the output is unchanged, but '%s' matches the
    # marking written upstream and is not accidental
    ind_fname = []
    block_sources = [''.join(c['source']) for c in cells]
    for fname in filenames:
        marking = '<!-- jupyter-book %s -->' % fname
        for b, block in enumerate(block_sources):
            if marking in block:
                ind_fname.append(b)
                break
    if len(ind_fname) != len(filenames):
        errwarn('*** error : could not find all markings in ipynb')
        _abort()
    # For each file create a dictionary with the relevant ipynb cells,
    # then serialize it back to JSON text
    ipynb_texts = [''] * len(filenames)
    for i, ind_start in enumerate(ind_fname):
        # Cells run from this marking up to the next one (or to the end)
        ind_end = ind_fname[i + 1] if i + 1 < len(ind_fname) else None
        block_dict = ipynb_dict.copy()
        block_dict['cells'] = cells[ind_start:ind_end]
        ipynb_texts[i] = json.dumps(block_dict, indent=1, separators=(',', ':'))
    return ipynb_texts
def read_title_file(titles_opt, chapters, sec_list):
    """Helper function to read and process a file with titles

    Read the file containing titles and process them according to the number
    of jupyter-book chapters and sections.
    len(sec_list) should be the same as len(chapters), and its elements can
    be empty lists.

    :param str titles_opt: 'auto' or file containing titles
    :param list[str] chapters: DocOnce texts consisting in Jupyter-book chapters
    :param list[list[str]] sec_list: DocOnce texts consisting in Jupyter-book sections.
    :return: tuple with chapter and section titles
    :rtype: (list[str], list[list[str]])
    """
    chapter_titles = []
    # Bug fix: '[[]] * len(chapters)' creates N references to ONE shared list,
    # which was returned as-is on the 'auto' path; build independent lists
    sec_title_list = [[] for _ in chapters]
    if titles_opt != 'auto':
        chapter_titles = [''] * len(chapters)
        input_titles = read_to_list(titles_opt)
        for c in range(len(chapters)):
            # Consume one title per chapter, then one per section of that chapter;
            # missing entries become ''
            chapter_titles[c] = input_titles.pop(0) if len(input_titles) else ''
            section = []
            for _ in range(len(sec_list[c])):
                section.append(input_titles.pop(0) if len(input_titles) else '')
            sec_title_list[c] = section
        if len(input_titles):
            errwarn('*** warning : number of titles is larger than chapters and sections detected. '
                    'These titles will be ignored')
    return chapter_titles, sec_title_list
def titles_to_chunks(chunks, title_list, sep, sep2=None, chapter_formatter='%02d_', tags=INLINE_TAGS):
    """Helper function to extract and assign titles to jupyter-book chapters/sections (here called chunks)

    Jupyter-book files must have a # header with the title (see doc jupyter-book >
    Types of content source files > Rules for all content types). This function
    extracts titles from the title file or from the headers given by the separator
    provided in the options. If no title is found, provide a default title as e.g.
    03_mydoconcefile.
    NB: the input list ``chunks`` is modified in place (each chunk gets a
    DocOnce chapter heading prepended) and also returned.

    :param list[str] chunks: list of text strings
    :param list[str] title_list: titles for the chunks. Empty if --titles is used
    :param str sep: separator: chapter|section|subsection
    :param str sep2: second separator in case the first fails: chapter|section|subsection
    :param dict tags: tag patterns, e.g. INLINE_TAGS from common.py
    :param str chapter_formatter: formatter for default filenames
    :return: tuple with the chunks of text having a # header, titles, titles detected
    :rtype: (list[str], list[str], list[str])
    """
    title_list_out = title_list.copy()
    # title list can be empty (when --titles='auto')
    if not len(title_list_out):
        title_list_out = [''] * len(chunks)
    title_list_detected = [''] * len(chunks)
    # Process each chunk: detect and write title in the header of a chapter/section
    for i, chunk in enumerate(chunks):
        title = ''
        # Try to find and remove any title from headers in each chunk
        if title == '':
            chunk, title = create_title(chunk, sep, tags)
        # Same, this time using the second optional separator
        if title == '' and sep2:
            chunk, title = create_title(chunk, sep2, tags)
        # Set default title, e.g. '03_' + basename of the DocOnce file
        if title == '':
            title = chapter_formatter % (i + 1) + globals.dofile_basename
        # Keep any detected title before overriding them with the file indicated in --titles
        title_list_detected[i] = title
        # Use title from the titles files. This gets skipped if there is no title file
        if i < len(title_list):
            # Skip any empty line in title file
            if title_list[i]:
                title = title_list[i]
        # Write to title list and chunk
        # NB: create_title above removed any detected title from chunk, thus avoiding duplicate titles
        title_list_out[i] = title
        # Prepend a DocOnce chapter heading ('========= title =========')
        chunk = '=' * 9 + ' ' + title + ' ' + '=' * 9 + '\n' + chunk
        chunks[i] = chunk
    return chunks, title_list_out, title_list_detected
def create_title(chunk, sep, tags):
    """Helper function to allow doconce jupyterbook to automatically assign titles in the TOC

    If a chunk of text starts with the section type given by ``sep``, lift that
    heading out of the chunk and return its text, so that doconce jupyterbook
    can use it as the title in the TOC on the left.

    :param str chunk: text string
    :param str sep: chapter|section|subsection
    :param dict tags: tag patterns, e.g. INLINE_TAGS from common.py
    :return: tuple with the chunk stripped of its section header, and title
    :rtype: (str, str)
    """
    title = ''
    match = re.search(tags[sep], chunk, flags=re.MULTILINE)
    # Only act when the chunk starts with the requested heading type
    if match is None or match.start() != 0:
        return chunk, title
    # Width of the '=' marker run for each DocOnce heading level
    heading_width = {'chapter': 9, 'section': 7, 'subsection': 5, 'subsubsection': 3}
    marker = '=' * heading_width[sep]
    header_pattern = r'^ *%s +(.+?) +%s' % (marker, marker)
    # Grab the heading text, then strip the first heading from the chunk
    title_match = re.match(header_pattern, chunk)
    if title_match:
        title = title_match.group(1)
    chunk = re.sub(header_pattern, '', chunk, flags=re.MULTILINE, count=1)
    return chunk, title
def identify_format(text_list):
    """Identify the appropriate formats to convert a list of DocOnce texts.

    Given a list of DocOnce texts, check if they contain code. If so, return
    the suffix '.ipynb' (for the Jupyter Notebook ipynb format), otherwise
    return '.md' (for the pandoc markdown format).

    :param list[str] text_list: list of strings using DocOnce syntax
    :return: list of formats
    :rtype: list[str]
    """
    suffixes = []
    for text in text_list:
        # Strip code and tex blocks; any code block means the text needs ipynb
        _, code_blocks, _, _ = remove_code_and_tex(text, 'pandoc')
        suffixes.append('.ipynb' if len(code_blocks) else '.md')
    return suffixes
def create_toc_yml(basenames, nesting_levels, titles, dest='./', dest_toc='./', section_paths=None, section_titles=None):
    """Create the content of a _toc.yml file

    Given the lists of paths, titles, and nesting levels, return the content
    of a _toc.yml file.

    :param list[str] basenames: list of file basenames for jupyter-book chapters or sections, i.e.
        strings that can be used after the `file:` section in a _toc.yml
    :param list[str] nesting_levels: nesting levels for basenames and titles: 0 or 1 for
        jupyter-book chapters or sections, respectively
    :param list[str] titles: list of titles to jupyter-book chapters, i.e. strings that can be used
        after the `title:` section in a _toc.yml
    :param str dest: destination folder for the chapter files
    :param str dest_toc: destination folder for _toc.yml
    :param section_paths: unused; kept for backward compatibility
    :param section_titles: unused; kept for backward compatibility
    :return: content of a _toc.yml file
    :rtype: str
    """
    def escape_chars(title):
        """Wrap title in quotes if it contains colons, asterisks, backticks"""
        # Fix: single raw-string character class replaces three re.search calls
        # with deprecated escape sequences ('\*', '\`'); behavior is identical
        if re.search(r'[:*`]', title):
            title = title.replace('"', '\\"')
            title = '"' + title + '"'
        return title
    # Get the relative path between the destination folders
    relpath = os.path.relpath(dest, start=dest_toc)
    relpath = '' if relpath == '.' else relpath + '/'
    # Produce the text for _toc.yml
    yml_text = ""
    nesting_prev = 0
    for i, cfname in enumerate(basenames):
        ctitle = escape_chars(titles[i])
        if ctitle:
            nesting = nesting_levels[i]
            if nesting == 0:
                # A jupyter-book chapter: a titled page at the top level
                yml_text += '\n'
                yml_text += yml_titledpage(relpath + cfname, ctitle, numbered=False)
            else:
                # A jupyter-book section, nested under the previous chapter;
                # open a `sections:` list when switching from chapter level
                if nesting_prev == 0:
                    yml_text += yml_section(nesting_level=nesting)
                yml_text += yml_nested_section(relpath + cfname, ctitle, nesting_level=nesting)
            nesting_prev = nesting
    yml_text = yml_text.strip('\n')
    return yml_text
def print_help_jupyterbook():
    """Pretty print help string and command line options

    Help function to print help and formatted command line options for doconce jupyterbook
    """
    # NOTE(review): docstring_jupyterbook, help_print_options and
    # _registered_cmdline_opts_jupyterbook are defined elsewhere in this module
    print(docstring_jupyterbook)
    print('Options:')
    help_print_options(cmdline_opts=_registered_cmdline_opts_jupyterbook)
def read_to_list(file):
    """Read the content of a file into a list of lines.

    Verify the existence of a file, then read it to a list by stripping
    newlines. The function aborts the program if the file does not exist.

    :param str file: Path to an existing file
    :return: list of strings
    :rtype: list[str]
    """
    if not os.path.isfile(file):
        errwarn('*** error: file "%s" does not exist!' % file)
        _abort()
    with open(file, 'r') as f:
        return f.read().splitlines()
def get_link_destinations(chunk):
"""Find any target of a link in HTML code
Use regex to find tags with the id or name attribute, which makes them a possible target of a link
:param str chunk: text string
:return: destinations, destination_tags
:rtype: Tuple[list[str], list[str]]
"""
destinations, destination_tags = [], []
# html links. label{} has already been converted
pattern_tag = r'[\w _\-:]'
pattern_backslash = '[\\\]'
pattern = r'<' + pattern_tag + \
'+ (id|name)=' + pattern_backslash + '["\']' + \
'(' + pattern_tag + '+)' + pattern_backslash + '["\'][^>]*>'
for m in re.finditer(pattern, chunk):
match = m.group()
tag = m.group(2)
destinations.append(match)
destination_tags.append(tag)
return destinations, destination_tags
def fix_links(chunk, tag2file):
    """Find and fix the destinations of hyperlinks using HTML or markdown syntax

    Fix any link in a string text so that it can target a different html
    document. First use regex on a HTML text to find any HTML or markdown
    hyperlinks (e.g. <a href="#sec1"> or [sec1](#sec1) ). Then use a
    dictionary to prepend the filename to the value of a link's href attribute
    (e.g. <a href="02_jupyterbook.html#sec1">).

    :param str chunk: text string
    :param dict tag2file: dictionary mapping a tag to a file basename e.g. tag2file['sec1']='02_jupyterbook'
    :return: chunk with fixed links
    :rtype: str
    """
    pattern_tag = r'[\w _\-:]'
    # One regex for html anchors, one for markdown links; both capture the tag
    patterns = [
        r'<' + pattern_tag + '+ href=[\\\]{0,2}["\']#(' + pattern_tag + '+)[\\\]{0,2}["\'][^>]*>',
        r'\[' + pattern_tag + '+\]\(#(' + pattern_tag + '+)\)',
    ]
    chunk_out = chunk
    for pattern in patterns:
        for m in re.finditer(pattern, chunk):
            match = m.group()
            tag = m.group(1)
            # Unknown tags fall back to the tag itself as the filename
            fixed_tag = match.replace('#' + tag, tag2file.get(tag, tag) + '.html#' + tag)
            chunk_out = chunk_out.replace(match, fixed_tag)
    return chunk_out
def resolve_links_destinations(chunks, chunk_basenames):
    """Fix links in jupyter-book chapters/sections so that they can target destinations in other files

    Prepend a filename to all links' destinations, e.g. <a href="#Langtangen_2012">
    becomes <a href="02_jupyterbook.html#Langtangen_2012">.
    NB: the input list ``chunks`` is modified in place and also returned.

    :param list[str] chunks: DocOnce texts consisting in Jupyter-book chapters/sections
    :param list[str] chunk_basenames: file basenames for jupyter-book chapters/sections
    :return: chunks with corrected links
    :rtype: list[str]
    """
    def strip_end(text, suffix):
        # Drop suffix from the end of text when present
        if suffix and text.endswith(suffix):
            return text[:-len(suffix)]
        return text
    # Basenames without the .md/.ipynb extension
    basenames = [strip_end(strip_end(name, '.md'), '.ipynb')
                 for name in chunk_basenames]
    # Map every link-target tag to the basename of the file defining it
    tag2file = {}
    for text, basename in zip(chunks, basenames):
        _, destination_tags = get_link_destinations(text)
        for tag in destination_tags:
            tag2file[tag] = basename
    # Fix all href in links by prepending the destination filename (in place)
    for c in range(len(chunks)):
        chunks[c] = fix_links(chunks[c], tag2file)
    return chunks
def fix_media_src(filestr, dirname, dest):
    """Fix the (relative) path to any figure and movie in the DocOnce file.

    The generated .md and .ipynb files will be created in the path passed to `--dest`.
    This method fixes the paths of the image and movie files so that they can be found
    in generated .md and .ipynb files.

    :param str filestr: text string
    :param str dirname: Path to an existing folder
    :param str dest: directory name
    :return: filestr with new paths
    :rtype: str
    """
    # Regexes matching the ways media appear in the output; each is expected
    # to capture the tag/type as group(1) and the source path as group(2)
    patterns = [
        # movies in .md and .ipynb. NB: jupyterbook does not support movies
        movie2html['movie_regex'],
        # images in .md
        r'\!\[<p><em>(.*)</em></p>\]\((.*)\)',
        # images in .ipynb. See ipynb.py
        img2ipynb['imgtag_regex'],
        # images in MarkDown syntax
        img2ipynb['md_regex'],
        # commented images and movies in ipynb. See ipynb.py
        r'<!-- (?:dom:)(FIGURE|MOVIE): \[(.*)',
        # commented images in md
        r'<!-- <(\w+) src="(.*)" .*>(?=[<|\\n])',
    ]
    filestr_out = filestr
    for i,pattern in enumerate(patterns):
        for m in re.finditer(pattern, filestr):
            match = m.group()
            tag = m.group(1)
            src = m.group(2)
            # Warn that FIGUREs cannot work in Jupyter Book
            # NOTE(review): this warning fires on the movie pattern but the
            # message talks about images -- confirm the intended pairing
            if pattern == movie2html['movie_regex']:
                errwarn('*** warning : To make images work consider to add this extensions to _config.yml:\n',
                        ('parse:\n'
                         ' myst_enable_extensions:\n'
                         ' - html_image\n'))
            # Absolute paths are left untouched; relative paths are rewritten
            # to be relative to the destination folder
            if not src.startswith('/'):
                if dirname != '' and not dirname.endswith('/'):
                    dirname += '/'
                src_new = os.path.relpath(dirname + src, start=dest)
                replacement = match.replace(src, src_new, 1)
                filestr_out = filestr_out.replace(match, replacement, 1)
    return filestr_out
def yml_file(file):
    """Return a plain `file:` entry for a _toc.yml."""
    return "- file: {}\n\n".format(file)
def yml_untitledpage(file, numbered=False):
    """Return a `file:` entry with a lowercase `numbered:` flag for a _toc.yml."""
    flag = str(numbered).lower()
    return "- file: {}\n numbered: {}\n".format(file, flag)
def yml_titledpage(file, title, numbered=False):
    """Return a `file:` entry with title and numbered flag for a _toc.yml."""
    flag = str(numbered).lower()
    return "- file: {}\n title: {}\n numbered: {}\n".format(file, title, flag)
def yml_section(nesting_level=1):
    """Return the `sections:` keyword indented by nesting_level."""
    indent = ' ' * nesting_level
    return "{}sections:\n".format(indent)
def yml_nested_section(file, title, nesting_level=1):
    """Return an indented file/title pair for a nested _toc.yml section."""
    indent = ' ' * nesting_level
    file_line = '%s - file: %s\n' % (indent, file)
    title_line = '%s title: %s\n' % (indent, title)
    return file_line + title_line
def yml_part(part, *files):
    """Return a `part:` entry listing its chapter files for a _toc.yml."""
    lines = ["- part: %s\n chapters:\n" % part]
    lines.extend(' - file: %s\n' % fname for fname in files)
    lines.append('\n')
    return ''.join(lines)
def yml_ext_link(url, nesting_level=0, numbered=False):
    """Return an `external:` link entry for a _toc.yml.

    Bug fix: the original interpolated ``url`` into the indentation slot and
    the indentation string into the link slot, producing e.g.
    'http://x- external:  \\n ...'. Also lowercase the ``numbered`` flag for
    consistency with the sibling yml_* helpers (YAML booleans).

    :param str url: external URL
    :param int nesting_level: indentation level of the entry
    :param bool numbered: value for the `numbered:` key
    :return: one _toc.yml entry
    :rtype: str
    """
    indent = ' ' * nesting_level
    return "%s- external: %s\n numbered: %s\n" % (indent, url, str(numbered).lower())
def yml_header(header):
    """Return a `header:` entry for a _toc.yml."""
    return "- header: {}\n".format(header)
def yml_chapter(file, title, sections, numbered='false'):
    """Return a chapter entry with title, file, numbered flag and sections."""
    template = "- title: %s\n file: %s\n numbered: %s\n sections: %s\n"
    return template % (title, file, numbered, sections)
| lib/doconce/jupyterbook.py | 33,648 | Helper function to allow doconce jupyterbook to automatically assign titles in the TOC
If a chunk of text starts with the section specified in sep, lift it up
to a chapter section. This allows doconce jupyterbook to automatically use the
section's text as title in the TOC on the left
:param str chunk: text string
:param str sep: chapter|section|subsection
:param dict tags: tag patterns, e.g. INLINE_TAGS from common.py
:return: tuple with the chunk stripped of its section header, and title
:rtype: (str, str)
Create the content of a _toc.yml file
Give the lists of paths, titles, and nesting levels, return the content of a _toc.yml file
:param list[str] basenames: list of file basenames for jupyter-book chapters or sections, i.e.
strings that can be used after the `file:` section in a _toc.yml
:param list[str] titles: list of titles to jupyter-book chapters, i.e. strings that can be used
after the `title:` section in a _toc.yml
:param list[str] nesting_levels: nesting levels for basenames and titles: # 0 or 1 for jupyter-book
chapters or sections, respectively
:param str dest: destination folder for _toc.yml
:param str dest_toc: destination folder for the chapter files
:return: content of a _toc.yml file
:rtype: str
Wrap title in quotes if it contains colons, asterisks, backticks
Find and fix the the destinations of hyperlinks using HTML or markdown syntax
Fix any link in a string text so that they can target a different html document.
First use regex on a HTML text to find any HTML or markdown hyperlinks
(e.g. <a href="#sec1"> or [sec1](#sec1) ). Then use a dictionary to prepend the
filename to the value of a link's href attribute (e.g. <a href="02_jupyterbook.html#sec1">)
:param str chunk: text string
:param dict tag2file: dictionary mapping a tag to a file basename e.g. tag2file['sec1']='02_jupyterbook'
:return: chunk with fixed links
:rtype: str
Fix the (relative) path to any figure and movie in the DocOnce file.
The generated .md and .ipynb files will be created in the path passed to `--dest`.
This method fixes the paths of the image and movie files so that they can be found
in generated .md and .ipynb files.
:param str filestr: text string
:param str dirname: Path to an existing folder
:param str dest: directory name
:return: filestr with new paths
:rtype: str
Find any target of a link in HTML code
Use regex to find tags with the id or name attribute, which makes them a possible target of a link
:param str chunk: text string
:return: destinations, destination_tags
:rtype: Tuple[list[str], list[str]]
Identify the appropriate formats to convert a list of DocOnce texts.
Given a list of DocOnce texts, check if they contain code. If so, return the suffix
'.ipynb' (for the Jupyter Notebook ipynb format), otherwise return '.md' (for
the pandoc markdown format).
:param list[str] text_list: list of strings using DocOnce syntax
:return: list of formats
:rtype: list[str]
Create content and TOC for building a jupyter-book version 0.8: https://jupyterbook.org/intro
This function is called directly from bin/doconce
Pretty print help string and command line options
Help function to print help and formatted command line options for doconce jupyterbook
Helper function to read and process a file with titles
Read the file containing titles and process them according to the number of jupyter-book chapters and sections.
len(sec_list) should be the same as len(chapters), and its elements can be empty lists
:param str titles_opt: 'auto' or file containing titles
:param list[str] chapters: DocOnce texts consisting in Jupyter-book chapters
:param list[list[str]] sec_list: DocOnce texts consisting in Jupyter-book sections.
:return: tuple with chapter and section titles
:rtype: (list[str], list[list[str]])
Read the content of a file to list
Verify the existence of a file, then read it to a list by
stripping newlines. The function aborts the program if the file does not exist.
:param str file: Path to an existing file
:return: list of strings
:rtype: list[str]
Fix links in jupyter-book chapters/sections so that they can target destinations in other files
Prepend a filename to all links' destinations e.g. <a href="#Langtangen_2012"> becomes
<a href="02_jupyterbook.html#Langtangen_2012">
:param list[str] chunks: DocOnce texts consisting in Jupyter-book chapters/sections
:param list[str] chunk_basenames: file basenames for jupyter-book chapters/sections
:return: chunks with corrected links
:rtype: Tuple[list[str], list[list[str]]]
Split the text of a doconce file by a regex string.
Split the text of a doconce file by a separator regex (e.g. the values of
the INLINE_TAGS dictionary from common.py) and return the chunks of text.
Note that the first chunk contains any text before the first separator.
:param str filestr: text string
:param str separator: regex text, e.g. INLINE_TAGS['chapter'], see common.py
:return: list of text chunks
:rtype: list[str]
Split a Jupyter notebook based on filenames present in its blocks
Given the text of a Jupyter notebook marked with the output filename
in comments (e.g. <!-- jupyter-book 02_mybook.ipynb -->), return a list of
Jupyter notebooks separated accordingly.
:param str ipynb_text: ipynb code marked with individual filenames i.e. <!-- jupyter-book 02_mybook.ipynb -->
:param list[str] filenames: filenames
:return: ipynb_texts with the ipynb code for each block
:rtype: list[str]
Helper function to extract and assign titles to jupyter-book chapters/sections (here called chunks)
Jupyter-book files must have a # header with the title (see doc jupyter-book >
Types of content source files > Rules for all content types). This function
extracts title from the title file or from the headers given by the separator
provided in the options. If no title is found, provide a default title as e.g.
03_mydoconcefile.
:param list[str] chunks: list of text string
:param list[str] title_list: titles for the chunks. Empty if --titles is used
:param str sep: separator: chapter|section|subsection
:param str sep2: second separator in case the first fails: chapter|section|subsection
:param dict tags: tag patterns, e.g. INLINE_TAGS from common.py
:param str chapter_formatter: formatter for default filenames
:return: tuple with the chunks of text having a # header, titles, titles detected
:rtype: (list[str], list[str], list[str])
Get the list of options for doconce jupyterbook Get the list of opitions for doconce in general Print help Check options NB: _legal_command_line_options allows options defined in misc.py/global.py Destination directories Get options Check if the file exists, then read it in cd into the DocOnce file's directory, then fix dest and dest_toc fix dest, dest_roc, and finally dirnamedirname = '' NOTE: The following is a reworking of code from doconce.py > format_driver always start with clean log file with errors Run parts of file2file code in format_driver. Cannot use it directly because file2file writes to file. Consider to modularize file2file Remove pandoc's title/author/date metadata, which does not get rendered appropriately in markdown/jupyter-book. Consider to write this metadata to the _config.yml file Remove TOC tag Format citations and add bibliography in DocOnce's html format Delete any non-printing characters, commands, and comments Using regex: No-regex method. This could be an alternative to the previous regex Description of relevant variables sep : Divide the text in jupyter-book chapters, see --sep chapters : ['whole chapter 1', 'whole chapter 2', 'summary'] chapter_titles : ['Chapter 1', 'Chapter 2', 'Summary'] chapter_titles_auto : ['Header 1', 'Header 2', 'Last Header in DocOnce file'] chapter_basenames : ['01_mybook', '02_mybook', '03_mybook'] If sep_section is not empty, these variables become relevant sep_section : Subdivide the jupyter-book chapters in sections, see --sep_section sec_list : [['subsection1','subsection2], ['subsection1'] , []] sec_title_list : [['Subsection 1.1', 'Subsection 1.2'], ['Subsection 2.1'], []] sec_title_list_auto : [['Subheader 1.1', 'Subheader 1.2'], ['Subheader 2.1'], ['Last Subheader in DocOnce file']] sec_basename_list : [['01_01_mybook', '01_02_mybook'], ['02_01_mybook'], []] Split the DocOnce file in jupyter-book chapters Extract all jupyter-book sections based on --sep_section Any text before the first 
jupyter-book section is part of a jupyter-book chapter, the rest consists in jupyter-book sections Write text before the first jupyter-book section as chapter The text after the first match of sep_section are jupyter-book sections Get titles from title file in options Extract and write titles to each jupyter-book chapter/section. Also get the basenames for the files to be created later The following contains section titles extracted automatically Print out the detected titles if --show_titles was used Description of relevant variables all_texts : ['====== Chapter 1 ======\n Some text', '====== Subsection 1.1 ======\n Some text', ..] all_basenames : ['01_mybook','01_01_mybook','01_02_mybook','02_mybook'] all_suffix : ['.md','.md','.ipynb','.md'] all_fnames : ['01_mybook.md','01_01_mybook.md','01_02_mybook.ipynb','02_mybook.md'] all_titles : ['Chapter 1','Subsection 1.1', 'Subsection 1.2','Chapter 2'] all_nestings : [0, 1, 1, 0] 0 or 1 for jupyter-book chapters or sections, respectively filestr_md : DocOnce input formatted to pandoc filestr_ipynb : DocOnce input formatted to ipynb all_texts_md : list of all chapters and sections from filestr_md all_texts_ipynb : list of all chapters and sections from filestr_ipynb all_texts_formatted : list of chapters and sections from filestr_ipynb Flatten all texts, basenames, titles, etc for jupyter-book chapters and sections Create markdown or ipynb filenames for each jupyter-book chapter section Mark the beginning of each jupyter-book chapter and section with its filename in a comment Merge all jupyter-book chapters and sections back to a single DocOnce text. Then convert to pandoc and ipynb Split the texts (formatted to md and ipynb) to individual jupyter-book chapters/sections Flatten the formatted texts Fix all links whose destination is in a different document e.g. <a href="Langtangen_2012"> to <a href="02_jupyterbook.htmlLangtangen_2012"> Fix the path of FIGUREs and MOVIEs. 
NB: at the time of writing (03-2021) movies are not supported by Jupyter Book Write chapters and sections to file Create the _toc.yml file Skip separators used for illustration of doconce syntax inside !bc and !ec directives An ipynb is a python dictionary Find the markings with filename in the ipynb blocks For each file create a dictionary with the relevant ipynb blocks, then convert to text title list can be empty (when --titles='auto') Process each chunk: detect and write title in the header of a chapter/section Try to find and remove any title from headers in each chunk Same, this time using the second optional separator Set default title Keep any detected title before overriding them with the file indicated in --titles Use title from the titles files. This gets skipped if there is no title file Skip any empty line in title file Write to title list and chunk NB: create_title above removed any detected title from chunk, thus avoiding duplicate titles Get the title Convert each text to pandoc, or to ipynb if the text contains any computation Get the relative path between the destination folders Produce the text for _toc.yml Write the sections html links. label{} has already been converted html links markdown links Flatten the texts and filenames, then get the basenames from filenames+ flatten(sec_list)+ flatten(sec_basename_list) Find all link destinations and create a dictionary tag2file[tag] = destination file Fix all href in links by prepending the destination filename movies in .md and .ipynb. NB: jupyterbook does not support movies images in .md images in .ipynb. See ipynb.py images in MarkDown syntax commented images and movies in ipynb. See ipynb.py commented images in md Warn that FIGUREs cannot work in Jupyter Book | 12,176 | en | 0.702952 |
from collections import defaultdict
from hsst.utility import search
from hsst.utility.graph import SemanticGraph
class SubgraphEnumeration(object):
    """Mutable search state for enumerating connected subgraphs of a graph.

    Driven by a depth-first search (see enumerate_dfs_subgraphs): the driver
    repeatedly calls generate_moves()/move()/undo_move(), and every state
    reached is recorded in self.subgraphs as a (node_set, edge_set) snapshot.
    """
    def __init__(self, graph, node_set_size_limit=0):
        """Build lookup tables for the given graph.

        :param graph: object with ``nodes`` and ``edges`` attributes, where
            each edge has ``from_node`` and ``to_node``
        :param int node_set_size_limit: maximum number of nodes per subgraph;
            0 means unlimited
        """
        self.full_node_set = graph.nodes
        self.full_edge_set = graph.edges
        self.current_node_set = set()
        self.current_edge_set = set()
        self.visited_states = set()
        self.subgraphs = []
        self.node_set_size_limit = node_set_size_limit
        # Create fast lookup structures
        self.edges_by_source = defaultdict(set)
        self.edges_by_destination = defaultdict(set)
        self.edges_by_both = defaultdict(set)
        self.labels = defaultdict(list)
        for edge in self.full_edge_set:
            # labels keeps all (possibly parallel) edges per ordered node pair
            self.labels[(edge.from_node, edge.to_node)].append(edge)
            self.edges_by_source[edge.from_node].add(edge.to_node)
            self.edges_by_destination[edge.to_node].add(edge.from_node)
            self.edges_by_both[edge.from_node].add(edge.to_node)
            self.edges_by_both[edge.to_node].add(edge.from_node)
    def generate_moves(self):
        """Return the legal moves from the current state as (node, edge_set) pairs."""
        # Generate all possible moves
        # Each move consists of a single node and the set of edges that connect that node to the nodes
        # in the currentNodeSet E.g. ( node, { (label1, node, node1), (label2, node2, node) ... } )
        # Moves are temporarily stored as a dictionary so that the full set of edges associated with each move
        # can be constructed
        moves = []
        temporary_moves = {}
        # Check if the limit for the currentNodeSet size has been reached
        if 0 < self.node_set_size_limit <= len(self.current_node_set):
            return moves
        # The initial step is handled separately
        if not self.current_node_set:
            for node in self.full_node_set:
                moves.append((node, set()))
            return moves
        # The set of possible nodes consists of nodes that are not yet in the currentNodeSet
        possible_nodes = self.full_node_set - self.current_node_set
        # For every possible node, we need to check that it shares an edge with a node in the currentNodeSet
        # Otherwise we would violate the 'connected' constraint
        for possible_node in possible_nodes:
            destination_nodes = self.edges_by_source[possible_node] & self.current_node_set
            source_nodes = self.edges_by_destination[possible_node] & self.current_node_set
            if len(destination_nodes) > 0 or len(source_nodes) > 0:
                # There is at least one node in the current node set that we can connect the possible_node to
                # Check if this state has been explored already
                if self.id(node=possible_node) in self.visited_states:
                    continue
                # If not, it is an acceptable move and we just need to construct the edge set that connects
                # the possible_node to the current node set
                edges = set(
                    edge for source_node in source_nodes for edge in self.labels[(source_node, possible_node)]) | \
                    set(edge for destination_node in destination_nodes for edge in
                        self.labels[(possible_node, destination_node)])
                temporary_moves[possible_node] = edges
        for move in temporary_moves:
            moves.append((move, temporary_moves[move]))
        return moves
    def move(self, move):
        """Apply a move, record the new state, and snapshot the subgraph."""
        # Move is a tuple (node, edge_set)
        node, edge_set = move
        self.current_node_set.add(node)
        self.current_edge_set |= edge_set
        self.visited_states.add(self.id())
        self.subgraphs.append((self.current_node_set.copy(), self.current_edge_set.copy()))
    def undo_move(self, move):
        """Revert a previously applied move (visited_states is kept on purpose)."""
        # Move is a tuple (node, edge_set)
        node, edge_set = move
        self.current_node_set.remove(node)
        self.current_edge_set -= edge_set
    def solved(self):
        """Never 'solved': the driver exhausts the whole search space."""
        return False
    def id(self, node=None):
        """Return a canonical key for the current state (optionally plus ``node``).

        The key is the node ids joined in sorted order, so the same node set
        always maps to the same string regardless of insertion order.
        """
        # NOTE(review): `if node:` relies on node objects being truthy; a falsy
        # node would fall through to the no-argument branch -- confirm
        if node:
            return " ".join(str(x) for x in sorted(self.current_node_set | {node}, key=lambda x: x.node_id))
        else:
            return " ".join(str(x) for x in sorted(self.current_node_set, key=lambda x: x.node_id))
def enumerate_dfs_subgraphs(graph, df_limit=100):
enumeration = SubgraphEnumeration(graph, node_set_size_limit=df_limit)
search.df(enumeration, df_limit)
return set(SemanticGraph(nodes, edges, nonterminal_count=0) for nodes, edges in enumeration.subgraphs)
| hsst/utility/dfs_subgraph_enumeration.py | 4,590 | Create fast lookup structures Generate all possible moves Each move consists of a single node and the set of edges that connect that node to the nodes in the currentNodeSet E.g. ( node, { (label1, node, node1), (label2, node2, node) ... } ) Moves are temporarily stored as a dictionary so that the full set of edges associated with each move can be constructed Check if the limit for the currentNodeSet size has been reached The initial step is handled separately The set of possible nodes consists of nodes that are not yet in the currentNodeSet For every possible node, we need to check that it shares an edge with a node in the currentNodeSet Otherwise we would violate the 'connected' constraint There is at least one node in the current node set that we can connect the possible_node to Check if this state has been explored already If not, it is an acceptable move and we just need to construct the edge set that connects the possible_node to the current node set Move is a tuple (node, edge_set) Move is a tuple (node, edge_set) | 1,043 | en | 0.895089 |
import os
import h5py
import numpy as np
from keras import backend as K
from keras.layers import Activation, BatchNormalization, Conv2D, Dense, Dot, \
Dropout, Flatten, Input, MaxPooling2D, GlobalAveragePooling2D
from keras import regularizers
from keras.layers import Average as KerasAverage
from keras.models import Sequential, Model
from keras.optimizers import Adam, SGD
from keras.engine.topology import Layer
from .layers import LayerNormalization, CustomSoftmax
from .tf_implementations.loss_functions import loss_factory
class TotalReshape(Layer):
def __init__(self, target_shape, **kwargs):
self.target_shape = target_shape
super(TotalReshape, self).__init__(**kwargs)
def compute_output_shape(self, input_shape):
return tuple(
x if x != -1 else None
for x in self.target_shape
)
def call(self, x):
return K.reshape(x, self.target_shape)
class BaseReducer(Layer):
def __init__(self, **kwargs):
super(BaseReducer, self).__init__(**kwargs)
def compute_output_shape(self, input_shape):
return input_shape[:-1]
class Average(BaseReducer):
def call(self, x):
return K.mean(x, axis=-1)
class Max(BaseReducer):
def call(self, x):
return K.max(x, axis=-1)
class TopKAverage(BaseReducer):
def __init__(self, k, **kwargs):
self.k = k
super(TopKAverage, self).__init__(**kwargs)
def call(self, x):
if K.backend() == "tensorflow":
tf = K.tf
x, _ = tf.nn.top_k(x, self.k, sorted=False)
return K.mean(x, axis=-1)
else:
raise NotImplementedError("TopKAverage is not implemented for "
" %s backend" % (K.backend(),))
def reducer_factory(reducer, k=3):
# Set the type of the reducer to be used
if reducer == "max":
return Max()
elif reducer == "average":
return Average()
elif reducer == "topK":
return TopKAverage(k)
def mae(y_true, y_pred):
""" Implementation of Mean average error
"""
return K.mean(K.abs(y_true - y_pred))
def mde(y_true, y_pred):
return K.mean(K.cast(
K.abs(K.argmax(y_true, axis=1) - K.argmax(y_pred, axis=1)),
K.floatx()
))
def create_simple_cnn(input_shape, kernel_regularizer=None):
common_params = dict(
filters=32,
kernel_size=3,
kernel_regularizer=kernel_regularizer
)
return Sequential([
Conv2D(input_shape=input_shape, **common_params),
BatchNormalization(),
Activation("relu"),
Conv2D(**common_params),
BatchNormalization(),
Activation("relu"),
Conv2D(**common_params),
BatchNormalization(),
Activation("relu"),
Conv2D(**common_params),
BatchNormalization(),
Activation("relu"),
Conv2D(**common_params),
BatchNormalization()
])
def create_simple_cnn_ln(input_shape, kernel_regularizer=None):
common_params = dict(
filters=32,
kernel_size=3,
kernel_regularizer=kernel_regularizer
)
return Sequential([
Conv2D(input_shape=input_shape, **common_params),
LayerNormalization(),
Activation("relu"),
Conv2D(**common_params),
LayerNormalization(),
Activation("relu"),
Conv2D(**common_params),
LayerNormalization(),
Activation("relu"),
Conv2D(**common_params),
LayerNormalization(),
Activation("relu"),
Conv2D(**common_params),
LayerNormalization()
])
def create_dilated_cnn_receptive_field_25(
input_shape,
kernel_regularizer=None
):
return Sequential([
Conv2D(
filters=32,
kernel_size=5,
input_shape=input_shape,
kernel_regularizer=kernel_regularizer
),
BatchNormalization(),
Activation("relu"),
Conv2D(
filters=32,
kernel_size=5,
kernel_regularizer=kernel_regularizer
),
BatchNormalization(),
Activation("relu"),
Conv2D(
filters=32,
kernel_size=5,
kernel_regularizer=kernel_regularizer,
dilation_rate=2
),
BatchNormalization(),
Activation("relu"),
Conv2D(
filters=32,
kernel_size=3,
kernel_regularizer=kernel_regularizer,
),
BatchNormalization(),
Activation("relu"),
Conv2D(
filters=32,
kernel_size=3,
kernel_regularizer=kernel_regularizer
),
BatchNormalization(),
Activation("relu"),
Conv2D(
filters=32,
kernel_size=3,
kernel_regularizer=kernel_regularizer
),
BatchNormalization(),
Activation("relu"),
Conv2D(
filters=32,
kernel_size=3,
kernel_regularizer=kernel_regularizer
),
BatchNormalization()
])
def create_dilated_cnn_receptive_field_25_with_tanh(
input_shape,
kernel_regularizer=None
):
return Sequential([
Conv2D(
filters=32,
kernel_size=5,
input_shape=input_shape,
kernel_regularizer=kernel_regularizer
),
BatchNormalization(),
Activation("tanh"),
Conv2D(
filters=32,
kernel_size=5,
kernel_regularizer=kernel_regularizer
),
BatchNormalization(),
Activation("tanh"),
Conv2D(
filters=32,
kernel_size=5,
kernel_regularizer=kernel_regularizer,
dilation_rate=2
),
BatchNormalization(),
Activation("tanh"),
Conv2D(
filters=32,
kernel_size=3,
kernel_regularizer=kernel_regularizer,
),
BatchNormalization(),
Activation("tanh"),
Conv2D(
filters=32,
kernel_size=3,
kernel_regularizer=kernel_regularizer
),
BatchNormalization(),
Activation("tanh"),
Conv2D(
filters=32,
kernel_size=3,
kernel_regularizer=kernel_regularizer
),
BatchNormalization(),
Activation("tanh"),
Conv2D(
filters=32,
kernel_size=3,
kernel_regularizer=kernel_regularizer
),
BatchNormalization()
])
def create_hartmann_cnn(input_shape, kernel_regularizer=None):
return Sequential([
Conv2D(filters=32, kernel_size=5, input_shape=input_shape),
Activation("tanh"),
MaxPooling2D(pool_size=(2, 2)),
Conv2D(filters=64, kernel_size=5),
Activation("tanh"),
MaxPooling2D(pool_size=(2, 2))
])
def cnn_factory(name):
cnn_factories = {
"simple_cnn": create_simple_cnn,
"simple_cnn_ln": create_simple_cnn_ln,
"dilated_cnn_receptive_field_25":
create_dilated_cnn_receptive_field_25,
"dilated_cnn_receptive_field_25_with_tanh":
create_dilated_cnn_receptive_field_25_with_tanh,
"hartmann_cnn": create_hartmann_cnn
}
return cnn_factories[name]
def optimizer_factory(optimizer, lr, momentum=None, clipnorm=0.0, clipvalue=1):
# Set the type of optimizer to be used
if optimizer == "Adam":
return Adam(lr=lr, clipnorm=clipnorm, clipvalue=clipvalue)
elif optimizer == "SGD":
return SGD(lr=lr, momentum=momentum, clipnorm=clipnorm,
clipvalue=clipvalue)
def kernel_regularizer_factory(regularizer_factor):
if regularizer_factor == 0.0:
return None
else:
return regularizers.l2(regularizer_factor)
def build_simple_cnn(
input_shape,
create_cnn,
optimizer="Adam",
lr=1e-3,
momentum=None,
clipnorm=0.0,
loss="mse",
reducer="average",
merge_layer="dot-product",
weight_decay=None,
weight_file=None
):
# Make sure that we have a proper input shape
# TODO: Maybe change this to 3, because we finally need only the
# patch_shape?
assert len(input_shape) == 5
# Unpack the input shape to make the code more readable
D, N, W, H, C = input_shape
model = create_cnn(
input_shape=(None, None, C),
kernel_regularizer=weight_decay
)
model.compile(
optimizer=optimizer_factory(
optimizer,
lr=lr,
momentum=momentum,
clipnorm=clipnorm
),
loss=loss_factory(loss)
)
# If there is a weight file specified load the weights
if weight_file:
try:
f = h5py.File(weight_file, "r")
keys = [os.path.join(model.name, w.name)
for l in model.layers for w in l.weights]
weights = [f[os.path.join("model_weights", k)][:] for k in keys]
model.set_weights(weights)
except:
model.load_weights(weight_file, by_name=True)
return model
def build_simple_nn_for_training(
input_shape,
create_cnn,
optimizer="Adam",
lr=1e-3,
momentum=None,
clipnorm=0.0,
loss="emd",
reducer="average",
merge_layer="dot-product",
weight_decay=None,
weight_file=None
):
# Make sure that we have a proper input shape
assert len(input_shape) == 5
# Unpack the input shape to make the code more readable
D, N, W, H, C = input_shape
# Create the two stream inputs
x1_in = Input(shape=input_shape)
x2_in = Input(shape=input_shape)
# Reshape them for input in the CNN
x1 = TotalReshape((-1, W, H, C))(x1_in)
x2 = TotalReshape((-1, W, H, C))(x2_in)
# Create the CNN and extract features from both streams
cnn = create_cnn(input_shape=(W, H, C), kernel_regularizer=weight_decay)
x1 = Flatten()(cnn(x1))
x2 = Flatten()(cnn(x2))
# Compute a kind of similarity between the features of the two streams
x = Dot(axes=-1, normalize=(merge_layer == "cosine-similarity"))([x1, x2])
# Reshape them back into their semantic shape (depth planes, patches, etc)
x = TotalReshape((-1, D, N))(x)
# Compute the final similarity scores for each depth plane
x = reducer_factory(reducer)(x)
# Compute the final output
y = Activation("softmax")(x)
model = Model(inputs=[x1_in, x2_in], outputs=y)
model.compile(
optimizer=optimizer_factory(
optimizer,
lr=lr,
momentum=momentum,
clipnorm=clipnorm
),
loss=loss_factory(loss),
metrics=["accuracy", mae, mde]
)
if weight_file:
model.load_weights(weight_file, by_name=True)
return model
def build_hartmann_network(
input_shape,
create_cnn=create_hartmann_cnn,
optimizer="SGD",
lr=1e-3,
momentum=None,
clipnorm=0.0,
loss=None,
reducer=None,
merge_layer=None,
weight_decay=None,
weight_file=None
):
# Make sure that we have a proper input shape
assert len(input_shape) == 3
# Unpack the input shape to make the code more readable
H, W, C = input_shape
# Create the feature extracting CNN
cnn = create_hartmann_cnn(input_shape=(None, None, C))
# Create the similarity CNN
sim = Sequential([
Conv2D(
filters=2048,
kernel_size=5,
input_shape=K.int_shape(cnn.output)[1:]
),
Activation("relu"),
Conv2D(filters=2048, kernel_size=1),
Activation("relu"),
Conv2D(filters=2, kernel_size=1),
Activation("softmax")
])
# Create the joint model for training
x_in = [Input(shape=input_shape) for i in range(5)]
x = [cnn(xi) for xi in x_in]
x = KerasAverage()(x)
y = sim(x)
model = Model(inputs=x_in, outputs=y)
# Compile all the models
model.compile(
optimizer=optimizer_factory(
optimizer,
lr=lr,
momentum=momentum,
clipnorm=clipnorm
),
loss="categorical_crossentropy",
metrics=["accuracy"]
)
cnn.compile("sgd", "mse") # Just so that we can run predict()
sim.compile("sgd", "mse")
# Attach the cnn and sim to the model in case someone wants to use them
model.cnn = cnn
model.sim = sim
if weight_file:
model.load_weights(weight_file, by_name=True)
return model
def get_nn(name):
models = {
"simple_cnn": build_simple_cnn,
"simple_nn_for_training": build_simple_nn_for_training,
"hartmann": build_hartmann_network
}
return models[name]
| raynet/models.py | 12,739 | Implementation of Mean average error
Set the type of the reducer to be used Set the type of optimizer to be used Make sure that we have a proper input shape TODO: Maybe change this to 3, because we finally need only the patch_shape? Unpack the input shape to make the code more readable If there is a weight file specified load the weights Make sure that we have a proper input shape Unpack the input shape to make the code more readable Create the two stream inputs Reshape them for input in the CNN Create the CNN and extract features from both streams Compute a kind of similarity between the features of the two streams Reshape them back into their semantic shape (depth planes, patches, etc) Compute the final similarity scores for each depth plane Compute the final output Make sure that we have a proper input shape Unpack the input shape to make the code more readable Create the feature extracting CNN Create the similarity CNN Create the joint model for training Compile all the models Just so that we can run predict() Attach the cnn and sim to the model in case someone wants to use them | 1,106 | en | 0.900662 |
"""
module init
"""
from flask import Flask
<<<<<<< HEAD
from config import config_options
from flask_sqlalchemy import SQLAlchemy
import os
=======
from config import DevelopmentConfig
from .views import orders_blue_print
>>>>>>> ba86ec7ade79a936b81e04ee8b80a97cf8f97770
def create_app(DevelopmentConfig):
"""
Function create_app:
creates app and gives it the import name
holds the configuration being used.
registers the orders blueprint
:return: app:
"""
app = Flask(__name__)
app.config.from_object(DevelopmentConfig)
app.register_blueprint(orders_blue_print)
<<<<<<< HEAD
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# set the configurations
app.config.from_object(os.environ['APP_SETTINGS'])
db=SQLAlchemy(app)
# initialiaze the database
db.init_app(app)
with app.app_context():
from .import routes
db.create_all
# register your blueprints here
from app.main import main
from app.auth import auth
app.register_blueprint(main)
app.register_blueprint(auth)
@app.route('/')
def hello():
return "Hello World!"
return app
=======
return app
>>>>>>> ba86ec7ade79a936b81e04ee8b80a97cf8f97770
| app/__init__.py | 1,246 | set the configurations initialiaze the database register your blueprints here | 77 | en | 0.567025 |
# DO NOT EDIT THIS FILE!
#
# All configuration must be done in the `configuration.py` file.
# This file is part of the Peering Manager code and it will be overwritten with
# every code releases.
from __future__ import unicode_literals
import os
import socket
from django.contrib.messages import constants as messages
from django.core.exceptions import ImproperlyConfigured
try:
from peering_manager import configuration
except ImportError:
raise ImproperlyConfigured(
'Configuration file is not present. Please define peering_manager/configuration.py per the documentation.')
VERSION = '0.99-dev'
SECRET_KEY = getattr(configuration, 'SECRET_KEY', '')
ALLOWED_HOSTS = getattr(configuration, 'ALLOWED_HOSTS', [])
BASE_PATH = getattr(configuration, 'BASE_PATH', '')
if BASE_PATH:
BASE_PATH = BASE_PATH.strip('/') + '/' # Enforce trailing slash only
DEBUG = getattr(configuration, 'DEBUG', False)
LOGIN_REQUIRED = getattr(configuration, 'LOGIN_REQUIRED', False)
NAPALM_USERNAME = getattr(configuration, 'NAPALM_USERNAME', '')
NAPALM_PASSWORD = getattr(configuration, 'NAPALM_PASSWORD', '')
NAPALM_TIMEOUT = getattr(configuration, 'NAPALM_TIMEOUT', 30)
NAPALM_ARGS = getattr(configuration, 'NAPALM_ARGS', {})
PAGINATE_COUNT = getattr(configuration, 'PAGINATE_COUNT', 20)
TIME_ZONE = getattr(configuration, 'TIME_ZONE', 'UTC')
MY_ASN = getattr(configuration, 'MY_ASN', -1)
if MY_ASN == -1:
raise ImproperlyConfigured(
'The MY_ASN setting must be set to a valid AS number.')
# PeeringDB URLs
PEERINGDB_API = 'https://peeringdb.com/api/'
PEERINGDB = 'https://peeringdb.com/asn/'
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
try:
from peering_manager.ldap_config import *
LDAP_CONFIGURED = True
except ImportError:
LDAP_CONFIGURED = False
# If LDAP is configured, load the config
if LDAP_CONFIGURED:
try:
import ldap
import django_auth_ldap
# Prepend LDAPBackend to the default ModelBackend
AUTHENTICATION_BACKENDS = [
'django_auth_ldap.backend.LDAPBackend',
'django.contrib.auth.backends.ModelBackend',
]
except ImportError:
raise ImproperlyConfigured(
'LDAP authentication has been configured, but django-auth-ldap is not installed. You can remove peering_manager/ldap_config.py to disable LDAP.'
)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_filters',
'django_tables2',
'peering',
'peeringdb',
'utils',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'utils.middleware.RequireLoginMiddleware',
]
ROOT_URLCONF = 'peering_manager.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR + '/templates/'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'utils.context_processors.settings',
],
},
},
]
WSGI_APPLICATION = 'peering_manager.wsgi.application'
# Database
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Django logging
LOGGING = {
'version': 1,
'formatters': {
'simple': {
'format': '%(asctime)s | %(levelname)s | %(message)s',
'datefmt': '%Y-%m-%d %H:%M:%S',
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'simple',
},
'file': {
'class': 'logging.handlers.TimedRotatingFileHandler',
'filename': 'logs/peering-manager.log',
'when': 'midnight',
'interval': 1,
'backupCount': 5,
'formatter': 'simple',
},
'peeringdb_file': {
'class': 'logging.handlers.TimedRotatingFileHandler',
'filename': 'logs/peeringdb.log',
'when': 'midnight',
'interval': 1,
'backupCount': 5,
'formatter': 'simple',
},
'napalm_file': {
'class': 'logging.handlers.TimedRotatingFileHandler',
'filename': 'logs/napalm.log',
'when': 'midnight',
'interval': 1,
'backupCount': 5,
'formatter': 'simple',
},
},
'loggers': {
'peering.manager.peering': {
'handlers': ['file'],
'level': 'DEBUG',
},
'peering.manager.peeringdb': {
'handlers': ['peeringdb_file'],
'level': 'DEBUG',
},
'peering.manager.napalm': {
'handlers': ['napalm_file'],
'level': 'DEBUG',
},
}
}
# Internationalization
LANGUAGE_CODE = 'en-us'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Authentication URL
LOGIN_URL = '/{}login/'.format(BASE_PATH)
# Messages
MESSAGE_TAGS = {
messages.ERROR: 'danger',
}
# Static files (CSS, JavaScript, Images)
STATIC_ROOT = BASE_DIR + '/static/'
STATIC_URL = '/{}static/'.format(BASE_PATH)
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'project-static'),
)
# Django filters
FILTERS_NULL_CHOICE_LABEL = 'None'
FILTERS_NULL_CHOICE_VALUE = '0'
try:
HOSTNAME = socket.gethostname()
except Exception:
HOSTNAME = 'localhost'
| peering_manager/settings.py | 6,626 | DO NOT EDIT THIS FILE! All configuration must be done in the `configuration.py` file. This file is part of the Peering Manager code and it will be overwritten with every code releases. Enforce trailing slash only PeeringDB URLs Build paths inside the project like this: os.path.join(BASE_DIR, ...) If LDAP is configured, load the config Prepend LDAPBackend to the default ModelBackend Application definition Database Password validation Django logging Internationalization Authentication URL Messages Static files (CSS, JavaScript, Images) Django filters | 554 | en | 0.602535 |
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License'). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the 'license' file accompanying this file. This file is
# distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""This module contains an enumerated type and helper functions related
to different types of training entry points (Python package, Python
script, bash script, etc.)
"""
import enum
import os
class _EntryPointType(enum.Enum):
"""Enumerated type consisting of valid types of training entry points."""
PYTHON_PACKAGE = "PYTHON_PACKAGE"
PYTHON_PROGRAM = "PYTHON_PROGRAM"
COMMAND = "COMMAND"
PYTHON_PACKAGE = _EntryPointType.PYTHON_PACKAGE
PYTHON_PROGRAM = _EntryPointType.PYTHON_PROGRAM
COMMAND = _EntryPointType.COMMAND
def get(path, name): # type: (str, str) -> _EntryPointType
"""
Args:
path (string): Directory where the entry point is located.
name (string): Name of the entry point file.
Returns:
(_EntryPointType): The type of the entry point.
"""
if name.endswith(".sh"):
return _EntryPointType.COMMAND
elif "setup.py" in os.listdir(path):
return _EntryPointType.PYTHON_PACKAGE
elif name.endswith(".py"):
return _EntryPointType.PYTHON_PROGRAM
else:
return _EntryPointType.COMMAND
| src/sagemaker_training/_entry_point_type.py | 1,664 | Enumerated type consisting of valid types of training entry points.
Args:
path (string): Directory where the entry point is located.
name (string): Name of the entry point file.
Returns:
(_EntryPointType): The type of the entry point.
This module contains an enumerated type and helper functions related
to different types of training entry points (Python package, Python
script, bash script, etc.)
Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the 'License'). You may not use this file except in compliance with the License. A copy of the License is located at http://aws.amazon.com/apache2.0/ or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. type: (str, str) -> _EntryPointType | 990 | en | 0.860681 |
# Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for calibration_builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import interpolate
from six.moves import zip
import tensorflow as tf
from object_detection.builders import calibration_builder
from object_detection.protos import calibration_pb2
from object_detection.utils import test_case
class CalibrationBuilderTest(test_case.TestCase):
def test_tf_linear_interp1d_map(self):
"""Tests TF linear interpolation mapping to a single number."""
def graph_fn():
tf_x = tf.constant([0., 0.5, 1.])
tf_y = tf.constant([0.5, 0.5, 0.5])
new_x = tf.constant([0., 0.25, 0.5, 0.75, 1.])
tf_map_outputs = calibration_builder._tf_linear_interp1d(
new_x, tf_x, tf_y)
return tf_map_outputs
tf_map_outputs_np = self.execute(graph_fn, [])
self.assertAllClose(tf_map_outputs_np, [0.5, 0.5, 0.5, 0.5, 0.5])
def test_tf_linear_interp1d_interpolate(self):
"""Tests TF 1d linear interpolation not mapping to a single number."""
def graph_fn():
tf_x = tf.constant([0., 0.5, 1.])
tf_y = tf.constant([0.6, 0.7, 1.0])
new_x = tf.constant([0., 0.25, 0.5, 0.75, 1.])
tf_interpolate_outputs = calibration_builder._tf_linear_interp1d(
new_x, tf_x, tf_y)
return tf_interpolate_outputs
tf_interpolate_outputs_np = self.execute(graph_fn, [])
self.assertAllClose(tf_interpolate_outputs_np, [0.6, 0.65, 0.7, 0.85, 1.])
@staticmethod
def _get_scipy_interp1d(new_x, x, y):
"""Helper performing 1d linear interpolation using SciPy."""
interpolation1d_fn = interpolate.interp1d(x, y)
return interpolation1d_fn(new_x)
def _get_tf_interp1d(self, new_x, x, y):
"""Helper performing 1d linear interpolation using Tensorflow."""
def graph_fn():
tf_interp_outputs = calibration_builder._tf_linear_interp1d(
tf.convert_to_tensor(new_x, dtype=tf.float32),
tf.convert_to_tensor(x, dtype=tf.float32),
tf.convert_to_tensor(y, dtype=tf.float32))
return tf_interp_outputs
np_tf_interp_outputs = self.execute(graph_fn, [])
return np_tf_interp_outputs
def test_tf_linear_interp1d_against_scipy_map(self):
"""Tests parity of TF linear interpolation with SciPy for simple mapping."""
length = 10
np_x = np.linspace(0, 1, length)
# Mapping all numbers to 0.5
np_y_map = np.repeat(0.5, length)
# Scipy and TF interpolations
test_data_np = np.linspace(0, 1, length * 10)
scipy_map_outputs = self._get_scipy_interp1d(test_data_np, np_x, np_y_map)
np_tf_map_outputs = self._get_tf_interp1d(test_data_np, np_x, np_y_map)
self.assertAllClose(scipy_map_outputs, np_tf_map_outputs)
def test_tf_linear_interp1d_against_scipy_interpolate(self):
"""Tests parity of TF linear interpolation with SciPy."""
length = 10
np_x = np.linspace(0, 1, length)
# Requires interpolation over 0.5 to 1 domain
np_y_interp = np.linspace(0.5, 1, length)
# Scipy interpolation for comparison
test_data_np = np.linspace(0, 1, length * 10)
scipy_interp_outputs = self._get_scipy_interp1d(test_data_np, np_x,
np_y_interp)
np_tf_interp_outputs = self._get_tf_interp1d(test_data_np, np_x,
np_y_interp)
self.assertAllClose(scipy_interp_outputs, np_tf_interp_outputs)
@staticmethod
def _add_function_approximation_to_calibration_proto(calibration_proto,
x_array, y_array,
class_id):
"""Adds a function approximation to calibration proto for a class id."""
# Per-class calibration.
if class_id is not None:
function_approximation = (
calibration_proto.class_id_function_approximations
.class_id_xy_pairs_map[class_id])
# Class-agnostic calibration.
else:
function_approximation = (
calibration_proto.function_approximation.x_y_pairs)
for x, y in zip(x_array, y_array):
x_y_pair_message = function_approximation.x_y_pair.add()
x_y_pair_message.x = x
x_y_pair_message.y = y
def test_class_agnostic_function_approximation(self):
"""Tests that calibration produces correct class-agnostic values."""
# Generate fake calibration proto. For this interpolation, any input on
# [0.0, 0.5] should be divided by 2 and any input on (0.5, 1.0] should have
# 0.25 subtracted from it.
class_agnostic_x = np.asarray([0.0, 0.5, 1.0])
class_agnostic_y = np.asarray([0.0, 0.25, 0.75])
calibration_config = calibration_pb2.CalibrationConfig()
self._add_function_approximation_to_calibration_proto(
calibration_config, class_agnostic_x, class_agnostic_y, class_id=None)
def graph_fn():
calibration_fn = calibration_builder.build(calibration_config)
# batch_size = 2, num_classes = 2, num_anchors = 2.
class_predictions_with_background = tf.constant(
[[[0.1, 0.2, 0.3],
[0.4, 0.5, 0.0]],
[[0.6, 0.7, 0.8],
[0.9, 1.0, 1.0]]], dtype=tf.float32)
# Everything should map to 0.5 if classes are ignored.
calibrated_scores = calibration_fn(class_predictions_with_background)
return calibrated_scores
calibrated_scores_np = self.execute(graph_fn, [])
self.assertAllClose(calibrated_scores_np, [[[0.05, 0.1, 0.15],
[0.2, 0.25, 0.0]],
[[0.35, 0.45, 0.55],
[0.65, 0.75, 0.75]]])
def test_multiclass_function_approximations(self):
"""Tests that calibration produces correct multiclass values."""
# Background class (0-index) maps all predictions to 0.5.
class_0_x = np.asarray([0.0, 0.5, 1.0])
class_0_y = np.asarray([0.5, 0.5, 0.5])
calibration_config = calibration_pb2.CalibrationConfig()
self._add_function_approximation_to_calibration_proto(
calibration_config, class_0_x, class_0_y, class_id=0)
# Class id 1 will interpolate using these values.
class_1_x = np.asarray([0.0, 0.2, 1.0])
class_1_y = np.asarray([0.0, 0.6, 1.0])
self._add_function_approximation_to_calibration_proto(
calibration_config, class_1_x, class_1_y, class_id=1)
def graph_fn():
calibration_fn = calibration_builder.build(calibration_config)
# batch_size = 2, num_classes = 2, num_anchors = 2.
class_predictions_with_background = tf.constant(
[[[0.1, 0.2], [0.9, 0.1]],
[[0.6, 0.4], [0.08, 0.92]]],
dtype=tf.float32)
calibrated_scores = calibration_fn(class_predictions_with_background)
return calibrated_scores
calibrated_scores_np = self.execute(graph_fn, [])
self.assertAllClose(calibrated_scores_np, [[[0.5, 0.6], [0.5, 0.3]],
[[0.5, 0.7], [0.5, 0.96]]])
def test_temperature_scaling(self):
"""Tests that calibration produces correct temperature scaling values."""
calibration_config = calibration_pb2.CalibrationConfig()
calibration_config.temperature_scaling_calibration.scaler = 2.0
def graph_fn():
calibration_fn = calibration_builder.build(calibration_config)
# batch_size = 2, num_classes = 2, num_anchors = 2.
class_predictions_with_background = tf.constant(
[[[0.1, 0.2, 0.3], [0.4, 0.5, 0.0]],
[[0.6, 0.7, 0.8], [0.9, 1.0, 1.0]]],
dtype=tf.float32)
calibrated_scores = calibration_fn(class_predictions_with_background)
return calibrated_scores
calibrated_scores_np = self.execute(graph_fn, [])
self.assertAllClose(calibrated_scores_np,
[[[0.05, 0.1, 0.15], [0.2, 0.25, 0.0]],
[[0.3, 0.35, 0.4], [0.45, 0.5, 0.5]]])
def test_temperature_scaling_incorrect_value_error(self):
calibration_config = calibration_pb2.CalibrationConfig()
calibration_config.temperature_scaling_calibration.scaler = 0
calibration_fn = calibration_builder.build(calibration_config)
class_predictions_with_background = tf.constant(
[[[0.1, 0.2, 0.3]]], dtype=tf.float32)
with self.assertRaises(ValueError):
calibration_fn(class_predictions_with_background)
def test_skips_class_when_calibration_parameters_not_present(self):
"""Tests that graph fails when parameters not present for all classes."""
# Only adding calibration parameters for class id = 0, even though class id
# 1 is present in the data.
class_0_x = np.asarray([0.0, 0.5, 1.0])
class_0_y = np.asarray([0.5, 0.5, 0.5])
calibration_config = calibration_pb2.CalibrationConfig()
self._add_function_approximation_to_calibration_proto(
calibration_config, class_0_x, class_0_y, class_id=0)
def graph_fn():
calibration_fn = calibration_builder.build(calibration_config)
# batch_size = 2, num_classes = 2, num_anchors = 2.
class_predictions_with_background = tf.constant(
[[[0.1, 0.2], [0.9, 0.1]],
[[0.6, 0.4], [0.08, 0.92]]],
dtype=tf.float32)
calibrated_scores = calibration_fn(class_predictions_with_background)
return calibrated_scores
calibrated_scores_np = self.execute(graph_fn, [])
self.assertAllClose(calibrated_scores_np, [[[0.5, 0.2], [0.5, 0.1]],
[[0.5, 0.4], [0.5, 0.92]]])
# Run all the test cases in this module when executed directly.
if __name__ == '__main__':
  tf.test.main()
| research/object_detection/builders/calibration_builder_test.py | 10,338 | Adds a function approximation to calibration proto for a class id.
Helper performing 1d linear interpolation using SciPy.
Helper performing 1d linear interpolation using Tensorflow.
Tests that calibration produces correct class-agnostic values.
Tests that calibration produces correct multiclass values.
Tests that graph fails when parameters not present for all classes.
Tests that calibration produces correct temperature scaling values.
Tests parity of TF linear interpolation with SciPy.
Tests parity of TF linear interpolation with SciPy for simple mapping.
Tests TF 1d linear interpolation not mapping to a single number.
Tests TF linear interpolation mapping to a single number.
Tests for calibration_builder.
Lint as: python2, python3 Copyright 2019 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================== Mapping all numbers to 0.5 Scipy and TF interpolations Requires interpolation over 0.5 to 1 domain Scipy interpolation for comparison Per-class calibration. Class-agnostic calibration. Generate fake calibration proto. For this interpolation, any input on [0.0, 0.5] should be divided by 2 and any input on (0.5, 1.0] should have 0.25 subtracted from it. batch_size = 2, num_classes = 2, num_anchors = 2. Everything should map to 0.5 if classes are ignored. Background class (0-index) maps all predictions to 0.5. Class id 1 will interpolate using these values. batch_size = 2, num_classes = 2, num_anchors = 2. batch_size = 2, num_classes = 2, num_anchors = 2. Only adding calibration parameters for class id = 0, even though class id 1 is present in the data. batch_size = 2, num_classes = 2, num_anchors = 2. | 2,216 | en | 0.765115 |
"""
Leetcode 70.
Climbing Stairs.
DP.
类似斐波那契数列:
转移方程: f(n) = f(n-1) + f(n-2).
时间复杂度:O(n)
还是没看明白这跟DP有啥关系,就是递归而已。
"""
class Solution:
def climbStairs(self, n: int) -> int:
res = [-1] * (n)
def dfs(n):
if n == 1:
return 1
if n == 2:
return 2
if res[n-1] == -1:
res[n-1] = dfs(n-1) + dfs(n-2)
return res[n-1]
else:
return res[n-1]
ans = dfs(n)
return ans
| dp/climbing_stairs.py | 597 | Leetcode 70.
Climbing Stairs.
DP.
类似斐波那契数列:
转移方程: f(n) = f(n-1) + f(n-2).
时间复杂度:O(n)
还是没看明白这跟DP有啥关系,就是递归而已。 | 108 | zh | 0.811985 |
import logging
from collections import Counter
from itertools import chain
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances
from pysrc.papers.analysis.text import get_frequent_tokens
logger = logging.getLogger(__name__)
def compute_topics_similarity_matrix(papers_vectors, comps):
    """Build a symmetric topic-by-topic similarity matrix.

    For every pair of topics, similarity is ``1 / (1 + d)`` where ``d`` is the
    mean pairwise distance between the papers assigned to the two topics.
    ``comps`` maps each paper (by position) to its topic index.
    """
    logger.debug('Computing mean similarity between topics embeddings')
    n_topics = len(set(comps))
    pair_dist = pairwise_distances(papers_vectors)
    # Paper row indices per topic, so we can slice the distance matrix.
    members = {t: np.flatnonzero([c == t for c in comps]).tolist()
               for t in range(n_topics)}
    sim = np.zeros(shape=(n_topics, n_topics))
    for a in range(n_topics):
        for b in range(a, n_topics):
            avg_dist = np.mean(pair_dist[members[a], :][:, members[b]])
            # Symmetric by construction: fill both triangles at once.
            sim[a, b] = sim[b, a] = 1 / (1 + avg_dist)
    return sim
def cluster_and_sort(x, max_clusters, min_cluster_size):
    """
    Find an appropriate number of agglomerative (Ward) clusters via binary
    search, then relabel clusters in descending-size order.

    :param x: object representations (X x Features)
    :param max_clusters: upper bound on the number of clusters to try
    :param min_cluster_size: smallest acceptable cluster size
    :return: List[cluster], Hierarchical dendrogram of splits.
    """
    logger.debug('Looking for an appropriate number of clusters,'
                 f'min_cluster_size={min_cluster_size}, max_clusters={max_clusters}')
    # No features at all -> everything goes into a single trivial cluster.
    if x.shape[1] == 0:
        return [0] * x.shape[0], None
    # Binary-search bounds on n_clusters: r is one past the largest count that
    # could still satisfy min_cluster_size; l is the smallest.
    r = min(int(x.shape[0] / min_cluster_size), max_clusters) + 1
    l = 1
    # Search range too narrow to be meaningful -> single cluster.
    if l >= r - 2:
        return [0] * x.shape[0], None
    prev_min_size = None
    while l < r - 1:
        n_clusters = int((l + r) / 2)
        model = AgglomerativeClustering(n_clusters=n_clusters, linkage='ward').fit(x)
        clusters_counter = Counter(model.labels_)
        # Size of the smallest cluster produced at this n_clusters.
        min_size = clusters_counter.most_common()[-1][1]
        logger.debug(f'l={l}, r={r}, n_clusters={n_clusters}, min_cluster_size={min_cluster_size}, '
                     f'prev_min_size={prev_min_size}, min_size={min_size}')
        if min_size < min_cluster_size:
            # Shrinking n_clusters no longer helps -> stop early with the
            # previous (acceptable) clustering state.
            if prev_min_size is not None and min_size <= prev_min_size:
                break
            r = n_clusters + 1
        else:
            # Constraint satisfied; try more clusters.
            l = n_clusters
            prev_min_size = min_size
    logger.debug(f'Number of clusters = {n_clusters}')
    logger.debug(f'Min cluster size = {prev_min_size}')
    logger.debug('Reorder clusters by size descending')
    # Relabel so that cluster 0 is the largest, 1 the next largest, etc.
    reorder_map = {c: i for i, (c, _) in enumerate(clusters_counter.most_common())}
    return [reorder_map[c] for c in model.labels_], model.children_
def get_topics_description(df, comps, corpus, corpus_tokens, corpus_counts, n_words, ignore_comp=None):
    """
    Get words from abstracts that describe the components the best way
    using closest to the 'ideal' frequency vector - [0, ..., 0, 1, 0, ..., 0] in tokens of cosine distance

    :param df: papers table; assumes it has an 'id' column matching the paper
        ids stored in ``comps`` values -- TODO confirm against caller
    :param comps: mapping component -> collection of paper ids
    :param corpus: per-paper tokenized texts (nested iterables)
    :param corpus_tokens: vocabulary, indexed like columns of corpus_counts
    :param corpus_counts: per-paper token count matrix
    :param n_words: number of descriptive words to return per component
    :param ignore_comp: component to exclude (gets an empty description)
    :return: dict component -> list of (token, score) pairs
    """
    logger.debug(f'Generating topics description, ignore_comp={ignore_comp}')
    # Since some of the components may be skipped, use this dict for continuous indexes'
    comp_idx = {c: i for i, c in enumerate(c for c in comps if c != ignore_comp)}
    # In cases with less than 2 components, return frequencies
    if len(comp_idx) < 2:
        comp = list(comp_idx.keys())[0]
        if ignore_comp is None:
            # Single component covering everything: rank by raw frequency.
            most_frequent = get_frequent_tokens(chain(*chain(*corpus)))
            return {comp: list(sorted(most_frequent.items(), key=lambda kv: kv[1], reverse=True))[:n_words]}
        else:
            # Restrict frequency counting to papers of the only kept component.
            most_frequent = get_frequent_tokens(
                chain(*chain(*[corpus[i] for i in np.flatnonzero(df['id'].isin(set(comps[comp])))]))
            )
            return {comp: list(sorted(most_frequent.items(), key=lambda kv: kv[1], reverse=True))[:n_words],
                    ignore_comp: []}
    # Pass paper indices (for corpus_tokens and corpus_counts) instead of paper ids
    comps_ids = {comp: list(np.flatnonzero(df['id'].isin(comp_pids))) for comp, comp_pids in comps.items()}
    result = _get_topics_description_cosine(comps_ids, corpus_tokens, corpus_counts, n_words, ignore_comp=ignore_comp)
    # Log a compact "token:score" summary per component for debugging.
    kwds = [(comp, ','.join([f'{t}:{v:.3f}' for t, v in vs])) for comp, vs in result.items()]
    logger.debug('Description\n' + '\n'.join(f'{comp}: {kwd}' for comp, kwd in kwds))
    return result
def _get_topics_description_cosine(comps, corpus_tokens, corpus_counts, n_words, ignore_comp=None):
    """
    Select words with the frequency vector that is the closest to the 'ideal'
    frequency vector ([0, ..., 0, 1, 0, ..., 0]) in terms of cosine distance.

    :param comps: mapping component -> list of paper row indices into corpus_counts
    :param corpus_tokens: vocabulary, indexed like columns of corpus_counts
    :param corpus_counts: per-paper token count matrix (papers x vocabulary)
    :param n_words: number of descriptive words to return per component
    :param ignore_comp: component to exclude (gets an empty description)
    :return: dict component -> list of (token, score) pairs
    """
    logger.debug('Compute average tokens counts per components')
    # Since some of the components may be skipped, use this dict for continuous indexes
    comp_idx = {c: i for i, c in enumerate(c for c in comps if c != ignore_comp)}
    # FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24; use the
    # builtin float (identical dtype: float64).
    tokens_freqs_per_comp = np.zeros(shape=(len(comp_idx), corpus_counts.shape[1]),
                                     dtype=float)
    for comp, comp_ids in comps.items():
        if comp != ignore_comp:  # Not ignored
            tokens_freqs_per_comp[comp_idx[comp], :] = \
                np.sum(corpus_counts[comp_ids, :], axis=0)
    # Calculate total number of occurrences for each word
    tokens_freqs_total = np.sum(tokens_freqs_per_comp, axis=0)
    # Normalize frequency vector for each word to have length of 1.
    # Column-wise sum of squares is equivalent to diag(A.T @ A) but avoids
    # materializing a (vocabulary x vocabulary) matrix.
    tokens_freqs_norm = np.sqrt(np.sum(tokens_freqs_per_comp ** 2, axis=0))
    tokens_freqs_per_comp = tokens_freqs_per_comp / tokens_freqs_norm
    logger.debug('Take frequent tokens that have the most descriptive frequency vector for topics')
    # Calculate cosine similarity between the frequency vector and
    # [0, ..., 0, 1, 0, ..., 0] for each cluster.
    cluster_mask = np.eye(len(comp_idx))
    distance = tokens_freqs_per_comp.T @ cluster_mask
    # Add some weight for more frequent tokens to get rid of extremely rare ones in the top
    adjusted_distance = distance.T * np.log(tokens_freqs_total)
    result = {}
    for comp in comps.keys():
        if comp == ignore_comp:
            result[comp] = []  # Ignored component
            continue
        c = comp_idx[comp]  # Get the continuous index
        # Highest adjusted score first; keep the top n_words tokens.
        cluster_tokens_idx = np.argsort(-adjusted_distance[c, :])[:n_words].tolist()
        result[comp] = [(corpus_tokens[i], adjusted_distance[c, i]) for i in cluster_tokens_idx]
    return result
| pysrc/papers/analysis/topics.py | 6,272 | Select words with the frequency vector that is the closest to the 'ideal' frequency vector
([0, ..., 0, 1, 0, ..., 0]) in tokens of cosine distance
:param x: object representations (X x Features)
:param max_clusters:
:param min_cluster_size:
:return: List[cluster], Hierarchical dendrogram of splits.
Get words from abstracts that describe the components the best way
using closest to the 'ideal' frequency vector - [0, ..., 0, 1, 0, ..., 0] in tokens of cosine distance
Since some of the components may be skipped, use this dict for continuous indexes' In cases with less than 2 components, return frequencies Pass paper indices (for corpus_tokens and corpus_counts) instead of paper ids Since some of the components may be skipped, use this dict for continuous indexes Not ignored Calculate total number of occurrences for each word Normalize frequency vector for each word to have length of 1 Calculate cosine distance between the frequency vector and [0, ..., 0, 1, 0, ..., 0] for each cluster Add some weight for more frequent tokens to get rid of extremely rare ones in the top Ignored component Get the continuous index | 1,128 | en | 0.860382 |
# Copyright (c) Chris Choy (chrischoy@ai.stanford.edu).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import torch
import torch.nn as nn
from torch.optim import SGD
import MinkowskiEngine as ME
from MinkowskiEngine.modules.resnet_block import BasicBlock, Bottleneck
from examples.resnet import ResNetBase
class BasicBlockShallow(nn.Module):
    """Residual block built from two pointwise (kernel_size=1) sparse
    convolutions with batch norm, a ReLU nonlinearity, and an optional
    downsample projection on the skip path."""

    expansion = 1

    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 bn_momentum=0.1,
                 dimension=-1):
        super(BasicBlockShallow, self).__init__()
        assert dimension > 0
        # First conv carries the stride; both use kernel_size=1, which is what
        # makes this variant "shallow" (no spatial aggregation).
        self.conv1 = ME.MinkowskiConvolution(
            inplanes, planes, kernel_size=1, stride=stride, dilation=dilation, dimension=dimension)
        self.norm1 = ME.MinkowskiBatchNorm(planes, momentum=bn_momentum)
        self.conv2 = ME.MinkowskiConvolution(
            planes, planes, kernel_size=1, stride=1, dilation=dilation, dimension=dimension)
        self.norm2 = ME.MinkowskiBatchNorm(planes, momentum=bn_momentum)
        self.relu = ME.MinkowskiReLU(inplace=True)
        self.downsample = downsample

    def forward(self, x):
        # Skip path: project the input when a downsample module is given.
        identity = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.norm1(self.conv1(x)))
        y = self.norm2(self.conv2(y))
        y += identity
        return self.relu(y)
class MinkUNetBase(ResNetBase):
    """Sparse U-Net backbone built on MinkowskiEngine.

    Four strided encoder stages are followed by four transposed-convolution
    decoder stages; each decoder stage concatenates (``ME.cat``) the encoder
    feature map of the matching resolution (U-Net skip connections).
    Subclasses choose the residual ``BLOCK`` type, per-stage depths
    ``LAYERS`` and channel widths ``PLANES``.
    """
    BLOCK = None
    # NOTE: the original code also assigned ``PLANES = None`` immediately
    # before the concrete default below; the redundant assignment is dropped.
    DILATIONS = (1, 1, 1, 1, 1, 1, 1, 1)
    LAYERS = (2, 2, 2, 2, 2, 2, 2, 2)
    PLANES = (32, 64, 128, 256, 256, 128, 96, 96)
    INIT_DIM = 32
    OUT_TENSOR_STRIDE = 1

    # To use the model, must call initialize_coords before forward pass.
    # Once data is processed, call clear to reset the model before calling
    # initialize_coords
    def __init__(self, in_channels=3, out_channels=20, bn_momentum=0.1, D=3, **kwargs):
        """
        :param in_channels: number of input feature channels
        :param out_channels: number of output channels of the final conv
        :param bn_momentum: momentum used by every batch-norm layer
        :param D: spatial dimension of the sparse tensors
        :param kwargs: free-form config overrides stored as attributes;
            ``quantization_mode`` must be provided this way (or set on the
            instance) before calling :meth:`forward`.
        """
        self.bn_momentum = bn_momentum
        # Store config overrides before the parent constructor builds the net.
        for name, value in kwargs.items():
            if name != "self":
                try:
                    setattr(self, name, value)
                except Exception:  # narrowed from a bare ``except:``
                    print(name, value)
        ResNetBase.__init__(self, in_channels, out_channels, D)

    def network_initialization(self, in_channels, out_channels, D):
        """Instantiate all encoder/decoder layers (invoked by ResNetBase)."""
        # Output of the first conv concated to conv6
        self.inplanes = self.INIT_DIM
        self.conv0p1s1 = ME.MinkowskiConvolution(
            in_channels, self.inplanes, kernel_size=5, dimension=D)
        self.bn0 = ME.MinkowskiBatchNorm(self.inplanes, momentum=self.bn_momentum)

        # Encoder stage 1 (stride 2 -> tensor stride 2).
        self.conv1p1s2 = ME.MinkowskiConvolution(
            self.inplanes, self.inplanes, kernel_size=2, stride=2, dimension=D)
        self.bn1 = ME.MinkowskiBatchNorm(self.inplanes, momentum=self.bn_momentum)
        self.block1 = self._make_layer(self.BLOCK, self.PLANES[0],
                                       self.LAYERS[0])

        # Encoder stage 2 (tensor stride 4).
        self.conv2p2s2 = ME.MinkowskiConvolution(
            self.inplanes, self.inplanes, kernel_size=2, stride=2, dimension=D)
        self.bn2 = ME.MinkowskiBatchNorm(self.inplanes, momentum=self.bn_momentum)
        self.block2 = self._make_layer(self.BLOCK, self.PLANES[1],
                                       self.LAYERS[1])

        # Encoder stage 3 (tensor stride 8).
        self.conv3p4s2 = ME.MinkowskiConvolution(
            self.inplanes, self.inplanes, kernel_size=2, stride=2, dimension=D)
        self.bn3 = ME.MinkowskiBatchNorm(self.inplanes, momentum=self.bn_momentum)
        self.block3 = self._make_layer(self.BLOCK, self.PLANES[2],
                                       self.LAYERS[2])

        # Encoder stage 4 (tensor stride 16).
        self.conv4p8s2 = ME.MinkowskiConvolution(
            self.inplanes, self.inplanes, kernel_size=2, stride=2, dimension=D)
        self.bn4 = ME.MinkowskiBatchNorm(self.inplanes, momentum=self.bn_momentum)
        self.block4 = self._make_layer(self.BLOCK, self.PLANES[3],
                                       self.LAYERS[3])

        # Decoder stage: 16 -> 8; skip concat with block3 output.
        self.convtr4p16s2 = ME.MinkowskiConvolutionTranspose(
            self.inplanes, self.PLANES[4], kernel_size=2, stride=2, dimension=D)
        self.bntr4 = ME.MinkowskiBatchNorm(self.PLANES[4], momentum=self.bn_momentum)
        self.inplanes = self.PLANES[4] + self.PLANES[2] * self.BLOCK.expansion
        self.block5 = self._make_layer(self.BLOCK, self.PLANES[4],
                                       self.LAYERS[4])

        # Decoder stage: 8 -> 4; skip concat with block2 output.
        self.convtr5p8s2 = ME.MinkowskiConvolutionTranspose(
            self.inplanes, self.PLANES[5], kernel_size=2, stride=2, dimension=D)
        self.bntr5 = ME.MinkowskiBatchNorm(self.PLANES[5], momentum=self.bn_momentum)
        self.inplanes = self.PLANES[5] + self.PLANES[1] * self.BLOCK.expansion
        self.block6 = self._make_layer(self.BLOCK, self.PLANES[5],
                                       self.LAYERS[5])

        # Decoder stage: 4 -> 2; skip concat with block1 output.
        self.convtr6p4s2 = ME.MinkowskiConvolutionTranspose(
            self.inplanes, self.PLANES[6], kernel_size=2, stride=2, dimension=D)
        self.bntr6 = ME.MinkowskiBatchNorm(self.PLANES[6], momentum=self.bn_momentum)
        self.inplanes = self.PLANES[6] + self.PLANES[0] * self.BLOCK.expansion
        self.block7 = self._make_layer(self.BLOCK, self.PLANES[6],
                                       self.LAYERS[6])

        # Decoder stage: 2 -> 1; skip concat with the stem output.
        self.convtr7p2s2 = ME.MinkowskiConvolutionTranspose(
            self.inplanes, self.PLANES[7], kernel_size=2, stride=2, dimension=D)
        self.bntr7 = ME.MinkowskiBatchNorm(self.PLANES[7], momentum=self.bn_momentum)
        self.inplanes = self.PLANES[7] + self.INIT_DIM
        self.block8 = self._make_layer(self.BLOCK, self.PLANES[7],
                                       self.LAYERS[7])

        # Pointwise classifier head.
        self.final = ME.MinkowskiConvolution(
            self.PLANES[7] * self.BLOCK.expansion,
            out_channels,
            kernel_size=1,
            bias=True,
            dimension=D)
        self.relu = ME.MinkowskiReLU(inplace=True)

    def forward(self, in_dict, return_feats=False):
        """Run the U-Net on a batch given as ``{'feats': ..., 'coords': ...}``.

        :param in_dict: dict with dense per-point features and coordinates
        :param return_feats: if True, also return the pre-head features
        :return: per-input-point predictions (sliced back from the sparse
            tensor), optionally with the pre-head feature slice.
        """
        if self.quantization_mode == 'average':
            quantization_mode = ME.SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE
        elif self.quantization_mode == 'random':
            quantization_mode = ME.SparseTensorQuantizationMode.RANDOM_SUBSAMPLE
        else:
            # FIX: previously an unknown mode fell through and triggered a
            # confusing NameError below; fail fast with a clear message.
            raise ValueError(
                f"Unknown quantization_mode: {self.quantization_mode!r}; "
                "expected 'average' or 'random'")
        in_field = ME.TensorField(
            features=in_dict['feats'],
            coordinates=in_dict['coords'],
            quantization_mode=quantization_mode,
            minkowski_algorithm=ME.MinkowskiAlgorithm.MEMORY_EFFICIENT,
            device=in_dict['feats'].device,
        )
        x = in_field.sparse()

        # --- Encoder ---
        out = self.conv0p1s1(x)
        out = self.bn0(out)
        out_p1 = self.relu(out)

        out = self.conv1p1s2(out_p1)
        out = self.bn1(out)
        out = self.relu(out)
        out_b1p2 = self.block1(out)

        out = self.conv2p2s2(out_b1p2)
        out = self.bn2(out)
        out = self.relu(out)
        out_b2p4 = self.block2(out)

        out = self.conv3p4s2(out_b2p4)
        out = self.bn3(out)
        out = self.relu(out)
        out_b3p8 = self.block3(out)

        # tensor_stride=16
        out = self.conv4p8s2(out_b3p8)
        out = self.bn4(out)
        out = self.relu(out)
        out = self.block4(out)

        # --- Decoder with U-Net skip connections ---
        # tensor_stride=8
        out = self.convtr4p16s2(out)
        out = self.bntr4(out)
        out = self.relu(out)
        out = ME.cat(out, out_b3p8)
        out = self.block5(out)

        # tensor_stride=4
        out = self.convtr5p8s2(out)
        out = self.bntr5(out)
        out = self.relu(out)
        out = ME.cat(out, out_b2p4)
        out = self.block6(out)

        # tensor_stride=2
        out = self.convtr6p4s2(out)
        out = self.bntr6(out)
        out = self.relu(out)
        out = ME.cat(out, out_b1p2)
        out = self.block7(out)

        # tensor_stride=1
        out = self.convtr7p2s2(out)
        out = self.bntr7(out)
        out = self.relu(out)
        out = ME.cat(out, out_p1)
        out_feats = self.block8(out)

        out = self.final(out_feats)
        # Map sparse-tensor outputs back to the original input points.
        feats = out.slice(in_field).F
        if return_feats:
            return feats, out_feats.slice(in_field).F
        return feats

    @staticmethod
    def add_argparse_args(parent_parser):
        """Register model CLI options on *parent_parser* and return it."""
        parser = parent_parser.add_argument_group("MinkUNet")
        parser.add_argument("--quantization_mode", type=str, default='average')
        return parent_parser

    def convert_sync_batchnorm(self):
        """Return a copy of this model with BN layers converted to SyncBN.

        FIX: the original assigned the converted module to the local name
        ``self``, which had no effect for callers.  The converted module is
        now returned so callers can rebind:
        ``model = model.convert_sync_batchnorm()``.
        """
        return ME.MinkowskiSyncBatchNorm.convert_sync_batchnorm(self)
# --- Depth variants: LAYERS sets residual blocks per stage -------------------

class MinkUNet14(MinkUNetBase):
    # Shallowest variant: one block per stage.
    BLOCK = BasicBlock
    LAYERS = (1, 1, 1, 1, 1, 1, 1, 1)


class MinkUNet18(MinkUNetBase):
    BLOCK = BasicBlock
    LAYERS = (2, 2, 2, 2, 2, 2, 2, 2)


class MinkUNet34(MinkUNetBase):
    BLOCK = BasicBlock
    LAYERS = (2, 3, 4, 6, 2, 2, 2, 2)


class MinkUNet34Shallow(MinkUNetBase):
    # Same depth as MinkUNet34 but with pointwise (kernel_size=1) blocks.
    BLOCK = BasicBlockShallow
    LAYERS = (2, 3, 4, 6, 2, 2, 2, 2)


class MinkUNet50(MinkUNetBase):
    BLOCK = Bottleneck
    LAYERS = (2, 3, 4, 6, 2, 2, 2, 2)


class MinkUNet101(MinkUNetBase):
    BLOCK = Bottleneck
    LAYERS = (2, 3, 4, 23, 2, 2, 2, 2)


# --- Width variants: PLANES sets per-stage channel counts --------------------

class MinkUNet14A(MinkUNet14):
    PLANES = (32, 64, 128, 256, 128, 128, 96, 96)


class MinkUNet14B(MinkUNet14):
    PLANES = (32, 64, 128, 256, 128, 128, 128, 128)


class MinkUNet14C(MinkUNet14):
    PLANES = (32, 64, 128, 256, 192, 192, 128, 128)


class MinkUNet14D(MinkUNet14):
    PLANES = (32, 64, 128, 256, 384, 384, 384, 384)


class MinkUNet18A(MinkUNet18):
    PLANES = (32, 64, 128, 256, 128, 128, 96, 96)


class MinkUNet18B(MinkUNet18):
    PLANES = (32, 64, 128, 256, 128, 128, 128, 128)


class MinkUNet18D(MinkUNet18):
    PLANES = (32, 64, 128, 256, 384, 384, 384, 384)


class MinkUNet34A(MinkUNet34):
    PLANES = (32, 64, 128, 256, 256, 128, 64, 64)


class MinkUNet34B(MinkUNet34):
    PLANES = (32, 64, 128, 256, 256, 128, 64, 32)


class MinkUNet34C(MinkUNet34):
    PLANES = (32, 64, 128, 256, 256, 128, 96, 96)
class MinkUNet34CShallow(MinkUNet34Shallow):
PLANES = (32, 64, 128, 256, 256, 128, 96, 96) | examples/minkunet.py | 12,318 | Copyright (c) Chris Choy (chrischoy@ai.stanford.edu). Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part of the code. To use the model, must call initialize_coords before forward pass. 
Once data is processed, call clear to reset the model before calling initialize_coords Output of the first conv concated to conv6 print(in_dict['feats'].shape, in_dict['coords'].shape) minkowski_algorithm=ME.MinkowskiAlgorithm.SPEED_OPTIMIZED, print(in_field.device) x = ME.SparseTensor(in_dict['feats'], in_dict['coords']) print(in_field) print(in_dict['feats'].shape) print(out.coordinates.shape) print(out.coordinates.shape) print(out.coordinates.shape) print(out.coordinates.shape) tensor_stride=16 print(out.coordinates.shape) tensor_stride=8 tensor_stride=4 tensor_stride=2 tensor_stride=1 if in_dict['rand_shift'] is not None: coords = [] for i in range(len(in_dict['rand_shift'])): coords.append( out.coordinates_at(i) - in_dict['rand_shift'][i]) feats = out.decomposed_features else: coords, feats = out.decomposed_coordinates_and_features feats = out.F feats = torch.cat(feats, axis=0) return feats, out_feats, in_field parser.add_argument("--out_channels", type=int, default=32) | 2,315 | en | 0.6971 |
#! /usr/bin/env python
#############################################################################
##
## Copyright (C) 2015 The Qt Company Ltd.
## Contact: http://www.qt.io/licensing/
##
## This file is part of the build configuration tools of the Qt Toolkit.
##
## $QT_BEGIN_LICENSE:LGPL21$
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and The Qt Company. For licensing terms
## and conditions see http://www.qt.io/terms-conditions. For further
## information use the contact form at http://www.qt.io/contact-us.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 2.1 or version 3 as published by the Free
## Software Foundation and appearing in the file LICENSE.LGPLv21 and
## LICENSE.LGPLv3 included in the packaging of this file. Please review the
## following information to ensure the GNU Lesser General Public License
## requirements will be met: https://www.gnu.org/licenses/lgpl.html and
## http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
##
## As a special exception, The Qt Company gives you certain additional
## rights. These rights are described in The Qt Company LGPL Exception
## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
##
## $QT_END_LICENSE$
##
#############################################################################
import sys;
import re;
import os;
import subprocess;
import errno;
instructions = "This script can be used as follows:\n\
a) if run from tests/auto without any arguments it runs unit tests and then integration tests\n\
b) if run from tests/auto/unit, it runs unit tests\n\
c) if run from tests/auto/integration, it runs integration tests\n\
d) if run from tests/auto with \"unit\" it runs unit tests, and correspondingly for \"integration\""
# Colors
red="\033[41;37m";
redfg="\033[31m";
norm="\033[0m";
green="\033[32m";
grey="\033[37m";
yellow="\033[33m";
# Variables
curtest = "";
numpasses = [0];
numfails = [0];
numcrashes = 0;
numx = [0];
runTests = []
notRunTests = []
# Do not run the tests in these directories.
exclusionList = ["qdeclarativevideo", "qmultimedia_common"]
# Helper function for replacing stuffs
def print_color_string(string, color, match, index):
    # Print `string` with ANSI `color` applied to capture group `index` of
    # `match`.  index == 0 colors the whole line minus its final character
    # (normally the trailing newline).
    if index > 0:
        print string[:match.start(index)] + color + string[match.start(index):match.end(index)] + norm + string[match.end(index):],
    else:
        print color + string[:-1] + norm
# AWK translation
awkfoo = [
(re.compile("\*\*\*\*\*\*\*\*\* Start testing of (\S+)"), yellow, 1, curtest),
(re.compile("^(PASS) "), green, 1, numpasses),
(re.compile("^(FAIL!) "), red, 0, numfails),
(re.compile("^(XFAIL) "), redfg, 1, numx),
(re.compile("^(XPASS) "), redfg, 1, numx),
(re.compile("^(QFATAL) "), red, 0, numx),
(re.compile("^(QDEBUG) "), grey, 0, None),
(re.compile("^(QWARN) "), yellow, 1, None),
(re.compile("\*\*\*\*\*\*\*\*\* Finished testing of (\S+)"), yellow, 1, curtest),
]
#
# This method runs the test cases, color codes the output from the test cases and adds up the passes,
# fails etc.
#
def resultSummary(arg):
try:
pp = subprocess.Popen(arg, shell=False,stderr=subprocess.STDOUT,stdout=subprocess.PIPE);
p = pp.stdout;
try:
while True:
line = p.readline()
if len(line) == 0:
break
for (re, color, index, var) in awkfoo:
m = re.match(line)
if m:
break
if m:
print_color_string(line, color, m, index)
if isinstance(var, list):
var[0] = var[0] + 1;
else:
var = m.groups(index)
else:
print line,
finally:
rc = p.close();
pp.wait();
if pp.returncode < 0:
print red + "Error: '%s' exited with signal %d" % (arg, -pp.returncode) + norm
numcrashes = numcrashes + 1
except OSError, e:
if e.errno == errno.ENOENT:
print red + "Test '%s' not found." % arg + norm;
else:
print red + "Got an exception running '%s': %s " % (arg, e.strerror) + norm
numcrashes = numcrashes + 1
#
# This method finds the test cases that should be run and runs them.
#
def runAllTests(test):
    # Walk the given directory and execute every tst_<name> binary found.
    for filename in os.listdir(test):
        if(re.search("^q", filename)):
            # Skip the dir if it is in the exclusion list.
            exclude = False
            for dir in exclusionList:
                if(re.search(dir, filename)):
                    exclude = True
            if(not(exclude)):
                # FIX: the original used two independent `if` statements, so
                # on Windows the Unix `else` branch clobbered the freshly
                # computed .exe path.  Use a single if/elif/else chain.
                if(os.name=="nt"):
                    # Windows debug-build layout.
                    exePath = test+"\\"+filename+"\\debug\\tst_"+filename+".exe"
                elif(sys.platform=="darwin"):
                    # OS X: plain binary first, then the .app bundle layout.
                    exePath = test +"/"+filename+"/tst_"+filename
                    if not (os.path.exists(exePath)):
                        exePath = test + "/"+filename+"/tst_"+filename+".app/Contents/MacOS/tst_"+filename
                else:
                    # Unix layout.
                    exePath = test +"/"+filename+"/tst_"+filename
                if(os.path.exists(exePath)):
                    runTests.append(filename)
                    resultSummary(exePath)
                else:
                    notRunTests.append(filename)
# --- Script entry point: pick what to run from CWD or a single argument. ---
arguments = sys.argv[1:]
count = len(arguments)
# Find the current working directory.
cwd = os.getcwd()
if(count == 0):
    # No arguments: infer the test set from where the script was started.
    if re.search("auto$", cwd):
        x = 0  # NOTE(review): unused variable, appears to be leftover
        runAllTests("unit")
        runAllTests("integration")
    elif re.search("unit$", cwd):
        runAllTests(cwd)
    elif re.search("integration$", cwd):
        runAllTests(cwd)
    else:
        print "You are running this script from the wrong directory! " + instructions
        exit()
elif(count == 1):
    # Single argument: treat it as a directory of test cases.
    if os.path.exists(sys.argv[1]):
        runAllTests(sys.argv[1])
    else:
        print sys.argv[1] + " test cases do not exist! " + instructions
        exit()
else:
    print "You have passed too many arguments! " + instructions
    exit()
# Final summary of the counters accumulated by resultSummary.
print "Total of all tests: %d passes, %d failures, %d unexpected, %d badnesses." % (numpasses[0], numfails[0], numx[0], numcrashes);
if runTests:
    print "The following test cases were run: "
    for testCase in runTests:
        print testCase
else:
    print "No test cases were run!"
if notRunTests:
    print "The following test cases could not be run: "
    for testCase in notRunTests:
        print testCase
else:
    print "All test cases were run."
| qtmultimedia/tests/auto/runautotests.py | 7,028 | ! /usr/bin/env python Copyright (C) 2015 The Qt Company Ltd. Contact: http://www.qt.io/licensing/ This file is part of the build configuration tools of the Qt Toolkit. $QT_BEGIN_LICENSE:LGPL21$ Commercial License Usage Licensees holding valid commercial Qt licenses may use this file in accordance with the commercial license agreement provided with the Software or, alternatively, in accordance with the terms contained in a written agreement between you and The Qt Company. For licensing terms and conditions see http://www.qt.io/terms-conditions. For further information use the contact form at http://www.qt.io/contact-us. GNU Lesser General Public License Usage Alternatively, this file may be used under the terms of the GNU Lesser General Public License version 2.1 or version 3 as published by the Free Software Foundation and appearing in the file LICENSE.LGPLv21 and LICENSE.LGPLv3 included in the packaging of this file. Please review the following information to ensure the GNU Lesser General Public License requirements will be met: https://www.gnu.org/licenses/lgpl.html and http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html. As a special exception, The Qt Company gives you certain additional rights. These rights are described in The Qt Company LGPL Exception version 1.1, included in the file LGPL_EXCEPTION.txt in this package. $QT_END_LICENSE$ Colors Variables Do not run the tests in these directories. Helper function for replacing stuffs AWK translation This method runs the test cases, color codes the output from the test cases and adds up the passes, fails etc. This method finds the test cases that should be run and runs them.Skip the dir if it is in the exclusion list.Set path to this if on WindowsSet path on OS XSet path to this if on Unix Find the current working directory. | 1,811 | en | 0.825608 |
import warnings
import numpy as np
from nilearn.plotting import cm
from nilearn.plotting.js_plotting_utils import decode
from nilearn.plotting import html_connectome
from .test_js_plotting_utils import check_html
def test_prepare_line():
    """Each edge segment should be flattened with a 0 separator appended."""
    edge_vals = np.asarray([0, 1, 2, 3], dtype=int)
    node_pairs = np.asarray([[0, 1], [0, 2], [2, 3], [8, 9]], dtype=int)
    line_edges, line_nodes = html_connectome._prepare_line(edge_vals, node_pairs)
    expected_nodes = [0, 1, 0, 0, 2, 0, 2, 3, 0, 8, 9, 0]
    expected_edges = [0, 0, 0, 1, 1, 0, 2, 2, 0, 3, 3, 0]
    assert (line_nodes == expected_nodes).all()
    assert (line_edges == expected_edges).all()
def _make_connectome():
adj = np.diag([1.5, .3, 2.5], 2)
adj += adj.T
adj += np.eye(5)
coord = np.arange(5)
coord = np.asarray([coord * 10, -coord, coord[::-1]]).T
return adj, coord
def test_get_connectome():
    """_get_connectome should serialize edge endpoints and treat NaN weights
    like absent edges."""
    adj, coord = _make_connectome()
    connectome = html_connectome._get_connectome(adj, coord)
    con_x = decode(connectome['_con_x'], '<f4')
    # Each stored edge contributes (x_start, x_end, 0) to the x stream.
    expected_x = np.asarray(
        [0, 0, 0,
         0, 20, 0,
         10, 10, 0,
         10, 30, 0,
         20, 0, 0,
         20, 20, 0,
         20, 40, 0,
         30, 10, 0,
         30, 30, 0,
         40, 20, 0,
         40, 40, 0], dtype='<f4')
    assert (con_x == expected_x).all()
    assert {'_con_x', '_con_y', '_con_z', '_con_w', 'colorscale'
            }.issubset(connectome.keys())
    # Symmetric color range from the largest absolute weight (2.5).
    assert (connectome['cmin'], connectome['cmax']) == (-2.5, 2.5)
    # Replacing zero weights with NaN must not change the serialized output.
    adj[adj == 0] = np.nan
    connectome = html_connectome._get_connectome(adj, coord)
    con_x = decode(connectome['_con_x'], '<f4')
    assert (con_x == expected_x).all()
    assert (connectome['cmin'], connectome['cmax']) == (-2.5, 2.5)
def test_view_connectome():
    """Smoke-test view_connectome with various optional arguments."""
    adj, coord = _make_connectome()
    html = html_connectome.view_connectome(adj, coord)
    check_html(html, False, 'connectome-plot')
    # Threshold given as a percentile string, plus a custom title.
    html = html_connectome.view_connectome(adj, coord, '85.3%',
                                           title="SOME_TITLE")
    check_html(html, False, 'connectome-plot')
    assert "SOME_TITLE" in html.html
    html = html_connectome.view_connectome(adj, coord, '85.3%',
                                           linewidth=8.5, node_size=4.2)
    check_html(html, False, 'connectome-plot')
    # Deprecated alias marker_size should still be accepted here.
    html = html_connectome.view_connectome(
        adj, coord, '85.3%', linewidth=8.5, marker_size=np.arange(len(coord)))
    check_html(html, False, 'connectome-plot')
def test_params_deprecation_view_connectome():
    """Deprecated keyword aliases of view_connectome must raise the
    documented DeprecationWarning messages (removal planned for 0.6.0).

    Each call below exercises exactly one deprecated alias; the last call
    uses purely positional arguments and should warn for none of them.
    """
    deprecated_params = {'coords': 'node_coords',
                         'threshold': 'edge_threshold',
                         'cmap': 'edge_cmap',
                         'marker_size': 'node_size',
                         }
    deprecation_msg = (
        'The parameter "{}" will be removed in 0.6.0 release of Nilearn. '
        'Please use the parameter "{}" instead.'
    )
    warning_msgs = {old_: deprecation_msg.format(old_, new_)
                    for old_, new_ in deprecated_params.items()
                    }
    adj, coord = _make_connectome()
    with warnings.catch_warnings(record=True) as raised_warnings:
        html_connectome.view_connectome(adjacency_matrix=adj,
                                        coords=coord,
                                        edge_threshold='85.3%',
                                        edge_cmap=cm.cyan_orange,
                                        linewidth=8.5, node_size=4.2,
                                        )
        html_connectome.view_connectome(adjacency_matrix=adj,
                                        node_coords=coord,
                                        threshold='85.3%',
                                        edge_cmap=cm.cyan_orange,
                                        linewidth=8.5,
                                        node_size=4.2,
                                        )
        html_connectome.view_connectome(adjacency_matrix=adj,
                                        node_coords=coord,
                                        edge_threshold='85.3%',
                                        cmap=cm.cyan_orange,
                                        linewidth=8.5,
                                        node_size=4.2,
                                        )
        html_connectome.view_connectome(adjacency_matrix=adj,
                                        node_coords=coord,
                                        edge_threshold='85.3%',
                                        edge_cmap=cm.cyan_orange,
                                        linewidth=8.5,
                                        marker_size=4.2,
                                        )
        html_connectome.view_connectome(adjacency_matrix=adj,
                                        node_coords=coord,
                                        edge_threshold='85.3%',
                                        edge_cmap=cm.cyan_orange,
                                        linewidth=8.5,
                                        node_size=4.2,
                                        )
        html_connectome.view_connectome(adj,
                                        coord,
                                        '85.3%',
                                        cm.cyan_orange,
                                        8.5,
                                        4.2,
                                        )
    old_params = ['coords', 'threshold', 'cmap', 'marker_size']
    raised_warning_messages = ''.join(
        str(warning.message) for warning in raised_warnings)
    # FIX: removed a leftover debug print of the collected warning messages.
    for old_param_ in old_params:
        assert warning_msgs[old_param_] in raised_warning_messages
def test_get_markers():
    """_get_markers should normalize colors to hex and serialize x coords."""
    coords = np.arange(12).reshape((4, 3))
    colors = ['r', 'g', 'black', 'white']
    markers = html_connectome._get_markers(coords, colors)
    # Matplotlib color names converted to hex strings.
    assert markers["marker_color"] == [
        '#ff0000', '#007f00', '#000000', '#ffffff']
    assert markers['markers_only']
    # The encoded x stream must round-trip to the original x column.
    con_x = decode(markers['_con_x'], '<f4')
    assert np.allclose(con_x, coords[:, 0])
def test_view_markers():
    """Smoke-test view_markers with color and marker-size variants."""
    coords = np.arange(12).reshape((4, 3))
    colors = ['r', 'g', 'black', 'white']
    html = html_connectome.view_markers(coords, colors)
    check_html(html, False, 'connectome-plot')
    # Defaults: no explicit colors.
    html = html_connectome.view_markers(coords)
    check_html(html, False, 'connectome-plot')
    # Scalar, ndarray and plain-list marker sizes should all be accepted.
    html = html_connectome.view_markers(coords, marker_size=15)
    check_html(html, False, 'connectome-plot')
    html = html_connectome.view_markers(
        coords, marker_size=np.arange(len(coords)))
    check_html(html, False, 'connectome-plot')
    html = html_connectome.view_markers(
        coords, marker_size=list(range(len(coords))))
    check_html(html, False, 'connectome-plot')
def test_params_deprecation_view_markers():
    """ Tests whether use of deprecated keyword parameters of view_markers
    raise correct warnings.
    """
    # Map of deprecated parameter name -> its replacement.
    deprecated_params = {'coords': 'marker_coords',
                         'colors': 'marker_color',
                         }
    deprecation_msg = (
        'The parameter "{}" will be removed in 0.6.0 release of Nilearn. '
        'Please use the parameter "{}" instead.'
    )
    # Expected warning text for each deprecated parameter.
    warning_msgs = {old_: deprecation_msg.format(old_, new_)
                    for old_, new_ in deprecated_params.items()
                    }
    coords = np.arange(12).reshape((4, 3))
    colors = ['r', 'g', 'black', 'white']
    with warnings.catch_warnings(record=True) as raised_warnings:
        # First two calls each use one deprecated kwarg; the last two use
        # only new names / positional args and should warn nothing.
        html_connectome.view_markers(coords=coords,
                                     marker_color=colors,
                                     )
        html_connectome.view_markers(marker_coords=coords,
                                     colors=colors,
                                     )
        html_connectome.view_markers(marker_coords=coords,
                                     marker_color=colors,
                                     )
        html_connectome.view_markers(coords,
                                     colors,
                                     )
    old_params = ['coords', 'colors']
    # Exactly one warning per deprecated kwarg actually used.
    assert len(raised_warnings) == 2
    for old_param_, raised_warning_ in zip(old_params, raised_warnings):
        assert warning_msgs[old_param_] == str(raised_warning_.message)
        assert raised_warning_.category is DeprecationWarning
| nilearn/plotting/tests/test_html_connectome.py | 8,330 | Tests whether use of deprecated keyword parameters of view_markers
raise correct warnings. | 91 | en | 0.368531 |
# Ridge-regularized linear regression on the Boston housing dataset,
# trained with full-batch gradient descent.
import numpy as np
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split

# NOTE(review): load_boston is deprecated and removed in scikit-learn >= 1.2;
# consider fetch_california_housing or fetch_openml as a replacement.
dataset = load_boston()
X = dataset.data
y = dataset.target

# Standardize each feature to zero mean / unit variance.
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
n_train = X_train.shape[0]
n_features = X_train.shape[1]

# Weight initialization.
w = np.random.rand(n_features)
b = 1.1
lr = 0.001
epochs = 3000
reg = 0.5  # L2 (ridge) regularization strength on the weights only


def model(x):
    """Linear prediction w.x + b for a single sample x."""
    return w.dot(x) + b


def loss_function(X, y):
    """Mean squared error of the current model over the dataset (X, y).

    (Renamed from the original misspelled ``loss_funtion``; vectorized —
    equivalent to averaging ``(model(xi) - yi) ** 2`` over all samples.)
    """
    preds = X.dot(w) + b
    return np.mean((preds - y) ** 2)


for epoch in range(epochs):
    # Vectorized full-batch gradients: identical to summing the per-sample
    # terms (yi_hat - yi) * xi and (yi_hat - yi) of the original loop.
    y_hat = X_train.dot(w) + b
    err = y_hat - y_train
    grad_w = (2.0 / n_train) * X_train.T.dot(err) + 2.0 * reg * w
    grad_b = (2.0 / n_train) * err.sum()  # bias is not regularized
    w = w - lr * grad_w
    b = b - lr * grad_b

train_loss = loss_function(X_train, y_train)
test_loss = loss_function(X_test, y_test)
print(train_loss)
print(test_loss)
| 1_boston.py | 1,285 | print(X) 权重初始化 偏置项不做正则化处理 | 25 | zh | 0.863565 |
# Name:
# Date:
# proj02: sum
# Write a program that prompts the user to enter numbers, one per line,
# ending with a line containing 0, and keep a running sum of the numbers.
# Only print out the sum after all the numbers are entered
# (at least in your final version). Each time you read in a number,
# you can immediately use it for your sum,
# and then be done with the number just entered.
# Example:
# Enter a number to sum, or 0 to indicate you are finished: 4
# Enter a number to sum, or 0 to indicate you are finished: 5
# Enter a number to sum, or 0 to indicate you are finished: 2
# Enter a number to sum, or 0 to indicate you are finished: 10
# Enter a number to sum, or 0 to indicate you are finished: 0
# The sum of your numbers is: 21
# Keep a running total of user-entered integers; a 0 entry ends input.
# Works on both Python 2 (raw_input) and Python 3 (input).
try:
    raw_input  # Python 2
except NameError:
    raw_input = input  # Python 3

input_sum = 0
while True:
    entry = int(raw_input("Enter a number to sum, or 0 to indicate you are finished: "))
    input_sum += entry  # adding the terminating 0 is a harmless no-op
    if entry == 0:
        break
print("The sum of your numbers is: " + str(input_sum))
| proj02_loops/proj02_01.py | 1,019 | Name: Date: proj02: sum Write a program that prompts the user to enter numbers, one per line, ending with a line containing 0, and keep a running sum of the numbers. Only print out the sum after all the numbers are entered (at least in your final version). Each time you read in a number, you can immediately use it for your sum, and then be done with the number just entered. Example: Enter a number to sum, or 0 to indicate you are finished: 4 Enter a number to sum, or 0 to indicate you are finished: 5 Enter a number to sum, or 0 to indicate you are finished: 2 Enter a number to sum, or 0 to indicate you are finished: 10 Enter a number to sum, or 0 to indicate you are finished: 0 The sum of your numbers is: 21 | 717 | en | 0.918151 |
#!/usr/bin/env python
"""
convert corpus to annotated corpus
This script uses nltk for dependency parsing, which is based on stanford corenlp.
"""
import os
from nltk.parse.stanford import *
import time
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('corenlp_path',
help='Directory to stanford corenlp') # /home/lbf/Documents/stanford-corenlp-full-2017-06-09/
parser.add_argument('--max_block_size', '-mbs', default=1000000, type=int,
help='indicate how much charactors a parser deals at one time, bigger max_block_size will consume more memeory, but should be faster.')
parser.add_argument('--corpus_path', default='./news.toy.txt',
help='Directory to corpus')
parser.add_argument('--annotated_corpus_path', default='./news.toy.annotated.txt',
help='Directory to annotated corpus')
parser.add_argument('--parser_model', '-o', choices=['edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz', 'edu/stanford/nlp/models/parser/nndep/english_UD.gz'],
default='edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz',
help='stanford parser model')
args = parser.parse_args()
class dependency_parser():
def __init__(self, path_to_jar, path_to_models_jar, model_path):
if 'nndep/' in model_path:
self.parser = StanfordNeuralDependencyParser( #StanfordNeuralDependencyParser
path_to_jar=path_to_jar,
path_to_models_jar=path_to_models_jar,
model_path=model_path, java_options='-mx5g') # , corenlp_options='-model modelOutputFile.txt.gz'
if 'lexparser/' in model_path:
self.parser = StanfordDependencyParser(
path_to_jar=path_to_jar,
path_to_models_jar=path_to_models_jar,
model_path=model_path, java_options='-mx10g')
def preprocess_text(self, text):
# hack for nltk
text = text.replace('/', '-')
# hack for output format
text = text.replace('{', '-')
text = text.replace('}', '-')
text = text.replace('[', '-')
text = text.replace(']', '-')
return text
def parse(self, text):
text = self.preprocess_text(text)
out = ''
# print(text)
try:
parse_results = self.parser.raw_parse(text) #, properties={'annotators' : 'depparse'}
for dependency_tree in parse_results:
for index, node in dependency_tree.nodes.items():
if node['word'] is None: # skip root node
continue
dependency_str = ''
for dep, index in node['deps'].items():
dependency_str += ',{}/{}'.format(str(index[0] - 1), dep)
dependency_str = dependency_str[1:]
dependency_str = '{}/{}'.format(node['rel'], node['head'])
out += '{}/{}[{}] '.format(node['word'], node['tag'], dependency_str)
out += "\n"
return out
except AssertionError as e:
print('error when parse "{}"'.format(text))
return ''
dependency_parser = dependency_parser(
path_to_jar=os.path.join(args.corenlp_path, "stanford-corenlp-3.8.0.jar"),
path_to_models_jar=os.path.join(args.corenlp_path, "stanford-corenlp-3.8.0-models.jar"),
model_path=args.parser_model)
# edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz
# edu/stanford/nlp/models/parser/nndep/english_UD.gz
start_time = time.time()
print(dependency_parser.parse("Alice's dog also likes eating sausage from Russia"))
# dependency_parser.parse('Information about the stages 50km to 80km), booking for food and accommodation (R450-38 per night) and downloadable maps are on the Freedom Challenge website call 00 27 84 567 4152 ')
block_size = 0
text = ''
with open(args.corpus_path, "r") as corpus_file, open(args.annotated_corpus_path, "w") as annotated_corpus_file:
for line in corpus_file:
text += line + "\n"
block_size += len(line)
if block_size > args.max_block_size:
out = dependency_parser.parse(text)
annotated_corpus_file.write(out)
block_size = 0
text = ''
out = dependency_parser.parse(text)
annotated_corpus_file.write(out)
end_time = time.time()
print('spend {} minutes'.format((end_time - start_time) / 60))
| vsmlib/embeddings/bofang/annotate_corpus_nltk.py | 4,466 | convert corpus to annotated corpus
This script uses nltk for dependency parsing, which is based on stanford corenlp.
!/usr/bin/env python /home/lbf/Documents/stanford-corenlp-full-2017-06-09/StanfordNeuralDependencyParser , corenlp_options='-model modelOutputFile.txt.gz' hack for nltk hack for output format print(text), properties={'annotators' : 'depparse'} skip root node edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz edu/stanford/nlp/models/parser/nndep/english_UD.gz dependency_parser.parse('Information about the stages 50km to 80km), booking for food and accommodation (R450-38 per night) and downloadable maps are on the Freedom Challenge website call 00 27 84 567 4152 ') | 690 | en | 0.541785 |
#!/usr/bin/python
#
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Import library functions we need
import RPi.GPIO as GPIO
import struct
import sys
import os
import subprocess
from time import sleep
try:
raw_input # Python 2
except NameError:
raw_input = input # Python 3
bounce = 25
if len(sys.argv) > 2:
cmd = sys.argv[1].lower()
pin = int(sys.argv[2])
GPIO.setmode(GPIO.BOARD)
GPIO.setwarnings(False)
if cmd == "pwm":
#print("Initialised pin "+str(pin)+" to PWM")
try:
freq = int(sys.argv[3])
except:
freq = 100
GPIO.setup(pin,GPIO.OUT)
p = GPIO.PWM(pin, freq)
p.start(0)
while True:
try:
data = raw_input()
if 'close' in data:
sys.exit(0)
p.ChangeDutyCycle(float(data))
except (EOFError, SystemExit): # hopefully always caused by us sigint'ing the program
GPIO.cleanup(pin)
sys.exit(0)
except Exception as ex:
print("bad data: "+data)
elif cmd == "buzz":
#print("Initialised pin "+str(pin)+" to Buzz")
GPIO.setup(pin,GPIO.OUT)
p = GPIO.PWM(pin, 100)
p.stop()
while True:
try:
data = raw_input()
if 'close' in data:
sys.exit(0)
elif float(data) == 0:
p.stop()
else:
p.start(50)
p.ChangeFrequency(float(data))
except (EOFError, SystemExit): # hopefully always caused by us sigint'ing the program
GPIO.cleanup(pin)
sys.exit(0)
except Exception as ex:
print("bad data: "+data)
elif cmd == "out":
#print("Initialised pin "+str(pin)+" to OUT")
GPIO.setup(pin,GPIO.OUT)
if len(sys.argv) == 4:
GPIO.output(pin,int(sys.argv[3]))
while True:
try:
data = raw_input()
if 'close' in data:
sys.exit(0)
data = int(data)
except (EOFError, SystemExit): # hopefully always caused by us sigint'ing the program
GPIO.cleanup(pin)
sys.exit(0)
except:
if len(sys.argv) == 4:
data = int(sys.argv[3])
else:
data = 0
if data != 0:
data = 1
GPIO.output(pin,data)
elif cmd == "in":
#print("Initialised pin "+str(pin)+" to IN")
bounce = float(sys.argv[4])
def handle_callback(chan):
sleep(bounce/1000.0)
print(GPIO.input(chan))
if sys.argv[3].lower() == "up":
GPIO.setup(pin,GPIO.IN,GPIO.PUD_UP)
elif sys.argv[3].lower() == "down":
GPIO.setup(pin,GPIO.IN,GPIO.PUD_DOWN)
else:
GPIO.setup(pin,GPIO.IN)
print(GPIO.input(pin))
GPIO.add_event_detect(pin, GPIO.BOTH, callback=handle_callback, bouncetime=int(bounce))
while True:
try:
data = raw_input()
if 'close' in data:
sys.exit(0)
except (EOFError, SystemExit): # hopefully always caused by us sigint'ing the program
GPIO.cleanup(pin)
sys.exit(0)
elif cmd == "byte":
#print("Initialised BYTE mode - "+str(pin)+)
list = [7,11,13,12,15,16,18,22]
GPIO.setup(list,GPIO.OUT)
while True:
try:
data = raw_input()
if 'close' in data:
sys.exit(0)
data = int(data)
except (EOFError, SystemExit): # hopefully always caused by us sigint'ing the program
GPIO.cleanup()
sys.exit(0)
except:
data = 0
for bit in range(8):
if pin == 1:
mask = 1 << (7 - bit)
else:
mask = 1 << bit
GPIO.output(list[bit], data & mask)
elif cmd == "borg":
#print("Initialised BORG mode - "+str(pin)+)
GPIO.setup(11,GPIO.OUT)
GPIO.setup(13,GPIO.OUT)
GPIO.setup(15,GPIO.OUT)
r = GPIO.PWM(11, 100)
g = GPIO.PWM(13, 100)
b = GPIO.PWM(15, 100)
r.start(0)
g.start(0)
b.start(0)
while True:
try:
data = raw_input()
if 'close' in data:
sys.exit(0)
c = data.split(",")
r.ChangeDutyCycle(float(c[0]))
g.ChangeDutyCycle(float(c[1]))
b.ChangeDutyCycle(float(c[2]))
except (EOFError, SystemExit): # hopefully always caused by us sigint'ing the program
GPIO.cleanup()
sys.exit(0)
except:
data = 0
elif cmd == "mouse": # catch mice button events
file = open( "/dev/input/mice", "rb" )
oldbutt = 0
def getMouseEvent():
global oldbutt
global pin
buf = file.read(3)
pin = pin & 0x07
button = ord( buf[0] ) & pin # mask out just the required button(s)
if button != oldbutt: # only send if changed
oldbutt = button
print(button)
while True:
try:
getMouseEvent()
except:
file.close()
sys.exit(0)
elif cmd == "kbd": # catch keyboard button events
try:
while not os.path.isdir("/dev/input/by-path"):
sleep(10)
infile = subprocess.check_output("ls /dev/input/by-path/ | grep -m 1 'kbd'", shell=True).strip()
infile_path = "/dev/input/by-path/" + infile
EVENT_SIZE = struct.calcsize('llHHI')
file = open(infile_path, "rb")
event = file.read(EVENT_SIZE)
while event:
(tv_sec, tv_usec, type, code, value) = struct.unpack('llHHI', event)
#if type != 0 or code != 0 or value != 0:
if type == 1:
# type,code,value
print("%u,%u" % (code, value))
event = file.read(EVENT_SIZE)
print("0,0")
file.close()
sys.exit(0)
except:
file.close()
sys.exit(0)
elif len(sys.argv) > 1:
cmd = sys.argv[1].lower()
if cmd == "rev":
print(GPIO.RPI_REVISION)
elif cmd == "ver":
print(GPIO.VERSION)
elif cmd == "info":
print(GPIO.RPI_INFO)
else:
print("Bad parameters - in|out|pwm|buzz|byte|borg|mouse|kbd|ver|info {pin} {value|up|down}")
print(" only ver (gpio version) and info (board information) accept no pin parameter.")
else:
print("Bad parameters - in|out|pwm|buzz|byte|borg|mouse|kbd|ver|info {pin} {value|up|down}")
| packages/node_modules/@node-red/nodes/core/hardware/nrgpio.py | 7,702 | !/usr/bin/python Copyright JS Foundation and other contributors, http://js.foundation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Import library functions we need Python 2 Python 3print("Initialised pin "+str(pin)+" to PWM") hopefully always caused by us sigint'ing the programprint("Initialised pin "+str(pin)+" to Buzz") hopefully always caused by us sigint'ing the programprint("Initialised pin "+str(pin)+" to OUT") hopefully always caused by us sigint'ing the programprint("Initialised pin "+str(pin)+" to IN") hopefully always caused by us sigint'ing the programprint("Initialised BYTE mode - "+str(pin)+) hopefully always caused by us sigint'ing the programprint("Initialised BORG mode - "+str(pin)+) hopefully always caused by us sigint'ing the program catch mice button events mask out just the required button(s) only send if changed catch keyboard button eventsif type != 0 or code != 0 or value != 0: type,code,value | 1,402 | en | 0.812733 |
# Import Basic modules
import numpy as np
import os
# Import everything needed to edit video clips
from moviepy.editor import *
from moviepy.Clip import *
from moviepy.video.VideoClip import *
from moviepy.config import get_setting # ffmpeg, ffmpeg.exe, etc...
class AudioProcessing:
    # documentation string, which can be accessed via ClassName.__doc__
    """Container for audio-processing operations on a single input file.

    The class currently only records the input file; every processing
    method below is an unimplemented stub whose body is just a docstring.
    NOTE(review): the original docstrings here were copy-pasted from a
    slide-detection class and described unrelated behavior; they have been
    replaced with honest stub descriptions.
    """

    def __init__(self, inputFile):
        # Path or handle of the media file to process -- TODO confirm type.
        self.inputFile = inputFile

    #def template_matching(self):

    def equalizer(self):
        '''Stub: intended to equalize the audio signal. Not implemented yet.'''

    def signal_improvement(self):
        '''Stub: intended to improve/clean the audio signal. Not implemented yet.'''

    def audio_coding(self, bitrate, codecformat):
        '''Stub: intended to re-encode the audio with the given bitrate and
        codec format. Not implemented yet.'''

    def audio_clip(self):
        '''Stub: intended to extract an audio clip. Not implemented yet.'''
if __name__ == '__main__':
    # Use the print function so this module is valid on both Python 2 and 3
    # (the original Python 2 print statement is a SyntaxError on Python 3).
    print("done")
| livius/audio/audioProcessing.py | 2,407 | Import Basic modules Import everything needed to edit video clips ffmpeg, ffmpeg.exe, etc... documentation string, which can be accessed via ClassName.__doc__ (slide_detection.__doc__ )def template_matching(self): | 213 | en | 0.382885 |
# Copyright 2016-2018 Dirk Thomas
# Licensed under the Apache License, Version 2.0
from collections import defaultdict
from collections import OrderedDict
import itertools
import os
from pathlib import Path
from colcon_core.package_selection import add_arguments \
as add_packages_arguments
from colcon_core.package_selection import get_package_descriptors
from colcon_core.package_selection import select_package_decorators
from colcon_core.plugin_system import satisfies_version
from colcon_core.topological_order import topological_order_packages
from colcon_core.verb import VerbExtensionPoint
class GraphVerb(VerbExtensionPoint):
    """Generate a visual representation of the dependency graph."""

    def __init__(self):  # noqa: D107
        super().__init__()
        satisfies_version(VerbExtensionPoint.EXTENSION_POINT_VERSION, '^1.0')

    def add_arguments(self, *, parser):  # noqa: D102
        """Register command line options for the graph verb."""
        # only added so that package selection arguments can be used
        # which use the build directory to store state information
        parser.add_argument(
            '--build-base',
            default='build',
            help='The base path for all build directories (default: build)')
        add_packages_arguments(parser)
        # --dot and --density are mutually exclusive output formats.
        group = parser.add_mutually_exclusive_group()
        group.add_argument(
            '--dot',
            action='store_true',
            default=False,
            help='Output topological graph in DOT '
                 '(e.g. pass the output to dot: ` | dot -Tpng -o graph.png`), '
                 'legend: blue=build, red=run, tan=test, dashed=indirect')
        group.add_argument(
            '--density',
            action='store_true',
            default=False,
            help='Output density of the graph (only without --dot)')
        parser.add_argument(
            '--legend',
            action='store_true',
            default=False,
            help='Output legend for the graph')
        parser.add_argument(
            '--dot-cluster',
            action='store_true',
            default=False,
            help='Cluster packages by their filesystem path (only affects '
                 '--dot)')
        parser.add_argument(
            '--dot-include-skipped',
            action='store_true',
            default=False,
            help='Also output skipped packages (only affects --dot)')

    def main(self, *, context):  # noqa: D102
        """Print the dependency graph, either as an ASCII matrix or DOT."""
        args = context.args
        descriptors = get_package_descriptors(args)
        decorators = topological_order_packages(
            descriptors, recursive_categories=('run', ))
        select_package_decorators(args, decorators)

        if not args.dot:
            if args.legend:
                print('+ marks when the package in this row can be processed')
                print('* marks a direct dependency '
                      'from the package indicated by the + in the same column '
                      'to the package in this row')
                print('. marks a transitive dependency')
                print()

            # draw dependency graph in ASCII
            shown_decorators = list(filter(lambda d: d.selected, decorators))
            max_length = max([
                len(m.descriptor.name) for m in shown_decorators] + [0])
            lines = [
                m.descriptor.name.ljust(max_length + 2)
                for m in shown_decorators]
            depends = [
                m.descriptor.get_dependencies() for m in shown_decorators]
            rec_depends = [
                m.descriptor.get_recursive_dependencies(
                    [d.descriptor for d in decorators],
                    recursive_categories=('run', ))
                for m in shown_decorators]
            # Column i describes package i; each row j gets one marker char.
            empty_cells = 0
            for i, decorator in enumerate(shown_decorators):
                for j in range(len(lines)):
                    if j == i:
                        # package i is being processed
                        lines[j] += '+'
                    elif shown_decorators[j].descriptor.name in depends[i]:
                        # package i directly depends on package j
                        lines[j] += '*'
                    elif shown_decorators[j].descriptor.name in rec_depends[i]:
                        # package i recursively depends on package j
                        lines[j] += '.'
                    else:
                        # package i doesn't depend on package j
                        lines[j] += ' '
                        empty_cells += 1
            if args.density:
                empty_fraction = \
                    empty_cells / (len(lines) * (len(lines) - 1)) \
                    if len(lines) > 1 else 1.0
                # normalize to 200% since half of the matrix should be empty
                density_percentage = 200.0 * (1.0 - empty_fraction)
                print('dependency density %.2f %%' % density_percentage)
                print()

        else:  # --dot
            lines = ['digraph graphname {']

            # Group decorators by package name (names may collide).
            decorators_by_name = defaultdict(set)
            for deco in decorators:
                decorators_by_name[deco.descriptor.name].add(deco)

            selected_pkg_names = [
                m.descriptor.name for m in decorators
                if m.selected or args.dot_include_skipped]
            has_duplicate_names = \
                len(selected_pkg_names) != len(set(selected_pkg_names))
            selected_pkg_names = set(selected_pkg_names)

            # collect selected package decorators and their parent path
            nodes = OrderedDict()
            for deco in reversed(decorators):
                if deco.selected or args.dot_include_skipped:
                    nodes[deco] = Path(deco.descriptor.path).parent

            # collect direct dependencies
            direct_edges = defaultdict(set)
            for deco in reversed(decorators):
                if (
                    not deco.selected and
                    not args.dot_include_skipped
                ):
                    continue
                # iterate over dependency categories
                for category, deps in deco.descriptor.dependencies.items():
                    # iterate over dependencies
                    for dep in deps:
                        if dep not in selected_pkg_names:
                            continue
                        # store the category of each dependency
                        # use the decorator
                        # since there might be packages with the same name
                        direct_edges[(deco, dep)].add(category)

            # collect indirect dependencies
            indirect_edges = defaultdict(set)
            for deco in reversed(decorators):
                if not deco.selected:
                    continue
                # iterate over dependency categories
                for category, deps in deco.descriptor.dependencies.items():
                    # iterate over dependencies
                    for dep in deps:
                        # ignore direct dependencies
                        if dep in selected_pkg_names:
                            continue
                        # ignore unknown dependencies
                        if dep not in decorators_by_name.keys():
                            continue
                        # iterate over recursive dependencies
                        for rdep in itertools.chain.from_iterable(
                            d.recursive_dependencies
                            for d in decorators_by_name[dep]
                        ):
                            if rdep not in selected_pkg_names:
                                continue
                            # skip edges which are redundant to direct edges
                            if (deco, rdep) in direct_edges:
                                continue
                            indirect_edges[(deco, rdep)].add(category)

            try:
                # HACK Python 3.5 can't handle Path objects
                common_path = os.path.commonpath(
                    [str(p) for p in nodes.values()])
            except ValueError:
                # e.g. paths on different drives have no common ancestor
                common_path = None

            def get_node_data(decorator):
                # Return (node name, extra DOT attributes) for a decorator.
                nonlocal args
                nonlocal has_duplicate_names
                if not has_duplicate_names:
                    # use name where possible so the dot code is easy to read
                    return decorator.descriptor.name, \
                        '' if (
                            decorator.selected or
                            not args.dot_include_skipped
                        ) else '[color = "gray" fontcolor = "gray"]'
                # otherwise append the descriptor id to make each node unique
                descriptor_id = id(decorator.descriptor)
                return (
                    '{decorator.descriptor.name}_{descriptor_id}'
                    .format_map(locals()),
                    ' [label = "{decorator.descriptor.name}"]'
                    .format_map(locals()),
                )

            if not args.dot_cluster or common_path is None:
                # output nodes
                for deco in nodes.keys():
                    if (
                        not deco.selected and
                        not args.dot_include_skipped
                    ):
                        continue
                    node_name, attributes = get_node_data(deco)
                    lines.append(
                        '  "{node_name}"{attributes};'.format_map(locals()))
            else:
                # output clusters
                clusters = defaultdict(set)
                for deco, path in nodes.items():
                    clusters[path.relative_to(common_path)].add(deco)
                for i, cluster in zip(range(len(clusters)), clusters.items()):
                    path, decos = cluster
                    if path.name:
                        # wrap cluster in subgraph
                        lines.append(
                            '  subgraph cluster_{i} {{'.format_map(locals()))
                        lines.append(
                            '    label = "{path}";'.format_map(locals()))
                        indent = '    '
                    else:
                        indent = '  '
                    for deco in decos:
                        node_name, attributes = get_node_data(deco)
                        lines.append(
                            '{indent}"{node_name}"{attributes};'
                            .format_map(locals()))
                    if path.name:
                        lines.append('  }')

            # output edges
            color_mapping = OrderedDict((
                ('build', '#0000ff'),  # blue
                ('run', '#ff0000'),  # red
                ('test', '#d2b48c'),  # tan
            ))
            # Direct edges are solid; indirect edges are dashed.
            for style, edges in zip(
                ('', ', style="dashed"'),
                (direct_edges, indirect_edges),
            ):
                for (deco_start, node_end), categories in edges.items():
                    start_name, _ = get_node_data(deco_start)
                    for deco in decorators_by_name[node_end]:
                        end_name, _ = get_node_data(deco)
                        # Fade edges that touch an unselected package.
                        edge_alpha = '' \
                            if deco_start.selected and deco.selected else '77'
                        colors = ':'.join([
                            color + edge_alpha
                            for category, color in color_mapping.items()
                            if category in categories])
                        lines.append(
                            '  "{start_name}" -> "{end_name}" '
                            '[color="{colors}"{style}];'.format_map(locals()))

            if args.legend:
                lines.append('  subgraph cluster_legend {')
                lines.append('    color=gray')
                lines.append('    label="Legend";')
                lines.append('    margin=0;')
                # invisible nodes between the dependency edges
                lines.append('    node [label="", shape=none];')
                previous_node = '_legend_first'
                # an edge for each dependency type
                for dependency_type, color in color_mapping.items():
                    next_node = '_legend_' + dependency_type
                    lines.append(
                        '    {previous_node} -> {next_node} '
                        '[label="{dependency_type} dep.", color="{color}"];'
                        .format_map(locals()))
                    previous_node = next_node
                lines.append(
                    '    {previous_node} -> _legend_last '
                    '[label="indirect dep.", style="dashed"];'
                    .format_map(locals()))

                # layout all legend nodes on the same rank
                lines.append('    {')
                lines.append('      rank=same;')
                lines.append('      _legend_first;')
                for dependency_type in color_mapping.keys():
                    lines.append(
                        '      _legend_{dependency_type};'
                        .format_map(locals()))
                lines.append('      _legend_last;')
                lines.append('    }')
                lines.append('  }')

            lines.append('}')

        for line in lines:
            print(line)
| colcon_package_information/verb/graph.py | 13,514 | Generate a visual representation of the dependency graph.
Copyright 2016-2018 Dirk Thomas Licensed under the Apache License, Version 2.0 noqa: D107 noqa: D102 only added so that package selection arguments can be used which use the build directory to store state information noqa: D102 draw dependency graph in ASCII package i is being processed package i directly depends on package j package i recursively depends on package j package i doesn't depend on package j normalize to 200% since half of the matrix should be empty --dot collect selected package decorators and their parent path collect direct dependencies iterate over dependency categories iterate over dependencies store the category of each dependency use the decorator since there might be packages with the same name collect indirect dependencies iterate over dependency categories iterate over dependencies ignore direct dependencies ignore unknown dependencies iterate over recursive dependencies skip edges which are redundant to direct edges HACK Python 3.5 can't handle Path objects use name where possible so the dot code is easy to read otherwise append the descriptor id to make each node unique output nodes output clusters wrap cluster in subgraph output edges blue red tan invisible nodes between the dependency edges an edge for each dependency type layout all legend nodes on the same rank | 1,371 | en | 0.74871 |
#!/usr/bin/env python
# encoding: utf-8
'''
@project : MSRGCN
@file : cmu_runner.py
@author : Droliven
@contact : droliven@163.com
@ide : PyCharm
@time : 2021-07-28 13:29
'''
from datas import CMUMotionDataset, get_dct_matrix, reverse_dct_torch, define_actions_cmu, draw_pic_gt_pred
from nets import MSRGCN, MSRGCNShortTerm
from configs.config import Config
from torch.utils.data import DataLoader
import torch.optim as optim
import torch
import os
from tensorboardX import SummaryWriter
import numpy as np
from tqdm import tqdm
from pprint import pprint
def L2NormLoss_test(gt, out, frame_ids):
    """Per-frame mean joint L2 error, scaled by batch size.

    gt, out: tensors of shape (batch, joints*3, seq_len), e.g. (B, 66, 25).
    frame_ids: iterable of frame indices to evaluate.
    Returns a numpy array with one (batch-size-scaled) error per frame id.
    """
    batch_size, feat_dim, seq_len = gt.shape
    # Reshape to (B, T, J, 3) so each row is one joint's xyz position.
    gt_joints = gt.permute(0, 2, 1).contiguous().view(batch_size, seq_len, -1, 3)
    out_joints = out.permute(0, 2, 1).contiguous().view(batch_size, seq_len, -1, 3)
    errors = np.zeros(len(frame_ids))
    for idx, frame in enumerate(frame_ids):
        diff = gt_joints[:, frame, :, :].contiguous().view(-1, 3) \
            - out_joints[:, frame, :, :].contiguous().view(-1, 3)
        # Mean joint distance, multiplied by batch size (callers divide by N later).
        errors[idx] = torch.mean(torch.norm(diff, 2, 1)).cpu().data.numpy() * batch_size
    return errors
def L2NormLoss_train(gt, out):
    """Mean per-joint position error (equivalent to mpjpe_error_p3d).

    gt, out: tensors of shape (batch, joints*3, seq_len).
    Returns a scalar tensor: the mean L2 distance over all joints/frames.
    """
    batch_size, _, seq_len = gt.shape
    # Rearrange to (B, T, J, 3) so the last axis holds xyz coordinates.
    gt_joints = gt.view(batch_size, -1, 3, seq_len).permute(0, 3, 1, 2).contiguous()
    out_joints = out.view(batch_size, -1, 3, seq_len).permute(0, 3, 1, 2).contiguous()
    return torch.mean(torch.norm(gt_joints - out_joints, 2, dim=-1))
def lr_decay(optimizer, lr_now, gamma):
    """Multiply the learning rate by gamma and apply it to all param groups.

    Returns the new learning rate.
    """
    new_lr = lr_now * gamma
    for group in optimizer.param_groups:
        group['lr'] = new_lr
    return new_lr
class CMURunner():
    def __init__(self, exp_name="cmu", input_n=10, output_n=10, dct_n=15, device="cuda:0", num_works=0, test_manner="all", debug_step=1):
        """Build config, model, optimizer, DCT matrices and data loaders.

        exp_name: experiment name used by Config for checkpoint paths.
        input_n/output_n: observed / predicted frame counts (output_n selects
            the long-term (25) or short-term (10) model variant).
        dct_n: number of DCT coefficients.
        device: torch device string; "cpu" disables all .cuda() transfers.
        """
        super(CMURunner, self).__init__()
        # Run state / configuration (was: 参数).
        self.start_epoch = 1
        self.best_accuracy = 1e15
        self.cfg = Config(exp_name=exp_name, input_n=input_n, output_n=output_n, dct_n=dct_n, device=device, num_works=num_works, test_manner=test_manner)
        print("\n================== Configs =================")
        pprint(vars(self.cfg), indent=4)
        print("==========================================\n")
        # Persist the resolved config next to the checkpoints.
        with open(os.path.join(self.cfg.ckpt_dir, "config.txt"), 'w', encoding='utf-8') as f:
            f.write(str(self.cfg.__dict__))
        # Model (was: 模型) — variant chosen by prediction horizon.
        if self.cfg.output_n == 25:
            self.model = MSRGCN(self.cfg.p_dropout, self.cfg.leaky_c, self.cfg.final_out_noden, input_feature=self.cfg.dct_n)
        elif self.cfg.output_n == 10:
            self.model = MSRGCNShortTerm(self.cfg.p_dropout, self.cfg.leaky_c, self.cfg.final_out_noden, input_feature=self.cfg.dct_n)
        if self.cfg.device != "cpu":
            self.model.cuda(self.cfg.device)
        print(">>> total params: {:.2f}M\n".format(
            sum(p.numel() for p in self.model.parameters()) / 1000000.0))
        self.lr = self.cfg.lr
        self.optimizer = optim.Adam(self.model.parameters(), lr=self.lr)
        # Data (was: 数据) — forward/inverse DCT matrices shared by all loaders.
        dct_m, i_dct_m = get_dct_matrix(self.cfg.seq_len)
        self.dct_m = torch.from_numpy(dct_m).float()
        self.i_dct_m = torch.from_numpy(i_dct_m).float()
        if self.cfg.device != "cpu":
            self.dct_m = self.dct_m.cuda(self.cfg.device, non_blocking=True)
            self.i_dct_m = self.i_dct_m.cuda(self.cfg.device, non_blocking=True)
        # Training set computes the global normalization range (max/min).
        train_dataset = CMUMotionDataset(self.cfg.base_data_dir, actions="all", mode_name="train", input_n=self.cfg.input_n, output_n=self.cfg.output_n,
                                         dct_used=self.cfg.dct_n, split=0, sample_rate=2,
                                         down_key=[('p22', 'p12', self.cfg.Index2212),
                                                   ('p12', 'p7', self.cfg.Index127),
                                                   ('p7', 'p4', self.cfg.Index74)], test_manner=self.cfg.test_manner, global_max=0, global_min=0, device=self.cfg.device, debug_step=debug_step)
        print("train data shape {}".format(train_dataset.gt_all_scales['p32'].shape[0]))
        self.train_loader = DataLoader(
            dataset=train_dataset,
            batch_size=self.cfg.train_batch_size,
            shuffle=True,
            num_workers=self.cfg.num_works,
            pin_memory=True)
        self.global_max = train_dataset.global_max
        self.global_min = train_dataset.global_min
        # One test loader per action, reusing the training normalization range.
        self.test_loader = dict()
        for act in define_actions_cmu("all"):
            test_dataset = CMUMotionDataset(self.cfg.base_data_dir, actions=act, mode_name="test", input_n=self.cfg.input_n, output_n=self.cfg.output_n,
                                            dct_used=self.cfg.dct_n, split=1, sample_rate=2,
                                            down_key=[('p22', 'p12', self.cfg.Index2212),
                                                      ('p12', 'p7', self.cfg.Index127),
                                                      ('p7', 'p4', self.cfg.Index74)], test_manner=self.cfg.test_manner, global_max=self.global_max, global_min=self.global_min, device=self.cfg.device, debug_step=debug_step)
            self.test_loader[act] = DataLoader(
                dataset=test_dataset,
                batch_size=self.cfg.test_batch_size,
                shuffle=False,
                num_workers=self.cfg.num_works,
                pin_memory=True)
            print(">>> test {} data {}".format(act, test_dataset.gt_all_scales['p32'].shape[0]))
        self.summary = SummaryWriter(self.cfg.ckpt_dir)
def save(self, checkpoint_path, best_err, curr_err):
state = {
"lr": self.lr,
"best_err": best_err,
"curr_err": curr_err,
"model": self.model.state_dict(),
"optimizer": self.optimizer.state_dict(),
}
torch.save(state, checkpoint_path)
def restore(self, checkpoint_path):
state = torch.load(checkpoint_path, map_location=self.cfg.device)
self.model.load_state_dict(state["model"])
self.optimizer.load_state_dict(state["optimizer"])
self.lr = state["lr"]
best_err = state['best_err']
curr_err = state["curr_err"]
print("load from lr {}, curr_avg {}, best_avg {}.".format(state["lr"], curr_err, best_err))
    def train(self, epoch):
        """Run one training epoch; returns the average multi-scale L2 loss per batch."""
        self.model.train()
        average_loss = 0
        for i, (inputs, gts) in tqdm(enumerate(self.train_loader), total=len(self.train_loader)):
            b, cv, t_len = inputs[list(inputs.keys())[0]].shape
            # skip the last batch if only have one sample for batch_norm layers
            if b == 1:
                continue
            self.global_step = (epoch - 1) * len(self.train_loader) + i + 1
            for k in inputs:
                inputs[k] = inputs[k].float().cuda(non_blocking=True, device=self.cfg.device)
                gts[k] = gts[k].float().cuda(non_blocking=True, device=self.cfg.device)
            outputs = self.model(inputs)
            losses = None
            for k in outputs:
                # undo normalization: [-1, 1] -> [0, 1] -> original value range
                outputs[k] = (outputs[k] + 1) / 2
                outputs[k] = outputs[k] * (self.global_max - self.global_min) + self.global_min
                # back from DCT coefficients to the time domain
                outputs[k] = reverse_dct_torch(outputs[k], self.i_dct_m, self.cfg.seq_len)
                # sum per-scale losses into one training objective
                loss_curr = L2NormLoss_train(gts[k], outputs[k])
                if losses is None:
                    losses = loss_curr
                else:
                    losses = losses + loss_curr
                self.summary.add_scalar(f"Loss/{k}", loss_curr, self.global_step)
            self.optimizer.zero_grad()
            losses.backward()
            self.optimizer.step()
            average_loss += losses.cpu().data.numpy()
        # NOTE(review): divides by the last loop index, which also counts the
        # skipped size-1 batches, and assumes the loader is non-empty -- confirm.
        average_loss /= (i + 1)
        return average_loss
    def test(self, epoch=0):
        """Evaluate on every CMU action; returns a (num_actions, num_frame_ids) loss matrix."""
        self.model.eval()
        frame_ids = self.cfg.frame_ids
        total_loss = np.zeros((len(define_actions_cmu("all")), len(frame_ids)))
        for act_idx, act in enumerate(define_actions_cmu("all")):
            count = 0
            for i, (inputs, gts) in enumerate(self.test_loader[act]):
                b, cv, t_len = inputs[list(inputs.keys())[0]].shape
                for k in inputs:
                    inputs[k] = inputs[k].float().cuda(non_blocking=True, device=self.cfg.device)
                    gts[k] = gts[k].float().cuda(non_blocking=True, device=self.cfg.device)
                with torch.no_grad():
                    outputs = self.model(inputs)
                    # undo normalization: [-1, 1] -> original value range
                    for k in outputs:
                        outputs[k] = (outputs[k] + 1) / 2
                        outputs[k] = outputs[k] * (self.global_max - self.global_min) + self.global_min
                        # back from DCT coefficients to the time domain
                        outputs[k] = reverse_dct_torch(outputs[k], self.i_dct_m, self.cfg.seq_len)
                    # evaluate on the full 32-joint skeleton: scatter the 22
                    # predicted joints into a copy of the ground truth
                    mygt = gts['p32'].view(-1, self.cfg.origin_noden, 3, self.cfg.seq_len).clone()
                    myout = outputs['p22'].view(-1, self.cfg.final_out_noden, 3, self.cfg.seq_len)
                    mygt[:, self.cfg.dim_used_3d, :, :] = myout
                    mygt[:, self.cfg.dim_repeat_32, :, :] = myout[:, self.cfg.dim_repeat_22, :, :]
                    mygt = mygt.view(-1, self.cfg.origin_noden*3, self.cfg.seq_len)
                    # score only the predicted (output) part of the sequence
                    loss = L2NormLoss_test(gts['p32'][:, :, self.cfg.input_n:], mygt[:, :, self.cfg.input_n:], self.cfg.frame_ids)
                    total_loss[act_idx] += loss
                    # count += 1
                    count += mygt.shape[0]
                    # plot GT vs prediction for the first batch of the first action
                    if act_idx == 0 and i == 0:
                        pred_seq = outputs['p22'].cpu().data.numpy()[0].reshape(self.cfg.final_out_noden, 3, self.cfg.seq_len)
                        gt_seq = gts['p22'].cpu().data.numpy()[0].reshape(self.cfg.final_out_noden, 3, self.cfg.seq_len)
                        for t in range(self.cfg.seq_len):
                            draw_pic_gt_pred(gt_seq[:, :, t], pred_seq[:, :, t], self.cfg.I22_plot, self.cfg.J22_plot, self.cfg.LR22_plot, os.path.join(self.cfg.ckpt_dir, "images", f"{epoch}_{act}_{t}.png"))
            # NOTE(review): batch losses are summed and divided by the sample
            # count, so L2NormLoss_test presumably returns per-batch sums -- confirm.
            total_loss[act_idx] /= count
            for fidx, frame in enumerate(frame_ids):
                self.summary.add_scalar(f"Test/{act}/{frame}", total_loss[act_idx][fidx], epoch)
        self.summary.add_scalar("Test/average", np.mean(total_loss), epoch)
        for fidx, frame in enumerate(frame_ids):
            self.summary.add_scalar(f"Test/avg{frame}", np.mean(total_loss[:, fidx]), epoch)
        return total_loss
    def run(self):
        """Main loop: decay the LR every other epoch, train, checkpoint, and test."""
        for epoch in range(self.start_epoch, self.cfg.n_epoch + 1):
            # decay the learning rate every 2nd epoch
            if epoch % 2 == 0:
                self.lr = lr_decay(self.optimizer, self.lr, self.cfg.lr_decay)
            self.summary.add_scalar("LR", self.lr, epoch)
            average_train_loss = self.train(epoch)
            # NOTE(review): "best" is tracked on the *training* loss rather than
            # the test error -- confirm this is intended.
            if average_train_loss < self.best_accuracy:
                self.best_accuracy = average_train_loss
                self.save(
                    os.path.join(self.cfg.ckpt_dir, "models",
                                 '{}_in{}out{}dctn{}_best_epoch{}_err{:.4f}.pth'.format(self.cfg.exp_name,
                                                                                        self.cfg.input_n,
                                                                                        self.cfg.output_n,
                                                                                        self.cfg.dct_n, epoch,
                                                                                        average_train_loss)), self.best_accuracy, average_train_loss)
            # always refresh the rolling "last" checkpoint
            self.save(os.path.join(self.cfg.ckpt_dir, "models",
                                   '{}_in{}out{}dctn{}_last.pth'.format(self.cfg.exp_name, self.cfg.input_n,
                                                                        self.cfg.output_n, self.cfg.dct_n)),
                      self.best_accuracy, average_train_loss)
            # evaluate every epoch
            if epoch % 1 == 0:
                loss_l2_test = self.test(epoch)
                print('Epoch: {}, LR: {}, Current err test avg: {}'.format(epoch, self.lr, np.mean(loss_l2_test)))
if __name__ == '__main__':
pass | run/cmu_runner.py | 12,529 | gt: B, 66, 25
# (batch size,feature dim, seq len)
等同于 mpjpe_error_p3d()
@project : MSRGCN
@file : cmu_runner.py
@author : Droliven
@contact : droliven@163.com
@ide : PyCharm
@time : 2021-07-28 13:29
!/usr/bin/env python encoding: utf-8 (batch size,feature dim, seq len) B, 25, 22, 3 B, 25, 22, 3 参数 模型 数据 skip the last batch if only have one sample for batch_norm layers 反 Norm 回转空间 loss 反 Norm 回转空间 开始计算 count += 1 ************ 画图 | 443 | en | 0.269446 |
#!/usr/bin/python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from __future__ import print_function, division
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])

def name_to_ipv6(addr):
    '''Convert an address string to its 16-byte IPv6 representation.

    Accepts .onion names (base32), dotted IPv4, colon-separated IPv6 and the
    old 0xDDBBCCAA little-endian IPv4 form; IPv4 and onion addresses are
    embedded in IPv6 space using the prefixes above.
    Raises ValueError for unparseable input.
    '''
    if len(addr)>6 and addr.endswith('.onion'):
        vchAddr = b32decode(addr[0:-6], True)
        if len(vchAddr) != 16-len(pchOnionCat):
            # bugfix: this previously raised with the undefined name 's'
            raise ValueError('Invalid onion %s' % addr)
        return pchOnionCat + vchAddr
    elif '.' in addr: # IPv4
        return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
    elif ':' in addr: # IPv6
        sub = [[], []] # prefix, suffix
        x = 0
        addr = addr.split(':')
        for i,comp in enumerate(addr):
            if comp == '':
                if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
                    continue
                x += 1 # :: skips to suffix
                assert(x < 2)
            else: # two bytes per component
                val = int(comp, 16)
                sub[x].append(val >> 8)
                sub[x].append(val & 0xff)
        nullbytes = 16 - len(sub[0]) - len(sub[1])
        assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
        return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
    elif addr.startswith('0x'): # IPv4-in-little-endian
        return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
    else:
        raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
    '''Parse a "host[:port]" spec into a (16-byte address, port) tuple.

    Handles bracketed IPv6 ([::1]:port), bare IPv6, and host:port forms;
    defaultport is used when no port is given.
    '''
    # fix: raw string so '\[' is not an invalid escape sequence
    match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
    if match: # ipv6
        host = match.group(1)
        port = match.group(2)
    elif s.count(':') > 1: # ipv6, no port
        host = s
        port = ''
    else:
        (host,_,port) = s.partition(':')
    if not port:
        port = defaultport
    else:
        port = int(port)
    host = name_to_ipv6(host)
    return (host,port)
def process_nodes(g, f, structname, defaultport):
    """Write each address line from file f as one entry of a C SeedSpec6
    array named structname on stream g; defaultport fills missing ports."""
    g.write('static SeedSpec6 %s[] = {\n' % structname)
    first = True
    for line in f:
        # strip trailing '#' comments and surrounding whitespace
        comment = line.find('#')
        if comment != -1:
            line = line[0:comment]
        line = line.strip()
        if not line:
            continue
        # comma-separate entries; no comma before the first one
        if not first:
            g.write(',\n')
        first = False
        (host,port) = parse_spec(line, defaultport)
        hoststr = ','.join(('0x%02x' % b) for b in host)
        g.write('    {{%s}, %i}' % (hoststr, port))
    g.write('\n};\n')
def main():
    """Read nodes_main.txt and nodes_test.txt from the directory given on the
    command line and print chainparamsseeds.h content to stdout."""
    if len(sys.argv)<2:
        print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
        exit(1)
    g = sys.stdout
    indir = sys.argv[1]
    g.write('#ifndef WAZ_CHAINPARAMSSEEDS_H\n')
    g.write('#define WAZ_CHAINPARAMSSEEDS_H\n')
    g.write('/**\n')
    g.write(' * List of fixed seed nodes for the wuazi network\n')
    g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
    g.write(' *\n')
    g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
    g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
    g.write(' */\n')
    # mainnet seeds, default port 9999
    with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
        process_nodes(g, f, 'pnSeed6_main', 9999)
    g.write('\n')
    # testnet seeds, default port 19999
    with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
        process_nodes(g, f, 'pnSeed6_test', 19999)
    g.write('#endif // WAZ_CHAINPARAMSSEEDS_H\n')
# Script entry point: emit chainparamsseeds.h content on stdout.
if __name__ == '__main__':
    main()
| contrib/seeds/generate-seeds.py | 4,364 | Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
!/usr/bin/python Copyright (c) 2014 Wladimir J. van der Laan Distributed under the MIT software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. ipv4 in ipv6 prefix tor-specific ipv6 prefix IPv4 IPv6 prefix, suffix skip empty component at beginning or end :: skips to suffix two bytes per component IPv4-in-little-endian ipv6 ipv6, no port | 980 | en | 0.615454 |
class Type:
    """Base class for all type terms.

    get_repr() returns the union-find representative (a plain type is its
    own representative); the stack helpers always act on representatives.
    """

    def __init__(self):
        pass

    def get_repr(self):
        # A plain type represents itself.
        return self

    def __repr__(self):
        return self.get_repr().stringify()

    def stringify(self):
        return ""

    def put_on_stack(self, stack):
        stack.put(self.get_repr())

    def take_from_stack(self, stack):
        stack.take(self.get_repr())

    def get_as_single_constant(self):
        """Return the representative if it is a TypeConstant, else None."""
        rep = self.get_repr()
        return rep if isinstance(rep, TypeConstant) else None
class TypeConstant(Type):
    """A named atomic base type (e.g. 'int')."""

    def __init__(self, name):
        self.name = name

    def stringify(self):
        # A constant prints as its bare name.
        return self.name
class TypeArrow(Type):
    """A function type left -> right; `name` optionally records its origin."""

    def __init__(self, left, right, name=None):
        self.left = left
        self.right = right
        self.name = name

    def stringify(self):
        return "({})->{}".format(self.left, self.right)

    def put_on_stack(self, stack):
        # Applying an arrow consumes the argument, then produces the result.
        self.left.take_from_stack(stack)
        self.right.put_on_stack(stack)

    def take_from_stack(self, stack):
        # Higher-order arguments are not supported.
        raise ArrowOnTheLeftOfArrowError(
            "Arrow type on the left hand side of the arrow type", self)
class TypeTuple(Type):
    """A fixed-arity product of types; the empty tuple () is the unit type."""

    def __init__(self, args):
        self.args = args

    def stringify(self):
        return "(%s)" % ", ".join(str(member) for member in self.args)

    def put_on_stack(self, stack):
        # Components are produced left to right.
        for member in self.args:
            member.put_on_stack(stack)

    def take_from_stack(self, stack):
        # Components are consumed left to right.
        for member in self.args:
            member.take_from_stack(stack)
class TypeVar(Type):
    """A unification variable, implemented as a union-find node.

    `parent` links toward the representative of the equivalence class and
    `rank` bounds the tree height (union by rank + path compression).
    """

    def __init__(self, name):
        self.name = name
        self.rank = 0
        self.parent = self

    def union(self, other):
        """Merge this variable's equivalence class with `other`'s.

        Bugfix: link the *representatives* rather than the nodes passed in.
        The previous code re-pointed `self`/`other` directly, which could
        leave the old root (and everything attached to it) unmerged, and
        updated the rank of a non-root node.
        """
        self_repr = self.get_repr()
        other_repr = other.get_repr()
        if self_repr is other_repr:
            return
        if isinstance(other_repr, TypeVar):
            if not isinstance(self_repr, TypeVar):
                # self already resolved to a concrete type: bind the variable side.
                other_repr.parent = self_repr
            elif self_repr.rank < other_repr.rank:
                self_repr.parent = other_repr
            elif self_repr.rank > other_repr.rank:
                other_repr.parent = self_repr
            else:
                # Equal ranks: pick self as root and bump its rank.
                other_repr.parent = self_repr
                self_repr.rank = self_repr.rank + 1
        else:
            # other is concrete: bind our representative to it.
            self_repr.parent = other_repr

    def get_repr(self):
        """Return the class representative, compressing the path on the way."""
        if self.parent is not self:
            self.parent = self.parent.get_repr()
        return self.parent

    def stringify(self):
        return "@" + self.name
class ArrowOnTheLeftOfArrowError(RuntimeError):
    """Raised when an arrow type appears in argument (left-hand) position."""

    def __init__(self, message, type):
        RuntimeError.__init__(self, message)
        self.message = message
        self.type = type

    def __str__(self):
        # Show the offending type after the message.
        return "{} {}".format(self.message, self.type)
class UnifiactionError(RuntimeError):
    """Unification failure; records the stack of (a, b) pairs being unified."""

    def __init__(self, message):
        RuntimeError.__init__(self, message)
        self.message = message
        self.unify_stack = []

    def add(self, type_a, type_b):
        """Record one enclosing unification pair for the error report."""
        self.unify_stack.append((type_a, type_b))

    def __str__(self):
        trace = ["In unification of '%s' and '%s'" % pair
                 for pair in self.unify_stack]
        return "Unification error: " + self.message + "\n" + "\n".join(trace)
def types_equal(a, b):
    """Structural equality of two types, after union-find resolution.

    Performs no unification: TypeVars compare equal only when they share a
    representative.
    """
    a = a.get_repr()
    b = b.get_repr()
    if a == b:
        return True
    if isinstance(a, TypeTuple) and isinstance(b, TypeTuple):
        if len(a.args) != len(b.args):
            return False
        # bugfix: the original used all(map(types_equal, zip(...))), which
        # calls the two-argument function with a single tuple and raises
        # TypeError whenever the tuples have equal nonzero length.
        return all(types_equal(x, y) for (x, y) in zip(a.args, b.args))
    elif isinstance(a, TypeArrow) and isinstance(b, TypeArrow):
        return types_equal(a.left, b.left) and types_equal(a.right, b.right)
    return False
def types_unify(a, b):
    """Unify types a and b in place, raising UnifiactionError on mismatch.

    Variables are merged/bound via union-find; tuples and arrows are unified
    component-wise. On failure, the except clause annotates the error with
    the pair being unified at each recursion level.
    """
    try:
        a = a.get_repr()
        b = b.get_repr()
        if isinstance(a, TypeVar):
            a.union(b)
        elif isinstance(b, TypeVar):
            b.union(a)
        elif isinstance(a, TypeConstant) and isinstance(b, TypeConstant):
            if a != b:
                raise UnifiactionError("Different basic types")
        elif isinstance(a, TypeTuple) and isinstance(b, TypeTuple):
            if len(a.args) != len(b.args):
                raise UnifiactionError("Tuples size mismatch")
            # bugfix: use fresh loop names -- the original reused (a, b), so
            # the except clause below reported the last element pair instead
            # of the enclosing tuple types.
            for (x, y) in zip(a.args, b.args):
                types_unify(x, y)
        elif isinstance(a, TypeArrow) and isinstance(b, TypeArrow):
            types_unify(a.left, b.left)
            types_unify(a.right, b.right)
        else:
            raise UnifiactionError("Different kinds")
    except UnifiactionError as e:
        e.add(a, b)
        raise
def is_simple_arrow(a):
    """True iff `a` resolves to an arrow whose argument and result resolve to the same type."""
    rep = a.get_repr()
    return (isinstance(rep, TypeArrow)
            and rep.left.get_repr() == rep.right.get_repr())
def is_type_empty(type):
    """True iff the type resolves to the empty tuple ()."""
    rep = type.get_repr()
    return isinstance(rep, TypeTuple) and not rep.args
def split_arrow(type):
    """Decompose nested arrows a1->a2->...->r into ([a1, a2, ...], r)."""
    current = type.get_repr()
    lhs = []
    while isinstance(current, TypeArrow):
        lhs.append(current.left)
        current = current.right
    return (lhs, current)
class TypeStack:
    """Symbolic evaluation stack used to infer a combined type.

    `given` holds types produced so far; `taken` records demands that could
    not be satisfied from the stack (they become arrow arguments).
    """

    def __init__(self):
        self.given = []
        self.taken = []

    def take(self, type):
        """Consume one constant type, unifying with the stack top if available."""
        if not isinstance(type, TypeConstant):
            raise RuntimeError("Non-constant type placed into typestack: %s" % type)
        if self.given:
            types_unify(type, self.given.pop())
        else:
            # Nothing on the stack: the overall type must take it as an argument.
            self.taken.append(type)

    def put(self, type):
        """Produce one type onto the stack."""
        self.given.append(type)

    def form_type(self):
        """Fold the stack into a single type: taken -> ... -> given."""
        if len(self.given) == 1:
            result = self.given[0]
        else:
            result = TypeTuple(self.given)
        for needed in reversed(self.taken):
            result = TypeArrow(needed, result)
        return result
def infer_type_from_sequence(seq):
    """Produce a single type matching a sequence of types applied in order."""
    stack = TypeStack()
    for item in seq:
        item.put_on_stack(stack)
    return stack.form_type()
if __name__ == "__main__":
pass | utils/parsxv2/typesystem.py | 5,924 | Takes a sequence of types, produces a signle type matching the sequence | 71 | en | 0.74475 |
"""
logan.runner
~~~~~~~~~~~~
:copyright: (c) 2012 David Cramer.
:license: Apache License 2.0, see NOTICE for more details.
"""
import argparse
import os
import re
import sys
from django.core import management
from nautobot import __version__
from . import importer
from .settings import create_default_settings
__configured = False
def sanitize_name(project):
    """Return `project` with spaces and any character outside [A-Za-z0-9_-] replaced by '-'."""
    return re.sub("[^A-Z0-9a-z_-]", "-", project.replace(" ", "-"))
def parse_command_args(args):
    """
    Split `args` into (global_args, command, command_args).

    The command is the first token that does not start with '-'; everything
    before it is treated as global options, everything after is handed to
    the command. For example, "--config=bar start --with=baz" yields
    (['--config=bar'], 'start', ['--with=baz']).
    """
    for position, token in enumerate(args):
        if not token.startswith("-"):
            return (args[:position], token, args[position + 1:])

    # No command token found: everything is global options.
    return (args, None, [])
def is_configured():
    """Return True once configure_app() has completed."""
    # Reading a module-level name needs no `global` declaration.
    return __configured
def configure_app(
    config_path=None,
    project=None,
    default_config_path=None,
    default_settings=None,
    settings_initializer=None,
    settings_envvar=None,
    initializer=None,
    allow_extras=True,
    config_module_name=None,
    runner_name=None,
    on_configure=None,
):
    """
    Locate, load and install the project's settings module, then mark the
    app as configured.

    :param config_path: explicit path to the configuration file; falls back to
        the ``<PROJECT>_CONF`` env var or ``default_config_path``.
    :param project: should represent the canonical name for the project, generally
        the same name it assigned in distutils.
    :param default_config_path: the default location for the configuration file.
    :param default_settings: default settings to load (think inheritence).
    :param settings_initializer: a callback function which should return a string
        representing the default settings template to generate.
    :param settings_envvar: env var that may hold the config path; defaults to
        ``<PROJECT>_CONF``.
    :param initializer: a callback function which will be executed before the command
        is executed. It is passed a dictionary of various configuration attributes.
    :param allow_extras: passed through to the settings importer.
    :param config_module_name: module name under which the config is installed;
        defaults to ``<project>_config``.
    :param runner_name: CLI name used in the "run init" error message.
    :param on_configure: callback invoked once settings are fully loaded.
    :raises ValueError: if the configuration file does not exist.
    """
    global __configured

    project_filename = sanitize_name(project)

    if default_config_path is None:
        default_config_path = "~/%s/%s.conf.py" % (project_filename, project_filename)

    if settings_envvar is None:
        settings_envvar = project_filename.upper() + "_CONF"

    if config_module_name is None:
        config_module_name = project_filename + "_config"

    # normalize path: the environment variable (if set) wins over the default
    if settings_envvar in os.environ:
        default_config_path = os.environ.get(settings_envvar)
    else:
        default_config_path = os.path.normpath(os.path.abspath(os.path.expanduser(default_config_path)))

    if not config_path:
        config_path = default_config_path

    config_path = os.path.expanduser(config_path)

    if not os.path.exists(config_path):
        if runner_name:
            raise ValueError(
                "Configuration file does not exist. Use '%s init' to initialize the file." % (runner_name,)
            )
        raise ValueError("Configuration file does not exist at %r" % (config_path,))

    os.environ["DJANGO_SETTINGS_MODULE"] = config_module_name

    def settings_callback(settings):
        # Invoked by the importer once settings are importable.
        if initializer is None:
            return

        try:
            initializer(
                {
                    "project": project,
                    "config_path": config_path,
                    "settings": settings,
                }
            )
        except Exception:
            # XXX: Django doesn't like various errors in this path
            import sys
            import traceback

            traceback.print_exc()
            sys.exit(1)

    importer.install(
        config_module_name,
        config_path,
        default_settings,
        allow_extras=allow_extras,
        callback=settings_callback,
    )

    __configured = True

    # HACK(dcramer): we need to force access of django.conf.settings to
    # ensure we don't hit any import-driven recursive behavior
    from django.conf import settings

    hasattr(settings, "INSTALLED_APPS")

    if on_configure:
        on_configure(
            {
                "project": project,
                "config_path": config_path,
                "settings": settings,
            }
        )
class VerboseHelpFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.RawDescriptionHelpFormatter):
    """Help formatter that keeps raw description text (newlines intact) and shows argument defaults."""
def run_app(**kwargs):
    """CLI entry point: parse arguments, handle the `init` command, load the
    configuration, then dispatch to Django's management machinery.

    Exits the process; does not return normally.
    """
    sys_args = sys.argv

    # The established command for running this program
    runner_name = os.path.basename(sys_args[0])

    default_config_path = kwargs.get("default_config_path")

    # Primary parser
    parser = management.CommandParser(
        description=kwargs.pop("description"),
        formatter_class=VerboseHelpFormatter,
        add_help=False,
    )
    parser.add_argument(
        "-c",
        "--config",
        metavar="CONFIG",
        help="Path to the configuration file",
    )
    parser.add_argument(
        "--version",
        action="version",
        version=__version__,
    )

    # This block of code here is done in this way because of the built in Django
    # management command parsing not playing well unless you have a Django
    # config with SECRET_KEY defined.

    # Parse out `--config` here first capturing any unparsed args for passing to
    # Django parser.
    args, unparsed_args = parser.parse_known_args()

    # Now add the sub-parser for "init" command
    subparsers = parser.add_subparsers(help=False, dest="command", metavar="")
    init_parser = subparsers.add_parser(
        "init",
        help="Initialize a new configuration",
    )
    init_parser.add_argument(
        "config_path",
        default=default_config_path,
        nargs="?",
        help="Path to output generated configuration file",
    )

    # Try to use our parser first, to process custom arguments
    try:
        args = parser.parse_args()
        command = args.command
        command_args = sys.argv[1:]
    # Fallback to passing through to Django management commands
    # except RuntimeError as err:
    except management.CommandError as err:
        if "invalid choice" not in str(err):
            raise

        # Rewrite sys_args to have the unparsed args (if any)
        sys_args = sys_args[:1] + unparsed_args
        _, command, command_args = parse_command_args(sys_args[1:])

    # If we don't get a command of some sort, print help and exit dirty
    if not command:
        parser.print_help()
        parser.exit(1)

    # The `init` command is reserved for initializing configuration
    if command == "init":
        settings_initializer = kwargs.get("settings_initializer")

        config_path = os.path.expanduser(args.config_path)

        # Check if the config already exists; alert user and exit if exists.
        if os.path.exists(config_path):
            print(
                f"A configuration already exists at {config_path}. Please backup and remove it or choose another path."
            )
            return

        # Create the config
        try:
            create_default_settings(config_path, settings_initializer)
        except OSError as e:
            raise e.__class__("Unable to write default settings file to %r" % config_path)

        print("Configuration file created at %r" % config_path)

        return

    # Fetch config path from `--config` if provided, otherwise we want it to
    # default to None so that the underlying machinery in `configure_app` will
    # process default path or environment variable.
    config_path = args.config

    # Overlay our config w/ defautls
    try:
        configure_app(config_path=config_path, **kwargs)
    except ValueError as err:
        parser.exit(status=2, message=str(err) + "\n")

    # Call Django management command
    management.execute_from_command_line([runner_name, command] + command_args)

    # Exit cleanly
    sys.exit(0)
# Allow running this module directly.
if __name__ == "__main__":
    run_app()
| nautobot/core/runner/runner.py | 7,969 | Argparse Formatter that includes newlines and shows argument defaults.
:param project: should represent the canonical name for the project, generally
the same name it assigned in distutils.
:param default_config_path: the default location for the configuration file.
:param default_settings: default settings to load (think inheritence).
:param settings_initializer: a callback function which should return a string
representing the default settings template to generate.
:param initializer: a callback function which will be executed before the command
is executed. It is passed a dictionary of various configuration attributes.
This parses the arguments and returns a tuple containing:
(args, command, command_args)
For example, "--config=bar start --with=baz" would return:
(['--config=bar'], 'start', ['--with=baz'])
logan.runner
~~~~~~~~~~~~
:copyright: (c) 2012 David Cramer.
:license: Apache License 2.0, see NOTICE for more details.
Unable to parse any arguments normalize path XXX: Django doesn't like various errors in this path HACK(dcramer): we need to force access of django.conf.settings to ensure we don't hit any import-driven recursive behavior The established command for running this program Primary parser This block of code here is done in this way because of the built in Django management command parsing not playing well unless you have a Django config with SECRET_KEY defined. Parse out `--config` here first capturing any unparsed args for passing to Django parser. Now add the sub-parser for "init" command Try to use our parser first, to process custom arguments Fallback to passing through to Django management commands except RuntimeError as err: Rewrite sys_args to have the unparsed args (if any) If we don't get a command of some sort, print help and exit dirty The `init` command is reserved for initializing configuration Check if the config already exists; alert user and exit if exists. Create the config Fetch config path from `--config` if provided, otherwise we want it to default to None so that the underlying machinery in `configure_app` will process default path or environment variable. Overlay our config w/ defautls Call Django management command Exit cleanly | 2,224 | en | 0.737274 |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import string
from copy import deepcopy
from nltk import word_tokenize
from tqdm import tqdm
import nemo.collections.nlp.data.text_normalization.constants as constants
__all__ = ['read_data_file', 'normalize_str']
def read_data_file(fp):
    """Read the raw data from a file of NeMo format.

    Each line is "class<TAB>written<TAB>spoken"; a line whose class is
    '<eos>' terminates one instance. Returns a list of
    (classes, written_words, spoken_words) tuples. For details, refer to the
    `text_normalization doc <https://github.com/NVIDIA/NeMo/blob/main/docs/source/nlp/text_normalization.rst>`.
    """
    instances = []
    classes, written, spoken = [], [], []
    with open(fp, 'r', encoding='utf-8') as f:
        for line in tqdm(f):
            fields = [field.strip() for field in line.strip().split('\t')]
            if fields[0] == '<eos>':
                instances.append((deepcopy(classes), deepcopy(written), deepcopy(spoken)))
                # Start accumulating the next instance.
                classes, written, spoken = [], [], []
            else:
                classes.append(fields[0])
                written.append(fields[1])
                spoken.append(fields[2])
    return instances
def normalize_str(input_str, lang):
    """ Normalize an input string: lowercase, tokenize, re-join with single spaces """
    input_str_tokens = basic_tokenize(input_str.strip().lower(), lang)
    input_str = ' '.join(input_str_tokens)
    # NOTE(review): as written this replaces a space with a space (a no-op);
    # the intent was probably to collapse double spaces or to replace a
    # non-breaking space (U+00A0) lost in transcription -- confirm upstream.
    input_str = input_str.replace(' ', ' ')
    return input_str
def remove_puncts(input_str):
    """Return `input_str` with every ASCII punctuation character removed."""
    table = str.maketrans('', '', string.punctuation)
    return input_str.translate(table)
def basic_tokenize(input_str, lang):
    """
    Basic tokenization: NLTK word tokenization for English, plain
    whitespace splitting for every other language.

    Args:
        input_str: The input string
        lang: Language of the input string
    Return: a list of tokens of the input string
    """
    if lang != constants.ENGLISH:
        return input_str.strip().split(' ')
    return word_tokenize(input_str)
| nemo/collections/nlp/data/text_normalization/utils.py | 2,453 | The function is used to do some basic tokenization
Args:
input_str: The input string
lang: Language of the input string
Return: a list of tokens of the input string
Normalize an input string
Reading the raw data from a file of NeMo format
For more info about the data format, refer to the
`text_normalization doc <https://github.com/NVIDIA/NeMo/blob/main/docs/source/nlp/text_normalization.rst>`.
Remove punctuations from an input string
Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Read input file Reset | 1,056 | en | 0.755212 |
# Copyright (C) 2001-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS rdata.
@var _rdata_modules: A dictionary mapping a (rdclass, rdtype) tuple to
the module which implements that type.
@type _rdata_modules: dict
@var _module_prefix: The prefix to use when forming modules names. The
default is 'dns.rdtypes'. Changing this value will break the library.
@type _module_prefix: string
@var _hex_chunk: At most this many octets that will be represented in each
chunk of hexstring that _hexify() produces before whitespace occurs.
@type _hex_chunk: int"""
from io import BytesIO
import base64
import binascii
import dns.exception
import dns.name
import dns.rdataclass
import dns.rdatatype
import dns.tokenizer
import dns.wiredata
from ._compat import xrange, string_types, text_type
_hex_chunksize = 32
def _hexify(data, chunksize=_hex_chunksize):
"""Convert a binary string into its hex encoding, broken up into chunks
of I{chunksize} characters separated by a space.
@param data: the binary string
@type data: string
@param chunksize: the chunk size. Default is L{dns.rdata._hex_chunksize}
@rtype: string
"""
line = binascii.hexlify(data)
return b' '.join([line[i:i + chunksize]
for i
in range(0, len(line), chunksize)]).decode()
_base64_chunksize = 32
def _base64ify(data, chunksize=_base64_chunksize):
"""Convert a binary string into its base64 encoding, broken up into chunks
of I{chunksize} characters separated by a space.
@param data: the binary string
@type data: string
@param chunksize: the chunk size. Default is
L{dns.rdata._base64_chunksize}
@rtype: string
"""
line = base64.b64encode(data)
return b' '.join([line[i:i + chunksize]
for i
in range(0, len(line), chunksize)]).decode()
__escaped = bytearray(b'"\\')


def _escapify(qstring):
    """Escape the characters in a quoted string which need it.

    @param qstring: the string
    @type qstring: string
    @returns: the escaped string
    @rtype: string
    """
    if isinstance(qstring, text_type):
        qstring = qstring.encode()
    if not isinstance(qstring, bytearray):
        qstring = bytearray(qstring)

    out = []
    for byte in qstring:
        if byte in __escaped:
            # Quote and backslash get a backslash escape.
            out.append('\\' + chr(byte))
        elif 0x20 <= byte < 0x7F:
            # Printable ASCII passes through unchanged.
            out.append(chr(byte))
        else:
            # Everything else becomes a 3-digit decimal escape.
            out.append('\\%03d' % byte)
    return ''.join(out)
def _truncate_bitmap(what):
    """Determine the index of greatest byte that isn't all zeros, and
    return the bitmap that contains all the bytes less than that index.

    @param what: a string of octets representing a bitmap.
    @type what: string
    @rtype: string
    """
    # Scan backwards for the last non-zero byte; always keep at least one byte.
    for idx in xrange(len(what) - 1, -1, -1):
        if what[idx] != 0:
            return what[0: idx + 1]
    return what[0:1]
class Rdata(object):
"""Base class for all DNS rdata types.
"""
__slots__ = ['rdclass', 'rdtype']
def __init__(self, rdclass, rdtype):
"""Initialize an rdata.
@param rdclass: The rdata class
@type rdclass: int
@param rdtype: The rdata type
@type rdtype: int
"""
self.rdclass = rdclass
self.rdtype = rdtype
    def covers(self):
        """DNS SIG/RRSIG rdatas apply to a specific type; this type is
        returned by the covers() function. If the rdata type is not
        SIG or RRSIG, dns.rdatatype.NONE is returned. This is useful when
        creating rdatasets, allowing the rdataset to contain only RRSIGs
        of a particular type, e.g. RRSIG(NS).

        @rtype: int
        """
        # Base rdatas cover nothing; SIG/RRSIG subclasses override this.
        return dns.rdatatype.NONE
def extended_rdatatype(self):
"""Return a 32-bit type value, the least significant 16 bits of
which are the ordinary DNS type, and the upper 16 bits of which are
the "covered" type, if any.
@rtype: int
"""
return self.covers() << 16 | self.rdtype
    def to_text(self, origin=None, relativize=True, **kw):
        """Convert an rdata to text format.

        Abstract; every concrete rdata type overrides this.
        @rtype: string
        """
        raise NotImplementedError
    def to_wire(self, file, compress=None, origin=None):
        """Convert an rdata to wire format.

        Abstract; every concrete rdata type overrides this.
        @rtype: string
        """
        raise NotImplementedError
def to_digestable(self, origin=None):
"""Convert rdata to a format suitable for digesting in hashes. This
is also the DNSSEC canonical form."""
f = BytesIO()
self.to_wire(f, None, origin)
return f.getvalue()
    def validate(self):
        """Check that the current contents of the rdata's fields are
        valid. If you change an rdata by assigning to its fields,
        it is a good idea to call validate() when you are done making
        changes.

        Raises from the parser if the round-tripped text no longer parses.
        """
        # Round-trip: render to text, then re-parse it.
        dns.rdata.from_text(self.rdclass, self.rdtype, self.to_text())
    def __repr__(self):
        # Include the covered type (e.g. "RRSIG(NS)") when applicable.
        covers = self.covers()
        if covers == dns.rdatatype.NONE:
            ctext = ''
        else:
            ctext = '(' + dns.rdatatype.to_text(covers) + ')'
        return '<DNS ' + dns.rdataclass.to_text(self.rdclass) + ' ' + \
               dns.rdatatype.to_text(self.rdtype) + ctext + ' rdata: ' + \
               str(self) + '>'
def __str__(self):
return self.to_text()
def _cmp(self, other):
"""Compare an rdata with another rdata of the same rdtype and
rdclass. Return < 0 if self < other in the DNSSEC ordering,
0 if self == other, and > 0 if self > other.
"""
our = self.to_digestable(dns.name.root)
their = other.to_digestable(dns.name.root)
if our == their:
return 0
if our > their:
return 1
return -1
def __eq__(self, other):
if not isinstance(other, Rdata):
return False
if self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return False
return self._cmp(other) == 0
def __ne__(self, other):
if not isinstance(other, Rdata):
return True
if self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return True
return self._cmp(other) != 0
def __lt__(self, other):
if not isinstance(other, Rdata) or \
self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return NotImplemented
return self._cmp(other) < 0
def __le__(self, other):
if not isinstance(other, Rdata) or \
self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return NotImplemented
return self._cmp(other) <= 0
def __ge__(self, other):
if not isinstance(other, Rdata) or \
self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return NotImplemented
return self._cmp(other) >= 0
def __gt__(self, other):
if not isinstance(other, Rdata) or \
self.rdclass != other.rdclass or self.rdtype != other.rdtype:
return NotImplemented
return self._cmp(other) > 0
def __hash__(self):
return hash(self.to_digestable(dns.name.root))
@classmethod
def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
"""Build an rdata object from text format.
@param rdclass: The rdata class
@type rdclass: int
@param rdtype: The rdata type
@type rdtype: int
@param tok: The tokenizer
@type tok: dns.tokenizer.Tokenizer
@param origin: The origin to use for relative names
@type origin: dns.name.Name
@param relativize: should names be relativized?
@type relativize: bool
@rtype: dns.rdata.Rdata instance
"""
raise NotImplementedError
@classmethod
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
"""Build an rdata object from wire format
@param rdclass: The rdata class
@type rdclass: int
@param rdtype: The rdata type
@type rdtype: int
@param wire: The wire-format message
@type wire: string
@param current: The offset in wire of the beginning of the rdata.
@type current: int
@param rdlen: The length of the wire-format rdata
@type rdlen: int
@param origin: The origin to use for relative names
@type origin: dns.name.Name
@rtype: dns.rdata.Rdata instance
"""
raise NotImplementedError
def choose_relativity(self, origin=None, relativize=True):
"""Convert any domain names in the rdata to the specified
relativization.
"""
pass
class GenericRdata(Rdata):
    """Generic rdata class.

    Used for rdata types for which no better implementation exists;
    implements the DNS "unknown RRs" scheme (RFC 3597), storing the raw
    wire-format payload.
    """
    __slots__ = ['data']
    def __init__(self, rdclass, rdtype, data):
        super(GenericRdata, self).__init__(rdclass, rdtype)
        self.data = data
    def to_text(self, origin=None, relativize=True, **kw):
        # Generic presentation form: "\# <length> <hex data>".
        return r'\# %d ' % len(self.data) + _hexify(self.data)
    @classmethod
    def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
        # The generic syntax must begin with the literal token "\#".
        token = tok.get()
        if not token.is_identifier() or token.value != r'\#':
            raise dns.exception.SyntaxError(
                r'generic rdata does not start with \#')
        length = tok.get_int()
        # Accumulate hex chunks until end of line/file, then decode.
        hex_chunks = []
        token = tok.get()
        while not token.is_eol_or_eof():
            hex_chunks.append(token.value.encode())
            token = tok.get()
        data = binascii.unhexlify(b''.join(hex_chunks))
        if len(data) != length:
            raise dns.exception.SyntaxError(
                'generic rdata hex data has wrong length')
        return cls(rdclass, rdtype, data)
    def to_wire(self, file, compress=None, origin=None):
        # The stored payload already is the wire form.
        file.write(self.data)
    @classmethod
    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
        return cls(rdclass, rdtype, wire[current: current + rdlen])
# Cache of implementation modules already located, keyed by an
# (rdclass, rdtype) tuple.
_rdata_modules = {}
# Package prefix used when forming implementation module names;
# changing this value will break the library.
_module_prefix = 'dns.rdtypes'
def get_rdata_class(rdclass, rdtype):
    """Return the class implementing the given (rdclass, rdtype),
    importing and caching its module on first use.

    Falls back to the class-independent ('ANY') implementation, and
    finally to GenericRdata when no specific implementation exists.
    @rtype: class
    """
    def import_module(name):
        # __import__ returns the top-level package; walk the dotted
        # path down to the requested submodule.
        mod = __import__(name)
        components = name.split('.')
        for comp in components[1:]:
            mod = getattr(mod, comp)
        return mod
    mod = _rdata_modules.get((rdclass, rdtype))
    rdclass_text = dns.rdataclass.to_text(rdclass)
    rdtype_text = dns.rdatatype.to_text(rdtype)
    # Type mnemonics such as NSEC3-PARAM are not valid module names.
    rdtype_text = rdtype_text.replace('-', '_')
    if not mod:
        # Consistency fix: the cache is populated below under the key
        # (dns.rdataclass.ANY, rdtype), so use the same (class, type)
        # key here; the original looked up dns.rdatatype.ANY (same
        # numeric value, 255, but semantically a type, not a class).
        mod = _rdata_modules.get((dns.rdataclass.ANY, rdtype))
        if not mod:
            try:
                mod = import_module('.'.join([_module_prefix,
                                              rdclass_text, rdtype_text]))
                _rdata_modules[(rdclass, rdtype)] = mod
            except ImportError:
                try:
                    mod = import_module('.'.join([_module_prefix,
                                                  'ANY', rdtype_text]))
                    _rdata_modules[(dns.rdataclass.ANY, rdtype)] = mod
                except ImportError:
                    mod = None
    if mod:
        cls = getattr(mod, rdtype_text)
    else:
        cls = GenericRdata
    return cls
def from_text(rdclass, rdtype, tok, origin=None, relativize=True):
    """Build an rdata object from text format.
    Dynamically selects the implementing class for the given rdata
    class and type (falling back to GenericRdata) and delegates to its
    from_text() class method. If I{tok} is a string, a tokenizer is
    created for it first.
    @param rdclass: The rdata class
    @type rdclass: int
    @param rdtype: The rdata type
    @type rdtype: int
    @param tok: The tokenizer or input text
    @type tok: dns.tokenizer.Tokenizer or string
    @param origin: The origin to use for relative names
    @type origin: dns.name.Name
    @param relativize: Should names be relativized?
    @type relativize: bool
    @rtype: dns.rdata.Rdata instance"""
    if isinstance(tok, string_types):
        tok = dns.tokenizer.Tokenizer(tok)
    cls = get_rdata_class(rdclass, rdtype)
    if cls != GenericRdata:
        # Peek at the first token: a known type may still be written in
        # the generic ("\#") syntax.
        token = tok.get()
        tok.unget(token)
        if token.is_identifier() and token.value == r'\#':
            # Known type using the generic syntax: extract the wire
            # form via GenericRdata, then decode it with the
            # type-specific from_wire implementation.
            generic = GenericRdata.from_text(rdclass, rdtype, tok, origin,
                                             relativize)
            return from_wire(rdclass, rdtype, generic.data, 0,
                             len(generic.data), origin)
    return cls.from_text(rdclass, rdtype, tok, origin, relativize)
def from_wire(rdclass, rdtype, wire, current, rdlen, origin=None):
    """Build an rdata object from wire format.
    Dynamically selects the implementing class for the given rdata
    class and type (falling back to GenericRdata) and delegates to its
    from_wire() class method.
    @param rdclass: The rdata class
    @type rdclass: int
    @param rdtype: The rdata type
    @type rdtype: int
    @param wire: The wire-format message
    @type wire: string
    @param current: The offset in wire of the beginning of the rdata.
    @type current: int
    @param rdlen: The length of the wire-format rdata
    @type rdlen: int
    @param origin: The origin to use for relative names
    @type origin: dns.name.Name
    @rtype: dns.rdata.Rdata instance"""
    wrapped_wire = dns.wiredata.maybe_wrap(wire)
    rdata_cls = get_rdata_class(rdclass, rdtype)
    return rdata_cls.from_wire(rdclass, rdtype, wrapped_wire, current,
                               rdlen, origin)
| gcloud/google-cloud-sdk/.install/.backup/lib/third_party/dns/rdata.py | 14,998 | Generate Rdata Class
This class is used for rdata types for which we have no better
implementation. It implements the DNS "unknown RRs" scheme.
Base class for all DNS rdata types.
Initialize an rdata.
@param rdclass: The rdata class
@type rdclass: int
@param rdtype: The rdata type
@type rdtype: int
Convert a binary string into its base64 encoding, broken up into chunks
of I{chunksize} characters separated by a space.
@param data: the binary string
@type data: string
@param chunksize: the chunk size. Default is
L{dns.rdata._base64_chunksize}
@rtype: string
Compare an rdata with another rdata of the same rdtype and
rdclass. Return < 0 if self < other in the DNSSEC ordering,
0 if self == other, and > 0 if self > other.
Escape the characters in a quoted string which need it.
@param qstring: the string
@type qstring: string
@returns: the escaped string
@rtype: string
Convert a binary string into its hex encoding, broken up into chunks
of I{chunksize} characters separated by a space.
@param data: the binary string
@type data: string
@param chunksize: the chunk size. Default is L{dns.rdata._hex_chunksize}
@rtype: string
Determine the index of greatest byte that isn't all zeros, and
return the bitmap that contains all the bytes less than that index.
@param what: a string of octets representing a bitmap.
@type what: string
@rtype: string
Convert any domain names in the rdata to the specified
relativization.
DNS SIG/RRSIG rdatas apply to a specific type; this type is
returned by the covers() function. If the rdata type is not
SIG or RRSIG, dns.rdatatype.NONE is returned. This is useful when
creating rdatasets, allowing the rdataset to contain only RRSIGs
of a particular type, e.g. RRSIG(NS).
@rtype: int
Return a 32-bit type value, the least significant 16 bits of
which are the ordinary DNS type, and the upper 16 bits of which are
the "covered" type, if any.
@rtype: int
Build an rdata object from text format.
This function attempts to dynamically load a class which
implements the specified rdata class and type. If there is no
class-and-type-specific implementation, the GenericRdata class
is used.
Once a class is chosen, its from_text() class method is called
with the parameters to this function.
If I{tok} is a string, then a tokenizer is created and the string
is used as its input.
@param rdclass: The rdata class
@type rdclass: int
@param rdtype: The rdata type
@type rdtype: int
@param tok: The tokenizer or input text
@type tok: dns.tokenizer.Tokenizer or string
@param origin: The origin to use for relative names
@type origin: dns.name.Name
@param relativize: Should names be relativized?
@type relativize: bool
@rtype: dns.rdata.Rdata instance
Build an rdata object from text format.
@param rdclass: The rdata class
@type rdclass: int
@param rdtype: The rdata type
@type rdtype: int
@param tok: The tokenizer
@type tok: dns.tokenizer.Tokenizer
@param origin: The origin to use for relative names
@type origin: dns.name.Name
@param relativize: should names be relativized?
@type relativize: bool
@rtype: dns.rdata.Rdata instance
Build an rdata object from wire format
This function attempts to dynamically load a class which
implements the specified rdata class and type. If there is no
class-and-type-specific implementation, the GenericRdata class
is used.
Once a class is chosen, its from_wire() class method is called
with the parameters to this function.
@param rdclass: The rdata class
@type rdclass: int
@param rdtype: The rdata type
@type rdtype: int
@param wire: The wire-format message
@type wire: string
@param current: The offset in wire of the beginning of the rdata.
@type current: int
@param rdlen: The length of the wire-format rdata
@type rdlen: int
@param origin: The origin to use for relative names
@type origin: dns.name.Name
@rtype: dns.rdata.Rdata instance
Build an rdata object from wire format
@param rdclass: The rdata class
@type rdclass: int
@param rdtype: The rdata type
@type rdtype: int
@param wire: The wire-format message
@type wire: string
@param current: The offset in wire of the beginning of the rdata.
@type current: int
@param rdlen: The length of the wire-format rdata
@type rdlen: int
@param origin: The origin to use for relative names
@type origin: dns.name.Name
@rtype: dns.rdata.Rdata instance
Convert rdata to a format suitable for digesting in hashes. This
is also the DNSSEC canonical form.
Convert an rdata to text format.
@rtype: string
Convert an rdata to wire format.
@rtype: string
Check that the current contents of the rdata's fields are
valid. If you change an rdata by assigning to its fields,
it is a good idea to call validate() when you are done making
changes.
DNS rdata.
@var _rdata_modules: A dictionary mapping a (rdclass, rdtype) tuple to
the module which implements that type.
@type _rdata_modules: dict
@var _module_prefix: The prefix to use when forming modules names. The
default is 'dns.rdtypes'. Changing this value will break the library.
@type _module_prefix: string
@var _hex_chunk: At most this many octets that will be represented in each
chunk of hexstring that _hexify() produces before whitespace occurs.
@type _hex_chunk: int
Copyright (C) 2001-2007, 2009-2011 Nominum, Inc. Permission to use, copy, modify, and distribute this software and its documentation for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. peek at first token Known type using the generic syntax. Extract the wire form from the generic syntax, and then run from_wire on it. | 6,067 | en | 0.625867 |
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 23 15:54:01 2018
@author: shinyonsei2
"""
import numpy as np
import imageio
def read_pfm(fpath, expected_identifier="Pf"):
    """Read a single-channel PFM file into a 2-D float array.

    PFM format definition: http://netpbm.sourceforge.net/doc/pfm.html

    The image is returned flipped to conventional top-to-bottom row
    order and scaled by the absolute value of the header scale factor.

    Args:
        fpath (str): path to the .pfm file
        expected_identifier (str): expected magic header ("Pf" for
            greyscale PFM)

    Returns:
        numpy.ndarray: array of shape (height, width)

    Raises:
        Exception: on a malformed header or truncated pixel data
    """
    # PFM format definition: http://netpbm.sourceforge.net/doc/pfm.html
    def _get_next_line(f):
        next_line = f.readline().decode('utf-8').rstrip()
        # ignore comments
        while next_line.startswith('#'):
            # Bug fix: comment lines must be decoded too; the original
            # skipped .decode() here, so under Python 3 startswith()
            # compared str against bytes and raised TypeError.
            next_line = f.readline().decode('utf-8').rstrip()
        return next_line

    with open(fpath, 'rb') as f:
        # header
        identifier = _get_next_line(f)
        if identifier != expected_identifier:
            raise Exception('Unknown identifier. Expected: "%s", got: "%s".' % (expected_identifier, identifier))

        # Read the line outside the try so the error message below can
        # never reference an unbound name.
        line_dimensions = _get_next_line(f)
        try:
            dimensions = line_dimensions.split(' ')
            width = int(dimensions[0].strip())
            height = int(dimensions[1].strip())
        except (ValueError, IndexError):
            raise Exception('Could not parse dimensions: "%s". '
                            'Expected "width height", e.g. "512 512".' % line_dimensions)

        line_scale = _get_next_line(f)
        try:
            scale = float(line_scale)
            # Explicit check instead of assert (asserts vanish under -O).
            if scale == 0:
                raise ValueError('scale must be non-zero')
            # A negative scale marks little-endian data, positive big-endian.
            if scale < 0:
                endianness = "<"
            else:
                endianness = ">"
        except ValueError:
            raise Exception('Could not parse max value / endianess information: "%s". '
                            'Should be a non-zero number.' % line_scale)

        try:
            data = np.fromfile(f, "%sf" % endianness)
            data = np.reshape(data, (height, width))
            # PFM stores rows bottom-to-top; flip to top-to-bottom.
            data = np.flipud(data)
            with np.errstate(invalid="ignore"):
                data *= abs(scale)
        except ValueError:
            raise Exception('Invalid binary values. Could not create %dx%d array from input.' % (height, width))

        return data
def load_LFdata(dir_LFimages, hci_root):
    """Load HCI light-field images and ground-truth disparity maps.

    Args:
        dir_LFimages (list): sample directory names relative to hci_root
        hci_root (str): root directory of the HCI dataset

    Returns:
        tuple: (traindata_all, traindata_label) where traindata_all has
        shape (N, 512, 512, 9, 9, 3) uint8 (9x9 angular views) and
        traindata_label has shape (N, 512, 512) float32.
    """
    traindata_all = np.zeros((len(dir_LFimages), 512, 512, 9, 9, 3), np.uint8)
    traindata_label = np.zeros((len(dir_LFimages), 512, 512), np.float32)
    image_id = 0
    for dir_LFimage in dir_LFimages:
        print(dir_LFimage)
        for i in range(81):
            try:
                # load LF images (9x9 angular grid, one view per file)
                tmp = np.float32(imageio.imread(
                    hci_root + dir_LFimage + '/input_Cam0%.2d.png' % i))
            except Exception:
                print(hci_root + dir_LFimage + '/input_Cam0%.2d.png..does not exist' % i)
                # Bug fix: the original swallowed the error and then hit a
                # NameError on the unbound `tmp`; re-raise the real cause.
                raise
            traindata_all[image_id, :, :, i // 9, i % 9, :] = tmp
            del tmp
        try:
            # load LF disparity map
            tmp = np.float32(read_pfm(hci_root + dir_LFimage + '/gt_disp_lowres.pfm'))
        except Exception:
            # Bug fix: the original applied '% i' to a message with no
            # conversion specifier, raising TypeError instead of printing.
            print(hci_root + dir_LFimage + '/gt_disp_lowres.pfm..does not exist')
            raise
        traindata_label[image_id, :, :] = tmp
        del tmp
        image_id = image_id + 1
    return traindata_all, traindata_label
def load_depth_gts(gt_dir, dir_LFimages):
    """Load per-view ground-truth disparity maps for each sample.

    Reads 81 (9x9) low-resolution disparity PFM files per sample from
    gt_dir and stacks them along the last axis.

    Args:
        gt_dir (str): directory containing per-sample gt folders
        dir_LFimages (list): sample directory paths; only the final path
            component is used as the sample name

    Returns:
        numpy.ndarray: float32 array of shape (N, 512, 512, 81)
    """
    w_views = 9
    n_views = w_views ** 2
    traindata_label = np.zeros((len(dir_LFimages), 512, 512, n_views), np.float32)
    for image_id, dir_LFimage in enumerate(dir_LFimages):
        sample_name = dir_LFimage.split('/')[-1]
        print("loading additional gt.. " + sample_name)
        for view in range(n_views):
            # load LF disparity map for this angular view
            gt_path = gt_dir + sample_name + '/gt_disp_lowres_Cam0%.2d.pfm' % view
            traindata_label[image_id, :, :, view] = np.float32(read_pfm(gt_path))
    return traindata_label
| epinet_fun/util.py | 3,690 | Created on Fri Dec 23 15:54:01 2018
@author: shinyonsei2
-*- coding: utf-8 -*- PFM format definition: http://netpbm.sourceforge.net/doc/pfm.html ignore comments header load LF images(9x9) load LF disparity map try: 0%.2d.png load LF disparity map except: print(hci_root + dir_LFimage+'\gt_disp_lowres.pfm..does not exist' % i ) | 368 | en | 0.3444 |
# Copyright 2020 University of New South Wales, University of Sydney
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
import pathlib
import pydicom
import numpy as np
import SimpleITK as sitk
from skimage.draw import polygon
from loguru import logger
from datetime import datetime
def flatten(itr):
    """Recursively flatten arbitrarily nested iterables, yielding leaves.

    Strings, bytes and SimpleITK images are treated as atomic leaves
    even though they are themselves iterable.
    """
    if type(itr) in (str, bytes, sitk.Image):
        yield itr
        return
    for element in itr:
        try:
            yield from flatten(element)
        except TypeError:
            # element is not iterable: it is a leaf
            yield element
def get_suv_bw_scale_factor(ds):
    """Return the body-weight SUV scale factor for a PET dataset.

    Multiplying stored pixel values (BQML) by this factor yields SUV
    normalised by patient body weight.

    Args:
        ds (pydicom.Dataset): PET ("PT") dataset with
            RadiopharmaceuticalInformationSequence populated.

    Returns:
        float: the SUV-bw scale factor.
    """
    # Modified from
    # https://qibawiki.rsna.org/images/6/62/SUV_vendorneutral_pseudocode_happypathonly_20180626_DAC.pdf
    if ds.Units == "CNTS":
        # Try to find the Philips private scale factor
        return float(ds[0x7053, 0x1000].value)
    # Happy-path preconditions from the QIBA pseudocode.
    assert ds.Modality == "PT"
    assert "DECY" in ds.CorrectedImage
    assert "ATTN" in ds.CorrectedImage
    assert "START" in ds.DecayCorrection
    assert ds.Units == "BQML"
    half_life = float(ds.RadiopharmaceuticalInformationSequence[0].RadionuclideHalfLife)
    # NOTE(review): if "SeriesTime" is absent, series_date_time and
    # start_time are never assigned and the decay_time computation below
    # raises NameError -- confirm whether a fallback is required.
    if "SeriesTime" in ds:
        series_date_time = ds.SeriesDate + "_" + ds.SeriesTime
        if "." in series_date_time:
            # Strip fractional seconds; strptime below expects %H%M%S.
            series_date_time = series_date_time[
                : -(len(series_date_time) - series_date_time.index("."))
            ]
        series_date_time = datetime.strptime(series_date_time, "%Y%m%d_%H%M%S")
    if "SeriesTime" in ds:
        start_time = (
            ds.SeriesDate
            + "_"
            + ds.RadiopharmaceuticalInformationSequence[0].RadiopharmaceuticalStartTime
        )
        if "." in start_time:
            start_time = start_time[: -(len(start_time) - start_time.index("."))]
        start_time = datetime.strptime(start_time, "%Y%m%d_%H%M%S")
    # Decay-correct the injected dose from injection time to scan time.
    decay_time = (series_date_time - start_time).seconds
    injected_dose = float(ds.RadiopharmaceuticalInformationSequence[0].RadionuclideTotalDose)
    decayed_dose = injected_dose * pow(2, -decay_time / half_life)
    patient_weight = float(ds.PatientWeight)
    # Weight in grams divided by the decayed dose in Bq.
    suv_bw_scale_factor = patient_weight * 1000 / decayed_dose
    return suv_bw_scale_factor
def get_dicom_info_from_description(dicom_object, return_extra=False, sop_class_name="UNKNOWN"):
    """
    Attempts to return some information from a DICOM
    This is typically used for naming converted NIFTI files

    Args:
        dicom_object (pydicom.dataset.FileDataset): The DICOM object
        return_extra (bool, optional): return information that is usually not required
        sop_class_name (str, optional): SOP class name assumed when the object
            has no SOP Class UID. Defaults to "UNKNOWN".

    Returns:
        info (str): Some extracted information. Implicitly returns None when
        the SOP class is not an image, or the modality is not CT/MR/PT.
    """
    try:
        dicom_sop_class_name = dicom_object.SOPClassUID.name
    except AttributeError:
        logger.warning(f"Could not find DICOM SOP Class UID, using {sop_class_name}.")
        dicom_sop_class_name = sop_class_name

    if "Image" in dicom_sop_class_name:
        # Get the modality
        image_modality = dicom_object.Modality
        logger.info(f" Image modality: {image_modality}")

        if image_modality == "CT":
            # There is typically not much extra information
            # At the moment, we do not return anything for CT imaging
            if return_extra:
                try:
                    protocol_name = dicom_object.ProtocolName
                    if protocol_name != "":
                        return re.sub(r"[^\w]", "_", protocol_name).upper()
                except AttributeError:
                    logger.warning(" Could not find ProtocolName")
            return ""

        elif image_modality == "MR":
            # Not much consistency, but we can get the protocol name
            try:
                protocol_name = re.sub(r"[^\w]", "_", dicom_object.ProtocolName).upper()
            except AttributeError:
                logger.warning(" Could not find ProtocolName")
                protocol_name = ""
            try:
                sequence_name = re.sub(r"[^\w]", "_", dicom_object.SequenceName).upper()
            except AttributeError:
                logger.warning(" Could not find SequenceName")
                sequence_name = ""
            try:
                series_description = re.sub(r"[^\w]", "_", dicom_object.SeriesDescription).upper()
            except AttributeError:
                # Bug fix: this warning previously said "SequenceName",
                # copy-pasted from the block above.
                logger.warning(" Could not find SeriesDescription")
                series_description = ""
            combined_name = "_".join([protocol_name, sequence_name, series_description])
            # Collapse runs of underscores left by empty components.
            while "__" in combined_name:
                combined_name = combined_name.replace("__", "_")
            if protocol_name != "" and not return_extra:
                return protocol_name
            else:
                return combined_name

        elif image_modality == "PT":
            # Not much experience with this
            # We can search through the corrections applied
            # Return whether or not attenuation correction is applied
            try:
                corrections = dicom_object.CorrectedImage
            except AttributeError:
                corrections = "NONE"
            if "ATTN" in corrections:
                return "AC"
            else:
                return "NAC"
def safe_sort_dicom_image_list(dicom_image_list):
    """
    Sorts a list of DICOM image files based on a DICOM tag value.
    This is a much safer method than reading SliceLocation.
    It takes mandatory DICOM fields (Image Position [Patient]) and (Image Orientation [Patient]).
    The list of DICOM files is sorted by projecting the image position onto the axis normal to the
    place defined by the image orientation.
    This accounts for differences in patient position (e.g. HFS/FFS).
    Args:
        dicom_image_list (list): paths of DICOM image files readable by pydicom

    Returns:
        list: the same paths, sorted by location along the slice axis
    """
    sorted_dict = {}
    for dicom_file in dicom_image_list:
        dcm = pydicom.read_file(dicom_file, force=True)
        image_position = np.array(dcm.ImagePositionPatient, dtype=float)
        image_orientation = np.array(dcm.ImageOrientationPatient, dtype=float)
        # Normal to the image plane: cross product of the row and column
        # direction cosines.
        image_plane_normal = np.cross(image_orientation[:3], image_orientation[3:])
        # NOTE(review): this takes only the z component of the
        # element-wise product (position_z * normal_z), not the full dot
        # product the docstring describes -- confirm this is intended
        # for the orientations this tool supports.
        slice_location = (image_position * image_plane_normal)[2]
        sorted_dict[dicom_file] = slice_location
    # Sort the original list by each file's computed slice location.
    sorter_safe = lambda dcm_file: sorted_dict[dcm_file]
    return sorted(dicom_image_list, key=sorter_safe)
def fix_missing_data(contour_data_list):
    """
    Fixes missing points in contouring using simple linear interpolation

    Args:
        contour_data_list (list): The contour data for each slice, as a flat
            [x0, y0, z0, x1, y1, z1, ...] sequence; missing entries appear
            as empty strings

    Returns:
        contour_data (numpy array): Interpolated contour data (castable to
        float by the caller)
    """
    contour_data = np.array(contour_data_list)
    # Bug fix: the original tested ``contour_data.any() == ""``, which
    # compares the reduced truth value with a string and never detects
    # missing entries (and errors outright on string dtypes); test
    # element-wise instead.
    if np.any(contour_data == ""):
        # Re-create as an object array so that interpolated floats are
        # not truncated when written into a fixed-width string array.
        contour_data = np.array(contour_data_list, dtype=object)
        logger.warning(" Missing values detected.")
        missing_values = np.where(contour_data == "")[0]
        if missing_values.shape[0] > 1:
            logger.warning(" More than one value missing, fixing this isn't implemented yet...")
        else:
            logger.warning(" Only one value missing.")
            missing_index = missing_values[0]
            # Entries cycle x, y, z; the remainder picks the axis.
            missing_axis = missing_index % 3
            if missing_axis == 0:
                logger.warning(" Missing value in x axis: interpolating.")
                # NOTE(review): the wrap-around neighbour selection below is
                # kept from the original; confirm the intended neighbours at
                # the first/last vertex.
                if missing_index > len(contour_data) - 3:
                    lower_val = contour_data[missing_index - 3]
                    upper_val = contour_data[0]
                elif missing_index == 0:
                    lower_val = contour_data[-3]
                    upper_val = contour_data[3]
                else:
                    lower_val = contour_data[missing_index - 3]
                    upper_val = contour_data[missing_index + 3]
                # Bug fix: neighbours are stored as strings here, so cast to
                # float before averaging (string + string would concatenate).
                contour_data[missing_index] = 0.5 * (float(lower_val) + float(upper_val))
            elif missing_axis == 1:
                logger.warning(" Missing value in y axis: interpolating.")
                if missing_index > len(contour_data) - 2:
                    lower_val = contour_data[missing_index - 3]
                    upper_val = contour_data[1]
                elif missing_index == 0:
                    lower_val = contour_data[-2]
                    upper_val = contour_data[4]
                else:
                    lower_val = contour_data[missing_index - 3]
                    upper_val = contour_data[missing_index + 3]
                contour_data[missing_index] = 0.5 * (float(lower_val) + float(upper_val))
            else:
                logger.warning(" Missing value in z axis: taking slice value")
                # All points on a contour share a slice; use the minimum of
                # the remaining z values.
                temp = contour_data[2::3].tolist()
                temp.remove("")
                contour_data[missing_index] = np.min(np.array(temp, dtype=np.double))
    return contour_data
def transform_point_set_from_dicom_struct(image, dicom_struct, spacing_override=False):
    """
    This function is used to generate a binary mask from a set of vertices.
    This allows us to convert from DICOM-RTStruct format to any imaging format.
    Args:
        image ([SimpleITK.Image]): The image, used to copy imaging information
            (e.g. resolution, spacing)
        dicom_struct ([pydicom.Dataset]): The DICOM-RTStruct file
        spacing_override (bool | tuple, optional): Overwrite the spacing.
            Set with (axial_spacing, coronal_spacing, sagittal spacing). Defaults to False.
    Returns:
        list, list : final_struct_name_sequence, structure_list
    """
    if spacing_override:
        # Replace only the non-zero entries of the override; zeros keep
        # the image's existing spacing in that dimension.
        current_spacing = list(image.GetSpacing())
        new_spacing = tuple(
            [
                current_spacing[k] if spacing_override[k] == 0 else spacing_override[k]
                for k in range(3)
            ]
        )
        image.SetSpacing(new_spacing)
    struct_point_sequence = dicom_struct.ROIContourSequence
    # ROI names with internal whitespace collapsed to underscores.
    struct_name_sequence = [
        "_".join(i.ROIName.split()) for i in dicom_struct.StructureSetROISequence
    ]
    structure_list = []
    final_struct_name_sequence = []
    for structIndex, structure_name in enumerate(struct_name_sequence):
        # SimpleITK sizes are (x, y, z); numpy arrays are indexed
        # (z, y, x), hence the [::-1] reversal.
        image_blank = np.zeros(image.GetSize()[::-1], dtype=np.uint8)
        logger.info(
            " Converting structure {0} with name: {1}".format(structIndex, structure_name)
        )
        # Skip structures that have no usable closed-planar contours.
        if structIndex >= len(struct_point_sequence):
            logger.warning(" Contour sequence is missing, skipping.")
            continue
        if not hasattr(struct_point_sequence[structIndex], "ContourSequence"):
            logger.warning(" No contour sequence found for this structure, skipping.")
            continue
        if len(struct_point_sequence[structIndex].ContourSequence) == 0:
            logger.warning(" Contour sequence is empty, skipping.")
            continue
        if (
            not struct_point_sequence[structIndex].ContourSequence[0].ContourGeometricType
            == "CLOSED_PLANAR"
        ):
            logger.warning(" This is not a closed planar structure, skipping.")
            continue
        for sl in range(len(struct_point_sequence[structIndex].ContourSequence)):
            contour_data = fix_missing_data(
                struct_point_sequence[structIndex].ContourSequence[sl].ContourData
            )
            struct_slice_contour_data = np.array(contour_data, dtype=np.double)
            # Flat [x, y, z, x, y, z, ...] data reshaped to (n_points, 3).
            vertexArr_physical = struct_slice_contour_data.reshape(
                struct_slice_contour_data.shape[0] // 3, 3
            )
            # Convert physical (mm) coordinates to voxel indices.
            point_arr = np.array(
                [image.TransformPhysicalPointToIndex(i) for i in vertexArr_physical]
            ).T
            [xVertexArr_image, yVertexArr_image] = point_arr[[0, 1]]
            zIndex = point_arr[2][0]
            # A closed planar contour must lie entirely within one axial slice.
            if np.any(point_arr[2] != zIndex):
                logger.error(" Axial slice index varies in contour. Quitting now.")
                logger.error(" Structure: {0}".format(structure_name))
                logger.error(" Slice index: {0}".format(zIndex))
                # NOTE(review): quit() terminates the interpreter; a raised
                # exception would be friendlier to library callers -- confirm.
                quit()
            if zIndex >= image.GetSize()[2]:
                logger.warning(" Slice index greater than image size. Skipping slice.")
                logger.warning(" Structure: {0}".format(structure_name))
                logger.warning(" Slice index: {0}".format(zIndex))
                continue
            # Rasterize the polygon into this axial slice and accumulate.
            sliceArr = np.zeros(image.GetSize()[:2], dtype=np.uint8)
            filledIndicesX, filledIndicesY = polygon(
                xVertexArr_image, yVertexArr_image, shape=sliceArr.shape
            )
            sliceArr[filledIndicesX, filledIndicesY] = 1
            image_blank[zIndex] += sliceArr.T
        # Binarize (overlapping contours may have accumulated > 1) and copy
        # the geometric information from the reference image.
        struct_image = sitk.GetImageFromArray(1 * (image_blank > 0))
        struct_image.CopyInformation(image)
        structure_list.append(sitk.Cast(struct_image, sitk.sitkUInt8))
        structure_name_clean = re.sub(r"[^\w]", "_", structure_name).upper()
        while "__" in structure_name_clean:
            structure_name_clean = structure_name_clean.replace("__", "_")
        final_struct_name_sequence.append(structure_name_clean)
    return final_struct_name_sequence, structure_list
def process_dicom_file_list(dicom_file_list, parent_sorting_field="PatientName", verbose=False):
    """
    Organise the DICOM files by the series UID

    Builds a two-level mapping: value of ``parent_sorting_field`` (e.g.
    patient name) -> series instance UID -> list of file paths.
    """
    dicom_series_dict_parent = {}
    for i, dicom_file in enumerate(sorted(dicom_file_list)):
        if verbose is True:
            logger.debug(f" Sorting file {i}")
        # Entries are pathlib.Path objects; work with their string form.
        dicom_file = dicom_file.as_posix()
        if "dicomdir" in dicom_file.lower():
            logger.warning(
                "DICOMDIR is not supported in this tool, images are read directly. Skipping."
            )
            continue
        dicom_object = pydicom.read_file(dicom_file, force=True)
        parent_sorting_field_data = dicom_object[parent_sorting_field].value
        series_uid = dicom_object.SeriesInstanceUID
        # Create the nested dict/list entries on first sight of each key.
        series_dict = dicom_series_dict_parent.setdefault(parent_sorting_field_data, {})
        series_dict.setdefault(series_uid, []).append(dicom_file)
    return dicom_series_dict_parent
def process_dicom_series(
dicom_series_dict,
series_uid,
parent_sorting_field="PatientName",
return_extra=True,
individual_file=False,
initial_sop_class_name_default="UNKNOWN",
):
if not individual_file:
logger.info(f" Processing series UID: {series_uid}")
dicom_file_list = dicom_series_dict[series_uid]
else:
logger.info(f" Processing individual file: {individual_file}")
dicom_file_list = [individual_file]
logger.info(f" Number of DICOM files: {len(dicom_file_list)}")
initial_dicom = pydicom.read_file(dicom_file_list[0])
# Get the data in the parent sorting field, clean with RegEx
parent_sorting_data = re.sub(
r"[^\w]", "_", str(initial_dicom[parent_sorting_field].value)
).upper()
if parent_sorting_data == "":
logger.error(
f"Could not find any data in {parent_sorting_field}. This is very bad, the data cannot be sorted properly."
)
"""
! TO DO
Implement a routine to let a user correlate a root directory with a name
"""
parent_sorting_data = "TEMP"
try:
initial_dicom_sop_class_name = initial_dicom.SOPClassUID.name
except AttributeError:
logger.warning(
f"Could not find DICOM SOP Class UID, using {initial_sop_class_name_default}."
)
initial_dicom_sop_class_name = initial_sop_class_name_default
try:
study_uid = initial_dicom.StudyInstanceUID
except AttributeError:
study_uid = "00001"
"""
! TO DO
Need to check for secondary capture image storage
This can include JPEGs with written information on them
This is typically not very useful
We can dump it to file
Or just save the DICOM file in the folder of interest
Not a big problem, sort out another day
"""
# Check the potential types of DICOM files
if (
"Image" in initial_dicom_sop_class_name
and initial_dicom_sop_class_name != "Secondary Capture Image Storage"
):
# Load as an primary image
sorted_file_list = safe_sort_dicom_image_list(dicom_file_list)
try:
image = sitk.ReadImage(sorted_file_list)
except RuntimeError:
logger.warning(" Could not read image into SimpleITK.")
logger.info(" Processing files individually.")
for dicom_file in dicom_file_list:
return process_dicom_series(
dicom_series_dict,
series_uid,
parent_sorting_field=parent_sorting_field,
return_extra=return_extra,
individual_file=dicom_file,
initial_sop_class_name_default=initial_sop_class_name_default,
)
dicom_file_metadata = {
"parent_sorting_data": parent_sorting_data,
"study_uid": study_uid,
}
"""
! TO DO - integrity check
Read in all the files here, check the slice location and determine if any are missing
"""
if initial_dicom.Modality == "PT":
# scaling_factor = get_suv_bw_scale_factor(initial_dicom)
# image *= scaling_factor
# !TO DO
# Work on PET SUV conversion
None
"""
! CHECKPOINT
Some DCE MRI sequences have the same series UID
Here we check the sequence name, and split if necessary
"""
if initial_dicom.Modality == "MR":
try:
sequence_names = np.unique(
[pydicom.read_file(x).SequenceName for x in dicom_file_list]
)
sequence_dict = {}
for dcm_name in dicom_file_list:
dcm_obj = pydicom.read_file(dcm_name)
var = dcm_obj.SequenceName
if var not in sequence_dict.keys():
sequence_dict[var] = [dcm_name]
else:
sequence_dict[var].append(dcm_name)
except AttributeError:
try:
logger.warning(
" MRI sequence name not found. The SeriesDescription will be used instead."
)
sequence_names = np.unique(
[pydicom.read_file(x).SeriesDescription for x in dicom_file_list]
)
sequence_dict = {}
for dcm_name in dicom_file_list:
dcm_obj = pydicom.read_file(dcm_name)
var = dcm_obj.SeriesDescription
if var not in sequence_dict.keys():
sequence_dict[var] = [dcm_name]
else:
sequence_dict[var].append(dcm_name)
except AttributeError:
logger.warning(
" MRI SeriesDescription not found. The AcquisitionComments will be used instead."
)
sequence_names = np.unique(
[pydicom.read_file(x).AcquisitionComments for x in dicom_file_list]
)
sequence_dict = {}
for dcm_name in dicom_file_list:
dcm_obj = pydicom.read_file(dcm_name)
var = dcm_obj.AcquisitionComments
if var not in sequence_dict.keys():
sequence_dict[var] = [dcm_name]
else:
sequence_dict[var].append(dcm_name)
if initial_dicom.Manufacturer == "GE MEDICAL SYSTEMS":
# GE use the DICOM tag (0019, 10a2) [Raw data run number]
# in Diffusion weighted MRI sequences
# We need to separate this out to get the difference sequences
if initial_dicom.SeriesDescription == "Diffusion Weighted":
# num_sequences = int( (initial_dicom[(0x0025, 0x1007)]) / (initial_dicom[(0x0021, 0x104f)]) )
# number_of_images / images_per_seq
num_images_per_seq = initial_dicom[(0x0021, 0x104F)].value
sequence_names = np.unique(
[
f"DWI_{str( ( pydicom.read_file(x)['InstanceNumber'].value - 1) // num_images_per_seq )}"
for x in dicom_file_list
]
)
sequence_name_index_dict = {
name: index for index, name in enumerate(sequence_names)
}
sequence_dict = {}
for dcm_name in dicom_file_list:
dcm_obj = pydicom.read_file(dcm_name)
var = f"DWI_{str( ( dcm_obj['InstanceNumber'].value - 1) // num_images_per_seq )}"
var_to_index = sequence_name_index_dict[var]
if var_to_index not in sequence_dict.keys():
sequence_dict[var_to_index] = [dcm_name]
else:
sequence_dict[var_to_index].append(dcm_name)
sequence_names = sorted(sequence_dict.keys())
if np.alen(sequence_names) > 1:
logger.warning(" Two MR sequences were found under a single series UID.")
logger.warning(" These will be split into separate images.")
# Split up the DICOM file list by sequence name
for sequence_name in sequence_names:
dicom_file_list_by_sequence = sequence_dict[sequence_name]
logger.info(sequence_name)
logger.info(len(dicom_file_list_by_sequence))
sorted_file_list = safe_sort_dicom_image_list(dicom_file_list_by_sequence)
initial_dicom = pydicom.read_file(sorted_file_list[0], force=True)
image_by_sequence = sitk.ReadImage(sorted_file_list)
dicom_file_metadata_by_sequence = {
"parent_sorting_data": parent_sorting_data,
"study_uid": study_uid,
}
yield "IMAGES", dicom_file_metadata_by_sequence, initial_dicom, image_by_sequence
return # Stop iteration
yield "IMAGES", dicom_file_metadata, initial_dicom, image
if "Structure" in initial_dicom_sop_class_name:
# Load as an RT structure set
# This should be done individually for each file
logger.info(f" Number of files: {len(dicom_file_list)}")
for index, dicom_file in enumerate(dicom_file_list):
dicom_object = pydicom.read_file(dicom_file, force=True)
# We must also read in the corresponding DICOM image
# This can be found by matching the references series UID to the series UID
"""
! TO DO
What happens if there is an RT structure set with different referenced sequences?
"""
# Get the "ReferencedFrameOfReferenceSequence", first item
referenced_frame_of_reference_item = dicom_object.ReferencedFrameOfReferenceSequence[0]
# Get the "RTReferencedStudySequence", first item
# This retrieves the study UID
# This might be useful, but would typically match the actual StudyInstanceUID in the
# DICOM object
rt_referenced_series_item = (
referenced_frame_of_reference_item.RTReferencedStudySequence[0]
)
# Get the "RTReferencedSeriesSequence", first item
# This retreives the actual referenced series UID, which we need to match imaging
# parameters
rt_referenced_series_again_item = rt_referenced_series_item.RTReferencedSeriesSequence[
0
]
# Get the appropriate series instance UID
image_series_uid = rt_referenced_series_again_item.SeriesInstanceUID
logger.info(f" Item {index}: Matched SeriesInstanceUID = {image_series_uid}")
# Read in the corresponding image
sorted_file_list = safe_sort_dicom_image_list(dicom_series_dict[image_series_uid])
image = sitk.ReadImage(sorted_file_list)
initial_dicom = pydicom.read_file(sorted_file_list[0], force=True)
(
structure_name_list,
structure_image_list,
) = transform_point_set_from_dicom_struct(image, dicom_object)
dicom_file_metadata = {
"parent_sorting_data": parent_sorting_data,
"study_uid": study_uid,
"structure_name_list": structure_name_list,
}
yield "STRUCTURES", dicom_file_metadata, dicom_object, structure_image_list
if "Dose" in initial_dicom_sop_class_name:
# Load as an RT Dose distribution
# This should be done individually for each file
logger.info(f" Number of files: {len(dicom_file_list)}")
for index, dicom_file in enumerate(dicom_file_list):
dicom_object = pydicom.read_file(dicom_file, force=True)
"""
! CHECKPOINT
There should only be a single RT dose file (with each series UID)
If there are more, yield each
"""
initial_dicom = pydicom.read_file(dicom_file, force=True)
dicom_file_metadata = {
"parent_sorting_data": parent_sorting_data,
"study_uid": study_uid,
}
# We must read in as a float otherwise when we multiply by one later it will not work!
raw_dose_image = sitk.ReadImage(dicom_file, sitk.sitkFloat32)
dose_grid_scaling = dicom_object.DoseGridScaling
logger.debug(f" Dose grid scaling: {dose_grid_scaling} Gy")
scaled_dose_image = raw_dose_image * dose_grid_scaling
yield "DOSES", dicom_file_metadata, dicom_object, scaled_dose_image
"""
! TO DO
1. (DONE) Implement conversion of dose files (to NIFTI images)
2. Implement conversion of RT plan files to text dump
3. Do something with other files (e.g. Deformable Image Registration stuff)
"""
return
def _clean_field_filename(field_filename):
    """Collapse runs of underscores and strip trailing ones from a filename stem."""
    while "__" in field_filename:
        field_filename = field_filename.replace("__", "_")
    # rstrip avoids the IndexError the previous per-character loop raised on
    # an empty (or all-underscore) stem
    return field_filename.rstrip("_")


def _write_image_if_allowed(file_to_write, output_name, overwrite_existing_files):
    """Write a SimpleITK image to ``output_name``, honouring the overwrite flag.

    Args:
        file_to_write (SimpleITK.Image): Image to serialise.
        output_name (pathlib.Path): Destination path.
        overwrite_existing_files (bool): If False, an existing file is kept.

    Returns:
        bool: True if the image was written, False if an existing file was kept.
    """
    if output_name.is_file():
        logger.warning(f" File exists: {output_name}")
        if overwrite_existing_files:
            logger.warning(" You have selected to overwrite existing files.")
        else:
            logger.info(
                " You have selected to NOT overwrite existing files. Continuing."
            )
            return False
    sitk.WriteImage(file_to_write, output_name.as_posix())
    return True


def write_output_data_to_disk(
    output_data_dict,
    output_directory="./",
    output_file_suffix=".nii.gz",
    overwrite_existing_files=False,
):
    """
    Write the converted imaging data to disk (e.g. as NIFTI files).

    Args:
        output_data_dict (dict): Mapping produced during DICOM conversion;
            contains a "parent_sorting_data" entry plus one entry per data
            type (e.g. "IMAGES", "STRUCTURES", "DOSES"), each mapping an
            output name stem to a SimpleITK image or a (possibly nested)
            list of images.
        output_directory (str, optional): Root directory for output files.
        output_file_suffix (str, optional): File extension, e.g. ".nii.gz".
        overwrite_existing_files (bool, optional): If False (default), files
            already on disk are left untouched.

    Returns:
        dict | None: field name -> list of pathlib.Path objects, or None if
        ``output_data_dict`` is None. Note: paths are recorded even when the
        write was skipped because the file already existed (original
        bookkeeping behaviour, preserved deliberately).
    """
    if output_data_dict is None:
        return None

    filename_fields = [i for i in output_data_dict.keys() if i != "parent_sorting_data"]
    parent_sorting_data = output_data_dict["parent_sorting_data"]

    files_written = {}

    for field in filename_fields:
        logger.info(f" Writing files for field: {field}")
        field_dir = pathlib.Path(output_directory) / parent_sorting_data / field
        field_dir.mkdir(parents=True, exist_ok=True)
        files_written[field] = []

        for field_filename_base, field_list in output_data_dict[field].items():
            # If several images share a name (depends on the chosen name
            # format) they arrive as a list; append an index while writing.
            if isinstance(field_list, (tuple, list)):
                for suffix, file_to_write in enumerate(flatten(field_list)):
                    field_filename = _clean_field_filename(
                        field_filename_base + f"_{suffix}"
                    )
                    output_name = field_dir / (field_filename + output_file_suffix)
                    # Recorded before the existence check, matching the
                    # original behaviour (skipped files are still listed).
                    files_written[field].append(output_name)
                    _write_image_if_allowed(
                        file_to_write, output_name, overwrite_existing_files
                    )
            else:
                field_filename = _clean_field_filename(field_filename_base)
                output_name = field_dir / (field_filename + output_file_suffix)
                files_written[field].append(output_name)
                _write_image_if_allowed(
                    field_list, output_name, overwrite_existing_files
                )

    return files_written
def _parse_name_format_fields(name_format):
    """Return the replacement-field names used in a str.format-style template.

    E.g. "{a}_{b}" -> ["a", "b"]. Mirrors the original ad-hoc parsing: for
    each "}"-delimited chunk, the text after the first "{" is taken.
    """
    return [i[i.find("{") + 1 :] for i in name_format.split("}") if len(i) > 0]


def _store_output_entry(output_data_dict, type_key, output_name, data, collision_message):
    """Store ``data`` under ``output_data_dict[type_key][output_name]``.

    On a name collision the stored value becomes a list and ``data`` is
    appended to it.
    """
    if type_key not in output_data_dict:
        # Make a new entry
        output_data_dict[type_key] = {output_name: data}
        return
    if output_name not in output_data_dict[type_key]:
        output_data_dict[type_key][output_name] = data
        return
    logger.info(collision_message)
    existing = output_data_dict[type_key][output_name]
    # BUGFIX: the previous code wrapped the existing entry in a list only when
    # it was already iterable (condition inverted), so appending to a single
    # sitk.Image raised AttributeError, and existing lists became nested.
    if not isinstance(existing, list):
        existing = [existing]
        output_data_dict[type_key][output_name] = existing
    existing.append(data)


def process_dicom_directory(
    dicom_directory,
    parent_sorting_field="PatientName",
    output_image_name_format="{parent_sorting_data}_{study_uid_index}_{Modality}_{image_desc}_{SeriesNumber}",
    output_structure_name_format="{parent_sorting_data}_{study_uid_index}_{Modality}_{structure_name}",
    output_dose_name_format="{parent_sorting_data}_{study_uid_index}_{DoseSummationType}",
    return_extra=True,
    output_directory="./",
    output_file_suffix=".nii.gz",
    overwrite_existing_files=False,
    write_to_disk=True,
    verbose=False,
    initial_sop_class_name_default="UNKNOWN",
):
    """
    Crawl a directory (or directories) of DICOM files and convert the
    contents (images, RT structure sets, RT dose grids) to volumetric images.

    Args:
        dicom_directory (str | pathlib.Path | iterable): Directory, or
            iterable of directories, searched recursively for files ending
            in .dcm or .dc3 (case-insensitive).
        parent_sorting_field (str, optional): DICOM field used to group data
            at the top level (e.g. "PatientName", "PatientID").
        output_image_name_format (str, optional): str.format template for
            image names; fields may be DICOM header keywords plus the
            special fields parent_sorting_data, study_uid_index, image_desc.
        output_structure_name_format (str, optional): As above for
            structures; also supports the structure_name field.
        output_dose_name_format (str, optional): As above for dose grids.
        return_extra (bool, optional): Include extra information in the
            generated image description.
        output_directory (str, optional): Root directory for written files.
        output_file_suffix (str, optional): Output extension, e.g. ".nii.gz".
        overwrite_existing_files (bool, optional): Overwrite files on disk.
        write_to_disk (bool, optional): If False, return in-memory image
            dictionaries instead of writing files and returning their paths.
        verbose (bool, optional): Verbose DICOM file-list processing.
        initial_sop_class_name_default (str, optional): Fallback SOP class
            name when it cannot be read from the DICOM header.

    Returns:
        dict | None: parent sorting value -> files written (when
        write_to_disk) or the in-memory output data dict. None when no
        (valid) DICOM data was found.

    Raises:
        ValueError: If dicom_directory is neither a path nor an iterable.
    """
    # Resolve the input into a list of directories to search
    if isinstance(dicom_directory, (str, pathlib.Path)):
        search_directories = [dicom_directory]
    elif hasattr(dicom_directory, "__iter__"):
        search_directories = list(dicom_directory)
    else:
        # Previously this fell through and later raised a confusing NameError
        raise ValueError(
            "dicom_directory must be a str, pathlib.Path, or an iterable of directories."
        )

    # Find files ending with .dcm, .dc3 (case-insensitive)
    dicom_file_list = []
    for dicom_dir in search_directories:
        root_path = pathlib.Path(dicom_dir)
        dicom_file_list += [
            p for p in root_path.glob("**/*") if p.name.lower().endswith((".dcm", ".dc3"))
        ]

    if len(dicom_file_list) == 0:
        logger.info("No DICOM files found in input directory. Exiting now.")
        return

    # Process the DICOM files
    # This returns a dictionary (of dictionaries):
    # {parent_data (e.g. PatientName): {series_UID_1: [list_of_DICOM_files],
    #                                   series_UID_2: [list_of_DICOM_files], ...},
    #  parent_data_2: {series_UID_1: [list_of_DICOM_files], ...},
    #  ... }
    dicom_series_dict_parent = process_dicom_file_list(
        dicom_file_list, parent_sorting_field=parent_sorting_field, verbose=verbose
    )

    if dicom_series_dict_parent is None:
        logger.info("No valid DICOM files found. Ending.")
        return None

    # Fields generated internally rather than read from the DICOM header.
    # Hoisted out of the loops (previously recomputed for every series, as
    # the original in-code TODO noted).
    special_name_fields = [
        "parent_sorting_data",
        "study_uid_index",
        "image_desc",
        "structure_name",
    ]
    name_format_for_type = {
        "IMAGES": output_image_name_format,
        "STRUCTURES": output_structure_name_format,
        "DOSES": output_dose_name_format,
    }

    output = {}

    for parent_data, dicom_series_dict in dicom_series_dict_parent.items():
        logger.info(f"Processing data for {parent_sorting_field} = {parent_data}.")
        logger.info(f" Number of DICOM series = {len(dicom_series_dict.keys())}")

        # Holds the SimpleITK images and file names for this parent
        output_data_dict = {}

        # Maps study UID -> small integer index; used to match structure
        # sets to images, and paired images (e.g. PET/CT) to each other
        study_uid_dict = {}

        logger.debug(f" Output image name format: {output_image_name_format}")
        logger.debug(f" Output structure name format: {output_structure_name_format}")
        logger.debug(f" Output dose name format: {output_dose_name_format}")

        # For each unique series UID, process the DICOM files
        for series_uid in dicom_series_dict.keys():
            # process_dicom_series yields, per converted object:
            #   1. dicom_type: "IMAGES", "STRUCTURES", "DOSES", ...
            #   2. dicom_file_metadata: special metadata from the DICOM header
            #   3. initial_dicom: the first DICOM in the series (doses and
            #      structures usually have only one anyway)
            #   4. dicom_file_data: the actual SimpleITK image data
            for (
                dicom_type,
                dicom_file_metadata,
                initial_dicom,
                dicom_file_data,
            ) in process_dicom_series(
                dicom_series_dict=dicom_series_dict,
                series_uid=series_uid,
                parent_sorting_field=parent_sorting_field,
                return_extra=return_extra,
                initial_sop_class_name_default=initial_sop_class_name_default,
            ):
                # Step 1
                # Check the parent sorting field (PatientName, PatientID,
                # or similar) is consistent; occasionally both are blank
                parent_sorting_data = dicom_file_metadata["parent_sorting_data"]

                if "parent_sorting_data" not in output_data_dict:
                    output_data_dict["parent_sorting_data"] = parent_sorting_data
                elif parent_sorting_data != output_data_dict["parent_sorting_data"]:
                    logger.error(
                        f"A conflict was found for the parent sorting field "
                        f"({parent_sorting_field}): {parent_sorting_data}"
                    )
                    logger.error("Quitting now.")
                    print(dicom_series_dict_parent.keys())
                    # NOTE(review): sys.exit() in library code is harsh but
                    # preserved — callers may rely on the hard stop
                    sys.exit()
                else:
                    logger.info(
                        f" Parent sorting field ({parent_sorting_field}) match found: "
                        f"{parent_sorting_data}"
                    )

                # Step 2
                # Index the study UID, used for indexing DICOM series
                study_uid = dicom_file_metadata["study_uid"]

                if study_uid not in study_uid_dict:
                    # max(..., default=-1) replaces the old try/except; the
                    # AttributeError branch there was unreachable
                    study_uid_index = max(study_uid_dict.values(), default=-1) + 1
                    logger.info(f" Setting study instance UID index: {study_uid_index}")
                    study_uid_dict[study_uid] = study_uid_index
                else:
                    logger.info(
                        f" Study instance UID index already exists: {study_uid_dict[study_uid]}"
                    )

                # Step 3
                # Generate names for output files
                # Image description (other special fields are defined above)
                image_desc = get_dicom_info_from_description(
                    initial_dicom, return_extra=return_extra
                )

                # Fields requested by the user-given name format, minus those
                # not derived from the DICOM header.
                # (KeyError here for an unknown dicom_type replaces the old
                # silent NameError.)
                all_naming_fields = _parse_name_format_fields(
                    name_format_for_type[dicom_type]
                )
                dicom_header_tags = [
                    i for i in all_naming_fields if i not in special_name_fields
                ]

                naming_info_dict = {}
                for dicom_field in dicom_header_tags:
                    try:
                        dicom_field_value = initial_dicom[dicom_field].value
                    except (AttributeError, KeyError):
                        logger.warning(
                            f" Could not find DICOM header {dicom_field}. Setting as 0 to "
                            f"preserve naming convention."
                        )
                        dicom_field_value = 0
                    naming_info_dict[dicom_field] = dicom_field_value

                if dicom_type == "IMAGES":
                    output_name = output_image_name_format.format(
                        parent_sorting_data=parent_sorting_data,
                        study_uid_index=study_uid_dict[study_uid],
                        image_desc=image_desc,
                        **naming_info_dict,
                    )
                    _store_output_entry(
                        output_data_dict,
                        "IMAGES",
                        output_name,
                        dicom_file_data,
                        " An image with this name exists, appending.",
                    )
                elif dicom_type == "STRUCTURES":
                    for structure_name, structure_image in zip(
                        dicom_file_metadata["structure_name_list"], dicom_file_data
                    ):
                        output_name = output_structure_name_format.format(
                            parent_sorting_data=parent_sorting_data,
                            study_uid_index=study_uid_dict[study_uid],
                            image_desc=image_desc,
                            structure_name=structure_name,
                            **naming_info_dict,
                        )
                        _store_output_entry(
                            output_data_dict,
                            "STRUCTURES",
                            output_name,
                            structure_image,
                            " A structure with this name exists, appending.",
                        )
                elif dicom_type == "DOSES":
                    output_name = output_dose_name_format.format(
                        parent_sorting_data=parent_sorting_data,
                        study_uid_index=study_uid_dict[study_uid],
                        **naming_info_dict,
                    )
                    _store_output_entry(
                        output_data_dict,
                        "DOSES",
                        output_name,
                        dicom_file_data,
                        " An image with this name exists, appending.",
                    )

        if write_to_disk:
            output[str(parent_data)] = write_output_data_to_disk(
                output_data_dict=output_data_dict,
                output_directory=output_directory,
                output_file_suffix=output_file_suffix,
                overwrite_existing_files=overwrite_existing_files,
            )
        else:
            output[str(parent_data)] = output_data_dict

    """
    TO DO!
    Memory issue with output_data_dict
    Use in inner loop, reset output_data_dict
    """

    return output
| platipy/dicom/io/crawl.py | 44,900 | Fixes missing points in contouring using simple linear interpolation
Args:
contour_data_list (list): The contour data for each slice
Returns:
contour_data (numpy array): Interpolated contour data
Attempts to return some information from a DICOM
This is typically used for naming converted NIFTI files
Args:
dicom_object (pydicom.dataset.FileDataset): The DICOM object
return_extra (bool, optional): return information that is usually not required
Returns:
info (str): Some extracted information
Organise the DICOM files by the series UID
Sorts a list of DICOM image files based on a DICOM tag value.
This is a much safer method than reading SliceLocation.
It takes mandatory DICOM fields (Image Position [Patient]) and (Image Orientation [Patient]).
The list of DICOM files is sorted by projecting the image position onto the axis normal to the
place defined by the image orientation.
This accounts for differences in patient position (e.g. HFS/FFS).
Args:
dicom_image_list (list): [description]
This function is used to generate a binary mask from a set of vertices.
This allows us to convert from DICOM-RTStruct format to any imaging format.
Args:
image ([SimpleITK.Image]): The image, used to copy imaging information
(e.g. resolution, spacing)
dicom_struct ([pydicom.Dataset]): The DICOM-RTStruct file
spacing_override (bool | tuple, optional): Overwrite the spacing.
Set with (axial_spacing, coronal_spacing, sagittal spacing). Defaults to False.
Returns:
list, list : final_struct_name_sequence, structure_list
Write output to disk
Copyright 2020 University of New South Wales, University of Sydney Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Modified from https://qibawiki.rsna.org/images/6/62/SUV_vendorneutral_pseudocode_happypathonly_20180626_DAC.pdf Try to find the Philips private scale factor") Get the modality There is typically not much extra information At the moment, we do not return anything for CT imaging Not much consistency, but we can get the protocol name Not much experience with this We can search through the corrections applied Return whether or not attentuation is applied Get the data in the parent sorting field, clean with RegEx Check the potential types of DICOM files Load as an primary image scaling_factor = get_suv_bw_scale_factor(initial_dicom) image *= scaling_factor !TO DO Work on PET SUV conversion GE use the DICOM tag (0019, 10a2) [Raw data run number] in Diffusion weighted MRI sequences We need to separate this out to get the difference sequences num_sequences = int( (initial_dicom[(0x0025, 0x1007)]) / (initial_dicom[(0x0021, 0x104f)]) ) number_of_images / images_per_seq Split up the DICOM file list by sequence name Stop iteration Load as an RT structure set This should be done individually for each file We must also read in the corresponding DICOM image This can be found by matching the references series UID to the series UID Get the "ReferencedFrameOfReferenceSequence", first item Get the "RTReferencedStudySequence", first item This retrieves the study UID This might be useful, but would typically 
match the actual StudyInstanceUID in the DICOM object Get the "RTReferencedSeriesSequence", first item This retreives the actual referenced series UID, which we need to match imaging parameters Get the appropriate series instance UID Read in the corresponding image Load as an RT Dose distribution This should be done individually for each file We must read in as a float otherwise when we multiply by one later it will not work! Check if there is a list of images with matching names This will depend on the name format chosen If there is a list, we append an index as we write to disk Flatten Iterate Some cleaning Save image! Some cleaning Save image! Check dicom_directory type Get all the DICOM files in the given directory Find files ending with .dcm, .dc3 Get all the DICOM files in each directory Find files ending with .dcm, .dc3 Process the DICOM files This returns a dictionary (of dictionaries): {parent_data (e.g. PatientName): {series_UID_1: [list_of_DICOM_files], {series_UID_2: [list_of_DICOM_files], ... parent_data_2 : {series_UID_1: [list_of_DICOM_files], {series_UID_2: [list_of_DICOM_files], ... ... } Set up the output data This stores the SimpleITK images and file names Set up the study UID dict This helps match structure sets to relevant images And paired images to each other (e.g. PET/CT) Give some user feedback For each unique series UID, process the DICOM files This function returns four values 1. dicom_type: This is IMAGES, STRUCTURES, DOSES, etc 2. dicom_file_metadata: Some special metadata extracted from the DICOM header 3. initial_dicom: The first DICOM in the series. For doses and structures there is (usually) only one DICOM anyway 4. 
dicom_file_data: The actual SimpleITK image data Step 1 Check the parent sorting field is consistent This would usually be the PatientName, PatientID, or similar Occasionally these will both be blank Step 2 Get the study UID Used for indexing DICOM series Study UID dict might not exist Study UID dict might be empty Step 3 Generate names for output files Special names ! This can be defined once at the start of the function Get the image description (other special names are already defined above) Get all the fields from the user-given name format Now exclude those that aren't derived from the DICOM header Make a new entry First check if there is another image of the same name Make a new entry First check if there is another structure of the same name Make a new entry First check if there is another image of the same name | 6,212 | en | 0.819219 |
"""
Conjuntos são chamados de set's
- Set não possui duplicidade
- Set não possui valor ordenado
- Não são acessados via indice, ou seja, não são indexados
Bons para armazenar elementos sem ordenação, sem se preocupar com chaves, valores e itens duplicados.
Set's são referenciados por {}
Diferença de set e dict
- Dict tem chave:valor
- Set tem apenas valor
---------------------------------------------------------------------------------------------------------------------
# DEFENINDO SET
# Forma 1
s = set ({1, 2, 3, 4, 5, 4, 5, 2, 1}) # valores duplicados
print(type(s))
print(s)
# OBS.: Ao criar um set, se um valor estiver repetido, ele é ignorado, sem gerar erro.
# Forma 2 - Mais comum
set = {1, 2, 3, 4, 5, 4, 5, 2, 1} # valores duplicados
print(type(set))
print(set)
# Sem valores duplicados e sem ordenação entre eles
# Pode-se colocar todos os tipos de dados
---------------------------------------------------------------------------------------------------------------------
# PODE-SE ITERAR SOBRE UM SET
set = {1, 2, 3, 4, 5, 4, 5, 2, 1}
for valor in set:
print(valor)
---------------------------------------------------------------------------------------------------------------------
# USOS INTERESSANTES COM SET'S
# Imagine que fizemos um formulario de cadastro de visitantes em um museu, onde as pessoas informam manualmente
# sua cidade de origem
# Nos adicionamos cada cidade em uma lista Python, ja que em lista pode-se adicionar novos elementos e ter repetição
cidade = ['Lavras', 'Bagé', 'Caçapava', 'Lavras', 'Bagé']
print(type(cidade))
print(cidade)
print(len(cidade)) # para saber quantos visitantes teve
print(len(set(cidade))) # para saber quantas cidades distintas foram visitar
---------------------------------------------------------------------------------------------------------------------
# ADICIONANDO ELEMENTOS EM UM SET
s = {1, 2, 3}
s.add(4)
print(s)
---------------------------------------------------------------------------------------------------------------------
# REMOVENDO ELEMENTOS DE UM SET
# Forma 1
conj = {1, 2, 3}
conj.remove(3) # se tentar remover um valor que não existe, gera um erro.
print(conj)
# Forma 2
conj.discard(2) # se o elemento não existir, não vai gerar erro
print(conj)
---------------------------------------------------------------------------------------------------------------------
# COPIANDO UM SET PARA OUTRO
conj = {1, 2, 3}
# Forma 1 - Deep Copy (o novo conjunto fica independente)
novo = conj.copy()
print(novo)
novo.add(4)
print(conj, novo)
# Forma 2 - Shallow Copy (o novo conjunto fica interligado ao primeiro)
novo2 = conj
print(novo2)
novo2.add(5)
print(conj, novo2)
---------------------------------------------------------------------------------------------------------------------
# REMOVER TODOS OS DADOS DE UM SET
conj = {1, 2, 3}
conj.clear()
print(conj)
---------------------------------------------------------------------------------------------------------------------
# METODOS MATEMÁTICOS DE CONJUNTOS
# Dois conjuntos de estudantes, Python e Java.
python = {'Paulo', 'Luis', 'Marcos', 'Camila', 'Ana'}
java = {'Paulo', 'Fernando', 'Antonio', 'Joao', 'Ana'}
# Precisamos juntar em um set, os alunos dos dois cursos, mas apenas nomes únicos
# Forma 1 - usando union
unicos = python.union(java)
print(unicos)
# Forma 2 - Usando o caracter pipe "|"
unicos2 = python|java
print(unicos2)
---------------------------------------------------------------------------------------------------------------------
# GERANDO SET DE ESTUDANTES QUE ESTÃO NOS DOIS CURSOS
python = {'Paulo', 'Luis', 'Marcos', 'Camila', 'Ana'}
java = {'Paulo', 'Fernando', 'Antonio', 'Joao', 'Ana'}
# Forma 1 - usando intersection
ambos = python.intersection(java)
print(ambos)
# Forma 2 - usando &
ambos2 = python & java
print(ambos2)
---------------------------------------------------------------------------------------------------------------------
# GERAR SET DE ESTUDANTES QUE ESTÃO EM UM CURSO, MAS QUE NÃO ESTÃO NO OUTRO
python = {'Paulo', 'Luis', 'Marcos', 'Camila', 'Ana'}
java = {'Paulo', 'Fernando', 'Antonio', 'Joao', 'Ana'}
so_python = python.difference(java)
print(so_python)
---------------------------------------------------------------------------------------------------------------------
# SOMA*, MÁXIMO*, MÍNIMO*, TAMANHO.
# * -> somente valores inteiros ou float
conj = {1, 2, 3, 4, 5}
print(sum(conj))
print(max(conj))
print(min(conj))
print(len(conj))
---------------------------------------------------------------------------------------------------------------------
"""
| Secao7_ColecoesPython/Conjutos.py | 4,683 | Conjuntos são chamados de set's
- Set não possui duplicidade
- Set não possui valor ordenado
- Não são acessados via indice, ou seja, não são indexados
Bons para armazenar elementos são ordenação, sem se preocupar com chaves, valores e itens duplicados.
Set's são referenciados por {}
Diferença de set e dict
- Dict tem chave:valor
- Set tem apenas valor
---------------------------------------------------------------------------------------------------------------------
# DEFENINDO SET
# Forma 1
s = set ({1, 2, 3, 4, 5, 4, 5, 2, 1}) # valores duplicados
print(type(s))
print(s)
# OBS.: Ao criar um set, se uma valor estiver repetido, ele é ignorado, sem gerar erro.
# Forma 2 - Mais comum
set = {1, 2, 3, 4, 5, 4, 5, 2, 1} # valores duplicados
print(type(set))
print(set)
# Sem valores duplicados e sem ordenação entre eles
# Pode-se colocar todos os tipos de dados
---------------------------------------------------------------------------------------------------------------------
# PODE-SE ITERAR SOBRE UM SET
set = {1, 2, 3, 4, 5, 4, 5, 2, 1}
for valor in set:
print(valor)
---------------------------------------------------------------------------------------------------------------------
# USOS INTERESSANTES COM SET'S
# Imagine que fizemos um formulario de cadastro de visitantes em um museu, onde as pessoas informam manualmente
# sua cidade de origem
# Nos adicionamos cada cidade em uma lista Python, ja que em lista pode-se adicionar novos elementos e ter repetição
cidade = ['Lavras', 'Bagé', 'Caçapava', 'Lavras', 'Bagé']
print(type(cidade))
print(cidade)
print(len(cidade)) # para saber quantos visitantes teve
print(len(set(cidade))) # para saber quantas cidades distintas foram visitar
---------------------------------------------------------------------------------------------------------------------
# ADICIONANDO ELEMENTOS EM UM SET
s = {1, 2, 3}
s.add(4)
print(s)
---------------------------------------------------------------------------------------------------------------------
# REMOVANDO ELEMENTOS DE UM SET
# Forma 1
conj = {1, 2, 3}
conj.remove(3) # se tentar remover um valor que não existe, gera um erro.
print(conj)
# Forma 2
conj.discard(2) # se o elemento não existir, não vai gerar erro
print(conj)
---------------------------------------------------------------------------------------------------------------------
# COPIANDO UM SET PARA OUTRO
conj = {1, 2, 3}
# Forma 1 - Deep Copy (o novo conjunto fica independente)
novo = conj.copy()
print(novo)
novo.add(4)
print(conj, novo)
# Forma 2 - Shallow Copy (o novo conjunto fica interligado ao primeiro)
novo2 = conj
print(novo2)
novo2.add(5)
print(conj, novo2)
---------------------------------------------------------------------------------------------------------------------
# REMOVER TODOS OS DADOS DE UM SET
conj = {1, 2, 3}
conj.clear()
print(conj)
---------------------------------------------------------------------------------------------------------------------
# METODOS MATEMÁTICOS DE CONJUNTOS
# Dois conjuntos de estudantes, Python e Java.
python = {'Paulo', 'Luis', 'Marcos', 'Camila', 'Ana'}
java = {'Paulo', 'Fernando', 'Antonio', 'Joao', 'Ana'}
# Precisamos juntar em um set, os alunos dos dois cursos, mas apenas nomes únicos
# Forma 1 - usando union
unicos = python.union(java)
print(unicos)
# Forma 2 - Usando o caracter pipe "|"
unicos2 = python|java
print(unicos2)
---------------------------------------------------------------------------------------------------------------------
# GERANDO SET DE ESTUDANTES QUE ESTÃO NOS DOIS CURSOS
python = {'Paulo', 'Luis', 'Marcos', 'Camila', 'Ana'}
java = {'Paulo', 'Fernando', 'Antonio', 'Joao', 'Ana'}
# Forma 1 - usando intersection
ambos = python.intersection(java)
print(ambos)
# Forma 2 - usando &
ambos2 = python & java
print(ambos2)
---------------------------------------------------------------------------------------------------------------------
# GERAR SET DE ESTUDANTES QUE ESTÃ EM UM CURSO, MAS QUE NÃO ESTÃO NO OUTRO
python = {'Paulo', 'Luis', 'Marcos', 'Camila', 'Ana'}
java = {'Paulo', 'Fernando', 'Antonio', 'Joao', 'Ana'}
so_python = python.difference(java)
print(so_python)
---------------------------------------------------------------------------------------------------------------------
# SOMA*, MÁXIMO*, MÍNIMO*, TAMANHO.
# * -> somente valores inteiros ou float
conj = {1, 2, 3, 4, 5}
print(sum(conj))
print(max(conj))
print(min(conj))
print(len(conj))
--------------------------------------------------------------------------------------------------------------------- | 4,643 | pt | 0.755896 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import click
import builderutils.parser as parser
import builderutils.renderer as renderer
import builderutils.dom as dom
@click.group()
def cli():
    # Root click command group; subcommands are attached in main().
    # No docstring on purpose: click would surface it as the CLI help text.
    pass
@click.command()
@click.option("--configfile", type=click.Path(), help="Builder config", required=True)
def create(configfile):
    # Build and parse the DOM tree for the app described by ``configfile``.
    # No docstring on purpose: click would surface it as the command help text.
    # Cleaned up: removed leftover debug prints and dead commented-out code
    # (the old renderer-based flow).
    parserObj = parser.ConfigParser(configfile)
    domObj = dom.DomManager(parserObj)
    domObj.buildDomTree()
    dom.DomManager.parseDomTree(dom.SAMPLE_DOM)
def main():
    """Register all subcommands on the group and dispatch the CLI."""
    for command in (create,):
        cli.add_command(command)
    cli()


if __name__ == "__main__":
    main()
| builder/builder.py | 1,108 | !/usr/bin/env python -*- coding: utf-8 -*- parserObj = parser.BuilderParser(configfile) renderObj = renderer.Renderer() renderObj.build_staging_environment(parserObj.parsedData) userConfig = parserObj.parsedData["user_config"] htmlTemplate = parserObj.parsedData["html_template"] flaskTemplate = parserObj.parsedData["flask_template"] renderObj.build_html_documents(htmlTemplate, userConfig) renderObj.build_flask_app(flaskTemplate, userConfig) | 444 | en | 0.077154 |
# Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template_file: python-cli-command.j2
# justice-platform-service (4.10.0)
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
import json
import yaml
from typing import Optional
import click
from .._utils import login_as as login_as_internal
from .._utils import to_dict
from accelbyte_py_sdk.api.platform import download as download_internal
@click.command()
@click.argument("campaign_id", type=str)
@click.option("--batch_no", "batch_no", type=int)
@click.option("--namespace", type=str)
@click.option("--login_as", type=click.Choice(["client", "user"], case_sensitive=False))
@click.option("--login_with_auth", type=str)
@click.option("--doc", type=bool)
def download(
    campaign_id: str,
    batch_no: Optional[int] = None,
    namespace: Optional[str] = None,
    login_as: Optional[str] = None,
    login_with_auth: Optional[str] = None,
    doc: Optional[bool] = None,
):
    # CLI wrapper around the generated platform ``download`` operation.
    # NOTE: generated code (see file header) -- do not hand-edit behavior.
    # ``--doc`` prints the SDK operation docstring instead of performing the call.
    if doc:
        click.echo(download_internal.__doc__)
        return
    x_additional_headers = None
    if login_with_auth:
        # An explicit token was supplied: forward it verbatim as the
        # Authorization header instead of performing a login.
        x_additional_headers = {
            "Authorization": login_with_auth
        }
    else:
        # Otherwise log in as the requested principal (client or user).
        login_as_internal(login_as)
    result, error = download_internal(
        campaign_id=campaign_id,
        batch_no=batch_no,
        namespace=namespace,
        x_additional_headers=x_additional_headers,
    )
    if error:
        raise Exception(f"download failed: {str(error)}")
    # Emit the result as YAML, preserving the SDK's field order.
    click.echo(yaml.safe_dump(to_dict(result), sort_keys=False))
download.operation_id = "download"
download.is_deprecated = False
| samples/cli/accelbyte_py_sdk_cli/platform/_download.py | 2,224 | Copyright (c) 2021 AccelByte Inc. All Rights Reserved. This is licensed software from AccelByte Inc, for limitations and restrictions contact your company contract manager. Code generated. DO NOT EDIT! template_file: python-cli-command.j2 justice-platform-service (4.10.0) pylint: disable=duplicate-code pylint: disable=line-too-long pylint: disable=missing-function-docstring pylint: disable=missing-module-docstring pylint: disable=too-many-arguments pylint: disable=too-many-branches pylint: disable=too-many-instance-attributes pylint: disable=too-many-lines pylint: disable=too-many-locals pylint: disable=too-many-public-methods pylint: disable=too-many-return-statements pylint: disable=too-many-statements pylint: disable=unused-import | 743 | en | 0.674467 |
"""
Credentials used when making CLIs.
"""
from pathlib import Path
from dcos_e2e.cluster import Cluster
DEFAULT_SUPERUSER_USERNAME = 'bootstrapuser'
DEFAULT_SUPERUSER_PASSWORD = 'deleteme'
def add_authorized_key(cluster: Cluster, public_key_path: Path) -> None:
    """
    Add an authorized key to all nodes in the given cluster.

    The key at ``public_key_path`` is appended to
    ``/root/.ssh/authorized_keys`` on every master, agent and public agent.
    """
    all_nodes = set(cluster.masters) | set(cluster.agents) | set(cluster.public_agents)

    for node in all_nodes:
        # Append a blank line first -- presumably so the key starts on a fresh
        # line even if the existing file lacks a trailing newline (TODO confirm).
        node.run(
            args=['echo', '', '>>', '/root/.ssh/authorized_keys'],
            shell=True,
        )
        node.run(
            args=[
                'echo',
                public_key_path.read_text(),
                '>>',
                '/root/.ssh/authorized_keys',
            ],
            shell=True,
        )
| src/dcos_e2e_cli/common/credentials.py | 814 | Add an authorized key to all nodes in the given cluster.
Credentials used when making CLIs. | 91 | en | 0.697547 |
"""Some miscellaneous utility functions."""
from contextlib import contextmanager
import os
import re
import sys
import warnings
import unittest
from fnmatch import fnmatchcase
from io import StringIO
from numbers import Number
# note: this is a Python 3.3 change, clean this up for OpenMDAO 3.x
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import numbers
import numpy as np
from openmdao.core.constants import INT_DTYPE, INF_BOUND
from openmdao.utils.om_warnings import issue_warning, _warn_simple_format, warn_deprecation
# Certain command line tools can make use of this to allow visualization of models when errors
# are present that would normally cause setup to abort.
_ignore_errors = False
def _convert_auto_ivc_to_conn_name(conns_dict, name):
"""
Convert name of auto_ivc val to promoted input name.
Parameters
----------
conns_dict : dict
Dictionary of global connections.
name : str
Name of auto_ivc to be found.
Returns
-------
str
Promoted input name.
"""
for key, val in conns_dict.items():
if val == name:
return key
def ignore_errors(flag=None):
    """
    Disable certain errors that will prevent setup from completing.

    Parameters
    ----------
    flag : bool or None
        If not None, set the value of _ignore_errors to this value.

    Returns
    -------
    bool
        The current value of _ignore_errors.
    """
    global _ignore_errors
    if flag is None:
        # Pure query: return the current setting unchanged.
        return _ignore_errors
    _ignore_errors = flag
    return _ignore_errors
def conditional_error(msg, exc=RuntimeError, category=UserWarning, err=None):
    """
    Raise an exception or issue a warning, depending on the value of _ignore_errors.

    Parameters
    ----------
    msg : str
        The error/warning message.
    exc : Exception class
        Class used to construct the raised exception.
    category : warning class
        Class of the warning to be issued.
    err : bool
        If None, use ignore_errors(); otherwise err=True raises an exception
        and err=False issues a warning.
    """
    warn_only = err is False or (err is None and ignore_errors())
    if warn_only:
        issue_warning(msg, category=category)
    else:
        raise exc(msg)
@contextmanager
def ignore_errors_context(flag=True):
    """
    Set ignore_errors to the given flag in this context.

    Parameters
    ----------
    flag : bool
        Value assigned to ignore_errors for the duration of the context.

    Yields
    ------
    None
    """
    previous = ignore_errors()
    ignore_errors(flag)
    try:
        yield
    finally:
        # Always restore the prior setting, even if the body raised.
        ignore_errors(previous)
def simple_warning(msg, category=UserWarning, stacklevel=2):
    """
    Display a simple warning message without the annoying extra line showing the warning call.

    .. deprecated::
        Use openmdao.utils.om_warnings.issue_warning instead.

    Parameters
    ----------
    msg : str
        The warning message.
    category : class
        The warning class.
    stacklevel : int
        Number of levels up the stack to identify as the warning location.
    """
    warn_deprecation('simple_warning is deprecated. '
                     'Use openmdao.utils.om_warnings.issue_warning instead.')
    saved_format = warnings.formatwarning
    # Temporarily swap in the minimal formatter, then restore the original.
    warnings.formatwarning = _warn_simple_format
    try:
        warnings.warn(msg, category, stacklevel)
    finally:
        warnings.formatwarning = saved_format
def ensure_compatible(name, value, shape=None, indices=None):
    """
    Make value compatible with the specified shape or the shape of indices.

    Parameters
    ----------
    name : str
        The name of the value.
    value : float or list or tuple or ndarray or Iterable
        The value of a variable.
    shape : int or tuple or list or None
        The expected or desired shape of the value.
    indices : Indexer or None
        The indices into a source variable.

    Returns
    -------
    ndarray
        The value in a shape compatible with the specified shape and/or indices.
    tuple
        The resulting shape of the value.

    Raises
    ------
    ValueError
        If value cannot be made to conform to shape or if shape and indices
        are incompatible.
    """
    if isinstance(value, Iterable):
        value = np.asarray(value)

    # if shape is not given, infer from value (if not scalar) or indices
    if shape is not None:
        if isinstance(shape, numbers.Integral):
            shape = (shape,)
        elif isinstance(shape, list):
            shape = tuple(shape)
    elif not np.isscalar(value):
        shape = np.atleast_1d(value).shape

    if indices is not None:
        if not indices._flat_src and shape is None:
            raise RuntimeError("src_indices for '%s' is not flat, so its input "
                               "shape must be provided." % name)
        try:
            indshape = indices.indexed_src_shape
        except (RuntimeError, ValueError, TypeError):
            pass  # use shape provided or shape of value and check vs. shape of indices later
        else:
            # np.prod replaces np.product, which was deprecated in numpy 1.25
            # and removed in numpy 2.0.
            if shape is not None and np.prod(indshape) != np.prod(shape):
                raise ValueError("Shape of indices %s does not match shape of %s for '%s'." %
                                 (indshape, shape, name))
            if shape is None:
                shape = indshape

    if shape is None:
        # shape is not determined, assume the shape of value was intended
        value = np.atleast_1d(value)
        shape = value.shape
    else:
        # shape is determined, if value is scalar assign it to array of shape
        # otherwise make sure value is an array of the determined shape
        if np.isscalar(value) or value.shape == (1,):
            value = np.ones(shape) * value
        else:
            value = np.atleast_1d(value).astype(np.float64)
            if value.shape != shape:
                raise ValueError("Incompatible shape for '%s': Expected %s but got %s." %
                                 (name, shape, value.shape))

    return value, shape
def determine_adder_scaler(ref0, ref, adder, scaler):
    r"""
    Determine proper values of adder and scaler based on user arguments.

    Adder and Scaler are used internally because the transformation is
    slightly more efficient.

    Parameters
    ----------
    ref0 : float or ndarray, optional
        Value of response variable that scales to 0.0 in the driver.
    ref : float or ndarray, optional
        Value of response variable that scales to 1.0 in the driver.
    adder : float or ndarray, optional
        Value to add to the model value to get the scaled value. Adder
        is first in precedence.
    scaler : float or ndarray, optional
        Value to multiply the model value to get the scaled value. Scaler
        is second in precedence.

    Returns
    -------
    tuple
        Adder and scaler, properly formatted and based on ref/ref0 if provided.

    Raises
    ------
    ValueError
        If both ref/ref0 and adder/scaler were provided.

    Notes
    -----
    The response can be scaled using ref and ref0.
    The argument :code:`ref0` represents the physical value when the scaled value is 0.
    The argument :code:`ref` represents the physical value when the scaled value is 1.
    """
    if ref0 is not None or ref is not None:
        # Affine (ref/ref0) scaling is mutually exclusive with adder/scaler.
        if scaler is not None or adder is not None:
            raise ValueError('Inputs ref/ref0 are mutually exclusive '
                             'with scaler/adder')
        ref = 1.0 if ref is None else ref
        ref0 = 0.0 if ref0 is None else ref0

        # Convert ref/ref0 to scaler/adder so we can scale the bounds.
        adder = -ref0
        scaler = 1.0 / (ref + adder)
    else:
        scaler = 1.0 if scaler is None else scaler
        adder = 0.0 if adder is None else adder

    adder = format_as_float_or_array('adder', adder, val_if_none=0.0, flatten=True)
    scaler = format_as_float_or_array('scaler', scaler, val_if_none=1.0, flatten=True)

    return adder, scaler
def set_pyoptsparse_opt(optname, fallback=True):
    """
    For testing, sets the pyoptsparse optimizer using the given optimizer name.

    This may be modified based on the value of OPENMDAO_FORCE_PYOPTSPARSE_OPT.
    This can be used on systems that have SNOPT installed to force them to use
    SLSQP in order to mimic our test machines on travis and appveyor.

    Parameters
    ----------
    optname : str
        Name of pyoptsparse optimizer that is requested by the test.
    fallback : bool
        If True, fall back to SLSQP if optname can't be found.

    Returns
    -------
    object
        Pyoptsparse optimizer instance.
    str
        Pyoptsparse optimizer string.
    """
    OPT = None
    opt = None
    OPTIMIZER = None
    # Environment override: force a specific optimizer regardless of what was
    # requested (e.g. force SLSQP on machines that have SNOPT installed).
    force = os.environ.get('OPENMDAO_FORCE_PYOPTSPARSE_OPT')
    if force:
        optname = force
    # Mock is used to detect stubbed-out optimizers -- presumably some test
    # environments replace unavailable optimizers with Mock objects.
    from unittest.mock import Mock
    try:
        from pyoptsparse import OPT
        try:
            opt = OPT(optname)
            OPTIMIZER = optname
        except Exception:
            # Requested optimizer could not be instantiated; optionally fall
            # back to SLSQP.
            if fallback and optname != 'SLSQP':
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
        else:
            # Instantiation "succeeded", but a Mock means the optimizer is not
            # really installed; fall back if allowed.
            if fallback and isinstance(opt, Mock):
                try:
                    opt = OPT('SLSQP')
                    OPTIMIZER = 'SLSQP'
                except Exception:
                    pass
    except Exception:
        # pyoptsparse itself is not importable.
        pass
    if isinstance(opt, Mock):
        OPT = OPTIMIZER = None
    if not fallback and OPTIMIZER != optname:
        raise unittest.SkipTest("pyoptsparse is not providing %s" % optname)
    return OPT, OPTIMIZER
def format_as_float_or_array(name, values, val_if_none=0.0, flatten=False):
    """
    Format array option values.

    Checks that the given array values are either None, float, or an iterable
    of numeric values. On output all iterables of numeric values are
    converted to a flat np.ndarray. If values is scalar, it is converted
    to float.

    Parameters
    ----------
    name : str
        The path of the variable relative to the current system.
    values : float or numpy ndarray or Iterable
        Values of the array option to be formatted to the expected form.
    val_if_none : float or numpy ndarray
        The default value for the option if values is None.
    flatten : bool
        Set to True to flatten any ndarray return.

    Returns
    -------
    float or np.ndarray
        Values transformed to the expected form.

    Raises
    ------
    ValueError
        If values is Iterable but cannot be converted to a numpy ndarray.
    TypeError
        If values is scalar, not None, and not a Number.
    """
    if isinstance(values, np.ndarray):
        return values.flatten() if flatten else values
    if not isinstance(values, str) and isinstance(values, Iterable):
        arr = np.asarray(values, dtype=float)
        return arr.flatten() if flatten else arr
    if values is None:
        return val_if_none
    # Clamp scalar infinities to the internal bound constant.
    if values == float('inf'):
        return INF_BOUND
    if values == -float('inf'):
        return -INF_BOUND
    if isinstance(values, numbers.Number):
        return float(values)
    raise TypeError('Expected values of {0} to be an Iterable of '
                    'numeric values, or a scalar numeric value. '
                    'Got {1} instead.'.format(name, values))
class ContainsAll(object):
    """
    A fake dictionary whose membership test succeeds for every name.
    """

    def __contains__(self, name):
        """
        Report that the named object is contained.

        Parameters
        ----------
        name : str
            Name of the object being looked up.

        Returns
        -------
        bool
            Always returns True.
        """
        return True
def all_ancestors(pathname, delim='.'):
    """
    Return a generator of pathnames of the starting object and all of its parents.

    Pathnames are ordered from longest to shortest.

    Parameters
    ----------
    pathname : str
        Pathname of starting object.
    delim : str
        Delimiter used to split the name.

    Yields
    ------
    str
    """
    parts = pathname.split(delim)
    while parts:
        yield delim.join(parts)
        parts = parts[:-1]
def find_matches(pattern, var_list):
    """
    Return list of variable names that match given pattern.

    Parameters
    ----------
    pattern : str
        Glob pattern or variable name.
    var_list : list of str
        List of variable names to search for pattern.

    Returns
    -------
    list
        Variable names that match pattern.
    """
    if pattern == '*':
        # Fast path: everything matches.
        return var_list
    if pattern in var_list:
        # Exact name match takes precedence over glob expansion.
        return [pattern]
    return [v for v in var_list if fnmatchcase(v, pattern)]
def pad_name(name, pad_num=10, quotes=False):
    """
    Pad a string so that they all line up when stacked.

    Parameters
    ----------
    name : str
        The string to pad.
    pad_num : int
        The number of total spaces the string should take up.
    quotes : bool
        If name should be quoted.

    Returns
    -------
    str
        Padded string.
    """
    used = len(name) + (2 if quotes else 0)
    text = "'{0}'".format(name) if quotes else '{0}'.format(name)
    if used < pad_num:
        # Right-pad with spaces up to the requested column width.
        return text + ' ' * (pad_num - used)
    return text
def run_model(prob, ignore_exception=False):
    """
    Call `run_model` on problem and capture output.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.
    ignore_exception : bool
        Set to True to ignore an exception of any kind.

    Returns
    -------
    string
        Output from calling `run_model` on the Problem, captured from stdout.
    """
    captured = StringIO()
    saved_stdout = sys.stdout
    sys.stdout = captured
    try:
        prob.run_model()
    except Exception:
        if not ignore_exception:
            raise
    finally:
        # Restore stdout no matter what happened.
        sys.stdout = saved_stdout
    return captured.getvalue()
def run_driver(prob):
    """
    Call `run_driver` on problem and capture output.

    Parameters
    ----------
    prob : Problem
        An instance of Problem.

    Returns
    -------
    bool
        Failure flag; True if failed to converge, False is successful.
    string
        Output from calling `run_driver` on the Problem, captured from stdout.
    """
    captured = StringIO()
    saved_stdout = sys.stdout
    sys.stdout = captured
    try:
        failed = prob.run_driver()
    finally:
        # Restore stdout even if run_driver raised.
        sys.stdout = saved_stdout
    return failed, captured.getvalue()
@contextmanager
def printoptions(*args, **kwds):
    """
    Context manager for setting numpy print options.

    Set print options for the scope of the `with` block, and restore the old
    options at the end. See `numpy.set_printoptions` for the full description of
    available options. If any invalid options are specified, they will be ignored.

    >>> with printoptions(precision=2):
    ...     print(np.array([2.0])) / 3
    [0.67]

    The `as`-clause of the `with`-statement gives the current print options:

    >>> with printoptions(precision=2) as opts:
    ...      assert_equal(opts, np.get_printoptions())

    Parameters
    ----------
    *args : list
        Variable-length argument list.
    **kwds : dict
        Arbitrary keyword arguments.

    Yields
    ------
    str or int

    See Also
    --------
    set_printoptions, get_printoptions
    """
    saved = np.get_printoptions()
    # Drop keyword args this numpy version doesn't support
    # (e.g. numpy <= 1.13 has no 'floatmode' option).
    supported = {key: val for key, val in kwds.items() if key in saved}
    try:
        np.set_printoptions(*args, **supported)
        yield np.get_printoptions()
    finally:
        np.set_printoptions(**saved)
def _nothing():
    # Generator yielding a single None; backs do_nothing_context().
    yield None
def do_nothing_context():
    """
    Do nothing.

    Useful when you have a block of code that only requires a context manager sometimes,
    and you don't want to repeat the context managed block.

    Returns
    -------
    contextmanager
        A do nothing context manager.
    """
    ctx_factory = contextmanager(_nothing)
    return ctx_factory()
def remove_whitespace(s, right=False, left=False):
    """
    Remove white-space characters from the given string.

    If neither right nor left is specified (the default),
    then all white-space is removed.

    Parameters
    ----------
    s : str
        The string to be modified.
    right : bool
        If True, remove white-space from the end of the string.
    left : bool
        If True, remove white-space from the beginning of the string.

    Returns
    -------
    str
        The string with white-space removed.
    """
    if right and left:
        pattern = r"^\s+|\s+$"
    elif right:
        pattern = r"\s+$"
    elif left:
        pattern = r"^\s+"
    else:
        # Default: strip all whitespace, including interior runs.
        pattern = r"\s+"
    return re.sub(pattern, "", s, flags=re.UNICODE)
# Characters that are invalid in a Python identifier, each mapped to '_'.
_badtab = r'`~@#$%^&*()[]{}-+=|\/?<>,.:;'
_transtab = str.maketrans(_badtab, '_' * len(_badtab))
def str2valid_python_name(s):
    """
    Translate a given string into a valid python variable name.

    Each character found in ``_badtab`` is replaced with an underscore.

    Parameters
    ----------
    s : str
        The string to be translated.

    Returns
    -------
    str
        The valid python name string.
    """
    return s.translate(_transtab)
# Containers that are recursively converted to lists during serialization.
_container_classes = (list, tuple, set)


def make_serializable(o):
    """
    Recursively convert numpy types to native types for JSON serialization.

    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, _container_classes):
        return [make_serializable(item) for item in o]
    if isinstance(o, dict):
        return {make_serializable_key(k): make_serializable(v) for k, v in o.items()}
    if isinstance(o, np.ndarray):
        return o.tolist()
    if isinstance(o, np.number):
        return o.item()
    if isinstance(o, (str, float, int)):
        # NOTE(review): bool is a subclass of int, so bools are returned
        # unchanged here and the str() branch below never sees them.
        return o
    if isinstance(o, bool) or isinstance(o, complex):
        return str(o)
    if hasattr(o, '__dict__'):
        try:
            return o.to_json()
        except AttributeError:
            return o.__class__.__name__
    return o
def make_serializable_key(o):
    """
    Recursively convert numpy types to native types for JSON serialization.

    This function is for making serizializable dictionary keys, so no containers.
    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.

    Parameters
    ----------
    o : object
        The object to be converted.

    Returns
    -------
    object
        The converted object.
    """
    if isinstance(o, str):
        return o
    if isinstance(o, np.number):
        return o.item()
    if hasattr(o, '__dict__'):
        # Class instances are represented by their class name.
        return o.__class__.__name__
    return str(o)
def default_noraise(o):
"""
Try to convert some extra types during JSON serialization.
This is intended to be passed to json.dump or json.dumps as the 'default' arg. It will
attempt to convert values if possible, but if no conversion works, will return
'unserializable object (<type>)' instead of raising a TypeError.
Parameters
----------
o : object
The object to be converted.
Returns
-------
object
The converted object.
"""
if isinstance(o, _container_classes):
return [default_noraise(item) for item in o]
elif isinstance(o, dict):
s_key = [make_serializable_key(item) for item in o.keys()]
s_val = [default_noraise(item) for item in o.values()]
return dict(zip(s_key, s_val))
elif isinstance(o, np.ndarray):
return o.tolist()
elif isinstance(o, np.number):
return o.item()
elif isinstance(o, (str, float, int)):
return o
elif isinstance(o, bool) or isinstance(o, complex):
return str(o)
elif hasattr(o, '__dict__'):
return o.__class__.__name__
elif o is None:
return None
else:
return f"unserializable object ({type(o).__name__})"
def make_set(str_data, name=None):
    """
    Construct a set containing the specified character strings.

    Parameters
    ----------
    str_data : None, str, or list of strs
        Character string(s) to be included in the set.
    name : str, optional
        A name to be used in error messages.

    Returns
    -------
    set
        A set of character strings.
    """
    if not str_data:
        return set()
    if isinstance(str_data, str):
        return {str_data}
    if isinstance(str_data, (set, list)):
        # Validate that every element is a string before converting.
        for item in str_data:
            if not isinstance(item, str):
                typ = type(item).__name__
                raise TypeError(
                    f"Items in tags should be of type string, but type '{typ}' was found.")
        return str_data if isinstance(str_data, set) else set(str_data)
    if name:
        raise TypeError("The {} argument should be str, set, or list: {}".format(name, str_data))
    raise TypeError("The argument should be str, set, or list: {}".format(str_data))
def match_includes_excludes(name, includes=None, excludes=None):
    """
    Check to see if the variable names pass through the includes and excludes filter.

    Parameters
    ----------
    name : str
        Name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        Return True if the name passes through the filtering of includes and excludes.
    """
    # An exclude match always wins.
    if excludes is not None and any(fnmatchcase(name, p) for p in excludes):
        return False

    if includes is None:
        return True
    return any(fnmatchcase(name, p) for p in includes)
def match_prom_or_abs(name, prom_name, includes=None, excludes=None):
    """
    Check to see if the variable names pass through the includes and excludes filter.

    Parameters
    ----------
    name : str
        Unpromoted variable name to be checked for match.
    prom_name : str
        Promoted variable name to be checked for match.
    includes : iter of str or None
        Glob patterns for name to include in the filtering. None, the default, means
        to include all.
    excludes : iter of str or None
        Glob patterns for name to exclude in the filtering.

    Returns
    -------
    bool
        Return True if the name passes through the filtering of includes and excludes.
    """
    check_prom = name != prom_name

    def _hit(pattern):
        # Match against the absolute name, and against the promoted name
        # only when it differs from the absolute one.
        return fnmatchcase(name, pattern) or (check_prom and fnmatchcase(prom_name, pattern))

    # An exclude match always wins.
    if excludes is not None and any(_hit(p) for p in excludes):
        return False

    if includes is None:
        return True
    return any(_hit(p) for p in includes)
# Lower-cased values that count as "false" for env_truthy.
_falsey = {'0', 'false', 'no', ''}


def env_truthy(env_var):
    """
    Return True if the given environment variable is 'truthy'.

    Parameters
    ----------
    env_var : str
        The name of the environment variable.

    Returns
    -------
    bool
        True if the specified environment variable is 'truthy'.
    """
    # An unset variable behaves like '0'; comparison is case-insensitive.
    value = os.environ.get(env_var, '0')
    return value.lower() not in _falsey
def common_subpath(pathnames):
    """
    Return the common dotted subpath found in all of the given dotted pathnames.

    Parameters
    ----------
    pathnames : iter of str
        Dotted pathnames of systems.

    Returns
    -------
    str
        Common dotted subpath. Returns '' if no common subpath is found.
    """
    if len(pathnames) == 1:
        return pathnames[0]
    if not pathnames:
        return ''

    splits = [p.split('.') for p in pathnames]
    minlen = min(len(s) for s in splits)

    # Walk name parts left to right until the first mismatch.
    for depth in range(minlen):
        first = splits[0][depth]
        if any(s[depth] != first for s in splits[1:]):
            break
    else:
        # No mismatch within the shortest path: the whole prefix is common.
        depth = minlen

    return '.'.join(splits[0][:depth])
def _is_slicer_op(indices):
"""
Check if an indexer contains a slice or ellipsis operator.
Parameters
----------
indices : ndarray
Indices to check.
Returns
-------
bool
Returns True if indices contains a colon or ellipsis operator.
"""
if isinstance(indices, tuple):
return any(isinstance(i, slice) or i is ... for i in indices)
return isinstance(indices, slice)
def _slice_indices(slicer, arr_size, arr_shape):
    """
    Return an index array based on a slice or slice tuple and the array size and shape.

    Parameters
    ----------
    slicer : slice or tuple containing slices
        Slice object to slice array.
    arr_size : int
        Size of output array.
    arr_shape : tuple
        Tuple of output array shape.

    Returns
    -------
    array
        Returns the sliced indices.
    """
    if isinstance(slicer, slice):
        # Simple slice: generate only the selected indices directly instead of
        # materializing the full index array first (uses less memory).
        start = 0 if slicer.start is None else slicer.start
        stop = arr_size if slicer.stop is None else slicer.stop
        step = 1 if slicer.step is None else slicer.step
        return np.arange(start, stop, step, dtype=INT_DTYPE).reshape(arr_shape)

    # General case (e.g. tuple of slices): index into the full range.
    return np.arange(arr_size, dtype=INT_DTYPE).reshape(arr_shape)[slicer]
def _prom2ivc_src_name_iter(prom_dict):
"""
Yield keys from prom_dict with promoted input names converted to ivc source names.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
str
name
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source']
else:
yield name
def _prom2ivc_src_item_iter(prom_dict):
"""
Yield items from prom_dict with promoted input names converted to ivc source names.
The result is that all names are absolute.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
tuple
name, metadata
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source'], meta
else:
yield name, meta
def _prom2ivc_src_dict(prom_dict):
    """
    Convert a dictionary with promoted input names into one with ivc source names.

    Parameters
    ----------
    prom_dict : dict
        Original dict with some promoted paths.

    Returns
    -------
    dict
        New dict with ivc source pathnames.
    """
    return dict(_prom2ivc_src_item_iter(prom_dict))
def convert_src_inds(parent_src_inds, parent_src_shape, my_src_inds, my_src_shape):
    """
    Compute lower level src_indices based on parent src_indices.

    Parameters
    ----------
    parent_src_inds : ndarray
        Parent src_indices.
    parent_src_shape : tuple
        Shape of source expected by parent.
    my_src_inds : ndarray or fancy index
        Src_indices at the current system level, before conversion.
    my_src_shape : tuple
        Expected source shape at the current system level.

    Returns
    -------
    ndarray
        Final src_indices based on those of the parent.
    """
    # With only one level of indices present there is nothing to compose.
    if parent_src_inds is None:
        return my_src_inds
    if my_src_inds is None:
        return parent_src_inds

    if my_src_inds._flat_src:
        return parent_src_inds.shaped_array(flat=True)[my_src_inds.flat()]
    return parent_src_inds.shaped_array(flat=False).reshape(my_src_shape)[my_src_inds()]
def shape2tuple(shape):
    """
    Return shape as a tuple.

    Parameters
    ----------
    shape : int or tuple or None
        The given shape.

    Returns
    -------
    tuple or None
        The shape as a tuple, or None if shape was None.
    """
    if shape is None:
        return None
    if isinstance(shape, Number):
        return (shape,)
    return tuple(shape)
def get_connection_owner(system, tgt):
    """
    Return (owner, promoted_src, promoted_tgt) for the given connected target.

    Note : this is not speedy. It's intended for use only in error messages.

    Parameters
    ----------
    system : System
        Any System. The search always goes from the model level down.
    tgt : str
        Absolute pathname of the target variable.

    Returns
    -------
    tuple
        (owning group, promoted source name, promoted target name).
    """
    from openmdao.core.group import Group

    model = system._problem_meta['model_ref']()
    src = model._conn_global_abs_in2out[tgt]
    abs2prom = model._var_allprocs_abs2prom

    # Bug fix: membership must be tested on the input dict itself.  The old
    # code did ``tgt in abs2prom['input'][tgt]``, which indexed first (raising
    # KeyError for a missing target) and then did a substring test on the
    # promoted name string.
    if src in abs2prom['output'] and tgt in abs2prom['input']:
        if abs2prom['input'][tgt] != abs2prom['output'][src]:
            # promoted names differ, so connection is explicit; find the group
            # where the manual connect() call was made.
            for g in model.system_iter(include_self=True, recurse=True, typ=Group):
                if g._manual_connections:
                    tprom = g._var_allprocs_abs2prom['input'][tgt]
                    if tprom in g._manual_connections:
                        return g.pathname, g._var_allprocs_abs2prom['output'][src], tprom

    return None, None, None
def wing_dbg():
    """
    Make import of wingdbstub contingent on value of WING_DBG environment variable.

    Also will import wingdbstub from the WINGHOME directory.
    """
    if env_truthy('WING_DBG'):
        import sys
        import os

        saved_path = sys.path
        # Temporarily extend the module search path with WINGHOME so the
        # wingdbstub module can be found there.
        sys.path = sys.path[:] + [os.environ['WINGHOME']]
        try:
            import wingdbstub
        finally:
            sys.path = saved_path
class LocalRangeIterable(object):
    """
    Iterable object yielding local indices while iterating over local or distributed vars.

    The number of iterations for a distributed variable will be the full distributed size of the
    variable but None will be returned for any indices that are not local to the given rank.

    Parameters
    ----------
    system : System
        Containing System.
    vname : str
        Name of the variable.
    use_vec_offset : bool
        If True, return indices for the given variable within its vector, else just return
        indices within the variable itself, i.e. range(var_size).

    Attributes
    ----------
    _inds : ndarray
        Variable indices (unused for distributed variables).
    _dist_size : int
        Full size of distributed variable.
    _start : int
        Starting index of distributed variable on this rank.
    _end : int
        Last index + 1 of distributed variable on this rank.
    _offset : int
        Offset of this variable into the local vector,.
    _iter : method
        The iteration method used.
    """

    def __init__(self, system, vname, use_vec_offset=True):
        """
        Initialize the iterator.
        """
        self._dist_size = 0

        # Determine whether vname is an output or an input; outputs are
        # checked first, inputs are the fallback.
        abs2meta = system._var_allprocs_abs2meta['output']
        if vname in abs2meta:
            sizes = system._var_sizes['output']
            slices = system._outputs.get_slice_dict()
        else:
            abs2meta = system._var_allprocs_abs2meta['input']
            sizes = system._var_sizes['input']
            slices = system._inputs.get_slice_dict()

        if abs2meta[vname]['distributed']:
            # Distributed variable: compute this rank's [start, end) range
            # within the full distributed size.  ``sizes`` is indexed by
            # (rank, variable index) -- presumably sizes per rank; TODO confirm.
            var_idx = system._var_allprocs_abs2idx[vname]
            rank = system.comm.rank
            self._offset = np.sum(sizes[rank, :var_idx]) if use_vec_offset else 0

            self._iter = self._dist_iter
            self._start = np.sum(sizes[:rank, var_idx])
            self._end = self._start + sizes[rank, var_idx]
            self._dist_size = np.sum(sizes[:, var_idx])
        else:
            # Serial variable: iterate over its slice of the local vector, or
            # over range(var_size) when use_vec_offset is False.
            self._iter = self._serial_iter
            if use_vec_offset:
                self._inds = range(slices[vname].start, slices[vname].stop)
            else:
                self._inds = range(slices[vname].stop - slices[vname].start)

    def _serial_iter(self):
        """
        Iterate over a local non-distributed variable.

        Yields
        ------
        int
            Variable index.
        """
        yield from self._inds

    def _dist_iter(self):
        """
        Iterate over a distributed variable.

        Yields
        ------
        int or None
            Variable index or None if index is not local to this rank.
        """
        start = self._start
        end = self._end

        for i in range(self._dist_size):
            if i >= start and i < end:
                # Index i is owned by this rank: translate to a local index.
                yield i - start + self._offset
            else:
                # Not local to this rank.
                yield None

    def __iter__(self):
        """
        Return an iterator.

        Returns
        -------
        iterator
            An iterator over our indices.
        """
        return self._iter()
| openmdao/utils/general_utils.py | 34,574 | A fake dictionary that always reports __contains__(name) to be True.
Iterable object yielding local indices while iterating over local or distributed vars.
The number of iterations for a distributed variable will be the full distributed size of the
variable but None will be returned for any indices that are not local to the given rank.
Parameters
----------
system : System
Containing System.
vname : str
Name of the variable.
use_vec_offset : bool
If True, return indices for the given variable within its vector, else just return
indices within the variable itself, i.e. range(var_size).
Attributes
----------
_inds : ndarray
Variable indices (unused for distributed variables).
_dist_size : int
Full size of distributed variable.
_start : int
Starting index of distributed variable on this rank.
_end : int
Last index + 1 of distributed variable on this rank.
_offset : int
Offset of this variable into the local vector.
_iter : method
The iteration method used.
Return if the named object is contained.
Parameters
----------
name : str
Name of the object being looked up.
Returns
-------
bool
Always returns True.
Initialize the iterator.
Return an iterator.
Returns
-------
iterator
An iterator over our indices.
Convert name of auto_ivc val to promoted input name.
Parameters
----------
conns_dict : dict
Dictionary of global connections.
name : str
Name of auto_ivc to be found.
Returns
-------
str
Promoted input name.
Iterate over a distributed variable.
Yields
------
int or None
Variable index or None if index is not local to this rank.
Check if an indexer contains a slice or ellipsis operator.
Parameters
----------
indices : ndarray
Indices to check.
Returns
-------
bool
Returns True if indices contains a colon or ellipsis operator.
Convert a dictionary with promoted input names into one with ivc source names.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Returns
-------
dict
New dict with ivc source pathnames.
Yield items from prom_dict with promoted input names converted to ivc source names.
The result is that all names are absolute.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
tuple
name, metadata
Yield keys from prom_dict with promoted input names converted to ivc source names.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
str
name
Iterate over a local non-distributed variable.
Yields
------
int
Variable index.
Return an index array based on a slice or slice tuple and the array size and shape.
Parameters
----------
slicer : slice or tuple containing slices
Slice object to slice array
arr_size : int
Size of output array
arr_shape : tuple
Tuple of output array shape
Returns
-------
array
Returns the sliced indices.
Return a generator of pathnames of the starting object and all of its parents.
Pathnames are ordered from longest to shortest.
Parameters
----------
pathname : str
Pathname of starting object.
delim : str
Delimiter used to split the name.
Yields
------
str
Return the common dotted subpath found in all of the given dotted pathnames.
Parameters
----------
pathnames : iter of str
Dotted pathnames of systems.
Returns
-------
str
Common dotted subpath. Returns '' if no common subpath is found.
Raise an exception or issue a warning, depending on the value of _ignore_errors.
Parameters
----------
msg : str
The error/warning message.
exc : Exception class
This exception class is used to create the exception to be raised.
category : warning class
This category is the class of warning to be issued.
err : bool
If None, use ignore_errors(), otherwise use value of err to determine whether to
raise an exception (err=True) or issue a warning (err=False).
Compute lower level src_indices based on parent src_indices.
Parameters
----------
parent_src_inds : ndarray
Parent src_indices.
parent_src_shape : tuple
Shape of source expected by parent.
my_src_inds : ndarray or fancy index
Src_indices at the current system level, before conversion.
my_src_shape : tuple
Expected source shape at the current system level.
Returns
-------
ndarray
Final src_indices based on those of the parent.
Try to convert some extra types during JSON serialization.
This is intended to be passed to json.dump or json.dumps as the 'default' arg. It will
attempt to convert values if possible, but if no conversion works, will return
'unserializable object (<type>)' instead of raising a TypeError.
Parameters
----------
o : object
The object to be converted.
Returns
-------
object
The converted object.
Determine proper values of adder and scaler based on user arguments.
Adder and Scaler are used internally because the transformation is
slightly more efficient.
Parameters
----------
ref0 : float or ndarray, optional
Value of response variable that scales to 0.0 in the driver.
ref : float or ndarray, optional
Value of response variable that scales to 1.0 in the driver.
adder : float or ndarray, optional
Value to add to the model value to get the scaled value. Adder
is first in precedence.
scaler : float or ndarray, optional
Value to multiply the model value to get the scaled value. Scaler
is second in precedence.
Returns
-------
tuple
Adder and scaler, properly formatted and based on ref/ref0 if provided.
Raises
------
ValueError
If both ref/ref0 and adder/scaler were provided.
Notes
-----
The response can be scaled using ref and ref0.
The argument :code:`ref0` represents the physical value when the scaled value is 0.
The argument :code:`ref` represents the physical value when the scaled value is 1.
Do nothing.
Useful when you have a block of code that only requires a context manager sometimes,
and you don't want to repeat the context managed block.
Returns
-------
contextmanager
A do nothing context manager.
Make value compatible with the specified shape or the shape of indices.
Parameters
----------
name : str
The name of the value.
value : float or list or tuple or ndarray or Iterable
The value of a variable.
shape : int or tuple or list or None
The expected or desired shape of the value.
indices : Indexer or None
The indices into a source variable.
Returns
-------
ndarray
The value in a shape compatible with the specified shape and/or indices.
tuple
The resulting shape of the value.
Raises
------
ValueError
If value cannot be made to conform to shape or if shape and indices
are incompatible.
Return True if the given environment variable is 'truthy'.
Parameters
----------
env_var : str
The name of the environment variable.
Returns
-------
bool
True if the specified environment variable is 'truthy'.
Return list of variable names that match given pattern.
Parameters
----------
pattern : str
Glob pattern or variable name.
var_list : list of str
List of variable names to search for pattern.
Returns
-------
list
Variable names that match pattern.
Format array option values.
Checks that the given array values are either None, float, or an iterable
of numeric values. On output all iterables of numeric values are
converted to a flat np.ndarray. If values is scalar, it is converted
to float.
Parameters
----------
name : str
The path of the variable relative to the current system.
values : float or numpy ndarray or Iterable
Values of the array option to be formatted to the expected form.
val_if_none : float or numpy ndarray
The default value for the option if values is None.
flatten : bool
Set to True to flatten any ndarray return.
Returns
-------
float or np.ndarray
Values transformed to the expected form.
Raises
------
ValueError
If values is Iterable but cannot be converted to a numpy ndarray
TypeError
If values is scalar, not None, and not a Number.
Return (owner, promoted_src, promoted_tgt) for the given connected target.
Note : this is not speedy. It's intended for use only in error messages.
Parameters
----------
system : System
Any System. The search always goes from the model level down.
tgt : str
Absolute pathname of the target variable.
Returns
-------
tuple
(owning group, promoted source name, promoted target name).
Disable certain errors that will prevent setup from completing.
Parameters
----------
flag : bool or None
If not None, set the value of _ignore_errors to this value.
Returns
-------
bool
The current value of _ignore_errors.
Set ignore_errors to the given flag in this context.
Parameters
----------
flag : bool
If not None, set ignore_errors to this value.
Yields
------
None
Recursively convert numpy types to native types for JSON serialization.
This function should NOT be passed into json.dump or json.dumps as the 'default' arg.
Parameters
----------
o : object
The object to be converted.
Returns
-------
object
The converted object.
Recursively convert numpy types to native types for JSON serialization.
This function is for making serizializable dictionary keys, so no containers.
This function should NOT be passed into json.dump or json.dumps as the 'default' arg.
Parameters
----------
o : object
The object to be converted.
Returns
-------
object
The converted object.
Construct a set containing the specified character strings.
Parameters
----------
str_data : None, str, or list of strs
Character string(s) to be included in the set.
name : str, optional
A name to be used in error messages.
Returns
-------
set
A set of character strings.
Check to see if the variable names pass through the includes and excludes filter.
Parameters
----------
name : str
Name to be checked for match.
includes : iter of str or None
Glob patterns for name to include in the filtering. None, the default, means
include all.
excludes : iter of str or None
Glob patterns for name to exclude in the filtering.
Returns
-------
bool
Return True if the name passes through the filtering of includes and excludes.
Check to see if the variable names pass through the includes and excludes filter.
Parameters
----------
name : str
Unpromoted variable name to be checked for match.
prom_name : str
Promoted variable name to be checked for match.
includes : iter of str or None
Glob patterns for name to include in the filtering. None, the default, means
to include all.
excludes : iter of str or None
Glob patterns for name to exclude in the filtering.
Returns
-------
bool
Return True if the name passes through the filtering of includes and excludes.
Pad a string so that they all line up when stacked.
Parameters
----------
name : str
The string to pad.
pad_num : int
The number of total spaces the string should take up.
quotes : bool
If name should be quoted.
Returns
-------
str
Padded string.
Context manager for setting numpy print options.
Set print options for the scope of the `with` block, and restore the old
options at the end. See `numpy.set_printoptions` for the full description of
available options. If any invalid options are specified, they will be ignored.
>>> with printoptions(precision=2):
... print(np.array([2.0]) / 3)
[0.67]
The `as`-clause of the `with`-statement gives the current print options:
>>> with printoptions(precision=2) as opts:
... assert_equal(opts, np.get_printoptions())
Parameters
----------
*args : list
Variable-length argument list.
**kwds : dict
Arbitrary keyword arguments.
Yields
------
str or int
See Also
--------
set_printoptions, get_printoptions
Remove white-space characters from the given string.
If neither right nor left is specified (the default),
then all white-space is removed.
Parameters
----------
s : str
The string to be modified.
right : bool
If True, remove white-space from the end of the string.
left : bool
If True, remove white-space from the beginning of the string.
Returns
-------
str
The string with white-space removed.
Call `run_driver` on problem and capture output.
Parameters
----------
prob : Problem
An instance of Problem.
Returns
-------
bool
Failure flag; True if failed to converge, False is successful.
string
Output from calling `run_driver` on the Problem, captured from stdout.
Call `run_model` on problem and capture output.
Parameters
----------
prob : Problem
An instance of Problem.
ignore_exception : bool
Set to True to ignore an exception of any kind.
Returns
-------
string
Output from calling `run_model` on the Problem, captured from stdout.
For testing, sets the pyoptsparse optimizer using the given optimizer name.
This may be modified based on the value of OPENMDAO_FORCE_PYOPTSPARSE_OPT.
This can be used on systems that have SNOPT installed to force them to use
SLSQP in order to mimic our test machines on travis and appveyor.
Parameters
----------
optname : str
Name of pyoptsparse optimizer that is requested by the test.
fallback : bool
If True, fall back to SLSQP if optname can't be found.
Returns
-------
object
Pyoptsparse optimizer instance.
str
Pyoptsparse optimizer string.
Return shape as a tuple.
Parameters
----------
shape : int or tuple
The given shape.
Returns
-------
tuple
The shape as a tuple.
Display a simple warning message without the annoying extra line showing the warning call.
Parameters
----------
msg : str
The warning message.
category : class
The warning class.
stacklevel : int
Number of levels up the stack to identify as the warning location.
Translate a given string into a valid python variable name.
Parameters
----------
s : str
The string to be translated.
Returns
-------
str
The valid python name string.
Make import of wingdbstub contingent on value of WING_DBG environment variable.
Also will import wingdbstub from the WINGHOME directory.
Some miscellaneous utility functions.
note: this is a Python 3.3 change, clean this up for OpenMDAO 3.x Certain command line tools can make use of this to allow visualization of models when errors are present that would normally cause setup to abort. if shape is not given, infer from value (if not scalar) or indices use shape provided or shape of value and check vs. shape of indices later shape is not determined, assume the shape of value was intended shape is determined, if value is scalar assign it to array of shape otherwise make sure value is an array of the determined shape Affine scaling cannot be used with scalers/adders Convert ref/ref0 to scaler/adder so we can scale the bounds Convert adder to ndarray/float as necessary ignore any keyword args that are not valid in this version of numpy e.g. numpy <=1.13 does not have the 'floatmode' option left Process excludes Process includes Process excludes Process includes for a simple slice we can use less memory connection is explicit | 15,046 | en | 0.650161 |
import json
import os
srt_path = '/home/lyp/桌面/MAE_论文逐段精读【论文精读】.457423264.zh-CN.srt'
json_path = '/home/lyp/桌面/caption.json'
txt_path = '/home/lyp/桌面'
def srt2txt(path):
    """Extract the caption text from a bilibili .srt subtitle file.

    The .srt repeats a fixed 5-line pattern (index, timestamps,
    markup-wrapped text, ..., blank), so every line whose index % 5 == 2
    carries the caption.  The text between the first '>' and the following
    '<' is written, one caption per line, to a .txt file inside
    ``txt_path`` named after the part of the file name before its first dot.

    Parameters
    ----------
    path : str
        Path of the .srt subtitle file.
    """
    # Derive the output name from the basename so directories containing
    # dots don't truncate the name in the wrong place.
    base = os.path.basename(path).split('.')[0]
    out_path = os.path.join(txt_path, base + '.txt')
    # Read-only mode with an explicit encoding: the original 'r+' mode
    # needlessly required write permission and relied on the locale codec.
    with open(path, 'r', encoding='utf-8') as f, \
            open(out_path, 'w', encoding='utf-8') as out:
        # Iterate the file lazily instead of materializing readlines().
        for index, line in enumerate(f):
            if index % 5 == 2:
                # '<font ...>caption</font>' -> 'caption'
                out.write(line.split('>')[1].split('<')[0] + '\n')
def json2txt(path):
    """Extract caption text from a bilibili caption JSON file.

    The JSON is expected to have a top-level 'body' list whose entries are
    dicts with a 'content' string.  Each content string is written on its
    own line to a .txt file inside ``txt_path`` named after the part of the
    file name before its first dot.

    Parameters
    ----------
    path : str
        Path of the caption .json file.
    """
    base = os.path.basename(path).split('.')[0]
    out_path = os.path.join(txt_path, base + '.txt')
    # Bug fix: read from the 'path' argument instead of the module-level
    # 'json_path', so the function works for any caption file.
    with open(path, 'r', encoding='utf-8') as f:
        caption_dict = json.load(f)
    with open(out_path, 'w', encoding='utf-8') as out:
        for content_dict in caption_dict['body']:
            out.write(content_dict['content'] + '\n')
if __name__ == '__main__':
    # Convert both hard-coded sample caption files (see srt_path/json_path
    # above) into plain-text transcripts under txt_path.
    srt2txt(srt_path)
    json2txt(json_path)
# coding: utf-8
"""
Pure Storage FlashBlade REST 1.3 Python SDK
Pure Storage FlashBlade REST 1.3 Python SDK, developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.3
Contact: info@purestorage.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class NetworkInterfacesApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def create_network_interfaces(self, **kwargs):
"""
Create a new network interface
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_network_interfaces(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param NetworkInterface network_interface: The attribute map used to create the network interface
:return: NetworkInterfaceResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.create_network_interfaces_with_http_info(**kwargs)
else:
(data) = self.create_network_interfaces_with_http_info(**kwargs)
return data
    def create_network_interfaces_with_http_info(self, **kwargs):
        """
        Create a new network interface
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.create_network_interfaces_with_http_info(callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
        :param NetworkInterface network_interface: The attribute map used to create the network interface
        :return: NetworkInterfaceResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Keywords this endpoint accepts, plus the client pass-through options.
        all_params = ['names', 'network_interface']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Swagger-codegen idiom: capture this frame's locals, then flatten the
        # validated kwargs into it so everything is reachable via params[...].
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                # Reject typos/unsupported options up front.
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_network_interfaces" % key
                )
            params[key] = val
        del params['kwargs']
        collection_formats = {}
        path_params = {}
        query_params = []
        # 'names' is serialized as a comma-separated (csv) query parameter.
        if 'names' in params:
            query_params.append(('names', params['names']))
            collection_formats['names'] = 'csv'
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # The NetworkInterface attribute map is sent as the JSON request body.
        if 'network_interface' in params:
            body_params = params['network_interface']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = ['AuthTokenHeader']
        # Delegate HTTP handling (sync/async, deserialization) to ApiClient.
        return self.api_client.call_api('/1.3/network-interfaces', 'POST',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='NetworkInterfaceResponse',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def delete_network_interfaces(self, **kwargs):
"""
Delete a network interface
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_network_interfaces(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_network_interfaces_with_http_info(**kwargs)
else:
(data) = self.delete_network_interfaces_with_http_info(**kwargs)
return data
    def delete_network_interfaces_with_http_info(self, **kwargs):
        """
        Delete a network interface
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.delete_network_interfaces_with_http_info(callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Keywords this endpoint accepts, plus the client pass-through options.
        all_params = ['names']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Swagger-codegen idiom: capture this frame's locals, then flatten the
        # validated kwargs into it so everything is reachable via params[...].
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                # Reject typos/unsupported options up front.
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_network_interfaces" % key
                )
            params[key] = val
        del params['kwargs']
        collection_formats = {}
        path_params = {}
        query_params = []
        # 'names' is serialized as a comma-separated (csv) query parameter.
        if 'names' in params:
            query_params.append(('names', params['names']))
            collection_formats['names'] = 'csv'
        header_params = {}
        form_params = []
        local_var_files = {}
        # DELETE sends no request body.
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = ['AuthTokenHeader']
        # Delegate HTTP handling (sync/async, deserialization) to ApiClient;
        # response_type=None because the endpoint returns no payload.
        return self.api_client.call_api('/1.3/network-interfaces', 'DELETE',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type=None,
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def list_network_interfaces(self, **kwargs):
"""
List network interfaces
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_network_interfaces(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param str filter: The filter to be used for query.
:param str sort: Sort the response by the specified fields (in descending order if '-' is appended to the field name).
:param int start: The offset of the first resource to return from a collection.
:param int limit: limit, should be >= 0
:param str token: An opaque token used to iterate over a collection. The token to use on the next request is returned in the `continuation_token` field of the result.
:return: NetworkInterfaceResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_network_interfaces_with_http_info(**kwargs)
else:
(data) = self.list_network_interfaces_with_http_info(**kwargs)
return data
    def list_network_interfaces_with_http_info(self, **kwargs):
        """
        List network interfaces
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.list_network_interfaces_with_http_info(callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
        :param str filter: The filter to be used for query.
        :param str sort: Sort the response by the specified fields (in descending order if '-' is appended to the field name).
        :param int start: The offset of the first resource to return from a collection.
        :param int limit: limit, should be >= 0
        :param str token: An opaque token used to iterate over a collection. The token to use on the next request is returned in the `continuation_token` field of the result.
        :return: NetworkInterfaceResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Keywords this endpoint accepts, plus the client pass-through options.
        all_params = ['names', 'filter', 'sort', 'start', 'limit', 'token']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Swagger-codegen idiom: capture this frame's locals, then flatten the
        # validated kwargs into it so everything is reachable via params[...].
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                # Reject typos/unsupported options up front.
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method list_network_interfaces" % key
                )
            params[key] = val
        del params['kwargs']
        collection_formats = {}
        path_params = {}
        query_params = []
        # 'names' is serialized as a comma-separated (csv) query parameter;
        # the paging/filtering options are passed through verbatim.
        if 'names' in params:
            query_params.append(('names', params['names']))
            collection_formats['names'] = 'csv'
        if 'filter' in params:
            query_params.append(('filter', params['filter']))
        if 'sort' in params:
            query_params.append(('sort', params['sort']))
        if 'start' in params:
            query_params.append(('start', params['start']))
        if 'limit' in params:
            query_params.append(('limit', params['limit']))
        if 'token' in params:
            query_params.append(('token', params['token']))
        header_params = {}
        form_params = []
        local_var_files = {}
        # GET sends no request body.
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = ['AuthTokenHeader']
        # Delegate HTTP handling (sync/async, deserialization) to ApiClient.
        return self.api_client.call_api('/1.3/network-interfaces', 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='NetworkInterfaceResponse',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def update_network_interfaces(self, **kwargs):
"""
Update an existing network interface
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_network_interfaces(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param NetworkInterface network_interface: the attribute map used to update the network interface
:return: NetworkInterfaceResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.update_network_interfaces_with_http_info(**kwargs)
else:
(data) = self.update_network_interfaces_with_http_info(**kwargs)
return data
def update_network_interfaces_with_http_info(self, **kwargs):
    """
    Update an existing network interface
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.update_network_interfaces_with_http_info(callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
    :param NetworkInterface network_interface: the attribute map used to update the network interface
    :return: NetworkInterfaceResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # Whitelist of keyword arguments accepted by this endpoint, plus the
    # generic transport-control options shared by every generated method.
    all_params = ['names', 'network_interface']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # NOTE: locals() is snapshotted *before* any other local is defined, so
    # `params` holds exactly {'self', 'kwargs'} at this point.  The loop
    # below flattens the validated kwargs into `params` and then drops the
    # raw 'kwargs' entry — the statement order here is load-bearing.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method update_network_interfaces" % key
            )
        params[key] = val
    del params['kwargs']

    collection_formats = {}
    path_params = {}
    query_params = []
    # 'names' is serialized as a comma-separated value (csv) list.
    if 'names' in params:
        query_params.append(('names', params['names']))
        collection_formats['names'] = 'csv'
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # The NetworkInterface attribute map travels as the request body.
    if 'network_interface' in params:
        body_params = params['network_interface']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json'])
    # Authentication setting
    auth_settings = ['AuthTokenHeader']
    # Delegate the actual HTTP round trip (PATCH) to the shared client.
    return self.api_client.call_api('/1.3/network-interfaces', 'PATCH',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='NetworkInterfaceResponse',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
| purity_fb/purity_fb_1dot3/apis/network_interfaces_api.py | 20,545 | NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
Create a new network interface
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_network_interfaces(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param NetworkInterface network_interface: The attribute map used to create the network interface
:return: NetworkInterfaceResponse
If the method is called asynchronously,
returns the request thread.
Create a new network interface
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_network_interfaces_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param NetworkInterface network_interface: The attribute map used to create the network interface
:return: NetworkInterfaceResponse
If the method is called asynchronously,
returns the request thread.
Delete a network interface
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_network_interfaces(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:return: None
If the method is called asynchronously,
returns the request thread.
Delete a network interface
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_network_interfaces_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:return: None
If the method is called asynchronously,
returns the request thread.
List network interfaces
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_network_interfaces(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param str filter: The filter to be used for query.
:param str sort: Sort the response by the specified fields (in descending order if '-' is appended to the field name).
:param int start: The offset of the first resource to return from a collection.
:param int limit: limit, should be >= 0
:param str token: An opaque token used to iterate over a collection. The token to use on the next request is returned in the `continuation_token` field of the result.
:return: NetworkInterfaceResponse
If the method is called asynchronously,
returns the request thread.
List network interfaces
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_network_interfaces_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param str filter: The filter to be used for query.
:param str sort: Sort the response by the specified fields (in descending order if '-' is appended to the field name).
:param int start: The offset of the first resource to return from a collection.
:param int limit: limit, should be >= 0
:param str token: An opaque token used to iterate over a collection. The token to use on the next request is returned in the `continuation_token` field of the result.
:return: NetworkInterfaceResponse
If the method is called asynchronously,
returns the request thread.
Update an existing network interface
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_network_interfaces(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param NetworkInterface network_interface: the attribute map used to update the network interface
:return: NetworkInterfaceResponse
If the method is called asynchronously,
returns the request thread.
Update an existing network interface
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_network_interfaces_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param NetworkInterface network_interface: the attribute map used to update the network interface
:return: NetworkInterfaceResponse
If the method is called asynchronously,
returns the request thread.
Pure Storage FlashBlade REST 1.3 Python SDK
Pure Storage FlashBlade REST 1.3 Python SDK, developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.3
Contact: info@purestorage.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
coding: utf-8 python 2 and python 3 compatibility library HTTP header `Accept` HTTP header `Content-Type` Authentication setting HTTP header `Accept` HTTP header `Content-Type` Authentication setting HTTP header `Accept` HTTP header `Content-Type` Authentication setting HTTP header `Accept` HTTP header `Content-Type` Authentication setting | 7,639 | en | 0.678387 |
"""
This project demonstrates NESTED LOOPS (i.e., loops within loops)
in the context of SEQUENCES OF SUB-SEQUENCES.
Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher,
Mark Hays, Amanda Stouder, Aaron Wilkin, their colleagues,
and Lucas D'Alesio.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
def main():
    """ Calls the other functions to test them. """
    # All three exercises are marked DONE, so all three test runners are
    # invoked (the first two were previously left commented out).
    run_test_largest_number()
    run_test_largest_negative_number()
    run_test_first_is_elsewhere_too()
def run_test_largest_number():
    """ Tests the largest_number function. """
    # -------------------------------------------------------------------------
    # DONE: 2. Implement this TEST function.
    #   It TESTS the  largest_number  function defined below.
    #   Include at least ** 1 ** ADDITIONAL test beyond those we wrote.
    # -------------------------------------------------------------------------
    print()
    print('-------------------------------------')
    print('Testing the   LARGEST_NUMBER   function:')
    print('-------------------------------------')

    # Test 1:
    expected = 13
    answer = largest_number([(3, 1, 4),
                             (13, 10, 11, 7, 10),
                             [1, 2, 3, 4]])
    print('Expected and actual are:', expected, answer)

    # Test 2:
    expected = -1111111111111111
    answer = largest_number(([], [-1111111111111111], []))
    print('Expected and actual are:', expected, answer)

    # Test 3:
    expected = None
    answer = largest_number(([], [], []))
    print('Expected and actual are:', expected, answer)

    # Test 4 (additional): all numbers negative, spread across subsequences,
    # with an empty subsequence mixed in.  (The previous "additional" test
    # was a verbatim duplicate of Test 1 and was mislabeled "Test 3".)
    expected = -2
    answer = largest_number([[-10, -30], (-2, -50), []])
    print('Expected and actual are:', expected, answer)
def largest_number(seq_seq):
    """
    Returns the largest number in the subsequences of the given
    sequence of sequences.  Returns None if there are NO numbers
    in the subsequences.

    For example, if the given argument is:
        [(3, 1, 4),
         (13, 10, 11, 7, 10),
         [1, 2, 3, 4]]
    then this function returns 13.

    As another example, if the given argument is:
        ([], [-1111111111111111], [])
    then this function returns -1111111111111111.

    As yet another example, if the given argument is:
        ([], [], [])
    then this function returns None.

    Preconditions:
      :type seq_seq: (list, tuple)
    and the given argument is a sequence of sequences,
    where each subsequence contains only numbers.
    """
    # Single pass over every number, tracking the best value seen so far.
    # (Replaces a redundant double scan and an  x == None  comparison;
    # PEP 8 requires  is None  for None checks.)
    largest = None
    for subsequence in seq_seq:
        for number in subsequence:
            if largest is None or number > largest:
                largest = number
    return largest
def run_test_largest_negative_number():
    """ Tests the largest_negative_number function. """
    # -------------------------------------------------------------------------
    # DONE: 4. Implement this TEST function.
    #   It TESTS the  largest_negative_number  function defined below.
    #
    #   Include enough tests to give you confidence that your solution
    #   to this challenging problem is indeed correct.
    # -------------------------------------------------------------------------
    print()
    print('-------------------------------------------------')
    print('Testing the   LARGEST_NEGATIVE_NUMBER   function:')
    print('-------------------------------------------------')
    # NOTE: the previous version of this test mistakenly called
    # largest_number, so largest_negative_number was never exercised.

    # Test 1: the largest negative number is -13.
    expected = -13
    answer = largest_negative_number([(3, 1, 4),
                                      (-13, 10, 11, 7, 10),
                                      [1, 2, 3, 4]])
    print('Expected and actual are:', expected, answer)

    # Test 2: all numbers negative; -2 is the largest.
    expected = -2
    answer = largest_negative_number(([-10], [-1111111111111111], [-2]))
    print('Expected and actual are:', expected, answer)

    # Test 3: no negative numbers at all -> None.
    expected = None
    answer = largest_negative_number([(200, 2, 20), (500, 400)])
    print('Expected and actual are:', expected, answer)

    # Test 4: no numbers at all -> None.
    expected = None
    answer = largest_negative_number(([], [], []))
    print('Expected and actual are:', expected, answer)
def largest_negative_number(seq_seq):
    """
    Returns the largest NEGATIVE number in the given sequence of
    sequences of numbers.  Returns None if there are no negative numbers
    in the sequence of sequences.

    For example, if the given argument is:
        [(30, -5, 8, -20),
         (100, -2.6, 88, -40, -5),
         (400, 500)
        ]
    then this function returns -2.6.

    As another example, if the given argument is:
        [(200, 2, 20), (500, 400)]
    then this function returns None.

    Preconditions:
      :type seq_seq: (list, tuple)
    and the given argument is a sequence of sequences,
    where each subsequence contains only numbers.
    """
    # Single pass tracking the best (i.e. closest-to-zero) negative seen.
    # The previous version took  max()  of each subsequence without
    # filtering for negatives (returning 500 for the first example above)
    # and raised ValueError via  max([])  when no negatives existed.
    # This version also honors the CHALLENGE: no extra sequences are built.
    largest = None
    for subsequence in seq_seq:
        for number in subsequence:
            if number < 0 and (largest is None or number > largest):
                largest = number
    return largest
def run_test_first_is_elsewhere_too():
    """ Tests the first_is_elsewhere_too function. """
    # -------------------------------------------------------------------------
    # We have supplied tests for you. No additional tests are required,
    # although you are welcome to supply more tests if you choose.
    # -------------------------------------------------------------------------
    print()
    print('-------------------------------------')
    print('Testing the   FIRST_IS_ELSEWHERE_TOO   function:')
    print('-------------------------------------')
    # FYI: The notation below constructs what is called a DICTIONARY.
    #      It is like a list, but the indices can be any immutable
    #      objects (here, True or False), not just 0, 1, 2, ... as in lists.
    message = {True: 'Your code PASSED this test.\n',
               False: 'Your code FAILED this test.\n'}
    # Accumulates the AND of every (answer == expected) comparison below,
    # so it is True at the end only if every single test passed.
    no_failures = True

    # Test 1: 3 from the first subsequence appears in the third.
    expected = True
    answer = first_is_elsewhere_too([(3, 1, 4),
                                     (13, 10, 11, 7, 10),
                                     [11, 12, 3, 10]])
    print('Expected and actual are:', expected, answer)
    print(message[answer == expected])
    no_failures = no_failures and (answer == expected)

    # Test 2: no element of (3, 1, 4) appears elsewhere.
    expected = False
    answer = first_is_elsewhere_too([(3, 1, 4),
                                     (13, 10, 11, 7, 10),
                                     [11, 2, 13, 14]])
    print('Expected and actual are:', expected, answer)
    print(message[answer == expected])
    no_failures = no_failures and (answer == expected)

    # Test 3: an empty first subsequence can never match.
    expected = False
    answer = first_is_elsewhere_too([[], [1, 2], [1, 2]])
    print('Expected and actual are:', expected, answer)
    print(message[answer == expected])
    no_failures = no_failures and (answer == expected)

    # Test 4: elements may be non-numbers (here, strings).
    expected = True
    answer = first_is_elsewhere_too([('a', 9),
                                     (13, 10, 11, 7, 'a'),
                                     [11, 12, 3, 10]])
    print('Expected and actual are:', expected, answer)
    print(message[answer == expected])
    no_failures = no_failures and (answer == expected)

    # Test 5: 'a' and 'aa' are different elements, so no match.
    expected = False
    answer = first_is_elsewhere_too([('a', 9),
                                     (13, 10, 11, 7, 'aa'),
                                     [11, 12, 3, 10]])
    print('Expected and actual are:', expected, answer)
    print(message[answer == expected])
    no_failures = no_failures and (answer == expected)

    # Test 6: repeats WITHIN the first subsequence do not count.
    expected = False
    answer = first_is_elsewhere_too([('a', 'a', 'b', 'b', 'a', 'b')])
    print('Expected and actual are:', expected, answer)
    print(message[answer == expected])
    no_failures = no_failures and (answer == expected)

    # Test 7: a single empty subsequence.
    expected = False
    answer = first_is_elsewhere_too([()])
    print('Expected and actual are:', expected, answer)
    print(message[answer == expected])
    no_failures = no_failures and (answer == expected)

    # Test 8: NOTE that ('a') is just the string 'a', not a tuple.
    expected = True
    answer = first_is_elsewhere_too([('a'), (), (), (), ('a')])
    print('Expected and actual are:', expected, answer)
    print(message[answer == expected])
    no_failures = no_failures and (answer == expected)

    # Test 9:
    expected = True
    answer = first_is_elsewhere_too([('a'), (), (), (), ('a'), ()])
    print('Expected and actual are:', expected, answer)
    print(message[answer == expected])
    no_failures = no_failures and (answer == expected)

    # Test 10:
    expected = False
    answer = first_is_elsewhere_too([('a'), (), (), (), ('b'), ()])
    print('Expected and actual are:', expected, answer)
    print(message[answer == expected])
    no_failures = no_failures and (answer == expected)

    # Test 11: strings are sequences of characters; 'o' is shared.
    expected = True
    answer = first_is_elsewhere_too(['hello', 'goodbye'])
    print('Expected and actual are:', expected, answer)
    print(message[answer == expected])
    no_failures = no_failures and (answer == expected)

    # Test 12:
    expected = False
    answer = first_is_elsewhere_too(['hello', 'xxxxxxxxxxx'])
    print('Expected and actual are:', expected, answer)
    print(message[answer == expected])
    no_failures = no_failures and (answer == expected)

    # Test 13: no digit of the first string appears in the later strings.
    expected = False
    answer = first_is_elsewhere_too(['1234567890',
                                     'one two three',
                                     'i am free',
                                     'four five six',
                                     'get my sticks',
                                     'seven eight nine',
                                     'i am fine'])
    print('Expected and actual are:', expected, answer)
    print(message[answer == expected])
    no_failures = no_failures and (answer == expected)

    # Test 14: large strings; the lone 'b' reappears as its own element.
    expected = True
    answer = first_is_elsewhere_too([(1000 * 'a') + 'b' + (500 * 'a'),
                                     (800 * 'c') + 'd' + 1200 * 'c',
                                     'b'])
    print('Expected and actual are:', expected, answer)
    print(message[answer == expected])
    no_failures = no_failures and (answer == expected)

    # Test 15:
    expected = True
    answer = first_is_elsewhere_too([(1000 * 'a') + 'b' + (500 * 'a'),
                                     (800 * 'c') + 'd' + 1200 * 'c',
                                     (700 * 'eee') + 'b' + (90 * 'd'),
                                     (800 * 'c') + 'd' + 1200 * 'c'])
    print('Expected and actual are:', expected, answer)
    print(message[answer == expected])
    no_failures = no_failures and (answer == expected)

    # Test 16:
    expected = True
    answer = first_is_elsewhere_too([(1000 * 'b') + 'acd' + (500 * 'f'),
                                     (800 * '1') + '234a',
                                     'eeee'])
    print('Expected and actual are:', expected, answer)
    print(message[answer == expected])
    no_failures = no_failures and (answer == expected)

    # Test 17:
    expected = True
    answer = first_is_elsewhere_too([(1000 * 'b') + 'acd' + (500 * 'f'),
                                     'a' + (800 * '1') + '234',
                                     '123'])
    print('Expected and actual are:', expected, answer)
    print(message[answer == expected])
    no_failures = no_failures and (answer == expected)

    # Test 18: build a big argument programmatically, then plant an 'a'.
    test1 = [(1000 * 'b') + 'acd' + (500 * 'f'),
             (800 * '1') + '234',
             '123']
    for k in range(95):
        test1.append(k * chr(k))
    test2 = []
    for k in range(30):
        test2.append(k * chr(k))
    expected = True
    answer = first_is_elsewhere_too(test1 + ['a'] + test2)
    print('Expected and actual are:', expected, answer)
    print(message[answer == expected])
    no_failures = no_failures and (answer == expected)

    # Test 19 (continues test 18): without the planted 'a', no match.
    expected = False
    answer = first_is_elsewhere_too(test1 + test2)
    print('Expected and actual are:', expected, answer)
    print(message[answer == expected])
    no_failures = no_failures and (answer == expected)

    # Test 20 (continues test 18): 'a' buried inside a long string.
    expected = True
    a_inside = (100 * 'b') + 'a' + (100 * 'b')
    answer = first_is_elsewhere_too(test1 + [a_inside] + test2)
    print('Expected and actual are:', expected, answer)
    print(message[answer == expected])
    no_failures = no_failures and (answer == expected)

    # Final summary: the two prints join with the trailing line below.
    if no_failures:
        print('*** Your code PASSED all')
    else:
        print('!!! Your code FAILED some')
    print('    of the tests for first_is_elsewhere_too')
def first_is_elsewhere_too(seq_seq):
    """
    Given a sequence of subsequences:
      -- Returns True if any element of the first (initial) subsequence
           appears in any of the other subsequences.
      -- Returns False otherwise.

    For example, if the given argument is:
        [(3, 1, 4),
         (13, 10, 11, 7, 10),
         [11, 12, 3, 10]]
    then this function returns True because 3 appears
    in the first subsequence and also in the third subsequence.

    As another example, if the given argument is:
        [(3, 1, 4),
         (13, 10, 11, 7, 10),
         [11, 2, 13, 14]]
    then this function returns False because 3 does not appear in
    any subsequence except the first, 1 does not appear in any
    subsequence except the first, and 4 does not appear in any
    subsequence except the first.

    As yet another example, if the given argument is:
        ([], [1, 2], [1, 2])
    then this function returns False since no element of the first
    subsequence appears elsewhere.

    Preconditions:
      :type seq_seq: (list, tuple)
    and the given argument is a sequence of sequences.
    """
    # IMPLEMENTATION RESTRICTION (unchanged): membership is judged only
    # with the comparison operator (==); no IN operator, no COUNT, no
    # INDEX.  This version iterates over elements directly rather than
    # over index ranges.
    first = seq_seq[0]
    others = seq_seq[1:]
    for target in first:
        for other_subsequence in others:
            for element in other_subsequence:
                if element == target:
                    # Found a first-subsequence element elsewhere.
                    return True
    return False
# -----------------------------------------------------------------------------
# Calls  main  to start the ball rolling, but only when this file is run
# as a script (so importing the module no longer triggers the tests).
# -----------------------------------------------------------------------------
if __name__ == '__main__':
    main()
| src/m3_more_nested_loops_in_sequences.py | 15,919 | Given a sequence of subsequences:
-- Returns True if any element of the first (initial) subsequence
appears in any of the other subsequences.
-- Returns False otherwise.
For example, if the given argument is:
[(3, 1, 4),
(13, 10, 11, 7, 10),
[11, 12, 3, 10]]
then this function returns True because 3 appears
in the first subsequence and also in the third subsequence.
As another example, if the given argument is:
[(3, 1, 4),
(13, 10, 11, 7, 10),
[11, 2, 13, 14]]
then this function returns False because 3 does not appear in
any subsequence except the first, 1 does not appear in any
subsequence except the first, and 4 does not appear in any
subsequence except the first.
As yet another example, if the given argument is:
([], [1, 2], [1, 2])
then this function returns False since no element of the first
subsequence appears elsewhere.
Preconditions:
:type seq_seq: (list, tuple)
and the given argument is a sequence of sequences.
Returns the largest NEGATIVE number in the given sequence of
sequences of numbers. Returns None if there are no negative numbers
in the sequence of sequences.
For example, if the given argument is:
[(30, -5, 8, -20),
(100, -2.6, 88, -40, -5),
(400, 500)
]
then this function returns -2.6.
As another example, if the given argument is:
[(200, 2, 20), (500, 400)]
then this function returns None.
Preconditions:
:type seq_seq: (list, tuple)
and the given argument is a sequence of sequences,
where each subsequence contains only numbers.
Returns the largest number in the subsequences of the given
sequence of sequences. Returns None if there are NO numbers
in the subsequences.
For example, if the given argument is:
[(3, 1, 4),
(13, 10, 11, 7, 10),
[1, 2, 3, 4]]
then this function returns 13.
As another example, if the given argument is:
([], [-1111111111111111], [])
then this function returns -1111111111111111.
As yet another example, if the given argument is:
([], [], [])
then this function returns None.
Preconditions:
:type seq_seq: (list, tuple)
and the given argument is a sequence of sequences,
where each subsequence contains only numbers.
Calls the other functions to test them.
Tests the first_is_elsewhere_too function.
Tests the largest_negative_number function.
Tests the largest_number function.
This project demonstrates NESTED LOOPS (i.e., loops within loops)
in the context of SEQUENCES OF SUB-SEQUENCES.
Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher,
Mark Hays, Amanda Stouder, Aaron Wilkin, their colleagues,
and Lucas D'Alesio.
DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.run_test_largest_number()run_test_largest_negative_number() ------------------------------------------------------------------------- DONE: 2. Implement this TEST function. It TESTS the largest_number function defined below. Include at least ** 1 ** ADDITIONAL test beyond those we wrote. ------------------------------------------------------------------------- Test 1: Test 2: Test 3: DONE 2 (continued): Add your ADDITIONAL test(s) here: Test 3: ------------------------------------------------------------------------- DONE: 3. Implement and test this function. Note that you should write its TEST function first (above). ------------------------------------------------------------------------- ------------------------------------------------------------------------- DONE: 4. Implement this TEST function. It TESTS the largest_negative_number function defined below. Include enough tests to give you confidence that your solution to this challenging problem is indeed correct. ------------------------------------------------------------------------- Test 1: Test 2: Test 3: ------------------------------------------------------------------------- DONE: 5. Implement and test this function. Note that you should write its TEST function first (above). CHALLENGE: Try to solve this problem with no additional sequences being constructed (so the SPACE allowed is limited to the give sequence of sequences plus any non-list variables you want). ------------------------------------------------------------------------- ------------------------------------------------------------------------- We have supplied tests for you. No additional tests are required, although you are welcome to supply more tests if you choose. ------------------------------------------------------------------------- FYI: The notation below constructs what is called a DICTIONARY. 
It is like a list, but the indices can be any immutable objects (here, True or False), not just 0, 1, 2, ... as in lists. Test 1: Test 2: Test 3: Test 4: Test 1: Test 5: Test 6: Test 7: Test 8: Test 9: Test 10: Test 11: Test 12: Test 13: Test 14: Test 15: Test 16: Test 17: Test 18: Test 19 (continues test 18): Test 20 (continues test 18): ------------------------------------------------------------------------- DONE: 6. Implement and test this function. Some tests are already written for you (above). IMPLEMENTATION RESTRICTION: ** You may NOT use anything but comparison (==) in judging membership. In particular, you may NOT use: -- the IN operator (example: 7 in [9, 6, 7, 9] returns True) -- the COUNT method (example: [9, 6, 7, 9].count(9) returns 2) -- the INDEX method (example: [9, 6, 7, 9, 6, 1].index(6) returns 1) in this problem, as doing so would defeat the goal of providing practice at loops within loops (within loops within ...) ------------------------------------------------------------------------- ----------------------------------------------------------------------------- Calls main to start the ball rolling. ----------------------------------------------------------------------------- | 5,888 | en | 0.70708 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.