hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
059f19fa9902c2e06c87d1308dfb3c2faf668b88 | 11,121 | py | Python | model_zoo/research/cv/FaceDetection/src/FaceDetection/yolo_loss.py | GuoSuiming/mindspore | 48afc4cfa53d970c0b20eedfb46e039db2a133d5 | [
"Apache-2.0"
] | 55 | 2020-12-17T10:26:06.000Z | 2022-03-28T07:18:26.000Z | model_zoo/research/cv/FaceDetection/src/FaceDetection/yolo_loss.py | forwhat461/mindspore | 59a277756eb4faad9ac9afcc7fd526e8277d4994 | [
"Apache-2.0"
] | null | null | null | model_zoo/research/cv/FaceDetection/src/FaceDetection/yolo_loss.py | forwhat461/mindspore | 59a277756eb4faad9ac9afcc7fd526e8277d4994 | [
"Apache-2.0"
] | 14 | 2021-01-29T02:39:47.000Z | 2022-03-23T05:00:26.000Z | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Face detection loss."""
import numpy as np
import mindspore.nn as nn
from mindspore.ops import operations as P
from mindspore.nn.loss.loss import _Loss
from mindspore.nn import Cell
from mindspore import Tensor
from mindspore.common import dtype as mstype
class PtLinspace(Cell):
    '''Produces evenly spaced integer values from start to end (inclusive) as a
    1-D tensor, mimicking a linspace restricted to integer steps.'''
    def __init__(self):
        super(PtLinspace, self).__init__()
        self.tuple_to_array = P.TupleToArray()
    def construct(self, start, end, steps):
        """Return a tensor of `steps` values covering [start, end].

        Assumes (end - start + 1) is divisible by `steps`; every call site in
        this file uses (0, n - 1, n), giving a step of 1.
        """
        lin_x = ()
        # Floor division: Python 3 true division would yield a float step,
        # which range() rejects with a TypeError.
        step = (end - start + 1) // steps
        for i in range(start, end + 1, step):
            lin_x += (i,)
        lin_x = self.tuple_to_array(lin_x)
        return lin_x
class MSELoss(_Loss):
    '''Mean Euclidean-distance loss between two batches of embeddings.'''
    def __init__(self):
        super(MSELoss, self).__init__()
        self.sum = P.Sum()
        self.mean = P.ReduceMean(keepdims=False)
        self.pow = P.Pow()
        self.sqrt = P.Sqrt()
    def construct(self, nembeddings1, nembeddings2):
        """Average over the batch of the per-sample L2 distance."""
        diff = nembeddings1 - nembeddings2
        squared = self.pow(diff, 2.0)
        # Sum squared differences over the embedding axis, then take the root
        # to get one Euclidean distance per sample.
        per_sample = self.sqrt(self.sum(squared, 1))
        return self.mean(per_sample, 0)
class YoloLoss(Cell):
    """ Computes yolo loss from darknet network output and target annotation.
    Args:
        num_classes (int): number of categories
        anchors (list): 2D list representing anchor boxes
        coord_scale (float): weight of bounding box coordinates
        no_object_scale (float): weight of regions without target boxes
        object_scale (float): weight of regions with target boxes
        class_scale (float): weight of categorical predictions
        thresh (float): minimum iou between a predicted box and ground truth for them to be considered matching
        seen (int): How many images the network has already been trained on.
    """
    def __init__(self, num_classes, anchors, anchors_mask, reduction=32, seen=0, coord_scale=1.0, no_object_scale=1.0,
                 object_scale=1.0, class_scale=1.0, thresh=0.5, head_idx=0.0):
        super(YoloLoss, self).__init__()
        self.num_classes = num_classes
        self.num_anchors = len(anchors_mask)
        self.anchor_step = len(anchors[0])  # each scale has step anchors
        self.anchors = np.array(anchors, dtype=np.float32) / reduction  # scale every anchor for every scale
        self.tensor_anchors = Tensor(self.anchors, mstype.float32)
        self.anchors_mask = anchors_mask
        anchors_w = []
        anchors_h = []
        for i in range(len(anchors_mask)):
            anchors_w.append(self.anchors[self.anchors_mask[i]][0])
            anchors_h.append(self.anchors[self.anchors_mask[i]][1])
        self.anchors_w = Tensor(np.array(anchors_w).reshape(len(self.anchors_mask), 1))
        self.anchors_h = Tensor(np.array(anchors_h).reshape(len(self.anchors_mask), 1))
        self.reduction = reduction
        self.seen = seen
        self.head_idx = head_idx
        self.zero = Tensor(0)
        self.coord_scale = coord_scale
        self.no_object_scale = no_object_scale
        self.object_scale = object_scale
        self.class_scale = class_scale
        self.thresh = thresh
        # Running statistics placeholder (not updated inside construct).
        self.info = {'avg_iou': 0, 'class': 0, 'obj': 0, 'no_obj': 0,
                     'recall50': 0, 'recall75': 0, 'obj_cur': 0, 'obj_all': 0,
                     'coord_xy': 0, 'coord_wh': 0}
        self.shape = P.Shape()
        self.reshape = P.Reshape()
        self.sigmoid = P.Sigmoid()
        self.zeros_like = P.ZerosLike()
        # Separate Concat instances per use site (graph-mode op instances).
        self.concat0 = P.Concat(0)
        self.concat0_2 = P.Concat(0)
        self.concat0_3 = P.Concat(0)
        self.concat0_4 = P.Concat(0)
        self.concat1 = P.Concat(1)
        self.concat1_2 = P.Concat(1)
        self.concat1_3 = P.Concat(1)
        self.concat1_4 = P.Concat(1)
        self.concat2 = P.Concat(2)
        self.concat2_2 = P.Concat(2)
        self.concat2_3 = P.Concat(2)
        self.concat2_4 = P.Concat(2)
        self.tile = P.Tile()
        self.transpose = P.Transpose()
        self.cast = P.Cast()
        self.exp = P.Exp()
        self.sum = P.ReduceSum()
        self.smooth_l1_loss = P.SmoothL1Loss()
        self.bce = P.SigmoidCrossEntropyWithLogits()
        self.ce = P.SoftmaxCrossEntropyWithLogits()
        self.pt_linspace = PtLinspace()
        self.one_hot = nn.OneHot(-1, self.num_classes, 1.0, 0.0)
        self.squeeze_2 = P.Squeeze(2)
        self.reduce_sum = P.ReduceSum()
        self.select = P.Select()
        self.iou = P.IOU()
    def construct(self, output, coord_mask, conf_pos_mask, conf_neg_mask, cls_mask, t_coord, t_conf, t_cls, gt_list):
        """
        Compute Yolo loss.
        """
        output_d = self.shape(output)
        num_batch = output_d[0]
        num_anchors = self.num_anchors
        num_classes = self.num_classes
        # Floor division: channels-per-anchor must stay an integer for the
        # reshape below (true division would produce a float dimension).
        num_channels = output_d[1] // num_anchors
        height = output_d[2]
        width = output_d[3]
        output = self.reshape(output, (num_batch, num_anchors, num_channels, height * width))
        coord_01 = output[:, :, :2]  # tx,ty
        coord_23 = output[:, :, 2:4]  # tw,th
        coord = self.concat2((coord_01, coord_23))
        conf = self.squeeze_2(output[:, :, 4:5, :])
        cls = output[:, :, 5:]
        cls = self.reshape(cls, (num_batch*num_anchors, num_classes, height*width))
        perm = (0, 2, 1)
        cls = self.transpose(cls, perm)
        cls_shp = self.shape(cls)
        # Floor division keeps the reshape dimension an integer.
        cls = self.reshape(cls, (cls_shp[0] * cls_shp[1] * cls_shp[2] // num_classes, num_classes))
        # Per-cell grid offsets for decoding tx/ty into absolute positions.
        lin_x = self.pt_linspace(0, width - 1, width)
        lin_x = self.tile(lin_x, (height,))
        lin_x = self.cast(lin_x, mstype.float32)
        lin_y = self.pt_linspace(0, height - 1, height)
        lin_y = self.reshape(lin_y, (height, 1))
        lin_y = self.tile(lin_y, (1, width))
        lin_y = self.reshape(lin_y, (self.shape(lin_y)[0] * self.shape(lin_y)[1],))
        lin_y = self.cast(lin_y, mstype.float32)
        anchor_w = self.anchors_w
        anchor_h = self.anchors_h
        anchor_w = self.cast(anchor_w, mstype.float32)
        anchor_h = self.cast(anchor_h, mstype.float32)
        # Decode predicted boxes: sigmoid(tx/ty) + cell offset, exp(tw/th) * anchor.
        coord_x = self.sigmoid(coord[:, :, 0:1, :])
        pred_boxes_0 = self.squeeze_2(coord_x) + lin_x
        shape_pb0 = self.shape(pred_boxes_0)
        pred_boxes_0 = self.reshape(pred_boxes_0, (shape_pb0[0] * shape_pb0[1] * shape_pb0[2], 1))
        coord_y = self.sigmoid(coord[:, :, 1:2, :])
        pred_boxes_1 = self.squeeze_2(coord_y) + lin_y
        shape_pb1 = self.shape(pred_boxes_1)
        pred_boxes_1 = self.reshape(pred_boxes_1, (shape_pb1[0] * shape_pb1[1] * shape_pb1[2], 1))
        pred_boxes_2 = self.exp(self.squeeze_2(coord[:, :, 2:3, :])) * anchor_w
        shape_pb2 = self.shape(pred_boxes_2)
        pred_boxes_2 = self.reshape(pred_boxes_2, (shape_pb2[0] * shape_pb2[1] * shape_pb2[2], 1))
        pred_boxes_3 = self.exp(self.squeeze_2(coord[:, :, 3:4, :])) * anchor_h
        shape_pb3 = self.shape(pred_boxes_3)
        pred_boxes_3 = self.reshape(pred_boxes_3, (shape_pb3[0] * shape_pb3[1] * shape_pb3[2], 1))
        # Convert center/size to corner coordinates for IOU computation.
        pred_boxes_x1 = pred_boxes_0 - pred_boxes_2 / 2
        pred_boxes_y1 = pred_boxes_1 - pred_boxes_3 / 2
        pred_boxes_x2 = pred_boxes_0 + pred_boxes_2 / 2
        pred_boxes_y2 = pred_boxes_1 + pred_boxes_3 / 2
        pred_boxes_points = self.concat1_4((pred_boxes_x1, pred_boxes_y1, pred_boxes_x2, pred_boxes_y2))
        total_anchors = num_anchors * height * width
        mask_concat = None
        conf_neg_mask_zero = self.zeros_like(conf_neg_mask)
        # Scale factor of 64 before IOU; presumably converts grid units to
        # pixels for the P.IOU op — TODO confirm against data pipeline.
        pred_boxes_points = pred_boxes_points * 64
        gt_list = gt_list * 64
        for b in range(num_batch):
            # Zero out the no-object mask where any ground-truth box overlaps
            # a prediction above the IOU threshold (those cells are not
            # penalized as background).
            cur_pred_boxes = pred_boxes_points[b * total_anchors:(b + 1) * total_anchors]
            iou_gt_pred = self.iou(self.cast(cur_pred_boxes, mstype.float16), self.cast(gt_list[b], mstype.float16))
            mask = self.cast((iou_gt_pred > self.thresh), mstype.float16)
            mask = self.reduce_sum(mask, 0)
            mask = mask > 0
            shape_neg = self.shape(conf_neg_mask[0])
            mask = self.reshape(mask, (1, shape_neg[0], shape_neg[1]))
            if b == 0:
                mask_concat = mask
            else:
                mask_concat = self.concat0_2((mask_concat, mask))
        conf_neg_mask = self.select(mask_concat, conf_neg_mask_zero, conf_neg_mask)
        coord_mask = self.tile(coord_mask, (1, 1, 4, 1))
        coord_mask = coord_mask[:, :, :2]
        coord_center = coord[:, :, :2]
        t_coord_center = t_coord[:, :, :2]
        coord_wh = coord[:, :, 2:]
        t_coord_wh = t_coord[:, :, 2:]
        one_hot_label = None
        shape_cls_mask = None
        if num_classes > 1:
            shape_t_cls = self.shape(t_cls)
            t_cls = self.reshape(t_cls, (shape_t_cls[0] * shape_t_cls[1] * shape_t_cls[2],))
            one_hot_label = self.one_hot(self.cast(t_cls, mstype.int32))
            shape_cls_mask = self.shape(cls_mask)
            cls_mask = self.reshape(cls_mask, (1, shape_cls_mask[0] * shape_cls_mask[1] * shape_cls_mask[2]))
        # Heads later in the pyramid (head_idx > 0) get a larger weight.
        added_scale = 1.0 + self.head_idx * 0.5
        # Coordinate loss: BCE on the sigmoid-decoded centers, SmoothL1 on w/h.
        loss_coord_center = added_scale * 2.0 * 1.0 * self.coord_scale * self.sum(
            coord_mask * self.bce(coord_center, t_coord_center), ())
        loss_coord_wh = added_scale * 2.0 * 1.5 * self.coord_scale * self.sum(
            coord_mask * self.smooth_l1_loss(coord_wh, t_coord_wh), ())
        loss_coord = 1.0 * (loss_coord_center + loss_coord_wh)
        # Objectness loss, split between positive and negative cells.
        loss_conf_pos = added_scale * 2.0 * self.object_scale * self.sum(conf_pos_mask * self.bce(conf, t_conf), ())
        loss_conf_neg = 1.0 * self.no_object_scale * self.sum(conf_neg_mask * self.bce(conf, t_conf), ())
        loss_conf = loss_conf_pos + loss_conf_neg
        loss_cls = None
        if num_classes > 1:
            # Multi-class: softmax cross-entropy over the class channels.
            loss_cls = self.class_scale * 1.0 * self.sum(cls_mask * self.ce(cls, one_hot_label)[0], ())
        else:
            # Single class: treat channel 5 as a second objectness logit.
            loss_cls = 0.0
            cls = self.squeeze_2(output[:, :, 5:6, :])
            loss_cls_pos = added_scale * 2.0 * self.object_scale * self.sum(conf_pos_mask * self.bce(cls, t_conf), ())
            loss_cls_neg = 1.0 * self.no_object_scale * self.sum(conf_neg_mask * self.bce(cls, t_conf), ())
            loss_cls = loss_cls_pos + loss_cls_neg
        loss_tot = loss_coord + 0.5 * loss_conf + 0.5 * loss_cls
        return loss_tot
| 39.860215 | 118 | 0.61829 |
570f1ff62278f48c4d5bc9de8481d8f78bd958a9 | 17,045 | py | Python | tensor2tensor/rl/trainer_model_based.py | angelfish91/tensor2tensor | 168928d29cdd887fd6cdd5d5ad35181bef614154 | [
"Apache-2.0"
] | 1 | 2018-12-12T03:20:15.000Z | 2018-12-12T03:20:15.000Z | tensor2tensor/rl/trainer_model_based.py | angelfish91/tensor2tensor | 168928d29cdd887fd6cdd5d5ad35181bef614154 | [
"Apache-2.0"
] | null | null | null | tensor2tensor/rl/trainer_model_based.py | angelfish91/tensor2tensor | 168928d29cdd887fd6cdd5d5ad35181bef614154 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Training of model-based RL agents.
Example invocation:
python -m tensor2tensor.rl.trainer_model_based \
--output_dir=$HOME/t2t/rl_v1 \
--loop_hparams_set=rlmb_base \
--loop_hparams='num_real_env_frames=10000,epochs=3'
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import math
import os
import pprint
import random
import time
import numpy as np
import six
from tensor2tensor.bin import t2t_trainer # pylint: disable=unused-import
from tensor2tensor.layers import common_video
from tensor2tensor.models.research import rl
from tensor2tensor.rl import rl_utils
from tensor2tensor.rl import trainer_model_based_params
from tensor2tensor.rl.restarter import Restarter
from tensor2tensor.utils import trainer_lib
import tensorflow as tf
flags = tf.flags
FLAGS = flags.FLAGS
def real_env_step_increment(hparams):
  """Number of real-environment frames to collect per epoch (rounded up)."""
  frames_per_epoch = hparams.num_real_env_frames / hparams.epochs
  return int(math.ceil(frames_per_epoch))
def world_model_step_increment(hparams, is_initial_epoch):
  """World-model training-step increment; the initial epoch trains longer."""
  multiplier = (
      hparams.initial_epoch_train_steps_multiplier if is_initial_epoch else 1
  )
  return multiplier * hparams.model_train_steps
def setup_directories(base_dir, subdirs):
  """Create base_dir and the given subdirectories; return {subdir: path}.

  Each entry of subdirs is either a single directory name or a tuple of path
  components; the original entry (string or tuple) is the dictionary key.
  """
  base_dir = os.path.expanduser(base_dir)
  tf.gfile.MakeDirs(base_dir)
  all_dirs = {}
  for subdir in subdirs:
    parts = (subdir,) if isinstance(subdir, six.string_types) else subdir
    path = os.path.join(base_dir, *parts)
    tf.gfile.MakeDirs(path)
    all_dirs[subdir] = path
  return all_dirs
def make_relative_timing_fn():
  """Return a zero-argument function that logs time elapsed since this call."""
  created_at = time.time()
  def log_relative_time():
    elapsed = datetime.timedelta(seconds=time.time() - created_at)
    tf.logging.info("Timing: %s", str(elapsed))
  return log_relative_time
def make_log_fn(epoch, log_relative_time_fn):
  """Return a printf-style logger that tags messages with the epoch number."""
  def log(msg, *args):
    formatted = msg % args
    tf.logging.info("%s Epoch %d: %s", ">>>>>>>", epoch, formatted)
    log_relative_time_fn()
  return log
def random_rollout_subsequences(rollouts, num_subsequences, subsequence_length):
  """Chooses a random frame sequence of given length from a set of rollouts."""
  def choose_subsequence():
    # TODO(koz4k): Weigh rollouts by their lengths so sampling is uniform over
    # frames and not rollouts.
    chosen = random.choice(rollouts)
    num_starts = len(chosen) - subsequence_length + 1
    try:
      start = random.randrange(num_starts)
    except ValueError:
      # Chosen rollout is too short; draw again.
      return choose_subsequence()
    return chosen[start:(start + subsequence_length)]
  return [choose_subsequence() for _ in range(num_subsequences)]
def make_simulated_env_fn(
    real_env, hparams, batch_size, initial_frame_chooser, model_dir):
  """Creates an env_fn for the learned simulated environment.

  The simulated env mirrors the real env's spaces and reward range and is
  driven by the world model stored under model_dir.
  """
  model_hparams = trainer_lib.create_hparams(hparams.generative_model_params)
  return rl.make_simulated_env_fn(
      reward_range=real_env.reward_range,
      observation_space=real_env.observation_space,
      action_space=real_env.action_space,
      frame_stack_size=hparams.frame_stack_size,
      frame_height=real_env.frame_height,
      frame_width=real_env.frame_width,
      initial_frame_chooser=initial_frame_chooser,
      batch_size=batch_size,
      model_name=hparams.generative_model,
      model_hparams=model_hparams,
      model_dir=model_dir,
      intrinsic_reward_scale=hparams.intrinsic_reward_scale,
  )
def train_supervised(problem, model_name, hparams, data_dir, output_dir,
                     train_steps, eval_steps, local_eval_frequency=None,
                     schedule="continuous_train_and_eval"):
  """Run supervised T2T training of model_name on problem."""
  if local_eval_frequency is None:
    # Fall back to the command-line flag when no explicit frequency is given.
    local_eval_frequency = getattr(FLAGS, "local_eval_frequency")
  make_experiment = trainer_lib.create_experiment_fn(
      model_name, problem, data_dir, train_steps, eval_steps,
      min_eval_frequency=local_eval_frequency
  )
  run_config = trainer_lib.create_run_config(model_name, model_dir=output_dir)
  experiment = make_experiment(run_config, hparams)
  getattr(experiment, schedule)()
def train_agent(real_env, learner, world_model_dir, hparams, epoch):
  """Train the PPO agent in the simulated environment."""
  frame_stack_size = hparams.frame_stack_size
  # Real-env rollouts from this epoch supply the starting frames for
  # simulated episodes (deterministic or random starts, below).
  initial_frame_rollouts = real_env.current_epoch_rollouts(
      split=tf.contrib.learn.ModeKeys.TRAIN,
      minimal_rollout_frames=frame_stack_size,
  )
  # TODO(koz4k): Move this to a different module.
  def initial_frame_chooser(batch_size):
    """Frame chooser."""
    deterministic_initial_frames =\
        initial_frame_rollouts[0][:frame_stack_size]
    if not hparams.simulation_random_starts:
      # Deterministic starts: repeat first frames from the first rollout.
      initial_frames = [deterministic_initial_frames] * batch_size
    else:
      # Random starts: choose random initial frames from random rollouts.
      initial_frames = random_rollout_subsequences(
          initial_frame_rollouts, batch_size, frame_stack_size
      )
      if hparams.simulation_flip_first_random_for_beginning:
        # Flip first entry in the batch for deterministic initial frames.
        initial_frames[0] = deterministic_initial_frames
    # Stack decoded observations into an array of shape
    # (batch, frame_stack_size, ...frame dims...).
    return np.stack([
        [frame.observation.decode() for frame in initial_frame_stack]
        for initial_frame_stack in initial_frames
    ])
  env_fn = make_simulated_env_fn(
      real_env, hparams, hparams.simulated_batch_size, initial_frame_chooser,
      world_model_dir
  )
  base_algo_str = hparams.base_algo
  train_hparams = trainer_lib.create_hparams(hparams.base_algo_params)
  # Copy "<algo>_"-prefixed overrides from the loop hparams into the
  # algorithm hparams.
  rl_utils.update_hparams_from_hparams(
      train_hparams, hparams, base_algo_str + "_"
  )
  final_epoch = hparams.epochs - 1
  # Train longer towards the end of the loop: 3x steps on the final epoch,
  # 2x on the epochs 3 and 7 before the last one.
  is_special_epoch = (epoch + 3) == final_epoch or (epoch + 7) == final_epoch
  is_final_epoch = epoch == final_epoch
  env_step_multiplier = 3 if is_final_epoch else 2 if is_special_epoch else 1
  learner.train(
      env_fn, train_hparams, simulated=True, save_continuously=True,
      epoch=epoch, env_step_multiplier=env_step_multiplier
  )
def train_agent_real_env(env, learner, hparams, epoch):
  """Train the agent directly in the real environment for one increment."""
  train_hparams = trainer_lib.create_hparams(hparams.base_algo_params)
  # Overrides prefixed with "real_<algo>_" apply only to real-env training.
  prefix = "real_" + hparams.base_algo + "_"
  rl_utils.update_hparams_from_hparams(train_hparams, hparams, prefix)
  learner.train(
      rl.make_real_env_fn(env),
      train_hparams,
      simulated=False,
      save_continuously=False,
      epoch=epoch,
      sampling_temp=hparams.real_sampling_temp,
      num_env_steps=real_env_step_increment(hparams),
  )
  # Save unfinished rollouts to history.
  env.reset()
def train_world_model(
    env, data_dir, output_dir, hparams, world_model_steps_num, epoch
):
  """Train the world model on env's data; returns the new global-step target."""
  world_model_steps_num += world_model_step_increment(
      hparams, is_initial_epoch=(epoch == 0)
  )
  model_hparams = trainer_lib.create_hparams(hparams.generative_model_params)
  model_hparams.learning_rate = model_hparams.learning_rate_constant
  if epoch > 0:
    # After the first epoch, scale the learning rate by the configured bump.
    model_hparams.learning_rate *= hparams.learning_rate_bump
  restarter = Restarter("world_model", output_dir, world_model_steps_num)
  if not restarter.should_skip:
    with restarter.training_loop():
      train_supervised(
          problem=env,
          model_name=hparams.generative_model,
          hparams=model_hparams,
          data_dir=data_dir,
          output_dir=output_dir,
          train_steps=restarter.target_global_step,
          eval_steps=100,
          local_eval_frequency=2000
      )
  return world_model_steps_num
def evaluate_world_model(real_env, hparams, world_model_dir, debug_video_path):
  """Evaluate the world model (reward accuracy).

  Replays real rollout action sequences through the simulated env and
  compares cumulative rewards at several horizon lengths; also writes a
  side-by-side sim/real/error debug video to debug_video_path.
  """
  frame_stack_size = hparams.frame_stack_size
  # Mutated in place inside the loop; the chooser closure reads it on reset.
  rollout_subsequences = []
  def initial_frame_chooser(batch_size):
    assert batch_size == len(rollout_subsequences)
    return np.stack([
        [frame.observation.decode() for frame in subsequence[:frame_stack_size]]
        for subsequence in rollout_subsequences
    ])
  env_fn = make_simulated_env_fn(
      real_env, hparams, hparams.wm_eval_batch_size, initial_frame_chooser,
      world_model_dir
  )
  sim_env = env_fn(in_graph=False)
  # Longest horizon we will evaluate; shorter horizons are checkpoints of it.
  subsequence_length = int(
      max(hparams.wm_eval_rollout_ratios) * hparams.simulated_rollout_length
  )
  rollouts = real_env.current_epoch_rollouts(
      split=tf.contrib.learn.ModeKeys.EVAL,
      minimal_rollout_frames=(subsequence_length + frame_stack_size)
  )
  video_writer = common_video.WholeVideoWriter(
      fps=10, output_path=debug_video_path, file_format="avi"
  )
  # horizon length -> list of per-batch reward-accuracy values.
  reward_accuracies_by_length = {
      int(ratio * hparams.simulated_rollout_length): []
      for ratio in hparams.wm_eval_rollout_ratios
  }
  for _ in range(hparams.wm_eval_num_batches):
    # Slice-assign so the closure above sees the fresh subsequences.
    rollout_subsequences[:] = random_rollout_subsequences(
        rollouts, hparams.wm_eval_batch_size,
        subsequence_length + frame_stack_size
    )
    # Drop the frames consumed by the stacked initial state (keep the last
    # stacked frame as step 0 of the evaluated trajectory).
    eval_subsequences = [
        subsequence[(frame_stack_size - 1):]
        for subsequence in rollout_subsequences
    ]
    # Check that the initial observation is the same in the real and simulated
    # rollout.
    sim_init_obs = sim_env.reset()
    def decode_real_obs(index):
      return np.stack([
          subsequence[index].observation.decode()
          for subsequence in eval_subsequences  # pylint: disable=cell-var-from-loop
      ])
    real_init_obs = decode_real_obs(0)
    assert np.all(sim_init_obs == real_init_obs)
    debug_frame_batches = []
    def append_debug_frame_batch(sim_obs, real_obs):
      # Error channel: absolute difference minus a tolerance of 10, clipped
      # at zero, so small prediction errors don't show up in the video.
      # NOTE(review): np.int is deprecated (removed in NumPy >= 1.24); this
      # code targets the older NumPy pinned by this codebase — confirm before
      # upgrading.
      errs = np.maximum(
          np.abs(sim_obs.astype(np.int) - real_obs, dtype=np.int) - 10, 0
      ).astype(np.uint8)
      debug_frame_batches.append(  # pylint: disable=cell-var-from-loop
          np.concatenate([sim_obs, real_obs, errs], axis=2)
      )
    append_debug_frame_batch(sim_init_obs, real_init_obs)
    (sim_cum_rewards, real_cum_rewards) = (
        np.zeros(hparams.wm_eval_batch_size) for _ in range(2)
    )
    for i in range(subsequence_length):
      # Replay the real actions in the simulated env and accumulate rewards.
      actions = [subsequence[i].action for subsequence in eval_subsequences]
      (sim_obs, sim_rewards, _) = sim_env.step(actions)
      sim_cum_rewards += sim_rewards
      real_cum_rewards += [
          subsequence[i + 1].reward for subsequence in eval_subsequences
      ]
      for (length, reward_accuracies) in six.iteritems(
          reward_accuracies_by_length
      ):
        if i + 1 == length:
          # Fraction of the batch whose cumulative reward matches exactly.
          reward_accuracies.append(
              np.sum(sim_cum_rewards == real_cum_rewards) /
              len(real_cum_rewards)
          )
      real_obs = decode_real_obs(i + 1)
      append_debug_frame_batch(sim_obs, real_obs)
    for debug_frames in np.stack(debug_frame_batches, axis=1):
      for debug_frame in debug_frames:
        video_writer.write(debug_frame)
  video_writer.finish_to_disk()
  return {
      "reward_accuracy/at_{}".format(length): np.mean(reward_accuracies)
      for (length, reward_accuracies) in six.iteritems(
          reward_accuracies_by_length
      )
  }
def load_metrics(event_dir, epoch):
  """Loads metrics for this epoch if they have already been written.

  This reads the entire event file but it's small with just per-epoch metrics.

  Args:
    event_dir: directory containing TF event files.
    epoch: event step whose metrics should be loaded.

  Returns:
    Dict mapping metric tag to its simple_value at the given epoch.
  """
  metrics = {}
  for name in tf.gfile.ListDirectory(event_dir):
    event_path = os.path.join(event_dir, name)
    for event in tf.train.summary_iterator(event_path):
      if event.step != epoch or not event.HasField("summary"):
        continue
      first_value = event.summary.value[0]
      metrics[first_value.tag] = first_value.simple_value
  return metrics
def summarize_metrics(eval_metrics_writer, metrics, epoch):
  """Write each metric as a scalar summary at step `epoch`, then flush."""
  for (tag, scalar) in six.iteritems(metrics):
    summary = tf.Summary()
    summary.value.add(tag=tag, simple_value=scalar)
    eval_metrics_writer.add_summary(summary, epoch)
  eval_metrics_writer.flush()
def training_loop(hparams, output_dir, report_fn=None, report_metric=None):
  """Run the main training loop.

  Alternates world-model training, policy training in the simulated env, and
  policy training in the real env for hparams.epochs epochs, evaluating and
  summarizing metrics after each epoch.

  Args:
    hparams: loop hparams (see trainer_model_based_params).
    output_dir: base directory for data, checkpoints and metrics.
    report_fn: optional callback (metric_value, epoch) for tuning services.
    report_metric: name of the metric passed to report_fn; required when
      report_fn is set.

  Returns:
    The metrics dict from the final epoch (or 0.0 if stopped early).
  """
  if report_fn:
    assert report_metric is not None
  # Directories
  subdirectories = [
      "data", "tmp", "world_model", ("world_model", "debug_videos"),
      "policy", "eval_metrics"
  ]
  directories = setup_directories(output_dir, subdirectories)
  # Epoch -1 marks the initial, pre-loop data-collection phase.
  epoch = -1
  data_dir = directories["data"]
  env = rl_utils.setup_env(
      hparams, batch_size=hparams.real_batch_size,
      max_num_noops=hparams.max_num_noops
  )
  env.start_new_epoch(epoch, data_dir)
  learner = rl_utils.LEARNERS[hparams.base_algo](
      hparams.frame_stack_size, directories["policy"],
      directories["policy"], hparams.epochs
  )
  # Timing log function
  log_relative_time = make_relative_timing_fn()
  # Per-epoch state
  epoch_metrics = []
  metrics = {}
  # Collect data from the real environment.
  policy_model_dir = directories["policy"]
  tf.logging.info("Initial training of the policy in real environment.")
  train_agent_real_env(env, learner, hparams, epoch)
  metrics["mean_reward/train/clipped"] = rl_utils.compute_mean_reward(
      env.current_epoch_rollouts(), clipped=True
  )
  tf.logging.info("Mean training reward (initial): {}".format(
      metrics["mean_reward/train/clipped"]
  ))
  env.generate_data(data_dir)
  eval_metrics_writer = tf.summary.FileWriter(
      directories["eval_metrics"]
  )
  world_model_steps_num = 0
  for epoch in range(hparams.epochs):
    log = make_log_fn(epoch, log_relative_time)
    # Train world model
    log("Training world model")
    world_model_steps_num = train_world_model(
        env, data_dir, directories["world_model"], hparams,
        world_model_steps_num, epoch
    )
    # Train agent
    log("Training policy in simulated environment.")
    train_agent(env, learner, directories["world_model"], hparams, epoch)
    env.start_new_epoch(epoch, data_dir)
    # Train agent on real env (short)
    log("Training policy in real environment.")
    train_agent_real_env(env, learner, hparams, epoch)
    if hparams.stop_loop_early:
      return 0.0
    env.generate_data(data_dir)
    metrics = load_metrics(directories["eval_metrics"], epoch)
    if metrics:
      # Skip eval if metrics have already been written for this epoch. Otherwise
      # we'd overwrite them with wrong data.
      log("Metrics found for this epoch, skipping evaluation.")
    else:
      metrics["mean_reward/train/clipped"] = rl_utils.compute_mean_reward(
          env.current_epoch_rollouts(), clipped=True
      )
      log("Mean training reward: {}".format(
          metrics["mean_reward/train/clipped"]
      ))
      eval_metrics = rl_utils.evaluate_all_configs(hparams, policy_model_dir)
      log("Agent eval metrics:\n{}".format(pprint.pformat(eval_metrics)))
      metrics.update(eval_metrics)
      if hparams.eval_world_model:
        # Note: tuple key into directories (see setup_directories).
        debug_video_path = os.path.join(
            directories["world_model", "debug_videos"],
            "{}.avi".format(env.current_epoch)
        )
        wm_metrics = evaluate_world_model(
            env, hparams, directories["world_model"], debug_video_path
        )
        log("World model eval metrics:\n{}".format(pprint.pformat(wm_metrics)))
        metrics.update(wm_metrics)
      summarize_metrics(eval_metrics_writer, metrics, epoch)
      # Report metrics
      if report_fn:
        if report_metric == "mean_reward":
          metric_name = rl_utils.get_metric_name(
              sampling_temp=hparams.eval_sampling_temps[0],
              max_num_noops=hparams.eval_max_num_noops,
              clipped=False
          )
          report_fn(eval_metrics[metric_name], epoch)
        else:
          report_fn(eval_metrics[report_metric], epoch)
    epoch_metrics.append(metrics)
  # Return the evaluation metrics from the final epoch
  return epoch_metrics[-1]
def main(_):
  """Entry point: build loop hparams from flags and run the training loop."""
  hp = trainer_model_based_params.create_loop_hparams()
  # This script only trains; evaluating an existing job dir is not supported.
  assert not FLAGS.job_dir_to_evaluate
  training_loop(hp, FLAGS.output_dir)
# Script entry point: enable INFO logging and delegate to main() via tf.app.
if __name__ == "__main__":
  tf.logging.set_verbosity(tf.logging.INFO)
  tf.app.run()
| 32.343454 | 84 | 0.723673 |
4d3fb74ca65dfc00faf3136555071bc3db036c89 | 494 | py | Python | AppServer/lib/django-1.4/tests/regressiontests/urlpatterns_reverse/urlconf_inner.py | loftwah/appscale | 586fc1347ebc743d7a632de698f4dbfb09ae38d6 | [
"Apache-2.0"
] | 790 | 2015-01-03T02:13:39.000Z | 2020-05-10T19:53:57.000Z | AppServer/lib/django-1.4/tests/regressiontests/urlpatterns_reverse/urlconf_inner.py | nlake44/appscale | 6944af660ca4cb772c9b6c2332ab28e5ef4d849f | [
"Apache-2.0"
] | 1,361 | 2015-01-08T23:09:40.000Z | 2020-04-14T00:03:04.000Z | AppServer/lib/django-1.4/tests/regressiontests/urlpatterns_reverse/urlconf_inner.py | nlake44/appscale | 6944af660ca4cb772c9b6c2332ab28e5ef4d849f | [
"Apache-2.0"
] | 155 | 2015-01-08T22:59:31.000Z | 2020-04-08T08:01:53.000Z | from django.conf.urls import patterns, url
from django.template import Template, Context
from django.http import HttpResponse
def inner_view(request):
    """Render URL-reversal results for the 'outer' and 'inner' named patterns."""
    template_source = (
        '{% load url from future %}'
        '{% url "outer" as outer_url %}outer:{{ outer_url }},'
        '{% url "inner" as inner_url %}inner:{{ inner_url }}'
    )
    rendered = Template(template_source).render(Context())
    return HttpResponse(rendered)
# URLconf used by the reverse-URL tests; exposes inner_view as 'inner'.
urlpatterns = patterns('',
    url(r'^second_test/$', inner_view, name='inner'),
)
b80580c0abf6d05197b734bf6bb773f3f990830c | 104 | py | Python | tests/test_settings_example.py | HotMaps/building_h-c | db5a103cb9d41b88e6cdc3c9194fc1ec9fc5c31f | [
"Apache-2.0"
] | 1 | 2017-05-12T11:31:09.000Z | 2017-05-12T11:31:09.000Z | tests/test_settings_example.py | HotMaps/HotMaps-building_h-c | db5a103cb9d41b88e6cdc3c9194fc1ec9fc5c31f | [
"Apache-2.0"
] | 2 | 2017-08-22T13:53:22.000Z | 2017-09-25T07:27:28.000Z | tests/test_settings_example.py | HotMaps/Hotmaps-building_h-c | db5a103cb9d41b88e6cdc3c9194fc1ec9fc5c31f | [
"Apache-2.0"
] | null | null | null | # TODO: set up the URL of your localhost (specially the port)
base_url = "http://localhost:9006/api/v1"
| 34.666667 | 61 | 0.730769 |
2b3d7f5404628d033869ccf82766023215e7b25c | 3,760 | py | Python | scripts/crawl.py | joye1503/cocrawler | 39b543320e91477412ab8bfc8402c88c3304c553 | [
"Apache-2.0"
] | 166 | 2016-07-18T19:37:34.000Z | 2022-03-06T18:26:50.000Z | scripts/crawl.py | joye1503/cocrawler | 39b543320e91477412ab8bfc8402c88c3304c553 | [
"Apache-2.0"
] | 9 | 2016-10-22T18:20:56.000Z | 2021-04-06T05:28:04.000Z | scripts/crawl.py | joye1503/cocrawler | 39b543320e91477412ab8bfc8402c88c3304c553 | [
"Apache-2.0"
] | 25 | 2017-02-28T19:41:41.000Z | 2021-07-10T11:20:33.000Z | #!/usr/bin/env python
'''
CoCrawler web crawler, main program
'''
import sys
import os
import faulthandler
import gc
import argparse
import asyncio
import logging
import warnings
import cocrawler
import cocrawler.config as config
import cocrawler.stats as stats
import cocrawler.timer as timer
import cocrawler.webserver as webserver
import cocrawler.memory as memory
# Module-level logger for this entry-point script.
LOGGER = logging.getLogger(__name__)
# Dump Python tracebacks on fatal signals (e.g. SIGSEGV) to aid debugging.
faulthandler.enable()
# Command-line interface for the crawler.
ARGS = argparse.ArgumentParser(description='CoCrawler web crawler')
ARGS.add_argument('--config', action='append')  # repeatable config overrides
ARGS.add_argument('--configfile', action='store')
ARGS.add_argument('--no-test', action='store_true', help='do not check stats at the end of crawling')
ARGS.add_argument('--printdefault', action='store_true', help='print the default configuration')
ARGS.add_argument('--printfinal', action='store_true', help='print the final configuration')
ARGS.add_argument('--load', action='store', help='load saved crawl')
ARGS.add_argument('--loglevel', action='store', default='INFO', help='set logging level, default INFO')
ARGS.add_argument('--verbose', '-v', action='count', help='set logging level to DEBUG')
def main():
    '''
    Main program: parse args, read config, set up event loop, run the crawler.
    '''
    args = ARGS.parse_args()

    if args.printdefault:
        config.print_default()
        sys.exit(1)

    # Log level precedence: env var, then --loglevel, then --verbose.
    # NOTE(review): --loglevel defaults to 'INFO', so loglevel is never None
    # by the time the --verbose branch runs; -v is effectively a no-op.
    # Confirm the intended precedence before changing it.
    loglevel = os.getenv('COCRAWLER_LOGLEVEL')
    if loglevel is None and args.loglevel:
        loglevel = args.loglevel
    if loglevel is None and args.verbose:
        loglevel = 'DEBUG'
    logging.basicConfig(level=loglevel)

    config.config(args.configfile, args.config)
    if args.printfinal:
        config.print_final()
        sys.exit(1)

    memory.limit_resources()

    if os.getenv('PYTHONASYNCIODEBUG') is not None:
        # Surface asyncio's ResourceWarnings while asyncio debugging is on.
        logging.captureWarnings(True)
        warnings.simplefilter('default', category=ResourceWarning)
        if LOGGER.getEffectiveLevel() > logging.WARNING:
            LOGGER.setLevel(logging.WARNING)
            LOGGER.warning('Lowered logging level to WARNING because PYTHONASYNCIODEBUG env var is set')
        LOGGER.warning('Configured logging system to show ResourceWarning because PYTHONASYNCIODEBUG env var is set')
        LOGGER.warning('Note that this does have a significant impact on asyncio overhead')

    if os.getenv('COCRAWLER_GC_DEBUG') is not None:
        LOGGER.warning('Configuring gc debugging')
        gc.set_debug(gc.DEBUG_STATS | gc.DEBUG_UNCOLLECTABLE)

    kwargs = {}
    if args.load:
        kwargs['load'] = args.load
    if args.no_test:
        kwargs['no_test'] = True

    crawler = cocrawler.Crawler(**kwargs)

    loop = asyncio.get_event_loop()

    slow_callback_duration = os.getenv('ASYNCIO_SLOW_CALLBACK_DURATION')
    if slow_callback_duration:
        # Bug fix: convert once and log the float. The original passed the
        # raw env *string* to the '%f' format, which breaks log formatting.
        loop.slow_callback_duration = float(slow_callback_duration)
        LOGGER.warning('set slow_callback_duration to %f',
                       loop.slow_callback_duration)

    if config.read('CarbonStats'):
        timer.start_carbon()

    if config.read('REST'):
        app = webserver.make_app()
    else:
        app = None

    try:
        loop.run_until_complete(crawler.crawl())
    except KeyboardInterrupt:
        sys.stderr.flush()
        print('\nInterrupt. Exiting cleanly.\n')
        crawler.cancel_workers()
    finally:
        # Always tear down the crawler, web app and stats exporter, even if
        # the crawl raised.
        loop.run_until_complete(crawler.close())
        if app:
            webserver.close(app)
        if config.read('CarbonStats'):
            timer.close()
        # vodoo recommended by advanced aiohttp docs for graceful shutdown
        # https://github.com/aio-libs/aiohttp/issues/1925
        loop.run_until_complete(asyncio.sleep(0.250))
        loop.close()
if __name__ == '__main__':
    main()
    # Propagate the crawl's aggregated status code to the shell.
    exit(stats.exitstatus)
| 31.596639 | 117 | 0.695479 |
13433486bc8d2d952f3eb3478cf5e6b66de45dcb | 8,100 | py | Python | hc/api/models.py | Masterabram/healthchecks | 229d72eb4bc7d21bfd9540a75093abad7fbc45ca | [
"BSD-3-Clause"
] | null | null | null | hc/api/models.py | Masterabram/healthchecks | 229d72eb4bc7d21bfd9540a75093abad7fbc45ca | [
"BSD-3-Clause"
] | null | null | null | hc/api/models.py | Masterabram/healthchecks | 229d72eb4bc7d21bfd9540a75093abad7fbc45ca | [
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
import hashlib
import json
import uuid
from datetime import timedelta as td
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models
from django.urls import reverse
from django.utils import timezone
from hc.api import transports
from hc.lib import emails
STATUSES = (
("up", "Up"),
("down", "Down"),
("new", "New"),
("paused", "Paused")
)
DEFAULT_TIMEOUT = td(days=1)
DEFAULT_GRACE = td(hours=1)
CHANNEL_KINDS = (("email", "Email"), ("webhook", "Webhook"),
("hipchat", "HipChat"),
("slack", "Slack"), ("pd", "PagerDuty"), ("po", "Pushover"),
("victorops", "VictorOps"))
PO_PRIORITIES = {
-2: "lowest",
-1: "low",
0: "normal",
1: "high",
2: "emergency"
}
class Check(models.Model):
    """A monitored job: receives pings and derives an up/down status."""
    class Meta:
        # sendalerts command will query using these
        index_together = ["status", "user", "alert_after"]
    name = models.CharField(max_length=100, blank=True)
    # Space-separated tag words; see tags_list().
    tags = models.CharField(max_length=500, blank=True)
    code = models.UUIDField(default=uuid.uuid4, editable=False, db_index=True)
    user = models.ForeignKey(User, blank=True, null=True)
    created = models.DateTimeField(auto_now_add=True)
    # Expected max interval between pings; after timeout + grace with no
    # ping the check is considered "down" (see get_status()).
    timeout = models.DurationField(default=DEFAULT_TIMEOUT)
    grace = models.DurationField(default=DEFAULT_GRACE)
    n_pings = models.IntegerField(default=0)
    last_ping = models.DateTimeField(null=True, blank=True)
    alert_after = models.DateTimeField(null=True, blank=True, editable=False)
    status = models.CharField(max_length=6, choices=STATUSES, default="new")
    def name_then_code(self):
        """Return the display name if set, else the UUID code as a string."""
        if self.name:
            return self.name
        return str(self.code)
    def url(self):
        """Return the public ping URL for this check."""
        return settings.PING_ENDPOINT + str(self.code)
    def log_url(self):
        """Return the absolute URL of this check's event log page."""
        return settings.SITE_ROOT + reverse("hc-log", args=[self.code])
    def email(self):
        """Return the unique email address that pings this check."""
        return "%s@%s" % (self.code, settings.PING_EMAIL_DOMAIN)
    def send_alert(self):
        """Notify all assigned channels; return a list of (channel, error)."""
        if self.status not in ("up", "down"):
            raise NotImplementedError("Unexpected status: %s" % self.status)
        errors = []
        for channel in self.channel_set.all():
            error = channel.notify(self)
            # "no-op" means the channel deliberately skipped this event.
            if error not in ("", "no-op"):
                errors.append((channel, error))
        return errors
    def get_status(self):
        """Compute the current status from last_ping, timeout and grace."""
        if self.status in ("new", "paused"):
            return self.status
        now = timezone.now()
        # assumes last_ping is set whenever status is "up"/"down" --
        # TODO confirm no code path breaks this invariant
        if self.last_ping + self.timeout + self.grace > now:
            return "up"
        return "down"
    def in_grace_period(self):
        """Return True if the check is late but still within its grace window."""
        if self.status in ("new", "paused"):
            return False
        up_ends = self.last_ping + self.timeout
        grace_ends = up_ends + self.grace
        return up_ends < timezone.now() < grace_ends
    def assign_all_channels(self):
        """Attach every channel of this check's owner to this check."""
        if self.user:
            channels = Channel.objects.filter(user=self.user)
            self.channel_set.add(*channels)
    def tags_list(self):
        """Split the tags field into a list of non-empty tag strings."""
        return [t.strip() for t in self.tags.split(" ") if t.strip()]
    def to_dict(self):
        """Serialize the check for the public JSON API."""
        pause_rel_url = reverse("hc-api-pause", args=[self.code])
        result = {
            "name": self.name,
            "ping_url": self.url(),
            "pause_url": settings.SITE_ROOT + pause_rel_url,
            "tags": self.tags,
            "timeout": int(self.timeout.total_seconds()),
            "grace": int(self.grace.total_seconds()),
            "n_pings": self.n_pings,
            "status": self.get_status()
        }
        if self.last_ping:
            result["last_ping"] = self.last_ping.isoformat()
            result["next_ping"] = (self.last_ping + self.timeout).isoformat()
        else:
            result["last_ping"] = None
            result["next_ping"] = None
        return result
class Ping(models.Model):
    """One received ping event for a Check, with request metadata."""
    # Sequential ping number within the owning check (nullable).
    n = models.IntegerField(null=True)
    owner = models.ForeignKey(Check)
    created = models.DateTimeField(auto_now_add=True)
    # URL scheme the ping arrived over; defaults to "http".
    scheme = models.CharField(max_length=10, default="http")
    remote_addr = models.GenericIPAddressField(blank=True, null=True)
    method = models.CharField(max_length=10, blank=True)
    # Client User-Agent header.
    ua = models.CharField(max_length=200, blank=True)
class Channel(models.Model):
    """A notification channel (email, webhook, Slack, ...) owned by a user."""
    code = models.UUIDField(default=uuid.uuid4, editable=False)
    user = models.ForeignKey(User)
    created = models.DateTimeField(auto_now_add=True)
    kind = models.CharField(max_length=20, choices=CHANNEL_KINDS)
    # Kind-specific payload: an address, webhook URL(s), or a JSON blob.
    value = models.TextField(blank=True)
    email_verified = models.BooleanField(default=False)
    checks = models.ManyToManyField(Check)

    def assign_all_checks(self):
        """Attach this channel to every check owned by its user."""
        checks = Check.objects.filter(user=self.user)
        self.checks.add(*checks)

    def make_token(self):
        """Return a deterministic token derived from code and SECRET_KEY."""
        seed = "%s%s" % (self.code, settings.SECRET_KEY)
        seed = seed.encode("utf8")
        return hashlib.sha1(seed).hexdigest()

    def send_verify_link(self):
        """Email the channel's address a link to verify ownership."""
        args = [self.code, self.make_token()]
        verify_link = reverse("hc-verify-email", args=args)
        verify_link = settings.SITE_ROOT + verify_link
        emails.verify_email(self.value, {"verify_link": verify_link})

    @property
    def transport(self):
        """Return the transports.* instance matching this channel's kind.

        Raises NotImplementedError for unknown kinds.
        """
        if self.kind == "email":
            return transports.Email(self)
        elif self.kind == "webhook":
            return transports.Webhook(self)
        elif self.kind == "slack":
            return transports.Slack(self)
        elif self.kind == "hipchat":
            return transports.HipChat(self)
        elif self.kind == "pd":
            return transports.PagerDuty(self)
        elif self.kind == "victorops":
            return transports.VictorOps(self)
        elif self.kind == "pushbullet":
            return transports.Pushbullet(self)
        elif self.kind == "po":
            return transports.Pushover(self)
        else:
            raise NotImplementedError("Unknown channel kind: %s" % self.kind)

    def notify(self, check):
        """Deliver a notification for `check`, retrying up to 3 times.

        Returns the last transport error ("" on success). Unless the
        transport reported "no-op", a Notification row is recorded.
        """
        error = ""
        # Make 3 attempts--
        for _ in range(3):
            error = self.transport.notify(check) or ""
            if error in ("", "no-op"):
                break  # Success!

        if error != "no-op":
            n = Notification(owner=check, channel=self)
            n.check_status = check.status
            n.error = error
            n.save()

        return error

    def test(self):
        """Send a test notification through this channel's transport.

        Bug fix: `transport` is a property, so the original
        `self.transport().test()` *called* the transport instance and
        raised TypeError.
        """
        return self.transport.test()

    @property
    def po_value(self):
        """Return (user_key, priority, priority_label) for Pushover channels."""
        assert self.kind == "po"
        user_key, prio = self.value.split("|")
        prio = int(prio)
        return user_key, prio, PO_PRIORITIES[prio]

    @property
    def value_down(self):
        """Return the webhook URL used when a check goes down (line 1)."""
        assert self.kind == "webhook"
        parts = self.value.split("\n")
        return parts[0]

    @property
    def value_up(self):
        """Return the webhook URL used when a check goes up (line 2, or "")."""
        assert self.kind == "webhook"
        parts = self.value.split("\n")
        return parts[1] if len(parts) == 2 else ""

    @property
    def slack_team(self):
        """Return the Slack team name, or None for legacy non-JSON values."""
        assert self.kind == "slack"
        if not self.value.startswith("{"):
            return None
        doc = json.loads(self.value)
        return doc["team_name"]

    @property
    def slack_channel(self):
        """Return the Slack channel name, or None for legacy non-JSON values."""
        assert self.kind == "slack"
        if not self.value.startswith("{"):
            return None
        doc = json.loads(self.value)
        return doc["incoming_webhook"]["channel"]

    @property
    def slack_webhook_url(self):
        """Return the Slack webhook URL (legacy values store it bare)."""
        assert self.kind == "slack"
        if not self.value.startswith("{"):
            return self.value
        doc = json.loads(self.value)
        return doc["incoming_webhook"]["url"]

    def latest_notification(self):
        """Return the most recent Notification sent through this channel."""
        return Notification.objects.filter(channel=self).latest()
class Notification(models.Model):
    """Record of a single alert delivery attempt to a channel."""
    class Meta:
        get_latest_by = "created"
    owner = models.ForeignKey(Check)
    # Status of the check at the time the notification was sent.
    check_status = models.CharField(max_length=6)
    channel = models.ForeignKey(Channel)
    created = models.DateTimeField(auto_now_add=True)
    # Transport error message; empty string means success.
    error = models.CharField(max_length=200, blank=True)
| 30.451128 | 78 | 0.610494 |
9639537114eb5c830ebb458cab101c4d5d6aa895 | 1,773 | py | Python | plugins/quetz_conda_suggest/tests/conftest.py | davidbrochart/quetz | fd9b95add5b8f7a1c0863e7e08bf5a6ab5b84984 | [
"BSD-3-Clause"
] | null | null | null | plugins/quetz_conda_suggest/tests/conftest.py | davidbrochart/quetz | fd9b95add5b8f7a1c0863e7e08bf5a6ab5b84984 | [
"BSD-3-Clause"
] | null | null | null | plugins/quetz_conda_suggest/tests/conftest.py | davidbrochart/quetz | fd9b95add5b8f7a1c0863e7e08bf5a6ab5b84984 | [
"BSD-3-Clause"
] | null | null | null | import json
import uuid
from pytest import fixture
from quetz_conda_suggest import db_models
from quetz import rest_models
from quetz.dao import Dao
from quetz.db_models import User
pytest_plugins = "quetz.testing.fixtures"
@fixture
def dao(db) -> Dao:
    """Data access object wrapping the test database session."""
    return Dao(db)
@fixture
def user(db):
    """A persisted test user (no explicit teardown)."""
    user = User(id=uuid.uuid4().bytes, username="madhurt")
    db.add(user)
    db.commit()
    yield user
@fixture
def channel(dao, user, db):
    """A public test channel owned by `user`; deleted on teardown."""
    channel_data = rest_models.Channel(
        name="test_channel",
        private=False,
    )
    channel = dao.create_channel(channel_data, user.id, "owner")
    yield channel
    # teardown: remove the channel so tests stay isolated
    db.delete(channel)
    db.commit()
@fixture
def package(dao, user, channel, db):
    """A test package in `channel` owned by `user`; deleted on teardown."""
    new_package_data = rest_models.Package(name="test-package")
    package = dao.create_package(
        channel.name,
        new_package_data,
        user_id=user.id,
        role="owner",
    )
    yield package
    db.delete(package)
    db.commit()
@fixture
def package_version(user, channel, db, dao, package):
    """A single linux-64 version (0.1, build 0) of `package`; deleted on teardown."""
    package_format = 'tarbz2'
    package_info = '{"size": 5000, "subdir": "linux-64"}'
    version = dao.create_version(
        channel.name,
        package.name,
        package_format,
        "linux-64",
        "0.1",
        "0",
        "0",
        "test-package-0.1-0.tar.bz2",
        package_info,
        user.id,
    )
    yield version
    db.delete(version)
    db.commit()
@fixture
def subdir():
    """Platform subdir used throughout these tests."""
    return "linux-64"
@fixture
def package_conda_suggest(package_version, db):
    """Conda-suggest metadata row mapping a binary name to `test-package`."""
    meta = db_models.CondaSuggestMetadata(
        version_id=package_version.id,
        data=json.dumps({"test-bin": "test-package"}),
    )
    db.add(meta)
    db.commit()
    yield meta
    db.delete(meta)
    db.commit()
3aced571fc5351bb6103362ca8f15cd07bffb283 | 3,143 | py | Python | isi_sdk_8_1_0/isi_sdk_8_1_0/models/namespace_metadata_list.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 24 | 2018-06-22T14:13:23.000Z | 2022-03-23T01:21:26.000Z | isi_sdk_8_1_0/isi_sdk_8_1_0/models/namespace_metadata_list.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 46 | 2018-04-30T13:28:22.000Z | 2022-03-21T21:11:07.000Z | isi_sdk_8_1_0/isi_sdk_8_1_0/models/namespace_metadata_list.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 29 | 2018-06-19T00:14:04.000Z | 2022-02-08T17:51:19.000Z | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 5
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from isi_sdk_8_1_0.models.namespace_metadata_list_attrs import NamespaceMetadataListAttrs # noqa: F401,E501
class NamespaceMetadataList(object):
    """Swagger model wrapping a list of namespace metadata attributes.

    Originally auto generated by the swagger code generator; updated to
    drop the unnecessary ``six`` compatibility shim on this Python-3-only
    codebase (``six.iteritems`` -> ``dict.items``), with the lambdas in
    ``to_dict`` rewritten as comprehensions. Behavior is unchanged.
    """
    # swagger_types: attribute name -> attribute type
    swagger_types = {
        'attrs': 'list[NamespaceMetadataListAttrs]'
    }

    # attribute_map: attribute name -> JSON key in the API definition
    attribute_map = {
        'attrs': 'attrs'
    }

    def __init__(self, attrs=None):  # noqa: E501
        """NamespaceMetadataList - a model defined in Swagger"""  # noqa: E501
        self._attrs = None
        self.discriminator = None
        if attrs is not None:
            self.attrs = attrs

    @property
    def attrs(self):
        """Get the list of metadata attributes.

        :rtype: list[NamespaceMetadataListAttrs]
        """
        return self._attrs

    @attrs.setter
    def attrs(self, attrs):
        """Set the list of metadata attributes.

        :type: list[NamespaceMetadataListAttrs]
        """
        self._attrs = attrs

    def to_dict(self):
        """Return the model properties as a dict, recursing into sub-models."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    x.to_dict() if hasattr(x, "to_dict") else x
                    for x in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Return True if both objects are equal."""
        if not isinstance(other, NamespaceMetadataList):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return True if both objects are not equal."""
        return not self == other
79a54b8309acaba05eae21a4586ae2929f352cfb | 3,816 | py | Python | TwScraper.py | EKOzkan/TwScraper | e5239a67edd30047ced8ff1aef489580b36104bd | [
"MIT"
] | null | null | null | TwScraper.py | EKOzkan/TwScraper | e5239a67edd30047ced8ff1aef489580b36104bd | [
"MIT"
] | null | null | null | TwScraper.py | EKOzkan/TwScraper | e5239a67edd30047ced8ff1aef489580b36104bd | [
"MIT"
] | null | null | null | import threading
import webview
import eel
import twint
from textblob import TextBlob
import datetime
import csv
print('TwScraper started.')
def Analyzer(num):
    """Run the eel backend: expose the scrape entry point and serve the UI.

    ``num`` is unused; it exists only because the thread target is started
    with a placeholder argument.
    """
    eel.init('web')

    @eel.expose
    def start_calculating(tw_hashtags, tw_username, tw_startdate, tw_enddate,
                          tw_sentimental):
        """JavaScript-facing entry point; delegates to printer()."""
        printer(tw_hashtags, tw_username, tw_startdate, tw_enddate,
                tw_sentimental)

    def printer(tw_hashtags, tw_username, tw_startdate, tw_enddate,
                tw_sentimental):
        """Scrape tweets with twint and optionally append sentiment columns."""
        # Normalize the comma-separated keyword list ("a, b" -> "a,b").
        tw_hashtags = tw_hashtags.replace(", ", ",")
        tw_hashtags = tw_hashtags.replace(" ,", ",")
        tw_hashtags_splitted = tw_hashtags.split(",")
        print(tw_hashtags)
        #array of keywords
        print(tw_hashtags_splitted)
        #username
        print(tw_username)
        #start date
        print(tw_startdate)
        #end date
        print(tw_enddate)
        #sentimental analysis
        print(tw_sentimental)

        #variable q contains twint configs
        q = twint.Config()
        if tw_username != 'None':
            q.Username = tw_username
        # twint expects "a OR b" syntax for multiple keywords.
        tw_hashtags = tw_hashtags.replace(",", " OR ")
        if tw_startdate != 'None':
            q.Since = str(tw_startdate)
            q.Until = str(tw_enddate)
        q.Search = tw_hashtags
        q.Store_csv = True
        q.Lang = 'en'

        #date and time as file name
        now = datetime.datetime.now()
        file_loc = str('outputs/' + now.strftime("%Y-%m-%d_%H-%M-%S") + '.csv')
        q.Output = file_loc

        #start twint search/scraping
        twint.run.Search(q)

        with open(file_loc, newline='', encoding='utf8') as f:
            data = list(csv.reader(f))
        # The header row always gains the sentiment columns, even when
        # analysis is disabled (preserves the original output layout).
        data[0].append('Sentimental_Analysis_Subjectivity')
        data[0].append('Sentimental_Analysis_Polarity_Value')
        data[0].append('Sentimental_Analysis_Polarity_Output')

        # Bug fix: the original tested the undefined name ``e`` here, which
        # raised NameError at runtime; test the sentiment flag instead.
        if str(tw_sentimental) != 'False':
            print('Scraping finished, now starting to do sentimental analysis.')
        else:
            print('Scraping finished.')

        if str(tw_sentimental) != 'False':
            i = 1
            while i < len(data):
                # Column 10 is assumed to be twint's tweet-text column --
                # TODO confirm against the twint CSV layout in use.
                tweet = data[i][10]
                tweet = tweet.replace('@', '')
                tweet = tweet.replace('#', '')
                stweet = TextBlob(tweet)
                data[i].append(stweet.sentiment.subjectivity)
                data[i].append(stweet.sentiment.polarity)
                if stweet.sentiment.polarity > 0:
                    data[i].append('Positive')
                if stweet.sentiment.polarity == 0:
                    data[i].append('Neutral')
                if stweet.sentiment.polarity < 0:
                    data[i].append('Negative')
                i = i + 1

        # Rewrite the CSV in place ('w' truncates; the explicit truncate(0)
        # in the original was redundant).
        with open(file_loc, 'w', newline='', encoding='utf8') as out_file:
            csv.writer(out_file).writerows(data)
        print('Scraping and analysis finished.')

    eel.start('index.html', size=(300, 700), mode=None, port=8078)  # Start
if __name__ == "__main__":
    # Run the eel backend in a worker thread; the argument 10 is a
    # placeholder (Analyzer ignores it).
    t1 = threading.Thread(target=Analyzer, args=(10,))
    # starting thread 1
    t1.start()
    # starting thread 2
    # The webview window blocks this (main) thread until the GUI is closed.
    webview.create_window('TwScrapper', 'http://localhost:8078/index.html', height=900, background_color='#1a1a1a')
    webview.start()
    # wait until thread 1 is completely executed
    t1.join()
    # wait until thread 2 is completely executed
    # both threads completely executed
    print("Done!")
96e98573f3a352b6931a5d5177faf2570501dfcc | 3,823 | py | Python | yt_dlp/extractor/videocampus_sachsen.py | jeroenj/yt-dlp | 11078c6d571673a0f09e21933f4ad1e6fcc35456 | [
"Unlicense"
] | 2 | 2022-03-14T15:34:14.000Z | 2022-03-23T17:05:42.000Z | yt_dlp/extractor/videocampus_sachsen.py | jeroenj/yt-dlp | 11078c6d571673a0f09e21933f4ad1e6fcc35456 | [
"Unlicense"
] | null | null | null | yt_dlp/extractor/videocampus_sachsen.py | jeroenj/yt-dlp | 11078c6d571673a0f09e21933f4ad1e6fcc35456 | [
"Unlicense"
] | 2 | 2022-01-05T08:23:25.000Z | 2022-03-06T21:33:37.000Z | # coding: utf-8
from .common import InfoExtractor
class VideocampusSachsenIE(InfoExtractor):
    """Extractor for videocampus.sachsen.de video and category pages.

    Handles two URL shapes: the short ``/m/<hex token>`` form (where the
    canonical 32-hex video id must be read from the page) and the
    ``(category/)video/<slug>/<32-hex id>`` form.
    """
    _VALID_URL = r'''(?x)https?://videocampus\.sachsen\.de/(?:
        m/(?P<tmp_id>[0-9a-f]+)|
        (?:category/)?video/(?P<display_id>[\w-]+)/(?P<id>[0-9a-f]{32})
    )'''
    _TESTS = [
        {
            'url': 'https://videocampus.sachsen.de/m/e0d6c8ce6e394c188f1342f1ab7c50ed6fc4490b808699801def5cb2e46d76ca7367f622a9f516c542ffb805b24d6b643bd7c81f385acaac4c59081b87a2767b',
            'info_dict': {
                'id': 'e6b9349905c1628631f175712250f2a1',
                'title': 'Konstruktiver Entwicklungsprozess Vorlesung 7',
                'ext': 'mp4',
            },
        },
        {
            'url': 'https://videocampus.sachsen.de/video/Was-ist-selbstgesteuertes-Lernen/fc99c527e4205b121cb7c74433469262',
            'info_dict': {
                'id': 'fc99c527e4205b121cb7c74433469262',
                'title': 'Was ist selbstgesteuertes Lernen?',
                'display_id': 'Was-ist-selbstgesteuertes-Lernen',
                'ext': 'mp4',
            },
        },
        {
            'url': 'https://videocampus.sachsen.de/category/video/Tutorial-zur-Nutzung-von-Adobe-Connect-aus-Veranstalter-Sicht/09d4ed029002eb1bdda610f1103dd54c/100',
            'info_dict': {
                'id': '09d4ed029002eb1bdda610f1103dd54c',
                'title': 'Tutorial zur Nutzung von Adobe Connect aus Veranstalter-Sicht',
                'display_id': 'Tutorial-zur-Nutzung-von-Adobe-Connect-aus-Veranstalter-Sicht',
                'ext': 'mp4',
            },
        },
    ]

    def _real_extract(self, url):
        video_id, tmp_id, display_id = self._match_valid_url(url).group('id', 'tmp_id', 'display_id')
        webpage = self._download_webpage(url, video_id or tmp_id, fatal=False) or ''
        # Bug fix: /m/<token> URLs are exactly the case where the canonical
        # 32-hex id is *not* in the URL and must come from the embed key in
        # the page. The original tested `not tmp_id`, leaving video_id as
        # None for /m/ URLs (and needlessly re-scraping it for the others).
        if not video_id:
            video_id = self._html_search_regex(
                r'src="https?://videocampus\.sachsen\.de/media/embed\?key=([0-9a-f]+)&',
                webpage, 'video_id')
        title = self._html_search_regex(
            (r'<h1>(?P<content>[^<]+)</h1>', *self._meta_regex('title')),
            webpage, 'title', group='content', fatal=False)
        formats, subtitles = self._extract_m3u8_formats_and_subtitles(
            f'https://videocampus.sachsen.de/media/hlsMedium/key/{video_id}/format/auto/ext/mp4/learning/0/path/m3u8',
            video_id, 'mp4', 'm3u8_native', m3u8_id='hls')
        self._sort_formats(formats)
        return {
            'id': video_id,
            'title': title,
            'display_id': display_id,
            'formats': formats,
            'subtitles': subtitles
        }
class VideocampusSachsenEmbedIE(InfoExtractor):
    """Extractor for videocampus.sachsen.de embed-player URLs."""
    _VALID_URL = r'https?://videocampus.sachsen.de/media/embed\?key=(?P<id>[0-9a-f]+)'
    _TESTS = [
        {
            'url': 'https://videocampus.sachsen.de/media/embed?key=fc99c527e4205b121cb7c74433469262',
            'info_dict': {
                'id': 'fc99c527e4205b121cb7c74433469262',
                'title': 'Was ist selbstgesteuertes Lernen?',
                'ext': 'mp4',
            },
        }
    ]
    def _real_extract(self, url):
        """Extract title and HLS formats for an embedded player page."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # Title is scraped from an <img> title attribute on the embed page.
        title = self._html_search_regex(r'<img[^>]*title="([^"<]+)"', webpage, 'title', fatal=False)
        formats, subtitles = self._extract_m3u8_formats_and_subtitles(
            f'https://videocampus.sachsen.de/media/hlsMedium/key/{video_id}/format/auto/ext/mp4/learning/0/path/m3u8',
            video_id, 'mp4', 'm3u8_native', m3u8_id='hls')
        self._sort_formats(formats)
        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'subtitles': subtitles,
        }
0a4d8624688d18e95273b8e96ac3c408930313c8 | 458 | py | Python | gcode_cflow/config.py | gluap/gcode_cflow | d7acf9aca591d5f1cbe39e85719ff55fdcc40c68 | [
"MIT"
] | null | null | null | gcode_cflow/config.py | gluap/gcode_cflow | d7acf9aca591d5f1cbe39e85719ff55fdcc40c68 | [
"MIT"
] | null | null | null | gcode_cflow/config.py | gluap/gcode_cflow | d7acf9aca591d5f1cbe39e85719ff55fdcc40c68 | [
"MIT"
] | null | null | null | from pathlib import Path
from typing import List
import yaml
from pydantic import BaseModel
class Config(BaseModel):
    """Validated configuration loaded from the YAML config file."""
    # Lookup values used by the gcode flow tool; presumably parallel
    # lists of speeds and extruded amounts -- TODO confirm against usage.
    values_speeds: List[float]
    values_extruded: List[float]
def load_config(path: Path) -> Config:
    """Load and validate a :class:`Config` from a YAML file.

    :param path: path to the YAML configuration file
    :raises ValueError: if the file is not valid YAML
    :raises pydantic.ValidationError: if required keys are missing
    """
    with path.open("r") as stream:
        try:
            config = yaml.safe_load(stream)
        except yaml.YAMLError as exc:
            # Bug fix: the original printed a message and then crashed with
            # an opaque TypeError on ``Config(**None)``; fail loudly instead.
            raise ValueError(
                "Exception reading yaml file, is it malformed?") from exc
    # An empty file parses to None; let pydantic report the missing keys.
    return Config(**(config or {}))
| 21.809524 | 66 | 0.674672 |
117d04c88f677a42e3c76548757126bf7749925f | 29,859 | py | Python | homeassistant/components/homematic/__init__.py | da-anda/home-assistant | 04de22613c57e98c75315e6323833dc361399b53 | [
"Apache-2.0"
] | null | null | null | homeassistant/components/homematic/__init__.py | da-anda/home-assistant | 04de22613c57e98c75315e6323833dc361399b53 | [
"Apache-2.0"
] | null | null | null | homeassistant/components/homematic/__init__.py | da-anda/home-assistant | 04de22613c57e98c75315e6323833dc361399b53 | [
"Apache-2.0"
] | null | null | null | """
Support for HomeMatic devices.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/homematic/
"""
import asyncio
from datetime import timedelta
from functools import partial
import logging
import os
import socket
import voluptuous as vol
from homeassistant.config import load_yaml_config_file
from homeassistant.const import (
EVENT_HOMEASSISTANT_STOP, CONF_USERNAME, CONF_PASSWORD, CONF_PLATFORM,
CONF_HOSTS, CONF_HOST, ATTR_ENTITY_ID, STATE_UNKNOWN)
from homeassistant.helpers import discovery
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
from homeassistant.loader import bind_hass
REQUIREMENTS = ['pyhomematic==0.1.36']
DOMAIN = 'homematic'
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL_HUB = timedelta(seconds=300)
SCAN_INTERVAL_VARIABLES = timedelta(seconds=30)
DISCOVER_SWITCHES = 'homematic.switch'
DISCOVER_LIGHTS = 'homematic.light'
DISCOVER_SENSORS = 'homematic.sensor'
DISCOVER_BINARY_SENSORS = 'homematic.binary_sensor'
DISCOVER_COVER = 'homematic.cover'
DISCOVER_CLIMATE = 'homematic.climate'
ATTR_DISCOVER_DEVICES = 'devices'
ATTR_PARAM = 'param'
ATTR_CHANNEL = 'channel'
ATTR_NAME = 'name'
ATTR_ADDRESS = 'address'
ATTR_VALUE = 'value'
ATTR_INTERFACE = 'interface'
ATTR_ERRORCODE = 'error'
ATTR_MESSAGE = 'message'
ATTR_MODE = 'mode'
ATTR_TIME = 'time'
EVENT_KEYPRESS = 'homematic.keypress'
EVENT_IMPULSE = 'homematic.impulse'
EVENT_ERROR = 'homematic.error'
SERVICE_VIRTUALKEY = 'virtualkey'
SERVICE_RECONNECT = 'reconnect'
SERVICE_SET_VARIABLE_VALUE = 'set_variable_value'
SERVICE_SET_DEVICE_VALUE = 'set_device_value'
SERVICE_SET_INSTALL_MODE = 'set_install_mode'
HM_DEVICE_TYPES = {
DISCOVER_SWITCHES: [
'Switch', 'SwitchPowermeter', 'IOSwitch', 'IPSwitch', 'RFSiren',
'IPSwitchPowermeter', 'KeyMatic', 'HMWIOSwitch', 'Rain', 'EcoLogic'],
DISCOVER_LIGHTS: ['Dimmer', 'KeyDimmer', 'IPKeyDimmer'],
DISCOVER_SENSORS: [
'SwitchPowermeter', 'Motion', 'MotionV2', 'RemoteMotion', 'MotionIP',
'ThermostatWall', 'AreaThermostat', 'RotaryHandleSensor',
'WaterSensor', 'PowermeterGas', 'LuxSensor', 'WeatherSensor',
'WeatherStation', 'ThermostatWall2', 'TemperatureDiffSensor',
'TemperatureSensor', 'CO2Sensor', 'IPSwitchPowermeter', 'HMWIOSwitch',
'FillingLevel', 'ValveDrive', 'EcoLogic', 'IPThermostatWall',
'IPSmoke', 'RFSiren', 'PresenceIP'],
DISCOVER_CLIMATE: [
'Thermostat', 'ThermostatWall', 'MAXThermostat', 'ThermostatWall2',
'MAXWallThermostat', 'IPThermostat', 'IPThermostatWall',
'ThermostatGroup'],
DISCOVER_BINARY_SENSORS: [
'ShutterContact', 'Smoke', 'SmokeV2', 'Motion', 'MotionV2',
'MotionIP', 'RemoteMotion', 'WeatherSensor', 'TiltSensor',
'IPShutterContact', 'HMWIOSwitch', 'MaxShutterContact', 'Rain',
'WiredSensor', 'PresenceIP'],
DISCOVER_COVER: ['Blind', 'KeyBlind']
}
HM_IGNORE_DISCOVERY_NODE = [
'ACTUAL_TEMPERATURE',
'ACTUAL_HUMIDITY'
]
HM_ATTRIBUTE_SUPPORT = {
'LOWBAT': ['battery', {0: 'High', 1: 'Low'}],
'LOW_BAT': ['battery', {0: 'High', 1: 'Low'}],
'ERROR': ['sabotage', {0: 'No', 1: 'Yes'}],
'RSSI_DEVICE': ['rssi', {}],
'VALVE_STATE': ['valve', {}],
'BATTERY_STATE': ['battery', {}],
'CONTROL_MODE': ['mode', {
0: 'Auto',
1: 'Manual',
2: 'Away',
3: 'Boost',
4: 'Comfort',
5: 'Lowering'
}],
'POWER': ['power', {}],
'CURRENT': ['current', {}],
'VOLTAGE': ['voltage', {}],
'OPERATING_VOLTAGE': ['voltage', {}],
'WORKING': ['working', {0: 'No', 1: 'Yes'}],
}
HM_PRESS_EVENTS = [
'PRESS_SHORT',
'PRESS_LONG',
'PRESS_CONT',
'PRESS_LONG_RELEASE',
'PRESS',
]
HM_IMPULSE_EVENTS = [
'SEQUENCE_OK',
]
CONF_RESOLVENAMES_OPTIONS = [
'metadata',
'json',
'xml',
False
]
DATA_HOMEMATIC = 'homematic'
DATA_STORE = 'homematic_store'
DATA_CONF = 'homematic_conf'
CONF_INTERFACES = 'interfaces'
CONF_LOCAL_IP = 'local_ip'
CONF_LOCAL_PORT = 'local_port'
CONF_PORT = 'port'
CONF_PATH = 'path'
CONF_CALLBACK_IP = 'callback_ip'
CONF_CALLBACK_PORT = 'callback_port'
CONF_RESOLVENAMES = 'resolvenames'
CONF_VARIABLES = 'variables'
CONF_DEVICES = 'devices'
CONF_PRIMARY = 'primary'
DEFAULT_LOCAL_IP = '0.0.0.0'
DEFAULT_LOCAL_PORT = 0
DEFAULT_RESOLVENAMES = False
DEFAULT_PORT = 2001
DEFAULT_PATH = ''
DEFAULT_USERNAME = 'Admin'
DEFAULT_PASSWORD = ''
DEVICE_SCHEMA = vol.Schema({
vol.Required(CONF_PLATFORM): 'homematic',
vol.Required(ATTR_NAME): cv.string,
vol.Required(ATTR_ADDRESS): cv.string,
vol.Required(ATTR_INTERFACE): cv.string,
vol.Optional(ATTR_CHANNEL, default=1): vol.Coerce(int),
vol.Optional(ATTR_PARAM): cv.string,
})
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_INTERFACES, default={}): {cv.match_all: {
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_PATH, default=DEFAULT_PATH): cv.string,
vol.Optional(CONF_RESOLVENAMES, default=DEFAULT_RESOLVENAMES):
vol.In(CONF_RESOLVENAMES_OPTIONS),
vol.Optional(CONF_USERNAME, default=DEFAULT_USERNAME): cv.string,
vol.Optional(CONF_PASSWORD, default=DEFAULT_PASSWORD): cv.string,
vol.Optional(CONF_CALLBACK_IP): cv.string,
vol.Optional(CONF_CALLBACK_PORT): cv.port,
}},
vol.Optional(CONF_HOSTS, default={}): {cv.match_all: {
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_USERNAME, default=DEFAULT_USERNAME): cv.string,
vol.Optional(CONF_PASSWORD, default=DEFAULT_PASSWORD): cv.string,
}},
vol.Optional(CONF_LOCAL_IP, default=DEFAULT_LOCAL_IP): cv.string,
vol.Optional(CONF_LOCAL_PORT, default=DEFAULT_LOCAL_PORT): cv.port,
}),
}, extra=vol.ALLOW_EXTRA)
SCHEMA_SERVICE_VIRTUALKEY = vol.Schema({
vol.Required(ATTR_ADDRESS): vol.All(cv.string, vol.Upper),
vol.Required(ATTR_CHANNEL): vol.Coerce(int),
vol.Required(ATTR_PARAM): cv.string,
vol.Optional(ATTR_INTERFACE): cv.string,
})
SCHEMA_SERVICE_SET_VARIABLE_VALUE = vol.Schema({
vol.Required(ATTR_NAME): cv.string,
vol.Required(ATTR_VALUE): cv.match_all,
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
})
SCHEMA_SERVICE_SET_DEVICE_VALUE = vol.Schema({
vol.Required(ATTR_ADDRESS): vol.All(cv.string, vol.Upper),
vol.Required(ATTR_CHANNEL): vol.Coerce(int),
vol.Required(ATTR_PARAM): vol.All(cv.string, vol.Upper),
vol.Required(ATTR_VALUE): cv.match_all,
vol.Optional(ATTR_INTERFACE): cv.string,
})
SCHEMA_SERVICE_RECONNECT = vol.Schema({})
SCHEMA_SERVICE_SET_INSTALL_MODE = vol.Schema({
vol.Required(ATTR_INTERFACE): cv.string,
vol.Optional(ATTR_TIME, default=60): cv.positive_int,
vol.Optional(ATTR_MODE, default=1):
vol.All(vol.Coerce(int), vol.In([1, 2])),
vol.Optional(ATTR_ADDRESS): vol.All(cv.string, vol.Upper),
})
@bind_hass
def virtualkey(hass, address, channel, param, interface=None):
    """Send a virtual keypress to the HomeMatic controller."""
    hass.services.call(DOMAIN, SERVICE_VIRTUALKEY, {
        ATTR_ADDRESS: address,
        ATTR_CHANNEL: channel,
        ATTR_PARAM: param,
        ATTR_INTERFACE: interface,
    })
@bind_hass
def set_variable_value(hass, entity_id, value):
    """Change the value of a HomeMatic system variable."""
    hass.services.call(DOMAIN, SERVICE_SET_VARIABLE_VALUE, {
        ATTR_ENTITY_ID: entity_id,
        ATTR_VALUE: value,
    })
@bind_hass
def set_device_value(hass, address, channel, param, value, interface=None):
    """Call the setValue XML-RPC method of the supplied interface."""
    hass.services.call(DOMAIN, SERVICE_SET_DEVICE_VALUE, {
        ATTR_ADDRESS: address,
        ATTR_CHANNEL: channel,
        ATTR_PARAM: param,
        ATTR_VALUE: value,
        ATTR_INTERFACE: interface,
    })
@bind_hass
def set_install_mode(hass, interface, mode=None, time=None, address=None):
    """Call the setInstallMode XML-RPC method of the supplied interface."""
    service_data = {}
    # Only truthy values are forwarded, matching the service's optional keys.
    for key, value in ((ATTR_INTERFACE, interface), (ATTR_MODE, mode),
                       (ATTR_TIME, time), (ATTR_ADDRESS, address)):
        if value:
            service_data[key] = value
    hass.services.call(DOMAIN, SERVICE_SET_INSTALL_MODE, service_data)
@bind_hass
def reconnect(hass):
    """Reconnect to CCU/Homegear."""
    # The reconnect service takes no payload.
    hass.services.call(DOMAIN, SERVICE_RECONNECT, {})
def setup(hass, config):
    """Set up the Homematic component.

    Builds the pyhomematic connection dictionaries from the configured
    interfaces and hosts, starts the XML-RPC server thread, creates hub
    entities and registers all HomeMatic services.
    Returns True on success (required by the HASS component contract).
    """
    from pyhomematic import HMConnection
    conf = config[DOMAIN]
    hass.data[DATA_CONF] = remotes = {}
    hass.data[DATA_STORE] = set()
    # Create hosts-dictionary for pyhomematic.
    # Interfaces are full RPC endpoints ('connect': True -> devices are
    # discovered and event callbacks registered for them).
    for rname, rconfig in conf[CONF_INTERFACES].items():
        remotes[rname] = {
            'ip': socket.gethostbyname(rconfig.get(CONF_HOST)),
            'port': rconfig.get(CONF_PORT),
            'path': rconfig.get(CONF_PATH),
            'resolvenames': rconfig.get(CONF_RESOLVENAMES),
            'username': rconfig.get(CONF_USERNAME),
            'password': rconfig.get(CONF_PASSWORD),
            'callbackip': rconfig.get(CONF_CALLBACK_IP),
            'callbackport': rconfig.get(CONF_CALLBACK_PORT),
            'connect': True,
        }
    # Hosts are hub-only connections used for system variables / service
    # messages; no device discovery ('connect': False).
    for sname, sconfig in conf[CONF_HOSTS].items():
        remotes[sname] = {
            'ip': socket.gethostbyname(sconfig.get(CONF_HOST)),
            'port': DEFAULT_PORT,
            'username': sconfig.get(CONF_USERNAME),
            'password': sconfig.get(CONF_PASSWORD),
            'connect': False,
        }
    # Create server thread
    bound_system_callback = partial(_system_callback_handler, hass, config)
    hass.data[DATA_HOMEMATIC] = homematic = HMConnection(
        local=config[DOMAIN].get(CONF_LOCAL_IP),
        localport=config[DOMAIN].get(CONF_LOCAL_PORT),
        remotes=remotes,
        systemcallback=bound_system_callback,
        interface_id='homeassistant'
    )
    # Start server thread, connect to hosts, initialize to receive events
    homematic.start()
    # Stops server when HASS is shutting down
    hass.bus.listen_once(
        EVENT_HOMEASSISTANT_STOP, hass.data[DATA_HOMEMATIC].stop)
    # Init homematic hubs: one HMHub entity per configured host
    entity_hubs = []
    for hub_name in conf[CONF_HOSTS].keys():
        entity_hubs.append(HMHub(hass, homematic, hub_name))
    # Register HomeMatic services
    descriptions = load_yaml_config_file(
        os.path.join(os.path.dirname(__file__), 'services.yaml'))

    def _hm_service_virtualkey(service):
        """Service to handle virtualkey servicecalls."""
        address = service.data.get(ATTR_ADDRESS)
        channel = service.data.get(ATTR_CHANNEL)
        param = service.data.get(ATTR_PARAM)
        # Device not found
        hmdevice = _device_from_servicecall(hass, service)
        if hmdevice is None:
            _LOGGER.error("%s not found for service virtualkey!", address)
            return
        # Parameter doesn't exist for device
        if param not in hmdevice.ACTIONNODE:
            _LOGGER.error("%s not datapoint in hm device %s", param, address)
            return
        # Channel doesn't exist for device
        if channel not in hmdevice.ACTIONNODE[param]:
            _LOGGER.error("%i is not a channel in hm device %s",
                          channel, address)
            return
        # Call parameter
        hmdevice.actionNodeData(param, True, channel)

    hass.services.register(
        DOMAIN, SERVICE_VIRTUALKEY, _hm_service_virtualkey,
        descriptions[SERVICE_VIRTUALKEY], schema=SCHEMA_SERVICE_VIRTUALKEY)

    def _service_handle_value(service):
        """Service to call setValue method for HomeMatic system variable."""
        entity_ids = service.data.get(ATTR_ENTITY_ID)
        name = service.data[ATTR_NAME]
        value = service.data[ATTR_VALUE]
        # Without explicit entity_ids the variable is set on every hub
        if entity_ids:
            entities = [entity for entity in entity_hubs if
                        entity.entity_id in entity_ids]
        else:
            entities = entity_hubs
        if not entities:
            _LOGGER.error("No HomeMatic hubs available")
            return
        for hub in entities:
            hub.hm_set_variable(name, value)

    hass.services.register(
        DOMAIN, SERVICE_SET_VARIABLE_VALUE, _service_handle_value,
        descriptions[SERVICE_SET_VARIABLE_VALUE],
        schema=SCHEMA_SERVICE_SET_VARIABLE_VALUE)

    def _service_handle_reconnect(service):
        """Service to reconnect all HomeMatic hubs."""
        homematic.reconnect()

    hass.services.register(
        DOMAIN, SERVICE_RECONNECT, _service_handle_reconnect,
        descriptions[SERVICE_RECONNECT], schema=SCHEMA_SERVICE_RECONNECT)

    def _service_handle_device(service):
        """Service to call setValue method for HomeMatic devices."""
        address = service.data.get(ATTR_ADDRESS)
        channel = service.data.get(ATTR_CHANNEL)
        param = service.data.get(ATTR_PARAM)
        value = service.data.get(ATTR_VALUE)
        # Device not found
        hmdevice = _device_from_servicecall(hass, service)
        if hmdevice is None:
            _LOGGER.error("%s not found!", address)
            return
        hmdevice.setValue(param, value, channel)

    hass.services.register(
        DOMAIN, SERVICE_SET_DEVICE_VALUE, _service_handle_device,
        descriptions[SERVICE_SET_DEVICE_VALUE],
        schema=SCHEMA_SERVICE_SET_DEVICE_VALUE)

    def _service_handle_install_mode(service):
        """Service to set interface into install mode."""
        interface = service.data.get(ATTR_INTERFACE)
        mode = service.data.get(ATTR_MODE)
        time = service.data.get(ATTR_TIME)
        address = service.data.get(ATTR_ADDRESS)
        homematic.setInstallMode(interface, t=time, mode=mode, address=address)

    hass.services.register(
        DOMAIN, SERVICE_SET_INSTALL_MODE, _service_handle_install_mode,
        descriptions[SERVICE_SET_INSTALL_MODE],
        schema=SCHEMA_SERVICE_SET_INSTALL_MODE)

    return True
def _system_callback_handler(hass, config, src, *args):
    """System callback handler.

    Invoked by pyhomematic. Two sources are handled:
    - 'newDevices': register event callbacks and discover HASS entities
      for devices not seen before.
    - 'error': log and re-fire the Homegear error on the HASS bus.
    """
    # New devices available at hub
    if src == 'newDevices':
        (interface_id, dev_descriptions) = args
        # interface_id has the form '<prefix>-<interface>'; keep the last part
        interface = interface_id.split('-')[-1]
        # Device support active?
        if not hass.data[DATA_CONF][interface]['connect']:
            return
        # Collect only addresses not processed before (DATA_STORE is the
        # set of already-known base addresses, channel suffix stripped)
        addresses = []
        for dev in dev_descriptions:
            address = dev['ADDRESS'].split(':')[0]
            if address not in hass.data[DATA_STORE]:
                hass.data[DATA_STORE].add(address)
                addresses.append(address)
        # Register EVENTS
        # Search all devices with an EVENTNODE that includes data
        bound_event_callback = partial(_hm_event_handler, hass, interface)
        for dev in addresses:
            hmdevice = hass.data[DATA_HOMEMATIC].devices[interface].get(dev)
            if hmdevice.EVENTNODE:
                hmdevice.setEventCallback(
                    callback=bound_event_callback, bequeath=True)
        # Create HASS entities
        if addresses:
            for component_name, discovery_type in (
                    ('switch', DISCOVER_SWITCHES),
                    ('light', DISCOVER_LIGHTS),
                    ('cover', DISCOVER_COVER),
                    ('binary_sensor', DISCOVER_BINARY_SENSORS),
                    ('sensor', DISCOVER_SENSORS),
                    ('climate', DISCOVER_CLIMATE)):
                # Get all devices of a specific type
                found_devices = _get_devices(
                    hass, discovery_type, addresses, interface)
                # When devices of this type are found
                # they are setup in HASS and an discovery event is fired
                if found_devices:
                    discovery.load_platform(hass, component_name, DOMAIN, {
                        ATTR_DISCOVER_DEVICES: found_devices
                    }, config)
    # Homegear error message
    elif src == 'error':
        _LOGGER.error("Error: %s", args)
        (interface_id, errorcode, message) = args
        hass.bus.fire(EVENT_ERROR, {
            ATTR_ERRORCODE: errorcode,
            ATTR_MESSAGE: message
        })
def _get_devices(hass, discovery_type, keys, interface):
    """Get the HomeMatic devices for given discovery_type.

    Builds one schema-validated config dict per (device, parameter, channel)
    combination, ready to be passed to the platform via discovery.

    @param discovery_type: one of the DISCOVER_* constants
    @param keys: device addresses to inspect
    @return list of device config dicts
    """
    device_arr = []
    for key in keys:
        device = hass.data[DATA_HOMEMATIC].devices[interface][key]
        class_name = device.__class__.__name__
        metadata = {}
        # Class not supported by discovery type
        if class_name not in HM_DEVICE_TYPES[discovery_type]:
            continue
        # Load metadata needed to generate a parameter list
        if discovery_type == DISCOVER_SENSORS:
            metadata.update(device.SENSORNODE)
        elif discovery_type == DISCOVER_BINARY_SENSORS:
            metadata.update(device.BINARYNODE)
        else:
            # Non-sensor types: a single pseudo-parameter (None) whose
            # channels come from the device ELEMENT description
            metadata.update({None: device.ELEMENT})
        # Generate options for 1...n elements with 1...n parameters
        for param, channels in metadata.items():
            if param in HM_IGNORE_DISCOVERY_NODE:
                continue
            # Add devices
            _LOGGER.debug("%s: Handling %s: %s: %s",
                          discovery_type, key, param, channels)
            for channel in channels:
                name = _create_ha_name(
                    name=device.NAME, channel=channel, param=param,
                    count=len(channels)
                )
                device_dict = {
                    CONF_PLATFORM: "homematic",
                    ATTR_ADDRESS: key,
                    ATTR_INTERFACE: interface,
                    ATTR_NAME: name,
                    ATTR_CHANNEL: channel
                }
                if param is not None:
                    device_dict[ATTR_PARAM] = param
                # Add new device, dropping entries the schema rejects
                try:
                    DEVICE_SCHEMA(device_dict)
                    device_arr.append(device_dict)
                except vol.MultipleInvalid as err:
                    _LOGGER.error("Invalid device config: %s",
                                  str(err))
    return device_arr
def _create_ha_name(name, channel, param, count):
"""Generate a unique entity id."""
# HMDevice is a simple device
if count == 1 and param is None:
return name
# Has multiple elements/channels
if count > 1 and param is None:
return "{} {}".format(name, channel)
# With multiple parameters on first channel
if count == 1 and param is not None:
return "{} {}".format(name, param)
# Multiple parameters with multiple channels
if count > 1 and param is not None:
return "{} {} {}".format(name, channel, param)
def _hm_event_handler(hass, interface, device, caller, attribute, value):
    """Handle all pyhomematic device events.

    Translates keypress/impulse events of known devices into HASS bus
    events; everything else is logged and dropped.
    @param device: pyhomematic id of the form '<address>:<channel>'
    """
    try:
        channel = int(device.split(":")[1])
        address = device.split(":")[0]
        hmdevice = hass.data[DATA_HOMEMATIC].devices[interface].get(address)
    except (TypeError, ValueError):
        _LOGGER.error("Event handling channel convert error!")
        return
    # Return if not an event supported by device
    if attribute not in hmdevice.EVENTNODE:
        return
    _LOGGER.debug("Event %s for %s channel %i", attribute,
                  hmdevice.NAME, channel)
    # Keypress event
    if attribute in HM_PRESS_EVENTS:
        hass.bus.fire(EVENT_KEYPRESS, {
            ATTR_NAME: hmdevice.NAME,
            ATTR_PARAM: attribute,
            ATTR_CHANNEL: channel
        })
        return
    # Impulse event
    if attribute in HM_IMPULSE_EVENTS:
        hass.bus.fire(EVENT_IMPULSE, {
            ATTR_NAME: hmdevice.NAME,
            ATTR_CHANNEL: channel
        })
        return
    _LOGGER.warning("Event is unknown and not forwarded")
def _device_from_servicecall(hass, service):
    """Extract HomeMatic device from service call.

    Resolves the device by address; if an interface is given, only that
    interface is consulted, otherwise all interfaces are searched.
    Returns None when nothing matches.
    """
    address = service.data.get(ATTR_ADDRESS)
    if address == 'BIDCOS-RF':
        # Normalize the BidCoS-RF virtual device spelling
        address = 'BidCoS-RF'
    hm_devices = hass.data[DATA_HOMEMATIC].devices
    interface = service.data.get(ATTR_INTERFACE)
    if interface:
        return hm_devices[interface].get(address)
    for device_map in hm_devices.values():
        if address in device_map:
            return device_map[address]
class HMHub(Entity):
    """The HomeMatic hub. (CCU2/HomeGear).

    Exposes the hub's service-message count as state and its system
    variables as state attributes, refreshed on fixed intervals.
    """

    def __init__(self, hass, homematic, name):
        """Initialize HomeMatic hub."""
        self.hass = hass
        self.entity_id = "{}.{}".format(DOMAIN, name.lower())
        self._homematic = homematic
        self._variables = {}
        self._name = name
        self._state = None
        # Load data: schedule periodic refreshes and run one refresh now
        self.hass.helpers.event.track_time_interval(
            self._update_hub, SCAN_INTERVAL_HUB)
        self.hass.add_job(self._update_hub, None)
        self.hass.helpers.event.track_time_interval(
            self._update_variables, SCAN_INTERVAL_VARIABLES)
        self.hass.add_job(self._update_variables, None)

    @property
    def name(self):
        """Return the name of the device."""
        return self._name

    @property
    def should_poll(self):
        """Return false. HomeMatic Hub object updates variables."""
        return False

    @property
    def state(self):
        """Return the state of the entity."""
        return self._state

    @property
    def state_attributes(self):
        """Return the state attributes (a copy of the system variables)."""
        attr = self._variables.copy()
        return attr

    @property
    def icon(self):
        """Return the icon to use in the frontend, if any."""
        return "mdi:gradient"

    def _update_hub(self, now):
        """Retrieve latest state (number of pending service messages)."""
        service_message = self._homematic.getServiceMessages(self._name)
        state = None if service_message is None else len(service_message)
        # Only push an update to HASS when the state actually changed
        if self._state != state:
            self._state = state
            self.schedule_update_ha_state()

    def _update_variables(self, now):
        """Retrieve all variable data and update hmvariable states."""
        variables = self._homematic.getAllSystemVariables(self._name)
        if variables is None:
            return
        state_change = False
        for key, value in variables.items():
            if key in self._variables and value == self._variables[key]:
                continue
            state_change = True
            self._variables.update({key: value})
        if state_change:
            self.schedule_update_ha_state()

    def hm_set_variable(self, name, value):
        """Set variable value on CCU/Homegear.

        The value is coerced to the type of the currently cached value
        (bool or float); unknown variable names are rejected.
        """
        if name not in self._variables:
            _LOGGER.error("Variable %s not found on %s", name, self.name)
            return
        old_value = self._variables.get(name)
        if isinstance(old_value, bool):
            value = cv.boolean(value)
        else:
            value = float(value)
        self._homematic.setSystemVariable(self.name, name, value)
        # Update the local cache eagerly so the UI reflects the change
        self._variables.update({name: value})
        self.schedule_update_ha_state()
class HMDevice(Entity):
    """The HomeMatic device base object.

    Holds the link between a HASS entity and a pyhomematic device:
    caches datapoint values in self._data and keeps them in sync through
    pyhomematic event callbacks. Subclasses must implement
    _init_data_struct() to declare which datapoints they need.
    """

    def __init__(self, config):
        """Initialize a generic HomeMatic device."""
        self._name = config.get(ATTR_NAME)
        self._address = config.get(ATTR_ADDRESS)
        self._interface = config.get(ATTR_INTERFACE)
        self._channel = config.get(ATTR_CHANNEL)
        # Name of the main datapoint (parameter) this entity represents
        self._state = config.get(ATTR_PARAM)
        # Cache of datapoint values, filled by _init_data/_load_data_from_hm
        self._data = {}
        self._homematic = None
        self._hmdevice = None
        self._connected = False
        self._available = False
        # Set parameter to uppercase
        if self._state:
            self._state = self._state.upper()

    @asyncio.coroutine
    def async_added_to_hass(self):
        """Load data init callbacks."""
        yield from self.hass.async_add_job(self.link_homematic)

    @property
    def should_poll(self):
        """Return false. HomeMatic states are pushed by the XML-RPC Server."""
        return False

    @property
    def name(self):
        """Return the name of the device."""
        return self._name

    @property
    def available(self):
        """Return true if device is available."""
        return self._available

    @property
    def device_state_attributes(self):
        """Return device specific state attributes."""
        attr = {}
        # Generate a dictionary with attributes
        for node, data in HM_ATTRIBUTE_SUPPORT.items():
            # Is an attribute and exists for this object
            if node in self._data:
                # data is (attribute name, value-mapping dict); fall back
                # to the raw value when no mapping entry exists
                value = data[1].get(self._data[node], self._data[node])
                attr[data[0]] = value
        # Static attributes
        attr['id'] = self._hmdevice.ADDRESS
        attr['interface'] = self._interface
        return attr

    def link_homematic(self):
        """Connect to HomeMatic.

        Idempotent: returns True immediately when already connected.
        On failure the connection flag is reset so a retry is possible.
        """
        if self._connected:
            return True
        # Initialize
        self._homematic = self.hass.data[DATA_HOMEMATIC]
        self._hmdevice = \
            self._homematic.devices[self._interface][self._address]
        self._connected = True
        try:
            # Initialize datapoints of this object
            self._init_data()
            self._load_data_from_hm()
            # Link events from pyhomematic
            self._subscribe_homematic_events()
            self._available = not self._hmdevice.UNREACH
        # pylint: disable=broad-except
        except Exception as err:
            self._connected = False
            _LOGGER.error("Exception while linking %s: %s",
                          self._address, str(err))

    def _hm_event_callback(self, device, caller, attribute, value):
        """Handle all pyhomematic device events."""
        _LOGGER.debug("%s received event '%s' value: %s", self._name,
                      attribute, value)
        has_changed = False
        # Is data needed for this instance?
        if attribute in self._data:
            # Did data change?
            if self._data[attribute] != value:
                self._data[attribute] = value
                has_changed = True
        # Availability has changed
        if attribute == 'UNREACH':
            self._available = bool(value)
            has_changed = True
        elif not self.available:
            # Any event from an unavailable device keeps it marked as such
            self._available = False
            has_changed = True
        # If it has changed data point, update HASS
        if has_changed:
            self.schedule_update_ha_state()

    def _subscribe_homematic_events(self):
        """Subscribe all required events to handle job."""
        channels_to_sub = set()
        channels_to_sub.add(0)  # Add channel 0 for UNREACH
        # Push data to channels_to_sub from hmdevice metadata
        for metadata in (self._hmdevice.SENSORNODE, self._hmdevice.BINARYNODE,
                         self._hmdevice.ATTRIBUTENODE,
                         self._hmdevice.WRITENODE, self._hmdevice.EVENTNODE,
                         self._hmdevice.ACTIONNODE):
            for node, channels in metadata.items():
                # Data is needed for this instance
                if node in self._data:
                    # chan is current channel
                    if len(channels) == 1:
                        channel = channels[0]
                    else:
                        channel = self._channel
                    # Prepare for subscription
                    try:
                        channels_to_sub.add(int(channel))
                    except (ValueError, TypeError):
                        _LOGGER.error("Invalid channel in metadata from %s",
                                      self._name)
        # Set callbacks
        for channel in channels_to_sub:
            _LOGGER.debug(
                "Subscribe channel %d from %s", channel, self._name)
            self._hmdevice.setEventCallback(
                callback=self._hm_event_callback, bequeath=False,
                channel=channel)

    def _load_data_from_hm(self):
        """Load first value from pyhomematic.

        Pulls initial values for every declared datapoint via the matching
        pyhomematic getter. Returns False when not yet connected.
        """
        if not self._connected:
            return False
        # Read data from pyhomematic
        for metadata, funct in (
                (self._hmdevice.ATTRIBUTENODE,
                 self._hmdevice.getAttributeData),
                (self._hmdevice.WRITENODE, self._hmdevice.getWriteData),
                (self._hmdevice.SENSORNODE, self._hmdevice.getSensorData),
                (self._hmdevice.BINARYNODE, self._hmdevice.getBinaryData)):
            for node in metadata:
                if metadata[node] and node in self._data:
                    self._data[node] = funct(name=node, channel=self._channel)
        return True

    def _hm_set_state(self, value):
        """Set data to main datapoint."""
        if self._state in self._data:
            self._data[self._state] = value

    def _hm_get_state(self):
        """Get data from main datapoint."""
        if self._state in self._data:
            return self._data[self._state]
        return None

    def _init_data(self):
        """Generate a data dict (self._data) from the HomeMatic metadata."""
        # Add all attributes to data dictionary
        for data_note in self._hmdevice.ATTRIBUTENODE:
            self._data.update({data_note: STATE_UNKNOWN})
        # Initialize device specific data
        self._init_data_struct()

    def _init_data_struct(self):
        """Generate a data dictionary from the HomeMatic device metadata."""
        raise NotImplementedError
| 33.474215 | 79 | 0.627851 |
8558f18ec58068de9f04bde71b1d2081307c1751 | 1,041 | py | Python | db_manager.py | netarachelhershko/crawler | 22a5b41a768fae7415ad30cc6aec97063f2d07ce | [
"MIT"
] | null | null | null | db_manager.py | netarachelhershko/crawler | 22a5b41a768fae7415ad30cc6aec97063f2d07ce | [
"MIT"
] | null | null | null | db_manager.py | netarachelhershko/crawler | 22a5b41a768fae7415ad30cc6aec97063f2d07ce | [
"MIT"
] | null | null | null | from schemas.url import Url, Base
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
class DBManager(object):
""" Managed the db session """
def __init__(self, db_file):
"""
:param db_file: Database file to open
"""
self.engine = create_engine('sqlite:///{db}'.format(db=db_file))
self.conn = self.engine.connect()
self.session = sessionmaker(bind=self.engine)()
self._create_tables()
def update(self, url_objects):
"""
Updates the database and commits results
:param url_objects: List of Url objects (See schemas/url.py)
"""
try:
for url_obj in url_objects:
url_obj = Url(url_obj['url'], url_obj['crawl_time'])
self.session.add(url_obj)
self.session.commit()
except:
pass
def get_crawled_urls(self):
return self.session.query(Url).all()
def _create_tables(self):
Base.metadata.create_all(self.engine)
| 29.742857 | 72 | 0.608069 |
5f5d85a298e9d70ff56c323525421b27ed4cbddd | 10,141 | py | Python | pineboolib/application/acls/pnaccesscontrollists.py | juanjosepablos/pineboo | f6ce515aec6e0139821bb9c1d62536d9fb50dae4 | [
"MIT"
] | null | null | null | pineboolib/application/acls/pnaccesscontrollists.py | juanjosepablos/pineboo | f6ce515aec6e0139821bb9c1d62536d9fb50dae4 | [
"MIT"
] | 1 | 2017-10-30T22:00:48.000Z | 2017-11-11T19:34:32.000Z | pineboolib/application/acls/pnaccesscontrollists.py | juanjosepablos/pineboo | f6ce515aec6e0139821bb9c1d62536d9fb50dae4 | [
"MIT"
] | 1 | 2017-10-30T20:16:38.000Z | 2017-10-30T20:16:38.000Z | # -*- coding: utf-8 -*-
"""
PNAccessControlList Module.
Manage access lists to limit the application to users..
"""
from PyQt5 import QtCore, QtXml, QtWidgets
from pineboolib import application
from pineboolib.application.database import pnsqlquery
from . import pnaccesscontrolfactory
from pineboolib import logging
from typing import Dict, Optional, Union, TYPE_CHECKING
if TYPE_CHECKING:
from . import pnaccesscontrol # noqa : F401
from pineboolib.application.metadata import pntablemetadata # noqa : F401
LOGGER = logging.getLogger(__name__)
class PNAccessControlLists(object):
"""PNAccessControlList Class."""
"""
Nombre que identifica la lista de control de acceso actualmente establecida.
Generalmente corresponderá con el identificador del registro de la tabla "flacls" que se utilizó para crear "acl.xml".
"""
_name: str
"""
Diccionario (lista) que mantiene los objetos de las reglas de control de acceso establecidas.
La clave que identifica a cada objeto está formada por el siguiente literal:
\\code
PNAccessControl::type + "::" + PNAccessControl::name + "::" + PNAccessControl::user
\\endcode
"""
_access_control_list: Dict[str, "pnaccesscontrol.PNAccessControl"]
def __init__(self) -> None:
"""Initialize the class."""
self._name = ""
self._access_control_list: Dict[str, "pnaccesscontrolfactory.PNAccessControlFactory"] = {}
def __del__(self) -> None:
"""Process when destroying the class."""
if self._access_control_list:
self._access_control_list.clear()
del self._access_control_list
def name(self) -> Optional[str]:
"""
Return the name that identifies the currently established access control list.
@return Name the current access control list.
"""
return self._name
def init(self, _acl_xml: str = None) -> None:
"""
Read the file "acl.xml" and establish a new access control list.
If the file "acl.xml" cannot be read, the access control list is empty and
no access control will be processed on any object.
@param _acl_xml XML content with the definition of the access control list.
"""
if _acl_xml is None:
if application.PROJECT.conn_manager is None:
raise Exception("Project is not connected yet")
_acl_xml = application.PROJECT.conn_manager.managerModules().content("acl.xml")
doc = QtXml.QDomDocument("ACL")
if self._access_control_list:
self._access_control_list.clear()
if _acl_xml and not doc.setContent(_acl_xml):
LOGGER.warning(QtCore.QObject().tr("Lista de control de acceso errónea"))
return
self._access_control_list = {}
# self._access_control_list.setAutoDelete(True)
doc_elem = doc.documentElement()
node = doc_elem.firstChild()
if node.isNull():
return
while not node.isNull():
element = node.toElement()
if element:
if element.tagName() == "name":
self._name = element.text()
node = node.nextSibling()
continue
rule = pnaccesscontrolfactory.PNAccessControlFactory().create(element.tagName())
if rule:
rule.set(element)
self._access_control_list[
"%s::%s::%s" % (rule.type(), rule.name(), rule.user())
] = rule
node = node.nextSibling()
continue
node = node.nextSibling()
LOGGER.warning(QtCore.QObject().tr("Lista de control de acceso cargada"))
def process(
self, obj: Optional[Union[QtWidgets.QWidget, "pntablemetadata.PNTableMetaData"]] = None
) -> None:
"""
Process a high-level object according to the established access control list.
@param obj High-level object to which access control is applied. It must be or inherit from the QObject class.
"""
if obj is None or not self._access_control_list:
return
if not self._access_control_list:
return
type_: str = pnaccesscontrolfactory.PNAccessControlFactory().type(obj)
if hasattr(obj, "name"):
name_ = obj.name() # type: ignore[union-attr] # noqa: F821
else:
name_ = obj.objectName() # type: ignore[union-attr] # noqa: F821
if application.PROJECT.conn_manager is None:
raise Exception("Project is not connected yet")
user_ = application.PROJECT.conn_manager.mainConn().user()
if "" in (type_, name_, user_):
return
key = "%s::%s::%s" % (type_, name_, user_)
if key in self._access_control_list.keys():
self._access_control_list[key].processObject(obj)
def install_acl(self, idacl: str) -> None:
"""
Create a new file "acl.xml" and store it replacing the previous one, if it exists.
@param idacl Record identifier of the "flacls" table to use to create "acl.xml".
"""
doc = QtXml.QDomDocument("ACL")
root = doc.createElement("ACL")
doc.appendChild(root)
name = doc.createElement("name")
root.appendChild(name)
text_node = doc.createTextNode(idacl)
name.appendChild(text_node)
qry = pnsqlquery.PNSqlQuery()
qry.setTablesList("flacs")
qry.setSelect("idac,tipo,nombre,iduser,idgroup,degrupo,permiso")
qry.setFrom("flacs")
qry.setWhere("idacl='%s'" % idacl)
qry.setOrderBy("prioridad DESC, tipo")
qry.setForwardOnly(True)
if qry.exec_():
# step = 0
# progress = util.ProgressDialog(util.tr("Instalando control de acceso..."), None, q.size(), None, None, True)
# progress.setCaption(util.tr("Instalando ACL"))
# progress.setMinimumDuration(0)
# progress.setProgress(++step)
while qry.next():
self.make_rule(qry, doc)
# progress.setProgress(++step)
from pineboolib import application
if application.PROJECT.conn_manager is None:
raise Exception("Project is not connected yet")
application.PROJECT.conn_manager.managerModules().setContent(
"acl.xml", "sys", doc.toString()
)
def make_rule(self, qry: pnsqlquery.PNSqlQuery, dom_document: QtXml.QDomDocument) -> None:
"""
Create the corresponding DOM node (s) to a record in the "flacs" table.
Use PNAccessControlLists :: makeRuleUser or PNAccessControlLists :: makeRuleGroup depending on whether the registry
to which the query points indicates that the rule is for a user or a group. If the record indicates a
user will create a user rule, if you indicate a group a user rule will be created for each of
Group member users.
@param q Query about the "flacs" table positioned in the register to be used to construct the rule (s).
@param d DOM / XML document in which you will insert the node (s) that describe the access control rule (s).
"""
if not qry or not dom_document:
return
if qry.value(5):
self.make_rule_group(qry, dom_document, str(qry.value(4)))
else:
self.make_rule_user(qry, dom_document, str(qry.value(3)))
def make_rule_user(
self, qry: pnsqlquery.PNSqlQuery, dom_document: QtXml.QDomDocument, iduser: str
) -> None:
"""
Create a DOM node corresponding to a record in the "flacs" table and for a given user.
@param q Query about the "flacs" table positioned in the register to be used to construct the rule.
@param d DOM / XML document in which you will insert the node that describes the access control rule.
@param iduser Identifier of the user used in the access control rule.
"""
if not iduser or not qry or not dom_document:
return
rule = pnaccesscontrolfactory.PNAccessControlFactory().create(str(qry.value(1)))
if rule:
rule.setName(str(qry.value(2)))
rule.setUser(iduser)
rule.setPerm(str(qry.value(6)))
qry_acos = pnsqlquery.PNSqlQuery()
qry_acos.setTablesList("flacos")
qry_acos.setSelect("nombre,permiso")
qry_acos.setFrom("flacos")
qry_acos.setWhere("idac ='%s'" % qry.value(0))
qry_acos.setForwardOnly(True)
acos = []
if qry_acos.exec_():
while qry_acos.next():
acos.append(str(qry_acos.value(0)))
acos.append((qry_acos.value(1)))
rule.setAcos(acos)
rule.get(dom_document)
def make_rule_group(
self, qry: pnsqlquery.PNSqlQuery, dom_document: QtXml.QDomDocument, idgroup: str = ""
) -> None:
"""
Create several DOM nodes corresponding to a record in the "flacs" table and for a specific user group.
The function of this method is to create a rule for each of the group member users, using
PNAccessControlLists :: makeRuleUser.
@param q Query about the "flacs" table positioned in the register to use to build the rules.
@param d DOM / XML document in which the nodes that describe the access control rules will be inserted.
@param idgroup Identifier of the user group.
"""
if idgroup == "" or not qry or not dom_document:
return
qry_users = pnsqlquery.PNSqlQuery()
qry_users.setTablesList("flusers")
qry_users.setSelect("iduser")
qry_users.setFrom("flusers")
qry_users.setWhere("idgroup='%s'" % idgroup)
qry_users.setForwardOnly(True)
if qry_users.exec_():
while qry_users.next():
self.make_rule_user(qry, dom_document, str(qry_users.value(0)))
| 35.707746 | 123 | 0.621241 |
0639333ab87b70f011e7665281e39c4a01da778c | 9,737 | py | Python | lakefs/client.py | treeverse/lakeFS-hooks | fd24373eff7085bd941543e595e1024cb398304a | [
"Apache-2.0"
] | 6 | 2021-03-02T15:13:31.000Z | 2021-04-13T10:19:12.000Z | lakefs/client.py | treeverse/lakeFS-hooks | fd24373eff7085bd941543e595e1024cb398304a | [
"Apache-2.0"
] | 1 | 2021-04-05T13:44:03.000Z | 2021-04-05T13:44:03.000Z | lakefs/client.py | treeverse/lakeFS-hooks | fd24373eff7085bd941543e595e1024cb398304a | [
"Apache-2.0"
] | 1 | 2021-12-04T20:22:37.000Z | 2021-12-04T20:22:37.000Z | import datetime
from collections import namedtuple
from typing import Iterator, Union, Tuple
import lakefs_client
from lakefs_client.client import LakeFSClient
from lakefs_client.exceptions import NotFoundException
from pyarrow import NativeFile, BufferReader
from pyarrow.fs import PyFileSystem, FileInfo, FileType, FileSystemHandler, FileSelector
from lakefs.path import DEFAULT_PATH_SEPARATOR
PREFETCH_CURSOR_SIZE = 1000
class Client:
    """
    Client is a lakeFS OpenAPI client, generated dynamically using Bravado.
    To instantiate a new client, it must have access to a running lakeFS server.

    Example usage:

    >>> import lakefs
    >>> import pyarrow.parquet as pq
    >>> client = lakefs.Client('http://localhost:8000', '<lakeFS access key ID>', '<lakeFS secret key>')
    >>> # Explore a diff between two branches
    >>> # Get a PyArrow compatible, read-only filesystem on top of lakeFS
    >>> fs = get_filesystem(client, 'my-repo-name', 'experiment-branch')
    >>> for change in client.diff('my-repo-name', 'experiment-branch', 'main', prefix='collections/production/'):
    >>>     if change.type == 'added':
    >>>         schema = pq.read_schema(fs.open_input_file(change.path))
    >>>         for field in schema:
    >>>             pass  # Do something with the schema!
    """

    def __init__(self, base_url: str, access_key: str, secret_key: str):
        """Create an authenticated lakeFS API client for the given server."""
        configuration = lakefs_client.Configuration(host=base_url, username=access_key, password=secret_key)
        self._client = LakeFSClient(configuration)

    def get_last_commit(self, repository: str, branch: str) -> str:
        """Return the id of the commit the branch currently points at."""
        response = self._client.branches.get_branch(repository=repository, branch=branch)
        return response.commit_id

    def diff_branch(self, repository: str, branch: str, prefix: str = '',
                    prefetch_amount: int = PREFETCH_CURSOR_SIZE,
                    max_amount: int = None) -> Iterator[namedtuple]:
        """Yield uncommitted changes on a branch, paginating transparently.

        Only changes whose path starts with `prefix` are yielded; at most
        `max_amount` changes when given (None means unlimited).
        """
        after = prefix
        amount = 0
        if max_amount is not None:
            # No need to prefetch more per page than we will ever yield
            prefetch_amount = min(prefetch_amount, max_amount)
        while True:
            response = self._client.branches.diff_branch(
                repository=repository,
                branch=branch,
                after=after,
                amount=prefetch_amount)
            for change in response.results:
                if not change.path.startswith(prefix):
                    return  # we're done since path > prefix
                yield change
                amount += 1
                if max_amount is not None and amount >= max_amount:
                    return
            if not response.pagination.has_more:
                return  # no more things.
            after = response.pagination.next_offset

    def diff(self, repository: str, from_ref: str, to_ref: str, prefix: str = '',
             prefetch_amount: int = PREFETCH_CURSOR_SIZE) -> Iterator[namedtuple]:
        """Yield differences between two refs under `prefix`, paginating."""
        after = prefix
        while True:
            response = self._client.refs.diff_refs(
                repository=repository,
                left_ref=from_ref,
                right_ref=to_ref,
                after=after,
                amount=prefetch_amount)
            for change in response.results:
                if not change.path.startswith(prefix):
                    return  # we're done since path > prefix
                yield change
            if not response.pagination.has_more:
                return  # no more things.
            after = response.pagination.next_offset

    def list(self, repository: str, ref: str, path: str, delimiter: str = DEFAULT_PATH_SEPARATOR,
             max_amount: int = None):
        """Yield object listings under `path` at `ref`, paginating.

        An empty `delimiter` lists recursively; `max_amount` caps the
        number of yielded entries (None means unlimited).
        """
        after = ''
        amount = 0
        while True:
            response = self._client.objects.list_objects(
                repository=repository,
                ref=ref,
                prefix=path,
                after=after,
                delimiter=delimiter,
                amount=PREFETCH_CURSOR_SIZE)
            for result in response.results:
                yield result
                amount += 1
                if max_amount is not None and amount >= max_amount:
                    return
            if not response.pagination.has_more:
                return  # no more things.
            after = response.pagination.next_offset

    def get_object(self, repository: str, ref: str, path: str):
        """Return a readable handle to the object's content."""
        return self._client.objects.get_object(
            repository=repository,
            ref=ref,
            path=path)

    def stat_object(self, repository: str, ref: str, path: str):
        """Return the object's metadata (size, mtime, ...)."""
        return self._client.objects.stat_object(
            repository=repository,
            ref=ref,
            path=path)
def get_filesystem(client: Client, repository: str, ref: str) -> PyFileSystem:
    """Return a PyArrow-compatible, read-only filesystem over lakeFS."""
    return pyarrow_fs(client=client, repository=repository, ref=ref)


# Identifier reported by LakeFSFileSystem.get_type_name()
LAKEFS_TYPE_NAME = 'lakefs'


def pyarrow_fs(client: Client, repository: str, ref: str):
    """
    A wrapper that returns a pyarrow.fs.PyFileSystem from the LakeFSFileSystem implementation.
    """
    return PyFileSystem(LakeFSFileSystem(client, repository, ref))
def get_file_info(path: str, file_type: FileType, size_bytes: int = 0, mtime_ts: int = 0) -> FileInfo:
    """
    Build a pyarrow.FileInfo for the given path metadata.

    Used to convert lakeFS statObject/listObjects responses to PyArrow's format.
    """
    modified_at = datetime.datetime.fromtimestamp(mtime_ts)
    return FileInfo(path=path, type=file_type, size=size_bytes, mtime=modified_at)
class LakeFSFileSystem(FileSystemHandler):
"""
A naive read-only implementation of a PyArrow FileSystem.
Just enough here to be able to read a ParquetFile and a ParquetDataSet:
Be warned: the current implementation is naive and will read entire objects into memory.
Examples:
>>> import lakefs
>>> import pyarrow.parquet as pq
>>>
>>> client = lakefs.Client('http://localhost:8000', '<lakeFS access key ID>', '<lakeFS secret key>')
>>> fs = lakefs.get_filesystem(client, 'my-repo-name', 'my-branch')
>>>
>>> # Do some schema validation
>>> schema = pq.read_schema(fs.open_input_file('some_file.parquet'))
>>> for field in schema:
>>> if field.name.startswith('user_'):
>>> raise ValueError('user identifying columns are not allowed!')
>>>
>>> # read a dataset and explore the data
>>> dataset = pq.ParquetDataset('collections/events/', filesystem=client.filesystem('my-repo-name', 'my-branch'))
>>> table = dataset.read_pandas()
>>> assert len(table) > 50000
"""
def __init__(self, client: Client, repository: str, ref: str, *args, **kwargs):
super().__init__(*args, **kwargs)
self._client = client
self.repository = repository
self.ref = ref
def copy_file(self, src: str, dst: str):
pass
def create_dir(self, path: str, recursive: bool = True):
pass
def delete_dir(self, path: str):
pass
def delete_dir_contents(self, path: str, accept_root_dir: bool = False):
pass
def delete_file(self, path: str):
pass
def get_file_info(self, paths_or_selector):
if isinstance(paths_or_selector, str):
return self._get_file_info(paths_or_selector)
return [self._get_file_info(p) for p in paths_or_selector]
def normalize_path(self, path):
return path
def move(self, src: str, dst: str):
pass
def open_append_stream(self, path: str, compression: str = 'detect', buffer_size: int = None):
pass
def open_input_file(self, source: str, compression: str = 'detect', buffer_size: int = None) -> NativeFile:
obj = self._client.get_object(self.repository, self.ref, source)
return BufferReader(obj.read())
def open_input_stream(self, source: str, compression: str = 'detect', buffer_size: int = None):
pass
def open_output_stream(self, path: str, compression: str = 'detect', buffer_size: int = None):
pass
    def delete_root_dir_contents(self, path: str, accept_root_dir: bool = False):
        """No-op stub: wiping the repository root is not implemented."""
        pass
def get_file_info_selector(self, selector: Union[FileSelector, str, Tuple[str]]):
delimiter = DEFAULT_PATH_SEPARATOR
path = selector
if isinstance(selector, FileSelector):
path = selector.base_dir
if selector.recursive:
delimiter = ''
entries = list(self._list_entries(path, delimiter))
return entries
    def get_type_name(self, *args, **kwargs):
        """Return the filesystem type identifier (module constant LAKEFS_TYPE_NAME)."""
        return LAKEFS_TYPE_NAME
def _get_file_info(self, path) -> FileInfo:
if path.endswith(DEFAULT_PATH_SEPARATOR):
# Check it exists
if next(self._list_entries(path, max_amount=1), None):
# this doesn't exist!
return get_file_info(path, FileType.NotFound)
return get_file_info(path, FileType.Directory)
# get file
try:
stat = self._client.stat_object(repository=self.repository, ref=self.ref, path=path)
except NotFoundException:
return get_file_info(path, FileType.NotFound) # this doesn't exist!
return get_file_info(path, FileType.File, stat.size_bytes, stat.mtime)
def _list_entries(self, path: str, delimiter: str = DEFAULT_PATH_SEPARATOR, max_amount: int = None):
for result in self._client.list(self.repository, self.ref, path, delimiter, max_amount):
if result.path_type == 'object':
yield get_file_info(result.path, FileType.File, result.size_bytes, result.mtime)
else:
yield get_file_info(result.path, FileType.Directory)
| 38.334646 | 117 | 0.630584 |
d21df65762eda85ca481063fa1e8b1c667c4b45a | 7,664 | py | Python | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2017_03_30/_compute_management_client.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 1 | 2021-06-02T08:01:35.000Z | 2021-06-02T08:01:35.000Z | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2017_03_30/_compute_management_client.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 226 | 2019-07-24T07:57:21.000Z | 2019-10-15T01:07:24.000Z | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2017_03_30/_compute_management_client.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.service_client import SDKClient
from msrest import Serializer, Deserializer
from ._configuration import ComputeManagementClientConfiguration
from .operations import AvailabilitySetsOperations
from .operations import VirtualMachineExtensionImagesOperations
from .operations import VirtualMachineExtensionsOperations
from .operations import VirtualMachinesOperations
from .operations import VirtualMachineImagesOperations
from .operations import UsageOperations
from .operations import VirtualMachineSizesOperations
from .operations import ImagesOperations
from .operations import ResourceSkusOperations
from .operations import VirtualMachineScaleSetsOperations
from .operations import VirtualMachineScaleSetExtensionsOperations
from .operations import VirtualMachineScaleSetRollingUpgradesOperations
from .operations import VirtualMachineScaleSetVMsOperations
from .operations import DisksOperations
from .operations import SnapshotsOperations
from .operations import VirtualMachineRunCommandsOperations
from . import models
class ComputeManagementClient(SDKClient):
    """Compute Client for the Azure Compute REST API (api-version 2017-03-30).

    Each Compute resource family is exposed after construction as an
    operations-group attribute — ``availability_sets``, ``virtual_machines``,
    ``virtual_machine_scale_sets``, ``disks``, ``snapshots``, and so on —
    all sharing the client's configuration, serializer and deserializer.
    See :data:`_OPERATION_GROUPS` for the full attribute list.

    :ivar config: Configuration for client.
    :vartype config: ComputeManagementClientConfiguration
    :param credentials: Credentials needed for the client to connect to Azure.
    :type credentials: :mod:`A msrestazure Credentials
     object<msrestazure.azure_active_directory>`
    :param subscription_id: Subscription credentials which uniquely identify
     Microsoft Azure subscription. The subscription ID forms part of the URI
     for every service call.
    :type subscription_id: str
    :param str base_url: Service URL
    """

    # (attribute name, operations class) pairs. One operations-group
    # attribute is wired up per entry — the same wiring the generated code
    # otherwise spells out as sixteen near-identical assignments.
    _OPERATION_GROUPS = (
        ('availability_sets', AvailabilitySetsOperations),
        ('virtual_machine_extension_images', VirtualMachineExtensionImagesOperations),
        ('virtual_machine_extensions', VirtualMachineExtensionsOperations),
        ('virtual_machines', VirtualMachinesOperations),
        ('virtual_machine_images', VirtualMachineImagesOperations),
        ('usage', UsageOperations),
        ('virtual_machine_sizes', VirtualMachineSizesOperations),
        ('images', ImagesOperations),
        ('resource_skus', ResourceSkusOperations),
        ('virtual_machine_scale_sets', VirtualMachineScaleSetsOperations),
        ('virtual_machine_scale_set_extensions', VirtualMachineScaleSetExtensionsOperations),
        ('virtual_machine_scale_set_rolling_upgrades', VirtualMachineScaleSetRollingUpgradesOperations),
        ('virtual_machine_scale_set_vms', VirtualMachineScaleSetVMsOperations),
        ('disks', DisksOperations),
        ('snapshots', SnapshotsOperations),
        ('virtual_machine_run_commands', VirtualMachineRunCommandsOperations),
    )

    def __init__(self, credentials, subscription_id, base_url=None):
        self.config = ComputeManagementClientConfiguration(credentials, subscription_id, base_url)
        super(ComputeManagementClient, self).__init__(self.config.credentials, self.config)

        # Collect every model class for (de)serialization.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self.api_version = '2017-03-30'
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)

        # Instantiate one operations group per table entry, all sharing the
        # same client plumbing.
        for attr_name, operations_cls in self._OPERATION_GROUPS:
            setattr(self, attr_name, operations_cls(
                self._client, self.config, self._serialize, self._deserialize))
a726e8dca6572f1c1947d23164f0b000af527bb4 | 3,855 | py | Python | src/utils.py | asedeki/newflow | d8d749e2569babc41f27869bd368fd99f6c1cb5c | [
"Unlicense"
] | null | null | null | src/utils.py | asedeki/newflow | d8d749e2569babc41f27869bd368fd99f6c1cb5c | [
"Unlicense"
] | null | null | null | src/utils.py | asedeki/newflow | d8d749e2569babc41f27869bd368fd99f6c1cb5c | [
"Unlicense"
] | null | null | null | import difflib
from numba import jit, njit
# import future.concurrent
def best_match_dict(parameters: dict, match_param_keys) -> dict:
    """Remap the keys of *parameters* onto their closest names in *match_param_keys*.

    Each key is fuzzy-matched (difflib, default cutoff) against the allowed
    names; keys with no close match are dropped. When two input keys match
    the same canonical name, the later one wins.

    Args:
        parameters: Mapping whose keys may be misspelled / non-canonical.
        match_param_keys: Iterable of canonical key names.

    Returns:
        dict keyed by the matched canonical names.
    """
    remapped = {}
    for key, value in parameters.items():
        candidates = difflib.get_close_matches(key, match_param_keys, n=1)
        if candidates:
            remapped[candidates[0]] = value
    return remapped
def best_match_list(inilist: list, match_list: list) -> list:
    """Fuzzy-match each name in *inilist* against *match_list*.

    Names with no close match (difflib, default cutoff) are dropped; the
    rest are replaced by their best canonical match, preserving order.
    """
    hits = (difflib.get_close_matches(name, match_list, n=1) for name in inilist)
    return [hit[0] for hit in hits if hit]
@njit(cache=True)
def rg_equations_interaction(dg1, dg2, dg3, self_g1, self_g2,
                             self_g3, loopsPeierls, loopsCooper):
    """Accumulate one flow step of the coupling derivatives in place.

    Appears to implement one-loop renormalization-group flow equations for
    three momentum-resolved couplings g1/g2/g3 using precomputed Peierls
    and Cooper loop integrals — TODO confirm against the model's write-up.

    Args:
        dg1, dg2, dg3: Output arrays, shape (Np, Np, Np); contributions are
            ADDED to them (callers must zero them beforehand if needed).
        self_g1, self_g2, self_g3: Current coupling arrays, same shape.
        loopsPeierls, loopsCooper: Loop integrals, indexed [k, kp, q];
            Np is taken from loopsPeierls.shape[0].

    All momentum indices are taken modulo Np, so negative indices wrap
    (numpy-style), letting the loops run over [-Np/2, Np/2).
    """
    Np = loopsPeierls.shape[0]
    N2 = Np // 2
    # Loop bounds over the symmetric momentum window [-N2, N2).
    inds = (-N2, N2)
    #inds = (0, Np)
    for k1 in range(inds[0], inds[1]):
        for k2 in range(inds[0], inds[1]):
            qc = (k1 + k2) % Np  # total (Cooper) momentum
            # Only k3 <= 0 is computed here; k3 > 0 is filled by the
            # inversion symmetry pass at the bottom of the function.
            for k3 in range(inds[0], 1):
                qp = (k3 - k2) % Np   # Peierls momentum transfer
                qpp = (k1 - k3) % Np  # second Peierls transfer
                k4 = (k1 + k2 - k3) % Np  # momentum conservation
                i = (k1, k2, k3)
                # Internal momentum kp is summed over the full window.
                for kp in range(inds[0], inds[1]):
                    IP = loopsPeierls[k2, kp, qp]
                    IP2 = loopsPeierls[k2, kp, -qp]
                    IC = loopsCooper[k1, kp, qc]
                    IPP = loopsPeierls[k3, kp, qpp]
                    # Index tuples for the various vertex combinations.
                    m1 = (k1, k2, kp)
                    m2 = (kp, (qc - kp) % Np, k3)
                    m3 = (k1, (kp - qp) % Np, kp)
                    m4 = (kp, k2, k3)
                    m5 = (k1, kp, (kp + qp) % Np)
                    m6 = (k2, (kp + qp) % Np, k3)
                    m7 = (k2, (kp + qp) % Np, kp)
                    m8 = ((kp + qp) % Np, k2, k3)
                    m9 = (k2, (kp + qpp) % Np, kp)
                    m10 = (k2, (kp + qpp) % Np, k4)
                    m11 = (k1, kp, (kp + qpp) % Np)
                    m12 = (k1, kp, k3)
                    # g1 flow: Cooper and Peierls channel contributions.
                    dg1[i] += -0.5 * (
                        (self_g2[m1] * self_g1[m2]
                         + self_g1[m1] * self_g2[m2]
                         ) * IC
                        - (
                            self_g2[m3] * self_g1[m4]
                            + self_g1[m3] * self_g2[m4]
                            - 2 * self_g1[m3] * self_g1[m4]
                        ) * IP2
                    )
                    # g1 flow: g3-g3 (umklapp-squared) contribution.
                    dg1[i] += 0.5 * (
                        self_g3[m5] * self_g3[m6]
                        + self_g3[m7] * self_g3[k1, kp, k4]
                        - 2.0 * self_g3[m5] * self_g3[m8]
                    ) * IP
                    # g2 flow.
                    dg2[i] += 0.5 * (
                        +(
                            - self_g2[m1] * self_g2[m2]
                            - self_g1[m1] * self_g1[m2]
                        ) * IC
                        + self_g2[m3] * self_g2[m4] * IP2
                    )
                    dg2[i] += 0.5 * self_g3[k1, kp, k4] * self_g3[m6] * IP
                    # g3 flow: mixed g3 with g1/g2 vertices.
                    dg3[i] += 0.5 * (
                        self_g3[m5] * self_g2[m7]
                        + self_g3[k1, kp, k4] * self_g1[m7]
                        + self_g2[m5] * self_g3[m7]
                        + self_g1[m5] * self_g3[m6]
                        - 2 * self_g1[m7] * self_g3[m5]
                        - 2 * self_g3[m7] * self_g1[m5]
                    ) * IP
                    dg3[i] += 0.5 * (
                        self_g3[m12] * self_g2[m9]
                        + self_g3[m10] * self_g2[m11]
                    ) * IPP
    # Fill the k3 > 0 half from the computed half via the inversion
    # symmetry g(k1, k2, k3) = g(-k1, -k2, -k3).
    for k1 in range(-N2, N2):
        for k2 in range(-N2, N2):
            for k3 in range(1, N2):
                dg1[k1, k2, k3] = dg1[-k1, -k2, -k3]
                dg2[k1, k2, k3] = dg2[-k1, -k2, -k3]
                dg3[k1, k2, k3] = dg3[-k1, -k2, -k3]
e9c60ee8401173e511d049e9b40ce1be0bdd6ef9 | 2,355 | py | Python | feature.py | devashishp/Content-Based-Image-Retrieval | 86f08255694452d7fd2abaf5ab595f5f3f4e6a18 | [
"MIT"
] | 12 | 2017-06-30T03:23:16.000Z | 2020-10-23T13:03:01.000Z | feature.py | devashishp/Content-Based-Image-Retrieval | 86f08255694452d7fd2abaf5ab595f5f3f4e6a18 | [
"MIT"
] | null | null | null | feature.py | devashishp/Content-Based-Image-Retrieval | 86f08255694452d7fd2abaf5ab595f5f3f4e6a18 | [
"MIT"
] | null | null | null | import cv2
import numpy as np
from matplotlib import pyplot as plt
from prelib import preprocess
import pywt
def segment(img):
    """Split a 512x512 image into sixteen 128x128 tiles, row-major.

    Args:
        img: 2-D array-like supporting 2-D slicing, at least 512x512.

    Returns:
        list of 16 tile views, ordered left-to-right then top-to-bottom.
    """
    tile = 128
    tiles = []
    for row in range(4):
        for col in range(4):
            r0 = row * tile
            c0 = col * tile
            tiles.append(img[r0:r0 + tile, c0:c0 + tile])
    return tiles
def localfeature(seg):
    """Build a local feature vector from 16 image tiles via Haar wavelet packets.

    For each tile a 2-D Haar wavelet-packet decomposition is taken; the
    approximation branch is recursively reassembled down several levels,
    then singular values of four sub-bands per tile are stacked into one
    integer feature vector. Assumes *seg* holds 16 tiles of 128x128 (as
    produced by segment()) — TODO confirm tile size against caller.

    Args:
        seg: sequence of 16 2-D arrays.

    Returns:
        1-column int array concatenating per-tile singular-value stacks.
    """
    ll2=[]
    lh1=[]
    hl1=[]
    hh1=[]
    dwvt=[]
    ht = []
    vt = []
    for i in range(16):
        # Full wavelet-packet tree for this tile ('a'=approx, 'h'/'v'/'d'
        # = horizontal/vertical/diagonal detail).
        wp = pywt.WaveletPacket2D(data=seg[i],wavelet='haar',mode='sym')
        lh1.append(wp['v'].data)
        hl1.append(wp['h'].data)
        hh1.append(wp['d'].data)
        # Reassemble levels 1-4 of the approximation branch; each "level"
        # tiles the four sub-bands of the next-deeper node into a quad.
        level1 = np.hstack((np.vstack((wp['aa'].data,wp['vv'].data)),np.vstack((wp['hh'].data,wp['dd'].data))))
        level2 = np.hstack((np.vstack((wp['aaa'].data,wp['vvv'].data)),np.vstack((wp['hhh'].data,wp['ddd'].data))))
        level3 = np.hstack((np.vstack((wp['aaaa'].data,wp['vvvv'].data)),np.vstack((wp['hhhh'].data,wp['dddd'].data))))
        level4 = np.hstack((np.vstack((wp['aaaaa'].data,wp['vvvvv'].data)),np.vstack((wp['hhhhh'].data,wp['ddddd'].data))))
        # Nest the deeper levels into the top-left corner, pyramid style.
        level3[:8,:8] = level4
        level2[:16,:16] = level3
        level1[:32,:32] = level2
        ll2.append(level1)
        # Assemble the full 2x2 band layout for this tile (kept in dwvt;
        # note dwvt is built but never used below — presumably for
        # debugging/visualization).
        vt.append(np.vstack((ll2[i],lh1[i])))
        ht.append(np.vstack((hl1[i],hh1[i])))
        dwvt.append(np.hstack((vt[i],ht[i])))
    s1 = []
    s2 = []
    s3 = []
    s4 = []
    subvector = []
    vector = []
    for i in range(16):
        # Singular values (no U/V) of each of the four bands per tile.
        s1.append(np.linalg.svd(ll2[i], compute_uv=False))
        s2.append(np.linalg.svd(hl1[i], compute_uv=False))
        s3.append(np.linalg.svd(lh1[i], compute_uv=False))
        s4.append(np.linalg.svd(hh1[i], compute_uv=False))
        subvector.append(np.vstack((np.vstack((s1[i],s2[i])),np.vstack((s3[i],s4[i])))))
        vector.append(subvector[i])
    vector1 = np.concatenate(vector,axis=0)
    # Truncate to integers (floor toward zero) for the final descriptor.
    vector1 = np.array(vector1,dtype=int)
    return vector1
def globalfeature(img, gran):
    """Compute a gran x gran binary occupancy grid over a 512x512 image.

    The image is partitioned into gran x gran equal cells; a cell is marked
    1 when any of its pixels is non-zero, else 0.

    BUG FIX: the original used true division ``512/gran``, which on
    Python 3 yields a float and makes the slice indices floats
    (``TypeError: slice indices must be integers``). Integer division
    restores the intended Python 2 behavior.

    Args:
        img: 2-D array of size 512x512 (array-like with .any() on slices).
        gran: Grid granularity; should divide 512 evenly.

    Returns:
        (gran, gran) int array of 0/1 occupancy flags.
    """
    gloseg = np.zeros((gran, gran), dtype=int)
    displ = 512 // gran  # integer cell size, not float division
    for i in range(gran):
        for j in range(gran):
            start = i * displ
            start1 = j * displ
            end = start + displ
            end1 = start1 + displ
            if img[start:end, start1:end1].any():
                gloseg[i, j] = 1
    return gloseg
64aa8bb3565187c84236c3a9ccf44faf0c94206b | 3,341 | py | Python | tmp/utils/utils.py | hanzhichao/runnerz | 3a42b7281972871b16c6347a2e233c5215047b08 | [
"MIT"
] | null | null | null | tmp/utils/utils.py | hanzhichao/runnerz | 3a42b7281972871b16c6347a2e233c5215047b08 | [
"MIT"
] | 2 | 2021-03-31T19:45:39.000Z | 2021-12-13T20:43:55.000Z | tmp/utils/utils.py | hanzhichao/runnerz | 3a42b7281972871b16c6347a2e233c5215047b08 | [
"MIT"
] | null | null | null | import os
import yaml
from string import Template
import operator
import importlib
from logz import log
from tmp.keywords import ACTION, SUB_STEPS, FUNCTIONS, VAIABLES
# Project root: the directory two levels above this module.
BASEDIR = os.path.dirname(os.path.dirname(__file__))
# Module name from which user-defined fixture functions are imported.
FIXTURES_FILE = 'fixtures'
def get_section(data, keywords):
    """Look up a section of *data* by one keyword or a list of alternatives.

    A string keyword is looked up directly; a list is tried in order and
    the first non-None value wins. Returns None when nothing matches.

    Args:
        data: Mapping to search.
        keywords: A key name, or a list of candidate key names.

    Raises:
        TypeError: If *keywords* is neither a str nor a list.
    """
    if isinstance(keywords, str):
        return data.get(keywords)
    if isinstance(keywords, list):
        for key in keywords:
            value = data.get(key)
            if value is not None:
                return value
        return None
    raise TypeError('keywords must be str or list')
def is_step(data):
    """Return True when *data* is a leaf step, i.e. carries no sub-steps section."""
    return get_section(data, SUB_STEPS) is None
def get_function(data, context=None):
    """Resolve the callable that should execute a step.

    Prefers the function named by the step's explicit action keyword;
    otherwise falls back to the first non-None entry in the context's
    function registry whose key is not one of the reserved step fields.
    Returns None when no context is given or nothing resolves.
    """
    if context is None:
        return None
    registry = context.get(FUNCTIONS)
    explicit_action = data.get(ACTION)
    if explicit_action:
        return registry.get(explicit_action)
    reserved = {'name', 'key', 'skip', 'extract', 'validate'}
    for candidate in registry.keys() - reserved:
        func = registry.get(candidate)
        if func is not None:
            return func
    return None
def parse(data, context):
    """Substitute ``$name`` placeholders in *data* using the context variables.

    The structure is round-tripped through YAML so Template substitution
    can reach nested values. When no variables are registered, or no
    placeholder is present, *data* is returned unchanged.

    Args:
        data: Arbitrary YAML-serializable structure possibly containing
            ``$name`` placeholders in its strings.
        context: Mapping holding the variables section.
    """
    variables = context.get(VAIABLES)
    if variables is None:
        return data
    data_str = yaml.safe_dump(data, default_flow_style=False)
    if '$' in data_str:
        data_str = Template(data_str).safe_substitute(variables)
        # NOTE(review): reload placed inside the '$' branch so data without
        # placeholders keeps its original object identity — confirm this
        # matches the intended original indentation.
        data = yaml.safe_load(data_str)
    return data
def do_extract(data: (dict, list), context):
    """Evaluate extraction expressions and store the results as variables.

    Each entry maps a variable name to a Python expression; the expression
    is evaluated against the current context variables and the result is
    written back into them. A single dict is treated as a one-item list.

    NOTE: uses eval() on expressions from the test definition — acceptable
    for a trusted test DSL, but never feed it untrusted input.
    """
    if isinstance(data, dict):
        data = [data]  # normalize: a lone mapping becomes a one-item list
    variables = context.get(VAIABLES)
    for entry in data:
        if not isinstance(entry, dict):
            raise TypeError(f'line: {entry} 必须为字典格式')
        for var_name, expression in entry.items():
            print("提取变量:", var_name, expression)
            # Evaluate against current variables and persist the result.
            variables[var_name] = eval(expression, {}, variables)
def do_check(data, context):
    """Evaluate assertion lines against the context variables and report PASS/FAIL.

    A string line is eval'ed as a boolean expression. A dict line maps an
    ``operator``-module function name to a list of operands; string
    operands are first resolved through the variables (falling back to the
    literal when unresolved, or falsy).

    NOTE: uses eval() on assertion strings from the test definition —
    trusted DSL only.
    """
    variables = context.get(VAIABLES)
    for check in data:
        if isinstance(check, str):
            result = eval(check, {}, variables)  # truthy == assertion passed
        elif isinstance(check, dict):
            for op_name, operands in check.items():
                if hasattr(operator, op_name):
                    op_func = getattr(operator, op_name)
                    # Resolve string operands via the variable table in place.
                    for idx, operand in enumerate(operands):
                        if isinstance(operand, str):
                            operands[idx] = variables.get(operand) or operand
                    result = op_func(*operands)
        print("处理断言:", check, "结果:", "PASS" if result else "FAIL")
def merge_update(dict1, dict2):
    """Deep-merge *dict2* into *dict1* in place.

    Nested dicts are merged recursively; lists present in both are
    concatenated (dict2's items appended); any other overlap, and every
    key missing from dict1, is overwritten/added from dict2.

    BUG FIX: the original tested ``isinstance(item, list)`` on the dict
    *key* (keys can never be lists — they are unhashable), so the
    list-extend branch was unreachable and list values were silently
    replaced instead of merged. The check now inspects the value.

    Args:
        dict1: Destination mapping, mutated in place.
        dict2: Source mapping; not modified.
    """
    for key, value in dict2.items():
        if key in dict1:
            if isinstance(value, dict) and isinstance(dict1[key], dict):
                merge_update(dict1[key], value)
                continue
            if isinstance(value, list) and isinstance(dict1[key], list):
                dict1[key].extend(value)
                continue
        dict1[key] = value
def get_model_functions(model):
    """Collect the public attributes of *model* (typically a module).

    Everything in the object's ``__dict__`` whose name does not start with
    a double underscore is returned, keyed by name.
    """
    return {name: member
            for name, member in model.__dict__.items()
            if not name.startswith('__')}
def get_fixtures():
    """Import the fixtures module and return its public members.

    Returns an empty dict (after logging the exception) when the module
    cannot be imported.
    """
    try:
        module = importlib.import_module(FIXTURES_FILE)
    except Exception as err:
        log.exception(err)
        return {}
    else:
        return get_model_functions(module)
203f48aecc6df8b64abe2500c4624ec959881814 | 60,493 | py | Python | pymatgen/ext/matproj.py | oxana-a/pymatgen | 69393c5fbf88c3e9f148a91c090b4e2f02ac664d | [
"MIT"
] | 1 | 2021-07-23T05:38:15.000Z | 2021-07-23T05:38:15.000Z | pymatgen/ext/matproj.py | nicon2000/pymatgen | ca6a6110f9fa0076e2088050cd96b14d702c0801 | [
"MIT"
] | null | null | null | pymatgen/ext/matproj.py | nicon2000/pymatgen | ca6a6110f9fa0076e2088050cd96b14d702c0801 | [
"MIT"
] | null | null | null | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import sys
import itertools
import json
import platform
import re
import warnings
from time import sleep
from monty.json import MontyDecoder, MontyEncoder
from copy import deepcopy
from pymatgen import SETTINGS, __version__ as pmg_version
from pymatgen.core.composition import Composition
from pymatgen.core.periodic_table import Element
from pymatgen.core.structure import Structure
from pymatgen.core.surface import get_symmetrically_equivalent_miller_indices
from pymatgen.entries.computed_entries import ComputedEntry, \
ComputedStructureEntry
from pymatgen.entries.exp_entries import ExpEntry
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.util.sequence import get_chunks, PBar
"""
This module provides classes to interface with the Materials Project REST
API v2 to enable the creation of data structures and pymatgen objects using
Materials Project data.
To make use of the Materials API, you need to be a registered user of the
Materials Project, and obtain an API key by going to your dashboard at
https://www.materialsproject.org/dashboard.
"""
__author__ = "Shyue Ping Ong, Shreyas Cholia"
__credits__ = "Anubhav Jain"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Feb 22, 2013"
class MPRester:
"""
A class to conveniently interface with the Materials Project REST
interface. The recommended way to use MPRester is with the "with" context
manager to ensure that sessions are properly closed after usage::
with MPRester("API_KEY") as m:
do_something
MPRester uses the "requests" package, which provides for HTTP connection
pooling. All connections are made via https for security.
For more advanced uses of the Materials API, please consult the API
documentation at https://github.com/materialsproject/mapidoc.
Args:
api_key (str): A String API key for accessing the MaterialsProject
REST interface. Please obtain your API key at
https://www.materialsproject.org/dashboard. If this is None,
the code will check if there is a "PMG_MAPI_KEY" setting.
If so, it will use that environment variable. This makes
easier for heavy users to simply add this environment variable to
their setups and MPRester can then be called without any arguments.
endpoint (str): Url of endpoint to access the MaterialsProject REST
interface. Defaults to the standard Materials Project REST
address at "https://materialsproject.org/rest/v2", but
can be changed to other urls implementing a similar interface.
include_user_agent (bool): If True, will include a user agent with the
HTTP request including information on pymatgen and system version
making the API request. This helps MP support pymatgen users, and
is similar to what most web browsers send with each page request.
Set to False to disable the user agent.
"""
supported_properties = ("energy", "energy_per_atom", "volume",
"formation_energy_per_atom", "nsites",
"unit_cell_formula", "pretty_formula",
"is_hubbard", "elements", "nelements",
"e_above_hull", "hubbards", "is_compatible",
"spacegroup", "task_ids", "band_gap", "density",
"icsd_id", "icsd_ids", "cif", "total_magnetization",
"material_id", "oxide_type", "tags", "elasticity")
supported_task_properties = ("energy", "energy_per_atom", "volume",
"formation_energy_per_atom", "nsites",
"unit_cell_formula", "pretty_formula",
"is_hubbard",
"elements", "nelements", "e_above_hull",
"hubbards",
"is_compatible", "spacegroup",
"band_gap", "density", "icsd_id", "cif")
def __init__(self, api_key=None, endpoint=None, include_user_agent=True):
if api_key is not None:
self.api_key = api_key
else:
self.api_key = SETTINGS.get("PMG_MAPI_KEY", "")
if endpoint is not None:
self.preamble = endpoint
else:
self.preamble = SETTINGS.get("PMG_MAPI_ENDPOINT",
"https://materialsproject.org/rest/v2")
if self.preamble != "https://materialsproject.org/rest/v2":
warnings.warn("Non-default endpoint used: {}".format(self.preamble))
import requests
if sys.version_info[0] < 3:
try:
from pybtex import __version__
except ImportError:
warnings.warn("If you query for structure data encoded using MP's "
"Structure Notation Language (SNL) format and you use "
"`mp_decode=True` (the default) for MPRester queries, "
"you should install dependencies via "
"`pip install pymatgen[matproj.snl]`.")
self.session = requests.Session()
self.session.headers = {"x-api-key": self.api_key}
if include_user_agent:
pymatgen_info = "pymatgen/"+pmg_version
python_info = "Python/{}.{}.{}".format(
sys.version_info.major, sys.version_info.minor, sys.version_info.micro)
platform_info = "{}/{}".format(platform.system(), platform.release())
self.session.headers["user-agent"] = "{} ({} {})".format(
pymatgen_info, python_info, platform_info)
    def __enter__(self):
        """
        Support for "with" context. Returns the rester itself.
        """
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        """
        Support for "with" context. Closes the underlying HTTP session;
        exceptions are not suppressed.
        """
        self.session.close()
def _make_request(self, sub_url, payload=None, method="GET",
mp_decode=True):
response = None
url = self.preamble + sub_url
try:
if method == "POST":
response = self.session.post(url, data=payload, verify=True)
else:
response = self.session.get(url, params=payload, verify=True)
if response.status_code in [200, 400]:
if mp_decode:
data = json.loads(response.text, cls=MontyDecoder)
else:
data = json.loads(response.text)
if data["valid_response"]:
if data.get("warning"):
warnings.warn(data["warning"])
return data["response"]
else:
raise MPRestError(data["error"])
raise MPRestError("REST query returned with error status code {}"
.format(response.status_code))
except Exception as ex:
msg = "{}. Content: {}".format(str(ex), response.content) \
if hasattr(response, "content") else str(ex)
raise MPRestError(msg)
    def get_materials_id_from_task_id(self, task_id):
        """
        Returns a new MP materials id from a task id (which can be
        equivalent to an old materials id)

        Args:
            task_id (str): A task id.

        Returns:
            materials_id (str)

        Raises:
            MPRestError: If the REST query fails.
        """
        return self._make_request("/materials/mid_from_tid/%s" % task_id)
    def get_materials_id_references(self, material_id):
        """
        Returns all references for a materials id.

        Args:
            material_id (str): A material id.

        Returns:
            BibTeX (str)

        Raises:
            MPRestError: If the REST query fails.
        """
        return self._make_request("/materials/%s/refs" % material_id)
    def get_data(self, chemsys_formula_id, data_type="vasp", prop=""):
        """
        Flexible method to get any data using the Materials Project REST
        interface. Generally used by other methods for more specific queries.

        Format of REST return is *always* a list of dict (regardless of the
        number of pieces of data returned. The general format is as follows:

        [{"material_id": material_id, "property_name" : value}, ...]

        This is generally a call to
        https://www.materialsproject.org/rest/v2/materials/vasp/<prop>.
        See https://github.com/materialsproject/mapidoc for details.

        Args:
            chemsys_formula_id (str): A chemical system (e.g., Li-Fe-O),
                or formula (e.g., Fe2O3) or materials_id (e.g., mp-1234).
            data_type (str): Type of data to return. Currently can either be
                "vasp" or "exp".
            prop (str): Property to be obtained. Should be one of the
                MPRester.supported_task_properties. Leave as empty string for a
                general list of useful properties.

        Returns:
            List of dicts, one per matching material.
        """
        sub_url = "/materials/%s/%s" % (chemsys_formula_id, data_type)
        if prop:
            sub_url += "/" + prop
        return self._make_request(sub_url)
    def get_materials_ids(self, chemsys_formula):
        """
        Get all materials ids for a formula or chemsys.

        Args:
            chemsys_formula (str): A chemical system (e.g., Li-Fe-O),
                or formula (e.g., Fe2O3).

        Returns:
            ([str]) List of all materials ids.

        Note:
            mp_decode is disabled — the reply is a plain list of strings.
        """
        return self._make_request("/materials/%s/mids" % chemsys_formula,
                                  mp_decode=False)
    def get_doc(self, materials_id):
        """
        Get the entire data document for one materials id. Use this judiciously.

        REST Endpoint: https://www.materialsproject.org/materials/<mp-id>/doc.

        Args:
            materials_id (str): E.g., mp-1143 for Al2O3

        Returns:
            Dict of json document of all data that is displayed on a materials
            details page. (Plain JSON; mp_decode is disabled.)
        """
        return self._make_request("/materials/%s/doc" % materials_id,
                                  mp_decode=False)
def get_xas_data(self, material_id, absorbing_element):
"""
Get X-ray absorption spectroscopy data for absorbing element in the
structure corresponding to a material_id. Only X-ray Absorption Near Edge
Structure (XANES) for K-edge is supported.
REST Endpoint:
https://www.materialsproject.org/materials/<mp-id>/xas/<absorbing_element>.
Args:
material_id (str): E.g., mp-1143 for Al2O3
absorbing_element (str): The absorbing element in the corresponding
structure. E.g., Al in Al2O3
"""
element_list = self.get_data(material_id,
prop="elements")[0]["elements"]
if absorbing_element not in element_list:
raise ValueError(
"{} element not contained in corresponding structure with "\
"mp_id: {}".format(absorbing_element, material_id))
data = self._make_request(
"/materials/{}/xas/{}".format(material_id, absorbing_element),
mp_decode=False)
return data[0]
    def get_task_data(self, chemsys_formula_id, prop=""):
        """
        Flexible method to get any data using the Materials Project REST
        interface. Generally used by other methods for more specific queries.
        Unlike the :func:`get_data`_, this method queries the task collection
        for specific run information.

        Format of REST return is *always* a list of dict (regardless of the
        number of pieces of data returned. The general format is as follows:

        [{"material_id": material_id, "property_name" : value}, ...]

        Args:
            chemsys_formula_id (str): A chemical system (e.g., Li-Fe-O),
                or formula (e.g., Fe2O3) or materials_id (e.g., mp-1234).
            prop (str): Property to be obtained. Should be one of the
                MPRester.supported_properties. Leave as empty string for a
                general list of useful properties.

        Returns:
            List of dicts, one per matching task.
        """
        sub_url = "/tasks/%s" % chemsys_formula_id
        if prop:
            sub_url += "/" + prop
        return self._make_request(sub_url)
def get_structures(self, chemsys_formula_id, final=True):
"""
Get a list of Structures corresponding to a chemical system, formula,
or materials_id.
Args:
chemsys_formula_id (str): A chemical system (e.g., Li-Fe-O),
or formula (e.g., Fe2O3) or materials_id (e.g., mp-1234).
final (bool): Whether to get the final structure, or the initial
(pre-relaxation) structure. Defaults to True.
Returns:
List of Structure objects.
"""
prop = "final_structure" if final else "initial_structure"
data = self.get_data(chemsys_formula_id, prop=prop)
return [d[prop] for d in data]
def find_structure(self, filename_or_structure):
"""
Finds matching structures on the Materials Project site.
Args:
filename_or_structure: filename or Structure object
Returns:
A list of matching structures.
Raises:
MPRestError
"""
try:
if isinstance(filename_or_structure, str):
s = Structure.from_file(filename_or_structure)
elif isinstance(filename_or_structure, Structure):
s = filename_or_structure
else:
raise MPRestError("Provide filename or Structure object.")
payload = {'structure': json.dumps(s.as_dict(), cls=MontyEncoder)}
response = self.session.post(
'{}/find_structure'.format(self.preamble), data=payload
)
if response.status_code in [200, 400]:
resp = json.loads(response.text, cls=MontyDecoder)
if resp['valid_response']:
return resp['response']
else:
raise MPRestError(resp["error"])
raise MPRestError("REST error with status code {} and error {}"
.format(response.status_code, response.text))
except Exception as ex:
raise MPRestError(str(ex))
    def get_entries(self, chemsys_formula_id_criteria, compatible_only=True,
                    inc_structure=None, property_data=None,
                    conventional_unit_cell=False, sort_by_e_above_hull=False):
        """
        Get a list of ComputedEntries or ComputedStructureEntries corresponding
        to a chemical system, formula, or materials_id or full criteria.

        Args:
            chemsys_formula_id_criteria (str/dict): A chemical system
                (e.g., Li-Fe-O), or formula (e.g., Fe2O3) or materials_id
                (e.g., mp-1234) or full Mongo-style dict criteria.
            compatible_only (bool): Whether to return only "compatible"
                entries. Compatible entries are entries that have been
                processed using the MaterialsProjectCompatibility class,
                which performs adjustments to allow mixing of GGA and GGA+U
                calculations for more accurate phase diagrams and reaction
                energies.
            inc_structure (str): If None, entries returned are
                ComputedEntries. If inc_structure="initial",
                ComputedStructureEntries with initial structures are returned.
                Otherwise, ComputedStructureEntries with final structures
                are returned.
            property_data (list): Specify additional properties to include in
                entry.data. If None, no data. Should be a subset of
                supported_properties.
            conventional_unit_cell (bool): Whether to get the standard
                conventional unit cell
            sort_by_e_above_hull (bool): Whether to sort the list of entries by
                e_above_hull (will query e_above_hull as a property_data if True).

        Returns:
            List of ComputedEntry or ComputedStructureEntry objects.
        """
        # TODO: This is a very hackish way of doing this. It should be fixed
        # on the REST end.
        # Run parameters always fetched so compatibility processing can work.
        params = ["run_type", "is_hubbard", "pseudo_potential", "hubbards",
                  "potcar_symbols", "oxide_type"]
        props = ["energy", "unit_cell_formula", "task_id"] + params
        # Sorting needs e_above_hull in the returned data, so force-add it.
        if sort_by_e_above_hull:
            if property_data and "e_above_hull" not in property_data:
                property_data.append("e_above_hull")
            elif not property_data:
                property_data = ["e_above_hull"]
        if property_data:
            props += property_data
        if inc_structure:
            if inc_structure == "initial":
                props.append("initial_structure")
            else:
                props.append("structure")

        # Strings are parsed into Mongo criteria; dicts are used verbatim.
        if not isinstance(chemsys_formula_id_criteria, dict):
            criteria = MPRester.parse_criteria(chemsys_formula_id_criteria)
        else:
            criteria = chemsys_formula_id_criteria
        data = self.query(criteria, props)

        entries = []
        for d in data:
            # Reconstruct "<functional> <label>" POTCAR symbol strings.
            d["potcar_symbols"] = [
                "%s %s" % (d["pseudo_potential"]["functional"], l)
                for l in d["pseudo_potential"]["labels"]]
            # NOTE: rebinds the name `data` to the per-entry payload dict,
            # shadowing the query-result list (safe: the for-loop already
            # holds its iterator), but confusing to read.
            data = {"oxide_type": d["oxide_type"]}
            if property_data:
                data.update({k: d[k] for k in property_data})
            if not inc_structure:
                e = ComputedEntry(d["unit_cell_formula"], d["energy"],
                                  parameters={k: d[k] for k in params},
                                  data=data,
                                  entry_id=d["task_id"])
            else:
                prim = d["initial_structure"] if inc_structure == "initial" \
                    else d["structure"]
                if conventional_unit_cell:
                    s = SpacegroupAnalyzer(prim).get_conventional_standard_structure()
                    # Scale the energy with the cell-size change.
                    energy = d["energy"] * (len(s) / len(prim))
                else:
                    s = prim.copy()
                    energy = d["energy"]
                e = ComputedStructureEntry(
                    s, energy,
                    parameters={k: d[k] for k in params},
                    data=data,
                    entry_id=d["task_id"])
            entries.append(e)
        if compatible_only:
            # Local import to avoid a circular dependency at module load.
            from pymatgen.entries.compatibility import \
                MaterialsProjectCompatibility
            entries = MaterialsProjectCompatibility().process_entries(entries)
        if sort_by_e_above_hull:
            entries = sorted(entries, key=lambda entry: entry.data["e_above_hull"])
        return entries
    def get_pourbaix_entries(self, chemsys):
        """
        A helper function to get all entries necessary to generate
        a pourbaix diagram from the rest interface.

        Args:
            chemsys ([str]): A list of elements comprising the chemical
                system, e.g. ['Li', 'Fe']

        Returns:
            List of PourbaixEntry objects: ion entries (with energies shifted
            to the most stable computed reference solid) followed by solid
            entries whose energies are replaced by formation energies.

        Raises:
            ValueError: if an ion's reference solid is not found among the
                downloaded entries.
        """
        # Local imports avoid import cycles / heavy module import cost.
        from pymatgen.analysis.pourbaix_diagram import PourbaixEntry, IonEntry
        from pymatgen.analysis.phase_diagram import PhaseDiagram
        from pymatgen.core.ion import Ion
        from pymatgen.entries.compatibility import \
            MaterialsProjectAqueousCompatibility
        pbx_entries = []
        # Get ion entries first, because certain ions have reference
        # solids that aren't necessarily in the chemsys (Na2SO4)
        url = '/pourbaix_diagram/reference_data/' + '-'.join(chemsys)
        ion_data = self._make_request(url)
        ion_ref_comps = [Composition(d['Reference Solid']) for d in ion_data]
        ion_ref_elts = list(itertools.chain.from_iterable(
            i.elements for i in ion_ref_comps))
        # O and H are always included since Pourbaix diagrams involve water.
        ion_ref_entries = self.get_entries_in_chemsys(
            list(set([str(e) for e in ion_ref_elts] + ['O', 'H'])),
            property_data=['e_above_hull'], compatible_only=False)
        compat = MaterialsProjectAqueousCompatibility("Advanced")
        ion_ref_entries = compat.process_entries(ion_ref_entries)
        ion_ref_pd = PhaseDiagram(ion_ref_entries)
        # position the ion energies relative to most stable reference state
        for n, i_d in enumerate(ion_data):
            ion_entry = IonEntry(Ion.from_formula(i_d['Name']), i_d['Energy'])
            refs = [e for e in ion_ref_entries
                    if e.composition.reduced_formula == i_d['Reference Solid']]
            if not refs:
                raise ValueError("Reference solid not contained in entry list")
            # Most stable polymorph of the reference solid (lowest e_above_hull).
            stable_ref = sorted(refs, key=lambda x: x.data['e_above_hull'])[0]
            rf = stable_ref.composition.get_reduced_composition_and_factor()[1]
            # Difference between our computed formation energy and the tabulated
            # reference solid energy, scaled per reduced formula unit.
            solid_diff = ion_ref_pd.get_form_energy(stable_ref) \
                - i_d['Reference solid energy'] * rf
            elt = i_d['Major_Elements'][0]
            # Scale the shift by the ion's content of the major element relative
            # to the reference solid's content of that element.
            correction_factor = ion_entry.ion.composition[elt] \
                / stable_ref.composition[elt]
            ion_entry.energy += solid_diff * correction_factor
            pbx_entries.append(PourbaixEntry(ion_entry, 'ion-{}'.format(n)))
        # Construct the solid pourbaix entries from filtered ion_ref entries
        extra_elts = set(ion_ref_elts) - {Element(s) for s in chemsys} \
            - {Element('H'), Element('O')}
        for entry in ion_ref_entries:
            entry_elts = set(entry.composition.elements)
            # Ensure no OH chemsys or extraneous elements from ion references
            if not (entry_elts <= {Element('H'), Element('O')} or \
                    extra_elts.intersection(entry_elts)):
                # replace energy with formation energy, use dict to
                # avoid messing with the ion_ref_pd and to keep all old params
                form_e = ion_ref_pd.get_form_energy(entry)
                new_entry = deepcopy(entry)
                new_entry.uncorrected_energy = form_e
                new_entry.correction = 0.0
                pbx_entry = PourbaixEntry(new_entry)
                pbx_entries.append(pbx_entry)
        return pbx_entries
def get_structure_by_material_id(self, material_id, final=True,
conventional_unit_cell=False):
"""
Get a Structure corresponding to a material_id.
Args:
material_id (str): Materials Project material_id (a string,
e.g., mp-1234).
final (bool): Whether to get the final structure, or the initial
(pre-relaxation) structure. Defaults to True.
conventional_unit_cell (bool): Whether to get the standard
conventional unit cell
Returns:
Structure object.
"""
prop = "final_structure" if final else "initial_structure"
data = self.get_data(material_id, prop=prop)
if conventional_unit_cell:
data[0][prop] = SpacegroupAnalyzer(data[0][prop]). \
get_conventional_standard_structure()
return data[0][prop]
def get_entry_by_material_id(self, material_id, compatible_only=True,
inc_structure=None, property_data=None,
conventional_unit_cell=False):
"""
Get a ComputedEntry corresponding to a material_id.
Args:
material_id (str): Materials Project material_id (a string,
e.g., mp-1234).
compatible_only (bool): Whether to return only "compatible"
entries. Compatible entries are entries that have been
processed using the MaterialsProjectCompatibility class,
which performs adjustments to allow mixing of GGA and GGA+U
calculations for more accurate phase diagrams and reaction
energies.
inc_structure (str): If None, entries returned are
ComputedEntries. If inc_structure="final",
ComputedStructureEntries with final structures are returned.
Otherwise, ComputedStructureEntries with initial structures
are returned.
property_data (list): Specify additional properties to include in
entry.data. If None, no data. Should be a subset of
supported_properties.
conventional_unit_cell (bool): Whether to get the standard
conventional unit cell
Returns:
ComputedEntry or ComputedStructureEntry object.
"""
data = self.get_entries(material_id, compatible_only=compatible_only,
inc_structure=inc_structure,
property_data=property_data,
conventional_unit_cell=conventional_unit_cell)
return data[0]
def get_dos_by_material_id(self, material_id):
"""
Get a Dos corresponding to a material_id.
REST Endpoint: https://www.materialsproject.org/rest/v2/materials/<mp-id>/vasp/dos
Args:
material_id (str): Materials Project material_id (a string,
e.g., mp-1234).
Returns:
A Dos object.
"""
data = self.get_data(material_id, prop="dos")
return data[0]["dos"]
def get_bandstructure_by_material_id(self, material_id, line_mode=True):
"""
Get a BandStructure corresponding to a material_id.
REST Endpoint: https://www.materialsproject.org/rest/v2/materials/<mp-id>/vasp/bandstructure or
https://www.materialsproject.org/rest/v2/materials/<mp-id>/vasp/bandstructure_uniform
Args:
material_id (str): Materials Project material_id.
line_mode (bool): If True, fetch a BandStructureSymmLine object
(default). If False, return the uniform band structure.
Returns:
A BandStructure object.
"""
prop = "bandstructure" if line_mode else "bandstructure_uniform"
data = self.get_data(material_id, prop=prop)
return data[0][prop]
def get_phonon_dos_by_material_id(self, material_id):
"""
Get phonon density of states data corresponding to a material_id.
Args:
material_id (str): Materials Project material_id.
Returns:
CompletePhononDos: A phonon DOS object.
"""
return self._make_request("/materials/{}/phonondos".format(material_id))
def get_phonon_bandstructure_by_material_id(self, material_id):
"""
Get phonon dispersion data corresponding to a material_id.
Args:
material_id (str): Materials Project material_id.
Returns:
PhononBandStructureSymmLine: A phonon band structure.
"""
return self._make_request("/materials/{}/phononbs".format(material_id))
def get_phonon_ddb_by_material_id(self, material_id):
"""
Get ABINIT Derivative Data Base (DDB) output for phonon calculations.
Args:
material_id (str): Materials Project material_id.
Returns:
str: ABINIT DDB file as a string.
"""
return self._make_request("/materials/{}/abinit_ddb"
.format(material_id))
def get_entries_in_chemsys(self, elements, compatible_only=True,
inc_structure=None, property_data=None,
conventional_unit_cell=False):
"""
Helper method to get a list of ComputedEntries in a chemical system.
For example, elements = ["Li", "Fe", "O"] will return a list of all
entries in the Li-Fe-O chemical system, i.e., all LixOy,
FexOy, LixFey, LixFeyOz, Li, Fe and O phases. Extremely useful for
creating phase diagrams of entire chemical systems.
Args:
elements ([str]): List of element symbols, e.g., ["Li", "Fe",
"O"].
compatible_only (bool): Whether to return only "compatible"
entries. Compatible entries are entries that have been
processed using the MaterialsProjectCompatibility class,
which performs adjustments to allow mixing of GGA and GGA+U
calculations for more accurate phase diagrams and reaction
energies.
inc_structure (str): If None, entries returned are
ComputedEntries. If inc_structure="final",
ComputedStructureEntries with final structures are returned.
Otherwise, ComputedStructureEntries with initial structures
are returned.
property_data (list): Specify additional properties to include in
entry.data. If None, no data. Should be a subset of
supported_properties.
conventional_unit_cell (bool): Whether to get the standard
conventional unit cell
Returns:
List of ComputedEntries.
"""
entries = []
for i in range(len(elements)):
for els in itertools.combinations(elements, i + 1):
entries.extend(
self.get_entries(
"-".join(els), compatible_only=compatible_only,
inc_structure=inc_structure,
property_data=property_data,
conventional_unit_cell=conventional_unit_cell))
return entries
def get_exp_thermo_data(self, formula):
"""
Get a list of ThermoData objects associated with a formula using the
Materials Project REST interface.
Args:
formula (str): A formula to search for.
Returns:
List of ThermoData objects.
"""
return self.get_data(formula, data_type="exp")
def get_exp_entry(self, formula):
"""
Returns an ExpEntry object, which is the experimental equivalent of a
ComputedEntry and can be used for analyses using experimental data.
Args:
formula (str): A formula to search for.
Returns:
An ExpEntry object.
"""
return ExpEntry(Composition(formula),
self.get_exp_thermo_data(formula))
def query(self, criteria, properties, chunk_size=500, max_tries_per_chunk=5,
mp_decode=True):
"""
Performs an advanced query using MongoDB-like syntax for directly
querying the Materials Project database. This allows one to perform
queries which are otherwise too cumbersome to perform using the standard
convenience methods.
Please consult the Materials API documentation at
https://github.com/materialsproject/mapidoc, which provides a
comprehensive explanation of the document schema used in the Materials
Project (supported criteria and properties) and guidance on how best to
query for the relevant information you need.
For queries that request data on more than CHUNK_SIZE materials at once,
this method will chunk a query by first retrieving a list of material
IDs that satisfy CRITERIA, and then merging the criteria with a
restriction to one chunk of materials at a time of size CHUNK_SIZE. You
can opt out of this behavior by setting CHUNK_SIZE=0. To guard against
intermittent server errors in the case of many chunks per query,
possibly-transient server errors will result in re-trying a give chunk
up to MAX_TRIES_PER_CHUNK times.
Args:
criteria (str/dict): Criteria of the query as a string or
mongo-style dict.
If string, it supports a powerful but simple string criteria.
E.g., "Fe2O3" means search for materials with reduced_formula
Fe2O3. Wild cards are also supported. E.g., "\\*2O" means get
all materials whose formula can be formed as \\*2O, e.g.,
Li2O, K2O, etc.
Other syntax examples:
mp-1234: Interpreted as a Materials ID.
Fe2O3 or \\*2O3: Interpreted as reduced formulas.
Li-Fe-O or \\*-Fe-O: Interpreted as chemical systems.
You can mix and match with spaces, which are interpreted as
"OR". E.g. "mp-1234 FeO" means query for all compounds with
reduced formula FeO or with materials_id mp-1234.
Using a full dict syntax, even more powerful queries can be
constructed. For example, {"elements":{"$in":["Li",
"Na", "K"], "$all": ["O"]}, "nelements":2} selects all Li, Na
and K oxides. {"band_gap": {"$gt": 1}} selects all materials
with band gaps greater than 1 eV.
properties (list): Properties to request for as a list. For
example, ["formula", "formation_energy_per_atom"] returns
the formula and formation energy per atom.
chunk_size (int): Number of materials for which to fetch data at a
time. More data-intensive properties may require smaller chunk
sizes. Use chunk_size=0 to force no chunking -- this is useful
when fetching only properties such as 'material_id'.
max_tries_per_chunk (int): How many times to re-try fetching a given
chunk when the server gives a 5xx error (e.g. a timeout error).
mp_decode (bool): Whether to do a decoding to a Pymatgen object
where possible. In some cases, it might be useful to just get
the raw python dict, i.e., set to False.
Returns:
List of results. E.g.,
[{u'formula': {u'O': 1, u'Li': 2.0}},
{u'formula': {u'Na': 2.0, u'O': 2.0}},
{u'formula': {u'K': 1, u'O': 3.0}},
...]
"""
if not isinstance(criteria, dict):
criteria = self.parse_criteria(criteria)
payload = {"criteria": json.dumps(criteria),
"properties": json.dumps(properties)}
if chunk_size == 0:
return self._make_request(
"/query", payload=payload, method="POST", mp_decode=mp_decode)
count_payload = payload.copy()
count_payload["options"] = json.dumps({"count_only": True})
num_results = self._make_request(
"/query", payload=count_payload, method="POST")
if num_results <= chunk_size:
return self._make_request(
"/query", payload=payload, method="POST", mp_decode=mp_decode)
data = []
mids = [d["material_id"] for d in
self.query(criteria, ["material_id"], chunk_size=0)]
chunks = get_chunks(mids, size=chunk_size)
progress_bar = PBar(total=len(mids))
for chunk in chunks:
chunk_criteria = criteria.copy()
chunk_criteria.update({"material_id": {"$in": chunk}})
num_tries = 0
while num_tries < max_tries_per_chunk:
try:
data.extend(self.query(chunk_criteria, properties,
chunk_size=0, mp_decode=mp_decode))
break
except MPRestError as e:
match = re.search(r"error status code (\d+)", e.message)
if match:
if not match.group(1).startswith("5"):
raise e
else: # 5xx error. Try again
num_tries += 1
print(
"Unknown server error. Trying again in five "
"seconds (will try at most {} times)...".format(
max_tries_per_chunk))
sleep(5)
progress_bar.update(len(chunk))
return data
def submit_structures(self, structures, authors, projects=None,
references='', remarks=None, data=None,
histories=None, created_at=None):
"""
Submits a list of structures to the Materials Project as SNL files.
The argument list mirrors the arguments for the StructureNL object,
except that a list of structures with the same metadata is used as an
input.
.. note::
As of now, this MP REST feature is open only to a select group of
users. Opening up submissions to all users is being planned for
the future.
Args:
structures: A list of Structure objects
authors (list): List of {"name":'', "email":''} dicts,
*list* of Strings as 'John Doe <johndoe@gmail.com>',
or a single String with commas separating authors
projects ([str]): List of Strings ['Project A', 'Project B'].
This applies to all structures.
references (str): A String in BibTeX format. Again, this applies to
all structures.
remarks ([str]): List of Strings ['Remark A', 'Remark B']
data ([dict]): A list of free form dict. Namespaced at the root
level with an underscore, e.g. {"_materialsproject":<custom
data>}. The length of data should be the same as the list of
structures if not None.
histories: List of list of dicts - [[{'name':'', 'url':'',
'description':{}}], ...] The length of histories should be the
same as the list of structures if not None.
created_at (datetime): A datetime object
Returns:
A list of inserted submission ids.
"""
from pymatgen.util.provenance import StructureNL
snl_list = StructureNL.from_structures(structures, authors, projects,
references, remarks, data,
histories, created_at)
self.submit_snl(snl_list)
    def submit_snl(self, snl):
        """
        Submits a list of StructureNL to the Materials Project site.

        .. note::

            As of now, this MP REST feature is open only to a select group of
            users. Opening up submissions to all users is being planned for
            the future.

        Args:
            snl (StructureNL/[StructureNL]): A single StructureNL, or a list
                of StructureNL objects

        Returns:
            A list of inserted submission ids.

        Raises:
            MPRestError: on any transport failure or server-reported error.
        """
        try:
            # Accept either a single SNL or a list; normalize to a list.
            snl = snl if isinstance(snl, list) else [snl]
            jsondata = [s.as_dict() for s in snl]
            payload = {"snl": json.dumps(jsondata, cls=MontyEncoder)}
            response = self.session.post("{}/snl/submit".format(self.preamble),
                                         data=payload)
            # 400 is handled too: the server reports validation failures with
            # a JSON body carrying "valid_response"/"error" fields.
            if response.status_code in [200, 400]:
                resp = json.loads(response.text, cls=MontyDecoder)
                if resp["valid_response"]:
                    if resp.get("warning"):
                        warnings.warn(resp["warning"])
                    return resp['inserted_ids']
                else:
                    raise MPRestError(resp["error"])
            raise MPRestError("REST error with status code {} and error {}"
                              .format(response.status_code, response.text))
        except Exception as ex:
            # NOTE(review): this also re-wraps the MPRestErrors raised above,
            # losing the original traceback; kept as-is for compatibility.
            raise MPRestError(str(ex))
    def delete_snl(self, snl_ids):
        """
        Delete earlier submitted SNLs.

        .. note::

            As of now, this MP REST feature is open only to a select group of
            users. Opening up submissions to all users is being planned for
            the future.

        Args:
            snl_ids: List of SNL ids.

        Returns:
            The decoded server response dict on success.

        Raises:
            MPRestError: on any transport failure or server-reported error.
        """
        try:
            payload = {"ids": json.dumps(snl_ids)}
            response = self.session.post(
                "{}/snl/delete".format(self.preamble), data=payload)
            # 400 carries a structured error body, so it is decoded too.
            if response.status_code in [200, 400]:
                resp = json.loads(response.text, cls=MontyDecoder)
                if resp["valid_response"]:
                    if resp.get("warning"):
                        warnings.warn(resp["warning"])
                    return resp
                else:
                    raise MPRestError(resp["error"])
            raise MPRestError("REST error with status code {} and error {}"
                              .format(response.status_code, response.text))
        except Exception as ex:
            # NOTE(review): re-wraps MPRestErrors raised above as well.
            raise MPRestError(str(ex))
    def query_snl(self, criteria):
        """
        Query for submitted SNLs.

        .. note::

            As of now, this MP REST feature is open only to a select group of
            users. Opening up submissions to all users is being planned for
            the future.

        Args:
            criteria (dict): Query criteria.

        Returns:
            A dict, with a list of submitted SNLs in the "response" key.

        Raises:
            MPRestError: on any transport failure or server-reported error.
        """
        try:
            payload = {"criteria": json.dumps(criteria)}
            response = self.session.post("{}/snl/query".format(self.preamble),
                                         data=payload)
            if response.status_code in [200, 400]:
                # NOTE(review): unlike the sibling SNL methods, this decodes
                # with plain json.loads (no MontyDecoder) -- confirm whether
                # this endpoint intentionally returns raw dicts.
                resp = json.loads(response.text)
                if resp["valid_response"]:
                    if resp.get("warning"):
                        warnings.warn(resp["warning"])
                    return resp["response"]
                else:
                    raise MPRestError(resp["error"])
            raise MPRestError("REST error with status code {} and error {}"
                              .format(response.status_code, response.text))
        except Exception as ex:
            raise MPRestError(str(ex))
    def submit_vasp_directory(self, rootdir, authors, projects=None,
                              references='', remarks=None, master_data=None,
                              master_history=None, created_at=None,
                              ncpus=None):
        """
        Assimilates all vasp run directories beneath a particular
        directory using BorgQueen to obtain structures, and then submits them
        to the Materials Project as SNL files. VASP related meta data like
        initial structure and final energies are automatically incorporated.

        .. note::

            As of now, this MP REST feature is open only to a select group of
            users. Opening up submissions to all users is being planned for
            the future.

        Args:
            rootdir (str): Rootdir to start assimilating VASP runs from.
            authors: *List* of {"name":'', "email":''} dicts,
                *list* of Strings as 'John Doe <johndoe@gmail.com>',
                or a single String with commas separating authors. The same
                list of authors should apply to all runs.
            projects ([str]): List of Strings ['Project A', 'Project B'].
                This applies to all structures.
            references (str): A String in BibTeX format. Again, this applies to
                all structures.
            remarks ([str]): List of Strings ['Remark A', 'Remark B']
            master_data (dict): A free form dict. Namespaced at the root
                level with an underscore, e.g. {"_materialsproject":<custom
                data>}. This data is added to all structures detected in the
                directory, in addition to other vasp data on a per structure
                basis.
            master_history: A master history to be added to all entries.
            created_at (datetime): A datetime object
            ncpus (int): Number of cpus to use in using BorgQueen to
                assimilate. Defaults to None, which means serial.

        Returns:
            The result of submit_structures (inserted submission ids).
        """
        from pymatgen.apps.borg.hive import VaspToComputedEntryDrone
        from pymatgen.apps.borg.queen import BorgQueen
        drone = VaspToComputedEntryDrone(inc_structure=True,
                                         data=["filename",
                                               "initial_structure"])
        queen = BorgQueen(drone, number_of_drones=ncpus)
        queen.parallel_assimilate(rootdir)
        structures = []
        metadata = []
        histories = []
        for e in queen.get_data():
            structures.append(e.structure)
            # Per-structure VASP provenance, namespaced under "_vasp".
            m = {
                "_vasp": {
                    "parameters": e.parameters,
                    "final_energy": e.energy,
                    "final_energy_per_atom": e.energy_per_atom,
                    "initial_structure": e.data["initial_structure"].as_dict()
                }
            }
            # NOTE(review): histories only grows for entries that carry a
            # "history" parameter, so its length can fall out of sync with
            # structures when only some runs have history -- TODO confirm
            # whether submit_structures tolerates that.
            if "history" in e.parameters:
                histories.append(e.parameters["history"])
            if master_data is not None:
                m.update(master_data)
            metadata.append(m)
        if master_history is not None:
            # Replicate the master history once per structure.
            histories = master_history * len(structures)
        return self.submit_structures(
            structures, authors, projects=projects, references=references,
            remarks=remarks, data=metadata, histories=histories,
            created_at=created_at)
    def get_stability(self, entries):
        """
        Returns the stability of all entries, as computed server-side by the
        Materials Project phase-diagram endpoint.

        Args:
            entries: entries to evaluate; must be serializable with
                MontyEncoder (e.g. ComputedEntry objects).

        Returns:
            The "response" payload from the REST endpoint.

        Raises:
            MPRestError: on any transport failure or server-reported error.
        """
        try:
            payload = {"entries": json.dumps(entries, cls=MontyEncoder)}
            response = self.session.post("{}/phase_diagram/calculate_stability"
                                         .format(self.preamble), data=payload)
            # 400 carries a structured error body, so it is decoded too.
            if response.status_code in [200, 400]:
                resp = json.loads(response.text, cls=MontyDecoder)
                if resp["valid_response"]:
                    if resp.get("warning"):
                        warnings.warn(resp["warning"])
                    return resp["response"]
                else:
                    raise MPRestError(resp["error"])
            raise MPRestError("REST error with status code {} and error {}"
                              .format(response.status_code, response.text))
        except Exception as ex:
            # NOTE(review): re-wraps MPRestErrors raised above as well.
            raise MPRestError(str(ex))
def get_cohesive_energy(self, material_id, per_atom=False):
"""
Gets the cohesive for a material (eV per formula unit). Cohesive energy
is defined as the difference between the bulk energy and the sum of
total DFT energy of isolated atoms for atom elements in the bulk.
Args:
material_id (str): Materials Project material_id, e.g. 'mp-123'.
per_atom (bool): Whether or not to return cohesive energy per atom
Returns:
Cohesive energy (eV).
"""
entry = self.get_entry_by_material_id(material_id)
ebulk = entry.energy / \
entry.composition.get_integer_formula_and_factor()[1]
comp_dict = entry.composition.reduced_composition.as_dict()
isolated_atom_e_sum, n = 0, 0
for el in comp_dict.keys():
e = self._make_request("/element/%s/tasks/isolated_atom" % (el),
mp_decode=False)[0]
isolated_atom_e_sum += e['output']["final_energy_per_atom"] * comp_dict[el]
n += comp_dict[el]
ecoh_per_formula = isolated_atom_e_sum - ebulk
return ecoh_per_formula/n if per_atom else ecoh_per_formula
def get_reaction(self, reactants, products):
"""
Gets a reaction from the Materials Project.
Args:
reactants ([str]): List of formulas
products ([str]): List of formulas
Returns:
rxn
"""
return self._make_request("/reaction",
payload={"reactants[]": reactants,
"products[]": products}, mp_decode=False)
def get_substrates(self, material_id, number=50, orient=None):
"""
Get a substrate list for a material id. The list is in order of
increasing elastic energy if a elastic tensor is available for
the material_id. Otherwise the list is in order of increasing
matching area.
Args:
material_id (str): Materials Project material_id, e.g. 'mp-123'.
orient (list) : substrate orientation to look for
number (int) : number of substrates to return;
n=0 returns all available matches
Returns:
list of dicts with substrate matches
"""
req = "/materials/{}/substrates?n={}".format(material_id, number)
if orient:
req += "&orient={}".format(" ".join(map(str, orient)))
return self._make_request(req)
def get_all_substrates(self):
"""
Gets the list of all possible substrates considered in the
Materials Project substrate database
Returns:
list of material_ids corresponding to possible substrates
"""
return self._make_request("/materials/all_substrate_ids")
    def get_surface_data(self, material_id, miller_index=None, inc_structures=False):
        """
        Gets surface data for a material. Useful for Wulff shapes.

        Reference for surface data:

        Tran, R., Xu, Z., Radhakrishnan, B., Winston, D., Sun, W., Persson, K.
        A., & Ong, S. P. (2016). Data Descripter: Surface energies of elemental
        crystals. Scientific Data, 3(160080), 1–13.
        http://dx.doi.org/10.1038/sdata.2016.80

        Args:
            material_id (str): Materials Project material_id, e.g. 'mp-123'.
            miller_index (list of integer): The miller index of the surface.
                e.g., [3, 2, 1]. If miller_index is provided, only one dictionary
                of this specific plane will be returned.
            inc_structures (bool): Include final surface slab structures.
                These are unnecessary for Wulff shape construction.

        Returns:
            Surface data for material (or, when miller_index is given, the
            single surface dict for a symmetrically equivalent plane).
            Energies are given in SI units (J/m^2).
            NOTE(review): when miller_index is supplied but no symmetrically
            equivalent plane exists in the database, this falls through and
            implicitly returns None.
        """
        req = "/materials/{}/surfaces".format(material_id)
        if inc_structures:
            req += "?include_structures=true"
        if miller_index:
            surf_data_dict = self._make_request(req)
            surf_list = surf_data_dict['surfaces']
            # Use the conventional cell so the requested miller index is
            # compared in the same setting as the database's indices.
            ucell = self.get_structure_by_material_id(material_id,
                                                      conventional_unit_cell=True)
            eq_indices = get_symmetrically_equivalent_miller_indices(ucell, miller_index)
            for one_surf in surf_list:
                if tuple(one_surf['miller_index']) in eq_indices:
                    return one_surf
        else:
            return self._make_request(req)
def get_wulff_shape(self, material_id):
"""
Constructs a Wulff shape for a material.
Args:
material_id (str): Materials Project material_id, e.g. 'mp-123'.
Returns:
pymatgen.analysis.wulff.WulffShape
"""
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.analysis.wulff import WulffShape, hkl_tuple_to_str
structure = self.get_structure_by_material_id(material_id)
surfaces = self.get_surface_data(material_id)["surfaces"]
lattice = (SpacegroupAnalyzer(structure)
.get_conventional_standard_structure().lattice)
miller_energy_map = {}
for surf in surfaces:
miller = tuple(surf["miller_index"])
# Prefer reconstructed surfaces, which have lower surface energies.
if (miller not in miller_energy_map) or surf["is_reconstructed"]:
miller_energy_map[miller] = surf["surface_energy"]
millers, energies = zip(*miller_energy_map.items())
return WulffShape(lattice, millers, energies)
def get_gb_data(self, material_id=None, pretty_formula=None,
chemsys=None, sigma=None, gb_plane=None,
rotation_axis=None, include_work_of_separation=False):
"""
Gets grain boundary data for a material.
Args:
material_id (str): Materials Project material_id, e.g., 'mp-129'.
pretty_formula (str): The formula of metals. e.g., 'Fe'
sigma(int): The sigma value of a certain type of grain boundary
gb_plane(list of integer): The Miller index of grain
boundary plane. e.g., [1, 1, 1]
rotation_axis(list of integer): The Miller index of rotation
axis. e.g., [1, 0, 0], [1, 1, 0], and [1, 1, 1]
Sigma value is determined by the combination of rotation axis and
rotation angle. The five degrees of freedom (DOF) of one grain boundary
include: rotation axis (2 DOFs), rotation angle (1 DOF), and grain
boundary plane (2 DOFs).
include_work_of_separation (bool): whether to include the work of separation
(in unit of (J/m^2)). If you want to query the work of separation, please
specify the material_id.
Returns:
A list of grain boundaries that satisfy the query conditions (sigma, gb_plane).
Energies are given in SI units (J/m^2).
"""
if gb_plane:
gb_plane = ','.join([str(i) for i in gb_plane])
if rotation_axis:
rotation_axis = ','.join([str(i) for i in rotation_axis])
payload = {"material_id": material_id,
"pretty_formula": pretty_formula,
"chemsys": chemsys,
"sigma": sigma,
"gb_plane": gb_plane,
"rotation_axis":rotation_axis}
if include_work_of_separation and material_id:
list_of_gbs = self._make_request("/grain_boundaries",
payload=payload)
for i, gb_dict in enumerate(list_of_gbs):
gb_energy = gb_dict['gb_energy']
gb_plane_int = gb_dict['gb_plane']
surface_energy = self.get_surface_data(material_id=material_id,
miller_index=gb_plane_int)['surface_energy']
wsep = 2 * surface_energy - gb_energy # calculate the work of separation
gb_dict['work_of_separation'] = wsep
return list_of_gbs
else:
return self._make_request("/grain_boundaries",
payload=payload)
def get_interface_reactions(self, reactant1, reactant2,
open_el=None, relative_mu=None,
use_hull_energy=False):
"""
Gets critical reactions between two reactants.
Get critical reactions ("kinks" in the mixing ratio where
reaction products change) between two reactants. See the
`pymatgen.analysis.interface_reactions` module for more info.
Args:
reactant1 (str): Chemical formula for reactant
reactant2 (str): Chemical formula for reactant
open_el (str): Element in reservoir available to system
relative_mu (float): Relative chemical potential of element in
reservoir with respect to pure substance. Must be non-positive.
use_hull_energy (bool): Whether to use the convex hull energy for a
given composition for the reaction energy calculation. If false,
the energy of the ground state structure will be preferred; if a
ground state can not be found for a composition, the convex hull
energy will be used with a warning message.
Returns:
list: list of dicts of form {ratio,energy,rxn} where `ratio` is the
reactant mixing ratio, `energy` is the reaction energy
in eV/atom, and `rxn` is a
`pymatgen.analysis.reaction_calculator.Reaction`.
"""
payload = {"reactants": " ".join([reactant1, reactant2]),
"open_el": open_el,
"relative_mu": relative_mu,
"use_hull_energy": use_hull_energy}
return self._make_request("/interface_reactions",
payload=payload, method="POST")
    @staticmethod
    def parse_criteria(criteria_string):
        """
        Parses a powerful and simple string criteria and generates a proper
        mongo syntax criteria.

        Args:
            criteria_string (str): A string representing a search criteria.
                Also supports wild cards. E.g.,
                something like "*2O" gets converted to
                {'pretty_formula': {'$in': [u'B2O', u'Xe2O', u"Li2O", ...]}}

                Other syntax examples:
                    mp-1234: Interpreted as a Materials ID.
                    Fe2O3 or *2O3: Interpreted as reduced formulas.
                    Li-Fe-O or *-Fe-O: Interpreted as chemical systems.

                You can mix and match with spaces, which are interpreted as
                "OR". E.g., "mp-1234 FeO" means query for all compounds with
                reduced formula FeO or with materials_id mp-1234.

        Returns:
            A mongo query dict.
        """
        toks = criteria_string.split()

        def parse_sym(sym):
            # Expand a symbol token: "*" -> every element; "{A,B}" -> the
            # listed alternatives; anything else is taken literally.
            if sym == "*":
                return [el.symbol for el in Element]
            else:
                m = re.match(r"\{(.*)\}", sym)
                if m:
                    return [s.strip() for s in m.group(1).split(",")]
                else:
                    return [sym]

        def parse_tok(t):
            # A token like "mp-1234" (word-dash-digits) is a task/material id.
            if re.match(r"\w+-\d+", t):
                return {"task_id": t}
            elif "-" in t:
                # Dash-separated symbols form a chemical system; expand any
                # wildcards and enumerate every distinct-element combination.
                elements = [parse_sym(sym) for sym in t.split("-")]
                chemsyss = []
                for cs in itertools.product(*elements):
                    if len(set(cs)) == len(cs):
                        # Check for valid symbols
                        cs = [Element(s).symbol for s in cs]
                        chemsyss.append("-".join(sorted(cs)))
                return {"chemsys": {"$in": chemsyss}}
            else:
                # Otherwise the token is a (possibly wildcarded) formula.
                all_formulas = set()
                explicit_els = []
                wild_card_els = []
                for sym in re.findall(
                        r"(\*[\.\d]*|\{.*\}[\.\d]*|[A-Z][a-z]*)[\.\d]*", t):
                    if ("*" in sym) or ("{" in sym):
                        wild_card_els.append(sym)
                    else:
                        m = re.match(r"([A-Z][a-z]*)[\.\d]*", sym)
                        explicit_els.append(m.group(1))
                # Element count the expanded formula must match so that e.g.
                # "*2O" does not also match O-only formulas.
                nelements = len(wild_card_els) + len(set(explicit_els))
                parts = re.split(r"(\*|\{.*\})", t)
                parts = [parse_sym(s) for s in parts if s != ""]
                for f in itertools.product(*parts):
                    c = Composition("".join(f))
                    if len(c) == nelements:
                        # Check for valid Elements in keys.
                        for e in c.keys():
                            Element(e.symbol)
                        all_formulas.add(c.reduced_formula)
                return {"pretty_formula": {"$in": list(all_formulas)}}

        # Multiple whitespace-separated tokens are OR-ed together.
        if len(toks) == 1:
            return parse_tok(toks[0])
        else:
            return {"$or": list(map(parse_tok, toks))}
class MPRestError(Exception):
    """
    Exception class for MPRestAdaptor.
    Raised when the query has problems, e.g., bad query format.
    """
| 43.302076 | 103 | 0.577538 |
4b3650bc91e9c78f75a141e2872db660e79886e1 | 4,041 | py | Python | models/AI-Model-Zoo/VAI-1.3-Model-Zoo-Code/caffe/cf_facerec-resnet20_112_96_3.5G_1.3/code/example/api/detect_api.py | guochunhe/Vitis-AI | e86b6efae11f8703ee647e4a99004dc980b84989 | [
"Apache-2.0"
] | 1 | 2020-12-18T14:49:19.000Z | 2020-12-18T14:49:19.000Z | models/AI-Model-Zoo/VAI-1.3-Model-Zoo-Code/caffe/cf_facerec-resnet20_112_96_3.5G_1.3/code/get_aligned_face/api/detect_api.py | guochunhe/Vitis-AI | e86b6efae11f8703ee647e4a99004dc980b84989 | [
"Apache-2.0"
] | null | null | null | models/AI-Model-Zoo/VAI-1.3-Model-Zoo-Code/caffe/cf_facerec-resnet20_112_96_3.5G_1.3/code/get_aligned_face/api/detect_api.py | guochunhe/Vitis-AI | e86b6efae11f8703ee647e4a99004dc980b84989 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# PART OF THIS FILE AT ALL TIMES.
import numpy as np
import scipy.misc
import scipy.io
from matplotlib.patches import Rectangle
import datetime
import cv2
import sys
def nms(dets, thresh):
    """Pure Python NMS baseline."""
    # Column layout: x1, y1, x2, y2, score.
    lefts, tops = dets[:, 0], dets[:, 1]
    rights, bottoms = dets[:, 2], dets[:, 3]
    scores = dets[:, 4]

    box_areas = (rights - lefts + 1) * (bottoms - tops + 1)
    # Candidate indices, best score first.
    remaining = scores.argsort()[::-1]

    keep = []
    while remaining.size > 0:
        best = remaining[0]
        keep.append(best)
        rest = remaining[1:]

        # Intersection rectangle of the best box with every other candidate.
        ix1 = np.maximum(lefts[best], lefts[rest])
        iy1 = np.maximum(tops[best], tops[rest])
        ix2 = np.minimum(rights[best], rights[rest])
        iy2 = np.minimum(bottoms[best], bottoms[rest])

        iw = np.maximum(0.0, ix2 - ix1 + 1)
        ih = np.maximum(0.0, iy2 - iy1 + 1)
        overlap = iw * ih
        iou = overlap / (box_areas[best] + box_areas[rest] - overlap)

        # Drop every candidate overlapping the kept box above the threshold.
        remaining = rest[iou <= thresh]
    return keep
class Detect(object):
    """Caffe-based dense detector.

    Runs a fully-convolutional net producing a per-pixel probability map
    (`pixel_blob_name_`) and a bounding-box regression map (`bb_blob_name_`),
    decodes both into rectangles and filters them with NMS.
    """
    def __init__(self):
        # Fractional margin added around each detected box (0.0 = no expansion).
        self.expand_scale_=0.0
        # If True, convert the input to single-channel grayscale before inference.
        self.force_gray_=False
        # Per-pixel preprocessing: (pixel - mean) * scale.
        self.input_mean_value_=128.0
        self.input_scale_=1.0
        # Output blob names in the deployed network.
        self.pixel_blob_name_='pixel-loss'
        self.bb_blob_name_='bb-output-tiled'
        # Output map stride relative to the input image, in pixels.
        self.res_stride_=4
        self.det_threshold_=0.7
        self.nms_threshold_=0.3
        self.caffe_path_=""
        self.input_channels_=3
    def model_init(self,caffe_python_path,model_path,def_path):
        """Load the Caffe net from a prototxt (def_path) and weights (model_path)."""
        sys.path.insert(0,caffe_python_path)
        import caffe
        self.caffe_path_=caffe_python_path
        self.net_=caffe.Net(def_path,model_path,caffe.TEST)
    def detect(self,image):
        """Run detection on a BGR image; return a list of [x1, y1, x2, y2] boxes."""
        #sys.path.insert(0,self.caffe_path_)
        import caffe
        #caffe.set_mode_cpu()
        #caffe.set_device(0)
        # NOTE(review): the transformer is built with the channel count *before*
        # the grayscale conversion below — confirm this is intended.
        self.transformer_=caffe.io.Transformer({'data': (1,self.input_channels_,image.shape[0],image.shape[1])})
        if self.force_gray_:
            image=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
            self.input_channels_=1
        else:
            # Color input: HWC -> CHW for Caffe.
            self.transformer_.set_transpose('data', (2,0,1))
        transformed_image=self.transformer_.preprocess('data',image)
        # Mean subtraction and scaling.
        transformed_image=(transformed_image-self.input_mean_value_)*self.input_scale_
        sz=image.shape
        # Reshape the input blob to the actual image size and run the net.
        self.net_.blobs['data'].reshape(1, self.input_channels_, sz[0], sz[1])
        self.net_.blobs['data'].data[0, ...] = transformed_image
        output = self.net_.forward()
        # prob: probability of the positive class at each output location.
        prob = output[self.pixel_blob_name_][0, 1, ...]
        # bb: 4 regression channels (x1, y1, x2, y2 offsets) per location.
        bb = output[self.bb_blob_name_][0, ...]
        # Grid of input-image coordinates for each output location (stride apart).
        gy = np.arange(0, sz[0], self.res_stride_)
        gx = np.arange(0, sz[1], self.res_stride_)
        gy = gy[0 : bb.shape[1]]
        gx = gx[0 : bb.shape[2]]
        [x, y] = np.meshgrid(gx, gy)
        #print bb.shape[1],len(gy),sz[0],sz[1]
        # Convert the regressed offsets to absolute image coordinates.
        bb[0, :, :] += x
        bb[2, :, :] += x
        bb[1, :, :] += y
        bb[3, :, :] += y
        # Flatten maps to per-location rows and keep confident locations only.
        bb = np.reshape(bb, (4, -1)).T
        prob = np.reshape(prob, (-1, 1))
        bb = bb[prob.ravel() > self.det_threshold_, :]
        prob = prob[prob.ravel() > self.det_threshold_, :]
        rects = np.hstack((bb, prob))
        keep = nms(rects, self.nms_threshold_)
        rects = rects[keep, :]
        # Expand each surviving box by expand_scale_ and clip to image bounds.
        rects_expand=[]
        for rect in rects:
            rect_expand=[]
            rect_w=rect[2]-rect[0]
            rect_h=rect[3]-rect[1]
            rect_expand.append(int(max(0,rect[0]-rect_w*self.expand_scale_)))
            rect_expand.append(int(max(0,rect[1]-rect_h*self.expand_scale_)))
            rect_expand.append(int(min(sz[1],rect[2]+rect_w*self.expand_scale_)))
            rect_expand.append(int(min(sz[0],rect[3]+rect_h*self.expand_scale_)))
            rects_expand.append(rect_expand)
        return rects_expand
| 32.58871 | 108 | 0.640188 |
32b4fff532501c706b6557bb9fbd318c66a721b0 | 412 | py | Python | patterns/singletons/singleton_metaclass.py | wswld/notes | 02be683821792dec785d0bd9a6dfa1b7236a6321 | [
"MIT"
] | null | null | null | patterns/singletons/singleton_metaclass.py | wswld/notes | 02be683821792dec785d0bd9a6dfa1b7236a6321 | [
"MIT"
] | null | null | null | patterns/singletons/singleton_metaclass.py | wswld/notes | 02be683821792dec785d0bd9a6dfa1b7236a6321 | [
"MIT"
] | null | null | null | # Singleton through a metaclass. Requires Python 3
class Singleton(type):
    """Metaclass implementing the singleton pattern (Python 3 syntax)."""
    _instances = {}
    def __call__(cls, *args, **kwargs):
        # Construct and cache the instance only on the first call.
        cached = cls._instances.get(cls)
        if cached is None:
            cached = super(Singleton, cls).__call__(*args, **kwargs)
            cls._instances[cls] = cached
        return cached
class Logger(metaclass=Singleton):
    # Governed by the Singleton metaclass above: Logger() always returns
    # the one cached instance.
    pass
# Demo: all three printed reprs show the same object id, confirming that
# repeated construction yields the identical instance.
l1 = Logger()
print(l1)
l2 = Logger()
print(l2)
l3 = Logger()
print(l3)
| 17.913043 | 81 | 0.648058 |
f121cd71641f43cb549be04b6288bd73382a0f7b | 4,596 | py | Python | contrib/docker/harvester/count_field_unique_value.py | meertensinstituut/ckan | 91f0d441baac1e835d529dac8f037c324d8e1281 | [
"Apache-2.0"
] | null | null | null | contrib/docker/harvester/count_field_unique_value.py | meertensinstituut/ckan | 91f0d441baac1e835d529dac8f037c324d8e1281 | [
"Apache-2.0"
] | 1 | 2019-02-20T10:23:18.000Z | 2019-02-20T10:23:18.000Z | contrib/docker/harvester/count_field_unique_value.py | meertensinstituut/ckan | 91f0d441baac1e835d529dac8f037c324d8e1281 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import urllib2
import json
import xml.etree.ElementTree as et
from os import listdir
from os.path import isfile, join
import time
from pprint import pprint
from xmljson import badgerfish as bf
import re
import sys
import import_xml_to_ckan_util as importlib
story_fields = ['identifier', 'title', 'type', 'contents', 'places', 'persons', 'events', 'keywords', ]
class XML:
    """Thin ElementTree wrapper that strips XML namespaces on parse and
    caches looked-up child elements in `data`."""

    def __init__(self):
        # Per-instance element cache.  This used to be a class-level dict,
        # which was silently shared (and mutated) across every XML instance.
        self.data = dict()

    def get_element(self, tree, isebel_list=None):
        """Find each tag of isebel_list (default: story_fields) in tree and
        cache the element (or None when absent) under data[tag]."""
        if isebel_list is None:
            isebel_list = story_fields
        for tag in isebel_list:
            # Call find() once per tag (the old code called it twice).
            self.data[tag] = tree.find(tag)

    @staticmethod
    def parse_xml(path):
        """Parse the XML file at `path`; return its root element with all
        namespace prefixes stripped from tag names."""
        # get xml file
        try:
            tree = et.parse(path)
        except Exception as e:
            print('error parsing XML file!')
            # Exception.message does not exist in Python 3; str(e) always works.
            exit(str(e))
        for el in tree.iter():
            if '}' in el.tag:
                el.tag = el.tag.split('}', 1)[1]  # strip all namespaces
        root = tree.getroot()
        for el in root:
            if '}' in el.tag:
                el.tag = el.tag.split('}', 1)[1]  # strip all namespaces
        return root
def load_story_from_xml_as_dict(xml):
    """Convert an ElementTree element to a plain dict (badgerfish convention)
    and return its 'story' entry, or None when absent."""
    # Round-trip through JSON text so the Clark-notation namespace prefixes
    # can be stripped out of the dict keys with plain string replaces.
    raw = json.dumps(bf.data(xml))
    raw = raw.replace('{http://www.w3.org/XML/1998/namespace}', '')
    raw = raw.replace('{http://www.w3.org/2001/XMLSchema-instance}', '')
    return json.loads(raw).get('story', None)
def get_story_from_xml_file(f):
    """Parse the XML file f and return its story content as a dict."""
    # Parse (with namespaces stripped) and extract the 'story' entry.
    return load_story_from_xml_as_dict(XML().parse_xml(f))
def get_unique_field_value_as_set(story_dict, field_name='keywords'):
    """Return the set of '$' values stored under story_dict[field_name].

    Fixes the old implementation, which accepted a `field_name` parameter but
    silently hard-coded 'keywords' regardless of what the caller passed.

    story_dict -- badgerfish-style dict: {field: {singular: [{'$': v}, ...]}}
    field_name -- outer key to read (default 'keywords')

    Returns None when the field or its inner element is missing/empty.
    """
    # Inner element key is the singular form of the field name,
    # e.g. 'keywords' -> 'keyword', 'persons' -> 'person'.
    inner_key = field_name[:-1] if field_name.endswith('s') else field_name
    container = story_dict.get(field_name, None)
    if not container or not container.get(inner_key, None):
        return None
    entries = container.get(inner_key)
    if isinstance(entries, list):
        values = [entry.get('$') for entry in entries]
    else:
        # A single element is delivered as a dict instead of a list.
        values = [entries.get('$')]
    return set(values)
def __main__():
    """CLI entry point: walk every *.xml file of the organization named in
    argv[1] and report how many unique keyword values they contain."""
    start = time.time()
    print('start')
    # init the config
    importlib.init()
    args = None
    try:
        args = sys.argv[1]
    except IndexError:
        exit('organization is required on the command line')
    # Map command-line aliases onto canonical organization names.
    if args in ('meertens', 'verhalenbank'):
        org = 'meertens'
    elif args == 'ucla':
        org = 'ucla'
    elif args in ('wossidia', 'rostock'):
        org = 'wossidia'
    else:
        raise Exception('Invalid organization')
    # Working directory holding this organization's XML exports.
    wd = importlib.orgs[org][4]
    apikey = importlib.apikey  # NOTE(review): unused in this function
    # if not org Create it
    if not importlib.org_exists(org):
        exit('organization [%s] does not exist.' % org)
    files = [join(wd, f) for f in sorted(listdir(wd)) if f.endswith('.xml') and isfile(join(wd, f))]
    print('get file lists')
    # NOTE(review): `error` is never populated — the append lives in the
    # commented-out except block below.
    error = list()
    counter = 0
    limit = 50000  # safety cap on the number of files processed
    results = set()
    for f in files:
        if counter <= limit:
            counter += 1
            print('### start with file: %s ###' % f)
            # try:
            current_story = get_story_from_xml_file(f)
            current_values = get_unique_field_value_as_set(current_story)
            if current_values:
                results = results.union(current_values)
            # except Exception as ex:
            #     error.append(f)
            #     exit(ex.message)
            print('### end with file: %s ###' % f)
        else:
            break
    if len(error) > 0:
        for i in error:
            print(i)
    print('Number of unique keywords: {}'.format(len(results)))
    end = time.time()
    elapsed = end - start
    print('#### Overview ###')
    print('#### Start at: %s' % start)
    print('#### Ends at: %s' % end)
    print('#### Time elapsed: %s' % elapsed)
if __name__ == '__main__':
    __main__()
| 29.273885 | 103 | 0.579417 |
b5c09a263911d6800f5036e418fb1c01439ee1d4 | 4,219 | py | Python | tests/Cpl/Dm/TShell/_0test/windows/mingw_w64/mytoolchain.py | johnttaylor/foxtail | 86e4e1d19d5e8f9c1d1064cf0939f4bf62615400 | [
"BSD-3-Clause"
] | null | null | null | tests/Cpl/Dm/TShell/_0test/windows/mingw_w64/mytoolchain.py | johnttaylor/foxtail | 86e4e1d19d5e8f9c1d1064cf0939f4bf62615400 | [
"BSD-3-Clause"
] | null | null | null | tests/Cpl/Dm/TShell/_0test/windows/mingw_w64/mytoolchain.py | johnttaylor/foxtail | 86e4e1d19d5e8f9c1d1064cf0939f4bf62615400 | [
"BSD-3-Clause"
] | null | null | null | #---------------------------------------------------------------------------
# This python module is used to customize a supported toolchain for your
# project specific settings.
#
# Notes:
# - ONLY edit/add statements in the sections marked by BEGIN/END EDITS
# markers.
# - Maintain indentation level and use spaces (it's a python thing)
# - rvalues must be enclosed in quotes (single ' ' or double " ")
# - The structure/class 'BuildValues' contains (at a minimum the
# following data members. Any member not specifically set defaults
# to null/empty string
# .inc
# .asminc
# .cflags
# .cppflags
# .asmflags
# .linkflags
# .linklibs
#
#---------------------------------------------------------------------------
# get definition of the Options structure
from nqbplib.base import BuildValues
#===================================================
# BEGIN EDITS/CUSTOMIZATIONS
#---------------------------------------------------
# Set the name for the final output item
FINAL_OUTPUT_NAME = 'b.exe'
# Set project specific 'base' (i.e always used) options
# 32-bit build with gcov coverage instrumentation enabled.
base_release = BuildValues() # Do NOT comment out this line
base_release.cflags = '-m32 -std=c++11 -Wall -Werror -x c++ -fprofile-arcs -ftest-coverage'
base_release.linkflags = '-m32 -fprofile-arcs'
base_release.linklibs = '-lgcov'
# Set project specific 'optimized' options
optimzed_release = BuildValues() # Do NOT comment out this line
optimzed_release.cflags = '-O3'
# Set project specific 'debug' options
debug_release = BuildValues() # Do NOT comment out this line
#debug_release.cflags = '-D_MY_APP_DEBUG_SWITCH_'
#
# For build config/variant: "cpp11"
# (note: uses same internal toolchain options as the 'Release' variant,
# only the 'User' options will/are different)
#
# Construct option structs
base_cpp11 = BuildValues()
optimzed_cpp11 = BuildValues()
debug_cpp11 = BuildValues()
# Set 'base' options (64-bit, no coverage instrumentation)
base_cpp11.cflags = '-m64 -std=c++11 -Wall -Werror -x c++'
base_cpp11.linkflags = '-m64'
# Set 'Optimized' options
optimzed_cpp11.cflags = '-O3'
#
# For build config/variant: "win64"
# (note: uses same internal toolchain options as the 'Release' variant,
# only the 'User' options will/are different)
#
# Construct option structs
base_win64 = BuildValues()
optimzed_win64 = BuildValues()
debug_win64 = BuildValues()
# Set 'base' options
base_win64.cflags = '-m64 -std=c++11 -Wall -Werror -x c++'
base_win64.linkflags = '-m64'
# Set 'Optimized' options
optimzed_win64.cflags = '-O3'
#-------------------------------------------------
# ONLY edit this section if you are ADDING options
# for build configurations/variants OTHER than the
# 'release' build
#-------------------------------------------------
release_opts = { 'user_base':base_release,
                 'user_optimized':optimzed_release,
                 'user_debug':debug_release
               }
# Add new dictionary of for new build configuration options
cpp11_opts = { 'user_base':base_cpp11,
               'user_optimized':optimzed_cpp11,
               'user_debug':debug_cpp11
             }
# Add new dictionary of for new build configuration options
win64_opts = { 'user_base':base_win64,
               'user_optimized':optimzed_win64,
               'user_debug':debug_win64
             }
# Add new variant option dictionary to # dictionary of
# build variants ('win32' is the default variant selected in create() below)
build_variants = { 'win32':release_opts,
                   'win64':win64_opts,
                   'cpp11':cpp11_opts,
                 }
#---------------------------------------------------
# END EDITS/CUSTOMIZATIONS
#===================================================
# Capture project/build directory
import os
prjdir = os.path.dirname(os.path.abspath(__file__))
# Select Module that contains the desired toolchain
from nqbplib.toolchains.windows.mingw_w64.console_exe import ToolChain
# Function that instantiates an instance of the toolchain
def create():
    """Build and return the mingw_w64 console-exe ToolChain configured above."""
    return ToolChain( FINAL_OUTPUT_NAME, prjdir, build_variants, "win32" )
| 30.352518 | 95 | 0.593269 |
5ca712db7fd82d40cc19deddd291413c2fca8c6b | 1,680 | py | Python | bin/scrape_software_versions.py | BioDragao/trigenome-analysis-nf | 76f69d9e780050a47544b77bfed30de93d357466 | [
"MIT"
] | null | null | null | bin/scrape_software_versions.py | BioDragao/trigenome-analysis-nf | 76f69d9e780050a47544b77bfed30de93d357466 | [
"MIT"
] | null | null | null | bin/scrape_software_versions.py | BioDragao/trigenome-analysis-nf | 76f69d9e780050a47544b77bfed30de93d357466 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from __future__ import print_function
from collections import OrderedDict
import re
# TODO nf-core: Add additional regexes for new tools in process get_software_versions
regexes = {
'nf-core/trigenome': ['v_pipeline.txt', r"(\S+)"],
'Nextflow': ['v_nextflow.txt', r"(\S+)"],
'FastQC': ['v_fastqc.txt', r"FastQC v(\S+)"],
'MultiQC': ['v_multiqc.txt', r"multiqc, version (\S+)"],
}
results = OrderedDict()
results['nf-core/trigenome'] = '<span style="color:#999999;\">N/A</span>'
results['Nextflow'] = '<span style="color:#999999;\">N/A</span>'
results['FastQC'] = '<span style="color:#999999;\">N/A</span>'
results['MultiQC'] = '<span style="color:#999999;\">N/A</span>'
# Search each file using its regex
for k, v in regexes.items():
try:
with open(v[0]) as x:
versions = x.read()
match = re.search(v[1], versions)
if match:
results[k] = "v{}".format(match.group(1))
except IOError:
results[k] = False
# Remove software set to false in results
for k in results:
if not results[k]:
del(results[k])
# Dump to YAML
print ('''
id: 'software_versions'
section_name: 'nf-core/trigenome Software Versions'
section_href: 'https://github.com/nf-core/trigenome'
plot_type: 'html'
description: 'are collected at run time from the software output.'
data: |
<dl class="dl-horizontal">
''')
for k,v in results.items():
print(" <dt>{}</dt><dd><samp>{}</samp></dd>".format(k,v))
print (" </dl>")
# Write out regexes as csv file:
with open('software_versions.csv', 'w') as f:
for k,v in results.items():
f.write("{}\t{}\n".format(k,v))
| 31.698113 | 85 | 0.623214 |
aef4e14243052b8e5349ea9b4240a5f1147cdb59 | 56 | py | Python | __init__.py | OdooTestNet/School-Mangement | 3c613a2ecdb88965d847acd294e70a875f5af5e2 | [
"Unlicense"
] | null | null | null | __init__.py | OdooTestNet/School-Mangement | 3c613a2ecdb88965d847acd294e70a875f5af5e2 | [
"Unlicense"
] | null | null | null | __init__.py | OdooTestNet/School-Mangement | 3c613a2ecdb88965d847acd294e70a875f5af5e2 | [
"Unlicense"
] | null | null | null | from.import model
from.import wizard
from.import reports | 18.666667 | 19 | 0.857143 |
7b0abdc0ba9ef9c1fde7a0f0b003db2e5f729e7f | 2,325 | py | Python | lambdasoc/periph/timer.py | supersat/lambdasoc | c2662ef4cf485330dd137e1993394416c5497de5 | [
"BSD-2-Clause"
] | 1 | 2021-09-15T00:38:20.000Z | 2021-09-15T00:38:20.000Z | lambdasoc/periph/timer.py | supersat/lambdasoc | c2662ef4cf485330dd137e1993394416c5497de5 | [
"BSD-2-Clause"
] | null | null | null | lambdasoc/periph/timer.py | supersat/lambdasoc | c2662ef4cf485330dd137e1993394416c5497de5 | [
"BSD-2-Clause"
] | 2 | 2021-10-10T23:05:25.000Z | 2021-12-17T05:57:56.000Z | from nmigen import *
from . import Peripheral
__all__ = ["TimerPeripheral"]
class TimerPeripheral(Peripheral, Elaboratable):
    """Timer peripheral.
    A general purpose down-counting timer peripheral.
    CSR registers
    -------------
    reload : read/write
        Reload value of counter. When `ctr` reaches 0, it is automatically reloaded with this value.
    en : read/write
        Counter enable.
    ctr : read/write
        Counter value.
    Events
    ------
    zero : edge-triggered (rising)
        Counter value reached 0.
    Parameters
    ----------
    width : int
        Counter width.
    Attributes
    ----------
    bus : :class:`nmigen_soc.wishbone.Interface`
        Wishbone bus interface.
    irq : :class:`IRQLine`
        Interrupt request.
    """
    def __init__(self, width):
        super().__init__()
        # Validate the counter width: must be an int in [0, 32] (see the
        # two checks below).
        if not isinstance(width, int) or width < 0:
            raise ValueError("Counter width must be a non-negative integer, not {!r}"
                             .format(width))
        if width > 32:
            raise ValueError("Counter width cannot be greater than 32 (was: {})"
                             .format(width))
        self.width = width
        # CSR bank exposing the three registers documented above.
        bank = self.csr_bank()
        self._reload = bank.csr(width, "rw")
        self._en = bank.csr( 1, "rw")
        self._ctr = bank.csr(width, "rw")
        # Event strobed (rising edge) when the counter hits zero.
        self._zero_ev = self.event(mode="rise")
        # Wishbone bridge providing the bus interface and the IRQ line.
        self._bridge = self.bridge(data_width=32, granularity=8, alignment=2)
        self.bus = self._bridge.bus
        self.irq = self._bridge.irq
    def elaborate(self, platform):
        m = Module()
        m.submodules.bridge = self._bridge
        # While enabled: count down each cycle; at zero, assert the event
        # strobe and reload the counter from the reload register.
        with m.If(self._en.r_data):
            with m.If(self._ctr.r_data == 0):
                m.d.comb += self._zero_ev.stb.eq(1)
                m.d.sync += self._ctr.r_data.eq(self._reload.r_data)
            with m.Else():
                m.d.sync += self._ctr.r_data.eq(self._ctr.r_data - 1)
        # Bus writes to each CSR take effect on the next clock edge.
        with m.If(self._reload.w_stb):
            m.d.sync += self._reload.r_data.eq(self._reload.w_data)
        with m.If(self._en.w_stb):
            m.d.sync += self._en.r_data.eq(self._en.w_data)
        with m.If(self._ctr.w_stb):
            m.d.sync += self._ctr.r_data.eq(self._ctr.w_data)
        return m
| 28.703704 | 100 | 0.556129 |
6ab71e0f32a9d37aad12c4ed1e52efaf0815a17c | 432 | py | Python | output/models/nist_data/atomic/string/schema_instance/nistschema_sv_iv_atomic_string_length_3_xsd/nistschema_sv_iv_atomic_string_length_3.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/nist_data/atomic/string/schema_instance/nistschema_sv_iv_atomic_string_length_3_xsd/nistschema_sv_iv_atomic_string_length_3.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/nist_data/atomic/string/schema_instance/nistschema_sv_iv_atomic_string_length_3_xsd/nistschema_sv_iv_atomic_string_length_3.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | null | null | null | from dataclasses import dataclass, field
__NAMESPACE__ = "NISTSchema-SV-IV-atomic-string-length-3-NS"
@dataclass
class NistschemaSvIvAtomicStringLength3:
    """Dataclass binding for the NISTSchema-SV-IV-atomic-string-length-3
    XML element (string value constrained to length 713)."""
    class Meta:
        # XML element name and namespace used by the (de)serializer.
        name = "NISTSchema-SV-IV-atomic-string-length-3"
        namespace = "NISTSchema-SV-IV-atomic-string-length-3-NS"
    # Text content of the element; the schema requires it with length 713.
    value: str = field(
        default="",
        metadata={
            "required": True,
            "length": 713,
        }
    )
| 22.736842 | 64 | 0.622685 |
70c21fc5c68aadb4b14cd80409b077ed44b800cc | 978 | py | Python | kubernetes/test/test_v1_volume_attachment.py | redjohn/python | 5e512ff564c244c50cab780d821542ed56aa965a | [
"Apache-2.0"
] | 1 | 2019-04-14T23:51:35.000Z | 2019-04-14T23:51:35.000Z | kubernetes/test/test_v1_volume_attachment.py | redjohn/python | 5e512ff564c244c50cab780d821542ed56aa965a | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v1_volume_attachment.py | redjohn/python | 5e512ff564c244c50cab780d821542ed56aa965a | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.14.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_volume_attachment import V1VolumeAttachment
class TestV1VolumeAttachment(unittest.TestCase):
    """Unit test stubs for the V1VolumeAttachment model."""

    def setUp(self):
        """No fixtures are required for the stub."""
        pass

    def tearDown(self):
        """Nothing to clean up."""
        pass

    def testV1VolumeAttachment(self):
        """Stub test for V1VolumeAttachment.

        FIXME: construct the object with mandatory attributes set to
        example values, e.g.
        model = kubernetes.client.models.v1_volume_attachment.V1VolumeAttachment()
        """
        pass


if __name__ == '__main__':
    unittest.main()
| 21.733333 | 105 | 0.713701 |
1a966ceee40551999606fa2ef4b1b09a1d8d532f | 1,374 | py | Python | tubbymemes/cli/mod.py | lalanza808/suchwowx | eea72168dbcadb13de3c309d135b0fb9137b62ad | [
"MIT"
] | 2 | 2021-12-13T16:49:53.000Z | 2022-01-16T21:22:45.000Z | tubbymemes/cli/mod.py | lalanza808/suchwowx | eea72168dbcadb13de3c309d135b0fb9137b62ad | [
"MIT"
] | null | null | null | tubbymemes/cli/mod.py | lalanza808/suchwowx | eea72168dbcadb13de3c309d135b0fb9137b62ad | [
"MIT"
] | 1 | 2022-03-12T02:40:53.000Z | 2022-03-12T02:40:53.000Z | import click
from flask import Blueprint
from tubbymemes.models import User
from tubbymemes.factory import db
bp = Blueprint('mod', 'mod')
@bp.cli.command('list')
def list():
    """
    List current server moderators.
    """
    # NOTE: `User.moderator == True` is a SQLAlchemy column expression
    # (builds SQL), not a Python boolean comparison, so it must not be
    # rewritten as `is True`.  The function name shadows the builtin
    # `list`, but it is only exposed as the CLI command name.
    for mod in User.query.filter(User.moderator == True):
        click.echo(f'{mod.id} - {mod.public_address}')
@bp.cli.command('add')
@click.argument('address')
def add(address):
    """
    Add server moderators by address.
    """
    # Addresses are stored lowercase, so normalize before querying.
    user = User.query.filter(User.public_address == address.lower()).first()
    if not user:
        click.echo('[!] No user with that address.')
        return
    if user.moderator:
        click.echo('[.] User is already a moderator')
        return
    user.moderator = True
    db.session.commit()
    click.echo(f'[+] Added moderator status to {address}')
@bp.cli.command('remove')
@click.argument('address')
def remove(address):
    """
    Remove server moderator by address.
    """
    # Normalize to lowercase for consistency with `add`, which queries on
    # address.lower(); without this, mixed-case input could fail to match
    # a moderator that `add` successfully created.
    user = User.query.filter(User.public_address == address.lower()).first()
    if user:
        if user.moderator:
            user.moderator = False
            db.session.commit()
            click.echo(f'[-] Removed moderator status from {address}')
        else:
            click.echo('[.] That user is not a moderator.')
    else:
        click.echo('[!] No user with that address.')
| 25.444444 | 76 | 0.60262 |
30a94ab7f99b995765a15eecdfe6a9efb7d446f2 | 702 | py | Python | maxArea.py | KFavour/geeksforgeeks | f920701b622ff56cf584f9c462b5afe2e2dfe6fb | [
"MIT"
] | null | null | null | maxArea.py | KFavour/geeksforgeeks | f920701b622ff56cf584f9c462b5afe2e2dfe6fb | [
"MIT"
] | null | null | null | maxArea.py | KFavour/geeksforgeeks | f920701b622ff56cf584f9c462b5afe2e2dfe6fb | [
"MIT"
] | null | null | null | def maxArea(A,le):
#code here
Area_max = 0
for i in range(le):
first = A[i]
if A[i] > Area_max/(le-i) or A[i] <= Area_max/i:
for j in range(i+1, le):
second = A[j]
if second > first:
Area = first*abs(i-j)
else:
Area = second*abs(i-j)
if Area > Area_max:
Area_max = Area
return Area_max
# Demo run of maxArea on a sample array; prints the best container area.
Array = [41,24, 66, 30, 7, 91, 7, 37, 57, 87, 53, 83, 45, 9, 9, 58, 21, 88, 22, 46, 6, 30, 13, 68, 1, 91, 62, 55, 10, 59, 24, 37, 48, 83, 95, 41, 2,50, 91, 36, 74, 20]
enter = maxArea(Array, len(Array))
print(enter)
print(5**0.5) | 33.428571 | 167 | 0.444444 |
dec55a866ca1c06f9e673f40b2520e65f6a19c68 | 2,169 | py | Python | saml2idp/xml_signing.py | anentropic/django-saml2-idp | a2810de839b26cf740a7b1ad3e00658498ce4d22 | [
"MIT"
] | null | null | null | saml2idp/xml_signing.py | anentropic/django-saml2-idp | a2810de839b26cf740a7b1ad3e00658498ce4d22 | [
"MIT"
] | 1 | 2016-11-09T13:32:44.000Z | 2019-01-31T19:06:05.000Z | saml2idp/xml_signing.py | anentropic/django-saml2-idp | a2810de839b26cf740a7b1ad3e00658498ce4d22 | [
"MIT"
] | null | null | null | """
Signing code goes here.
"""
# python:
import hashlib
import logging
import string
# other libraries:
import M2Crypto
# this app:
import saml2idp_metadata
from codex import nice64
from xml_templates import SIGNED_INFO, SIGNATURE
def load_cert_data(certificate_file):
    """
    Returns the certificate data out of the certificate_file.
    """
    # Strip the PEM armor: drop the first line (BEGIN marker) and the final
    # two split entries (END marker plus the empty string from the trailing
    # newline), keeping only the base64 body.
    pem_lines = M2Crypto.X509.load_cert(certificate_file).as_pem().split('\n')
    return ''.join(pem_lines[1:-2])
def get_signature_xml(subject, reference_uri):
    """
    Returns XML Signature for subject.

    subject       -- the string to be digested and signed
    reference_uri -- value substituted into the SignedInfo Reference URI
    """
    # Key/certificate locations come from the IdP configuration.
    config = saml2idp_metadata.SAML2IDP_CONFIG
    private_key_file = config['private_key_file']
    certificate_file = config['certificate_file']
    logging.debug('get_signature_xml - Begin.')
    logging.debug('Using private key file: ' + private_key_file)
    logging.debug('Using certificate file: ' + certificate_file)
    logging.debug('Subject: ' + subject)
    # Hash the subject (SHA-1 digest, then base64 via nice64).
    subject_hash = hashlib.sha1()
    subject_hash.update(subject)
    subject_digest = nice64(subject_hash.digest())
    logging.debug('Subject digest: ' + subject_digest)
    # Create signed_info by filling the SIGNED_INFO template.
    signed_info = string.Template(SIGNED_INFO).substitute({
        'REFERENCE_URI': reference_uri,
        'SUBJECT_DIGEST': subject_digest,
    })
    logging.debug('SignedInfo XML: ' + signed_info)
    # RSA-sign the signed_info with the configured private key.
    private_key = M2Crypto.EVP.load_key(private_key_file)
    private_key.sign_init()
    private_key.sign_update(signed_info)
    rsa_signature = nice64(private_key.sign_final())
    logging.debug('RSA Signature: ' + rsa_signature)
    # Load the certificate.
    cert_data = load_cert_data(certificate_file)
    # Put the signed_info and rsa_signature into the XML signature.
    # The ds namespace declaration is dropped here because the SIGNATURE
    # template already carries it on the enclosing element.
    signed_info_short = signed_info.replace(' xmlns:ds="http://www.w3.org/2000/09/xmldsig#"', '')
    signature_xml = string.Template(SIGNATURE).substitute({
        'RSA_SIGNATURE': rsa_signature,
        'SIGNED_INFO': signed_info_short,
        'CERTIFICATE': cert_data,
    })
    logging.debug('Signature XML: ' + signature_xml)
    return signature_xml
| 32.373134 | 97 | 0.713232 |
a519f2bb9fd08b68adea481a217379196719080d | 2,285 | py | Python | python_crash_course/classes/user.py | heniu1985/Learning | e865d59df9c4b1b0d7fc0158635c2a035f20bd4f | [
"MIT"
] | null | null | null | python_crash_course/classes/user.py | heniu1985/Learning | e865d59df9c4b1b0d7fc0158635c2a035f20bd4f | [
"MIT"
] | null | null | null | python_crash_course/classes/user.py | heniu1985/Learning | e865d59df9c4b1b0d7fc0158635c2a035f20bd4f | [
"MIT"
] | null | null | null | class User():
"""Klasa opisująca użytkownika"""
def __init__(self, first_name, last_name, email, country, city):
"""Inicjalizacja atrybutów opisujących użytkownika"""
self.first_name = first_name
self.last_name = last_name
self.email = email
self.country = country
self.city = city
self.login_attempts = 0
def describe_user(self):
"""Wyświetla opis użytkownika"""
print(f"{self.first_name.title()} {self.last_name.title()}")
print(f"e-mail: {self.email}")
print(f"{self.city.title()}, {self.country.title()}")
def greet_user(self):
"""Spersonalizowane powitanie"""
print(f"Witaj {self.first_name.title()}")
def increment_login_attempts(self):
"""Zwiększa liczbę prób logowania"""
self.login_attempts += 1
def reset_login_attempts(self):
"""Resetuje próby logowania"""
self.login_attempts = 0
def show_login_attempts(self):
"""Wyświetla liczbę prób logowania"""
print(f"{self.login_attempts} prób logowania.")
class Privileges():
    """Describe the set of privileges granted to an administrator."""

    def __init__(self, privileges=None):
        """Initialise the privilege list.

        The previous signature used a mutable list as the default argument,
        so every Privileges() instance (and therefore every Admin) shared
        and mutated the very same list object.  A fresh list is now built
        per instance when no explicit list is given.
        """
        if privileges is None:
            privileges = ["może dodać post", "może usunąć post", "może zbanować użytkownika"]
        self.privileges = privileges

    def show_privileges(self):
        """Print the administrator's privileges."""
        print(f"Użytkownik jest Administratorem i posiada następujące uprawnienia:")
        for privilege in self.privileges:
            print(f"- {privilege.capitalize()}")
class Admin(User):
    """Class describing an administrator (inherits from User)."""
    def __init__(self, first_name, last_name, email, country, city):
        """Initialise the attributes describing the administrator."""
        super().__init__(first_name, last_name, email, country, city)
        # Each admin carries its own Privileges object.
        self.privileges = Privileges()
# Demo: create three regular users and one admin.
user1 = User("Piotr", "Kowalski", "pkowalski@gmail.com", "Polska", "Rzeszów")
user2 = User("Damian", "Hennek", "hennek@gmail.com", "Polska", "Katowice")
user3 = User("John", "Kowalsky", "john.kowalsky@yahoo.com", "usa", "new")
user4 = Admin("Jan", "Nowak", "jan.nowak@wp.pl", "Polska", "wrocław")
user4.privileges.show_privileges() | 37.459016 | 104 | 0.656018 |
aca81e9fa6e8b20b1bdfab388678eacef6f82b06 | 852 | py | Python | 5. WEB/app/external_sources/mi_cole/models.py | doyaguillo1997/Data2Gether | 125e3e54060b342a473480f8cb1a913fc54f55ed | [
"MIT"
] | 1 | 2021-10-03T10:19:14.000Z | 2021-10-03T10:19:14.000Z | 5. WEB/app/external_sources/mi_cole/models.py | doyaguillo1997/Data2Gether | 125e3e54060b342a473480f8cb1a913fc54f55ed | [
"MIT"
] | null | null | null | 5. WEB/app/external_sources/mi_cole/models.py | doyaguillo1997/Data2Gether | 125e3e54060b342a473480f8cb1a913fc54f55ed | [
"MIT"
] | null | null | null | from django.contrib.gis.db import models
class School(models.Model):
    """GeoDjango model describing a school and its scraped attributes."""
    # Optional category; the school is kept (type set to NULL) when the
    # referenced SchoolType is deleted.
    type = models.ForeignKey(
        "SchoolType", null=True, blank=True, on_delete=models.SET_NULL
    )
    name = models.TextField("Nombre")
    address = models.TextField("Dirección")
    price = models.TextField("Precio")
    review = models.TextField("Valoración")
    description = models.TextField("Descripción")
    query = models.TextField("Petición")
    # Geographic location as a GeoDjango point (x = longitude, y = latitude,
    # see the properties below).
    coord = models.PointField("Localización")
    price_level = models.SmallIntegerField("Nivel Precio")
    price_level_weights = models.FloatField("Peso Precio")
    def __str__(self):
        return self.name
    @property
    def latitude(self):
        # Point.y is exposed as the latitude.
        return self.coord.y
    @property
    def longitude(self):
        # Point.x is exposed as the longitude.
        return self.coord.x
class SchoolType(models.Model):
    """Category of a School (referenced by School.type)."""
    type = models.TextField("Tipo")
| 26.625 | 70 | 0.684272 |
8c8e652a04e1ea09eff8385a5c99b2d63a9da290 | 3,585 | py | Python | system_manager.py | rakibulmdalam/IOT-Hardware-Abstraction-Layer | 4d344a82aa94ae561a7c2889f942c6a892e6810e | [
"MIT"
] | 1 | 2018-06-12T15:40:45.000Z | 2018-06-12T15:40:45.000Z | system_manager.py | UyumazHakan/Hardware-Abstraction-Platform | 52f13df333351516a88497e4a655ee1333abe7eb | [
"MIT"
] | null | null | null | system_manager.py | UyumazHakan/Hardware-Abstraction-Platform | 52f13df333351516a88497e4a655ee1333abe7eb | [
"MIT"
] | null | null | null | import os
import json
import logging
import time
import atexit
from device_manager import DeviceManager
from communication_manager import CommunicationManager
from security_communication.secure_communication_enum import SecurityEnum, CommunicationEnum, security_constructors, communication_constructors
dir_path = os.path.dirname(os.path.realpath(__file__))
config_file_directory = dir_path+"/config.json"
class SystemManager:
    """Top-level coordinator: loads config.json, builds the device and
    communication managers and wires their callbacks together."""
    config_file_directory = None
    config = {}
    device_manager, communication_manager = None, None
    #Updates config according to config file
    def update_config(self):
        logging.info("Config updated")
        with open(self.config_file_directory) as config_file:
            self.config = json.loads(config_file.read())
        logging.debug("New config : " + json.dumps(self.config))
    def device_manager_callback(self, data):
        # Forward data produced by the devices to all communication channels.
        self.communication_manager.send_all(data)
    def communication_manager_send_callback(self):
        # Currently a no-op.
        pass
    def communication_manager_receive_callback(self, data):
        # Currently a no-op: received data is ignored.
        pass
    def __init__(self, config_file_directory):
        self.config_file_directory = config_file_directory
        self.update_config()
        # Board-specific GPIO setup; the import is deferred so the module
        # also loads on hosts without the RPi library.
        if self.config["board_type"] == "raspberry_pi":
            import RPi.GPIO as GPIO
            GPIO.setmode(GPIO.BOARD)
        self.device_manager = DeviceManager(self.config["devices"], self.config["board_type"], self.device_manager_callback)
        if self.device_manager.connected > 0:
            self.communication_manager = CommunicationManager(self.config["communication_protocols"], self.communication_manager_send_callback, self.communication_manager_receive_callback)
        else:
            print('no sensor initiated')
            exit()
        # NOTE(review): fixed 5 s delay before the first read — presumably to
        # let the communication protocols come up; confirm it is required.
        time.sleep(5)
        self.device_manager.read_all()
def log_worker(server):
    """Upload any leftover log files to the web server, then (re)configure
    local logging to a fresh timestamped file.

    `server` is a dict with "ip" and "port" keys consumed by the HTTP
    communication constructor.
    NOTE(review): the only call site in main() is commented out, so in the
    current code path logging.basicConfig below never runs — confirm intent.
    """
    with open(config_file_directory) as config_file:
        config = json.loads(config_file.read())
    # Plain-text security wrapper around a raw HTTP channel to the server.
    web_server = security_constructors[SecurityEnum.PlainText.value] \
        ({}, \
         communication_constructors[CommunicationEnum.HTTP.value] \
         (server))
    # Authenticate and pull a bearer token for the upload endpoint.
    login_response = web_server.send( \
        {"msg":{"username": config["username"], "password": config["password"]}, \
         "http_header":{"Content-Type": "application/json"}, "http_method": "POST", \
         "http_selector": "/api/users/authenticate", \
         "http_body_type": web_server.communication_protocol.BodyTypes.RAW})
    token = json.loads(login_response.read().decode("utf-8"))["token"]
    # Walk the log directory bottom-up and upload every file found; a file
    # is deleted locally only once the server confirms with HTTP 200.
    for root, dirs, files in os.walk(config["log_directory"], topdown=False):
        for name in files:
            old_log=os.path.join(root, name)
            response = web_server.send( \
                {"fields":{"id": config["id"]}, \
                 "files":{"file":old_log}, \
                 "http_header":{"Authorization": "Bearer "+token}, "http_method": "POST", \
                 "http_selector": "/api/devices/upload", \
                 "http_body_type": web_server.communication_protocol.BodyTypes.MULTIPART})
            print(response.read())
            status = response.status
            if status == 200:
                os.remove(old_log)
    # Start a brand-new timestamped log file for this run.
    logging.basicConfig(filename= config["log_directory"] + "log_" + str(int(time.time())) + ".txt", \
                        filemode= "w", level=logging.DEBUG, \
                        format="%(asctime)s - %(funcName)-25s:%(filename)-30s:%(thread)d - %(levelname)-5s - %(message)s")
    logging.info("Started")
def main():
    """Entry point: construct the SystemManager and keep the process alive.

    The manager's workers run in the background; this thread only has to
    stay alive so atexit/GPIO cleanup fires on shutdown.
    """
    log_server = {"ip": "141.40.254.150", "port": 80}
    #log_worker(log_server)
    system_manager = SystemManager(config_file_directory)
    # Sleep instead of the previous busy `pass` loop, which pinned a CPU
    # core at 100% while doing nothing.
    while True:
        time.sleep(1)
def exit_handler():
    """atexit hook: flush logging and release GPIO pins on a Raspberry Pi."""
    with open(config_file_directory) as config_file:
        config = json.loads(config_file.read())
    logging.info("Stopped")
    logging.shutdown()
    if config["board_type"] == "raspberry_pi":
        # Lazy import mirrors SystemManager.__init__ so non-Pi hosts work.
        import RPi.GPIO as GPIO
        GPIO.cleanup()

# Register the cleanup hook for normal interpreter shutdown.
atexit.register(exit_handler)

if __name__ == "__main__":
    main()
| 33.504673 | 179 | 0.747559 |
6450d297b76851c7ea273ac03618ee6527271ba3 | 21,302 | py | Python | TXHousing/analysis/zoning_graphs.py | amspector100/TXHousing | 5579ed6372c1ca78a72ba5e47a0f1f3146181bf8 | [
"MIT"
] | 1 | 2020-04-20T04:30:58.000Z | 2020-04-20T04:30:58.000Z | TXHousing/analysis/zoning_graphs.py | amspector100/TXHousing | 5579ed6372c1ca78a72ba5e47a0f1f3146181bf8 | [
"MIT"
] | null | null | null | TXHousing/analysis/zoning_graphs.py | amspector100/TXHousing | 5579ed6372c1ca78a72ba5e47a0f1f3146181bf8 | [
"MIT"
] | null | null | null | """Graphs which mostly rely on zoning data"""
import time
import shapely
import pandas as pd
import geopandas as gpd
from .. import utilities
from ..data_processing import zoning, boundaries
from plotnine import *
import matplotlib.pyplot as plt
# Minimum lot size graph -------------------------------------
def plot_minimum_lot_size(savepath = 'Figures/Zoning/Minimum_Lot_Size_Residential_No_Agriculture.svg',
                          width = 10, height = 8):
    """Graphs average minimum lot size in Austin/Dallas.

    Methodological note: this treats zones with no minimum lot size as having a minimum lot size of zero. It also
    includes areas zoned as non-single family, which we might want to exclude in the future.

    :param savepath: Path the SVG figure is written to.
    :param width, height: Figure size in inches.
    """
    # Get zoning data and subset to exclude agricultural zones and nonresidential zones
    austin_zones = zoning.austin_inputs.process_zoning_shapefile(regulation_features = ['min_lot'])
    austin_zones = austin_zones.loc[(~austin_zones['base_zone'].str.contains('RR')) &
                                    (austin_zones['broad_zone'] != 'Other')]
    dallas_zones = zoning.dallas_inputs.process_zoning_shapefile(regulation_features = ['inv_density'])
    # BUGFIX: the residential filter previously indexed austin_zones here,
    # which silently mis-masked the Dallas data (and only worked at all when
    # the two frames happened to align).
    dallas_zones = dallas_zones.loc[(~dallas_zones['base_zone'].str.contains('A(A)', regex = False)) &
                                    (dallas_zones['broad_zone'] != 'Other')]
    # Update min_lot/inv_density (which are the same thing) features to fill NaN values with zeros.
    austin_zones['min_lot'] = austin_zones['min_lot'].fillna(0)
    dallas_zones['inv_density'] = dallas_zones['inv_density'].fillna(0)
    # Average minimum lot size in 1-mile rings out to 15 miles from each
    # city center (state-plane projections for accurate distances).
    austin_minlot_radii = utilities.measurements.polygons_intersect_rings(austin_zones, factor = 'min_lot',
                                                                          lat = zoning.austin_inputs.lat,
                                                                          long = zoning.austin_inputs.long,
                                                                          categorical = False,
                                                                          newproj = 'epsg:2277',
                                                                          step = 1,
                                                                          maximum = 15)
    dallas_minlot_radii = utilities.measurements.polygons_intersect_rings(dallas_zones, factor = 'inv_density',
                                                                          lat = zoning.dallas_inputs.lat,
                                                                          long = zoning.dallas_inputs.long,
                                                                          categorical = False,
                                                                          newproj = 'epsg:2276',
                                                                          step = 1,
                                                                          maximum = 15)
    # Long format for plotnine: one row per (distance ring, city).
    minlotdata = pd.concat([dallas_minlot_radii, austin_minlot_radii], axis = 1)
    minlotdata.columns = ['Dallas', 'Austin']
    minlotdata['dist_to_center'] = minlotdata.index
    minlotdata = minlotdata.melt(id_vars = 'dist_to_center', value_name = 'min_lot', var_name = 'City')
    minlotplot = (ggplot(minlotdata, aes(x = 'dist_to_center', y = 'min_lot', fill = 'City'))
                  + geom_col(position = 'dodge', width = 0.9)
                  + theme_bw()
                  + scale_fill_manual(values=['#008000', 'cornflowerblue'])
                  + labs(title = 'Minimum Lot Size in Austin and Dallas, Excluding Agricultural and Nonresidential Land',
                         x = 'Distance from the City Center (Miles)',
                         y = 'Average Lot Size (Square Feet)'))
    minlotplot.save(savepath, width = width, height = height)
def plot_hd_locations(save_path = 'Figures/Zoning/HDLocations.svg', width = 8, height = 5):
    """Graphs locations of historic districts in Austin, Dallas, Houston.

    For each city, computes the share of land inside local and national
    historic districts in half-mile rings out to 5 miles from the center,
    then renders a faceted bar chart.
    """
    tx_hd_data = gpd.read_file(zoning.tx_hd_path).to_crs({'init':'epsg:4326'})
    # Austin: local historic districts carry a '-HD' suffix in the zoning feature.
    signature = '-HD'
    austin_nat_hds = tx_hd_data.loc[tx_hd_data['CITY'] == 'Austin']
    austin_zones = zoning.austin_inputs.process_zoning_shapefile()
    austin_local_hds = austin_zones.loc[austin_zones[zoning.austin_inputs.feature].str.contains(signature)]
    # Dallas: conservation districts ('CD' prefix) play the local-district role.
    dallas_zones = zoning.dallas_inputs.process_zoning_shapefile()
    dallas_cds = dallas_zones.loc[dallas_zones['LONG_ZONE_'].apply(lambda x: x[0:2]) == 'CD']
    dallas_nat_hds = tx_hd_data.loc[tx_hd_data['CITY'] == 'Dallas']
    # Houston local districts come from a dedicated shapefile.
    houston_local_hds = gpd.read_file(zoning.houston_historic_districts_path).to_crs({'init':'epsg:4326'})
    houston_nat_hds = tx_hd_data.loc[tx_hd_data['CITY'] == 'Houston']

    step = 0.5
    maximum = 5

    def _ring_shares(districts, inputs, newproj, name):
        """Share of land inside `districts` by distance ring from the center."""
        result = utilities.measurements.polygons_intersect_rings(
            districts, factor=None, lat=inputs.lat, long=inputs.long,
            newproj=newproj, step=step, maximum=maximum, group_outliers=False)
        result.name = name
        return result

    # One series per (city, district type); names double as facet/legend keys.
    series = [
        _ring_shares(austin_local_hds, zoning.austin_inputs, 'epsg:2277', 'Austin Local Historic Districts'),
        _ring_shares(austin_nat_hds, zoning.austin_inputs, 'epsg:2277', 'Austin National Historic Districts'),
        _ring_shares(dallas_cds, zoning.dallas_inputs, 'epsg:2276', 'Dallas Conservation Districts'),
        _ring_shares(dallas_nat_hds, zoning.dallas_inputs, 'epsg:2276', 'Dallas National Historic Districts'),
        _ring_shares(houston_local_hds, zoning.houston_inputs, 'epsg:2278', 'Houston Local Historic Districts'),
        _ring_shares(houston_nat_hds, zoning.houston_inputs, 'epsg:2278', 'Houston National Historic Districts'),
    ]

    # Combine, convert shares to percentages, and reshape to long format.
    all_loc_data = 100 * pd.concat(series, axis=1)
    all_loc_data['dist'] = all_loc_data.index
    all_loc_data = pd.melt(all_loc_data, var_name='type', value_name='percent', id_vars=['dist'])
    # The series name encodes "<City> <Type> ..."; split it back out.
    all_loc_data['city'] = all_loc_data['type'].apply(lambda x: x.split(' ')[0])
    all_loc_data['District Type'] = all_loc_data['type'].apply(lambda x: x.split(' ')[1])
    histlocations = (ggplot(all_loc_data, aes(x='dist', y='percent', group='District Type', fill='District Type'))
                     + geom_col(position='dodge')
                     + facet_wrap('~ city')
                     + labs(title = 'Locations of Historic Districts in Austin, Dallas, and Houston',
                            x = 'Distance from City Center (Miles)',
                            y = 'Percent of Land in Historic Districts')
                     + theme_bw())
    histlocations.save(save_path, width=width, height=height)
def plot_broad_zones_proportion():
    """Plot proportion of broad_zones by distance from city center, excluding nonresidential and agricultural land.

    Writes two figures: Figures/Zoning/SFZoningRings.svg (single family) and
    Figures/Zoning/MFZoningRings.svg (multifamily).
    """
    # Get zoning data and subset to exclude agricultural zones and nonresidential zones
    austin_zones = zoning.austin_inputs.process_zoning_shapefile(regulation_features = ['min_lot'])
    austin_zones = austin_zones.loc[(~austin_zones['base_zone'].str.contains('RR')) &
                                    (austin_zones['broad_zone'] != 'Other')]
    dallas_zones = zoning.dallas_inputs.process_zoning_shapefile(regulation_features = ['inv_density'])
    dallas_zones = dallas_zones.loc[(~dallas_zones['base_zone'].str.contains('A(A)', regex = False)) &
                                    (dallas_zones['broad_zone'] != 'Other')]
    # Percent of land in each broad zone per 1-mile ring out to 10 miles.
    dallas_zone_rings = utilities.measurements.polygons_intersect_rings(dallas_zones, factor = 'broad_zone',
                                                                        lat = zoning.dallas_inputs.lat,
                                                                        long = zoning.dallas_inputs.long,
                                                                        categorical = True,
                                                                        newproj='epsg:2276', step=1, maximum = 10)
    dallas_zone_rings['City'] = "Dallas"
    austin_zone_rings = utilities.measurements.polygons_intersect_rings(austin_zones, factor = 'broad_zone',
                                                                        lat = zoning.austin_inputs.lat,
                                                                        long = zoning.austin_inputs.long,
                                                                        categorical = True,
                                                                        newproj='epsg:2277', step=1, maximum = 10)
    austin_zone_rings['City'] = 'Austin'
    # Combine cities and reshape to long format for plotnine.
    zone_rings = pd.concat([dallas_zone_rings, austin_zone_rings], axis = 0)
    zone_rings['dist_to_center'] = zone_rings.index
    zone_rings = zone_rings.melt(id_vars = ['City', 'dist_to_center'], var_name = 'broad_zone', value_name = 'percent')

    def _plot_zone(zone_name, out_path):
        """Render and save the ring chart for one broad zone category."""
        subset = zone_rings.loc[zone_rings['broad_zone'] == zone_name]
        plot = (ggplot(subset, aes(x = 'dist_to_center', y = 'percent', fill = 'City'))
                + geom_col(position = 'dodge', width = 0.6)
                + theme_bw()
                + scale_fill_manual(['cornflowerblue', '#8FBC8F'])
                + labs(title = '{} Zoning in Austin and Dallas, Excluding Agricultural and Nonresidential Land'.format(zone_name),
                       x = 'Distance from the city center (Miles)',
                       y = 'Percentage of Residential Land Zoned'))
        plot.save(out_path, width = 8, height = 5)

    _plot_zone('Single Family', 'Figures/Zoning/SFZoningRings.svg')
    _plot_zone('Multifamily', 'Figures/Zoning/MFZoningRings.svg')
# Matplotlib helper functions
def add_counties(ax, county_list):
    """Draw and label the outlines of the given Texas counties on *ax*."""
    county_gdf = gpd.read_file(boundaries.county_boundaries_path)
    # STATEFP '48' restricts the national shapefile to Texas counties only.
    in_list = county_gdf['NAME'].isin(county_list)
    county_gdf = county_gdf.loc[in_list & (county_gdf['STATEFP'] == '48')]
    # A representative point gives a stable spot for each county label, and
    # the boundary (rather than the filled polygon) keeps the map readable.
    county_gdf['coords'] = county_gdf['geometry'].apply(lambda g: g.representative_point().coords[:][0])
    county_gdf['geometry'] = county_gdf['geometry'].apply(lambda g: g.boundary)
    county_gdf.plot(ax=ax, facecolor='none', alpha=0.5, edgecolor='black')
    for _, county in county_gdf.iterrows():
        ax.annotate(s=county['NAME'], xy=county['coords'], horizontalalignment='center')
def map_broad_zones(data, county_list, name, minlat, maxlat, minlong, maxlong, save_path, colordic = None,
                    featurename = 'Base Zoning'):
    """Plots an actual map of broad_zones in a data source within the lat/long bounds. If you want to do this for Austin
    or Dallas, just use the map_broad_zones_dallas_austin wrapper.

    :param data: A gdf including a geometry and base_zone column.
    :param county_list: A list of county outlines to plot on the data.
    :param name: A name to be used in titling the egraph.
    :param minlat, maxlat, minlong, maxlong: Bounds of the graph
    :param save_path: A path at which to save the map.
    :param colordic: Dictionary which maps broad_zones to colors. NOTE: the
        literal string 'none' acts as a sentinel that switches to geopandas'
        automatic categorical coloring instead of a manual legend.
    :param featurename: Used for the title of the graph. Graph title is {featurename} around {name}."""
    time0 = time.time()
    # Fig, ax
    fig, ax = plt.subplots()
    # Get subset of north texas data
    # Simplify geometry first to keep the spatial index and plot fast.
    data['geometry'] = data['geometry'].simplify(tolerance = 0.001)
    spatial_index = data.sindex
    print('Subsetting')
    # Keep only the polygons whose bounds intersect the plotting window.
    bounding_polygon = shapely.geometry.box(minlong, minlat, maxlong, maxlat)
    ids = list(spatial_index.intersection(bounding_polygon.bounds))
    subset = data.iloc[ids]
    # Plot
    if colordic != 'none':
        if colordic is None:
            # Default palette keyed by broad zone category.
            colordic = {'Single Family': '#ff81c0', # Pink np.array((255, 129, 192), dtype = int)
                        'Other Residential': '#c79fef', # Lavender np.array((199, 159, 239)), dtype = int)
                        'Multifamily': '#840000', # Dark red np.array((132, 0, 0), dtype = int)
                        'Other': '#96f97b'}
        # Loop through zones with specific colors
        zones = subset['broad_zone'].unique()
        legend_handlers = []
        # Add zones
        for zone in zones:
            filtered_subset = subset.loc[subset['broad_zone'] == zone]
            filtered_subset.plot(ax = ax, color = colordic[zone], alpha = 0.6, label = zone)
            # Empty scatter gives a colored legend marker for this zone.
            legend_handlers.append(plt.scatter([], [], color = colordic[zone]))
        ax.set_xlabel('Longitude')
        ax.set_ylabel('Latitude')
        ax.set_ylim(minlat, maxlat)
        ax.set_xlim(minlong, maxlong)
        ax.set_title('{} Around {}, TX'.format(featurename, name))
        ax.legend(tuple(legend_handlers), tuple(zones), fontsize = 6)
    else:
        # Sentinel path: let geopandas pick categorical colors and a legend.
        subset.plot(ax = ax, column = 'broad_zone', legend = True, legend_kwds = {'fontsize':6}, cmap = 'Set1')
    add_counties(ax, county_list = county_list)
    print('Saving')
    plt.savefig(save_path, dpi = 1000)
    print('Finished, took {}'.format(time.time() - time0))
def map_broad_zones_dallas_austin(plot_austin = True, plot_dallas = True):
    """Convenience wrapper: render the broad-zone maps for Austin and/or
    Dallas with their standard bounding boxes, counties, and output paths."""
    if plot_austin:
        map_broad_zones(data=zoning.get_austin_surrounding_zones(),
                        county_list=['Travis', 'Williamson'],
                        minlat=30.09, maxlat=30.76, minlong=-98.11, maxlong=-97.43,
                        name='Austin',
                        save_path='Figures/Zoning/Austin_base_zones.png',
                        featurename='Zoning')
    if plot_dallas:
        map_broad_zones(data=zoning.north_texas_inputs.process_zoning_shapefile(),
                        county_list=['Dallas', 'Denton', 'Tarrant', 'Collin'],
                        minlat=32.49, maxlat=33.51, minlong=-97.71, maxlong=-96.29,
                        name='Dallas',
                        save_path='Figures/Zoning/north_texas_base_zones.png',
                        featurename='Land Use')
def plot_zone_income_histogram(calculate = True,
                               save_path = 'Figures/Zoning/income_housing_typology.svg',
                               cache_path = 'shared_data/calculations/zoning_income_data.csv'):
    """ Plots the distribution of incomes conditional on broad zones in Austin and Dallas.

    :param calculate: When True, recompute the zone/income aggregation from
        block data and write it to cache_path; when False, reuse the cache.
    :param save_path: Where the SVG figure is written.
    :param cache_path: CSV cache of the grouped income counts.
    """
    if calculate:
        # Get block data with income brackets
        block_data = boundaries.BlockBoundaries(['X19_INCOME'], cities = ['Austin', 'Dallas'])
        # Census variable name -> lower bound (dollars) of its income bracket.
        factor_dictionary = {'B19001e2':0, # Start values, the next value is the end of the bracket
                             'B19001e3':10000,
                             'B19001e4':15000,
                             'B19001e5':20000,
                             'B19001e6':25000,
                             'B19001e7':30000,
                             'B19001e8':35000,
                             'B19001e9':40000,
                             'B19001e10':45000,
                             'B19001e11':50000,
                             'B19001e12':60000,
                             'B19001e13':75000,
                             'B19001e14':100000,
                             'B19001e15':125000,
                             'B19001e16':150000,
                             'B19001e17':200000}
        data_features = [factor_dictionary[key] for key in factor_dictionary]
        block_data.data = block_data.data.rename(columns = factor_dictionary)
        # Austin/Dallas Zones, restricted to the two residential broad zones.
        dallas_zones = zoning.dallas_inputs.process_zoning_shapefile()
        dallas_zones = dallas_zones.loc[dallas_zones['broad_zone'].isin(['Single Family', 'Multifamily'])]
        austin_zones = zoning.austin_inputs.process_zoning_shapefile()
        austin_zones = austin_zones.loc[austin_zones['broad_zone'].isin(['Single Family', 'Multifamily'])]
        # Pull income statistics
        austin_zones = block_data.push_features(austin_zones, data_features)
        dallas_zones = block_data.push_features(dallas_zones, data_features)
        # Calculate and cache
        austin_zones['City'] = 'Austin'
        dallas_zones['City'] = 'Dallas'
        # NOTE(review): this aliases data_features (no copy), so the extend
        # below also mutates data_features — harmless here since it is not
        # used afterwards, but worth confirming if the function is extended.
        selected_columns = data_features
        selected_columns.extend(['broad_zone', 'City']) # We don't need geometry anymore
        all_zones = pd.concat([austin_zones[selected_columns], dallas_zones[selected_columns]], axis = 0)
        final_data = all_zones.groupby(['City', 'broad_zone']).sum()
        final_data.to_csv(cache_path)
    # Read in data
    final_data = pd.read_csv(cache_path)
    final_data = final_data.melt(var_name = 'Household_Income', value_name = 'Count', id_vars = ['City', 'broad_zone'])
    # Order the bracket labels numerically rather than lexically.
    final_data = utilities.measurements.order_radii(final_data, feature = 'Household_Income')
    # Normalize by the total: percent of households in each bracket, given
    # the (broad_zone, City) combination.
    conditional_sums = final_data.groupby(['Household_Income', 'broad_zone', 'City']).agg({'Count': 'sum'})
    final_data = conditional_sums.groupby(level = [1,2]).apply(lambda x: 100*x / x.sum()).reset_index()
    incomeplot = (ggplot(final_data, aes(x = 'Household_Income', y = "Count", group = 'broad_zone', fill = 'broad_zone'))
                  + geom_bar(stat="identity", position=position_dodge())
                  + facet_wrap('~ City')
                  + theme_bw()
                  + labs(title = 'Income by Base Residential Zone, Austin and Dallas',
                         x = 'Household Income Bracket',
                         y = 'Percent of SF/MF Households in Income Bracket Given Income')
                  + theme(axis_text_x = element_text(rotation = 20, size = 8)))
    incomeplot.save(filename=save_path, width=15, height=8, bbox_inches='tight')
df853a7e93936a146fdd5929d026cca71a6c9ea3 | 6,177 | py | Python | core/loader.py | limpins/Deep-Learning-for-Time-Series | 237598881f69a0637f16c6bb488db17cd5b23aa4 | [
"MIT"
] | null | null | null | core/loader.py | limpins/Deep-Learning-for-Time-Series | 237598881f69a0637f16c6bb488db17cd5b23aa4 | [
"MIT"
] | null | null | null | core/loader.py | limpins/Deep-Learning-for-Time-Series | 237598881f69a0637f16c6bb488db17cd5b23aa4 | [
"MIT"
] | 1 | 2021-07-13T14:10:33.000Z | 2021-07-13T14:10:33.000Z | """
Email: autuanliu@163.com
Date: 2018/9/28
"""
import numpy as np
import pandas as pd
import torch
from torch.utils.data import DataLoader, Dataset
def get_json_data(file_name):
    """Load a JSON file and return the parsed object.

    Args:
        file_name (str): path of the JSON file.

    Returns:
        The deserialized Python object.
    """
    import ujson
    # `with` fixes the original file-handle leak (the handle from a bare
    # open() was never closed).
    with open(file_name, 'r') as fp:
        return ujson.load(fp)
def get_yaml_data(file_name):
    """Load a YAML file and return the parsed object.

    Args:
        file_name (str): path of the YAML file.

    Returns:
        The deserialized Python object.
    """
    import yaml
    # safe_load: yaml.load without an explicit Loader is deprecated and can
    # construct arbitrary Python objects from untrusted input; plain config
    # mappings/sequences only need the safe loader. `with` also fixes the
    # original unclosed file handle.
    with open(file_name, 'r') as fp:
        return yaml.safe_load(fp)
def get_mat_data(file_name, var_name):
    """Load one variable from a MATLAB .mat file as a numpy array.

    Args:
        file_name (str): full path of the .mat file, e.g. 'datasets/abc.mat'.
        var_name (str): name of the variable stored inside the file.

    Returns:
        np.ndarray: the raw data held under `var_name`.
    """
    import scipy.io as sio
    return sio.loadmat(file_name)[var_name]
def get_csv_data(file_name, sep=',', skiprows: int = 0, dtype=np.float32):
    """Read a delimited text file with pandas and return its values array."""
    return pd.read_csv(file_name, sep=sep, skiprows=skiprows, dtype=dtype).values
def get_txt_data(file_name, delimiter=',', dtype=np.float32):
    """Read a plain-text numeric matrix via numpy.loadtxt."""
    return np.loadtxt(file_name, delimiter=delimiter, dtype=dtype)
def get_excel_data(file_name, sheet_name, skiprows=0, dtype=np.float32):
    """Read one sheet of an Excel workbook and return its values array."""
    frame = pd.read_excel(file_name, sheet_name=sheet_name, skiprows=skiprows, dtype=dtype)
    return frame.values
def time_series_split(data: np.ndarray, splits=(0.8, 0.1, 0.1)):
    """Split a time series (rows = time steps) into train/valid/test.

    Args:
        data (np.ndarray): 2-D series to split; the first axis is time.
        splits: fractions for (train, valid, test). Only the first two are
            used; whatever remains becomes the test set. Default changed
            from a list to a tuple to avoid a mutable default argument
            (callers passing lists are unaffected).

    Returns:
        tuple: (train_set, valid_set, test_set)
    """
    n_rows = data.shape[0]
    # Ceil keeps the original rounding behaviour for fractional boundaries.
    n_train = int(np.ceil(n_rows * splits[0]))
    n_valid = int(np.ceil(n_rows * splits[1]))
    train_set = data[:n_train, :]
    valid_set = data[n_train:(n_train + n_valid), :]
    test_set = data[(n_train + n_valid):, :]
    return train_set, valid_set, test_set
def normalize(train_data, valid_data, test_data):
    """Standard-score (z-score) normalisation, fit on the training split only.

    Must be called only after the train/valid/test split so no validation or
    test statistics leak into the scaler. EEG data has both positive and
    negative values, hence standard normalisation.

    Args:
        train_data (np.ndarray): un-normalised training data.
        valid_data (np.ndarray): un-normalised validation data.
        test_data (np.ndarray): un-normalised test data.
    """
    from sklearn import preprocessing as skp
    # Fit on the training windows flattened to (samples, features), then
    # apply the same transform to every split and restore original shapes.
    dim = train_data.shape[-1]
    s1, s2, s3 = train_data.shape, valid_data.shape, test_data.shape
    scaler = skp.StandardScaler().fit(train_data.reshape(-1, dim))
    train_data = scaler.transform(train_data.reshape(-1, dim)).reshape(s1)
    valid_data = scaler.transform(valid_data.reshape(-1, dim)).reshape(s2)
    test_data = scaler.transform(test_data.reshape(-1, dim)).reshape(s3)
    return train_data, valid_data, test_data
def series2xy(series_data: np.ndarray, idx_x=None, idx_y=None, seq_length: int = 20, num_shift: int = 1):
    """Convert a time series into supervised-learning (X, y) pairs.

    Each input sample is a sliding window of `seq_length` rows; its target
    is the row immediately after the window.

    Args:
        series_data (np.ndarray): raw series, shape (time, channels).
        idx_x: column selector for the inputs; None keeps every column.
        idx_y: column selector for the targets; None keeps every column.
        seq_length (int, optional): Defaults to 20. Window width.
        num_shift (int, optional): Defaults to 1. Stride of the window.

    Returns:
        tuple: (inputs, targets) as np.ndarray.
    """
    num_point, _ = series_data.shape
    # A slice(None) selector means "all columns", which collapses the
    # original four-way None-handling branch into a single code path.
    cols_x = slice(None) if idx_x is None else idx_x
    cols_y = slice(None) if idx_y is None else idx_y
    inputs, targets = [], []
    for start in range(0, num_point - seq_length - num_shift + 1, num_shift):
        inputs.append(series_data[start:(start + seq_length), cols_x])
        targets.append(series_data[start + seq_length, cols_y])
    return np.array(inputs), np.array(targets)
class MakeSeqData(Dataset):
    """Torch Dataset wrapping numpy inputs/targets as tensors.

    1-D arrays are expanded in place to shape (N, 1) so every sample keeps
    an explicit feature dimension ([2, 1] and [2,] are different shapes).

    Args:
        inputs (np.ndarray): input data x.
        targets (np.ndarray): output data y.
    """

    def __init__(self, inputs: np.ndarray, targets: np.ndarray):
        super().__init__()
        # Kept as an instance attribute for backward compatibility.
        self.fill_dim = lambda t: t.unsqueeze_(1) if t.ndimension() == 1 else t
        self.data, self.target = (
            self.fill_dim(torch.from_numpy(arr)) for arr in (inputs, targets)
        )

    def __getitem__(self, index):
        return self.data[index], self.target[index]

    def __len__(self):
        return self.data.shape[0]
def make_loader(train_set, valid_set, test_set, idx_x=None, idx_y=None, seq_len=20, num_shift=1, bt_sz=32):
    """Build iterable DataLoaders from the already-split series data.

    Args:
        train_set (np.ndarray): training data.
        valid_set (np.ndarray): validation data.
        test_set (np.ndarray): test data.
        idx_x: column selector for the inputs, defaults to None (all columns).
        idx_y: column selector for the targets, defaults to None (all columns).
        seq_len (int, optional): Defaults to 20. Window length.
        num_shift (int, optional): Defaults to 1. Window stride.
        bt_sz (int, optional): Defaults to 32. Batch size.

    Returns:
        [torch.utils.data.DataLoader]: train_loader, valid_loader, test_loader
    """
    # Turn each split into sliding-window (X, y) pairs.
    X_train, y_train = series2xy(train_set, idx_x=idx_x, idx_y=idx_y, seq_length=seq_len, num_shift=num_shift)
    X_valid, y_valid = series2xy(valid_set, idx_x=idx_x, idx_y=idx_y, seq_length=seq_len, num_shift=num_shift)
    X_test, y_test = series2xy(test_set, idx_x=idx_x, idx_y=idx_y, seq_length=seq_len, num_shift=num_shift)
    # Normalise the input features (scaler fitted on the training windows).
    X_train, X_valid, X_test = normalize(X_train, X_valid, X_test)
    # Wrap each split in a Dataset.
    sub = [MakeSeqData(x, y) for x, y in zip([X_train, X_valid, X_test], [y_train, y_valid, y_test])]
    # Only the training set is shuffled; valid/test keep their order.
    # drop_last=True so every training batch has the full batch size.
    [train_loader, valid_loader, test_loader] = [DataLoader(t, batch_size=bt_sz, shuffle=sf, drop_last=True) for t, sf in zip(sub, [True, False, False])]
    return train_loader, valid_loader, test_loader
| 33.209677 | 153 | 0.672171 |
953b2b889b522076a7238ccfa494232d17915ec1 | 31 | py | Python | dicelang/float_special.py | Grumblesaur/atropos | 05b7875bdb415e09fbe845429b05bc09b6763920 | [
"MIT"
] | 1 | 2020-01-22T15:09:40.000Z | 2020-01-22T15:09:40.000Z | dicelang/float_special.py | Grumblesaur/atropos | 05b7875bdb415e09fbe845429b05bc09b6763920 | [
"MIT"
] | 36 | 2020-02-04T04:02:53.000Z | 2021-06-10T19:06:14.000Z | dicelang/float_special.py | Grumblesaur/dicelark | 05b7875bdb415e09fbe845429b05bc09b6763920 | [
"MIT"
# IEEE-754 special float values used by the dice language.
inf = float('inf')  # positive infinity (the old literal 10e10000 overflowed to the same value)
nan = float('nan')  # not-a-number (previously produced as inf - inf)
| 10.333333 | 15 | 0.645161 |
278f7ed87fd051a4aa8f9a34b6f1b36ef3e790fe | 1,274 | py | Python | src/first/exception.py | gurimmer/study-python | c9d4ccab48e511587a17e67b4ca203048cfc9c40 | [
"MIT"
] | null | null | null | src/first/exception.py | gurimmer/study-python | c9d4ccab48e511587a17e67b4ca203048cfc9c40 | [
"MIT"
] | null | null | null | src/first/exception.py | gurimmer/study-python | c9d4ccab48e511587a17e67b4ca203048cfc9c40 | [
"MIT"
] | null | null | null |
items = ['book1', 'book2', 'book3']


def getBook(index):
    """Return the book title at `index`, or a range-error message string."""
    found = 0
    try:
        found = items[index]
    except IndexError:
        return 'indexが範囲外です: ' + str(index)
    else:
        # The else clause runs only when no exception was raised.
        print('tryのelse句はexceptが実行されない場合に処理される')
    finally:
        # The finally clause always runs, on every exit path.
        print('finally句は必ず実行される')
    return found
numbers = ['1', '2', '3']


def getMessage(index):
    """Return numbers[index]; bad indices or types yield an error message."""
    try:
        return numbers[index]
    except (IndexError, TypeError) as e:
        # Both an out-of-range int and a non-int index end up here.
        return f'indexが範囲外です: {e}'
    finally:
        # Runs on every exit path, even after a `return`.
        print('finally句は必ず実行される')
# Demonstrates raising an exception with `raise`.
def throwException():
    """Always raise a ValueError (used to demo exception propagation)."""
    raise ValueError('意図的にエラーを発生')


# A bare `raise` in an except clause takes no argument and re-raises the
# active exception, so the original error propagates unchanged.
def takeOverException():
    """Call throwException() and re-raise whatever ValueError it throws."""
    try:
        throwException()
    except ValueError:
        raise
# Demo: in-range and out-of-range lookups.
print(getBook(1))
print(getBook(10))
# Demo: IndexError and TypeError are both reported as range errors.
print(getMessage(1))
print(getMessage(10))
print(getMessage('1'))
# Demo: catching an intentionally raised ValueError.
try:
    throwException()
except ValueError as e:
    print(f'{e}')
# Define a custom (original) exception hierarchy for this module.
class OriginalError(Exception):
    """Base class for this module's custom exceptions."""


# Inheriting from the custom base keeps `except OriginalError` catching both.
class PageNotFoundError(OriginalError):
    """Raised when a requested page does not exist."""

    def __init__(self, message):
        # Forward to Exception.__init__ so args / str(e) / pickling follow
        # the standard exception protocol (the original skipped this call),
        # and keep the explicit attribute existing callers read.
        super().__init__(message)
        self.message = message
# Demo: raising and catching the custom base exception.
try:
    raise OriginalError('オリジナル')
except OriginalError as e:
    print(f'{e}')
# Demo: raising and catching the derived exception.
try:
    raise PageNotFoundError('オリジナルページエラー')
except PageNotFoundError as e:
    print(f'{e}')
| 19.6 | 48 | 0.657771 |
9cb46042a3079c574a19e63119d8ba82bcd4a955 | 6,997 | py | Python | tests/rs_sqla_test_utils/models.py | jferg368/sqlalchemy-redshift | 5bd6e0dcefbd0a2f3cee0001efc5565ac2395cd2 | [
"MIT"
] | null | null | null | tests/rs_sqla_test_utils/models.py | jferg368/sqlalchemy-redshift | 5bd6e0dcefbd0a2f3cee0001efc5565ac2395cd2 | [
"MIT"
] | null | null | null | tests/rs_sqla_test_utils/models.py | jferg368/sqlalchemy-redshift | 5bd6e0dcefbd0a2f3cee0001efc5565ac2395cd2 | [
"MIT"
] | 1 | 2022-03-16T21:29:34.000Z | 2022-03-16T21:29:34.000Z | import sqlalchemy as sa
from sqlalchemy import event
from sqlalchemy.ext import declarative
from sqlalchemy.schema import CreateSchema
# Declarative base shared by every fixture model in this test module.
Base = declarative.declarative_base()
# Create the extra schema before any table inside it is created.
event.listen(Base.metadata, 'before_create', CreateSchema('other_schema'))


class Basic(Base):
    # Minimal table exercising column-level Redshift options
    # (distkey, sortkey and encoding declared on a single column).
    __tablename__ = 'basic'

    name = sa.Column(
        sa.Unicode(64), primary_key=True,
        redshift_distkey=True, redshift_sortkey=True, redshift_encode='lzo'
    )


class BasicInOtherSchema(Base):
    # Same table name as Basic but placed in 'other_schema', with the
    # Redshift options given at table level instead of column level.
    __tablename__ = 'basic'
    __table_args__ = (
        {'schema': 'other_schema',
         'redshift_diststyle': 'KEY',
         'redshift_distkey': 'col1',
         'redshift_sortkey': 'col1'}
    )

    col1 = sa.Column(sa.Integer(), primary_key=True)


class ReflectionDistKey(Base):
    # DISTSTYLE KEY with an explicit distkey column, for reflection tests.
    __tablename__ = 'reflection_distkey'
    col1 = sa.Column(sa.Integer(), primary_key=True)
    col2 = sa.Column(sa.Integer())
    __table_args__ = (
        {'redshift_diststyle': 'KEY',
         'redshift_distkey': 'col1'}
    )


class ReflectionSortKey(Base):
    # Compound sortkey spanning two columns.
    __tablename__ = 'reflection_sortkey'
    col1 = sa.Column(sa.Integer(), primary_key=True)
    col2 = sa.Column(sa.Integer())
    __table_args__ = (
        {'redshift_diststyle': 'EVEN',
         'redshift_sortkey': ('col1', 'col2')}
    )


class ReflectionInterleavedSortKey(Base):
    # INTERLEAVED sortkey variant (given as Column objects, not names).
    __tablename__ = 'reflection_interleaved_sortkey'
    col1 = sa.Column(sa.Integer(), primary_key=True)
    col2 = sa.Column(sa.Integer())
    __table_args__ = (
        {'redshift_diststyle': 'EVEN',
         'redshift_interleaved_sortkey': (col1, col2)}
    )
class ReflectionSortKeyDistKeyWithSpaces(Base):
    # Sortkey/distkey on a column whose SQL name contains spaces, so
    # reflection must handle delimited (quoted) identifiers.
    __tablename__ = 'sort_key_with_spaces'
    col1 = sa.Column('col with spaces', sa.Integer(), nullable=False)
    __table_args__ = {
        'redshift_diststyle': 'KEY',
        'redshift_sortkey': 'col with spaces',
        'redshift_distkey': 'col with spaces',
    }
    # No declared PK column: tell the ORM mapper to treat col1 as the key.
    __mapper_args__ = {
        'primary_key': [col1],
    }


class ReflectionUniqueConstraint(Base):
    # Table-level UNIQUE constraint over two columns.
    __tablename__ = 'reflection_unique_constraint'
    col1 = sa.Column(sa.Integer(), primary_key=True)
    col2 = sa.Column(sa.Integer())
    __table_args__ = (
        sa.UniqueConstraint(col1, col2),
        {'redshift_diststyle': 'EVEN'}
    )


class ReflectionPrimaryKeyConstraint(Base):
    # Composite primary key declared as a table-level constraint.
    __tablename__ = 'reflection_pk_constraint'
    col1 = sa.Column(sa.Integer())
    col2 = sa.Column(sa.Integer())
    __table_args__ = (
        sa.PrimaryKeyConstraint(col1, col2),
        {'redshift_diststyle': 'EVEN'}
    )


class ReflectionNamedPrimaryKeyConstraint(Base):
    # Composite primary key with an explicitly named constraint.
    __tablename__ = 'reflection_named_pk_constraint'
    col1 = sa.Column(sa.Integer())
    col2 = sa.Column(sa.Integer())
    __table_args__ = (
        sa.PrimaryKeyConstraint(
            col1, col2,
            name="reflection_named_pk_constraint__pkey"
        ),
        {'redshift_diststyle': 'EVEN'}
    )


class ReflectionForeignKeyConstraint(Base):
    # Single-column foreign key declared inline on the column.
    __tablename__ = 'reflection_fk_constraint'
    col1 = sa.Column(sa.Integer(),
                     sa.ForeignKey('reflection_unique_constraint.col1'),
                     primary_key=True)
    col2 = sa.Column(sa.Integer())
    __table_args__ = (
        {'redshift_diststyle': 'EVEN'}
    )
class ReflectionNamedForeignKeyConstraint(Base):
    # Foreign key with an explicitly named constraint.
    __tablename__ = 'reflection_named_fk_constraint'
    col1 = sa.Column(
        sa.Integer(),
        sa.ForeignKey('reflection_unique_constraint.col1',
                      name="reflection_named_fk_constraint__fk"),
        primary_key=True
    )
    col2 = sa.Column(sa.Integer())
    __table_args__ = (
        {'redshift_diststyle': 'EVEN'}
    )


class ReflectionCompositeForeignKeyConstraint(Base):
    # Two-column (composite) foreign key declared at table level.
    __tablename__ = 'reflection_composite_fk_constraint'
    id = sa.Column(sa.Integer(),
                   primary_key=True)
    col1 = sa.Column(sa.Integer())
    col2 = sa.Column(sa.Integer())
    __table_args__ = (
        sa.ForeignKeyConstraint(
            ['col1', 'col2'],
            ['reflection_pk_constraint.col1', 'reflection_pk_constraint.col2']
        ),
        {'redshift_diststyle': 'EVEN'}
    )


class ReflectionDefaultValue(Base):
    # Column carrying a server-side default (DEFAULT 5).
    __tablename__ = 'reflection_default_value'
    col1 = sa.Column(sa.Integer(), primary_key=True)
    col2 = sa.Column(sa.Integer(), server_default=sa.text('5'))
    __table_args__ = (
        {'redshift_diststyle': 'EVEN'}
    )


class ReflectionIdentity(Base):
    # IDENTITY column with (seed, step) = (1, 3).
    __tablename__ = 'reflection_identity'
    col1 = sa.Column(sa.Integer(), primary_key=True)
    col2 = sa.Column(sa.Integer(), redshift_identity=(1, 3))
    col3 = sa.Column(sa.Integer())
    __table_args__ = (
        {'redshift_diststyle': 'EVEN'}
    )
class ReflectionDelimitedIdentifiers1(Base):
    # Table name 'group' is a reserved word; column names contain quotes
    # and spaces, so everything must be emitted as delimited identifiers.
    __tablename__ = 'group'
    col1 = sa.Column('this "is it"', sa.Integer(), primary_key=True)
    col2 = sa.Column('and this also', sa.Integer())
    __table_args__ = (
        {'redshift_diststyle': 'EVEN'}
    )


class ReflectionDelimitedIdentifiers2(Base):
    # Reserved table name 'column' plus punctuation-heavy column names.
    __tablename__ = 'column'
    col1 = sa.Column('excellent! & column', sa.Integer(), primary_key=True)
    # # TODO: Upstream fix to allow ForeignKey definition to work.
    # # Currently gives sqlalchemy.exc.ArgumentError:
    # # Can't create ForeignKeyConstraint on table 'column':
    # # no column named '"most @exce.llent "' is present.
    # sa.ForeignKey(ReflectionDelimitedIdentifiers1.col1)
    col2 = sa.Column('most @exce.llent ', sa.Integer())
    __table_args__ = (
        {'redshift_diststyle': 'EVEN'}
    )


class ReflectionCustomReservedWords(Base):
    # 'aes256', 'open' and 'tag' exercise Redshift-specific reserved words.
    __tablename__ = 'aes256'
    col1 = sa.Column('open', sa.Integer())
    col2 = sa.Column('tag', sa.Integer())
    pkey = sa.Column('pkey', sa.Integer(), primary_key=True)
    __table_args__ = (
        {'redshift_diststyle': 'EVEN'}
    )


class ReflectionDelimitedTableName(Base):
    # Table name containing a dot, placed in an explicit schema.
    __tablename__ = 'this.table'
    col1 = sa.Column('id', sa.Integer(), primary_key=True)
    __table_args__ = (
        {'redshift_diststyle': 'EVEN',
         'schema': 'other_schema'}
    )


class ReflectionDelimitedTableNoSchema(Base):
    # Same dotted table name but with no schema given (schema=None).
    __tablename__ = 'this.table'
    col1 = sa.Column('id', sa.Integer(), primary_key=True)
    __table_args__ = (
        {'redshift_diststyle': 'EVEN',
         'schema': None}
    )
class Referenced(Base):
    # Fixture table: FK target living in a non-default schema, with an
    # IDENTITY primary key.
    __tablename__ = 'referenced'
    id = sa.Column(
        sa.Integer(), primary_key=True, nullable=False,
        redshift_identity=(1, 1)
    )
    __table_args__ = {
        'redshift_diststyle': 'EVEN',
        'schema': 'other_schema',
    }
class Referencing(Base):
    # Fixture table: holds a foreign key to Referenced.id; both tables sit
    # in 'other_schema', exercising cross-table FK reflection within a
    # non-default schema.
    __tablename__ = 'referencing'
    referenced_table_id = sa.Column(
        sa.Integer(), sa.ForeignKey(Referenced.id), primary_key=True,
        nullable=False,
    )
    __table_args__ = {
        'redshift_diststyle': 'EVEN',
        'schema': 'other_schema',
    }
class LongTablename(Base):
    # Fixture table: 127-character table and column names — presumably the
    # maximum identifier length accepted by Redshift; confirm against the
    # dialect docs.
    __tablename__ = 'a' * 127
    metric = sa.Column(name='b' * 127, type_=sa.Integer, primary_key=True)
| 28.559184 | 78 | 0.649564 |
04d0974c37a6f7ae3f0b605dd4bd66cbe3e9cddf | 1,557 | py | Python | centernet/defaults.py | lbin/CenterNet-better-plus | 4c49a574520f122605b641eaab3e165a32a09915 | [
"MIT"
] | 75 | 2020-03-03T10:03:07.000Z | 2022-03-11T02:57:01.000Z | centernet/defaults.py | CPFLAME/CenterNet-better-plus | 4c49a574520f122605b641eaab3e165a32a09915 | [
"MIT"
] | 5 | 2020-08-12T08:12:53.000Z | 2021-09-01T08:47:15.000Z | centernet/defaults.py | CPFLAME/CenterNet-better-plus | 4c49a574520f122605b641eaab3e165a32a09915 | [
"MIT"
] | 15 | 2020-03-20T14:01:03.000Z | 2022-03-19T00:46:09.000Z | # -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
This file contains components with some default boilerplate logic user may need
in training / testing. They will not work for everyone, but many users may find them useful.
The behavior of functions/classes in this file is subject to change,
since they are meant to represent the "common default behavior" people need in their projects.
"""
import logging
from detectron2.data import build_detection_test_loader, build_detection_train_loader
from detectron2.engine.defaults import DefaultTrainer
# from detectron2.modeling import build_model
from centernet.centernet import build_model
from centernet.dataset_mapper import DatasetMapper
__all__ = ["DefaultTrainer2"]
class DefaultTrainer2(DefaultTrainer):
    """Detectron2 DefaultTrainer variant wired to CenterNet components.

    Overrides the model builder to use the local ``centernet.centernet
    .build_model`` and both data loaders to use the CenterNet
    ``DatasetMapper`` instead of detectron2's defaults.
    """

    def __init__(self, cfg):
        super().__init__(cfg)

    @classmethod
    def build_model(cls, cfg):
        """
        Returns:
            torch.nn.Module:
        It now calls :func:`detectron2.modeling.build_model`.
        Overwrite it if you'd like a different model.
        """
        model = build_model(cfg)
        logger = logging.getLogger(__name__)
        logger.info("Model:\n{}".format(model))
        return model

    @classmethod
    def build_test_loader(cls, cfg, dataset_name):
        # Evaluation loader with the CenterNet mapper (is_train=False).
        return build_detection_test_loader(cfg, dataset_name, mapper=DatasetMapper(cfg, False))

    @classmethod
    def build_train_loader(cls, cfg):
        # Training loader with the CenterNet mapper (is_train=True).
        return build_detection_train_loader(cfg, mapper=DatasetMapper(cfg, True))
| 32.4375 | 95 | 0.727681 |
a6fa798dafc0a871cdee4448495bcf02c915b31b | 5,656 | py | Python | autoencoders/autoencoders.py | mavroudo/Proximity-based_OutlierDetection_BPM | 4711d7626a24f468c9201cf3833e182d18bcbdee | [
"MIT"
] | null | null | null | autoencoders/autoencoders.py | mavroudo/Proximity-based_OutlierDetection_BPM | 4711d7626a24f468c9201cf3833e182d18bcbdee | [
"MIT"
] | null | null | null | autoencoders/autoencoders.py | mavroudo/Proximity-based_OutlierDetection_BPM | 4711d7626a24f468c9201cf3833e182d18bcbdee | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 26 19:37:18 2021
@author: mavroudo
"""
from pm4py.objects.log.importer.xes import factory as xes_import_factory
from pm4py.algo.filtering.log.attributes import attributes_filter
from pm4py.objects.log.log import EventLog
from statistics import mean, stdev
import torch
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
def mean_value_per_Activity(log):
    """Collect event durations per activity and per trace from a pm4py log.

    Returns a pair:
      * data: dict mapping activity name -> [list of durations (s), count]
      * data_durations: list (one entry per trace) of per-event durations (s)

    Duration of an event is the time elapsed since the previous event of the
    same trace; for the first event it is measured against the trace's
    REG_DATE attribute (assumed to be a datetime — TODO confirm upstream).
    """
    data = dict()
    data_durations = [[] for i in log]
    for index_t, trace in enumerate(log):
        previous_time = 0
        for index, event in enumerate(trace):
            if index == 0:
                # First event: anchor on the trace registration timestamp.
                previous_time = trace.attributes["REG_DATE"]
            event_name = event["concept:name"]
            if event_name not in data:
                data[event_name] = [[], 0]
            time = event["time:timestamp"]
            duration = time - previous_time
            data[event_name][0].append(duration.total_seconds())
            data_durations[index_t].append(duration.total_seconds())
            data[event_name][1] += 1
            previous_time = time
    return data, data_durations
def meanAndstdev(data, activity_names) -> list:
    """Compute per-activity mean and sample standard deviation.

    ``data`` maps an activity name to ``[durations, count]`` (as produced by
    mean_value_per_Activity); only the duration list is used. Returns a
    2-tuple ``(mean_values, std_values)`` ordered like ``activity_names``
    (note: the annotation says ``list`` but a tuple of lists is returned).
    """
    mean_values = [mean(data[name][0]) for name in activity_names]
    std_values = [stdev(data[name][0]) for name in activity_names]
    return mean_values, std_values
def transformTraces(log: EventLog) -> list:
    """Encode each trace as a fixed-length activity-duration vector.

    For every trace, build a vector with one slot per distinct activity in
    the log, holding the trace's average duration for that activity (0 for
    activities absent from the trace). Each slot is then z-scored with the
    log-wide mean/stdev of that activity (slots with zero stdev stay 0).
    """
    activities = attributes_filter.get_attribute_values(log, "concept:name")
    activity_names = [i for i in activities]
    data, data_durations = mean_value_per_Activity(log)
    log_list = []
    for n_trace, trace in enumerate(log):
        # Per-trace accumulated duration and occurrence count per activity.
        l_trace = [0 for i in range(len(activity_names))]
        times = [0 for i in range(len(activity_names))]
        for n_event, event in enumerate(trace):
            index = activity_names.index(event["concept:name"])
            l_trace[index] += data_durations[n_trace][n_event]
            times[index] += 1
        # Average duration per activity within this trace.
        l_trace = [x / y if y != 0 else 0 for x, y in zip(l_trace, times)]
        log_list.append(l_trace)
    # Normalize every slot with the log-wide per-activity statistics.
    means, stdevs = meanAndstdev(data, activity_names)
    log_list = [[(x - y) / z if z != 0 else 0 for x, y, z in zip(l, means, stdevs)] for l in log_list]
    return log_list
def addGaussianNoise(traces: list, mean: float = 0, stddev: float = 0.1) -> list:
    """Return a copy of ``traces`` with i.i.d. Gaussian noise added.

    One noise vector of the trace's length is drawn per trace from
    N(mean, stddev); elements are returned as floats, one list per trace.
    """
    noisy_traces = []
    for trace in traces:
        perturbation = np.random.normal(loc=mean, scale=stddev, size=len(trace))
        noisy_traces.append(list(np.asarray(trace) + perturbation))
    return noisy_traces
#create autoencoder model as in Denoising autoencoders Nolle
class AE(torch.nn.Module):
    """Single-hidden-layer overcomplete autoencoder (after Nolle's
    denoising-autoencoder setup): ``length -> 2*length -> length`` with
    ReLU after both layers.
    """

    def __init__(self, length):
        super().__init__()
        hidden = length * 2
        self.encoder = torch.nn.Linear(in_features=length, out_features=hidden)
        self.decoder = torch.nn.Linear(in_features=hidden, out_features=length)

    def forward(self, x):
        encoded = F.relu(self.encoder(x))
        return F.relu(self.decoder(encoded))
if __name__ == "__main__":
    # --- Training phase: fit a denoising autoencoder on the event log. ---
    # read xes
    log = xes_import_factory.apply("input/outliers_30_activities_10k_0.005.xes")
    # transform it to traces as explained in our paper
    transformed = transformTraces(log)
    model = AE(len(transformed[0]))
    loss_function = torch.nn.MSELoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9, nesterov=True, weight_decay=1e-5)
    # Create training set by adding noise (denoising setup: the network sees
    # noisy traces but is trained to reconstruct them).
    tracesWithNoise = addGaussianNoise(transformed)
    # train on the training set
    trainSet = [torch.FloatTensor(i) for i in tracesWithNoise]
    testSet = [torch.FloatTensor(i) for i in transformed]
    epochs = 500
    train_loss = []
    for epoch in range(epochs):
        running_loss = 0.0
        # Fresh noise is drawn every epoch.
        for trace in [torch.FloatTensor(i) for i in addGaussianNoise(transformed)]:
            reconstructed = model(trace)
            loss = loss_function(reconstructed, trace)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
        # NOTE: 'loss' is rebound here from a tensor to the epoch average.
        loss = running_loss / len(trainSet)
        train_loss.append(loss)
        print('Epoch {} of {}, Train Loss: {:.3f}'.format(epoch + 1, epochs, loss))
    # save model
    torch.save(model, "model")

    # --- Evaluation phase: score traces with a previously saved model. ---
    # read xes
    log = xes_import_factory.apply("input/outliers_30_activities_10k_0.005.xes")
    # transform it to traces as explained in our paper
    transformed = transformTraces(log)
    testSet = [torch.FloatTensor(i) for i in transformed]
    # NOTE(review): loads "model_0.005", not the "model" file saved above —
    # presumably a pre-trained checkpoint; confirm the intended file.
    modelS = torch.load("model_0.005")
    modelS.eval()
    # get results: per-trace reconstruction error is the outlier score
    losses = []
    for trace in testSet:
        reconstructed = modelS(trace)
        loss = loss_function(reconstructed, trace)
        losses.append(loss.item())
    m = mean(losses)
    std = stdev(losses)
    # Ground-truth outlier indices, one per line, second CSV field.
    r = []
    with open("input/results_30_activities_10k_0.005_description", "r") as f:
        for line in f:
            r.append(int(line.split(",")[1]))
    # Flag the len(r) traces with the highest reconstruction loss.
    outliers = []
    threshold = sorted(losses)[-len(r)]
    for i, x in enumerate(losses):
        if x >= threshold:
            outliers.append(i)
    # create the roc_curve
    from sklearn.metrics import RocCurveDisplay, auc, roc_curve
    losses_normalized = [(float(i) - min(losses)) / (max(losses) - min(losses)) for i in losses]
    true_outliers = [1 if i in r else 0 for i in range(len(losses))]
    fprS, tprS, thresholds = roc_curve(true_outliers, losses_normalized)
    roc_auc = auc(fprS, tprS)
    displayS = RocCurveDisplay(fpr=fprS, tpr=tprS, roc_auc=roc_auc, estimator_name='Denoising autoencoder')
    displayS.plot()
    plt.plot(sorted(losses))
8271cbea5597ce6e305556868ab2634e6fb0296e | 21,024 | py | Python | src/custom_codes/scripts/command_vel_gazebo.py | lar-deeufba/potential_fields | 19f7a0f73e503a00999fa9e175d8cc1d432bee99 | [
"BSD-2-Clause"
] | 6 | 2020-01-14T17:56:03.000Z | 2021-08-10T18:57:04.000Z | src/custom_codes/scripts/command_vel_gazebo.py | lar-deeufba/potential_fields | 19f7a0f73e503a00999fa9e175d8cc1d432bee99 | [
"BSD-2-Clause"
] | null | null | null | src/custom_codes/scripts/command_vel_gazebo.py | lar-deeufba/potential_fields | 19f7a0f73e503a00999fa9e175d8cc1d432bee99 | [
"BSD-2-Clause"
] | 3 | 2019-12-21T02:50:39.000Z | 2022-01-04T02:53:07.000Z | #!/usr/bin/python
import rospy
import actionlib
import numpy as np
import argparse
import rosservice
from std_msgs.msg import Float64MultiArray, MultiArrayDimension
from sensor_msgs.msg import JointState
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
from control_msgs.msg import FollowJointTrajectoryAction, FollowJointTrajectoryGoal
from visualization_msgs.msg import Marker
from std_msgs.msg import Header, ColorRGBA
from geometry_msgs.msg import PoseStamped, Point, Vector3, Pose
from tf import TransformListener, TransformerROS
from tf.transformations import euler_from_quaternion, quaternion_from_euler, euler_from_matrix, quaternion_multiply
# import from moveit
from moveit_python import PlanningSceneInterface
# customized code
from get_geometric_jacobian import *
from ur_inverse_kinematics import *
from pyquaternion import Quaternion
def parse_args():
    """Parse the command-line flags controlling goal source and control mode.

    All four options are boolean switches (default False).
    """
    parser = argparse.ArgumentParser(description='AAPF_Orientation')
    # store_false assumes that variable is already true and is only set to false if is given in command terminal
    switches = (
        ('--armarker', 'Follow dynamic goal from ar_track_alvar package'),
        ('--gazebo', 'Follow dynamic goal from ar_track_alvar package'),
        ('--dyntest', 'Follow dynamic goal from ar_track_alvar package'),
        ('--OriON', 'Activate Orientation Control'),
    )
    for flag, description in switches:
        parser.add_argument(flag, action='store_true', help=description)
    return parser.parse_args()
"""
Calculate the initial robot position - Used before CPA application
"""
def get_ik(pose):
matrix = TransformerROS()
# The orientation of /tool0 will be constant
q = quaternion_from_euler(0, 3.14, 1.57)
matrix2 = matrix.fromTranslationRotation((pose[0]*(-1), pose[1]*(-1), pose[2]), (q[0], q[1], q[2], q[3]))
th = invKine(matrix2)
sol1 = th[:, 2].transpose()
joint_values_from_ik = np.array(sol1)
joint_values = joint_values_from_ik[0, :]
return joint_values.tolist()
def turn_velocity_controller_on():
    # Swap ros_control controllers: start the velocity-group controller and
    # stop the position-trajectory controller (strictness flag = 1).
    rosservice.call_service('/controller_manager/switch_controller', [['joint_group_vel_controller'], ['pos_based_pos_traj_controller'], 1])
def turn_position_controller_on():
    # Inverse of turn_velocity_controller_on: start the position-trajectory
    # controller and stop the velocity-group controller.
    rosservice.call_service('/controller_manager/switch_controller', [['pos_based_pos_traj_controller'], ['joint_group_vel_controller'], 1])
class vel_control(object):
def __init__(self, args, joint_values):
self.args = args
self.joint_values_home = joint_values
# CPA PARAMETERS
# At the end, the disciplacement will take place as a final orientation
self.Displacement = [0.01, 0.01, 0.01]
# CPA Parameters
self.zeta = 0.5 # Attractive force gain of the goal
self.max_error_allowed_pos_x = 0.010
self.max_error_allowed_pos_y = 0.010
self.max_error_allowed_pos_z = 0.006
self.max_error_allowed_ori = 0.14
self.dist_att = 0.1 # Influence distance in workspace
self.dist_att_config = 0.2 # Influence distance in configuration space
self.alfa_geral = 1.5 # multiply each alfa (position and rotation) equally
self.gravity_compensation = 9
self.alfa_pos = 4.5 * self.alfa_geral # Grad step of positioning - Default: 0.5
self.alfa_rot = 4 * self.alfa_geral # Grad step of orientation - Default: 0.4
# attributes used to receive msgs while publishing new ones
self.processing = False
self.new_msg = False
self.msg = None
# CPA Parameters
self.diam_goal = 0.05
# Topic used to publish vel commands
self.pub_vel = rospy.Publisher('/joint_group_vel_controller/command', Float64MultiArray, queue_size=10)
# Topic used to control the gripper
self.griper_pos = rospy.Publisher('/gripper/command', JointTrajectory, queue_size=10)
self.gripper_msg = JointTrajectory()
self.gripper_msg.joint_names = ['robotiq_85_left_knuckle_joint']
# visual tools from moveit
# self.scene = PlanningSceneInterface("base_link")
self.marker_publisher = rospy.Publisher('visualization_marker2', Marker, queue_size=10)
# Subscriber used to read joint values
rospy.Subscriber('/joint_states', JointState, self.ur5_actual_position, queue_size=10)
# if true, this node receives messages from publish_dynamic_goal.py
if self.args.dyntest:
# Subscriber used to receive goal coordinates from publish_dynamic_goal.py
rospy.Subscriber('/dynamic_goal', Point, self.get_goal_coordinates, queue_size=10)
# actionClient used to send joint positions
self.client = actionlib.SimpleActionClient('pos_based_pos_traj_controller/follow_joint_trajectory', FollowJointTrajectoryAction)
print "Waiting for server (pos_based_pos_traj_controller)..."
self.client.wait_for_server()
print "Connected to server (pos_based_pos_traj_controller)"
rospy.sleep(1)
# Standard attributes used to send joint position commands
self.joint_vels = Float64MultiArray()
self.goal = FollowJointTrajectoryGoal()
self.goal.trajectory = JointTrajectory()
self.goal.trajectory.joint_names = ['shoulder_pan_joint', 'shoulder_lift_joint',
'elbow_joint', 'wrist_1_joint', 'wrist_2_joint',
'wrist_3_joint']
self.initial_time = 4
# Class attribute used to perform TF transformations
self.tf = TransformListener()
# Denavit-Hartenberg parameters of UR5
# The order of the parameters is d1, SO, EO, a2, a3, d4, d45, d5, d6
self.ur5_param = (0.089159, 0.13585, -0.1197, 0.425, 0.39225, 0.10915, 0.093, 0.09465, 0.0823 + 0.15)
"""
Adds spheres in RVIZ - Used to plot goals and obstacles
"""
def add_sphere(self, pose, diam, color):
marker = Marker()
marker.header.frame_id = "base_link"
marker.id = 0
marker.pose.position = Point(pose[0], pose[1], pose[2])
marker.type = marker.SPHERE
marker.action = marker.ADD
marker.scale = Vector3(diam, diam, diam)
marker.color = color
self.marker_publisher.publish(marker)
"""
Function to ensure safety
"""
def safety_stop(self, ptAtual, wristPt):
# High limit in meters of the end effector relative to the base_link
high_limit = 0.01
# Does not allow wrist_1_link to move above 20 cm relative to base_link
high_limit_wrist_pt = 0.15
if ptAtual[-1] < high_limit or wristPt[-1] < high_limit_wrist_pt:
# Be careful. Only the limit of the end effector is being watched but the other
# joint can also exceed this limit and need to be carefully watched by the operator
rospy.loginfo("High limit of " + str(high_limit) + " exceeded!")
self.home_pos()
raw_input("\n==== Press enter to load Velocity Controller and start APF")
turn_velocity_controller_on()
"""
This function check if the goal position was reached
"""
def all_close(self, goal, tolerance = 0.015):
angles_difference = [self.actual_position[i] - goal[i] for i in range(6)]
total_error = np.sum(angles_difference)
if abs(total_error) > tolerance:
return False
return True
"""
This function is responsible for closing the gripper
"""
def close_gripper(self):
self.gripper_msg.points = [JointTrajectoryPoint(positions=[0.274], velocities=[0], time_from_start=rospy.Duration(0.1))]
self.griper_pos.publish(self.gripper_msg)
"""
This function is responsible for openning the gripper
"""
def open_gripper(self):
self.gripper_msg.points = [JointTrajectoryPoint(positions=[0.0], velocities=[0], time_from_start=rospy.Duration(1.0))]
self.griper_pos.publish(self.gripper_msg)
"""
The joint states published by /joint_staes of the UR5 robot are in wrong order.
/joint_states topic normally publishes the joint in the following order:
[elbow_joint, shoulder_lift_joint, shoulder_pan_joint, wrist_1_joint, wrist_2_joint, wrist_3_joint]
But the correct order of the joints that must be sent to the robot is:
['shoulder_pan_joint', 'shoulder_lift_joint', 'elbow_joint', 'wrist_1_joint', 'wrist_2_joint', 'wrist_3_joint']
"""
def ur5_actual_position(self, joint_values_from_ur5):
# rospy.loginfo(joint_values_from_ur5)
if self.args.gazebo:
self.th3, self.robotic, self.th2, self.th1, self.th4, self.th5, self.th6 = joint_values_from_ur5.position
else:
self.th3, self.th2, self.th1, self.th4, self.th5, self.th6 = joint_values_from_ur5.position
self.actual_position = [self.th1, self.th2, self.th3, self.th4, self.th5, self.th6]
"""
When to node /dynamic_goal from publish_dynamic_goal.py is used instead of Markers, this
function is responsible for getting the coordinates published by the node and save it
as attribute of the class
"""
def get_goal_coordinates(self, goal_coordinates):
self.ptFinal = [goal_coordinates.x, goal_coordinates.y, goal_coordinates.z]
self.add_sphere(self.ptFinal, self.diam_goal, ColorRGBA(0.0, 1.0, 0.0, 1.0))
"""
Used to test velcoity control under /joint_group_vel_controller/command topic
"""
def velocity_control_test(self):
# publishing rate for velocity control
# Joints are in the order [base, shoulder, elbow, wrist_1, wrist_2, wrist_3]
rate = rospy.Rate(125)
while not rospy.is_shutdown():
self.joint_vels.data = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
self.pub_vel.publish(self.joint_vels)
rospy.loginfo(self.actual_position)
rate.sleep()
"""
Send the HOME position to the robot
self.client.wait_for_result() does not work well.
Instead, a while loop has been created to ensure that the robot reaches the
goal even after the failure.
"""
def home_pos(self):
turn_position_controller_on()
rospy.sleep(0.1)
print(self.joint_values_home)
# First point is current position
try:
self.goal.trajectory.points = [(JointTrajectoryPoint(positions=self.joint_values_home, velocities=[0]*6, time_from_start=rospy.Duration(self.initial_time)))]
self.initial_time += 1
if not self.all_close(self.joint_values_home):
raw_input("==== Press enter to home the robot!")
print "'Homing' the robot."
self.client.send_goal(self.goal)
self.client.wait_for_result()
while not self.all_close(self.joint_values_home):
self.client.send_goal(self.goal)
self.client.wait_for_result()
except KeyboardInterrupt:
self.client.cancel_goal()
raise
except:
raise
print "\n==== The robot is HOME position!"
"""
Get forces from APF algorithm
"""
def get_joint_forces(self, ptAtual, ptFinal, oriAtual, dist_EOF_to_Goal, err_ori):
# Get UR5 Jacobian of each link
Jacobian = get_geometric_jacobian(self.ur5_param, self.actual_position)
# Getting attractive forces
forces_p = np.zeros((3, 1))
forces_w = np.zeros((3, 1))
for i in range(3):
if abs(ptAtual[i] - ptFinal[i]) <= self.dist_att:
f_att_l = -self.zeta*(ptAtual[i] - ptFinal[i])
else:
f_att_l = -self.dist_att*self.zeta*(ptAtual[i] - ptFinal[i])/dist_EOF_to_Goal[i]
if abs(oriAtual[i] - self.Displacement[i]) <= self.dist_att_config:
f_att_w = -self.zeta*(oriAtual[i] - self.Displacement[i])
else:
f_att_w = -self.dist_att_config*self.zeta*(oriAtual[i] - self.Displacement[i])/dist_EOF_to_Goal[i]
forces_p[i, 0] = f_att_l
forces_w[i, 0] = f_att_w
forces_p = np.asarray(forces_p)
JacobianAtt_p = np.asarray(Jacobian[5])
joint_att_force_p = JacobianAtt_p.dot(forces_p)
joint_att_force_p = np.multiply(joint_att_force_p, [[0.5], [0.1], [1.5], [1], [1], [1]])
forces_w = np.asarray(forces_w)
JacobianAtt_w = np.asarray(Jacobian[6])
joint_att_force_w = JacobianAtt_w.dot(forces_w)
joint_att_force_w = np.multiply(joint_att_force_w, [[0], [0.1], [0.1], [0.4], [0.4], [0.4]])
return np.transpose(joint_att_force_p), np.transpose(joint_att_force_w)
"""
Gets ptFinal and oriAtual
"""
def get_tf_param(self, approach):
# Check if a marker is used instead of a dynamic goal published by publish_dynamic_goal.py
if self.args.armarker:
# When the marker disappears we get an error and the node is killed. To avoid this
# we implemented this try function to check if ar_marker_0 frame is available
try:
# used when Ar Marker is ON
ptFinal, oriFinal = self.tf.lookupTransform("base_link", "ar_marker_0", rospy.Time())
oriFinal = list(euler_from_quaternion(oriFinal))
# Make YAW Angle goes from 0 to 2*pi
# Solution proposed by
# https://answers.ros.org/question/302953/tfquaternion-getangle-eqivalent-for-rospy/
ptAtual, oriAtual = self.tf.lookupTransform("ar_marker_0", "grasping_link", rospy.Time())
angle = -1 * 2 * np.arccos(oriAtual[-1])
oriAtual = list(euler_from_quaternion(oriAtual))
oriAtual[0] = angle
self.add_sphere(ptFinal, self.diam_goal, ColorRGBA(0.0, 1.0, 0.0, 1.0))
if not approach:
# Reach the position above the goal (object)
ptFinal[-1] += 0.02
max_error_allowed_pos_z = self.max_error_allowed_pos_z + ptFinal[-1]
return ptFinal, oriAtual, oriFinal, max_error_allowed_pos_z
max_error_allowed_pos_z = self.max_error_allowed_pos_z
# Return it if pick is performed
return ptFinal, oriAtual, oriFinal, max_error_allowed_pos_z
except:
if not rospy.is_shutdown():
self.home_pos()
raw_input("\nWaiting for /ar_marker_0 frame to be available! Press ENTER after /ar_marker_0 shows up.")
turn_velocity_controller_on()
self.CPA_vel_control(approach)
"""
Main function related the Artificial Potential Field method
"""
def CPA_vel_control(self, approach = False):
# Return the end effector location relative to the base_link
ptAtual, _ = self.tf.lookupTransform("base_link", "grasping_link", rospy.Time())
if approach:
self.alfa_pos = 8 * self.alfa_geral
self.pos_z = 0.04 if approach else None
if self.args.gazebo:
self.alfa_pos = 4.5 * self.alfa_geral * self.gravity_compensation * 0.001 # Grad step of positioning - Default: 0.5
self.alfa_rot = 4 * self.alfa_geral * 0.01
if approach:
self.alfa_pos = 8 * self.alfa_geral * self.gravity_compensation # Grad step of positioning - Default: 0.5
self.alfa_rot = 6 * self.alfa_geral # Grad step of orientation - Default: 0.4
# Get ptFinal published by ar_marker_0 frame and the orientation from grasping_link to ar_marker_0
ptFinal, oriAtual, oriFinal, max_error_allowed_pos_z = self.get_tf_param(approach)
# Calculate the correction of the orientation relative to the actual orientation
R, P, Y = -1 * oriAtual[0], -1 * oriAtual[1], 0.0
corr = [R, P, Y]
oriAtual = [oriAtual[i] + corr[i] for i in range(len(corr))]
err_ori = abs(np.sum(oriAtual))
# Calculate the distance between end effector and goal in each direction
# it is necessary to approach the object
dist_vec_x, dist_vec_y, dist_vec_z = np.abs(ptAtual - np.asarray(ptFinal))
if approach:
dist_EOF_to_Goal = [dist_vec_x, dist_vec_y, self.pos_z]
else:
dist_EOF_to_Goal = [dist_vec_x, dist_vec_y, dist_vec_z]
# Frequency of the velocity controller pubisher
# Max frequency: 125 Hz
rate = rospy.Rate(125)
while not rospy.is_shutdown() and (dist_vec_z > max_error_allowed_pos_z or dist_vec_y > self.max_error_allowed_pos_y or \
dist_vec_x > self.max_error_allowed_pos_x or err_ori > self.max_error_allowed_ori):
# In order to keep orientation constant, we need to correct the orientation
# of the end effector in respect to the ar_marker_0 orientation
oriAtual = [oriAtual[i] + corr[i] for i in range(len(corr))]
# Get absolute orientation error
err_ori = abs(np.sum(oriAtual))
# Get attractive linear and angular forces and repulsive forces
joint_att_force_p, joint_att_force_w = \
self.get_joint_forces(ptAtual, ptFinal, oriAtual, dist_EOF_to_Goal, err_ori)
# Publishes joint valocities related to position only
self.joint_vels.data = np.array(self.alfa_pos * joint_att_force_p[0])
# If orientation control is turned on, sum actual position forces to orientation forces
if self.args.OriON:
self.joint_vels.data = self.joint_vels.data + \
self.alfa_rot * joint_att_force_w[0]
self.pub_vel.publish(self.joint_vels)
# Get ptFinal published by ar_marker_0 frame and the orientation from grasping_link to ar_marker_0
# The oriFinal needs to be tracked online because the object will be dynamic
ptFinal, oriAtual, oriFinal, max_error_allowed_pos_z = self.get_tf_param(approach)
# Calculate the distance between end effector and goal
# dist_EOF_to_Goal = np.linalg.norm(ptAtual - np.asarray(ptFinal))
dist_vec_x, dist_vec_y, dist_vec_z = np.abs(ptAtual - np.asarray(ptFinal))
# Return the end effector position relative to the base_link
ptAtual, _ = self.tf.lookupTransform("base_link", "grasping_link", rospy.Time())
print "Z_position: ", ptAtual[-1]
# Check wrist_1_link position just for safety
wristPt, _ = self.tf.lookupTransform("base_link", "wrist_1_link", rospy.Time())
# Function to ensure safety. It does not allow End Effector to move below 20 cm above the desk
self.safety_stop(ptAtual, wristPt)
if approach:
dist_vec_z = self.pos_z
# The end effector will move 1 cm below the marker
if ptAtual[-1] < (ptFinal[-1] - 0.01):
print "Break loop."
break
ptAtual = [ptAtual[0], ptAtual[1], self.pos_z]
dist_EOF_to_Goal = [dist_vec_x, dist_vec_y, self.pos_z]
else:
dist_EOF_to_Goal = [dist_vec_x, dist_vec_y, dist_vec_z]
rate.sleep()
def main():
    """Pick sequence: home -> open gripper -> APF approach -> pick -> home."""
    arg = parse_args()
    turn_position_controller_on()

    # Calculate joint values equivalent to the HOME position
    joint_values = get_ik([-0.4, 0.0, 0.1 + 0.15])
    # NOTE(review): joint_values_grasp is currently unused.
    joint_values_grasp = [2.7503889388487677, -1.3631583069981188, 2.079091014654578, -2.357721461467634, -1.6166076458026515, 1.7685985390922419]

    ur5_vel = vel_control(arg, joint_values)
    # ur5_vel.joint_values_home = joint_values_ik

    # Send the robot to the custom HOME position
    ur5_vel.home_pos()

    # Stop the robot in case of the node is killed
    rospy.on_shutdown(ur5_vel.home_pos)

    raw_input("\n==== Press enter to open the gripper!")
    ur5_vel.open_gripper()

    raw_input("\n==== Press enter to get close to the object using APF!")
    turn_velocity_controller_on()
    ur5_vel.CPA_vel_control(approach = False)
    turn_position_controller_on()

    raw_input("\n==== Press enter to approach the object!")
    turn_velocity_controller_on()
    ur5_vel.CPA_vel_control(approach = True)

    # ur5_vel.joint_values_home = joint_values
    # ur5_vel.home_pos()
    # turn_position_controller_on()
    # print ur5_vel.actual_position

    raw_input("\n==== Press enter to close the gripper!")
    ur5_vel.close_gripper()

    # ur5_vel.joint_values_home = joint_values_ik
    ur5_vel.home_pos()

    # '''
    # Velocity Control Test
    # '''
    # raw_input("Press enter to load velocity control!")
    # ur5_vel.velocity_control_test()
if __name__ == '__main__':
    # ROSInterruptException is raised when the node is shut down mid-run.
    try:
        main()
    except rospy.ROSInterruptException:
        print "Program interrupted before completion"
| 42.731707 | 169 | 0.659056 |
f121b7b07f426923b7f551657d88c46c78e3fdd3 | 9,417 | py | Python | tests/test.py | super-resolution/line_profiler | 472ee9433298327263ea1e1423b37fbe78d2c861 | [
"MIT"
] | 4 | 2019-10-11T11:28:46.000Z | 2021-01-29T20:19:30.000Z | tests/test.py | super-resolution/line_profiler | 472ee9433298327263ea1e1423b37fbe78d2c861 | [
"MIT"
] | null | null | null | tests/test.py | super-resolution/line_profiler | 472ee9433298327263ea1e1423b37fbe78d2c861 | [
"MIT"
] | null | null | null | from unittest import TestCase
import unittest
from matplotlib import cm
from controllers.random_GUI import MainWindow
from src.controllers.fitter import *
from src.controllers.utility import *
from src.controllers.image import ImageSIM
from src.controllers import processing_SNC, processing_microtubule, processing_canny, processing_one_channel
from src.controllers.micro_services import profile_collector, profile_painter
from tifffile import TiffFile
from src.controllers.micro_services import *
from PyQt5.QtWidgets import QApplication, QMainWindow
import sys
import numpy as np
import os
import cv2
class FitterTest(TestCase):
    """Integration tests for the line-profile fitter and processing threads.

    NOTE(review): these tests read .tif fixtures from ../test_data and write
    results under ../data/<image name>, so they depend on the working
    directory and local filesystem.
    """

    def setUp(self):
        # A QApplication is required by the Qt-based processing code.
        self.qtApp = QApplication(sys.argv)
        self.fitter = Fit()
        self.path = os.getcwd()
        self.SCTestFile = os.path.dirname(self.path)+ r"\test_data"+r"\MAX_3Farben-X1_16um_Out_Channel Alignment-5-X1.tif"
        self.microtubTestFile = os.path.dirname(self.path)+ r"\test_data"+r"\Expansion dSTORM-Line Profile test.tif"
        self.image = ImageSIM(self.SCTestFile)
        self.image.parse()
        self.data = self.image.data
        # Output folder for profiles/overlays produced by the threads.
        self.save_path = os.path.dirname(os.getcwd()) + "\\data\\" + os.path.splitext(os.path.basename(self.image.file_path))[0]
        if not os.path.exists(self.save_path):
            os.makedirs(self.save_path)

    def test_collect_distance_data_for_histogram(self):
        image_list = [self.image]
        #thread = processing_one_channel.QProcessThread()
        #self.run_thread(thread)
        histogramer = Hist(image_list)
        histogramer.create_histogram()

    def test_fit_functions_setter(self):
        # The fit_function setter should accept set, list and tuple inputs
        # and keep the module-level fit_functions registry in sync.
        fit_function_name = {"gaussian", "bigaussian", "trigaussian", "cylinder_projection", "multi_cylinder_projection"}
        for func in fit_function_name:
            self.assertIn(func, fit_functions)
        fit_function_name.remove("multi_cylinder_projection")
        self.fitter.fit_function = fit_function_name
        fit_function_name_list = list(fit_function_name)
        fit_function_name_list.append("trigaussian")
        self.fitter.fit_function = fit_function_name_list
        self.fitter.fit_function = tuple(fit_function_name)
        self.assertEqual(len(fit_functions), len(fit_function_name))
        print(fit_functions)

    def test_fit_data(self):
        # Fit a synthetic cylinder-projection profile.
        X = np.linspace(0, 200, 801)
        data = cylinder_projection.fit(X, 25, 100, 25/2+8.75, 25/2+8.75*2, 0, blur=38.73)
        self.fitter.fit_data(data, 400)

    def test_floodfill(self):
        # A closed rectangle outline should flood-fill to a known pixel count.
        image = np.zeros((1024,1024)).astype(np.uint8)
        image[512,:] = 255
        image[520,:] = 255
        image[512:520,0] = 255
        image[512:520,1023] = 255
        image = create_floodfill_image(image)
        self.assertEqual(len(np.where(image != 0)[0]), 7154)#1022*7 pixel != 0

    def painter(self):
        # Helper (not a test): assert the painter output files exist, then
        # clean them up.
        self.assertTrue(os.path.exists(self.save_path+r"\Image_with_profiles.tif"))
        self.assertTrue(os.path.exists(self.save_path+r'\Image_overlay.tif'))
        os.remove(self.save_path + r"\Image_with_profiles.tif")
        os.remove(self.save_path + r'\Image_overlay.tif')

    def run_thread(self, thread):
        # Helper: configure a processing thread with fixture data and run it
        # synchronously.
        thread.set_data(0, self.data, self.image.file_path)
        thread.blur = 9
        thread.px_size = 0.032
        thread.intensity_threshold = 3.9
        thread.profil_width = 80
        thread.spline_parameter = 1
        thread.run()

    def test_processing_SNC(self):
        thread = processing_SNC.QProcessThread()
        self.run_thread(thread)
        self.painter()

    def test_processing_one_channel(self):
        thread = processing_one_channel.QProcessThread()
        self.run_thread(thread)
        self.painter()

    def test_processing_microtuboli(self):
        thread = processing_microtubule.QProcessThread()
        self.run_thread(thread)
        self.painter()

    def test_profile_collector(self):
        # The collector generator should accept a single array, a tuple and a
        # list of up to three channels and write one file per channel.
        red = np.ones(500)*2
        green = np.ones(500)*2
        blue = np.ones(500)*2
        gen = profile_collector(self.save_path,1)
        gen.send(red)
        gen.send((red,green))
        gen.send([red,green,blue])
        try:
            gen.send(None)
        except StopIteration as exc:
            # Generators hand back their return value via StopIteration.value.
            profiles = exc.value
            print(f"valid finish with {profiles}")
        self.assertTrue(os.path.exists(self.save_path+r"\red1.txt"))
        self.assertTrue(os.path.exists(self.save_path+r"\green1.txt"))
        self.assertTrue(os.path.exists(self.save_path+r"\blue1.txt"))
        os.remove(self.save_path + r'\red1.txt')
        os.remove(self.save_path + r'\green1.txt')
        os.remove(self.save_path + r'\blue1.txt')

    def test_profile_painter(self):
        # Paint a straight diagonal line onto the first channel of the image.
        line = {'X':[],'Y':[]}
        line['X'] = np.linspace(0,self.data.shape[-2]-1,600)
        line['Y'] = np.linspace(0,self.data.shape[-1]-1,600)
        data = self.data[0, 0]
        data = cv2.cvtColor(data.astype(np.uint16), cv2.COLOR_GRAY2RGBA)
        painter = profile_painter(data, self.save_path)
        painter.send(line)
        try:
            painter.send(None)
        except StopIteration:
            print("valid finish")
        self.painter()

    def tearDown(self):
        pass
class TestThreadScheduler():
    """Placeholder spec for thread-scheduler behaviour (no assertions yet).

    NOTE(review): does not inherit unittest.TestCase, so these methods are
    not collected by the unittest runner — confirm this is intentional.
    """

    def setUp(self):
        # NOTE(review): the parsed image is only bound to locals and is
        # discarded when setUp returns — presumably a stub.
        path = r"C:\Users\biophys\PycharmProjects\Fabi\data\test_data\MAX_3Farben-X1_16um_Out_Channel Alignment-5-X1.tif"
        image = ImageSIM(path)
        image.parse()

    def test_add_file_to_list(self):
        """
        File appears in file list if added.

        Returns
        -------
        """
        pass

    def test_file_disapears_on_processig(self):
        """
        Build thread factory on run...
        File should be visible in its own update bar.
        Check processing options.

        Returns
        -------
        """
        pass

    def test_scheduler_can_handle_multiple_threads(self):
        """
        Microservice for fitting and plotting.
        Check for right save folder.
        Thread shuts down after processing.
        File returns to file list.

        Returns
        -------
        """
        pass
class TestHistogram():
    """Manual histogram checks against locally stored distance data.

    NOTE(review): hard-coded absolute Windows paths; not runnable outside
    the original workstation, and not collected by unittest (no TestCase).
    """

    def test_plot(self):
        histogramer = Hist()
        path = r"C:\Users\biophys\PycharmProjects\Fabi\data\MAX_3Farben-X1_16um_Out_Channel Alignment-5-X1"
        data = np.loadtxt(path+"\distances.txt")
        histogramer.create_histogram(data, path=path)

    def test_collect_distances(self):
        # Concatenate distances.txt from every sub-folder, then histogram.
        histogramer = Hist()
        path = r"D:\Daten\Fabi\SNCRevisonEvaluatedData\Ultra-ExM"
        data = np.array([0])
        folders = [x[1] for x in os.walk(path)]
        for x in folders[0]:
            data = np.append(data,np.loadtxt(path+"\\"+x+"\distances.txt"))
        # Drop the initial placeholder zero.
        data = data[1:]
        histogramer.create_histogram(data, path=path)
class TestJansData():
    """Batch-processes every .tif in a data tree through the microtubule pipeline."""
    def __init__(self):
        # Root folder containing one sub-folder per acquisition.
        self.path = r"D:\Daten\Jan"
        self.folders = [x[1] for x in os.walk(self.path)]
        # NOTE(review): `z` is assigned but never used.
        z=0
    def test_run_thread(self):
        """Run the fitting thread on every unevaluated .tif file."""
        for folder in self.folders[0]:
            self.files = [x[2] for x in os.walk(self.path+"\\"+folder)]
            for file in self.files[0]:
                current_path = self.path +"\\"+folder + "\\" + file
                # Skip files that already have an evaluation result.
                if os.path.exists(current_path + "_" + "evaluation" + ".txt"):
                    continue
                fitter = Fit()
                thread = processing_microtubule.QProcessThread()
                # Fit results are delivered through the thread's plot signal.
                thread.sig_plot_data.connect(fitter.fit_data)
                if file.split(".")[-1] != "tif":
                    continue
                with TiffFile(current_path) as tif:
                    self.data = tif.asarray()
                # Pad the stack by 25 px on each side and place it in the second
                # channel of a 2-channel array, as expected by set_data.
                new_data = np.zeros((2, self.data.shape[0], self.data.shape[1]+50, self.data.shape[2]+50))
                new_data[1,:,25:25+self.data.shape[1], 25:self.data.shape[2]+25] = self.data[:]
                self.data = new_data
                # Coroutine that accumulates/saves results for this file.
                service = z_stack_microservice(current_path)
                fitter.service = service
                fitter.fit_function = ["gaussian"]
                thread.set_data(0,self.data, current_path)
                # Processing parameters (blur sigma, pixel size in um, etc.).
                thread.blur = 4
                thread.px_size = 0.1984
                thread.profil_width = 15
                thread.spline_parameter = 1
                thread.intensity_threshold = 0
                thread.run()
                try:
                    # Finalise the microservice coroutine for this file.
                    service.send(None)
                except StopIteration:
                    print("success")
def mean_profile_for_condition():
    """Average the per-folder mean intensity profiles of one condition.

    Each sub-folder of *path* must contain a ``red_mean.txt`` whose second
    column holds a line profile.  All profiles are centre-cropped to the
    shortest length found, averaged across folders, and written as a
    two-column ``average_profile.txt`` (index, mean intensity).
    """
    path = r"D:\Daten\Fabi\SNCRevisonEvaluatedData\dStorm"
    # Second column of red_mean.txt holds the profile values.
    folders = [x[1] for x in os.walk(path)][0]
    profiles = [np.loadtxt(path + "\\" + x + r"\red_mean.txt")[:, 1]
                for x in folders]
    # Shortest profile length; replaces the original 999999 sentinel search,
    # which would silently break for profiles longer than the sentinel.
    min_len = min(p.shape[0] for p in profiles)
    # Crop every profile symmetrically around its centre so they can be stacked.
    cropped = [p[int(p.shape[0] / 2 - min_len / 2):int(p.shape[0] / 2 + min_len / 2)]
               for p in profiles]
    mean_profile = np.mean(np.array(cropped), axis=0)
    to_save = np.array([np.arange(mean_profile.shape[0]), mean_profile])
    np.savetxt(path + r"\average_profile.txt", to_save.T)
if __name__ == '__main__':
unittest.main()
# case = TestArtificialHelixCreation()
# case.setUp()
# case.test_line_profile_evaluation()
#mean_profile_for_condition()
#case = TestHistogram()
#case.test_plot()
#case = TestJansData()
#case.test_run_thread()
| 35.269663 | 128 | 0.618775 |
10c8497260068c4486e416a74f9573145559d590 | 19,594 | py | Python | src/elg_demo.py | lelechen63/GazeML | ceab8c076b7bf164f9145f12915e1e0c7f14276a | [
"MIT"
] | null | null | null | src/elg_demo.py | lelechen63/GazeML | ceab8c076b7bf164f9145f12915e1e0c7f14276a | [
"MIT"
] | null | null | null | src/elg_demo.py | lelechen63/GazeML | ceab8c076b7bf164f9145f12915e1e0c7f14276a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""Main script for gaze direction inference from webcam feed."""
import argparse
import os
import queue
import threading
import time
import coloredlogs
import cv2 as cv
import numpy as np
import tensorflow as tf
from datasources import Video, Webcam
from datasources.image import Image
from models import ELG
import util.gaze
if __name__ == '__main__':
# Set global log level
parser = argparse.ArgumentParser(description='Demonstration of landmarks localization.')
parser.add_argument('-v', type=str, help='logging level', default='info',
choices=['debug', 'info', 'warning', 'error', 'critical'])
parser.add_argument('--from_image', type=str, help='Use this iamge path')
parser.add_argument('--from_video', type=str, help='Use this video path instead of webcam')
parser.add_argument('--record_video', type=str, help='Output path of video of demonstration.')
parser.add_argument('--fullscreen', action='store_true')
parser.add_argument('--headless', action='store_true')
parser.add_argument('--fps', type=int, default=60, help='Desired sampling rate of webcam')
parser.add_argument('--camera_id', type=int, default=0, help='ID of webcam to use')
args = parser.parse_args()
coloredlogs.install(
datefmt='%d/%m %H:%M',
fmt='%(asctime)s %(levelname)s %(message)s',
level=args.v.upper(),
)
# Check if GPU is available
from tensorflow.python.client import device_lib
session_config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
gpu_available = False
try:
gpus = [d for d in device_lib.list_local_devices(config=session_config)
if d.device_type == 'GPU']
gpu_available = len(gpus) > 0
except:
pass
# Initialize Tensorflow session
tf.logging.set_verbosity(tf.logging.INFO)
with tf.Session(config=session_config) as session:
# Declare some parameters
batch_size = 2
# Define webcam stream data source
# Change data_format='NHWC' if not using CUDA
if args.from_image:
assert os.path.isfile(args.from_image)
data_source = Image(args.from_image,
tensorflow_session=session, batch_size=batch_size,
data_format='NCHW' if gpu_available else 'NHWC',
eye_image_shape=(108, 180))
elif args.from_video:
assert os.path.isfile(args.from_video)
data_source = Video(args.from_video,
tensorflow_session=session, batch_size=batch_size,
data_format='NCHW' if gpu_available else 'NHWC',
eye_image_shape=(108, 180))
else:
data_source = Webcam(tensorflow_session=session, batch_size=batch_size,
camera_id=args.camera_id, fps=args.fps,
data_format='NCHW' if gpu_available else 'NHWC',
eye_image_shape=(36, 60))
# Define model
if args.from_video:
model = ELG(
session, train_data={'videostream': data_source},
first_layer_stride=3,
num_modules=3,
num_feature_maps=64,
learning_schedule=[
{
'loss_terms_to_optimize': {'dummy': ['hourglass', 'radius']},
},
],
)
else:
model = ELG(
session, train_data={'videostream': data_source},
first_layer_stride=1,
num_modules=2,
num_feature_maps=32,
learning_schedule=[
{
'loss_terms_to_optimize': {'dummy': ['hourglass', 'radius']},
},
],
)
# Record output frames to file if requested
if args.record_video:
video_out = None
video_out_queue = queue.Queue()
video_out_should_stop = False
video_out_done = threading.Condition()
            def _record_frame():
                """Consume frame indices from the queue and write them to the output video."""
                # video_out is created lazily here but owned by the enclosing
                # module scope so the main thread can detect/close it.
                global video_out
                last_frame_time = None
                out_fps = 30
                out_frame_interval = 1.0 / out_fps
                while not video_out_should_stop:
                    frame_index = video_out_queue.get()
                    # None is the sentinel that requests shutdown.
                    if frame_index is None:
                        break
                    assert frame_index in data_source._frames
                    frame = data_source._frames[frame_index]['bgr']
                    h, w, _ = frame.shape
                    # Lazily open the writer once the frame size is known.
                    if video_out is None:
                        video_out = cv.VideoWriter(
                            args.record_video, cv.VideoWriter_fourcc(*'H264'),
                            out_fps, (w, h),
                        )
                    now_time = time.time()
                    if last_frame_time is not None:
                        # Duplicate the frame enough times to cover the real
                        # elapsed wall time at the fixed output frame rate.
                        time_diff = now_time - last_frame_time
                        while time_diff > 0.0:
                            video_out.write(frame)
                            time_diff -= out_frame_interval
                    last_frame_time = now_time
                video_out.release()
                # Signal the main thread that the recording has been finalised.
                with video_out_done:
                    video_out_done.notify_all()
record_thread = threading.Thread(target=_record_frame, name='record')
record_thread.daemon = True
record_thread.start()
# Begin visualization thread
inferred_stuff_queue = queue.Queue()
def _visualize_output():
last_frame_index = 0
last_frame_time = time.time()
fps_history = []
all_gaze_histories = []
if args.fullscreen:
cv.namedWindow('vis', cv.WND_PROP_FULLSCREEN)
cv.setWindowProperty('vis', cv.WND_PROP_FULLSCREEN, cv.WINDOW_FULLSCREEN)
while True:
# If no output to visualize, show unannotated frame
if inferred_stuff_queue.empty():
next_frame_index = last_frame_index + 1
if next_frame_index in data_source._frames:
next_frame = data_source._frames[next_frame_index]
if 'faces' in next_frame and len(next_frame['faces']) == 0:
if not args.headless:
cv.imshow('vis', next_frame['bgr'])
if args.record_video:
video_out_queue.put_nowait(next_frame_index)
last_frame_index = next_frame_index
if cv.waitKey(1) & 0xFF == ord('q'):
return
continue
# Get output from neural network and visualize
output = inferred_stuff_queue.get()
bgr = None
for j in range(batch_size):
frame_index = output['frame_index'][j]
if frame_index not in data_source._frames:
continue
frame = data_source._frames[frame_index]
# Decide which landmarks are usable
heatmaps_amax = np.amax(output['heatmaps'][j, :].reshape(-1, 18), axis=0)
can_use_eye = np.all(heatmaps_amax > 0.7)
can_use_eyelid = np.all(heatmaps_amax[0:8] > 0.75)
can_use_iris = np.all(heatmaps_amax[8:16] > 0.8)
start_time = time.time()
eye_index = output['eye_index'][j]
bgr = frame['bgr']
eye = frame['eyes'][eye_index]
eye_image = eye['image']
eye_side = eye['side']
eye_landmarks = output['landmarks'][j, :]
eye_radius = output['radius'][j][0]
if eye_side == 'left':
eye_landmarks[:, 0] = eye_image.shape[1] - eye_landmarks[:, 0]
eye_image = np.fliplr(eye_image)
# Embed eye image and annotate for picture-in-picture
eye_upscale = 2
eye_image_raw = cv.cvtColor(cv.equalizeHist(eye_image), cv.COLOR_GRAY2BGR)
eye_image_raw = cv.resize(eye_image_raw, (0, 0), fx=eye_upscale, fy=eye_upscale)
eye_image_annotated = np.copy(eye_image_raw)
if can_use_eyelid:
cv.polylines(
eye_image_annotated,
[np.round(eye_upscale*eye_landmarks[0:8]).astype(np.int32)
.reshape(-1, 1, 2)],
isClosed=True, color=(255, 255, 0), thickness=1, lineType=cv.LINE_AA,
)
if can_use_iris:
cv.polylines(
eye_image_annotated,
[np.round(eye_upscale*eye_landmarks[8:16]).astype(np.int32)
.reshape(-1, 1, 2)],
isClosed=True, color=(0, 255, 255), thickness=1, lineType=cv.LINE_AA,
)
cv.drawMarker(
eye_image_annotated,
tuple(np.round(eye_upscale*eye_landmarks[16, :]).astype(np.int32)),
color=(0, 255, 255), markerType=cv.MARKER_CROSS, markerSize=4,
thickness=1, line_type=cv.LINE_AA,
)
face_index = int(eye_index / 2)
eh, ew, _ = eye_image_raw.shape
v0 = face_index * 2 * eh
v1 = v0 + eh
v2 = v1 + eh
u0 = 0 if eye_side == 'left' else ew
u1 = u0 + ew
bgr[v0:v1, u0:u1] = eye_image_raw
bgr[v1:v2, u0:u1] = eye_image_annotated
# Visualize preprocessing results
frame_landmarks = (frame['smoothed_landmarks']
if 'smoothed_landmarks' in frame
else frame['landmarks'])
for f, face in enumerate(frame['faces']):
for landmark in frame_landmarks[f][:-1]:
cv.drawMarker(bgr, tuple(np.round(landmark).astype(np.int32)),
color=(0, 0, 255), markerType=cv.MARKER_STAR,
markerSize=2, thickness=1, line_type=cv.LINE_AA)
cv.rectangle(
bgr, tuple(np.round(face[:2]).astype(np.int32)),
tuple(np.round(np.add(face[:2], face[2:])).astype(np.int32)),
color=(0, 255, 255), thickness=1, lineType=cv.LINE_AA,
)
# Transform predictions
eye_landmarks = np.concatenate([eye_landmarks,
[[eye_landmarks[-1, 0] + eye_radius,
eye_landmarks[-1, 1]]]])
eye_landmarks = np.asmatrix(np.pad(eye_landmarks, ((0, 0), (0, 1)),
'constant', constant_values=1.0))
eye_landmarks = (eye_landmarks *
eye['inv_landmarks_transform_mat'].T)[:, :2]
eye_landmarks = np.asarray(eye_landmarks)
eyelid_landmarks = eye_landmarks[0:8, :]
iris_landmarks = eye_landmarks[8:16, :]
iris_centre = eye_landmarks[16, :]
eyeball_centre = eye_landmarks[17, :]
eyeball_radius = np.linalg.norm(eye_landmarks[18, :] -
eye_landmarks[17, :])
# Smooth and visualize gaze direction
num_total_eyes_in_frame = len(frame['eyes'])
if len(all_gaze_histories) != num_total_eyes_in_frame:
all_gaze_histories = [list() for _ in range(num_total_eyes_in_frame)]
gaze_history = all_gaze_histories[eye_index]
if can_use_eye:
# Visualize landmarks
cv.drawMarker( # Eyeball centre
bgr, tuple(np.round(eyeball_centre).astype(np.int32)),
color=(0, 255, 0), markerType=cv.MARKER_CROSS, markerSize=4,
thickness=1, line_type=cv.LINE_AA,
)
# cv.circle( # Eyeball outline
# bgr, tuple(np.round(eyeball_centre).astype(np.int32)),
# int(np.round(eyeball_radius)), color=(0, 255, 0),
# thickness=1, lineType=cv.LINE_AA,
# )
# Draw "gaze"
# from models.elg import estimate_gaze_from_landmarks
# current_gaze = estimate_gaze_from_landmarks(
# iris_landmarks, iris_centre, eyeball_centre, eyeball_radius)
i_x0, i_y0 = iris_centre
e_x0, e_y0 = eyeball_centre
theta = -np.arcsin(np.clip((i_y0 - e_y0) / eyeball_radius, -1.0, 1.0))
phi = np.arcsin(np.clip((i_x0 - e_x0) / (eyeball_radius * -np.cos(theta)),
-1.0, 1.0))
current_gaze = np.array([theta, phi])
gaze_history.append(current_gaze)
gaze_history_max_len = 10
if len(gaze_history) > gaze_history_max_len:
gaze_history = gaze_history[-gaze_history_max_len:]
util.gaze.draw_gaze(bgr, iris_centre, np.mean(gaze_history, axis=0),
length=120.0, thickness=1)
else:
gaze_history.clear()
if can_use_eyelid:
cv.polylines(
bgr, [np.round(eyelid_landmarks).astype(np.int32).reshape(-1, 1, 2)],
isClosed=True, color=(255, 255, 0), thickness=1, lineType=cv.LINE_AA,
)
if can_use_iris:
cv.polylines(
bgr, [np.round(iris_landmarks).astype(np.int32).reshape(-1, 1, 2)],
isClosed=True, color=(0, 255, 255), thickness=1, lineType=cv.LINE_AA,
)
cv.drawMarker(
bgr, tuple(np.round(iris_centre).astype(np.int32)),
color=(0, 255, 255), markerType=cv.MARKER_CROSS, markerSize=4,
thickness=1, line_type=cv.LINE_AA,
)
dtime = 1e3*(time.time() - start_time)
if 'visualization' not in frame['time']:
frame['time']['visualization'] = dtime
else:
frame['time']['visualization'] += dtime
                    def _dtime(before_id, after_id):
                        # Elapsed milliseconds between two recorded timestamps of this frame.
                        return int(1e3 * (frame['time'][after_id] - frame['time'][before_id]))
                    def _dstr(title, before_id, after_id):
                        # Human-readable "title: Nms" string for the timing printout below.
                        return '%s: %dms' % (title, _dtime(before_id, after_id))
if eye_index == len(frame['eyes']) - 1:
# Calculate timings
frame['time']['after_visualization'] = time.time()
fps = int(np.round(1.0 / (time.time() - last_frame_time)))
fps_history.append(fps)
if len(fps_history) > 60:
fps_history = fps_history[-60:]
fps_str = '%d FPS' % np.mean(fps_history)
last_frame_time = time.time()
fh, fw, _ = bgr.shape
cv.putText(bgr, fps_str, org=(fw - 110, fh - 20),
fontFace=cv.FONT_HERSHEY_DUPLEX, fontScale=0.8,
color=(0, 0, 0), thickness=1, lineType=cv.LINE_AA)
cv.putText(bgr, fps_str, org=(fw - 111, fh - 21),
fontFace=cv.FONT_HERSHEY_DUPLEX, fontScale=0.79,
color=(255, 255, 255), thickness=1, lineType=cv.LINE_AA)
if not args.headless:
cv.imshow('vis', bgr)
last_frame_index = frame_index
# Record frame?
if args.record_video:
video_out_queue.put_nowait(frame_index)
# Quit?
if cv.waitKey(1) & 0xFF == ord('q'):
return
# Print timings
if frame_index % 60 == 0:
latency = _dtime('before_frame_read', 'after_visualization')
processing = _dtime('after_frame_read', 'after_visualization')
timing_string = ', '.join([
_dstr('read', 'before_frame_read', 'after_frame_read'),
_dstr('preproc', 'after_frame_read', 'after_preprocessing'),
'infer: %dms' % int(frame['time']['inference']),
'vis: %dms' % int(frame['time']['visualization']),
'proc: %dms' % processing,
'latency: %dms' % latency,
])
print('%08d [%s] %s' % (frame_index, fps_str, timing_string))
visualize_thread = threading.Thread(target=_visualize_output, name='visualization')
visualize_thread.daemon = True
visualize_thread.start()
# Do inference forever
infer = model.inference_generator()
while True:
output = next(infer)
for frame_index in np.unique(output['frame_index']):
if frame_index not in data_source._frames:
continue
frame = data_source._frames[frame_index]
if 'inference' in frame['time']:
frame['time']['inference'] += output['inference_time']
else:
frame['time']['inference'] = output['inference_time']
inferred_stuff_queue.put_nowait(output)
if not visualize_thread.isAlive():
break
if not data_source._open:
break
# Close video recording
if args.record_video and video_out is not None:
video_out_should_stop = True
video_out_queue.put_nowait(None)
with video_out_done:
video_out_done.wait()
| 48.142506 | 100 | 0.479177 |
f1decafed3dd9912b1ab456a5f7d5b245e48033e | 521 | py | Python | picoctf-2019/got/shellcode.py | onealmond/hacking-lab | 631e615944add02db3c2afef47bf1de7171eb065 | [
"MIT"
] | 9 | 2021-04-20T15:28:36.000Z | 2022-03-08T19:53:48.000Z | picoctf-2019/got/shellcode.py | onealmond/hacking-lab | 631e615944add02db3c2afef47bf1de7171eb065 | [
"MIT"
] | null | null | null | picoctf-2019/got/shellcode.py | onealmond/hacking-lab | 631e615944add02db3c2afef47bf1de7171eb065 | [
"MIT"
] | 6 | 2021-06-24T03:25:21.000Z | 2022-02-20T21:44:52.000Z | import os;os.environ['TMPDIR'] = os.path.join(os.environ['HOME'], 'tmp')
import pwn
remote_binary = "/problems/got_5_c5119617c90aa544a639812dbc41e24e/vuln"
def segfault():
    """Overwrite exit()'s GOT entry with the address of win() through the
    binary's arbitrary-write prompts, then dump the remaining output.

    The spawned process is always closed, even when startup fails.
    """
    pr = None
    try:
        pr = pwn.process(remote_binary)
        elf = pwn.ELF(remote_binary, False)
        print(elf.got)
        # Redirect exit() -> win() via the program's write-what-where prompts.
        pr.sendlineafter("Input address\n", str(elf.got["exit"]))
        pr.sendlineafter("Input value?\n", str(elf.sym["win"]))
        rsp = pr.readall(timeout=0.5)
        print(rsp)
    finally:
        # FIX: the original referenced `pr` unconditionally here, raising
        # NameError (and masking the real exception) if pwn.process() failed.
        if pr is not None:
            pr.close()
segfault()
| 27.421053 | 72 | 0.629559 |
353e2f7ae305142dc22eaf77a8143ff784407cba | 262 | py | Python | wallme/websites/tetras.py | LucBerge/wallme | 3c76a08cb4d8ae916b2b12708a78263573f6b06a | [
"MIT"
] | 1 | 2019-08-29T17:27:36.000Z | 2019-08-29T17:27:36.000Z | wallme/websites/tetras.py | LucBerge/wallme | 3c76a08cb4d8ae916b2b12708a78263573f6b06a | [
"MIT"
] | 2 | 2020-07-08T20:54:05.000Z | 2020-10-14T20:44:28.000Z | wallme/websites/tetras.py | LucBerge/wallme | 3c76a08cb4d8ae916b2b12708a78263573f6b06a | [
"MIT"
] | 1 | 2020-10-01T05:58:11.000Z | 2020-10-01T05:58:11.000Z | # coding: utf8
# Identifier, human-readable description and image source of this provider.
KEY = 'tetras'
DESCRIPTION = 'Amateur pictures of the Alpes and the Dauphine'
URL = 'http://tetras.org/Semaine.jpg'


def pre_process(subkey):
    """No pre-processing step is needed for this site."""
    return None


def process(date, subkey):
    """Return the (date-independent) URL of the weekly picture."""
    return URL


def post_process(image):
    """No post-processing step is needed for this site."""
    return None
| 14.555556 | 62 | 0.70229 |
b1a45612d48a520664360de02a5e2d958b16c505 | 900 | py | Python | yuvist/__init__.py | luuvish/yuvist | 2612278f0e0bc5ea1d49315ec611d1ac216609e3 | [
"Unlicense",
"MIT"
] | 3 | 2016-01-08T02:01:18.000Z | 2018-04-18T18:53:17.000Z | yuvist/__init__.py | luuvish/yuvist | 2612278f0e0bc5ea1d49315ec611d1ac216609e3 | [
"Unlicense",
"MIT"
] | null | null | null | yuvist/__init__.py | luuvish/yuvist | 2612278f0e0bc5ea1d49315ec611d1ac216609e3 | [
"Unlicense",
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""\
Kivy YUV Image Viewer
Copyright (C) 2012 Luuvish <luuvish@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# The package exposes no public names directly; import submodules instead.
__all__ = ()
__version__ = '0.10.1'
import kivy
# Fail fast if an older, incompatible Kivy version is installed.
kivy.require('1.6.0')
from os.path import dirname
from kivy.resources import resource_add_path
# Let Kivy resolve resources (kv files, images) relative to this package.
resource_add_path(dirname(__file__))
5640457ed4be3a9bdad92897a8e2827a2ae64aad | 2,433 | py | Python | tests/test_settings.py | learningequality/klorimin | c569cd4048ac670bc55a83f4fdda0b818c7f626e | [
"MIT"
] | null | null | null | tests/test_settings.py | learningequality/klorimin | c569cd4048ac670bc55a83f4fdda0b818c7f626e | [
"MIT"
] | null | null | null | tests/test_settings.py | learningequality/klorimin | c569cd4048ac670bc55a83f4fdda0b818c7f626e | [
"MIT"
] | null | null | null | import sys
from mock import patch
from ricecooker import chefs
# Settings exercised by the tests below, mapped to the value the CLI flags set.
settings = {"generate-missing-thumbnails": True, "compress-videos": True}
def test_settings_unset_default():
    """A fresh chef has no value for any known setting; defaults pass through."""
    chef = chefs.SushiChef()
    for name in settings:
        assert chef.get_setting(name) is None
        assert chef.get_setting(name, default=False) is False
def test_settings():
    """A value stored in SETTINGS is returned regardless of any default."""
    chef = chefs.SushiChef()
    for name, value in settings.items():
        chef.SETTINGS[name] = value
        assert chef.get_setting(name) == value
        assert chef.get_setting(name, default=None) == value
def test_cli_args_override_settings():
    """
    For settings that can be controlled via the command line, ensure that the command line setting
    takes precedence over the default setting.
    """
    # Scenario 1: flags override explicitly-set False defaults.
    test_argv = ["sushichef.py", "--compress", "--thumbnails", "--token", "12345"]
    with patch.object(sys, "argv", test_argv):
        chef = chefs.SushiChef()
        chef.SETTINGS["generate-missing-thumbnails"] = False
        chef.SETTINGS["compress-videos"] = False
        assert not chef.get_setting("generate-missing-thumbnails")
        assert not chef.get_setting("compress-videos")
        chef.parse_args_and_options()
        assert chef.get_setting("generate-missing-thumbnails")
        assert chef.get_setting("compress-videos")
    # Scenario 2: flags populate settings that were never set at all.
    test_argv = ["sushichef.py", "--compress", "--thumbnails", "--token", "12345"]
    with patch.object(sys, "argv", test_argv):
        chef = chefs.SushiChef()
        assert len(chef.SETTINGS) == 0
        assert chef.get_setting("generate-missing-thumbnails") is None
        assert chef.get_setting("compress-videos") is None
        chef.parse_args_and_options()
        assert chef.get_setting("generate-missing-thumbnails")
        assert chef.get_setting("compress-videos")
    # now test without setting the flags
    # Scenario 3: without flags, the explicit False defaults must survive parsing.
    test_argv = ["sushichef.py", "--token", "12345"]
    with patch.object(sys, "argv", test_argv):
        chef = chefs.SushiChef()
        chef.SETTINGS["generate-missing-thumbnails"] = False
        chef.SETTINGS["compress-videos"] = False
        assert not chef.get_setting("generate-missing-thumbnails")
        assert not chef.get_setting("compress-videos")
        chef.parse_args_and_options()
        assert not chef.get_setting("generate-missing-thumbnails")
        assert not chef.get_setting("compress-videos")
| 31.597403 | 98 | 0.675709 |
4ec4597011e6627b52ee6495890d57eb2dcd63e6 | 2,079 | py | Python | sudoku/network/model.py | kevinddchen/sudoku | 135b47ffcfade88628acabacd0a2c916da98a9e4 | [
"MIT"
] | null | null | null | sudoku/network/model.py | kevinddchen/sudoku | 135b47ffcfade88628acabacd0a2c916da98a9e4 | [
"MIT"
] | null | null | null | sudoku/network/model.py | kevinddchen/sudoku | 135b47ffcfade88628acabacd0a2c916da98a9e4 | [
"MIT"
] | null | null | null | import os
import torch
import torch.nn as nn
# ------------------------------------------------------------------------------
def get_model(L: int = 8, D: int = 256) -> nn.Module:
    '''
    Neural network model for digit classification. Inputs are Nx1x28x28 tensors
    of floats in the range [0, 1].

    Args:
        L (int): Number of latent features.
        D (int): Size of dense layers.

    Returns:
        (nn.Module): Neural network model.
    '''
    layers = []
    # Stage 1: two 3x3 convolutions at L channels, then 28x28 -> 14x14.
    layers += [
        nn.Conv2d(1, L, 3, padding='same'),
        nn.ReLU(inplace=True),
        nn.Conv2d(L, L, 3, padding='same'),
        nn.ReLU(inplace=True),
        nn.MaxPool2d(2),
    ]
    # Stage 2: two 3x3 convolutions at 2L channels, then 14x14 -> 7x7.
    layers += [
        nn.Conv2d(L, 2 * L, 3, padding='same'),
        nn.ReLU(inplace=True),
        nn.Conv2d(2 * L, 2 * L, 3, padding='same'),
        nn.ReLU(inplace=True),
        nn.MaxPool2d(2),
    ]
    # Stage 3: a 7x7 valid convolution collapses the 7x7 map to 1x1 with D
    # channels, acting as a fully connected layer over the spatial extent.
    layers += [
        nn.Conv2d(2 * L, D, 7, padding=0),
        nn.ReLU(inplace=True),
    ]
    # Classifier head: D -> D -> 10 logits, with dropout for regularisation.
    layers += [
        nn.Flatten(1),
        nn.Linear(D, D),
        nn.ReLU(inplace=True),
        nn.Dropout(),
        nn.Linear(D, 10),
    ]
    return nn.Sequential(*layers)
# ------------------------------------------------------------------------------
def init_weights(m: nn.Module):
    '''He initialization for weights.

    Intended for use with ``model.apply(init_weights)``; modules other than
    Conv2d/Linear are left untouched.
    '''
    if isinstance(m, (nn.Conv2d, nn.Linear)):
        nn.init.kaiming_normal_(m.weight, nonlinearity='relu')
# ------------------------------------------------------------------------------
def checkpoint_path(checkpoint_dir: str, epoch: int) -> str:
    '''Path to checkpoint file.'''
    filename = f'weights_epoch_{epoch:02d}.pth'
    return os.path.join(checkpoint_dir, filename)
# ------------------------------------------------------------------------------
def save_weights(model: nn.Module, path: str):
    '''Save model weights to file.'''
    state = model.state_dict()
    torch.save(state, path)
# ------------------------------------------------------------------------------
def load_weights(model: nn.Module, path: str):
    '''Load model weights from file.'''
    state = torch.load(path)
    model.load_state_dict(state)
| 30.573529 | 80 | 0.484367 |
d468ab1ecfa1481f83bcc18e93b0c4d46f2cf2d3 | 5,955 | py | Python | subjectExamplesFidelityGain.py | sanrou/fidelityWeighting | 3767d80ad31559264d8ff3e42407eeed863f0ebc | [
"BSD-3-Clause"
] | 1 | 2020-11-30T09:20:19.000Z | 2020-11-30T09:20:19.000Z | subjectExamplesFidelityGain.py | sanrou/fidelityWeighting | 3767d80ad31559264d8ff3e42407eeed863f0ebc | [
"BSD-3-Clause"
] | 4 | 2019-08-01T13:25:16.000Z | 2019-08-14T10:14:24.000Z | subjectExamplesFidelityGain.py | sanrou/fidelityWeighting | 3767d80ad31559264d8ff3e42407eeed863f0ebc | [
"BSD-3-Clause"
] | 3 | 2018-11-21T13:58:05.000Z | 2021-03-18T11:01:52.000Z | # -*- coding: utf-8 -*-
"""
Created on Mon May 31 11:42:12 2021
Load and display example subjects' parcel fidelities. One median original fidelity subject.
@author: rouhinen
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
# Subject fraction list
percentileList = np.array([0.5])
# percentileList = np.array([0.15, 0.5, 0.85])
savePathBase = "C:\\temp\\fWeighting\\plotDump\\"
parcelPlotEnd = 'XYZ Fidelity Orig Weighted examples.pdf'
gainPlotEnd = 'XYZ Fidelities Relative examples highlighted.pdf'
subjectsOrigWeightEnd = 'XYZ Fidelities Orig x Weighted scatter examples.pdf'
XYZto = 'schaefer200'
savePDFs = False
tightLayout = True
## Replace XYZ
parcelPlotEnd = parcelPlotEnd.replace('XYZ', XYZto)
subjectsOrigWeightEnd = subjectsOrigWeightEnd.replace('XYZ', XYZto)
gainPlotEnd = gainPlotEnd.replace('XYZ', XYZto)
## Load files. fidXArrays created in fidGroupAnalysis.py. There saved with np.save().
fidWArray = np.load('C:\\temp\\fWeighting\\numpyArrays\\fidArrays\\XYZ\\fidWArray.npy'.
replace('XYZ', XYZto))
fidOArray = np.load('C:\\temp\\fWeighting\\numpyArrays\\fidArrays\\XYZ\\fidOArray.npy'.
replace('XYZ', XYZto))
## Search example subjects. Build average fidelity arrays.
fidRArray = fidWArray/fidOArray
fidRAverage = np.average(fidRArray, axis=1)
fidWAverage = np.average(fidWArray, axis=1)
fidOAverage = np.average(fidOArray, axis=1)
# Searching values, sort the indices
# ind = np.argsort(fidRAverage) # Sorted by average gain
ind = np.argsort(fidOAverage) # Sorted by average original fidelity
exampleInds = np.int32(np.round(len(fidRAverage)*percentileList))
exampleInds = ind[exampleInds]
# Indices of subjects not used as examples
notExInds = list(range(len(fidRAverage)))
for i, index in enumerate(exampleInds):
notExInds.remove(index)
""" Plots """
# Set global figure parameters, including CorelDraw compatibility (.fonttype)
import matplotlib.pylab as pylab
if tightLayout == True:
params = {'legend.fontsize':'7',
'figure.figsize':(1.8, 1.4),
'axes.labelsize':'7',
'axes.titlesize':'7',
'xtick.labelsize':'7',
'ytick.labelsize':'7',
'lines.linewidth':'0.5',
'pdf.fonttype':42,
'ps.fonttype':42,
'font.family':'Arial'}
else: # Looks nice on the screen parameters
params = {'legend.fontsize':'7',
'figure.figsize':(3, 2),
'axes.labelsize':'7',
'axes.titlesize':'7',
'xtick.labelsize':'7',
'ytick.labelsize':'7',
'lines.linewidth':'0.5',
'pdf.fonttype':42,
'ps.fonttype':42,
'font.family':'Arial'}
pylab.rcParams.update(params)
colors = ['red', 'magenta', 'darkcyan']
""" Scatter plot weighted and original average fidelities by subject. """
fig, ax = plt.subplots(1,1)
# Plot non-example subjects
ax.scatter(fidOAverage[notExInds], fidWAverage[notExInds], c='black', alpha=0.5, s=10) ## X, Y.
ax.plot([0,1], [0,1], color='black')
# ax.set_title('PLV')
ax.set_xlabel('Parcel fidelity, Original')
ax.set_ylabel('Parcel fidelity, Weighted')
plt.ylim(0.2, 0.5)
plt.xlim(0.2, 0.5)
# Plot example subjects
for i, exInd in enumerate(exampleInds):
ax.scatter(fidOAverage[exInd], fidWAverage[exInd], c=colors[i], alpha=0.7, s=10) ## X, Y.
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(True)
ax.spines['left'].set_visible(True)
ax.spines['right'].set_visible(False)
plt.tight_layout(pad=0.1)
plt.show()
if savePDFs == True:
fig.savefig(savePathBase + subjectsOrigWeightEnd, format='pdf')
""" Plot example subject original and weighted parcel fidelities. """
fig, ax = plt.subplots(1,1)
for i, subInd in enumerate(exampleInds):
# Weighted
fidW = np.sort(fidWArray[subInd,:])
ax.plot(fidW, color=colors[i], linestyle='-',
label=f'Weighted fidelity, mean: {np.round(np.mean(fidW),3)}')
# Original
fidO = np.sort(fidOArray[subInd,:])
ax.plot(fidO, color=colors[i], linestyle=':', linewidth=1,
label=f'Original fidelity, mean: {np.round(np.mean(fidO),3)}')
# ax.fill_between(list(range(len(fidW))), fidO, fidW, color=colors[i], alpha=0.2)
# legend = ax.legend(loc='best', shadow=False)
# legend.get_frame()
ax.set_ylabel('Parcel fidelity')
ax.set_xlabel('Parcels, sorted by fidelity')
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(True)
ax.spines['left'].set_visible(True)
ax.spines['right'].set_visible(False)
plt.tight_layout(pad=0.1)
plt.show()
if savePDFs == True:
fig.savefig(savePathBase + parcelPlotEnd, format='pdf')
""" Plot parcel fidelity gain per subject. """
## Make color array by original mean parcel fidelity. Replace example subjects color.
multipliers = fidOAverage / np.max(fidOAverage)
colorsFids = []
for i, multiplier in enumerate(multipliers):
colorsFids.append([0.6*multiplier**2]*3)
for i, exInd in enumerate(exampleInds):
colorsFids[exInd] = mcolors.to_rgb(colors[i])
fig, ax = plt.subplots(1,1)
# Draw non-example subjects
for i, subjInd in enumerate(notExInds):
ax.plot(np.sort(fidRArray[subjInd])*100, color=colorsFids[subjInd], linestyle='-')
# Draw example subjects
for i, subjInd in enumerate(exampleInds):
ax.plot(np.sort(fidRArray[subjInd])*100, color=colorsFids[subjInd], linestyle='-')
ax.plot(100*np.ones(fidRArray.shape[1], dtype=float), color='black', linestyle='-', linewidth=0.3) # Set a horizontal line at 100 %.
# legend = ax.legend(loc='best', shadow=False)
# legend.get_frame()
ax.set_ylabel('Relative parcel fidelity (%)')
ax.set_xlabel('Parcels, sorted by gain')
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(True)
ax.spines['left'].set_visible(True)
ax.spines['right'].set_visible(False)
plt.ylim(10, 1000)
plt.yscale('log')
plt.tight_layout(pad=0.1)
plt.show()
if savePDFs == True:
fig.savefig(savePathBase + gainPlotEnd, format='pdf')
| 31.342105 | 133 | 0.693535 |
f7033cd7e4893924e98add48a902ed4e0b88f83b | 99 | py | Python | mmgen/core/runners/__init__.py | HXWAndCL/mmgeneration | 9afb1d740bf56a4ecde5064d5bb2a4e2d777638b | [
"Apache-2.0"
] | 1 | 2021-05-27T13:04:41.000Z | 2021-05-27T13:04:41.000Z | mmgen/core/runners/__init__.py | HXWAndCL/mmgeneration | 9afb1d740bf56a4ecde5064d5bb2a4e2d777638b | [
"Apache-2.0"
] | null | null | null | mmgen/core/runners/__init__.py | HXWAndCL/mmgeneration | 9afb1d740bf56a4ecde5064d5bb2a4e2d777638b | [
"Apache-2.0"
] | null | null | null | from .dynamic_iterbased_runner import DynamicIterBasedRunner
__all__ = ['DynamicIterBasedRunner']
| 24.75 | 60 | 0.858586 |
dfcb2371caa768405047d50559c740757833bdfb | 175 | py | Python | exercicio 5.py | pedropenna1/Exercicios-Python | bde8967a31b2395477f346ca1f1b5cb2de666a40 | [
"MIT"
] | null | null | null | exercicio 5.py | pedropenna1/Exercicios-Python | bde8967a31b2395477f346ca1f1b5cb2de666a40 | [
"MIT"
] | null | null | null | exercicio 5.py | pedropenna1/Exercicios-Python | bde8967a31b2395477f346ca1f1b5cb2de666a40 | [
"MIT"
] | null | null | null | N = int(input('Digite um número:' ))
Sucessor = N + 1
Antecessor = N - 1
print('O valor do número é {}\nSeu sucessor é {}\nSeu antecessor é {}'.format(N,Sucessor,Antecessor))
| 35 | 101 | 0.674286 |
3b649fd5fc16d6c94fb876f9bc5cf5eab3e45bea | 757 | py | Python | cogs/configuration.py | Tempystral/Boxbot-2.0 | 41b5fbb70c20107e7b38b1550f2363e097c9bb4b | [
"BSD-3-Clause"
] | null | null | null | cogs/configuration.py | Tempystral/Boxbot-2.0 | 41b5fbb70c20107e7b38b1550f2363e097c9bb4b | [
"BSD-3-Clause"
] | 8 | 2019-12-29T06:58:42.000Z | 2020-02-13T18:34:32.000Z | cogs/configuration.py | Tempystral/Boxbot-2.0 | 41b5fbb70c20107e7b38b1550f2363e097c9bb4b | [
"BSD-3-Clause"
] | null | null | null | import asyncio
from cogs.utils import checks
from discord.ext import commands
class Config(commands.Cog):
    """Cog exposing bot configuration commands."""

    def __init__(self, bot):
        self.bot = bot
        #self.manager = ConfigManager()

    # FIX: has_any_role takes role names/ids as *args, not a list; passing
    # ["Bot Developer"] compared member roles against the list itself and
    # could never match, locking everyone out of the command group.
    @commands.has_any_role("Bot Developer")
    @commands.group(pass_context=True, aliases=["settings", "config"])
    async def configuration(self, context):
        """Entry point for configuration subcommands."""
        if context.invoked_subcommand is None:
            await context.send("Not enough arguments!")

    @configuration.command()
    async def update(self, context):
        """Reload settings (not yet implemented)."""
        #settings.reload()
        pass

    @configuration.command()
    async def ping(self, context):
        """Replies with a pong."""
        await context.send("pong")
def setup(bot):
    """Entry point used by discord.py to register this cog on the bot."""
    bot.add_cog(Config(bot))
| 26.103448 | 70 | 0.651255 |
31be4d64dbcbe9ea1cb9eed4c9f4573d211d22bd | 661 | py | Python | src/main/wsgi.py | vadimkondratovich/asd | 5f2db494f739ea663795c5d4a924ced942cb1852 | [
"MIT"
] | null | null | null | src/main/wsgi.py | vadimkondratovich/asd | 5f2db494f739ea663795c5d4a924ced942cb1852 | [
"MIT"
] | 8 | 2021-01-10T09:38:54.000Z | 2021-02-28T12:33:58.000Z | src/main/wsgi.py | vadimkondratovich/asd | 5f2db494f739ea663795c5d4a924ced942cb1852 | [
"MIT"
] | null | null | null | import sentry_sdk
from main.custom_types import RequestT
from framework.util.settings import get_setting
from main.handlers import get_handler
from main.handlers import handle_500
sentry_sdk.init(get_setting("SENTRY_DSN"), traces_sample_rate=1.0)
def application(environ, start_response):
    """WSGI entry point: dispatch the request and stream the encoded payload."""
    request = RequestT(environ)
    handler = get_handler(request)

    try:
        response = handler(request)
    except Exception:
        # any handler failure falls back to the generic 500 response
        response = handle_500(request)

    status_line = f"{response.status.value} {response.status.phrase}"
    start_response(status_line, list(response.headers_items()))

    yield response.payload.encode()
| 26.44 | 66 | 0.75643 |
53241ca06a4af2d20627277be305aa7bcfb6b840 | 6,423 | py | Python | instagrapi/album.py | V-ampire/instagrapi | 758c89bf8b85fe2a91ba40530349007d2d2760e1 | [
"MIT"
] | 1 | 2021-05-05T03:44:31.000Z | 2021-05-05T03:44:31.000Z | instagrapi/album.py | V-ampire/instagrapi | 758c89bf8b85fe2a91ba40530349007d2d2760e1 | [
"MIT"
] | null | null | null | instagrapi/album.py | V-ampire/instagrapi | 758c89bf8b85fe2a91ba40530349007d2d2760e1 | [
"MIT"
] | null | null | null | import time
from urllib.parse import urlparse
from .extractors import extract_media_v1
from .exceptions import PrivateError
from .utils import dumps
class AlbumNotDownload(PrivateError):
    """Raised when an album (or one of its resources) cannot be downloaded."""
    pass
class AlbumNotUpload(PrivateError):
    """Raised when an album cannot be uploaded."""
    pass
class UnknownFormat(AlbumNotUpload):
    """Raised for media files that are neither ``.jpg`` nor ``.mp4``."""
    pass
class AlbumConfigureError(AlbumNotUpload):
    """Raised when configuring an uploaded album (caption, tags, ...) fails."""
    pass
class AlbumConfigureStoryError(AlbumConfigureError):
    """Raised when configuring an album posted as a story fails."""
    pass
class DownloadAlbum:
    """Mixin with helpers to download all resources of an Instagram album."""

    def album_download(self, media_pk: int, folder: str = "/tmp") -> list:
        """Download every resource of the album ``media_pk`` into ``folder``.

        :param media_pk: primary key of the album media
        :param folder: target directory for the downloaded files
        :return: list of file paths of the downloaded resources
        :raises AlbumNotDownload: for resources with an unknown media_type
        """
        media = self.media_info(media_pk)
        # media_type 8 denotes an album (a "sidecar" of resources)
        assert media["media_type"] == 8, "Must been album"
        paths = []
        for resource in media['resources']:
            filename = "{username}_{media_pk}".format(
                username=media["user"]["username"],
                media_pk=resource['pk']
            )
            # resource media_type: 1 = photo, 2 = video
            if resource['media_type'] == 1:
                paths.append(
                    self.photo_download_by_url(resource["thumbnail_url"], filename, folder)
                )
            elif resource['media_type'] == 2:
                paths.append(
                    self.video_download_by_url(resource["video_url"], filename, folder)
                )
            else:
                raise AlbumNotDownload('Media type "%s" unknown for album (resource.media_pk=%s)' % (resource['media_type'], resource['pk']))
        return paths

    def album_download_by_urls(self, urls: list, folder: str = "/tmp") -> list:
        """Download album resources directly from their URLs.

        The media kind is inferred from the URL's file extension.

        :param urls: iterable of resource URLs
        :param folder: target directory for the downloaded files
        :return: list of file paths of the downloaded resources
        :raises UnknownFormat: for URLs that are neither ``.jpg`` nor ``.mp4``
        """
        paths = []
        for url in urls:
            # last path segment of the URL is used as the local file name
            fname = urlparse(url).path.rsplit('/', 1)[1]
            if fname.endswith('.jpg'):
                paths.append(self.photo_download_by_url(url, fname, folder))
            elif fname.endswith('.mp4'):
                paths.append(self.video_download_by_url(url, fname, folder))
            else:
                raise UnknownFormat()
        return paths
class UploadAlbum:
    """Mixin with helpers to upload a multi-media album ("sidecar") to the feed."""

    def album_upload(
        self,
        paths: list,
        caption: str,
        usertags: list = [],  # NOTE(review): mutable default; harmless here since it is only read
        configure_timeout: int = 3,
        configure_handler=None,
        configure_exception=None,
        to_story=False
    ) -> dict:
        """Upload album to feed

        :param paths: Path to files (List); only ``.jpg`` and ``.mp4`` are supported
        :param caption: Media description (String)
        :param usertags: Mentioned users (List)
        :param configure_timeout: Timeout between attempt to configure media (set caption, etc)
        :param configure_handler: Configure handler method
        :param configure_exception: Configure exception class
        :param to_story: presumably switches the upload target to a story -- currently unused here

        :return: Extracted media (Dict)
        :raises UnknownFormat: for files that are neither ``.jpg`` nor ``.mp4``
        """
        # collect per-file child metadata as expected by the configure endpoint
        childs = []
        for filepath in paths:
            if filepath.endswith('.jpg'):
                upload_id, width, height = self.photo_rupload(filepath, to_album=True)
                childs.append({
                    "upload_id": upload_id,
                    "edits": dumps({"crop_original_size": [width, height], "crop_center": [0.0, -0.0], "crop_zoom": 1.0}),
                    "extra": dumps({"source_width": width, "source_height": height}),
                    "scene_capture_type": "",
                    "scene_type": None
                })
            elif filepath.endswith('.mp4'):
                upload_id, width, height, duration, thumbnail = self.video_rupload(filepath, to_album=True)
                childs.append({
                    "upload_id": upload_id,
                    "clips": dumps([{"length": duration, "source_type": "4"}]),
                    "extra": dumps({"source_width": width, "source_height": height}),
                    "length": duration,
                    "poster_frame_index": "0",
                    "filter_type": "0",
                    "video_result": "",
                    "date_time_original": time.strftime("%Y%m%dT%H%M%S.000Z", time.localtime()),
                    "audio_muted": "false"
                })
                # video thumbnail is uploaded under the same upload_id
                self.photo_rupload(thumbnail, upload_id)
            else:
                raise UnknownFormat()

        # retry the configure step, the server may still be transcoding the videos
        for attempt in range(20):
            # NOTE(review): logs the last filepath from the loop above, not per-child
            self.logger.debug("Attempt #%d to configure Album: %s", attempt, filepath)
            time.sleep(configure_timeout)
            try:
                configured = (configure_handler or self.album_configure)(childs, caption, usertags)
            except Exception as e:
                if "Transcode not finished yet" in str(e):
                    """
                    Response 202 status:
                    {"message": "Transcode not finished yet.", "status": "fail"}
                    """
                    time.sleep(10)
                    continue
                raise e
            else:
                if configured:
                    media = configured.get("media")
                    self.expose()
                    return extract_media_v1(media)
        raise (configure_exception or AlbumConfigureError)(response=self.last_response, **self.last_json)

    def album_configure(
        self,
        childs: list,
        caption: str,
        usertags: list,
    ) -> bool:
        """Post Configure Album

        :param childs: Childs of album (List)
        :param caption: Media description (String)
        :param usertags: Mentioned users (List); tagged on the first child only

        :return: Media (Dict)
        """
        # sidecar id is derived from the current time in milliseconds
        upload_id = str(int(time.time() * 1000))
        if usertags:
            usertags = [
                {"user_id": tag['user']['pk'], "position": tag['position']}
                for tag in usertags
            ]
            childs[0]["usertags"] = dumps({"in": usertags})
        data = {
            "timezone_offset": "10800",
            "source_type": "4",
            "creation_logger_session_id": self.client_session_id,
            "caption": caption,
            "client_sidecar_id": upload_id,
            "upload_id": upload_id,
            # "location": self.build_location(name, lat, lng, address),
            "suggested_venue_position": -1,
            "device": self.device,
            "is_suggested_venue": False,
            "children_metadata": [
                {
                    "source_type": "4",
                    "timezone_offset": "10800",
                    "device": dumps(self.device),
                    **child
                } for child in childs
            ]
        }
        return self.private_request("media/configure_sidecar/", self.with_default_data(data))
| 36.08427 | 141 | 0.530905 |
103fe6d36324659360e79cb76015948ea6b5baed | 2,160 | py | Python | ProjectApplication/grant_management/forms/milestones.py | code-review-doctor/project-application | d85b40b69572efbcda24ce9c40803f76d8ffd192 | [
"MIT"
] | 5 | 2020-07-29T10:00:11.000Z | 2022-02-19T11:00:34.000Z | ProjectApplication/grant_management/forms/milestones.py | code-review-doctor/project-application | d85b40b69572efbcda24ce9c40803f76d8ffd192 | [
"MIT"
] | 471 | 2019-09-20T14:37:28.000Z | 2022-03-25T14:16:34.000Z | ProjectApplication/grant_management/forms/milestones.py | code-review-doctor/project-application | d85b40b69572efbcda24ce9c40803f76d8ffd192 | [
"MIT"
] | 5 | 2020-03-15T12:42:47.000Z | 2022-02-15T18:06:52.000Z | from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Div, Field
from dal import autocomplete
from django import forms
from django.forms import BaseInlineFormSet, inlineformset_factory, NumberInput
from grant_management.models import Milestone
from project_core.models import Project
from project_core.widgets import XDSoftYearMonthDayPickerInput
class MilestoneModelForm(forms.ModelForm):
    """Form for a single project milestone (due date, category, text)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.helper = FormHelper()
        self.helper.form_tag = False
        self.helper.disable_csrf = True  # checked in the higher form level

        # apply the project-wide date format to the picker widget
        XDSoftYearMonthDayPickerInput.set_format_to_field(self.fields['due_date'])

        self.helper.layout = Layout(
            Div(
                Div('project', hidden=True),
                Div('id', hidden=True),
                Div(Field('DELETE', hidden=True)),
                css_class='row', hidden=True
            ),
            Div(
                Div('due_date', css_class='col-2'),
                Div('category', css_class='col-4'),
                Div('text', css_class='col-6'),
                css_class='row'
            ),
        )

    def clean(self):
        # no extra validation beyond the parent's cleaning for now
        cd = super().clean()
        return cd

    class Meta:
        model = Milestone
        fields = ['project', 'due_date', 'category', 'text']
        widgets = {
            'project': NumberInput,
            'due_date': XDSoftYearMonthDayPickerInput,
            'category': autocomplete.ModelSelect2(url='logged-grant_management-autocomplete-milestones-names')
        }
class MilestoneFormSet(BaseInlineFormSet):
    """Inline formset of milestones, ordered chronologically by due date."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.helper.form_tag = False

    def get_queryset(self):
        # display milestones sorted by their due date
        return super().get_queryset().order_by('due_date')
# Factory-built formset linking Milestones to a Project; at least one form,
# no extra blank forms, deletion allowed.
MilestoneInlineFormSet = inlineformset_factory(Project, Milestone, form=MilestoneModelForm,
                                               formset=MilestoneFormSet,
                                               min_num=1, extra=0, can_delete=True)
| 33.230769 | 110 | 0.610648 |
522ba5773074140384ceadca7586712cb86da9a2 | 794 | py | Python | src/atcoder/abc218/e/sol_3.py | kagemeka/competitive-programming | c70fe481bcd518f507b885fc9234691d8ce63171 | [
"MIT"
] | 1 | 2021-07-11T03:20:10.000Z | 2021-07-11T03:20:10.000Z | src/atcoder/abc218/e/sol_3.py | kagemeka/competitive-programming | c70fe481bcd518f507b885fc9234691d8ce63171 | [
"MIT"
] | 39 | 2021-07-10T05:21:09.000Z | 2021-12-15T06:10:12.000Z | src/atcoder/abc218/e/sol_3.py | kagemeka/competitive-programming | c70fe481bcd518f507b885fc9234691d8ce63171 | [
"MIT"
] | null | null | null | import typing
class UnionFind():
  """Disjoint-set structure with union by size and path compression.

  ``self.__a[i]`` holds the parent of ``i``, or ``-size`` of the set when
  ``i`` is a root.
  """

  def __init__(self, n: int) -> typing.NoReturn:
    # every node starts as the root of a singleton set (size 1 -> -1)
    self.__a = [-1] * n

  def find(self, u: int) -> int:
    """Return the root of the set containing ``u``.

    Iterative path compression: the previous recursive walk could hit
    Python's recursion limit on long parent chains; this version cannot.
    """
    a = self.__a
    path = []
    while a[u] >= 0:
      path.append(u)
      u = a[u]
    for w in path:  # point every visited node straight at the root
      a[w] = u
    return u

  def unite(self, u: int, v: int) -> typing.NoReturn:
    """Merge the sets containing ``u`` and ``v`` (no-op if already joined)."""
    u, v = self.find(u), self.find(v)
    if u == v: return
    a = self.__a
    if a[u] > a[v]: u, v = v, u  # keep u the root of the larger set
    a[u] += a[v]
    a[v] = u
def main() -> typing.NoReturn:
  """Solve the task from stdin.

  Reads ``n`` vertices and ``m`` weighted edges, scans the edges by
  ascending weight (Kruskal-style): an edge whose endpoints are already
  connected and whose weight is non-negative is skipped and its weight is
  added to the running sum; every other edge is merged into the
  union-find. Prints that sum.
  """
  n, m = map(int, input().split())
  abc = [
    tuple(map(int, input().split()))
    for _ in range(m)
  ]
  abc.sort(key=lambda x: x[2])  # ascending edge weight
  uf = UnionFind(n)
  s = 0
  for a, b, c in abc:
    a -= 1; b -= 1  # 1-based input -> 0-based vertices
    if c >= 0 and uf.find(a) == uf.find(b):
      s += c
      continue
    uf.unite(a, b)
  print(s)


main()
| 16.541667 | 53 | 0.488665 |
ec9b055d86c0f5ef380e1c432607670d8b18ebd4 | 83,305 | py | Python | aiida_fleur/tools/xml_util.py | anoopkcn/aiida-fleur | 5d4cc2092b7c3ce5402f1d4b89787eae53b2e60f | [
"MIT"
] | null | null | null | aiida_fleur/tools/xml_util.py | anoopkcn/aiida-fleur | 5d4cc2092b7c3ce5402f1d4b89787eae53b2e60f | [
"MIT"
] | null | null | null | aiida_fleur/tools/xml_util.py | anoopkcn/aiida-fleur | 5d4cc2092b7c3ce5402f1d4b89787eae53b2e60f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
###############################################################################
# Copyright (c), Forschungszentrum Jülich GmbH, IAS-1/PGI-1, Germany. #
# All rights reserved. #
# This file is part of the AiiDA-FLEUR package. #
# #
# The code is hosted on GitHub at https://github.com/JuDFTteam/aiida-fleur #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.flapw.de or #
# http://aiida-fleur.readthedocs.io/en/develop/ #
###############################################################################
"""
In this module contains useful methods for handling xml trees and files which are used
by the Fleur code and the fleur plugin.
"""
# TODO FEHLER meldungen, currently if a xpath expression is valid, but does not exists
# xpath returns []. Do we want this behavior?
# TODO finish implementation of create=False
# TODO: no aiida imports
from __future__ import absolute_import
from __future__ import print_function
from lxml import etree
import six
from six.moves import range
from aiida.common.exceptions import InputValidationError
def is_sequence(arg):
    """Return True if ``arg`` looks like a (non-string) sequence or iterable."""
    if isinstance(arg, str):
        return False
    indexable_non_string = (not hasattr(arg, "strip")
                            and hasattr(arg, "__getitem__"))
    return indexable_non_string or hasattr(arg, "__iter__")
##### CONVERTERS ############
def convert_to_float(value_string, parser_info_out=None, suc_return=True):
    """
    Tries to make a float out of a string. If it can't it logs a warning
    and returns True or False if convertion worked or not.

    :param value_string: a string
    :param parser_info_out: dict with a 'parser_warnings' list; created if None
    :param suc_return: if True a (value, success) tuple is returned
    :returns value: the new float or value_string: the string given
    :returns: True or False
    """
    if parser_info_out is None:
        parser_info_out = {'parser_warnings': []}
    warnings = parser_info_out['parser_warnings']
    try:
        converted = float(value_string)
    except TypeError:
        warnings.append('Could not convert: "{}" to float, TypeError'
                        ''.format(value_string))
        return (value_string, False) if suc_return else value_string
    except ValueError:
        warnings.append('Could not convert: "{}" to float, ValueError'
                        ''.format(value_string))
        return (value_string, False) if suc_return else value_string
    return (converted, True) if suc_return else converted
def convert_to_int(value_string, parser_info_out=None, suc_return=True):
    """
    Tries to make a int out of a string. If it can't it logs a warning
    and returns True or False if convertion worked or not.

    :param value_string: a string
    :param parser_info_out: dict with a 'parser_warnings' list; created if None
    :param suc_return: if True a (value, success) tuple is returned
    :returns value: the new int or value_string: the string given
    :returns: True or False, if suc_return=True
    """
    if parser_info_out is None:
        parser_info_out = {'parser_warnings': []}
    warnings = parser_info_out['parser_warnings']
    try:
        converted = int(value_string)
    except TypeError:
        warnings.append('Could not convert: "{}" to int, TypeError'
                        ''.format(value_string))
        return (value_string, False) if suc_return else value_string
    except ValueError:
        warnings.append('Could not convert: "{}" to int, ValueError'
                        ''.format(value_string))
        return (value_string, False) if suc_return else value_string
    return (converted, True) if suc_return else converted
def convert_htr_to_ev(value, parser_info_out=None):
    """
    Multiplies the value given with the Hartree factor (converts htr to eV)

    :param value: a value convertible to float
    :param parser_info_out: dict collecting parser warnings
    :returns: the converted float, or the unchanged input if conversion failed
    """
    if parser_info_out is None:
        parser_info_out = {'parser_warnings': []}
    hartree_in_ev = 27.21138602
    numeric, ok = convert_to_float(value, parser_info_out=parser_info_out)
    if not ok:
        return value
    return numeric * hartree_in_ev
def convert_ev_to_htr(value, parser_info_out=None):
    """
    Divides the value given with the Hartree factor (converts htr to eV)

    :param value: a value convertible to float
    :param parser_info_out: dict collecting parser warnings
    :returns: the converted float, or the unchanged input if conversion failed
    """
    if parser_info_out is None:
        parser_info_out = {'parser_warnings': []}
    hartree_in_ev = 27.21138602
    numeric, ok = convert_to_float(value, parser_info_out=parser_info_out)
    if not ok:
        return value
    return numeric / hartree_in_ev
def convert_from_fortran_bool(stringbool):
    """
    Converts a string in this case ('T', 'F', or 't', 'f') to True or False

    :param stringbool: a string ('t', 'f', 'F', 'T') or a bool

    :return: boolean  (either True or False)
    :raises InputValidationError: if a string is given that does not represent a boolean
    :raises TypeError: if neither a string nor a bool is given
    """
    true_items = ('True', 't', 'T')
    false_items = ('False', 'f', 'F')

    if isinstance(stringbool, bool):
        return stringbool  # already a bool, no conversion needed
    if isinstance(stringbool, str):
        if stringbool in true_items:
            return True
        if stringbool in false_items:
            return False
        raise InputValidationError(
            "A string: {} for a boolean was given, which is not 'True',"
            " 'False', 't', 'T', 'F' or 'f'".format(stringbool))
    # bug fix: the error message used to name convert_to_fortran_bool
    raise TypeError("convert_from_fortran_bool accepts only a string or "
                    "bool as argument")
def convert_to_fortran_bool(boolean):
    """
    Converts a Boolean as string to the format defined in the input

    :param boolean: either a boolean or a string ('True', 'False', 'F', 'T')

    :return: a string (either 't' or 'f')
    :raises InputValidationError: if a string is given that does not represent a boolean
    :raises TypeError: if neither a string nor a bool is given
    """
    if isinstance(boolean, bool):
        return 'T' if boolean else 'F'
    if isinstance(boolean, str):
        # membership tests instead of the previous chains of '==' comparisons
        if boolean in ('True', 't', 'T'):
            return 'T'
        if boolean in ('False', 'f', 'F'):
            return 'F'
        raise InputValidationError(
            "A string: {} for a boolean was given, which is not 'True',"
            "'False', 't', 'T', 'F' or 'f'".format(boolean))
    raise TypeError("convert_to_fortran_bool accepts only a string or "
                    "bool as argument, given {} ".format(boolean))
def convert_to_fortran_string(string):
    """
    converts some parameter strings to the format for the inpgen

    :param string: some string
    :returns: string in right format (extra "")
    """
    return '"{}"'.format(string)
def convert_fleur_lo(loelements):
    """
    Converts lo xml elements from the inp.xml file into a lo string for the inpgen

    :param loelements: list of 'lo' etree elements carrying 'type', 'l' and 'n' attributes
    :returns: whitespace-separated string of '<n><l-letter>' entries (e.g. '2s 2p')
    """
    # Developer hint: Be careful with using '' and "", basestring and str are not the same...
    # therefore other conversion methods might fail, or the wrong format could be written.
    from aiida_fleur.tools.element_econfig_list import shell_map

    lo_string = ''
    for element in loelements:
        lo_type = get_xml_attribute(element, 'type')
        if lo_type != 'SCLO':  # non standard los not supported for now
            continue
        l_num = get_xml_attribute(element, 'l')
        n_num = get_xml_attribute(element, 'n')
        # shell_map translates the integer l quantum number into its letter;
        # unknown l values fall back to an empty string
        l_char = shell_map.get(int(l_num), '')
        lostr = '{}{}'.format(n_num, l_char)
        lo_string = lo_string + ' ' + lostr

    return lo_string.strip()
def set_dict_or_not(para_dict, key, value):
    """
    setter method for a dictionary that will not set the key, value pair.
    if the key is [] or None.
    """
    if value is None or value == []:
        # nothing meaningful to store, hand the dict back untouched
        return para_dict
    para_dict[key] = value
    return para_dict
####### XML SETTERS GENERAL ##############
def xml_set_attribv_occ(xmltree, xpathn, attributename, attribv, occ=None, create=False):
    """
    Routine sets the value of an attribute in the xml file on only the places
    specified in occ

    :param xmltree: an xmltree that represents inp.xml
    :param xpathn: a path to the attribute
    :param attributename: an attribute name
    :param attribv: an attribute value which will be set
    :param occ: a list of integers specifying number of occurrence to be set;
                -1 in the list means every occurrence
    :param create: if True and there is no given xpath in the FleurinpData, creates it

    Comment: Element.set will add the attribute if it does not exist,
             xpath expression has to exist

    example: xml_set_first_attribv(tree, '/fleurInput/calculationSetup', 'band', 'T')
             xml_set_first_attribv(tree, '/fleurInput/calculationSetup', 'dos', 'F')
    """
    if occ is None:
        occ = [0]  # default: only the first occurrence

    root = xmltree.getroot()
    nodes = eval_xpath3(root, xpathn, create=create)
    if not isinstance(attribv, type('')):
        attribv = str(attribv)  # lxml only accepts string attribute values
    for i, node in enumerate(nodes):
        if i in occ:
            node.set(attributename, attribv)
        if -1 in occ:  # 'all'
            node.set(attributename, attribv)
def xml_set_first_attribv(xmltree, xpathn, attributename, attribv, create=False):
    """
    Routine sets the value of the first found attribute in the xml file

    :param xmltree: an xmltree that represents inp.xml
    :param xpathn: a path to the attribute
    :param attributename: an attribute name
    :param attribv: an attribute value which will be set
    :param create: if True and there is no given xpath in the FleurinpData, creates it

    :return: None, or an etree

    Comment: Element.set will add the attribute if it does not exist,
             xpath expression has to exist

    example: xml_set_first_attribv(tree, '/fleurInput/calculationSetup', 'band', 'T')
             xml_set_first_attribv(tree, '/fleurInput/calculationSetup', 'dos', 'F')
    """

    root = xmltree.getroot()
    if isinstance(attribv, type('')):
        eval_xpath3(root, xpathn, create=create)[0].set(attributename, attribv)
    else:
        # non-string values are stringified before being written to the tree
        eval_xpath3(root, xpathn, create=create)[0].set(attributename, str(attribv))
    # return xmltree
    # ToDO check if worked. else exception,
def xml_set_all_attribv(xmltree, xpathn, attributename, attribv, create=False):
    """
    Routine sets the value of an attribute in the xml file on all places it occurs

    :param xmltree: an xmltree that represents inp.xml
    :param xpathn: a path to the attribute
    :param attributename: an attribute name
    :param attribv: an attribute value which will be set; if a sequence is
                    given, the i-th entry is written to the i-th matched node
    :param create: if True and there is no given xpath in the FleurinpData, creates it

    :return: None, or an etree

    Comment: Element.set will add the attribute if it does not exist,
             xpath expression has to exist

    example: xml_set_first_attribv(tree, '/fleurInput/atomGroups/atomGroup/force', 'relaxXYZ', 'TTF')
             xml_set_first_attribv(tree, '/fleurInput/atomGroups/atomGroup/force', 'calculate', 'F')
    """
    root = xmltree.getroot()
    nodes = eval_xpath3(root, xpathn, create=create)
    if is_sequence(attribv):
        # convert each entry locally; the previous implementation stringified
        # the caller's list in place, a hidden side effect on the argument
        for i, node in enumerate(nodes):
            value = attribv[i]
            if not isinstance(value, str):
                value = str(value)
            node.set(attributename, value)
    else:
        if not isinstance(attribv, str):
            attribv = str(attribv)
        for node in nodes:
            node.set(attributename, attribv)
def xml_set_text(xmltree, xpathn, text, create=False, place_index=None, tag_order=None):
    """
    Routine sets the text of a tag in the xml file

    :param xmltree: an xmltree that represents inp.xml
    :param xpathn: a path to the attribute
    :param text: text to be set
    :param create: if True and there is no given xpath in the FleurinpData, creates it
    :param place_index: if create=True, defines the place where to put a created tag
    :param tag_order: if create=True, defines a tag order

    example:

        xml_set_text(tree, '/fleurInput/comment', 'Test Fleur calculation for AiiDA plug-in')

    but also coordinates and Bravais Matrix!:

        xml_set_text(tree, '/fleurInput/atomGroups/atomGroup/relPos','1.20000 PI/3 5.1-MYCrazyCostant')
    """
    root = xmltree.getroot()
    node = eval_xpath3(root, xpathn, create=create, place_index=place_index, tag_order=tag_order)
    if node:
        # only the first matched node gets the new text
        node[0].text = text
    # return xmltree
def xml_set_text_occ(xmltree, xpathn, text, create=False, occ=0, place_index=None, tag_order=None):
    """
    Routine sets the text of a tag in the xml file

    :param xmltree: an xmltree that represents inp.xml
    :param xpathn: a path to the attribute
    :param text: text to be set
    :param create: if True and there is no given xpath in the FleurinpData, creates it
    :param occ: an integer that sets occurrence number to be set
    :param place_index: if create=True, defines the place where to put a created tag
    :param tag_order: if create=True, defines a tag order
    """

    root = xmltree.getroot()
    node = eval_xpath3(root, xpathn, create=create, place_index=place_index, tag_order=tag_order)
    if node:
        # write only the occ-th matched node (IndexError if fewer matches exist)
        node[occ].text = text
def xml_set_all_text(xmltree, xpathn, text, create=False, tag_order=None):
    """
    Routine sets the text of a tag in the xml file

    :param xmltree: an xmltree that represents inp.xml
    :param xpathn: a path to the attribute
    :param text: text to be set; a sequence provides one entry per matched node
    :param create: if True and there is no given xpath in the FleurinpData, creates it
    :param tag_order: if create=True, defines a tag order
    """
    root = xmltree.getroot()
    nodes = eval_xpath3(root, xpathn, create=create, tag_order=tag_order)
    if is_sequence(text):
        # one text entry per node; raises IndexError if text is shorter than nodes
        for i, node in enumerate(nodes):
            node.text = text[i]
    else:
        for node in nodes:
            node.text = text
def create_tag(xmlnode, xpath, newelement, create=False, place_index=None, tag_order=None):
    """
    This method evaluates an xpath expression and creates a tag in an xmltree under the
    returned nodes. If the path does exist things will be overwritten, or created.
    Per default the new element is appended to the elements, but it can also be
    inserted in a certain position or after certain other tags.

    :param xmlnode: an xmltree that represents inp.xml
    :param xpath: a path where to place a new tag
    :param newelement: a tag name (or an etree Element) to be created
    :param create: if True and there is no given xpath in the FleurinpData, creates it
    :param place_index: defines the place where to put a created tag.
                        NOTE(review): tested for truthiness, so index 0 behaves like append
    :param tag_order: defines a tag order; the new tag is placed behind the
                      tags preceding it in this list

    :raises ValueError: for an invalid tag name, a tag_order inconsistent with
                        the existing children, or a failing insertion
    :return: the modified xmlnode
    """
    import copy

    newelement_name = newelement
    if not etree.iselement(newelement):
        try:
            newelement = etree.Element(newelement)
        except ValueError as v:
            raise ValueError('{}. If this is a species, are you sure this species exists '
                             'in your inp.xml?'.format(v))
    nodes = eval_xpath3(xmlnode, xpath, create=create)
    if nodes:
        for node_1 in nodes:
            # each target node gets its own deep copy, so the same Element
            # object is not moved from one parent to the next
            element_to_write = copy.deepcopy(newelement)
            if place_index:
                if tag_order:
                    # behind what shall I place it
                    try:
                        place_index = tag_order.index(newelement_name)
                    except ValueError:  # was a bare 'except:', which swallowed everything
                        raise ValueError('Did not find element name in the tag_order list')
                    behind_tags = tag_order[:place_index]
                    # check if children are in the same sequence as given in tag_order
                    tags = []
                    for child in node_1.iterchildren():
                        if child.tag not in tags:
                            tags.append(child.tag)
                    prev = -1
                    for name in tags:
                        try:
                            current = tag_order.index(name)
                        except ValueError:
                            raise ValueError('Did not find existing tag name in the tag_order list'
                                             ': {}'.format(name))
                        if current > prev:
                            prev = current
                        else:
                            raise ValueError('Existing order does not correspond to tag_order list')
                    # get all names of tag existing tags
                    was_set = False
                    for tag in reversed(behind_tags):
                        for child in node_1.iterchildren(tag=tag, reversed=False):
                            # if tagname of elements==tag:
                            tag_index = node_1.index(child)
                            try:
                                node_1.insert(tag_index + 1, element_to_write)
                            except ValueError as v:
                                raise ValueError('{}. If this is a species, are'
                                                 'you sure this species exists in your inp.xml?'
                                                 ''.format(v))
                            was_set = True
                            break
                        if was_set:
                            break
                    if not was_set:  # just append
                        try:
                            node_1.insert(0, element_to_write)
                        except ValueError as v:
                            raise ValueError('{}. If this is a species, are you'
                                             ' sure this species exists in your inp.xml?'
                                             ''.format(v))
                    # (or remove all and write them again in right order?)
                else:
                    try:
                        node_1.insert(place_index, element_to_write)
                    except ValueError as v:
                        raise ValueError('{}. If this is a species, are you sure this species '
                                         'exists in your inp.xml?'.format(v))
            else:
                try:
                    node_1.append(element_to_write)
                except ValueError as v:
                    raise ValueError('{}. If this is a species, are you sure this species exists'
                                     'in your inp.xml?'.format(v))
    return xmlnode
def delete_att(xmltree, xpath, attrib):
    """
    Deletes an attribute on all nodes matched by an xpath expression.

    :param xmltree: an xmltree that represents inp.xml
    :param xpath: a path to the nodes whose attribute shall be deleted
    :param attrib: the name of the attribute
    :return: the (in place modified) xmltree
    """
    root = xmltree.getroot()
    nodes = eval_xpath3(root, xpath)
    if nodes:
        for node in nodes:
            try:
                del node.attrib[attrib]
            except KeyError:
                # attribute not present on this node; best-effort delete
                # (previously 'except BaseException: pass', which also
                # swallowed e.g. KeyboardInterrupt)
                pass
    return xmltree
def delete_tag(xmltree, xpath):
    """
    Deletes an xml tag in an xmletree.

    :param xmltree: an xmltree that represents inp.xml
    :param xpath: a path to the tag to be deleted
    :return: the (in place modified) xmltree
    """
    root = xmltree.getroot()
    matches = eval_xpath3(root, xpath)
    if matches:
        for element in matches:
            element.getparent().remove(element)
    return xmltree
def replace_tag(xmltree, xpath, newelement):
    """
    replaces a xml tag by another tag on an xmletree in place

    :param xmltree: an xmltree that represents inp.xml
    :param xpath: a path to the tag to be replaced
    :param newelement: a new tag
    :return: the (in place modified) xmltree
    """
    root = xmltree.getroot()
    matches = eval_xpath3(root, xpath)
    if matches:
        for old in matches:
            parent = old.getparent()
            position = parent.index(old)
            parent.remove(old)
            # put the replacement exactly where the old tag was
            parent.insert(position, newelement)
    return xmltree
def get_inpgen_paranode_from_xml(inpxmlfile):
    """
    This routine returns an AiiDA Parameter Data type produced from the inp.xml
    file, which can be used by inpgen.

    :param inpxmlfile: parsed xml etree of an inp.xml file
    :return: ParameterData node
    """
    from aiida.orm import Dict
    para_dict = get_inpgen_para_from_xml(inpxmlfile)
    return Dict(dict=para_dict)
def get_inpgen_para_from_xml(inpxmlfile):
    """
    This routine returns an python dictionary produced from the inp.xml
    file, which can be used as a calc_parameters node by inpgen.
    Be aware that inpgen does not take all information that is contained in an inp.xml file

    :param inpxmlfile: parsed xml etree of an inp.xml file
    :return new_parameters: A Dict, which will lead to the same inp.xml (in case if other defaults,
                            which can not be controlled by input for inpgen, were changed)
    """
    # TODO: convert econfig
    # TODO: parse kpoints, somehow count is bad (if symmetry changes), mesh is not known, path cannot be specified

    # Disclaimer: this routine needs some xpath expressions. these are hardcoded here,
    # therefore maintainance might be needed, if you want to circumvent this, you have
    # to get all the paths from somewhere.

    #######
    # all hardcoded xpaths used and attributes names:
    # input
    film_xpath = '/fleurInput/atomGroups/atomGroup/filmPos/'  # check for film pos

    # atom, for each species\
    species_xpath = '/fleurInput/atomSpecies/species'
    atom_id_xpath = ''  # is reconstruction possible at all now?
    atom_z_xpath = '@atomicNumber'
    atom_rmt_xpath = 'mtSphere/@radius'
    atom_dx_xpath = 'mtSphere/@logIncrement'
    atom_jri_xpath = 'mtSphere/@gridPoints'
    atom_lmax_xpath = 'atomicCutoffs/@lmax'
    atom_lnosph_xpath = 'atomicCutoffs/@lnonsphr'
    atom_ncst_xpath = '@coreStates'
    atom_econfig_xpath = 'electronConfig'  # converting todo
    atom_bmu_xpath = '@magMom'
    atom_lo_xpath = 'lo'  # converting todo
    atom_element_xpath = '@element'
    atom_name_xpath = '@name'

    # comp
    jspins_xpath = 'calculationSetup/magnetism/@jspins'
    frcor_xpath = 'calculationSetup/coreElectrons/@frcor'
    ctail_xpath = 'calculationSetup/coreElectrons/@ctail'
    kcrel_xpath = 'calculationSetup/coreElectrons/@kcrel'
    gmax_xpath = 'calculationSetup/cutoffs/@Gmax'
    gmaxxc_xpath = 'calculationSetup/cutoffs/@GmaxXC'
    kmax_xpath = 'calculationSetup/cutoffs/@Kmax'

    # exco
    exco_xpath = 'xcFunctional/@name'
    # film

    # soc
    l_soc_xpath = '//calculationSetup/soc/@l_soc'
    theta_xpath = '//calculationSetup/soc/@theta'
    phi_xpath = '//calculationSetup/soc/@phi'
    # qss

    # kpt

    title_xpath = '/fleurInput/comment/text()'  # text

    ########
    new_parameters = {}

    #print('parsing inp.xml without XMLSchema')
    #tree = etree.parse(inpxmlfile)
    tree = inpxmlfile
    root = tree.getroot()

    # Create the cards

    # &input # most things are not needed for AiiDA here. or we ignor them for now.
    # film is set by the plugin depended on the structure
    # symor per default = False? to avoid input which fleur can't take

    # &comp
    # attrib = get_xml_attribute(
    comp_dict = {}
    comp_dict = set_dict_or_not(comp_dict, 'jspins',
                                convert_to_float(eval_xpath(root, jspins_xpath), suc_return=False))
    comp_dict = set_dict_or_not(comp_dict, 'frcor',
                                convert_from_fortran_bool(eval_xpath(root, frcor_xpath)))
    comp_dict = set_dict_or_not(comp_dict, 'ctail',
                                convert_from_fortran_bool(eval_xpath(root, ctail_xpath)))
    comp_dict = set_dict_or_not(comp_dict, 'kcrel', eval_xpath(root, kcrel_xpath))
    comp_dict = set_dict_or_not(comp_dict, 'gmax',
                                convert_to_float(eval_xpath(root, gmax_xpath), suc_return=False))
    comp_dict = set_dict_or_not(comp_dict, 'gmaxxc',
                                convert_to_float(eval_xpath(root, gmaxxc_xpath), suc_return=False))
    comp_dict = set_dict_or_not(comp_dict, 'kmax',
                                convert_to_float(eval_xpath(root, kmax_xpath), suc_return=False))
    new_parameters['comp'] = comp_dict

    # &atoms: one 'atom{i}' namelist per species found in the inp.xml
    species_list = eval_xpath2(root, species_xpath)

    for i, species in enumerate(species_list):
        atom_dict = {}
        atoms_name = 'atom{}'.format(i)
        atom_z = convert_to_int(eval_xpath(species, atom_z_xpath), suc_return=False)
        atom_rmt = convert_to_float(eval_xpath(species, atom_rmt_xpath), suc_return=False)
        atom_dx = convert_to_float(eval_xpath(species, atom_dx_xpath), suc_return=False)
        atom_jri = convert_to_int(eval_xpath(species, atom_jri_xpath), suc_return=False)
        atom_lmax = convert_to_int(eval_xpath(species, atom_lmax_xpath), suc_return=False)
        atom_lnosph = convert_to_int(eval_xpath(species, atom_lnosph_xpath), suc_return=False)
        atom_ncst = convert_to_int(eval_xpath(species, atom_ncst_xpath), suc_return=False)
        atom_econfig = eval_xpath(species, atom_econfig_xpath)
        atom_bmu = convert_to_float(eval_xpath(species, atom_bmu_xpath), suc_return=False)
        atom_lo = eval_xpath(species, atom_lo_xpath)
        atom_element = eval_xpath(species, atom_element_xpath)
        atom_name_2 = eval_xpath(species, atom_name_xpath)

        # set_dict_or_not silently drops entries whose xpath returned nothing
        atom_dict = set_dict_or_not(atom_dict, 'z', atom_z)
        atom_dict = set_dict_or_not(atom_dict, 'rmt', atom_rmt)
        atom_dict = set_dict_or_not(atom_dict, 'dx', atom_dx)
        atom_dict = set_dict_or_not(atom_dict, 'jri', atom_jri)
        atom_dict = set_dict_or_not(atom_dict, 'lmax', atom_lmax)
        atom_dict = set_dict_or_not(atom_dict, 'lnonsph', atom_lnosph)
        atom_dict = set_dict_or_not(atom_dict, 'ncst', atom_ncst)
        atom_dict = set_dict_or_not(atom_dict, 'econfig', atom_econfig)
        atom_dict = set_dict_or_not(atom_dict, 'bmu', atom_bmu)
        if atom_lo is not None:
            atom_dict = set_dict_or_not(atom_dict, 'lo', convert_fleur_lo(atom_lo))
        atom_dict = set_dict_or_not(atom_dict, 'element', '{}'.format(atom_element))
        #atom_dict = set_dict_or_not(atom_dict, 'name', atom_name_2)

        new_parameters[atoms_name] = atom_dict

    # &soc: only written when spin-orbit coupling is switched on
    attrib = convert_from_fortran_bool(eval_xpath(root, l_soc_xpath))
    theta = convert_to_float(eval_xpath(root, theta_xpath), suc_return=False)
    phi = convert_to_float(eval_xpath(root, phi_xpath), suc_return=False)
    if attrib:
        new_parameters['soc'] = {'theta': theta, 'phi': phi}

    # &kpt
    #attrib = convert_from_fortran_bool(eval_xpath(root, l_soc_xpath))
    #theta = eval_xpath(root, theta_xpath)
    #phi = eval_xpath(root, phi_xpath)
    # if kpt:
    #    new_parameters['kpt'] = {'theta' : theta, 'phi' : phi}
    #    # ['nkpt', 'kpts', 'div1', 'div2', 'div3', 'tkb', 'tria'],

    # title
    title = eval_xpath(root, title_xpath)  # text
    if title:
        new_parameters['title'] = title.replace('\n', '').strip()

    # &exco
    #TODO, easy
    exco_dict = {}
    exco_dict = set_dict_or_not(exco_dict, 'xctyp', eval_xpath(root, exco_xpath))
    # 'exco' : ['xctyp', 'relxc'],
    new_parameters['exco'] = exco_dict

    # &film
    # TODO

    # &qss
    # TODO

    # lattice, not supported?

    return new_parameters
####### XML SETTERS SPECIAL ########
def set_species_label(fleurinp_tree_copy, at_label, attributedict, create=False):
    """
    This method calls :func:`~aiida_fleur.tools.xml_util.set_species()`
    method for a certain atom specie that corresponds to an atom with a given label

    :param fleurinp_tree_copy: xml etree of the inp.xml
    :param at_label: string, a label of the atom which specie will be changed
    :param attributedict: a python dict specifying what you want to change.
    :param create: bool, if species does not exist create it and all subtags?
    """
    if at_label == 'all':
        return set_species(fleurinp_tree_copy, 'all', attributedict, create)

    # labels are stored right-padded to 20 characters in the inp.xml
    padded_label = "{: >20}".format(at_label)

    atom_groups = eval_xpath2(fleurinp_tree_copy, '/fleurInput/atomGroups/atomGroup')
    for atom_group in atom_groups:
        # film setups use filmPos tags, bulk setups use relPos tags
        atoms = eval_xpath2(atom_group, 'filmPos') or eval_xpath2(atom_group, 'relPos')
        for atom in atoms:
            if get_xml_attribute(atom, 'label') == padded_label:
                matched_species = get_xml_attribute(atom_group, 'species')
                fleurinp_tree_copy = set_species(
                    fleurinp_tree_copy, matched_species, attributedict, create)

    return fleurinp_tree_copy
def set_species(fleurinp_tree_copy, species_name, attributedict, create=False):
    """
    Method to set parameters of a species tag of the fleur inp.xml file.

    :param fleurinp_tree_copy: xml etree of the inp.xml
    :param species_name: string, name of the specie you want to change;
                         use 'all' to address every species at once
    :param attributedict: a python dict specifying what you want to change.
    :param create: bool, if species does not exist create it and all subtags?

    :raises ValueError: if species name is non existent in inp.xml and should not be created.
                        also if other given tags are garbage. (errors from eval_xpath() methods)

    :return fleurinp_tree_copy: xml etree of the new inp.xml

    **attributedict** is a python dictionary containing dictionaries that specify attributes
    to be set inside the certain specie. For example, if one wants to set a MT radius it
    can be done via::

        attributedict = {'mtSphere' : {'radius' : 2.2}}

    Another example::

        'attributedict': {'special': {'socscale': 0.0}}

    that switches SOC terms on a sertain specie. ``mtSphere``, ``atomicCutoffs``,
    ``energyParameters``, ``lo``, ``electronConfig``, ``nocoParams``, ``ldaU`` and
    ``special`` keys are supported. To find possible
    keys of the inner dictionary please refer to the FLEUR documentation flapw.de
    """
    # TODO lowercase everything
    # TODO make a general specifier for species, not only the name i.e. also
    # number, other parameters
    if species_name == 'all':
        xpath_species = '/fleurInput/atomSpecies/species'
    else:
        xpath_species = '/fleurInput/atomSpecies/species[@name = "{}"]'.format(species_name)

    # xpaths of the known subtags of a species
    xpath_mt = '{}/mtSphere'.format(xpath_species)
    xpath_atomic_cutoffs = '{}/atomicCutoffs'.format(xpath_species)
    xpath_energy_parameters = '{}/energyParameters'.format(xpath_species)
    xpath_lo = '{}/lo'.format(xpath_species)
    xpath_electron_config = '{}/electronConfig'.format(xpath_species)
    xpath_core_occ = '{}/electronConfig/stateOccupation'.format(xpath_species)
    xpath_lda_u = '{}/ldaU'.format(xpath_species)
    xpath_soc_scale = '{}/special'.format(xpath_species)

    # order in which subtags must appear inside a species tag
    # can we get this out of schema file?
    species_seq = [
        'mtSphere',
        'atomicCutoffs',
        'energyParameters',
        'prodBasis',
        'special',
        'force',
        'electronConfig',
        'nocoParams',
        'ldaU',
        'lo']

    for key, val in six.iteritems(attributedict):
        if key == 'mtSphere':  # always in inp.xml
            for attrib, value in six.iteritems(val):
                xml_set_all_attribv(fleurinp_tree_copy, xpath_mt, attrib, value)
        elif key == 'atomicCutoffs':  # always in inp.xml
            for attrib, value in six.iteritems(val):
                xml_set_all_attribv(fleurinp_tree_copy, xpath_atomic_cutoffs, attrib, value)
        elif key == 'energyParameters':  # always in inp.xml
            for attrib, value in six.iteritems(val):
                xml_set_all_attribv(fleurinp_tree_copy, xpath_energy_parameters, attrib, value)
        elif key == 'lo':  # optional in inp.xml
            # policy: we DELETE all LOs, and create new ones from the given parameters.
            existinglos = eval_xpath3(fleurinp_tree_copy, xpath_lo)
            for los in existinglos:
                parent = los.getparent()
                parent.remove(los)

            # there can be multible LO tags, so I expect either one or a list
            if isinstance(val, dict):
                create_tag(
                    fleurinp_tree_copy,
                    xpath_species,
                    'lo',
                    place_index=species_seq.index('lo'),
                    tag_order=species_seq)
                for attrib, value in six.iteritems(val):
                    xml_set_all_attribv(fleurinp_tree_copy, xpath_lo, attrib, value, create=True)
            else:  # I expect a list of dicts
                # create as many lo tags as given dicts, for every species matched
                los_need = len(val)
                for j in range(0, los_need):
                    create_tag(
                        fleurinp_tree_copy,
                        xpath_species,
                        'lo',
                        place_index=species_seq.index('lo'),
                        tag_order=species_seq)
                for i, lodict in enumerate(val):
                    for attrib, value in six.iteritems(lodict):
                        # select the i-th lo tag of every matched species
                        sets = []
                        for k in range(len(eval_xpath2(fleurinp_tree_copy, xpath_species + '/lo'))//los_need):
                            sets.append(k * los_need + i)
                        xml_set_attribv_occ(fleurinp_tree_copy, xpath_lo, attrib, value, occ=sets)
        elif key == 'electronConfig':
            # eval electronConfig and ggf create tag at right place.
            eval_xpath3(
                fleurinp_tree_copy,
                xpath_electron_config,
                create=True,
                place_index=species_seq.index('electronConfig'),
                tag_order=species_seq)

            for tag in ['coreConfig', 'valenceConfig', 'stateOccupation']:
                for etag, edictlist in six.iteritems(val):
                    if not etag == tag:
                        continue
                    if etag == 'stateOccupation':  # there can be multiple times stateOccupation
                        # policy: default we DELETE all existing occs and create new ones for the
                        # given input!
                        existingocc = eval_xpath3(fleurinp_tree_copy, xpath_core_occ)
                        for occ in existingocc:
                            parent = occ.getparent()
                            parent.remove(occ)
                        if isinstance(edictlist, dict):
                            for attrib, value in six.iteritems(edictlist):
                                xml_set_all_attribv(
                                    fleurinp_tree_copy, xpath_core_occ, attrib, value, create=True)
                        else:  # I expect a list of dicts
                            nodes_need = len(edictlist)
                            for j in range(0, nodes_need):
                                create_tag(
                                    fleurinp_tree_copy,
                                    xpath_electron_config,
                                    'stateOccupation',
                                    create=True)
                            for i, occdict in enumerate(edictlist):
                                # override them one after one
                                sets = []
                                for k in range(len(eval_xpath2(fleurinp_tree_copy, xpath_core_occ))//nodes_need):
                                    sets.append(k * nodes_need + i)
                                for attrib, value in six.iteritems(occdict):
                                    xml_set_attribv_occ(
                                        fleurinp_tree_copy, xpath_core_occ, attrib, value, occ=sets)
                    else:
                        # coreConfig / valenceConfig carry their data as tag text
                        xpathconfig = xpath_electron_config + '/{}'.format(etag)
                        xml_set_all_text(
                            fleurinp_tree_copy,
                            xpathconfig,
                            edictlist,
                            create=create,
                            tag_order=['coreConfig', 'valenceConfig', 'stateOccupation'])
        elif key == 'ldaU':
            for attrib, value in six.iteritems(val):
                xml_set_all_attribv(fleurinp_tree_copy, xpath_lda_u, attrib, value, create=True)
        elif key == 'special':
            eval_xpath3(fleurinp_tree_copy,
                        xpath_soc_scale,
                        create=True,
                        place_index=species_seq.index('special'),
                        tag_order=species_seq)
            for attrib, value in six.iteritems(val):
                xml_set_all_attribv(
                    fleurinp_tree_copy,
                    xpath_soc_scale,
                    attrib,
                    value,
                    create=create)
        else:
            # unknown subtag name: treat the entry as a plain attribute of the
            # species tag itself.
            # BUGFIX: this used to reference the stale loop variables
            # `attrib`/`value` from a previous iteration (NameError if the
            # unknown key came first) instead of the current `key`/`val`.
            xml_set_all_attribv(fleurinp_tree_copy, xpath_species, key, val)

    return fleurinp_tree_copy
def shift_value_species_label(fleurinp_tree_copy, at_label, attr_name, value_given, mode='abs'):
    """
    Shifts value of a specie by label.

    :param fleurinp_tree_copy: xml etree of the inp.xml
    :param at_label: string, a label of the atom which specie will be changed
    :param attr_name: name of the specie attribute to shift (e.g. 'radius', 'lmax', 's', ...)
    :param value_given: value to add to (mode='abs') or to multiply by (mode='rel')
    :param value_given: the old attribute value
    :param mode: 'rel' for multiplication or 'abs' for addition

    :raises ValueError: if mode is unknown or a non-integer result would be
                        written into an integer attribute
    :return fleurinp_tree_copy: xml etree of the new inp.xml
    """
    specie = ''
    # labels are stored right-padded to 20 characters in the inp.xml
    at_label = "{: >20}".format(at_label)
    all_groups = eval_xpath2(fleurinp_tree_copy, '/fleurInput/atomGroups/atomGroup')

    # find the species the labelled atom belongs to
    for group in all_groups:
        positions = eval_xpath2(group, 'filmPos')
        if not positions:
            positions = eval_xpath2(group, 'relPos')
        for atom in positions:
            atom_label = get_xml_attribute(atom, 'label')
            if atom_label == at_label:
                specie = get_xml_attribute(group, 'species')

    xpath_species = '/fleurInput/atomSpecies/species[@name = "{}"]'.format(specie)
    xpath_mt = '{}/mtSphere'.format(xpath_species)
    xpath_atomic_cutoffs = '{}/atomicCutoffs'.format(xpath_species)
    xpath_energy_parameters = '{}/energyParameters'.format(xpath_species)

    # pick the subtag owning the attribute; 'initialise' is a dummy xpath that
    # matches nothing, so unknown attribute names fall into the skip branch below
    xpath_final = 'initialise'
    if attr_name in ['radius', 'gridPoints', 'logIncrement']:
        xpath_final = xpath_mt
    elif attr_name in ['lmax', 'lnonsphr']:
        xpath_final = xpath_atomic_cutoffs
    elif attr_name in ['s', 'p', 'd', 'f']:
        xpath_final = xpath_energy_parameters

    old_val = eval_xpath2(fleurinp_tree_copy, '/@'.join([xpath_final, attr_name]))

    if not old_val:
        print('Can not find {} attribute in the inp.xml, skip it'.format(attr_name))
    else:
        old_val = float(old_val[0])

        if mode == 'rel':
            value = value_given * old_val
        elif mode == 'abs':
            value = value_given + old_val
        else:
            # BUGFIX: the message used to name a non-existent mode 'res'
            raise ValueError("Mode should be 'rel' or 'abs' only")

        if attr_name in ['radius', 'logIncrement']:
            # float-valued attributes can be written directly
            value_to_write = value
        else:
            # all other supported attributes are integers in the schema
            if not value.is_integer():
                raise ValueError('You are trying to write a float to an integer attribute')
            value_to_write = int(value)

        xml_set_first_attribv(fleurinp_tree_copy, xpath_final, attr_name, value_to_write)

    return fleurinp_tree_copy
def change_atomgr_att_label(fleurinp_tree_copy, attributedict, at_label):
    """
    This method calls :func:`~aiida_fleur.tools.xml_util.change_atomgr_att()`
    method for a certain atom specie that corresponds to an atom with a given label.

    :param fleurinp_tree_copy: xml etree of the inp.xml
    :param attributedict: a python dict specifying what you want to change.
    :param at_label: string, a label of the atom whose group will be changed
    """
    if at_label == 'all':
        return change_atomgr_att(fleurinp_tree_copy, attributedict, position=None,
                                 species='all')

    # labels are stored right-padded to 20 characters in the inp.xml
    padded_label = "{: >20}".format(at_label)

    atom_groups = eval_xpath2(fleurinp_tree_copy, '/fleurInput/atomGroups/atomGroup')
    for atom_group in atom_groups:
        # film setups use filmPos tags, bulk setups use relPos tags
        atoms = eval_xpath2(atom_group, 'filmPos') or eval_xpath2(atom_group, 'relPos')
        for atom in atoms:
            if get_xml_attribute(atom, 'label') == padded_label:
                matched_species = get_xml_attribute(atom_group, 'species')
                fleurinp_tree_copy = change_atomgr_att(fleurinp_tree_copy, attributedict,
                                                       position=None, species=matched_species)

    return fleurinp_tree_copy
def change_atomgr_att(fleurinp_tree_copy, attributedict, position=None, species=None):
    """
    Method to set parameters of an atom group of the fleur inp.xml file.

    :param fleurinp_tree_copy: xml etree of the inp.xml
    :param attributedict: a python dict specifying what you want to change.
    :param position: position of an atom group to be changed. If equals to 'all', all species will be changed
    :param species: atom groups, corresponding to the given specie will be changed
    :param create: bool, if species does not exist create it and all subtags?

    :return fleurinp_tree_copy: xml etree of the new inp.xml

    **attributedict** is a python dictionary containing dictionaries that specify attributes
    to be set inside the certain specie. For example, if one wants to set a beta noco parameter it
    can be done via::

        'attributedict': {'nocoParams': [('beta', val)]}

    ``force`` and ``nocoParams`` keys are supported.
    To find possible keys of the inner dictionary please refer to the FLEUR documentation flapw.de
    """
    xpathatmgroup = '/fleurInput/atomGroups/atomGroup'
    xpathforce = '{}/force'.format(xpathatmgroup)
    xpathnocoParams = '{}/nocoParams'.format(xpathatmgroup)

    if not position and not species:  # not specfied what to change
        return fleurinp_tree_copy

    if position:
        if not position == 'all':
            # restrict the xpath to the atom group at the given position
            xpathatmgroup = '/fleurInput/atomGroups/atomGroup[{}]'.format(position)
            xpathforce = '{}/force'.format(xpathatmgroup)
            xpathnocoParams = '{}/nocoParams'.format(xpathatmgroup)
    if species:
        if not species == 'all':
            # restrict the xpath to atom groups of the given species
            xpathatmgroup = '/fleurInput/atomGroups/atomGroup[@species = "{}"]'.format(species)
            xpathforce = '{}/force'.format(xpathatmgroup)
            xpathnocoParams = '{}/nocoParams'.format(xpathatmgroup)

    for key, val in six.iteritems(attributedict):
        if key == 'force':
            for attrib, value in val:
                xml_set_all_attribv(fleurinp_tree_copy, xpathforce, attrib, value)
        elif key == 'nocoParams':
            for attrib, value in val:
                xml_set_all_attribv(fleurinp_tree_copy, xpathnocoParams, attrib, value)
        else:
            # unknown subtag name: treat the entry as a plain attribute of the
            # atomGroup tag itself.
            # BUGFIX: this used to reference the stale loop variables
            # `attrib`/`value` from a previous iteration (NameError if the
            # unknown key came first) instead of the current `key`/`val`.
            xml_set_all_attribv(fleurinp_tree_copy, xpathatmgroup, key, val)

    return fleurinp_tree_copy
def set_inpchanges(fleurinp_tree_copy, change_dict):
    """
    Makes given changes directly in the inp.xml file. Afterwards
    updates the inp.xml file representation and the current inp_userchanges
    dictionary with the keys provided in the 'change_dict' dictionary.

    :param fleurinp_tree_copy: a lxml tree that represents inp.xml
    :param change_dict: a python dictionary with the keys to substitute.
                        It works like dict.update(), adding new keys and
                        overwriting existing keys.

    :returns new_tree: a lxml tree with applied changes

    An example of change_dict::

            change_dict = {'itmax' : 1,
                           'l_noco': True,
                           'ctail': False,
                           'l_ss': True}

    The complete list of supported keys, together with the xpath of the tag
    each key is attached to (switches like 'dos'/'band', cutoffs like
    'Kmax'/'Gmax', scf settings like 'itmax'/'imix', species and atom group
    attributes, etc.), is hardcoded in
    :py:func:`~aiida_fleur.tools.xml_util.get_inpxml_file_structure()` --
    consult that routine for what may be placed in change_dict.
    """
    # all layout knowledge about the inp.xml lives in the file-structure tuple
    file_structure = get_inpxml_file_structure()
    return write_new_fleur_xmlinp_file(fleurinp_tree_copy, change_dict, file_structure)
def shift_value(fleurinp_tree_copy, change_dict, mode='abs'):
    """
    Shifts numertical values of some tags directly in the inp.xml file.

    :param fleurinp_tree_copy: a lxml tree that represents inp.xml
    :param change_dict: a python dictionary with the keys to shift.
    :param mode: 'abs' if change given is absolute, 'rel' if relative

    :raises ValueError: if a key is unknown/non-numeric, if mode is unknown,
                        or if a float would be written to an integer attribute
    :returns new_tree: a lxml tree with shifted values

    An example of change_dict::

            change_dict = {'itmax' : 1, 'dVac': -0.123}
    """
    xmlinpstructure = get_inpxml_file_structure()
    all_attrib_xpath = xmlinpstructure[12]
    float_attributes_once = xmlinpstructure[4]
    int_attributes_once = xmlinpstructure[3]

    change_to_write = {}

    for key, value_given in six.iteritems(change_dict):
        # only numeric once-per-file attributes can be shifted
        if key not in float_attributes_once and key not in int_attributes_once:
            # BUGFIX: message used to contain the typo 'floar'
            raise ValueError('Given attribute name either does not exist or is not float or int')

        key_path = all_attrib_xpath[key]
        old_val = eval_xpath2(fleurinp_tree_copy, '/@'.join([key_path, key]))

        if not old_val:
            print('Can not find {} attribute in the inp.xml, skip it'.format(key))
            continue
        old_val = float(old_val[0])

        if mode == 'rel':
            value = value_given * old_val
        elif mode == 'abs':
            value = value_given + old_val
        else:
            # BUGFIX: the message used to name a non-existent mode 'res'
            raise ValueError("Mode should be 'rel' or 'abs' only")

        if key in float_attributes_once:
            change_to_write[key] = value
        elif key in int_attributes_once:
            if not value.is_integer():
                raise ValueError('You are trying to write a float to an integer attribute')
            change_to_write[key] = int(value)

    # delegate the actual writing to the generic setter
    new_tree = set_inpchanges(fleurinp_tree_copy, change_to_write)
    return new_tree
def add_num_to_att(xmltree, xpathn, attributename, set_val, mode='abs', occ=None):
    """
    Routine adds something to the value of an attribute in the xml file (should be a number here)
    This is a lower-level version of :func:`~aiida_fleur.tools.xml_util.shift_value()` which
    allows one to specife an arbitrary xml path.

    :param xmltree: an etree of the inp.xml
    :param xpathn: an xml path to the attribute to change
    :param attributename: a name of the attribute to change
    :param set_val: a value to be added/multiplied to the previous value
    :param mode: 'abs' if to add set_val, 'rel' if multiply
    :param occ: a list of integers specifying number of occurrence to be set

    :raises ValueError: if mode is neither 'abs' nor 'rel'
    :returns xmltree: the (modified) etree

    Comment: Element.set will add the attribute if it does not exist,
    xpath expression has to exist

    example: add_num_to_add(tree, '/fleurInput/bzIntegration', 'valenceElectrons', '1')
             add_num_to_add(tree, '/fleurInput/bzIntegration', 'valenceElectrons', '1.1', mode='rel')
    """
    if occ is None:
        occ = [0]

    # get attribute, add or multiply, set attribute
    attribval_node = eval_xpath(xmltree, xpathn)
    attribval = get_xml_attribute(attribval_node, attributename)
    # NOTE: attribute missing (None) is silently ignored, as before
    if attribval:
        if mode == 'abs':
            newattribv = float(attribval) + float(set_val)
        elif mode == 'rel':
            newattribv = float(attribval) * float(set_val)
        else:
            # BUGFIX: an unknown mode used to fall through and crash with a
            # NameError on the undefined newattribv below
            raise ValueError("Mode should be 'rel' or 'abs' only")
        # BUGFIX: the occ argument used to be ignored (hardcoded occ=[0]);
        # also removed a leftover debug print of the old value
        xml_set_attribv_occ(xmltree, xpathn, attributename, newattribv, occ=occ, create=False)

    return xmltree
def set_nkpts(fleurinp_tree_copy, count, gamma):
    """
    Sets a k-point mesh directly into inp.xml

    :param fleurinp_tree_copy: a lxml tree that represents inp.xml
    :param count: number of k-points
    :param gamma: a fortran-type boolean that controls if the gamma-point should be included
                  in the k-point mesh

    :returns new_tree: a lxml tree with applied changes
    """
    kpointlist_xpath = '/fleurInput/calculationSetup/bzIntegration/kPointList'

    # build a kPointCount element and swap it in for the explicit kPointList
    kpoint_count_element = etree.Element(
        'kPointCount',
        count='{}'.format(count),
        gamma='{}'.format(gamma))

    return replace_tag(fleurinp_tree_copy, kpointlist_xpath, kpoint_count_element)
####### XML GETTERS #########
# TODO parser infos do not really work, might need to be returned, here
def eval_xpath(node, xpath, parser_info=None):
    """
    Evaluates an xpath expression on a node, logging failures.
    If several matches are found a list is returned; a single match is
    returned unwrapped.

    :param node: root node of an etree
    :param xpath: an xpath expression (relative, or absolute)
    :returns: either nodes, or attributes, or text; [] on an invalid expression
    """
    if parser_info is None:
        parser_info = {'parser_warnings': []}

    try:
        matches = node.xpath(xpath)
    except etree.XPathEvalError:
        warning = ('There was a XpathEvalError on the xpath: {} \n'
                   'Either it does not exist, or something is wrong'
                   ' with the expression.'.format(xpath))
        parser_info['parser_warnings'].append(warning)
        # TODO maybe raise an error again to catch in upper routine, to know where exactly
        return []

    # unwrap a unique match, otherwise hand back the whole list
    return matches[0] if len(matches) == 1 else matches
def eval_xpath2(node, xpath, parser_info=None):
    """
    Tries to evalutate an xpath expression. If it fails it logs it.
    Always return a list.

    :param node: root node of an etree
    :param xpath: an xpath expression (relative, or absolute)
    :returns: a node list; [] on an invalid expression
    """
    if parser_info is None:
        parser_info = {'parser_warnings': []}
    try:
        return_value = node.xpath(xpath)
    except etree.XPathEvalError:
        # BUGFIX: a missing space glued 'wrong' and 'with' together in the
        # warning message
        parser_info['parser_warnings'].append('There was a XpathEvalError on the xpath: {} \n'
                                              'Either it does not exist, or something is wrong'
                                              ' with the expression.'.format(xpath))
        # TODO maybe raise an error again to catch in upper routine, to know where exactly
        return []
    return return_value
def eval_xpath3(node, xpath, create=False, place_index=None, tag_order=None):
    """
    Evaluates an xpath expression on a node.
    If nothing matches and create == True, the missing tag is created first.

    :param node: root node of an etree
    :param xpath: an xpath expression (relative, or absolute)
    :returns: always a node list
    """
    try:
        found = node.xpath(xpath)
    except etree.XPathEvalError:
        message = (
            'There was a XpathEvalError on the xpath: {} \n Either it does '
            'not exist, or something is wrong with the expression.'
            ''.format(xpath))
        raise etree.XPathEvalError(message)

    if found != [] or not create:
        return found

    # nothing matched but creation is requested: build the parent path and
    # create the last path segment as a new tag
    segments = [piece for piece in xpath.split('/') if piece != ""]
    parent_path = ''.join('/' + piece for piece in segments[:-1])
    # NOTE: this is recursive -- create_tag itself calls eval_xpath3
    create_tag(node, parent_path, segments[-1], create=create,
               place_index=place_index, tag_order=tag_order)
    return node.xpath(xpath)
def get_xml_attribute(node, attributename, parser_info_out=None):
    """
    Get an attribute value from a node.

    :params node: a node from etree
    :params attributename: a string with the attribute name.
    :returns: either attributevalue, or None
    """
    if parser_info_out is None:
        parser_info_out = {'parser_warnings': []}

    if not etree.iselement(node):
        # something doesn't work here, some nodes get through here
        message = ('Can not get attributename: "{}" from node "{}", '
                   'because node is not an element of etree.'
                   ''.format(attributename, node))
        if parser_info_out:
            parser_info_out['parser_warnings'].append(message)
        else:
            print(message)
        return None

    attrib_value = node.get(attributename)
    # NOTE(review): a falsy value (e.g. an empty-string attribute) is treated
    # the same as a missing attribute here -- preserved as-is
    if attrib_value:
        return attrib_value

    if parser_info_out:
        parser_info_out['parser_warnings'].append(
            'Tried to get attribute: "{}" from element {}.\n '
            'I recieved "{}", maybe the attribute does not exist'
            ''.format(attributename, node, attrib_value))
    else:
        print(('Can not get attributename: "{}" from node "{}", '
               'because node is not an element of etree.'
               ''.format(attributename, node)))
    return None
# TODO this has to be done better. be able to write tags and
# certain attributes of attributes that occur possible more then once.
# HINT: This is not really used anymore. use fleurinpmodifier
def write_new_fleur_xmlinp_file(inp_file_xmltree, fleur_change_dic, xmlinpstructure):
    """
    This modifies the xml-inp file. Makes all the changes wanted by
    the user or sets some default values for certain modes

    :params inp_file_xmltree: xml-tree of the xml-inp file
    :params fleur_change_dic: dictionary {attrib_name : value} with all the wanted changes.
    :params xmlinpstructure: layout of the inp.xml file as returned by
                             :func:`get_inpxml_file_structure()`

    :raises InputValidationError: if a key of fleur_change_dic is unknown to the plug-in
    :returns: an etree of the xml-inp file with changes.
    """
    # TODO rename, name is misleaded just changes the tree.
    xmltree_new = inp_file_xmltree

    # unpack only the parts of the file structure actually used here
    # (several unused unpacked variables were removed)
    pos_switch_once = xmlinpstructure[0]
    pos_attrib_once = xmlinpstructure[2]
    pos_float_attributes_once = xmlinpstructure[4]
    pos_text = xmlinpstructure[11]
    pos_xpaths = xmlinpstructure[12]

    for key in fleur_change_dic:
        if key in pos_switch_once:
            # TODO: a test here if path is plausible and if exist
            # ggf. create tags and key.value is 'T' or 'F' if not convert,
            # if garbage, exception
            # convert user input into 'fleurbool'
            fleur_bool = convert_to_fortran_bool(fleur_change_dic[key])

            xpath_set = pos_xpaths[key]
            # TODO: check if something in setup is inconsitent?
            xml_set_first_attribv(xmltree_new, xpath_set, key, fleur_bool)
        elif key in pos_attrib_once:
            # TODO: same here, check existance and plausiblility of xpath
            xpath_set = pos_xpaths[key]
            if key in pos_float_attributes_once:
                # floats are written with a fixed precision of 10 decimals
                newfloat = '{:.10f}'.format(fleur_change_dic[key])
                xml_set_first_attribv(xmltree_new, xpath_set, key, newfloat)
            elif key == 'xcFunctional':
                # the xc functional value is stored in the 'name' attribute
                xml_set_first_attribv(xmltree_new, xpath_set, 'name', fleur_change_dic[key])
            else:
                xml_set_first_attribv(xmltree_new, xpath_set, key, fleur_change_dic[key])
        elif key in pos_text:
            # can be several times, therefore check
            xpath_set = pos_xpaths[key]
            xml_set_text(xmltree_new, xpath_set, fleur_change_dic[key])
        else:
            raise InputValidationError(
                "You try to set the key:'{}' to : '{}', but the key is unknown"
                " to the fleur plug-in".format(key, fleur_change_dic[key]))
    return xmltree_new
# TODO: maybe it is possible to use the xml, schema to dict libary of the QE people.
# So far it does not seem to do what we need.
def inpxml_todict(parent, xmlstr):
    """
    Recursive operation which transforms an xml etree to
    python nested dictionaries and lists.
    Decision to add a list is if the tag name is in the given list tag_several

    :param parent: some xmltree, or xml element
    :param xmlstr: structure/layout of the xml file in xmlstr is tags_several:
                   a list of the tags, which should be converted to a list, not
                   a dictionary(because they are known to occur more often, and
                   want to be accessed in a list later.

    :return: a python dictionary
    """
    xmlstructure = xmlstr
    # unpack the parts of the file-structure tuple needed for type conversion;
    # the index meaning is fixed by get_inpxml_file_structure()
    pos_switch_once1 = xmlstructure[0]
    pos_switch_several1 = xmlstructure[1]
    int_attributes_once1 = xmlstructure[3]
    float_attributes_once1 = xmlstructure[4]
    string_attributes_once1 = xmlstructure[5]
    int_attributes_several1 = xmlstructure[7]
    float_attributes_several1 = xmlstructure[8]
    string_attributes_several1 = xmlstructure[9]
    tags_several1 = xmlstructure[10]
    pos_text1 = xmlstructure[11]

    # start from the element's xml attributes (if any)
    return_dict = {}
    if list(parent.items()):
        return_dict = dict(list(parent.items()))

    # Now we have to convert lazy fortan style into pretty things for the Database
    for key in return_dict:
        if key in pos_switch_once1 or (key in pos_switch_several1):
            # fortran switches ('T'/'F') -> python bool
            return_dict[key] = convert_from_fortran_bool(return_dict[key])
        elif key in int_attributes_once1 or (key in int_attributes_several1):
            # TODO int several
            try:
                return_dict[key] = int(return_dict[key])
            except ValueError:
                # keep the raw string if it does not parse as int
                pass
        elif key in float_attributes_once1 or (key in float_attributes_several1):
            # TODO pressision?
            try:
                return_dict[key] = float(return_dict[key])
            except ValueError:
                # keep the raw string if it does not parse as float
                pass
        elif key in string_attributes_once1 or (key in string_attributes_several1):
            # TODO What attribute shall be set? all, one or several specific onces?
            return_dict[key] = str(return_dict[key])
        elif key in pos_text1:
            # Text is done by below check (parent.text)
            pass
        else:
            pass
            # this key is not know to plug-in TODO maybe make this a method
            # of the parser and log this as warning, or add here make a log
            # list, to which you always append messages, pass them back to
            # the parser, who locks it then
            # raise TypeError("Parser wanted to convert the key:'{}' with
            # value '{}', from the inpxml file but the key is unknown to the
            # fleur plug-in".format(key, return_dict[key]))
    if parent.text:  # TODO more detal, exp: relPos
        # has text, but we don't want all the '\n' s and empty stings in the database
        if parent.text.strip() != '':  # might not be the best solution
            # set text
            # NOTE(review): this REPLACES return_dict (and therefore any
            # attributes collected above) by the stripped text -- presumably
            # text-bearing tags never also carry attributes or children here;
            # confirm against the inp.xml schema
            return_dict = parent.text.strip()
    firstocc = True
    for element in parent:
        if element.tag in tags_several1:
            # make a list, otherwise the tag will be overwritten in the dict
            if firstocc:  # is this the first occurence?
                # create a list
                return_dict[element.tag] = []
                return_dict[element.tag].append(inpxml_todict(element, xmlstructure))
                firstocc = False
            else:  # occured before, a list already exists, therefore just add
                # NOTE(review): firstocc is shared over ALL tag names, so a
                # second *different* tag from tags_several would reach this
                # branch before its list exists -- assumes at most one
                # tags_several tag name occurs per parent; verify
                return_dict[element.tag].append(inpxml_todict(element, xmlstructure))
        else:
            # single child: store it as a nested dict under its tag name
            return_dict[element.tag] = inpxml_todict(element, xmlstructure)
    return return_dict
# This is probably only used to represent the whole inp.xml in the database for the fleurinpData attributes
# TODO this should be replaced by something else, maybe a class. that has a method to return certain
# list of possible xpaths from a schema file, or to validate a certain xpath expression and
# to allow to get SINGLE xpaths for certain attrbiutes.
# akk: tell me where 'DOS' is
# This might not be back compatible... i.e a certain plugin version will by this design only work
# with certain schema version
def get_inpxml_file_structure():
    """
    This routine returns the structure/layout of the 'inp.xml' file.

    Basically the plug-in should know from this routine what things are allowed
    to be set and where, i.e. all attributes and their xpaths.
    As a developer make sure to use this routine always if you need information
    about the inp.xml file structure.
    Therefore, this plug-in should be easy to adjust to other codes with xml
    files as input files. Just rewrite this routine.

    For now the structure of the xmlinp file for fleur is hardcoded.
    If big changes are in the 'inp.xml' file, maintain this routine.
    TODO: Maybe this is better done by reading the xml schema file instead.
    And maybe it should also work without the schema file, do we want this?

    :param Nothing: TODO xml schema
    :return all_switches_once: list of all switches ('T' or 'F') which are allowed to be set
    :return all_switches_several: list of all switches ('T' or 'F') which are allowed to be set
    :return other_attributes_once: list of all attributes, which occur just once (can be tested)
    :return other_attributes_several: list of all attributes, which can occur more than once
    :return all_text: list of all text of tags, which can be set
    :return all_attrib_xpath:
        dictionary (attrib, xpath), of all possible attributes
        with their xpath expression for the xml inp
    :return expertkey:
        keyname (should not be in any other list), which can be
        used to set anything in the file, by hand,
        (for experts, and that plug-in does not need to be directly maintained if
        xmlinp gets a new switch)
    """
    # All attributes (allowed to change?)
    # switches can be 'T' or 'F' # TODO: alphabetical sorting
    # NOTE(review): 'ctail' is listed twice below; harmless for membership tests.
    all_switches_once = (
        'dos', 'band', 'secvar', 'ctail', 'frcor', 'l_noco',
        'ctail', 'swsp', 'lflip', 'off', 'spav', 'l_soc', 'soc66', 'pot8',
        'eig66', 'gamma', 'gauss', 'tria', 'invs', 'invs2', 'zrfs', 'vchk', 'cdinf',
        'disp', 'vacdos', 'integ', 'star', 'score', 'plplot', 'slice',
        'pallst', 'form66', 'eonly', 'bmt', 'relativisticCorrections', 'l_J', 'l_f', 'l_ss')

    all_switches_several = ('calculate', 'flipSpin')

    int_attributes_once = ('numbands', 'itmax', 'maxIterBroyd', 'kcrel', 'jspins',
                           'gw', 'isec1', 'nx', 'ny', 'nz', 'ndir', 'layers',
                           'nstars', 'nstm', 'iplot', 'numkpt', 'nnne', 'lpr', 'count', 'qfix')

    float_attributes_once = ('Kmax', 'Gmax', 'GmaxXC', 'alpha', 'spinf', 'minDistance', 'theta',
                             'phi', 'epsdisp', 'epsforce',
                             'valenceElectrons', 'fermiSmearingEnergy', 'ellow',
                             'elup', 'scale', 'dTilda', 'dVac', 'minEnergy',
                             'maxEnergy', 'sigma', 'locx1', 'locy1', 'locx2',
                             'locy2', 'tworkf', 'minEigenval', 'maxEigenval',
                             'forcealpha', 'force_converged')

    string_attributes_once = ('imix', 'mode', 'filename', 'latnam', 'spgrp',
                              'xcFunctional', 'fleurInputVersion', 'species', 'forcemix')

    other_attributes_once = tuple(
        list(int_attributes_once) +
        list(float_attributes_once) +
        list(string_attributes_once))

    int_attributes_several = ('atomicNumber', 'gridPoints', 'lmax', 'lnonsphr',
                              's', 'p', 'd', 'f', 'l', 'n', 'eDeriv', 'coreStates')
    float_attributes_several = ('value', 'magMom', 'radius', 'logIncrement')
    string_attributes_several = ('name', 'element', 'coreStates', 'type', 'relaxXYZ')
    other_attributes_several = (
        'name', 'value', 'element', 'atomicNumber', 'coreStates', 'magMom',
        'radius', 'gridPoints', 'logIncrement', 'lmax', 'lnonsphr', 's', 'p',
        'd', 'f', 'species', 'type', 'coreStates', 'l', 'n', 'eDeriv', 'relaxXYZ')

    # when parsing the xml file to a dict, these tags should become
    # list(sets, or tuples) instead of dictionaries.
    tags_several = ('atomGroup', 'relPos', 'absPos', 'filmPos',
                    'species', 'kPoint', 'lo', 'stateOccupation')

    # tag name -> number of whitespace-separated values expected in its text
    all_text = {'comment': 1, 'relPos': 3, 'filmPos': 3, 'absPos': 3,
                'row-1': 3, 'row-2': 3, 'row-3': 3, 'a1': 1, 'qss': 3}
    # TODO all these (without comment) are floats, or float tuples.
    # Should be converted to this in the database.
    # changing the Bravais matrix should rather not be allowed I guess

    # all attribute xpaths
    # text xpaths(coordinates, bravaisMatrix)
    # all switches once, several, all attributes once, several
    all_attrib_xpath = {  # text
        'comment': '/fleurInput/comment',
        'relPos': '/fleurInput/atomGroups/atomGroup/relPos',
        'filmPos': '/fleurInput/atomGroups/atomGroup/filmPos',
        'absPos': '/fleurInput/atomGroups/atomGroup/absPos',
        'qss': '/fleurInput/calculationSetup/nocoParams/qss',
        'l_ss': '/fleurInput/calculationSetup/nocoParams',
        'row-1': '/fleurInput/cell/bulkLattice/bravaisMatrix',
        'row-2': '/fleurInput/cell/bulkLattice/bravaisMatrix',
        'row-3': '/fleurInput/cell/bulkLattice/bravaisMatrix',
        'a1': '/fleurInput/cell/filmLattice/a1',  # switches once
        'dos': '/fleurInput/output',
        'band': '/fleurInput/output',
        'secvar': '/fleurInput/calculationSetup/expertModes',
        'ctail': '/fleurInput/calculationSetup/coreElectrons',
        'frcor': '/fleurInput/calculationSetup/coreElectrons',
        'l_noco': '/fleurInput/calculationSetup/magnetism',
        'l_J': '/fleurInput/calculationSetup/magnetism',
        'swsp': '/fleurInput/calculationSetup/magnetism',
        'lflip': '/fleurInput/calculationSetup/magnetism',
        'off': '/fleurInput/calculationSetup/soc',
        'spav': '/fleurInput/calculationSetup/soc',
        'l_soc': '/fleurInput/calculationSetup/soc',
        'soc66': '/fleurInput/calculationSetup/soc',
        'pot8': '/fleurInput/calculationSetup/expertModes',
        'eig66': '/fleurInput/calculationSetup/expertModes',
        'l_f': '/fleurInput/calculationSetup/geometryOptimization',
        'gamma': '/fleurInput/calculationSetup/bzIntegration/kPointMesh',
        # 'invs': '',
        # 'zrfs': '',
        'vchk': '/fleurInput/output/checks',
        'cdinf': '/fleurInput/output/checks',
        'disp': '/fleurInput/output/checks',
        'vacdos': '/fleurInput/output',
        'integ': '/fleurInput/output/vacuumDOS',
        'star': '/fleurInput/output/vacuumDOS',
        'iplot': '/fleurInput/output/plotting',
        'score': '/fleurInput/output/plotting',
        'plplot': '/fleurInput/output/plotting',
        'slice': '/fleurInput/output',
        'pallst': '/fleurInput/output/chargeDensitySlicing',
        'form66': '/fleurInput/output/specialOutput',
        'eonly': '/fleurInput/output/specialOutput',
        'bmt': '/fleurInput/output/specialOutput',
        'relativisticCorrections': '/fleurInput/xcFunctional',  # ALL_Switches_several
        'calculate': '/fleurInput/atomGroups/atomGroup/force',
        'flipSpin': '/fleurInput/atomSpecies/species',  # other_attributes_once
        'Kmax': '/fleurInput/calculationSetup/cutoffs',
        'Gmax': '/fleurInput/calculationSetup/cutoffs',
        'GmaxXC': '/fleurInput/calculationSetup/cutoffs',
        'numbands': '/fleurInput/calculationSetup/cutoffs',
        'itmax': '/fleurInput/calculationSetup/scfLoop',
        'minDistance': '/fleurInput/calculationSetup/scfLoop',
        'maxIterBroyd': '/fleurInput/calculationSetup/scfLoop',
        'imix': '/fleurInput/calculationSetup/scfLoop',
        'alpha': '/fleurInput/calculationSetup/scfLoop',
        'spinf': '/fleurInput/calculationSetup/scfLoop',
        'kcrel': '/fleurInput/calculationSetup/coreElectrons',
        'jspins': '/fleurInput/calculationSetup/magnetism',
        'theta': '/fleurInput/calculationSetup/soc',
        'phi': '/fleurInput/calculationSetup/soc',
        'gw': '/fleurInput/calculationSetup/expertModes',
        'lpr': '/fleurInput/calculationSetup/expertModes',
        'isec1': '/fleurInput/calculationSetup/expertModes',
        'forcemix': '/fleurInput/calculationSetup/geometryOptimization',
        'forcealpha': '/fleurInput/calculationSetup/geometryOptimization',
        'force_converged': '/fleurInput/calculationSetup/geometryOptimization',
        'qfix': '/fleurInput/calculationSetup/geometryOptimization',
        'epsdisp': '/fleurInput/calculationSetup/geometryOptimization',
        'epsforce': '/fleurInput/calculationSetup/geometryOptimization',
        'valenceElectrons': '/fleurInput/calculationSetup/bzIntegration',
        'mode': '/fleurInput/calculationSetup/bzIntegration',
        'fermiSmearingEnergy': '/fleurInput/calculationSetup/bzIntegration',
        'nx': '/fleurInput/calculationSetup/bzIntegration/kPointMesh',
        'ny': '/fleurInput/calculationSetup/bzIntegration/kPointMesh',
        'nz': '/fleurInput/calculationSetup/bzIntegration/kPointMesh',
        # BUGFIX: the xpath started with '/ fleurInput' (stray space) and could
        # never match; normalized to a valid absolute xpath.
        'count': '/fleurInput/calculationSetup/bzIntegration/kPointList',
        'ellow': '/fleurInput/calculationSetup/energyParameterLimits',
        'elup': '/fleurInput/calculationSetup/energyParameterLimits',
        #'filename': '/fleurInput/cell/symmetryFile',
        'scale': '/fleurInput/cell/bulkLattice',
        # 'film_scale': '/fleurInput/cell/filmLattice',
        'ndir': '/fleurInput/output/densityOfStates',
        'minEnergy': '/fleurInput/output/densityOfStates',
        'maxEnergy': '/fleurInput/output/densityOfStates',
        # BUGFIX: the xpath had a leading ' /' (stray space); normalized.
        'sigma': '/fleurInput/output/densityOfStates',
        'layers': '/fleurInput/output/vacuumDOS',
        'nstars': '/fleurInput/output/vacuumDOS',
        'locx1': '/fleurInput/output/vacuumDOS',
        'locy1': '/fleurInput/output/vacuumDOS',
        'locx2': '/fleurInput/output/vacuumDOS',
        'locy2': '/fleurInput/output/vacuumDOS',
        'nstm': '/fleurInput/output/vacuumDOS',
        'tworkf': '/fleurInput/output/vacuumDOS',
        'numkpt': '/fleurInput/output/chargeDensitySlicing',
        'minEigenval': '/fleurInput/output/chargeDensitySlicing',
        'maxEigenval': '/fleurInput/output/chargeDensitySlicing',
        'nnne': '/fleurInput/output/chargeDensitySlicing',
        'dVac': '/fleurInput/cell/filmLattice',
        'dTilda': '/fleurInput/cell/filmLattice',
        'xcFunctional': '/fleurInput/xcFunctional',  # other_attributes_more
        # 'name': {'/fleurInput/constantDefinitions', '/fleurInput/xcFunctional',
        #          '/fleurInput/atomSpecies/species'},
        # 'value': '/fleurInput/constantDefinitions',
        'element': '/fleurInput/atomSpecies/species',
        'atomicNumber': '/fleurInput/atomSpecies/species',
        'coreStates': '/fleurInput/atomSpecies/species',
        'magMom': '/fleurInput/atomSpecies/species',
        'radius': '/fleurInput/atomSpecies/species/mtSphere',
        'gridPoints': '/fleurInput/atomSpecies/species/mtSphere',
        'logIncrement': '/fleurInput/atomSpecies/species/mtSphere',
        'lmax': '/fleurInput/atomSpecies/species/atomicCutoffs',
        'lnonsphr': '/fleurInput/atomSpecies/species/atomicCutoffs',
        's': '/fleurInput/atomSpecies/species/energyParameters',
        'p': '/fleurInput/atomSpecies/species/energyParameters',
        'd': '/fleurInput/atomSpecies/species/energyParameters',
        'f': '/fleurInput/atomSpecies/species/energyParameters',
        'type': '/fleurInput/atomSpecies/species/lo',
        'l': '/fleurInput/atomSpecies/species/lo',
        'n': '/fleurInput/atomSpecies/species/lo',
        'eDeriv': '/fleurInput/atomSpecies/species/lo',
        'species': '/fleurInput/atomGroups/atomGroup',
        'relaxXYZ': '/fleurInput/atomGroups/atomGroup/force'
    }

    # NOTE: the previously defined locals 'other_attributes_once1' and
    # 'all_tag_xpaths' were never returned or referenced and have been removed.

    # Key reserved for expert/manual settings; must not clash with any of the
    # attribute names above.
    expertkey = 'other'

    returnlist = (all_switches_once,
                  all_switches_several,
                  other_attributes_once,
                  int_attributes_once,
                  float_attributes_once,
                  string_attributes_once,
                  other_attributes_several,
                  int_attributes_several,
                  float_attributes_several,
                  string_attributes_several,
                  tags_several,
                  all_text,
                  all_attrib_xpath,
                  expertkey)
    return returnlist
def clear_xml(tree):
    """
    Return a copy of *tree* with XInclude directives resolved and all
    XML comment nodes removed.

    :param tree: lxml element tree to process (the input is left unmodified)
    :return cleared_tree: the cleaned copy of the tree
    """
    from copy import deepcopy

    cleared_tree = deepcopy(tree)
    # Pull the XIncluded fragments into the tree so it can be validated
    # against the schema as one document.
    cleared_tree.xinclude()
    # Drop every comment node; comments are irrelevant for parsing.
    for comment_node in cleared_tree.xpath('//comment()'):
        comment_node.getparent().remove(comment_node)
    return cleared_tree
| 43.320333 | 129 | 0.621055 |
b2605bb845fbd08f3639acd11ff406101be3558a | 4,800 | py | Python | tests/unit/test_outlets_geolocation_response.py | madpilot/pyptv3 | f4c6b257d84ccebc0e3a217f5f7aa99257e4a39a | [
"MIT"
] | 2 | 2019-02-24T03:40:35.000Z | 2019-09-09T00:05:27.000Z | tests/unit/test_outlets_geolocation_response.py | madpilot/pyptv3 | f4c6b257d84ccebc0e3a217f5f7aa99257e4a39a | [
"MIT"
] | 2 | 2019-06-15T11:41:24.000Z | 2021-06-01T22:30:12.000Z | tests/unit/test_outlets_geolocation_response.py | madpilot/pyptv3 | f4c6b257d84ccebc0e3a217f5f7aa99257e4a39a | [
"MIT"
] | null | null | null | import pytest
from mock import Mock
import json
from pyptv3 import OutletsGeolocationResponse, OutletGeolocationResponse, StatusResponse, ONLINE
class TestOutletsGeolocationResponse:
    """Unit tests for :class:`OutletsGeolocationResponse`.

    Verifies that the raw PTV JSON payload is mapped onto typed outlet,
    status and repr accessors.
    """

    @pytest.fixture(scope="module")
    def response(self):
        """Canned API payload with two outlets and a healthy status block."""
        return json.loads("""
        {
          "outlets": [
            {
              "outlet_distance": 1000,
              "outlet_slid_spid": "814",
              "outlet_name": "3/67-69 Separation Street",
              "outlet_business": "7-Eleven Geelong North",
              "outlet_latitude": -38.1110878,
              "outlet_longitude": 144.343689,
              "outlet_suburb": "Geelong North",
              "outlet_postcode": 3215,
              "outlet_business_hour_mon": "24 Hours",
              "outlet_business_hour_tue": "24 Hours",
              "outlet_business_hour_wed": "24 Hours",
              "outlet_business_hour_thur": "24 Hours",
              "outlet_business_hour_fri": "24 Hours",
              "outlet_business_hour_sat": "24 Hours",
              "outlet_business_hour_sun": "24 Hours",
              "outlet_notes": null
            },
            {
              "outlet_distance": 2000,
              "outlet_slid_spid": "815",
              "outlet_name": "115 Moorabool Street",
              "outlet_business": "7-Eleven Geelong City",
              "outlet_latitude": -38.1483879,
              "outlet_longitude": 144.3604,
              "outlet_suburb": "Geelong",
              "outlet_postcode": 3220,
              "outlet_business_hour_mon": "24 Hours",
              "outlet_business_hour_tue": "24 Hours",
              "outlet_business_hour_wed": "24 Hours",
              "outlet_business_hour_thur": "24 Hours",
              "outlet_business_hour_fri": "24 Hours",
              "outlet_business_hour_sat": "24 Hours",
              "outlet_business_hour_sun": "24 Hours",
              "outlet_notes": "Buy pre-loaded myki cards only"
            }
          ],
          "status": {
            "version": "3.0",
            "health": 1
          }
        }
        """)

    def test_outlets(self, response):
        """Both outlets are parsed with every field mapped to an attribute."""
        subject = OutletsGeolocationResponse(response)
        assert len(subject.outlets) == 2
        assert subject.outlets[0].__class__ == OutletGeolocationResponse
        assert subject[0].__class__ == OutletGeolocationResponse
        assert subject.outlets[0].distance == 1000
        assert subject.outlets[0].id == "814"
        assert subject.outlets[0].name == "3/67-69 Separation Street"
        assert subject.outlets[0].business == "7-Eleven Geelong North"
        assert subject.outlets[0].latitude == -38.1110878
        assert subject.outlets[0].longitude == 144.343689
        assert subject.outlets[0].suburb == "Geelong North"
        assert subject.outlets[0].postcode == 3215
        assert subject.outlets[0].business_hour_mon == "24 Hours"
        assert subject.outlets[0].business_hour_tue == "24 Hours"
        assert subject.outlets[0].business_hour_wed == "24 Hours"
        assert subject.outlets[0].business_hour_thur == "24 Hours"
        assert subject.outlets[0].business_hour_fri == "24 Hours"
        assert subject.outlets[0].business_hour_sat == "24 Hours"
        assert subject.outlets[0].business_hour_sun == "24 Hours"
        # Fixed: identity comparison with None (PEP 8), not '== None'.
        assert subject.outlets[0].notes is None
        assert subject.outlets[1].distance == 2000
        assert subject.outlets[1].id == "815"
        assert subject.outlets[1].name == "115 Moorabool Street"
        assert subject.outlets[1].business == "7-Eleven Geelong City"
        assert subject.outlets[1].latitude == -38.1483879
        assert subject.outlets[1].longitude == 144.3604
        assert subject.outlets[1].suburb == "Geelong"
        assert subject.outlets[1].postcode == 3220
        assert subject.outlets[1].business_hour_mon == "24 Hours"
        assert subject.outlets[1].business_hour_tue == "24 Hours"
        assert subject.outlets[1].business_hour_wed == "24 Hours"
        assert subject.outlets[1].business_hour_thur == "24 Hours"
        assert subject.outlets[1].business_hour_fri == "24 Hours"
        assert subject.outlets[1].business_hour_sat == "24 Hours"
        assert subject.outlets[1].business_hour_sun == "24 Hours"
        assert subject.outlets[1].notes == "Buy pre-loaded myki cards only"

    def test_status(self, response):
        """Status block is parsed into a StatusResponse with version/health."""
        subject = OutletsGeolocationResponse(response)
        assert subject.status.__class__ == StatusResponse
        assert subject.status.version == "3.0"
        assert subject.status.health == ONLINE

    def test_repr(self, response):
        """repr() is implemented and returns a plain string."""
        subject = OutletsGeolocationResponse(response)
        assert subject.__repr__().__class__ == str
| 44.859813 | 96 | 0.600833 |
46c8ae888669cab902175aa7635a5bca6cab8a3b | 2,025 | py | Python | 001~100/014LongestCommonPerfix.py | hyhplus/LeetCodeByPython | ebc0a59610e4c8b72660a9c96f408cc3319a7c28 | [
"MIT"
] | 2 | 2019-01-03T10:04:50.000Z | 2019-10-29T07:46:39.000Z | 001~100/014LongestCommonPerfix.py | hyhplus/LeetCodeByPython | ebc0a59610e4c8b72660a9c96f408cc3319a7c28 | [
"MIT"
] | null | null | null | 001~100/014LongestCommonPerfix.py | hyhplus/LeetCodeByPython | ebc0a59610e4c8b72660a9c96f408cc3319a7c28 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
14、最长公共前缀
"""
# class Solution:
# def longestCommonPrefix(self, sl):
# """
# :type sl: List[str]
# :rtype: str
# """
# if '' in sl:
# return ''
# n = len(sl)
# if n > 1:
# pr = ''
# for index, st in enumerate(sl[0]):
# pr += st
# for j in range(1, n):
# if pr not in sl[j][:index+1]:
# break
# else:
# continue
# break
# else:
# return pr
# return pr[:-1]
# else:
# return '' if not n else sl[0]
# class Solution:
# def longestCommonPrefix(self, strs):
# """
# :type strs: List[str]
# :rtype: str
# """
# common = None
# for s in strs:
# if common is None:
# common = list(s)
# else:
# for i, c in enumerate(common):
# if i >= len(s) or c != s[i]:
# common = common[:i]
# break
# return ''.join(common) if common else ''
# class Solution:
# def longestCommonPrefix(self, m):
# if not m:
# return ''
# s1 = min(m)
# print(s1)
# s2 = max(m)
# print(s2)
#
# for i, c in enumerate(s1):
# if c != s2[i]:
# return s1[:i] # stop until hit the split index
# return s1
class Solution:
    # @return a string
    def longestCommonPrefix(self, strs):
        """Return the longest common prefix shared by every string in *strs*.

        An empty input list yields the empty string.
        """
        if not strs:
            return ""
        # The lexicographically smallest and largest strings bracket all the
        # others, so only those two need a character-by-character comparison.
        smallest, largest = min(strs), max(strs)
        for index, char in enumerate(smallest):
            if char != largest[index]:
                return smallest[:index]
        return smallest
if __name__ == '__main__':
    # Quick manual check: common prefix of these three strings is 'f'.
    sample_strings = ['flow', 'fawer', 'flower']
    print(Solution().longestCommonPrefix(sample_strings))
| 23.546512 | 65 | 0.406914 |
8c85ff7d63f316c0d5d6a607dfa068bf0eaccae6 | 2,729 | py | Python | flink-python/pyflink/table/tests/test_table_completeness.py | vcd047/flink | c9b413edb31232fb740e819f77a11eb2e779be52 | [
"Apache-2.0"
] | 1 | 2022-03-12T08:27:42.000Z | 2022-03-12T08:27:42.000Z | flink-python/pyflink/table/tests/test_table_completeness.py | w156226814/flink | acab7e06c6ece03dfeb8ae90eef52721d330833f | [
"Apache-2.0"
] | 1 | 2022-02-25T03:04:41.000Z | 2022-02-25T03:04:41.000Z | flink-python/pyflink/table/tests/test_table_completeness.py | w156226814/flink | acab7e06c6ece03dfeb8ae90eef52721d330833f | [
"Apache-2.0"
] | 1 | 2022-03-09T08:50:37.000Z | 2022-03-09T08:50:37.000Z | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.testing.test_case_utils import PythonAPICompletenessTestCase, PyFlinkTestCase
from pyflink.table import Table
class TableAPICompletenessTests(PythonAPICompletenessTestCase, PyFlinkTestCase):
    """
    Checks that the Python :class:`Table` API stays in sync with the Java
    ``org.apache.flink.table.api.Table`` class.
    """

    @classmethod
    def python_class(cls):
        # Python-side class whose public API is compared.
        return Table

    @classmethod
    def java_class(cls):
        # Fully-qualified name of the Java counterpart.
        return "org.apache.flink.table.api.Table"

    @classmethod
    def excluded_methods(cls):
        """Java methods deliberately left out of the comparison.

        Row-based operators will be supported once Python UDFs are; getSchema
        needs a complete type system (pending FLINK-12408); the remaining
        entries are tracked by FLINK-25986.
        """
        return {'map', 'flatMap', 'flatAggregate', 'aggregate',
                'leftOuterJoinLateral', 'createTemporalTableFunction',
                'joinLateral', 'getQueryOperation', 'limit',
                'getResolvedSchema', 'insertInto', 'printExplain'}

    @classmethod
    def java_method_name(cls, python_method_name):
        """Map a Python method name to its Java counterpart.

        ``as`` is a Python keyword, so the Python API exposes it as
        ``alias``; every other name maps to itself.

        :param python_method_name:
        :return:
        """
        if python_method_name == 'alias':
            return 'as'
        return python_method_name
if __name__ == '__main__':
    import unittest

    try:
        # Emit XML test reports (for CI) when xmlrunner is installed;
        # otherwise fall back to the default text runner.
        import xmlrunner
        runner = xmlrunner.XMLTestRunner(output='target/test-reports')
    except ImportError:
        runner = None
    unittest.main(testRunner=runner, verbosity=2)
| 39.550725 | 94 | 0.665812 |
27f98953dbefa2aee5fc63639a73966ae19345ab | 1,225 | py | Python | problems/nand_gate.py | benallan/lovelace-problems | 3780d2bfc58fe0531d60a92ae0a6c45e9814f58f | [
"MIT"
] | 29 | 2019-07-23T16:51:36.000Z | 2022-03-08T21:42:05.000Z | problems/nand_gate.py | benallan/lovelace-problems | 3780d2bfc58fe0531d60a92ae0a6c45e9814f58f | [
"MIT"
] | 44 | 2019-03-22T00:05:32.000Z | 2021-05-04T13:25:12.000Z | problems/nand_gate.py | benallan/lovelace-problems | 3780d2bfc58fe0531d60a92ae0a6c45e9814f58f | [
"MIT"
] | 3 | 2019-08-04T13:06:21.000Z | 2021-04-20T07:41:42.000Z | import logging
from typing import Tuple
from problems.test_case import TestCase, TestCaseTypeEnum
from problems.solutions.nand_gate import NAND
# Module-level logger for this problem definition.
logger = logging.getLogger(__name__)
# Name of the reference solution function imported above.
FUNCTION_NAME = "NAND"
# Names of the test-case input and output variables.
INPUT_VARS = ['p', 'q']
OUTPUT_VARS = ['nand']
# No resource files, physical constants or float tolerances are needed for
# this purely boolean problem, so these stay empty.
STATIC_RESOURCES = []
PHYSICAL_CONSTANTS = {}
ATOL = {}
RTOL = {}
class TestCaseType(TestCaseTypeEnum):
    # One member per row of the two-input truth table.
    # Tuple layout: (label, count) — presumably the number of cases to
    # generate per type; TODO confirm against TestCaseTypeEnum.
    ZERO_ZERO = ("00", 1)
    ZERO_ONE = ("01", 1)
    ONE_ZERO = ("10", 1)
    ONE_ONE = ("11", 1)
class ProblemTestCase(TestCase):
    """Test case carrying the two NAND inputs and the single output."""

    def input_tuple(self) -> tuple:
        """Return the inputs as an ordered ``(p, q)`` pair."""
        return (self.input['p'], self.input['q'])

    def output_tuple(self) -> tuple:
        """Return the output as a one-element tuple."""
        return (self.output['nand'],)
def generate_test_case(test_type: TestCaseType) -> ProblemTestCase:
    """Create one NAND test case for the requested truth-table row.

    The expected output is computed with the reference ``NAND`` solution,
    so the generated case is always self-consistent.

    :raises ValueError: if *test_type* is not a known truth-table row
    """
    test_case = ProblemTestCase(test_type)

    # Dispatch table instead of an if/elif ladder: truth-table row -> (p, q).
    inputs_by_type = {
        TestCaseType.ZERO_ZERO: (0, 0),
        TestCaseType.ZERO_ONE: (0, 1),
        TestCaseType.ONE_ZERO: (1, 0),
        TestCaseType.ONE_ONE: (1, 1),
    }
    if test_type not in inputs_by_type:
        raise ValueError(f"Unrecognized test case: {test_type}")
    p, q = inputs_by_type[test_type]

    test_case.input["p"], test_case.input["q"] = p, q
    test_case.output["nand"] = NAND(p, q)
    return test_case
| 21.491228 | 67 | 0.656327 |
fe0f6f07d75f281ca09f9058538d2c928981e3a4 | 2,725 | py | Python | src/robotino/robotino_rest_node/scripts/analoginput.py | moritzknaust/robotino_ros | 86b503c5432739c0b8f9eb8a76d4efcf2cc73fd6 | [
"MIT"
] | null | null | null | src/robotino/robotino_rest_node/scripts/analoginput.py | moritzknaust/robotino_ros | 86b503c5432739c0b8f9eb8a76d4efcf2cc73fd6 | [
"MIT"
] | null | null | null | src/robotino/robotino_rest_node/scripts/analoginput.py | moritzknaust/robotino_ros | 86b503c5432739c0b8f9eb8a76d4efcf2cc73fd6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2019, REC Robotics Equipment Corporation GmbH, Planegg
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import requests
import sys
import rospy
from robotino_msgs.msg import AnalogReadings
# api-endpoint
# REST endpoint of the local Robotino daemon; "127.0.0.1" may be replaced
# with a host passed on the command line (see the __main__ block below).
URL = "http://127.0.0.1/data/analoginputarray"
# The sid parameter identifies this client to the Robotino REST API.
PARAMS = {'sid':'robotino_rest_node'}
def talker():
    """Poll the Robotino REST API at 10 Hz and republish the analog input
    readings on the ``analog_readings`` topic.

    Network errors are logged and the loop keeps running, so a temporarily
    unreachable daemon does not kill the node.
    """
    analog_readingsPub = rospy.Publisher('analog_readings', AnalogReadings, queue_size=1)
    rospy.init_node('robotino_analoginput', anonymous=True)
    rate = rospy.Rate(10)  # 10hz
    while not rospy.is_shutdown():
        try:
            # A bounded timeout keeps one hung HTTP request from stalling the
            # publishing loop indefinitely; a Timeout is a RequestException
            # and is handled below like any other network error.
            r = requests.get(url=URL, params=PARAMS, timeout=1.0)
            if r.status_code == requests.codes.ok:
                data = r.json()
                rospy.loginfo(data)
                msg = AnalogReadings()
                msg.stamp = rospy.get_rostime()
                msg.values = data
                analog_readingsPub.publish(msg)
            else:
                rospy.logwarn("get from %s with params %s failed", URL, PARAMS)
        except requests.exceptions.RequestException as e:
            rospy.logerr("%s", e)
        rate.sleep()
if __name__ == '__main__':
    # rospy.myargv strips ROS remapping arguments from the command line.
    argv = rospy.myargv(argv=sys.argv)
    if len(argv) > 1:
        # First positional argument: host/IP of the Robotino daemon.
        URL = URL.replace("127.0.0.1", argv[1])
    print("connecting to: ", URL)
    try:
        talker()
    except rospy.ROSInterruptException:
        pass
| 37.328767 | 86 | 0.74789 |
cb92724204563d08beee9888f972e3a53a297ba3 | 1,076 | py | Python | yatube/posts/migrations/0005_comment.py | egor-fipu/yatube | 64e91662f9f0d834ca7a8187f87005c3cc729368 | [
"MIT"
] | null | null | null | yatube/posts/migrations/0005_comment.py | egor-fipu/yatube | 64e91662f9f0d834ca7a8187f87005c3cc729368 | [
"MIT"
] | null | null | null | yatube/posts/migrations/0005_comment.py | egor-fipu/yatube | 64e91662f9f0d834ca7a8187f87005c3cc729368 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.6 on 2021-06-30 06:21
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add the ``Comment`` model: text, auto timestamp, FKs to author and post."""

    # Runs after the swappable user model and the previous posts migration.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('posts', '0004_auto_20210629_1219'),
    ]
    operations = [
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.TextField(verbose_name='Текст')),
                ('created', models.DateTimeField(auto_now_add=True, verbose_name='Дата комментария')),
                # related_name='comments' gives reverse accessors on both User
                # and Post; deleting either parent cascades to the comment.
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to=settings.AUTH_USER_MODEL, verbose_name='Автор')),
                ('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='posts.Post', verbose_name='Пост')),
            ],
        ),
    ]
| 39.851852 | 167 | 0.656134 |
654f8ab91ec7aa2133e77681aa68e1cfabe9f020 | 1,216 | py | Python | backend/app/alembic/versions/fe34a060f1ba_cascade_delete.py | dmitritruf/distiller-django-react | ec4c3837194306e725d8a2437ace10a899a254d2 | [
"BSD-3-Clause"
] | 2 | 2021-11-04T16:27:33.000Z | 2021-11-04T20:09:19.000Z | backend/app/alembic/versions/fe34a060f1ba_cascade_delete.py | OpenChemistry/distiller | 67da87672654555f9821590f42b108d70a55c1a6 | [
"BSD-3-Clause"
] | 70 | 2021-11-04T16:34:52.000Z | 2022-03-30T02:52:17.000Z | backend/app/alembic/versions/fe34a060f1ba_cascade_delete.py | dmitritruf/distiller-django-react | ec4c3837194306e725d8a2437ace10a899a254d2 | [
"BSD-3-Clause"
] | null | null | null | """cascade_delete
Revision ID: fe34a060f1ba
Revises: 8ec14b991bc5
Create Date: 2021-12-14 15:02:06.318653
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'fe34a060f1ba'       # this migration
down_revision = '8ec14b991bc5'  # migration this one is applied on top of
branch_labels = None
depends_on = None
def upgrade():
    """Recreate the scan foreign keys on ``jobs`` and ``locations`` with
    ``ON DELETE CASCADE`` so child rows are removed with their scan."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_constraint('jobs_scan_id_fkey', 'jobs', type_='foreignkey')
    # name=None lets the backend assign its default constraint name
    # (PostgreSQL: <table>_<column>_fkey, i.e. jobs_scan_id_fkey again).
    op.create_foreign_key(None, 'jobs', 'scans', ['scan_id'], ['id'], ondelete='CASCADE')
    op.drop_constraint('locations_scan_id_fkey', 'locations', type_='foreignkey')
    op.create_foreign_key(None, 'locations', 'scans', ['scan_id'], ['id'], ondelete='CASCADE')
    # ### end Alembic commands ###
def downgrade():
    """Restore the original non-cascading scan foreign keys."""
    # ### commands auto generated by Alembic - please adjust! ###
    # BUGFIX: autogenerate emitted op.drop_constraint(None, ...), which fails
    # at runtime because Alembic requires a constraint name.  The CASCADE keys
    # created unnamed in upgrade() received the backend's default names
    # (PostgreSQL: <table>_<column>_fkey), so those names are used here.
    op.drop_constraint('locations_scan_id_fkey', 'locations', type_='foreignkey')
    op.create_foreign_key('locations_scan_id_fkey', 'locations', 'scans', ['scan_id'], ['id'])
    op.drop_constraint('jobs_scan_id_fkey', 'jobs', type_='foreignkey')
    op.create_foreign_key('jobs_scan_id_fkey', 'jobs', 'scans', ['scan_id'], ['id'])
    # ### end Alembic commands ###
| 34.742857 | 94 | 0.700658 |
d315efe21b2afc6323f541d91321ef681c0e4166 | 1,895 | py | Python | setup.py | somenzz/somedecorators | 1b2f7e0c3330024c1f9871016d7d9d4b87c5bde6 | [
"MIT"
] | 2 | 2021-06-18T02:31:53.000Z | 2021-08-04T06:39:13.000Z | setup.py | Tsurol/somedecorators | 1b2f7e0c3330024c1f9871016d7d9d4b87c5bde6 | [
"MIT"
] | 1 | 2021-07-12T01:45:24.000Z | 2021-09-29T22:10:58.000Z | setup.py | Tsurol/somedecorators | 1b2f7e0c3330024c1f9871016d7d9d4b87c5bde6 | [
"MIT"
] | 1 | 2022-01-03T13:05:09.000Z | 2022-01-03T13:05:09.000Z | #!/usr/bin/env python
from pathlib import Path
try:
from setuptools import setup
from setuptools import find_packages
except ImportError:
raise ImportError("Could not import \"setuptools\"."
"Please install the setuptools package.")
readme = Path("README.md")
license = Path("LICENSE")
# Read the version without importing the package
# (and thus attempting to import packages it depends on that may not be
# installed yet)
version = "0.7"
NAME = 'somedecorators'
VERSION = version
DESCRIPTION = 'Some useful decorators in Python.'
KEYWORDS = 'awesome decorators'
AUTHOR = 'somenzz'
AUTHOR_EMAIL = 'somenzz@163.com'
URL = 'https://github.com/somenzz/somedecorators'
LICENSE = license.read_text()
PACKAGES = find_packages(exclude=['tests', 'tests.*'])
INSTALL_REQUIRES = ['djangomail']
TEST_SUITE = 'tests'
TESTS_REQUIRE = []
CLASSIFIERS = [
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
]
LONG_DESCRIPTION = readme
params = {
'name': NAME,
'version': VERSION,
'description': DESCRIPTION,
'keywords': KEYWORDS,
'author': AUTHOR,
'author_email': AUTHOR_EMAIL,
'url': URL,
'license': "MIT",
'packages': PACKAGES,
'install_requires': INSTALL_REQUIRES,
'tests_require': TESTS_REQUIRE,
'test_suite': TEST_SUITE,
'classifiers': CLASSIFIERS,
'long_description': readme.read_text()
}
if __name__ == '__main__':
setup(**params,long_description_content_type='text/markdown')
| 27.071429 | 71 | 0.651715 |
4dfdd9bfa504c1bf2f6f2906056066f521f5ee7e | 136 | py | Python | main.py | tsabelmann/cthulhu_cmd | 76d99d01a01795523400a87cf41594274ece87e4 | [
"MIT"
] | 3 | 2021-07-29T03:20:45.000Z | 2021-08-25T12:18:09.000Z | main.py | tsabelmann/cthulhu_cmd | 76d99d01a01795523400a87cf41594274ece87e4 | [
"MIT"
] | null | null | null | main.py | tsabelmann/cthulhu_cmd | 76d99d01a01795523400a87cf41594274ece87e4 | [
"MIT"
] | null | null | null | """Module provides hook-in for the CLI class.
"""
# Import the command-line interface module.
import cthulhu_cmd.cli as cli
if __name__ == "__main__":
    # Delegate to the CLI entry point when executed as a script.
    cli.main()
| 13.6 | 45 | 0.669118 |
a495fecf8199fa3b5415b6a3822b0f4842abc9ee | 20,550 | py | Python | flux_mito/model_734.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | flux_mito/model_734.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | flux_mito/model_734.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | # exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
# Monomer (species) declarations with their binding/interaction sites.
# Naming convention visible in the code: *U = uncleaved, *A = active,
# *pro = proenzyme, *M = mitochondrial, *C = cytosolic, *T = truncated,
# *ub = ubiquitinated -- presumably; confirm against the source model.
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('Bcl2', ['BidM', 'BaxA'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
# BaxA carries two self-association sites (BaxA_1/BaxA_2) used below to
# assemble pores of up to four subunits.
Monomer('BaxA', ['BaxM', 'Bcl2', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM', 'Bcl2'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6A', ['C8pro'])
Monomer('C6pro', ['C3A'])
# Kinetic rate parameters. All are placeholders set to 1.0 here -- presumably
# they are overridden during parameter estimation; confirm with the pipeline.
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_2kf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_1kr', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2kf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1kr', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
# Initial copy numbers for each species; consumed by the Initial() block below.
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 85000.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('Bcl2_0', 200000.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6A_0', 0.0)
Parameter('C6pro_0', 100.0)
# Observables: one per monomer, tracking the total amount of that species
# in any binding state.
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('Bcl2_obs', Bcl2())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6A_obs', C6A())
Observable('C6pro_obs', C6pro())
# --- Extrinsic pathway: ligand binding, caspase-8 activation, Bid cleavage,
# apoptosome assembly, caspase-3 activation, XIAP inhibition, PARP cleavage.
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
# --- Mitochondrial pathway: Bid translocation, Bax activation and
# oligomerization into 4-subunit pores, Bcl2 inhibition, and transport of
# Smac / cytochrome c through the pore.
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None, Bcl2=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None, Bcl2=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None, Bcl2=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('inhibition_0_Bcl2_inhibitor_BidM_inh_target', Bcl2(BidM=None, BaxA=None) + BidM(BaxM=None, Bcl2=None) | Bcl2(BidM=1, BaxA=None) % BidM(BaxM=None, Bcl2=1), inhibition_0_Bcl2_inhibitor_BidM_inh_target_2kf, inhibition_0_Bcl2_inhibitor_BidM_inh_target_1kr)
Rule('inhibition_0_Bcl2_inhibitor_BaxA_inh_target', Bcl2(BidM=None, BaxA=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | Bcl2(BidM=None, BaxA=1) % BaxA(BaxM=None, Bcl2=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2kf, inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1kr)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
# --- Caspase feedback loop: C8A -> C3A -> C6A -> C8A amplification.
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
# Initial conditions: each monomer starts fully unbound, with the copy number
# given by the corresponding *_0 Parameter defined above.
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(Bcl2(BidM=None, BaxA=None), Bcl2_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None, Bcl2=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C6pro(C3A=None), C6pro_0)
| 95.138889 | 798 | 0.804136 |
fb61c95935700a2be939e38a9daeba1e47c55e3c | 4,269 | py | Python | tests/attention/test_improved_clustered_transformer_gpu.py | SamuelCahyawijaya/fast-transformers | 6ae8ed4cc50bd037968db4f5062e4d328aae73fe | [
"MIT"
] | 1,171 | 2020-06-30T01:57:19.000Z | 2022-03-31T15:11:25.000Z | tests/attention/test_improved_clustered_transformer_gpu.py | SamuelCahyawijaya/fast-transformers | 6ae8ed4cc50bd037968db4f5062e4d328aae73fe | [
"MIT"
] | 105 | 2020-06-30T14:40:56.000Z | 2022-02-08T16:31:45.000Z | tests/attention/test_improved_clustered_transformer_gpu.py | SamuelCahyawijaya/fast-transformers | 6ae8ed4cc50bd037968db4f5062e4d328aae73fe | [
"MIT"
] | 127 | 2020-06-26T09:07:48.000Z | 2022-03-25T06:46:37.000Z | #
# Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/
# Written by Angelos Katharopoulos <angelos.katharopoulos@idiap.ch>,
# Apoorv Vyas <avyas@idiap.ch>
#
import unittest
import torch
from fast_transformers.attention import AttentionLayer, \
ImprovedClusteredAttention, FullAttention
from fast_transformers.masking import FullMask
from fast_transformers.masking import LengthMask
from fast_transformers.transformers import TransformerEncoderLayer, \
TransformerEncoder
class TestTransformerEncoder(unittest.TestCase):
    """GPU tests for improved-clustered attention inside a full encoder.

    When ``topk`` equals the sequence length, the improved-clustered
    approximation must reproduce exact (full) attention, so the two encoders
    are compared element-wise.

    Bug fix: the original comparison tests assigned the outputs the wrong
    way around (``y_full`` came from the improved model and vice versa);
    the names are now consistent with their sources.
    """

    @staticmethod
    def _make_encoder(attention_builder, d_model, n_heads):
        """Build a 6-layer encoder using attention produced by *attention_builder*."""
        return TransformerEncoder([
            TransformerEncoderLayer(
                AttentionLayer(attention_builder(), d_model, n_heads),
                d_model,
                n_heads
            )
            for _ in range(6)
        ])

    @classmethod
    def _make_encoder_pair(cls, d_model, n_heads, topk):
        """Return (improved, full) CUDA encoders with identical weights, in eval mode."""
        improved = cls._make_encoder(
            lambda: ImprovedClusteredAttention(clusters=10, topk=topk),
            d_model, n_heads).to("cuda")
        full = cls._make_encoder(lambda: FullAttention(), d_model, n_heads).to("cuda")
        # Copy the full model's weights so both encoders compute the same
        # function modulo the attention approximation.
        improved.load_state_dict(full.state_dict())
        improved.eval()
        full.eval()
        return improved, full

    def test_full_attention_forward(self):
        """A clustered-attention encoder runs forward and keeps the input shape."""
        d_model = 128
        n_heads = 4
        transformer = self._make_encoder(
            lambda: ImprovedClusteredAttention(clusters=10, topk=5),
            d_model, n_heads).to("cuda")
        x = torch.rand(100, 20, d_model).to("cuda")
        y = transformer(x)
        self.assertEqual(y.shape, (100, 20, d_model))

    def test_topk_equals_length_attention(self):
        """topk == sequence length must match full attention exactly (to 1e-4)."""
        d_model = 32
        n_heads = 4
        improved_transformer, full_transformer = self._make_encoder_pair(
            d_model, n_heads, topk=20)
        x = torch.rand(100, 20, d_model).to("cuda")
        y_improved = improved_transformer(x)
        y_full = full_transformer(x)
        self.assertLess(
            torch.max(torch.abs(y_improved - y_full)),
            1e-4
        )

    def test_topk_equals_length_attention_masked(self):
        """As above, but with per-sequence length masks; only valid positions compared."""
        d_model = 32
        n_heads = 4
        improved_transformer, full_transformer = self._make_encoder_pair(
            d_model, n_heads, topk=20)
        x = torch.rand(100, 20, d_model).to("cuda")
        lengths = x.new_full((100,), 20, dtype=torch.int64)
        lengths[1] = 5
        lengths[10] = 10
        length_mask = LengthMask(
            lengths=lengths,
            max_len=20
        )
        y_improved = improved_transformer(x, length_mask=length_mask)
        y_full = full_transformer(x, length_mask=length_mask)
        # Positions past each sequence's length are masked, so only the
        # valid prefixes are required to agree.
        self.assertLess(
            torch.max(torch.abs(y_improved[1, :5] - y_full[1, :5])),
            1e-4
        )
        self.assertLess(
            torch.max(torch.abs(y_improved[10, :10] - y_full[10, :10])),
            1e-4
        )
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| 31.622222 | 75 | 0.54111 |
b2846f15b4ddd008f541d1dc88f04bfacdfbcba7 | 59,199 | py | Python | tools/system_libs.py | flarestart/emscripten | 2d0ffdf265fe63c7ada99dff58957c08c5608280 | [
"MIT"
] | null | null | null | tools/system_libs.py | flarestart/emscripten | 2d0ffdf265fe63c7ada99dff58957c08c5608280 | [
"MIT"
] | null | null | null | tools/system_libs.py | flarestart/emscripten | 2d0ffdf265fe63c7ada99dff58957c08c5608280 | [
"MIT"
] | null | null | null | # Copyright 2014 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
from .toolchain_profiler import ToolchainProfiler
import itertools
import logging
import os
import shutil
from enum import IntEnum, auto
from glob import iglob
from . import shared, building, utils
from . import deps_info, tempfiles
from . import diagnostics
from tools.shared import mangle_c_symbol_name, demangle_c_symbol_name
from tools.settings import settings
# Module-level logger for system-library build diagnostics.
logger = logging.getLogger('system_libs')

# Files that are part of libsockets.a and so should be excluded from libc.a
LIBC_SOCKETS = ['socket.c', 'socketpair.c', 'shutdown.c', 'bind.c', 'connect.c',
                'listen.c', 'accept.c', 'getsockname.c', 'getpeername.c', 'send.c',
                'recv.c', 'sendto.c', 'recvfrom.c', 'sendmsg.c', 'recvmsg.c',
                'getsockopt.c', 'setsockopt.c', 'freeaddrinfo.c',
                'in6addr_any.c', 'in6addr_loopback.c']
def files_in_path(path, filenames):
  """Return absolute paths for each of *filenames* under the root-relative *path*."""
  base = utils.path_from_root(path)
  return [os.path.join(base, name) for name in filenames]
def glob_in_path(path, glob_pattern, excludes=()):
  """Glob for *glob_pattern* under the root-relative *path*.

  Files whose basename appears in *excludes* are filtered out.
  """
  matches = iglob(os.path.join(utils.path_from_root(path), glob_pattern),
                  recursive=True)
  return [m for m in matches if os.path.basename(m) not in excludes]
def get_base_cflags(force_object_files=False):
  """Return the compiler flags common to all system-library builds.

  When *force_object_files* is set, LTO flags are suppressed so real object
  files are produced even in LTO builds.
  """
  # Always build system libraries with debug information. Non-debug builds
  # will ignore this at link time because we link with `-strip-debug`.
  cflags = ['-g']
  if settings.LTO and not force_object_files:
    cflags.append('-flto=' + settings.LTO)
  if settings.RELOCATABLE:
    cflags.extend(['-s', 'RELOCATABLE'])
  if settings.MEMORY64:
    cflags.extend(['-s', 'MEMORY64=' + str(settings.MEMORY64)])
  return cflags
def clean_env():
  """Return a scrubbed copy of os.environ for building system libs and ports.

  Building system libraries and ports should be hermetic: not affected by
  things like EMMAKEN_CFLAGS which the user may have set. At least one port
  also uses autoconf (harfbuzz), so CFLAGS/LDFLAGS are cleared too so they
  don't affect the inner call to configure.
  """
  env = os.environ.copy()
  for var in ('CFLAGS', 'CXXFLAGS', 'LDFLAGS', 'EMCC_CFLAGS',
              'EMMAKEN_CFLAGS', 'EMMAKEN_JUST_CONFIGURE'):
    env.pop(var, None)
  return env
def run_build_commands(commands):
  """Run a set of build commands in parallel with a clean environment."""
  # Before running a set of build commands make sure the common sysroot
  # headers are installed. This prevents each sub-process from attempting
  # to setup the sysroot itself.
  ensure_sysroot()
  # clean_env() scrubs user-set flags (EMMAKEN_CFLAGS etc.) so builds are hermetic.
  shared.run_multiple_processes(commands, env=clean_env())
def create_lib(libname, inputs):
  """Create a library file at *libname* from the given input objects.

  '.a' outputs are archived with emar; '.bc'/'.o' outputs are produced by
  linking (or simply copying when there is a single input).
  """
  ext = shared.suffix(libname)
  if ext == '.a':
    building.emar('cr', libname, inputs)
    return
  assert ext in ('.bc', '.o')
  if len(inputs) == 1:
    # A single object needs no link step; copy it into place if necessary.
    if inputs[0] != libname:
      shutil.copyfile(inputs[0], libname)
  else:
    building.link_to_object(inputs, libname)
def get_wasm_libc_rt_files():
  """Return the libc source files that must be built as plain object files.

  These are sources that can be referenced by LLVM-generated libcalls and so
  cannot live in an LTO/bitcode library (see comment below).
  """
  # Combining static linking with LTO is tricky under LLVM. The codegen that
  # happens during LTO can generate references to new symbols that didn't exist
  # in the linker inputs themselves.
  # These symbols are called libcalls in LLVM and are the result of intrinsics
  # and builtins at the LLVM level. These libcalls cannot themselves be part
  # of LTO because once the linker is running the LTO phase new bitcode objects
  # cannot be added to link. Another way of putting it: by the time LTO happens
  # the decision about which bitcode symbols to compile has already been made.
  # See: https://bugs.llvm.org/show_bug.cgi?id=44353.
  # To solve this we put all such libcalls in a separate library that, like
  # compiler-rt, is never compiled as LTO/bitcode (see force_object_files in
  # CompilerRTLibrary).
  # Note that this also includes things that may be depended on by those
  # functions - fmin uses signbit, for example, so signbit must be here (so if
  # fmin is added by codegen, it will have all it needs).
  math_files = files_in_path(
    path='system/lib/libc/musl/src/math',
    filenames=[
      'fmin.c', 'fminf.c', 'fminl.c',
      'fmax.c', 'fmaxf.c', 'fmaxl.c',
      'fmod.c', 'fmodf.c', 'fmodl.c',
      'log2.c', 'log2f.c', 'log10.c', 'log10f.c',
      'exp2.c', 'exp2f.c', 'exp10.c', 'exp10f.c',
      'scalbn.c', '__fpclassifyl.c',
      '__signbitl.c', '__signbitf.c', '__signbit.c'
    ])
  other_files = files_in_path(
    path='system/lib/libc',
    filenames=['emscripten_memcpy.c', 'emscripten_memset.c',
               'emscripten_scan_stack.c',
               'emscripten_memmove.c'])
  # Calls to iprintf can be generated during codegen. Ideally we wouldn't
  # compile these with -O2 like we do the rest of compiler-rt since its
  # probably not performance sensitive. However we don't currently have
  # a way to set per-file compiler flags. And hopefully we should be able
  # move all this stuff back into libc once we it LTO compatible.
  iprintf_files = files_in_path(
    path='system/lib/libc/musl/src/stdio',
    filenames=['__towrite.c', '__overflow.c', 'fwrite.c', 'fputs.c',
               'printf.c', 'puts.c', '__lockfile.c'])
  iprintf_files += files_in_path(
    path='system/lib/libc/musl/src/string',
    filenames=['strlen.c'])
  return math_files + other_files + iprintf_files
def is_case_insensitive(path):
  """Return True if the filesystem at `path` is case insensitive.

  Detection works by creating a lowercase probe file and then checking
  whether the same file is visible under an uppercase spelling.
  """
  probe = os.path.join(path, 'test_file')
  utils.write_file(probe, '')
  insensitive = os.path.exists(os.path.join(path, 'TEST_FILE'))
  os.remove(probe)
  return insensitive
class Library:
  """
  `Library` is the base class of all system libraries.

  There are two types of libraries: abstract and concrete.

    * An abstract library, e.g. MTLibrary, is a subclass of `Library` that
      implements certain behaviour common to multiple libraries. The features
      of multiple abstract libraries can be used through multiple inheritance.

    * A concrete library, e.g. libc, is a subclass of `Library` that describes
      how to build a particular library, and its properties, such as name and
      dependencies.

  This library system is meant to handle having many versions of the same library,
  which we call *variations*. For example, some libraries (those that inherit
  from MTLibrary), have both single-threaded and multi-threaded versions.

  An instance of a `Library` subclass represents a specific variation of the
  library. Instance methods perform operations relating to this variation.
  For example, `get_cflags()` would return the emcc flags needed to build this
  variation, and `build()` would generate the library file for this variation.

  The constructor takes keyword arguments that define the variation.

  Class methods perform tasks relating to all variations. For example,
  `variations()` returns a list of all variations that exist for this library,
  and `get_default_variation()` returns the variation suitable for the current
  environment.

  Other class methods act upon a group of libraries. For example,
  `Library.get_all_variations()` returns a mapping of all variations of
  existing libraries.

  To add a new type of variation, you must add a parameter to `__init__` that
  selects the variant. Then, override one of `vary_on` or `variations`, as well
  as `get_default_variation`.

  If the parameter is boolean, overriding `vary_on` to add the parameter name
  to the returned list is sufficient:

    @classmethod
    def vary_on(cls):
      return super().vary_on() + ['my_parameter']

  Otherwise, you must override `variations`:

    @classmethod
    def variations(cls):
      return [{'my_parameter': value, **other} for value, other in
              itertools.product([1, 2, 3], super().variations())]

  Overriding either `vary_on` or `variations` allows `embuilder.py` to know all
  possible variations so it can build all of them.

  You then need to modify `get_default_variation` to detect the correct value
  for your new parameter based on the settings:

    @classmethod
    def get_default_variation(cls, **kwargs):
      return super().get_default_variation(my_parameter=settings.MY_PARAMETER, **kwargs)

  This allows the correct variation of the library to be selected when building
  code with Emscripten.
  """

  # The simple name of the library. When linking, this is the name to use to
  # automatically get the correct version of the library.
  # This should only be overridden in a concrete library class, e.g. libc,
  # and left as None in an abstract library class, e.g. MTLibrary.
  name = None

  # Set to true to prevent EMCC_FORCE_STDLIBS from linking this library.
  never_force = False

  # A list of flags to pass to emcc.
  # The flags for the parent class is automatically inherited.
  # TODO: Investigate whether perf gains from loop unrolling would be worth the
  # extra code size. The -fno-unroll-loops flags was added here when loop
  # unrolling landed upstream in LLVM to avoid changing behavior but was not
  # specifically evaluated.
  cflags = ['-Werror', '-fno-unroll-loops']

  # A list of directories to put in the include path when building.
  # This is a list of tuples of path components.
  # For example, to put system/lib/a and system/lib/b under the emscripten
  # directory into the include path, you would write:
  #    includes = [('system', 'lib', 'a'), ('system', 'lib', 'b')]
  # The include path of the parent class is automatically inherited.
  includes = []

  # By default, `get_files` look for source files for this library under `src_dir`.
  # It will either use the files listed in `src_files`, or use the glob pattern in
  # `src_glob`. You may not specify both `src_files` and `src_glob`.
  # When using `src_glob`, you can specify a list of files in `src_glob_exclude`
  # to be excluded from the library.
  # Alternatively, you can override `get_files` to use your own logic.
  src_dir = None
  src_files = None
  src_glob = None
  src_glob_exclude = None

  # Whether to always generate WASM object files, even when LTO is set
  force_object_files = False

  def __init__(self):
    """
    Creates a variation of this library.

    A variation is a specific combination of settings a library can have.
    For example, libc++-mt-noexcept is a variation of libc++.
    There might be only one variation of a library.

    The constructor keyword arguments will define what variation to use.

    Use the `variations` classmethod to get the list of all possible constructor
    arguments for this library.

    Use the `get_default_variation` classmethod to construct the variation
    suitable for the current invocation of emscripten.
    """
    if not self.name:
      raise NotImplementedError('Cannot instantiate an abstract library')

  def can_use(self):
    """
    Whether this library can be used in the current environment.

    For example, libmalloc would override this and return False
    if the user requested no malloc.
    """
    return True

  def can_build(self):
    """
    Whether this library can be built in the current environment.

    Override this if, for example, the library can only be built on WASM backend.
    """
    return True

  def erase(self):
    """Removes this library's file from the cache, forcing a rebuild on next use."""
    shared.Cache.erase_file(shared.Cache.get_lib_name(self.get_filename()))

  def get_path(self):
    """
    Gets the cached path of this library.

    This will trigger a build if this library is not in the cache.
    """
    return shared.Cache.get_lib(self.get_filename(), self.build)

  def get_link_flag(self):
    """
    Gets the link flags needed to use the library.

    This will trigger a build if this library is not in the cache.
    """
    fullpath = self.get_path()
    # For non-libraries (e.g. crt1.o) we pass the entire path to the linker
    if self.get_ext() != '.a':
      return fullpath
    # For libraries (.a) files, we pass the abbreviated `-l` form.
    base = shared.unsuffixed_basename(fullpath)
    return '-l' + shared.strip_prefix(base, 'lib')

  def get_files(self):
    """
    Gets a list of source files for this library.

    Typically, you will use `src_dir`, `src_files`, `src_glob` and `src_glob_exclude`.
    If those are insufficient to describe the files needed, you can override this method.
    """
    if self.src_dir:
      if self.src_files and self.src_glob:
        raise Exception('Cannot use src_files and src_glob together')

      if self.src_files:
        return files_in_path(self.src_dir, self.src_files)
      elif self.src_glob:
        return glob_in_path(self.src_dir, self.src_glob, self.src_glob_exclude or ())

    raise NotImplementedError()

  def build_objects(self, build_dir):
    """
    Returns a list of compiled object files for this library.

    By default, this builds all the source files returned by `self.get_files()`,
    with the `cflags` returned by `self.get_cflags()`.
    """
    commands = []
    objects = []
    cflags = self.get_cflags()
    base_flags = get_base_cflags()
    case_insensitive = is_case_insensitive(build_dir)
    for src in self.get_files():
      object_basename = shared.unsuffixed_basename(src)
      # Resolve duplicates by appending a unique suffix below.
      # This is needed on case insensitive filesystems to handle,
      # for example, _exit.o and _Exit.o.
      if case_insensitive:
        object_basename = object_basename.lower()
      o = os.path.join(build_dir, object_basename + '.o')
      object_uuid = 0
      # Find a unique basename
      while o in objects:
        object_uuid += 1
        o = os.path.join(build_dir, f'{object_basename}__{object_uuid}.o')
      ext = shared.suffix(src)
      # Assembly and C go through EMCC; anything else (C++) through EMXX.
      if ext in ('.s', '.S', '.c'):
        cmd = [shared.EMCC]
      else:
        cmd = [shared.EMXX]
      if ext in ('.s', '.S'):
        # Assembly files only get the base flags, not the library's cflags.
        cmd += base_flags
        # TODO(sbc) There is an llvm bug that causes a crash when `-g` is used with
        # assembly files that define wasm globals.
        cmd.remove('-g')
      else:
        cmd += cflags
      commands.append(cmd + ['-c', src, '-o', o])
      objects.append(o)
    run_build_commands(commands)
    return objects

  def build(self, out_filename):
    """Builds the library, writing the result to `out_filename`.

    The output path is chosen by the cache (see `get_path`); this method
    does not return it.
    """
    build_dir = shared.Cache.get_path(os.path.join('build', self.get_base_name()))
    utils.safe_ensure_dirs(build_dir)
    create_lib(out_filename, self.build_objects(build_dir))
    # Keep the intermediate build directory around when debugging.
    if not shared.DEBUG:
      tempfiles.try_delete(build_dir)

  @classmethod
  def _inherit_list(cls, attr):
    # Some properties, like cflags and includes, makes more sense to inherit
    # via concatenation than replacement.
    result = []
    # Walk the MRO from base to derived so base-class entries come first.
    for item in cls.__mro__[::-1]:
      # Using __dict__ to avoid inheritance
      result += item.__dict__.get(attr, [])
    return result

  def get_cflags(self):
    """
    Returns the list of flags to pass to emcc when building this variation
    of the library.

    Override and add any flags as needed to handle new variations.
    """
    cflags = self._inherit_list('cflags')
    cflags += get_base_cflags(force_object_files=self.force_object_files)

    if self.includes:
      cflags += ['-I' + utils.path_from_root(i) for i in self._inherit_list('includes')]

    return cflags

  def get_base_name_prefix(self):
    """
    Returns the base name of the library without any suffixes.
    """
    return self.name

  def get_base_name(self):
    """
    Returns the base name of the library file.

    This will include suffixes such as -mt, but will not include a file extension.
    """
    return self.get_base_name_prefix()

  def get_ext(self):
    """
    Return the appropriate file extension for this library.
    """
    return '.a'

  def get_filename(self):
    """
    Return the full name of the library file, including the file extension.
    """
    return self.get_base_name() + self.get_ext()

  @classmethod
  def vary_on(cls):
    """
    Returns a list of strings that are the names of boolean constructor
    arguments that defines the variations of this library.

    This is used by the default implementation of `cls.variations()` to generate
    every possible combination of boolean values to pass to these arguments.
    """
    return []

  @classmethod
  def variations(cls):
    """
    Returns a list of keyword arguments to pass to the constructor to create
    every possible variation of this library.

    By default, this is every possible combination of boolean values to pass
    to the list of arguments returned by `vary_on`, but you can override
    the behaviour.
    """
    vary_on = cls.vary_on()
    return [dict(zip(vary_on, toggles)) for toggles in
            itertools.product([False, True], repeat=len(vary_on))]

  @classmethod
  def get_default_variation(cls, **kwargs):
    """
    Construct the variation suitable for the current invocation of emscripten.

    Subclasses should pass the keyword arguments they introduce to the
    superclass version, and propagate **kwargs. The base class collects
    all the keyword arguments and creates the instance.
    """
    return cls(**kwargs)

  @classmethod
  def get_inheritance_tree(cls):
    """Returns all the classes in the inheritance tree of the current class."""
    yield cls
    for subclass in cls.__subclasses__():
      # Recurse; the loop variable is deliberately reused for the yielded
      # descendants of each direct subclass.
      for subclass in subclass.get_inheritance_tree():
        yield subclass

  @classmethod
  def get_all_variations(cls):
    """
    Gets all the variations of libraries in the inheritance tree of the current
    library.

    Calling Library.get_all_variations() returns the variations of ALL libraries
    that can be built as a dictionary of variation names to Library objects.
    """
    result = {}
    for library in cls.get_inheritance_tree():
      # Abstract libraries have name == None and are skipped.
      if library.name:
        for flags in library.variations():
          variation = library(**flags)
          if variation.can_build():
            result[variation.get_base_name()] = variation
    return result

  @classmethod
  def get_usable_variations(cls):
    """
    Gets all libraries suitable for the current invocation of emscripten.

    This returns a dictionary of simple names to Library objects.
    """
    # The result is computed once and cached on the class.
    if not hasattr(cls, 'useable_variations'):
      cls.useable_variations = {}
      for subclass in cls.get_inheritance_tree():
        if subclass.name:
          library = subclass.get_default_variation()
          if library.can_build() and library.can_use():
            cls.useable_variations[subclass.name] = library
    return cls.useable_variations
class MTLibrary(Library):
  """Abstract library with single-threaded and multi-threaded (-mt) variations."""

  def __init__(self, **kwargs):
    self.is_mt = kwargs.pop('is_mt')
    super().__init__(**kwargs)

  def get_cflags(self):
    flags = super().get_cflags()
    if not self.is_mt:
      return flags
    # Multithreaded variations are built with pthreads support enabled.
    return flags + ['-s', 'USE_PTHREADS']

  def get_base_name(self):
    suffix = '-mt' if self.is_mt else ''
    return super().get_base_name() + suffix

  @classmethod
  def vary_on(cls):
    return super().vary_on() + ['is_mt']

  @classmethod
  def get_default_variation(cls, **kwargs):
    return super().get_default_variation(is_mt=settings.USE_PTHREADS, **kwargs)
class OptimizedAggressivelyForSizeLibrary(Library):
  """Abstract library with an extra size-optimized (-optz) variation, selected
  when building at shrink level 2 or higher."""

  def __init__(self, **kwargs):
    self.is_optz = kwargs.pop('is_optz')
    super().__init__(**kwargs)

  def get_base_name(self):
    suffix = '-optz' if self.is_optz else ''
    return super().get_base_name() + suffix

  def get_cflags(self):
    flags = super().get_cflags()
    if self.is_optz:
      flags = flags + ['-DEMSCRIPTEN_OPTIMIZE_FOR_OZ']
    return flags

  @classmethod
  def vary_on(cls):
    return super().vary_on() + ['is_optz']

  @classmethod
  def get_default_variation(cls, **kwargs):
    return super().get_default_variation(is_optz=settings.SHRINK_LEVEL >= 2, **kwargs)
class Exceptions(IntEnum):
  """
  Exception handling modes of Emscripten. Currently there are three:

  - NONE: Does not handle exceptions. This includes -fno-exceptions, which
    prevents both throwing and catching, and -fignore-exceptions, which only
    allows throwing, but library-wise they use the same version.
  - EMSCRIPTEN: Emscripten provides exception handling capability using JS
    emulation. This causes code size increase and performance degradation.
  - WASM: Wasm native exception handling support uses Wasm EH instructions and
    is meant to be fast. You need to use a VM that has the EH support to use
    this. This is not fully working yet and still experimental.
  """
  # Explicit values, equivalent to what enum.auto() assigned previously.
  NONE = 1
  EMSCRIPTEN = 2
  WASM = 3
class NoExceptLibrary(Library):
  """Abstract library with one variation per exception handling mode
  (see the Exceptions enum)."""

  def __init__(self, **kwargs):
    self.eh_mode = kwargs.pop('eh_mode')
    super().__init__(**kwargs)

  def get_cflags(self):
    cflags = super().get_cflags()
    if self.eh_mode == Exceptions.NONE:
      cflags.append('-fno-exceptions')
    elif self.eh_mode == Exceptions.EMSCRIPTEN:
      cflags.extend(['-s', 'DISABLE_EXCEPTION_CATCHING=0'])
    elif self.eh_mode == Exceptions.WASM:
      cflags.append('-fwasm-exceptions')
    return cflags

  def get_base_name(self):
    # TODO Currently emscripten-based exception is the default mode, thus no
    # suffixes. Change the default to wasm exception later.
    suffixes = {Exceptions.NONE: '-noexcept', Exceptions.WASM: '-except'}
    return super().get_base_name() + suffixes.get(self.eh_mode, '')

  @classmethod
  def variations(cls, **kwargs):  # noqa
    combos = super().variations()
    result = []
    # One copy of every combination per exception handling mode.
    for mode in (Exceptions.NONE, Exceptions.EMSCRIPTEN, Exceptions.WASM):
      result += [dict(eh_mode=mode, **combo) for combo in combos]
    return result

  @classmethod
  def get_default_variation(cls, **kwargs):
    if settings.EXCEPTION_HANDLING:
      eh_mode = Exceptions.WASM
    elif settings.DISABLE_EXCEPTION_CATCHING == 1:
      eh_mode = Exceptions.NONE
    else:
      eh_mode = Exceptions.EMSCRIPTEN
    return super().get_default_variation(eh_mode=eh_mode, **kwargs)
class SjLjLibrary(Library):
  """Abstract library with variations for emscripten-based vs wasm-based
  setjmp/longjmp support."""

  def __init__(self, **kwargs):
    # Whether we use Wasm EH instructions for SjLj support
    self.is_wasm = kwargs.pop('is_wasm')
    super().__init__(**kwargs)

  def get_cflags(self):
    cflags = super().get_cflags()
    if self.is_wasm:
      # DISABLE_EXCEPTION_THROWING=0 is the default, which is for Emscripten
      # EH/SjLj, so we should reverse it.
      extra = ['-s', 'SUPPORT_LONGJMP=wasm',
               '-s', 'DISABLE_EXCEPTION_THROWING=1',
               '-D__USING_WASM_SJLJ__']
    else:
      extra = ['-s', 'SUPPORT_LONGJMP=emscripten']
    return cflags + extra

  def get_base_name(self):
    # TODO Currently emscripten-based SjLj is the default mode, thus no
    # suffixes. Change the default to wasm exception later.
    suffix = '-wasm-sjlj' if self.is_wasm else ''
    return super().get_base_name() + suffix

  @classmethod
  def vary_on(cls):
    return super().vary_on() + ['is_wasm']

  @classmethod
  def get_default_variation(cls, **kwargs):
    return super().get_default_variation(
        is_wasm=settings.SUPPORT_LONGJMP == 'wasm', **kwargs)
class MuslInternalLibrary(Library):
  """Abstract base for libraries compiled against musl's internal headers."""

  includes = [
    'system/lib/libc/musl/src/internal',
    'system/lib/libc/musl/src/include',
  ]

  cflags = [
    '-std=c99',
    '-D_XOPEN_SOURCE=700',
    '-Wno-unused-result',  # system call results are often ignored in musl, and in wasi that warns
  ]
class AsanInstrumentedLibrary(Library):
  """Abstract library with an optional AddressSanitizer-instrumented (-asan)
  variation."""

  def __init__(self, **kwargs):
    # Defaults to False so subclasses need not always pass is_asan.
    self.is_asan = kwargs.pop('is_asan', False)
    super().__init__(**kwargs)

  def get_cflags(self):
    flags = super().get_cflags()
    if not self.is_asan:
      return flags
    return flags + ['-fsanitize=address']

  def get_base_name(self):
    suffix = '-asan' if self.is_asan else ''
    return super().get_base_name() + suffix

  @classmethod
  def vary_on(cls):
    return super().vary_on() + ['is_asan']

  @classmethod
  def get_default_variation(cls, **kwargs):
    return super().get_default_variation(is_asan=settings.USE_ASAN, **kwargs)
# Subclass of SjLjLibrary because emscripten_setjmp.c uses SjLj support
class libcompiler_rt(MTLibrary, SjLjLibrary):
  """compiler-rt builtins plus emscripten-specific setjmp/stack helpers."""
  name = 'libcompiler_rt'
  # compiler_rt files can't currently be part of LTO although we are hoping to remove this
  # restriction soon: https://reviews.llvm.org/D71738
  force_object_files = True

  cflags = ['-O2', '-fno-builtin']
  src_dir = 'system/lib/compiler-rt/lib/builtins'
  # gcc_personality_v0.c depends on libunwind, which we don't include by default.
  src_files = glob_in_path(src_dir, '*.c', excludes=['gcc_personality_v0.c'])
  src_files += files_in_path(
    path='system/lib/compiler-rt',
    filenames=[
      'stack_ops.S',
      'stack_limits.S',
      'emscripten_setjmp.c',
      'emscripten_exception_builtins.c'
    ])
class libc(AsanInstrumentedLibrary, MuslInternalLibrary, MTLibrary):
  """The main C standard library, assembled from musl sources plus
  emscripten-specific replacements and additions."""
  name = 'libc'

  # Without -fno-builtin, LLVM can optimize away or convert calls to library
  # functions to something else based on assumptions that they behave exactly
  # like the standard library. This can cause unexpected bugs when we use our
  # custom standard library. The same for other libc/libm builds.
  cflags = ['-Os', '-fno-builtin']

  # Disable certain warnings for code patterns that are contained in upstream musl
  cflags += ['-Wno-ignored-attributes',
             '-Wno-dangling-else',
             '-Wno-unknown-pragmas',
             '-Wno-shift-op-parentheses',
             '-Wno-string-plus-int',
             '-Wno-pointer-sign']

  def get_files(self):
    """Builds the list of source files: musl sources minus the ignore lists,
    plus hand-picked files from otherwise-ignored modules and emscripten's
    own libc sources."""
    libc_files = []
    musl_srcdir = utils.path_from_root('system/lib/libc/musl/src')

    # musl modules (whole directories skipped when walking musl_srcdir)
    ignore = [
        'ipc', 'passwd', 'signal', 'sched', 'time', 'linux',
        'aio', 'exit', 'legacy', 'mq', 'setjmp', 'env',
        'ldso', 'malloc'
    ]

    # individual files
    ignore += [
        'memcpy.c', 'memset.c', 'memmove.c', 'getaddrinfo.c', 'getnameinfo.c',
        'res_query.c', 'res_querydomain.c', 'gai_strerror.c',
        'proto.c', 'gethostbyaddr.c', 'gethostbyaddr_r.c', 'gethostbyname.c',
        'gethostbyname2_r.c', 'gethostbyname_r.c', 'gethostbyname2.c',
        'alarm.c', 'syscall.c', 'popen.c', 'pclose.c',
        'getgrouplist.c', 'initgroups.c', 'wordexp.c', 'timer_create.c',
        'getentropy.c',
        # 'process' exclusion
        'fork.c', 'vfork.c', 'posix_spawn.c', 'posix_spawnp.c', 'execve.c', 'waitid.c', 'system.c',
        '_Fork.c',
    ]

    ignore += LIBC_SOCKETS

    if self.is_mt:
      # Multithreaded build: skip musl's thread primitives that emscripten
      # replaces, and add emscripten's pthread implementations instead.
      ignore += [
        'clone.c', '__lock.c',
        'pthread_create.c',
        'pthread_kill.c', 'pthread_sigmask.c',
        '__set_thread_area.c', 'synccall.c',
        '__syscall_cp.c', '__tls_get_addr.c',
        '__unmapself.c',
        # Empty files, simply ignore them.
        'syscall_cp.c', 'tls.c',
        # TODO: Comment out (or support) within upcoming musl upgrade. See #12216.
        'pthread_setname_np.c',
        # TODO: No longer exists in the latest musl version.
        '__futex.c',
        # 'pthread_setattr_default_np.c',
        # TODO: These could be moved away from JS in the upcoming musl upgrade.
        'pthread_cancel.c',
        'pthread_join.c', 'pthread_testcancel.c',
      ]
      libc_files += files_in_path(
        path='system/lib/pthread',
        filenames=[
          'library_pthread.c',
          'pthread_create.c',
          'pthread_join.c',
          'pthread_testcancel.c',
          'emscripten_proxy_main.c',
          'emscripten_thread_state.S',
        ])
    else:
      # Single-threaded build: skip musl's thread module except for a few
      # harmless functions, and add emscripten's pthread stubs.
      ignore += ['thread']
      libc_files += files_in_path(
        path='system/lib/libc/musl/src/thread',
        filenames=[
          'pthread_self.c',
          'pthread_cleanup_push.c',
          'pthread_attr_get.c',
          # C11 thread library functions
          'call_once.c',
          'tss_create.c',
          'tss_delete.c',
          'tss_set.c',
          'cnd_broadcast.c',
          'cnd_destroy.c',
          'cnd_init.c',
          'cnd_signal.c',
          'cnd_timedwait.c',
          'cnd_wait.c',
          'mtx_destroy.c',
          'mtx_init.c',
          'mtx_lock.c',
          'mtx_timedlock.c',
          'mtx_trylock.c',
          'mtx_unlock.c',
          'thrd_create.c',
          'thrd_exit.c',
          'thrd_join.c',
          'thrd_sleep.c',
          'thrd_yield.c',
        ])
      libc_files += files_in_path(
        path='system/lib/pthread',
        filenames=[
          'library_pthread_stub.c',
          'pthread_self_stub.c'
        ])

    # These are included in wasm_libc_rt instead
    ignore += [os.path.basename(f) for f in get_wasm_libc_rt_files()]

    ignore = set(ignore)
    # TODO: consider using more math code from musl, doing so makes box2d faster
    for dirpath, dirnames, filenames in os.walk(musl_srcdir):
      # Don't recurse into ignored directories
      remove = [d for d in dirnames if d in ignore]
      for r in remove:
        dirnames.remove(r)

      for f in filenames:
        if f.endswith('.c') and f not in ignore:
          libc_files.append(os.path.join(musl_srcdir, dirpath, f))

    # Allowed files from ignored modules
    libc_files += files_in_path(
      path='system/lib/libc/musl/src/time',
      filenames=[
        'clock_settime.c',
        'asctime_r.c',
        'asctime.c',
        'ctime.c',
        'gmtime.c',
        'localtime.c',
        'nanosleep.c',
        'clock_nanosleep.c',
      ])

    libc_files += files_in_path(
      path='system/lib/libc/musl/src/legacy',
      filenames=['getpagesize.c', 'err.c'])

    libc_files += files_in_path(
      path='system/lib/libc/musl/src/linux',
      filenames=['getdents.c'])

    libc_files += files_in_path(
      path='system/lib/libc/musl/src/env',
      filenames=['__environ.c', 'getenv.c', 'putenv.c', 'setenv.c', 'unsetenv.c'])

    libc_files += files_in_path(
      path='system/lib/libc/musl/src/sched',
      filenames=['sched_yield.c'])

    libc_files += files_in_path(
      path='system/lib/libc/musl/src/exit',
      filenames=['_Exit.c'])

    libc_files += files_in_path(
      path='system/lib/libc/musl/src/ldso',
      filenames=['dlerror.c', 'dlsym.c', 'dlclose.c'])

    libc_files += files_in_path(
      path='system/lib/libc/musl/src/linux',
      filenames=['gettid.c'])

    libc_files += files_in_path(
      path='system/lib/libc/musl/src/signal',
      filenames=[
        'block.c',
        'getitimer.c',
        'killpg.c',
        'setitimer.c',
        'sigorset.c',
        'sigandset.c',
        'sigaddset.c',
        'sigdelset.c',
        'sigemptyset.c',
        'sigfillset.c',
        'sigismember.c',
        'siginterrupt.c',
        'signal.c',
        'sigprocmask.c',
        'sigrtmax.c',
        'sigrtmin.c',
        'sigwait.c',
        'sigwaitinfo.c',
      ])

    # emscripten's own libc additions and replacements.
    libc_files += files_in_path(
      path='system/lib/libc',
      filenames=[
        'dynlink.c',
        'extras.c',
        'wasi-helpers.c',
        'emscripten_get_heap_size.c',
        'raise.c',
        'kill.c',
        'sigaction.c',
        'sigtimedwait.c',
        'pthread_sigmask.c',
        'emscripten_console.c',
      ])

    libc_files += files_in_path(
      path='system/lib/pthread',
      filenames=['emscripten_atomic.c'])

    libc_files += glob_in_path('system/lib/libc/compat', '*.c')

    return libc_files
class libprintf_long_double(libc):
  """Build of musl's vfprintf with EMSCRIPTEN_PRINTF_LONG_DOUBLE defined,
  linked in when settings.PRINTF_LONG_DOUBLE is enabled."""
  name = 'libprintf_long_double'
  cflags = ['-DEMSCRIPTEN_PRINTF_LONG_DOUBLE']

  def get_files(self):
    return files_in_path(
      path='system/lib/libc/musl/src/stdio',
      filenames=['vfprintf.c'])

  def can_use(self):
    # Fixed: use the zero-argument super() form, consistent with the rest of
    # this file (the explicit two-argument form is a Python 2 holdover).
    return super().can_use() and settings.PRINTF_LONG_DOUBLE
class libsockets(MuslInternalLibrary, MTLibrary):
  """musl's network/socket sources, used when sockets are not proxied
  (i.e. settings.PROXY_POSIX_SOCKETS is disabled)."""
  name = 'libsockets'

  cflags = ['-Os', '-fno-builtin', '-Wno-shift-op-parentheses']

  def get_files(self):
    return files_in_path(
      path='system/lib/libc/musl/src/network',
      filenames=LIBC_SOCKETS)

  def can_use(self):
    # Fixed: zero-argument super(), consistent with the rest of this file.
    return super().can_use() and not settings.PROXY_POSIX_SOCKETS
class libsockets_proxy(MTLibrary):
  """Socket support that proxies POSIX socket calls over a websocket, used
  when settings.PROXY_POSIX_SOCKETS is enabled."""
  name = 'libsockets_proxy'

  cflags = ['-Os']

  def get_files(self):
    return [utils.path_from_root('system/lib/websocket/websocket_to_posix_socket.cpp')]

  def can_use(self):
    # Fixed: zero-argument super(), consistent with the rest of this file.
    return super().can_use() and settings.PROXY_POSIX_SOCKETS
class crt1(MuslInternalLibrary):
  """Startup object (crt1.o) built from crt1.c for standalone wasm builds.

  Unlike most libraries, this is emitted as a single object file passed
  directly to the linker rather than as a .a archive.
  """
  name = 'crt1'
  cflags = ['-O2']
  src_dir = 'system/lib/libc'
  src_files = ['crt1.c']

  force_object_files = True

  def get_ext(self):
    # A bare object file, not an archive.
    return '.o'

  def can_use(self):
    # Only linked when targeting standalone wasm.
    return super().can_use() and settings.STANDALONE_WASM
class crt1_reactor(MuslInternalLibrary):
  """Startup object (crt1_reactor.o) for standalone wasm "reactor" builds
  (built from crt1_reactor.c); emitted as a bare object file like crt1."""
  name = 'crt1_reactor'
  cflags = ['-O2']
  src_dir = 'system/lib/libc'
  src_files = ['crt1_reactor.c']

  force_object_files = True

  def get_ext(self):
    # A bare object file, not an archive.
    return '.o'

  def can_use(self):
    # Only linked when targeting standalone wasm.
    return super().can_use() and settings.STANDALONE_WASM
class crtbegin(Library):
  """Startup object built from emscripten_tls_init.c, linked only in
  pthreads builds."""
  name = 'crtbegin'
  cflags = ['-O2', '-s', 'USE_PTHREADS']
  src_dir = 'system/lib/pthread'
  src_files = ['emscripten_tls_init.c']

  force_object_files = True

  def get_ext(self):
    # A bare object file, not an archive.
    return '.o'

  def can_use(self):
    return super().can_use() and settings.USE_PTHREADS
class libcxxabi(NoExceptLibrary, MTLibrary):
  """The LLVM libc++abi C++ ABI runtime library."""
  name = 'libc++abi'
  cflags = [
      '-Oz',
      '-D_LIBCXXABI_BUILDING_LIBRARY',
      '-DLIBCXXABI_NON_DEMANGLING_TERMINATE',
    ]

  def get_cflags(self):
    cflags = super().get_cflags()
    cflags.append('-DNDEBUG')
    if not self.is_mt:
      cflags.append('-D_LIBCXXABI_HAS_NO_THREADS')
    # Select the exception handling defines matching this variation's mode.
    if self.eh_mode == Exceptions.NONE:
      cflags.append('-D_LIBCXXABI_NO_EXCEPTIONS')
    elif self.eh_mode == Exceptions.EMSCRIPTEN:
      cflags.append('-D__USING_EMSCRIPTEN_EXCEPTIONS__')
      # The code used to interpret exceptions during terminate
      # is not compatible with emscripten exceptions.
      cflags.append('-DLIBCXXABI_SILENT_TERMINATE')
    elif self.eh_mode == Exceptions.WASM:
      cflags.append('-D__USING_WASM_EXCEPTIONS__')
    return cflags

  def get_files(self):
    filenames = [
      'abort_message.cpp',
      'cxa_aux_runtime.cpp',
      'cxa_default_handlers.cpp',
      'cxa_demangle.cpp',
      'cxa_guard.cpp',
      'cxa_handlers.cpp',
      'cxa_virtual.cpp',
      'cxa_thread_atexit.cpp',
      'fallback_malloc.cpp',
      'stdlib_new_delete.cpp',
      'stdlib_exception.cpp',
      'stdlib_stdexcept.cpp',
      'stdlib_typeinfo.cpp',
      'private_typeinfo.cpp'
    ]
    # Exception-mode-specific sources.
    if self.eh_mode == Exceptions.NONE:
      filenames += ['cxa_noexception.cpp']
    elif self.eh_mode == Exceptions.WASM:
      filenames += [
        'cxa_exception_storage.cpp',
        'cxa_exception.cpp',
        'cxa_personality.cpp'
      ]

    return files_in_path(
        path='system/lib/libcxxabi/src',
        filenames=filenames)
class libcxx(NoExceptLibrary, MTLibrary):
  """The LLVM libc++ C++ standard library."""
  name = 'libc++'

  cflags = ['-DLIBCXX_BUILDING_LIBCXXABI=1', '-D_LIBCPP_BUILDING_LIBRARY', '-Oz',
            '-D_LIBCPP_DISABLE_VISIBILITY_ANNOTATIONS']

  src_dir = 'system/lib/libcxx/src'
  src_glob = '**/*.cpp'
  # Sources excluded from the build (e.g. the *_win32.cpp files are
  # Windows-only).
  src_glob_exclude = ['locale_win32.cpp', 'thread_win32.cpp', 'support.cpp', 'int128_builtins.cpp']
class libunwind(NoExceptLibrary, MTLibrary):
  """The LLVM libunwind library; only usable with wasm-native exception
  handling (see can_use)."""
  name = 'libunwind'
  # Because calls to _Unwind_CallPersonality are generated during LTO, libunwind
  # can't currently be part of LTO.
  # See https://bugs.llvm.org/show_bug.cgi?id=44353
  force_object_files = True

  cflags = ['-Oz', '-D_LIBUNWIND_DISABLE_VISIBILITY_ANNOTATIONS']
  src_dir = 'system/lib/libunwind/src'
  # Without this we can't build libunwind since it will pickup the unwind.h
  # that is part of llvm (which is not compatible for some reason).
  includes = ['system/lib/libunwind/include']
  src_files = ['Unwind-wasm.c']

  # Fixed: removed a redundant __init__ that only forwarded to
  # super().__init__; the inherited constructor is equivalent.

  def can_use(self):
    return super().can_use() and self.eh_mode == Exceptions.WASM

  def get_cflags(self):
    cflags = super().get_cflags()
    cflags.append('-DNDEBUG')
    if not self.is_mt:
      cflags.append('-D_LIBUNWIND_HAS_NO_THREADS')
    # Select the exception handling defines matching this variation's mode.
    if self.eh_mode == Exceptions.NONE:
      cflags.append('-D_LIBUNWIND_HAS_NO_EXCEPTIONS')
    elif self.eh_mode == Exceptions.EMSCRIPTEN:
      cflags.append('-D__USING_EMSCRIPTEN_EXCEPTIONS__')
    elif self.eh_mode == Exceptions.WASM:
      cflags.append('-D__USING_WASM_EXCEPTIONS__')
    return cflags
class libmalloc(MTLibrary):
  """The malloc implementation (dlmalloc or emmalloc) plus sbrk, with
  variations for debug, errno-free, tracing, memory validation and
  verbose builds."""
  name = 'libmalloc'

  cflags = ['-O2', '-fno-builtin']

  def __init__(self, **kwargs):
    self.malloc = kwargs.pop('malloc')
    if self.malloc not in ('dlmalloc', 'emmalloc', 'emmalloc-debug', 'emmalloc-memvalidate', 'emmalloc-verbose', 'emmalloc-memvalidate-verbose', 'none'):
      raise Exception('malloc must be one of "emmalloc[-debug|-memvalidate][-verbose]", "dlmalloc" or "none", see settings.js')

    self.use_errno = kwargs.pop('use_errno')
    self.is_tracing = kwargs.pop('is_tracing')
    self.memvalidate = kwargs.pop('memvalidate')
    self.verbose = kwargs.pop('verbose')
    # memvalidate and verbose each imply a debug build.
    self.is_debug = kwargs.pop('is_debug') or self.memvalidate or self.verbose

    super().__init__(**kwargs)

  def get_files(self):
    # Strip variation suffixes to map e.g. 'emmalloc-memvalidate' onto the
    # single emmalloc.c source file.
    malloc_base = self.malloc.replace('-memvalidate', '').replace('-verbose', '').replace('-debug', '')
    malloc = utils.path_from_root('system/lib', {
      'dlmalloc': 'dlmalloc.c', 'emmalloc': 'emmalloc.c',
    }[malloc_base])
    sbrk = utils.path_from_root('system/lib/sbrk.c')
    return [malloc, sbrk]

  def get_cflags(self):
    cflags = super().get_cflags()
    if self.memvalidate:
      cflags += ['-DEMMALLOC_MEMVALIDATE']
    if self.verbose:
      cflags += ['-DEMMALLOC_VERBOSE']
    if self.is_debug:
      cflags += ['-UNDEBUG', '-DDLMALLOC_DEBUG']
    else:
      cflags += ['-DNDEBUG']
    if not self.use_errno:
      cflags += ['-DMALLOC_FAILURE_ACTION=', '-DEMSCRIPTEN_NO_ERRNO']
    if self.is_tracing:
      cflags += ['--tracing']
    return cflags

  def get_base_name_prefix(self):
    # The library name embeds the chosen implementation, e.g. libdlmalloc.
    return 'lib%s' % self.malloc

  def get_base_name(self):
    name = super().get_base_name()
    # memvalidate/verbose already appear in the malloc name itself, so only
    # add -debug when neither of them forced debug mode.
    if self.is_debug and not self.memvalidate and not self.verbose:
      name += '-debug'
    if not self.use_errno:
      # emmalloc doesn't actually use errno, but it's easier to build it again
      name += '-noerrno'
    if self.is_tracing:
      name += '-tracing'
    return name

  def can_use(self):
    return super().can_use() and settings.MALLOC != 'none'

  @classmethod
  def vary_on(cls):
    return super().vary_on() + ['is_debug', 'use_errno', 'is_tracing', 'memvalidate', 'verbose']

  @classmethod
  def get_default_variation(cls, **kwargs):
    return super().get_default_variation(
      malloc=settings.MALLOC,
      is_debug=settings.ASSERTIONS >= 2,
      use_errno=settings.SUPPORT_ERRNO,
      is_tracing=settings.EMSCRIPTEN_TRACING,
      memvalidate='memvalidate' in settings.MALLOC,
      verbose='verbose' in settings.MALLOC,
      **kwargs
    )

  @classmethod
  def variations(cls):
    # Only certain combinations are meaningful: dlmalloc/emmalloc without the
    # memvalidate/verbose toggles, and emmalloc variants matching them.
    combos = super().variations()
    return ([dict(malloc='dlmalloc', **combo) for combo in combos if not combo['memvalidate'] and not combo['verbose']] +
            [dict(malloc='emmalloc', **combo) for combo in combos if not combo['memvalidate'] and not combo['verbose']] +
            [dict(malloc='emmalloc-memvalidate-verbose', **combo) for combo in combos if combo['memvalidate'] and combo['verbose']] +
            [dict(malloc='emmalloc-memvalidate', **combo) for combo in combos if combo['memvalidate'] and not combo['verbose']] +
            [dict(malloc='emmalloc-verbose', **combo) for combo in combos if combo['verbose'] and not combo['memvalidate']])
class libal(Library):
  """OpenAL support library, built from al.c."""
  name = 'libal'
  cflags = ['-Os']

  src_dir = 'system/lib'
  src_files = ['al.c']
class libGL(MTLibrary):
  """GL support library with variations for legacy GL emulation, WebGL2,
  offscreen framebuffer and full ES3 support."""
  name = 'libGL'

  src_dir = 'system/lib/gl'
  src_files = ['gl.c', 'webgl1.c', 'libprocaddr.c']

  cflags = ['-Oz']

  def __init__(self, **kwargs):
    self.is_legacy = kwargs.pop('is_legacy')
    self.is_webgl2 = kwargs.pop('is_webgl2')
    self.is_ofb = kwargs.pop('is_ofb')
    self.is_full_es3 = kwargs.pop('is_full_es3')
    if self.is_webgl2 or self.is_full_es3:
      # Build a new list instead of appending in place, otherwise we would
      # mutate the shared class-level `src_files` attribute.
      self.src_files = self.src_files + ['webgl2.c']
    super().__init__(**kwargs)

  def _feature_flags(self):
    # One (enabled, name-suffix, cflag) triple per feature toggle.
    return ((self.is_legacy, '-emu', '-DLEGACY_GL_EMULATION=1'),
            (self.is_webgl2, '-webgl2', '-DMAX_WEBGL_VERSION=2'),
            (self.is_ofb, '-ofb', '-D__EMSCRIPTEN_OFFSCREEN_FRAMEBUFFER__'),
            (self.is_full_es3, '-full_es3', '-D__EMSCRIPTEN_FULL_ES3__'))

  def get_base_name(self):
    name = super().get_base_name()
    for enabled, suffix, _ in self._feature_flags():
      if enabled:
        name += suffix
    return name

  def get_cflags(self):
    cflags = super().get_cflags()
    for enabled, _, define in self._feature_flags():
      if enabled:
        cflags.append(define)
    return cflags

  @classmethod
  def vary_on(cls):
    return super().vary_on() + ['is_legacy', 'is_webgl2', 'is_ofb', 'is_full_es3']

  @classmethod
  def get_default_variation(cls, **kwargs):
    return super().get_default_variation(
      is_legacy=settings.LEGACY_GL_EMULATION,
      is_webgl2=settings.MAX_WEBGL_VERSION >= 2,
      is_ofb=settings.OFFSCREEN_FRAMEBUFFER,
      is_full_es3=settings.FULL_ES3,
      **kwargs
    )
class libwebgpu_cpp(MTLibrary):
  """WebGPU C++ bindings, built from webgpu_cpp.cpp."""
  name = 'libwebgpu_cpp'

  cflags = ['-std=c++11', '-O2']
  src_dir = 'system/lib/webgpu'
  src_files = ['webgpu_cpp.cpp']
class libembind(Library):
  """Embind support library (bind.cpp) with an optional RTTI-enabled
  (-rtti) variation."""
  name = 'libembind'
  never_force = True

  def __init__(self, **kwargs):
    # Whether to build with RTTI; without it, unbound type names are
    # disabled via the define in get_cflags.
    self.with_rtti = kwargs.pop('with_rtti', False)
    super().__init__(**kwargs)

  def get_cflags(self):
    cflags = super().get_cflags()
    if not self.with_rtti:
      cflags += ['-fno-rtti', '-DEMSCRIPTEN_HAS_UNBOUND_TYPE_NAMES=0']
    return cflags

  @classmethod
  def vary_on(cls):
    return super().vary_on() + ['with_rtti']

  def get_base_name(self):
    name = super().get_base_name()
    if self.with_rtti:
      name += '-rtti'
    return name

  def get_files(self):
    return [utils.path_from_root('system/lib/embind/bind.cpp')]

  @classmethod
  def get_default_variation(cls, **kwargs):
    return super().get_default_variation(with_rtti=settings.USE_RTTI, **kwargs)
class libfetch(MTLibrary):
  """emscripten_fetch() API support."""
  name = 'libfetch'
  never_force = True
  def get_files(self):
    return [utils.path_from_root('system/lib/fetch/emscripten_fetch.cpp')]
class libstb_image(Library):
  """Bundled stb_image decoder (third-party single-file library)."""
  name = 'libstb_image'
  never_force = True
  includes = ['third_party']
  def get_files(self):
    return [utils.path_from_root('system/lib/stb_image.c')]
class libasmfs(MTLibrary):
  """ASMFS filesystem backend."""
  name = 'libasmfs'
  never_force = True
  def get_files(self):
    return [utils.path_from_root('system/lib/fetch/asmfs.cpp')]
  def can_build(self):
    # ASMFS is looking for a maintainer
    # https://github.com/emscripten-core/emscripten/issues/9534
    return True
class libwasmfs(MTLibrary):
  """New wasm-based filesystem implementation; only built when WASMFS is set."""
  name = 'libwasmfs'
  cflags = ['-fno-exceptions', '-std=c++17']
  def get_files(self):
    return files_in_path(
        path='system/lib/wasmfs',
        filenames=['syscalls.cpp', 'file_table.cpp', 'file.cpp', 'wasmfs.cpp', 'streams.cpp'])
  def can_build(self):
    return settings.WASMFS
class libhtml5(Library):
  """HTML5 API C wrappers (system/lib/html5/*.c)."""
  name = 'libhtml5'
  cflags = ['-Oz']
  src_dir = 'system/lib/html5'
  src_glob = '*.c'
class CompilerRTLibrary(Library):
  """Base class for libraries built from LLVM's compiler-rt sources."""
  cflags = ['-O2', '-fno-builtin']
  # compiler_rt files can't currently be part of LTO although we are hoping to remove this
  # restriction soon: https://reviews.llvm.org/D71738
  force_object_files = True
class libc_rt_wasm(OptimizedAggressivelyForSizeLibrary, AsanInstrumentedLibrary, CompilerRTLibrary, MuslInternalLibrary, MTLibrary):
  """compiler-rt pieces needed by libc on wasm."""
  name = 'libc_rt_wasm'
  def get_files(self):
    return get_wasm_libc_rt_files()
class libubsan_minimal_rt_wasm(CompilerRTLibrary, MTLibrary):
  """Minimal UBSan runtime (UBSAN_RUNTIME == 1)."""
  name = 'libubsan_minimal_rt_wasm'
  never_force = True
  includes = ['system/lib/compiler-rt/lib']
  src_dir = 'system/lib/compiler-rt/lib/ubsan_minimal'
  src_files = ['ubsan_minimal_handlers.cpp']
class libsanitizer_common_rt(CompilerRTLibrary, MTLibrary):
  """Shared sanitizer runtime support code."""
  name = 'libsanitizer_common_rt'
  # TODO(sbc): We should not need musl-internal headers here.
  includes = ['system/lib/libc/musl/src/internal',
              'system/lib/compiler-rt/lib']
  never_force = True
  src_dir = 'system/lib/compiler-rt/lib/sanitizer_common'
  src_glob = '*.cpp'
  src_glob_exclude = ['sanitizer_common_nolibc.cpp']
class SanitizerLibrary(CompilerRTLibrary, MTLibrary):
  """Base class for the individual sanitizer runtimes below."""
  never_force = True
  includes = ['system/lib/compiler-rt/lib']
  src_glob = '*.cpp'
class libubsan_rt(SanitizerLibrary):
  """Full UBSan runtime (UBSAN_RUNTIME == 2)."""
  name = 'libubsan_rt'
  cflags = ['-DUBSAN_CAN_USE_CXXABI']
  src_dir = 'system/lib/compiler-rt/lib/ubsan'
class liblsan_common_rt(SanitizerLibrary):
  """LSan code shared between standalone LSan and ASan's leak checker."""
  name = 'liblsan_common_rt'
  src_dir = 'system/lib/compiler-rt/lib/lsan'
  src_glob = 'lsan_common*.cpp'
class liblsan_rt(SanitizerLibrary):
  """Standalone LeakSanitizer runtime (excludes the shared lsan_common files)."""
  name = 'liblsan_rt'
  src_dir = 'system/lib/compiler-rt/lib/lsan'
  src_glob_exclude = ['lsan_common.cpp', 'lsan_common_mac.cpp', 'lsan_common_linux.cpp',
                      'lsan_common_emscripten.cpp']
class libasan_rt(SanitizerLibrary):
  """AddressSanitizer runtime."""
  name = 'libasan_rt'
  src_dir = 'system/lib/compiler-rt/lib/asan'
class libasan_js(Library):
  """JS-interfacing helpers compiled with ASan instrumentation."""
  name = 'libasan_js'
  never_force = True
  cflags = ['-fsanitize=address']
  src_dir = 'system/lib'
  src_files = ['asan_js.c']
# This library is used when STANDALONE_WASM is set. In that mode, we don't
# want to depend on JS, and so this library contains implementations of
# things that we'd normally do in JS. That includes some general things
# as well as some additional musl components (that normally we reimplement
# in JS as it's more efficient that way).
class libstandalonewasm(MuslInternalLibrary):
  """Native implementations of functionality normally provided by JS.

  Only usable when STANDALONE_WASM is set (see can_use). Varies on whether
  memory growth is allowed, since that changes the generated code.
  """
  name = 'libstandalonewasm'
  # LTO defeats the weak linking trick used in __original_main.c
  force_object_files = True
  cflags = ['-Os', '-fno-builtin']
  src_dir = 'system/lib'
  def __init__(self, **kwargs):
    # Variation flag: mirrors ALLOW_MEMORY_GROWTH at link time.
    self.is_mem_grow = kwargs.pop('is_mem_grow')
    super().__init__(**kwargs)
  def get_base_name(self):
    name = super().get_base_name()
    if self.is_mem_grow:
      name += '-memgrow'
    return name
  def get_cflags(self):
    cflags = super().get_cflags()
    cflags += ['-DNDEBUG', '-DEMSCRIPTEN_STANDALONE_WASM']
    if self.is_mem_grow:
      cflags += ['-DEMSCRIPTEN_MEMORY_GROWTH']
    return cflags
  @classmethod
  def vary_on(cls):
    return super().vary_on() + ['is_mem_grow']
  @classmethod
  def get_default_variation(cls, **kwargs):
    return super().get_default_variation(
      is_mem_grow=settings.ALLOW_MEMORY_GROWTH,
      **kwargs
    )
  def get_files(self):
    files = files_in_path(
        path='system/lib/standalone',
        filenames=['standalone.c', 'standalone_wasm_stdio.c', '__original_main.c',
                   '__main_void.c', '__main_argc_argv.c'])
    files += files_in_path(
        path='system/lib/libc',
        filenames=['emscripten_memcpy.c'])
    # It is more efficient to use JS methods for time, normally.
    files += files_in_path(
        path='system/lib/libc/musl/src/time',
        filenames=['strftime.c',
                   '__month_to_secs.c',
                   '__secs_to_tm.c',
                   '__tm_to_secs.c',
                   '__tz.c',
                   '__year_to_secs.c',
                   'clock.c',
                   'clock_gettime.c',
                   'ctime_r.c',
                   'difftime.c',
                   'gettimeofday.c',
                   'gmtime_r.c',
                   'localtime_r.c',
                   'mktime.c',
                   'time.c'])
    # It is more efficient to use JS for __assert_fail, as it avoids always
    # including fprintf etc.
    files += files_in_path(
        path='system/lib/libc/musl/src/exit',
        filenames=['assert.c', 'atexit.c', 'exit.c'])
    return files
  def can_use(self):
    # Zero-argument super(), consistent with the rest of this class.
    return super().can_use() and settings.STANDALONE_WASM
class libjsmath(Library):
  """Math functions implemented by calling out to JS; enabled by JS_MATH."""
  name = 'libjsmath'
  cflags = ['-Os']
  src_dir = 'system/lib'
  src_files = ['jsmath.c']
  def can_use(self):
    # Zero-argument super(), consistent with the rest of this file.
    return super().can_use() and settings.JS_MATH
class libstubs(Library):
  """Stub implementations of unsupported syscalls/libc functions.

  Varies on ASSERTIONS: the debug variation keeps NDEBUG undefined so the
  stubs can report when they are hit.
  """
  name = 'libstubs'
  cflags = ['-O2']
  src_dir = 'system/lib/libc'
  src_files = ['emscripten_syscall_stubs.c', 'emscripten_libc_stubs.c']
  def __init__(self, **kwargs):
    # Variation flag: whether assertions/debug reporting are enabled.
    self.is_debug = kwargs.pop('is_debug')
    super().__init__(**kwargs)
  def get_base_name(self):
    name = super().get_base_name()
    if self.is_debug:
      name += '-debug'
    return name
  def get_cflags(self):
    cflags = super().get_cflags()
    if self.is_debug:
      cflags += ['-UNDEBUG']
    else:
      cflags += ['-DNDEBUG']
    return cflags
  @classmethod
  def vary_on(cls):
    return super().vary_on() + ['is_debug']
  @classmethod
  def get_default_variation(cls, **kwargs):
    return super().get_default_variation(is_debug=settings.ASSERTIONS, **kwargs)
# If main() is not in EXPORTED_FUNCTIONS, it may be dce'd out. This can be
# confusing, so issue a warning.
def warn_on_unexported_main(symbolses):
  """Warn if any input file defines main() but '_main' is not exported.

  `symbolses` is a list of {'defs': set, 'undefs': set} symbol tables, one
  per input file (as produced by building.llvm_nm_multiple).
  """
  # In STANDALONE_WASM we don't expect main to be explicitly exported
  if settings.STANDALONE_WASM:
    return
  if '_main' not in settings.EXPORTED_FUNCTIONS:
    for symbols in symbolses:
      if 'main' in symbols['defs']:
        logger.warning('main() is in the input files, but "_main" is not in EXPORTED_FUNCTIONS, which means it may be eliminated as dead code. Export it if you want main() to run.')
        return
def handle_reverse_deps(input_files):
  """Add exports required by reverse dependencies of the input files.

  Behavior depends on settings.REVERSE_DEPS: 'none' does nothing, 'all'
  exports every known reverse dependency, and 'auto' scans the input files'
  undefined symbols and transitively exports only what they need.
  """
  if settings.REVERSE_DEPS == 'none':
    return
  elif settings.REVERSE_DEPS == 'all':
    # When not optimizing we add all possible reverse dependencies rather
    # than scanning the input files
    for symbols in deps_info.get_deps_info().values():
      for symbol in symbols:
        settings.EXPORTED_FUNCTIONS.append(mangle_c_symbol_name(symbol))
    return
  if settings.REVERSE_DEPS != 'auto':
    shared.exit_with_error(f'invalid values for REVERSE_DEPS: {settings.REVERSE_DEPS}')
  added = set()
  def add_reverse_deps(need):
    # Export deps of every undefined symbol in `need`; repeats until a
    # fixed point is reached (deps can themselves have deps).
    more = False
    for ident, deps in deps_info.get_deps_info().items():
      if ident in need['undefs'] and ident not in added:
        added.add(ident)
        more = True
        for dep in deps:
          need['undefs'].add(dep)
          logger.debug('adding dependency on %s due to deps-info on %s' % (dep, ident))
          settings.EXPORTED_FUNCTIONS.append(mangle_c_symbol_name(dep))
    if more:
      add_reverse_deps(need) # recurse to get deps of deps
  # Scan symbols
  symbolses = building.llvm_nm_multiple([os.path.abspath(t) for t in input_files])
  warn_on_unexported_main(symbolses)
  if len(symbolses) == 0:
    symbolses.append({'defs': set(), 'undefs': set()})
  # depend on exported functions
  for export in settings.EXPORTED_FUNCTIONS:
    if settings.VERBOSE:
      logger.debug('adding dependency on export %s' % export)
    symbolses[0]['undefs'].add(demangle_c_symbol_name(export))
  for symbols in symbolses:
    add_reverse_deps(symbols)
def calculate(input_files, forced):
  """Compute the list of system-library link flags for the current settings.

  Args:
    input_files: object files/archives to scan for reverse dependencies.
    forced: list of library names that must be included regardless of need.

  Returns:
    A list of linker flags, with forced archives wrapped in
    --whole-archive/--no-whole-archive pairs as needed.
  """
  # Setting this will only use the forced libs in EMCC_FORCE_STDLIBS. This avoids spending time checking
  # for unresolved symbols in your project files, which can speed up linking, but if you do not have
  # the proper list of actually needed libraries, errors can occur. See below for how we must
  # export all the symbols in deps_info when using this option.
  only_forced = os.environ.get('EMCC_ONLY_FORCED_STDLIBS')
  if only_forced:
    # One of the purposes of EMCC_ONLY_FORCED_STDLIBS was to skip the scanning
    # of the input files for reverse dependencies.
    diagnostics.warning('deprecated', 'EMCC_ONLY_FORCED_STDLIBS is deprecated. Use `-nostdlib` and/or `-s REVERSE_DEPS=none` depending on the desired result')
    settings.REVERSE_DEPS = 'all'
  handle_reverse_deps(input_files)
  libs_to_link = []
  already_included = set()
  system_libs_map = Library.get_usable_variations()
  # Setting this in the environment will avoid checking dependencies and make
  # building big projects a little faster. 1 means include everything; otherwise
  # it can be the name of a lib (libc++, etc.).
  # You can provide 1 to include everything, or a comma-separated list with the
  # ones you want
  force = os.environ.get('EMCC_FORCE_STDLIBS')
  if force == '1':
    force = ','.join(name for name, lib in system_libs_map.items() if not lib.never_force)
  force_include = set((force.split(',') if force else []) + forced)
  if force_include:
    logger.debug(f'forcing stdlibs: {force_include}')
  def add_library(libname):
    # Append a library's link flag, at most once per library.
    lib = system_libs_map[libname]
    if lib.name in already_included:
      return
    already_included.add(lib.name)
    logger.debug('including %s (%s)' % (lib.name, lib.get_filename()))
    need_whole_archive = lib.name in force_include and lib.get_ext() == '.a'
    libs_to_link.append((lib.get_link_flag(), need_whole_archive))
  if settings.USE_PTHREADS:
    add_library('crtbegin')
  if settings.SIDE_MODULE:
    return [l[0] for l in libs_to_link]
  if settings.STANDALONE_WASM:
    if settings.EXPECT_MAIN:
      add_library('crt1')
    else:
      add_library('crt1_reactor')
  # Renamed from `forced` to avoid shadowing the function parameter.
  for forced_name in force_include:
    if forced_name not in system_libs_map:
      shared.exit_with_error('invalid forced library: %s', forced_name)
    add_library(forced_name)
  if only_forced:
    add_library('libc_rt_wasm')
    add_library('libcompiler_rt')
  else:
    if settings.AUTO_NATIVE_LIBRARIES:
      add_library('libGL')
      add_library('libal')
      add_library('libhtml5')
    sanitize = settings.USE_LSAN or settings.USE_ASAN or settings.UBSAN_RUNTIME
    # JS math must come before anything else, so that it overrides the normal
    # libc math.
    if settings.JS_MATH:
      add_library('libjsmath')
    # to override the normal libc printf, we must come before it
    if settings.PRINTF_LONG_DOUBLE:
      add_library('libprintf_long_double')
    if settings.ALLOW_UNIMPLEMENTED_SYSCALLS:
      add_library('libstubs')
    add_library('libc')
    add_library('libcompiler_rt')
    if settings.LINK_AS_CXX:
      add_library('libc++')
    if settings.LINK_AS_CXX or sanitize:
      add_library('libc++abi')
      if settings.EXCEPTION_HANDLING:
        add_library('libunwind')
    if settings.MALLOC != 'none':
      add_library('libmalloc')
    if settings.STANDALONE_WASM:
      add_library('libstandalonewasm')
    add_library('libc_rt_wasm')
    if settings.USE_LSAN:
      force_include.add('liblsan_rt')
      add_library('liblsan_rt')
    if settings.USE_ASAN:
      force_include.add('libasan_rt')
      add_library('libasan_rt')
      add_library('libasan_js')
    if settings.UBSAN_RUNTIME == 1:
      add_library('libubsan_minimal_rt_wasm')
    elif settings.UBSAN_RUNTIME == 2:
      add_library('libubsan_rt')
    if settings.USE_LSAN or settings.USE_ASAN:
      add_library('liblsan_common_rt')
    if sanitize:
      add_library('libsanitizer_common_rt')
    # the sanitizer runtimes may call mmap, which will need a few things. sadly
    # the usual deps_info mechanism does not work since we scan only user files
    # for things, and not libraries (to be able to scan libraries, we'd need to
    # somehow figure out which of their object files will actually be linked in -
    # but only lld knows that). so just directly handle that here.
    if sanitize:
      settings.EXPORTED_FUNCTIONS.append(mangle_c_symbol_name('memset'))
    if settings.PROXY_POSIX_SOCKETS:
      add_library('libsockets_proxy')
    else:
      add_library('libsockets')
    if settings.USE_WEBGPU:
      add_library('libwebgpu_cpp')
  # When LINKABLE is set the entire link command line is wrapped in --whole-archive by
  # building.link_ldd. And since --whole-archive/--no-whole-archive processing does not nest we
  # shouldn't add any extra `--no-whole-archive` or we will undo the intent of building.link_ldd.
  if settings.LINKABLE:
    return [l[0] for l in libs_to_link]
  # Wrap libraries in --whole-archive, as needed. We need to do this last
  # since otherwise the abort sorting won't make sense.
  ret = []
  in_group = False
  for name, need_whole_archive in libs_to_link:
    if need_whole_archive and not in_group:
      ret.append('--whole-archive')
      in_group = True
    if in_group and not need_whole_archive:
      ret.append('--no-whole-archive')
      in_group = False
    ret.append(name)
  if in_group:
    ret.append('--no-whole-archive')
  return ret
# Once we require python 3.8 we can use shutil.copytree with
# dirs_exist_ok=True and remove this function.
def copytree_exist_ok(src, dst):
  """Recursively copy `src` into `dst`, tolerating pre-existing directories.

  Poor man's shutil.copytree(src, dst, dirs_exist_ok=True).
  """
  os.makedirs(dst, exist_ok=True)
  for child in os.scandir(src):
    target = os.path.join(dst, child.name)
    if child.is_dir():
      copytree_exist_ok(child.path, target)
    else:
      shared.safe_copy(child.path, target)
def install_system_headers(stamp):
  """Copy the system headers, pkg-config files and helper binaries into the
  cached sysroot, then write `stamp` to mark the installation as done.

  Returns the stamp path (the value cached by shared.Cache.get).
  """
  install_dirs = {
    ('include',): '',
    ('lib', 'compiler-rt', 'include'): '',
    ('lib', 'libunwind', 'include'): '',
    # Copy the generic arch files first then
    ('lib', 'libc', 'musl', 'arch', 'generic'): '',
    # Then overlay the emscripten directory on top.
    # This mimics how musl itself installs its headers.
    ('lib', 'libc', 'musl', 'arch', 'emscripten'): '',
    ('lib', 'libc', 'musl', 'include'): '',
    ('lib', 'libcxx', 'include'): os.path.join('c++', 'v1'),
    ('lib', 'libcxxabi', 'include'): os.path.join('c++', 'v1'),
  }
  target_include_dir = shared.Cache.get_include_dir()
  for src, dest in install_dirs.items():
    src = utils.path_from_root('system', *src)
    dest = os.path.join(target_include_dir, dest)
    copytree_exist_ok(src, dest)
  pkgconfig_src = utils.path_from_root('system/lib/pkgconfig')
  pkgconfig_dest = shared.Cache.get_sysroot_dir('lib', 'pkgconfig')
  copytree_exist_ok(pkgconfig_src, pkgconfig_dest)
  bin_src = utils.path_from_root('system/bin')
  bin_dest = shared.Cache.get_sysroot_dir('bin')
  copytree_exist_ok(bin_src, bin_dest)
  # Create a stamp file that signals that the headers have been installed.
  # Removing this file, or running `emcc --clear-cache` or running
  # `./embuilder build sysroot --force` will cause the re-installation of
  # the system headers.
  utils.write_file(stamp, 'x')
  return stamp
@ToolchainProfiler.profile_block('ensure_sysroot')
def ensure_sysroot():
  """Install the system headers into the cache sysroot if not already done."""
  shared.Cache.get('sysroot_install.stamp', install_system_headers, what='system headers')
| 32.473396 | 181 | 0.673795 |
4f1c0b73f3eca838e4d79640dc558931a4eba9f9 | 24,094 | py | Python | lsdr/envs/environment_sampler.py | melfm/lsdr | 36b0a85e970fdcaae828eeff6c147432aa767c93 | [
"MIT"
] | 3 | 2019-09-20T19:10:50.000Z | 2021-12-30T02:55:21.000Z | lsdr/envs/environment_sampler.py | melfm/lsdr | 36b0a85e970fdcaae828eeff6c147432aa767c93 | [
"MIT"
] | null | null | null | lsdr/envs/environment_sampler.py | melfm/lsdr | 36b0a85e970fdcaae828eeff6c147432aa767c93 | [
"MIT"
] | 1 | 2020-08-01T21:28:12.000Z | 2020-08-01T21:28:12.000Z | import numpy as np
import torch
from functools import partial
from lsdr.envs.classic_control import cartpole, mountain_car
from lsdr.envs.box2d import lunar_lander
from lsdr.envs.mujoco import (hopper, cartpole as mj_cartpole, half_cheetah,
humanoid)
available_envs = [
'cartpole', 'lunar-lander', 'mountain-car',
'cartpole-swingup', 'cartpole-swingup-pl', 'cartpole-swingup-pm',
'cartpole-swingup-cm', 'cartpole-swingup-cd', 'cartpole-swingup-pole-only',
'humanoid', 'hopper', 'hopper-torso-only', 'hopper-density-only',
'hopper-damping-only', 'hopper-friction-only', 'half-cheetah',
'half-cheetah-torso', 'half-cheetah-density', 'half-cheetah-friction',
'half-cheetah-damping'
]
class Delta(torch.distributions.Uniform):
    """Degenerate (point-mass) context distribution.

    Modeled as a Uniform whose lower and upper bounds coincide, so every
    sample is exactly `value`. `validate_args` is forwarded unchanged;
    NOTE(review): recent torch versions reject low == high when argument
    validation is on — confirm callers' validate_args handling.
    """

    def __init__(self, value, validate_args=None):
        super().__init__(value, value, validate_args=validate_args)
class Discrete(torch.distributions.Categorical):
    def __init__(self, cells_per_dim, ranges, params, validate_args=None):
        """Implements context distribution with a discrete (categorical
        distribution). To enable learning the distribution parameters,
        we override the @lazy_property attributes of the Categorical
        super class.

        Args:
            cells_per_dim: long tensor; number of cells per context
                dimension (a single value is broadcast to all dimensions).
            ranges: (D, 2) tensor of [low, high] per context dimension.
            params: list whose first element is the trainable logits tensor.
        """
        # parameters defining the support of this distribution
        self.lo = ranges[:, 0]
        self.hi = ranges[:, 1]
        self.widths = self.hi - self.lo
        # parameters defining the cells per context dimension
        self.ncells = cells_per_dim
        if self.ncells.numel() == 1:
            self.ncells = self.ncells.expand_as(self.lo)
        # Helper variable for converting multidimensional coordinates to
        # linear indices, since the underlying categorical distribution is 1D
        self.cumprods = torch.cat(
            [torch.tensor([1]),
             self.ncells.cumprod(-1)[:-1].long()], -1)
        # Keep a direct reference so an optimizer can update the logits.
        self._logits = params[0]
        super().__init__(logits=params[0], validate_args=validate_args)
    @property
    def logits(self):
        # ensure the logits correspond to a valid discrete distribution
        # (probs sum to one)
        return self._logits - self._logits.logsumexp(dim=-1, keepdim=True)
    @logits.setter
    def logits(self, x):
        # only copy the data, don't replace self._logits (since we want to
        # update them with an optimizer)
        self._logits.data = x.data
    @property
    def probs(self):
        # use the current logits to compute the probs
        return torch.distributions.utils.logits_to_probs(self.logits)
    def to_category(self, values):
        """Map continuous context values to flat category indices."""
        # transform incoming values into cell coordinates
        coords = (self.ncells.float() * (values - self.lo) /
                  self.widths).floor().long()
        # ensure the coords do not exceed the indices per dimension
        coords = torch.min(coords, self.ncells - 1)
        # convert coordinates into category indices
        idxs = (self.cumprods * coords).sum(-1)
        return idxs
    def from_category(self, categories):
        """Map flat category indices back to continuous context samples."""
        # convert flat indices to coordinates (Fortran order matches the
        # cumprods linearization used in to_category)
        coords = torch.tensor(
            np.unravel_index(categories,
                             self.ncells.numpy().tolist(), 'F')).t().float()
        # convert coords into samples from each cell
        # adding U[0,1] samples to get values other than the
        # lower limits of a cell
        u = torch.rand_like(coords)
        values = (coords + u) * self.widths / self.ncells.float() + self.lo
        return values
    def sample(self, sample_shape=torch.Size()):
        # Sample category indices, then map them to continuous contexts.
        cat_samples = super(Discrete, self).sample(sample_shape)
        if len(sample_shape) == 0:
            return self.from_category([cat_samples])[0]
        return self.from_category(cat_samples)
    def log_prob(self, value):
        # Log-probability of the cell that `value` falls into.
        return super(Discrete, self).log_prob(self.to_category(value))
class SafeMultivariateNormal(torch.distributions.MultivariateNormal):
    """MultivariateNormal parameterized by a trainable Cholesky factor.

    The factor is stored as an off-diagonal part plus the log of the
    diagonal, guaranteeing positive-definiteness during optimization.
    When `ranges` is given, the parameters are rescaled in place so that
    optimization happens in a normalized space while `loc`/`scale_tril`
    are reported in the original context units.
    """
    def __init__(self,
                 loc,
                 covariance_matrix=None,
                 precision_matrix=None,
                 scale_tril_offdiag=None,
                 log_scale_tril_diag=None,
                 validate_args=None,
                 ranges=None):
        D = scale_tril_offdiag.size(0)
        self.scale_tril_offdiag = scale_tril_offdiag
        self.log_scale_tril_diag = log_scale_tril_diag
        if ranges is not None:
            self.lo = ranges[:, 0]
            self.hi = ranges[:, 1]
            # sqrt(12) normalizes by the std of a uniform over [lo, hi].
            self.widths = (self.hi - self.lo) / (12**0.5)
            self.center = (self.lo + self.hi) / 2.0
            L_off = self.scale_tril_offdiag
            log_L_diag = self.log_scale_tril_diag
            # Rescale the factor parameters in place (.data, so gradients
            # still flow through the stored parameter tensors).
            self.scale_tril_offdiag.data = (L_off.t() / self.widths).t()
            self.log_scale_tril_diag.data = log_L_diag - torch.log(self.widths)
        scale_tril = self.scale_tril_offdiag + torch.eye(D) * torch.exp(
            self.log_scale_tril_diag)
        super(SafeMultivariateNormal, self).__init__(
            loc,
            covariance_matrix=covariance_matrix,
            precision_matrix=precision_matrix,
            scale_tril=scale_tril,
            validate_args=validate_args)
        if ranges is not None:
            # Store loc in normalized space; the `loc` property converts back.
            self._loc = loc
            self._loc.data = (self._loc.detach() - self.center) / self.widths
            self.update_scale_tril()
            self.scale_tril = self._unbroadcasted_scale_tril
    @property
    def loc(self):
        # Report loc in original units when a rescaling is active.
        if hasattr(self, 'widths'):
            return self._loc * self.widths + self.center
        else:
            return self._loc
    @loc.setter
    def loc(self, loc):
        if hasattr(self, 'widths'):
            self._loc = (loc - self.center) / self.widths
        else:
            self._loc = loc
    def update_scale_tril(self):
        """Rebuild scale_tril from the current trainable parameters."""
        D = self.scale_tril_offdiag.size(0)
        # Keep only the strictly-lower triangle of the off-diagonal part.
        mask = torch.ones(D, D).tril(-1)
        L = (self.scale_tril_offdiag * mask +
             torch.eye(D) * torch.exp(self.log_scale_tril_diag))
        if hasattr(self, 'widths'):
            L = ((L.t()) * self.widths).t()
        self._unbroadcasted_scale_tril = L
        self.scale_tril.data = L.data
    def sample(self, *args, **kwargs):
        # Refresh the factor first so samples reflect updated parameters.
        self.update_scale_tril()
        return super(SafeMultivariateNormal, self).sample(*args, **kwargs)
    def log_prob(self, value):
        self.update_scale_tril()
        return super(SafeMultivariateNormal, self).log_prob(value)
    def entropy(self):
        self.update_scale_tril()
        return super(SafeMultivariateNormal, self).entropy()
class EnvSampler(object):
    """Env Class used for sampling environments with different
    simulation parameters, from a distribution specified by
    self.dist
    """
    def __init__(self,
                 env_constructor,
                 dist,
                 params,
                 seed,
                 env_name,
                 test_dist=None):
        # dist/test_dist must be torch distributions over context vectors.
        assert isinstance(dist, torch.distributions.Distribution)
        if test_dist:
            assert isinstance(test_dist, torch.distributions.Distribution)
        self.train_dist = dist
        self.test_dist = test_dist
        self.env_constructor = env_constructor
        self.params = params
        self.seed = seed
        torch.manual_seed(seed)
        self.env_name = env_name
    def parameters(self):
        """Return the trainable distribution parameters."""
        return self.params
    def sample(self, n_samples=1, return_ctxs=False, quiet=True,
               distr='train'):
        """Sample `n_samples` environments from the train or test distribution.

        Contexts that fail to construct a valid environment yield None in
        the returned list. With n_samples == 1 a single env (or None) is
        returned instead of a list.
        """
        if distr == 'train':
            ctxs = self.train_dist.sample(torch.Size([n_samples]))
        elif distr == 'test':
            ctxs = self.test_dist.sample(torch.Size([n_samples]))
        else:
            raise ValueError('Invalid distribution type.')
        envs = []
        for ctx in ctxs:
            ctx_ = ctx.cpu().numpy()
            try:
                env = self.env_constructor(*ctx_.tolist())
                env.seed(self.seed)
                # Attach the sampled context so callers can inspect it.
                setattr(env, 'context', ctx)
            except Exception:
                # Invalid contexts are reported as None rather than raising.
                if not quiet:
                    import traceback
                    traceback.print_exc()
                env = None
            envs.append(env)
        if len(envs) == 1:
            envs = envs[0]
            ctxs = ctxs[0]
        if return_ctxs:
            return envs, ctxs
        else:
            return envs
    def get_observation_space(self, cat_context=False, full_dim=False):
        """Return the observation-space shape of a sampled environment.

        With cat_context and full_dim, the first dimension is widened by
        the context length; with cat_context alone, the shape and the
        context length are returned separately.
        """
        env = None
        # Resample until a context yields a constructible environment.
        while env is None:
            env = self.sample()
        obs_dim = env.observation_space.shape
        if cat_context:
            ctx = env.context
            if full_dim:
                obs_dim = list(obs_dim)
                obs_dim[0] += len(ctx)
                obs_dim = tuple(obs_dim)
                return obs_dim
            return obs_dim, len(ctx)
        else:
            return obs_dim
def sample_env(context_sampler, distr='train', env_steps=5):
    """Sample a single environment and smoke-test it for `env_steps` steps.

    Keeps resampling until an environment both constructs and survives a
    short random-action rollout.
    NOTE(review): if the distribution never yields a valid env this loops
    forever — consider a retry limit.
    """
    valid_env = False
    # Keep trying until a valid env is found.
    while not valid_env:
        try:
            env = context_sampler.sample(distr=distr)
            if env is not None:
                env.reset()
                for i in range(env_steps):
                    env.step(env.action_space.sample())
                valid_env = True
        except Exception:
            # A failing rollout just triggers another sample.
            import traceback
            traceback.print_exc()
            valid_env = False
    return env
def reacher_constructor(z1):
    # Build a Reacher env with the sampled mass context.
    # NOTE(review): `reacher` is not imported at the top of this file, so
    # calling this raises NameError — the import appears to be missing.
    return reacher.Reacher(mass=z1)
# Context-to-environment constructors. Each maps a sampled context vector
# (passed positionally by EnvSampler.sample) onto the corresponding env's
# randomized physical parameters.
def lunar_lander_constructor(z1, z2, z3):
    # Context: leg spring torque, main engine power, side engine power.
    return lunar_lander.LunarLander(
        leg_spring_torque=z1, main_engine_power=z2, side_engine_power=z3)
def cartpole_swingup_constructor(z1, z2, z3, z4):
    # Context: cart mass, pole mass, pole length, cart damping.
    return mj_cartpole.Cartpole(
        cart_mass=z1, pole_mass=z2, pole_length=z3, cart_damping=z4)
def cartpole_swingup_pole_only_constructor(z1, z2):
    return mj_cartpole.Cartpole(pole_mass=z1, pole_length=z2)
def cartpole_swingup_cart_mass_constructor(z1):
    return mj_cartpole.Cartpole(cart_mass=z1)
def cartpole_swingup_pole_mass_constructor(z2):
    return mj_cartpole.Cartpole(pole_mass=z2)
def cartpole_swingup_pole_length_constructor(z3):
    return mj_cartpole.Cartpole(pole_length=z3)
def cartpole_swingup_cart_damp_constructor(z4):
    return mj_cartpole.Cartpole(cart_damping=z4)
def hopper_constructor(z1, z2, z3, experiment_id=None):
    # Context: foot friction, torso size, joint damping.
    return hopper.HopperEnv(
        foot_friction=z1,
        torso_size=z2,
        joint_damping=z3,
        experiment_id=experiment_id)
def hopper_torso_only_constructor(z1, experiment_id=None):
    return hopper.HopperEnv(torso_size=z1, experiment_id=experiment_id)
def hopper_density_only_constructor(z1, experiment_id=None):
    return hopper.HopperEnv(torso_density=z1, experiment_id=experiment_id)
def hopper_damping_only_constructor(z1, experiment_id=None):
    return hopper.HopperEnv(joint_damping=z1, experiment_id=experiment_id)
def hopper_friction_only_constructor(z1, experiment_id=None):
    return hopper.HopperEnv(foot_friction=z1, experiment_id=experiment_id)
def half_cheetah_constructor(z1, z2, z3, experiment_id=None):
    # Context: friction, torso size, joint damping.
    return half_cheetah.HalfCheetahEnv(
        friction=z1,
        torso_size=z2,
        joint_damping=z3,
        experiment_id=experiment_id)
def half_cheetah_torso_only_constructor(z1, experiment_id=None):
    return half_cheetah.HalfCheetahEnv(
        torso_size=z1, experiment_id=experiment_id)
def half_cheetah_density_constructor(z1, experiment_id=None):
    return half_cheetah.HalfCheetahEnv(
        torso_density=z1, experiment_id=experiment_id)
def half_cheetah_damping_only_constructor(z1, experiment_id=None):
    return half_cheetah.HalfCheetahEnv(
        joint_damping=z1, experiment_id=experiment_id)
def half_cheetah_friction_constructor(z1, experiment_id=None):
    return half_cheetah.HalfCheetahEnv(
        friction=z1, experiment_id=experiment_id)
def humanoid_constructor(z1, z2, z3):
    # Context: wind (applied on two axes), gravity, air viscosity.
    return humanoid.HumanoidEnv(wind=[z1, z1], gravity=z2, air_viscosity=z3)
def init_dist(params,
              test_dist_params=None,
              dist_type='gaussian',
              rescale=True):
    """Initialize train (and optionally test) context distributions.

    Args:
        params: distribution parameters; format depends on dist_type
            ('gaussian': (mu, L); 'uniform'/'discrete': ranges [+ ncells];
            'delta': the fixed context values).
        test_dist_params: ranges + cell count for a discrete/uniform test
            distribution, or None.
        dist_type: 'gaussian' | 'uniform' | 'delta' | 'discrete'.
        rescale: for 'gaussian', normalize parameters by the test ranges.

    Returns:
        (train_dist, trainable_params, test_dist_or_None).
    """
    if dist_type == 'gaussian':
        mu, L = params
        if isinstance(mu, np.ndarray):
            mu = torch.tensor(mu)
        if isinstance(L, np.ndarray):
            L = torch.tensor(L)
        mu = mu.float().detach()
        L = L.float().detach()
        D = L.size(0)
        # Split L into a trainable strictly-lower part and log-diagonal.
        mask = torch.ones(D, D).tril(-1).byte()
        L_off = torch.zeros(D, D).float()
        L_off[mask] = L[mask]
        log_L_diag = torch.log(L.diag())
        mu.requires_grad_(True)
        L_off.requires_grad_(True)
        log_L_diag.requires_grad_(True)
        # initialize distribution
        test_dist = None
        rngs = None
        if test_dist_params is not None:
            # We have the case of gaussian train distr and uniform test;
            # at least for now assume that is the case (because it could
            # also be just gaussian ...
            test_ranges, test_ncells = \
                test_dist_params[:-1], test_dist_params[-1]
            test_ranges_tensor = torch.tensor(
                test_ranges).float().detach().squeeze(0)
            test_ncells_tensor = torch.tensor(test_ncells).long()
            test_params = create_discrete_distr_params(test_ranges_tensor,
                                                       test_ncells_tensor)
            test_dist = Discrete(
                test_ncells_tensor, test_ranges_tensor, params=test_params)
            if rescale:
                rngs = test_ranges_tensor
        dist = SafeMultivariateNormal(
            mu,
            scale_tril_offdiag=L_off,
            log_scale_tril_diag=log_L_diag,
            ranges=rngs)
        train_params = [mu, L_off, log_L_diag]
        return dist, train_params, test_dist
    elif dist_type == 'uniform':
        if len(params) >= 2:
            # Need this hack because `discrete` distr will try a uniform
            # distr for during transfer and its forcing dist_type of
            # uniform to enter here.
            params = params[:-1]
        # Create two distributions, one for train and one for test
        R = torch.tensor(params).float().detach().requires_grad_()
        lo, hi = R[:, 0], R[:, 1]
        train_dist = torch.distributions.Uniform(lo, hi)
        params = [R]
        # The parameter coming in, is designed for discrete distr
        # So either do a good fix, or take the first range and
        # ignore the bin parameter.
        range_test = torch.tensor([test_dist_params[:-1]]).float()
        if range_test.shape[0] == 1:
            range_test = torch.squeeze(range_test, dim=0)
        lo, hi = range_test[:, 0], range_test[:, 1]
        test_dist = torch.distributions.Uniform(lo, hi)
        return train_dist, params, test_dist
    elif dist_type == 'delta':
        context = torch.tensor(params).flatten().requires_grad_()
        dist = Delta(context)
        # NOTE(review): train_params is built here but the fall-through
        # return below returns the raw `params` instead — confirm intent.
        train_params = [context]
    elif dist_type == 'discrete':
        ranges, ncells = params[:-1], params[-1]
        ranges_tensor = torch.tensor(ranges).float().detach()
        # Why are these type conversions needed?
        # Who knows, this is how pytorch does it.
        train_ncells_tensor = torch.tensor(ncells).long()
        train_params = create_discrete_distr_params(ranges_tensor,
                                                    train_ncells_tensor)
        dist = Discrete(
            train_ncells_tensor, ranges_tensor, params=train_params)
        # Create test distribution
        if test_dist_params is None:
            raise ValueError(
                'Test distribution must be defined for discrete distr.')
        test_ranges, test_ncells = test_dist_params[:-1], test_dist_params[-1]
        test_ranges_tensor = torch.tensor(test_ranges).float().detach()
        test_ncells_tensor = torch.tensor(test_ncells).long()
        test_params = create_discrete_distr_params(test_ranges_tensor,
                                                   test_ncells_tensor)
        test_dist = Discrete(
            test_ncells_tensor, test_ranges_tensor, params=test_params)
        return dist, train_params, test_dist
    return dist, params, None
def create_discrete_distr_params(ranges_tensor, ncells_tensor):
    """Build trainable logits for a uniform discrete context distribution.

    Args:
        ranges_tensor: (D, 2) tensor of per-dimension [low, high] ranges
            (used only to broadcast a scalar cell count to D dimensions).
        ncells_tensor: long tensor of cells per dimension (scalar or (D,)).

    Returns:
        Single-element list holding a flat, requires-grad logits tensor
        over all prod(ncells) categories, initialized uniformly.
    """
    if ncells_tensor.numel() == 1:
        ncells_tensor = ncells_tensor.expand_as(ranges_tensor[:, 0])
    n_categories = int(torch.prod(ncells_tensor).item())
    uniform_probs = torch.full((n_categories,), 1.0 / n_categories)
    logits = torch.distributions.utils.probs_to_logits(uniform_probs)
    return [logits.requires_grad_()]
def create_def_gaussian(mean_array, noise_std):
    """Build default (mean, Cholesky factor) parameters for a Gaussian
    context distribution.

    Args:
        mean_array: per-dimension means (list/array-like).
        noise_std: scale applied to the random covariance and to the
            mean-proportional diagonal term.

    Returns:
        Tuple (mu, L): 1-D mean tensor and lower-triangular Cholesky
        factor of the generated covariance.
    """
    mu = torch.tensor(mean_array)
    # Random PSD term plus a mean-proportional diagonal. NOTE: the result
    # may not be positive definite for non-positive means, in which case
    # the Cholesky factorization below raises.
    S = torch.rand(mu.shape[-1], mu.shape[-1])
    S = noise_std * S.mm(S.t()) + noise_std * mu.detach() * torch.eye(
        S.shape[0])
    # torch.cholesky() is deprecated (removed in recent torch);
    # torch.linalg.cholesky returns the same lower-triangular factor.
    L = torch.linalg.cholesky(S).clone().detach()
    init_dist_params = mu, L
    return init_dist_params
def init_env_sampler(env_name,
                     seed=None,
                     experiment_id=None,
                     default_env=False,
                     init_dist_params=None,
                     test_dist_params=None,
                     dist_type='gaussian',
                     rescale=True):
    """Build an ``EnvSampler`` for ``env_name`` with a context distribution.

    Args:
        env_name: which environment family to construct.
        seed: RNG seed forwarded to the ``EnvSampler``.
        experiment_id: forwarded to the mujoco-style env constructors.
        default_env: if True, use the unmodified default environment
            instead of the context-parameterised constructor (where the
            branch supports it).
        init_dist_params: parameters of the initial (training) context
            distribution; environment-specific Gaussian defaults are built
            when this is None and ``dist_type`` is 'gaussian'.
        test_dist_params: parameters of the test-time context distribution.
            Defaults to ``[[0.0, 0.1], 100]``; the default lives in the body
            rather than the signature to avoid a mutable default argument.
        dist_type: distribution family, e.g. 'gaussian', 'delta', 'discrete'.
        rescale: forwarded to ``init_dist``.

    Returns:
        An ``EnvSampler`` wrapping the environment constructor, the context
        distribution, its trainable parameters and the test distribution.

    Raises:
        ValueError: if ``env_name`` is not recognised.
    """
    # BUG FIX: the default used to be a mutable list in the signature,
    # shared by every call (classic mutable-default pitfall).
    if test_dist_params is None:
        test_dist_params = [[0.0, 0.1], 100]
    if env_name == "cartpole":
        # the context distribution corresponds to the cart mass, pole mass and
        # pole length
        if dist_type == 'gaussian' and init_dist_params is None:
            init_dist_params = create_def_gaussian([1.0, 0.1, 0.5], 1e-1)
        env_constructor = cartpole.Cartpole
    elif env_name == "reacher1D":
        if dist_type == 'gaussian' and init_dist_params is None:
            init_dist_params = create_def_gaussian([1.0], 1e-1)
        env_constructor = reacher_constructor
    elif env_name == 'lunar-lander':
        # Context: main engine power, side engine power, leg spring constant.
        if dist_type == 'gaussian' and init_dist_params is None:
            init_dist_params = create_def_gaussian([120.0, 20.0, 0.6], 1e-1)
        env_constructor = lunar_lander_constructor if not default_env \
            else lunar_lander.LunarLander
    elif env_name == 'mountain-car':
        # the context distribution corresponds to min_position, max_position,
        # max_speed, goal_position, thrust magnitude and gravity
        if dist_type == 'gaussian' and init_dist_params is None:
            init_dist_params = create_def_gaussian(
                [1.2, 0.6, 0.07, 0.001, -0.0025], 1e-1)
        env_constructor = mountain_car.MountainCarEnv
    elif env_name.startswith('cartpole-swingup-pole-only'):
        env_constructor = cartpole_swingup_pole_only_constructor
        default_params = [0.5, 1.0]
        if dist_type == 'gaussian' and init_dist_params is None:
            init_dist_params = create_def_gaussian(default_params, 1e-2)
    elif env_name.startswith('cartpole-swingup'):
        # Suffix selects which subset of [pole length, pole mass, cart mass,
        # cart damping] forms the context.
        if env_name.endswith('-pl'):
            env_constructor = cartpole_swingup_pole_length_constructor
            idx = slice(0, 1)
        elif env_name.endswith('-pm'):
            env_constructor = cartpole_swingup_pole_mass_constructor
            idx = slice(1, 2)
        elif env_name.endswith('-cm'):
            env_constructor = cartpole_swingup_cart_mass_constructor
            idx = slice(2, 3)
        elif env_name.endswith('-cd'):
            env_constructor = cartpole_swingup_cart_damp_constructor
            idx = slice(3, 4)
        else:
            env_constructor = cartpole_swingup_constructor
            idx = slice(0, 4)
        default_params = [0.5, 0.5, 1.0, 0.1]
        if dist_type == 'gaussian' and init_dist_params is None:
            init_dist_params = create_def_gaussian(default_params[idx], 1e-2)
    elif env_name == 'hopper':
        # Context order : foot friction, torso size, joint damping
        # NOTE(review): the default_env branch *instantiates* HopperEnv()
        # while other branches pass the class/constructor uncalled -- verify
        # EnvSampler accepts an instance here.
        if dist_type == 'gaussian' and init_dist_params is None:
            init_dist_params = create_def_gaussian([2.0, 0.05, 1.0], 1e-1)
        env_constructor = partial(
            hopper_constructor, experiment_id=experiment_id
        ) if not default_env else hopper.HopperEnv()
    elif env_name == 'hopper-torso-only':
        # Context is only torso size
        if dist_type == 'gaussian' and init_dist_params is None:
            init_dist_params = create_def_gaussian([0.05], 1e-1)
        env_constructor = partial(
            hopper_torso_only_constructor, experiment_id=experiment_id
        ) if not default_env else hopper.HopperEnv()
    elif env_name == 'hopper-density-only':
        # Context is only body density
        if dist_type == 'gaussian' and init_dist_params is None:
            init_dist_params = create_def_gaussian([1000.0], 1e-1)
        env_constructor = partial(
            hopper_density_only_constructor, experiment_id=experiment_id
        ) if not default_env else hopper.HopperEnv()
    elif env_name == 'hopper-damping-only':
        # Context is only damping
        if dist_type == 'gaussian' and init_dist_params is None:
            init_dist_params = create_def_gaussian([0.0], 1e-1)
        env_constructor = partial(
            hopper_damping_only_constructor, experiment_id=experiment_id
        ) if not default_env else hopper.HopperEnv()
    elif env_name == 'hopper-friction-only':
        # Context is only friction
        if dist_type == 'gaussian' and init_dist_params is None:
            init_dist_params = create_def_gaussian([2.0], 1e-1)
        env_constructor = partial(
            hopper_friction_only_constructor, experiment_id=experiment_id
        ) if not default_env else hopper.HopperEnv()
    elif env_name == 'half-cheetah':
        # Context order : torso_size, joint damping, friction
        if dist_type == 'gaussian' and init_dist_params is None:
            init_dist_params = create_def_gaussian([0.046, 0.01, 0.0], 1e-1)
        env_constructor = partial(
            half_cheetah_constructor, experiment_id=experiment_id
        ) if not default_env else half_cheetah.HalfCheetahEnv()
    elif env_name == 'half-cheetah-torso':
        if dist_type == 'gaussian' and init_dist_params is None:
            init_dist_params = create_def_gaussian([0.046], 1e-1)
        env_constructor = partial(
            half_cheetah_torso_only_constructor, experiment_id=experiment_id
        ) if not default_env else half_cheetah.HalfCheetahEnv()
    elif env_name == 'half-cheetah-density':
        if dist_type == 'gaussian' and init_dist_params is None:
            init_dist_params = create_def_gaussian([1000.0], 1e-1)
        env_constructor = partial(
            half_cheetah_density_constructor, experiment_id=experiment_id
        ) if not default_env else half_cheetah.HalfCheetahEnv()
    elif env_name == 'half-cheetah-damping':
        if dist_type == 'gaussian' and init_dist_params is None:
            init_dist_params = create_def_gaussian([0.0], 1e-1)
        env_constructor = partial(
            half_cheetah_damping_only_constructor, experiment_id=experiment_id
        ) if not default_env else half_cheetah.HalfCheetahEnv()
    elif env_name == 'half-cheetah-friction':
        if dist_type == 'gaussian' and init_dist_params is None:
            init_dist_params = create_def_gaussian([0.4], 1e-1)
        env_constructor = partial(
            half_cheetah_friction_constructor, experiment_id=experiment_id
        ) if not default_env else half_cheetah.HalfCheetahEnv()
    elif env_name == 'humanoid':
        if dist_type == 'gaussian' and init_dist_params is None:
            init_dist_params = create_def_gaussian([40, 9.8, 0.1], 1e-1)
        env_constructor = humanoid_constructor
    else:
        raise ValueError('Environment not supported!')
    # Materialise the (train) distribution, its parameter list and the
    # held-out test distribution.
    dist, init_dist_params, test_dist = init_dist(
        init_dist_params, test_dist_params, dist_type, rescale=rescale)
    init_dist_params = list(init_dist_params)
    return EnvSampler(env_constructor, dist, init_dist_params, seed, env_name,
                      test_dist)
| 35.907601 | 79 | 0.632938 |
bab59b99092cd7d300d8365db36c024411f3e3ed | 1,954 | py | Python | bitshares_pricefeed/sources/currencylayer.py | nbs-dex/nbs_pricefeed | 9653517531790ec79245f6982ad72fdc5c52544e | [
"MIT"
] | 16 | 2017-09-19T08:27:53.000Z | 2021-02-14T13:59:12.000Z | bitshares_pricefeed/sources/currencylayer.py | nbs-dex/nbs_pricefeed | 9653517531790ec79245f6982ad72fdc5c52544e | [
"MIT"
] | 51 | 2017-05-18T13:18:06.000Z | 2019-10-18T12:16:22.000Z | bitshares_pricefeed/sources/currencylayer.py | nbs-dex/nbs_pricefeed | 9653517531790ec79245f6982ad72fdc5c52544e | [
"MIT"
] | 28 | 2017-07-11T21:18:17.000Z | 2021-10-02T02:52:31.000Z | import requests
from . import FeedSource, _request_headers
class CurrencyLayer(FeedSource):  # Hourly updated data over http with free subscription
    """Price feed source backed by the currencylayer.com HTTP API.

    Requires the config attributes ``api_key`` and ``free_subscription``.
    A free subscription only supports ``USD`` as the source currency, so
    every other base is skipped in that mode.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if not hasattr(self, "api_key") or not hasattr(self, "free_subscription"):
            raise Exception("CurrencyLayer FeedSource requires 'api_key' and 'free_subscription'")

    def _fetch(self):
        """Fetch quotes for each configured base.

        Returns a nested dict of the form
        ``{base: {quote: {"price": ..., "volume": 1.0}}}``.
        """
        feed = {}
        try:
            for base in self.bases:
                # BUG FIX: the query parameter is "currencies"; the source
                # had been mangled to "¤cies" (HTML entity "&curren;"
                # swallowing "&curren" from "&currencies").
                url = "http://apilayer.net/api/live?access_key=%s&currencies=%s&source=%s&format=1" % (self.api_key, ",".join(self.quotes), base)
                # Free subscriptions only allow USD as the source currency.
                if self.free_subscription and base != 'USD':
                    continue
                response = requests.get(url=url, headers=_request_headers, timeout=self.timeout)
                result = response.json()
                if result.get("source") != base:
                    raise Exception(result.get("description"))
                feed[base] = {}
                for quote in self.quotes:
                    if quote == base:
                        continue
                    # Optional mapping to rename quote symbols in the feed.
                    if hasattr(self, "quoteNames") and quote in self.quoteNames:
                        quoteNew = self.quoteNames[quote]
                    else:
                        quoteNew = quote
                    # API returns base->quote rates; invert to price the base.
                    feed[base][quoteNew] = {
                        "price": 1 / result["quotes"][base + quote],
                        "volume": 1.0}
        except Exception as e:
            raise Exception("\nError fetching results from {1}! ({0})".format(str(e), type(self).__name__))
        return feed
| 46.52381 | 145 | 0.504606 |
22ea140de1070b88620206b7edc17069a3504dff | 1,413 | py | Python | lesson-10/ex3.py | alirsamar/intro-ml | 36450b26b7ea09472ccdd2a0abce51b6c3889a20 | [
"MIT"
] | null | null | null | lesson-10/ex3.py | alirsamar/intro-ml | 36450b26b7ea09472ccdd2a0abce51b6c3889a20 | [
"MIT"
] | null | null | null | lesson-10/ex3.py | alirsamar/intro-ml | 36450b26b7ea09472ccdd2a0abce51b6c3889a20 | [
"MIT"
] | null | null | null | #!/usr/bin/python
#### Boilerplate #################################################################
# Python 2 script: builds word/author datasets from the Enron
# "from_sara"/"from_chris" email lists for the Udacity text-learning exercise.
import os
import pickle
import re
import sys
sys.path.append( "./" )
from ex2 import parseOutText
# Index files with one relative email-document path per line.
from_sara = open("../ud120-projects/text_learning/from_sara.txt", "r")
from_chris = open("../ud120-projects/text_learning/from_chris.txt", "r")
# from_data: author labels (0 = Sara, 1 = Chris); word_data: cleaned bodies.
from_data = []
word_data = []
# Caps the run at the first ~200 emails -- presumably a development
# shortcut; remove the guard to process the full corpus.
temp_counter = 0
for name, from_person in [("sara", from_sara), ("chris", from_chris)]:
    for path in from_person:
        temp_counter += 1
        if temp_counter < 200:
            # Each line is a relative path with a trailing newline ([:-1]).
            path = os.path.join('../ud120-projects/', path[:-1])
            email = open(path, "r")
            #### Exercise code #############################################################
            text = parseOutText(email)
            # Drop words that would trivially identify the author.
            taboo = ["sara", "shackleton", "chris", "germani"]
            purified_text = ' '.join([word for word in text.split() if word not in taboo])
            word_data.append(purified_text)
            from_data.append( 0 if (name == 'sara') else 1)
#### Boilerplate #################################################################
            email.close()
print "emails processed"
from_sara.close()
from_chris.close()
# Spot-check one processed document.
print word_data[152]
pickle.dump( word_data, open("your_word_data.pkl", "w") )
pickle.dump( from_data, open("your_email_authors.pkl", "w") )
### in Part 4, do TfIdf vectorization here
| 26.166667 | 90 | 0.53857 |
0054b25262b5fcfc8c89922f7952c64ff6ce7705 | 4,836 | py | Python | pubsub/tests/unit/pubsub_v1/publisher/test_publisher_client.py | bomboradata/bombora-google-cloud-python | 255bbebe6c50490f40fcc3eed40bae1e77e03859 | [
"Apache-2.0"
] | 1 | 2021-01-04T11:40:17.000Z | 2021-01-04T11:40:17.000Z | pubsub/tests/unit/pubsub_v1/publisher/test_publisher_client.py | bomboradata/bombora-google-cloud-python | 255bbebe6c50490f40fcc3eed40bae1e77e03859 | [
"Apache-2.0"
] | null | null | null | pubsub/tests/unit/pubsub_v1/publisher/test_publisher_client.py | bomboradata/bombora-google-cloud-python | 255bbebe6c50490f40fcc3eed40bae1e77e03859 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017, Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import pytest
from google.auth import credentials
from google.cloud.gapic.pubsub.v1 import publisher_client
from google.cloud.pubsub_v1 import publisher
from google.cloud.pubsub_v1 import types
def create_client():
    """Return a publisher Client backed by mocked credentials."""
    fake_creds = mock.Mock(spec=credentials.Credentials)
    return publisher.Client(credentials=fake_creds)
def test_init():
    """A fresh client exposes the GAPIC api and the default batch settings."""
    client = create_client()
    assert isinstance(client.api, publisher_client.PublisherClient)
    settings = client.batch_settings
    assert settings.max_bytes == 5 * (2 ** 20)
    assert settings.max_latency == 0.05
    assert settings.max_messages == 1000
def test_batch_accepting():
    """Establish that an existing batch is returned if it accepts messages."""
    client = create_client()
    message = types.PubsubMessage(data=b'foo')
    n_before = len(client._batches)
    # No batch exists yet: one is created and saved on the client.
    first = client.batch('topic_name', message, autocommit=False)
    assert len(client._batches) == n_before + 1
    assert first is client._batches['topic_name']
    # Asking again hands back the very same batch object.
    second = client.batch('topic_name', message, autocommit=False)
    assert second is first
    assert second is client._batches['topic_name']
def test_batch_without_autocreate():
    """With create=False a missing batch yields None, not a new batch."""
    client = create_client()
    message = types.PubsubMessage(data=b'foo')
    n_before = len(client._batches)
    batch = client.batch('topic_name', message, create=False)
    assert batch is None
    assert len(client._batches) == n_before
def test_publish():
    """publish() forwards each message to the topic's batch."""
    client = create_client()
    # Stand in for the real batch class; claim to accept every message.
    fake_batch = mock.Mock(spec=client._batch_class)
    fake_batch.will_accept.return_value = True
    client._batches['topic_name'] = fake_batch
    # Publish one bare payload and one payload with an attribute.
    client.publish('topic_name', b'spam')
    client.publish('topic_name', b'foo', bar='baz')
    assert fake_batch.publish.call_count == 2
    # First call: raw payload, no attributes.
    _, first_args, _ = fake_batch.publish.mock_calls[0]
    assert first_args[0].data == b'spam'
    assert not first_args[0].attributes
    # Second call: payload plus the keyword attribute.
    _, second_args, _ = fake_batch.publish.mock_calls[1]
    assert second_args[0].data == b'foo'
    assert second_args[0].attributes == {u'bar': u'baz'}
def test_publish_data_not_bytestring_error():
    """Non-bytes payloads are rejected with a TypeError."""
    client = create_client()
    for bad_payload in (u'This is a text string.', 42):
        with pytest.raises(TypeError):
            client.publish('topic_name', bad_payload)
def test_publish_attrs_bytestring():
    """Byte-string attribute values are converted to text on publish."""
    client = create_client()
    # Stand in for the real batch class; claim to accept every message.
    fake_batch = mock.Mock(spec=client._batch_class)
    fake_batch.will_accept.return_value = True
    client._batches['topic_name'] = fake_batch
    client.publish('topic_name', b'foo', bar=b'baz')
    # The attribute should have been sent as text.
    _, call_args, _ = fake_batch.publish.mock_calls[0]
    assert call_args[0].data == b'foo'
    assert call_args[0].attributes == {u'bar': u'baz'}
def test_publish_attrs_type_error():
    """Attribute values that are neither str nor bytes raise TypeError."""
    client = create_client()
    with pytest.raises(TypeError):
        client.publish('topic_name', b'foo', answer=42)
def test_gapic_instance_method():
    """Instance-level GAPIC wrappers delegate to the underlying api object."""
    client = create_client()
    with mock.patch.object(client.api, '_create_topic', autospec=True) as patched:
        client.create_topic('projects/foo/topics/bar')
    assert patched.call_count == 1
    _, call_args, _ = patched.mock_calls[0]
    assert call_args[0] == types.Topic(name='projects/foo/topics/bar')
def test_gapic_class_method():
    """Class-level GAPIC helpers are exposed on the client instance."""
    client = create_client()
    path = client.topic_path('foo', 'bar')
    assert path == 'projects/foo/topics/bar'
| 33.583333 | 78 | 0.70885 |
10b863270e2d1810686fe674c621fa28a22b3606 | 2,017 | py | Python | pmedian/functions/p_median/geojson.py | ibadkureshi/tnk-locationallocation | b06abcb7bf8675b13e4c2e4fe419afb5ee11018f | [
"MIT"
] | 1 | 2021-02-07T10:37:52.000Z | 2021-02-07T10:37:52.000Z | pmedian/functions/p_median/geojson.py | panosprotopapas/tnk-locationallocation | b06abcb7bf8675b13e4c2e4fe419afb5ee11018f | [
"MIT"
] | null | null | null | pmedian/functions/p_median/geojson.py | panosprotopapas/tnk-locationallocation | b06abcb7bf8675b13e4c2e4fe419afb5ee11018f | [
"MIT"
def sol_details(final_population, grid, distances, p_value, supply_coordinates=None):
    """Extract coordinates, site types and assigned demand for the best
    individual of ``final_population``.

    Args:
        final_population: object with a ``best`` attribute listing chosen
            site indices (grid squares first, then supply points).
        grid: sequence of squares exposing ``centre`` and ``demand``.
        distances: 2-D array indexable as ``distances[site, square]``.
        p_value: number of facilities (length of the demand vector).
        supply_coordinates: optional list of (lon, lat) supply-point pairs.

    Returns:
        Tuple ``(coords, kinds, assigned)`` of per-site coordinates, type
        labels and total demand assigned to each chosen site.
    """
    n_grid = len(grid)
    best = final_population.best
    # Supply points are flipped here and flipped back when appended below,
    # so the emitted tuple keeps the caller's original ordering.
    if supply_coordinates:
        supply_coordinates = [(pair[1], pair[0]) for pair in supply_coordinates]
    coords, kinds = [], []
    for site in best:
        if site < n_grid:
            coords.append(grid[site].centre)
            kinds.append("GridSquare")
        else:
            coords.append(supply_coordinates[site - n_grid][::-1])
            kinds.append("Supply Point")
    # Assign each grid square's demand to its nearest chosen site.
    assigned = [0] * p_value
    chosen_sorted = sorted(best)
    for col, square in enumerate(grid):
        nearest = distances[chosen_sorted, col].argmin()
        assigned[nearest] += square.demand
    return coords, kinds, assigned
def save_geojson_output(solution_details, p_value):
    """Write a p-median solution to ``./output/geojson/test_<p_value>``
    as a GeoJSON FeatureCollection (one Point feature per chosen site).

    Args:
        solution_details: tuple ``(coordinates, types, demand)`` as
            returned by ``sol_details``.
        p_value: number of facilities; used in the filename and stored
            under the ``P`` property of every feature.
    """
    # BUG FIX: this module never imported json at top level, so json.dump
    # below raised NameError; import locally to keep the module namespace
    # unchanged.
    import json
    coordinates, types, demand = solution_details
    filename = "test_" + str(p_value)
    path = "./output/geojson/" + filename  # directory must already exist
    output = {"type": "FeatureCollection",
              "name": filename,
              "crs": {"type": "name", "properties": {"name": "urn:ogc:def:crs:OGC:1.3:CRS84"}},
              "features": []}
    for i, (x, y, z) in enumerate(zip(coordinates, types, demand)):
        item = {"type": "Feature",
                "id": i + 1,
                "properties": {
                    "P": p_value,
                    "Name": "???",
                    "DemandWeig": z,
                    "Curr": 0.0,
                    "OrgnstT": y,
                    "Oximetr": None,
                    "AllRPH": 0.0,
                    "AllEACl": 0.0,
                    "CodeRPH": None,
                    "d": None,
                },
                "geometry": {
                    "type": "Point",
                    # NOTE(review): coordinates are serialised as 15-decimal
                    # strings rather than numbers -- confirm consumers expect
                    # this text form.
                    "coordinates": [format(x[0], ".15f"), format(x[1], ".15f")]
                }
                }
        output["features"].append(item)
    with open(path, "w") as json_file:
        json.dump(output, json_file)
| 33.616667 | 95 | 0.497769 |
41a5d37782f372695ffd31a8d42930719e53e075 | 6,587 | py | Python | examples/mixing/example.py | domischi/SpringBox | fbd3f97cf769467d385fb60825df15087cd5658f | [
"MIT"
] | null | null | null | examples/mixing/example.py | domischi/SpringBox | fbd3f97cf769467d385fb60825df15087cd5658f | [
"MIT"
] | null | null | null | examples/mixing/example.py | domischi/SpringBox | fbd3f97cf769467d385fb60825df15087cd5658f | [
"MIT"
] | 1 | 2020-07-02T17:14:14.000Z | 2020-07-02T17:14:14.000Z | from sacred import Experiment, SETTINGS
from sacred.dependencies import PackageDependency
from sacred.observers import FileStorageObserver, MongoObserver
from sacred.utils import apply_backspaces_and_linefeeds
from functools import partial
from copy import deepcopy
from tqdm import tqdm as std_tqdm
tqdm = partial(std_tqdm, ncols=100)
import numpy as np
import time
import datetime
import os
import numba
from numba.errors import NumbaWarning
import warnings
warnings.simplefilter('ignore', category=NumbaWarning)
import SpringBox
from SpringBox.integrator import integrate_one_timestep
from SpringBox.activation import *
from SpringBox.post_run_hooks import post_run_hooks
from SpringBox.measurements import do_measurements, do_one_timestep_correlation_measurement, get_mixing_score
# Sacred experiment: results go to disk via FileStorageObserver (the Mongo
# observer is available but disabled), and the SpringBox version is recorded
# with every run for reproducibility.
ex = Experiment('SpringBox')
#ex.observers.append(MongoObserver.create())
ex.observers.append(FileStorageObserver.create(f'data/'))
ex.dependencies.add(PackageDependency("SpringBox",SpringBox.__version__))
@ex.config
def cfg():
    """Sacred config scope: every local variable name below becomes a
    configuration entry, so the names themselves are part of the API."""
    ## Simulation parameters
    sweep_experiment = False
    mixing_experiment = True
    run_id = 0
    savefreq_fig = 3
    savefreq_data_dump = 3
    # Speeds up the computation somewhat, but incurs an error due to oversmoothing of fluids (which could however be somewhat physical)
    use_interpolated_fluid_velocities = False
    dt=.02
    T=1.
    particle_density = 15.625
    MAKE_VIDEO = True
    SAVEFIG = False
    const_particle_density = False
    measure_one_timestep_correlator = False
    periodic_boundary = True
    ## Geometry parameters / Activation Fn
    activation_fn_type = 'const-rectangle' # For the possible choices, see the activation.py file
    AR=.75
    L=2
    # Particle count scales with the domain area (2L x 2L).
    n_part = int(particle_density * ((2*L)**2))
    if mixing_experiment:
        # Mixing needs an even split into two particle populations.
        assert (n_part % 2 == 0)
    ## Interaction parameters
    # Particle properties
    m_init=1.
    activation_decay_rate = 10. # Ex. at dt=0.01 this leads to an average deactivation of 10% of the particles
    # Spring properties
    spring_cutoff = 2.
    spring_lower_cutoff = 10e-6
    spring_k=1.
    spring_k_rep=1.
    spring_r0=0.
    # LJ properties
    LJ_eps=0.
    LJ_r0=.05
    LJ_cutoff=2.5/1.122*LJ_r0 # canonical choice
    # Brownian properties
    brownian_motion_delta = 0.
    ## Fluid parameters
    mu=10.
    Rdrag = 0.
    drag_factor=1
def get_sim_info(old_sim_info, _config, i):
    """Refresh per-timestep bookkeeping in the simulation-info dict.

    Mutates and returns ``old_sim_info`` with the current time, the square
    domain bounds, and flags saying whether this iteration should plot,
    dump data, fetch fluid velocities or measure the one-step correlator.
    """
    info = old_sim_info
    dt = _config['dt']
    box_half = _config['L']
    total_time = _config['T']
    fig_every = _config['savefreq_fig']
    dump_every = _config['savefreq_data_dump']
    info['t'] = i * dt
    info['time_step_index'] = i
    # Square domain [-L, L] x [-L, L].
    info['x_min'] = info['y_min'] = -box_half
    info['x_max'] = info['y_max'] = box_half
    plot_now = fig_every is not None and i % fig_every == 0
    # Always dump on the last integration step as well.
    dump_now = dump_every is not None and (
        i % dump_every == 0 or i == int(total_time / dt) - 1)
    info['plotting_this_iteration'] = plot_now
    info['data_dump_this_iteration'] = dump_now
    info['get_fluid_velocity_this_iteration'] = plot_now or dump_now
    info['measure_one_timestep_correlator'] = _config.get(
        'measure_one_timestep_correlator', False)
    return info
@ex.automain
def main(_config, _run):
    """Run one SpringBox mixing simulation under sacred.

    Initialises two particle populations on the left/right halves of the
    domain, integrates for T/dt steps, and performs measurement/plotting
    hooks according to the per-step flags from ``get_sim_info``.
    """
    ## Load local copies of the parameters needed in main
    run_id = _config['run_id']
    ## Setup Folders
    timestamp = int(time.time())
    data_dir = f'/tmp/boxspring-{run_id}-{timestamp}'
    os.makedirs(data_dir)
    ## Initialize particles
    # Uniform positions in [-L, L]^2, then overwrite x so the first half of
    # the particles sits in x < 0 and the second half in x > 0 (mixing setup).
    pXs = (np.random.rand(_config['n_part'],2)-.5)*2*_config['L']
    pXs[:_config['n_part']//2,0] = -np.random.rand(_config['n_part']//2)*_config['L']
    pXs[_config['n_part']//2:,0] = +np.random.rand(_config['n_part']//2)*_config['L']
    pVs = np.zeros_like(pXs)
    acc = np.zeros(len(pXs))
    ms = _config['m_init']*np.ones(len(pXs))
    if _config['use_interpolated_fluid_velocities']:
        print('WARNING: Using interpolated fluid velocities can yield disagreements. The interpolation is correct for most points. However, for some the difference can be relatively large.')
    ## Initialize information dict
    sim_info = {'data_dir': data_dir}
    ## Integration loop
    N_steps = int(_config['T']/_config['dt'])
    for i in tqdm(range(N_steps), disable = True):
        # Periodic progress report for long sweep runs.
        if _config['sweep_experiment'] and (i%50)==0:
            print(f"[{datetime.datetime.now()}] Run {_config['run_id']}: Doing step {i+1: >6} of {N_steps}")
        sim_info = get_sim_info(sim_info, _config, i)
        activation_fn = activation_fn_dispatcher(_config, sim_info['t'])
        # Keep a copy of the pre-step positions for the correlator below.
        if sim_info['measure_one_timestep_correlator']:
            pXs_old = deepcopy(pXs)
        pXs, pVs, acc, ms, fXs, fVs = integrate_one_timestep(pXs = pXs,
                                                             pVs = pVs,
                                                             acc = acc,
                                                             ms = ms,
                                                             activation_fn = lambda ps: activation_fn(ps),
                                                             sim_info = sim_info,
                                                             _config = _config,
                                                             get_fluid_velocity=sim_info['get_fluid_velocity_this_iteration'],
                                                             use_interpolated_fluid_velocities=_config['use_interpolated_fluid_velocities'])
        do_measurements(ex = ex,
                        _config = _config,
                        _run = _run,
                        sim_info = sim_info,
                        pXs = pXs,
                        pVs = pVs,
                        acc = acc,
                        ms = ms,
                        fXs = fXs,
                        fVs = fVs,
                        plotting_this_iteration = sim_info['plotting_this_iteration'],
                        save_all_data_this_iteration = sim_info['data_dump_this_iteration'])
        if sim_info['measure_one_timestep_correlator']:
            do_one_timestep_correlation_measurement(ex = ex,
                                                    _config = _config,
                                                    _run = _run,
                                                    sim_info = sim_info,
                                                    pXs = pXs,
                                                    pXs_old = pXs_old)
    post_run_hooks(ex, _config, _run, data_dir)
| 41.16875 | 190 | 0.606042 |
2f1faf17e9f6d9419964b8c180b48ae3a20f320e | 3,358 | py | Python | accounts/permission.py | damondengxin/manager | cce0fc06fa1f09addf46c9cfb213b0e7d39614ac | [
"MIT"
] | null | null | null | accounts/permission.py | damondengxin/manager | cce0fc06fa1f09addf46c9cfb213b0e7d39614ac | [
"MIT"
] | null | null | null | accounts/permission.py | damondengxin/manager | cce0fc06fa1f09addf46c9cfb213b0e7d39614ac | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
from django.urls import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import render, HttpResponse
from django.contrib.auth.decorators import login_required
from .forms import PermissionListForm
from .models import UserInfo, RoleList, PermissionList
def permission_verify():
    """Decorator factory for Django views: redirect to 'noperm' unless the
    requesting user's roles grant access to the requested URL.

    Superusers always pass. A permission matches when its URL equals the
    request path (modulo a trailing slash) or is a prefix of it.
    """
    def decorator(view_func):
        def _wrapped_view(request, *args, **kwargs):
            user = UserInfo.objects.get(username=request.user)
            if not user.is_superuser:
                roles = user.role.all()
                if not roles:
                    return HttpResponseRedirect(reverse('noperm'))
                requested = request.get_full_path()
                granted = []
                for role in roles:
                    for perm in PermissionList.objects.filter(role=role):
                        if (requested == perm.url
                                or requested.rstrip('/') == perm.url
                                or requested.startswith(perm.url)):
                            granted.append(perm.url)
                if not granted:
                    return HttpResponseRedirect(reverse('noperm'))
            return view_func(request, *args, **kwargs)
        return _wrapped_view
    return decorator
@login_required
@permission_verify()
def permission_list(request):
    """Render the list of all PermissionList entries.

    NOTE: ``locals()`` is the template context, so the local variable name
    ``all_permission`` is part of the template contract.
    """
    all_permission = PermissionList.objects.all()
    return render(request, 'accounts/permission_list.html', locals())
@login_required
@permission_verify()
def permission_add(request):
    """Create a new permission via PermissionListForm.

    GET renders an empty form; a valid POST saves and redirects to the
    list view, while an invalid POST re-renders the form with errors.
    ``locals()`` is the template context, so local names are part of the
    template contract.
    """
    if request.method == "POST":
        form = PermissionListForm(request.POST)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect(reverse('permission_list'))
        return render(request,'accounts/permission_add.html', locals())
    else:
        form = PermissionListForm()
        return render(request, 'accounts/permission_add.html', locals())
@login_required
@permission_verify()
def permission_edit(request, id):
    """Edit an existing permission identified by ``id``.

    GET renders the bound form; a valid POST saves and redirects to the
    list view. Raises PermissionList.DoesNotExist for unknown ids.
    ``locals()`` is the template context, so local names are part of the
    template contract.
    """
    iPermission = PermissionList.objects.get(id=id)
    if request.method == "POST":
        form = PermissionListForm(request.POST, instance=iPermission)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect(reverse('permission_list'))
        return render(request, 'accounts/permission_edit.html', locals())
    else:
        form = PermissionListForm(instance=iPermission)
        return render(request, 'accounts/permission_edit.html', locals())
@login_required
@permission_verify()
def permission_del(request):
    """Delete permissions by id (POST field ``id`` = JSON list of ids).

    Always responds with HTTP 200 and a JSON payload
    ``{'status': bool, 'error': str|None, 'data': None}``; failures are
    reported inside the payload instead of raising.
    """
    ret = {'status': True, 'error': None, 'data': None}
    if request.method == "POST":
        try:
            # Renamed from `id`, which shadowed the builtin of that name.
            ids = request.POST.get("id")
            for pk in json.loads(ids):
                PermissionList.objects.get(id=int(pk)).delete()
        except Exception as e:
            # Deliberate best-effort: report the failure in the JSON body
            # rather than letting the view raise a 500.
            ret["error"] = str(e)
            ret["status"] = False
    return HttpResponse(json.dumps(ret))
5d09c1eadb638cb285907db21f35f36fe6ba1880 | 3,396 | py | Python | pypureclient/flasharray/FA_2_8/models/dns_patch.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 14 | 2018-12-07T18:30:27.000Z | 2022-02-22T09:12:33.000Z | pypureclient/flasharray/FA_2_8/models/dns_patch.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 28 | 2019-09-17T21:03:52.000Z | 2022-03-29T22:07:35.000Z | pypureclient/flasharray/FA_2_8/models/dns_patch.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 15 | 2020-06-11T15:50:08.000Z | 2022-03-21T09:27:25.000Z | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.8
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_8 import models
class DnsPatch(object):
    """Swagger-generated model for a DNS configuration patch.

    Attributes:
        swagger_types (dict): The key is attribute name
            and the value is attribute type.
        attribute_map (dict): The key is attribute name
            and the value is json key in definition.
    """
    swagger_types = {
        'domain': 'str',
        'nameservers': 'list[str]'
    }
    attribute_map = {
        'domain': 'domain',
        'nameservers': 'nameservers'
    }
    required_args = {
    }

    def __init__(
        self,
        domain=None,  # type: str
        nameservers=None,  # type: List[str]
    ):
        """
        Keyword args:
            domain (str): Domain suffix to be appended by the appliance when performing DNS lookups.
            nameservers (list[str]): List of DNS server IP addresses.
        """
        # Only store values that were actually supplied; assignment goes
        # through the validating __setattr__ below.
        if domain is not None:
            self.domain = domain
        if nameservers is not None:
            self.nameservers = nameservers

    def __setattr__(self, key, value):
        # Reject attributes that are not part of the swagger model.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `DnsPatch`".format(key))
        self.__dict__[key] = value

    def __getattribute__(self, item):
        value = object.__getattribute__(self, item)
        # Property sentinels mark unset fields; expose them as missing.
        if isinstance(value, Property):
            raise AttributeError
        return value

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr in self.swagger_types:
            if not hasattr(self, attr):
                continue
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    element.to_dict() if hasattr(element, "to_dict") else element
                    for element in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: (v.to_dict() if hasattr(v, "to_dict") else v)
                    for k, v in value.items()
                }
            else:
                result[attr] = value
        if issubclass(DnsPatch, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return isinstance(other, DnsPatch) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
48f2a464a8766ba501f97481c67f0abb7ec40903 | 450 | py | Python | tests/unit/test_helpers.py | broper2/solana-py | 146390d959f017e137238335ee6fa362ad1a1ab4 | [
"MIT"
] | 477 | 2020-08-28T08:49:49.000Z | 2022-03-30T16:19:03.000Z | tests/unit/test_helpers.py | broper2/solana-py | 146390d959f017e137238335ee6fa362ad1a1ab4 | [
"MIT"
] | 195 | 2020-08-27T11:55:42.000Z | 2022-03-31T18:59:55.000Z | tests/unit/test_helpers.py | broper2/solana-py | 146390d959f017e137238335ee6fa362ad1a1ab4 | [
"MIT"
] | 143 | 2020-08-27T03:10:55.000Z | 2022-03-30T17:27:35.000Z | """Test helpers."""
from random import randint
import pytest
import solana.utils.helpers as helpers
def test_to_uint8_bytes():
"""Test int to uint8 bytes."""
assert helpers.to_uint8_bytes(255) == b"\xff"
with pytest.raises(OverflowError):
helpers.to_uint8_bytes(256)
def test_from_uint8():
"""Test uint8 bytes to int."""
num = randint(0, 255)
assert helpers.from_uint8_bytes(helpers.to_uint8_bytes(num)) == num
| 21.428571 | 71 | 0.697778 |
6e1689f458e20deb9498cb63a6bf112b993ff847 | 109,319 | py | Python | jax/numpy/lax_numpy.py | stephentu/jax | 7ecfa542269371833fd50b6078e86e7d624cd9e3 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | jax/numpy/lax_numpy.py | stephentu/jax | 7ecfa542269371833fd50b6078e86e7d624cd9e3 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | jax/numpy/lax_numpy.py | stephentu/jax | 7ecfa542269371833fd50b6078e86e7d624cd9e3 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Implements the NumPy API, using the primitives in :mod:`jax.lax`.
NumPy operations are implemented in Python in terms of the primitive operations
in :mod:`jax.lax`. Since NumPy operations are not primitive and instead are
implemented in terms of :mod:`jax.lax` operations, we do not need to define
transformation rules such as gradient or batching rules. Instead,
transformations for NumPy primitives can be derived from the transformation
rules for the underlying :code:`lax` primitives.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from distutils.util import strtobool
import collections
try:
from collections.abc import Sequence
except ImportError: # python 2
from collections import Sequence
import itertools
import os
import re
import string
import types
import warnings
import numpy as onp
import opt_einsum
import six
from six.moves import builtins, xrange
from jax import jit, device_put, custom_transforms, defjvp
from .. import core
from ..abstract_arrays import UnshapedArray, ShapedArray, ConcreteArray
from ..config import flags
from ..interpreters.xla import DeviceArray
from .. import lax
from ..util import partial, get_module_functions, unzip2, prod as _prod
from ..lib import pytree
from ..lib import xla_bridge
from ..lib import xla_client
FLAGS = flags.FLAGS
flags.DEFINE_enum(
'jax_numpy_rank_promotion', os.getenv('JAX_NUMPY_RANK_PROMOTION', 'allow'),
enum_values=['allow', 'warn', 'raise'],
help=
'Control NumPy-style automatic rank promotion broadcasting '
'("allow", "warn", or "raise").')
# Define a py2/py3-compatible `removechars(s, chars)` that strips all of the
# given characters from a string; str.translate has different signatures on
# Python 2 vs Python 3.
if six.PY3:
  def removechars(s, chars):
    return s.translate(str.maketrans(dict.fromkeys(chars)))
else:
  def removechars(s, chars):
    return s.translate(None, ''.join(chars))
# NumPy-API alias: indexing with `newaxis` inserts a length-1 dimension.
newaxis = None
# We replace some builtin names to follow Numpy's API, so we capture here.
_abs = builtins.abs
_all = builtins.all
_any = builtins.any
_max = builtins.max
_min = builtins.min
_sum = builtins.sum
# We need some numpy scalars
pi = onp.pi
e = onp.e
inf = onp.inf
NINF = onp.NINF
nan = onp.nan
# And some numpy utility functions
set_printoptions = onp.set_printoptions
# We want isinstance(x, np.ndarray) checks in user code to work with the our
# array-like types, including DeviceArray and UnshapedArray (i.e. the abstract
# array base class). We can override the isinstance behavior directly, without
# having the complexity of multiple inheritance on those classes, by defining
# the ndarray class to have a metaclass with special __instancecheck__ behavior.
_arraylike_types = (onp.ndarray, UnshapedArray, DeviceArray)
class _ArrayMeta(type(onp.ndarray)):
  """Metaclass for overriding ndarray isinstance checks."""
  def __instancecheck__(self, instance):
    # Tracers and DeviceArrays carry their abstract value in `.aval`; treat
    # the object as an ndarray when that abstract value is array-like.
    try:
      return isinstance(instance.aval, _arraylike_types)
    except AttributeError:
      # No `.aval`: fall back to a direct check (e.g. a raw onp.ndarray).
      return isinstance(instance, _arraylike_types)
class ndarray(six.with_metaclass(_ArrayMeta, onp.ndarray)):
  """Stand-in ndarray type used only for isinstance checks (via _ArrayMeta).

  Direct instantiation is disallowed; jax.numpy arrays are created through
  factory functions such as jax.numpy.array and jax.numpy.zeros.
  """
  # Fix: the original signature omitted `self`, so the instance silently bound
  # to the `shape` parameter. Behavior is unchanged (always raises TypeError).
  def __init__(self, shape=None, dtype=None, buffer=None, offset=0,
               strides=None, order=None):
    raise TypeError("jax.numpy.ndarray() should not be instantiated explicitly."
                    " Use jax.numpy.array, or jax.numpy.zeros instead.")
# Re-export NumPy introspection helpers unchanged; these operate on metadata
# (shape, dtype) rather than array values, so no lax translation is needed.
isscalar = onp.isscalar
iscomplexobj = onp.iscomplexobj
shape = _shape = onp.shape
ndim = _ndim = onp.ndim
size = onp.size
_dtype = lax.dtype
# Re-export NumPy scalar types and dtype machinery under the jax.numpy names.
bool_ = onp.bool_
uint8 = onp.uint8
uint16 = onp.uint16
uint32 = onp.uint32
uint64 = onp.uint64
int8 = onp.int8
int16 = onp.int16
int32 = onp.int32
int64 = onp.int64
float16 = onp.float16
float32 = single = onp.float32
float64 = double = onp.float64
complex64 = csingle = onp.complex64
complex128 = cdouble = onp.complex128
flexible = onp.flexible
character = onp.character
object_ = onp.object_
number = onp.number
inexact = onp.inexact
complexfloating = onp.complexfloating
floating = onp.floating
integer = onp.integer
signedinteger = onp.signedinteger
unsignedinteger = onp.unsignedinteger
iinfo = onp.iinfo
finfo = onp.finfo
can_cast = onp.can_cast
issubdtype = onp.issubdtype
issubsctype = onp.issubsctype
result_type = onp.result_type
promote_types = onp.promote_types
ComplexWarning = onp.ComplexWarning
# String rendering and (de)serialization are forwarded to NumPy directly.
array_str = onp.array_str
array_repr = onp.array_repr
save = onp.save
savez = onp.savez
load = onp.load
### utility functions
def _promote_shapes(fun_name, *args):
  """Prepend implicit leading singleton dimensions for Numpy broadcasting."""
  if len(args) < 2:
    return args
  else:
    shapes = [shape(arg) for arg in args]
    # Scalars (empty shape tuples) broadcast freely and are excluded here.
    nonscalar_ranks = [len(shp) for shp in shapes if shp]
    if not nonscalar_ranks or len(set(nonscalar_ranks)) == 1:
      # No rank promotion needed: at most one distinct non-scalar rank.
      return args
    else:
      # Ranks differ: optionally warn/raise per the rank-promotion flag, then
      # left-pad each lower-rank operand's shape with 1s to the result rank.
      if FLAGS.jax_numpy_rank_promotion != "allow":
        _rank_promotion_warning_or_error(fun_name, shapes)
      result_rank = len(lax.broadcast_shapes(*shapes))
      return [lax.reshape(arg, (1,) * (result_rank - len(shp)) + shp)
              if shp and len(shp) != result_rank else arg
              for arg, shp in zip(args, shapes)]
def _rank_promotion_warning_or_error(fun_name, shapes):
  """Warn or raise (per the jax_numpy_rank_promotion flag) about implicit
  rank promotion in `fun_name` applied to operands with `shapes`."""
  shapes_str = ' '.join(map(str, shapes))
  mode = FLAGS.jax_numpy_rank_promotion
  if mode == "warn":
    warnings.warn(
        ("Following NumPy automatic rank promotion for {} on shapes {}. "
         "Set the jax_numpy_rank_promotion config option to 'allow' to "
         "disable this warning; for more information, see "
         "https://jax.readthedocs.io/en/latest/rank_promotion_warning.html.")
        .format(fun_name, shapes_str))
  elif mode == "raise":
    raise ValueError(
        ("Operands could not be broadcast together for {} on shapes {} "
         "and with the config option jax_numpy_rank_promotion='raise'. "
         "For more information, see "
         "https://jax.readthedocs.io/en/latest/rank_promotion_warning.html.")
        .format(fun_name, shapes_str))
def _promote_dtypes(*args):
  """Apply NumPy dtype promotion, converting args to a common dtype."""
  # TODO(dougalm,mattjj): This is a performance bottleneck. Consider memoizing.
  if len(args) < 2:
    return args
  common = xla_bridge.canonicalize_dtype(result_type(*map(_dtype, args)))
  return [x if _dtype(x) == common else lax.convert_element_type(x, common)
          for x in args]
def _promote_to_result_dtype(op, *args):
  """Convenience function to promote args directly to the op's result dtype."""
  to_dtype = _result_dtype(op, *args)
  return [lax.convert_element_type(arg, to_dtype) for arg in args]
def _result_dtype(op, *args):
  """Compute result dtype of applying op to arguments with given dtypes."""
  # Run `op` on zero-size dummies with matching rank/dtype; only the result
  # dtype is inspected, so no real work is done.
  args = [onp.ones((0,) * ndim(arg), _dtype(arg)) for arg in args]
  return _dtype(op(*args))
def _check_arraylike(fun_name, *args):
  """Raise TypeError unless every arg is array-like (ndarray or scalar)."""
  for pos, arg in enumerate(args):
    if not (isinstance(arg, ndarray) or onp.isscalar(arg)):
      msg = "{} requires ndarray or scalar arguments, got {} at position {}."
      raise TypeError(msg.format(fun_name, type(arg), pos))
def _promote_args(fun_name, *args):
  """Convenience function to apply Numpy argument shape and dtype promotion."""
  _check_arraylike(fun_name, *args)
  return _promote_shapes(fun_name, *_promote_dtypes(*args))
def _promote_args_like(op, *args):
  """Convenience function to apply shape and dtype promotion to result type."""
  # Unlike _promote_args, dtypes are promoted to whatever `op` would return.
  _check_arraylike(op.__name__, *args)
  return _promote_shapes(op.__name__, *_promote_to_result_dtype(op, *args))
def _constant_like(x, const):
  # Build a host-side numpy constant with the same dtype as `x`.
  return onp.array(const, dtype=_dtype(x))
def update_numpydoc(docstr, fun, op):
  '''Transforms the numpy docstring to remove references of
  parameters that are supported by the numpy version but not the JAX version

  Note: `fun` is unused but kept for interface compatibility with callers.
  '''
  # Fix: the whitespace string literals below were collapsed to single spaces,
  # which made the dedent branch unreachable (a 4-char slice can never equal
  # ' ') and broke numpydoc parameter matching; restore the 4-space literals.
  #
  # Some numpy functions have an extra tab at the beginning of each line.
  # If this function is one of those we remove this extra tab from all lines.
  if docstr[:4] == '    ':
    lines = docstr.split('\n')
    for idx, line in enumerate(lines):
      lines[idx] = line.replace('    ', '', 1)
    docstr = '\n'.join(lines)
  # Locate the numpydoc "Parameters" section body: it starts just after the
  # dashed underline and ends at the "Returns" heading.
  begin_idx = docstr.find("Parameters")
  begin_idx = docstr.find("--\n", begin_idx) + 2
  end_idx = docstr.find("Returns", begin_idx)
  parameters = docstr[begin_idx:end_idx]
  # Fold each parameter's indented description lines into its header line
  # (via the '@@' sentinel) so split('\n') yields one entry per parameter.
  param_list = parameters.replace('\n    ', '@@').split('\n')
  for idx, p in enumerate(param_list):
    # "name : type" header; drop the entry if JAX's wrapper lacks the param.
    param = p[:p.find(' : ')].split(", ")[0]
    if param not in op.__code__.co_varnames:
      param_list[idx] = ''
  param_list = [param for param in param_list if param != '']
  parameters = '\n'.join(param_list).replace('@@', '\n    ')
  return docstr[:begin_idx + 1] + parameters + docstr[end_idx - 2:]
# Matches a NumPy-style signature line (e.g. "out = fn(x, y, z)") at the top
# of a ufunc docstring, so _wraps can separate signatures from the summary.
_numpy_signature_re = re.compile(r'^([\w., ]+=)?\s*[\w\.]+\(.*\)$')
def _wraps(fun, update_doc=True):
  """Like functools.wraps but works with numpy.ufuncs.
  It is important that when wrapping numpy functions the parameters names
  in the original function and in the JAX version are the same
  Parameters:
    fun: The function being wrapped
    update_doc: whether to transform the numpy docstring to remove references of
      parameters that are supported by the numpy version but not the JAX version.
      If False, include the numpy docstring verbatim.
  """
  def wrap(op):
    try:
      # Numpy doc comments have the form:
      # fn(x, y, z) (optional)
      #
      # A one-line summary
      #
      # ... everything else ...
      # We (a) move the summary to the top, since it is what the Sphinx
      # autosummary extension expects, and (b) add a comment below the summary
      # to the effect that this is a LAX wrapper of a Numpy function.
      sections = fun.__doc__.split("\n\n")
      signatures = []
      summary = None
      for i in xrange(len(sections)):
        if _numpy_signature_re.match(sections[i]):
          signatures.append(sections[i])
        else:
          summary = sections[i].strip()
          break
      body = "\n\n".join(signatures + sections[i + 1:])
      if update_doc:
        body = update_numpydoc(body, fun, op)
      docstr = (
          "{summary}\n\nLAX-backend implementation of :func:`{fun}`. "
          "Original docstring below.\n\n{body}".format(
              summary=summary, fun=fun.__name__, body=body))
      op.__name__ = fun.__name__
      op.__doc__ = docstr
    finally:
      # Deliberate best-effort: if doc processing fails for any reason the
      # wrapped op is still returned (possibly with its original docstring).
      return op
  return wrap
def _canonicalize_axis(axis, num_dims):
"""Canonicalize an axis in (-num_dims, num_dims) to [0, num_dims)."""
axis = int(axis)
if axis < 0:
axis = axis + num_dims
if axis < 0 or axis >= num_dims:
raise ValueError(
"axis {} is out of bounds for array of dimension {}".format(
axis, num_dims))
return axis
### implementations of numpy functions in terms of lax
def _one_to_one_unop(numpy_fn, lax_fn, promote_like=False):
  """Wrap a one-argument lax op under the corresponding NumPy ufunc's name.

  When promote_like is set, the input is first cast to the dtype NumPy's
  version of the function would produce.
  """
  if promote_like:
    def fn(x):
      return lax_fn(lax.convert_element_type(x, _result_dtype(numpy_fn, x)))
  else:
    def fn(x):
      return lax_fn(x)
  return _wraps(numpy_fn)(fn)

def _one_to_one_binop(numpy_fn, lax_fn, promote_like=False):
  """Wrap a two-argument lax op under the corresponding NumPy ufunc's name,
  applying standard (or result-dtype, if promote_like) argument promotion."""
  if promote_like:
    def fn(x1, x2):
      return lax_fn(*_promote_args_like(numpy_fn, x1, x2))
  else:
    def fn(x1, x2):
      return lax_fn(*_promote_args(numpy_fn.__name__, x1, x2))
  return _wraps(numpy_fn)(fn)
# Unary ufuncs: direct lax translations; a trailing `True` requests promotion
# to the dtype NumPy's version of the function would return (e.g. ints->float).
absolute = abs = _one_to_one_unop(onp.absolute, lax.abs)
fabs = _one_to_one_unop(onp.fabs, lax.abs, True)
bitwise_not = _one_to_one_unop(onp.bitwise_not, lax.bitwise_not)
negative = _one_to_one_unop(onp.negative, lax.neg)
positive = _one_to_one_unop(onp.positive, lambda x: x)
sign = _one_to_one_unop(onp.sign, lax.sign)
floor = _one_to_one_unop(onp.floor, lax.floor, True)
ceil = _one_to_one_unop(onp.ceil, lax.ceil, True)
exp = _one_to_one_unop(onp.exp, lax.exp, True)
log = _one_to_one_unop(onp.log, lax.log, True)
expm1 = _one_to_one_unop(onp.expm1, lax.expm1, True)
log1p = _one_to_one_unop(onp.log1p, lax.log1p, True)
sin = _one_to_one_unop(onp.sin, lax.sin, True)
cos = _one_to_one_unop(onp.cos, lax.cos, True)
tan = _one_to_one_unop(onp.tan, lax.tan, True)
arcsin = _one_to_one_unop(onp.arcsin, lax.asin, True)
arccos = _one_to_one_unop(onp.arccos, lax.acos, True)
arctan = _one_to_one_unop(onp.arctan, lax.atan, True)
sinh = _one_to_one_unop(onp.sinh, lax.sinh, True)
cosh = _one_to_one_unop(onp.cosh, lax.cosh, True)
tanh = _one_to_one_unop(onp.tanh, lax.tanh, True)
sqrt = _one_to_one_unop(onp.sqrt, lax.sqrt, True)
# Binary ufuncs: standard argument promotion unless flagged `True`.
add = _one_to_one_binop(onp.add, lax.add)
bitwise_and = _one_to_one_binop(onp.bitwise_and, lax.bitwise_and)
bitwise_or = _one_to_one_binop(onp.bitwise_or, lax.bitwise_or)
bitwise_xor = _one_to_one_binop(onp.bitwise_xor, lax.bitwise_xor)
right_shift = _one_to_one_binop(onp.right_shift, lax.shift_right_arithmetic)
left_shift = _one_to_one_binop(onp.left_shift, lax.shift_left)
equal = _one_to_one_binop(onp.equal, lax.eq)
multiply = _one_to_one_binop(onp.multiply, lax.mul)
not_equal = _one_to_one_binop(onp.not_equal, lax.ne)
subtract = _one_to_one_binop(onp.subtract, lax.sub)
arctan2 = _one_to_one_binop(onp.arctan2, lax.atan2, True)
minimum = _one_to_one_binop(onp.minimum, lax.min)
maximum = _one_to_one_binop(onp.maximum, lax.max)
float_power = _one_to_one_binop(onp.float_power, lax.pow, True)
def _comparison_op(numpy_fn, lax_fn):
  """Wrap an ordering comparison, extending it to complex operands."""
  def fn(x1, x2):
    x1, x2 = _promote_args(numpy_fn.__name__, x1, x2)
    # Comparison on complex types are defined as a lexicographic ordering on
    # the (real, imag) pair.
    if issubdtype(_dtype(x1), complexfloating):
      rx = lax.real(x1)
      ry = lax.real(x2)
      # Compare imaginary parts only where the real parts tie.
      return lax.select(lax.eq(rx, ry), lax_fn(lax.imag(x1), lax.imag(x2)),
                        lax_fn(rx, ry))
    return lax_fn(x1, x2)
  return _wraps(numpy_fn)(fn)
greater_equal = _comparison_op(onp.greater_equal, lax.ge)
greater = _comparison_op(onp.greater, lax.gt)
less_equal = _comparison_op(onp.less_equal, lax.le)
less = _comparison_op(onp.less, lax.lt)
def _logical_op(np_op, bitwise_op):
  """Wrap a bitwise op as a logical op: non-bool args are first mapped to
  booleans via comparison with zero."""
  @_wraps(np_op, update_doc=False)
  def op(*args):
    zero = lambda x: lax.full_like(x, shape=(), fill_value=0)
    # Lazily coerce each arg to bool (x != 0) unless it is already boolean.
    args = (x if issubdtype(_dtype(x), onp.bool_) else lax.ne(x, zero(x))
            for x in args)
    return bitwise_op(*_promote_args(np_op.__name__, *args))
  return op
logical_and = _logical_op(onp.logical_and, lax.bitwise_and)
logical_not = _logical_op(onp.logical_not, lax.bitwise_not)
logical_or = _logical_op(onp.logical_or, lax.bitwise_or)
logical_xor = _logical_op(onp.logical_xor, lax.bitwise_xor)
@_wraps(onp.true_divide)
def true_divide(x1, x2):
  # Always divide in the inexact dtype NumPy's true_divide would produce.
  out_dtype = _result_dtype(onp.true_divide, x1, x2)
  x1, x2 = _promote_shapes("true_divide", x1, x2)
  numer = lax.convert_element_type(x1, out_dtype)
  denom = lax.convert_element_type(x2, out_dtype)
  return lax.div(numer, denom)
@_wraps(onp.divide)
def divide(x1, x2):
  # decide whether to perform integer division based on Numpy result dtype, as a
  # way to check whether Python 3 style division is active in Numpy
  if issubdtype(_result_dtype(onp.divide, x1, x2), onp.integer):
    return floor_divide(x1, x2)
  return true_divide(x1, x2)
@_wraps(onp.floor_divide)
def floor_divide(x1, x2):
  x1, x2 = _promote_args("floor_divide", x1, x2)
  dtype = _dtype(x1)
  if issubdtype(dtype, integer):
    # lax.div truncates toward zero; adjust by -1 where the operands have
    # opposite signs and the division is inexact, to get floor semantics.
    quotient = lax.div(x1, x2)
    select = logical_and(lax.sign(x1) != lax.sign(x2), lax.rem(x1, x2) != 0)
    # TODO(mattjj): investigate why subtracting a scalar was causing promotion
    return where(select, quotient - onp.array(1, _dtype(quotient)), quotient)
  elif issubdtype(dtype, complexfloating):
    # Complex floor division: scale by the larger-magnitude component of x2
    # to avoid overflow, then floor the real quotient.
    x1r = lax.real(x1)
    x1i = lax.imag(x1)
    x2r = lax.real(x2)
    x2i = lax.imag(x2)
    which = lax.ge(lax.abs(x2r), lax.abs(x2i))
    rat1 = where(which, lax._const(x2i, 1), lax.div(x2r, x2i))
    rat2 = where(which, lax.div(x2i, x2r), lax._const(x2i, 1))
    out = lax.floor(lax.div(lax.add(lax.mul(x1r, rat1), lax.mul(x1i, rat2)),
                            lax.add(lax.mul(x2r, rat1), lax.mul(x2i, rat2))))
    return lax.convert_element_type(out, dtype)
  else:
    # Floating point: reuse the CPython-style divmod and keep the quotient.
    return _float_divmod(x1, x2)[0]
@_wraps(onp.divmod)
def divmod(x1, x2):
  # Integer inputs use exact floor semantics; floats use the CPython-style
  # float divmod helper.
  x1, x2 = _promote_args("divmod", x1, x2)
  if not issubdtype(_dtype(x1), onp.integer):
    return _float_divmod(x1, x2)
  return floor_divide(x1, x2), remainder(x1, x2)
def _float_divmod(x1, x2):
  # see float_divmod in floatobject.c of CPython
  mod = lax.rem(x1, x2)
  div = lax.div(lax.sub(x1, mod), x2)
  # Where the truncated remainder is nonzero and disagrees in sign with x2,
  # shift the remainder into x2's sign and decrement the quotient to match.
  ind = lax.bitwise_and(mod != 0, lax.sign(x2) != lax.sign(mod))
  mod = lax.select(ind, mod + x2, mod)
  div = lax.select(ind, div - _constant_like(div, 1), div)
  return lax.round(div), mod
@_wraps(onp.power)
def power(x1, x2):
  x1 = asarray(x1)
  x2 = asarray(x2)
  x1, x2 = _promote_args_like(onp.power, x1, x2)
  dtype = _dtype(x1)
  if not issubdtype(dtype, integer):
    return lax.pow(x1, x2)
  # Integer power => use binary exponentiation.
  # TODO(phawkins): add integer pow support to XLA.
  bits = 6  # Anything more would overflow for any x1 > 1
  acc = ones(shape(x1), dtype=dtype)
  for _ in xrange(bits):
    # Multiply in the current square of x1 wherever x2's low bit is set.
    acc = where(lax.bitwise_and(x2, _constant_like(x2, 1)),
                lax.mul(acc, x1), acc)
    x1 = lax.mul(x1, x1)
    x2 = lax.shift_right_logical(x2, _constant_like(x2, 1))
  return acc
@_wraps(onp.logaddexp)
def logaddexp(x1, x2):
  # Numerically stable log(exp(x1) + exp(x2)): factor out the larger operand.
  x1, x2 = _promote_shapes("logaddexp",
                           *_promote_to_result_dtype(onp.logaddexp, x1, x2))
  amax = lax.max(x1, x2)
  return lax.add(amax, lax.log1p(lax.exp(-lax.abs(lax.sub(x1, x2)))))

@_wraps(onp.logaddexp2)
def logaddexp2(x1, x2):
  # Base-2 analogue: log2(2**x1 + 2**x2), via the natural-log identity.
  x1, x2 = _promote_shapes("logaddexp2",
                           *_promote_to_result_dtype(onp.logaddexp2, x1, x2))
  amax = lax.max(x1, x2)
  return lax.add(amax, lax.div(lax.log1p(exp2(-lax.abs(lax.sub(x1, x2)))),
                               _constant_like(x1, onp.log(2))))
@_wraps(onp.log2)
def log2(x):
  """Base-2 logarithm via the change-of-base identity log(x)/log(2)."""
  x, = _promote_to_result_dtype(onp.log2, x)
  ln2 = lax.log(_constant_like(x, 2))
  return lax.div(lax.log(x), ln2)

@_wraps(onp.log10)
def log10(x):
  """Base-10 logarithm via the change-of-base identity log(x)/log(10)."""
  x, = _promote_to_result_dtype(onp.log10, x)
  ln10 = lax.log(_constant_like(x, 10))
  return lax.div(lax.log(x), ln10)
@_wraps(onp.exp2)
def exp2(x):
  """2**x, computed as exp(log(2) * x)."""
  x, = _promote_to_result_dtype(onp.exp2, x)
  ln2 = lax.log(_constant_like(x, 2))
  return lax.exp(lax.mul(ln2, x))
@_wraps(onp.remainder)
def remainder(x1, x2):
  x1, x2 = _promote_args("remainder", x1, x2)
  zero = _constant_like(x1, 0)
  # lax.rem truncates toward zero; add x2 where the nonzero remainder's sign
  # disagrees with x2's, to get Python/NumPy modulo semantics.
  trunc_mod = lax.rem(x1, x2)
  trunc_mod_not_zero = lax.ne(trunc_mod, zero)
  do_plus = lax.bitwise_and(
      lax.ne(lax.lt(trunc_mod, zero), lax.lt(x2, zero)), trunc_mod_not_zero)
  return lax.select(do_plus, lax.add(trunc_mod, x2), trunc_mod)
# `mod` follows sign-of-divisor semantics; `fmod` keeps the truncated
# (sign-of-dividend) remainder, matching NumPy.
mod = remainder
fmod = _wraps(onp.fmod)(lambda x1, x2: lax.rem(x1, x2))
@_wraps(onp.cbrt)
def cbrt(x):
  x, = _promote_to_result_dtype(onp.cbrt, x)
  # pow with a fractional exponent is undefined for negative bases, so take
  # the root of |x| and reapply the sign.
  return lax.sign(x) * power(lax.abs(x), _constant_like(x, 1. / 3.))

@_wraps(onp.square)
def square(x):
  """Elementwise x * x."""
  x, = _promote_to_result_dtype(onp.square, x)
  return x * x
@_wraps(onp.deg2rad)
def deg2rad(x):
  # Scale by pi/180 to convert degrees to radians.
  x, = _promote_to_result_dtype(onp.deg2rad, x)
  return lax.mul(x, lax._const(x, pi / 180))

@_wraps(onp.rad2deg)
def rad2deg(x):
  # Scale by 180/pi to convert radians to degrees.
  x, = _promote_to_result_dtype(onp.rad2deg, x)
  return lax.mul(x, lax._const(x, 180 / pi))
# NumPy-compatible aliases.
degrees = rad2deg
radians = deg2rad
@_wraps(onp.heaviside)
def heaviside(x1, x2):
  # Step function: 0 for x1 < 0, 1 for x1 > 0, and x2 exactly at x1 == 0.
  x1, x2 = _promote_to_result_dtype(onp.heaviside, x1, x2)
  zero = lax._const(x1, 0)
  return where(lax.lt(x1, zero), zero,
               where(lax.gt(x1, zero), lax._const(x1, 1), x2))

@_wraps(onp.hypot)
def hypot(x1, x2):
  # NOTE(review): naive sqrt(x1^2 + x2^2); presumably can overflow/underflow
  # for extreme magnitudes where a scaled formulation would not — confirm
  # whether that tradeoff is intentional here.
  x1, x2 = _promote_to_result_dtype(onp.hypot, x1, x2)
  return lax.sqrt(x1*x1 + x2*x2)

@_wraps(onp.reciprocal)
def reciprocal(x):
  # Elementwise 1 / x in the promoted inexact dtype.
  x, = _promote_to_result_dtype(onp.reciprocal, x)
  return lax.div(lax._const(x, 1), x)
@_wraps(onp.sinc, update_doc=False)
def sinc(x):
  # Normalized sinc: sin(pi*x)/(pi*x), with the removable singularity at
  # x == 0 patched to 1.
  x, = _promote_to_result_dtype(onp.sinc, x)
  pi_x = lax.mul(lax._const(x, pi), x)
  return where(lax.eq(x, lax._const(x, 0)),
               lax._const(x, 1), lax.div(lax.sin(pi_x), pi_x))
@_wraps(onp.arcsinh)
@custom_transforms
@jit
@lax._upcast_fp16_for_computation
def arcsinh(x):
  # asinh(x) = log(x + sqrt(x**2 + 1))
  x, = _promote_to_result_dtype(onp.arcsinh, x)
  one = lax._const(x, 1)
  result = lax.log(x + lax.sqrt(x * x + one))
  if issubdtype(_dtype(result), onp.complexfloating):
    return result
  # For large |x|, x*x would overflow; use asinh(x) ~ sign(x)*(log|x|+log 2).
  a = abs(x)
  sqrt_max_value = onp.sqrt(finfo(_dtype(x)).max)
  log2 = lax._const(a, onp.log(2))
  return lax.select(a < sqrt_max_value, result, lax.sign(x) * (lax.log(a) + log2))
# Custom JVP: d/dx asinh(x) = 1 / sqrt(1 + x**2), avoiding differentiating
# through the overflow-guarding select above.
defjvp(arcsinh, lambda g, ans, x: g / lax.sqrt(lax._const(x, 1) + square(x)))
@_wraps(onp.arccosh)
@jit
@lax._upcast_fp16_for_computation
def arccosh(x):
  # acosh(x) = log(x + sqrt((x + 1) * (x - 1))) if x < sqrt_max_value
  #            log(x) + log(2) otherwise
  x, = _promote_to_result_dtype(onp.arccosh, x)
  one = lax._const(x, 1)
  result = lax.log(x + lax.sqrt((x + one) * (x - one)))
  if issubdtype(_dtype(result), onp.complexfloating):
    return result
  # Large-x fallback avoids overflow in the (x+1)*(x-1) product.
  sqrt_max_value = onp.sqrt(finfo(_dtype(x)).max)
  log2 = lax._const(x, onp.log(2))
  return lax.select(x < sqrt_max_value, result, lax.log(x) + log2)
@_wraps(onp.arctanh)
def arctanh(x):
  # atanh(x) = 0.5 * log((1 + x) / (1 - x))
  x, = _promote_to_result_dtype(onp.arctanh, x)
  one = lax._const(x, 1)
  result = lax._const(x, 0.5) * lax.log((one + x) / (one - x))
  if issubdtype(_dtype(result), onp.complexfloating):
    return result
  # Real atanh is only defined on [-1, 1]; return NaN outside that domain.
  return lax.select(abs(x) <= 1, result, lax.full_like(x, onp.nan))
@_wraps(onp.transpose)
def transpose(a, axes=None):
  if axes is None:
    # NumPy default: reverse the axis order.
    axes = onp.arange(ndim(a))[::-1]
  return lax.transpose(a, axes)
@_wraps(onp.rot90)
def rot90(m, k=1, axes=(0, 1)):
  """Rotate by k quarter-turns in the plane spanned by `axes`."""
  ax1, ax2 = axes
  ax1 = _canonicalize_axis(ax1, m.ndim)
  ax2 = _canonicalize_axis(ax2, m.ndim)
  if ax1 == ax2:
    raise ValueError("Axes must be different")  # same as numpy error
  k = k % 4
  if k == 0:
    return m
  elif k == 2:
    # A half-turn is a flip along both axes.
    return flip(flip(m, ax1), ax2)
  else:
    # Quarter-turns combine one flip with an axis swap; the order of the two
    # operations determines the rotation direction (k == 1 vs k == 3).
    perm = list(range(m.ndim))
    perm[ax1], perm[ax2] = perm[ax2], perm[ax1]
    if k == 1:
      return transpose(flip(m, ax2), perm)
    else:
      return flip(transpose(m, perm), ax2)
@_wraps(onp.flip)
def flip(m, axis):
  # Reverse element order along `axis` (negative axes are canonicalized).
  return lax.rev(m, [_canonicalize_axis(axis, len(m.shape))])

@_wraps(onp.fliplr)
def fliplr(m):
  # Flip columns (axis 1).
  return flip(m, 1)

@_wraps(onp.flipud)
def flipud(m):
  # Flip rows (axis 0).
  return flip(m, 0)
@_wraps(onp.conjugate)
def conjugate(x):
  """Complex conjugate; real inputs pass through unchanged."""
  if iscomplexobj(x):
    return lax.conj(x)
  return x
conj = conjugate

@_wraps(onp.imag)
def imag(val):
  """Imaginary part; zeros (of matching shape/dtype) for real inputs."""
  if iscomplexobj(val):
    return lax.imag(val)
  return zeros_like(val)

@_wraps(onp.real)
def real(val):
  """Real part; real inputs pass through unchanged."""
  if iscomplexobj(val):
    return lax.real(val)
  return val
@_wraps(onp.iscomplex)
def iscomplex(x):
  """Elementwise test for a nonzero imaginary part."""
  im = imag(x)
  return lax.ne(im, lax._const(im, 0))

@_wraps(onp.isreal)
def isreal(x):
  """Elementwise test for a zero imaginary part."""
  im = imag(x)
  return lax.eq(im, lax._const(im, 0))
@_wraps(onp.angle)
def angle(z):
  """Phase angle of z, computed as atan2(imag, real)."""
  re = real(z)
  im = imag(z)
  dtype = _dtype(re)
  if not issubdtype(dtype, inexact) or (
      issubdtype(_dtype(z), floating) and ndim(z) == 0):
    # Integer inputs (and real scalars) are computed in the default float
    # dtype, matching NumPy's result dtype.
    dtype = xla_bridge.canonicalize_dtype(float64)
    re = lax.convert_element_type(re, dtype)
    im = lax.convert_element_type(im, dtype)
  return lax.atan2(im, re)
@_wraps(onp.diff)
def diff(a, n=1, axis=-1,):
  """n-th discrete difference along `axis`."""
  if not isinstance(a, ndarray) or a.ndim == 0:
    return a
  if n == 0:
    return a
  if n < 0:
    raise ValueError(
      "order must be non-negative but got " + repr(n))
  # Build slice tuples selecting a[1:] and a[:-1] along `axis`.
  nd = a.ndim
  slice1 = [slice(None)] * nd
  slice2 = [slice(None)] * nd
  slice1[axis] = slice(1, None)
  slice2[axis] = slice(None, -1)
  slice1 = tuple(slice1)
  slice2 = tuple(slice2)
  # Boolean arrays difference via XOR-like not_equal, as in NumPy.
  op = not_equal if a.dtype == onp.bool_ else subtract
  for _ in range(n):
    a = op(a[slice1], a[slice2])
  return a
@_wraps(onp.isrealobj)
def isrealobj(x):
  # True when x does not have a complex dtype.
  return not iscomplexobj(x)
@_wraps(onp.reshape)
def reshape(a, newshape, order="C"):
  try:
    return a.reshape(newshape, order=order)  # forward to method for ndarrays
  except AttributeError:
    return _reshape(a, newshape, order=order)

def _reshape(a, newshape, order="C"):
  # Let NumPy resolve -1 entries in `newshape` using a zero-strided dummy of
  # the right shape (no data is copied).
  dummy_val = onp.broadcast_to(0, shape(a))  # zero strides
  computed_newshape = onp.reshape(dummy_val, newshape).shape
  if order == "C":
    return lax.reshape(a, computed_newshape, None)
  elif order == "F":
    # Fortran order: reshape with reversed dimensions, then transpose back.
    dims = onp.arange(ndim(a))[::-1]
    return lax.reshape(a, computed_newshape[::-1], dims).T
  elif order == "A":
    raise NotImplementedError("np.reshape order=A is not implemented.")
  else:
    raise ValueError("Unexpected value for 'order' argument: {}.".format(order))
def _reshape_method(a, *newshape, **kwargs):
  """Method-style reshape: accepts either a.reshape(n, m) or a.reshape((n, m)),
  mirroring NumPy's ndarray.reshape calling conventions."""
  order = kwargs.pop("order", "C")
  if len(kwargs) == 1:
    invalid_kwarg, = kwargs
    msg = "'{}' is an invalid keyword argument for this function"
    raise TypeError(msg.format(invalid_kwarg))  # same as NumPy error
  elif kwargs:
    invalid_kwargs = "'{}'".format("'".join(kwargs))
    msg = "{} are invalid keyword arguments for this function"
    raise TypeError(msg.format(invalid_kwargs))  # different from NumPy error
  # A single non-int positional argument is taken to be the shape itself.
  if len(newshape) == 1 and not isinstance(newshape[0], int):
    newshape = newshape[0]
  return _reshape(a, newshape, order=order)
@_wraps(onp.ravel)
def ravel(a, order="C"):
  # Flatten to 1-D; delegates -to reshape- for C/F/A order handling.
  if order == "K":
    raise NotImplementedError("Ravel not implemented for order='K'.")
  return reshape(a, (size(a),), order)
@_wraps(onp.squeeze)
def squeeze(a, axis=None):
  """Remove length-1 dimensions (all of them, or only those in `axis`)."""
  if 1 not in shape(a):
    return a
  if axis is None:
    newshape = [d for d in shape(a) if d != 1]
  else:
    if isinstance(axis, int):
      axis = (axis,)
    axis = frozenset(_canonicalize_axis(i, ndim(a)) for i in axis)
    # Keep a dimension if it is not size 1 or was not explicitly selected.
    newshape = [d for i, d in enumerate(shape(a))
                if d != 1 or i not in axis]
  return lax.reshape(a, newshape)
@_wraps(onp.expand_dims)
def expand_dims(a, axis):
  """Insert a new length-1 axis at position `axis`."""
  old_shape = _shape(a)
  axis = _canonicalize_axis(axis, ndim(a) + 1)
  new_shape = old_shape[:axis] + (1,) + old_shape[axis:]
  return lax.reshape(a, new_shape)
@_wraps(onp.swapaxes)
def swapaxes(a, axis1, axis2):
  # Build the identity permutation, exchange the two entries, and transpose.
  perm = onp.arange(ndim(a))
  perm[axis1], perm[axis2] = perm[axis2], perm[axis1]
  return lax.transpose(a, perm)
@_wraps(onp.moveaxis)
def moveaxis(a, source, destination):
  """Move axes in `source` to the positions in `destination`."""
  if isinstance(source, int):
    source = (source,)
  if isinstance(destination, int):
    destination = (destination,)
  source = tuple(_canonicalize_axis(i, ndim(a)) for i in source)
  destination = tuple(_canonicalize_axis(i, ndim(a)) for i in destination)
  if len(source) != len(destination):
    raise ValueError("Inconsistent number of elements: {} vs {}"
                     .format(len(source), len(destination)))
  # Start from the non-moved axes in order, then insert each moved axis at
  # its destination (sorted so earlier insertions don't shift later ones).
  perm = [i for i in range(ndim(a)) if i not in source]
  for dest, src in sorted(zip(destination, source)):
    perm.insert(dest, src)
  return lax.transpose(a, perm)
@_wraps(onp.isclose)
def isclose(a, b, rtol=1e-05, atol=1e-08):
  a, b = _promote_args("isclose", asarray(a), asarray(b))
  dtype = _dtype(a)
  if issubdtype(dtype, inexact):
    if issubdtype(dtype, complexfloating):
      # Tolerances are real-valued even for complex inputs.
      dtype = _result_dtype(real, a)
    rtol = lax.convert_element_type(rtol, dtype)
    atol = lax.convert_element_type(atol, dtype)
    # |a - b| <= atol + rtol * |b|, as in NumPy.
    out = lax.le(
      lax.abs(lax.sub(a, b)),
      lax.add(atol, lax.mul(rtol, lax.abs(b))))
    return _maybe_numpy_1_13_isclose_behavior(a, out)
  else:
    # Exact comparison for integer/bool dtypes.
    return lax.eq(a, b)
# Emulate a NumPy < 1.14 quirk where a scalar complex isclose result was
# returned as a 1-element array.
numpy_version = tuple(map(int, onp.version.version.split('.')[:2]))
if numpy_version < (1, 14):
  # see discussion at https://github.com/numpy/numpy/pull/9720
  def _maybe_numpy_1_13_isclose_behavior(a, out):
    if size(out) == 1 and issubdtype(_dtype(a), complexfloating):
      return lax.reshape(out, (1,))
    else:
      return out
else:
  def _maybe_numpy_1_13_isclose_behavior(a, out):
    return out
# The `jit` on `where` exists to avoid materializing constants in cases like
# `np.where(np.zeros(1000), 7, 4)`. In op-by-op mode, we don't want to
# materialize the broadcast forms of scalar arguments.
@_wraps(onp.where, update_doc=False)
@jit
def where(condition, x=None, y=None):
  if x is None or y is None:
    raise ValueError("Must use the three-argument form of where().")
  # Non-boolean conditions are interpreted as "nonzero", as in NumPy.
  if not issubdtype(_dtype(condition), onp.bool_):
    condition = lax.ne(condition, zeros_like(condition))
  condition, x, y = broadcast_arrays(condition, x, y)
  if not onp.size(x):
    # Zero-size result: skip lax.select, but still apply dtype promotion.
    empty, _ = _promote_dtypes(x, y)
    return empty
  else:
    return lax.select(condition, *_promote_dtypes(x, y))
@_wraps(onp.select)
def select(condlist, choicelist, default=0):
  if len(condlist) != len(choicelist):
    msg = "condlist must have length equal to choicelist ({} vs {})"
    raise ValueError(msg.format(len(condlist), len(choicelist)))
  if len(condlist) == 0:
    raise ValueError("condlist must be non-empty")
  # Fold back-to-front so that earlier conditions take precedence.
  result = default
  for cond, choice in zip(reversed(condlist), reversed(choicelist)):
    result = where(cond, choice, result)
  return result
def broadcast_arrays(*args):
  """Like Numpy's broadcast_arrays but doesn't return views."""
  shapes = [shape(arg) for arg in args]
  if len(set(shapes)) == 1:
    # Already a common shape: just coerce non-array inputs to arrays.
    return [arg if isinstance(arg, ndarray) or isscalar(arg) else array(arg)
            for arg in args]
  result_shape = lax.broadcast_shapes(*shapes)
  return [broadcast_to(arg, result_shape) for arg in args]
def broadcast_to(arr, shape):
  """Like Numpy's broadcast_to but doesn't necessarily return views."""
  arr = arr if isinstance(arr, ndarray) or isscalar(arr) else array(arr)
  shape = tuple(map(int, shape))  # check that shape is concrete
  arr_shape = _shape(arr)
  if arr_shape == shape:
    return arr
  else:
    # Each trailing input dim must equal the target dim or be 1.
    nlead = len(shape) - len(arr_shape)
    compatible = onp.equal(arr_shape, shape[nlead:]) | onp.equal(arr_shape, 1)
    if nlead < 0 or not onp.all(compatible):
      msg = "Incompatible shapes for broadcasting: {} and requested shape {}"
      raise ValueError(msg.format(arr_shape, shape))
    # Squeeze out the size-1 dims being broadcast, then broadcast_in_dim maps
    # the kept dims onto their positions in the target shape.
    diff, = onp.where(onp.not_equal(shape[nlead:], arr_shape))
    new_dims = tuple(range(nlead)) + tuple(nlead + diff)
    kept_dims = tuple(onp.delete(onp.arange(len(shape)), new_dims))
    return lax.broadcast_in_dim(squeeze(arr, diff), shape, kept_dims)
@_wraps(onp.split)
def split(ary, indices_or_sections, axis=0):
  # Let NumPy compute the sub-shapes on a zero-strided dummy (no data copy),
  # then slice the real array at the resulting boundaries.
  dummy_val = onp.broadcast_to(0, ary.shape)  # zero strides
  subarrays = onp.split(dummy_val, indices_or_sections, axis)  # shapes
  split_indices = onp.cumsum([0] + [onp.shape(sub)[axis] for sub in subarrays])
  starts, ends = [0] * ndim(ary), shape(ary)
  _subval = lambda x, i, v: lax.subvals(x, [(i, v)])
  return [lax.slice(ary, _subval(starts, axis, start), _subval(ends, axis, end))
          for start, end in zip(split_indices[:-1], split_indices[1:])]
def _split_on_axis(onp_fun, axis):
  # Specialize `split` to a fixed axis, wrapped under the NumPy name.
  @_wraps(onp_fun, update_doc=False)
  def f(ary, indices_or_sections):
    return split(ary, indices_or_sections, axis=axis)
  return f
vsplit = _split_on_axis(onp.vsplit, axis=0)
hsplit = _split_on_axis(onp.hsplit, axis=1)
dsplit = _split_on_axis(onp.dsplit, axis=2)
@_wraps(onp.clip)
def clip(a, a_min=None, a_max=None):
  """Clip values to the interval [a_min, a_max].

  At least one bound must be given; bounds are cast to a's dtype before
  comparison. Raises ValueError if both bounds are None.
  """
  if a_min is None and a_max is None:
    # Fix: the original raised a bare string, which is itself a TypeError in
    # Python 3 (only BaseException instances may be raised).
    raise ValueError("At most one of a_min and a_max may be None")
  if a_min is not None:
    if _dtype(a_min) != _dtype(a):
      a_min = lax.convert_element_type(a_min, _dtype(a))
    a = lax.max(a_min, a)
  if a_max is not None:
    if _dtype(a_max) != _dtype(a):
      a_max = lax.convert_element_type(a_max, _dtype(a))
    a = lax.min(a_max, a)
  return a
def _dtype_info(dtype):
  """Helper function for to get dtype info needed for clipping."""
  # iinfo for integers, finfo for floating-point types.
  if issubdtype(dtype, onp.integer):
    return onp.iinfo(dtype)
  return finfo(dtype)
def _round_to_nearest_even(x):
  """Round halves to the nearest even integer (banker's rounding)."""
  half = lax._const(x, 0.5)
  one = lax._const(x, 1)
  round_val = lax.floor(x)
  fraction = x - round_val
  # floor(x) - 2*floor(x/2) is the parity (0 or 1) of floor(x).
  nearest_even_int = lax.sub(
    round_val, lax.mul(lax._const(x, 2), lax.floor(lax.mul(half, x))))
  is_odd = lax.eq(nearest_even_int, one)
  # Round up when the fraction exceeds 1/2, or equals 1/2 with an odd floor.
  return lax.select(
    lax.bitwise_or(lax.gt(fraction, half),
                   lax.bitwise_and(lax.eq(fraction, half), is_odd)),
    lax.add(round_val, one), round_val)
@_wraps(onp.round, update_doc=False)
def round(a, decimals=0):
  dtype = _dtype(a)
  if issubdtype(dtype, integer):
    if decimals < 0:
      raise NotImplementedError(
        "integer np.round not implemented for decimals < 0")
    return a  # no-op on integer types

  def _round_float(x):
    if decimals == 0:
      return _round_to_nearest_even(x)

    # TODO(phawkins): the strategy of rescaling the value isn't necessarily a
    # good one since we may be left with an incorrectly rounded value at the
    # end due to precision problems. As a workaround for float16, convert to
    # float32,
    x = lax.convert_element_type(x, onp.float32) if dtype == onp.float16 else x
    factor = _constant_like(x, 10 ** decimals)
    out = lax.div(_round_to_nearest_even(lax.mul(x, factor)), factor)
    return lax.convert_element_type(out, dtype) if dtype == onp.float16 else out

  if issubdtype(dtype, complexfloating):
    # Round real and imaginary parts independently, as NumPy does.
    return lax.complex(_round_float(lax.real(a)), _round_float(lax.imag(a)))
  else:
    return _round_float(a)
around = round
@_wraps(onp.fix)
def fix(x, out=None):
  # Round toward zero: floor for non-negative values, ceil for negative ones.
  if out is not None:
    raise ValueError("fix does not support the `out` argument.")
  zero = lax._const(x, 0)
  return where(lax.ge(x, zero), lax.floor(x), lax.ceil(x))
@_wraps(onp.isfinite)
def isfinite(x):
  dtype = _dtype(x)
  if issubdtype(dtype, floating):
    return lax.is_finite(x)
  elif issubdtype(dtype, complexfloating):
    # Complex is finite iff both components are finite.
    return lax.bitwise_and(lax.is_finite(real(x)), lax.is_finite(imag(x)))
  else:
    # Integers and bools are always finite.
    return full_like(x, True, dtype=bool_)
@_wraps(onp.isinf)
def isinf(x):
  dtype = _dtype(x)
  if issubdtype(dtype, floating):
    # |x| == inf catches both +inf and -inf.
    return lax.eq(lax.abs(x), _constant_like(x, inf))
  elif issubdtype(dtype, complexfloating):
    # Complex is infinite if either component is infinite.
    re = lax.real(x)
    im = lax.imag(x)
    return lax.bitwise_or(lax.eq(lax.abs(re), _constant_like(re, inf)),
                          lax.eq(lax.abs(im), _constant_like(im, inf)))
  else:
    return full_like(x, False, dtype=bool_)
def _isposneginf(infinity, x):
  """Shared implementation of isposinf/isneginf for a fixed signed infinity."""
  dtype = _dtype(x)
  if issubdtype(dtype, floating):
    return lax.eq(x, _constant_like(x, infinity))
  elif issubdtype(dtype, complexfloating):
    raise ValueError("isposinf/isneginf are not well defined for complex types")
  else:
    return full_like(x, False, dtype=bool_)
isposinf = _wraps(onp.isposinf)(partial(_isposneginf, inf))
isneginf = _wraps(onp.isneginf)(partial(_isposneginf, -inf))
@_wraps(onp.isnan)
def isnan(x):
  # NaN is exactly the set of values that are neither finite nor infinite
  # (De Morgan form of "not finite and not inf").
  finite_or_inf = lax.bitwise_or(isfinite(x), isinf(x))
  return lax.bitwise_not(finite_or_inf)
@_wraps(onp.nan_to_num)
def nan_to_num(x, copy=True):
  # `copy` is accepted for API compatibility; JAX arrays are immutable so it
  # has no effect.
  del copy
  dtype = _dtype(x)
  if issubdtype(dtype, complexfloating):
    return lax.complex(nan_to_num(lax.real(x)), nan_to_num(lax.imag(x)))
  info = finfo(xla_bridge.canonicalize_dtype(dtype))
  # NaN -> 0, +inf -> largest finite, -inf -> smallest finite, as in NumPy.
  x = where(isnan(x), _constant_like(x, 0), x)
  x = where(isposinf(x), _constant_like(x, info.max), x)
  x = where(isneginf(x), _constant_like(x, info.min), x)
  return x
### Reducers
def _make_reduction(np_fun, op, init_val, preproc=None):
"""Creates reduction function given a binary operation and monoid identity."""
@_wraps(np_fun)
def reduction(a, axis=None, dtype=None, out=None, keepdims=False):
if out is not None:
raise ValueError("reduction does not support the `out` argument.")
a = a if isinstance(a, ndarray) else asarray(a)
a = preproc(a) if preproc else a
dims = _reduction_dims(a, axis)
result_dtype = _dtype(np_fun(onp.ones((), dtype=dtype or _dtype(a))))
if _dtype(a) != result_dtype:
a = lax.convert_element_type(a, result_dtype)
result = lax.reduce(a, _reduction_init_val(a, init_val), op, dims)
if keepdims:
shape_with_singletons = lax.subvals(shape(a), zip(dims, (1,) * len(dims)))
result = lax.reshape(result, shape_with_singletons)
if dtype and onp.dtype(dtype) != onp.dtype(result_dtype):
result = lax.convert_element_type(result, dtype)
return result
return reduction
def _reduction_dims(a, axis):
  """Normalize an `axis` argument into a tuple/array of canonical dimensions."""
  nd = ndim(a)
  if axis is None:
    # Reduce over every dimension.
    return onp.arange(nd)
  if isinstance(axis, (onp.ndarray, tuple, list)):
    return tuple(_canonicalize_axis(x, nd) for x in axis)
  if isinstance(axis, int):
    return (_canonicalize_axis(axis, nd),)
  raise TypeError("Unexpected type of axis argument: {}".format(type(axis)))
def _reduction_init_val(a, init_val):
  """Return `init_val` cast to `a`'s dtype, clamping overflowing int values."""
  a_dtype = xla_bridge.canonicalize_dtype(_dtype(a))
  if a_dtype == 'bool':
    return onp.array(init_val > 0, dtype=a_dtype)
  try:
    return onp.array(init_val, dtype=a_dtype)
  except OverflowError:
    # +/-inf cannot be represented by an integer dtype; clamp to its range.
    assert issubdtype(a_dtype, onp.integer)
    sign, iinfo = onp.sign(init_val), onp.iinfo(a_dtype)
    return onp.array(iinfo.min if sign < 0 else iinfo.max, dtype=a_dtype)
# Preprocessor for logical reductions: cast elements to bool first.
_cast_to_bool = partial(lax.convert_element_type, new_dtype=onp.bool_)
# Monoid reductions, each defined by a binary lax op and its identity element.
sum = _make_reduction(onp.sum, lax.add, 0)
product = prod = _make_reduction(onp.prod, lax.mul, 1)
amax = max = _make_reduction(onp.max, lax.max, -onp.inf)
amin = min = _make_reduction(onp.min, lax.min, onp.inf)
all = alltrue = _make_reduction(onp.all, lax.bitwise_and, True, _cast_to_bool)
any = sometrue = _make_reduction(onp.any, lax.bitwise_or, False, _cast_to_bool)
@_wraps(onp.mean)
def mean(a, axis=None, dtype=None, out=None, keepdims=False):
  """Arithmetic mean: sum over `axis` divided by the participating count."""
  if out is not None:
    raise ValueError("mean does not support the `out` argument.")
  # Number of elements contributing to each output value.
  if axis is None:
    normalizer = size(a)
  else:
    normalizer = onp.prod(onp.take(shape(a), axis))
  if dtype is None:
    a_dtype = _dtype(a)
    if issubdtype(a_dtype, onp.bool_) or issubdtype(a_dtype, onp.integer):
      # Match numpy: integer/bool inputs are averaged in floating point.
      dtype = xla_bridge.canonicalize_dtype(onp.float64)
    else:
      dtype = a_dtype
  total = sum(a, axis, dtype=dtype, keepdims=keepdims)
  return lax.div(total, lax.convert_element_type(normalizer, dtype))
@_wraps(onp.average)
def average(a, axis=None, weights=None, returned=False):
  """Weighted average along `axis`; with `returned`, also the weight sum."""
  a = asarray(a)
  if weights is None: # Treat all weights as 1
    avg = mean(a, axis=axis)
    if axis is None:
      weights_sum = full((), size(a), dtype=avg.dtype)
    else:
      weights_sum = full_like(avg, a.shape[axis], dtype=avg.dtype)
  else:
    weights = asarray(weights)
    # Promote to a floating result dtype for integer/bool inputs (numpy-compat).
    if issubdtype(a.dtype, integer) or issubdtype(a.dtype, bool_):
      out_dtype = xla_bridge.canonicalize_dtype(result_type(a.dtype,
                                                            weights.dtype,
                                                            floating))
    else:
      out_dtype = xla_bridge.canonicalize_dtype(result_type(a.dtype, weights.dtype))
    a_shape = shape(a)
    a_ndim = len(a_shape)
    weights_shape = shape(weights)
    axis = None if axis is None else _canonicalize_axis(axis, a_ndim)
    if a_shape != weights_shape:
      # Make sure the dimensions work out: mismatched weights must be 1D and
      # match the length of the reduced axis.
      if axis is None:
        raise ValueError("Axis must be specified when shapes of a and "
                         "weights differ.")
      if len(weights_shape) != 1:
        raise ValueError("1D weights expected when shapes of a and "
                         "weights differ.")
      if weights_shape[0] != a_shape[axis]:
        raise ValueError("Length of weights not "
                         "compatible with specified axis.")
      # Broadcast the 1D weights up to a's rank, aligned with `axis`.
      weights = broadcast_to(weights, (a_ndim - 1) * (1,) + weights_shape)
      weights = moveaxis(weights, -1, axis)
    weights_sum = sum(weights, axis=axis, dtype=out_dtype)
    avg = sum(multiply(a, weights), axis=axis, dtype=out_dtype) / weights_sum
  if returned:
    if avg.shape != weights_sum.shape:
      weights_sum = broadcast_to(weights_sum, avg.shape)
    return avg, weights_sum
  return avg
@_wraps(onp.var)
def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
  """Variance along `axis`: mean of squared deviations, with `ddof` correction."""
  if out is not None:
    raise ValueError("var does not support the `out` argument.")
  if dtype is None:
    if (issubdtype(_dtype(a), onp.bool_) or
        issubdtype(_dtype(a), onp.integer)):
      # Integer/bool inputs accumulate in floating point, like numpy.
      dtype = xla_bridge.canonicalize_dtype(onp.float64)
  centered = subtract(a, mean(a, axis, dtype=dtype, keepdims=True))
  if iscomplexobj(centered):
    # For complex input, variance uses |x - mean|^2.
    centered = lax.abs(centered)
  if axis is None:
    normalizer = size(a)
  else:
    normalizer = onp.prod(onp.take(shape(a), axis))
  # Delta degrees of freedom: divide by N - ddof.
  normalizer = normalizer - ddof
  result = sum(lax.mul(centered, centered), axis,
               dtype=dtype, keepdims=keepdims)
  return lax.div(result, lax.convert_element_type(normalizer, _dtype(result)))
@_wraps(onp.std)
def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
  """Standard deviation: square root of the variance."""
  if out is not None:
    raise ValueError("std does not support the `out` argument.")
  variance = var(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims)
  return sqrt(variance)
@_wraps(onp.ptp)
def ptp(a, axis=None, out=None, keepdims=False):
  """Peak-to-peak range: maximum minus minimum along `axis`."""
  if out is not None:
    raise ValueError("ptp does not support the `out` argument.")
  hi = amax(a, axis=axis, keepdims=keepdims)
  lo = amin(a, axis=axis, keepdims=keepdims)
  return lax.sub(hi, lo)
@_wraps(onp.allclose)
def allclose(a, b, rtol=1e-05, atol=1e-08):
  """True iff every element pair is within the given tolerances."""
  elementwise = isclose(a, b, rtol, atol)
  return all(elementwise)
@_wraps(onp.count_nonzero)
def count_nonzero(a, axis=None):
  """Count entries that differ from zero along `axis`."""
  nonzero_mask = lax.ne(a, _constant_like(a, 0))
  int_dtype = xla_bridge.canonicalize_dtype(onp.int_)
  return sum(nonzero_mask, axis=axis, dtype=int_dtype)
def _make_nan_reduction(onp_reduction, np_reduction, init_val, nan_if_all_nan):
  """Wrap a reduction so NaN entries are replaced by the identity first.

  With `nan_if_all_nan`, an all-NaN slice yields NaN (numpy nanmin/nanmax).
  """
  @_wraps(onp_reduction)
  def nan_reduction(a, axis=None, out=None, keepdims=False, **kwargs):
    # Substitute the reduction's identity for NaNs, then reduce normally.
    out = np_reduction(where(isnan(a), _reduction_init_val(a, init_val), a),
                       axis=axis, out=out, keepdims=keepdims, **kwargs)
    if nan_if_all_nan:
      return where(all(isnan(a), axis=axis, keepdims=keepdims),
                   _constant_like(a, nan), out)
    else:
      return out
  return nan_reduction
# NaN-ignoring reductions; only min/max propagate NaN for all-NaN slices.
nanmin = _make_nan_reduction(onp.nanmin, min, inf, nan_if_all_nan=True)
nanmax = _make_nan_reduction(onp.nanmax, max, -inf, nan_if_all_nan=True)
nansum = _make_nan_reduction(onp.nansum, sum, 0, nan_if_all_nan=False)
nanprod = _make_nan_reduction(onp.nanprod, prod, 1, nan_if_all_nan=False)
@_wraps(onp.nanmean)
def nanmean(a, axis=None, dtype=None, out=None, keepdims=False):
  """Mean along `axis`, ignoring NaN entries in both the sum and the count."""
  if out is not None:
    raise ValueError("nanmean does not support the `out` argument.")
  if (issubdtype(_dtype(a), onp.bool_) or
      issubdtype(_dtype(a), onp.integer)):
    # Integer/bool arrays cannot contain NaN; defer to plain mean.
    return mean(a, axis, dtype, out, keepdims)
  if dtype is None:
    dtype = _dtype(a)
  # Count only the non-NaN entries per output element.
  nan_mask = logical_not(isnan(a))
  normalizer = sum(nan_mask, axis=axis, dtype=int32, keepdims=keepdims)
  normalizer = lax.convert_element_type(normalizer, dtype)
  td = lax.div(nansum(a, axis, dtype=dtype, keepdims=keepdims), normalizer)
  return td
def _make_cumulative_reduction(onp_reduction, window_reduce, init_val,
                               squash_nan=False):
  """Build a cumulative reduction (cumsum etc.) from a lax reduce-window op."""
  # We want to allow XLA to fuse the pad and reduce-window operators to
  # avoid materializing the padded output.
  # Consider removing `jit` once again if reduce-window is generalized to
  # support arbitrary padding.
  @partial(jit, static_argnums=(1, 2))
  def _cumulative_reduction(a, axis, dtype):
    if axis is None or isscalar(a):
      # numpy semantics: no axis means operate on the flattened array.
      a = ravel(a)
      axis = 0
    a_shape = list(shape(a))
    num_dims = len(a_shape)
    if axis < 0:
      axis = axis + num_dims
    if axis < 0 or axis >= num_dims:
      raise ValueError(
          "axis {} is out of bounds for array of dimension {}".format(
              axis, num_dims))
    if squash_nan:
      # NaN-ignoring variants replace NaN with the identity element first.
      a = where(isnan(a), _constant_like(a, init_val), a)
    if dtype:
      a = lax.convert_element_type(a, dtype)
    if a_shape[axis] == 0:
      return a
    # Pad with n-1 identity elements on the low side so that a full-width
    # sliding window produces the cumulative result at each position.
    padding = [(0, 0, 0)] * num_dims
    padding[axis] = (a_shape[axis] - 1, 0, 0)
    a = lax.pad(a, _constant_like(a, init_val), padding)
    strides = [1] * num_dims
    window_dims = [1] * num_dims
    window_dims[axis] = a_shape[axis]
    return window_reduce(
       a, window_dims, strides, xla_client.PaddingType.VALID)
  @_wraps(onp_reduction)
  def cumulative_reduction(a, axis=None, dtype=None):
    # jit doesn't support kwargs as static_args.
    return _cumulative_reduction(a, axis, dtype)
  return cumulative_reduction
# Cumulative reductions built on reduce-window; nan* variants squash NaNs.
cumsum = _make_cumulative_reduction(
  onp.cumsum, lax._reduce_window_sum, 0, squash_nan=False)
cumprod = _make_cumulative_reduction(
  onp.cumprod, lax._reduce_window_prod, 1, squash_nan=False)
cumproduct = cumprod
nancumsum = _make_cumulative_reduction(
  onp.nancumsum, lax._reduce_window_sum, 0, squash_nan=True)
nancumprod = _make_cumulative_reduction(
  onp.nancumprod, lax._reduce_window_prod, 1, squash_nan=True)
### Array-creation functions
@partial(jit, static_argnums=(1, 2))
def _pad(array, pad_width, mode, constant_values):
  """Jitted implementation of np.pad for 'constant'/'symmetric'/'reflect'/'wrap'."""
  array = asarray(array)
  nd = ndim(array)
  # Normalize pad_width to shape (nd, 2): (before, after) per dimension.
  pad_width = onp.broadcast_to(onp.asarray(pad_width), (nd, 2))
  if any(pad_width < 0):
    raise ValueError("index can't contain negative values")
  if mode == "constant":
    constant_values = broadcast_to(asarray(constant_values), (nd, 2))
    constant_values = lax.convert_element_type(constant_values, array.dtype)
    # Pad one side of one dimension at a time with lax.pad.
    for i in xrange(nd):
      widths = [(0, 0, 0)] * nd
      widths[i] = (pad_width[i, 0], 0, 0)
      array = lax.pad(array, constant_values[i, 0], widths)
      widths[i] = (0, pad_width[i, 1], 0)
      array = lax.pad(array, constant_values[i, 1], widths)
    return array
  elif mode in ("symmetric", "reflect", "wrap"):
    for i in xrange(nd):
      if array.shape[i] == 0:
        if (pad_width[i, 0] > 0 or pad_width[i, 1] > 0):
          msg = "Cannot apply '{}' padding to empty axis"
          raise ValueError(msg.format(mode))
        continue
      n = array.shape[i]
      rarray = lax.rev(array, dimensions=(i,))
      # 'reflect' skips the edge element when mirroring (n > 1).
      offset = 1 if (mode == "reflect" and n > 1) else 0
      wrap_mode = mode == "wrap"
      def build_padding(padding, forward):
        # Collect slices covering `padding` elements, alternating direction
        # for symmetric/reflect; 'wrap' keeps one direction throughout.
        xs = []
        delta = n - offset
        while padding > delta:
          padding -= delta
          p = array if forward else rarray
          xs.append(lax.slice_in_dim(p, offset, n, axis=i))
          if not wrap_mode:
            forward = not forward
        if padding > 0:
          x = lax.slice_in_dim(array if forward else rarray, offset,
                               padding + offset, axis=i)
          xs.append(x)
        return xs
      parts = reversed(build_padding(pad_width[i, 0], forward=not wrap_mode))
      parts = [lax.rev(x, dimensions=(i,)) for x in parts]
      parts += [array]
      parts += build_padding(pad_width[i, 1], forward=wrap_mode)
      array = lax.concatenate(parts, dimension=i)
    return array
  else:
    msg = "Unimplemented padding mode '{}' for np.pad."
    raise NotImplementedError(msg.format(mode))
@_wraps(onp.pad)
def pad(array, pad_width, mode='constant', constant_values=0):
  # Thin wrapper; the jitted implementation lives in _pad.
  return _pad(array, pad_width, mode, constant_values)
@_wraps(onp.stack)
def stack(arrays, axis=0):
  """Join same-shaped arrays along a new axis inserted at position `axis`."""
  if not len(arrays):
    raise ValueError("Need at least one array to stack.")
  shape0 = shape(arrays[0])
  axis = _canonicalize_axis(axis, len(shape0) + 1)
  # Give every input a length-1 dimension at `axis`, then concatenate there.
  expanded_shape = list(shape0)
  expanded_shape.insert(axis, 1)
  reshaped = []
  for arr in arrays:
    if shape(arr) != shape0:
      raise ValueError("All input arrays must have the same shape.")
    reshaped.append(reshape(arr, expanded_shape))
  return concatenate(reshaped, axis=axis)
@_wraps(onp.tile)
def tile(a, reps):
  """Repeat `a` along each axis the number of times given by `reps`."""
  if isinstance(reps, int):
    reps = (reps,)
  else:
    reps = tuple(reps)
  # Align ranks: left-pad a's shape and reps with 1s to a common length.
  a = reshape(a, (1,) * (len(reps) - ndim(a)) + shape(a))
  reps = (1,) * (ndim(a) - len(reps)) + reps
  for axis_index, rep in enumerate(reps):
    a = concatenate([a] * int(rep), axis=axis_index)
  return a
@_wraps(onp.concatenate)
def concatenate(arrays, axis=0):
  """Join arrays along an existing axis after dtype promotion."""
  if not len(arrays):
    raise ValueError("Need at least one array to concatenate.")
  if ndim(arrays[0]) == 0:
    raise ValueError("Zero-dimensional arrays cannot be concatenated.")
  axis = _canonicalize_axis(axis, ndim(arrays[0]))
  arrays = _promote_dtypes(*arrays)
  # lax.concatenate compiles slowly for very wide concatenations, so reduce
  # in groups of at most 16 per level, forming a tree — especially helpful
  # in op-by-op mode (https://github.com/google/jax/issues/653).
  group = 16
  while len(arrays) > 1:
    arrays = [lax.concatenate(arrays[start:start + group], axis)
              for start in range(0, len(arrays), group)]
  return arrays[0]
@_wraps(onp.vstack)
def vstack(tup):
  """Stack arrays row-wise, promoting inputs to at least 2D first."""
  rows = [atleast_2d(m) for m in tup]
  return concatenate(rows, axis=0)
# numpy alias: row_stack is simply another name for vstack.
row_stack = vstack
@_wraps(onp.hstack)
def hstack(tup):
  """Stack arrays horizontally: along axis 0 for 1D inputs, axis 1 otherwise."""
  arrs = [atleast_1d(m) for m in tup]
  join_axis = 0 if arrs[0].ndim == 1 else 1
  return concatenate(arrs, join_axis)
@_wraps(onp.dstack)
def dstack(tup):
  """Stack arrays depth-wise (along the third axis)."""
  slabs = [atleast_3d(m) for m in tup]
  return concatenate(slabs, axis=2)
@_wraps(onp.column_stack)
def column_stack(tup):
  """Stack 1D arrays as columns of a 2D array; 2D+ inputs pass through."""
  columns = []
  for v in tup:
    col = array(v)
    if col.ndim < 2:
      # Scalars and vectors become single columns.
      col = col.reshape((-1, 1))
    columns.append(col)
  return concatenate(columns, 1)
@_wraps(onp.atleast_1d, update_doc=False)
def atleast_1d(*arys):
  """Promote each input to an array with at least one dimension."""
  if len(arys) != 1:
    return [atleast_1d(arr) for arr in arys]
  arr = array(arys[0])
  # Scalars become length-1 vectors.
  return reshape(arr, -1) if ndim(arr) < 1 else arr
@_wraps(onp.atleast_2d, update_doc=False)
def atleast_2d(*arys):
  """Promote each input to an array with at least two dimensions."""
  if len(arys) != 1:
    return [atleast_2d(arr) for arr in arys]
  arr = array(arys[0])
  # Scalars and vectors become single rows.
  return reshape(arr, (1, -1)) if ndim(arr) < 2 else arr
@_wraps(onp.atleast_3d, update_doc=False)
def atleast_3d(*arys):
  """Promote each input to an array with at least three dimensions."""
  if len(arys) != 1:
    return [atleast_3d(arr) for arr in arys]
  arr = array(arys[0])
  nd = ndim(arr)
  if nd <= 1:
    # Scalars/vectors become shape (1, N, 1), matching numpy.
    arr = reshape(arr, (1, -1, 1))
  elif nd == 2:
    # Matrices get a trailing singleton axis.
    arr = reshape(arr, shape(arr) + (1,))
  return arr
@_wraps(onp.array)
def array(object, dtype=None, copy=True, order="K", ndmin=0):
  """Create a device array, dispatching on the type of `object`."""
  if order is not None and order != "K":
    raise NotImplementedError("Only implemented for order='K'")
  lax._check_user_dtype_supported(dtype, "array")
  if isinstance(object, ndarray):
    # Already a JAX array: convert dtype if asked, otherwise just device_put.
    if dtype and _dtype(object) != xla_bridge.canonicalize_dtype(dtype):
      out = lax.convert_element_type(object, dtype)
    else:
      out = device_put(object)
  elif hasattr(object, '__array__'):
    # this case is for duck-typed handling of objects that implement `__array__`
    out = array(object.__array__(), dtype and xla_bridge.canonicalize_dtype(dtype))
  elif isinstance(object, (list, tuple)):
    # Recursively convert elements and stack them.
    if object:
      out = stack([array(elt, dtype=dtype) for elt in object])
    else:
      out = onp.array([], dtype)
  elif isscalar(object):
    out = lax.reshape(object, ())
    if dtype and _dtype(out) != xla_bridge.canonicalize_dtype(dtype):
      out = lax.convert_element_type(out, dtype)
  else:
    # Last resort: try the buffer protocol (e.g. memoryview, bytes).
    try:
      view = memoryview(object)
    except TypeError:
      pass # `object` does not support the buffer interface.
    else:
      return array(onp.asarray(view), dtype, copy)
    raise TypeError("Unexpected input type for array: {}".format(type(object)))
  if ndmin > ndim(out):
    # Left-pad the shape with singleton dims up to the requested rank.
    out = lax.reshape(out, (1,) * (ndmin - ndim(out)) + shape(out))
  return out
@_wraps(onp.asarray)
def asarray(a, dtype=None, order=None):
  # Like array(), but never forces a copy (copy=False).
  lax._check_user_dtype_supported(dtype, "asarray")
  return array(a, dtype=dtype, copy=False, order=order)
@_wraps(onp.zeros_like)
def zeros_like(x, dtype=None):
  # Zero-filled array with x's shape (and dtype, unless overridden).
  lax._check_user_dtype_supported(dtype, "zeros_like")
  return lax.full_like(x, 0, dtype)
@_wraps(onp.ones_like)
def ones_like(x, dtype=None):
  # One-filled array with x's shape (and dtype, unless overridden).
  lax._check_user_dtype_supported(dtype, "ones_like")
  return lax.full_like(x, 1, dtype)
@_wraps(onp.full)
def full(shape, fill_value, dtype=None):
  # Array of the given shape filled with fill_value.
  lax._check_user_dtype_supported(dtype, "full")
  return lax.full(shape, fill_value, dtype)
@_wraps(onp.full_like)
def full_like(a, fill_value, dtype=None):
  # Array shaped like `a`, filled with fill_value.
  lax._check_user_dtype_supported(dtype, "full_like")
  return lax.full_like(a, fill_value, dtype)
@_wraps(onp.zeros)
def zeros(shape, dtype=None):
  """Return a new array of the given shape filled with zeros."""
  if isinstance(shape, types.GeneratorType):
    raise TypeError("expected sequence object with len >= 0 or a single integer")
  lax._check_user_dtype_supported(dtype, "zeros")
  if dtype is None:
    dtype = onp.dtype("float64")
  if onp.isscalar(shape):
    shape = (shape,)
  return lax.full(shape, 0, dtype)
@_wraps(onp.ones)
def ones(shape, dtype=None):
  """Return a new array of the given shape filled with ones."""
  if isinstance(shape, types.GeneratorType):
    raise TypeError("expected sequence object with len >= 0 or a single integer")
  lax._check_user_dtype_supported(dtype, "ones")
  if dtype is None:
    dtype = onp.dtype("float64")
  if onp.isscalar(shape):
    shape = (shape,)
  return lax.full(shape, 1, dtype)
@_wraps(onp.array_equal)
def array_equal(a1, a2):
  """True iff the inputs have the same shape and all elements compare equal."""
  try:
    a1, a2 = asarray(a1), asarray(a2)
  except Exception:
    # numpy-compat: inputs that cannot be converted to arrays are unequal.
    return False
  return shape(a1) == shape(a2) and all(asarray(a1 == a2))
# We can't create uninitialized arrays in XLA; use zeros for empty.
# (empty/empty_like therefore cost the same as zeros/zeros_like.)
empty_like = zeros_like
empty = zeros
@_wraps(onp.eye)
def eye(N, M=None, k=None, dtype=None):
  """N-by-M matrix with ones on the k-th diagonal and zeros elsewhere."""
  lax._check_user_dtype_supported(dtype, "eye")
  dtype = onp.dtype("float64") if dtype is None else dtype
  M = N if M is None else M
  if N < 0 or M < 0:
    msg = "negative dimensions are not allowed, got {} and {}"
    raise ValueError(msg.format(N, M))
  if k is None:
    # Main diagonal: lax has a dedicated primitive.
    return lax.broadcasted_eye(dtype, (N, M), (0, 1))
  else:
    k_dtype = _dtype(k)
    if not issubdtype(k_dtype, onp.integer):
      msg = "eye argument `k` must be of integer dtype, got {}"
      raise TypeError(msg.format(k_dtype))
    # Compare shifted row indices against column indices to mark diagonal k.
    rows = k + lax.broadcasted_iota(k_dtype, (N, M), 0)
    cols = lax.broadcasted_iota(k_dtype, (N, M), 1)
    return lax.convert_element_type(lax.eq(rows, cols), dtype)
@_wraps(onp.identity)
def identity(n, dtype=None):
  # n-by-n identity matrix; delegates to eye().
  lax._check_user_dtype_supported(dtype, "identity")
  return eye(n, dtype=dtype)
@_wraps(onp.arange)
def arange(start, stop=None, step=None, dtype=None):
  """Evenly spaced values within [start, stop), like numpy's arange."""
  lax._check_user_dtype_supported(dtype, "arange")
  # If called like np.arange(N), we create a lazy lax._IotaConstant.
  if stop is None and step is None:
    dtype = dtype or _dtype(start)
    if issubdtype(dtype, onp.integer):
      return lax.iota(dtype, start)  # avoids materializing
  # Fall back to instantiating an ndarray in host memory
  return onp.arange(start, stop=stop, step=step, dtype=dtype)
def _wrap_numpy_nullary_function(f):
  """Adapts `f` to return a DeviceArray instead of an onp.ndarray.

  `f` cannot have any non-static array arguments.
  """
  @_wraps(f, update_doc=False)
  def wrapper(*args, **kwargs):
    # Run the numpy function on the host, then move the result on-device.
    return asarray(f(*args, **kwargs))
  return wrapper
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None,
             axis=0):
  """Evenly spaced numbers over [start, stop]; computed on host via numpy."""
  lax._check_user_dtype_supported(dtype, "linspace")
  try:
    out = onp.linspace(start, stop, num, endpoint, retstep, dtype, axis)
    if retstep:
      # retstep=True returns (samples, step-size) — only samples go on-device.
      return asarray(out[0]), out[1]
    else:
      return asarray(out)
  except TypeError:  # Old versions of onp may lack axis arg.
    out = onp.linspace(start, stop, num, endpoint, retstep, dtype)
    if retstep:
      return moveaxis(asarray(out[0]), 0, axis), out[1]
    else:
      return moveaxis(asarray(out), 0, axis)
# Host-computed spacings, wrapped so results land on-device.
logspace = _wrap_numpy_nullary_function(onp.logspace)
geomspace = _wrap_numpy_nullary_function(onp.geomspace)
@_wraps(onp.meshgrid)
def meshgrid(*args, **kwargs):
  """Coordinate matrices from 1D coordinate vectors (numpy meshgrid)."""
  indexing = kwargs.get("indexing", "xy")
  sparse = kwargs.get("sparse", False)
  copy = kwargs.get("copy", True)
  if not copy:
    raise ValueError("jax.numpy.meshgrid only supports copy=True")
  args = list(args)
  if indexing == "xy":
    # 'xy' indexing is 'ij' with the first two arguments swapped.
    if len(args) >= 2:
      args[0], args[1] = args[1], args[0]
  elif indexing != "ij":
    raise ValueError("Valid values for indexing are 'xy' and 'ij', got {}"
                     .format(indexing))
  shape = []
  for i, a in enumerate(args):
    args[i] = a = asarray(a)
    if len(a.shape) != 1:
      msg = "Arguments to jax.numpy.meshgrid must be 1D, got shape {}"
      raise ValueError(msg.format(a.shape))
    # sparse=True keeps singleton dimensions except along each array's own axis.
    shape.append(1 if sparse else a.shape[0])
  output = []
  for i, a in enumerate(args):
    a = asarray(a)
    s = shape
    if sparse:
      s = list(s)
      s[i] = a.shape[0]
    output.append(lax.broadcast_in_dim(a, s, (i,)))
  if indexing == "xy" and len(args) >= 2:
    # Undo the earlier swap so outputs line up with the original argument order.
    output[0], output[1] = output[1], output[0]
  return output
@_wraps(onp.ix_)
def ix_(*args):
  """Build an open mesh from 1D sequences, for cross-product indexing."""
  n = len(args)
  output = []
  for i, a in enumerate(args):
    a = asarray(a)
    if len(a.shape) != 1:
      msg = "Arguments to jax.numpy.ix_ must be 1-dimensional, got shape {}"
      raise ValueError(msg.format(a.shape))
    if _dtype(a) == bool_:
      raise NotImplementedError(
          "Boolean arguments to jax.numpy.ix_ are not implemented")
    # Each output keeps its data along axis i and is singleton elsewhere.
    shape = [1] * n
    shape[i] = a.shape[0]
    if a.size == 0:
      # Numpy uses an integer index type for empty arrays.
      output.append(lax.full(shape, onp.zeros((), onp.intp)))
    else:
      output.append(lax.reshape(a, shape))
  return tuple(output)
def _repeat_scalar(a, repeats, axis=None):
  """np.repeat specialized to a scalar `repeats`, via broadcast + reshape."""
  if not isscalar(repeats):
    raise NotImplementedError(
        "_repeat_scalar implementation only supports scalar repeats")
  if axis is None or isscalar(a):
    # numpy semantics: no axis means repeat the flattened array.
    a = ravel(a)
    axis = 0
  a_shape = list(shape(a))
  num_dims = len(a_shape)
  if axis < 0:
    axis = axis + num_dims
  if axis < 0 or axis >= num_dims:
    raise ValueError(
        "axis {} is out of bounds for array of dimension {}".format(
            axis, num_dims))
  # Broadcasts to [..., X, repeats, ...] and reshapes to [..., X * repeats, ...]
  broadcast_shape = list(a_shape)
  broadcast_shape.insert(axis + 1, repeats)
  broadcast_dims = onp.concatenate((onp.arange(0, axis + 1),
                                    onp.arange(axis + 2, num_dims + 1)))
  a_shape[axis] *= repeats
  return lax.reshape(
      lax.broadcast_in_dim(a, broadcast_shape, broadcast_dims),
      a_shape)
@_wraps(onp.repeat)
def repeat(a, repeats, axis=None):
  '''Repeat elements of `a` along `axis`.

  :param repeats: int or array of ints; when an array, its length must match
      the dimension of `a` along `axis`.
  '''
  # use `_repeat_scalar` when possible
  if isscalar(repeats):
    return _repeat_scalar(a, repeats, axis)
  repeats_raveled = ravel(array(repeats)) # make sure it's jax's array type
  if size(repeats_raveled) == 1:
    return _repeat_scalar(a, list(repeats_raveled)[0], axis)
  if axis is None or isscalar(a):
    a = ravel(a)
    axis = 0
  # repeats must match the dimension along the requested axis
  a_shape = list(a.shape)
  n = a_shape[axis]
  if size(repeats_raveled) != n:
    raise ValueError("repeats shape {} does not match the dimension on axis {}".format(
        repeats_raveled.shape, n
    ))
  # calculating the new shape
  total = sum(repeats_raveled)
  new_shape = a_shape[:]
  new_shape[axis] = total
  a_flattened = ravel(a)
  '''
  main algorithm:
  first break down raveled input array into list of chunks; each chunk is the unit of repeat
  then tile the repeats to have same length as the list of chunks
  finally repeat each unit x number of times according to the tiled repeat list
  '''
  chunks = product(a_shape[:axis+1]).item()
  a_splitted = split(a_flattened, chunks)
  repeats_tiled = tile(repeats_raveled, chunks // len(repeats_raveled))
  ret = array([], dtype=a.dtype)
  # NOTE(review): the loop variable below shadows this function's own name;
  # harmless here (no recursion follows), but worth renaming in a cleanup.
  for i, repeat in enumerate(repeats_tiled):
    if not isinstance(repeat, int):
      repeat = repeat.item()
    ret = concatenate((ret, tile(a_splitted[i], repeat)))
  return reshape(ret, new_shape)
@_wraps(onp.tri)
def tri(N, M=None, k=0, dtype=None):
  """N-by-M matrix with ones at and below the k-th diagonal, zeros above."""
  lax._check_user_dtype_supported(dtype, "tri")
  M = M if M is not None else N
  dtype = dtype or float32
  # Compare (row index + k) >= column index to form the triangular mask.
  x = arange(N, dtype=int32)
  y = arange(M, dtype=int32)
  mask = lax.ge(
      (lax.broadcast_in_dim(x, shape=(N, M), broadcast_dimensions=(0,)) +
       int32(k)),
      lax.broadcast(y, [N]))
  return lax.convert_element_type(mask, dtype)
@_wraps(onp.tril)
def tril(m, k=0):
  """Zero out elements above the k-th diagonal of the last two axes."""
  m_shape = shape(m)
  if len(m_shape) < 2:
    raise ValueError("Argument to jax.numpy.tril must be at least 2D")
  # Lower-triangular boolean mask for the trailing 2D block.
  keep = tri(*m_shape[-2:], k=k, dtype=bool)
  keep = lax.broadcast(keep, m_shape[:-2])
  return lax.select(keep, m, zeros_like(m))
@_wraps(onp.triu, update_doc=False)
def triu(m, k=0):
  """Zero out elements below the k-th diagonal of the last two axes."""
  m_shape = shape(m)
  if len(m_shape) < 2:
    raise ValueError("Argument to jax.numpy.triu must be at least 2D")
  # Mask of the strictly-lower part: write zeros there, keep m elsewhere.
  drop = tri(*m_shape[-2:], k=k - 1, dtype=bool)
  drop = lax.broadcast(drop, m_shape[:-2])
  return lax.select(drop, zeros_like(m), m)
@_wraps(onp.trace)
def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):
  """Sum along the `offset` diagonal of the 2D sub-arrays given by axis1/axis2."""
  # Bug fix: test `out is not None` rather than truthiness. An array `out`
  # has ambiguous truth value, and every other reducer in this file uses the
  # same `is not None` check.
  if out is not None:
    raise NotImplementedError("The 'out' argument to trace is not supported.")
  lax._check_user_dtype_supported(dtype, "trace")
  axis1 = _canonicalize_axis(axis1, ndim(a))
  axis2 = _canonicalize_axis(axis2, ndim(a))
  a_shape = shape(a)
  if dtype is None:
    dtype = _dtype(a)
    if issubdtype(dtype, integer):
      # Match numpy: accumulate narrow integer inputs in the default int width.
      default_int = xla_bridge.canonicalize_dtype(onp.int_)
      if iinfo(dtype).bits < iinfo(default_int).bits:
        dtype = default_int
  # Move the two traced dimensions to the end.
  perm = [i for i in range(len(a_shape)) if i != axis1 and i != axis2]
  perm = perm + [axis1, axis2]
  a = lax.transpose(a, perm)
  # Mask out everything except the offset diagonal and reduce.
  a = where(eye(a_shape[axis1], a_shape[axis2], k=offset, dtype=bool),
            a, zeros_like(a))
  return sum(a, axis=(-2, -1), dtype=dtype)
def _wrap_indices_function(f):
  """Wrap a numpy index-generating function so each output becomes a JAX array."""
  @_wraps(f, update_doc=False)
  def wrapper(*args, **kwargs):
    return tuple(asarray(x) for x in f(*args, **kwargs))
  return wrapper
# Index-tuple helpers: computed on host by numpy, converted to device arrays.
diag_indices = _wrap_indices_function(onp.diag_indices)
tril_indices = _wrap_indices_function(onp.tril_indices)
triu_indices = _wrap_indices_function(onp.triu_indices)
mask_indices = _wrap_indices_function(onp.mask_indices)
@_wraps(onp.diagonal)
def diagonal(a, offset=0, axis1=0, axis2=1):
  """Extract the `offset` diagonal of the 2D sub-arrays given by axis1/axis2."""
  a_shape = shape(a)
  a_ndims = len(a_shape)
  # Move the two dimensions to the end.
  axis1 = _canonicalize_axis(axis1, a_ndims)
  axis2 = _canonicalize_axis(axis2, a_ndims)
  perm = [i for i in range(a_ndims) if i != axis1 and i != axis2]
  perm = perm + [axis1, axis2]
  a = lax.transpose(a, perm)
  # Mask out the diagonal and reduce over one of the axes
  a = where(eye(a_shape[axis1], a_shape[axis2], k=offset, dtype=bool),
            a, zeros_like(a))
  reduce_axis = -2 if offset < 0 else -1
  d = sum(a, axis=reduce_axis, dtype=_dtype(a))
  # Slice out the correct diagonal size.
  diag_size = _max(0, _min(a_shape[axis1] + _min(offset, 0),
                           a_shape[axis2] - _max(offset, 0)))
  return lax.slice_in_dim(d, 0, diag_size, axis=-1)
@_wraps(onp.diag)
def diag(v, k=0):
  """1D input: build a matrix with `v` on diagonal k. 2D input: extract it."""
  v_shape = shape(v)
  if len(v_shape) == 1:
    zero = lambda x: lax.full_like(x, shape=(), fill_value=0)
    n = v_shape[0] + _abs(k)
    # Pad v so its entries align with diagonal k of an n-by-n matrix.
    v = lax.pad(v, zero(v), ((_max(0, k), _max(0, -k), 0),))
    return where(eye(n, k=k, dtype=bool), v, zeros_like(v))
  elif len(v_shape) == 2:
    return diagonal(v, offset=k)
  else:
    raise ValueError("diag input must be 1d or 2d")
@_wraps(onp.polyval)
def polyval(p, x):
  """Evaluate the polynomial with coefficients `p` at `x` (Horner's scheme)."""
  if isinstance(p, onp.poly1d):
    p = onp.asarray(p)
  # A poly1d `x` accumulates into a Python scalar seed, matching numpy.
  if isinstance(x, onp.poly1d):
    acc = 0
  else:
    acc = zeros_like(x)
  for idx in range(len(p)):
    acc = acc * x + p[idx]
  return acc
@_wraps(onp.append)
def append(arr, values, axis=None):
  """Append `values` to `arr` along `axis` (both flattened when axis=None)."""
  if axis is None:
    return concatenate([ravel(arr), ravel(values)], 0)
  return concatenate([arr, values], axis=axis)
### Tensor contraction operations
@_wraps(onp.dot)
def dot(a, b):  # pylint: disable=missing-docstring
  """numpy dot: scalar product, matrix product, or sum-product over last axes."""
  _check_arraylike("dot", a, b)
  a, b = _promote_dtypes(a, b)
  a_ndim, b_ndim = ndim(a), ndim(b)
  if a_ndim == 0 or b_ndim == 0:
    # Scalar operands: plain elementwise multiply.
    return lax.mul(a, b)
  if _max(a_ndim, b_ndim) <= 2:
    return lax.dot(a, b)
  # Higher-rank case: contract a's last axis with b's first (1D b) or
  # second-to-last (ND b) axis, per numpy's dot semantics.
  if b_ndim == 1:
    contract_dims = ((a_ndim - 1,), (0,))
  else:
    contract_dims = ((a_ndim - 1,), (b_ndim - 2,))
  batch_dims = ((), ())
  return lax.dot_general(a, b, (contract_dims, batch_dims))
@_wraps(onp.matmul)
def matmul(a, b):  # pylint: disable=missing-docstring
  """numpy matmul: batched matrix product with 1D promotion rules."""
  _check_arraylike("matmul", a, b)
  # 1D operands are temporarily promoted: a to a row, b to a column.
  a_is_vec, b_is_vec = (ndim(a) == 1), (ndim(b) == 1)
  a = lax.reshape(a, (1,) + shape(a)) if a_is_vec else a
  b = lax.reshape(b, shape(b) + (1,)) if b_is_vec else b
  a, b = _promote_dtypes(a, b)
  # Broadcast the leading (batch) dimensions against each other.
  batch_shape = lax.broadcast_shapes(shape(a)[:-2], shape(b)[:-2])
  a = broadcast_to(a, batch_shape + shape(a)[-2:])
  b = broadcast_to(b, batch_shape + shape(b)[-2:])
  batch_dims = tuple(range(len(batch_shape)))
  result = lax.dot_general(a, b, (((ndim(a) - 1,), (ndim(b) - 2,)),
                                  (batch_dims, batch_dims)))
  if a_is_vec or b_is_vec:
    # Squeeze out the dimensions that were added for 1D promotion.
    m, n = shape(result)[-2:]
    new_m = () if a_is_vec else (m,)
    new_n = () if b_is_vec else (n,)
    return lax.reshape(result, batch_shape + new_m + new_n)
  else:
    return result
@_wraps(onp.vdot)
def vdot(a, b):
  """Dot product of the flattened inputs, conjugating `a` when complex."""
  if issubdtype(_dtype(a), onp.complexfloating):
    a = conj(a)
  flat_a, flat_b = a.ravel(), b.ravel()
  return dot(flat_a, flat_b)
@_wraps(onp.tensordot)
def tensordot(a, b, axes=2):
  """Tensor contraction over the axes specified by `axes` (int or axis lists)."""
  _check_arraylike("tensordot", a, b)
  if not (ndim(a) >= 1 and ndim(b) >= 1):
    msg = "tensordot requires a.ndim and b.dim to be at least 1, got {} and {}."
    raise TypeError(msg.format(ndim(a), ndim(b)))
  if type(axes) is int:
    if axes == 0:
      # axes=0 is an outer product.
      a, b = _promote_dtypes(a, b)
      return lax.mul(lax.reshape(a, shape(a) + (1,) * ndim(b)),
                     lax.reshape(b, (1,) * ndim(a) + shape(b)))
    else:
      # Contract a's last `axes` dims against b's first `axes` dims by
      # collapsing each operand into a matrix and using a single dot.
      a, b = _promote_dtypes(a, b)
      a_reshape = lax.reshape(a, (_prod(a.shape[:-axes]), _prod(a.shape[-axes:])))
      b_reshape = lax.reshape(b, (_prod(b.shape[:axes]), _prod(b.shape[axes:])))
      out_reshape = lax.dot(a_reshape, b_reshape)
      return lax.reshape(out_reshape, a.shape[:-axes] + b.shape[axes:])
  elif type(axes) in (list, tuple) and len(axes) == 2:
    ax1, ax2 = axes
    if type(ax1) == type(ax2) == int:
      # Single axis pair: move them adjacent and contract once.
      a_transposed = moveaxis(a, ax1, -1) if ax1 != a.ndim - 1 else a
      b_transposed = moveaxis(b, ax2, 0) if ax2 != 0 else b
      return tensordot(a_transposed, b_transposed, 1)
    elif type(ax1) in (list, tuple) and type(ax2) in (list, tuple):
      if len(ax1) != len(ax2):
        msg = "tensordot requires axes lists to have equal length, got {} and {}."
        raise TypeError(msg.format(ax1, ax2))
      # Move the listed axes into contraction position and recurse.
      num_axes = len(ax1)
      a_transposed = moveaxis(a, ax1, tuple(range(a.ndim - num_axes, a.ndim)))
      b_transposed = moveaxis(b, ax2, tuple(range(num_axes)))
      return tensordot(a_transposed, b_transposed, num_axes)
  msg = ("tensordot axes argument must be an int, a pair of ints, or a pair of "
         "lists/tuples of ints.")
  raise TypeError(msg)
@_wraps(onp.einsum)
def einsum(*operands, **kwargs):
  """Einstein summation; contraction order is planned by opt_einsum."""
  optimize = kwargs.pop('optimize', 'auto')
  optimize = 'greedy' if optimize is True else optimize
  if kwargs:
    msg = 'invalid keyword arguments for einsum: {}'
    raise TypeError(msg.format(', '.join(kwargs)))
  # using einsum_call=True here is an internal api for opt_einsum
  operands, contractions = opt_einsum.contract_path(
      *operands, einsum_call=True, use_blas=True, optimize=optimize)
  # Keep only (operand indices, contracted names, einsum string) per step.
  contractions = tuple(data[:3] for data in contractions)
  return _einsum(operands, contractions)
@_wraps(onp.einsum_path)
def einsum_path(subscripts, *operands, **kwargs):
  """Expose opt_einsum's contraction-path planning, like numpy's einsum_path."""
  optimize = kwargs.pop('optimize', 'greedy')
  # using einsum_call=True here is an internal api for opt_einsum
  return opt_einsum.contract_path(subscripts, *operands, optimize=optimize)
@partial(jit, static_argnums=(1,))
def _einsum(operands, contractions):
  """Execute a pre-planned einsum: one unary/binary contraction per step."""
  operands = list(_promote_dtypes(*operands))
  # Local reduce-sum over explicit axes (shadows the module-level `sum`).
  sum = lambda x, axes: lax.reduce(x, onp.array(0, x.dtype), lax.add, axes)
  def sum_uniques(operand, names, uniques):
    # Sum out axes whose labels occur exactly once and are contracted.
    if uniques:
      axes = [names.index(name) for name in uniques]
      operand = sum(operand, axes)
      names = removechars(names, uniques)
    return operand, names
  def sum_repeats(operand, names, counts, keep_names):
    # Handle labels repeated within one operand (generalized diagonals).
    for name, count in counts.items():
      if count > 1:
        axes = [i for i, n in enumerate(names) if n == name]
        eye = lax.broadcasted_eye(operand.dtype, operand.shape, axes)
        if name not in keep_names:
          operand = sum(operand * eye, axes)
          names = names.replace(name, '')
        else:
          operand = sum(operand * eye, axes[:-1])
          names = names.replace(name, '', count - 1)
    return operand, names
  for operand_indices, contracted_names, einstr in contractions:
    input_str, result_names = einstr.split('->')
    input_names = input_str.split(',')
    # switch on the number of operands to be processed in this loop iteration.
    # every case here sets 'operand' and 'names'.
    if len(operand_indices) == 1:
      operand = operands.pop(operand_indices[0])
      names, = input_names
      counts = collections.Counter(names)
      # sum out unique contracted indices with a single reduce-sum
      uniques = [name for name in contracted_names if counts[name] == 1]
      operand, names = sum_uniques(operand, names, uniques)
      # for every repeated index, do a contraction against an identity matrix
      operand, names = sum_repeats(operand, names, counts, result_names)
    elif len(operand_indices) == 2:
      lhs, rhs = map(operands.pop, operand_indices)
      lhs_counts, rhs_counts = map(collections.Counter, input_names)
      lhs_names, rhs_names = input_names
      # sum out unique contracted indices in lhs and rhs
      lhs_uniques = [name for name in contracted_names
                     if lhs_counts[name] == 1 and rhs_counts[name] == 0]
      lhs, lhs_names = sum_uniques(lhs, lhs_names, lhs_uniques)
      rhs_uniques = [name for name in contracted_names
                     if rhs_counts[name] == 1 and lhs_counts[name] == 0]
      rhs, rhs_names = sum_uniques(rhs, rhs_names, rhs_uniques)
      # for every repeated index, contract against an identity matrix
      lhs, lhs_names = sum_repeats(lhs, lhs_names, lhs_counts,
                                   result_names + rhs_names)
      rhs, rhs_names = sum_repeats(rhs, rhs_names, rhs_counts,
                                   result_names + lhs_names)
      contracted_names = contracted_names & (set(lhs_names) | set(rhs_names))
      batch_names = (set(lhs_names) & set(rhs_names)) - contracted_names
      lhs_batch, rhs_batch = unzip2((lhs_names.find(n), rhs_names.find(n))
                                    for n in batch_names)
      # NOTE(mattjj): this can fail non-deterministically in python3, maybe
      # due to opt_einsum
      assert _all(name in lhs_names and name in rhs_names and
                  lhs.shape[lhs_names.index(name)] == rhs.shape[rhs_names.index(name)]
                  for name in contracted_names)
      # move batch dims to the front (required by lax.dot_general, and easier)
      batch_dims = tuple(range(len(batch_names)))
      if lhs_batch != rhs_batch or set(lhs_batch) != set(batch_dims):
        lhs = moveaxis(lhs, lhs_batch, batch_dims)
        lhs_names = _movechars(lhs_names, lhs_batch, batch_dims)
        rhs = moveaxis(rhs, rhs_batch, batch_dims)
        rhs_names = _movechars(rhs_names, rhs_batch, batch_dims)
        batch_names = ''.join(batch_names)
      else:
        batch_dims = tuple(lhs_batch)
        batch_names = ''.join(lhs_names[i] for i in range(len(lhs_names))
                              if i in batch_dims)
      if contracted_names:
        # contract using lax.dot_general
        lhs_cont, rhs_cont = unzip2((lhs_names.index(n), rhs_names.index(n))
                                    for n in contracted_names)
        operand = _dot_general(lhs, rhs, lhs_cont, rhs_cont, len(batch_dims))
        deleted_names = batch_names + ''.join(contracted_names)
        names = (batch_names + removechars(lhs_names, deleted_names)
                 + removechars(rhs_names, deleted_names))
      else:
        # no contraction, just a tensor product
        nbatch = len(batch_names)
        assert lhs.shape[:nbatch] == rhs.shape[:nbatch]
        names = batch_names + lhs_names[nbatch:] + rhs_names[nbatch:]
        lhs_shape = lhs.shape + (1,) * (rhs.ndim - nbatch)
        rhs_shape = rhs.shape[:nbatch] + (1,) * (lhs.ndim - nbatch) + rhs.shape[nbatch:]
        operand = lax.reshape(lhs, lhs_shape) * lax.reshape(rhs, rhs_shape)
    else:
      raise NotImplementedError # if this is actually reachable, open an issue!
    # the resulting 'operand' with axis labels 'names' should be a permutation
    # of the desired result
    assert len(names) == len(result_names) == len(set(names))
    assert set(names) == set(result_names)
    if names != result_names:
      perm = tuple([names.index(name) for name in result_names])
      operand = lax.transpose(operand, perm)
    operands.append(operand) # used in next iteration
  return operands[0]
def _dot_general(lhs, rhs, lhs_cont, rhs_cont, nbatch):
  """Helper for einsum contractions.

  Args:
    lhs, rhs: the two operands, with their `nbatch` batch dimensions leading.
    lhs_cont, rhs_cont: positions of the contracting dimensions in each operand.
    nbatch: number of leading batch dimensions shared by both operands.

  Returns:
    The contraction, with axes ordered (batch dims, lhs tensor dims, rhs
    tensor dims).
  """
  # lax.dot_general has some tight constraints on dimension_numbers that this
  # wrapper loosens via transposes and reshapes
  assert len(lhs_cont) == len(rhs_cont) > 0
  ncont = len(lhs_cont)
  # Number of "tensor product" (non-batch, non-contracting) dims per operand.
  lhs_ntensor = lhs.ndim - nbatch - ncont
  rhs_ntensor = rhs.ndim - nbatch - ncont
  batch_dims = tuple(range(nbatch))
  if ncont == 1 and 0 <= lhs_ntensor <= 1 and 0 <= rhs_ntensor <= 1:
    # Shapes already satisfy lax.dot_general's constraints; call it directly.
    dimension_numbers = [(lhs_cont, rhs_cont), (batch_dims, batch_dims)]
    return lax.dot_general(lhs, rhs, dimension_numbers)
  else:
    # move contracting dimensions to the end. lax.dot_general only allows one
    # contracting dimension, so if there's more than one we collapse them.
    if ncont > 1:
      lhs_cdims = tuple(range(lhs.ndim - ncont, lhs.ndim))
      lhs = moveaxis(lhs, lhs_cont, lhs_cdims)
      lhs = lhs.reshape(lhs.shape[:-ncont] + (-1,))

      rhs_cdims = tuple(range(rhs.ndim - ncont, rhs.ndim))
      rhs = moveaxis(rhs, rhs_cont, rhs_cdims)
      rhs = rhs.reshape(rhs.shape[:-ncont] + (-1,))
    else:
      lhs = moveaxis(lhs, lhs_cont[0], -1)
      rhs = moveaxis(rhs, rhs_cont[0], -1)

    # lax.dot_general only allows zero or one tensor product dims per operand,
    # so if there's more than one we collapse them.
    # Record the uncollapsed result shape so it can be restored afterwards.
    result_shape = lhs.shape[:nbatch] + lhs.shape[nbatch:-1] + rhs.shape[nbatch:-1]

    if lhs_ntensor > 1:
      lhs = lhs.reshape(lhs.shape[:nbatch] + (-1,) + lhs.shape[-1:])
    if rhs_ntensor > 1:
      rhs = rhs.reshape(rhs.shape[:nbatch] + (-1,) + rhs.shape[-1:])

    # After the moves/reshapes the single contracting dim is last in both.
    lhs_cont, rhs_cont = [lhs.ndim - 1], [rhs.ndim - 1]
    dimension_numbers = [(lhs_cont, rhs_cont), (batch_dims, batch_dims)]
    result = lax.dot_general(lhs, rhs, dimension_numbers)
    return lax.reshape(result, result_shape)
def _movechars(s, src, dst):
"""Helper for einsum string munging, like moveaxis on identifier strings."""
chars = [c for i, c in enumerate(s) if i not in src]
for i, j in sorted(zip(dst, src)):
chars.insert(i, s[j])
return ''.join(chars)
@_wraps(onp.inner)
def inner(a, b):
  # Scalar operands multiply elementwise; otherwise contract the last axes.
  if ndim(a) != 0 and ndim(b) != 0:
    return tensordot(a, b, (-1, -1))
  return a * b
@_wraps(onp.outer)
def outer(a, b, out=None):
  # Outer product of the raveled inputs via broadcasting.
  if out is not None:
    # BUG FIX: was `if out:`, which raises ValueError ("truth value of an
    # array ... is ambiguous") for a multi-element ndarray `out` argument
    # instead of the intended NotImplementedError.
    raise NotImplementedError("The 'out' argument to outer is not supported.")
  return ravel(a)[:, None] * ravel(b)
@_wraps(onp.cross)
def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):
  if axis is not None:
    # A single `axis` overrides all three per-operand axis arguments.
    axisa = axis
    axisb = axis
    axisc = axis

  a_ndims = len(shape(a))
  b_ndims = len(shape(b))
  axisa = _canonicalize_axis(axisa, a_ndims)
  axisb = _canonicalize_axis(axisb, b_ndims)
  # Move the vector-component axes to the end for uniform handling below.
  a = moveaxis(a, axisa, -1)
  b = moveaxis(b, axisb, -1)
  a_shape = shape(a)
  b_shape = shape(b)

  if a_shape[-1] not in (2, 3) or b_shape[-1] not in (2, 3):
    raise ValueError("Dimension must be either 2 or 3 for cross product")

  if a_shape[-1] == 2 and b_shape[-1] == 2:
    # 2D x 2D: the result is the scalar z-component.
    return a[..., 0] * b[..., 1] - a[..., 1] * b[..., 0]

  # Mixed 2D/3D: promote the 2D operand by appending a zero z-component.
  if a_shape[-1] == 2:
    a = concatenate((a, zeros(a_shape[:-1] + (1,), dtype=a.dtype)), axis=-1)
  elif b_shape[-1] == 2:
    b = concatenate((b, zeros(b_shape[:-1] + (1,), dtype=b.dtype)), axis=-1)

  a0 = a[..., 0]
  a1 = a[..., 1]
  a2 = a[..., 2]
  b0 = b[..., 0]
  b1 = b[..., 1]
  b2 = b[..., 2]

  # Standard 3D cross product, components stacked along a new leading axis.
  c = array([a1 * b2 - a2 * b1,
             a2 * b0 - a0 * b2,
             a0 * b1 - a1 * b0])

  c_ndims = len(shape(c))
  axisc = _canonicalize_axis(axisc, c_ndims)

  # Move the component axis to the requested output position.
  return moveaxis(c, 0, axisc)
@_wraps(onp.kron)
def kron(a, b):
  # Promote to a common dtype and pad the lower-rank operand with leading
  # length-1 dimensions so both operands have equal rank.
  a, b = _promote_dtypes(a, b)
  rank_diff = ndim(a) - ndim(b)
  if rank_diff < 0:
    a = reshape(a, (1,) * -rank_diff + shape(a))
  elif rank_diff > 0:
    b = reshape(b, (1,) * rank_diff + shape(b))
  # Interleave singleton axes so broadcasting multiplies every pair of
  # elements, then collapse each (d_a, d_b) axis pair into a single axis.
  a_expanded = reshape(a, [dim for d in shape(a) for dim in (d, 1)])
  b_expanded = reshape(b, [dim for d in shape(b) for dim in (1, d)])
  out_shape = tuple(onp.multiply(shape(a), shape(b)))
  return reshape(lax.mul(a_expanded, b_expanded), out_shape)
@_wraps(onp.vander)
def vander(x, N=None, increasing=False):
  # Vandermonde matrix: column j holds x**j (or the reversed powers when
  # `increasing` is False, matching NumPy's default).
  x = asarray(x)
  dtype = _dtype(x)
  if ndim(x) != 1:
    raise ValueError("x must be a one-dimensional array")
  x_shape = shape(x)
  # BUG FIX: `N = N or x_shape[0]` treated an explicit N=0 as "use the
  # default", yielding an (n, n) matrix where NumPy returns an (n, 0) one.
  N = x_shape[0] if N is None else N
  if N < 0:
    raise ValueError("N must be nonnegative")

  iota = lax.iota(dtype, N)
  if not increasing:
    # Reverse the exponents: highest power in the first column.
    iota = lax.sub(lax._const(iota, N - 1), iota)

  return power(x[..., None], iota)
### Misc
@_wraps(onp.argmax)
def argmax(a, axis=None):
  # With no axis, NumPy semantics operate over the flattened array.
  if axis is None:
    a, axis = ravel(a), 0
  return _argminmax(max, a, axis)
@_wraps(onp.argmin)
def argmin(a, axis=None):
  # With no axis, NumPy semantics operate over the flattened array.
  if axis is None:
    a, axis = ravel(a), 0
  return _argminmax(min, a, axis)
# TODO(mattjj): redo this lowering with a call to variadic lax.reduce
def _argminmax(op, a, axis):
  """Shared lowering for argmax/argmin.

  `op` is the matching reduction (`max` or `min`). The extremum's index is
  recovered by masking an iota of positions: positions where `a` attains the
  reduced value keep their index, all others get the index dtype's max, and
  the minimum surviving index is returned (first occurrence wins on ties).
  """
  shape = [1] * a.ndim
  shape[axis] = a.shape[axis]
  # tie_in keeps the iota inside the same traced computation as `a`.
  idxs = lax.tie_in(a, arange(a.shape[axis])).reshape(shape)
  maxval = onp.iinfo(xla_bridge.canonicalize_dtype(idxs.dtype)).max
  maxval = lax.tie_in(a, maxval)
  mask_idxs = where(lax._eq_meet(a, op(a, axis, keepdims=True)), idxs, maxval)
  return min(mask_idxs, axis)
@_wraps(onp.sort)
def sort(a, axis=-1, kind='quicksort', order=None):
  # Only the default kind is supported; other kinds are accepted but ignored.
  if kind != 'quicksort':
    warnings.warn("'kind' argument to sort is ignored.")
  if order is not None:
    raise ValueError("'order' argument to sort is not supported.")

  # axis=None sorts a flattened copy, matching NumPy.
  if axis is None:
    return lax.sort(a.ravel(), 0)
  return lax.sort(a, _canonicalize_axis(axis, ndim(a)))
@_wraps(onp.argsort)
def argsort(a, axis=-1, kind='quicksort', order=None):
  # Only the default kind is supported; other kinds are accepted but ignored.
  if kind != 'quicksort':
    warnings.warn("'kind' argument to argsort is ignored.")
  if order is not None:
    raise ValueError("'order' argument to argsort is not supported.")

  if axis is None:
    # Sort the flattened array and return indices into it.
    return argsort(a.ravel(), 0)

  # Sort (value, index) pairs along the axis; the co-sorted iota is exactly
  # the permutation that sorts `a`.
  canonical_axis = _canonicalize_axis(axis, ndim(a))
  iota = lax.broadcasted_iota(onp.int64, shape(a), canonical_axis)
  _, perm = lax.sort_key_val(a, iota, dimension=canonical_axis)
  return perm
@_wraps(onp.roll)
def roll(a, shift, axis=None):
  a = asarray(a)
  a_shape = shape(a)
  if axis is None:
    # Flatten, roll, and restore the original shape (NumPy semantics).
    return lax.reshape(roll(ravel(a), shift, axis=0), a_shape)

  a_ndim = len(a_shape)
  shift = asarray(shift)
  axis = onp.asarray(axis)
  # Broadcast shift against axis so a scalar pairs with every axis entry
  # (and vice versa).
  b_shape = lax.broadcast_shapes(shift.shape, axis.shape, (1,))
  if len(b_shape) != 1:
    msg = "'shift' and 'axis' arguments to roll must be scalars or 1D arrays"
    raise ValueError(msg)
  if b_shape[0] > a_ndim:
    raise ValueError("More shifts/axes than dimensions of input to roll.")

  for x, i in zip(broadcast_to(shift, b_shape),
                  onp.broadcast_to(axis, b_shape)):
    i = _canonicalize_axis(i, a_ndim)
    # `or 1` guards the modulus against a zero-length axis.
    x = remainder(x, (a_shape[i] or 1))
    # Rolling by x == slicing a window of length a_shape[i] starting at
    # a_shape[i] - x out of the array concatenated with itself along axis i.
    a = lax.concatenate((a, a), i)
    a = lax.dynamic_slice_in_dim(a, a_shape[i] - x, a_shape[i], axis=i)

  return a
@_wraps(onp.take)
def take(a, indices, axis=None, out=None, mode=None):
  # Take elements from an array along an axis, lowered to a single lax.gather.
  if out is not None:
    # BUG FIX: was `if out:`, which raises ValueError ("truth value of an
    # array ... is ambiguous") for a multi-element ndarray `out` argument
    # instead of the intended NotImplementedError.
    raise NotImplementedError("The 'out' argument to np.take is not supported.")

  a = asarray(a)
  indices = asarray(indices)

  if axis is None:
    # axis=None indexes into the flattened array, matching NumPy.
    a = ravel(a)
    axis = 0
  axis = _canonicalize_axis(axis, ndim(a))

  if mode == "raise":
    # TODO(phawkins): we have no way to report out of bounds errors yet.
    raise NotImplementedError("The 'raise' mode to np.take is not supported.")
  elif mode == "wrap":
    indices = mod(indices, _constant_like(indices, a.shape[axis]))
  elif mode != "clip" and mode is not None:
    raise ValueError("Invalid mode '{}' for np.take".format(mode))

  index_dims = len(shape(indices))
  slice_sizes = list(shape(a))
  slice_sizes[axis] = 1
  # One gathered slice per index; the index dims replace `axis` in the output.
  dnums = lax.GatherDimensionNumbers(
    offset_dims=tuple(
      list(range(axis)) +
      list(range(axis + index_dims, len(a.shape) + index_dims - 1))),
    collapsed_slice_dims=(axis,),
    start_index_map=(axis,))
  return lax.gather(a, indices[..., None], dimension_numbers=dnums,
                    slice_sizes=tuple(slice_sizes))
def _normalize_index(index, axis_size):
  """Normalizes an index value in the range [-N, N) to the range [0, N)."""
  # Negative indices wrap around by adding the axis size; the adjustment is
  # applied elementwise via lax.select so traced index arrays work too.
  return lax.select(
    lax.lt(index, _constant_like(index, 0)),
    lax.add(index, _constant_like(index, axis_size)),
    index)
@partial(jit, static_argnums=(2,))
def _take_along_axis(arr, indices, axis):
  """Implementation of take_along_axis, lowered to a single lax.gather.

  `axis` is static (compile-time) so the gather dimension numbers can be
  computed in Python.
  """
  if axis is None:
    if ndim(indices) != 1:
      msg = "take_along_axis indices must be 1D if axis=None, got shape {}"
      raise ValueError(msg.format(indices.shape))
    return take_along_axis(arr.ravel(), indices, 0)
  rank = ndim(arr)
  if rank != ndim(indices):
    msg = "indices and arr must have the same number of dimensions; {} vs. {}"
    raise ValueError(msg.format(ndim(indices), ndim(arr)))
  axis = _canonicalize_axis(axis, rank)

  def replace(tup, val):
    # Return `tup` with position `axis` replaced by `val`.
    lst = list(tup)
    lst[axis] = val
    return tuple(lst)

  # Broadcast arr and indices against each other on every axis except `axis`.
  bcast_shape = lax.broadcast_shapes(replace(arr.shape, 1), replace(indices.shape, 1))
  indices = broadcast_to(indices, replace(bcast_shape, indices.shape[axis]))
  arr = broadcast_to(arr, replace(bcast_shape, arr.shape[axis]))

  axis_size = arr.shape[axis]
  arr_shape = replace(arr.shape, 1)
  idx_shape = indices.shape
  out_shape = lax.broadcast_shapes(idx_shape, arr_shape)

  # Dimensions that need an explicit gather index (the target axis plus any
  # non-degenerate index dimension).
  index_dims = [i for i, idx in enumerate(idx_shape) if i == axis or idx != 1]

  gather_index_shape = tuple(onp.array(out_shape)[index_dims]) + (1,)

  gather_indices = []
  slice_sizes = []
  offset_dims = []
  start_index_map = []
  collapsed_slice_dims = []
  j = 0  # Number of index components emitted so far.
  for i in range(rank):
    if i == axis:
      # The user-supplied indices select along this dimension.
      indices = _normalize_index(indices, axis_size)
      gather_indices.append(lax.reshape(indices, gather_index_shape))
      slice_sizes.append(1)
      start_index_map.append(i)
      collapsed_slice_dims.append(i)
      j += 1
    elif idx_shape[i] != 1:
      # Non-degenerate batch dimension: index it with an identity iota.
      iota = lax.iota(_dtype(indices), out_shape[i])
      iota = lax.tie_in(arr, iota)
      iota = lax.broadcast_in_dim(iota, gather_index_shape, (j,))
      gather_indices.append(iota)
      slice_sizes.append(1)
      start_index_map.append(i)
      collapsed_slice_dims.append(i)
      j += 1
    else:
      # If idx_shape[i] == 1, we can just take the entirety of the arr's axis
      # and avoid forming an iota index.
      offset_dims.append(i)
      slice_sizes.append(arr_shape[i])

  gather_indices = lax.concatenate(gather_indices, dimension=j)
  dnums = lax.GatherDimensionNumbers(
    offset_dims=tuple(offset_dims),
    collapsed_slice_dims=tuple(collapsed_slice_dims),
    start_index_map=tuple(start_index_map))
  return lax.gather(arr, gather_indices, dnums, tuple(slice_sizes))
@_wraps(getattr(onp, "take_along_axis", None), update_doc=False)
def take_along_axis(arr, indices, axis):
  # Public wrapper around the jitted implementation (`axis` is static there).
  return _take_along_axis(arr, indices, axis)
### Indexing
def _rewriting_take(arr, idx):
  # Computes arr[idx].
  # All supported cases of indexing can be implemented as an XLA gather,
  # followed by an optional reverse and a reshape.
  arr = asarray(arr)
  # Split the index into hashable static parts (slices, Ellipsis) and dynamic
  # array parts so the gather implementation can be jit-compiled.
  treedef, static_idx, dynamic_idx = _split_index_for_jit(idx)
  return _gather(arr, treedef, static_idx, dynamic_idx)
# TODO(phawkins): re-enable jit after fixing excessive recompilation for
# slice indexes (e.g., slice(0, 5, None), slice(10, 15, None), etc.).
# @partial(jit, static_argnums=(1, 2))
def _gather(arr, treedef, static_idx, dynamic_idx):
  """Perform the gather described by a split index (see _split_index_for_jit)."""
  idx = _merge_static_and_dynamic_indices(treedef, static_idx, dynamic_idx)
  indexer = _index_to_gather(shape(arr), idx)  # shared with _scatter_update

  y = lax.gather(arr, indexer.gather_indices, indexer.dnums,
                 indexer.gather_slice_shape)

  # Reverses axes with negative strides.
  if indexer.reversed_y_dims:
    y = lax.rev(y, indexer.reversed_y_dims)

  # This adds np.newaxis/None dimensions.
  return lax.reshape(y, indexer.slice_shape)
# Parameters of a gather derived from an indexing expression. Also consumed
# by the scatter-based update path (see the scatter-oriented notes below).
_Indexer = collections.namedtuple("_Indexer", [
  # The expected shape of the slice output.
  "slice_shape",

  # The slice shape to pass to lax.gather().
  "gather_slice_shape",

  # The gather indices to use.
  "gather_indices",

  # A GatherDimensionNumbers object describing the gather to perform.
  "dnums",

  # Slice dimensions that have negative strides, and so must be reversed after
  # the gather.
  "reversed_y_dims",

  # For scatters, we must eliminate any axes created by `newaxis`, which
  # are the following dimensions, which must be of size 1. For gathers, we
  # simply reshape to `slice_shape` to introduce the new axes.
  "newaxis_dims",
])
def _split_index_for_jit(idx):
  """Splits indices into necessarily-static and dynamic parts.

  Used to pass indices into `jit`-ted function.
  """
  # Convert list indices to tuples in cases (deprecated by NumPy.)
  idx = _eliminate_deprecated_list_indexing(idx)

  # Expand any (concrete) boolean indices. We can then use advanced integer
  # indexing logic to handle them.
  idx = _expand_bool_indices(idx)

  leaves, treedef = pytree.flatten(idx)
  static = []
  dynamic = []
  for leaf in leaves:
    if leaf is Ellipsis:
      static.append(leaf)
      dynamic.append(None)
    elif isinstance(leaf, slice):
      # slice objects aren't hashable, so store their components instead.
      static.append((leaf.start, leaf.stop, leaf.step))
      dynamic.append(None)
    else:
      static.append(None)
      dynamic.append(leaf)
  return treedef, tuple(static), dynamic
def _merge_static_and_dynamic_indices(treedef, static_idx, dynamic_idx):
"""Recombines indices that were split by _split_index_for_jit."""
idx = []
for s, d in zip(static_idx, dynamic_idx):
if d is not None:
idx.append(d)
elif isinstance(s, tuple):
idx.append(slice(s[0], s[1], s[2]))
else:
idx.append(s)
return treedef.unflatten(idx)
def _int(aval):
  """True if `aval` is a rank-0 (scalar) integer abstract value."""
  is_scalar = not aval.shape
  return is_scalar and issubdtype(aval.dtype, onp.integer)
def _index_to_gather(x_shape, idx):
  """Translate an indexing expression into an _Indexer describing a lax.gather.

  Shared between gathers (`x[idx]`) and scatter-style updates; see the
  `_Indexer` namedtuple for the meaning of the returned fields.
  """
  # Remove ellipses and add trailing slice(None)s.
  idx = _canonicalize_tuple_index(len(x_shape), idx)

  # Check for advanced indexing:
  # https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing

  # Do the advanced indexing axes appear contiguously? If not, NumPy semantics
  # move the advanced axes to the front.
  advanced_axes_are_contiguous = False

  advanced_indexes = None

  # The positions of the advanced indexing axes in `idx`.
  idx_advanced_axes = []

  # The positions of the advanced indexes in x's shape.
  # collapsed, after None axes have been removed. See below.
  x_advanced_axes = None

  if _is_advanced_int_indexer(idx):
    idx_no_nones = [(i, d) for i, d in enumerate(idx) if d is not None]
    advanced_pairs = (
      (asarray(e), i, j) for j, (i, e) in enumerate(idx_no_nones)
      if (isinstance(e, Sequence) or isinstance(e, ndarray)))
    advanced_pairs = ((_normalize_index(e, x_shape[j]), i, j)
                      for e, i, j in advanced_pairs)
    advanced_indexes, idx_advanced_axes, x_advanced_axes = zip(*advanced_pairs)
    advanced_axes_are_contiguous = onp.all(onp.diff(idx_advanced_axes) == 1)

  x_axis = 0  # Current axis in x.
  y_axis = 0  # Current axis in y, before collapsing. See below.
  collapsed_y_axis = 0  # Current axis in y, after collapsing.

  # Scatter dimension numbers.
  offset_dims = []
  collapsed_slice_dims = []
  start_index_map = []

  # Index components accumulated so far; starts empty with a trailing size-0
  # index-vector dimension that each handled index appends to.
  gather_indices = zeros((0,), dtype=int32)

  # We perform three transformations to y before the scatter op, in order:
  # First, y is broadcast to slice_shape. In general `y` only need broadcast to
  # the right shape.
  slice_shape = []

  # Next, y is squeezed to remove newaxis_dims. This removes np.newaxis/`None`
  # indices, which the scatter cannot remove itself.
  newaxis_dims = []

  # Finally, we reverse reversed_y_dims to handle slices with negative strides.
  reversed_y_dims = []

  gather_slice_shape = []

  for idx_pos, i in enumerate(idx):
    # Handle the advanced indices here if:
    # * the advanced indices were not contiguous and we are the start.
    # * we are at the position of the first advanced index.
    if (advanced_indexes is not None and
        (advanced_axes_are_contiguous and idx_pos == idx_advanced_axes[0] or
         not advanced_axes_are_contiguous and idx_pos == 0)):
      advanced_indexes = broadcast_arrays(*advanced_indexes)
      shape = advanced_indexes[0].shape
      ndim = len(shape)
      advanced_indexes = [
        lax.convert_element_type(lax.reshape(a, shape + (1,)), int32)
        for a in advanced_indexes]

      # Broadcast gather_indices from [..., k] to [..., 1, 1, ..., 1, k].
      gather_indices = lax.broadcast_in_dim(
        gather_indices, onp.insert(gather_indices.shape, -1, shape),
        tuple(range(gather_indices.ndim - 1)) + (gather_indices.ndim + ndim - 1,))
      gather_indices = concatenate([gather_indices] + advanced_indexes, -1)
      start_index_map.extend(x_advanced_axes)
      collapsed_slice_dims.extend(x_advanced_axes)
      slice_shape.extend(shape)
      y_axis += ndim
      collapsed_y_axis += ndim

    # Per-index bookkeeping for advanced indexes.
    if idx_pos in idx_advanced_axes:
      x_axis += 1
      gather_slice_shape.append(1)
      continue

    try:
      abstract_i = core.get_aval(i)
    except TypeError:
      abstract_i = None
    # Handle basic int indexes.
    if (isinstance(abstract_i, ConcreteArray) or
        isinstance(abstract_i, ShapedArray)) and _int(abstract_i):
      i = _normalize_index(i, x_shape[x_axis])
      i = lax.convert_element_type(i, int32)
      i = broadcast_to(i, tuple(gather_indices.shape[:-1]) + (1,))
      gather_indices = concatenate((gather_indices, i), -1)
      collapsed_slice_dims.append(x_axis)
      gather_slice_shape.append(1)
      start_index_map.append(x_axis)
      x_axis += 1
    # Handle np.newaxis (None)
    elif i is None:
      slice_shape.append(1)
      newaxis_dims.append(y_axis)
      y_axis += 1
    # Handle slice(None)
    elif _is_slice_none(i):
      slice_shape.append(x_shape[x_axis])
      gather_slice_shape.append(x_shape[x_axis])
      offset_dims.append(collapsed_y_axis)
      collapsed_y_axis += 1
      y_axis += 1
      x_axis += 1
    # Handle slice index (only static, otherwise an error is raised)
    elif isinstance(i, slice):
      if not _all(elt is None or type(core.get_aval(elt)) is ConcreteArray
                  for elt in (i.start, i.stop, i.step)):
        msg = ("Array slice indices must have static start/stop/step to be used "
               "with Numpy indexing syntax. Try lax.dynamic_slice/"
               "dynamic_update_slice instead.")
        raise IndexError(msg)
      start, limit, stride, needs_rev = _static_idx(i, x_shape[x_axis])
      if needs_rev:
        reversed_y_dims.append(collapsed_y_axis)
      if stride == 1:
        # Unit-stride slice: a single start index plus a slice size suffices.
        i = lax.convert_element_type(start, int32)
        i = broadcast_to(i, tuple(gather_indices.shape[:-1]) + (1,))
        gather_indices = concatenate((gather_indices, i), -1)
        slice_shape.append(limit - start)
        gather_slice_shape.append(limit - start)
        offset_dims.append(collapsed_y_axis)
        start_index_map.append(x_axis)
      else:
        # Strided slice: materialize the index positions and cross them with
        # the indices accumulated so far.
        i = arange(start, limit, stride, dtype=int32)
        size = i.shape[0]
        slice_shape.append(size)
        gather_slice_shape.append(1)
        gather_indices_shape = tuple(gather_indices.shape[:-1]) + (size,)
        i = lax.broadcast_in_dim(
          i, shape=gather_indices_shape + (1,),
          broadcast_dimensions=(len(gather_indices_shape) - 1,))
        gather_indices = lax.broadcast_in_dim(
          gather_indices,
          shape=gather_indices_shape + (len(start_index_map),),
          broadcast_dimensions=(
            tuple(range(len(gather_indices_shape) - 1)) +
            (len(gather_indices_shape),)))
        gather_indices = concatenate(
          (gather_indices, i), len(gather_indices_shape))
        start_index_map.append(x_axis)
        collapsed_slice_dims.append(x_axis)

      collapsed_y_axis += 1
      y_axis += 1
      x_axis += 1
    else:
      msg = "Indexing mode not yet supported. Open a feature request!\n{}"
      raise IndexError(msg.format(idx))

  dnums = lax.GatherDimensionNumbers(
    offset_dims = tuple(offset_dims),
    collapsed_slice_dims = tuple(sorted(collapsed_slice_dims)),
    start_index_map = tuple(start_index_map)
  )
  return _Indexer(
    slice_shape=slice_shape,
    newaxis_dims=tuple(newaxis_dims),
    gather_slice_shape=gather_slice_shape,
    reversed_y_dims=reversed_y_dims,
    dnums=dnums,
    gather_indices=gather_indices)
def _should_unpack_list_index(x):
  """Helper for _eliminate_deprecated_list_indexing."""
  # Non-scalar arrays, sequences, slices, Ellipsis and None all signal that
  # a list index should be treated as a tuple of per-axis indices.
  if isinstance(x, ndarray) and onp.ndim(x) != 0:
    return True
  return isinstance(x, (Sequence, slice)) or x is Ellipsis or x is None
def _eliminate_deprecated_list_indexing(idx):
  # "Basic slicing is initiated if the selection object is a non-array,
  # non-tuple sequence containing slice objects, [Ellipses, or newaxis
  # objects]". Detects this case and canonicalizes to a tuple. This case is
  # deprecated by NumPy and exists for backward compatibility.
  if isinstance(idx, tuple):
    return idx
  is_list_like = isinstance(idx, Sequence) and not isinstance(idx, ndarray)
  if is_list_like and _any(_should_unpack_list_index(i) for i in idx):
    return tuple(idx)
  return (idx,)
def _expand_bool_indices(idx):
  """Converts concrete bool indexes into advanced integer indexes."""
  out = []
  for i in idx:
    try:
      abstract_i = core.get_aval(i)
    except TypeError:
      abstract_i = None
    # A boolean mask is either a bool-dtype array or a (possibly nested-free)
    # list of scalar bools.
    if (isinstance(abstract_i, ShapedArray) and issubdtype(abstract_i.dtype, onp.bool_)
        or isinstance(i, list) and _all(not _shape(e) and issubdtype(_dtype(e), onp.bool_)
                                        for e in i)):
      if isinstance(i, list):
        i = array(i)
        abstract_i = core.get_aval(i)

      if not type(abstract_i) is ConcreteArray:
        # A traced (non-concrete) mask would make the output shape depend on
        # the data, which cannot be expressed under jit/vmap.
        msg = ("Array boolean indices must be static (e.g. no dependence on an "
               "argument to a jit or vmap function).")
        raise IndexError(msg)
      else:
        # onp.where yields one integer index array per dimension of the mask.
        out.extend(onp.where(i))
    else:
      out.append(i)
  return tuple(out)
def _is_slice_none(idx):
"""Return True if idx is equal to slice(None), False otherwise."""
if isinstance(idx, slice):
return idx.start is None and idx.stop is None and idx.step is None
# TODO(mattjj): clean up this logic
def _is_advanced_int_indexer(idx):
  """Returns True if idx should trigger int array indexing, False otherwise."""
  # https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing
  assert isinstance(idx, tuple)
  # An all-scalar tuple is basic indexing; advanced indexing requires at
  # least one non-scalar entry, and every entry must be something that can
  # legally be mixed with integer-array indices.
  if _all(onp.ndim(elt) == 0 for elt in idx):
    return False
  return _all(e is None or e is Ellipsis or isinstance(e, slice)
              or _is_int_arraylike(e) for e in idx)
def _is_int_arraylike(x):
  """Returns True if x is array-like with integer dtype, False otherwise."""
  # Plain Python ints qualify, but bools (a subclass of int) do not.
  if isinstance(x, int) and not isinstance(x, bool):
    return True
  # Anything with an integer dtype attribute (e.g. ndarray) qualifies.
  if issubdtype(getattr(x, "dtype", None), onp.integer):
    return True
  # Lists/tuples qualify when every element does, recursively.
  return isinstance(x, (list, tuple)) and _all(_is_int_arraylike(e) for e in x)
def _canonicalize_tuple_index(arr_ndim, idx):
  """Helper to remove Ellipsis and add in the implicit trailing slice(None)."""
  # None (newaxis) and Ellipsis entries don't consume an array dimension.
  num_dim_indices = _sum(1 for e in idx if e is not None and e is not Ellipsis)
  if num_dim_indices > arr_ndim:
    msg = "Too many indices for array: {} non-None/Ellipsis indices for dim {}."
    raise IndexError(msg.format(num_dim_indices, arr_ndim))

  ellipsis_positions = [i for i, elt in enumerate(idx) if elt is Ellipsis]
  if len(ellipsis_positions) > 1:
    msg = "Multiple ellipses (...) not supported: {}."
    raise IndexError(msg.format(list(map(type, idx))))

  fill = (slice(None),) * (arr_ndim - num_dim_indices)
  if ellipsis_positions:
    # Replace the single Ellipsis with the full slices it stands for.
    pos = ellipsis_positions[0]
    idx = idx[:pos] + fill + idx[pos + 1:]
  elif num_dim_indices < arr_ndim:
    # Pad with trailing full slices for any unindexed dimensions.
    idx = tuple(idx) + fill
  return idx
def _static_idx(idx, size):
"""Helper function to compute the static slice start/limit/stride values."""
assert isinstance(idx, slice)
start, stop, step = idx.indices(size)
if (step < 0 and stop >= start) or (step > 0 and start >= stop):
return 0, 0, 1, False # sliced to size zero
if step > 0:
return start, stop, step, False
else:
k = (start - stop - 1) % (-step)
return stop + k + 1, start + 1, -step, True
# Window functions, forwarded to the NumPy implementations via the
# nullary-function wrapper (the window length must be a static Python value).
blackman = _wrap_numpy_nullary_function(onp.blackman)
bartlett = _wrap_numpy_nullary_function(onp.bartlett)
hamming = _wrap_numpy_nullary_function(onp.hamming)
hanning = _wrap_numpy_nullary_function(onp.hanning)
# TODO: lower `kaiser` via lax to allow non-constant beta values.
kaiser = _wrap_numpy_nullary_function(onp.kaiser)
def _gcd_cond_fn(xs):
  """while_loop condition for `gcd`: continue while any remainder is nonzero."""
  _, x2 = xs
  return any(x2 != 0)
def _gcd_body_fn(xs):
  """One elementwise Euclidean-algorithm step for `gcd`.

  Maps (x1, x2) -> (x2, x1 mod x2) on lanes where x2 != 0 (finished lanes
  become (x1, 0)), then orders each pair so the larger value comes first.
  """
  x1, x2 = xs
  x1, x2 = (where(x2 != 0, x2, x1),
            where(x2 != 0, lax.rem(x1, x2), lax._const(x2, 0)))
  return (where(x1 < x2, x2, x1), where(x1 < x2, x1, x2))
@_wraps(getattr(onp, "gcd", None))
def gcd(x1, x2):
  """Elementwise greatest common divisor via a lax.while_loop Euclidean algorithm."""
  if (not issubdtype(_dtype(x1), integer) or
      not issubdtype(_dtype(x2), integer)):
    raise ValueError("Arguments to gcd must be integers.")
  # gcd is defined on magnitudes; promote to a common dtype and broadcast.
  x1, x2 = _promote_dtypes(lax.abs(x1), lax.abs(x2))
  x1, x2 = broadcast_arrays(x1, x2)
  gcd, _ = lax.while_loop(_gcd_cond_fn, _gcd_body_fn, (x1, x2))
  return gcd
@_wraps(getattr(onp, "lcm", None))
def lcm(x1, x2):
  # lcm(a, b) = |a * b| / gcd(a, b), with lcm(0, 0) defined as 0.
  divisor = gcd(x1, x2)
  return where(divisor == 0, lax._const(divisor, 0),
               lax.div(lax.abs(multiply(x1, x2)), divisor))
@_wraps(onp.cov)
def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
        aweights=None):
  msg = ("jax.numpy.cov not implemented for nontrivial {}. "
         "Open a feature request at https://github.com/google/jax/issues !")
  if y is not None: raise NotImplementedError(msg.format('y'))
  # These next two are actually implemented, just not tested.
  if fweights is not None: raise NotImplementedError(msg.format('fweights'))
  if aweights is not None: raise NotImplementedError(msg.format('aweights'))

  if m.ndim > 2:
    raise ValueError("m has more than 2 dimensions")  # same as numpy error
  # Promote to (at least) float64 precision, canonicalized for the backend;
  # each row is a variable, each column an observation (after rowvar handling).
  X = array(m, ndmin=2, dtype=xla_bridge.canonicalize_dtype(result_type(m, onp.float64)), copy=False)
  if not rowvar and X.shape[0] != 1:
    X = X.T
  if X.shape[0] == 0:
    return onp.array([]).reshape(0, 0)
  if ddof is None:
    ddof = 1 if bias == 0 else 0

  # Combine frequency and analytic weights (NumPy-compatible validation).
  w = None
  if fweights is not None:
    if onp.ndim(fweights) > 1:
      raise RuntimeError("cannot handle multidimensional fweights")
    if onp.shape(fweights)[0] != X.shape[1]:
      raise RuntimeError("incompatible numbers of samples and fweights")
    w = asarray(fweights)
  if aweights is not None:
    if onp.ndim(aweights) > 1:
      raise RuntimeError("cannot handle multidimensional aweights")
    if onp.shape(aweights)[0] != X.shape[1]:
      raise RuntimeError("incompatible numbers of samples and aweights")
    w = aweights if w is None else w * aweights

  avg, w_sum = average(X, axis=1, weights=w, returned=True)
  w_sum = w_sum[0]

  # Effective degrees-of-freedom normalization factor.
  if w is None:
    f = X.shape[1] - ddof
  elif ddof == 0:
    f = w_sum
  elif aweights is None:
    f = w_sum - ddof
  else:
    f = w_sum - ddof * sum(w * aweights) / w_sum

  # Covariance of the centered data; squeeze matches NumPy's scalar output
  # for a single variable.
  X = X - avg[:, None]
  X_T = X.T if w is None else (X * w).T
  return true_divide(dot(X, X_T.conj()), f).squeeze()
@_wraps(onp.corrcoef)
def corrcoef(x, y=None, rowvar=True, bias=None, ddof=None):
  # Correlation = covariance normalized by the outer product of standard
  # deviations. `bias` and `ddof` are accepted for NumPy signature
  # compatibility but are never used in the body.
  c = cov(x, y, rowvar)

  if len(shape(c)) == 0:
    # scalar - this should yield nan for values (nan/nan, inf/inf, 0/0), 1 otherwise
    return divide(c, c)

  d = diag(c)
  stddev = sqrt(real(d))
  c = divide(c, stddev[:, None])
  c = divide(c, stddev[None, :])

  # Clip to [-1, 1] to guard against floating-point excursions.
  real_part = clip(real(c), -1, 1)
  if iscomplexobj(c):
    # Clip real and imaginary parts independently.
    complex_part = clip(imag(c), -1, 1)
    c = lax.complex(real_part, complex_part)
  else:
    c = real_part
  return c
@_wraps(getattr(onp, "quantile", None))
def quantile(a, q, axis=None, out=None, overwrite_input=False,
             interpolation="linear", keepdims=False):
  if overwrite_input or out is not None:
    msg = ("jax.numpy.quantile does not support overwrite_input=True or "
           "out != None")
    raise ValueError(msg)
  if interpolation != "linear":
    raise NotImplementedError("Only interpolation='linear' is implemented")

  a = asarray(a)
  q = asarray(q)

  if axis is None:
    a = ravel(a)
    axis = 0
  elif isinstance(axis, tuple):
    raise NotImplementedError("Tuple values for axis are not implemented")
  else:
    axis = _canonicalize_axis(axis, ndim(a))

  q_ndim = ndim(q)
  if q_ndim > 1:
    raise ValueError("q must be have rank <= 1, got shape {}".format(shape(q)))

  a, q = _promote_dtypes(a, q)
  if not issubdtype(a.dtype, floating):
    msg = "q and a arguments to quantile must be of float type, got {} and {}"
    raise TypeError(msg.format(a.dtype, q.dtype))

  a_shape = shape(a)
  a = lax.sort(a, dimension=axis)

  n = a_shape[axis]
  # Fractional position of each quantile within the sorted axis; interpolate
  # linearly between the two nearest order statistics.
  q = lax.mul(q, _constant_like(q, n - 1))
  low = lax.floor(q)
  high = lax.add(low, _constant_like(low, 1))
  high_weight = lax.sub(q, low)
  low_weight = lax.sub(_constant_like(high_weight, 1), high_weight)

  # Clamp so q == 1.0 does not index past the end of the axis.
  low = lax.clamp(_constant_like(low, 0), low, _constant_like(low, n - 1))
  high = lax.clamp(_constant_like(high, 0), high, _constant_like(high, n - 1))
  low = lax.convert_element_type(low, int64)
  high = lax.convert_element_type(high, int64)

  slice_sizes = list(a_shape)
  slice_sizes[axis] = 1

  # Gather the two bracketing order statistics for every quantile.
  dnums = lax.GatherDimensionNumbers(
    offset_dims=tuple(range(
      q_ndim,
      len(a_shape) + q_ndim if keepdims else len(a_shape) + q_ndim - 1)),
    collapsed_slice_dims=() if keepdims else (axis,),
    start_index_map=(axis,))
  low = low[..., None]
  high = high[..., None]
  low_value = lax.gather(a, low, dimension_numbers=dnums,
                         slice_sizes=slice_sizes)
  high_value = lax.gather(a, high, dimension_numbers=dnums,
                          slice_sizes=slice_sizes)
  if q_ndim == 1:
    low_weight = lax.broadcast_in_dim(low_weight, low_value.shape,
                                      broadcast_dimensions=(0,))
    high_weight = lax.broadcast_in_dim(high_weight, high_value.shape,
                                       broadcast_dimensions=(0,))
  return lax.add(lax.mul(low_value, low_weight),
                 lax.mul(high_value, high_weight))
@_wraps(onp.percentile)
def percentile(a, q, axis=None, out=None, overwrite_input=False,
               interpolation="linear", keepdims=False):
  # Percentiles are quantiles scaled by a factor of 100.
  return quantile(a, true_divide(asarray(q), float32(100.0)), axis=axis,
                  out=out, overwrite_input=overwrite_input,
                  interpolation=interpolation, keepdims=keepdims)
@_wraps(onp.median)
def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
  # The median is simply the 0.5 quantile.
  return quantile(a, 0.5, axis=axis, out=out,
                  overwrite_input=overwrite_input, keepdims=keepdims)
def _astype(arr, dtype):
  """Implementation of the `.astype` method installed on array classes below."""
  # Validate the user-provided dtype before lowering to a type conversion.
  lax._check_user_dtype_supported(dtype, "astype")
  return lax.convert_element_type(arr, dtype)
### track unimplemented functions
def _not_implemented(fun):
  """Create a stub for NumPy function `fun` that raises NotImplementedError."""
  @_wraps(fun)
  def wrapped(*args, **kwargs):
    raise NotImplementedError(
        "Numpy function {} not yet implemented".format(fun))
  return wrapped
# Build a set of all unimplemented NumPy functions.
# Any public NumPy function without a counterpart defined above gets a stub
# that raises NotImplementedError when called.
for func in get_module_functions(onp):
  if func.__name__ not in globals():
    globals()[func.__name__] = _not_implemented(func)
### add method and operator overloads to arraylike classes
# We add operator overloads to DeviceArray and ShapedArray. These method and
# operator overloads mainly just forward calls to the corresponding lax_numpy
# functions, which can themselves handle instances from any of these classes.
def _swap_args(f):
return lambda x, y: f(y, x)
def _unimplemented_setitem(self, i, x):
msg = ("'{}' object does not support item assignment. JAX arrays are "
"immutable; perhaps you want jax.ops.index_update or "
"jax.ops.index_add instead?")
raise TypeError(msg.format(type(self)))
# Dispatch table mapping Python dunder names (without the double underscores)
# to their lax_numpy implementations. Installed on ShapedArray (with a single
# leading underscore) and on DeviceArray (as real __dunder__ methods) below.
_operators = {
    "getitem": _rewriting_take,
    "setitem": _unimplemented_setitem,
    "neg": negative,
    "eq": equal,
    "ne": not_equal,
    "lt": less,
    "le": less_equal,
    "gt": greater,
    "ge": greater_equal,
    "abs": abs,
    "add": add,
    "radd": add,
    "sub": subtract,
    "rsub": _swap_args(subtract),
    "mul": multiply,
    "rmul": multiply,
    "div": divide,
    "rdiv": _swap_args(divide),
    "truediv": true_divide,
    "rtruediv": _swap_args(true_divide),
    "floordiv": floor_divide,
    "rfloordiv": _swap_args(floor_divide),
    "divmod": divmod,
    "rdivmod": _swap_args(divmod),
    "mod": mod,
    "rmod": _swap_args(mod),
    "pow": power,
    "rpow": _swap_args(power),
    "matmul": matmul,
    "rmatmul": _swap_args(matmul),
    "and": bitwise_and,
    "rand": bitwise_and,
    "or": bitwise_or,
    "ror": bitwise_or,
    "xor": bitwise_xor,
    "rxor": bitwise_xor,
    "invert": bitwise_not,
    "lshift": left_shift,
    "rshift": right_shift,
}
# These numpy.ndarray methods are just refs to an equivalent numpy function
# The two lists are installed identically below; judging by the names, the
# split appears to separate non-differentiable from differentiable methods.
_nondiff_methods = ["all", "any", "argmax", "argmin", "argpartition", "argsort",
                    "nonzero", "searchsorted", "round"]
_diff_methods = ["clip", "compress", "conj", "conjugate", "cumprod", "cumsum",
                 "diagonal", "dot", "max", "mean", "min", "prod", "ptp",
                 "ravel", "repeat", "sort", "squeeze", "std", "sum",
                 "swapaxes", "take", "tile", "trace", "transpose", "var"]
# Set up operator, method, and property forwarding on Tracer instances containing
# ShapedArray avals by following the forwarding conventions for Tracer.
# Forward operators using a single-underscore-prefix naming convention:
for operator_name, function in _operators.items():
  setattr(ShapedArray, "_{}".format(operator_name), staticmethod(function))

# Forward methods and properties using core.aval_method and core.aval_property:
for method_name in _nondiff_methods + _diff_methods:
  setattr(ShapedArray, method_name, core.aval_method(globals()[method_name]))
setattr(ShapedArray, "reshape", core.aval_method(_reshape_method))
setattr(ShapedArray, "flatten", core.aval_method(ravel))
setattr(ShapedArray, "T", core.aval_property(transpose))
setattr(ShapedArray, "real", core.aval_property(real))
setattr(ShapedArray, "imag", core.aval_property(imag))
setattr(ShapedArray, "astype", core.aval_method(_astype))

# Forward operators, methods, and properties on DeviceArray to lax_numpy
# functions (with no Tracers involved; this forwarding is direct)
for operator_name, function in _operators.items():
  setattr(DeviceArray, "__{}__".format(operator_name), function)
for method_name in _nondiff_methods + _diff_methods:
  setattr(DeviceArray, method_name, globals()[method_name])
setattr(DeviceArray, "reshape", _reshape_method)
setattr(DeviceArray, "flatten", ravel)
setattr(DeviceArray, "T", property(transpose))
setattr(DeviceArray, "real", property(real))
setattr(DeviceArray, "imag", property(imag))
setattr(DeviceArray, "astype", _astype)

# Extra methods that are handy
setattr(ShapedArray, "broadcast", core.aval_method(lax.broadcast))
setattr(ShapedArray, "broadcast_in_dim", core.aval_method(lax.broadcast_in_dim))
setattr(ShapedArray, "split", core.aval_method(split))
setattr(DeviceArray, "broadcast", lax.broadcast)
setattr(DeviceArray, "broadcast_in_dim", lax.broadcast_in_dim)
setattr(DeviceArray, "split", split)
@jit
def _unstack(x):
  """Split a non-scalar array into a list of its leading-axis slices."""
  if x.ndim == 0:
    raise ValueError("Argument to _unstack must be non-scalar")
  pieces = []
  for leading_index in range(x.shape[0]):
    pieces.append(lax.index_in_dim(x, leading_index, keepdims=False))
  return pieces
setattr(DeviceArray, "_unstack", _unstack)
| 33.98166 | 101 | 0.671859 |
d144b3b32a184a5b702ada1f995b60bddf521b9c | 965 | py | Python | etl/core/tests.py | cloud-cds/cds-stack | d68a1654d4f604369a071f784cdb5c42fc855d6e | [
"Apache-2.0"
] | 6 | 2018-06-27T00:09:55.000Z | 2019-03-07T14:06:53.000Z | etl/core/tests.py | cloud-cds/cds-stack | d68a1654d4f604369a071f784cdb5c42fc855d6e | [
"Apache-2.0"
] | 3 | 2021-03-31T18:37:46.000Z | 2021-06-01T21:49:41.000Z | etl/core/tests.py | cloud-cds/cds-stack | d68a1654d4f604369a071f784cdb5c42fc855d6e | [
"Apache-2.0"
] | 3 | 2020-01-24T16:40:49.000Z | 2021-09-30T02:28:55.000Z | from etl.core.task import Task
from etl.core.plan import Plan
from etl.core.engine import Engine
from inspect import signature
import logging
import asyncio, uvloop
import os
# Log format used for this ad-hoc smoke test of the ETL engine.
TEST_LOG_FMT = '%(asctime)s|%(funcName)s|%(process)s-%(thread)s|%(levelname)s|%(message)s'
logging.basicConfig(level=logging.INFO, format=TEST_LOG_FMT)
def abc(ctxt, arg1, arg2):
    """Toy task body: logs its two arguments through the engine context."""
    ctxt.log.info("{} {}".format(arg1, arg2))
    return None
t1 = Task(name='task_a', fn=abc, args=["hello", "world"])
print(t1)
# Plan pulls its database connection settings from the environment.
p1 = Plan(name='plan_a', config={
    'db_name': os.environ['db_name'],
    'db_user': os.environ['db_user'],
    # NOTE(review): env var is 'db_password' but the config key is 'db_pass'
    # -- confirm downstream consumers expect 'db_pass'.
    'db_pass': os.environ['db_password'],
    'db_host': os.environ['db_host'],
    'db_port': os.environ['db_port']
})
p1.add(t1)
print(p1)
print(p1.plan)
engine = Engine(
    plan = p1,
    name = "etl-engine",
    nprocs = 1,
    loglevel = logging.DEBUG,
)
# Drive the engine once to completion on a fresh uvloop event loop.
loop = uvloop.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(engine.run())
loop.close()
| 21.931818 | 90 | 0.691192 |
6b59af4ab3476e2832abe36695a85e3f7fc8281f | 31,119 | py | Python | aesara/tensor/type.py | abdalazizrashid/Theano-PyMC | 90fa750461e91fb6281d494ae86404e2153fd7eb | [
"BSD-3-Clause"
] | null | null | null | aesara/tensor/type.py | abdalazizrashid/Theano-PyMC | 90fa750461e91fb6281d494ae86404e2153fd7eb | [
"BSD-3-Clause"
] | null | null | null | aesara/tensor/type.py | abdalazizrashid/Theano-PyMC | 90fa750461e91fb6281d494ae86404e2153fd7eb | [
"BSD-3-Clause"
] | null | null | null | import logging
import warnings
import numpy as np
import aesara
from aesara import config
from aesara import scalar as scal
from aesara.gof import Type, Variable, hashtype
_logger = logging.getLogger("aesara.tensor.type")
class TensorType(Type):
"""
Symbolic `Type` representing a numpy.ndarray value.
Initialize self.dtype and self.broadcastable.
Parameters
----------
dtype: str
Corresponding to numpy dtype (e.g., 'int64')
The value (ndarray) associated to a `Variable` of this `Type` will
have this dtype.
broadcastable: tuple, list, or array of boolean values
This argument serves two purposes. First, the True elements of this
list indicate the dimensions where the shape of an associated value
must be 1. Secondly, the length of this list is the number of
dimensions that an associated value must have. See
doc:`broadcasting` for an explanation of how this list is used.
name : str
Optional name for this type.
"""
context_name = "cpu"
filter_checks_isfinite = False
"""
When this is True, strict filtering rejects data containing NaN or
Inf entries. (Used in `DebugMode`)
"""
def __init__(self, dtype, broadcastable, name=None, sparse_grad=False):
self.dtype = str(dtype)
if self.dtype == "floatX":
self.dtype = config.floatX
# broadcastable is immutable, and all elements are either
# True or False
self.broadcastable = tuple(bool(b) for b in broadcastable)
self.dtype_specs() # error checking is done there
self.name = name
self.numpy_dtype = np.dtype(self.dtype)
self.sparse_grad = sparse_grad
if sparse_grad:
warnings.warn(
"DEPRECATION WARNING: You use an old interface to"
" AdvancedSubtensor1 sparse_grad. Now use"
" aesara.sparse_grad(a_tensor[an_int_vector])."
)
def clone(self, dtype=None, broadcastable=None):
"""
Return a copy of the type optionally with a new dtype or
broadcastable pattern.
"""
if dtype is None:
dtype = self.dtype
if broadcastable is None:
broadcastable = self.broadcastable
return self.__class__(
dtype, broadcastable, name=self.name, sparse_grad=self.sparse_grad
)
def filter(self, data, strict=False, allow_downcast=None):
"""
Convert `data` to something which can be associated to a
`TensorVariable`.
This function is not meant to be called in user code. It is for
`Linker` instances to use when running a compiled graph.
"""
# Explicit error message when one accidentally uses a Variable as
# input (typical mistake, especially with shared variables).
if isinstance(data, Variable):
raise TypeError(
"Expected an array-like object, but found a Variable: "
"maybe you are trying to call a function on a (possibly "
"shared) variable instead of a numeric array?"
)
if (type(data) is np.ndarray) and (data.dtype == self.numpy_dtype):
if data.dtype.num != self.numpy_dtype.num:
data = aesara._asarray(data, dtype=self.dtype)
# -- now fall through to ndim check
elif (type(data) is np.memmap) and (data.dtype == self.numpy_dtype):
# numpy.memmap is a "safe" subclass of ndarray,
# so we can use it wherever we expect a base ndarray.
# however, casting it would defeat the purpose of not
# loading the whole data into memory
pass
elif strict:
# If any of the two conditions above was not met,
# we raise a meaningful TypeError.
if not (type(data) is np.ndarray):
raise TypeError(
"%s expected a ndarray object." % self, data, type(data)
)
if data.dtype != self.numpy_dtype:
raise TypeError(
("%s expected a ndarray object with " "dtype = %s (got %s).")
% (self, self.numpy_dtype, data.dtype)
)
raise AssertionError("This point should never be reached.")
else:
if allow_downcast:
# Convert to self.dtype, regardless of the type of data
data = aesara._asarray(data, dtype=self.dtype)
# TODO: consider to pad shape with ones to make it consistent
# with self.broadcastable... like vector->row type thing
else:
if isinstance(data, np.ndarray):
# Check if self.dtype can accurately represent data
# (do not try to convert the data)
up_dtype = scal.upcast(self.dtype, data.dtype)
if up_dtype == self.dtype:
# Bug in the following line when data is a
# scalar array, see
# http://projects.scipy.org/numpy/ticket/1611
# data = data.astype(self.dtype)
data = aesara._asarray(data, dtype=self.dtype)
if up_dtype != self.dtype:
err_msg = (
"%s cannot store a value of dtype %s without "
"risking loss of precision. If you do not mind "
"this loss, you can: "
"1) explicitly cast your data to %s, or "
'2) set "allow_input_downcast=True" when calling '
'"function". Value: "%s"'
% (self, data.dtype, self.dtype, repr(data))
)
raise TypeError(err_msg)
elif (
allow_downcast is None
and type(data) is float
and self.dtype == aesara.config.floatX
):
# Special case where we allow downcasting of Python float
# literals to floatX, even when floatX=='float32'
data = aesara._asarray(data, self.dtype)
else:
# data has to be converted.
# Check that this conversion is lossless
converted_data = aesara._asarray(data, self.dtype)
# We use the `values_eq` static function from TensorType
# to handle NaN values.
if TensorType.values_eq(
np.asarray(data), converted_data, force_same_dtype=False
):
data = converted_data
else:
# Do not print a too long description of data
# (ndarray truncates it, but it's not sure for data)
str_data = str(data)
if len(str_data) > 80:
str_data = str_data[:75] + "(...)"
err_msg = (
"%s cannot store accurately value %s, "
"it would be represented as %s. "
"If you do not mind this precision loss, you can: "
"1) explicitly convert your data to a numpy array "
"of dtype %s, or "
'2) set "allow_input_downcast=True" when calling '
'"function".' % (self, data, converted_data, self.dtype)
)
raise TypeError(err_msg, data)
if self.ndim != data.ndim:
raise TypeError(
"Wrong number of dimensions: expected %s,"
" got %s with shape %s." % (self.ndim, data.ndim, data.shape)
)
if not data.flags.aligned:
try:
msg = "object buffer" + str(data.data)
except AttributeError:
msg = ""
raise TypeError(
"The numpy.ndarray object is not aligned."
" Aesara C code does not support that.",
msg,
"object shape",
data.shape,
"object strides",
data.strides,
"object dtype",
data.dtype,
)
i = 0
for b in self.broadcastable:
if b and data.shape[i] != 1:
raise TypeError(
"Non-unit value on shape on a broadcastable" " dimension.",
data.shape,
self.broadcastable,
)
i += 1
if self.filter_checks_isfinite and not np.all(np.isfinite(data)):
raise ValueError("non-finite elements not allowed")
return data
def filter_variable(self, other, allow_convert=True):
"""
Convert a symbolic Variable into a TensorType, if compatible.
For the moment, only a TensorType and GpuArrayType will be
converted, provided they have the same number of dimensions
and dtype and have "compatible" broadcastable pattern.
"""
if hasattr(other, "_as_TensorVariable"):
other = other._as_TensorVariable()
if not isinstance(other, Variable):
# The value is not a Variable: we cast it into
# a Constant of the appropriate Type.
other = self.Constant(type=self, data=other)
if other.type == self:
return other
if allow_convert:
# Attempt safe broadcast conversion.
other2 = self.convert_variable(other)
if other2 is not None and other2.type == self:
return other2
raise TypeError(
"Cannot convert Type %(othertype)s "
"(of Variable %(other)s) into Type %(self)s. "
"You can try to manually convert %(other)s into a %(self)s."
% dict(othertype=other.type, other=other, self=self)
)
def value_validity_msg(self, a):
try:
self.filter(a, strict=True)
except Exception as e:
return str(e)
return "value is valid"
def dtype_specs(self):
"""
Return a tuple (python type, c type, numpy typenum) that corresponds
to self.dtype.
This function is used internally as part of C code generation.
"""
# TODO: add more type correspondances for e.g. int32, int64, float32,
# complex64, etc.
try:
return {
"float16": (float, "npy_float16", "NPY_FLOAT16"),
"float32": (float, "npy_float32", "NPY_FLOAT32"),
"float64": (float, "npy_float64", "NPY_FLOAT64"),
"bool": (bool, "npy_bool", "NPY_BOOL"),
"uint8": (int, "npy_uint8", "NPY_UINT8"),
"int8": (int, "npy_int8", "NPY_INT8"),
"uint16": (int, "npy_uint16", "NPY_UINT16"),
"int16": (int, "npy_int16", "NPY_INT16"),
"uint32": (int, "npy_uint32", "NPY_UINT32"),
"int32": (int, "npy_int32", "NPY_INT32"),
"uint64": (int, "npy_uint64", "NPY_UINT64"),
"int64": (int, "npy_int64", "NPY_INT64"),
"complex128": (complex, "aesara_complex128", "NPY_COMPLEX128"),
"complex64": (complex, "aesara_complex64", "NPY_COMPLEX64"),
}[self.dtype]
except KeyError:
raise TypeError(
"Unsupported dtype for {}: {}".format(
self.__class__.__name__, self.dtype
)
)
def to_scalar_type(self):
return scal.get_scalar_type(dtype=self.dtype)
def __eq__(self, other):
"""
Compare True iff other is the same kind of TensorType.
"""
return (
type(self) == type(other)
and other.dtype == self.dtype
and other.broadcastable == self.broadcastable
)
def convert_variable(self, var):
if (
isinstance(self, type(var.type))
and self.dtype == var.type.dtype # noqa
and self.ndim == var.type.ndim
and all(
sb == ob or ob
for sb, ob in zip(self.broadcastable, var.type.broadcastable)
)
):
return aesara.tensor.patternbroadcast(var, self.broadcastable)
@staticmethod
def may_share_memory(a, b):
# This is a method of TensorType, so both a and b should be ndarrays
if isinstance(a, np.ndarray) and isinstance(b, np.ndarray):
return np.may_share_memory(a, b)
else:
return False
@staticmethod
def values_eq(a, b, force_same_dtype=True):
# TODO: check to see if the shapes must match
# for now, we err on safe side...
if a.shape != b.shape:
return False
if force_same_dtype and a.dtype != b.dtype:
return False
a_eq_b = a == b
r = np.all(a_eq_b)
if r:
return True
# maybe the trouble is that there are NaNs
a_missing = np.isnan(a)
if a_missing.any():
b_missing = np.isnan(b)
return np.all(a_eq_b + (a_missing == b_missing))
else:
return False
@staticmethod
def values_eq_approx(
a, b, allow_remove_inf=False, allow_remove_nan=False, rtol=None, atol=None
):
return values_eq_approx(a, b, allow_remove_inf, allow_remove_nan, rtol, atol)
def __hash__(self):
"""Hash equal for same kinds of TensorType"""
return hashtype(self) ^ hash(self.dtype) ^ hash(self.broadcastable)
ndim = property(lambda self: len(self.broadcastable), doc="number of dimensions")
"""
Number of dimensions.
This read-only property is the preferred way to get the number of
dimensions of a `TensorType`.
"""
def make_variable(self, name=None):
"""
Return a `TensorVariable` of this type.
Parameters
----------
name : str
A pretty name to identify this `Variable` when printing and
debugging
"""
return self.Variable(self, name=name)
def __str__(self):
if self.name:
return self.name
else:
b = self.broadcastable
named_broadcastable = {
(): "scalar",
(False,): "vector",
(False, True): "col",
(True, False): "row",
(False, False): "matrix",
}
if b in named_broadcastable:
bcast = named_broadcastable[b]
else:
if any(b):
bcast = str(b)
else:
bcast = "%iD" % len(b)
return "TensorType({}, {})".format(self.dtype, bcast)
def __repr__(self):
return str(self)
def c_element_type(self):
return self.dtype_specs()[1]
def c_declare(self, name, sub, check_input=True):
"""
Override `CLinkerType.c_declare`.
"""
if check_input:
check = """
typedef %(dtype)s dtype_%(name)s;
""" % dict(
sub, name=name, dtype=self.dtype_specs()[1]
)
else:
check = ""
declaration = """
PyArrayObject* %(name)s;
""" % dict(
sub, name=name, dtype=self.dtype_specs()[1]
)
return declaration + check
def c_init(self, name, sub):
"""
Override `CLinkerType.c_init`.
"""
return """
%(name)s = NULL;
""" % dict(
sub, name=name, type_num=self.dtype_specs()[2]
)
def c_extract(self, name, sub, check_input=True):
"""
Override `CLinkerType.c_extract`.
"""
if check_input:
check = """
%(name)s = NULL;
if (py_%(name)s == Py_None) {
// We can either fail here or set %(name)s to NULL and rely on Ops
// using tensors to handle the NULL case, but if they fail to do so
// they'll end up with nasty segfaults, so this is public service.
PyErr_SetString(PyExc_ValueError, "expected an ndarray, not None");
%(fail)s
}
if (!PyArray_Check(py_%(name)s)) {
PyErr_SetString(PyExc_ValueError, "expected an ndarray");
%(fail)s
}
// We expect %(type_num)s
if (!PyArray_ISALIGNED((PyArrayObject*) py_%(name)s)) {
PyArrayObject * tmp = (PyArrayObject*) py_%(name)s;
PyErr_Format(PyExc_NotImplementedError,
"expected an aligned array of type %%ld "
"(%(type_num)s), got non-aligned array of type %%ld"
" with %%ld dimensions, with 3 last dims "
"%%ld, %%ld, %%ld"
" and 3 last strides %%ld %%ld, %%ld.",
(long int) %(type_num)s,
(long int) PyArray_TYPE((PyArrayObject*) py_%(name)s),
(long int) PyArray_NDIM(tmp),
(long int) (PyArray_NDIM(tmp) >= 3 ?
PyArray_DIMS(tmp)[PyArray_NDIM(tmp)-3] : -1),
(long int) (PyArray_NDIM(tmp) >= 2 ?
PyArray_DIMS(tmp)[PyArray_NDIM(tmp)-2] : -1),
(long int) (PyArray_NDIM(tmp) >= 1 ?
PyArray_DIMS(tmp)[PyArray_NDIM(tmp)-1] : -1),
(long int) (PyArray_NDIM(tmp) >= 3 ?
PyArray_STRIDES(tmp)[PyArray_NDIM(tmp)-3] : -1),
(long int) (PyArray_NDIM(tmp) >= 2 ?
PyArray_STRIDES(tmp)[PyArray_NDIM(tmp)-2] : -1),
(long int) (PyArray_NDIM(tmp) >= 1 ?
PyArray_STRIDES(tmp)[PyArray_NDIM(tmp)-1] : -1)
);
%(fail)s
}
// This is a TypeError to be consistent with DEBUG_MODE
// Note: DEBUG_MODE also tells the name of the container
if (PyArray_TYPE((PyArrayObject*) py_%(name)s) != %(type_num)s) {
PyErr_Format(PyExc_TypeError,
"expected type_num %%d (%(type_num)s) got %%d",
%(type_num)s, PyArray_TYPE((PyArrayObject*) py_%(name)s));
%(fail)s
}
""" % dict(
sub, name=name, type_num=self.dtype_specs()[2]
)
else:
check = ""
return (
check
+ """
%(name)s = (PyArrayObject*)(py_%(name)s);
Py_XINCREF(%(name)s);
"""
% dict(sub, name=name, type_num=self.dtype_specs()[2])
)
def c_cleanup(self, name, sub):
"""
Override `CLinkerType.c_cleanup`.
"""
return (
"""
if (%(name)s) {
Py_XDECREF(%(name)s);
}
"""
% locals()
)
def c_sync(self, name, sub):
"""
Override `CLinkerType.c_sync`.
"""
fail = sub["fail"]
type_num = self.dtype_specs()[2]
return (
"""
{Py_XDECREF(py_%(name)s);}
if (!%(name)s) {
Py_INCREF(Py_None);
py_%(name)s = Py_None;
}
else if ((void*)py_%(name)s != (void*)%(name)s) {
py_%(name)s = (PyObject*)%(name)s;
}
{Py_XINCREF(py_%(name)s);}
if (%(name)s && !PyArray_ISALIGNED((PyArrayObject*) py_%(name)s)) {
PyErr_Format(PyExc_NotImplementedError,
"c_sync: expected an aligned array, got non-aligned array of type %%ld"
" with %%ld dimensions, with 3 last dims "
"%%ld, %%ld, %%ld"
" and 3 last strides %%ld %%ld, %%ld.",
(long int) PyArray_TYPE((PyArrayObject*) py_%(name)s),
(long int) PyArray_NDIM(%(name)s),
(long int) (PyArray_NDIM(%(name)s) >= 3 ?
PyArray_DIMS(%(name)s)[PyArray_NDIM(%(name)s)-3] : -1),
(long int) (PyArray_NDIM(%(name)s) >= 2 ?
PyArray_DIMS(%(name)s)[PyArray_NDIM(%(name)s)-2] : -1),
(long int) (PyArray_NDIM(%(name)s) >= 1 ?
PyArray_DIMS(%(name)s)[PyArray_NDIM(%(name)s)-1] : -1),
(long int) (PyArray_NDIM(%(name)s) >= 3 ?
PyArray_STRIDES(%(name)s)[PyArray_NDIM(%(name)s)-3] : -1),
(long int) (PyArray_NDIM(%(name)s) >= 2 ?
PyArray_STRIDES(%(name)s)[PyArray_NDIM(%(name)s)-2] : -1),
(long int) (PyArray_NDIM(%(name)s) >= 1 ?
PyArray_STRIDES(%(name)s)[PyArray_NDIM(%(name)s)-1] : -1)
);
%(fail)s
}
"""
% locals()
)
def c_headers(self, c_compiler):
"""
Override `CLinkerObject.c_headers`.
"""
return scal.get_scalar_type(self.dtype).c_headers(c_compiler)
def c_libraries(self, c_compiler):
return scal.get_scalar_type(self.dtype).c_libraries(c_compiler)
def c_compile_args(self, c_compiler):
return scal.get_scalar_type(self.dtype).c_compile_args(c_compiler)
def c_support_code(self):
"""
Override `CLinkerObject.c_support_code`.
"""
return scal.get_scalar_type(self.dtype).c_support_code()
def c_init_code(self):
return scal.get_scalar_type(self.dtype).c_init_code()
def c_code_cache_version(self):
scalar_version = scal.get_scalar_type(self.dtype).c_code_cache_version()
if scalar_version:
return (11,) + scalar_version
else:
return ()
def value_zeros(self, shape):
"""
Create an numpy ndarray full of 0 values.
"""
return np.zeros(shape, dtype=self.dtype)
def get_shape_info(self, obj):
"""
Return the information needed to compute the memory size of ``obj``.
The memory size is only the data, so this excludes the container.
For an ndarray, this is the data, but not the ndarray object and
other data structures such as shape and strides.
``get_shape_info()`` and ``get_size()`` work in tandem for the memory
profiler.
``get_shape_info()`` is called during the execution of the function.
So it is better that it is not too slow.
``get_size()`` will be called on the output of this function
when printing the memory profile.
Parameters
----------
obj
The object that this Type represents during execution.
Returns
-------
object
Python object that ``self.get_size()`` understands.
"""
return obj.shape
def get_size(self, shape_info):
"""
Number of bytes taken by the object represented by shape_info.
Parameters
----------
shape_info
The output of the call to get_shape_info().
Returns
-------
int
The number of bytes taken by the object described by ``shape_info``.
"""
if shape_info:
return np.prod(shape_info) * np.dtype(self.dtype).itemsize
else: # a scalar
return np.dtype(self.dtype).itemsize
aesara.compile.ops.expandable_types += (TensorType,)
def values_eq_approx(
    a, b, allow_remove_inf=False, allow_remove_nan=False, rtol=None, atol=None
):
    """
    Approximate element-wise equality of two ndarrays.

    Parameters
    ----------
    allow_remove_inf
        If True, when there is an inf in a, we allow any value in b in
        that position. Even -inf.
    allow_remove_nan
        If True, when there is a nan in a, we allow any value in b in
        that position. Even +-inf.
    rtol
        Relative tolerance, passed to _allclose and used in the
        element-wise fallback (defaults to numpy.allclose's rtol).
    atol
        Absolute tolerance, passed to _allclose and used in the
        element-wise fallback (defaults to numpy.allclose's atol).
    """
    if not (isinstance(a, np.ndarray) and isinstance(b, np.ndarray)):
        # Only ndarray pairs are comparable here.
        return False
    if a.shape != b.shape:
        return False
    if a.dtype != b.dtype:
        return False
    if str(a.dtype) not in aesara.tensor.continuous_dtypes:
        # Discrete dtypes: exact comparison only.
        return np.all(a == b)
    if aesara.tensor.basic._allclose(a, b, rtol=rtol, atol=atol):
        # Numpy claims they are close, this is good enough for us.
        return True
    # Numpy is unhappy, but it does not necessarily mean that a and
    # b are different. Indeed, Numpy does not like missing values
    # and will return False whenever some are found in a or b.
    a_missing = np.isnan(a)
    a_inf = np.isinf(a)
    if not (a_missing.any() or (allow_remove_inf and a_inf.any())):
        # There are no missing values in a, thus this is not the
        # reason why numpy.allclose(a, b) returned False.
        _logger.info(
            "numpy allclose failed for abs_err %f and rel_err %f",
            np.max(abs(a - b)),
            np.max(abs(a - b) / (abs(a) + abs(b))),
        )
        return False
    # BUG FIX: honor caller-supplied tolerances in the element-wise fallback.
    # Previously rtol/atol were unconditionally reset to numpy.allclose's
    # defaults here, silently discarding the function's arguments.
    if rtol is None:
        rtol = 1.0000000000000001e-05
    if atol is None:
        atol = 1e-8
    cmp_elemwise = np.absolute(a - b) <= (atol + rtol * np.absolute(b))
    # Find places where both a and b have missing values.
    both_missing = a_missing * np.isnan(b)
    # Find places where both a and b have inf (sign checked below).
    both_inf = a_inf * np.isinf(b)
    # cmp_elemwise is weird when we have inf and -inf: fall back to exact ==.
    cmp_elemwise = np.where(both_inf & cmp_elemwise, a == b, cmp_elemwise)
    # Check the sign of the infs.
    both_inf = np.where(both_inf, (a == b), both_inf)
    if allow_remove_inf:
        both_inf += a_inf
    if allow_remove_nan:
        both_missing += a_missing
    # A position passes if it is close, a shared NaN, or a shared inf.
    return (cmp_elemwise + both_missing + both_inf).all()
def values_eq_approx_remove_inf(a, b):
    """values_eq_approx that tolerates inf entries in `a`."""
    return values_eq_approx(a, b, allow_remove_inf=True)
def values_eq_approx_remove_nan(a, b):
    """values_eq_approx that tolerates NaN entries in `a`."""
    return values_eq_approx(a, b, allow_remove_inf=False, allow_remove_nan=True)
def values_eq_approx_remove_inf_nan(a, b):
    """values_eq_approx that tolerates both inf and NaN entries in `a`."""
    return values_eq_approx(a, b, allow_remove_inf=True, allow_remove_nan=True)
def values_eq_approx_always_true(a, b):
    """Trivial comparator: treat any pair of values as equal."""
    return True
# Register TensorType C code for ViewOp.
aesara.compile.register_view_op_c_code(
TensorType,
"""
Py_XDECREF(%(oname)s);
%(oname)s = %(iname)s;
Py_XINCREF(%(oname)s);
""",
version=1,
)
# Register TensorType C code for Shape Op.
aesara.compile.register_shape_c_code(
TensorType,
"""
npy_intp shape[] = {PyArray_NDIM(%(iname)s)};
if(%(oname)s == NULL || (PyArray_DIMS(%(oname)s)[0] != shape[0]))
{
Py_XDECREF(%(oname)s);
%(oname)s = (PyArrayObject*) PyArray_SimpleNew(1, shape, NPY_INT64);
}
for(int i=0;i<shape[0];i++)
{
((npy_int64*)PyArray_GETPTR1(%(oname)s, i))[0] = PyArray_DIMS(%(iname)s)[i];
}
""",
version=1,
)
# Register TensorType C code for ViewOp.
aesara.compile.register_shape_i_c_code(
TensorType,
"""
if(!%(oname)s)
%(oname)s=(PyArrayObject*)PyArray_EMPTY(0, NULL, NPY_INT64, 0);
((npy_int64*)PyArray_DATA(%(oname)s))[0]=PyArray_DIMS(%(iname)s)[%(i)s];
""",
"""
if (%(i)s>=PyArray_NDIM(%(iname)s)){
PyErr_SetString(PyExc_TypeError,
"Number of dimensions lower than expected");
%(fail)s
}
""",
version=3,
)
# Register TensorType C code for DeepCopyOp
aesara.compile.register_deep_copy_op_c_code(
TensorType,
"""
int alloc = %(oname)s == NULL;
for(int i=0; !alloc && i<PyArray_NDIM(%(oname)s); i++) {
if(PyArray_DIMS(%(iname)s)[i] != PyArray_DIMS(%(oname)s)[i]) {
alloc = true;
break;
}
}
if(alloc) {
Py_XDECREF(%(oname)s);
%(oname)s = (PyArrayObject*)PyArray_NewCopy(%(iname)s,
NPY_ANYORDER);
if (!%(oname)s)
{
PyErr_SetString(PyExc_ValueError,
"DeepCopyOp: the copy failed!");
%(fail)s;
}
} else {
if(PyArray_CopyInto(%(oname)s, %(iname)s)){
PyErr_SetString(PyExc_ValueError,
"DeepCopyOp: the copy failed into already allocated space!");
%(fail)s;
}
}
""",
version=2,
)
aesara.compile.register_rebroadcast_c_code(
TensorType,
"""
if(PyArray_DIMS(%(iname)s)[%(axis)s] != 1){
PyErr_Format(PyExc_ValueError,
"Dimension %(axis)s in Rebroadcast's input was"
" supposed to be 1 (got %%d instead)",
PyArray_DIMS(%(iname)s)[%(axis)s]);
%(fail)s
}
""",
version=1,
)
aesara.compile.register_specify_shape_c_code(
TensorType,
"""
if (PyArray_NDIM(%(iname)s) != PyArray_DIMS(%(shape)s)[0]) {
PyErr_Format(PyExc_AssertionError,
"SpecifyShape: vector of shape has %%d elements,"
" but the input has %%d dimensions.",
PyArray_DIMS(%(shape)s)[0],
PyArray_NDIM(%(iname)s));
%(fail)s;
}
for(int i = 0; i < PyArray_NDIM(%(iname)s); i++){
dtype_%(shape)s shp = ((dtype_%(shape)s*)PyArray_GETPTR1(%(shape)s,
i))[0];
if (PyArray_DIMS(%(iname)s)[i] != shp) {
PyErr_Format(PyExc_AssertionError,
"SpecifyShape: dim %%d of input has shape %%d,"
" expected %%d.",
i, PyArray_DIMS(%(iname)s)[i],
shp);
%(fail)s;
}
}
Py_XDECREF(%(oname)s);
%(oname)s = %(iname)s;
Py_XINCREF(%(oname)s);
""",
version=1,
)
| 35.483466 | 96 | 0.522446 |
81bfd005be90b5f6c7c3eca102fd938eb0bbd34b | 2,796 | py | Python | fa-rename.py | alrojascr/Virus-Data-Curation | efc2584172c7524f8548693c889906e523ca91ef | [
"MIT"
] | null | null | null | fa-rename.py | alrojascr/Virus-Data-Curation | efc2584172c7524f8548693c889906e523ca91ef | [
"MIT"
] | null | null | null | fa-rename.py | alrojascr/Virus-Data-Curation | efc2584172c7524f8548693c889906e523ca91ef | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Usage
import argparse
from argparse import RawTextHelpFormatter
import csv
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from io import StringIO
import os
import sys
import re
# Functions
# Log a message to stderr
def msg(*args, **kwargs):
    """Print ``*args`` to stderr; extra kwargs are forwarded to print()."""
    print(*args, file=sys.stderr, **kwargs)
# Log an error to stderr and quit with non-zero error code
def err(*args, **kwargs):
    """Print an error message to stderr, then exit with status 1."""
    msg(*args, **kwargs)
    sys.exit(1);
# Check file exists
def check_file(f):
    """Return True if path ``f`` is an existing regular file."""
    return os.path.isfile(f)
# Check if file is in FASTA format
def check_fasta(f):
    """Return True if ``f`` looks like a nucleotide FASTA file.

    The file must exist, be non-empty, start with a '>' header line, and
    contain only A/C/T/G (either case), N/n, '?' or '-' on sequence lines.
    """
    if not os.path.isfile(f) or os.path.getsize(f) < 1:
        return False
    # Raw string: '\-' in a plain literal is an invalid escape and triggers a
    # SyntaxWarning on modern Python. Compile once instead of per line.
    non_nucleotide = re.compile(r'[^ACTGactgNn?\-]')
    with open(f, 'r') as fasta:
        if not fasta.readline().startswith('>'):  # Header must start with ">"
            return False
        for line in fasta:
            line = line.strip()
            if not line or line[0] == '>':  # Skip blank and header lines
                continue
            if non_nucleotide.search(line):  # Non-nucleotide character found
                return False
    return True
def tab2dict(tab, sep):
    """Parse a two-column delimited file into a mapping.

    Parameters
    ----------
    tab : str
        Path to the table; each row is ``<key><sep><value>``.
    sep : str
        Single-character field delimiter (e.g. a tab).

    Returns
    -------
    dict mapping the first column of each row to the second.
    """
    mapping = {}  # renamed from `dict`, which shadowed the builtin
    with open(tab, mode='r') as file_handle:
        for row in csv.reader(file_handle, delimiter=sep):
            mapping[row[0]] = row[1]
    return mapping
parser = argparse.ArgumentParser(
    formatter_class=RawTextHelpFormatter,
    description='Rename headers/sequence IDs in multi-FASTA file\n',
    # Fixed: the option is --ids, not --tab (the usage text was stale).
    usage='\n  %(prog)s [--ids new_names.txt] FASTA > new.fasta')
parser.add_argument('fasta', metavar='FASTA', nargs=1, help='original FASTA file')
parser.add_argument('--ids', metavar='FILE', required=True, nargs=1, help='specify tab-separated file with [oldnames] [newnames]')
parser.add_argument('--out', metavar='FILE', nargs=1, help='specify output file (default = stdout)')
parser.add_argument('--version', action='version', version='%(prog)s v0.1')
args = parser.parse_args()
# Check input/output files before doing any work
if not check_file(args.fasta[0]):
    err('ERROR: Cannot find "{}". Check file exists in the specified directory.'.format(args.fasta[0]))
if not check_fasta(args.fasta[0]):
    err('ERROR: Check "{}" is in FASTA format.'.format(args.fasta[0]))
if not check_file(args.ids[0]):
    err('ERROR: Cannot find "{}". Check file exists in the specified directory.'.format(args.ids[0]))
if args.out:
    # Refuse to clobber an existing output file.
    if check_file(args.out[0]):
        err('ERROR: "{}" already exists.'.format(args.out[0]))
# Rename sequence records using the old-name -> new-name mapping
new_names = tab2dict(args.ids[0], '\t')
newseqs = []
for record in SeqIO.parse(args.fasta[0], 'fasta'):
    # Fail with a clear message instead of a bare KeyError when an ID is
    # missing from the mapping file.
    if record.id not in new_names:
        err('ERROR: No new name specified for "{}" in "{}".'.format(record.id, args.ids[0]))
    newseqs.append(SeqRecord(record.seq, id=new_names[record.id], description=''))
# Write renamed sequences to file or print to stdout
if args.out:
    # Fixed message: this script renames sequences, it does not mask them.
    msg('Renamed sequences saved to "{}" ... '.format(args.out[0]))
    SeqIO.write(newseqs, args.out[0], 'fasta')
else:
    seqFILE = StringIO()
    SeqIO.write(newseqs, seqFILE, 'fasta')
    print(seqFILE.getvalue().rstrip())
sys.exit(0)
| 30.725275 | 130 | 0.700644 |
9174393bae6bdf661d43b65c5cc8384b832080ba | 3,251 | py | Python | rlcard/envs/uno.py | alanzhu39/rlcard | 77adcb919ed66868e0fd0170229856dc143c27ce | [
"MIT"
] | 2 | 2020-08-24T21:30:44.000Z | 2020-10-27T03:44:04.000Z | rlcard/envs/uno.py | daochenzha/rlcard | c92039742ce80825b08d81de1ec6c5a9f7d6a532 | [
"MIT"
] | null | null | null | rlcard/envs/uno.py | daochenzha/rlcard | c92039742ce80825b08d81de1ec6c5a9f7d6a532 | [
"MIT"
] | 2 | 2020-02-23T17:26:14.000Z | 2020-12-22T15:34:13.000Z | import numpy as np
from rlcard.envs.env import Env
from rlcard import models
from rlcard.games.uno.game import UnoGame as Game
from rlcard.games.uno.utils import encode_hand, encode_target
from rlcard.games.uno.utils import ACTION_SPACE, ACTION_LIST
from rlcard.games.uno.card import UnoCard
class UnoEnv(Env):
    """RLCard environment wrapper for the UNO card game."""
    def __init__(self, allow_step_back=False):
        super().__init__(Game(allow_step_back), allow_step_back)
        # Observation layout: 7 planes of shape 4 x 15 (presumably
        # color x trait -- confirm against encode_hand/encode_target).
        # Planes 0-2: player's hand, plane 3: target card,
        # planes 4-6: other players' cards (see extract_state).
        self.state_shape = [7, 4, 15]
    def print_state(self, player):
        ''' Print out the state of a given player

        Args:
            player (int): Player id
        '''
        state = self.game.get_state(player)
        print('\n=============== Your Hand ===============')
        UnoCard.print_cards(state['hand'])
        print('')
        print('=============== Last Card ===============')
        UnoCard.print_cards(state['target'], wild_color=True)
        print('')
        print('========== Agents Card Number ===========')
        for i in range(self.player_num):
            # Only show hand sizes of the opponents, not the human player.
            if i != self.active_player:
                print('Agent {} has {} cards.'.format(i, len(self.game.players[i].hand)))
        print('======== Actions You Can Choose =========')
        for i, action in enumerate(state['legal_actions']):
            print(str(ACTION_SPACE[action])+': ', end='')
            UnoCard.print_cards(action, wild_color=True)
            if i < len(state['legal_actions']) - 1:
                print(', ', end='')
        print('\n')
    def print_result(self, player):
        ''' Print the game result when the game is over

        Args:
            player (int): The human player id
        '''
        payoffs = self.get_payoffs()
        print('=============== Result ===============')
        if payoffs[player] > 0:
            print('You win!')
        else:
            print('You lose!')
        print('')
    @staticmethod
    def print_action(action):
        ''' Print out an action in a nice form

        Args:
            action (str): A string a action
        '''
        UnoCard.print_cards(action, wild_color=True)
    def load_model(self):
        ''' Load pretrained/rule model

        Returns:
            model (Model): A Model object
        '''
        return models.load('uno-rule-v1')
    def extract_state(self, state):
        ''' Encode a raw game-state dict into the observation tensor.

        Args:
            state (dict): Raw state with 'hand', 'target' and 'others_hand'

        Returns:
            dict: {'obs': int array of shape (7, 4, 15),
                   'legal_actions': list of legal action ids}
        '''
        obs = np.zeros((7, 4, 15), dtype=int)
        encode_hand(obs[:3], state['hand'])
        encode_target(obs[3], state['target'])
        encode_hand(obs[4:], state['others_hand'])
        legal_action_id = self.get_legal_actions()
        extrated_state = {'obs': obs, 'legal_actions': legal_action_id}
        return extrated_state
    def get_payoffs(self):
        ''' Get the payoffs of all players at the end of the game.

        Returns:
            payoffs: one entry per player (see Game.get_payoffs)
        '''
        return self.game.get_payoffs()
    def decode_action(self, action_id):
        ''' Map an action id back to its string form.

        If ``action_id`` is not currently legal, a random legal action is
        substituted instead.

        Args:
            action_id (int): Index into ACTION_LIST

        Returns:
            str: The action string
        '''
        legal_ids = self.get_legal_actions()
        if action_id in legal_ids:
            return ACTION_LIST[action_id]
        #if (len(self.game.dealer.deck) + len(self.game.round.played_cards)) > 17:
        #    return ACTION_LIST[60]
        return ACTION_LIST[np.random.choice(legal_ids)]
    def get_legal_actions(self):
        ''' Get the ids of the currently legal actions.

        Returns:
            list: Legal action ids (indices into ACTION_SPACE/ACTION_LIST)
        '''
        legal_actions = self.game.get_legal_actions()
        legal_ids = [ACTION_SPACE[action] for action in legal_actions]
        return legal_ids
| 33.173469 | 89 | 0.57367 |
7556c18cf2c188555cabe88ef1b28f9169b4ea39 | 2,894 | py | Python | huey/contrib/djhuey/__init__.py | winkidney/huey | cda66da19e8a92d98453b53e106c3a74c20f640b | [
"MIT"
] | null | null | null | huey/contrib/djhuey/__init__.py | winkidney/huey | cda66da19e8a92d98453b53e106c3a74c20f640b | [
"MIT"
] | null | null | null | huey/contrib/djhuey/__init__.py | winkidney/huey | cda66da19e8a92d98453b53e106c3a74c20f640b | [
"MIT"
] | 1 | 2019-10-22T13:04:23.000Z | 2019-10-22T13:04:23.000Z | from functools import wraps
import sys
from django.conf import settings
from django.db import connection
from huey import crontab
from huey import RedisHuey
from huey.utils import load_class
configuration_message = """
Configuring Huey for use with Django
====================================
Huey was designed to be simple to configure in the general case. For that
reason, huey will "just work" with no configuration at all provided you have
Redis installed and running locally.
On the other hand, you can configure huey manually using the following
setting structure.
The following example uses Redis on localhost, and will run four worker
processes:
HUEY = {
'name': 'my-app',
'connection': {'host': 'localhost', 'port': 6379},
'consumer': {
'workers': 4,
'worker_type': 'process', # "thread" or "greenlet" are other options
},
}
If you would like to configure Huey's logger using Django's integrated logging
settings, the logger used by consumer is named "huey.consumer".
Alternatively you can simply assign `settings.HUEY` to an actual `Huey`
object instance:
from huey import RedisHuey
HUEY = RedisHuey('my-app')
"""
def default_queue_name():
    """Derive a queue name from the Django database settings.

    Tries the legacy ``DATABASE_NAME`` setting first, then the modern
    ``DATABASES['default']['NAME']``, and finally falls back to 'huey'.
    """
    try:
        # Old-style (pre-Django-1.2) single-database setting.
        return settings.DATABASE_NAME
    except AttributeError:
        pass
    try:
        return settings.DATABASES['default']['NAME']
    except KeyError:
        return 'huey'
def config_error(msg):
    # Print the full configuration help text, then the specific error,
    # and abort the process with a non-zero exit code.
    print(configuration_message)
    print('\n\n')
    print(msg)
    sys.exit(1)
# Resolve the HUEY setting: missing -> default RedisHuey instance,
# dict -> build a RedisHuey from the dict, instance -> used as-is.
HUEY = getattr(settings, 'HUEY', None)
if HUEY is None:
    try:
        from huey import RedisHuey
    except ImportError:
        config_error('Error: Huey could not import the redis backend. '
                     'Install `redis-py`.')
    else:
        HUEY = RedisHuey(default_queue_name())
if isinstance(HUEY, dict):
    huey_config = HUEY.copy()  # Operate on a copy.
    name = huey_config.pop('name', default_queue_name())
    conn_kwargs = huey_config.pop('connection', {})
    try:
        del huey_config['consumer']  # Don't need consumer opts here.
    except KeyError:
        pass
    if 'always_eager' not in huey_config:
        # Default to inline (eager) task execution while DEBUG is on.
        huey_config['always_eager'] = settings.DEBUG
    huey_config.update(conn_kwargs)
    HUEY = RedisHuey(name, **huey_config)
# Convenience aliases: `from huey.contrib.djhuey import task, periodic_task`.
task = HUEY.task
periodic_task = HUEY.periodic_task
def close_db(fn):
    """Decorator to be used with tasks that may operate on the database.

    Closes the Django DB connection after the wrapped task finishes
    (even on error), unless huey runs in always-eager mode.
    """
    @wraps(fn)
    def wrapper(*args, **kwargs):
        try:
            return fn(*args, **kwargs)
        finally:
            # In always-eager mode there is no separate consumer process,
            # so connection management is left to Django itself.
            if not HUEY.always_eager:
                connection.close()
    return wrapper
def db_task(*args, **kwargs):
    """Huey ``task`` decorator that also closes the DB connection afterwards."""
    def decorator(fn):
        db_safe = close_db(fn)
        huey_decorator = task(*args, **kwargs)
        return huey_decorator(db_safe)
    return decorator
def db_periodic_task(*args, **kwargs):
    """Huey ``periodic_task`` decorator that also closes the DB connection."""
    def decorator(fn):
        db_safe = close_db(fn)
        huey_decorator = periodic_task(*args, **kwargs)
        return huey_decorator(db_safe)
    return decorator
| 27.301887 | 78 | 0.664824 |
5eea98d326737ea22b6d51a5befb1afb43d96094 | 796 | py | Python | venv/lib/python3.8/site-packages/troposphere/mediastore.py | ayfallen/urler | d7bb5c83018a75cb4af2bbb7178bcf364b61f68f | [
"MIT"
] | 2 | 2021-04-03T06:34:08.000Z | 2022-01-14T22:27:02.000Z | venv/lib/python3.8/site-packages/troposphere/mediastore.py | ayfallen/urler | d7bb5c83018a75cb4af2bbb7178bcf364b61f68f | [
"MIT"
] | 6 | 2020-09-05T01:40:23.000Z | 2022-03-12T00:40:58.000Z | venv/lib/python3.8/site-packages/troposphere/mediastore.py | ayfallen/urler | d7bb5c83018a75cb4af2bbb7178bcf364b61f68f | [
"MIT"
] | 1 | 2020-09-05T00:19:03.000Z | 2020-09-05T00:19:03.000Z | # Copyright (c) 2012-2019, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from . import AWSObject
from . import AWSProperty
from .validators import boolean
from .validators import integer
class CorsRule(AWSProperty):
    # CORS rule property used by AWS::MediaStore::Container's CorsPolicy.
    # Each props tuple is (expected type, required?); all fields optional.
    props = {
        'AllowedHeaders': ([str], False),
        'AllowedMethods': ([str], False),
        'AllowedOrigins': ([str], False),
        'ExposeHeaders': ([str], False),
        'MaxAgeSeconds': (integer, False),
    }
class Container(AWSObject):
    # CloudFormation resource AWS::MediaStore::Container.
    resource_type = "AWS::MediaStore::Container"

    # Each props tuple is (expected type, required?);
    # only ContainerName is required.
    props = {
        'AccessLoggingEnabled': (boolean, False),
        'ContainerName': (str, True),
        'CorsPolicy': ([CorsRule], False),
        'LifecyclePolicy': (str, False),
        'Policy': (str, False),
    }
| 24.121212 | 52 | 0.61809 |
a4ae99f2f800bfec6cc641a9454f31fa318dbfb4 | 679 | py | Python | upload_terms.py | vikineema/deafrica-docs | 3e82b1dee56508fe31341263cd1ddb6857532715 | [
"Apache-2.0"
] | 5 | 2020-08-20T05:31:43.000Z | 2021-05-18T13:07:21.000Z | upload_terms.py | vikineema/deafrica-docs | 3e82b1dee56508fe31341263cd1ddb6857532715 | [
"Apache-2.0"
] | 62 | 2020-07-30T06:43:24.000Z | 2021-12-19T23:22:46.000Z | upload_terms.py | vikineema/deafrica-docs | 3e82b1dee56508fe31341263cd1ddb6857532715 | [
"Apache-2.0"
] | 1 | 2020-11-23T10:18:34.000Z | 2020-11-23T10:18:34.000Z | import os
from poeditor import POEditorAPI

# POEditor credentials come from the environment; a KeyError here means
# the required variables are not set.
project_id = os.environ['POEDITOR_PROJECT_ID']
api_token = os.environ['POEDITOR_API_TOKEN']

client = POEditorAPI(api_token=api_token)

# Report the term count before uploading the new template.
project = client.view_project_details(project_id)
print(f"Before update, {project['name']} (id: {project['id']}) has {project['terms']} terms.")

# Push the freshly built gettext template to POEditor.
update_results = client.update_terms(
    project_id=project_id,
    file_path='_build/docs.pot'
)

terms = update_results['terms']
print("Terms updated:")
for k, v in terms.items():
    print(f"\t{k}: {v}")

# Report the term count again so the delta is visible in the logs.
project = client.view_project_details(project_id)
print(f"After update, {project['name']} (id: {project['id']}) has {project['terms']} terms.")
| 28.291667 | 94 | 0.724595 |
325b8febd802ba0023742dac23d2c3ef83c16e02 | 1,947 | py | Python | papers/DaCy-A-Unified-Framework-for-Danish-NLP/apply_fns/apply_fn_daluke.py | martbern/DaCy | 3c181d37de2c4f38886a729511ac18d728d05b0b | [
"Apache-2.0"
] | 35 | 2021-07-12T15:29:11.000Z | 2022-03-17T04:35:36.000Z | papers/DaCy-A-Unified-Framework-for-Danish-NLP/apply_fns/apply_fn_daluke.py | martbern/DaCy | 3c181d37de2c4f38886a729511ac18d728d05b0b | [
"Apache-2.0"
] | 51 | 2021-07-10T18:12:43.000Z | 2022-03-15T22:28:19.000Z | papers/DaCy-A-Unified-Framework-for-Danish-NLP/apply_fns/apply_fn_daluke.py | martbern/DaCy | 3c181d37de2c4f38886a729511ac18d728d05b0b | [
"Apache-2.0"
] | 10 | 2021-07-15T06:12:03.000Z | 2022-03-17T04:35:49.000Z | ### pip install daluke==0.0.5
from typing import Iterable, List
from spacy.tokens import Span, Doc
from spacy.training import Example
from spacy.lang.da import Danish
from daluke import AutoNERDaLUKE, predict_ner
from .apply_fn_utils import apply_on_multiple_examples, add_iob, no_misc_getter
# This also downloads daluke model (first time)
daluke = AutoNERDaLUKE()
nlp_da = Danish()
def apply_daluke(
    examples: Iterable[Example], use_spacy: bool = True, batch_size: int = 16
) -> List[Example]:
    """Run DaLUKE NER over examples and pair predictions with the gold docs.

    Args:
        examples: gold examples; only ``example.reference`` is read.
        use_spacy: tokenize with spaCy's Danish tokenizer if True,
            otherwise with NLTK's ``word_tokenize``.
        batch_size: batch size forwarded to DaLUKE's predictor.

    Returns:
        New Examples pairing a DaLUKE-annotated doc (x) with the gold doc (y).
    """
    docs_y, sentences = list(), list()
    for example in examples:
        # Tokenization using spacy or nltk
        if use_spacy:
            sentences.append([t.text for t in nlp_da(example.reference.text)])
        else:
            # Lazy import: NLTK is only needed on this code path.
            from nltk.tokenize import word_tokenize

            sentences.append(word_tokenize(example.reference.text))
        docs_y.append(example.reference)
    # NER using daluke
    # join `should` not give size issues, as this string is again crudely split in DaLUKE API
    predictions = predict_ner(
        [" ".join(sent) for sent in sentences], daluke, batch_size=batch_size
    )
    out_examples = list()
    for doc_y, pred, words in zip(docs_y, predictions, sentences):
        # Wrap the predicted IOB tags in a spaCy Doc built from our tokens.
        doc = add_iob(Doc(nlp_da.vocab, words=words), iob=pred)
        out_examples.append(Example(doc, doc_y))
    return out_examples
if __name__ == "__main__":
import os
os.chdir("..")
from dacy.datasets import dane
test = dane(splits=["test"])
nlp = Danish()
examples = apply_daluke(test(nlp))
from spacy.scorer import Scorer
tok_scores = Scorer.score_tokenization(examples)
ent_scores = Scorer.score_spans(
examples=examples, attr="ents", getter=no_misc_getter
)
pos_scores = Scorer.score_token_attr(examples, "tag")
from spacy import displacy
displacy.render(examples[0].y, style="ent")
displacy.render(examples[0].x, style="ent")
breakpoint()
| 29.5 | 93 | 0.691834 |
c786fa6438e645f35c14966b7e75a93d0e7b4eb0 | 1,877 | py | Python | electrum_but/plot.py | Butkoin/electrum-but | 254fd26fc09a5fd925669da0f8608a4afacd86da | [
"MIT"
] | null | null | null | electrum_but/plot.py | Butkoin/electrum-but | 254fd26fc09a5fd925669da0f8608a4afacd86da | [
"MIT"
] | null | null | null | electrum_but/plot.py | Butkoin/electrum-but | 254fd26fc09a5fd925669da0f8608a4afacd86da | [
"MIT"
] | null | null | null | import datetime
from collections import defaultdict
import matplotlib
matplotlib.use('Qt5Agg')
import matplotlib.pyplot as plt
import matplotlib.dates as md
from .i18n import _
from .bitcoin import COIN
class NothingToPlotException(Exception):
    # Raised by plot_history() when the wallet history contains no
    # confirmed, timestamped transactions to chart.
    def __str__(self):
        return _("Nothing to plot.")
def plot_history(history):
    """Plot monthly incoming/outgoing transaction volume as bar charts.

    :param history: list of history dicts with 'confirmations',
        'timestamp', 'value' (satoshi-denominated amount) and 'date' keys
    :return: the configured matplotlib.pyplot module, ready to show()
    :raises NothingToPlotException: if nothing confirmed/timestamped exists
    """
    if len(history) == 0:
        raise NothingToPlotException()
    # Aggregate volume per calendar month, split by direction.
    hist_in = defaultdict(int)
    hist_out = defaultdict(int)
    for item in history:
        if not item['confirmations']:
            continue  # skip unconfirmed transactions
        if item['timestamp'] is None:
            continue  # skip entries without a usable date
        value = item['value'].value/COIN
        date = item['date']
        # Bucket by the first day of the transaction's month.
        datenum = int(md.date2num(datetime.date(date.year, date.month, 1)))
        if value > 0:
            hist_in[datenum] += value
        else:
            hist_out[datenum] -= value  # store outgoing volume as positive
    f, axarr = plt.subplots(2, sharex=True)
    plt.subplots_adjust(bottom=0.2)
    plt.xticks(rotation=25)
    ax = plt.gca()
    plt.ylabel('BUTK')
    plt.xlabel('Month')
    xfmt = md.DateFormatter('%Y-%m-%d')
    ax.xaxis.set_major_formatter(xfmt)
    axarr[0].set_title('Monthly Volume')
    # Re-apply a coarser, month-only tick format.
    xfmt = md.DateFormatter('%Y-%m')
    ax.xaxis.set_major_formatter(xfmt)
    width = 20  # bar width, in days
    r1 = None
    r2 = None
    # Incoming volume on the top subplot.
    dates_values = list(zip(*sorted(hist_in.items())))
    if dates_values and len(dates_values) == 2:
        dates, values = dates_values
        r1 = axarr[0].bar(dates, values, width, label='incoming')
        axarr[0].legend(loc='upper left')
    # Outgoing volume on the bottom subplot.
    dates_values = list(zip(*sorted(hist_out.items())))
    if dates_values and len(dates_values) == 2:
        dates, values = dates_values
        r2 = axarr[1].bar(dates, values, width, color='r', label='outgoing')
        axarr[1].legend(loc='upper left')
    if r1 is None and r2 is None:
        raise NothingToPlotException()
    return plt
| 29.328125 | 76 | 0.634523 |
af4f1201e276de5f526f6c0ea4c520273a544e4c | 2,305 | py | Python | pyrai/dispatcher/methods/fleet/update_vehicle.py | routable-ai/pyrai | 2aa448886ba14d1f44b408b67040a76d5f732528 | [
"MIT"
] | null | null | null | pyrai/dispatcher/methods/fleet/update_vehicle.py | routable-ai/pyrai | 2aa448886ba14d1f44b408b67040a76d5f732528 | [
"MIT"
] | null | null | null | pyrai/dispatcher/methods/fleet/update_vehicle.py | routable-ai/pyrai | 2aa448886ba14d1f44b408b67040a76d5f732528 | [
"MIT"
] | 1 | 2020-07-13T15:37:15.000Z | 2020-07-13T15:37:15.000Z | import datetime
import json
import requests
from pyrai.dispatcher.structures.fleet_params import FleetParams
from pyrai.dispatcher.structures.defaults import Defaults
from pyrai.dispatcher.structures.endpoints import Endpoints
from pyrai.dispatcher.structures.vehicle import Vehicle
from pyrai.dispatcher.structures.status_error import StatusError
from pyrai.helpers import to_rfc3339
def update_vehicle(self, vid, location, event, direction=Defaults.DEFAULT_DIRECTION, event_time=None, req_id=None):
    """
    Attempts to update a vehicle.
    Args:
        vid (int): The unique vehicle ID
        location (Location): The vehicle location.
        direction (float): Angle in radians clockwise away from true north
        event (VehicleEvent): Describes the current event for the vehicle.
            pickup occurs when the vehicle has picked up a request.
            dropoff occurs when the vehicle has dropped of a request.
            progress should be set when the vehicle is moving to service a request,
            either picking up or dropping off. The vehicle should be marked as
            unassigned when it is is not assigned to any requests.
        event_time (datetime.datetime, optional): Time at which the vehicle update has occurred.
            Set to datetime.datetime.now() if not provided. Defaults to None.
        req_id (int, optional): The unique ID of request the vehicle is servicing.
            If the vehicle is unassigned, this may be omitted. Defaults to None.
    Returns:
        Vehicle: If successful.
    Raises:
        StatusError: If unsucessful.
    """
    if event_time is None:
        event_time = datetime.datetime.now()
    # Track the latest event time seen across all updates on this fleet.
    if event_time > self.end_time:
        self.end_time = event_time
    url = self.build_url(Endpoints.UPDATE_VEHICLE)
    # Serialize the update; event_time is sent RFC 3339-encoded.
    payload = {
        'id': vid,
        'location': location.todict(),
        'direction': direction,
        'event_time': to_rfc3339(event_time),
        'event': event,
        'user_key': self.user_key.todict()
    }
    # req_id is only included for assigned vehicles.
    if req_id is not None:
        payload['req_id'] = req_id
    r = requests.post(url, data = json.dumps(payload))
    resp = r.json()
    if r.status_code == 200:
        return Vehicle.fromdict(self, resp)
    else:
        raise StatusError(resp = resp)
dcb72a027867a44375014c80a249ad2e42d106e9 | 696 | py | Python | api/migrations/0002_auto_20180412_0051.py | pokotsun/kyutechAppServer2018 | 9fe579c63e59ee585137e89d11c9cdccd58d3186 | [
"MIT"
] | 4 | 2018-08-09T15:19:06.000Z | 2021-04-03T12:24:57.000Z | api/migrations/0002_auto_20180412_0051.py | pokotsun/kyutechAppServer2018 | 9fe579c63e59ee585137e89d11c9cdccd58d3186 | [
"MIT"
] | 2 | 2018-10-19T15:32:17.000Z | 2020-06-05T19:41:14.000Z | api/migrations/0002_auto_20180412_0051.py | pokotsun/kyutechAppServer2018 | 9fe579c63e59ee585137e89d11c9cdccd58d3186 | [
"MIT"
] | null | null | null | # Generated by Django 2.0.4 on 2018-04-11 15:51
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: fixes admin plural display names and
    # adds NewsHeading.short_name ('Du' is a one-off default for existing
    # rows only, since preserve_default=False).

    dependencies = [
        ('api', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='news',
            options={'verbose_name_plural': 'News'},
        ),
        migrations.AlterModelOptions(
            name='newsheading',
            options={'verbose_name_plural': 'NewsHeadings'},
        ),
        migrations.AddField(
            model_name='newsheading',
            name='short_name',
            field=models.CharField(default='Du', max_length=2),
            preserve_default=False,
        ),
    ]
| 24.857143 | 63 | 0.570402 |
5ccbb337d2a1fdf45c44db2437a63da3bdb23c1f | 9,358 | py | Python | datadog_checks_base/datadog_checks/base/utils/db/utils.py | ichizero/integrations-core | 362f3d3f054a9a3fc8378a6d197906780eec3d2c | [
"BSD-3-Clause"
] | null | null | null | datadog_checks_base/datadog_checks/base/utils/db/utils.py | ichizero/integrations-core | 362f3d3f054a9a3fc8378a6d197906780eec3d2c | [
"BSD-3-Clause"
] | null | null | null | datadog_checks_base/datadog_checks/base/utils/db/utils.py | ichizero/integrations-core | 362f3d3f054a9a3fc8378a6d197906780eec3d2c | [
"BSD-3-Clause"
] | null | null | null | # (C) Datadog, Inc. 2019-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import datetime
import decimal
import logging
import os
import socket
import threading
import time
from concurrent.futures.thread import ThreadPoolExecutor
from itertools import chain
from typing import Any, Callable, Dict, List, Tuple
from cachetools import TTLCache
from datadog_checks.base import is_affirmative
from datadog_checks.base.log import get_check_logger
from datadog_checks.base.utils.db.types import Transformer
try:
import datadog_agent
except ImportError:
from ....stubs import datadog_agent
# NOTE(review): logger is keyed on __file__ (a filesystem path) rather than
# the conventional __name__; confirm before changing, since logging config
# may rely on the existing logger name.
logger = logging.getLogger(__file__)

# AgentCheck methods to transformer name e.g. set_metadata -> metadata
SUBMISSION_METHODS = {
    'gauge': 'gauge',
    'count': 'count',
    'monotonic_count': 'monotonic_count',
    'rate': 'rate',
    'histogram': 'histogram',
    'historate': 'historate',
    'set_metadata': 'metadata',
    # These submission methods require more configuration than just a name
    # and a value and therefore must be defined as a custom transformer.
    'service_check': '__service_check',
}
def create_submission_transformer(submit_method):
    # type: (Any) -> Callable[[Any, Any, Any], Callable[[Any, List, Dict], Callable[[Any, Any, Any], Transformer]]]
    """Build a transformer factory around an AgentCheck submission method.

    During the compilation phase every transformer will have access to all
    the others and may be passed the first arguments (e.g. name) that will
    be forwarded to the actual AgentCheck methods.
    """
    def get_transformer(_transformers, *creation_args, **modifiers):
        # type: (List[Transformer], Tuple, Dict[str, Any]) -> Transformer
        # The first argument of every transformer is a map of named
        # references to collected values.
        def transformer(_sources, *call_args, **kwargs):
            # type: (Dict[str, Any], Tuple[str, Any], Dict[str, Any]) -> None
            # Compile-time modifiers take precedence over call-time kwargs.
            kwargs.update(modifiers)
            # Python 2 support is gone, so forward both argument tuples
            # directly (PEP 448) instead of going through itertools.chain,
            # resolving the long-standing TODO here.
            submit_method(*creation_args, *call_args, **kwargs)

        return transformer

    return get_transformer
def create_extra_transformer(column_transformer, source=None):
    # type: (Transformer, str) -> Transformer
    """Adapt a column transformer for use in the post-processing phase.

    Column transformers expect an explicit value argument; extra
    transformers resolve their value from the map of named sources instead,
    so map the proper source to the value when one is given.
    """
    # Extra transformers that call regular transformers pass values directly.
    if not source:
        return column_transformer

    def transformer(sources, **kwargs):
        return column_transformer(sources, sources[source], **kwargs)

    return transformer
class ConstantRateLimiter:
    """
    Basic rate limiter that sleeps long enough to ensure the rate limit is not exceeded. Not thread safe.
    """

    def __init__(self, rate_limit_s):
        """
        :param rate_limit_s: rate limit in seconds
        """
        # Negative rates are clamped to 0 (i.e. no rate limiting).
        self.rate_limit_s = max(rate_limit_s, 0)
        if self.rate_limit_s > 0:
            self.period_s = 1.0 / self.rate_limit_s
        else:
            self.period_s = 0
        self.last_event = 0

    def sleep(self):
        """
        Sleeps long enough to enforce the rate limit
        """
        remaining = self.period_s - (time.time() - self.last_event)
        if remaining > 0:
            time.sleep(remaining)
        self.last_event = time.time()
class RateLimitingTTLCache(TTLCache):
    """
    TTLCache wrapper used for rate limiting by key
    """

    def acquire(self, key):
        """
        :return: True if the key has not yet reached its rate limit
        """
        # Deny when the cache is full or the key was seen within the TTL.
        if len(self) >= self.maxsize or key in self:
            return False
        self[key] = True
        return True
def resolve_db_host(db_host):
    """Map a configured DB host to the best hostname for tagging.

    Returns the agent's own hostname when the DB host is local, unset, or
    resolves to the same IP as the agent; otherwise the configured host.
    """
    agent_hostname = datadog_agent.get_hostname()
    if not db_host or db_host in {'localhost', '127.0.0.1'}:
        return agent_hostname
    try:
        host_ip = socket.gethostbyname(db_host)
    except socket.gaierror as e:
        # could be connecting via a unix domain socket
        logger.debug(
            "failed to resolve DB host '%s' due to %r. falling back to agent hostname: %s",
            db_host,
            e,
            agent_hostname,
        )
        return agent_hostname
    try:
        agent_host_ip = socket.gethostbyname(agent_hostname)
        if agent_host_ip == host_ip:
            # Same machine under a different name: prefer the agent hostname.
            return agent_hostname
    except socket.gaierror as e:
        logger.debug(
            "failed to resolve agent host '%s' due to socket.gaierror(%s). using DB host: %s",
            agent_hostname,
            e,
            db_host,
        )
    return db_host
def default_json_event_encoding(o):
    """``json.dumps`` *default* hook for values produced by DB drivers.

    Converts Decimal to float and date/datetime to an ISO-8601 string.
    Any other type raises TypeError, matching the json module's contract —
    but with an informative message instead of the original bare raise.
    """
    if isinstance(o, decimal.Decimal):
        return float(o)
    if isinstance(o, (datetime.date, datetime.datetime)):
        return o.isoformat()
    raise TypeError("Object of type {} is not JSON serializable".format(type(o).__name__))
class DBMAsyncJob(object):
    # Class-level pool: shared by every DBMAsyncJob instance in the process.
    executor = ThreadPoolExecutor()

    """
    Runs Async Jobs
    """

    def __init__(
        self,
        check,
        config_host=None,
        min_collection_interval=15,
        dbms="TODO",
        rate_limit=1,
        run_sync=False,
        enabled=True,
        expected_db_exceptions=(),
        shutdown_callback=None,
        job_name=None,
    ):
        # The owning AgentCheck; used for metric submission from the loop.
        self._check = check
        self._config_host = config_host
        self._min_collection_interval = min_collection_interval
        # map[dbname -> psycopg connection]
        self._log = get_check_logger()
        self._job_loop_future = None
        self._cancel_event = threading.Event()
        self._tags = None
        self._tags_no_db = None
        self._run_sync = None
        self._db_hostname = None
        # Timestamp of the last run_job_loop() call; used below to detect
        # an inactive check and stop the background loop.
        self._last_check_run = 0
        self._shutdown_callback = shutdown_callback
        self._dbms = dbms
        self._rate_limiter = ConstantRateLimiter(rate_limit)
        self._run_sync = run_sync
        self._enabled = enabled
        # DB driver exceptions logged as warnings rather than crashes.
        self._expected_db_exceptions = expected_db_exceptions
        self._job_name = job_name

    def cancel(self):
        # Signal the background loop to exit at its next iteration.
        self._cancel_event.set()

    def run_job_loop(self, tags):
        """
        :param tags:
        :return:
        """
        if not self._enabled:
            self._log.debug("[job=%s] Job not enabled.", self._job_name)
            return
        if not self._db_hostname:
            self._db_hostname = resolve_db_host(self._config_host)
        self._tags = tags
        self._tags_str = ','.join(self._tags)
        self._job_tags = self._tags + ["job:{}".format(self._job_name)]
        self._job_tags_str = ','.join(self._job_tags)
        self._last_check_run = time.time()
        # Run inline when configured (or forced via environment variable),
        # otherwise (re)start the background loop on the shared executor.
        if self._run_sync or is_affirmative(os.environ.get('DBM_THREADED_JOB_RUN_SYNC', "false")):
            self._log.debug("Running threaded job synchronously. job=%s", self._job_name)
            self._run_job_rate_limited()
        elif self._job_loop_future is None or not self._job_loop_future.running():
            self._job_loop_future = DBMAsyncJob.executor.submit(self._job_loop)
        else:
            self._log.debug("Job loop already running. job=%s", self._job_name)

    def _job_loop(self):
        # Background loop: run the job repeatedly until cancelled, the
        # check goes inactive, or an error occurs.
        try:
            self._log.info("[%s] Starting job loop", self._job_tags_str)
            while True:
                if self._cancel_event.isSet():
                    self._log.info("[%s] Job loop cancelled", self._job_tags_str)
                    self._check.count("dd.{}.async_job.cancel".format(self._dbms), 1, tags=self._job_tags)
                    break
                # No check run within two collection intervals: assume the
                # check stopped and shut the loop down.
                if time.time() - self._last_check_run > self._min_collection_interval * 2:
                    self._log.info("[%s] Job loop stopping due to check inactivity", self._job_tags_str)
                    self._check.count("dd.{}.async_job.inactive_stop".format(self._dbms), 1, tags=self._job_tags)
                    break
                self._run_job_rate_limited()
        except self._expected_db_exceptions as e:
            # Anticipated database-side failures: warn (with traceback only
            # at DEBUG level) and emit an error metric.
            self._log.warning(
                "[%s] Job loop database error: %s",
                self._job_tags_str,
                e,
                exc_info=self._log.getEffectiveLevel() == logging.DEBUG,
            )
            self._check.count(
                "dd.{}.async_job.error".format(self._dbms),
                1,
                tags=self._job_tags + ["error:database-{}".format(type(e))],
            )
        except Exception as e:
            self._log.exception("[%s] Job loop crash", self._job_tags_str)
            self._check.count(
                "dd.{}.async_job.error".format(self._dbms),
                1,
                tags=self._job_tags + ["error:crash-{}".format(type(e))],
            )
        finally:
            self._log.info("[%s] Shutting down job loop", self._job_tags_str)
            if self._shutdown_callback:
                self._shutdown_callback()

    def _set_rate_limit(self, rate_limit):
        # Replace the limiter only when the rate actually changed.
        if self._rate_limiter.rate_limit_s != rate_limit:
            self._rate_limiter = ConstantRateLimiter(rate_limit)

    def _run_job_rate_limited(self):
        self.run_job()
        self._rate_limiter.sleep()

    def run_job(self):
        # Subclasses implement the actual collection work here.
        raise NotImplementedError()
| 34.153285 | 115 | 0.629087 |
29762791a680f039d5e84b4d4f97c424197f0be2 | 1,604 | py | Python | samples/generated_samples/dataproc_v1_generated_autoscaling_policy_service_list_autoscaling_policies_async.py | gplasky/python-dataproc | 61c7b2133450dbda49d42a179a7e5ad8f3b7de17 | [
"Apache-2.0"
] | null | null | null | samples/generated_samples/dataproc_v1_generated_autoscaling_policy_service_list_autoscaling_policies_async.py | gplasky/python-dataproc | 61c7b2133450dbda49d42a179a7e5ad8f3b7de17 | [
"Apache-2.0"
] | null | null | null | samples/generated_samples/dataproc_v1_generated_autoscaling_policy_service_list_autoscaling_policies_async.py | gplasky/python-dataproc | 61c7b2133450dbda49d42a179a7e5ad8f3b7de17 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ListAutoscalingPolicies
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dataproc
# [START dataproc_v1_generated_AutoscalingPolicyService_ListAutoscalingPolicies_async]
from google.cloud import dataproc_v1
async def sample_list_autoscaling_policies():
    # Create a client
    client = dataproc_v1.AutoscalingPolicyServiceAsyncClient()

    # Initialize request argument(s)
    request = dataproc_v1.ListAutoscalingPoliciesRequest(
        parent="parent_value",
    )

    # Make the request
    # NOTE(review): the pager is consumed with ``async for`` below; confirm
    # against current generated samples whether this call needs ``await``.
    page_result = client.list_autoscaling_policies(request=request)

    # Handle the response
    async for response in page_result:
        print(response)
# [END dataproc_v1_generated_AutoscalingPolicyService_ListAutoscalingPolicies_async]
| 34.12766 | 86 | 0.773067 |
6979c69b3e33de59b1df4127c013078597379829 | 4,428 | py | Python | tests/test_hlm.py | slavisasarafijanovic/testcloneswifthlm | dd5979b8c86971da5d0a23e5cf31320c42bcac51 | [
"Apache-2.0"
] | null | null | null | tests/test_hlm.py | slavisasarafijanovic/testcloneswifthlm | dd5979b8c86971da5d0a23e5cf31320c42bcac51 | [
"Apache-2.0"
] | null | null | null | tests/test_hlm.py | slavisasarafijanovic/testcloneswifthlm | dd5979b8c86971da5d0a23e5cf31320c42bcac51 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import random
import subprocess
import unittest
import mock
from swift.common.swob import Request
from swifthlm import middleware as swifthlm
class FakeApp(object):
    """Minimal WSGI application stub.

    Always answers '200 OK' with the configured response headers and an
    empty body, giving middleware under test a downstream app to call.
    """

    def __init__(self, headers=None):
        # A falsy ``headers`` argument (None or {}) yields a fresh dict.
        self.headers = headers if headers else {}

    def __call__(self, env, start_response):
        start_response('200 OK', self.headers)
        return []
class TestSwiftHLM(unittest.TestCase):
    """Unit tests for the SwiftHLM middleware request handling."""

    def setUp(self):
        # Middleware wired to a stub downstream WSGI app and empty config.
        self.app = swifthlm.HlmMiddleware(FakeApp(), {})

    def test_migrate(self):
        # POST ?MIGRATE invokes the migrate backend for the container.
        subprocess.call = mock.Mock()
        random.choice = mock.Mock(return_value='0')
        environ = {'REQUEST_METHOD': 'POST'}
        req = Request.blank('/v1/a/c?MIGRATE', environ=environ)
        resp = req.get_response(self.app)
        subprocess.call.assert_called_with(
            ['/opt/ibm/swift-hlm-backend/migrate', 'a/c', '000000000000'])
        self.assertEquals(resp.status_int, 200)
        self.assertEquals(resp.body, 'Accepted migration request.\n')

    def test_recall(self):
        # POST ?RECALL invokes the recall backend for the container.
        subprocess.call = mock.Mock()
        random.choice = mock.Mock(return_value='0')
        environ = {'REQUEST_METHOD': 'POST'}
        req = Request.blank('/v1/a/c?RECALL', environ=environ)
        resp = req.get_response(self.app)
        subprocess.call.assert_called_with(
            ['/opt/ibm/swift-hlm-backend/recall', 'a/c', '000000000000'])
        self.assertEquals(resp.status_int, 200)
        self.assertEquals(resp.body, 'Accepted recall request.\n')

    def test_get_status(self):
        # GET ?STATUS returns the status backend's stdout verbatim.
        subprocess.check_output = mock.Mock(return_value='status output')
        random.choice = mock.Mock(return_value='0')
        req = Request.blank('/v1/a/c?STATUS')
        resp = req.get_response(self.app)
        subprocess.check_output.assert_called_with(
            ['/opt/ibm/swift-hlm-backend/status', 'a/c', '000000000000',
             'STATUS'])
        self.assertEquals(resp.status_int, 200)
        self.assertEquals(resp.body, 'status output')

    def test_invalid_get_status_POST(self):
        # STATUS on a POST request is ignored: 200 with an empty body.
        subprocess.check_output = mock.Mock(return_value='status output')
        random.choice = mock.Mock(return_value='0')
        environ = {'REQUEST_METHOD': 'POST'}
        req = Request.blank('/v1/a/c?STATUS', environ=environ)
        resp = req.get_response(self.app)
        self.assertEquals(resp.status_int, 200)
        self.assertEquals(resp.body, '')

    def test_invalid_migrate_GET(self):
        # MIGRATE on a GET request is ignored: 200 with an empty body.
        subprocess.call = mock.Mock()
        random.choice = mock.Mock(return_value='0')
        req = Request.blank('/v1/a/c?MIGRATE')
        resp = req.get_response(self.app)
        self.assertEquals(resp.status_int, 200)
        self.assertEquals(resp.body, '')

    def test_invalid_get_status_exception(self):
        # A failing status backend surfaces its output instead of a 500.
        subprocess.check_output = mock.Mock(
            side_effect=subprocess.CalledProcessError(1, 'cmd', 'boom!'))
        random.choice = mock.Mock(return_value='0')
        req = Request.blank('/v1/a/c?STATUS')
        resp = req.get_response(self.app)
        subprocess.check_output.assert_called_with(
            ['/opt/ibm/swift-hlm-backend/status', 'a/c', '000000000000',
             'STATUS'])
        self.assertEquals(resp.status_int, 200)
        self.assertEquals(resp.body, 'boom!')

    def test_filter_factory(self):
        # filter_factory propagates backend paths onto the middleware.
        factory = swifthlm.filter_factory({'migrate_backend': '/a/b/c/migrate',
                                           'recall_backend': '/d/e/f/recall',
                                           'status_backend': '/g/h/i/status'})
        thehlm = factory('myapp')
        self.assertEqual(thehlm.migrate_backend, '/a/b/c/migrate')
        self.assertEqual(thehlm.recall_backend, '/d/e/f/recall')
        self.assertEqual(thehlm.status_backend, '/g/h/i/status')
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 36.9 | 79 | 0.644761 |
f55e29a3f9d1a924bef285ca13ef2c826589a14c | 148 | py | Python | server.py | Kuduxaaa/wavetech-bypass | 9560e2547b2bd8bb2b82286b6512a93188ca24d3 | [
"MIT"
] | null | null | null | server.py | Kuduxaaa/wavetech-bypass | 9560e2547b2bd8bb2b82286b6512a93188ca24d3 | [
"MIT"
] | null | null | null | server.py | Kuduxaaa/wavetech-bypass | 9560e2547b2bd8bb2b82286b6512a93188ca24d3 | [
"MIT"
] | null | null | null | #!/bin/python3
# -*- coding: utf-8 -*-
# Coded By Kuduxaaa

from app import app

# Start the Flask development server on port 8083 when invoked directly.
# NOTE(review): debug is off here; use a production WSGI server to deploy.
if __name__ == '__main__':
    app.run(debug=False, port=8083)
| 16.444444 | 35 | 0.628378 |
4d8d59f6e3e7e7c5da4bab8cd04e1522f26627b5 | 1,261 | py | Python | hardware/api/urls.py | KoenVingerhoets/bitnodes-hardware | 75707bbf8aabb0add11e8fa293d4d5848467417c | [
"MIT"
] | 45 | 2015-05-27T18:04:46.000Z | 2021-12-30T11:31:33.000Z | hardware/api/urls.py | KoenVingerhoets/bitnodes-hardware | 75707bbf8aabb0add11e8fa293d4d5848467417c | [
"MIT"
] | 6 | 2015-06-11T03:47:04.000Z | 2016-05-22T00:39:55.000Z | hardware/api/urls.py | KoenVingerhoets/bitnodes-hardware | 75707bbf8aabb0add11e8fa293d4d5848467417c | [
"MIT"
] | 15 | 2015-06-10T13:22:03.000Z | 2021-10-24T00:16:17.000Z | #
# Copyright (c) Addy Yeow Chin Heng <ayeowch@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from django.conf.urls import url
from . import views
# URL routes for version 1 of the Hardware API.
urlpatterns = [
    url(r'^v1/node-status/$', views.node_status, name='node-status'),
]
| 43.482759 | 72 | 0.766059 |
ae5bc77434b47fc41b395773c969a215983afc56 | 246 | py | Python | Searching/BS/bisect_lib.py | kimjiwook0129/Coding-Interivew-Cheatsheet | 574e6acecdb617b9c3cef7ec3b154ab183d8b99a | [
"MIT"
] | 3 | 2022-01-09T04:33:04.000Z | 2022-02-04T17:40:43.000Z | Searching/BS/bisect_lib.py | kimjiwook0129/Coding-Interivew-Cheatsheet | 574e6acecdb617b9c3cef7ec3b154ab183d8b99a | [
"MIT"
] | null | null | null | Searching/BS/bisect_lib.py | kimjiwook0129/Coding-Interivew-Cheatsheet | 574e6acecdb617b9c3cef7ec3b154ab183d8b99a | [
"MIT"
] | null | null | null | from bisect import bisect_left, bisect_right
# Each function performs at O(log N)
a = [1, 2, 4, 4, 4, 4, 7, 8, 8]
x = 0

# x == 0 sorts before every element of ``a``, so both insertion points are 0.
# (The previous trailing comments said 2 and 4, stale from an earlier x.)
print(f'First index to place x in a {bisect_left(a, x)}')  # 0
print(f'Last index to place x in a {bisect_right(a, x)}')  # 0
| 30.75 | 62 | 0.646341 |
909b616056cdeee8ad19f151c9b1f3459f5ab042 | 2,536 | py | Python | src/eeschema/export_bom.py | Vman45/kicad-automation-scripts | f7144ac2c6bf8cdf19f4f0a7b414acbe48ec7e41 | [
"Apache-2.0"
] | 55 | 2018-05-05T19:30:29.000Z | 2022-03-27T20:09:38.000Z | src/eeschema/export_bom.py | Vman45/kicad-automation-scripts | f7144ac2c6bf8cdf19f4f0a7b414acbe48ec7e41 | [
"Apache-2.0"
] | 9 | 2018-07-07T22:32:44.000Z | 2021-12-01T21:25:55.000Z | src/eeschema/export_bom.py | Vman45/kicad-automation-scripts | f7144ac2c6bf8cdf19f4f0a7b414acbe48ec7e41 | [
"Apache-2.0"
] | 24 | 2018-05-04T08:32:07.000Z | 2022-02-01T19:47:25.000Z | #!/usr/bin/env python
# Copyright 2015-2016 Scott Bezek and the splitflap contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import subprocess
import sys
import time
from contextlib import contextmanager
electronics_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
repo_root = os.path.dirname(electronics_root)
sys.path.append(repo_root)
from util import file_util
from ui_automation import (
PopenContext,
xdotool,
wait_for_window,
recorded_xvfb,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
def eeschema_export_bom(output_directory):
    """Drive the eeschema UI (via xdotool) to generate the BOM XML.

    Opens Tools->Generate Bill Of Materials with keystrokes and runs the
    configured BOM plugin.  ``output_directory`` is currently unused (kept
    for interface compatibility); eeschema writes the XML next to the
    schematic file.
    """
    # r'\[' is a regex for the window-title matcher; a raw string avoids
    # the invalid-escape SyntaxWarning that plain '\[' triggers.
    wait_for_window('eeschema', r'\[')

    logger.info('Focus main eeschema window')
    xdotool(['search', '--name', r'\[', 'windowfocus'])

    logger.info('Open Tools->Generate Bill Of Materials')
    xdotool(['key', 'alt+t'])
    xdotool(['key', 'm'])

    logger.info('Run generate')
    wait_for_window('plot', 'Bill of Material')
    xdotool(['search', '--name', 'Bill of Material', 'windowfocus'])
    xdotool(['key', 'Return'])

    logger.info('Wait before shutdown')
    # Give the BOM plugin time to finish writing before eeschema is killed.
    time.sleep(2)
def export_bom():
    """Export the splitflap BOM.

    Runs eeschema headlessly under a recorded Xvfb session to produce the
    component XML, then invokes generate_bom_csv.py to turn it into a CSV.
    """
    schematic = os.path.join(electronics_root, 'splitflap.sch')
    build_dir = os.path.join(electronics_root, 'build')
    file_util.mkdir_p(build_dir)

    screencast = os.path.join(build_dir, 'export_bom_screencast.ogv')
    with recorded_xvfb(screencast, width=800, height=600, colordepth=24):
        with PopenContext(['eeschema', schematic], close_fds=True) as eeschema_proc:
            eeschema_export_bom(build_dir)
            eeschema_proc.terminate()

    logger.info('Convert component XML to useful BOM CSV file...')
    converter_cmd = [
        'python',
        '-u',
        os.path.join(electronics_root, 'bom', 'generate_bom_csv.py'),
        os.path.join(electronics_root, 'splitflap.xml'),
        os.path.join(build_dir, 'bom.csv'),
    ]
    subprocess.check_call(converter_cmd)
# Entry point: run the full BOM export when invoked as a script.
if __name__ == '__main__':
    export_bom()
| 30.554217 | 89 | 0.70623 |
3c85254bafbe9c638064088a915fc35b32cc452f | 9,691 | py | Python | add_features.py | brt381/TCAG-WGS-CNV-workflow | b074d6047021551c510e6b1c8384ad3bf05e3cb7 | [
"MIT"
] | 39 | 2018-01-06T01:45:31.000Z | 2022-03-17T08:27:05.000Z | add_features.py | brt381/TCAG-WGS-CNV-workflow | b074d6047021551c510e6b1c8384ad3bf05e3cb7 | [
"MIT"
] | 10 | 2018-09-18T10:12:57.000Z | 2020-06-03T15:11:05.000Z | add_features.py | brt381/TCAG-WGS-CNV-workflow | b074d6047021551c510e6b1c8384ad3bf05e3cb7 | [
"MIT"
] | 16 | 2018-01-11T08:36:03.000Z | 2020-03-09T09:24:10.000Z | #program annotate a merged erds results with merged cnvnator results
import os
import sys
import re
import getopt
import functions
#file names -- module-level globals filled in by option parsing in __main__
input_file_name = ""   # merged ERDS results (-i)
output_file_name = ""  # annotated ERDS+ output (-o)
sample_id = ""         # sample identifier (-s)
annot_file_name = ""   # merged CNVnator results (-a)
i_map = {}             # sample -> {cnv_id -> [chrm, start, end, ...]}
alt_id = {}            # NOTE(review): never used in this file -- confirm before removing
#% overlap cutoff (integer percentage, -c)
overlap_cutoff = 0
overlap_type = "oneway"  # "oneway" or "reciprocal" (-p)
##########
#print usage
def usage():
	"""Print the command-line usage string to stdout (Python 2 script)."""
	print "usage: program -i <merged erds results> -o <erds+ results> -a <merged cnvnator results> -s <sample_id> -c <overlap cutoff> -p <reciprocal|oneway>"
##########
#read input file -
##SampleID Chr Start End Size ....
def read_input_map(input_file,i_map,sample_id):
	"""Read the merged ERDS file into i_map.

	Expected tab-separated columns: SampleID, Chr, Start, End, Size, ...
	(the SampleID column is ignored; the caller-supplied sample_id is used).
	Populates i_map[sample_id][chrm:start-end] = [chrm, start, end] with
	start decremented by 1 when start == end (zero-length record guard).
	Lines starting with '#' or '>' are treated as headers and skipped.
	"""
	i_file = open(input_file)
	for line in i_file:
		if line[0] == '#' or line[0] == '>':
			continue
		line = line.replace("\n","")
		words = line.split("\t")
		sample = sample_id
		chrm = words[1].replace("chr","")
		# strip thousands separators and stray quotes from coordinates
		start=words[2].replace(",","").replace("\"","")
		end=words[3].replace(",","").replace("\"","")
		id = chrm+":"+start+"-"+end
		if i_map.has_key(sample):
			if not i_map[sample].has_key(id):
				if int(start) == int(end):
					i_map[sample][id]=[chrm,int(start)-1,int(end)]
				else:
					i_map[sample][id]=[chrm,int(start),int(end)]
		else:
			# NOTE(review): first record also stores the sample as a 4th
			# element, unlike the branch above -- presumably unintentional;
			# confirm downstream code only indexes [0..2].
			if int(start) == int(end):
				i_map[sample] = {id:[chrm,int(start)-1,int(end),sample]}
			else:
				i_map[sample] = {id:[chrm,int(start),int(end),sample]}
	i_file.close()
##########
#check if one set of coordinates overlaps with another set
def find_overlap(map_coords, file_to_read, results, overlap_cutoff, sample_id, overlap_type):
ref_coords = {}
ref_coords_by_chrm = {}
read_input_ref(file_to_read, ref_coords, ref_coords_by_chrm, sample_id)
#print "#Number of record found in ", file_to_read, ":", len(ref_coords)
for sample in map_coords.keys():
alt_sample_id = sample_id
for key_map in map_coords[sample]:
temp_key = map_coords[sample][key_map]
chrm = temp_key[0].replace("chr","")
s_1 = temp_key[1]
e_1 = temp_key[2]
cnv_len = e_1 - s_1 + 1
count = 0
ref_dict = {}
cnv_str = ""
sample_str = ""
#check if the reference set has the sample
id = key_map
if not ref_coords_by_chrm.has_key(alt_sample_id):
if results.has_key(sample):
results[sample][id] = [0,"*","*","*","*"]
else:
results[sample]={id:[0,"**","**","**","**"]}
continue
#check if the reference sample has calls for a chrm
if not ref_coords_by_chrm[alt_sample_id].has_key(chrm):
if results.has_key(sample):
results[sample][id] = [0,"#","#","#","#"]
else:
results[sample]={id:[0,"##","##","##","##"]}
continue
#check overlaps
for key_ref in ref_coords_by_chrm[alt_sample_id][chrm]:
temp_ref = ref_coords[alt_sample_id][key_ref]
#check if the coordinates are for the same chromosome
if not temp_ref[0] == temp_key[0]:
continue
temp = functions.reciprocal_overlap(s_1,e_1,temp_ref[1],temp_ref[2])
if overlap_type == "reciprocal":
if temp[0] > overlap_cutoff and temp[1] > overlap_cutoff:
cnv_str += alt_sample_id + "|" + chrm + ":" + `temp_ref[1]` + "-" + `temp_ref[2]` + "|" + temp_ref[3] + ","
else:
if temp[0] > overlap_cutoff:
cnv_str += alt_sample_id + "|" + chrm + ":" + `temp_ref[1]` + "-" + `temp_ref[2]` + "|" + temp_ref[3] + ","
if cnv_str == "":
if results.has_key(sample):
results[sample][id] = [0,"-","-","-","-","-"]
else:
results[sample]={id:[0,"-","-","-","-"]}
else:
cnv_str = cnv_str[:-1]
ref_str = ""
#remove redundant ids
u_cnv = dict.fromkeys(cnv_str.split(",")).keys()
o_data = []
a_o_data = []
for u in u_cnv:
u1 = u.split("|")[1].split(":")[1].split("-")
o_data.append([int(u1[0]),int(u1[1])])
a_o_data.append([int(u1[0]),int(u1[1])])
o_data.sort(functions.sort_list)
a_o_data.sort(functions.sort_list)
c_data = []
a_c_data = []
functions.cluster(o_data,c_data,s_1,e_1)
functions.alt_cluster(a_o_data,a_c_data)
covered = 0
a_length = 0
for c in c_data:
covered += c[1]-c[0]+1
for c in a_c_data:
a_length += c[1]-c[0]+1
u_count = len(dict.fromkeys(cnv_str.split(",")).keys())
if results.has_key(sample):
results[sample][id] = [u_count,cnv_str, covered, covered/float(cnv_len), covered/float(a_length)]
else:
results[sample]={id:[u_count,cnv_str, covered, covered/float(cnv_len), covered/float(a_length)]}
##########
#read input file
def read_input_ref(file_to_read, ref_coords, ref_coords_by_chrm, sample_id):
i_file = open(file_to_read)
##sample chrm start end cnv size normalized_rd e-val2 q0
for line in i_file:
if line[0]=="#" or line[0] == '>':
continue
line = line.replace("\n","")
words = line.split("\t")
sample = sample_id
chrm = words[1].replace("chr","")
id = chrm + ":" + words[2] + "-" + words[3]
info = words[4]+"|"+words[6].replace("|","*")
if ref_coords_by_chrm.has_key(sample):
if ref_coords_by_chrm[sample].has_key(chrm):
if not id in ref_coords_by_chrm[sample][chrm]:
ref_coords_by_chrm[sample][chrm].append(id)
else:
ref_coords_by_chrm[sample][chrm]=[id]
else:
ref_coords_by_chrm[sample]={chrm:[id]}
if ref_coords.has_key(sample):
if ref_coords[sample].has_key(id):
temp = ref_coords[sample][id]
temp[3] += "," + info
ref_coords[sample][id] = temp
else:
#store - chrm, start, end, annotation, count
if int(words[2])==int(words[3]):
ref_coords[sample][id] = [chrm,int(words[2])-1,int(words[3]),info]
else:
ref_coords[sample][id] = [chrm,int(words[2]),int(words[3]),info]
else:
if int(words[2])==int(words[3]):
ref_coords[sample]={id:[chrm,int(words[2])-1,int(words[3]),info]}
else:
ref_coords[sample]={id:[chrm,int(words[2]),int(words[3]),info]}
i_file.close()
#####################################################################
#main
if __name__ == "__main__":
try:
opts, args = getopt.getopt(sys.argv[1:], "i:o:a:s:c:p:h", ["help"])
except getopt.GetoptError:
# print help information and exit:
usage()
sys.exit(0)
#read command line options
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit(0)
#input file
if o == "-i":
input_file_name = a
#file with calls to annotate with
if o == "-a":
annot_file_name = a
#overlap type to use
if o == "-p":
if a != "oneway" and a != "reciprocal":
print "Only two options allowed - reciprocal and oneway"
sys.exit(0)
else:
overlap_type = a
#overlap cutoff to use
if o == "-c":
overlap_cutoff = int(a)
#sample_id
if o == "-s":
sample_id = a
#formatted output
if o == "-o":
output_file_name = a
if input_file_name == "" or output_file_name == ""or annot_file_name == "" or sample_id == "":
usage()
sys.exit(0)
#check all input files
if not os.path.isfile(input_file_name):
print "Merged erds output not found ..."
sys.exit(1)
if not os.path.isfile(annot_file_name):
print "Merged cnvnator output not found ..."
sys.exit(1)
#check output files
if os.path.isfile(output_file_name):
print "Delete file", output_file_name, "and rerun"
sys.exit(0)
#else open file to write to
out = open(output_file_name,'w')
print output_file_name
#datasets
calls = {}
#read file to annotate
read_input_map(input_file_name, i_map, sample_id)
#find overlaps
find_overlap(i_map, annot_file_name, calls, overlap_cutoff, sample_id, overlap_type)
header = "\tcnvn_count\tcnvn_details\tcnvn_coverage\terds_fraction\tcnvn_fraction\tcnv_type_conflict\tcnv_type_confict_coverage"
flag = 1
i_file = open(input_file_name)
##SampleID Chr Start End .....
for line in i_file:
line = line.replace("\n","")
line = re.sub("$\t","",line)
line = re.sub("chr","",line)
words = line.split("\t")
if flag == 1:
flag = 0
if line[0] == "#" or line[0] == '>':
print >> out, line + header
continue
else:
h = ""
for i in words:
h += "\t"
print >> out , h + header
sample = sample_id
start = int(words[2].replace(",","").replace("\"",""))
end = int(words[3].replace(",","").replace("\"",""))
id = words[1].replace("chr","")+":"+`start`+"-"+`end`
type = words[4]
temp_str = line
length = end - start
#
if calls[sample][id][0]==0:
temp_str += "\t"+`calls[sample][id][0]`+"\t"+calls[sample][id][1]+"\t"+calls[sample][id][2]+"\t"+calls[sample][id][3]+"\t"+calls[sample][id][4] + "\t-\t-"
else:
ovlp_details = calls[sample][id][1].split(",")
comment = "-"
cov_opp_temp = 0
cov_opp = "-"
temp = {}
for d in ovlp_details:
d_type=d.split("|")[2]
if d_type != type:
boundary = d.split("|")[1]
start = int(boundary.split(":")[1].split("-")[0])
end = int(boundary.split(":")[1].split("-")[1])
cov_opp_temp += end - start
temp[d_type]=1
details = "*".join(temp.keys())
if details != type:
comment = type + "|" + details
cov_opp = `cov_opp_temp/float(length)`
temp_str += "\t"+`calls[sample][id][0]`+"\t"+calls[sample][id][1]+"\t"+`calls[sample][id][2]`+"\t"+`calls[sample][id][3]`+"\t"+`calls[sample][id][4]`+"\t"+comment+"\t"+cov_opp
print >> out, temp_str
out.close()
i_file.close()
| 30.570978 | 179 | 0.579507 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.