hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k โ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 โ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 โ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k โ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 โ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 โ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k โ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 โ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 โ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acf3f821075043d5d4e8aa04a10e843321a18fdb | 686 | py | Python | example/app/item_view.py | artemShelest/flask-admin-subview | 48a00b5d3b95c8709e90af757d03bbbfd6881e03 | [
"MIT"
] | 12 | 2019-01-22T00:32:29.000Z | 2021-08-06T14:55:14.000Z | example/app/item_view.py | artemShelest/flask-admin-subview | 48a00b5d3b95c8709e90af757d03bbbfd6881e03 | [
"MIT"
] | null | null | null | example/app/item_view.py | artemShelest/flask-admin-subview | 48a00b5d3b95c8709e90af757d03bbbfd6881e03 | [
"MIT"
] | 4 | 2020-07-29T08:34:32.000Z | 2021-09-05T12:47:58.000Z | from flask_admin.contrib.sqla import ModelView
from .formatters import model_link_formatter
class ItemView(ModelView):
    """Flask-Admin view configuration for Item records.

    Pure declarative configuration: controls list/detail rendering,
    search/sort/filter columns, inline editing, and create/edit form rules.
    """

    # Show the details view in a modal dialog instead of a separate page.
    details_modal = True
    can_view_details = True
    can_set_page_size = True
    # Free-text search and filtering operate on the title column only.
    column_searchable_list = ("title",)
    # The related columns sort by the referenced person's name, not by id.
    column_sortable_list = ("title", ("owner", "owner.name"), ("holder", "holder.name"))
    column_filters = ("title",)
    column_list = ("title", "owner", "holder")
    # Title can be edited inline from the list view.
    column_editable_list = ("title",)
    form_create_rules = ("title", "owner", "holder")
    # The edit form exposes only the title; owner/holder are set at creation.
    form_edit_rules = ("title",)
    # Render owner/holder cells via model_link_formatter (defined in
    # .formatters) — presumably as links to the "person" view; confirm there.
    column_formatters = {
        'owner': model_link_formatter("person", "owner"),
        'holder': model_link_formatter("person", "holder")
    }
| 31.181818 | 88 | 0.667638 |
acf3f8cc4ac99689497b3f06c3972a12a03e2c58 | 2,211 | py | Python | tests/test_bloomfilter.py | binh-vu/hugedict | 0cb2d17592a516ff272e4e845d2b96ce404d410a | [
"MIT"
] | null | null | null | tests/test_bloomfilter.py | binh-vu/hugedict | 0cb2d17592a516ff272e4e845d2b96ce404d410a | [
"MIT"
] | null | null | null | tests/test_bloomfilter.py | binh-vu/hugedict | 0cb2d17592a516ff272e4e845d2b96ce404d410a | [
"MIT"
] | null | null | null | import os
from tempfile import TemporaryDirectory
import orjson
from multiprocessing import Process, Pipe
import pybloomfilter
def process1(conn):
    """Writer child: creates the bloom-filter file and inserts keys on demand.

    Protocol: each round receives a JSON message with status == "start" over
    the pipe, mutates the filter, and replies with a membership check result.
    """
    # Round 1: build a fresh filter on disk and insert key1.
    request = orjson.loads(conn.recv())
    assert request["status"] == "start"
    bf = pybloomfilter.BloomFilter(10000, 0.1, request["bloomfile"])
    first_key = request["key1"]
    bf.add(first_key)
    conn.send(orjson.dumps({"status": "done", "has_key": first_key in bf}))

    # Round 2: insert key2 into the already-open filter.
    request = orjson.loads(conn.recv())
    assert request["status"] == "start"
    second_key = request["key2"]
    bf.add(second_key)
    conn.send(orjson.dumps({"status": "done", "has_key": second_key in bf}))
def process2(conn):
    """Reader child: opens the sibling's filter read-only and probes keys.

    Never writes to the filter; it only verifies that keys added by the
    writer process are visible through the shared file.
    """
    request = orjson.loads(conn.recv())
    assert request["status"] == "start"
    # Open (not create) the filter file written by the writer process.
    bf = pybloomfilter.BloomFilter.open(request["bloomfile"], "r")
    conn.send(orjson.dumps({"status": "done", "has_key": request["key1"] in bf}))

    request = orjson.loads(conn.recv())
    assert request["status"] == "start"
    conn.send(orjson.dumps({"status": "done", "has_key": request["key2"] in bf}))
def test_bloomfilter_shared_data():
    """Two processes share one bloom-filter file: process1 writes, process2 reads.

    Verifies that keys added by the writer become visible to a reader that
    opened the same file, including keys added after the reader opened it.
    """
    with TemporaryDirectory() as tempdir:
        bloomfile = os.path.join(tempdir, "bloomfilter")
        p1conn_pp, p1conn_cc = Pipe()
        p2conn_pp, p2conn_cc = Pipe()
        p1 = Process(target=process1, args=(p1conn_cc,))
        p1.start()
        p2 = Process(target=process2, args=(p2conn_cc,))
        p2.start()
        try:
            key1 = '["heavy_computing",[0.365],{}]'
            key2 = '["heavy_computing",[0.476],{}]'
            # Writer creates the filter file and adds key1.
            p1conn_pp.send(
                orjson.dumps({"status": "start", "bloomfile": bloomfile, "key1": key1})
            )
            assert orjson.loads(p1conn_pp.recv()) == {"status": "done", "has_key": True}
            # Reader opens the same file read-only and must already see key1.
            p2conn_pp.send(
                orjson.dumps({"status": "start", "bloomfile": bloomfile, "key1": key1})
            )
            assert orjson.loads(p2conn_pp.recv()) == {"status": "done", "has_key": True}
            # Writer adds key2; the reader must observe it through the shared file.
            p1conn_pp.send(orjson.dumps({"status": "start", "key2": key2}))
            assert orjson.loads(p1conn_pp.recv()) == {"status": "done", "has_key": True}
            p2conn_pp.send(orjson.dumps({"status": "start", "key2": key2}))
            assert orjson.loads(p2conn_pp.recv()) == {"status": "done", "has_key": True}
        finally:
            # Fix: the children were never joined, so TemporaryDirectory cleanup
            # could race with them while they still hold the bloom-filter file,
            # and they were left unreaped. Join before the tempdir is removed.
            p1.join(timeout=10)
            p2.join(timeout=10)
| 34.546875 | 86 | 0.618725 |
acf3f93376fa6702e670db656b592b205ca56e75 | 3,847 | py | Python | play.py | tushortz/Football-Match-Engine | 4de6d3cc80ad981f70c579d98b2ebe15c014f5b1 | [
"MIT"
] | null | null | null | play.py | tushortz/Football-Match-Engine | 4de6d3cc80ad981f70c579d98b2ebe15c014f5b1 | [
"MIT"
] | null | null | null | play.py | tushortz/Football-Match-Engine | 4de6d3cc80ad981f70c579d98b2ebe15c014f5b1 | [
"MIT"
] | null | null | null | from game.engine import Engine
from player.player import Player
from player.skill import Skill
from team.team import Team
from random import randint as _
#------------------------------------
# def _(a=0, b=100):
# return random.randint(a, b)
# save, defence, passing, midfield, attack, speed
# skillgk1 = Skill(_(60, 100), _(), _(), _(), _(), _(50))
# skillgk2 = Skill(_(60, 100), _(), _(), _(), _(), _(50))
# skilldef1 = Skill(_(0,10), _(50), _(20), _(), _(), _(50))
# skilldef2 = Skill(_(0,10), _(50), _(20), _(), _(), _(50))
# skillmid1 = Skill(_(0,10), _(30), _(50), _(50), _(50), _(50))
# skillmid2 = Skill(_(0,10), _(30), _(50), _(50), _(50), _(50))
# skillatt1 = Skill(_(0,5), _(10), _(50), _(40), _(50), _(50))
# skillatt2 = Skill(_(0,5), _(10), _(50), _(40), _(50), _(50))
# players1 = []
# players1.append(Player(1, skillgk1, "GK"))
# for _ in range(4):
# players1.append(Player(_+2, skilldef1, "DF"))
# for _ in range(3):
# players1.append(Player(_+2, skillmid1, "MF"))
# for _ in range(3):
# players1.append(Player(_+2, skillatt1, "FW"))
# players2 = []
# players2.append(Player(1, skillgk2, "GK"))
# for _ in range(4):
# players2.append(Player(_+2, skilldef1, "DF"))
# for _ in range(4):
# players2.append(Player(_+2, skillmid1, "MF"))
# for _ in range(2):
# players2.append(Player(_+2, skillatt1, "FW"))
# for x in players1:
# print(x)
# print(players2)
#--------------------------------------
# Generate random squad data.
# Skill(save, defence, passing, midfield, attack, speed) — each attribute is
# drawn uniformly at random from the given (low, high) range via
# `_` = random.randint (aliased in the imports above).
# Player(name, position, skill)
players1 = [
    Player("Courtois", "GK", Skill(_(60,100), _(0,100), _(0,100), _(0,100), _(0,100), _(50,100))),
    Player("Azpilicueta", "DF", Skill(_(0,10), _(60,100), _(20,100), _(0,100), _(0,100), _(50,100))),
    Player("Luiz", "DF", Skill(_(0,10), _(50,100), _(60,100), _(0,100), _(50,100), _(50,100))),
    Player("Cahill", "DF", Skill(_(0,10), _(50,100), _(20,100), _(0,100), _(0,100), _(50,100))),
    Player("Moses", "MF", Skill(_(0,10), _(30,100), _(50,100), _(50,100), _(50,100), _(50,100))),
    Player("Matic", "MF", Skill(_(0,10), _(30,100), _(50,100), _(50,100), _(50,100), _(50,100))),
    Player("Kante", "MF", Skill(_(0,10), _(30,100), _(50,100), _(50,100), _(50,100), _(50,100))),
    Player("Alonso", "MF", Skill(_(0,10), _(30,100), _(50,100), _(50,100), _(50,100), _(50,100))),
    Player("Hazard", "FW", Skill(_(0,5), _(10,100), _(50,100), _(40,100), _(70,100), _(60,100))),
    Player("Costa", "FW", Skill(_(0,5), _(10,100), _(50,100), _(40,100), _(70,100), _(60,100))),
    Player("Pedro", "FW", Skill(_(0,5), _(10,100), _(50,100), _(40,100), _(70,100), _(60,100)))
]
# Away squad — note the different formation (5 MF, 1 FW vs 4 MF, 3 FW above).
players2 = [
    Player("Cech", "GK", Skill(_(60,100), _(0,100), _(0,100), _(0,100), _(0,100), _(50,100))),
    Player("Paulista", "DF", Skill(_(0,10), _(50,100), _(20,100), _(0,100), _(0,100), _(50,100))),
    Player("Mustafi", "DF", Skill(_(0,10), _(50,100), _(20,100), _(0,100), _(0,100), _(50,100))),
    Player("Koscielny", "DF", Skill(_(0,10), _(50,100), _(20,100), _(0,100), _(0,100), _(50,100))),
    Player("Monreal", "DF", Skill(_(0,10), _(50,100), _(20,100), _(0,100), _(0,100), _(50,100))),
    Player("Ramsey", "MF", Skill(_(0,10), _(30,100), _(50,100), _(50,100), _(70,100), _(50,100))),
    Player("Xhaka", "MF", Skill(_(0,10), _(30,100), _(50,100), _(50,100), _(60,100), _(50,100))),
    Player("Sanchez", "MF", Skill(_(0,10), _(30,100), _(50,100), _(50,100), _(70,100), _(50,100))),
    Player("Oezil", "MF", Skill(_(0,10), _(30,100), _(50,100), _(50,100), _(75,100), _(50,100))),
    Player("Iwobi", "MF", Skill(_(0,10), _(30,100), _(50,100), _(50,100), _(50,100), _(50,100))),
    Player("Giroud", "FW", Skill(_(0,5), _(10,100), _(50,100), _(40,100), _(70,100), _(60,100)))
]
# Build the two teams and run a single match; play(True) — presumably enables
# verbose commentary (defined in game.engine — confirm there).
home_team = Team("Chelsea", players1)
away_team = Team("Arsenal", players2)
engine = Engine(home_team, away_team)
engine.play(True)
engine.scoreboard() | 42.274725 | 99 | 0.566675 |
acf3f965da1b4743729f250ff82be8848244a5ea | 5,095 | py | Python | visualDet3D/utils/utils.py | oliver0922/yolo3dstereo | 24c37c4574eedd85593a0060b7c317b3e08c0460 | [
"Apache-2.0"
] | null | null | null | visualDet3D/utils/utils.py | oliver0922/yolo3dstereo | 24c37c4574eedd85593a0060b7c317b3e08c0460 | [
"Apache-2.0"
] | null | null | null | visualDet3D/utils/utils.py | oliver0922/yolo3dstereo | 24c37c4574eedd85593a0060b7c317b3e08c0460 | [
"Apache-2.0"
] | null | null | null | import torch
import numpy as np
import cv2
import sys
import os
import tempfile
import shutil
import importlib
from easydict import EasyDict
class LossLogger():
    """Accumulates per-key running loss averages and writes them to a recorder.

    Attributes:
        recorder: a summary writer exposing ``add_scalar(tag, value, step)``.
        data_split: suffix appended to each scalar tag (e.g. 'train').
        loss_stats: mapping from loss name to its AverageMeter.
    """

    def __init__(self, recorder, data_split='train'):
        self.recorder = recorder
        self.data_split = data_split
        self.reset()

    def reset(self):
        # Drop all accumulated statistics; meters are recreated lazily.
        self.loss_stats = {}

    def update(self, loss_dict):
        """Fold each (possibly batched) loss tensor into its running average."""
        for name, value in loss_dict.items():
            meter = self.loss_stats.setdefault(name, AverageMeter())
            meter.update(value.mean().item())

    def log(self, step):
        """Emit one scalar per tracked loss, tagged '<name>/<data_split>'."""
        for name, meter in self.loss_stats.items():
            self.recorder.add_scalar(name + '/' + self.data_split, meter.avg, step)
def convertAlpha2Rot(alpha, cx, P2):
    """Convert observation angle alpha to global yaw ry3d.

    The viewing-ray angle at image column ``cx`` is added, then the result is
    wrapped back into (-pi, pi]. ``alpha`` is modified only via a new array.
    """
    principal_x = P2[..., 0, 2]
    focal_x = P2[..., 0, 0]
    ray_angle = np.arctan2(cx - principal_x, focal_x)
    ry3d = alpha + ray_angle
    # Wrap into (-pi, pi] using boolean masks.
    ry3d[ry3d > np.pi] -= 2 * np.pi
    ry3d[ry3d <= -np.pi] += 2 * np.pi
    return ry3d
def convertRot2Alpha(ry3d, cx, P2):
    """Convert global yaw ry3d to observation angle alpha.

    Inverse of convertAlpha2Rot: subtracts the viewing-ray angle at image
    column ``cx`` and wraps the result into (-pi, pi].
    """
    principal_x = P2[..., 0, 2]
    focal_x = P2[..., 0, 0]
    alpha = ry3d - np.arctan2(cx - principal_x, focal_x)
    # Wrap into (-pi, pi].
    alpha[np.where(alpha > np.pi)] -= 2 * np.pi
    alpha[np.where(alpha <= -np.pi)] += 2 * np.pi
    return alpha
def alpha2theta_3d(alpha, x, z, P2):
    """Convert observation angle alpha to yaw theta given a 3D position.

    Args:
        alpha [torch.Tensor / float or np.ndarray]: size: [...]
        x []: camera-frame x position, size: [...]
        z []: camera-frame depth, size: [...]
        P2 [torch.Tensor / np.ndarray]: size: [3, 4]
    Returns:
        theta []: size: [...], same type as alpha.
    """
    # Horizontal offset encoded in P2's translation column (stereo baseline term).
    offset = P2[0, 3] / P2[0, 0]
    # Pick the backend matching the input type, then apply the same formula.
    atan2 = torch.atan2 if isinstance(alpha, torch.Tensor) else np.arctan2
    return alpha + atan2(x + offset, z)
def theta2alpha_3d(theta, x, z, P2):
    """Convert yaw theta to observation angle alpha given a 3D position.

    Args:
        theta [torch.Tensor / float or np.ndarray]: size: [...]
        x []: camera-frame x position, size: [...]
        z []: camera-frame depth, size: [...]
        P2 [torch.Tensor / np.ndarray]: size: [3, 4]
    Returns:
        alpha []: size: [...], same type as theta.
    """
    # Horizontal offset encoded in P2's translation column (stereo baseline term).
    offset = P2[0, 3] / P2[0, 0]
    # Inverse of alpha2theta_3d: subtract the ray angle toward (x + offset, z).
    atan2 = torch.atan2 if isinstance(theta, torch.Tensor) else np.arctan2
    return theta - atan2(x + offset, z)
def draw_3D_box(img, corners, color = (255, 255, 0)):
    """Draw a projected 3D bounding box on ``img`` with OpenCV.

    The corner ordering must match BBox3dProjector. ``img`` is modified in
    place and also returned for chaining.
    """
    # Keep only the pixel (x, y) rows and convert to integer coordinates.
    xy = np.array(corners[0:2], dtype=np.int32)  # shape [2, 8]
    pts = [tuple(xy[:, col]) for col in range(8)]
    # Face edges drawn with thickness 2 (same index arithmetic as before).
    for k in range(1, 5):
        cv2.line(img, pts[k], pts[k % 4 + 1], color, 2)
        cv2.line(img, pts[(k + 4) % 8], pts[(k % 4 + 5) % 8], color, 2)
    # Remaining connecting edges use OpenCV's default 1px thickness.
    for a, b in ((2, 7), (3, 6), (4, 5), (0, 1)):
        cv2.line(img, pts[a], pts[b], color)
    return img
def compound_annotation(labels, max_length, bbox2d, bbox_3d, obj_types):
    """Pack per-image labels and boxes into one dense annotation array.

    Args:
        labels: List[List[str]] of object type names per image.
        max_length: int, padded number of objects per image (may vary per batch).
        bbox2d: List[np.ndarray], rows are [left, top, right, bottom].
        bbox_3d: List[np.ndarray], rows are [cam_x, cam_y, z, w, h, l, alpha].
        obj_types: ordered list of known class names.
    Returns:
        np.ndarray of shape [batch_size, max_length, bbox3d_dim + 5]:
        [x1, y1, x2, y2, cls_index, cx, cy, z, w, h, l, alpha],
        with padded rows filled with -1 (cls_index == -1 means empty slot).
    """
    bbox3d_dim = bbox_3d[0].shape[-1]
    out = np.full([len(labels), max_length, bbox3d_dim + 5], -1.0)
    for row, (names, boxes2d, boxes3d) in enumerate(zip(labels, bbox2d, bbox_3d)):
        for col, name in enumerate(names):
            out[row, col, :4] = boxes2d[col]
            out[row, col, 4] = obj_types.index(name)
            out[row, col, 5:] = boxes3d[col]
    return out
class AverageMeter(object):
    """Tracks the latest value plus a running sum, count, and average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        # val: most recent sample; avg: sum / count over all samples so far.
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def cfg_from_file(cfg_filename: str) -> EasyDict:
    """Load the module-level ``cfg`` EasyDict from a Python config file.

    The file is copied into a temporary directory under a unique module name
    and imported from there, so repeated loads (even of edited files with the
    same path) never collide in ``sys.modules``. The temporary directory and
    everything in it are deleted when the context manager exits.

    Args:
        cfg_filename: path to a ``.py`` file that defines a module-level
            ``cfg`` EasyDict.
    Returns:
        The ``cfg`` EasyDict defined by the file.
    Raises:
        AssertionError: if the filename is not a ``.py`` file or ``cfg`` is
            not an EasyDict.
    """
    assert cfg_filename.endswith('.py')
    with tempfile.TemporaryDirectory() as temp_config_dir:
        # NamedTemporaryFile gives us a collision-free module name.
        temp_config_file = tempfile.NamedTemporaryFile(dir=temp_config_dir, suffix='.py')
        temp_config_name = os.path.basename(temp_config_file.name)
        shutil.copyfile(cfg_filename, os.path.join(temp_config_dir, temp_config_name))
        temp_module_name = os.path.splitext(temp_config_name)[0]
        sys.path.insert(0, temp_config_dir)
        try:
            cfg = getattr(importlib.import_module(temp_module_name), 'cfg')
            assert isinstance(cfg, EasyDict)
        finally:
            # Fix: the original called sys.path.pop(), which removes the LAST
            # sys.path entry — an unrelated path — and leaves the (soon to be
            # deleted) temp dir on sys.path. Remove exactly what was inserted,
            # and clean up even when the import or assertion fails.
            sys.path.remove(temp_config_dir)
            sys.modules.pop(temp_module_name, None)
            temp_config_file.close()
        return cfg
| 33.519737 | 102 | 0.58685 |
acf3fceb924a67643b801c583db142dcb7430d98 | 29,288 | py | Python | tensorflow_lattice/python/premade_test.py | isabella232/lattice | 7d57bf41cd73dd8d8c546fb41f93ef7557f68fe3 | [
"Apache-2.0"
] | null | null | null | tensorflow_lattice/python/premade_test.py | isabella232/lattice | 7d57bf41cd73dd8d8c546fb41f93ef7557f68fe3 | [
"Apache-2.0"
] | 1 | 2021-02-24T00:56:04.000Z | 2021-02-24T00:56:04.000Z | tensorflow_lattice/python/premade_test.py | isabella232/lattice | 7d57bf41cd73dd8d8c546fb41f93ef7557f68fe3 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Tensorflow Lattice premade."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import json
import tempfile
from absl import logging
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow_lattice.python import configs
from tensorflow_lattice.python import premade
from tensorflow_lattice.python import premade_lib
# Minimal synthetic dataset for tests that only need input shapes, not signal.
fake_data = {
    'train_xs': [np.array([1]), np.array([3]), np.array([0])],
    'train_ys': np.array([1]),
    'eval_xs': [np.array([2]), np.array([30]), np.array([-3])]
}

# Feature configs whose categorical monotonicity is expressed with vocabulary
# strings; premade_lib.set_categorical_monotonicities must convert the string
# pairs to bucket-index pairs before a model can be built from them.
unspecified_feature_configs = [
    configs.FeatureConfig(
        name='numerical_1',
        lattice_size=2,
        pwl_calibration_input_keypoints=np.linspace(0.0, 1.0, num=10),
    ),
    configs.FeatureConfig(
        name='numerical_2',
        lattice_size=2,
        pwl_calibration_input_keypoints=np.linspace(0.0, 1.0, num=10),
    ),
    configs.FeatureConfig(
        name='categorical',
        lattice_size=2,
        num_buckets=2,
        monotonicity=[('0.0', '1.0')],
        vocabulary_list=['0.0', '1.0'],
    ),
]

# Same features as above, but the categorical monotonicity is already given
# as bucket-index pairs, so no conversion step is required.
specified_feature_configs = [
    configs.FeatureConfig(
        name='numerical_1',
        lattice_size=2,
        pwl_calibration_input_keypoints=np.linspace(0.0, 1.0, num=10),
    ),
    configs.FeatureConfig(
        name='numerical_2',
        lattice_size=2,
        pwl_calibration_input_keypoints=np.linspace(0.0, 1.0, num=10),
    ),
    configs.FeatureConfig(
        name='categorical',
        lattice_size=2,
        num_buckets=2,
        monotonicity=[(0, 1)],
    ),
]

# Default feature configs (index-pair monotonicity) shared by the
# model-construction and serialization tests below.
feature_configs = [
    configs.FeatureConfig(
        name='numerical_1',
        lattice_size=2,
        pwl_calibration_input_keypoints=np.linspace(0.0, 1.0, num=10),
    ),
    configs.FeatureConfig(
        name='numerical_2',
        lattice_size=2,
        pwl_calibration_input_keypoints=np.linspace(0.0, 1.0, num=10),
    ),
    configs.FeatureConfig(
        name='categorical',
        lattice_size=2,
        num_buckets=2,
        monotonicity=[(0, 1)],
    ),
]
class PremadeTest(tf.test.TestCase):
"""Tests for TFL premade."""
def setUp(self):
super(PremadeTest, self).setUp()
# UCI Statlog (Heart) dataset.
heart_csv_file = tf.keras.utils.get_file(
'heart.csv',
'http://storage.googleapis.com/download.tensorflow.org/data/heart.csv')
heart_df = pd.read_csv(heart_csv_file)
heart_train_size = int(len(heart_df) * 0.8)
heart_train_dataframe = heart_df[:heart_train_size]
heart_test_dataframe = heart_df[heart_train_size:]
# Features:
# - age
# - sex
# - cp chest pain type (4 values)
# - trestbps resting blood pressure
# - chol serum cholestoral in mg/dl
# - fbs fasting blood sugar > 120 mg/dl
# - restecg resting electrocardiographic results (values 0,1,2)
# - thalach maximum heart rate achieved
# - exang exercise induced angina
# - oldpeak ST depression induced by exercise relative to rest
# - slope the slope of the peak exercise ST segment
# - ca number of major vessels (0-3) colored by flourosopy
# - thal 3 = normal; 6 = fixed defect; 7 = reversable defect
#
# This ordering of feature names will be the exact same order that we
# construct our model to expect.
self.heart_feature_names = [
'age', 'sex', 'cp', 'chol', 'fbs', 'trestbps', 'thalach', 'restecg',
'exang', 'oldpeak', 'slope', 'ca', 'thal'
]
feature_name_indices = {
name: index for index, name in enumerate(self.heart_feature_names)
}
# This is the vocab list and mapping we will use for the 'thal' categorical
# feature.
thal_vocab_list = ['normal', 'fixed', 'reversible']
thal_map = {category: i for i, category in enumerate(thal_vocab_list)}
# Custom function for converting thal categories to buckets
def convert_thal_features(thal_features):
# Note that two examples in the test set are already converted.
return np.array([
thal_map[feature] if feature in thal_vocab_list else feature
for feature in thal_features
])
# Custom function for extracting each feature.
def extract_features(dataframe, label_name='target'):
features = []
for feature_name in self.heart_feature_names:
if feature_name == 'thal':
features.append(
convert_thal_features(
dataframe[feature_name].values).astype(float))
else:
features.append(dataframe[feature_name].values.astype(float))
labels = dataframe[label_name].values.astype(float)
return features, labels
self.heart_train_x, self.heart_train_y = extract_features(
heart_train_dataframe)
self.heart_test_x, self.heart_test_y = extract_features(
heart_test_dataframe)
# Let's define our label minimum and maximum.
self.heart_min_label = float(np.min(self.heart_train_y))
self.heart_max_label = float(np.max(self.heart_train_y))
# Our lattice models may have predictions above 1.0 due to numerical errors.
# We can subtract this small epsilon value from our output_max to make sure
# we do not predict values outside of our label bound.
self.numerical_error_epsilon = 1e-5
def compute_quantiles(features,
num_keypoints=10,
clip_min=None,
clip_max=None,
missing_value=None):
# Clip min and max if desired.
if clip_min is not None:
features = np.maximum(features, clip_min)
features = np.append(features, clip_min)
if clip_max is not None:
features = np.minimum(features, clip_max)
features = np.append(features, clip_max)
# Make features unique.
unique_features = np.unique(features)
# Remove missing values if specified.
if missing_value is not None:
unique_features = np.delete(unique_features,
np.where(unique_features == missing_value))
# Compute and return quantiles over unique non-missing feature values.
return np.quantile(
unique_features,
np.linspace(0., 1., num=num_keypoints),
interpolation='nearest').astype(float)
self.heart_feature_configs = [
configs.FeatureConfig(
name='age',
lattice_size=3,
monotonicity='increasing',
# We must set the keypoints manually.
pwl_calibration_num_keypoints=5,
pwl_calibration_input_keypoints=compute_quantiles(
self.heart_train_x[feature_name_indices['age']],
num_keypoints=5,
clip_max=100),
# Per feature regularization.
regularizer_configs=[
configs.RegularizerConfig(name='calib_wrinkle', l2=0.1),
],
),
configs.FeatureConfig(
name='sex',
num_buckets=2,
),
configs.FeatureConfig(
name='cp',
monotonicity='increasing',
# Keypoints that are uniformly spaced.
pwl_calibration_num_keypoints=4,
pwl_calibration_input_keypoints=np.linspace(
np.min(self.heart_train_x[feature_name_indices['cp']]),
np.max(self.heart_train_x[feature_name_indices['cp']]),
num=4),
),
configs.FeatureConfig(
name='chol',
monotonicity='increasing',
# Explicit input keypoints initialization.
pwl_calibration_input_keypoints=[126.0, 210.0, 247.0, 286.0, 564.0],
# Calibration can be forced to span the full output range
# by clamping.
pwl_calibration_clamp_min=True,
pwl_calibration_clamp_max=True,
# Per feature regularization.
regularizer_configs=[
configs.RegularizerConfig(name='calib_hessian', l2=1e-4),
],
),
configs.FeatureConfig(
name='fbs',
# Partial monotonicity: output(0) <= output(1)
monotonicity=[(0, 1)],
num_buckets=2,
),
configs.FeatureConfig(
name='trestbps',
monotonicity='decreasing',
pwl_calibration_num_keypoints=5,
pwl_calibration_input_keypoints=compute_quantiles(
self.heart_train_x[feature_name_indices['trestbps']],
num_keypoints=5),
),
configs.FeatureConfig(
name='thalach',
monotonicity='decreasing',
pwl_calibration_num_keypoints=5,
pwl_calibration_input_keypoints=compute_quantiles(
self.heart_train_x[feature_name_indices['thalach']],
num_keypoints=5),
),
configs.FeatureConfig(
name='restecg',
# Partial monotonicity:
# output(0) <= output(1), output(0) <= output(2)
monotonicity=[(0, 1), (0, 2)],
num_buckets=3,
),
configs.FeatureConfig(
name='exang',
# Partial monotonicity: output(0) <= output(1)
monotonicity=[(0, 1)],
num_buckets=2,
),
configs.FeatureConfig(
name='oldpeak',
monotonicity='increasing',
pwl_calibration_num_keypoints=5,
pwl_calibration_input_keypoints=compute_quantiles(
self.heart_train_x[feature_name_indices['oldpeak']],
num_keypoints=5),
),
configs.FeatureConfig(
name='slope',
# Partial monotonicity:
# output(0) <= output(1), output(1) <= output(2)
monotonicity=[(0, 1), (1, 2)],
num_buckets=3,
),
configs.FeatureConfig(
name='ca',
monotonicity='increasing',
pwl_calibration_num_keypoints=4,
pwl_calibration_input_keypoints=compute_quantiles(
self.heart_train_x[feature_name_indices['ca']],
num_keypoints=4),
),
configs.FeatureConfig(
name='thal',
# Partial monotonicity:
# output(normal) <= output(fixed)
# output(normal) <= output(reversible)
monotonicity=[('normal', 'fixed'), ('normal', 'reversible')],
num_buckets=3,
# We must specify the vocabulary list in order to later set the
# monotonicities since we used names and not indices.
vocabulary_list=thal_vocab_list,
),
]
premade_lib.set_categorical_monotonicities(self.heart_feature_configs)
def _ResetAllBackends(self):
tf.keras.backend.clear_session()
tf.compat.v1.reset_default_graph()
class Encoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.int32):
return int(obj)
if isinstance(obj, np.ndarray):
return obj.tolist()
return json.JSONEncoder.default(self, obj)
def testSetRandomLattices(self):
random_model_config = configs.CalibratedLatticeEnsembleConfig(
feature_configs=copy.deepcopy(unspecified_feature_configs),
lattices='random',
num_lattices=3,
lattice_rank=2,
separate_calibrators=True,
output_initialization=[-1.0, 1.0])
premade_lib.set_random_lattice_ensemble(random_model_config)
self.assertLen(random_model_config.lattices, 3)
self.assertListEqual(
[2, 2, 2], [len(lattice) for lattice in random_model_config.lattices])
specified_model_config = configs.CalibratedLatticeEnsembleConfig(
feature_configs=copy.deepcopy(specified_feature_configs),
lattices=[['numerical_1', 'categorical'],
['numerical_2', 'categorical']],
num_lattices=2,
lattice_rank=2,
separate_calibrators=True,
output_initialization=[-1.0, 1.0])
with self.assertRaisesRegex(
ValueError, 'model_config.lattices must be set to \'random\'.'):
premade_lib.set_random_lattice_ensemble(specified_model_config)
def testSetCategoricalMonotonicities(self):
set_feature_configs = copy.deepcopy(unspecified_feature_configs)
premade_lib.set_categorical_monotonicities(set_feature_configs)
expectation = [(0, 1)]
self.assertListEqual(expectation, set_feature_configs[2].monotonicity)
def testVerifyConfig(self):
unspecified_model_config = configs.CalibratedLatticeEnsembleConfig(
feature_configs=copy.deepcopy(unspecified_feature_configs),
lattices='random',
num_lattices=3,
lattice_rank=2,
separate_calibrators=True,
output_initialization=[-1.0, 1.0])
with self.assertRaisesRegex(
ValueError, 'Lattices are not fully specified for ensemble config.'):
premade_lib.verify_config(unspecified_model_config)
premade_lib.set_random_lattice_ensemble(unspecified_model_config)
with self.assertRaisesRegex(
ValueError,
'Element 0 for list/tuple 0 for feature categorical monotonicity is '
'not an index: 0.0'):
premade_lib.verify_config(unspecified_model_config)
fixed_feature_configs = copy.deepcopy(unspecified_feature_configs)
premade_lib.set_categorical_monotonicities(fixed_feature_configs)
unspecified_model_config.feature_configs = fixed_feature_configs
premade_lib.verify_config(unspecified_model_config)
specified_model_config = configs.CalibratedLatticeEnsembleConfig(
feature_configs=copy.deepcopy(specified_feature_configs),
lattices=[['numerical_1', 'categorical'],
['numerical_2', 'categorical']],
num_lattices=2,
lattice_rank=2,
separate_calibrators=True,
output_initialization=[-1.0, 1.0])
premade_lib.verify_config(specified_model_config)
def testLatticeEnsembleFromConfig(self):
model_config = configs.CalibratedLatticeEnsembleConfig(
feature_configs=copy.deepcopy(feature_configs),
lattices=[['numerical_1', 'categorical'],
['numerical_2', 'categorical']],
num_lattices=2,
lattice_rank=2,
separate_calibrators=True,
regularizer_configs=[
configs.RegularizerConfig('calib_hessian', l2=1e-3),
configs.RegularizerConfig('torsion', l2=1e-4),
],
output_min=-1.0,
output_max=1.0,
output_calibration=True,
output_calibration_num_keypoints=5,
output_initialization=[-1.0, 1.0])
model = premade.CalibratedLatticeEnsemble(model_config)
loaded_model = premade.CalibratedLatticeEnsemble.from_config(
model.get_config(), custom_objects=premade.get_custom_objects())
self.assertEqual(
json.dumps(model.get_config(), sort_keys=True, cls=self.Encoder),
json.dumps(loaded_model.get_config(), sort_keys=True, cls=self.Encoder))
def testLatticeFromConfig(self):
model_config = configs.CalibratedLatticeConfig(
feature_configs=copy.deepcopy(feature_configs),
regularizer_configs=[
configs.RegularizerConfig('calib_wrinkle', l2=1e-3),
configs.RegularizerConfig('torsion', l2=1e-3),
],
output_min=0.0,
output_max=1.0,
output_calibration=True,
output_calibration_num_keypoints=6,
output_initialization=[0.0, 1.0])
model = premade.CalibratedLattice(model_config)
loaded_model = premade.CalibratedLattice.from_config(
model.get_config(), custom_objects=premade.get_custom_objects())
self.assertEqual(
json.dumps(model.get_config(), sort_keys=True, cls=self.Encoder),
json.dumps(loaded_model.get_config(), sort_keys=True, cls=self.Encoder))
def testLatticeSimplexFromConfig(self):
model_config = configs.CalibratedLatticeConfig(
feature_configs=copy.deepcopy(feature_configs),
regularizer_configs=[
configs.RegularizerConfig('calib_wrinkle', l2=1e-3),
configs.RegularizerConfig('torsion', l2=1e-3),
],
output_min=0.0,
output_max=1.0,
interpolation='simplex',
output_calibration=True,
output_calibration_num_keypoints=6,
output_initialization=[0.0, 1.0])
model = premade.CalibratedLattice(model_config)
loaded_model = premade.CalibratedLattice.from_config(
model.get_config(), custom_objects=premade.get_custom_objects())
self.assertEqual(
json.dumps(model.get_config(), sort_keys=True, cls=self.Encoder),
json.dumps(loaded_model.get_config(), sort_keys=True, cls=self.Encoder))
def testLinearFromConfig(self):
model_config = configs.CalibratedLinearConfig(
feature_configs=copy.deepcopy(feature_configs),
regularizer_configs=[
configs.RegularizerConfig('calib_hessian', l2=1e-4),
configs.RegularizerConfig('torsion', l2=1e-3),
],
use_bias=True,
output_min=0.0,
output_max=1.0,
output_calibration=True,
output_calibration_num_keypoints=6,
output_initialization=[0.0, 1.0])
model = premade.CalibratedLinear(model_config)
loaded_model = premade.CalibratedLinear.from_config(
model.get_config(), custom_objects=premade.get_custom_objects())
self.assertEqual(
json.dumps(model.get_config(), sort_keys=True, cls=self.Encoder),
json.dumps(loaded_model.get_config(), sort_keys=True, cls=self.Encoder))
def testAggregateFromConfig(self):
model_config = configs.AggregateFunctionConfig(
feature_configs=feature_configs,
regularizer_configs=[
configs.RegularizerConfig('calib_hessian', l2=1e-4),
configs.RegularizerConfig('torsion', l2=1e-3),
],
middle_calibration=True,
middle_monotonicity='increasing',
output_min=0.0,
output_max=1.0,
output_calibration=True,
output_calibration_num_keypoints=8,
output_initialization=[0.0, 1.0])
model = premade.AggregateFunction(model_config)
loaded_model = premade.AggregateFunction.from_config(
model.get_config(), custom_objects=premade.get_custom_objects())
self.assertEqual(
json.dumps(model.get_config(), sort_keys=True, cls=self.Encoder),
json.dumps(loaded_model.get_config(), sort_keys=True, cls=self.Encoder))
def testCalibratedLatticeEnsembleCrystals(self):
# Construct model.
self._ResetAllBackends()
model_config = configs.CalibratedLatticeEnsembleConfig(
regularizer_configs=[
configs.RegularizerConfig(name='torsion', l2=1e-4),
configs.RegularizerConfig(name='output_calib_hessian', l2=1e-4),
],
feature_configs=self.heart_feature_configs,
lattices='crystals',
num_lattices=6,
lattice_rank=5,
separate_calibrators=True,
output_calibration=False,
output_min=self.heart_min_label,
output_max=self.heart_max_label - self.numerical_error_epsilon,
output_initialization=[self.heart_min_label, self.heart_max_label],
)
# Perform prefitting steps.
prefitting_model_config = premade_lib.construct_prefitting_model_config(
model_config)
prefitting_model = premade.CalibratedLatticeEnsemble(
prefitting_model_config)
prefitting_model.compile(
loss=tf.keras.losses.BinaryCrossentropy(),
optimizer=tf.keras.optimizers.Adam(0.01))
prefitting_model.fit(
self.heart_train_x,
self.heart_train_y,
batch_size=100,
epochs=50,
verbose=False)
premade_lib.set_crystals_lattice_ensemble(model_config,
prefitting_model_config,
prefitting_model)
# Construct and train final model
model = premade.CalibratedLatticeEnsemble(model_config)
model.compile(
loss=tf.keras.losses.BinaryCrossentropy(),
metrics=tf.keras.metrics.AUC(),
optimizer=tf.keras.optimizers.Adam(0.01))
model.fit(
self.heart_train_x,
self.heart_train_y,
batch_size=100,
epochs=200,
verbose=False)
results = model.evaluate(
self.heart_test_x, self.heart_test_y, verbose=False)
logging.info('Calibrated lattice ensemble crystals classifier results:')
logging.info(results)
self.assertGreater(results[1], 0.85)
  def testCalibratedLatticeEnsembleRTL(self):
    """Trains a lattice ensemble using the single 'rtl_layer' arrangement.

    The rtl_layer arrangement requires homogeneous feature configs, so the
    per-feature trust/dominance/regularizer settings are cleared before the
    config is built. Final test AUC must exceed 0.85.
    """
    # Construct model.
    self._ResetAllBackends()
    rtl_feature_configs = copy.deepcopy(self.heart_feature_configs)
    # rtl_layer does not support per-feature options; normalize them all.
    for feature_config in rtl_feature_configs:
      feature_config.lattice_size = 2
      feature_config.unimodality = 'none'
      feature_config.reflects_trust_in = None
      feature_config.dominates = None
      feature_config.regularizer_configs = None
    model_config = configs.CalibratedLatticeEnsembleConfig(
        regularizer_configs=[
            configs.RegularizerConfig(name='torsion', l2=1e-4),
            configs.RegularizerConfig(name='output_calib_hessian', l2=1e-4),
        ],
        feature_configs=rtl_feature_configs,
        lattices='rtl_layer',
        num_lattices=6,
        lattice_rank=5,
        separate_calibrators=True,
        output_calibration=False,
        output_min=self.heart_min_label,
        output_max=self.heart_max_label - self.numerical_error_epsilon,
        output_initialization=[self.heart_min_label, self.heart_max_label],
    )
    # Construct and train final model
    model = premade.CalibratedLatticeEnsemble(model_config)
    model.compile(
        loss=tf.keras.losses.BinaryCrossentropy(),
        metrics=tf.keras.metrics.AUC(),
        optimizer=tf.keras.optimizers.Adam(0.01))
    model.fit(
        self.heart_train_x,
        self.heart_train_y,
        batch_size=100,
        epochs=200,
        verbose=False)
    results = model.evaluate(
        self.heart_test_x, self.heart_test_y, verbose=False)
    logging.info('Calibrated lattice ensemble rtl classifier results:')
    logging.info(results)
    # results[1] is the AUC metric.
    self.assertGreater(results[1], 0.85)
  def testLatticeEnsembleH5FormatSaveLoad(self):
    """Checks a lattice ensemble survives an HDF5 save/load round trip."""
    # NOTE(review): `feature_configs` and `fake_data` are module-level test
    # fixtures defined earlier in this file (outside this chunk).
    model_config = configs.CalibratedLatticeEnsembleConfig(
        feature_configs=copy.deepcopy(feature_configs),
        lattices=[['numerical_1', 'categorical'],
                  ['numerical_2', 'categorical']],
        num_lattices=2,
        lattice_rank=2,
        separate_calibrators=True,
        regularizer_configs=[
            configs.RegularizerConfig('calib_hessian', l2=1e-3),
            configs.RegularizerConfig('torsion', l2=1e-4),
        ],
        output_min=-1.0,
        output_max=1.0,
        output_calibration=True,
        output_calibration_num_keypoints=5,
        output_initialization=[-1.0, 1.0])
    model = premade.CalibratedLatticeEnsemble(model_config)
    # Compile and fit model.
    model.compile(loss='mse', optimizer=tf.keras.optimizers.Adam(0.1))
    model.fit(fake_data['train_xs'], fake_data['train_ys'])
    # Save model using H5 format.
    with tempfile.NamedTemporaryFile(suffix='.h5') as f:
      tf.keras.models.save_model(model, f.name)
      loaded_model = tf.keras.models.load_model(
          f.name, custom_objects=premade.get_custom_objects())
      # The reloaded model must reproduce the original predictions exactly.
      self.assertAllClose(
          model.predict(fake_data['eval_xs']),
          loaded_model.predict(fake_data['eval_xs']))
  def testLatticeEnsembleRTLH5FormatSaveLoad(self):
    """HDF5 save/load round trip for the rtl_layer ensemble variant."""
    rtl_feature_configs = copy.deepcopy(feature_configs)
    # rtl_layer requires homogeneous feature configs; clear per-feature
    # options before building the model config.
    for feature_config in rtl_feature_configs:
      feature_config.lattice_size = 2
      feature_config.unimodality = 'none'
      feature_config.reflects_trust_in = None
      feature_config.dominates = None
      feature_config.regularizer_configs = None
    model_config = configs.CalibratedLatticeEnsembleConfig(
        feature_configs=copy.deepcopy(rtl_feature_configs),
        lattices='rtl_layer',
        num_lattices=2,
        lattice_rank=2,
        separate_calibrators=True,
        regularizer_configs=[
            configs.RegularizerConfig('calib_hessian', l2=1e-3),
            configs.RegularizerConfig('torsion', l2=1e-4),
        ],
        output_min=-1.0,
        output_max=1.0,
        output_calibration=True,
        output_calibration_num_keypoints=5,
        output_initialization=[-1.0, 1.0])
    model = premade.CalibratedLatticeEnsemble(model_config)
    # Compile and fit model.
    model.compile(loss='mse', optimizer=tf.keras.optimizers.Adam(0.1))
    model.fit(fake_data['train_xs'], fake_data['train_ys'])
    # Save model using H5 format.
    with tempfile.NamedTemporaryFile(suffix='.h5') as f:
      tf.keras.models.save_model(model, f.name)
      loaded_model = tf.keras.models.load_model(
          f.name, custom_objects=premade.get_custom_objects())
      self.assertAllClose(
          model.predict(fake_data['eval_xs']),
          loaded_model.predict(fake_data['eval_xs']))
  def testLatticeH5FormatSaveLoad(self):
    """HDF5 save/load round trip for a single calibrated lattice model."""
    model_config = configs.CalibratedLatticeConfig(
        feature_configs=copy.deepcopy(feature_configs),
        regularizer_configs=[
            configs.RegularizerConfig('calib_wrinkle', l2=1e-3),
            configs.RegularizerConfig('torsion', l2=1e-3),
        ],
        output_min=0.0,
        output_max=1.0,
        output_calibration=True,
        output_calibration_num_keypoints=6,
        output_initialization=[0.0, 1.0])
    model = premade.CalibratedLattice(model_config)
    # Compile and fit model.
    model.compile(loss='mse', optimizer=tf.keras.optimizers.Adam(0.1))
    model.fit(fake_data['train_xs'], fake_data['train_ys'])
    # Save model using H5 format.
    with tempfile.NamedTemporaryFile(suffix='.h5') as f:
      tf.keras.models.save_model(model, f.name)
      loaded_model = tf.keras.models.load_model(
          f.name, custom_objects=premade.get_custom_objects())
      # The reloaded model must reproduce the original predictions.
      self.assertAllClose(
          model.predict(fake_data['eval_xs']),
          loaded_model.predict(fake_data['eval_xs']))
  def testLinearH5FormatSaveLoad(self):
    """HDF5 save/load round trip for a calibrated linear model."""
    model_config = configs.CalibratedLinearConfig(
        feature_configs=copy.deepcopy(feature_configs),
        regularizer_configs=[
            configs.RegularizerConfig('calib_hessian', l2=1e-4),
            configs.RegularizerConfig('torsion', l2=1e-3),
        ],
        use_bias=True,
        output_min=0.0,
        output_max=1.0,
        output_calibration=True,
        output_calibration_num_keypoints=6,
        output_initialization=[0.0, 1.0])
    model = premade.CalibratedLinear(model_config)
    # Compile and fit model.
    model.compile(loss='mse', optimizer=tf.keras.optimizers.Adam(0.1))
    model.fit(fake_data['train_xs'], fake_data['train_ys'])
    # Save model using H5 format.
    with tempfile.NamedTemporaryFile(suffix='.h5') as f:
      tf.keras.models.save_model(model, f.name)
      loaded_model = tf.keras.models.load_model(
          f.name, custom_objects=premade.get_custom_objects())
      # The reloaded model must reproduce the original predictions.
      self.assertAllClose(
          model.predict(fake_data['eval_xs']),
          loaded_model.predict(fake_data['eval_xs']))
  def testAggregateH5FormatSaveLoad(self):
    """HDF5 save/load round trip for an aggregate function model."""
    model_config = configs.AggregateFunctionConfig(
        feature_configs=feature_configs,
        regularizer_configs=[
            configs.RegularizerConfig('calib_hessian', l2=1e-4),
            configs.RegularizerConfig('torsion', l2=1e-3),
        ],
        middle_calibration=True,
        middle_monotonicity='increasing',
        output_min=0.0,
        output_max=1.0,
        output_calibration=True,
        output_calibration_num_keypoints=8,
        output_initialization=[0.0, 1.0])
    model = premade.AggregateFunction(model_config)
    # Compile and fit model.
    model.compile(loss='mse', optimizer=tf.keras.optimizers.Adam(0.1))
    model.fit(fake_data['train_xs'], fake_data['train_ys'])
    # Save model using H5 format.
    with tempfile.NamedTemporaryFile(suffix='.h5') as f:
      # Note: because of naming clashes in the optimizer, we cannot include it
      # when saving in HDF5. The keras team has informed us that we should not
      # push to support this since SavedModel format is the new default and no
      # new HDF5 functionality is desired.
      tf.keras.models.save_model(model, f.name, include_optimizer=False)
      loaded_model = tf.keras.models.load_model(
          f.name, custom_objects=premade.get_custom_objects())
      # The reloaded model must reproduce the original predictions.
      self.assertAllClose(
          model.predict(fake_data['eval_xs']),
          loaded_model.predict(fake_data['eval_xs']))
# Run the TensorFlow test runner when this file is executed directly.
if __name__ == '__main__':
  tf.test.main()
| 39.207497 | 80 | 0.661465 |
acf3fd14f5dddae303ec7e96aa4fd63c7d397d60 | 2,654 | py | Python | S10/test.py | VijayPrakashReddy-k/EVA | fd78ff8bda4227aebd0f5db14865d3c5a47b19b0 | [
"MIT"
] | null | null | null | S10/test.py | VijayPrakashReddy-k/EVA | fd78ff8bda4227aebd0f5db14865d3c5a47b19b0 | [
"MIT"
] | null | null | null | S10/test.py | VijayPrakashReddy-k/EVA | fd78ff8bda4227aebd0f5db14865d3c5a47b19b0 | [
"MIT"
] | null | null | null | import torch
def test(model, device, test_loader, criterion, classes, test_losses, test_accs,
        misclassified_imgs, correct_imgs, is_last_epoch):
    """Evaluate `model` on `test_loader` and record loss/accuracy.

    Appends the epoch's average loss to `test_losses` and accuracy (in %)
    to `test_accs` — both lists are mutated in place by the caller's
    training loop. On the last epoch, collects up to 25 misclassified and
    25 correctly classified samples (target/pred/image dicts) into
    `misclassified_imgs` / `correct_imgs` for later visualization.
    Prints a summary line, plus a per-class breakdown once accuracy >= 90%.
    """
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            # NOTE(review): the division by len(dataset) below assumes the
            # criterion sums (not averages) over each batch — confirm the
            # caller passes a sum-reduction loss.
            test_loss +=criterion(output, target).item() # sum up batch loss
            pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
            is_correct = pred.eq(target.view_as(pred))
            if is_last_epoch:
                # Collect up to 25 examples of each kind for plotting.
                misclassified_inds = (is_correct==0).nonzero()[:,0]
                for mis_ind in misclassified_inds:
                    if len(misclassified_imgs) == 25:
                        break
                    misclassified_imgs.append({
                        "target": target[mis_ind].cpu().numpy(),
                        "pred": pred[mis_ind][0].cpu().numpy(),
                        "img": data[mis_ind]
                    })
                correct_inds = (is_correct==1).nonzero()[:,0]
                for ind in correct_inds:
                    if len(correct_imgs) == 25:
                        break
                    correct_imgs.append({
                        "target": target[ind].cpu().numpy(),
                        "pred": pred[ind][0].cpu().numpy(),
                        "img": data[ind]
                    })
            correct += is_correct.sum().item()
    test_loss /= len(test_loader.dataset)
    test_losses.append(test_loss)
    test_acc = 100. * correct / len(test_loader.dataset)
    test_accs.append(test_acc)
    if test_acc >= 90.0:
        # Per-class breakdown only once overall accuracy is high.
        classwise_acc(model, device, test_loader, classes)
    print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset), test_acc))
def classwise_acc(model, device, test_loader, classes):
    """Print per-class accuracy of `model` over `test_loader`.

    Args:
        model: trained network, called as model(images); expected to return
            (batch, num_classes) scores.
        device: torch device (or device string) to run inference on.
        test_loader: iterable of (images, labels) batches.
        classes: sequence of 10 class names, indexed by label id.
    """
    class_correct = list(0. for i in range(10))
    class_total = list(0. for i in range(10))
    with torch.no_grad():
        for images, labels in test_loader:
            images, labels = images.to(device), labels.to(device)
            outputs = model(images)
            _, predicted = torch.max(outputs, 1)
            c = (predicted == labels).squeeze()
            if c.dim() == 0:
                # A batch of size 1 squeezes down to a 0-d tensor; restore
                # the batch dimension so the indexing below works.
                c = c.unsqueeze(0)
            # Bug fix: iterate over the whole batch instead of only the first
            # 4 samples. The old `range(4)` undercounted every batch and
            # raised ZeroDivisionError for classes never seen in those slots.
            for i in range(labels.size(0)):
                label = labels[i]
                class_correct[label] += c[i].item()
                class_total[label] += 1
    # print class-wise test accuracies
    print()
    for i in range(10):
        if class_total[i] == 0:
            # Robustness: avoid ZeroDivisionError for absent classes.
            print('Accuracy of %5s : n/a (no samples)' % classes[i])
        else:
            print('Accuracy of %5s : %2d %%' % (
                classes[i], 100 * class_correct[i] / class_total[i]))
    print()
| 38.463768 | 97 | 0.543708 |
acf3fd6834e409462347b7cdced8fc4d5c645c81 | 3,838 | py | Python | azure-mgmt-storage/azure/mgmt/storage/v2017_06_01/operations/operations.py | jmalobicky/azure-sdk-for-python | 61234a3d83f8fb481d1dd2386e54e888864878fd | [
"MIT"
] | 1 | 2018-07-23T08:59:24.000Z | 2018-07-23T08:59:24.000Z | azure-mgmt-storage/azure/mgmt/storage/v2017_06_01/operations/operations.py | jmalobicky/azure-sdk-for-python | 61234a3d83f8fb481d1dd2386e54e888864878fd | [
"MIT"
] | null | null | null | azure-mgmt-storage/azure/mgmt/storage/v2017_06_01/operations/operations.py | jmalobicky/azure-sdk-for-python | 61234a3d83f8fb481d1dd2386e54e888864878fd | [
"MIT"
] | 1 | 2018-08-28T14:36:47.000Z | 2018-08-28T14:36:47.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class Operations(object):
    """Operations operations.
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: Client Api Version. Constant value: "2017-06-01".
    """
    # Expose the generated model classes on the operations group
    # (autogenerated AutoRest convention).
    models = models
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # Pinned REST API version for this operations group.
        self.api_version = "2017-06-01"
        self.config = config
    def list(
            self, custom_headers=None, raw=False, **operation_config):
        """Lists all of the available Storage Rest API operations.
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of Operation
        :rtype:
         ~azure.mgmt.storage.v2017_06_01.models.OperationPaged[~azure.mgmt.storage.v2017_06_01.models.Operation]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Paging callback: invoked by OperationPaged once per page; the
        # first call has next_link=None, later calls pass the server's
        # continuation link verbatim.
        def internal_paging(next_link=None, raw=False):
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                # Continuation links already embed their query parameters.
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                # Correlation id for service-side request tracing.
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)
            if response.status_code not in [200]:
                # Anything other than 200 is surfaced as a CloudError.
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        deserialized = models.OperationPaged(internal_paging, self._deserialize.dependencies)
        if raw:
            # Raw mode wraps the pager so callers can inspect headers too.
            header_dict = {}
            client_raw_response = models.OperationPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
    # Static route for the list operation (read by internal_paging above).
    list.metadata = {'url': '/providers/Microsoft.Storage/operations'}
acf3fec178df4cd5fd0b730c0e21f342f674c77c | 8,141 | py | Python | nitro-python/nssrc/com/citrix/netscaler/nitro/resource/config/appqoe/appqoepolicy.py | culbertm/NSttyPython | ff9f6aedae3fb8495342cd0fc4247c819cf47397 | [
"Apache-2.0"
] | null | null | null | nitro-python/nssrc/com/citrix/netscaler/nitro/resource/config/appqoe/appqoepolicy.py | culbertm/NSttyPython | ff9f6aedae3fb8495342cd0fc4247c819cf47397 | [
"Apache-2.0"
] | null | null | null | nitro-python/nssrc/com/citrix/netscaler/nitro/resource/config/appqoe/appqoepolicy.py | culbertm/NSttyPython | ff9f6aedae3fb8495342cd0fc4247c819cf47397 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2008-2016 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class appqoepolicy(base_resource) :
    """ Configuration for AppQoS policy resource. """
    def __init__(self) :
        # Backing fields for the NITRO resource attributes. The triple
        # underscore on ___count matches the key used by the NITRO wire
        # format for count queries.
        self._name = None
        self._rule = None
        self._action = None
        self._hits = None
        self.___count = None
    @property
    def name(self) :
        r""".<br/>Minimum length = 1.
        """
        try :
            return self._name
        except Exception as e:
            raise e
    @name.setter
    def name(self, name) :
        r""".<br/>Minimum length = 1
        """
        try :
            self._name = name
        except Exception as e:
            raise e
    @property
    def rule(self) :
        r"""Expression or name of a named expression, against which the request is evaluated. The policy is applied if the rule evaluates to true.
        """
        try :
            return self._rule
        except Exception as e:
            raise e
    @rule.setter
    def rule(self, rule) :
        r"""Expression or name of a named expression, against which the request is evaluated. The policy is applied if the rule evaluates to true.
        """
        try :
            self._rule = rule
        except Exception as e:
            raise e
    @property
    def action(self) :
        r"""Configured AppQoE action to trigger.<br/>Minimum length = 1.
        """
        try :
            return self._action
        except Exception as e:
            raise e
    @action.setter
    def action(self, action) :
        r"""Configured AppQoE action to trigger.<br/>Minimum length = 1
        """
        try :
            self._action = action
        except Exception as e:
            raise e
    @property
    def hits(self) :
        r"""Number of hits.
        """
        # Read-only statistic reported by the appliance; no setter.
        try :
            return self._hits
        except Exception as e:
            raise e
    def _get_nitro_response(self, service, response) :
        r""" converts nitro response into object and returns the object array in case of get request.
        """
        try :
            result = service.payload_formatter.string_to_resource(appqoepolicy_response, response, self.__class__.__name__)
            if(result.errorcode != 0) :
                # Error code 444 means the session expired; drop it so the
                # next call re-authenticates.
                if (result.errorcode == 444) :
                    service.clear_session(self)
                if result.severity :
                    if (result.severity == "ERROR") :
                        raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
                else :
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            return result.appqoepolicy
        except Exception as e :
            raise e
    def _get_object_name(self) :
        r""" Returns the value of object identifier argument
        """
        try :
            if self.name is not None :
                return str(self.name)
            return None
        except Exception as e :
            raise e
    @classmethod
    def add(cls, client, resource) :
        r""" Use this API to add appqoepolicy.
        """
        # Accepts a single resource or a list; lists are sent as one bulk
        # request. Only name/rule/action are writable on add.
        try :
            if type(resource) is not list :
                addresource = appqoepolicy()
                addresource.name = resource.name
                addresource.rule = resource.rule
                addresource.action = resource.action
                return addresource.add_resource(client)
            else :
                if (resource and len(resource) > 0) :
                    addresources = [ appqoepolicy() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        addresources[i].name = resource[i].name
                        addresources[i].rule = resource[i].rule
                        addresources[i].action = resource[i].action
                result = cls.add_bulk_request(client, addresources)
                return result
        except Exception as e :
            raise e
    @classmethod
    def delete(cls, client, resource) :
        r""" Use this API to delete appqoepolicy.
        """
        # `resource` may be a name string, an appqoepolicy object, or a
        # list of either; the type checks below dispatch accordingly.
        try :
            if type(resource) is not list :
                deleteresource = appqoepolicy()
                if type(resource) != type(deleteresource):
                    deleteresource.name = resource
                else :
                    deleteresource.name = resource.name
                return deleteresource.delete_resource(client)
            else :
                if type(resource[0]) != cls :
                    if (resource and len(resource) > 0) :
                        deleteresources = [ appqoepolicy() for _ in range(len(resource))]
                        for i in range(len(resource)) :
                            deleteresources[i].name = resource[i]
                else :
                    if (resource and len(resource) > 0) :
                        deleteresources = [ appqoepolicy() for _ in range(len(resource))]
                        for i in range(len(resource)) :
                            deleteresources[i].name = resource[i].name
                result = cls.delete_bulk_request(client, deleteresources)
                return result
        except Exception as e :
            raise e
    @classmethod
    def update(cls, client, resource) :
        r""" Use this API to update appqoepolicy.
        """
        try :
            if type(resource) is not list :
                updateresource = appqoepolicy()
                updateresource.name = resource.name
                updateresource.rule = resource.rule
                updateresource.action = resource.action
                return updateresource.update_resource(client)
            else :
                if (resource and len(resource) > 0) :
                    updateresources = [ appqoepolicy() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        updateresources[i].name = resource[i].name
                        updateresources[i].rule = resource[i].rule
                        updateresources[i].action = resource[i].action
                result = cls.update_bulk_request(client, updateresources)
                return result
        except Exception as e :
            raise e
    @classmethod
    def get(cls, client, name="", option_="") :
        r""" Use this API to fetch all the appqoepolicy resources that are configured on netscaler.
        """
        # With no name, fetches all policies; with a single name or a list
        # of names, fetches the matching resource(s) one by one.
        try :
            if not name :
                obj = appqoepolicy()
                response = obj.get_resources(client, option_)
            else :
                if type(name) != cls :
                    if type(name) is not list :
                        obj = appqoepolicy()
                        obj.name = name
                        response = obj.get_resource(client, option_)
                    else :
                        if name and len(name) > 0 :
                            response = [appqoepolicy() for _ in range(len(name))]
                            obj = [appqoepolicy() for _ in range(len(name))]
                            for i in range(len(name)) :
                                obj[i] = appqoepolicy()
                                obj[i].name = name[i]
                                response[i] = obj[i].get_resource(client, option_)
            return response
        except Exception as e :
            raise e
    @classmethod
    def get_filtered(cls, client, filter_) :
        r""" Use this API to fetch filtered set of appqoepolicy resources.
        filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        try :
            obj = appqoepolicy()
            option_ = options()
            option_.filter = filter_
            response = obj.getfiltered(client, option_)
            return response
        except Exception as e :
            raise e
    @classmethod
    def count(cls, client) :
        r""" Use this API to count the appqoepolicy resources configured on NetScaler.
        """
        try :
            obj = appqoepolicy()
            option_ = options()
            option_.count = True
            response = obj.get_resources(client, option_)
            if response :
                # The appliance returns the count in the ___count field of
                # the first (only) element.
                return response[0].__dict__['___count']
            return 0
        except Exception as e :
            raise e
    @classmethod
    def count_filtered(cls, client, filter_) :
        r""" Use this API to count filtered the set of appqoepolicy resources.
        Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        try :
            obj = appqoepolicy()
            option_ = options()
            option_.count = True
            option_.filter = filter_
            response = obj.getfiltered(client, option_)
            if response :
                return response[0].__dict__['___count']
            return 0
        except Exception as e :
            raise e
class appqoepolicy_response(base_response) :
    """NITRO response envelope holding a list of appqoepolicy resources."""
    def __init__(self, length=1) :
        self.appqoepolicy = []
        # Standard NITRO response metadata fields.
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        # Pre-allocate `length` empty resource objects for deserialization.
        self.appqoepolicy = [appqoepolicy() for _ in range(length)]
acf3ff373e7699336044f85bd900af8d5f07e9c1 | 2,615 | py | Python | ProstrateCancer/create_data.py | nizamphoenix/kaggle | a9c993d0441a6d9260d605a630f95d938e6329db | [
"MIT"
] | null | null | null | ProstrateCancer/create_data.py | nizamphoenix/kaggle | a9c993d0441a6d9260d605a630f95d938e6329db | [
"MIT"
] | null | null | null | ProstrateCancer/create_data.py | nizamphoenix/kaggle | a9c993d0441a6d9260d605a630f95d938e6329db | [
"MIT"
] | null | null | null | def get_data():
import pandas as pd
susp = pd.read_csv('../input/suspicious/PANDA_Suspicious_Slides.csv')
# ['marks', 'No Mask', 'Background only', 'No cancerous tissue but ISUP Grade > 0', 'tiss', 'blank']
to_drop = susp.query("reason in ['marks','Background only','tiss','blank']")['image_id']
df = pd.read_csv(LABELS).set_index('image_id')
good_index = list(set(df.index)-set(to_drop))
df = df.loc[good_index]
df = df.reset_index()
splits = StratifiedKFold(n_splits=nfolds, random_state=SEED, shuffle=True)
splits = list(splits.split(df,df.isup_grade))
folds_splits = np.zeros(len(df)).astype(np.int)
for i in range(nfolds):
folds_splits[splits[i][1]] = i
df['split'] = folds_splits
df['gleason_score']=df['gleason_score'].replace('negative','0+0')
df[['prim_gleason','secon_gleason']] = df.gleason_score.str.split("+",expand=True)
df[['prim_gleason','secon_gleason']] = df[['prim_gleason','secon_gleason']].astype(np.int64)
df['prim_gleason']=df['prim_gleason'].replace(3,1)
df['prim_gleason']=df['prim_gleason'].replace(4,2)
df['prim_gleason']=df['prim_gleason'].replace(5,3)
df['secon_gleason']=df['secon_gleason'].replace(3,1)
df['secon_gleason']=df['secon_gleason'].replace(4,2)
df['secon_gleason']=df['secon_gleason'].replace(5,3)
print("****************df shape:",df.shape,"***********************")
print(">>>>>>>>>Before sampling<<<<<<<<<<<<<")
for isup in [0,1,2,3,4,5]:
print("isup grade:",isup,"| n_instances:",df.query('isup_grade=={0}'.format(isup)).shape[0],"| corresponding gleason score:",df[['isup_grade','gleason_score']].query('isup_grade=={0}'.format(isup))['gleason_score'].unique())
print("----"*20)
#df.drop([df[df['image_id']=="b0a92a74cb53899311acc30b7405e101"].index[0]],inplace=True)
#b0a92a74cb53899311acc30b7405e101 is the only image id with gleason 4+3 mapping to isup=2
df = pd.concat([df.query('isup_grade==0').iloc[:1200],df.query('isup_grade==1').iloc[:1200],df.query('isup_grade==2 or isup_grade==3 or isup_grade==4 or isup_grade==5')],axis=0)
df = df.sample(n=2000,random_state=SEED).reset_index(drop=True)#shuffling
print(">>>>>>>>>After sampling<<<<<<<<<<")
for isup in [0,1,2,3,4,5]:
print("isup grade:",isup,"| n_instances:",df.query('isup_grade=={0}'.format(isup)).shape[0],"| corresponding gleason score:",df[['isup_grade','gleason_score']].query('isup_grade=={0}'.format(isup))['gleason_score'].unique())
print("----"*20)
return df
df = get_data()
df[['isup_grade','split']].hist(bins=50)
df.head()
| 62.261905 | 232 | 0.646272 |
acf400b6fec88feb7d262d19386520172919d917 | 1,523 | py | Python | cogs/ping.py | War-Keeper/ClassMateBot | 0d29f217c13ff7ac41e5da4a6aacfe2f25d1249e | [
"MIT"
] | 2 | 2021-09-02T14:14:35.000Z | 2021-09-25T20:35:03.000Z | cogs/ping.py | War-Keeper/ClassMateBot | 0d29f217c13ff7ac41e5da4a6aacfe2f25d1249e | [
"MIT"
] | 62 | 2021-09-03T23:28:53.000Z | 2021-11-01T23:41:07.000Z | cogs/ping.py | War-Keeper/ClassMateBot | 0d29f217c13ff7ac41e5da4a6aacfe2f25d1249e | [
"MIT"
] | 6 | 2021-10-02T23:29:34.000Z | 2021-11-04T01:54:57.000Z | # Copyright (c) 2021 War-Keeper
import discord
from discord.ext import commands
# ----------------------------------------------------------------------------------------------
# Returns the ping of the bot, useful for testing bot lag and as a simple functionality command
# ----------------------------------------------------------------------------------------------
class Helpful(commands.Cog):
    """Cog bundling small utility commands for the bot."""

    def __init__(self, bot):
        # Keep a reference to the bot so commands can read its latency.
        self.bot = bot

    @commands.command()
    async def ping(self, ctx):
        """Reply with the bot's current websocket latency, in milliseconds.

        Useful for checking bot lag and as a trivial liveness command.
        """
        # Latency is reported in seconds; convert to ms and clamp to an
        # upper bound of 999999999 to avoid float-infinity values that
        # would crash testing.
        latency_ms = round(min(999999999, self.bot.latency * 1000))
        await ctx.send(f"Pong! My ping currently is {latency_ms}ms")
# -------------------------------------
# add the file to the bot's cog system
# -------------------------------------
def setup(bot):
    # Entry point called by discord.py's extension loader: registers the
    # Helpful cog so its commands become available on the bot.
    bot.add_cog(Helpful(bot))
| 43.514286 | 111 | 0.472095 |
acf40205646210722307914b1a1a504188c14aec | 8,913 | py | Python | code/tmp_rtrip/idlelib/hyperparser.py | emilyemorehouse/ast-and-me | 3f58117512e125e1ecbe3c72f2f0d26adb80b7b3 | [
"MIT"
] | 24 | 2018-01-23T05:28:40.000Z | 2021-04-13T20:52:59.000Z | code/tmp_rtrip/idlelib/hyperparser.py | emilyemorehouse/ast-and-me | 3f58117512e125e1ecbe3c72f2f0d26adb80b7b3 | [
"MIT"
] | 17 | 2017-12-21T18:32:31.000Z | 2018-12-18T17:09:50.000Z | code/tmp_rtrip/idlelib/hyperparser.py | emilyemorehouse/ast-and-me | 3f58117512e125e1ecbe3c72f2f0d26adb80b7b3 | [
"MIT"
] | null | null | null | """Provide advanced parsing abilities for ParenMatch and other extensions.
HyperParser uses PyParser. PyParser mostly gives information on the
proper indentation of code. HyperParser gives additional information on
the structure of code.
"""
from keyword import iskeyword
import string
from idlelib import pyparse
# Fast-path tables for identifier scanning: the frozensets hold the legal
# ASCII identifier characters, and the two 128-entry lists map ord(char)
# -> bool so the scanner can test ASCII chars without calling
# str.isidentifier() each time.
_ASCII_ID_CHARS = frozenset(string.ascii_letters + string.digits + '_')
_ASCII_ID_FIRST_CHARS = frozenset(string.ascii_letters + '_')
_IS_ASCII_ID_CHAR = [(chr(x) in _ASCII_ID_CHARS) for x in range(128)]
_IS_ASCII_ID_FIRST_CHAR = [(chr(x) in _ASCII_ID_FIRST_CHARS) for x in range
    (128)]
class HyperParser:
    """Analyze the structure of the Python statement around a text index.

    Wraps pyparse.Parser to answer questions such as "is this index in a
    string?", "what brackets surround it?", and "what expression ends
    here?" for editor extensions like ParenMatch and CallTips.
    """
    def __init__(self, editwin, index):
        """To initialize, analyze the surroundings of the given index."""
        self.editwin = editwin
        self.text = text = editwin.text
        parser = pyparse.Parser(editwin.indentwidth, editwin.tabwidth)
        def index2line(index):
            # Tk text indices are "line.col" strings; float() keeps the
            # integer line part.
            return int(float(index))
        lno = index2line(text.index(index))
        if not editwin.context_use_ps1:
            # Editor window: scan progressively larger contexts backwards
            # until a good statement start is found (or file start).
            for context in editwin.num_context_lines:
                startat = max(lno - context, 1)
                startatindex = repr(startat) + '.0'
                stopatindex = '%d.end' % lno
                # Parser requires the trailing ' \n'.
                parser.set_str(text.get(startatindex, stopatindex) + ' \n')
                bod = parser.find_good_parse_start(editwin.
                    _build_char_in_string_func(startatindex))
                if bod is not None or startat == 1:
                    break
            parser.set_lo(bod or 0)
        else:
            # Shell window: the statement starts after the previous prompt.
            r = text.tag_prevrange('console', index)
            if r:
                startatindex = r[1]
            else:
                startatindex = '1.0'
            stopatindex = '%d.end' % lno
            parser.set_str(text.get(startatindex, stopatindex) + ' \n')
            parser.set_lo(0)
        # Drop the ' \n' appended above.
        self.rawtext = parser.str[:-2]
        self.stopatindex = stopatindex
        # bracketing[i] is (offset, nesting-level); isopener[i] is True when
        # entry i opens a deeper nesting level than its predecessor.
        self.bracketing = parser.get_last_stmt_bracketing()
        self.isopener = [(i > 0 and self.bracketing[i][1] > self.bracketing
            [i - 1][1]) for i in range(len(self.bracketing))]
        self.set_index(index)
    def set_index(self, index):
        """Set the index to which the functions relate.
        The index must be in the same statement.
        """
        indexinrawtext = len(self.rawtext) - len(self.text.get(index, self.
            stopatindex))
        if indexinrawtext < 0:
            raise ValueError('Index %s precedes the analyzed statement' % index
                )
        self.indexinrawtext = indexinrawtext
        # Find the last bracketing entry at or before the index.
        self.indexbracket = 0
        while self.indexbracket < len(self.bracketing) - 1 and self.bracketing[
            self.indexbracket + 1][0] < self.indexinrawtext:
            self.indexbracket += 1
        # If the next entry starts exactly at the index and is not an
        # opener, the index belongs to that entry instead.
        if self.indexbracket < len(self.bracketing) - 1 and self.bracketing[
            self.indexbracket + 1][0
            ] == self.indexinrawtext and not self.isopener[self.
            indexbracket + 1]:
            self.indexbracket += 1
    def is_in_string(self):
        """Is the index given to the HyperParser in a string?"""
        return self.isopener[self.indexbracket] and self.rawtext[self.
            bracketing[self.indexbracket][0]] in ('"', "'")
    def is_in_code(self):
        """Is the index given to the HyperParser in normal code?"""
        # i.e. not inside a comment or a string literal.
        return not self.isopener[self.indexbracket] or self.rawtext[self.
            bracketing[self.indexbracket][0]] not in ('#', '"', "'")
    def get_surrounding_brackets(self, openers='([{', mustclose=False):
        """Return bracket indexes or None.
        If the index given to the HyperParser is surrounded by a
        bracket defined in openers (or at least has one before it),
        return the indices of the opening bracket and the closing
        bracket (or the end of line, whichever comes first).
        If it is not surrounded by brackets, or the end of line comes
        before the closing bracket and mustclose is True, returns None.
        """
        bracketinglevel = self.bracketing[self.indexbracket][1]
        before = self.indexbracket
        # Walk backwards to the nearest opener from `openers` that is at
        # (or below) the current nesting level.
        while not self.isopener[before] or self.rawtext[self.bracketing[
            before][0]] not in openers or self.bracketing[before][1
            ] > bracketinglevel:
            before -= 1
            if before < 0:
                return None
            bracketinglevel = min(bracketinglevel, self.bracketing[before][1])
        # Walk forwards to where the nesting level drops below the opener's.
        after = self.indexbracket + 1
        while after < len(self.bracketing) and self.bracketing[after][1
            ] >= bracketinglevel:
            after += 1
        # Convert raw-text offsets back to Tk text indices (counting back
        # from the end of the analyzed statement).
        beforeindex = self.text.index('%s-%dc' % (self.stopatindex, len(
            self.rawtext) - self.bracketing[before][0]))
        if after >= len(self.bracketing) or self.bracketing[after][0] > len(
            self.rawtext):
            if mustclose:
                return None
            afterindex = self.stopatindex
        else:
            afterindex = self.text.index('%s-%dc' % (self.stopatindex, len(
                self.rawtext) - (self.bracketing[after][0] - 1)))
        return beforeindex, afterindex
    # Keywords that are valid inside an expression (unlike e.g. 'if').
    _ID_KEYWORDS = frozenset({'True', 'False', 'None'})
    @classmethod
    def _eat_identifier(cls, str, limit, pos):
        """Given a string and pos, return the number of chars in the
        identifier which ends at pos, or 0 if there is no such one.
        Non-identifier characters stop the scan; keywords (other than
        True/False/None) are not identifiers.
        """
        is_ascii_id_char = _IS_ASCII_ID_CHAR
        i = pos
        # Fast backwards scan over ASCII identifier characters.
        while i > limit and (ord(str[i - 1]) < 128 and is_ascii_id_char[ord
            (str[i - 1])]):
            i -= 1
        if i > limit and ord(str[i - 1]) >= 128:
            # Slow path for non-ASCII: extend backwards in chunks of 4/2/1
            # while the candidate (prefixed with 'a' so a leading digit or
            # continuation char is accepted mid-scan) stays an identifier.
            while i - 4 >= limit and ('a' + str[i - 4:pos]).isidentifier():
                i -= 4
            if i - 2 >= limit and ('a' + str[i - 2:pos]).isidentifier():
                i -= 2
            if i - 1 >= limit and ('a' + str[i - 1:pos]).isidentifier():
                i -= 1
            if not str[i:pos].isidentifier():
                return 0
        elif i < pos:
            # Pure-ASCII candidate: the first character must be a legal
            # identifier start (letter or underscore, not a digit).
            if not _IS_ASCII_ID_FIRST_CHAR[ord(str[i])]:
                return 0
        if i < pos and (iskeyword(str[i:pos]) and str[i:pos] not in cls.
            _ID_KEYWORDS):
            return 0
        return pos - i
    # Characters that may separate parts of an expression; includes the
    # line-continuation backslash.
    _whitespace_chars = ' \t\n\\'
    def get_expression(self):
        """Return a string with the Python expression which ends at the
        given index, which is empty if there is no real one.
        """
        if not self.is_in_code():
            # NOTE(review): 'calledif' is a typo in the runtime message;
            # left untouched since callers may match on it.
            raise ValueError(
                'get_expression should only be calledif index is inside a code.'
                )
        rawtext = self.rawtext
        bracketing = self.bracketing
        brck_index = self.indexbracket
        brck_limit = bracketing[brck_index][0]
        pos = self.indexinrawtext
        last_identifier_pos = pos
        # postdot_phase is True when the scanner expects an identifier next
        # (at the start, or just after eating a '.').
        postdot_phase = True
        while 1:
            # Skip whitespace, one '.', and comments between expression
            # parts, walking backwards through the raw text.
            while 1:
                if pos > brck_limit and rawtext[pos - 1
                    ] in self._whitespace_chars:
                    pos -= 1
                elif not postdot_phase and pos > brck_limit and rawtext[pos - 1
                    ] == '.':
                    pos -= 1
                    postdot_phase = True
                elif pos == brck_limit and brck_index > 0 and rawtext[
                    bracketing[brck_index - 1][0]] == '#':
                    # Step over a comment: jump to just before it.
                    brck_index -= 2
                    brck_limit = bracketing[brck_index][0]
                    pos = bracketing[brck_index + 1][0]
                else:
                    break
            if not postdot_phase:
                # An identifier not followed by a '.' ends the expression.
                break
            ret = self._eat_identifier(rawtext, brck_limit, pos)
            if ret:
                # Found an identifier; remember where it starts.
                pos = pos - ret
                last_identifier_pos = pos
                postdot_phase = False
            elif pos == brck_limit:
                # We are at a bracketing limit; if it closes a bracket we
                # may be able to continue past it.
                level = bracketing[brck_index][1]
                while brck_index > 0 and bracketing[brck_index - 1][1] > level:
                    brck_index -= 1
                if bracketing[brck_index][0] == brck_limit:
                    # Not at the end of a closing bracket.
                    break
                pos = bracketing[brck_index][0]
                brck_index -= 1
                brck_limit = bracketing[brck_index][0]
                last_identifier_pos = pos
                if rawtext[pos] in '([':
                    # Can continue scanning past parens / square brackets.
                    pass
                else:
                    if rawtext[pos] in '\'"':
                        # A string literal: include any r/b/u prefix, then
                        # the expression ends here.
                        while pos > 0 and rawtext[pos - 1] in 'rRbBuU':
                            pos -= 1
                        last_identifier_pos = pos
                    break
            else:
                # Found an operator or something else; stop.
                break
        return rawtext[last_identifier_pos:self.indexinrawtext]
if __name__ == '__main__':
    # Run this module's unit tests when executed directly.
    import unittest
    unittest.main('idlelib.idle_test.test_hyperparser', verbosity=2)
| 41.073733 | 80 | 0.560081 |
acf402a0c8ac9c323c1ecbb4ee89fac078e22b30 | 26,407 | py | Python | homeassistant/components/device_tracker/__init__.py | adolfoeliazat/voidhomecontrol | 6d733253811c553912e46e24debec818b28b0688 | [
"Apache-2.0"
] | null | null | null | homeassistant/components/device_tracker/__init__.py | adolfoeliazat/voidhomecontrol | 6d733253811c553912e46e24debec818b28b0688 | [
"Apache-2.0"
] | null | null | null | homeassistant/components/device_tracker/__init__.py | adolfoeliazat/voidhomecontrol | 6d733253811c553912e46e24debec818b28b0688 | [
"Apache-2.0"
] | null | null | null | """
Provide functionality to keep track of devices.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/device_tracker/
"""
import asyncio
from datetime import timedelta
import logging
import os
from typing import Any, List, Sequence, Callable
import aiohttp
import async_timeout
import voluptuous as vol
from homeassistant.setup import async_prepare_setup_platform
from homeassistant.core import callback
from homeassistant.components import group, zone
from homeassistant.components.discovery import SERVICE_NETGEAR
from homeassistant.config import load_yaml_config_file, async_log_exception
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers import config_per_platform, discovery
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.helpers.restore_state import async_get_last_state
from homeassistant.helpers.typing import GPSType, ConfigType, HomeAssistantType
import homeassistant.helpers.config_validation as cv
import homeassistant.util as util
from homeassistant.util.async import run_coroutine_threadsafe
import homeassistant.util.dt as dt_util
from homeassistant.util.yaml import dump
from homeassistant.helpers.event import async_track_utc_time_change
from homeassistant.const import (
ATTR_GPS_ACCURACY, ATTR_LATITUDE, ATTR_LONGITUDE, CONF_NAME, CONF_MAC,
DEVICE_DEFAULT_NAME, STATE_HOME, STATE_NOT_HOME, ATTR_ENTITY_ID,
CONF_ICON, ATTR_ICON)
_LOGGER = logging.getLogger(__name__)

# Component identity and dependencies.
DOMAIN = 'device_tracker'
DEPENDENCIES = ['zone']

# Group that aggregates every tracked device entity.
GROUP_NAME_ALL_DEVICES = 'all devices'
ENTITY_ID_ALL_DEVICES = group.ENTITY_ID_FORMAT.format('all_devices')
ENTITY_ID_FORMAT = DOMAIN + '.{}'

# Persisted registry of known devices.
YAML_DEVICES = 'known_devices.yaml'

# Configuration keys and their defaults.
CONF_TRACK_NEW = 'track_new_devices'
DEFAULT_TRACK_NEW = True
CONF_CONSIDER_HOME = 'consider_home'
DEFAULT_CONSIDER_HOME = timedelta(seconds=180)
CONF_SCAN_INTERVAL = 'interval_seconds'
DEFAULT_SCAN_INTERVAL = timedelta(seconds=12)
CONF_AWAY_HIDE = 'hide_if_away'
DEFAULT_AWAY_HIDE = False

# Event fired on the bus when a previously unknown device is seen.
EVENT_NEW_DEVICE = 'device_tracker_new_device'

# Service name and the attribute keys accepted by the 'see' service.
SERVICE_SEE = 'see'
ATTR_ATTRIBUTES = 'attributes'
ATTR_BATTERY = 'battery'
ATTR_DEV_ID = 'dev_id'
ATTR_GPS = 'gps'
ATTR_HOST_NAME = 'host_name'
ATTR_LOCATION_NAME = 'location_name'
ATTR_MAC = 'mac'
ATTR_NAME = 'name'
ATTR_SOURCE_TYPE = 'source_type'

# How a device position was determined.
SOURCE_TYPE_GPS = 'gps'
SOURCE_TYPE_ROUTER = 'router'

PLATFORM_SCHEMA = cv.PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_SCAN_INTERVAL): cv.time_period,
    vol.Optional(CONF_TRACK_NEW, default=DEFAULT_TRACK_NEW): cv.boolean,
    vol.Optional(CONF_CONSIDER_HOME,
                 default=DEFAULT_CONSIDER_HOME): vol.All(
                     cv.time_period, cv.positive_timedelta)
})

# Map from discovery service names to the platform module to load.
DISCOVERY_PLATFORMS = {
    SERVICE_NETGEAR: 'netgear',
}
def is_on(hass: HomeAssistantType, entity_id: str=None):
    """Return True when the given device - or, without an entity_id,
    any tracked device - is currently at home."""
    if entity_id:
        target = entity_id
    else:
        target = ENTITY_ID_ALL_DEVICES
    return hass.states.is_state(target, STATE_HOME)
def see(hass: HomeAssistantType, mac: str=None, dev_id: str=None,
        host_name: str=None, location_name: str=None,
        gps: GPSType=None, gps_accuracy=None,
        battery=None, attributes: dict=None):
    """Call service to notify you see device."""
    # Only forward the arguments the caller actually supplied.
    candidates = (
        (ATTR_MAC, mac),
        (ATTR_DEV_ID, dev_id),
        (ATTR_HOST_NAME, host_name),
        (ATTR_LOCATION_NAME, location_name),
        (ATTR_GPS, gps),
        (ATTR_GPS_ACCURACY, gps_accuracy),
        (ATTR_BATTERY, battery),
    )
    data = {}
    for key, value in candidates:
        if value is not None:
            data[key] = value
    if attributes:
        data[ATTR_ATTRIBUTES] = attributes
    hass.services.call(DOMAIN, SERVICE_SEE, data)
@asyncio.coroutine
def async_setup(hass: HomeAssistantType, config: ConfigType):
    """Set up the device tracker component.

    Loads the known-devices registry, sets up each configured platform,
    wires up discovery, registers the 'see' service, and restores
    previously tracked entities.  Returns False on invalid config.
    """
    yaml_path = hass.config.path(YAML_DEVICES)
    try:
        conf = config.get(DOMAIN, [])
    except vol.Invalid as ex:
        async_log_exception(ex, DOMAIN, config, hass)
        return False
    else:
        # Only the first device_tracker config entry supplies options.
        conf = conf[0] if conf else {}
        consider_home = conf.get(CONF_CONSIDER_HOME, DEFAULT_CONSIDER_HOME)
        track_new = conf.get(CONF_TRACK_NEW, DEFAULT_TRACK_NEW)
    devices = yield from async_load_config(yaml_path, hass, consider_home)
    tracker = DeviceTracker(hass, consider_home, track_new, devices)

    @asyncio.coroutine
    def async_setup_platform(p_type, p_config, disc_info=None):
        """Set up a single device tracker platform.

        Platforms are either scanner-based (get_scanner) or push-based
        (setup_scanner); both sync and async variants are supported.
        """
        platform = yield from async_prepare_setup_platform(
            hass, config, DOMAIN, p_type)
        if platform is None:
            return
        _LOGGER.info("Setting up %s.%s", DOMAIN, p_type)
        try:
            scanner = None
            setup = None
            if hasattr(platform, 'async_get_scanner'):
                scanner = yield from platform.async_get_scanner(
                    hass, {DOMAIN: p_config})
            elif hasattr(platform, 'get_scanner'):
                scanner = yield from hass.async_add_job(
                    platform.get_scanner, hass, {DOMAIN: p_config})
            elif hasattr(platform, 'async_setup_scanner'):
                setup = yield from platform.async_setup_scanner(
                    hass, p_config, tracker.async_see, disc_info)
            elif hasattr(platform, 'setup_scanner'):
                setup = yield from hass.async_add_job(
                    platform.setup_scanner, hass, p_config, tracker.see,
                    disc_info)
            else:
                raise HomeAssistantError("Invalid device_tracker platform.")
            if scanner:
                # Scanner platforms are polled on an interval by the
                # shared scanner loop.
                async_setup_scanner_platform(
                    hass, p_config, scanner, tracker.async_see, p_type)
                return
            if not setup:
                _LOGGER.error("Error setting up platform %s", p_type)
                return
        except Exception:  # pylint: disable=broad-except
            _LOGGER.exception("Error setting up platform %s", p_type)

    setup_tasks = [async_setup_platform(p_type, p_config) for p_type, p_config
                   in config_per_platform(config, DOMAIN)]
    if setup_tasks:
        yield from asyncio.wait(setup_tasks, loop=hass.loop)
    yield from tracker.async_setup_group()

    @callback
    def async_device_tracker_discovered(service, info):
        """Handle the discovery of device tracker platforms."""
        hass.async_add_job(
            async_setup_platform(DISCOVERY_PLATFORMS[service], {}, info))

    discovery.async_listen(
        hass, DISCOVERY_PLATFORMS.keys(), async_device_tracker_discovered)

    @asyncio.coroutine
    def async_platform_discovered(platform, info):
        """Load a platform discovered at runtime."""
        yield from async_setup_platform(platform, {}, disc_info=info)

    discovery.async_listen_platform(hass, DOMAIN, async_platform_discovered)
    # Clean up stale devices every 5 seconds.
    async_track_utc_time_change(
        hass, tracker.async_update_stale, second=range(0, 60, 5))

    @asyncio.coroutine
    def async_see_service(call):
        """Service handler for the 'see' service."""
        # Whitelist the accepted attributes; anything else is dropped.
        args = {key: value for key, value in call.data.items() if key in
                (ATTR_MAC, ATTR_DEV_ID, ATTR_HOST_NAME, ATTR_LOCATION_NAME,
                 ATTR_GPS, ATTR_GPS_ACCURACY, ATTR_BATTERY, ATTR_ATTRIBUTES)}
        yield from tracker.async_see(**args)

    descriptions = yield from hass.async_add_job(
        load_yaml_config_file,
        os.path.join(os.path.dirname(__file__), 'services.yaml')
    )
    hass.services.async_register(
        DOMAIN, SERVICE_SEE, async_see_service, descriptions.get(SERVICE_SEE))
    # Restore entities for devices tracked in previous runs.
    yield from tracker.async_setup_tracked_device()
    return True
class DeviceTracker(object):
    """Central registry of tracked devices.

    Keeps devices indexed both by device id and by MAC address, creates
    new Device objects when unknown devices are seen, and persists them
    to known_devices.yaml.
    """

    def __init__(self, hass: HomeAssistantType, consider_home: timedelta,
                 track_new: bool, devices: Sequence) -> None:
        """Initialize a device tracker.

        devices is the sequence of Device objects loaded from the YAML
        registry; duplicate ids/MACs are logged but the first wins.
        """
        self.hass = hass
        self.devices = {dev.dev_id: dev for dev in devices}
        self.mac_to_dev = {dev.mac: dev for dev in devices if dev.mac}
        self.consider_home = consider_home
        self.track_new = track_new
        self.group = None  # type: group.Group
        # Serializes writes to known_devices.yaml.
        self._is_updating = asyncio.Lock(loop=hass.loop)
        for dev in devices:
            if self.devices[dev.dev_id] is not dev:
                _LOGGER.warning('Duplicate device IDs detected %s', dev.dev_id)
            if dev.mac and self.mac_to_dev[dev.mac] is not dev:
                _LOGGER.warning('Duplicate device MAC addresses detected %s',
                                dev.mac)

    def see(self, mac: str=None, dev_id: str=None, host_name: str=None,
            location_name: str=None, gps: GPSType=None, gps_accuracy=None,
            battery: str=None, attributes: dict=None,
            source_type: str=SOURCE_TYPE_GPS):
        """Notify the device tracker that you see a device.

        Thread-safe wrapper that schedules async_see on the event loop.
        """
        self.hass.add_job(
            self.async_see(mac, dev_id, host_name, location_name, gps,
                           gps_accuracy, battery, attributes, source_type)
        )

    @asyncio.coroutine
    def async_see(self, mac: str=None, dev_id: str=None, host_name: str=None,
                  location_name: str=None, gps: GPSType=None,
                  gps_accuracy=None, battery: str=None, attributes: dict=None,
                  source_type: str=SOURCE_TYPE_GPS):
        """Notify the device tracker that you see a device.

        Looks the device up by MAC (preferred) or device id; creates and
        persists a new Device when neither matches.

        This method is a coroutine.
        """
        if mac is None and dev_id is None:
            raise HomeAssistantError('Neither mac or device id passed in')
        elif mac is not None:
            mac = str(mac).upper()
            device = self.mac_to_dev.get(mac)
            if not device:
                dev_id = util.slugify(host_name or '') or util.slugify(mac)
        else:
            dev_id = cv.slug(str(dev_id).lower())
            device = self.devices.get(dev_id)
        if device:
            yield from device.async_seen(
                host_name, location_name, gps, gps_accuracy, battery,
                attributes, source_type)
            if device.track:
                yield from device.async_update_ha_state()
            return
        # If no device can be found, create it
        dev_id = util.ensure_unique_string(dev_id, self.devices.keys())
        device = Device(
            self.hass, self.consider_home, self.track_new,
            dev_id, mac, (host_name or dev_id).replace('_', ' '))
        self.devices[dev_id] = device
        if mac is not None:
            self.mac_to_dev[mac] = device
        yield from device.async_seen(
            host_name, location_name, gps, gps_accuracy, battery, attributes,
            source_type)
        if device.track:
            yield from device.async_update_ha_state()
        self.hass.bus.async_fire(EVENT_NEW_DEVICE, {
            ATTR_ENTITY_ID: device.entity_id,
            ATTR_HOST_NAME: device.host_name,
        })
        # During init, we ignore the group
        if self.group is not None:
            yield from self.group.async_update_tracked_entity_ids(
                list(self.group.tracking) + [device.entity_id])
        # lookup mac vendor string to be stored in config
        yield from device.set_vendor_for_mac()
        # update known_devices.yaml
        self.hass.async_add_job(
            self.async_update_config(
                self.hass.config.path(YAML_DEVICES), dev_id, device)
        )

    @asyncio.coroutine
    def async_update_config(self, path, dev_id, device):
        """Add device to YAML configuration file.

        Serialized through self._is_updating so concurrent discoveries
        do not interleave writes.  This method is a coroutine.
        """
        with (yield from self._is_updating):
            yield from self.hass.async_add_job(
                update_config, self.hass.config.path(YAML_DEVICES),
                dev_id, device)

    @asyncio.coroutine
    def async_setup_group(self):
        """Initialize group for all tracked devices.

        This method is a coroutine.
        """
        entity_ids = (dev.entity_id for dev in self.devices.values()
                      if dev.track)
        self.group = yield from group.Group.async_create_group(
            self.hass, GROUP_NAME_ALL_DEVICES, entity_ids, False)

    @callback
    def async_update_stale(self, now: dt_util.dt.datetime):
        """Update stale devices.

        Re-evaluates devices whose last update said 'home' but which
        have not been seen within consider_home.  This method must be
        run in the event loop.
        """
        for device in self.devices.values():
            if (device.track and device.last_update_home) and \
               device.stale(now):
                self.hass.async_add_job(device.async_update_ha_state(True))

    @asyncio.coroutine
    def async_setup_tracked_device(self):
        """Set up all not exists tracked devices.

        Restores entities for tracked devices that have not yet been
        seen in this run.  This method is a coroutine.
        """
        @asyncio.coroutine
        def async_init_single_device(dev):
            """Init a single device_tracker entity."""
            yield from dev.async_added_to_hass()
            yield from dev.async_update_ha_state()

        tasks = []
        for device in self.devices.values():
            if device.track and not device.last_seen:
                tasks.append(self.hass.async_add_job(
                    async_init_single_device(device)))
        if tasks:
            yield from asyncio.wait(tasks, loop=self.hass.loop)
class Device(Entity):
    """Represent a tracked device."""

    host_name = None  # type: str
    location_name = None  # type: str
    gps = None  # type: GPSType
    gps_accuracy = 0
    last_seen = None  # type: dt_util.dt.datetime
    battery = None  # type: str
    attributes = None  # type: dict
    vendor = None  # type: str
    icon = None  # type: str
    # Track if the last update of this device was HOME.
    last_update_home = False
    _state = STATE_NOT_HOME

    def __init__(self, hass: HomeAssistantType, consider_home: timedelta,
                 track: bool, dev_id: str, mac: str, name: str=None,
                 picture: str=None, gravatar: str=None, icon: str=None,
                 hide_if_away: bool=False, vendor: str=None) -> None:
        """Initialize a device.

        gravatar, when given, takes precedence over picture and is
        converted to a Gravatar URL.
        """
        self.hass = hass
        self.entity_id = ENTITY_ID_FORMAT.format(dev_id)
        # Timedelta object how long we consider a device home if it is not
        # detected anymore.
        self.consider_home = consider_home
        # Device ID
        self.dev_id = dev_id
        self.mac = mac
        # If we should track this device
        self.track = track
        # Configured name
        self.config_name = name
        # Configured picture
        if gravatar is not None:
            self.config_picture = get_gravatar_for_email(gravatar)
        else:
            self.config_picture = picture
        self.icon = icon
        self.away_hide = hide_if_away
        self.vendor = vendor
        self.source_type = None
        self._attributes = {}

    @property
    def name(self):
        """Return the name of the entity."""
        return self.config_name or self.host_name or DEVICE_DEFAULT_NAME

    @property
    def state(self):
        """Return the state of the device."""
        return self._state

    @property
    def entity_picture(self):
        """Return the picture of the device."""
        return self.config_picture

    @property
    def state_attributes(self):
        """Return the device state attributes."""
        attr = {
            ATTR_SOURCE_TYPE: self.source_type
        }
        if self.gps:
            attr[ATTR_LATITUDE] = self.gps[0]
            attr[ATTR_LONGITUDE] = self.gps[1]
            attr[ATTR_GPS_ACCURACY] = self.gps_accuracy
        if self.battery:
            attr[ATTR_BATTERY] = self.battery
        return attr

    @property
    def device_state_attributes(self):
        """Return device state attributes."""
        return self._attributes

    @property
    def hidden(self):
        """If device should be hidden."""
        return self.away_hide and self.state != STATE_HOME

    @asyncio.coroutine
    def async_seen(self, host_name: str=None, location_name: str=None,
                   gps: GPSType=None, gps_accuracy=0, battery: str=None,
                   attributes: dict=None, source_type: str=SOURCE_TYPE_GPS):
        """Mark the device as seen and recompute its state.

        Invalid gps values are logged and discarded rather than raised.
        """
        self.source_type = source_type
        self.last_seen = dt_util.utcnow()
        self.host_name = host_name
        self.location_name = location_name
        if battery:
            self.battery = battery
        if attributes:
            self._attributes.update(attributes)
        self.gps = None
        if gps is not None:
            try:
                self.gps = float(gps[0]), float(gps[1])
                self.gps_accuracy = gps_accuracy or 0
            except (ValueError, TypeError, IndexError):
                self.gps = None
                self.gps_accuracy = 0
                _LOGGER.warning(
                    "Could not parse gps value for %s: %s", self.dev_id, gps)
        # pylint: disable=not-an-iterable
        yield from self.async_update()

    def stale(self, now: dt_util.dt.datetime=None):
        """Return if device state is stale.

        A device is stale when it has been seen, but not within the
        consider_home window.  Async friendly.
        """
        return self.last_seen and \
            (now or dt_util.utcnow()) - self.last_seen > self.consider_home

    @asyncio.coroutine
    def async_update(self):
        """Update state of entity.

        Precedence: explicit location name, then GPS zone match, then
        the stale check.  This method is a coroutine.
        """
        if not self.last_seen:
            return
        elif self.location_name:
            self._state = self.location_name
        elif self.gps is not None and self.source_type == SOURCE_TYPE_GPS:
            zone_state = zone.async_active_zone(
                self.hass, self.gps[0], self.gps[1], self.gps_accuracy)
            if zone_state is None:
                self._state = STATE_NOT_HOME
            elif zone_state.entity_id == zone.ENTITY_ID_HOME:
                self._state = STATE_HOME
            else:
                self._state = zone_state.name
        elif self.stale():
            self._state = STATE_NOT_HOME
            self.gps = None
            self.last_update_home = False
        else:
            self._state = STATE_HOME
            self.last_update_home = True

    @asyncio.coroutine
    def set_vendor_for_mac(self):
        """Set vendor string using api.macvendors.com."""
        self.vendor = yield from self.get_vendor_for_mac()

    @asyncio.coroutine
    def get_vendor_for_mac(self):
        """Try to find the vendor string for a given MAC address.

        Returns None without a MAC, 'unknown' on malformed MACs or any
        lookup failure, otherwise the vendor name from the API.
        """
        if not self.mac:
            return None
        if '_' in self.mac:
            _, mac = self.mac.split('_', 1)
        else:
            mac = self.mac
        if not len(mac.split(':')) == 6:
            return 'unknown'
        # We only need the first 3 bytes of the MAC for a lookup
        # this improves somewhat on privacy
        oui_bytes = mac.split(':')[0:3]
        # bytes like 00 get truncates to 0, API needs full bytes
        oui = '{:02x}:{:02x}:{:02x}'.format(*[int(b, 16) for b in oui_bytes])
        url = 'http://api.macvendors.com/' + oui
        try:
            websession = async_get_clientsession(self.hass)
            with async_timeout.timeout(5, loop=self.hass.loop):
                resp = yield from websession.get(url)
                # mac vendor found, response is the string
                if resp.status == 200:
                    vendor_string = yield from resp.text()
                    return vendor_string
                # If vendor is not known to the API (404) or there
                # was a failure during the lookup (500); set vendor
                # to something other then None to prevent retry
                # as the value is only relevant when it is to be stored
                # in the 'known_devices.yaml' file which only happens
                # the first time the device is seen.
                return 'unknown'
        except (asyncio.TimeoutError, aiohttp.ClientError):
            # Same as above
            return 'unknown'

    @asyncio.coroutine
    def async_added_to_hass(self):
        """Restore the entity's previous state when added to hass."""
        state = yield from async_get_last_state(self.hass, self.entity_id)
        if not state:
            return
        self._state = state.state
        for attr, var in (
                (ATTR_SOURCE_TYPE, 'source_type'),
                (ATTR_GPS_ACCURACY, 'gps_accuracy'),
                (ATTR_BATTERY, 'battery'),
        ):
            if attr in state.attributes:
                setattr(self, var, state.attributes[attr])
        if ATTR_LONGITUDE in state.attributes:
            self.gps = (state.attributes[ATTR_LATITUDE],
                        state.attributes[ATTR_LONGITUDE])
class DeviceScanner(object):
    """Base class for polling device scanner platforms.

    Subclasses implement the synchronous scan_devices/get_device_name;
    the async_* wrappers run them in the executor by default.
    """

    hass = None  # type: HomeAssistantType

    def scan_devices(self) -> List[str]:
        """Scan for devices and return a list of found MAC addresses."""
        raise NotImplementedError()

    def async_scan_devices(self) -> Any:
        """Scan for devices.

        This method must be run in the event loop and returns a coroutine.
        """
        return self.hass.async_add_job(self.scan_devices)

    def get_device_name(self, mac: str) -> str:
        """Get device name from mac."""
        raise NotImplementedError()

    def async_get_device_name(self, mac: str) -> Any:
        """Get device name from mac.

        This method must be run in the event loop and returns a coroutine.
        """
        return self.hass.async_add_job(self.get_device_name, mac)
def load_config(path: str, hass: HomeAssistantType, consider_home: timedelta):
    """Load devices from YAML configuration file.

    Synchronous wrapper: blocks on async_load_config via the event loop,
    so it must not be called from within the loop itself.
    """
    return run_coroutine_threadsafe(
        async_load_config(path, hass, consider_home), hass.loop).result()
@asyncio.coroutine
def async_load_config(path: str, hass: HomeAssistantType,
                      consider_home: timedelta):
    """Load devices from YAML configuration file.

    Invalid individual entries are logged and skipped; a missing or
    unreadable file yields an empty list.  This method is a coroutine.
    """
    dev_schema = vol.Schema({
        vol.Required(CONF_NAME): cv.string,
        vol.Optional(CONF_ICON, default=False):
            vol.Any(None, cv.icon),
        vol.Optional('track', default=False): cv.boolean,
        vol.Optional(CONF_MAC, default=None):
            vol.Any(None, vol.All(cv.string, vol.Upper)),
        vol.Optional(CONF_AWAY_HIDE, default=DEFAULT_AWAY_HIDE): cv.boolean,
        vol.Optional('gravatar', default=None): vol.Any(None, cv.string),
        vol.Optional('picture', default=None): vol.Any(None, cv.string),
        vol.Optional(CONF_CONSIDER_HOME, default=consider_home): vol.All(
            cv.time_period, cv.positive_timedelta),
        vol.Optional('vendor', default=None): vol.Any(None, cv.string),
    })
    try:
        result = []
        try:
            devices = yield from hass.async_add_job(
                load_yaml_config_file, path)
        except HomeAssistantError as err:
            _LOGGER.error("Unable to load %s: %s", path, str(err))
            return []
        for dev_id, device in devices.items():
            # Validate each entry independently so a single bad device
            # does not discard the whole registry.
            try:
                device = dev_schema(device)
                device['dev_id'] = cv.slugify(dev_id)
            except vol.Invalid as exp:
                async_log_exception(exp, dev_id, devices, hass)
            else:
                result.append(Device(hass, **device))
        return result
    except (HomeAssistantError, FileNotFoundError):
        # When YAML file could not be loaded/did not contain a dict
        return []
@callback
def async_setup_scanner_platform(hass: HomeAssistantType, config: ConfigType,
                                 scanner: Any, async_see_device: Callable,
                                 platform: str):
    """Set up the connect scanner-based platform to device tracker.

    Polls the scanner on the configured interval and reports each found
    MAC through async_see_device.  This method must be run in the event
    loop.
    """
    interval = config.get(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL)
    # Prevents overlapping scans when one takes longer than the interval.
    update_lock = asyncio.Lock(loop=hass.loop)
    scanner.hass = hass
    # Initial scan of each mac we also tell about host name for config
    seen = set()  # type: Any

    @asyncio.coroutine
    def async_device_tracker_scan(now: dt_util.dt.datetime):
        """Handle interval matches."""
        if update_lock.locked():
            _LOGGER.warning(
                "Updating device list from %s took longer than the scheduled "
                "scan interval %s", platform, interval)
            return
        with (yield from update_lock):
            found_devices = yield from scanner.async_scan_devices()
        for mac in found_devices:
            if mac in seen:
                # Host name is only looked up the first time a MAC shows
                # up; it is needed for the known_devices.yaml entry.
                host_name = None
            else:
                host_name = yield from scanner.async_get_device_name(mac)
                seen.add(mac)
            kwargs = {
                'mac': mac,
                'host_name': host_name,
                'source_type': SOURCE_TYPE_ROUTER
            }
            zone_home = hass.states.get(zone.ENTITY_ID_HOME)
            if zone_home:
                # Router-seen devices are assumed at the home zone.
                kwargs['gps'] = [zone_home.attributes[ATTR_LATITUDE],
                                 zone_home.attributes[ATTR_LONGITUDE]]
                kwargs['gps_accuracy'] = 0
            hass.async_add_job(async_see_device(**kwargs))

    async_track_time_interval(hass, async_device_tracker_scan, interval)
    # Kick off an immediate first scan.
    hass.async_add_job(async_device_tracker_scan(None))
def update_config(path: str, dev_id: str, device: Device):
    """Append a single device entry to the known-devices YAML file."""
    entry = {
        ATTR_NAME: device.name,
        ATTR_MAC: device.mac,
        ATTR_ICON: device.icon,
        'picture': device.config_picture,
        'track': device.track,
        CONF_AWAY_HIDE: device.away_hide,
        'vendor': device.vendor,
    }
    document = {device.dev_id: entry}
    with open(path, 'a') as out:
        # Leading newline keeps the appended entry separated from the
        # previous document content.
        out.write('\n')
        out.write(dump(document))
def get_gravatar_for_email(email: str):
    """Return the URL of an 80px Gravatar for the given email address.

    Async friendly.
    """
    import hashlib
    # Gravatar hashes the lower-cased address; lowering the encoded
    # bytes matches the original behavior exactly.
    digest = hashlib.md5(email.encode('utf-8').lower()).hexdigest()
    return 'https://www.gravatar.com/avatar/{}.jpg?s=80&d=wavatar'.format(
        digest)
| 34.976159 | 79 | 0.627788 |
acf40353840ea77f8e5acae1a84f2c46d40ca88d | 405 | py | Python | doc2color/cli.py | soichiroota/doc2color | 8dd58f0dd894745f0c2e30ed354e2c898bda6a95 | [
"MIT"
] | null | null | null | doc2color/cli.py | soichiroota/doc2color | 8dd58f0dd894745f0c2e30ed354e2c898bda6a95 | [
"MIT"
] | 1 | 2021-12-13T20:29:14.000Z | 2021-12-13T20:29:14.000Z | doc2color/cli.py | soichiroota/doc2color | 8dd58f0dd894745f0c2e30ed354e2c898bda6a95 | [
"MIT"
] | null | null | null | """Console script for doc2color."""
import sys
import click
@click.command()
def main(args=None):
    """Console script for doc2color.

    Cookiecutter placeholder entry point; prints instructions and
    returns 0 (success) as the exit status.
    """
    click.echo("Replace this message by putting your code into "
               "doc2color.cli.main")
    click.echo("See click documentation at https://click.palletsprojects.com/")
    return 0


if __name__ == "__main__":
    sys.exit(main())  # pragma: no cover
acf403a4bc90224487f37a82772d687b11e8fb2b | 6,034 | py | Python | sqlalchemy_utils/types/choice.py | malun22/sqlalchemy-utils | b9cdfbb6f45942afb5df644f492706cf3075e801 | [
"BSD-3-Clause"
] | null | null | null | sqlalchemy_utils/types/choice.py | malun22/sqlalchemy-utils | b9cdfbb6f45942afb5df644f492706cf3075e801 | [
"BSD-3-Clause"
] | 1 | 2021-12-16T10:37:56.000Z | 2021-12-16T10:37:56.000Z | sqlalchemy_utils/types/choice.py | kod-kristoff/sqlalchemy-utils | 9cd0b478f16d327f3b6055e002d55bc83fa89439 | [
"BSD-3-Clause"
] | null | null | null | from enum import Enum
from sqlalchemy import types
from ..exceptions import ImproperlyConfigured
from .scalar_coercible import ScalarCoercible
class Choice(object):
    """A single selectable option: a stored *code* paired with a
    human-readable *value*.

    Equality and hashing are based on the code alone, so a Choice
    compares equal both to another Choice with the same code and to
    the raw code itself.
    """

    def __init__(self, code, value):
        self.code = code
        self.value = value

    def __repr__(self):
        # Same rendered text as str.format would produce.
        return 'Choice(code=%s, value=%s)' % (self.code, self.value)

    def __str__(self):
        return str(self.value)

    def __eq__(self, other):
        if isinstance(other, Choice):
            return self.code == other.code
        # Delegate to the other operand so plain codes compare equal.
        return other == self.code

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        return hash(self.code)
class ChoiceType(ScalarCoercible, types.TypeDecorator):
    """
    ChoiceType offers way of having fixed set of choices for given column. It
    could work with a list of tuple (a collection of key-value pairs), or
    integrate with :mod:`enum` in the standard library of Python 3.4+ (the
    enum34_ backported package on PyPI is compatible too for ``< 3.4``).

    .. _enum34: https://pypi.python.org/pypi/enum34

    Columns with ChoiceTypes are automatically coerced to Choice objects while
    a list of tuple been passed to the constructor. If a subclass of
    :class:`enum.Enum` is passed, columns will be coerced to :class:`enum.Enum`
    objects instead.

    ::

        class User(Base):
            TYPES = [
                (u'admin', u'Admin'),
                (u'regular-user', u'Regular user')
            ]

            __tablename__ = 'user'
            id = sa.Column(sa.Integer, primary_key=True)
            name = sa.Column(sa.Unicode(255))
            type = sa.Column(ChoiceType(TYPES))

        user = User(type=u'admin')
        user.type  # Choice(code='admin', value=u'Admin')

    Or::

        import enum

        class UserType(enum.Enum):
            admin = 1
            regular = 2

        class User(Base):
            __tablename__ = 'user'
            id = sa.Column(sa.Integer, primary_key=True)
            name = sa.Column(sa.Unicode(255))
            type = sa.Column(ChoiceType(UserType, impl=sa.Integer()))

        user = User(type=1)
        user.type  # <UserType.admin: 1>

    ChoiceType is very useful when the rendered values change based on user's
    locale:

    ::

        from babel import lazy_gettext as _

        class User(Base):
            TYPES = [
                (u'admin', _(u'Admin')),
                (u'regular-user', _(u'Regular user'))
            ]

            __tablename__ = 'user'
            id = sa.Column(sa.Integer, primary_key=True)
            name = sa.Column(sa.Unicode(255))
            type = sa.Column(ChoiceType(TYPES))

        user = User(type=u'admin')
        user.type  # Choice(code='admin', value=u'Admin')
        print(user.type)  # u'Admin'

    Or::

        from enum import Enum
        from babel import lazy_gettext as _

        class UserType(Enum):
            admin = 1
            regular = 2

        UserType.admin.label = _(u'Admin')
        UserType.regular.label = _(u'Regular user')

        class User(Base):
            __tablename__ = 'user'
            id = sa.Column(sa.Integer, primary_key=True)
            name = sa.Column(sa.Unicode(255))
            type = sa.Column(ChoiceType(UserType, impl=sa.Integer()))

        user = User(type=UserType.admin)
        user.type  # <UserType.admin: 1>
        print(user.type.label)  # u'Admin'
    """

    # Default storage type; overridden via the ``impl`` constructor arg.
    impl = types.Unicode(255)

    def __init__(self, choices, impl=None):
        # Freeze list input so the choice set cannot be mutated later.
        self.choices = tuple(choices) if isinstance(choices, list) else choices
        # An Enum subclass selects the enum-backed implementation;
        # anything else is treated as a key/value pair collection.
        if (
            Enum is not None and
            isinstance(choices, type) and
            issubclass(choices, Enum)
        ):
            self.type_impl = EnumTypeImpl(enum_class=choices)
        else:
            self.type_impl = ChoiceTypeImpl(choices=choices)
        if impl:
            self.impl = impl

    @property
    def python_type(self):
        return self.impl.python_type

    def _coerce(self, value):
        # Delegate coercion to the selected implementation.
        return self.type_impl._coerce(value)

    def process_bind_param(self, value, dialect):
        return self.type_impl.process_bind_param(value, dialect)

    def process_result_value(self, value, dialect):
        return self.type_impl.process_result_value(value, dialect)
class ChoiceTypeImpl(object):
    """Back ChoiceType with a plain key/value mapping of choices."""

    def __init__(self, choices):
        # An empty choice collection is a configuration error.
        if not choices:
            raise ImproperlyConfigured(
                'ChoiceType needs list of choices defined.'
            )
        self.choices_dict = dict(choices)

    def _coerce(self, value):
        # None and already-coerced Choice objects pass through untouched.
        if value is None or isinstance(value, Choice):
            return value
        return Choice(value, self.choices_dict[value])

    def process_bind_param(self, value, dialect):
        # Persist only the code; plain (non-Choice) values go in as-is.
        if value and isinstance(value, Choice):
            return value.code
        return value

    def process_result_value(self, value, dialect):
        # Wrap truthy stored codes back into Choice objects.
        if not value:
            return value
        return Choice(value, self.choices_dict[value])
class EnumTypeImpl(object):
    """Back ChoiceType with a standard-library ``enum.Enum`` class."""

    def __init__(self, enum_class):
        # Enum is None only on old Pythons without the enum34 backport.
        if Enum is None:
            raise ImproperlyConfigured(
                "'enum34' package is required to use 'EnumType' in Python "
                "< 3.4"
            )
        if not issubclass(enum_class, Enum):
            raise ImproperlyConfigured(
                "EnumType needs a class of enum defined."
            )
        self.enum_class = enum_class

    def _coerce(self, value):
        # Coerce raw stored values into enum members; None stays None.
        return None if value is None else self.enum_class(value)

    def process_bind_param(self, value, dialect):
        # Persist the underlying enum value; the enum call validates it.
        if value is None:
            return None
        member = self.enum_class(value)
        return member.value

    def process_result_value(self, value, dialect):
        return self._coerce(value)
| 26.581498 | 79 | 0.588499 |
acf403c59dcf19ef1a043becef3ad94c61c1e297 | 1,988 | py | Python | strain_finder/app.py | Jroc561/strain_finder | 95e32aa027d5d482c3a3f144b2338c3c8fd3df02 | [
"MIT"
] | null | null | null | strain_finder/app.py | Jroc561/strain_finder | 95e32aa027d5d482c3a3f144b2338c3c8fd3df02 | [
"MIT"
] | null | null | null | strain_finder/app.py | Jroc561/strain_finder | 95e32aa027d5d482c3a3f144b2338c3c8fd3df02 | [
"MIT"
] | null | null | null | from os import getenv, getcwd
import pandas as pd
from flask import Flask, flash, redirect, render_template, request, session, abort
from pandas.core.base import DataError
from .models import flavors, effects, ailments, categories, columns
import joblib
model = joblib.load('predictor.joblib')
weed = pd.read_csv('Data/weed.csv')
def order_data(c, a, e, f):
    """Build a one-row feature frame: 0 everywhere, 1 for each selected
    category/ailment/effect/flavor column."""
    frame = pd.DataFrame(data=[[0] * len(columns)], columns=columns)
    selected = c + a + e + f
    for feature in selected:
        frame[feature] = 1
    return frame
def create_app():
    """Application factory: build the Flask app and register its routes."""
    app = Flask(__name__, )

    @app.route('/')
    def confirm():
        # Age/consent gate shown before the main page.
        return render_template('confirm.html', title="Confirm")

    @app.route('/home', methods=['GET', 'POST'])
    def home():
        # Selection form populated from the option lists in .models.
        return render_template('home.html', title="Home", categories=categories,
                               ailments=ailments, effects=effects, flavors=flavors)

    @app.route('/results', methods=['GET', 'POST'])
    def results():
        # Collect the multi-select form fields, encode them as a one-hot
        # row, and ask the nearest-neighbors model for the 5 closest
        # strains in the weed dataset.
        categories1 = request.form.getlist('categories')
        ailments1 = request.form.getlist('ailments')
        effects1 = request.form.getlist('effects')
        flavors1 = request.form.getlist('flavors')
        df = order_data(categories1, ailments1, effects1, flavors1)
        out = model.kneighbors([df.iloc[0].values])
        # out[1] holds neighbor indices; take the first five.
        indexs = out[1].flat[0:5].tolist()
        pred = weed.iloc[indexs]
        print(pred[['name', 'brand', 'strain', 'rating']])
        return render_template('results.html', title="Results", message="Based on the selected inputs, we suggest:",
                               results=[f"{name} by {brand} (Strain: {strain})" for name, brand, strain,
                                        rating in pred[['name', 'brand', 'strain', 'rating']].values])

    @app.route('/about')
    def about():
        return render_template('about.html', title="About")

    @app.route('/insights')
    def insights():
        return render_template('insights.html', title="Insights")

    return app
| 34.275862 | 117 | 0.61167 |
acf4040562c002a6ef90d40e924d63fddcb16ac3 | 1,252 | py | Python | osf/management/commands/update_auth_groups.py | yuanyuan-deng/RDM-osf.io | e1c54e97c898d26406d71129db7e4baf82802224 | [
"Apache-2.0"
] | 1 | 2019-12-23T04:30:20.000Z | 2019-12-23T04:30:20.000Z | osf/management/commands/update_auth_groups.py | yuanyuan-deng/RDM-osf.io | e1c54e97c898d26406d71129db7e4baf82802224 | [
"Apache-2.0"
] | 80 | 2015-02-25T15:12:15.000Z | 2015-06-11T18:44:55.000Z | osf/management/commands/update_auth_groups.py | yuanyuan-deng/RDM-osf.io | e1c54e97c898d26406d71129db7e4baf82802224 | [
"Apache-2.0"
] | 1 | 2015-08-28T20:00:52.000Z | 2015-08-28T20:00:52.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
from django.core.management.base import BaseCommand
from django.db import transaction
from osf.models.mixins import ReviewProviderMixin
logger = logging.getLogger(__name__)
class Command(BaseCommand):
    """Add/update reviews auth groups for all reviews providers"""

    def add_arguments(self, parser):
        super(Command, self).add_arguments(parser)
        # Optional flag: perform the full run, then roll everything back.
        parser.add_argument(
            '--dry',
            action='store_true',
            dest='dry_run',
            help='Run, then roll back changes to db',
        )

    def handle(self, *args, **options):
        dry_run = options.get('dry_run')
        # One transaction wraps every provider update so a dry run can
        # undo all of them by raising at the end.
        with transaction.atomic():
            all_providers = (
                provider
                for subclass in ReviewProviderMixin.__subclasses__()
                for provider in subclass.objects.all()
            )
            for provider in all_providers:
                logger.info('Updating auth groups for review provider %s', provider)
                provider.update_group_permissions()
            if dry_run:
                # When running in dry mode force the transaction to rollback
                raise Exception('Abort Transaction - Dry Run')
acf4042bc23ee0397c110353250bbff56ac236e8 | 345 | py | Python | src/lambda_codebase/initial_commit/bootstrap_repository/adf-build/shared/python/tests/test_s3.py | cattz/aws-deployment-framework | 089ac40b5a91b4d1ae3151be601a818244eaf1c6 | [
"Apache-2.0"
] | 3 | 2019-03-07T00:33:36.000Z | 2020-01-16T02:32:52.000Z | src/lambda_codebase/initial_commit/bootstrap_repository/adf-build/shared/python/tests/test_s3.py | cattz/aws-deployment-framework | 089ac40b5a91b4d1ae3151be601a818244eaf1c6 | [
"Apache-2.0"
] | null | null | null | src/lambda_codebase/initial_commit/bootstrap_repository/adf-build/shared/python/tests/test_s3.py | cattz/aws-deployment-framework | 089ac40b5a91b4d1ae3151be601a818244eaf1c6 | [
"Apache-2.0"
] | 1 | 2019-11-12T10:08:32.000Z | 2019-11-12T10:08:32.000Z | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
# pylint: skip-file
import os
import boto3
from pytest import fixture
from stubs import stub_s3
from mock import Mock
from s3 import S3
@fixture
def cls():
cls = S3(
'us-east-1',
'some_bucket'
)
return cls
| 16.428571 | 73 | 0.686957 |
acf4049231debba3c1e9ed75f4a433f2dbe50e62 | 11,135 | py | Python | open_spiel/python/mfg/games/crowd_modelling.py | ajain-23/open_spiel | a6a0f0129571bb6f0e6832e870e299663fb7cdd5 | [
"Apache-2.0"
] | 3,167 | 2019-08-27T06:50:30.000Z | 2022-03-31T22:33:48.000Z | open_spiel/python/mfg/games/crowd_modelling.py | AliBeikmohammadi/open_spiel | 38941dee3beb52ffdb134b66f420a758634d9a20 | [
"Apache-2.0"
] | 650 | 2019-08-27T16:24:09.000Z | 2022-03-30T19:41:09.000Z | open_spiel/python/mfg/games/crowd_modelling.py | AliBeikmohammadi/open_spiel | 38941dee3beb52ffdb134b66f420a758634d9a20 | [
"Apache-2.0"
] | 774 | 2019-08-27T10:36:04.000Z | 2022-03-29T15:44:42.000Z | # Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as python3
"""Mean Field Crowd Modelling, implemented in Python.
This is a demonstration of implementing a mean field game in Python.
Fictitious play for mean field games: Continuous time analysis and applications,
Perrin & al. 2019 (https://arxiv.org/abs/2007.03458). This game corresponds
to the game in section 4.2.
"""
from typing import Any, List, Mapping
import numpy as np
from open_spiel.python.observation import IIGObserverForPublicInfoGame
import pyspiel
_NUM_PLAYERS = 1
_SIZE = 10
_HORIZON = 10
_NUM_ACTIONS = 3
_NUM_CHANCE = 3
_EPSILON = 10**(-25)
_DEFAULT_PARAMS = {"size": _SIZE, "horizon": _HORIZON}
_GAME_TYPE = pyspiel.GameType(
short_name="python_mfg_crowd_modelling",
long_name="Python Mean Field Crowd Modelling",
dynamics=pyspiel.GameType.Dynamics.MEAN_FIELD,
chance_mode=pyspiel.GameType.ChanceMode.EXPLICIT_STOCHASTIC,
information=pyspiel.GameType.Information.PERFECT_INFORMATION,
utility=pyspiel.GameType.Utility.GENERAL_SUM,
reward_model=pyspiel.GameType.RewardModel.REWARDS,
max_num_players=_NUM_PLAYERS,
min_num_players=_NUM_PLAYERS,
provides_information_state_string=True,
provides_information_state_tensor=False,
provides_observation_string=True,
provides_observation_tensor=True,
parameter_specification=_DEFAULT_PARAMS)
class MFGCrowdModellingGame(pyspiel.Game):
  """A Mean Field Crowd Modelling game.

  A game starts by an initial chance node that select the initial state
  of the MFG.
  Then the game sequentially alternates between:
    - An action selection node (Where the player Id >= 0)
    - A chance node (the player id is pyspiel.PlayerId.CHANCE)
    - A Mean Field node (the player id is pyspiel.PlayerId.MEAN_FIELD)
  """

  # pylint:disable=dangerous-default-value
  def __init__(self, params: Mapping[str, Any] = _DEFAULT_PARAMS):
    """Builds the game from its parameters.

    Args:
      params: mapping with keys "size" (number of positions on the 1-D
        torus) and "horizon" (number of time steps).
    """
    game_info = pyspiel.GameInfo(
        num_distinct_actions=_NUM_ACTIONS,
        # The initial chance node has `size` outcomes; later chance nodes
        # have _NUM_CHANCE outcomes, hence the max.
        max_chance_outcomes=max(params["size"], _NUM_CHANCE),
        num_players=_NUM_PLAYERS,
        min_utility=-np.inf,
        max_utility=+np.inf,
        utility_sum=0.0,
        max_game_length=params["horizon"])
    super().__init__(_GAME_TYPE, game_info, params)
    self.size = params["size"]
    self.horizon = params["horizon"]

  def new_initial_state(self):
    """Returns a state corresponding to the start of a game."""
    return MFGCrowdModellingState(self)

  def make_py_observer(self, iig_obs_type=None, params=None):
    """Returns an object used for observing game state."""
    if ((iig_obs_type is None) or
        (iig_obs_type.public_info and not iig_obs_type.perfect_recall)):
      return Observer(params, self)
    return IIGObserverForPublicInfoGame(iig_obs_type, params)

  def max_chance_nodes_in_history(self):
    """Maximum chance nodes in game history (initial + one per step)."""
    return self.horizon + 1
class MFGCrowdModellingState(pyspiel.State):
  """A Mean Field Crowd Modelling state.

  The state cycles through player -> chance -> mean-field nodes at each
  time step; position lives on a 1-D torus of `size` cells.
  """

  # Maps legal actions to the corresponding move along the 1-D axis of the game.
  _ACTION_TO_MOVE = {0: -1, 1: 0, 2: 1}
  # Action that corresponds to no displacement.
  _NEUTRAL_ACTION = 1

  def __init__(self, game):
    """Constructor; should only be called by Game.new_initial_state."""
    super().__init__(game)
    self._is_chance_init = True  # is true for the first state of the game.
    self._player_id = pyspiel.PlayerId.CHANCE
    self._x = None  # position on the torus; None until the init chance node.
    self._t = 0  # current time step.
    # We initialize last_action to the neutral action. This makes sure
    # that the first reward does not include any displacement penalty.
    self._last_action = self._NEUTRAL_ACTION
    self.size = game.size
    self.horizon = game.horizon
    self.return_value = 0.0

    # Represents the current probability distribution over game states.
    # Initialized with a uniform distribution.
    self._distribution = [1. / self.size for i in range(self.size)]

  @property
  def x(self):
    # Current position on the torus (int in [0, size) once initialized).
    return self._x

  @property
  def t(self):
    # Current time step (int in [0, horizon]).
    return self._t

  def state_to_str(self, x, t, player_id=pyspiel.PlayerId.DEFAULT_PLAYER_ID):
    """A string that uniquely identify a triplet x, t, player_id."""
    if self._is_chance_init:
      return "initial"
    if player_id == pyspiel.PlayerId.DEFAULT_PLAYER_ID:
      return str((x, t))
    if player_id == pyspiel.PlayerId.MEAN_FIELD:
      return str((x, t)) + "_a"
    if player_id == pyspiel.PlayerId.CHANCE:
      return str((x, t)) + "_a_mu"
    raise ValueError(
        "player_id is not mean field, chance or default player id.")

  # OpenSpiel (PySpiel) API functions are below. This is the standard set that
  # should be implemented by every perfect-information sequential-move game.

  def _legal_actions(self, player):
    """Returns a list of legal actions for player and MFG nodes."""
    if player == pyspiel.PlayerId.MEAN_FIELD:
      return []
    if (player == pyspiel.PlayerId.DEFAULT_PLAYER_ID
        and player == self.current_player()):
      return [0, 1, 2]
    raise ValueError(f"Unexpected player {player}. "
                     "Expected a mean field or current player 0.")

  def chance_outcomes(self):
    """Returns the possible chance outcomes and their probabilities."""
    if self._is_chance_init:
      # Initial chance node: sample a starting position from _distribution.
      return list(enumerate(self._distribution))
    # Subsequent chance nodes: uniform noise over the three moves.
    return [(0, 1. / 3.), (1, 1. / 3.), (2, 1. / 3.)]

  def _apply_action(self, action):
    """Applies the specified action to the state."""
    if self._player_id == pyspiel.PlayerId.MEAN_FIELD:
      raise ValueError(
          "_apply_action should not be called at a MEAN_FIELD state.")
    # Accumulate reward earned at the state we are leaving.
    self.return_value += self._rewards()

    if self._is_chance_init:
      # Here the action is between 0 and self.size - 1
      if action < 0 or action >= self.size:
        raise ValueError(
            "The action is between 0 and self.size - 1 at an init chance node")
      self._x = action
      self._is_chance_init = False
      self._player_id = 0
    elif self._player_id == pyspiel.PlayerId.CHANCE:
      # Here the action is between 0 and 2
      if action < 0 or action > 2:
        raise ValueError(
            "The action is between 0 and 2 at any chance node")
      # Noise moves the agent; time advances only after the chance node.
      self._x = (self.x + self._ACTION_TO_MOVE[action]) % self.size
      self._t += 1
      self._player_id = pyspiel.PlayerId.MEAN_FIELD
    elif self._player_id == 0:
      # Here the action is between 0 and 2
      if action < 0 or action > 2:
        raise ValueError(
            "The action is between 0 and 2 at any chance node")
      self._x = (self.x + self._ACTION_TO_MOVE[action]) % self.size
      self._last_action = action
      self._player_id = pyspiel.PlayerId.CHANCE

  def _action_to_string(self, player, action):
    """Action -> string."""
    del player
    if self.is_chance_node() and self._is_chance_init:
      return f"init_state={action}"
    return str(self._ACTION_TO_MOVE[action])

  def distribution_support(self):
    """return a list of state string."""
    return [
        self.state_to_str(
            i, self.t, player_id=pyspiel.PlayerId.MEAN_FIELD)
        for i in range(self.size)
    ]

  def update_distribution(self, distribution):
    """This function is central and specific to the logic of the MFG.

    Args:
      distribution: a distribution to register.
        - function should be called when the node is in MEAN_FIELD state.
        - distribution are probabilities that correspond to each game state
        given by distribution_support.
    """
    if self._player_id != pyspiel.PlayerId.MEAN_FIELD:
      raise ValueError(
          "update_distribution should only be called at a MEAN_FIELD state.")
    self._distribution = distribution.copy()
    self._player_id = pyspiel.PlayerId.DEFAULT_PLAYER_ID

  def is_terminal(self):
    """Returns True if the game is over."""
    return self.t >= self.horizon

  def current_player(self):
    """Returns id of the next player to move, or TERMINAL if game is over."""
    if self.is_terminal():
      return pyspiel.PlayerId.TERMINAL
    return self._player_id

  def _rewards(self):
    """Reward for the player for this state."""
    if self._player_id == 0:
      # r_x: attraction to the center cell; r_a: movement penalty;
      # r_mu: crowd-aversion term (log of the local density).
      r_x = 1 - (1.0 * np.abs(self.x - self.size // 2)) / (self.size // 2)
      r_a = -(1.0 * np.abs(self._ACTION_TO_MOVE[self._last_action])) / self.size
      r_mu = - np.log(self._distribution[self.x] + _EPSILON)
      return r_x + r_a + r_mu
    return 0.0

  def rewards(self) -> List[float]:
    """Rewards for all players."""
    # For now, only single-population (single-player) mean field games
    # are supported.
    return [self._rewards()]

  def _returns(self):
    """Returns is the sum of all payoffs collected so far."""
    return self.return_value + self._rewards()

  def returns(self) -> List[float]:
    """Returns for all players."""
    # For now, only single-population (single-player) mean field games
    # are supported.
    return [self._returns()]

  def __str__(self):
    """A string that uniquely identify the current state."""
    return self.state_to_str(self.x, self.t, player_id=self._player_id)
class Observer:
  """Observer, conforming to the PyObserver interface (see observation.py).

  Encodes a state as the concatenation of a one-hot position vector
  (length `size`) and a one-hot time vector (length `horizon + 1`).
  """

  def __init__(self, params, game):
    """Initializes an empty observation tensor."""
    del params
    self.size = game.size
    self.horizon = game.horizon
    # +1 to allow t == horizon.
    self.tensor = np.zeros(self.size + self.horizon + 1, np.float32)
    # Named sub-views: "x" is the position slice, "t" the time slice.
    self.dict = {"x": self.tensor[:self.size], "t": self.tensor[self.size:]}

  def set_from(self, state: MFGCrowdModellingState, player: int):
    """Updates `tensor` and `dict` to reflect `state` from PoV of `player`."""
    del player
    # We update the observation via the shaped tensor since indexing is more
    # convenient than with the 1-D tensor. Both are views onto the same memory.
    self.tensor.fill(0)
    # state.x is None for the initial (blank) state, don't set any
    # position bit in that case.
    if state.x is not None:
      if not 0 <= state.x < self.size:
        raise ValueError(
            f"Expected {state} x position to be in [0, {self.size})")
      self.dict["x"][state.x] = 1
    if not 0 <= state.t <= self.horizon:
      raise ValueError(f"Expected {state} time to be in [0, {self.horizon}]")
    self.dict["t"][state.t] = 1

  def string_from(self, state, player):
    """Observation of `state` from the PoV of `player`, as a string."""
    del player
    return str(state)
# Register the game with the OpenSpiel library
pyspiel.register_game(_GAME_TYPE, MFGCrowdModellingGame)
| 36.508197 | 80 | 0.691064 |
acf4075c172006a4ed2399c57e4de301eb59d854 | 4,709 | py | Python | priority_queue/min_priority_queue.py | OmarThinks/python-algorithms-project | a49be94201e73b26b51911fd731770197dd6ad3d | [
"MIT"
] | 1 | 2021-10-17T12:01:43.000Z | 2021-10-17T12:01:43.000Z | priority_queue/min_priority_queue.py | OmarThinks/python-algorithms-project | a49be94201e73b26b51911fd731770197dd6ad3d | [
"MIT"
] | null | null | null | priority_queue/min_priority_queue.py | OmarThinks/python-algorithms-project | a49be94201e73b26b51911fd731770197dd6ad3d | [
"MIT"
] | null | null | null | from pprint import pprint as pp
from random import randint
import unittest
from math import log
import json
assertEqual = unittest.TestCase().assertEqual
class MinPriorityQueue():
    """Array-backed min-priority queue (binary min-heap).

    The heap is stored level by level in ``self.priority_queue``; the
    children of node ``i`` live at indices ``2*i + 1`` and ``2*i + 2``,
    and every parent is <= its children.
    """

    def __init__(self, priority_queue: list):
        """Build the heap from an iterable of comparable values.

        Sorting the input yields a valid min-heap layout directly.
        """
        self.priority_queue = sorted(priority_queue)
        self.length = len(self.priority_queue)

    def get_parent_index(self, node_index, do_assertions=True):
        """Return the parent index of ``node_index`` (None for the root).

        Fixed: the original guard ``node_index == 0 or None`` only tested
        the first operand; ``or None`` was always falsy.
        """
        if node_index == 0 or node_index is None:
            return None
        parent_index = (node_index + 1) // 2 - 1
        if do_assertions:
            # Sanity check: the parent must list us among its children.
            assert node_index in self.get_children_indices(
                parent_index, do_assertions=False)
        return parent_index

    def get_children_indices(self, node_index, do_assertions=True):
        """Return ``[left, right]`` child indices (may be out of range)."""
        left_child_index = node_index * 2 + 1
        right_child_index = node_index * 2 + 2
        if do_assertions:
            # Cross-check against get_parent_index to catch index math bugs.
            assert node_index == self.get_parent_index(
                left_child_index, do_assertions=False)
            assert node_index == self.get_parent_index(
                right_child_index, do_assertions=False)
        return [left_child_index, right_child_index]

    def get_node_depth(self, node_index):
        """Depth of ``node_index`` in the tree (the root has depth 0)."""
        return int(log(node_index + 1, 2))

    def get_node(self, node_index):
        """Value stored at ``node_index``, or None if the index is invalid."""
        if node_index is None:
            return None
        if (node_index >= self.length) or (node_index < 0):
            return None
        return self.priority_queue[node_index]

    def get_structure(self, node_index=0):
        """Return a nested dict/value rendering of the subtree at ``node_index``."""
        node_value = self.get_node(node_index)
        if node_value is None:  # base case: empty slot / past the end
            return None
        left_child_index, right_child_index = self.get_children_indices(node_index)
        left_child_value = self.get_node(left_child_index)
        right_child_value = self.get_node(right_child_index)
        if left_child_value is None and right_child_value is None:
            return node_value  # leaf: render as a bare value
        return {str(node_value): [
            self.get_structure(left_child_index),
            self.get_structure(right_child_index)]
        }

    def print(self):
        """Pretty-print the heap structure as indented JSON."""
        print(json.dumps(self.get_structure(), indent=4))

    def find_node(self, node_value, root_indices=None):
        """Breadth-first search; return the index of ``node_value`` or False.

        Searching for ``None`` returns the first empty slot in BFS order,
        which ``insert`` uses to locate where the next value belongs.
        (Fixed: removed the unreachable recursive code that followed the
        final ``return`` in the original, and replaced the mutable default
        argument ``[0]`` with a None sentinel.)
        """
        if root_indices is None:
            root_indices = [0]
        children_indices = []
        for parent_index in root_indices:
            parent_value = self.get_node(parent_index)
            if parent_value == node_value:
                return parent_index
            if parent_value is None:
                continue  # out-of-range slot: no children to enqueue
            children_indices.extend(self.get_children_indices(parent_index))
        if len(children_indices) == 0:
            return False  # exhausted every level without a match
        return self.find_node(node_value, children_indices)

    def push_up(self, node_index):
        """Restore the heap property by sifting ``node_index`` toward the root."""
        node_value = self.get_node(node_index)
        parent_index = self.get_parent_index(node_index)
        parent_value = self.get_node(parent_index)
        if parent_value is None:
            return  # reached the root
        # >= (not >) so equal keys are not swapped pointlessly.
        if node_value >= parent_value:
            return  # heap property already holds here
        self.priority_queue[node_index], self.priority_queue[parent_index] = (
            parent_value, node_value)
        self.push_up(parent_index)

    def insert(self, value):
        """Insert ``value`` while keeping the min-heap property."""
        empty_index = self.find_node(None)
        if empty_index >= len(self.priority_queue):
            self.priority_queue.append(value)
        else:
            self.priority_queue[empty_index] = value
        self.length += 1
        self.push_up(empty_index)
# Smoke test / demo: build a heap and check index math, lookup and insert.
my_mpq = MinPriorityQueue([7654,1324,12,1,2,75,7,6,8,3])
print(my_mpq.priority_queue)
my_mpq.print()
#pp([my_mpq.get_parent_index(node) for node in range(my_mpq.length)])
#pp([my_mpq.get_children_indices(node) for node in range(my_mpq.length)])
# Depths follow floor(log2(i + 1)): one root, two at depth 1, four at depth 2.
assertEqual(my_mpq.get_node_depth(0),0)
assertEqual(my_mpq.get_node_depth(1),1)
assertEqual(my_mpq.get_node_depth(2),1)
assertEqual(my_mpq.get_node_depth(3),2)
assertEqual(my_mpq.get_node_depth(4),2)
assertEqual(my_mpq.get_node_depth(5),2)
assertEqual(my_mpq.get_node_depth(6),2)
assertEqual(my_mpq.get_node_depth(7),3)
# Index 9 is the last valid slot; 10 is out of range and yields None.
assertEqual(my_mpq.get_node(9), my_mpq.priority_queue[my_mpq.length-1])
assertEqual(my_mpq.get_node(10), None)
# BFS lookup: present value -> index, absent value -> False.
assertEqual(my_mpq.find_node(3), 2)
assertEqual(my_mpq.find_node(70000), False)
# Inserts must keep the heap property (visible in the printed structure).
my_mpq.insert(900)
my_mpq.insert(1900)
my_mpq.insert(0)
my_mpq.print()
acf4075feb94ab8bf195b912071384b147e6ffec | 92,695 | py | Python | App/dist/coffeegrindsize.app/Contents/Resources/lib/python3.7/pandas/tests/arithmetic/test_datetime64.py | tanerqy/coffeegrindsize | 57f6c48213afda2704478b3fc2d0749332ca9d0e | [
"MIT"
] | 6,989 | 2017-07-18T06:23:18.000Z | 2022-03-31T15:58:36.000Z | App/dist/coffeegrindsize.app/Contents/Resources/lib/python3.7/pandas/tests/arithmetic/test_datetime64.py | tanerqy/coffeegrindsize | 57f6c48213afda2704478b3fc2d0749332ca9d0e | [
"MIT"
] | 1,978 | 2017-07-18T09:17:58.000Z | 2022-03-31T14:28:43.000Z | App/dist/coffeegrindsize.app/Contents/Resources/lib/python3.7/pandas/tests/arithmetic/test_datetime64.py | tanerqy/coffeegrindsize | 57f6c48213afda2704478b3fc2d0749332ca9d0e | [
"MIT"
] | 1,228 | 2017-07-18T09:03:13.000Z | 2022-03-29T05:57:40.000Z | # -*- coding: utf-8 -*-
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import datetime, timedelta
from itertools import product, starmap
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.compat.numpy import np_datetime64_compat
from pandas.errors import NullFrequencyError, PerformanceWarning
import pandas as pd
from pandas import (
DatetimeIndex, NaT, Period, Series, Timedelta, TimedeltaIndex, Timestamp,
date_range)
from pandas.core.indexes.datetimes import _to_M8
import pandas.util.testing as tm
def assert_all(obj):
    """Assert that every element of *obj* is truthy.

    Test helper that applies ``.all()`` the appropriate number of times:
    once for a Series, twice (one reduction per axis) for a DataFrame.
    """
    reduced = obj.all()
    if isinstance(obj, pd.DataFrame):
        reduced = reduced.all()
    assert reduced
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64DataFrameComparison(object):
    """Comparisons on DataFrames holding datetime64[ns] / datetime64[ns, tz]."""

    @pytest.mark.parametrize('timestamps', [
        [pd.Timestamp('2012-01-01 13:00:00+00:00')] * 2,
        [pd.Timestamp('2012-01-01 13:00:00')] * 2])
    def test_tz_aware_scalar_comparison(self, timestamps):
        # GH#15966
        # Comparing a datetime column (naive or tz-aware) against an int
        # must not raise; it evaluates elementwise to False.
        df = pd.DataFrame({'test': timestamps})
        expected = pd.DataFrame({'test': [False, False]})
        tm.assert_frame_equal(df == -1, expected)

    def test_dt64_nat_comparison(self):
        # GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
        ts = pd.Timestamp.now()
        df = pd.DataFrame([ts, pd.NaT])
        expected = pd.DataFrame([True, False])

        result = df == ts
        tm.assert_frame_equal(result, expected)
class TestDatetime64SeriesComparison(object):
    """Comparisons on Series of datetime64 data vs scalars, NaT and arrays."""

    # TODO: moved from tests.series.test_operators; needs cleanup
    @pytest.mark.parametrize('pair', [
        ([pd.Timestamp('2011-01-01'), NaT, pd.Timestamp('2011-01-03')],
         [NaT, NaT, pd.Timestamp('2011-01-03')]),
        ([pd.Timedelta('1 days'), NaT, pd.Timedelta('3 days')],
         [NaT, NaT, pd.Timedelta('3 days')]),
        ([pd.Period('2011-01', freq='M'), NaT,
          pd.Period('2011-03', freq='M')],
         [NaT, NaT, pd.Period('2011-03', freq='M')]),
    ])
    @pytest.mark.parametrize('reverse', [True, False])
    @pytest.mark.parametrize('box', [Series, pd.Index])
    @pytest.mark.parametrize('dtype', [None, object])
    def test_nat_comparisons(self, dtype, box, reverse, pair):
        # NaT entries compare False for ==/</>= etc. and True only for !=.
        l, r = pair
        if reverse:
            # add lhs / rhs switched data
            l, r = r, l

        left = Series(l, dtype=dtype)
        right = box(r, dtype=dtype)

        # Series, Index
        expected = Series([False, False, True])
        tm.assert_series_equal(left == right, expected)

        expected = Series([True, True, False])
        tm.assert_series_equal(left != right, expected)

        expected = Series([False, False, False])
        tm.assert_series_equal(left < right, expected)

        expected = Series([False, False, False])
        tm.assert_series_equal(left > right, expected)

        expected = Series([False, False, True])
        tm.assert_series_equal(left >= right, expected)

        expected = Series([False, False, True])
        tm.assert_series_equal(left <= right, expected)

    def test_comparison_invalid(self, box_with_array):
        # GH#4968
        # invalid date/int comparisons: == / != work elementwise, ordering
        # comparisons raise TypeError.
        xbox = box_with_array if box_with_array is not pd.Index else np.ndarray

        ser = Series(range(5))
        ser2 = Series(pd.date_range('20010101', periods=5))

        ser = tm.box_expected(ser, box_with_array)
        ser2 = tm.box_expected(ser2, box_with_array)

        for (x, y) in [(ser, ser2), (ser2, ser)]:

            result = x == y
            expected = tm.box_expected([False] * 5, xbox)
            tm.assert_equal(result, expected)

            result = x != y
            expected = tm.box_expected([True] * 5, xbox)
            tm.assert_equal(result, expected)

            with pytest.raises(TypeError):
                x >= y
            with pytest.raises(TypeError):
                x > y
            with pytest.raises(TypeError):
                x < y
            with pytest.raises(TypeError):
                x <= y

    @pytest.mark.parametrize('data', [
        [Timestamp('2011-01-01'), NaT, Timestamp('2011-01-03')],
        [Timedelta('1 days'), NaT, Timedelta('3 days')],
        [Period('2011-01', freq='M'), NaT, Period('2011-03', freq='M')]
    ])
    @pytest.mark.parametrize('dtype', [None, object])
    def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
        # Comparing against the NaT scalar: only != is elementwise True.
        if box_with_array is tm.to_array and dtype is object:
            # dont bother testing ndarray comparison methods as this fails
            # on older numpys (since they check object identity)
            return

        xbox = box_with_array if box_with_array is not pd.Index else np.ndarray

        left = Series(data, dtype=dtype)
        left = tm.box_expected(left, box_with_array)

        expected = [False, False, False]
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(left == NaT, expected)
        tm.assert_equal(NaT == left, expected)

        expected = [True, True, True]
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(left != NaT, expected)
        tm.assert_equal(NaT != left, expected)

        expected = [False, False, False]
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(left < NaT, expected)
        tm.assert_equal(NaT > left, expected)
        tm.assert_equal(left <= NaT, expected)
        tm.assert_equal(NaT >= left, expected)

        tm.assert_equal(left > NaT, expected)
        tm.assert_equal(NaT < left, expected)
        tm.assert_equal(left >= NaT, expected)
        tm.assert_equal(NaT <= left, expected)

    def test_series_comparison_scalars(self):
        # Scalar comparisons broadcast elementwise over the Series.
        series = Series(date_range('1/1/2000', periods=10))

        val = datetime(2000, 1, 4)
        result = series > val
        expected = Series([x > val for x in series])
        tm.assert_series_equal(result, expected)

        val = series[5]
        result = series > val
        expected = Series([x > val for x in series])
        tm.assert_series_equal(result, expected)

    def test_dt64_ser_cmp_date_warning(self):
        # https://github.com/pandas-dev/pandas/issues/21359
        # Remove this test and enble invalid test below
        ser = pd.Series(pd.date_range('20010101', periods=10), name='dates')
        date = ser.iloc[0].to_pydatetime().date()

        with tm.assert_produces_warning(FutureWarning) as m:
            result = ser == date
        expected = pd.Series([True] + [False] * 9, name='dates')
        tm.assert_series_equal(result, expected)
        assert "Comparing Series of datetimes " in str(m[0].message)
        assert "will not compare equal" in str(m[0].message)

        with tm.assert_produces_warning(FutureWarning) as m:
            result = ser != date
        tm.assert_series_equal(result, ~expected)
        assert "will not compare equal" in str(m[0].message)

        with tm.assert_produces_warning(FutureWarning) as m:
            result = ser <= date
        tm.assert_series_equal(result, expected)
        assert "a TypeError will be raised" in str(m[0].message)

        with tm.assert_produces_warning(FutureWarning) as m:
            result = ser < date
        tm.assert_series_equal(result, pd.Series([False] * 10, name='dates'))
        assert "a TypeError will be raised" in str(m[0].message)

        with tm.assert_produces_warning(FutureWarning) as m:
            result = ser >= date
        tm.assert_series_equal(result, pd.Series([True] * 10, name='dates'))
        assert "a TypeError will be raised" in str(m[0].message)

        with tm.assert_produces_warning(FutureWarning) as m:
            result = ser > date
        tm.assert_series_equal(result, pd.Series([False] + [True] * 9,
                                                 name='dates'))
        assert "a TypeError will be raised" in str(m[0].message)

    @pytest.mark.skip(reason="GH#21359")
    def test_dt64ser_cmp_date_invalid(self, box_with_array):
        # GH#19800 datetime.date comparison raises to
        # match DatetimeIndex/Timestamp.  This also matches the behavior
        # of stdlib datetime.datetime
        ser = pd.date_range('20010101', periods=10)
        date = ser.iloc[0].to_pydatetime().date()

        ser = tm.box_expected(ser, box_with_array)
        assert not (ser == date).any()
        assert (ser != date).all()
        with pytest.raises(TypeError):
            ser > date
        with pytest.raises(TypeError):
            ser < date
        with pytest.raises(TypeError):
            ser >= date
        with pytest.raises(TypeError):
            ser <= date

    @pytest.mark.parametrize("left,right", [
        ("lt", "gt"),
        ("le", "ge"),
        ("eq", "eq"),
        ("ne", "ne"),
    ])
    def test_timestamp_compare_series(self, left, right):
        # see gh-4982
        # Make sure we can compare Timestamps on the right AND left hand side.
        # `left` and `right` are mirrored operator names so both orderings
        # are exercised against the same expectation.
        ser = pd.Series(pd.date_range("20010101", periods=10), name="dates")
        s_nat = ser.copy(deep=True)

        ser[0] = pd.Timestamp("nat")
        ser[3] = pd.Timestamp("nat")

        left_f = getattr(operator, left)
        right_f = getattr(operator, right)

        # No NaT
        expected = left_f(ser, pd.Timestamp("20010109"))
        result = right_f(pd.Timestamp("20010109"), ser)
        tm.assert_series_equal(result, expected)

        # NaT
        expected = left_f(ser, pd.Timestamp("nat"))
        result = right_f(pd.Timestamp("nat"), ser)
        tm.assert_series_equal(result, expected)

        # Compare to Timestamp with series containing NaT
        expected = left_f(s_nat, pd.Timestamp("20010109"))
        result = right_f(pd.Timestamp("20010109"), s_nat)
        tm.assert_series_equal(result, expected)

        # Compare to NaT with series containing NaT
        expected = left_f(s_nat, pd.Timestamp("nat"))
        result = right_f(pd.Timestamp("nat"), s_nat)
        tm.assert_series_equal(result, expected)

    def test_dt64arr_timestamp_equality(self, box_with_array):
        # GH#11034
        # NaT entries never compare equal, including to themselves.
        xbox = box_with_array if box_with_array is not pd.Index else np.ndarray

        ser = pd.Series([pd.Timestamp('2000-01-29 01:59:00'), 'NaT'])
        ser = tm.box_expected(ser, box_with_array)

        result = ser != ser
        expected = tm.box_expected([False, True], xbox)
        tm.assert_equal(result, expected)

        result = ser != ser[0]
        expected = tm.box_expected([False, True], xbox)
        tm.assert_equal(result, expected)

        result = ser != ser[1]
        expected = tm.box_expected([True, True], xbox)
        tm.assert_equal(result, expected)

        result = ser == ser
        expected = tm.box_expected([True, False], xbox)
        tm.assert_equal(result, expected)

        result = ser == ser[0]
        expected = tm.box_expected([True, False], xbox)
        tm.assert_equal(result, expected)

        result = ser == ser[1]
        expected = tm.box_expected([False, False], xbox)
        tm.assert_equal(result, expected)

    @pytest.mark.parametrize('op', [operator.eq, operator.ne,
                                    operator.gt, operator.ge,
                                    operator.lt, operator.le])
    def test_comparison_tzawareness_compat(self, op):
        # GH#18162
        # Mixing tz-aware and tz-naive operands must raise TypeError.
        dr = pd.date_range('2016-01-01', periods=6)
        dz = dr.tz_localize('US/Pacific')

        # Check that there isn't a problem aware-aware and naive-naive do not
        # raise
        naive_series = Series(dr)
        aware_series = Series(dz)
        with pytest.raises(TypeError):
            op(dz, naive_series)
        with pytest.raises(TypeError):
            op(dr, aware_series)

        # TODO: implement _assert_tzawareness_compat for the reverse
        # comparison with the Series on the left-hand side
class TestDatetimeIndexComparisons(object):
# TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
    @pytest.mark.parametrize("op", [
        operator.eq, operator.ne, operator.gt, operator.lt,
        operator.ge, operator.le
    ])
    def test_comparators(self, op):
        # DatetimeIndex vs datetime64 scalar must match the equivalent
        # ndarray comparison elementwise.
        index = tm.makeDateIndex(100)
        element = index[len(index) // 2]
        element = _to_M8(element)

        arr = np.array(index)
        arr_result = op(arr, element)
        index_result = op(index, element)

        assert isinstance(index_result, np.ndarray)
        tm.assert_numpy_array_equal(arr_result, index_result)
    @pytest.mark.parametrize('other', [datetime(2016, 1, 1),
                                       Timestamp('2016-01-01'),
                                       np.datetime64('2016-01-01')])
    def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
        # All six comparison ops against a datetime-like scalar broadcast
        # elementwise over the DatetimeIndex.
        tz = tz_naive_fixture
        dti = pd.date_range('2016-01-01', periods=2, tz=tz)
        if tz is not None:
            if isinstance(other, np.datetime64):
                # no tzaware version available
                return
            other = localize_pydatetime(other, dti.tzinfo)

        result = dti == other
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)

        result = dti > other
        expected = np.array([False, True])
        tm.assert_numpy_array_equal(result, expected)

        result = dti >= other
        expected = np.array([True, True])
        tm.assert_numpy_array_equal(result, expected)

        result = dti < other
        expected = np.array([False, False])
        tm.assert_numpy_array_equal(result, expected)

        result = dti <= other
        expected = np.array([True, False])
        tm.assert_numpy_array_equal(result, expected)
    # NOTE(review): this method is missing the `test_` prefix, so pytest
    # never collects or runs it — likely an oversight worth fixing upstream.
    def dt64arr_cmp_non_datetime(self, tz_naive_fixture, box_with_array):
        # GH#19301 by convention datetime.date is not considered comparable
        # to Timestamp or DatetimeIndex.  This may change in the future.
        tz = tz_naive_fixture
        dti = pd.date_range('2016-01-01', periods=2, tz=tz)
        dtarr = tm.box_expected(dti, box_with_array)

        other = datetime(2016, 1, 1).date()
        assert not (dtarr == other).any()
        assert (dtarr != other).all()
        with pytest.raises(TypeError):
            dtarr < other
        with pytest.raises(TypeError):
            dtarr <= other
        with pytest.raises(TypeError):
            dtarr > other
        with pytest.raises(TypeError):
            dtarr >= other
    @pytest.mark.parametrize('other', [None, np.nan, pd.NaT])
    def test_dti_eq_null_scalar(self, other, tz_naive_fixture):
        # GH#19301
        # Null-like scalars compare elementwise False under ==.
        tz = tz_naive_fixture
        dti = pd.date_range('2016-01-01', periods=2, tz=tz)
        assert not (dti == other).any()
    @pytest.mark.parametrize('other', [None, np.nan, pd.NaT])
    def test_dti_ne_null_scalar(self, other, tz_naive_fixture):
        # GH#19301
        # Null-like scalars compare elementwise True under !=.
        tz = tz_naive_fixture
        dti = pd.date_range('2016-01-01', periods=2, tz=tz)
        assert (dti != other).all()
    @pytest.mark.parametrize('other', [None, np.nan])
    def test_dti_cmp_null_scalar_inequality(self, tz_naive_fixture, other,
                                            box_with_array):
        # GH#19301
        # Ordering comparisons against null-like scalars raise TypeError.
        tz = tz_naive_fixture
        dti = pd.date_range('2016-01-01', periods=2, tz=tz)
        # FIXME: ValueError with transpose
        dtarr = tm.box_expected(dti, box_with_array, transpose=False)

        with pytest.raises(TypeError):
            dtarr < other
        with pytest.raises(TypeError):
            dtarr <= other
        with pytest.raises(TypeError):
            dtarr > other
        with pytest.raises(TypeError):
            dtarr >= other
    @pytest.mark.parametrize('dtype', [None, object])
    def test_dti_cmp_nat(self, dtype, box_with_array):
        # NaT entries propagate through array-vs-array and array-vs-NaT
        # comparisons: never equal, never ordered.
        if box_with_array is tm.to_array and dtype is object:
            # dont bother testing ndarray comparison methods as this fails
            # on older numpys (since they check object identity)
            return

        xbox = box_with_array if box_with_array is not pd.Index else np.ndarray

        left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT,
                                 pd.Timestamp('2011-01-03')])
        right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')])

        left = tm.box_expected(left, box_with_array)
        right = tm.box_expected(right, box_with_array)

        lhs, rhs = left, right
        if dtype is object:
            lhs, rhs = left.astype(object), right.astype(object)

        result = rhs == lhs
        expected = np.array([False, False, True])
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(result, expected)

        result = lhs != rhs
        expected = np.array([True, True, False])
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(result, expected)

        expected = np.array([False, False, False])
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(lhs == pd.NaT, expected)
        tm.assert_equal(pd.NaT == rhs, expected)

        expected = np.array([True, True, True])
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(lhs != pd.NaT, expected)
        tm.assert_equal(pd.NaT != lhs, expected)

        expected = np.array([False, False, False])
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(lhs < pd.NaT, expected)
        tm.assert_equal(pd.NaT > lhs, expected)
    def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
        # NaT in datetime comparisons should behave exactly like np.nan in
        # float comparisons: unequal to everything, never ordered.
        fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
        fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])

        didx1 = pd.DatetimeIndex(['2014-01-01', pd.NaT, '2014-03-01', pd.NaT,
                                  '2014-05-01', '2014-07-01'])
        didx2 = pd.DatetimeIndex(['2014-02-01', '2014-03-01', pd.NaT, pd.NaT,
                                  '2014-06-01', '2014-07-01'])
        darr = np.array([np_datetime64_compat('2014-02-01 00:00Z'),
                         np_datetime64_compat('2014-03-01 00:00Z'),
                         np_datetime64_compat('nat'), np.datetime64('nat'),
                         np_datetime64_compat('2014-06-01 00:00Z'),
                         np_datetime64_compat('2014-07-01 00:00Z')])

        cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]

        # Check pd.NaT is handled the same as np.nan
        with tm.assert_produces_warning(None):
            for idx1, idx2 in cases:

                result = idx1 < idx2
                expected = np.array([True, False, False, False, True, False])
                tm.assert_numpy_array_equal(result, expected)

                result = idx2 > idx1
                expected = np.array([True, False, False, False, True, False])
                tm.assert_numpy_array_equal(result, expected)

                result = idx1 <= idx2
                expected = np.array([True, False, False, False, True, True])
                tm.assert_numpy_array_equal(result, expected)

                result = idx2 >= idx1
                expected = np.array([True, False, False, False, True, True])
                tm.assert_numpy_array_equal(result, expected)

                result = idx1 == idx2
                expected = np.array([False, False, False, False, False, True])
                tm.assert_numpy_array_equal(result, expected)

                result = idx1 != idx2
                expected = np.array([True, True, True, True, True, False])
                tm.assert_numpy_array_equal(result, expected)

        # Comparing against the null scalar itself: only != is True.
        with tm.assert_produces_warning(None):
            for idx1, val in [(fidx1, np.nan), (didx1, pd.NaT)]:
                result = idx1 < val
                expected = np.array([False, False, False, False, False, False])
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 > val
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 <= val
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 >= val
                tm.assert_numpy_array_equal(result, expected)

                result = idx1 == val
                tm.assert_numpy_array_equal(result, expected)

                result = idx1 != val
                expected = np.array([True, True, True, True, True, True])
                tm.assert_numpy_array_equal(result, expected)

        # Comparing against a non-null scalar: null positions are False for
        # every comparison except !=, matching float-vs-nan semantics.
        with tm.assert_produces_warning(None):
            for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
                result = idx1 < val
                expected = np.array([True, False, False, False, False, False])
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 > val
                expected = np.array([False, False, False, False, True, True])
                tm.assert_numpy_array_equal(result, expected)

                result = idx1 <= val
                expected = np.array([True, False, True, False, False, False])
                tm.assert_numpy_array_equal(result, expected)
                result = idx1 >= val
                expected = np.array([False, False, True, False, True, True])
                tm.assert_numpy_array_equal(result, expected)

                result = idx1 == val
                expected = np.array([False, False, True, False, False, False])
                tm.assert_numpy_array_equal(result, expected)

                result = idx1 != val
                expected = np.array([True, True, False, True, True, True])
                tm.assert_numpy_array_equal(result, expected)
    @pytest.mark.parametrize('op', [operator.eq, operator.ne,
                                    operator.gt, operator.ge,
                                    operator.lt, operator.le])
    def test_comparison_tzawareness_compat(self, op, box_with_array):
        # GH#18162
        # Mixing tz-naive and tz-aware datetime data in a comparison
        # raises TypeError for every comparison operator.
        dr = pd.date_range('2016-01-01', periods=6)
        dz = dr.tz_localize('US/Pacific')

        # FIXME: ValueError with transpose
        dr = tm.box_expected(dr, box_with_array, transpose=False)
        dz = tm.box_expected(dz, box_with_array, transpose=False)

        with pytest.raises(TypeError):
            op(dr, dz)
        if box_with_array is not pd.DataFrame:
            # DataFrame op is invalid until transpose bug is fixed
            with pytest.raises(TypeError):
                op(dr, list(dz))
            with pytest.raises(TypeError):
                op(dr, np.array(list(dz), dtype=object))
        with pytest.raises(TypeError):
            op(dz, dr)
        if box_with_array is not pd.DataFrame:
            # DataFrame op is invalid until transpose bug is fixed
            with pytest.raises(TypeError):
                op(dz, list(dr))
            with pytest.raises(TypeError):
                op(dz, np.array(list(dr), dtype=object))

        # Sanity check: aware-vs-aware and naive-vs-naive comparisons
        # do not raise.
        assert_all(dr == dr)
        assert_all(dz == dz)
        if box_with_array is not pd.DataFrame:
            # DataFrame doesn't align the lists correctly unless we transpose,
            # which we cannot do at the moment
            assert (dr == list(dr)).all()
            assert (dz == list(dz)).all()

        # Check comparisons against scalar Timestamps
        ts = pd.Timestamp('2000-03-14 01:59')
        ts_tz = pd.Timestamp('2000-03-14 01:59', tz='Europe/Amsterdam')

        assert_all(dr > ts)
        with pytest.raises(TypeError):
            op(dr, ts_tz)

        assert_all(dz > ts_tz)
        with pytest.raises(TypeError):
            op(dz, ts)

        # GH#12601: Check comparison against Timestamps and DatetimeIndex
        with pytest.raises(TypeError):
            op(ts, dz)
    @pytest.mark.parametrize('op', [operator.eq, operator.ne,
                                    operator.gt, operator.ge,
                                    operator.lt, operator.le])
    @pytest.mark.parametrize('other', [datetime(2016, 1, 1),
                                       Timestamp('2016-01-01'),
                                       np.datetime64('2016-01-01')])
    def test_scalar_comparison_tzawareness(self, op, other, tz_aware_fixture,
                                           box_with_array):
        # Comparing tz-aware datetime data against a tz-naive scalar
        # (datetime, Timestamp, or np.datetime64) raises TypeError in
        # both operand orders.
        tz = tz_aware_fixture
        dti = pd.date_range('2016-01-01', periods=2, tz=tz)

        # FIXME: ValueError with transpose
        dtarr = tm.box_expected(dti, box_with_array, transpose=False)
        with pytest.raises(TypeError):
            op(dtarr, other)
        with pytest.raises(TypeError):
            op(other, dtarr)
    @pytest.mark.parametrize('op', [operator.eq, operator.ne,
                                    operator.gt, operator.ge,
                                    operator.lt, operator.le])
    def test_nat_comparison_tzawareness(self, op):
        # GH#19276
        # tzaware DatetimeIndex should not raise when compared to NaT
        dti = pd.DatetimeIndex(['2014-01-01', pd.NaT, '2014-03-01', pd.NaT,
                                '2014-05-01', '2014-07-01'])
        # Comparisons against NaT are all-False except for !=, which is
        # all-True; the same holds after localizing to a timezone.
        expected = np.array([op == operator.ne] * len(dti))
        result = op(dti, pd.NaT)
        tm.assert_numpy_array_equal(result, expected)

        result = op(dti.tz_localize('US/Pacific'), pd.NaT)
        tm.assert_numpy_array_equal(result, expected)
    def test_dti_cmp_str(self, tz_naive_fixture):
        # GH#22074
        # regardless of tz, we expect these comparisons are valid
        # A string that parses as a datetime is compared as a Timestamp,
        # so all six comparison operators work.
        tz = tz_naive_fixture
        rng = date_range('1/1/2000', periods=10, tz=tz)
        other = '1/1/2000'

        result = rng == other
        expected = np.array([True] + [False] * 9)
        tm.assert_numpy_array_equal(result, expected)

        result = rng != other
        expected = np.array([False] + [True] * 9)
        tm.assert_numpy_array_equal(result, expected)

        result = rng < other
        expected = np.array([False] * 10)
        tm.assert_numpy_array_equal(result, expected)

        result = rng <= other
        expected = np.array([True] + [False] * 9)
        tm.assert_numpy_array_equal(result, expected)

        result = rng > other
        expected = np.array([False] + [True] * 9)
        tm.assert_numpy_array_equal(result, expected)

        result = rng >= other
        expected = np.array([True] * 10)
        tm.assert_numpy_array_equal(result, expected)
    @pytest.mark.parametrize('other', ['foo', 99, 4.0,
                                       object(), timedelta(days=2)])
    def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture,
                                        box_with_array):
        # GH#22074
        # Against a scalar of an unrelated type: == is all-False, != is
        # all-True, and ordering comparisons raise TypeError.
        tz = tz_naive_fixture
        xbox = box_with_array if box_with_array is not pd.Index else np.ndarray

        rng = date_range('1/1/2000', periods=10, tz=tz)
        # FIXME: ValueError with transpose
        rng = tm.box_expected(rng, box_with_array, transpose=False)

        result = rng == other
        expected = np.array([False] * 10)
        expected = tm.box_expected(expected, xbox, transpose=False)
        tm.assert_equal(result, expected)

        result = rng != other
        expected = np.array([True] * 10)
        expected = tm.box_expected(expected, xbox, transpose=False)
        tm.assert_equal(result, expected)

        with pytest.raises(TypeError):
            rng < other
        with pytest.raises(TypeError):
            rng <= other
        with pytest.raises(TypeError):
            rng > other
        with pytest.raises(TypeError):
            rng >= other
def test_dti_cmp_list(self):
rng = date_range('1/1/2000', periods=10)
result = rng == list(rng)
expected = rng == rng
tm.assert_numpy_array_equal(result, expected)
    @pytest.mark.parametrize('other', [
        pd.timedelta_range('1D', periods=10),
        pd.timedelta_range('1D', periods=10).to_series(),
        pd.timedelta_range('1D', periods=10).asi8.view('m8[ns]')
    ], ids=lambda x: type(x).__name__)
    def test_dti_cmp_tdi_tzawareness(self, other):
        # GH#22074
        # reversion test that we _don't_ call _assert_tzawareness_compat
        # when comparing against TimedeltaIndex
        # Timedelta-like operands are simply "not equal" (==/!= work);
        # ordering comparisons raise TypeError.
        dti = date_range('2000-01-01', periods=10, tz='Asia/Tokyo')

        result = dti == other
        expected = np.array([False] * 10)
        tm.assert_numpy_array_equal(result, expected)

        result = dti != other
        expected = np.array([True] * 10)
        tm.assert_numpy_array_equal(result, expected)

        with pytest.raises(TypeError):
            dti < other
        with pytest.raises(TypeError):
            dti <= other
        with pytest.raises(TypeError):
            dti > other
        with pytest.raises(TypeError):
            dti >= other
    def test_dti_cmp_object_dtype(self):
        # GH#22074
        # Comparisons against object-dtype arrays compare elementwise;
        # tz-awareness mismatches and unordered element types still raise.
        dti = date_range('2000-01-01', periods=10, tz='Asia/Tokyo')

        other = dti.astype('O')

        result = dti == other
        expected = np.array([True] * 10)
        tm.assert_numpy_array_equal(result, expected)

        other = dti.tz_localize(None)
        with pytest.raises(TypeError):
            # tzawareness failure
            dti != other

        other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)
        result = dti == other
        expected = np.array([True] * 5 + [False] * 5)
        tm.assert_numpy_array_equal(result, expected)
        with pytest.raises(TypeError):
            dti >= other
# ------------------------------------------------------------------
# Arithmetic
class TestDatetime64Arithmetic(object):
    # This class is intended for "finished" tests that are fully parametrized
    # over DataFrame/Series/Index/DatetimeArray
    #
    # Covers addition/subtraction of timedelta-like scalars and arrays,
    # subtraction of datetime-like scalars/arrays, and the invalid cases
    # (datetime + datetime, datetime +/- float, datetime +/- Period).

    # -------------------------------------------------------------
    # Addition/Subtraction of timedelta-like

    def test_dt64arr_add_timedeltalike_scalar(self, tz_naive_fixture,
                                              two_hours, box_with_array):
        # GH#22005, GH#22163 check DataFrame doesn't raise TypeError
        # dt64 + timedelta-like scalar shifts every element.
        tz = tz_naive_fixture

        rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
        expected = pd.date_range('2000-01-01 02:00',
                                 '2000-02-01 02:00', tz=tz)

        # FIXME: calling with transpose=True raises ValueError
        rng = tm.box_expected(rng, box_with_array, transpose=False)
        expected = tm.box_expected(expected, box_with_array, transpose=False)

        result = rng + two_hours
        tm.assert_equal(result, expected)

    def test_dt64arr_iadd_timedeltalike_scalar(self, tz_naive_fixture,
                                               two_hours, box_with_array):
        # In-place variant of the addition test above.
        tz = tz_naive_fixture

        rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
        expected = pd.date_range('2000-01-01 02:00',
                                 '2000-02-01 02:00', tz=tz)

        # FIXME: calling with transpose=True raises ValueError
        rng = tm.box_expected(rng, box_with_array, transpose=False)
        expected = tm.box_expected(expected, box_with_array, transpose=False)

        rng += two_hours
        tm.assert_equal(rng, expected)

    def test_dt64arr_sub_timedeltalike_scalar(self, tz_naive_fixture,
                                              two_hours, box_with_array):
        # dt64 - timedelta-like scalar shifts every element backwards.
        tz = tz_naive_fixture

        rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
        expected = pd.date_range('1999-12-31 22:00',
                                 '2000-01-31 22:00', tz=tz)

        # FIXME: calling with transpose=True raises ValueError
        rng = tm.box_expected(rng, box_with_array, transpose=False)
        expected = tm.box_expected(expected, box_with_array, transpose=False)

        result = rng - two_hours
        tm.assert_equal(result, expected)

    def test_dt64arr_isub_timedeltalike_scalar(self, tz_naive_fixture,
                                               two_hours, box_with_array):
        # In-place variant of the subtraction test above.
        tz = tz_naive_fixture

        rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
        expected = pd.date_range('1999-12-31 22:00',
                                 '2000-01-31 22:00', tz=tz)

        # FIXME: calling with transpose=True raises ValueError
        rng = tm.box_expected(rng, box_with_array, transpose=False)
        expected = tm.box_expected(expected, box_with_array, transpose=False)

        rng -= two_hours
        tm.assert_equal(rng, expected)

    def test_dt64arr_add_td64_scalar(self, box_with_array):
        # scalar timedeltas/np.timedelta64 objects
        # operate with np.timedelta64 correctly
        ser = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])

        expected = Series([Timestamp('20130101 9:01:01'),
                           Timestamp('20130101 9:02:01')])

        dtarr = tm.box_expected(ser, box_with_array)
        expected = tm.box_expected(expected, box_with_array)

        # Both operand orders are supported for addition.
        result = dtarr + np.timedelta64(1, 's')
        tm.assert_equal(result, expected)
        result = np.timedelta64(1, 's') + dtarr
        tm.assert_equal(result, expected)

        # Sub-second resolution units also work.
        expected = Series([Timestamp('20130101 9:01:00.005'),
                           Timestamp('20130101 9:02:00.005')])
        expected = tm.box_expected(expected, box_with_array)

        result = dtarr + np.timedelta64(5, 'ms')
        tm.assert_equal(result, expected)
        result = np.timedelta64(5, 'ms') + dtarr
        tm.assert_equal(result, expected)

    def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):
        # GH#23320 special handling for timedelta64("NaT")
        # Adding/subtracting td64-NaT propagates NaT; td64-NaT - dt64 raises.
        tz = tz_naive_fixture

        dti = pd.date_range("1994-04-01", periods=9, tz=tz, freq="QS")
        other = np.timedelta64("NaT")
        expected = pd.DatetimeIndex(["NaT"] * 9, tz=tz)

        # FIXME: fails with transpose=True due to tz-aware DataFrame
        # transpose bug
        obj = tm.box_expected(dti, box_with_array, transpose=False)
        expected = tm.box_expected(expected, box_with_array, transpose=False)

        result = obj + other
        tm.assert_equal(result, expected)
        result = other + obj
        tm.assert_equal(result, expected)
        result = obj - other
        tm.assert_equal(result, expected)
        with pytest.raises(TypeError):
            other - obj

    def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture,
                                         box_with_array):
        # dt64 array +/- td64 ndarray operates elementwise; the reversed
        # subtraction (td64 - dt64) is invalid.
        if box_with_array is pd.DataFrame:
            pytest.xfail("FIXME: ValueError with transpose; "
                         "alignment error without")

        tz = tz_naive_fixture
        dti = pd.date_range('2016-01-01', periods=3, tz=tz)
        tdi = pd.TimedeltaIndex(['-1 Day', '-1 Day', '-1 Day'])
        tdarr = tdi.values

        expected = pd.date_range('2015-12-31', periods=3, tz=tz)

        dtarr = tm.box_expected(dti, box_with_array)
        expected = tm.box_expected(expected, box_with_array)

        result = dtarr + tdarr
        tm.assert_equal(result, expected)
        result = tdarr + dtarr
        tm.assert_equal(result, expected)

        expected = pd.date_range('2016-01-02', periods=3, tz=tz)
        expected = tm.box_expected(expected, box_with_array)
        result = dtarr - tdarr
        tm.assert_equal(result, expected)

        with pytest.raises(TypeError):
            tdarr - dtarr

    # -----------------------------------------------------------------
    # Subtraction of datetime-like scalars

    @pytest.mark.parametrize('ts', [
        pd.Timestamp('2013-01-01'),
        pd.Timestamp('2013-01-01').to_pydatetime(),
        pd.Timestamp('2013-01-01').to_datetime64()])
    def test_dt64arr_sub_dtscalar(self, box_with_array, ts):
        # GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype
        # dt64 array - datetime-like scalar gives timedelta64.
        idx = pd.date_range('2013-01-01', periods=3)
        idx = tm.box_expected(idx, box_with_array)

        expected = pd.TimedeltaIndex(['0 Days', '1 Day', '2 Days'])
        expected = tm.box_expected(expected, box_with_array)

        result = idx - ts
        tm.assert_equal(result, expected)

    def test_dt64arr_sub_datetime64_not_ns(self, box_with_array):
        # GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano
        # for DataFrame operation
        dt64 = np.datetime64('2013-01-01')
        assert dt64.dtype == 'datetime64[D]'

        dti = pd.date_range('20130101', periods=3)
        dtarr = tm.box_expected(dti, box_with_array)

        expected = pd.TimedeltaIndex(['0 Days', '1 Day', '2 Days'])
        expected = tm.box_expected(expected, box_with_array)

        result = dtarr - dt64
        tm.assert_equal(result, expected)

        # Reversed subtraction negates the result.
        result = dt64 - dtarr
        tm.assert_equal(result, -expected)

    def test_dt64arr_sub_timestamp(self, box_with_array):
        # tz-aware dt64 array - one of its own elements gives timedelta64.
        ser = pd.date_range('2014-03-17', periods=2, freq='D',
                            tz='US/Eastern')
        ts = ser[0]

        # FIXME: transpose raises ValueError
        ser = tm.box_expected(ser, box_with_array, transpose=False)

        delta_series = pd.Series([np.timedelta64(0, 'D'),
                                  np.timedelta64(1, 'D')])
        expected = tm.box_expected(delta_series, box_with_array,
                                   transpose=False)

        tm.assert_equal(ser - ts, expected)
        tm.assert_equal(ts - ser, -expected)

    def test_dt64arr_sub_NaT(self, box_with_array):
        # GH#18808
        # dt64 - NaT yields all-NaT timedelta64, for both naive and
        # tz-aware data.
        dti = pd.DatetimeIndex([pd.NaT, pd.Timestamp('19900315')])
        ser = tm.box_expected(dti, box_with_array, transpose=False)

        result = ser - pd.NaT
        expected = pd.Series([pd.NaT, pd.NaT], dtype='timedelta64[ns]')
        # FIXME: raises ValueError with transpose
        expected = tm.box_expected(expected, box_with_array, transpose=False)
        tm.assert_equal(result, expected)

        dti_tz = dti.tz_localize('Asia/Tokyo')
        ser_tz = tm.box_expected(dti_tz, box_with_array, transpose=False)

        result = ser_tz - pd.NaT
        expected = pd.Series([pd.NaT, pd.NaT], dtype='timedelta64[ns]')
        expected = tm.box_expected(expected, box_with_array, transpose=False)
        tm.assert_equal(result, expected)

    # -------------------------------------------------------------
    # Subtraction of datetime-like array-like

    def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array):
        # Naive dt64 array - naive dt64 ndarray is allowed (both orders).
        dti = pd.date_range('2016-01-01', periods=3, tz=None)
        dt64vals = dti.values

        dtarr = tm.box_expected(dti, box_with_array)

        expected = dtarr - dtarr
        result = dtarr - dt64vals
        tm.assert_equal(result, expected)
        result = dt64vals - dtarr
        tm.assert_equal(result, expected)

    def test_dt64arr_aware_sub_dt64ndarray_raises(self, tz_aware_fixture,
                                                  box_with_array):
        # tz-aware dt64 array - naive dt64 ndarray raises TypeError
        # (tz-awareness mismatch), in both operand orders.
        if box_with_array is pd.DataFrame:
            pytest.xfail("FIXME: ValueError with transpose; "
                         "alignment error without")

        tz = tz_aware_fixture
        dti = pd.date_range('2016-01-01', periods=3, tz=tz)
        dt64vals = dti.values

        dtarr = tm.box_expected(dti, box_with_array)

        with pytest.raises(TypeError):
            dtarr - dt64vals
        with pytest.raises(TypeError):
            dt64vals - dtarr

    # -------------------------------------------------------------
    # Addition of datetime-like others (invalid)

    def test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture,
                                            box_with_array):
        # Adding two datetime-likes is never valid.
        if box_with_array is pd.DataFrame:
            pytest.xfail("FIXME: ValueError with transpose; "
                         "alignment error without")

        tz = tz_naive_fixture
        dti = pd.date_range('2016-01-01', periods=3, tz=tz)
        dt64vals = dti.values

        dtarr = tm.box_expected(dti, box_with_array)

        with pytest.raises(TypeError):
            dtarr + dt64vals
        with pytest.raises(TypeError):
            dt64vals + dtarr

    def test_dt64arr_add_timestamp_raises(self, box_with_array):
        # GH#22163 ensure DataFrame doesn't cast Timestamp to i8
        idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
        idx = tm.box_expected(idx, box_with_array)
        msg = "cannot add"
        with pytest.raises(TypeError, match=msg):
            idx + Timestamp('2011-01-01')
        with pytest.raises(TypeError, match=msg):
            Timestamp('2011-01-01') + idx

    # -------------------------------------------------------------
    # Other Invalid Addition/Subtraction

    @pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
    def test_dt64arr_add_sub_float(self, other, box_with_array):
        # Adding/subtracting floats (scalar or array) raises TypeError.
        dti = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
        dtarr = tm.box_expected(dti, box_with_array)
        with pytest.raises(TypeError):
            dtarr + other
        with pytest.raises(TypeError):
            other + dtarr
        with pytest.raises(TypeError):
            dtarr - other
        with pytest.raises(TypeError):
            other - dtarr

    @pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
    @pytest.mark.parametrize('dti_freq', [None, 'D'])
    def test_dt64arr_add_sub_parr(self, dti_freq, pi_freq,
                                  box_with_array, box_with_array2):
        # GH#20049 subtracting PeriodIndex should raise TypeError
        dti = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=dti_freq)
        pi = dti.to_period(pi_freq)

        dtarr = tm.box_expected(dti, box_with_array)
        parr = tm.box_expected(pi, box_with_array2)

        with pytest.raises(TypeError):
            dtarr + parr
        with pytest.raises(TypeError):
            parr + dtarr
        with pytest.raises(TypeError):
            dtarr - parr
        with pytest.raises(TypeError):
            parr - dtarr

    @pytest.mark.parametrize('dti_freq', [None, 'D'])
    def test_dt64arr_add_sub_period_scalar(self, dti_freq, box_with_array):
        # GH#13078
        # not supported, check TypeError
        per = pd.Period('2011-01-01', freq='D')

        idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=dti_freq)
        dtarr = tm.box_expected(idx, box_with_array)

        with pytest.raises(TypeError):
            dtarr + per
        with pytest.raises(TypeError):
            per + dtarr
        with pytest.raises(TypeError):
            dtarr - per
        with pytest.raises(TypeError):
            per - dtarr
class TestDatetime64DateOffsetArithmetic(object):
    # Tests for datetime64 arrays combined with pd.offsets (DateOffset)
    # objects: Tick offsets, relativedelta-based offsets, and the
    # remaining DateOffset subclasses, plus arrays of mixed offsets.

    # -------------------------------------------------------------
    # Tick DateOffsets

    # TODO: parametrize over timezone?
    def test_dt64arr_series_add_tick_DateOffset(self, box_with_array):
        # GH#4532
        # operate with pd.offsets
        ser = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])
        expected = Series([Timestamp('20130101 9:01:05'),
                           Timestamp('20130101 9:02:05')])

        ser = tm.box_expected(ser, box_with_array)
        expected = tm.box_expected(expected, box_with_array)

        result = ser + pd.offsets.Second(5)
        tm.assert_equal(result, expected)

        result2 = pd.offsets.Second(5) + ser
        tm.assert_equal(result2, expected)

    def test_dt64arr_series_sub_tick_DateOffset(self, box_with_array):
        # GH#4532
        # operate with pd.offsets
        ser = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])
        expected = Series([Timestamp('20130101 9:00:55'),
                           Timestamp('20130101 9:01:55')])

        ser = tm.box_expected(ser, box_with_array)
        expected = tm.box_expected(expected, box_with_array)

        result = ser - pd.offsets.Second(5)
        tm.assert_equal(result, expected)

        result2 = -pd.offsets.Second(5) + ser
        tm.assert_equal(result2, expected)

        # offset - datetime is not defined.
        with pytest.raises(TypeError):
            pd.offsets.Second(5) - ser

    @pytest.mark.parametrize('cls_name', ['Day', 'Hour', 'Minute', 'Second',
                                          'Milli', 'Micro', 'Nano'])
    def test_dt64arr_add_sub_tick_DateOffset_smoke(self, cls_name,
                                                   box_with_array):
        # GH#4532
        # smoke tests for valid DateOffsets
        ser = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])
        ser = tm.box_expected(ser, box_with_array)

        offset_cls = getattr(pd.offsets, cls_name)
        # Only checking these don't raise; results are not validated here.
        ser + offset_cls(5)
        offset_cls(5) + ser
        ser - offset_cls(5)

    def test_dti_add_tick_tzaware(self, tz_aware_fixture, box_with_array):
        # GH#21610, GH#22163 ensure DataFrame doesn't return object-dtype
        tz = tz_aware_fixture
        if tz == 'US/Pacific':
            # Extra check around the fall DST transition for this zone.
            dates = date_range('2012-11-01', periods=3, tz=tz)
            offset = dates + pd.offsets.Hour(5)
            assert dates[0] + pd.offsets.Hour(5) == offset[0]

        dates = date_range('2010-11-01 00:00',
                           periods=3, tz=tz, freq='H')
        expected = DatetimeIndex(['2010-11-01 05:00', '2010-11-01 06:00',
                                  '2010-11-01 07:00'], freq='H', tz=tz)

        # FIXME: these raise ValueError with transpose=True
        dates = tm.box_expected(dates, box_with_array, transpose=False)
        expected = tm.box_expected(expected, box_with_array, transpose=False)

        # TODO: parametrize over the scalar being added?  radd?  sub?
        # pd.offsets.Hour, np.timedelta64, and datetime.timedelta should
        # all produce the same shifted result.
        offset = dates + pd.offsets.Hour(5)
        tm.assert_equal(offset, expected)
        offset = dates + np.timedelta64(5, 'h')
        tm.assert_equal(offset, expected)
        offset = dates + timedelta(hours=5)
        tm.assert_equal(offset, expected)

    # -------------------------------------------------------------
    # RelativeDelta DateOffsets

    def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array):
        # GH#10699
        vec = DatetimeIndex([Timestamp('2000-01-05 00:15:00'),
                             Timestamp('2000-01-31 00:23:00'),
                             Timestamp('2000-01-01'),
                             Timestamp('2000-03-31'),
                             Timestamp('2000-02-29'),
                             Timestamp('2000-12-31'),
                             Timestamp('2000-05-15'),
                             Timestamp('2001-06-15')])
        vec = tm.box_expected(vec, box_with_array)
        vec_items = vec.squeeze() if box_with_array is pd.DataFrame else vec

        # DateOffset relativedelta fastpath
        relative_kwargs = [('years', 2), ('months', 5), ('days', 3),
                           ('hours', 5), ('minutes', 10), ('seconds', 2),
                           ('microseconds', 5)]
        for i, kwd in enumerate(relative_kwargs):
            # Single-keyword offset: vectorized op matches pointwise op.
            off = pd.DateOffset(**dict([kwd]))

            expected = DatetimeIndex([x + off for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, vec + off)

            expected = DatetimeIndex([x - off for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, vec - off)

            # Combined offset using all keywords seen so far.
            off = pd.DateOffset(**dict(relative_kwargs[:i + 1]))

            expected = DatetimeIndex([x + off for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, vec + off)

            expected = DatetimeIndex([x - off for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, vec - off)

            # offset - datetime is not defined.
            with pytest.raises(TypeError):
                off - vec

    # -------------------------------------------------------------
    # Non-Tick, Non-RelativeDelta DateOffsets

    # TODO: redundant with test_dt64arr_add_sub_DateOffset?  that includes
    #  tz-aware cases which this does not
    @pytest.mark.parametrize('cls_and_kwargs', [
        'YearBegin', ('YearBegin', {'month': 5}),
        'YearEnd', ('YearEnd', {'month': 5}),
        'MonthBegin', 'MonthEnd',
        'SemiMonthEnd', 'SemiMonthBegin',
        'Week', ('Week', {'weekday': 3}),
        'Week', ('Week', {'weekday': 6}),
        'BusinessDay', 'BDay', 'QuarterEnd', 'QuarterBegin',
        'CustomBusinessDay', 'CDay', 'CBMonthEnd',
        'CBMonthBegin', 'BMonthBegin', 'BMonthEnd',
        'BusinessHour', 'BYearBegin', 'BYearEnd',
        'BQuarterBegin', ('LastWeekOfMonth', {'weekday': 2}),
        ('FY5253Quarter', {'qtr_with_extra_week': 1,
                           'startingMonth': 1,
                           'weekday': 2,
                           'variation': 'nearest'}),
        ('FY5253', {'weekday': 0, 'startingMonth': 2, 'variation': 'nearest'}),
        ('WeekOfMonth', {'weekday': 2, 'week': 2}),
        'Easter', ('DateOffset', {'day': 4}),
        ('DateOffset', {'month': 5})])
    @pytest.mark.parametrize('normalize', [True, False])
    @pytest.mark.parametrize('n', [0, 5])
    def test_dt64arr_add_sub_DateOffsets(self, box_with_array,
                                         n, normalize, cls_and_kwargs):
        # GH#10699
        # assert vectorized operation matches pointwise operations
        if isinstance(cls_and_kwargs, tuple):
            # If cls_name param is a tuple, then 2nd entry is kwargs for
            # the offset constructor
            cls_name, kwargs = cls_and_kwargs
        else:
            cls_name = cls_and_kwargs
            kwargs = {}

        if n == 0 and cls_name in ['WeekOfMonth', 'LastWeekOfMonth',
                                   'FY5253Quarter', 'FY5253']:
            # passing n = 0 is invalid for these offset classes
            return

        vec = DatetimeIndex([Timestamp('2000-01-05 00:15:00'),
                             Timestamp('2000-01-31 00:23:00'),
                             Timestamp('2000-01-01'),
                             Timestamp('2000-03-31'),
                             Timestamp('2000-02-29'),
                             Timestamp('2000-12-31'),
                             Timestamp('2000-05-15'),
                             Timestamp('2001-06-15')])
        vec = tm.box_expected(vec, box_with_array)
        vec_items = vec.squeeze() if box_with_array is pd.DataFrame else vec

        offset_cls = getattr(pd.offsets, cls_name)

        with warnings.catch_warnings(record=True):
            # pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
            # applied to Series or DatetimeIndex
            # we aren't testing that here, so ignore.
            warnings.simplefilter("ignore", PerformanceWarning)

            offset = offset_cls(n, normalize=normalize, **kwargs)

            expected = DatetimeIndex([x + offset for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, vec + offset)

            expected = DatetimeIndex([x - offset for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, vec - offset)

            expected = DatetimeIndex([offset + x for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, offset + vec)

            # offset - datetime is not defined.
            with pytest.raises(TypeError):
                offset - vec

    def test_dt64arr_add_sub_DateOffset(self, box_with_array):
        # GH#10699
        # DateOffset/Day/MonthEnd arithmetic, including tz-aware data.
        s = date_range('2000-01-01', '2000-01-31', name='a')
        s = tm.box_expected(s, box_with_array)
        result = s + pd.DateOffset(years=1)
        result2 = pd.DateOffset(years=1) + s
        exp = date_range('2001-01-01', '2001-01-31', name='a')
        exp = tm.box_expected(exp, box_with_array)
        tm.assert_equal(result, exp)
        tm.assert_equal(result2, exp)

        result = s - pd.DateOffset(years=1)
        exp = date_range('1999-01-01', '1999-01-31', name='a')
        exp = tm.box_expected(exp, box_with_array)
        tm.assert_equal(result, exp)

        s = DatetimeIndex([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
                           Timestamp('2000-02-15', tz='US/Central')], name='a')
        # FIXME: ValueError with tzaware DataFrame transpose
        s = tm.box_expected(s, box_with_array, transpose=False)
        result = s + pd.offsets.Day()
        result2 = pd.offsets.Day() + s
        exp = DatetimeIndex([Timestamp('2000-01-16 00:15:00', tz='US/Central'),
                             Timestamp('2000-02-16', tz='US/Central')],
                            name='a')
        exp = tm.box_expected(exp, box_with_array, transpose=False)
        tm.assert_equal(result, exp)
        tm.assert_equal(result2, exp)

        s = DatetimeIndex([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
                           Timestamp('2000-02-15', tz='US/Central')], name='a')
        s = tm.box_expected(s, box_with_array, transpose=False)
        result = s + pd.offsets.MonthEnd()
        result2 = pd.offsets.MonthEnd() + s
        exp = DatetimeIndex([Timestamp('2000-01-31 00:15:00', tz='US/Central'),
                             Timestamp('2000-02-29', tz='US/Central')],
                            name='a')
        exp = tm.box_expected(exp, box_with_array, transpose=False)
        tm.assert_equal(result, exp)
        tm.assert_equal(result2, exp)

    # TODO: __sub__, __rsub__
    def test_dt64arr_add_mixed_offset_array(self, box_with_array):
        # GH#10699
        # array of offsets
        s = DatetimeIndex([Timestamp('2000-1-1'), Timestamp('2000-2-1')])
        s = tm.box_expected(s, box_with_array)

        # DataFrame ops don't emit PerformanceWarning; the others do.
        warn = None if box_with_array is pd.DataFrame else PerformanceWarning
        with tm.assert_produces_warning(warn,
                                        clear=[pd.core.arrays.datetimelike]):
            other = pd.Index([pd.offsets.DateOffset(years=1),
                              pd.offsets.MonthEnd()])
            other = tm.box_expected(other, box_with_array)
            result = s + other
            exp = DatetimeIndex([Timestamp('2001-1-1'),
                                 Timestamp('2000-2-29')])
            exp = tm.box_expected(exp, box_with_array)
            tm.assert_equal(result, exp)

            # same offset
            other = pd.Index([pd.offsets.DateOffset(years=1),
                              pd.offsets.DateOffset(years=1)])
            other = tm.box_expected(other, box_with_array)
            result = s + other
            exp = DatetimeIndex([Timestamp('2001-1-1'),
                                 Timestamp('2001-2-1')])
            exp = tm.box_expected(exp, box_with_array)
            tm.assert_equal(result, exp)

    # TODO: overlap with test_dt64arr_add_mixed_offset_array?
    def test_dt64arr_add_sub_offset_ndarray(self, tz_naive_fixture,
                                            box_with_array):
        # GH#18849
        # ndarray of offsets applies elementwise (with PerformanceWarning
        # for non-DataFrame boxes).
        if box_with_array is pd.DataFrame:
            pytest.xfail("FIXME: ValueError with transpose; "
                         "alignment error without")

        tz = tz_naive_fixture
        dti = pd.date_range('2017-01-01', periods=2, tz=tz)
        dtarr = tm.box_expected(dti, box_with_array)

        other = np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])

        warn = None if box_with_array is pd.DataFrame else PerformanceWarning
        with tm.assert_produces_warning(warn,
                                        clear=[pd.core.arrays.datetimelike]):
            res = dtarr + other
        expected = DatetimeIndex([dti[n] + other[n] for n in range(len(dti))],
                                 name=dti.name, freq='infer')
        expected = tm.box_expected(expected, box_with_array)
        tm.assert_equal(res, expected)

        with tm.assert_produces_warning(warn,
                                        clear=[pd.core.arrays.datetimelike]):
            res2 = other + dtarr
        tm.assert_equal(res2, expected)

        with tm.assert_produces_warning(warn,
                                        clear=[pd.core.arrays.datetimelike]):
            res = dtarr - other
        expected = DatetimeIndex([dti[n] - other[n] for n in range(len(dti))],
                                 name=dti.name, freq='infer')
        expected = tm.box_expected(expected, box_with_array)
        tm.assert_equal(res, expected)
class TestDatetime64OverflowHandling(object):
    # TODO: box + de-duplicate
    # Tests for overflow behavior in datetime64 arithmetic: operations
    # whose intermediate i8 values would overflow should either mask
    # (when NaT is involved) or raise OverflowError.

    def test_dt64_overflow_masking(self, box_with_array):
        # GH#25317
        # Subtracting NaT must yield NaT rather than overflowing on the
        # masked position.
        left = Series([Timestamp('1969-12-31')])
        right = Series([NaT])

        left = tm.box_expected(left, box_with_array)
        right = tm.box_expected(right, box_with_array)

        expected = TimedeltaIndex([NaT])
        expected = tm.box_expected(expected, box_with_array)

        result = left - right
        tm.assert_equal(result, expected)
def test_dt64_series_arith_overflow(self):
# GH#12534, fixed by GH#19024
dt = pd.Timestamp('1700-01-31')
td = pd.Timedelta('20000 Days')
dti = pd.date_range('1949-09-30', freq='100Y', periods=4)
ser = pd.Series(dti)
with pytest.raises(OverflowError):
ser - dt
with pytest.raises(OverflowError):
dt - ser
with pytest.raises(OverflowError):
ser + td
with pytest.raises(OverflowError):
td + ser
ser.iloc[-1] = pd.NaT
expected = pd.Series(['2004-10-03', '2104-10-04', '2204-10-04', 'NaT'],
dtype='datetime64[ns]')
res = ser + td
tm.assert_series_equal(res, expected)
res = td + ser
tm.assert_series_equal(res, expected)
ser.iloc[1:] = pd.NaT
expected = pd.Series(['91279 Days', 'NaT', 'NaT', 'NaT'],
dtype='timedelta64[ns]')
res = ser - dt
tm.assert_series_equal(res, expected)
res = dt - ser
tm.assert_series_equal(res, -expected)
def test_datetimeindex_sub_timestamp_overflow(self):
dtimax = pd.to_datetime(['now', pd.Timestamp.max])
dtimin = pd.to_datetime(['now', pd.Timestamp.min])
tsneg = Timestamp('1950-01-01')
ts_neg_variants = [tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype('datetime64[ns]'),
tsneg.to_datetime64().astype('datetime64[D]')]
tspos = Timestamp('1980-01-01')
ts_pos_variants = [tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype('datetime64[ns]'),
tspos.to_datetime64().astype('datetime64[D]')]
for variant in ts_neg_variants:
with pytest.raises(OverflowError):
dtimax - variant
expected = pd.Timestamp.max.value - tspos.value
for variant in ts_pos_variants:
res = dtimax - variant
assert res[1].value == expected
expected = pd.Timestamp.min.value - tsneg.value
for variant in ts_neg_variants:
res = dtimin - variant
assert res[1].value == expected
for variant in ts_pos_variants:
with pytest.raises(OverflowError):
dtimin - variant
def test_datetimeindex_sub_datetimeindex_overflow(self):
# GH#22492, GH#22508
dtimax = pd.to_datetime(['now', pd.Timestamp.max])
dtimin = pd.to_datetime(['now', pd.Timestamp.min])
ts_neg = pd.to_datetime(['1950-01-01', '1950-01-01'])
ts_pos = pd.to_datetime(['1980-01-01', '1980-01-01'])
# General tests
expected = pd.Timestamp.max.value - ts_pos[1].value
result = dtimax - ts_pos
assert result[1].value == expected
expected = pd.Timestamp.min.value - ts_neg[1].value
result = dtimin - ts_neg
assert result[1].value == expected
with pytest.raises(OverflowError):
dtimax - ts_neg
with pytest.raises(OverflowError):
dtimin - ts_pos
# Edge cases
tmin = pd.to_datetime([pd.Timestamp.min])
t1 = tmin + pd.Timedelta.max + pd.Timedelta('1us')
with pytest.raises(OverflowError):
t1 - tmin
tmax = pd.to_datetime([pd.Timestamp.max])
t2 = tmax + pd.Timedelta.min - pd.Timedelta('1us')
with pytest.raises(OverflowError):
tmax - t2
class TestTimestampSeriesArithmetic(object):
    """Arithmetic between datetime64 Series and scalars/offsets/other
    Series, including NaT propagation and timezone handling."""

    def test_empty_series_add_sub(self):
        # GH#13844
        a = Series(dtype='M8[ns]')
        b = Series(dtype='m8[ns]')
        tm.assert_series_equal(a, a + b)
        tm.assert_series_equal(a, a - b)
        tm.assert_series_equal(a, b + a)
        with pytest.raises(TypeError):
            b - a

    def test_operators_datetimelike(self):
        # Smoke test: these operations must simply not raise.

        # ## timedelta64 ###
        td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
        td1.iloc[2] = np.nan

        # ## datetime64 ###
        dt1 = Series([pd.Timestamp('20111230'), pd.Timestamp('20120101'),
                      pd.Timestamp('20120103')])
        dt1.iloc[2] = np.nan
        dt2 = Series([pd.Timestamp('20111231'), pd.Timestamp('20120102'),
                      pd.Timestamp('20120104')])
        dt1 - dt2
        dt2 - dt1

        # ## datetime64 with timedelta ###
        dt1 + td1
        td1 + dt1
        dt1 - td1
        # TODO: Decide if this ought to work.
        # td1 - dt1

        # ## timedelta with datetime64 ###
        td1 + dt1
        dt1 + td1

    def test_dt64ser_sub_datetime_dtype(self):
        ts = Timestamp(datetime(1993, 1, 7, 13, 30, 00))
        dt = datetime(1993, 6, 22, 13, 30)
        ser = Series([ts])
        result = pd.to_timedelta(np.abs(ser - dt))
        assert result.dtype == 'timedelta64[ns]'

    # -------------------------------------------------------------
    # TODO: This next block of tests came from tests.series.test_operators,
    # needs to be de-duplicated and parametrized over `box` classes

    def test_operators_datetimelike_invalid(self, all_arithmetic_operators):
        # these are all TypeError ops
        op_str = all_arithmetic_operators

        def check(get_ser, test_ser):
            # check that we are getting a TypeError
            # with 'operate' (from core/ops.py) for the ops that are not
            # defined
            op = getattr(get_ser, op_str, None)
            # Previously, _validate_for_numeric_binop in core/indexes/base.py
            # did this for us.
            with pytest.raises(TypeError,
                               match='operate|[cC]annot|unsupported operand'):
                op(test_ser)

        # ## timedelta64 ###
        td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
        td1.iloc[2] = np.nan

        # ## datetime64 ###
        dt1 = Series([Timestamp('20111230'), Timestamp('20120101'),
                      Timestamp('20120103')])
        dt1.iloc[2] = np.nan
        dt2 = Series([Timestamp('20111231'), Timestamp('20120102'),
                      Timestamp('20120104')])
        if op_str not in ['__sub__', '__rsub__']:
            check(dt1, dt2)

        # ## datetime64 with timedelta ###
        # TODO(jreback) __rsub__ should raise?
        if op_str not in ['__add__', '__radd__', '__sub__']:
            check(dt1, td1)

        # 8260, 10763
        # datetime64 with tz
        tz = 'US/Eastern'
        dt1 = Series(date_range('2000-01-01 09:00:00', periods=5,
                                tz=tz), name='foo')
        dt2 = dt1.copy()
        dt2.iloc[2] = np.nan
        td1 = Series(pd.timedelta_range('1 days 1 min', periods=5, freq='H'))
        td2 = td1.copy()
        td2.iloc[1] = np.nan
        if op_str not in ['__add__', '__radd__', '__sub__', '__rsub__']:
            check(dt2, td2)

    def test_sub_single_tz(self):
        # GH#12290
        s1 = Series([pd.Timestamp('2016-02-10', tz='America/Sao_Paulo')])
        s2 = Series([pd.Timestamp('2016-02-08', tz='America/Sao_Paulo')])
        result = s1 - s2
        expected = Series([Timedelta('2days')])
        tm.assert_series_equal(result, expected)
        result = s2 - s1
        expected = Series([Timedelta('-2days')])
        tm.assert_series_equal(result, expected)

    def test_dt64tz_series_sub_dtitz(self):
        # GH#19071 subtracting tzaware DatetimeIndex from tzaware Series
        # (with same tz) raises, fixed by #19024
        dti = pd.date_range('1999-09-30', periods=10, tz='US/Pacific')
        ser = pd.Series(dti)
        expected = pd.Series(pd.TimedeltaIndex(['0days'] * 10))

        res = dti - ser
        tm.assert_series_equal(res, expected)
        res = ser - dti
        tm.assert_series_equal(res, expected)

    def test_sub_datetime_compat(self):
        # see GH#14088
        s = Series([datetime(2016, 8, 23, 12, tzinfo=pytz.utc), pd.NaT])
        dt = datetime(2016, 8, 22, 12, tzinfo=pytz.utc)
        exp = Series([Timedelta('1 days'), pd.NaT])
        tm.assert_series_equal(s - dt, exp)
        tm.assert_series_equal(s - Timestamp(dt), exp)

    def test_dt64_series_add_mixed_tick_DateOffset(self):
        # GH#4532
        # operate with pd.offsets
        s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])

        result = s + pd.offsets.Milli(5)
        result2 = pd.offsets.Milli(5) + s
        expected = Series([Timestamp('20130101 9:01:00.005'),
                           Timestamp('20130101 9:02:00.005')])
        tm.assert_series_equal(result, expected)
        tm.assert_series_equal(result2, expected)

        # Chained offsets accumulate.
        result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
        expected = Series([Timestamp('20130101 9:06:00.005'),
                           Timestamp('20130101 9:07:00.005')])
        tm.assert_series_equal(result, expected)

    def test_datetime64_ops_nat(self):
        # GH#11349
        datetime_series = Series([NaT, Timestamp('19900315')])
        nat_series_dtype_timestamp = Series([NaT, NaT], dtype='datetime64[ns]')
        single_nat_dtype_datetime = Series([NaT], dtype='datetime64[ns]')

        # subtraction
        tm.assert_series_equal(-NaT + datetime_series,
                               nat_series_dtype_timestamp)
        with pytest.raises(TypeError):
            -single_nat_dtype_datetime + datetime_series

        tm.assert_series_equal(-NaT + nat_series_dtype_timestamp,
                               nat_series_dtype_timestamp)
        with pytest.raises(TypeError):
            -single_nat_dtype_datetime + nat_series_dtype_timestamp

        # addition
        tm.assert_series_equal(nat_series_dtype_timestamp + NaT,
                               nat_series_dtype_timestamp)
        tm.assert_series_equal(NaT + nat_series_dtype_timestamp,
                               nat_series_dtype_timestamp)

        tm.assert_series_equal(nat_series_dtype_timestamp + NaT,
                               nat_series_dtype_timestamp)
        tm.assert_series_equal(NaT + nat_series_dtype_timestamp,
                               nat_series_dtype_timestamp)

    # -------------------------------------------------------------
    # Invalid Operations
    # TODO: this block also needs to be de-duplicated and parametrized

    @pytest.mark.parametrize('dt64_series', [
        Series([Timestamp('19900315'), Timestamp('19900315')]),
        Series([pd.NaT, Timestamp('19900315')]),
        Series([pd.NaT, pd.NaT], dtype='datetime64[ns]')])
    @pytest.mark.parametrize('one', [1, 1.0, np.array(1)])
    def test_dt64_mul_div_numeric_invalid(self, one, dt64_series):
        # multiplication
        with pytest.raises(TypeError):
            dt64_series * one
        with pytest.raises(TypeError):
            one * dt64_series

        # division
        with pytest.raises(TypeError):
            dt64_series / one
        with pytest.raises(TypeError):
            one / dt64_series

    @pytest.mark.parametrize('op', ['__add__', '__radd__',
                                    '__sub__', '__rsub__'])
    @pytest.mark.parametrize('tz', [None, 'Asia/Tokyo'])
    def test_dt64_series_add_intlike(self, tz, op):
        # GH#19123
        dti = pd.DatetimeIndex(['2016-01-02', '2016-02-03', 'NaT'], tz=tz)
        ser = Series(dti)

        other = Series([20, 30, 40], dtype='uint8')

        method = getattr(ser, op)
        with pytest.raises(TypeError):
            method(1)
        with pytest.raises(TypeError):
            method(other)
        with pytest.raises(TypeError):
            method(other.values)
        with pytest.raises(TypeError):
            method(pd.Index(other))

    # -------------------------------------------------------------
    # Timezone-Centric Tests

    def test_operators_datetimelike_with_timezones(self):
        tz = 'US/Eastern'
        dt1 = Series(date_range('2000-01-01 09:00:00', periods=5,
                                tz=tz), name='foo')
        dt2 = dt1.copy()
        dt2.iloc[2] = np.nan

        td1 = Series(pd.timedelta_range('1 days 1 min', periods=5, freq='H'))
        td2 = td1.copy()
        td2.iloc[1] = np.nan

        # Each aware op is checked against the equivalent naive op
        # re-localized to the same tz.
        result = dt1 + td1[0]
        exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)

        result = dt2 + td2[0]
        exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)

        # odd numpy behavior with scalar timedeltas
        result = td1[0] + dt1
        exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)

        result = td2[0] + dt2
        exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)

        result = dt1 - td1[0]
        exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)
        with pytest.raises(TypeError):
            td1[0] - dt1

        result = dt2 - td2[0]
        exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)
        with pytest.raises(TypeError):
            td2[0] - dt2

        result = dt1 + td1
        exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)

        result = dt2 + td2
        exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)

        result = dt1 - td1
        exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)

        result = dt2 - td2
        exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)

        with pytest.raises(TypeError):
            td1 - dt1
        with pytest.raises(TypeError):
            td2 - dt2
class TestDatetimeIndexArithmetic(object):
    """Arithmetic between DatetimeIndex and ints, integer arrays,
    TimedeltaIndex/timedelta64 arrays, datetime-likes, offsets and Series."""

    # -------------------------------------------------------------
    # Binary operations DatetimeIndex and int

    def test_dti_add_int(self, tz_naive_fixture, one):
        # Variants of `one` for #19012
        # int addition is deprecated, hence the FutureWarning.
        tz = tz_naive_fixture
        rng = pd.date_range('2000-01-01 09:00', freq='H',
                            periods=10, tz=tz)
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            result = rng + one
        expected = pd.date_range('2000-01-01 10:00', freq='H',
                                 periods=10, tz=tz)
        tm.assert_index_equal(result, expected)

    def test_dti_iadd_int(self, tz_naive_fixture, one):
        tz = tz_naive_fixture
        rng = pd.date_range('2000-01-01 09:00', freq='H',
                            periods=10, tz=tz)
        expected = pd.date_range('2000-01-01 10:00', freq='H',
                                 periods=10, tz=tz)
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            rng += one
        tm.assert_index_equal(rng, expected)

    def test_dti_sub_int(self, tz_naive_fixture, one):
        tz = tz_naive_fixture
        rng = pd.date_range('2000-01-01 09:00', freq='H',
                            periods=10, tz=tz)
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            result = rng - one
        expected = pd.date_range('2000-01-01 08:00', freq='H',
                                 periods=10, tz=tz)
        tm.assert_index_equal(result, expected)

    def test_dti_isub_int(self, tz_naive_fixture, one):
        tz = tz_naive_fixture
        rng = pd.date_range('2000-01-01 09:00', freq='H',
                            periods=10, tz=tz)
        expected = pd.date_range('2000-01-01 08:00', freq='H',
                                 periods=10, tz=tz)
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            rng -= one
        tm.assert_index_equal(rng, expected)

    # -------------------------------------------------------------
    # __add__/__sub__ with integer arrays

    @pytest.mark.parametrize('freq', ['H', 'D'])
    @pytest.mark.parametrize('int_holder', [np.array, pd.Index])
    def test_dti_add_intarray_tick(self, int_holder, freq):
        # GH#19959
        dti = pd.date_range('2016-01-01', periods=2, freq=freq)
        other = int_holder([4, -1])
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            expected = DatetimeIndex([dti[n] + other[n]
                                      for n in range(len(dti))])
            result = dti + other
        tm.assert_index_equal(result, expected)
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            result = other + dti
        tm.assert_index_equal(result, expected)

    @pytest.mark.parametrize('freq', ['W', 'M', 'MS', 'Q'])
    @pytest.mark.parametrize('int_holder', [np.array, pd.Index])
    def test_dti_add_intarray_non_tick(self, int_holder, freq):
        # GH#19959
        dti = pd.date_range('2016-01-01', periods=2, freq=freq)
        other = int_holder([4, -1])
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            expected = DatetimeIndex([dti[n] + other[n]
                                      for n in range(len(dti))])

        # tm.assert_produces_warning does not handle cases where we expect
        # two warnings, in this case PerformanceWarning and FutureWarning.
        # Until that is fixed, we don't catch either
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            result = dti + other
        tm.assert_index_equal(result, expected)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            result = other + dti
        tm.assert_index_equal(result, expected)

    @pytest.mark.parametrize('int_holder', [np.array, pd.Index])
    def test_dti_add_intarray_no_freq(self, int_holder):
        # GH#19959
        # Without a freq there is no unit to interpret the ints in.
        dti = pd.DatetimeIndex(['2016-01-01', 'NaT', '2017-04-05 06:07:08'])
        other = int_holder([9, 4, -1])
        with pytest.raises(NullFrequencyError):
            dti + other
        with pytest.raises(NullFrequencyError):
            other + dti
        with pytest.raises(NullFrequencyError):
            dti - other
        with pytest.raises(TypeError):
            other - dti

    # -------------------------------------------------------------
    # Binary operations DatetimeIndex and TimedeltaIndex/array

    def test_dti_add_tdi(self, tz_naive_fixture):
        # GH#17558
        tz = tz_naive_fixture
        dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
        tdi = pd.timedelta_range('0 days', periods=10)
        expected = pd.date_range('2017-01-01', periods=10, tz=tz)

        # add with TimedeltaIndex
        result = dti + tdi
        tm.assert_index_equal(result, expected)

        result = tdi + dti
        tm.assert_index_equal(result, expected)

        # add with timedelta64 array
        result = dti + tdi.values
        tm.assert_index_equal(result, expected)

        result = tdi.values + dti
        tm.assert_index_equal(result, expected)

    def test_dti_iadd_tdi(self, tz_naive_fixture):
        # GH#17558
        tz = tz_naive_fixture
        dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
        tdi = pd.timedelta_range('0 days', periods=10)
        expected = pd.date_range('2017-01-01', periods=10, tz=tz)

        # iadd with TimedeltaIndex
        result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
        result += tdi
        tm.assert_index_equal(result, expected)

        result = pd.timedelta_range('0 days', periods=10)
        result += dti
        tm.assert_index_equal(result, expected)

        # iadd with timedelta64 array
        result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
        result += tdi.values
        tm.assert_index_equal(result, expected)

        result = pd.timedelta_range('0 days', periods=10)
        result += dti
        tm.assert_index_equal(result, expected)

    def test_dti_sub_tdi(self, tz_naive_fixture):
        # GH#17558
        tz = tz_naive_fixture
        dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
        tdi = pd.timedelta_range('0 days', periods=10)
        expected = pd.date_range('2017-01-01', periods=10, tz=tz, freq='-1D')

        # sub with TimedeltaIndex
        result = dti - tdi
        tm.assert_index_equal(result, expected)

        # The reversed subtraction is undefined and must raise.
        msg = 'cannot subtract .*TimedeltaArray'
        with pytest.raises(TypeError, match=msg):
            tdi - dti

        # sub with timedelta64 array
        result = dti - tdi.values
        tm.assert_index_equal(result, expected)

        msg = 'cannot subtract DatetimeArray from'
        with pytest.raises(TypeError, match=msg):
            tdi.values - dti

    def test_dti_isub_tdi(self, tz_naive_fixture):
        # GH#17558
        tz = tz_naive_fixture
        dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
        tdi = pd.timedelta_range('0 days', periods=10)
        expected = pd.date_range('2017-01-01', periods=10, tz=tz, freq='-1D')

        # isub with TimedeltaIndex
        result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
        result -= tdi
        tm.assert_index_equal(result, expected)

        msg = 'cannot subtract .* from a TimedeltaArray'
        with pytest.raises(TypeError, match=msg):
            tdi -= dti

        # isub with timedelta64 array
        result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
        result -= tdi.values
        tm.assert_index_equal(result, expected)

        msg = '|'.join(['cannot perform __neg__ with this index type:',
                        'ufunc subtract cannot use operands with types',
                        'cannot subtract DatetimeArray from'])
        with pytest.raises(TypeError, match=msg):
            tdi.values -= dti

    # -------------------------------------------------------------
    # Binary Operations DatetimeIndex and datetime-like
    # TODO: A couple other tests belong in this section. Move them in
    # A PR where there isn't already a giant diff.

    @pytest.mark.parametrize('addend', [
        datetime(2011, 1, 1),
        DatetimeIndex(['2011-01-01', '2011-01-02']),
        DatetimeIndex(['2011-01-01', '2011-01-02']).tz_localize('US/Eastern'),
        np.datetime64('2011-01-01'),
        Timestamp('2011-01-01')
    ], ids=lambda x: type(x).__name__)
    @pytest.mark.parametrize('tz', [None, 'US/Eastern'])
    def test_add_datetimelike_and_dti(self, addend, tz):
        # GH#9631
        # Adding datetime-likes to a DatetimeIndex is always invalid.
        dti = DatetimeIndex(['2011-01-01', '2011-01-02']).tz_localize(tz)
        msg = ('cannot add DatetimeArray and {0}'
               .format(type(addend).__name__)).replace('DatetimeIndex',
                                                       'DatetimeArray')
        with pytest.raises(TypeError, match=msg):
            dti + addend
        with pytest.raises(TypeError, match=msg):
            addend + dti

    # -------------------------------------------------------------

    def test_sub_dti_dti(self):
        # previously performed setop (deprecated in 0.16.0), now changed to
        # return subtraction -> TimeDeltaIndex (GH ...)

        dti = date_range('20130101', periods=3)
        dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
        dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
        expected = TimedeltaIndex([0, 0, 0])

        result = dti - dti
        tm.assert_index_equal(result, expected)

        result = dti_tz - dti_tz
        tm.assert_index_equal(result, expected)

        # Mixing naive with aware (or different tzs) raises.
        with pytest.raises(TypeError):
            dti_tz - dti

        with pytest.raises(TypeError):
            dti - dti_tz

        with pytest.raises(TypeError):
            dti_tz - dti_tz2

        # isub
        dti -= dti
        tm.assert_index_equal(dti, expected)

        # different length raises ValueError
        dti1 = date_range('20130101', periods=3)
        dti2 = date_range('20130101', periods=4)
        with pytest.raises(ValueError):
            dti1 - dti2

        # NaN propagation
        dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
        dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
        expected = TimedeltaIndex(['1 days', np.nan, np.nan])
        result = dti2 - dti1
        tm.assert_index_equal(result, expected)

    # -------------------------------------------------------------------
    # TODO: Most of this block is moved from series or frame tests, needs
    # cleanup, box-parametrization, and de-duplication

    @pytest.mark.parametrize('op', [operator.add, operator.sub])
    def test_timedelta64_equal_timedelta_supported_ops(self, op):
        # np.timedelta64 and datetime.timedelta operands must give
        # identical results.
        ser = Series([Timestamp('20130301'),
                      Timestamp('20130228 23:00:00'),
                      Timestamp('20130228 22:00:00'),
                      Timestamp('20130228 21:00:00')])

        intervals = ['D', 'h', 'm', 's', 'us']

        # TODO: unused
        # npy16_mappings = {'D': 24 * 60 * 60 * 1000000,
        #                   'h': 60 * 60 * 1000000,
        #                   'm': 60 * 1000000,
        #                   's': 1000000,
        #                   'us': 1}

        def timedelta64(*args):
            # Build a np.timedelta64 from (days, hours, mins, secs, us).
            return sum(starmap(np.timedelta64, zip(args, intervals)))

        for d, h, m, s, us in product(*([range(2)] * 5)):
            nptd = timedelta64(d, h, m, s, us)
            pytd = timedelta(days=d, hours=h, minutes=m, seconds=s,
                             microseconds=us)
            lhs = op(ser, nptd)
            rhs = op(ser, pytd)

            tm.assert_series_equal(lhs, rhs)

    def test_ops_nat_mixed_datetime64_timedelta64(self):
        # GH#11349
        timedelta_series = Series([NaT, Timedelta('1s')])
        datetime_series = Series([NaT, Timestamp('19900315')])
        nat_series_dtype_timedelta = Series([NaT, NaT],
                                            dtype='timedelta64[ns]')
        nat_series_dtype_timestamp = Series([NaT, NaT], dtype='datetime64[ns]')
        single_nat_dtype_datetime = Series([NaT], dtype='datetime64[ns]')
        single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]')

        # subtraction
        tm.assert_series_equal(datetime_series - single_nat_dtype_datetime,
                               nat_series_dtype_timedelta)

        tm.assert_series_equal(datetime_series - single_nat_dtype_timedelta,
                               nat_series_dtype_timestamp)
        tm.assert_series_equal(-single_nat_dtype_timedelta + datetime_series,
                               nat_series_dtype_timestamp)

        # without a Series wrapping the NaT, it is ambiguous
        # whether it is a datetime64 or timedelta64
        # defaults to interpreting it as timedelta64
        tm.assert_series_equal(nat_series_dtype_timestamp -
                               single_nat_dtype_datetime,
                               nat_series_dtype_timedelta)

        tm.assert_series_equal(nat_series_dtype_timestamp -
                               single_nat_dtype_timedelta,
                               nat_series_dtype_timestamp)
        tm.assert_series_equal(-single_nat_dtype_timedelta +
                               nat_series_dtype_timestamp,
                               nat_series_dtype_timestamp)

        with pytest.raises(TypeError):
            timedelta_series - single_nat_dtype_datetime

        # addition
        tm.assert_series_equal(nat_series_dtype_timestamp +
                               single_nat_dtype_timedelta,
                               nat_series_dtype_timestamp)
        tm.assert_series_equal(single_nat_dtype_timedelta +
                               nat_series_dtype_timestamp,
                               nat_series_dtype_timestamp)

        tm.assert_series_equal(nat_series_dtype_timestamp +
                               single_nat_dtype_timedelta,
                               nat_series_dtype_timestamp)
        tm.assert_series_equal(single_nat_dtype_timedelta +
                               nat_series_dtype_timestamp,
                               nat_series_dtype_timestamp)

        tm.assert_series_equal(nat_series_dtype_timedelta +
                               single_nat_dtype_datetime,
                               nat_series_dtype_timestamp)
        tm.assert_series_equal(single_nat_dtype_datetime +
                               nat_series_dtype_timedelta,
                               nat_series_dtype_timestamp)

    def test_ufunc_coercions(self):
        # Both operator syntax and the numpy ufunc must preserve
        # DatetimeIndex type and infer the resulting freq.
        idx = date_range('2011-01-01', periods=3, freq='2D', name='x')

        delta = np.timedelta64(1, 'D')
        for result in [idx + delta, np.add(idx, delta)]:
            assert isinstance(result, DatetimeIndex)
            exp = date_range('2011-01-02', periods=3, freq='2D', name='x')
            tm.assert_index_equal(result, exp)
            assert result.freq == '2D'

        for result in [idx - delta, np.subtract(idx, delta)]:
            assert isinstance(result, DatetimeIndex)
            exp = date_range('2010-12-31', periods=3, freq='2D', name='x')
            tm.assert_index_equal(result, exp)
            assert result.freq == '2D'

        delta = np.array([np.timedelta64(1, 'D'), np.timedelta64(2, 'D'),
                          np.timedelta64(3, 'D')])
        for result in [idx + delta, np.add(idx, delta)]:
            assert isinstance(result, DatetimeIndex)
            exp = DatetimeIndex(['2011-01-02', '2011-01-05', '2011-01-08'],
                                freq='3D', name='x')
            tm.assert_index_equal(result, exp)
            assert result.freq == '3D'

        for result in [idx - delta, np.subtract(idx, delta)]:
            assert isinstance(result, DatetimeIndex)
            exp = DatetimeIndex(['2010-12-31', '2011-01-01', '2011-01-02'],
                                freq='D', name='x')
            tm.assert_index_equal(result, exp)
            assert result.freq == 'D'

    @pytest.mark.parametrize('names', [('foo', None, None),
                                       ('baz', 'bar', None),
                                       ('bar', 'bar', 'bar')])
    @pytest.mark.parametrize('tz', [None, 'America/Chicago'])
    def test_dti_add_series(self, tz, names):
        # GH#13905
        index = DatetimeIndex(['2016-06-28 05:30', '2016-06-28 05:31'],
                              tz=tz, name=names[0])
        ser = Series([Timedelta(seconds=5)] * 2,
                     index=index, name=names[1])
        expected = Series(index + Timedelta(seconds=5),
                          index=index, name=names[2])

        # passing name arg isn't enough when names[2] is None
        expected.name = names[2]
        assert expected.dtype == index.dtype
        result = ser + index
        tm.assert_series_equal(result, expected)
        result2 = index + ser
        tm.assert_series_equal(result2, expected)

        expected = index + Timedelta(seconds=5)
        result3 = ser.values + index
        tm.assert_index_equal(result3, expected)
        result4 = index + ser.values
        tm.assert_index_equal(result4, expected)

    @pytest.mark.parametrize('names', [(None, None, None),
                                       ('foo', 'bar', None),
                                       ('foo', 'foo', 'foo')])
    def test_dti_add_offset_index(self, tz_naive_fixture, names):
        # GH#18849, GH#19744
        tz = tz_naive_fixture
        dti = pd.date_range('2017-01-01', periods=2, tz=tz, name=names[0])
        other = pd.Index([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)],
                         name=names[1])

        with tm.assert_produces_warning(PerformanceWarning,
                                        clear=[pd.core.arrays.datetimelike]):
            res = dti + other
        expected = DatetimeIndex([dti[n] + other[n] for n in range(len(dti))],
                                 name=names[2], freq='infer')
        tm.assert_index_equal(res, expected)

        with tm.assert_produces_warning(PerformanceWarning,
                                        clear=[pd.core.arrays.datetimelike]):
            res2 = other + dti
        tm.assert_index_equal(res2, expected)

    @pytest.mark.parametrize('names', [(None, None, None),
                                       ('foo', 'bar', None),
                                       ('foo', 'foo', 'foo')])
    def test_dti_sub_offset_index(self, tz_naive_fixture, names):
        # GH#18824, GH#19744
        tz = tz_naive_fixture
        dti = pd.date_range('2017-01-01', periods=2, tz=tz, name=names[0])
        other = pd.Index([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)],
                         name=names[1])

        with tm.assert_produces_warning(PerformanceWarning,
                                        clear=[pd.core.arrays.datetimelike]):
            res = dti - other
        expected = DatetimeIndex([dti[n] - other[n] for n in range(len(dti))],
                                 name=names[2], freq='infer')
        tm.assert_index_equal(res, expected)

    @pytest.mark.parametrize('names', [(None, None, None),
                                       ('foo', 'bar', None),
                                       ('foo', 'foo', 'foo')])
    def test_dti_with_offset_series(self, tz_naive_fixture, names):
        # GH#18849
        tz = tz_naive_fixture
        dti = pd.date_range('2017-01-01', periods=2, tz=tz, name=names[0])
        other = Series([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)],
                       name=names[1])

        expected_add = Series([dti[n] + other[n] for n in range(len(dti))],
                              name=names[2])

        with tm.assert_produces_warning(PerformanceWarning,
                                        clear=[pd.core.arrays.datetimelike]):
            res = dti + other
        tm.assert_series_equal(res, expected_add)

        with tm.assert_produces_warning(PerformanceWarning,
                                        clear=[pd.core.arrays.datetimelike]):
            res2 = other + dti
        tm.assert_series_equal(res2, expected_add)

        expected_sub = Series([dti[n] - other[n] for n in range(len(dti))],
                              name=names[2])

        with tm.assert_produces_warning(PerformanceWarning,
                                        clear=[pd.core.arrays.datetimelike]):
            res3 = dti - other
        tm.assert_series_equal(res3, expected_sub)
@pytest.mark.parametrize('years', [-1, 0, 1])
@pytest.mark.parametrize('months', [-2, 0, 2])
def test_shift_months(years, months):
    """shift_months on the i8 values must match applying
    DateOffset(years=..., months=...) to each element."""
    dti = DatetimeIndex([Timestamp('2000-01-05 00:15:00'),
                         Timestamp('2000-01-31 00:23:00'),
                         Timestamp('2000-01-01'),
                         Timestamp('2000-02-29'),
                         Timestamp('2000-12-31')])
    actual = DatetimeIndex(shift_months(dti.asi8, years * 12 + months))

    raw = [x + pd.offsets.DateOffset(years=years, months=months)
           for x in dti]
    expected = DatetimeIndex(raw)
    tm.assert_index_equal(actual, expected)
| 39.698073 | 79 | 0.574346 |
acf40852845a4c3850134b0a742971a021c1cbbe | 513 | py | Python | source/base/templatetags/formatting.py | mverleg/svsite | 5c9dbcacf81020cf0c1960e337bdd33113acd597 | [
"BSD-3-Clause"
] | null | null | null | source/base/templatetags/formatting.py | mverleg/svsite | 5c9dbcacf81020cf0c1960e337bdd33113acd597 | [
"BSD-3-Clause"
] | 142 | 2015-06-05T07:53:09.000Z | 2020-03-31T18:37:07.000Z | source/base/templatetags/formatting.py | mdilli/svsite | 5c9dbcacf81020cf0c1960e337bdd33113acd597 | [
"BSD-3-Clause"
] | null | null | null |
from django import template
from django.utils.timesince import timesince
from datetime import datetime
from django.utils.timezone import is_aware, utc
register = template.Library()
@register.filter()
def timesince_short(date):
nw = datetime.now(utc if is_aware(date) else None)
if date is None:
return ''
if abs(date - nw).seconds < 120:
return 'just now'
if date < nw:
return '{0:s} ago'.format(*timesince(date, nw).split(','))
else:
return 'in {0:s}'.format(*timesince(nw, date).split(','))
| 21.375 | 60 | 0.707602 |
acf4086f93ee224034c9339d589588bc13dae57f | 876 | py | Python | app/core/admin.py | bamboo2panda/recipe-app-api | a46f73f402f511f6d790df5f4a8185fdc08435f7 | [
"MIT"
] | null | null | null | app/core/admin.py | bamboo2panda/recipe-app-api | a46f73f402f511f6d790df5f4a8185fdc08435f7 | [
"MIT"
] | null | null | null | app/core/admin.py | bamboo2panda/recipe-app-api | a46f73f402f511f6d790df5f4a8185fdc08435f7 | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.utils.translation import gettext as _
from core import models
class UserAdmin(BaseUserAdmin):
    """Django admin configuration for the custom email-based User model."""

    # List users in primary-key order, showing email and name columns.
    ordering = ['id']
    list_display = ['email', 'name']
    # Field layout for the change-user page.
    fieldsets = (
        (None, {'fields': ('email', 'password')}),
        (_('Personal Info'), {'fields': ('name', )}),
        (
            _('Permissions'),
            {'fields': ('is_active', 'is_staff', 'is_superuser')}
        ),
        (_('Important dates'), {'fields': ('last_login',)}),
    )
    # Field layout for the add-user page; password entered twice to confirm.
    add_fieldsets = (
        (None, {
            'classes': ('wide', ),
            'fields': ('email', 'password1', 'password2',)
        }),
    )


# Register the models managed through the admin site.
admin.site.register(models.User, UserAdmin)
admin.site.register(models.Tag)
admin.site.register(models.Ingredient)
admin.site.register(models.Recipe)
| 27.375 | 65 | 0.591324 |
acf4090ef533caf75c9d2cf0e599b068b324e480 | 7,502 | py | Python | main/data_models.py | keith-lewis100/pont-workbench | 010716e115c47ca881645800befffcc97d07f638 | [
"MIT"
] | null | null | null | main/data_models.py | keith-lewis100/pont-workbench | 010716e115c47ca881645800befffcc97d07f638 | [
"MIT"
] | null | null | null | main/data_models.py | keith-lewis100/pont-workbench | 010716e115c47ca881645800befffcc97d07f638 | [
"MIT"
] | null | null | null | #_*_ coding: UTF-8 _*_
import logging
from google.appengine.api import users
from google.appengine.ext import ndb
import db
import mailer
import renderers
import urls
from role_types import RoleType
logger = logging.getLogger('model')
# Singleton datastore entity holding global workbench state (e.g. the
# last-issued reference number).
workbench = db.WorkBench.get_or_insert('main')

# (id, display name) pairs for every committee; the short ids double as
# the committee keys used throughout the app.
committee_labels=[
    ('AMB', 'Ambulance'),
    ('PHC', 'PrimaryHealth'),
    ('SEC', 'SecondaryHealth'),
    ('LIV', 'Livelihoods'),
    ('ENG', 'Engineering'),
    ('EDU', 'Education'),
    ('CHU', 'Churches'),
    ('WEC', 'Wildlife Centre'),
    ('GEN', 'General')]
# One-off bootstrap: on a fresh datastore, seed an initial user with the
# USER_ADMIN role so the application is administrable after first deploy.
if db.User.query().count() == 0:
    user = db.User();
    user.name = 'Keith'
    user.email = 'keith.lewis@pont-mbale.org.uk'
    key = user.put()
    # Role entities are stored as children of their user.
    role = db.Role(parent=key)
    role.type_index = RoleType.USER_ADMIN
    role.committee = ''
    role.put()
class Committee:
    """Lightweight stand-in that mimics the ndb entity/key protocol for a
    committee, which is configuration rather than a stored entity."""

    def __init__(self, id, name):
        self.id, self.name = id, name
        # The object acts as its own key, mirroring entity.key access.
        self.key = self

    def kind(self):
        """Entity kind name, as an ndb key would report it."""
        return 'Committee'

    def urlsafe(self):
        """The committee id doubles as its url-safe key string."""
        return self.id

    def parent(self):
        """Committees are top-level: there is never a parent key."""
        return None
def get_committee_list():
    """Return a Committee wrapper for every configured committee label."""
    committees = []
    for id, name in committee_labels:
        committees.append(Committee(id, name))
    return committees
def lookup_committee(c_id):
    """Return the Committee with the given id, or None if it is unknown."""
    matches = (Committee(id, name)
               for id, name in committee_labels
               if id == c_id)
    return next(matches, None)
def get_next_ref():
    """Increment, persist and return the global workbench reference number."""
    workbench.last_ref_id += 1
    workbench.put()
    return workbench.last_ref_id
def lookup_entity(db_id):
    """Fetch the datastore entity identified by a url-safe key string."""
    return create_key(db_id).get()
def create_key(db_id):
    """Turn a url-safe id into an ndb Key; None or empty ids map to None."""
    if db_id in (None, ""):
        return None
    return ndb.Key(urlsafe=db_id)
def get_parent(entity):
    """Return the logical parent of *entity*.

    Most entities use their datastore parent; Funds are rooted at a
    (virtual) Committee derived from their committee id.
    """
    parent_key = entity.key.parent()
    if parent_key is not None:
        return parent_key.get()
    if entity.key.kind() != 'Fund':
        return None
    return lookup_committee(entity.committee)
def lookup_user_by_email(email):
    """Return the stored User for *email*, or an unsaved placeholder.

    The placeholder has its name set to the email address and no key,
    marking it as not present in the datastore.
    """
    existing = db.User.query(db.User.email == email).get()
    if existing is not None:
        return existing
    placeholder = db.User()
    placeholder.name = email
    return placeholder
def lookup_current_user():
    """Look up the workbench User matching the signed-in Google account."""
    return lookup_user_by_email(users.get_current_user().email())
def logout_url():
    """URL that signs the current user out and redirects back to '/'."""
    return users.create_logout_url('/')
def calculate_transferred_amount(payment):
    """Format a payment as u'\u00a3<sterling>/<shillings> Ush' via its transfer rate.

    Returns "" when the payment, its transfer, or the transfer's exchange
    rate is missing, or when the amount's currency is unrecognised.
    The exchange rate is interpreted as shillings per pound.
    """
    if payment is None or payment.transfer is None:
        return ""
    transfer = payment.transfer.get()
    if transfer.exchange_rate is None:
        return ""
    requested_amount = payment.amount.value
    if payment.amount.currency == 'sterling':
        sterling = requested_amount
        shillings = int(requested_amount * transfer.exchange_rate)
    elif payment.amount.currency == 'ugx':
        sterling = int(requested_amount / transfer.exchange_rate)
        shillings = requested_amount
    else:
        # Bug fix: an unknown currency previously fell through both ifs
        # and crashed with NameError; treat it like the other
        # not-computable cases instead.
        return ""
    return u"£{:,}".format(sterling) + "/" + u"{:,}".format(shillings) + ' Ush'
STATE_CLOSED = 0
def email_entity_creator(entity, user, message):
    """Email an entity's creator that `user` changed the entity's state.

    Skips entities without a creator field, and changes made by the
    creator themselves (logged instead of mailed).
    """
    if not hasattr(entity, 'creator'):
        return
    if user.key == entity.creator:
        logging.info('not sending email same user %s', user.name)
        return
    creator = entity.creator.get()
    link = renderers.render_link(entity.name,
                                 urls.url_for_entity(entity, external=True))
    rows = (entity.key.kind(), link, message, user.name)
    headers = ('EntityType', 'Entity', 'Message', 'User')
    body = renderers.render_single_column(rows, headers)
    mailer.send_email('Workbench Entity State Change', body, [creator.email])
class Model(object):
    """View-model tying a datastore entity to the current user, its forms,
    role/permission checks and audit trail for the workbench UI."""
    def __init__(self, entity, committee=None, table=None):
        """entity: the ndb entity being viewed/edited; committee: committee id
        used for committee-scoped role checks; table: the entity's model class
        (used for state filtering and uniqueness queries)."""
        self.entity = entity
        self.committee = committee
        self.table = table
        self.user = lookup_current_user()
        self.forms = {}
        self.errors=[]
        self.next_entity = None
        self.entity_deleted = False
        self.show_closed = False
    def get_state(self):
        """Current workflow state; entities with no state_index count as 0."""
        return getattr(self.entity, 'state_index', 0)
    def user_has_role(self, role_type):
        """True if the current user holds role_type (scoped to this
        committee for COMMITTEE_ADMIN); unsaved users hold no roles."""
        if self.user.key is None:
            return False
        query = db.Role.query(ancestor=self.user.key).filter(db.Role.type_index==role_type)
        if role_type == RoleType.COMMITTEE_ADMIN:
            query = query.filter(db.Role.committee==self.committee)
        return query.iter().has_next()
    def lookup_users_with_role(self, role_type):
        """All User entities holding role_type (committee-scoped for
        COMMITTEE_ADMIN); Roles are child entities of their User."""
        query = db.Role.query(db.Role.type_index==role_type)
        if role_type == RoleType.COMMITTEE_ADMIN:
            query = query.filter(db.Role.committee==self.committee)
        return query.map(lambda r: r.key.parent().get())
    def add_form(self, action_name, form):
        """Register the WTForms form used by the named action."""
        self.forms[action_name] = form
    def get_form(self, action_name):
        """Form previously registered for action_name, or None."""
        return self.forms.get(action_name)
    def is_stateful(self):
        """Whether the underlying model class tracks a workflow state."""
        return hasattr(self.table, 'state_index')
    def apply_query(self, entity_query):
        """Fetch entities, filtered by state: closed (== 0) when show_closed
        is set, otherwise open (> 0); stateless tables fetch unfiltered."""
        if not hasattr(self.table, 'state_index'):
            return entity_query.fetch()
        if self.show_closed:
            return entity_query.filter(self.table.state_index == 0).fetch()
        else:
            return entity_query.filter(self.table.state_index > 0).fetch()
    def perform_create(self, action_name):
        """Validate the action's form, store self.entity as a new record
        (stamping its creator) and audit it; returns success."""
        form = self.get_form(action_name)
        if not form.validate():
            return False
        if not self.check_uniqueness(form):
            return False
        entity = self.entity
        form.populate_obj(entity)
        if hasattr(entity, 'creator'):
            entity.creator = self.user.key
        entity.put()
        self.audit(action_name, "Create performed")
        return True
    def check_uniqueness(self, form):
        """Reject a form name already used by a sibling entity (same parent);
        forms without a name field always pass."""
        if not hasattr(form, 'name'):
            return True
        name = form.name.data
        if name == self.entity.name:
            return True
        parent_key = None
        if self.entity.key:
            parent_key = self.entity.key.parent()
        existing = self.table.query(self.table.name == name, ancestor=parent_key).count(1)
        if existing > 0:
            form.name.errors = [ 'Entity named %s already exists' % name ]
            return False
        return True
    def perform_update(self, action_name):
        """Validate the action's form, save changes and audit; returns success."""
        form = self.get_form(action_name)
        if not form.validate():
            return False
        if not self.check_uniqueness(form):
            return False
        form.populate_obj(self.entity)
        self.entity.put()
        self.audit(action_name, "Update performed")
        return True
    def perform_close(self, action_name):
        """Move the entity to the closed state, then email + audit."""
        self.entity.state_index = STATE_CLOSED
        self.entity.put()
        return self.email_and_audit(action_name, "%s performed" % action_name.title())
    def add_error(self, error_text):
        """Queue an error message for display."""
        self.errors.append(error_text)
    def audit(self, action_name, message, entity=None, parent_key=None):
        """Store and return an AuditRecord for entity (defaults to self.entity),
        attributed to the current user."""
        if not entity:
            entity = self.entity
        audit = db.AuditRecord()
        audit.entity = entity.key
        audit.parent = parent_key
        audit.user = self.user.key
        audit.action = action_name
        audit.message = message
        audit.put()
        return audit
    def email_and_audit(self, action_name, message):
        """Audit the action and notify the entity's creator by email."""
        audit = self.audit(action_name, message)
        email_entity_creator(self.entity, self.user, message)
        return audit
    def __repr__(self):
        return 'Model(%s, %s)' % (repr(self.entity), self.committee)
| 31 | 95 | 0.6253 |
acf40d8a220591ef1bc3c99dae767e044ba62a46 | 792 | py | Python | tests/test_api_v1_status_log_system.py | pincher95/pfsense-api | 001a4b8a1ec39138668d6d92b3c9d0c89a7f1b45 | [
"Apache-2.0"
] | null | null | null | tests/test_api_v1_status_log_system.py | pincher95/pfsense-api | 001a4b8a1ec39138668d6d92b3c9d0c89a7f1b45 | [
"Apache-2.0"
] | null | null | null | tests/test_api_v1_status_log_system.py | pincher95/pfsense-api | 001a4b8a1ec39138668d6d92b3c9d0c89a7f1b45 | [
"Apache-2.0"
] | null | null | null | # Copyright 2022 Jared Hendrickson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import e2e_test_framework
class APIE2ETestStatusLogSystem(e2e_test_framework.APIE2ETest):
    # Declarative end-to-end test of the pfSense REST API system-log endpoint:
    # a single GET request against `uri`.
    uri = "/api/v1/status/log/system"
    get_tests = [{"name": "Read the system log"}]
# NOTE(review): instantiation appears to trigger the framework's test run —
# confirm against e2e_test_framework.APIE2ETest.
APIE2ETestStatusLogSystem()
| 33 | 74 | 0.762626 |
acf40da3e2d449f57e501ef86d88a20bb04a982a | 1,540 | py | Python | setup.py | GitBib/deluge-utilities | c4f29e01126925676eef3c7a26389f2638804938 | [
"Apache-2.0"
] | null | null | null | setup.py | GitBib/deluge-utilities | c4f29e01126925676eef3c7a26389f2638804938 | [
"Apache-2.0"
] | null | null | null | setup.py | GitBib/deluge-utilities | c4f29e01126925676eef3c7a26389f2638804938 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import os
from io import open
from setuptools import find_packages, setup
def read(filename):
    """Return the UTF-8 text of a file located relative to this script."""
    here = os.path.dirname(__file__)
    with open(os.path.join(here, filename), encoding="utf-8") as fp:
        return fp.read()
version = __import__("deluge_utilities").__version__
setup(
name="deluge-utilities",
version=version,
description="Is a set of utilities to help you work with Deluge.",
long_description=read("README.md"),
long_description_content_type="text/markdown",
author="Ivan Vyalov",
author_email="job@bnff.website",
url="https://github.com/GitBib/deluge-utilities",
download_url=f"https://github.com/GitBib/deluge-utilities/archive/{version}.zip",
license="Apache License, Version 2.0, see LICENSE file",
packages=find_packages(exclude=["tests", "testapp"]),
install_requires=["setuptools", "deluge-client"],
py_modules=["batch"],
entry_points="""
[console_scripts]
deluge_utilities = batch:master
""",
include_package_data=True,
zip_safe=False,
classifiers=[
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: Implementation :: PyPy",
"Programming Language :: Python :: Implementation :: CPython",
],
)
| 32.083333 | 85 | 0.67013 |
acf40e4f7346910857edda2c3549f72d24dd5d09 | 701 | py | Python | django_datawatch/management/commands/datawatch_run_checks.py | kseniyapotter/django-datawatch | b42648219940b85ecb5d7fd6f4d84686ecd272e7 | [
"MIT"
] | 10 | 2016-09-04T21:10:57.000Z | 2021-06-19T06:37:28.000Z | django_datawatch/management/commands/datawatch_run_checks.py | kseniyapotter/django-datawatch | b42648219940b85ecb5d7fd6f4d84686ecd272e7 | [
"MIT"
] | 44 | 2016-09-06T13:35:58.000Z | 2022-01-25T10:05:26.000Z | django_datawatch/management/commands/datawatch_run_checks.py | kseniyapotter/django-datawatch | b42648219940b85ecb5d7fd6f4d84686ecd272e7 | [
"MIT"
] | 7 | 2016-09-28T14:23:28.000Z | 2019-06-28T14:39:20.000Z | # -*- coding: UTF-8 -*-
from django.core.management.base import BaseCommand
from django_datawatch.datawatch import Scheduler
class Command(BaseCommand):
    """Management command that triggers datawatch check execution."""

    def add_arguments(self, parser):
        """Register the --force and --slug command line options."""
        parser.add_argument(
            '--force', action='store_true', dest='force', default=False,
            help='Execute all checks.')
        parser.add_argument(
            '--slug', dest='slug', default=None,
            help='Slug of check to refresh, all checks will be refreshed if slug is not provided')

    def handle(self, force, slug, *args, **options):
        """Delegate to the Scheduler with the parsed options."""
        scheduler = Scheduler()
        scheduler.run_checks(force=force, slug=slug)
| 28.04 | 98 | 0.579173 |
acf40f8f3af58e2ea3fefd2545295421238f9c69 | 51 | py | Python | taxman/person/person.py | robinmitra/taxman | a7afc0b4a1449cd46e90cd3af05f4a5d65a8acbf | [
"MIT"
] | 3 | 2019-01-07T13:08:59.000Z | 2021-01-11T10:34:52.000Z | taxman/person/person.py | robinmitra/taxman | a7afc0b4a1449cd46e90cd3af05f4a5d65a8acbf | [
"MIT"
] | null | null | null | taxman/person/person.py | robinmitra/taxman | a7afc0b4a1449cd46e90cd3af05f4a5d65a8acbf | [
"MIT"
] | null | null | null | class Person:
def __init__(self):
pass
| 12.75 | 23 | 0.588235 |
acf40f9f47f4eeb38ebb72b028c82400a21c6ce0 | 113 | py | Python | direct/__init__.py | cnmy-ro/direct-custom | a354e82d4f4b7598037e7b9dc73456fc361820ac | [
"Apache-2.0"
] | null | null | null | direct/__init__.py | cnmy-ro/direct-custom | a354e82d4f4b7598037e7b9dc73456fc361820ac | [
"Apache-2.0"
] | 7 | 2021-11-30T07:39:48.000Z | 2021-12-28T16:09:27.000Z | direct/__init__.py | cnmy-ro/direct-custom | a354e82d4f4b7598037e7b9dc73456fc361820ac | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright (c) DIRECT Contributors
# Package metadata consumed by tooling (e.g. packaging scripts).
__author__ = """direct contributors"""
__version__ = "1.0.1"
| 18.833333 | 38 | 0.707965 |
acf40ffbe3cb27221f509fdae7d3b60019802cd1 | 5,186 | py | Python | ltcl/modules/pnl_vae.py | anonymous-authors-iclr2022-481/ltcl | 0d8902228fa6c37f875bb60c4d16988462a9655a | [
"MIT"
] | 8 | 2021-10-16T08:35:37.000Z | 2022-02-10T09:25:50.000Z | leap/modules/pnl_vae.py | weirayao/leap | 8d10b8413d02d3be49d5c02a13a0aa60a741d8da | [
"MIT"
] | null | null | null | leap/modules/pnl_vae.py | weirayao/leap | 8d10b8413d02d3be49d5c02a13a0aa60a741d8da | [
"MIT"
] | 1 | 2021-11-30T04:06:43.000Z | 2021-11-30T04:06:43.000Z | """Post-Nonlinear Causal Model Estimated by Temporal VAE"""
import torch
from torch import nn
class LinearUnit(nn.Module):
    """Linear layer followed by an optional BatchNorm1d and a nonlinearity.

    Note: the default `nonlinearity` module is created once at definition
    time and shared across instances; LeakyReLU is stateless, so that is
    harmless.
    """

    def __init__(self, in_features, out_features, batchnorm=False, nonlinearity=nn.LeakyReLU(0.2)):
        super(LinearUnit, self).__init__()
        # Assemble the stack as a list so the batch-norm stage is easy to toggle.
        stages = [nn.Linear(in_features, out_features)]
        if batchnorm is True:
            stages.append(nn.BatchNorm1d(out_features))
        stages.append(nonlinearity)
        self.model = nn.Sequential(*stages)

    def forward(self, x):
        """Apply linear -> [batchnorm] -> nonlinearity."""
        return self.model(x)
class TemporalVAESynthetic(nn.Module):
    # Temporal VAE with MLP encoder/decoder and an LSTM-cell prior over the
    # latent sequence.
    def __init__(
        self,
        y_dim=2,
        input_dim=2,
        hidden_dim=128,
        negative_slope = 0.2,
        factorised=True):
        """Synthetic uses 3-layer MLP+leakly RELU as mixing/unmixing function
        Args:
            y_dim: Dimensions of latent causal factors.
            input_dim: Dimensions of observation data.
            hidden_dim: Dimensions of MLP hidden layer.
            negative_slope: LeakyRELU slope.
        """
        super().__init__()
        self.y_dim = y_dim
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.factorised = factorised
        self.encoder = nn.Sequential(
                                       nn.Linear(input_dim, hidden_dim),
                                       nn.LeakyReLU(negative_slope),
                                       nn.Linear(hidden_dim, hidden_dim),
                                       nn.LeakyReLU(negative_slope),
                                       nn.Linear(hidden_dim, hidden_dim)
                                    )
        # Decoder uses slope 1/negative_slope, i.e. the inverse of the
        # encoder's LeakyReLU.
        # NOTE(review): the decoder's first layer takes `input_dim` features —
        # confirm the latent fed to decode_frames really has that width.
        self.decoder = nn.Sequential(
                                       nn.Linear(input_dim, hidden_dim),
                                       nn.LeakyReLU(1/negative_slope),
                                       nn.Linear(hidden_dim, hidden_dim),
                                       nn.LeakyReLU(1/negative_slope),
                                       nn.Linear(hidden_dim, hidden_dim)
                                    )
        # Prior of content is a uniform Gaussian and prior of the dynamics is an LSTM
        self.z_prior_lstm = nn.LSTMCell(self.y_dim, self.hidden_dim)
        self.z_prior_mean = nn.Linear(self.hidden_dim, self.y_dim)
        self.z_prior_logvar = nn.Linear(self.hidden_dim, self.y_dim)
        if self.factorised is True:
            # Paper says : 1 Hidden Layer MLP. Last layers shouldn't have any nonlinearities
            self.z_inter = LinearUnit(self.hidden_dim, self.hidden_dim, batchnorm=False)
            self.z_mean = nn.Linear(self.hidden_dim, self.y_dim)
            self.z_logvar = nn.Linear(self.hidden_dim, self.y_dim)
        else:
            # TODO: Check if one affine transform is sufficient. Paper says distribution is parameterised by RNN over LSTM. Last layer shouldn't have any nonlinearities
            self.z_lstm = nn.LSTM(self.hidden_dim, self.hidden_dim, 1, bidirectional=True, batch_first=True)
            self.z_rnn = nn.RNN(self.hidden_dim * 2, self.hidden_dim, batch_first=True)
            # Each timestep is for each z so no reshaping and feature mixing
            self.z_mean = nn.Linear(self.hidden_dim, self.y_dim)
            self.z_logvar = nn.Linear(self.hidden_dim, self.y_dim)
    def sample_y(self, batch_size, random_sampling=True):
        """Draw one latent step (means, logvars, samples) from the LSTM prior.
        NOTE(review): `device` is not defined in this module — presumably a
        module-level global set elsewhere; confirm before use.
        """
        y_out = None
        y_means = None
        y_logvars = None
        # All states are initially set to 0, especially z_0 = 0
        y_t = torch.zeros(batch_size, self.y_dim, device=device)
        y_mean_t = torch.zeros(batch_size, self.y_dim, device=device)
        y_logvar_t = torch.zeros(batch_size, self.y_dim, device=device)
        h_t = torch.zeros(batch_size, self.hidden_dim, device=device)
        c_t = torch.zeros(batch_size, self.hidden_dim, device=device)
        h_t, c_t = self.z_prior_lstm(y_t, (h_t, c_t))
        y_mean_t = self.z_prior_mean(h_t)
        y_logvar_t = self.z_prior_logvar(h_t)
        y_t = self.reparameterize(y_mean_t, y_logvar_t, random_sampling)
        # NOTE(review): y_out is always None at this point, so only the first
        # branch can ever run; the else branch looks like a leftover from a
        # per-timestep loop that was removed.
        if y_out is None:
            # If z_out is none it means z_t is z_1, hence store it in the format [batch_size, 1, y_dim]
            y_out = y_t.unsqueeze(1)
            y_means = y_mean_t.unsqueeze(1)
            y_logvars = y_logvar_t.unsqueeze(1)
        else:
            # If z_out is not none, z_t is not the initial z and hence append it to the previous z_ts collected in z_out
            y_out = torch.cat((y_out, y_t.unsqueeze(1)), dim=1)
            y_means = torch.cat((y_means, y_mean_t.unsqueeze(1)), dim=1)
            y_logvars = torch.cat((y_logvars, y_logvar_t.unsqueeze(1)), dim=1)
        return y_means, y_logvars, y_out
    def encode_frames(self, x):
        """Flatten frames to (N, input_dim) and run the MLP encoder."""
        x = x.view(-1, self.input_dim)
        yt = self.encoder(x)
        # yt = x.view(-1, self.hidden_dim)
        return yt
    def decode_frames(self, yt):
        """Run the MLP decoder and reshape back to (N, input_dim)."""
        x = self.decoder(yt)
        x = x.view(-1, self.input_dim)
        return x
    def reparameterize(self, mean, logvar, random_sampling=True):
        """Gaussian reparameterization trick; returns the mean when not sampling."""
        if random_sampling is True:
            eps = torch.randn_like(logvar)
            std = torch.exp(0.5*logvar)
            z = mean + eps*std
            return z
        else:
            return mean
    def encode_y(self, yt):
        """Map encoder features to (mean, logvar, sampled y, intermediate yt_)."""
        mean = self.z_mean(yt)
        logvar = self.z_logvar(yt)
        # NOTE(review): z_inter is applied twice through the SAME layer —
        # confirm this is intended rather than two distinct layers.
        yt_ = self.z_inter(yt)
        yt_ = self.z_inter(yt_)
        return mean, logvar, self.reparameterize(mean, logvar, self.training), yt_
    def forward(self, xt, xt_):
        # Past frames/snapshots: xt = x_t (batch_size, length, size)
        # Current frame/snapshot: xt_ = x_(t+1) (batch_size, length, size)
        yt = self.encode_frames(xt)
        y_mean_prior, y_logvar_prior, _ = self.sample_y(xt.size(0), random_sampling=self.training)
        yt_mean, yt_logvar, yt, yt_ = self.encode_y(yt)
        # yt ~ Guassian distribution, while we do not constraint on yt_
        recon_xt = self.decode_frames(yt)
        recon_xt_ = self.decode_frames(yt_)
        return yt_mean, yt_logvar, yt, yt_, y_mean_prior, y_logvar_prior, recon_xt, recon_xt_
acf41023cec2c059c435ba8c17111f76e5e3d3fd | 10,828 | py | Python | vermin/config.py | pybpc/vermin | aa66cf0701ae60a9bf41c254e7e7612827d152e3 | [
"MIT"
] | null | null | null | vermin/config.py | pybpc/vermin | aa66cf0701ae60a9bf41c254e7e7612827d152e3 | [
"MIT"
] | null | null | null | vermin/config.py | pybpc/vermin | aa66cf0701ae60a9bf41c254e7e7612827d152e3 | [
"MIT"
] | null | null | null | import io
import sys
import os
# novm
try:
from configparser import ConfigParser, ParsingError
except ImportError:
from ConfigParser import SafeConfigParser as ConfigParser, ParsingError
from .backports import Backports
from .features import Features
from .formats import Format, DefaultFormat
from .constants import DEFAULT_PROCESSES, CONFIG_FILE_NAMES, CONFIG_SECTION, PROJECT_BOUNDARIES
from .utility import parse_target
from . import formats
class Config:
def __init__(self):
self.reset()
def reset(self):
self.__quiet = False
self.__verbose = 0
self.__print_visits = False
self.__processes = DEFAULT_PROCESSES
self.__ignore_incomp = False
self.__lax = False
self.__pessimistic = False
self.__show_tips = True
self.__analyze_hidden = False
self.__exclusions = set()
self.__backports = set()
self.__features = set()
self.__targets = []
self.set_format(DefaultFormat())
def override_from(self, other_config):
self.__quiet = other_config.quiet()
self.__verbose = other_config.verbose()
self.__print_visits = other_config.print_visits()
self.__processes = other_config.processes()
self.__ignore_incomp = other_config.ignore_incomp()
self.__lax = other_config.lax()
self.__pessimistic = other_config.pessimistic()
self.__show_tips = other_config.show_tips()
self.__analyze_hidden = other_config.analyze_hidden()
self.__exclusions = other_config.exclusions()
self.__backports = other_config.backports()
self.__features = other_config.features()
self.__targets = other_config.targets()
self.set_format(other_config.format())
def __repr__(self):
return """{}(
quiet = {}
verbose = {}
print_visits = {}
processes = {}
ignore_incomp = {}
lax = {}
pessimistic = {}
show_tips = {}
analyze_hidden = {}
exclusions = {}
backports = {}
features = {}
targets = {}
format = {}
)""".format(self.__class__.__name__, self.quiet(), self.verbose(), self.print_visits(),
self.processes(), self.ignore_incomp(), self.lax(), self.pessimistic(),
self.show_tips(), self.analyze_hidden(), self.exclusions(), list(self.backports()),
list(self.features()), self.targets(), self.format().name())
@staticmethod
def parse_file(path):
try:
return Config.parse_fp(open(path, mode="r"), filename=path)
except Exception as ex:
print("Could not load config file: {}".format(path))
print(ex)
return None
@staticmethod
def parse_data(data):
try:
return Config.parse_fp(io.StringIO(data))
except Exception as ex:
print("Could not load config data")
print(ex)
return None
@staticmethod
def parse_fp(fp, filename=None):
filename = filename or "<???>"
config = Config()
def encode_list(iterable):
return "\n".join(iterable)
# Parser with default values from initial instance.
parser = ConfigParser({
"quiet": str(config.quiet()),
"verbose": str(config.verbose()),
"print_visits": str(config.print_visits()),
"processes": str(config.processes()),
"ignore_incomp": str(config.ignore_incomp()),
"lax": str(config.lax()),
"pessimistic": str(config.pessimistic()),
"show_tips": str(config.show_tips()),
"analyze_hidden": str(config.analyze_hidden()),
"exclusions": encode_list(config.exclusions()),
"backports": encode_list(config.backports()),
"features": encode_list(config.features()),
"targets": encode_list(config.targets()),
"format": config.format().name(),
}, allow_no_value=True)
try:
if sys.version_info < (3, 2):
parser.readfp(fp, filename=filename) # pylint: disable=deprecated-method
else:
# `read_file` supercedes `readfp` since 3.2.
def readline_generator(fp):
line = fp.readline()
while line:
yield line
line = fp.readline()
parser.read_file(readline_generator(fp), source=filename)
except Exception as ex:
print("Could not load config: {}".format(filename))
print(ex)
return None
if not parser.has_section(CONFIG_SECTION):
print("Missing `[{}]` section in config: {}".format(CONFIG_SECTION, filename))
return None
def getbool(option):
try:
return parser.getboolean(CONFIG_SECTION, option)
except ValueError:
return str(True) == parser.defaults()[option]
def getuint(option):
value = parser.get(CONFIG_SECTION, option)
if len(value) == 0:
return int(parser.defaults()[option])
value = int(value)
if value < 0:
raise ValueError("Not a positive integer (0+): {}".format(option))
return value
def getstringlist(option):
keepends = False
return parser.get(CONFIG_SECTION, option).strip().splitlines(keepends)
config.set_quiet(getbool("quiet"))
config.set_verbose(getuint("verbose"))
config.set_print_visits(getbool("print_visits"))
config.set_processes(getuint("processes"))
config.set_ignore_incomp(getbool("ignore_incomp"))
config.set_lax(getbool("lax"))
config.set_pessimistic(getbool("pessimistic"))
config.set_show_tips(getbool("show_tips"))
config.set_analyze_hidden(getbool("analyze_hidden"))
for exclusion in getstringlist("exclusions"):
config.add_exclusion(exclusion)
for backport in getstringlist("backports"):
if not config.add_backport(backport):
print("Unknown backport: {}".format(backport))
return None
for feature in getstringlist("features"):
if not config.enable_feature(feature):
print("Unknown feature: {}".format(feature))
return None
targets = getstringlist("targets")
for target in targets:
if not config.add_target(target):
print("Invalid target: {}".format(target))
return None
fmt_str = parser.get(CONFIG_SECTION, "format").strip()
fmt = formats.from_name(fmt_str)
if fmt is None:
print("Unknown format: {}".format(fmt_str))
return None
config.set_format(fmt)
return config
@staticmethod
def detect_config_file(init_folder=None):
"""Detects Vermin config file starting from `init_folder` or CWD. It proceeds through parent
folders until root or project boundaries are reached. Each candidate is checked to be an INI with a
`[vermin]` section in it."""
folder = init_folder or os.getcwd()
while True:
for candidate in CONFIG_FILE_NAMES:
look_for = os.path.join(folder, candidate)
if os.path.exists(look_for):
try:
cp = ConfigParser()
if look_for in cp.read(look_for) and cp.has_section(CONFIG_SECTION):
return look_for
except ParsingError:
pass
# Stop if didn't find config and is at project boundary, which means it has ".git/" or
# similar.
stop = False
for boundary in PROJECT_BOUNDARIES:
path = os.path.join(folder, boundary)
if os.path.exists(path):
stop = True
break
if stop:
break
# Go up one level and stop at root.
old_folder = folder
folder = os.path.abspath(os.path.join(folder, ".."))
if folder == old_folder:
break
return None
def quiet(self):
return self.__quiet
def set_quiet(self, quiet):
self.__quiet = quiet
def verbose(self):
return self.__verbose
def set_verbose(self, verbose):
self.__verbose = verbose
def print_visits(self):
return self.__print_visits
def set_print_visits(self, enable):
self.__print_visits = enable
def processes(self):
return self.__processes
def set_processes(self, processes):
self.__processes = processes if processes > 0 else DEFAULT_PROCESSES
def ignore_incomp(self):
return self.__ignore_incomp
def set_ignore_incomp(self, ignore):
self.__ignore_incomp = ignore
def lax(self):
return self.__lax
def set_lax(self, lax):
self.__lax = lax
def add_exclusion(self, name):
self.__exclusions.add(name)
def add_exclusion_file(self, filename):
try:
with open(filename, mode="r") as f:
for line in f.readlines():
self.add_exclusion(line.strip())
except Exception as ex:
print(ex)
def clear_exclusions(self):
self.__exclusions.clear()
def exclusions(self):
res = list(self.__exclusions)
res.sort()
return res
def is_excluded(self, name):
return name in self.__exclusions
def is_excluded_kwarg(self, function, keyword):
return "{}({})".format(function, keyword) in self.__exclusions
def is_excluded_codecs_error_handler(self, name):
return "ceh={}".format(name) in self.__exclusions
def is_excluded_codecs_encoding(self, name):
return "ce={}".format(name) in self.__exclusions
def add_backport(self, name):
if not Backports.is_backport(name):
return False
self.__backports.add(name)
return True
def clear_backports(self):
self.__backports.clear()
def backports(self):
return self.__backports
def enable_feature(self, name):
if not Features.is_feature(name):
return False
self.__features.add(name)
return True
def has_feature(self, name):
return name in self.__features
def clear_features(self):
self.__features.clear()
def features(self):
return self.__features
def set_format(self, fmt):
assert(isinstance(fmt, Format))
fmt.set_config(self)
self.__format = fmt
def format(self):
return self.__format
def set_pessimistic(self, pessimistic):
self.__pessimistic = pessimistic
def pessimistic(self):
return self.__pessimistic
def set_show_tips(self, show_tips):
self.__show_tips = show_tips
def show_tips(self):
return self.__show_tips
def set_analyze_hidden(self, analyze_hidden):
self.__analyze_hidden = analyze_hidden
def analyze_hidden(self):
return self.__analyze_hidden
def add_target(self, target):
if len(self.targets()) == 2:
print("A maximum of two targets can be specified!")
return False
# pylint: disable=undefined-variable
if isinstance(target, str) or\
(sys.version_info.major == 2 and isinstance(target, unicode)): # novm
target = parse_target(target)
if target is None:
return None
if len(target) != 2 or not isinstance(target[0], bool) or not isinstance(target[1], tuple) or\
len(target[1]) != 2:
return False
# Add target and sort for target versions, not boolean values.
self.__targets.append(target)
self.__targets.sort(key=lambda t: t[1])
return True
def clear_targets(self):
self.__targets = []
def targets(self):
return self.__targets
| 28.645503 | 99 | 0.670669 |
acf410b036845ac994a8bd7afee142e015afbd19 | 5,076 | py | Python | script/ff.py | difosschan/easy-sh-alias | 283e3e7a3966b2c3ebd98ca97e516ef73de04534 | [
"MIT"
] | 8 | 2018-09-25T07:51:12.000Z | 2021-06-09T04:51:18.000Z | script/ff.py | difosschan/easy-sh-alias | 283e3e7a3966b2c3ebd98ca97e516ef73de04534 | [
"MIT"
] | null | null | null | script/ff.py | difosschan/easy-sh-alias | 283e3e7a3966b2c3ebd98ca97e516ef73de04534 | [
"MIT"
] | 6 | 2018-08-03T11:48:00.000Z | 2021-06-09T04:51:13.000Z | #!/usr/bin/python3
#coding:utf-8
import sys, os, subprocess
from typing import Tuple
def Usage():
    """Print command line help for ff.

    Bug fix: the old code called msg.decode('utf8') when LANG == 'C.GBK',
    but str has no decode() in Python 3, so that branch always raised
    AttributeError.  The message is plain ASCII, so it is printed as-is.
    """
    msg = '''Usage: find -name $2 | grep $1
example :
    ff fileName: Find files
        find . -name "Makefile"
    ff word .py : Find content in files
        find . -type f -name "*.[py]" | xargs -I{} sh -c "grep --color=yes -anH 'word'"
    ff home -d : Find Directory
        find . -type d | grep --color=yes -anH home '''
    print(msg)
def CheckContent():
    """Abort with an error on stderr when no search content was parsed."""
    if content != '':
        return
    sys.stderr.write('[ERROR] miss content:\n\t%s\n' % ' '.join(sys.argv))
    sys.exit()
def db(*args):
    """Print debug output when the module-level g_debug flag is set.

    (The original declared `global g_debug`, which is unnecessary for a
    read-only access and has been dropped; behavior is unchanged.)
    """
    if g_debug:
        print(args)
# Buckets that ParseArg() fills: each key names a search strategy and the
# list holds the arguments that selected it (consumed by CombiseCommand()).
fileset = {
    'just find':[],
    'grep all object':[],
    'grep all file':[],
    'find dir':[],
    'grep some files':[],
    'grep some postfix':[],
}
# The text to grep for; set by ParseArg() from the first unclassified argument.
content = ''
def ParseArg():
    """Populate the module-level `fileset` buckets and `content` from sys.argv.

    Each argument is classified into one search-strategy bucket; the first
    argument that matches no special form becomes the grep `content`.

    Fixes: the original appended via a conditional expression whose two
    branches were identical (`x.append(a) if x == [] else x.append(a)`) —
    a no-op condition now replaced by a plain append.  The final `else`
    branch filling 'grep all file' was unreachable (the two preceding
    elifs on `content` cover all cases) and has been removed.
    """
    global fileset, content, g_debug
    if '-d' in sys.argv:
        g_debug = 1
    db(sys.argv)
    if len(sys.argv) == 1:
        Usage()
        sys.exit()
    for arg in sys.argv[1:]:
        if len(sys.argv) == 2:
            # Single argument: plain find by file name.
            key = 'just find'
            fileset[key].append(arg)
            db(key, fileset[key])
        elif arg == '.o':
            # Search symbols inside object files.
            key = 'grep all object'
            fileset[key].append(arg)
            db(key, fileset[key])
        elif arg == '-d':
            key = 'find dir'
            fileset[key].append(arg)
            db(key, fileset[key])
        elif arg.startswith('.'):
            # '.py'-style argument: remember the extension without the dot.
            key = 'grep some postfix'
            fileset[key].append(arg[1:])
            db(key, fileset[key])
        elif content != '':
            # Content already captured: remaining args are file names.
            key = 'grep some files'
            fileset[key].append(arg)
            db(key, fileset[key])
        else:
            # First plain argument becomes the grep content.
            content = arg
def CombiseCommand():
    # (Name kept for caller compatibility; "Combine" is meant.)  Build the
    # shell pipeline matching whichever fileset bucket ParseArg() populated.
    global fileset, content
    if fileset['just find'] != []:
        key = 'just find'
        filename = ''.join( fileset[key] )
        if filename.startswith('..'):
            # Leading '..' means: search for the literal dotted name.
            command = ''' find . -name "%s" ''' % filename[1:]
        elif filename.startswith('.'):
            # Bare extension: match anything ending with it.
            command = ''' find . -name "*%s" ''' % filename
        else:
            command = ''' find . -name "%s" ''' % filename
    elif fileset['grep all object'] != []:
        key = 'grep all object'
        CheckContent()
        #alias fs="find . -type f -name '*.*o'|awk '{printf (\"nm -A %s|cat -n\n\", \$0) }'|sh|grep -a --color=yes"
        command = ''' find . -type f -name "*.o" | awk '{printf ("nm -A %%s|cat -n\\n", $0) }' | sh | grep --color=yes -anH '%s' ''' % content
    elif fileset['grep all file'] != []:
        key = 'grep all file'
        CheckContent()
        command = ''' find . -type f | xargs -I{} sh -c "grep --color=yes -anH '%s' {}" ''' % content
    elif fileset['find dir'] != []:
        key = 'find dir'
        CheckContent()
        command = ''' find . -type d | grep --color=yes -anH '%s' ''' % content
    elif len( fileset['grep some postfix'] ) != 0 or len( fileset['grep some files'] ) != 0:
        CheckContent()
        i = 0
        postfix=''
        files=''
        key = 'grep some postfix'
        if len( fileset[key] ) != 0:
            if len( fileset[key] ) == 1:
                postfix = '''-name "*.%s"''' % ''.join( fileset[key] )
                i+=1
            else:
                # NOTE(review): "[a|b]" is a character class that matches a
                # single character (including '|'), not alternative
                # extensions — confirm multi-extension search works as
                # intended.
                postfix = '''-name "*.[%s]"''' % '|'.join( fileset[key] )
                i+=1
        key = 'grep some files'
        if len( fileset[key] ) != 0:
            for f in fileset[key]:
                if i == 0:
                    files = '''-name "%s"''' % f
                    i+=1
                else:
                    files += ''' -o -name "%s"''' % f
                    i+=1
        command = ''' find . %s%s | xargs -I{} sh -c "grep --color=yes -anH '%s' {}" ''' % ( postfix, files, content )
    # NOTE(review): if no bucket matched, `command` is unbound here and this
    # raises UnboundLocalError.
    return command
def run_command(args) -> Tuple[str, str]:
    """Run a shell command, returning (stdout, stderr) with the final
    character (the trailing newline) stripped from each stream.

    Bug fix: the exception path referenced `traceback` without importing it,
    so any Popen failure raised NameError instead of reporting the error.
    """
    import traceback  # local: only needed on the failure path
    out = err = ""
    try:
        with subprocess.Popen(args, shell=True,
                              stdout=subprocess.PIPE, stdin=subprocess.PIPE,
                              stderr=subprocess.PIPE) as p:
            out = p.stdout.read().decode()[:-1]
            err = p.stderr.read().decode()[:-1]
    except Exception:
        err = traceback.format_exc()
    return out, err
def main():
    """Parse arguments, build the shell pipeline, run it and print stdout."""
    ParseArg()
    cmd = CombiseCommand()
    sys.stderr.write('%s\n' % cmd)
    stdout, _ = run_command(cmd)
    print(stdout)
# Global debug flag; ParseArg() flips it to 1 when '-d' is given.
g_debug = 0
if __name__ == "__main__":
    main()
| 31.725 | 142 | 0.489165 |
acf4114c7d7f8beb578f18a4a269b9fcad59ca86 | 3,795 | py | Python | .github/contrib/macdeploy/custom_dsstore.py | minblock/telnetcoin | 9ff7f7561e9246290321777b7da8f716ce6cd98c | [
"MIT"
] | null | null | null | .github/contrib/macdeploy/custom_dsstore.py | minblock/telnetcoin | 9ff7f7561e9246290321777b7da8f716ce6cd98c | [
"MIT"
] | null | null | null | .github/contrib/macdeploy/custom_dsstore.py | minblock/telnetcoin | 9ff7f7561e9246290321777b7da8f716ce6cd98c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2013-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from __future__ import division,print_function,unicode_literals
import biplist
from ds_store import DSStore
from mac_alias import Alias
import sys
output_file = sys.argv[1]      # path of the .DS_Store file to write
package_name_ns = sys.argv[2]  # volume/package name used for the DMG

ds = DSStore.open(output_file, 'w+')
# Finder window settings ('bwsp') for the volume root: fixed window bounds,
# with the sidebar/toolbar/status bar hidden so only the icons show.
ds['.']['bwsp'] = {
    'ShowStatusBar': False,
    'WindowBounds': b'{{300, 280}, {500, 343}}',
    'ContainerShowSidebar': False,
    'SidebarWidth': 0,
    'ShowTabView': False,
    'PreviewPaneVisibility': False,
    'ShowToolbar': False,
    'ShowSidebar': False,
    'ShowPathbar': True
}
icvp = {
'gridOffsetX': 0.0,
'textSize': 12.0,
'viewOptionsVersion': 1,
'backgroundImageAlias': b'\x00\x00\x00\x00\x02\x1e\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x94\\\xb0H+\x00\x05\x00\x00\x00\x98\x0fbackground.tiff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99\xd19\xb0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\r\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b.background\x00\x00\x10\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x11\x00\x08\x00\x00\xd19\xb0\xf8\x00\x00\x00\x01\x00\x04\x00\x00\x00\x98\x00\x0e\x00 \x00\x0f\x00b\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00.\x00t\x00i\x00f\x00f\x00\x0f\x00\x02\x00\x00\x00\x12\x00\x1c/.background/background.tiff\x00\x14\x01\x06\x00\x00\x00\x00\x01\x06\x00\x02\x00\x00\x0cMacintosh HD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xce\x97\xab\xc3H+\x00\x00\x01\x88[\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02u\xab\x8d\xd1\x94\\\xb0devrddsk\xff\xff\xff\xff\x00\x00\t \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07bitcoin\x00\x00\x10\x00\x08\x00\x00\xce\x97\xab\xc3\x00\x00\x00\x11\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x01\x00\x14\x01\x88[\x88\x00\x16\xa9\t\x00\x08\xfaR\x00\x08\xfaQ\x00\x02d\x8e\x00\x0e\x00\x02\x00\x00\x00\x0f\x00\x1a\x00\x0c\x00M\x00a\x00c\x00i\x00n\x00t\x00o\x00s\x00h\x00 \x00H\x00D\x00\x13\x00\x01/\x00\x00\x15\x00\x02\x00\x14\xff\xff\x00\x00\xff\xff\x00\x00',
'backgroundColorBlue': 1.0,
'iconSize': 96.0,
'backgroundColorGreen': 1.0,
'arrangeBy': 'none',
'showIconPreview': True,
'gridSpacing': 100.0,
'gridOffsetY': 0.0,
'showItemInfo': False,
'labelOnBottom': True,
'backgroundType': 2,
'backgroundColorRed': 1.0
}
# Rewrite the background-image alias so it points at the freshly named
# volume and temp DMG instead of the original build machine's paths.
alias = Alias.from_bytes(icvp['backgroundImageAlias'])
alias.volume.name = package_name_ns
alias.volume.posix_path = '/Volumes/' + package_name_ns
alias.volume.disk_image_alias.target.filename = package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.carbon_path = 'Macintosh HD:Users:\x00telnetcoinuser:\x00Documents:\x00telnetcoin:\x00telnetcoin:\x00' + package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.posix_path = 'Users/telnetcoinuser/Documents/telnetcoin/telnetcoin/' + package_name_ns + '.temp.dmg'
alias.target.carbon_path = package_name_ns + ':.background:\x00background.tiff'
icvp['backgroundImageAlias'] = biplist.Data(alias.to_bytes())
ds['.']['icvp'] = icvp  # icon-view settings, including the background image alias
ds['.']['vSrn'] = ('long', 1)  # presumably a Finder view-settings revision marker -- opaque
# Fixed icon positions for the drag-to-install layout.
ds['Applications']['Iloc'] = (370, 156)
ds['Telnetcoin-Qt.app']['Iloc'] = (128, 156)
ds.flush()
ds.close()
| 62.213115 | 1,817 | 0.728854 |
acf411fbe909ac9caca578cdd280baa05e8431ab | 3,652 | py | Python | arcade/examples/sprite_move_keyboard.py | Jayman2000/arcade-pull | 82d8085bd446ac42c634f56a4d9b8d97ac01b417 | [
"MIT"
] | null | null | null | arcade/examples/sprite_move_keyboard.py | Jayman2000/arcade-pull | 82d8085bd446ac42c634f56a4d9b8d97ac01b417 | [
"MIT"
] | null | null | null | arcade/examples/sprite_move_keyboard.py | Jayman2000/arcade-pull | 82d8085bd446ac42c634f56a4d9b8d97ac01b417 | [
"MIT"
] | null | null | null | """
Move Sprite With Keyboard
Simple program to show moving a sprite with the keyboard.
The sprite_move_keyboard_better.py example is slightly better
in how it works, but also slightly more complex.
Artwork from https://kenney.nl
If Python and Arcade are installed, this example can be run from the command line with:
python -m arcade.examples.sprite_move_keyboard
"""
import arcade
SPRITE_SCALING = 0.5
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
SCREEN_TITLE = "Move Sprite with Keyboard Example"
MOVEMENT_SPEED = 5
class Player(arcade.Sprite):
    """Player sprite that keeps itself inside the window bounds."""

    def update(self):
        """Advance by the current velocity, then clamp to the screen.

        (If a physics engine were moving the player, the movement step
        would be its job instead.)
        """
        self.center_x += self.change_x
        self.center_y += self.change_y

        right_limit = SCREEN_WIDTH - 1
        top_limit = SCREEN_HEIGHT - 1

        # Horizontal clamp; elif mirrors the original: only one side is
        # corrected per frame.
        if self.left < 0:
            self.left = 0
        elif self.right > right_limit:
            self.right = right_limit

        # Vertical clamp.
        if self.bottom < 0:
            self.bottom = 0
        elif self.top > top_limit:
            self.top = top_limit
class MyGame(arcade.Window):
    """Main application window for the keyboard-movement example."""

    def __init__(self, width, height, title):
        """Create the window; sprites are built later in setup()."""
        super().__init__(width, height, title)

        # Sprite containers -- populated by setup().
        self.player_list = None
        self.player_sprite = None

        arcade.set_background_color(arcade.color.AMAZON)

    def setup(self):
        """Create the player sprite and place it near the lower-left corner."""
        self.player_list = arcade.SpriteList()

        image = ":resources:images/animated_characters/female_person/femalePerson_idle.png"
        self.player_sprite = Player(image, SPRITE_SCALING)
        self.player_sprite.center_x = 50
        self.player_sprite.center_y = 50
        self.player_list.append(self.player_sprite)

    def on_draw(self):
        """Render the frame."""
        arcade.start_render()  # must happen before any drawing commands
        self.player_list.draw()

    def on_update(self, delta_time):
        """Advance the game state by one tick."""
        self.player_list.update()

    def on_key_press(self, key, modifiers):
        """Start moving in the direction of the pressed arrow key."""
        velocity = {
            arcade.key.UP: (None, MOVEMENT_SPEED),
            arcade.key.DOWN: (None, -MOVEMENT_SPEED),
            arcade.key.LEFT: (-MOVEMENT_SPEED, None),
            arcade.key.RIGHT: (MOVEMENT_SPEED, None),
        }.get(key)
        if velocity is None:
            return
        dx, dy = velocity
        if dx is not None:
            self.player_sprite.change_x = dx
        if dy is not None:
            self.player_sprite.change_y = dy

    def on_key_release(self, key, modifiers):
        """Zero the speed on the released key's axis.

        This doesn't work well when multiple keys are pressed at once;
        see the 'better move by keyboard' example for proper handling.
        """
        if key in (arcade.key.UP, arcade.key.DOWN):
            self.player_sprite.change_y = 0
        elif key in (arcade.key.LEFT, arcade.key.RIGHT):
            self.player_sprite.change_x = 0
def main():
    """Build the window, set up the game, and enter the arcade event loop."""
    game = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
    game.setup()
    arcade.run()
if __name__ == "__main__":
main()
| 27.458647 | 128 | 0.626506 |
acf41446cb9009d25b340f3175f413b91264a56a | 2,928 | py | Python | synapse/rest/notify/__init__.py | eachchat/synapse | 889a5f3e011a859a0b2fe432429933f3aba1a8ce | [
"Apache-2.0"
] | null | null | null | synapse/rest/notify/__init__.py | eachchat/synapse | 889a5f3e011a859a0b2fe432429933f3aba1a8ce | [
"Apache-2.0"
] | null | null | null | synapse/rest/notify/__init__.py | eachchat/synapse | 889a5f3e011a859a0b2fe432429933f3aba1a8ce | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2018-2019 New Vector Ltd
# Copyright 2020, 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import platform
import synapse
from synapse.api.errors import Codes, NotFoundError, SynapseError
from synapse.http.server import JsonResource
from synapse.http.servlet import RestServlet, parse_json_object_from_request
from synapse.rest.admin._base import admin_patterns, assert_requester_is_admin
from synapse.rest.admin.devices import (
DeleteDevicesRestServlet,
DeviceRestServlet,
DevicesRestServlet,
)
from synapse.rest.admin.event_reports import (
EventReportDetailRestServlet,
EventReportsRestServlet,
)
from synapse.rest.admin.groups import DeleteGroupAdminRestServlet
from synapse.rest.admin.media import ListMediaInRoom, register_servlets_for_media_repo
from synapse.rest.admin.purge_room_servlet import PurgeRoomServlet
from synapse.rest.admin.rooms import (
DeleteRoomRestServlet,
ForwardExtremitiesRestServlet,
JoinRoomAliasServlet,
ListRoomRestServlet,
MakeRoomAdminRestServlet,
RoomEventContextServlet,
RoomMembersRestServlet,
RoomRestServlet,
RoomStateRestServlet,
ShutdownRoomRestServlet,
)
from synapse.rest.admin.server_notice_servlet import SendServerNoticeServlet
from synapse.rest.admin.statistics import UserMediaStatisticsRestServlet
from synapse.rest.admin.users import (
AccountValidityRenewServlet,
DeactivateAccountRestServlet,
PushersRestServlet,
ResetPasswordRestServlet,
SearchUsersRestServlet,
ShadowBanRestServlet,
UserAdminServlet,
UserMediaRestServlet,
UserMembershipRestServlet,
UserRegisterServlet,
UserRestServletV2,
UsersRestServletV2,
UserTokenRestServlet,
WhoisRestServlet,
)
from synapse.rest.notify.pushgateway import PushGateway
from synapse.types import RoomStreamToken
from synapse.util.versionstring import get_version_string
logger = logging.getLogger(__name__)
class PushNotifyRestResource(JsonResource):
    """The REST resource which gets mounted at /_matrix/push"""

    def __init__(self, hs):
        """Build the resource and mount all push-notify servlets onto it.

        Args:
            hs: the HomeServer object, passed through to each servlet.
        """
        # Cooperative super() instead of naming the base class directly,
        # matching modern py3 style and keeping MRO-correct behavior if
        # this class is ever mixed into a larger hierarchy.
        super().__init__(hs, canonical_json=False)
        register_servlets(hs, self)
def register_servlets(hs, http_server):
    """
    Register all the push notify servlets.
    """
    gateway = PushGateway(hs)
    gateway.register(http_server)
| 31.483871 | 86 | 0.791325 |
acf415b0fb6e041f9c7d48d3b98e20ba365f3acf | 1,513 | py | Python | tests/unit/test_word_tokenize.py | dhhse/prereform2modern | b35f7d99f5a38fadf63e7d11ce0a59fed8ef80a1 | [
"MIT"
] | 1 | 2020-09-09T09:51:30.000Z | 2020-09-09T09:51:30.000Z | tests/unit/test_word_tokenize.py | dhhse/prereform2modern | b35f7d99f5a38fadf63e7d11ce0a59fed8ef80a1 | [
"MIT"
] | null | null | null | tests/unit/test_word_tokenize.py | dhhse/prereform2modern | b35f7d99f5a38fadf63e7d11ce0a59fed8ef80a1 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This module contains tests for word_tokenize.
You can run this with:
python2.7 -m unittest -v tests.unit.test_word_tokenize
or
python3 -m unittest -v tests.unit.test_word_tokenize
"""
from unittest import TestCase
from prereform2modern.word_tokenize import WordTokenizer
class TestWordTokenizer(TestCase):
    """Unit tests for WordTokenizer.tokenize.

    The test data is a pre-reform-orthography Russian sentence; the literal
    byte sequences below are kept exactly as stored in the repository.
    """

    def test_tokenize_alphabetical_text(self):
        # This test works in both Py2 and Py3.
        text = u"ะะฝั ััะพัะปั ะฟะพะดะปัฃ ะฟะธััะผะตะฝะฝะฐะณะพ ััะพะปะฐ."
        result = WordTokenizer.tokenize(text)
        expected = [u'ะะฝั',
                    u' ',
                    u'ััะพัะปั',
                    u' ',
                    u'ะฟะพะดะปัฃ',
                    u' ',
                    u'ะฟะธััะผะตะฝะฝะฐะณะพ',
                    u' ',
                    u'ััะพะปะฐ',
                    u'.'
                    ]
        self.assertListEqual(result, expected)

    # TODO: If u'[' is passed to tokenize(), it works in Py2 but fails in
    # Py3 with "NameError: name 'unicode' is not defined", so it would be
    # good to write a test for that case.
    #
    # It would also be worth checking whether this method is expected to be
    # called with anything other than plain text -- e.g. tags and the like.
    #
    # In general: try to find cases where the code works in Py2 but might
    # not work the same way in Py3.

    # def test_tokenize_opening_bracket(self):
    #     # TODO:
    #     pass
| 30.26 | 77 | 0.574356 |
acf415d0fca03b648e1c201bf50d287ec79b9d30 | 913 | py | Python | python/constant.py | JasonFriedman/RepeatedMeasuresServers | 6c0b7f0207f5e25e75a190db83d51580708cb05c | [
"MIT"
] | null | null | null | python/constant.py | JasonFriedman/RepeatedMeasuresServers | 6c0b7f0207f5e25e75a190db83d51580708cb05c | [
"MIT"
] | null | null | null | python/constant.py | JasonFriedman/RepeatedMeasuresServers | 6c0b7f0207f5e25e75a190db83d51580708cb05c | [
"MIT"
] | null | null | null | # Message codes for communicating between the client and server
DUMMY = 'D'
GETSAMPLE ='G'
SETUPRECORDING = 'S' # 3 parameters: filename, num markers, maximum time (seconds)
STARTRECORDING = 'T' # no parameters
STOPRECORDING = 'Z' # no parameters
SAVEFILE = 'F' # no parameters
CLOSEDEVICE = 'C' # no parameters
MARKEVENT = 'M' # 1 parameter: number to mark
# Liberty specific
# Set mode to ASCII (0) or binary (1)
LIBERTY_SetMode = '1'
# Set units to inches (0) or cm (1)
LIBERTY_SetUnits = '2'
# Set active hemisphere
LIBERTY_SetHemisphere = '3'
# Set the sample rate (3 = 120Hz, 4 = 240Hz)
LIBERTY_SetSampleRate = '4'
# Reset the frame count
LIBERTY_ResetFrameCount = '5'
#Set output format
LIBERTY_SetOutputFormat = '6'
# Get a single sample
LIBERTY_GetSingleSample = '7'
# Get the update rate
LIBERTY_GetUpdateRate = '8'
# Set the alignment frame (which way is x,y,z)
LIBERTY_AlignmentReferenceFrame = '9'
| 29.451613 | 82 | 0.733844 |
acf416eb4c546ea60a305523c703ff4f03eab771 | 222 | py | Python | selco/selco/doctype/financing_institution_branch/test_financing_institution_branch.py | codingCoffee/selco_v2 | 276b62ac3f904bdc3b4ddf92882fd0bb318c5a35 | [
"MIT"
] | null | null | null | selco/selco/doctype/financing_institution_branch/test_financing_institution_branch.py | codingCoffee/selco_v2 | 276b62ac3f904bdc3b4ddf92882fd0bb318c5a35 | [
"MIT"
] | 111 | 2018-04-26T13:14:09.000Z | 2018-08-04T05:54:48.000Z | selco/selco/doctype/financing_institution_branch/test_financing_institution_branch.py | codingCoffee/selco_v2 | 276b62ac3f904bdc3b4ddf92882fd0bb318c5a35 | [
"MIT"
] | 5 | 2018-02-08T13:34:03.000Z | 2021-07-20T10:03:06.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2017, SELCO and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestFinancingInstitutionBranch(unittest.TestCase):
    """Placeholder test case for the Financing Institution Branch doctype;
    no tests implemented yet."""
    pass
| 20.181818 | 56 | 0.783784 |
acf41789ec2c0d018304a7a84e58981fff3564b9 | 23,361 | py | Python | iron/pythonlib/MultiplePSLBasics.py | jason-weirather/Au-public | c4e37770229f64a24bc79cd380e393f6de1fd7cb | [
"Apache-2.0"
] | 4 | 2016-03-11T07:48:18.000Z | 2017-01-01T20:49:10.000Z | iron/pythonlib/MultiplePSLBasics.py | jason-weirather/Au-public | c4e37770229f64a24bc79cd380e393f6de1fd7cb | [
"Apache-2.0"
] | null | null | null | iron/pythonlib/MultiplePSLBasics.py | jason-weirather/Au-public | c4e37770229f64a24bc79cd380e393f6de1fd7cb | [
"Apache-2.0"
] | 5 | 2015-03-09T18:13:06.000Z | 2019-01-15T10:19:07.000Z | import re, sys
from RangeBasics import GenomicRange, Bed
from SequenceBasics import rc as rc_seq
from PSLBasics import is_valid, PSL
import GraphBasics
class MultiplePSLAlignments:
  """Holds a set of PSL alignments that all share one query sequence and
  builds a directed graph describing which alignments can be chained."""
  def __init__(self):
    self.entries = [] # a list of PSL entries
    self.qName = None # query name shared by every entry; set on first add_entry
    self.g = None     # GraphBasics.Graph built by compatible_graph()
  def entry_count(self):
    """Return the number of PSL entries currently stored."""
    return len(self.entries)
  def add_entry(self,entry):
    """Add a PSL entry; every entry must carry the same qName.

    Returns True on success, or False (with a warning) on qName mismatch."""
    if not self.qName:
      self.qName = entry.value('qName')
    else:
      if entry.value('qName') != self.qName:
        sys.stderr.write("WARNING multiple alignments must have the same query name. This entry will not be added\n")
        return False
    self.entries.append(entry)
    return True
  def get_root_paths(self):
    """List every ordered chain of compatible alignments.

    Requires compatible_graph() to have been called first (exits otherwise).
    Returns a list of lists of indexes into self.entries; graph nodes that
    were merged from cycles carry multiple indexes, and _traverse expands
    every combination of them."""
    if not self.g:
      sys.stderr.write("ERROR must run compatible_graph() before get_root_paths()\n")
      sys.exit()
    rs = self.g.get_roots()
    paths = []
    for r in rs:
      ps = self.g.get_directed_paths_from_node(r)
      for p in ps:
        paths.append(p)
    outputs = []
    for path in paths:
      simples = _traverse([self.g.get_nodes()[x].get_payload() for x in path])
      for simple in simples:
        #print str(simple)+" "+str([self.entries[i].get_target_bed().get_range_string() for i in simple])+" "+str([self.entries[i].get_query_bed().get_range_string() for i in simple])
        outputs.append(simple)
    return outputs
  # Pre: max_intron - set this to -1 if you don't care about target placement (use for fusion)
  #      max_gap - set to -1 to allow unaligned sequences of any size on the query
  #      max_query_overlap - The largest number of bases you permit two query sequences to overlap
  #      max_target_overlap - The largest number of bases you permit two target sequences to overlap
  def compatible_graph(self,max_intron=400000,max_gap=-1,max_query_overlap=0,max_target_overlap=0,max_query_fraction_overlap=-1):
    """Build a directed graph with one node per entry and an edge i->j
    whenever alignment j can follow alignment i on both the query and the
    target under the distance/overlap limits above.  Cycles are merged into
    single nodes.  Sets self.g and also returns the graph."""
    # Create a flow graph of how multiple PSL files can be connected
    g = GraphBasics.Graph() #directed graph
    node_dict = {}
    for i in range(0,len(self.entries)):
      n1 = GraphBasics.Node([i])
      g.add_node(n1)
      node_dict[i] = n1
    for i in range(0,len(self.entries)):
      for j in range(0,len(self.entries)):
        if i == j: continue # not looking when its itself
        #See if we can go from i to j
        qd = self.entries[i].query_distance(self.entries[j])
        td = self.entries[i].target_distance(self.entries[j],use_direction=True)
        # td == -1: no valid directed target distance (presumably different
        # chromosome or strand) -- fatal unless target placement is ignored.
        if max_intron >= 0 and td == -1: continue
        if max_intron > 0:
          if td > max_intron: continue
        if max_gap > 0:
          if qd > max_gap: continue
        target_overlap = self.entries[i].target_overlap_size(self.entries[j])
        query_overlap = self.entries[i].query_overlap_size(self.entries[j])
        if max_query_fraction_overlap > 0:
          frac = max(float(query_overlap)/float(self.entries[i].get_coverage()),float(query_overlap)/float(self.entries[j].get_coverage()))
          if frac > max_query_fraction_overlap: continue
        if query_overlap > max_query_overlap and max_query_overlap >= 0: continue
        if target_overlap > max_target_overlap and max_target_overlap >= 0: continue
        # make sure j target is greater than i target
        # check for plus stand
        if self.entries[j].value('tStarts')[0] < self.entries[i].value('tStarts')[0] and max_intron >= 0 and self.entries[i].value('strand')=='+':
          continue
        # check for minus strand
        if self.entries[j].value('tStarts')[0] > self.entries[i].value('tStarts')[0] and max_intron >= 0 and self.entries[i].value('strand')=='-':
          continue
        # make sure j query is greater than i query
        if self.entries[j].value('qStarts_actual')[0] < self.entries[i].value('qStarts_actual')[0]:
          continue
        #print self.entries[i].value('tName') +"\t"+self.entries[j].value('tName')+"\t"+str(qd)+"\t"+str(td)
        # add to graph
        #n1 = GraphBasics.Node([i])
        #n2 = GraphBasics.Node([j])
        g.add_edge(GraphBasics.Edge(node_dict[i],node_dict[j]))
    g.merge_cycles()
    self.g = g
    return g
# values is an array of possible values at each positoin
# return possible arrays
def _traverse(values,start_index=0,prev=[]):
if start_index >= len(values):
return [prev]
outputs = []
for v in values[start_index]:
newprev = prev[:]
newprev.append(v)
output = _traverse(values,start_index=start_index+1,prev=newprev)
if output:
for o in output:
outputs.append(o)
return outputs
class MultiplePSLAlignmentsOld:
  """Legacy per-query multiple-alignment analysis.

  Collects PSL entries sharing one query name, indexes every aligned query
  base (populate_query), and picks a best, possibly chimeric, explanation
  of the query (best_query) as a BestAlignmentCollection."""
  def __init__(self):
    self.entries = [] # a list of PSL entries
    self.minimum_coverage = 1 #how many base pairs an alignment must cover to be part of a multiple alignment
    self.qName = None
    self.best_coverage_fraction = 0.9 #how much of an alignment be the best alignment
    #where it aligns to be kept later on
    self.multiply_mapped_minimum_quality_difference = 0.05 # if the qualities are similar we can
    # treat it as an ambiguous alignment
    self.multiply_mapped_minimum_overlap = 50 # The minimum number of bases that must be shared between two alignments to consider them multiply mapped
    self.best_alignment = None # This will hold a BestAlignmentCollection class and is set by 'best_query'
    self.verbose = False
  def entry_count(self):
    """Return the number of stored PSL entries."""
    return len(self.entries)
  def set_minimum_coverage(self,mincov):
    """Set the minimum aligned bases an entry needs to be added/kept."""
    self.minimum_coverage = mincov
  def add_line(self,line):
    """Parse a raw PSL line and add it.

    NOTE(review): line_to_entry is not defined or imported in this module's
    visible code -- confirm this method is still functional."""
    self.add_entry(line_to_entry(line))
  def add_entry(self,entry):
    """Add a PSL entry if it matches self.qName and meets minimum coverage.

    Returns True when stored, False when rejected."""
    if not self.qName:
      self.qName = entry.value('qName')
    else:
      if entry.value('qName') != self.qName:
        sys.stderr.write("WARNING multiple alignments must have the same query name. This entry will not be added\n")
        return False
    if self.minimum_coverage > 1:
      cov = entry.get_coverage()
      if cov < self.minimum_coverage:
        if self.verbose: sys.stderr.write("WARNING alignment less than minimum coverage.\n")
        return False
    self.entries.append(entry)
    return True
  def get_alignment_count(self):
    """Return the number of stored PSL entries (alias of entry_count)."""
    return len(self.entries)
  def get_tNames(self):
    """Return the sorted, de-duplicated target (chromosome) names."""
    names = set()
    for name in [x.value('tName') for x in self.entries]:
      names.add(name)
    return sorted(list(names))
  # Use the multiple alignments to set information about the query
  # Pre: alignment(s) to have been loaded alread
  # Post: A hash by position of query that contains all
  #      alignments and information on the quality of the alignment
  #      self.query[query_base index-0][entry index]
  #      then contains tName, coverage, and quality
  def populate_query(self):
    query = {}
    # Go through each alignment
    for eindex in range(0,len(self.entries)):
      e = self.entries[eindex].entry
      cov = self.entries[eindex].get_coverage()
      qual = self.entries[eindex].get_quality()
      # Go through each block of the alignment
      for i in range(0,e['blockCount']):
        # Set relevant mapped alignments for each base of the query
        for z in range(e['qStarts_actual'][i],e['qStarts_actual'][i]+e['blockSizes'][i]):
          if z not in query: query[z] = {}
          query[z][eindex] = {}
          query[z][eindex]['tName'] = e['tName']
          query[z][eindex]['coverage'] = cov
          query[z][eindex]['quality'] = qual
    self.query = query
    return
  # Try to determine the fraction of each alignment that is 'best' for
  # Wherever it is aligning to.
  # Pre: 1. A list of indecies of valid alignment entries in self.entries
  #      2. A list of indecies valid bases in self.quality
  # Post: A hash keyed by valid entry indecies that contains
  #      'coverage', 'quality' and 'best_coverage'
  def evaluate_entry_coverage(self,valid_entries,valid_bases):
    qbases = sorted(valid_bases)
    es = {}
    for i in valid_entries:
      es[i]={}
      es[i]['coverage'] = self.entries[i].get_coverage()
      es[i]['quality'] = self.entries[i].get_quality()
      es[i]['best_coverage'] = 0 # need to calculate this
    bestbases = {}
    # Step 1: Calculate coverage fraction for all alignments
    for z in qbases:
      bestindex = -1
      bestquality = 0
      for eindex in self.query[z]:
        if eindex not in valid_entries: continue # only consider the candidates
        # NOTE(review): the comparison reads es[eindex]['quality'] but the
        # update reads self.query[z][eindex]['quality']; both appear to be
        # the same per-entry quality value -- confirm.
        if es[eindex]['quality'] > bestquality:
          bestindex = eindex
          bestquality = self.query[z][eindex]['quality']
      if bestindex > -1:
        bestbases[z] = bestindex
    # For each alignment set the amount that alignment constitutes the best
    # alignment
    for z in sorted(bestbases.keys()):
      ebest = bestbases[z]
      es[ebest]['best_coverage'] += 1
    return [es,bestbases]
  # Filter our best based on a requirement for being the 'best' for some large
  # fraction of an aligned region.  Reevaluates best coverage for remaining
  # Pre: 1. list of valid entries
  #      2. list of valid bases
  # Post: entry evaluation for entries after removing filtered entries
  #      bases post filtering
  def filter_by_coverage_fraction(self,valid_entries,qbases):
    filteredbases = {}
    contributing_indecies = set()
    [entry_evaluation,temp] = self.evaluate_entry_coverage(valid_entries,qbases)
    # Step 2: Filter out alignments not satisfying the coverage fraction
    for z in sorted(qbases):
      bestindex = -1
      bestquality = 0
      for eindex in entry_evaluation.keys():
        if eindex not in self.query[z]: continue
        if float(entry_evaluation[eindex]['best_coverage'])/float(entry_evaluation[eindex]['coverage']) < self.best_coverage_fraction: continue
        if entry_evaluation[eindex]['quality'] > bestquality:
          bestindex = eindex
          bestquality = self.query[z][eindex]['quality']
      if bestindex > -1:
        filteredbases[z] = bestindex
        contributing_indecies.add(bestindex)
    nentries = list(contributing_indecies)
    [new_eval,new_bases] = self.evaluate_entry_coverage(nentries,filteredbases.keys())
    return [new_eval,new_bases]
  # Very simply return one psl entry with the best coverage
  # Pre: entry has been added
  # Post: a PSL type
  def best_psl(self):
    """Return a copy of the entry with the most matches.

    NOTE(review): despite the comment above, 'best' here means most
    'matches', not most coverage; returns None when no entries exist."""
    best = 0
    for e in self.entries:
      if e.value('matches') > best: best = e.value('matches')
    for e in self.entries:
      if e.value('matches') == best:
        return e.copy()
  # Read throught he query data and find the best explainations
  # Pre: 1. Have loaded in alignments
  #      2. Have ran populate_query()
  #      Now populate query contains a hash of query indecies
  #      that have the hashes of matching alignments
  #      and information regarding the quality of each of those alignments
  # Post: Sets self.best_contributing_entries
  #      and self.best_alignment_segments = []
  def best_query(self):
    qbases = sorted(self.query.keys())
    all_entries = range(0,len(self.entries))
    # Step 1: Calculate coverage fraction for all alignments
    [entry_evaluation,bases] = self.evaluate_entry_coverage(all_entries,qbases)
    # Step 2: Filter out alignments not satisfying the coverage fraction
    [filtered_entry_evaluation,filtered_bases] = self.filter_by_coverage_fraction(entry_evaluation,bases.keys())
    # Get bed information for the alignment
    qbases = sorted(filtered_bases.keys())
    if len(qbases) == 0: return False
    qstart = qbases[0]
    current = filtered_bases[qstart]
    last = qstart
    beds = []
    eindex = filtered_bases[qstart]
    # Walk the bases in order, emitting a [start, end, entry] segment each
    # time the best entry changes.
    for i in range(1,len(qbases)):
      e = self.entries[eindex]
      current = filtered_bases[qbases[i]]
      if current not in filtered_entry_evaluation: continue
      if eindex != current:
        beds.append([qstart,last+1,eindex])
        qstart = qbases[i]
        eindex = filtered_bases[qbases[i]]
      last = qbases[i]
    beds.append([qstart,last+1,eindex])
    contributing_indecies = set()
    filtered_beds = []
    for bed in beds:
      seglen = bed[1]-bed[0]
      if seglen < self.minimum_coverage: # remove a fragment too short to call
        for z in range(bed[0],bed[1]):
          if z in filtered_bases:
            del filtered_bases[z]
      else:
        filtered_beds.append(bed)
        contributing_indecies.add(bed[2])
    #if len(contributing_indecies) < 2: return
    #print '---'
    #for i in sorted(list(contributing_indecies)):
    #  print str(i)+"\t"+self.entries[i]['tName']+"\t"+self.entries[i]['strand']+"\t"+str(get_coverage(self.entries[i]))+"\t"+str(get_quality(self.entries[i]))
    self.best_alignment = BestAlignmentCollection()
    entries_present = set()
    for bed in filtered_beds:
      temp = {}
      temp['query_bed'] = [bed[0],bed[1]]
      temp['psl_entry_index'] = bed[2]
      temp['multiply_mapped'] = self.get_multiply_mapped(bed[2],bed[0],bed[1])
      entries_present.add(bed[2])
      self.best_alignment.segments.append(temp)
    for i in entries_present:
      self.best_alignment.entries[i] = self.entries[i]
    self.best_alignment.qName = self.qName
    return self.best_alignment
  def get_multiply_mapped(self,eindex,bed_start,bed_finish):
    """Return True when another entry covers >= multiply_mapped_minimum_overlap
    bases of this query range with near-equal quality."""
    multibase = {}
    for i in range(bed_start,bed_finish):
      if i in self.query:
        if eindex in self.query[i]:
          bestquality = self.query[i][eindex]['quality']
          for eindex2 in self.query[i]:
            if eindex2 == eindex: continue
            if eindex2 not in multibase: multibase[eindex2] = 0
            # NOTE(review): this compares the entry's quality against itself
            # minus the threshold (always true); it looks like it was meant
            # to read self.query[i][eindex2]['quality'] -- confirm.
            if self.query[i][eindex]['quality'] > bestquality - self.multiply_mapped_minimum_quality_difference:
              multibase[eindex2] += 1
              if multibase[eindex2] >= self.multiply_mapped_minimum_overlap:
                return True
    return False
def get_psl_quality(entry):
  """Fraction of alignment events that are matches, counting mismatches
  and both query- and target-side gap openings against the score."""
  matches = float(entry['matches'])
  events = float(entry['matches'] + entry['misMatches']
                 + entry['tNumInsert'] + entry['qNumInsert'])
  return matches / events
# Store the result of a 'best_query' in this
# Can go on to calculate get_trimmed_entries() to cut our entries down by segment
class BestAlignmentCollection:
  """Result of MultiplePSLAlignmentsOld.best_query(): the chosen PSL
  entries plus the query segments each one best explains."""
  def __init__(self):
    self.entries = {} # psl entries stored by an integer key
    self.segments = [] # contains a query_bed and a psl_entry_index
    self.qName = None
    self.minimum_overlap = 1 # by default consider any overlap as reportable overlap
    self.overlapping_segment_targets = None # set by find_overlapping_segment_targets
    self.minimum_locus_distance = 400000 # minimum number of bases to consider something a different locus
    # NOTE(review): attribute name below is a typo ('entires');
    # get_trimmed_entries() actually sets self.segment_trimmed_entries.
    self.segment_trimmed_entires = None # set by function can be set to an array equal to size segments
    return
  # Pre: A best alignment collection, for each segment, trim the PSL entry
  #      to fit within these query bed bounds
  # Post: sets self.segement_trimmed_entries
  def get_trimmed_entries(self):
    self.segment_trimmed_entries = []
    for seg in self.segments:
      qbed = seg['query_bed']
      psl = self.entries[seg['psl_entry_index']]
      tpsl = psl.left_qactual_trim(qbed[0]+1)
      tpsl = tpsl.right_qactual_trim(qbed[1])
      self.segment_trimmed_entries.append(tpsl)
    return self.segment_trimmed_entries
  def has_multiply_mapped_segments(self):
    """True when any segment was flagged as multiply mapped."""
    for i in range(0,len(self.segments)):
      if self.segments[i]['multiply_mapped']: return True
    return False
  def has_overlapped_segments(self):
    """True when any two segments overlap on the target (computes lazily)."""
    if not self.overlapping_segment_targets:
      self.find_overlapping_segment_targets()
    if len(self.overlapping_segment_targets.keys()) > 0:
      return True
    return False
  def segment_count(self):
    return len(self.segments)
  def alignment_count(self):
    return len(self.entries)
  def get_gap_sizes(self):
    """Query-space gaps between consecutive segments ([0] when empty)."""
    if len(self.segments)==0: return [0]
    return [self.segments[x]['query_bed'][0]-self.segments[x-1]['query_bed'][1] for x in range(1,len(self.segments))]
  def print_report(self):
    """Print a human-readable summary of the collection to stdout (Py2)."""
    if not self.overlapping_segment_targets:
      self.find_overlapping_segment_targets()
    print '-----'
    print self.qName
    if len(self.entries) > 1:
      biggest_gap_between_entries = max(self.get_gap_sizes())
      print str(biggest_gap_between_entries)+" biggest gap between entries"
    for i in range(0,len(self.segments)):
      overstring = ''
      if i in self.overlapping_segment_targets: overstring = 'OVERLAPPED'
      eindex = self.segments[i]['psl_entry_index']
      mm = self.segments[i]['multiply_mapped']
      mmstring = ''
      if mm: mmstring = 'MULTIPLYMAPPED'
      e = self.entries[self.segments[i]['psl_entry_index']].entry
      print e['tName']+"\t"+str(e['tStart'])+"\t"+str(e['tEnd'])+"\t"+\
            e['strand']+"\t"+str(self.segments[i]['query_bed'])+"\t"+str(get_psl_quality(e))+"\t"+str(eindex)+"\t"+overstring+"\t"+mmstring
  # For the collection of alignments go through
  # all possible pairs and report any that overlap with eachother
  # in the target sequence and how much they overlap with eachother
  def find_overlapping_segment_targets(self):
    self.overlapping_segment_targets = {}
    overlapping = []
    for segindex1 in range(0,len(self.segments)):
      for segindex2 in range(segindex1+1,len(self.segments)):
        over = self.get_target_overlaps(segindex1,segindex2)
        if not over: continue
        if over[2] < self.minimum_overlap: continue #Too small to call overlapped
        overlapping.append([segindex1, segindex2, over[2]])
    # First create the per-segment dictionaries, then fill in the
    # symmetric overlap sizes.
    for over in overlapping:
      self.overlapping_segment_targets[over[0]] = {}
      self.overlapping_segment_targets[over[1]] = {}
    for over in overlapping:
      self.overlapping_segment_targets[over[0]][over[1]] = over[2]
      self.overlapping_segment_targets[over[1]][over[0]] = over[2]
    return
  def get_target_overlaps(self,segindex1,segindex2):
    """Return [bases1, bases2, shared] target-base counts for two segments,
    or False when they share no target bases."""
    over = []
    i = self.segments[segindex1]['psl_entry_index']
    ibed = self.segments[segindex1]['query_bed']
    j = self.segments[segindex2]['psl_entry_index']
    jbed = self.segments[segindex2]['query_bed']
    ei = self.entries[i].entry
    ej = self.entries[j].entry
    # Collect every target coordinate ("tName:pos") covered by each segment,
    # restricted to query bases inside the segment's query bed.
    iobs = set()
    for iexon in range(0,len(ei['blockSizes'])):
      for ibase in range(0,ei['blockSizes'][iexon]):
        qactual = ei['qStarts_actual'][iexon]+ibase
        t = ei['tStarts'][iexon]+ibase
        if qactual >= ibed[0] and qactual < ibed[1]:
          iobs.add(ei['tName']+':'+str(t))
    jobs = set()
    for jexon in range(0,len(ej['blockSizes'])):
      for jbase in range(0,ej['blockSizes'][jexon]):
        qactual = ej['qStarts_actual'][jexon]+jbase
        t = ej['tStarts'][jexon]+jbase
        if qactual >= jbed[0] and qactual < jbed[1]:
          jobs.add(ej['tName']+':'+str(t))
    overset = set()
    for jcoord in jobs:
      if jcoord in iobs:
        overset.add(jcoord)
    if len(overset) > 0:
      return [len(iobs),len(jobs),len(overset)]
    return False
class GenericOrderedMultipleAlignmentPSLReader():
  """Stream a qName-ordered PSL file, yielding one MultiplePSLAlignments
  per query (all consecutive lines that share a qName)."""
  def __init__(self,fh=None):
    self.fh = fh          # input file handle
    self.previous = None  # one-line buffer: first line of the next group
  def set_handle(self,input_fh):
    """Use an already-open file handle as the input."""
    self.fh = input_fh
  def open_file(self,filename):
    """Open filename for reading."""
    self.fh = open(filename)
  def close(self):
    self.fh.close()
  def read_next(self):
    """Return the next MultiplePSLAlignments, or None at end of input.

    Invalid lines are skipped with a warning on stderr."""
    mpa = MultiplePSLAlignments()
    mcnt = 0
    current_name = None
    if self.previous: # We have one waiting to go into an alignment
      p1 = PSL(self.previous.rstrip())
      current_name = p1.value('qName')
      mpa.add_entry(p1)
      mcnt += 1
    else: # It must be our first entry, so prime our buffer
      while True:
        l1 = self.fh.readline()
        if not l1:
          return None
        if not is_valid(l1.rstrip()): continue # go till we get a PSL
        break
      p1 = PSL(l1.rstrip())
      current_name = p1.value('qName')
      mpa.add_entry(p1)
      mcnt += 1
    while True:
      l2 = self.fh.readline()
      if not l2:
        self.previous = None
        if mcnt > 0:
          return mpa
        return None
      # rstrip before validating, matching the priming loop above; the
      # trailing newline would otherwise be part of the last PSL field.
      if not is_valid(l2.rstrip()):
        sys.stderr.write("Warning line is not a valid psl line\n"+l2.rstrip()+"\n")
        continue # just skip strange bad lines like we never saw them
      p2 = PSL(l2.rstrip())
      if p2.value('qName') == current_name: # still in the current group
        mpa.add_entry(p2)
        mcnt += 1
      else: # a new group starts: buffer its first line, emit what we have
        self.previous = l2 # buffer the line
        if mcnt > 0:
          return mpa
def is_num(val):
  """Return True when str(val) consists solely of ASCII digits
  (i.e. a non-negative integer with no sign, decimal point or spaces)."""
  # Raw string fixes the invalid '\d' escape sequence in the original
  # pattern (a DeprecationWarning since Python 3.6); behavior unchanged.
  return bool(re.match(r'^\d+$', str(val)))
# Pre: an array of PSL entries ordered by the actual query
# So a positive strand is ready to go
# but a negative strand set needs to be traversed backwards
# All entries must be on the same strand and must be on the same chromosome
# This will throw an error if not satisfied.
# Multiple query names won't throw an error, but only the first will be used
def stitch_query_trimmed_psl_entries(entries):
  """Combine an ordered list of PSL entries into one PSL alignment.

  Pre: entries are ordered by the actual query (positive strand in order,
  negative strand reversed here), all on the same strand and target
  chromosome (exits on violation).  Each successive entry is left-trimmed
  on the target so its blocks start after the previous entry's tEnd, then
  its blocks are appended to the accumulated block lists.

  Returns a copy of the first entry with the merged block details, or
  None when entries is empty.
  """
  if len(entries) == 0:
    sys.stderr.write("WARNING tried stitch together zero sequences")
    return None
  strand = entries[0].value('strand')
  chrom = entries[0].value('tName')
  # Enforce the same-strand / same-chromosome precondition.
  for e in entries:
    if e.value('strand') != strand:
      sys.stderr.write("ERROR: stitching requires same strand for all PSL")
      sys.exit()
    if e.value('tName') != chrom:
      sys.stderr.write("ERROR: stitching requires same ref sequence for all PSL")
      sys.exit()
  eordered = entries[:]
  if strand == '-':
    eordered = entries[::-1]
  prevend = 0
  outpsl = eordered[0].copy()
  tstarts = []
  qstarts = []
  bsizes = []
  for i in range(0,len(eordered)):
    #left trim by the right most value of the previous
    if eordered[i].value('tEnd') < prevend:
      sys.stderr.write("WARNING: block skipped because of order\n")
      continue
    te = eordered[i].left_t_trim(prevend+1)
    if len(tstarts) == 0:
      # First surviving entry: take all of its blocks verbatim.
      for j in range(0,te.value('blockCount')):
        tstarts.append(te.value('tStarts')[j])
        qstarts.append(te.value('qStarts')[j])
        bsizes.append(te.value('blockSizes')[j])
    elif tstarts[-1]+bsizes[-1]+1==te.value('tStarts')[0] and \
       qstarts[-1]+bsizes[-1]+1==te.value('qStarts')[0]:
      #Handle the special case where the next block is exactly after the previous... they are combined
      sys.stderr.write("Warning: APPEND CASE.. not a bad thing... just not common\n")
      bsizes[-1]+=te.value('blockSizes')[0]
      # The rest can be done normally
      if te.value('blockCount') > 1:
        for j in range(1,te.value('blockCount')):
          tstarts.append(te.value('tStarts')[j])
          qstarts.append(te.value('qStarts')[j])
          bsizes.append(te.value('blockSizes')[j])
    else:
      # Most normally we would just add the blocks
      for j in range(0,te.value('blockCount')):
        tstarts.append(te.value('tStarts')[j])
        qstarts.append(te.value('qStarts')[j])
        bsizes.append(te.value('blockSizes')[j])
    prevend = te.value('tEnd')
  outpsl.update_alignment_details(bsizes,qstarts,tstarts)
  #print len(qstarts)
  #print len(tstarts)
  #print len(bsizes)
  #print outpsl.value('blockCount')
  #print "positive strand"
  #print outpsl.get_line()
  return outpsl
| 41.865591 | 183 | 0.658876 |
acf4191e4898af1b8f1b224eba91a2fb29352e3f | 407 | py | Python | 2016/day15-2.py | alvaropp/AdventOfCode2017 | 2827dcc18ecb9ad59a1a5fe11e469f31bafb74ad | [
"MIT"
] | null | null | null | 2016/day15-2.py | alvaropp/AdventOfCode2017 | 2827dcc18ecb9ad59a1a5fe11e469f31bafb74ad | [
"MIT"
] | null | null | null | 2016/day15-2.py | alvaropp/AdventOfCode2017 | 2827dcc18ecb9ad59a1a5fe11e469f31bafb74ad | [
"MIT"
] | null | null | null | import numpy as np
import numpy as np

# Advent of Code 2016, day 15 (part 2): find the first time t at which a
# capsule dropped falls through every rotating disc's slot.
# NOTE: as dumped, the body of the `with` block had lost its indentation
# (a syntax error); restored here.
with open("day15.txt", "r") as f:
    discs = f.read().splitlines()

# Number of positions and starting position parsed from each input line.
n_pos = np.array([int(disc.split("has ")[1].split(" pos")[0]) for disc in discs])
pos = np.array([int(disc[-2]) for disc in discs])

# Part 2 adds one extra disc with 11 positions, starting at position 0.
n_pos = np.append(n_pos, 11)
pos = np.append(pos, 0)

# Disc i is reached i+1 seconds after pressing the button.
offset = np.arange(1, len(discs) + 2)

t = 0
# Advance one second at a time until every disc is at slot 0 when reached.
while sum((pos + offset) % n_pos) > 0:
    pos = (pos + 1) % n_pos
    t += 1
print(t)
acf41acbab4b1199a98b7867936b183e78fb2693 | 5,338 | py | Python | configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/coco/hrnet_w48_coco_384x288_udp.py | stoil-ganev/mmpose | 88c984155d2a07f1d409cf6b3aee01a7ccdebdb8 | [
"Apache-2.0"
] | 1 | 2021-09-29T02:19:07.000Z | 2021-09-29T02:19:07.000Z | configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/coco/hrnet_w48_coco_384x288_udp.py | stoil-ganev/mmpose | 88c984155d2a07f1d409cf6b3aee01a7ccdebdb8 | [
"Apache-2.0"
] | null | null | null | configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/coco/hrnet_w48_coco_384x288_udp.py | stoil-ganev/mmpose | 88c984155d2a07f1d409cf6b3aee01a7ccdebdb8 | [
"Apache-2.0"
] | 1 | 2021-10-20T04:43:28.000Z | 2021-10-20T04:43:28.000Z | _base_ = ['../../../../_base_/datasets/coco.py']
log_level = 'INFO'
load_from = None
resume_from = None
dist_params = dict(backend='nccl')
workflow = [('train', 1)]
checkpoint_config = dict(interval=10)
evaluation = dict(interval=10, metric='mAP', save_best='AP')
optimizer = dict(
type='Adam',
lr=5e-4,
)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[170, 200])
total_epochs = 210
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
target_type = 'GaussianHeatmap'
channel_cfg = dict(
num_output_channels=17,
dataset_joints=17,
dataset_channel=[
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
],
inference_channel=[
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
])
# model settings
model = dict(
type='TopDown',
pretrained='https://download.openmmlab.com/mmpose/'
'pretrain_models/hrnet_w48-8ef0771d.pth',
backbone=dict(
type='HRNet',
in_channels=3,
extra=dict(
stage1=dict(
num_modules=1,
num_branches=1,
block='BOTTLENECK',
num_blocks=(4, ),
num_channels=(64, )),
stage2=dict(
num_modules=1,
num_branches=2,
block='BASIC',
num_blocks=(4, 4),
num_channels=(48, 96)),
stage3=dict(
num_modules=4,
num_branches=3,
block='BASIC',
num_blocks=(4, 4, 4),
num_channels=(48, 96, 192)),
stage4=dict(
num_modules=3,
num_branches=4,
block='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(48, 96, 192, 384))),
),
keypoint_head=dict(
type='TopdownHeatmapSimpleHead',
in_channels=48,
out_channels=channel_cfg['num_output_channels'],
num_deconv_layers=0,
extra=dict(final_conv_kernel=1, ),
loss_keypoint=dict(type='JointsMSELoss', use_target_weight=True)),
train_cfg=dict(),
test_cfg=dict(
flip_test=True,
post_process='default',
shift_heatmap=False,
target_type=target_type,
modulate_kernel=17,
use_udp=True))
data_cfg = dict(
image_size=[288, 384],
heatmap_size=[72, 96],
num_output_channels=channel_cfg['num_output_channels'],
num_joints=channel_cfg['dataset_joints'],
dataset_channel=channel_cfg['dataset_channel'],
inference_channel=channel_cfg['inference_channel'],
soft_nms=False,
nms_thr=1.0,
oks_thr=0.9,
vis_thr=0.2,
use_gt_bbox=False,
det_bbox_thr=0.0,
bbox_file='data/coco/person_detection_results/'
'COCO_val2017_detections_AP_H_56_person.json',
)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownRandomFlip', flip_prob=0.5),
dict(
type='TopDownHalfBodyTransform',
num_joints_half_body=8,
prob_half_body=0.3),
dict(
type='TopDownGetRandomScaleRotation', rot_factor=40, scale_factor=0.5),
dict(type='TopDownAffine', use_udp=True),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(
type='TopDownGenerateTarget',
sigma=3,
encoding='UDP',
target_type=target_type),
dict(
type='Collect',
keys=['img', 'target', 'target_weight'],
meta_keys=[
'image_file', 'joints_3d', 'joints_3d_visible', 'center', 'scale',
'rotation', 'bbox_score', 'flip_pairs'
]),
]
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownAffine', use_udp=True),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(
type='Collect',
keys=['img'],
meta_keys=[
'image_file', 'center', 'scale', 'rotation', 'bbox_score',
'flip_pairs'
]),
]
test_pipeline = val_pipeline
data_root = 'data/coco'
data = dict(
samples_per_gpu=32,
workers_per_gpu=2,
val_dataloader=dict(samples_per_gpu=32),
test_dataloader=dict(samples_per_gpu=32),
train=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_train2017.json',
img_prefix=f'{data_root}/train2017/',
data_cfg=data_cfg,
pipeline=train_pipeline,
dataset_info={{_base_.dataset_info}}),
val=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
img_prefix=f'{data_root}/val2017/',
data_cfg=data_cfg,
pipeline=val_pipeline,
dataset_info={{_base_.dataset_info}}),
test=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
img_prefix=f'{data_root}/val2017/',
data_cfg=data_cfg,
pipeline=val_pipeline,
dataset_info={{_base_.dataset_info}}),
)
| 29.01087 | 79 | 0.590858 |
acf41b0d1f82bcf89b418b288a0e53884f3e6659 | 4,034 | py | Python | bokeh/server/tests/utils.py | al-yakubovich/bokeh | 3d88647a5d10160cfef440278a2b0ae23258f290 | [
"BSD-3-Clause"
] | 1 | 2020-12-28T20:23:28.000Z | 2020-12-28T20:23:28.000Z | bokeh/server/tests/utils.py | al-yakubovich/bokeh | 3d88647a5d10160cfef440278a2b0ae23258f290 | [
"BSD-3-Clause"
] | null | null | null | bokeh/server/tests/utils.py | al-yakubovich/bokeh | 3d88647a5d10160cfef440278a2b0ae23258f290 | [
"BSD-3-Clause"
] | null | null | null | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2018, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
from tornado.ioloop import IOLoop
from tornado.httpclient import AsyncHTTPClient, HTTPRequest
from tornado.websocket import websocket_connect
# Bokeh imports
from bokeh.server.server import Server
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'http_get',
'ManagedServerLoop',
'url',
'websocket_open',
'ws_url',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
def url(server, prefix=""):
    """Return the base HTTP URL for a server listening on localhost."""
    return "http://localhost:{}{}/".format(server.port, prefix)
def ws_url(server, prefix=""):
    """Return the websocket URL for a server listening on localhost."""
    return "ws://localhost:{}{}/ws".format(server.port, prefix)
def http_get(io_loop, url):
    """Synchronously fetch *url* by spinning *io_loop* until the response
    callback fires.  Returns the tornado HTTPResponse on success; raises
    RuntimeError if no response was captured, or the response's error."""
    result = {}
    def handle_request(response):
        # Capture the response, then stop the loop so io_loop.start() returns.
        result['response'] = response
        io_loop.stop()
    # for some reason passing a loop to AsyncHTTPClient is deprecated
    assert io_loop is IOLoop.current()
    http_client = AsyncHTTPClient()
    headers = dict()
    http_client.fetch(url, handle_request, headers=headers)
    io_loop.start()  # blocks until handle_request stops the loop
    if 'response' not in result:
        raise RuntimeError("Failed to http get")
    response = result['response']
    if response.error:
        raise response.error
    else:
        return response
def websocket_open(io_loop, url, origin=None):
    """Synchronously open a websocket to *url* (optionally sending an
    Origin header), then close it immediately.  Used to verify that a
    connection can be established; raises on connect failure."""
    result = {}
    def handle_connection(future):
        # Capture the connect future, then stop the loop.
        result['connection'] = future
        io_loop.stop()
    request = HTTPRequest(url)
    if origin is not None:
        request.headers['Origin'] = origin
    websocket_connect(request, callback=handle_connection)
    io_loop.start()  # blocks until handle_connection stops the loop
    if 'connection' not in result:
        raise RuntimeError("Failed to handle websocket connect")
    future = result['connection']
    if future.exception():
        raise future.exception()
    else:
        # Successful connect: close the socket; caller only wanted the handshake.
        future.result().close()
        return None
# lets us use a current IOLoop with "with"
# and ensures the server unlistens
class ManagedServerLoop(object):
    """Context manager that runs a bokeh Server on a fresh, current IOLoop
    and guarantees the server unlistens, stops, and closes its loop on exit.

    Usage: ``with ManagedServerLoop(app) as server: ...``
    """
    def __init__(self, application, **server_kwargs):
        loop = IOLoop()
        loop.make_current()
        server_kwargs['io_loop'] = loop
        self._server = Server(application, **server_kwargs)
    def __enter__(self):
        self._server.start()
        return self._server
    def __exit__(self, type, value, traceback):
        self._server.unlisten()
        self._server.stop()
        self._server.io_loop.close()
    @property
    def io_loop(self):
        # BUG FIX: previously returned self.s_server.io_loop, but no
        # attribute `s_server` is ever assigned (AttributeError on access).
        return self._server.io_loop
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| 32.015873 | 82 | 0.465047 |
acf41b7e9feeeff7b326e39b4dd5b607adf777bc | 22,287 | py | Python | SFIWikiBotLib/GeneralUtils.py | mtmosier/SFIWikiBot | 5569a65d2640fa2d2815797a6259160ccab85259 | [
"MIT"
] | null | null | null | SFIWikiBotLib/GeneralUtils.py | mtmosier/SFIWikiBot | 5569a65d2640fa2d2815797a6259160ccab85259 | [
"MIT"
] | null | null | null | SFIWikiBotLib/GeneralUtils.py | mtmosier/SFIWikiBot | 5569a65d2640fa2d2815797a6259160ccab85259 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import io
import re
import json
import decimal
import math
import hashlib
from html.parser import HTMLParser
from collections import OrderedDict
from contextlib import suppress
from nltk.stem import PorterStemmer
stemmer = PorterStemmer()
# levelUpBase = 246
def RoundToMeaningful(input):
    """Round *input* to a display-friendly value whose granularity grows
    with magnitude (50k steps above 1M, 1k above 10k, 100 above 500,
    otherwise 50), formatted via NumDisplay with no decimal places."""
    for threshold, step in ((1000000, 50000), (10000, 1000), (500, 100)):
        if input >= threshold:
            return NumDisplay(NormalRound(input / step) * step, 0)
    return NumDisplay(NormalRound(input / 50) * 50, 0)
# XP level-curve constants consumed by LevelStart below: base amount per
# level step and the cumulative per-level multiplier increment.
levelUpBase = 246.2
levelMultInc = 0.0514
def LevelStart(level, debug=False):
    """Accumulate the XP total required to reach *level*, summing a
    per-level increment (levelUpBase scaled by a growing multiplier),
    each rounded via RoundToMeaningful.

    Returns the result of RoundToMeaningful(num) — note this is a
    formatted string (NumDisplay output), not a number.
    """
    num = 0
    num2 = 1
    for i in range(level+1):
        # RoundToMeaningful returns a comma-free numeric string; int() parses it.
        num += int(RoundToMeaningful(i * levelUpBase * num2))
        num2 += levelMultInc
        if debug:
            print(i, RoundToMeaningful(num), '::', num, num + (i * levelUpBase * num2))
    # return num
    return RoundToMeaningful(num)
# from deepdiff import DeepDiff
# ddiff = DeepDiff(t1, t2, ignore_order=True)
# print (DeepDiff(t1, t2, exclude_paths={"root['ingredients']"}))
# ddiff.to_dict()
# ddiff.to_json()
# https://stackoverflow.com/questions/27265939/comparing-python-dictionaries-and-nested-dictionaries
def DictCompare(d1, d2, path=""):
    """Recursively print the differences between two (nested) dicts.

    Reports keys present in d1 but missing from d2, and leaf values that
    differ, prefixed with a "parent->child" path.  Keys only in d2 are
    not reported.  Returns None (output goes to stdout).

    Consider using DeepDiff instead (https://github.com/seperman/deepdiff).
    """
    for k in d1:
        if k not in d2:
            print(path, ":")
            print(k + " as key not in d2", "\n")
        else:
            if type(d1[k]) is dict:
                # BUG FIX: recursion previously called the undefined name
                # `findDiff` (NameError).  Also build the child path in a
                # local so sibling keys don't compound onto each other.
                child_path = k if path == "" else path + "->" + k
                DictCompare(d1[k], d2[k], child_path)
            else:
                if d1[k] != d2[k]:
                    print(path, ":")
                    print(" - ", k, " : ", d1[k])
                    print(" + ", k, " : ", d2[k])
def mkdirr(path):
    """Recursively create *path*; an already-existing directory is not an error."""
    os.makedirs(path, exist_ok=True)
def CleanupImportedText(input):
    """Normalize mojibake smart quotes/apostrophes and CRLF line endings
    in imported text, then trim surrounding whitespace."""
    replacements = (
        ("\u00e2\u20ac\u0153", '"'),
        ("\u00e2\u20ac?", '"'),
        ('รขโฌโข', "'"),
        ('\r\n', '\n'),
    )
    for bad, good in replacements:
        input = input.replace(bad, good)
    return input.strip()
def floatCmp(v1, c, v2, dec=None):
    """Tolerant float comparison: apply operator string *c* to v1 and v2
    using math.isclose (rel_tol=1e-04) for equality.  None operands only
    satisfy identity-style ==/!= checks; every other comparison with None
    is False.  Raises NotImplementedError for unknown operators.
    (*dec* is accepted for interface compatibility but unused.)"""
    if v1 is None or v2 is None:
        if c in ('!=', '<>', '><'):
            return v1 is not v2
        if c == '==':
            return v1 is v2
        return False
    close = math.isclose(v1, v2, rel_tol=1e-04)
    if c == '==':
        return close
    if c in ('!=', '<>', '><'):
        return not close
    if c in ('>=', '=>'):
        return close or v1 > v2
    if c in ('<=', '=<'):
        return close or v1 < v2
    if c == '>':
        return not close and v1 > v2
    if c == '<':
        return not close and v1 < v2
    raise NotImplementedError
class MLStripper(HTMLParser):
    """HTMLParser subclass that discards markup and accumulates only the
    text content fed to it; retrieve the result with get_data()."""
    def __init__(self):
        super().__init__()
        self.reset()
        self.strict = False
        self.convert_charrefs = True
        self.text = io.StringIO()
    def handle_data(self, data):
        # Invoked by HTMLParser for each run of text between tags.
        self.text.write(data)
    def get_data(self):
        return self.text.getvalue()
def StripTags(html):
    """Return *html* with all markup removed, keeping only the text content."""
    stripper = MLStripper()
    stripper.feed(html)
    return stripper.get_data()
def GenerateDataSignature(data):
    """Return an md5 hex digest of *data* that is stable across dict key
    order (dicts are normalized/sorted first) and insensitive to CR/LF
    and surrounding whitespace in strings."""
    try:
        input = data.copy()
    except:
        # Non-copyable inputs (e.g. scalars/strings) are used as-is.
        input = data
    if type(input) == OrderedDict or type(input) == dict:
        input = NormalizeDict(input)
    if type(input) == str:
        input = input.replace('\r', '').replace('\n', '').strip()
    jsonStr = json.dumps(input, sort_keys=True)
    return hashlib.md5(jsonStr.encode("utf-8")).hexdigest()
def NormalizeDict(data):
    """Return a key-sorted OrderedDict copy of *data* with nested dicts
    normalized recursively, nested lists via NormalizeList, and all leaf
    values stringified (CRs stripped, whitespace trimmed).  Anything that
    is not exactly a dict/OrderedDict is returned unchanged."""
    if type(data) not in (OrderedDict, dict):
        return data
    normalized = OrderedDict()
    for key, value in sorted(data.items()):
        if type(value) in (OrderedDict, dict):
            normalized[key] = NormalizeDict(value)
        elif type(value) == list:
            normalized[key] = NormalizeList(value)
        else:
            normalized[key] = str(value).replace('\r', '').strip()
    return normalized
def NormalizeList(data):
    """Return a copy of *data* (when it is a list) with dict elements
    normalized via NormalizeDict, nested lists recursively, and all other
    elements stringified (CRs stripped, whitespace trimmed).  Non-list
    input is returned unchanged."""
    if type(data) != list:
        return data
    normalized = []
    for element in data:
        if type(element) in (OrderedDict, dict):
            normalized.append(NormalizeDict(element))
        elif type(element) == list:
            normalized.append(NormalizeList(element))
        else:
            normalized.append(str(element).replace('\r', '').strip())
    return normalized
# https://stackoverflow.com/a/45424214
def NormalRound(num):
    """Round half away from zero (unlike round()'s banker's rounding)."""
    if num > 0:
        return int(abs(num) + 0.5)
    if num < 0:
        return -int(abs(num) + 0.5)
    return 0
# https://stackoverflow.com/questions/5807952/removing-trailing-zeros-in-python
def NumDisplay(num, decPlaces=..., insertCommas=False):
    """Format *num* (anything Decimal() accepts) as a display string with
    trailing zeros removed; optionally rounded to *decPlaces* and with
    thousands separators.  Returns '' for unparseable input."""
    try:
        dec = decimal.Decimal(num)
        if decPlaces is not ...:
            dec = round(dec, decPlaces)
    except:
        return ''
    # Rebuild the digit string manually from the Decimal tuple so the
    # output never falls back to scientific notation.
    tup = dec.as_tuple()
    delta = len(tup.digits) + tup.exponent  # position of the decimal point
    digits = ''.join(str(d) for d in tup.digits)
    if delta <= 0:
        # Magnitude < 1: pad with leading zeros after "0."
        zeros = abs(tup.exponent) - len(tup.digits)
        val = '0.' + ('0'*zeros) + digits
    else:
        val = digits[:delta] + ('0'*tup.exponent) + '.' + digits[delta:]
    val = val.rstrip('0')
    if val[-1] == '.':
        val = val[:-1]
    if tup.sign:
        val = '-' + val
    if insertCommas:
        val = '{:,}'.format(decimal.Decimal(val))
    return stripTrailingZerosFromNumStr(val)
def stripTrailingZerosFromNumStr(input):
    """Drop trailing zeros (and a dangling decimal point) from a numeric
    string; integer-looking strings pass through untouched."""
    if '.' not in input:
        return input
    return input.rstrip('0').rstrip('.')
def RoundToSignificantAmount(input, smallValue=False, allowDec=False, largeValue=False):
    """Round *input* to a display-friendly granularity that scales with
    magnitude; flags tune the tiers (largeValue enables a 1000-step tier,
    smallValue a 50-step tier, allowDec one decimal place for small
    values).  Returns a formatted string via NumDisplay."""
    if largeValue and input >= 10000:
        return NumDisplay(NormalRound(input / 1000) * 1000, 0)
    if input >= 500:
        return NumDisplay(NormalRound(input / 100) * 100, 0)
    if smallValue and input >= 100: # Used for ammo cost
        return NumDisplay(NormalRound(input / 50) * 50, 0)
    if input >= 50:
        return NumDisplay(NormalRound(input / 10) * 10, 0)
    if input >= 10:
        return NumDisplay(NormalRound(input / 5) * 5, 0)
    if allowDec:
        return NumDisplay(input, 1)
    return NumDisplay(input, 0)
# https://www.geeksforgeeks.org/python-program-to-convert-camel-case-string-to-snake-case/
def CamelCaseToTitleCase(input):
    """Insert a space before each uppercase character, then title-case
    the result (e.g. 'camelCaseString' -> 'Camel Case String')."""
    spaced = [' ' + ch if ch.isupper() else ch for ch in input]
    return ''.join(spaced).strip().title()
def GetPluralForm(word):
    """Naive English pluralization: 'y' -> 'ies', words already ending in
    'es' pass through, other 's' endings get 'es', everything else 's'."""
    if word[-1] == 'y':
        return word[:-1] + 'ies'
    if word[-2:] == 'es':
        return word
    if word[-1] == 's':
        return word + 'es'
    return word + 's'
def PrettyPrint(input):
    """Print *input* to stdout as 4-space-indented JSON."""
    formatted = json.dumps(input, indent=4)
    print(formatted)
def SecondsDisplay(sec):
    """Return a human-readable '<n> second(s)' string for *sec*."""
    unit = "second" if sec == 1 else "seconds"
    return "{} {}".format(sec, unit)
def FixJson(input):
    """Repair almost-JSON by removing a trailing comma before a final '}'
    and trailing commas before any ']'."""
    without_final_obj_comma = re.sub(r',\s*}$', '}', input)
    return re.sub(r',\s*\]', ']', without_final_obj_comma)
def NormalizeString(str, includeStemming=False):
    """Lowercase *str* (non-string values pass through unchanged) and
    optionally run it through StemSentence."""
    try:
        str = str.lower()
    except AttributeError:
        pass  # not a string; leave the value as-is
    if includeStemming:
        str = StemSentence(str)
    return str
def ConvertListToHtmlTable(tableData, tableHeader = None, tableTitle = None, tableClass = None, tableId = None, tableColumnTitleList = None):
    """Render a list of (ordered) dicts as an HTML table string.

    Column headings come from the first row's keys; tableColumnTitleList
    optionally supplies per-column title attributes.  tableHeader emits an
    <h3>, tableTitle wraps the table in a captioned div, tableId/tableClass
    decorate the <table> tag.  Returns '' for empty tableData.
    NOTE(review): row <tr> elements are opened but never closed — appears
    intentional/tolerated by the consumer; confirm before changing.
    """
    rtnVal = ""
    if len(tableData) > 0:
        if tableHeader:
            rtnVal += '<h3>{}</h3>\n'.format(tableHeader)
        if tableTitle:
            rtnVal += '<div style="display: inline-block;"><h4><span class="mw-headline" id="{0}"><div class="tableCaption">{0}</div></span></h4>'.format(tableTitle)
        rtnVal += "<table"
        if tableId:
            rtnVal += ' id="sf-{}"'.format(tableId)
        if tableClass:
            rtnVal += ' class="{}"'.format(tableClass)
        rtnVal += ">\n"
        rtnVal += "<thead>\n"
        first = True
        for row in tableData:
            values = "<tr>\n"
            headings = ""
            if first:
                headings += "<tr>\n"
            idx = 0
            for key, val in row.items():
                if first:
                    # Build the header row from the first data row's keys.
                    if tableColumnTitleList and len(tableColumnTitleList) > idx and tableColumnTitleList[idx]:
                        headings += '<th scope="col"><span title="{}">{}</span></th>\n'.format(tableColumnTitleList[idx], key)
                    else:
                        headings += "<th>{}</th>\n".format(key)
                values += "<td>{}</td>\n".format(val)
                idx += 1
            rtnVal += headings
            if first:
                rtnVal += "</thead>\n"
                rtnVal += "<tbody>\n"
            rtnVal += values
            first = False
        rtnVal += "</tbody>\n"
        rtnVal += "</table>\n\n"
        if tableTitle:
            rtnVal += '</div>'
    return rtnVal
# def stemSentence(sentence):
# token_words = nltk.word_tokenize(sentence)
# token_words
# stem_sentence=[]
# for word in token_words:
# stem_sentence.append(porter.stem(word))
# stem_sentence.append(" ")
# return "".join(stem_sentence)
#
# tokens = nltk.word_tokenize(sentence)
# def ReplaceWikiLinksWithPlaceholders(content):
# def ReplacePlaceholdersWithWikiLinks(content, placeholderMap):
# def GetWikiLink(input):
# def GetWikiArticlePageForNameList(nameList, finalRedirect=False):
# def GetWikiArticlePageList():
def StemSentence(input):
    """Replace punctuation with spaces (keeping letters, digits, parens,
    ':' and '-'), split on whitespace, stem each word with the module-level
    PorterStemmer, and return the stemmed words joined by single spaces."""
    f = StemSentence
    # Regexes are compiled once and cached as attributes on the function.
    if "stripRegex" not in f.__dict__:
        f.stripRegex = re.compile(r'[^a-zA-Z0-9\(\):-]')
    stripRegex = f.stripRegex
    if "splitRegex" not in f.__dict__:
        f.splitRegex = re.compile(r'\s+')
    splitRegex = f.splitRegex
    input = stripRegex.sub(" ", input)
    inputWordList = splitRegex.split(input)
    stemmedWordList = [];
    for iword in inputWordList:
        stemmedWordList.append(stemmer.stem(iword))
    return ' '.join(stemmedWordList).strip()
def SplitTextIntoPhrases(input, maxPhrase):
    """Return every word n-gram of *input* for n = maxPhrase down to 1,
    longest phrases first.  Punctuation (other than parens, ':' and '-')
    is treated as whitespace before splitting."""
    f = SplitTextIntoPhrases
    # Compile-once regex caches stored as attributes on the function.
    if "stripRegex" not in f.__dict__:
        f.stripRegex = re.compile(r'[^a-zA-Z0-9\(\):-]')
    if "splitRegex" not in f.__dict__:
        f.splitRegex = re.compile(r'\s+')
    words = f.splitRegex.split(f.stripRegex.sub(" ", input))
    return [
        ' '.join(words[start:start + length]).strip()
        for length in range(maxPhrase, 0, -1)
        for start in range(0, len(words) - length + 1)
    ]
# SplitNameIntoBaseNameAndItemLevel(input)
# return { 'name': name, 'fullNameMinusLevel': fullNameMinusLevel, 'levelDisplay': levelDisplayOrig, 'levelIdx': levelIdx, 'namePostfix': postfix }
def RemoveWikiLinksFromText(input):
    """Replace [[target|label]] / [[target]] wiki links with their display text."""
    link_pattern = r'\[\[([^\]|]+\|)?([^\]]+)\]\]'
    return re.sub(link_pattern, r'\2', input)
def AddWikiLinksToText(input, useHtml=False, allowExtraWeaponCheck=True, additionalReplacementOverrides=None):
    """Scan *input* for phrases matching known wiki article titles and
    replace them with links (HTML anchors when useHtml, otherwise wiki
    [[...]] markup).

    Longer phrases are tried first (via SplitTextIntoPhrases).  Config
    lists control exact-match requirements, exclusions and overrides;
    additionalReplacementOverrides maps phrase->replacement (falsy value
    = exclude).  When allowExtraWeaponCheck, item names are also matched
    with their level suffix stripped.  A two-phase placeholder substitution
    prevents a replacement's text from being re-matched by later phrases.
    """
    from SFIWikiBotLib import WikiUtils
    from SFIWikiBotLib import Config
    from SFIWikiBotLib import ItemUtils
    includeStemming = True
    lowercaseReplacementExactMatchList = [ v.lower().replace('_', ' ') for v in Config.wikiLinkReplacementExactMatchRequiredList ]
    normalizedReplacementExclusionList = [ NormalizeString(v, includeStemming) for v in Config.wikiLinkReplacementExclusionList ]
    # normalizedReplacementOverrideList = { NormalizeString(k, includeStemming):NormalizeString(v, includeStemming) for k, v in Config.wikiLinkReplacementOverrideList.items() }
    normalizedReplacementOverrideList = {}
    if type(additionalReplacementOverrides) == dict:
        for k, v in additionalReplacementOverrides.items():
            if not v: normalizedReplacementExclusionList.append(NormalizeString(k, includeStemming))
            else: normalizedReplacementOverrideList[NormalizeString(k, includeStemming)] = NormalizeString(v, includeStemming)
    lowercaseNonStemmedReplacementOverrideList = { k.lower().replace('_', ' '):v.lower().replace('_', ' ') for k, v in Config.wikiLinkReplacementOverrideList.items() }
    phraseList = SplitTextIntoPhrases(input, Config.maxWordsInArticleTitleSearch)
    replacementList = []
    replacementInfo = {}
    placeholderCount = 0
    for origPhrase in phraseList:
        normalizedPhrase = origPhrase.lower().replace('_', ' ')
        for s, r in lowercaseNonStemmedReplacementOverrideList.items():
            normalizedPhrase = normalizedPhrase.replace(s, r)
        # Only stem when neither the raw nor the stemmed form is in the
        # exact-match-required list.
        if normalizedPhrase not in lowercaseReplacementExactMatchList:
            normalizedPhraseTmp = NormalizeString(normalizedPhrase, includeStemming)
            if normalizedPhraseTmp not in lowercaseReplacementExactMatchList:
                normalizedPhrase = normalizedPhraseTmp
        if normalizedPhrase in normalizedReplacementExclusionList:
            continue
        if normalizedPhrase in normalizedReplacementOverrideList:
            normalizedPhrase = normalizedReplacementOverrideList[normalizedPhrase]
        nameList = [ origPhrase ]
        if normalizedPhrase not in nameList:
            nameList.append(normalizedPhrase)
        if allowExtraWeaponCheck:
            # Also try the phrase with any item-level suffix removed.
            altNameInfo = ItemUtils.SplitNameIntoBaseNameAndItemLevel(origPhrase)
            altNameLower = altNameInfo['fullNameMinusLevel'].lower().replace('_', ' ')
            if altNameLower != origPhrase.lower().replace('_', ' '):
                if altNameLower in ItemUtils.itemBaseNameList:
                    altName = NormalizeString(altNameInfo['fullNameMinusLevel'], includeStemming)
                    if altName not in normalizedReplacementExclusionList:
                        nameList.append(altNameInfo['fullNameMinusLevel'])
        wikiPage = WikiUtils.GetWikiArticlePageForNameList(nameList)
        if wikiPage:
            replacementInfo = {};
            replacementInfo["originalText"] = origPhrase;
            if useHtml:
                replacementInfo["replacementText"] = '<a href="{}" target="_blank">{}</a>'.format(WikiUtils.GetWikiLink(wikiPage), origPhrase)
            else:
                replacementInfo["replacementText"] = WikiUtils.GetWikiTextLink(wikiPage, origPhrase)
            replacementInfo["placeholder"] = "~~placeholder:{}:~~".format(placeholderCount)
            replacementList.append(replacementInfo)
            placeholderCount += 1
            # Phase 1: swap matched phrases for unique placeholders.
            input = re.sub(r'\b{}\b'.format(re.escape(replacementInfo["originalText"])), replacementInfo["placeholder"], input, 0, re.I)
    # Phase 2: expand placeholders into their final link markup.
    for replacementInfo in replacementList:
        input = re.sub(re.escape(replacementInfo["placeholder"]), replacementInfo["replacementText"], input, 0, re.I)
    return input
# Example Rule List
# ruleList = [
# { 'id': 'name', 'op': 'contains', 'val': 'Tornadian', },
# { 'id': 'weaponType', 'op': '==', 'val': 4, },
# ]
def SearchObjectListUsingSimpleRules(dataList, ruleList):
    """Return the members of *dataList* that satisfy every rule in *ruleList*."""
    matches = []
    for candidate in dataList:
        if TestObjectAgainstRuleList(candidate, ruleList):
            matches.append(candidate)
    return matches
def TestObjectAgainstRuleList(object, ruleList):
    """AND together every rule in *ruleList* against *object* via
    ResolveRuleForObject.  Rules may use 'op'/'val' shorthand; note the
    canonical 'operator'/'value' keys are written back into the rule
    dicts (they are mutated in place).  Any exception yields False."""
    rtnVal = True
    try:
        for rule in ruleList:
            if 'operator' not in rule: rule['operator'] = rule['op']
            if 'value' not in rule: rule['value'] = rule['val']
            rtnVal = rtnVal and ResolveRuleForObject(object, rule)
    except:
        return False
    return rtnVal
# Example RuleSet
# ruleSet = {
# 'condition': 'AND',
# 'rules': [
# { 'id': 'name', 'operator': 'contains', 'value': 'orbital' },
# { 'condition': 'OR', 'rules': [
# { 'id': 'weaponType', 'operator': 'equal', 'value': 0, },
# { 'id': 'weaponType', 'operator': 'equal', 'value': 4, },
# ] }
# ]
# }
def SearchObjectListUsingRuleset(dataList, ruleSet):
    """Return the members of *dataList* that satisfy the (possibly nested)
    querybuilder-style *ruleSet*."""
    matches = []
    for candidate in dataList:
        if TestObjectAgainstRuleset(candidate, ruleSet):
            matches.append(candidate)
    return matches
def TestObjectAgainstRuleset(object, ruleSet):
    """Evaluate a nested querybuilder-style ruleset against *object*.

    ruleSet['condition'] is 'AND' or anything else (treated as OR); each
    entry in ruleSet['rules'] is either a nested ruleset (has 'rules') or
    a leaf rule resolved via ResolveRuleForObject.  An empty 'rules' list
    yields True for AND and False for OR.
    """
    rtnVal = False
    if ruleSet['condition'] == 'AND':
        rtnVal = True
    for rule in ruleSet['rules']:
        if 'rules' in rule:
            ruleVal = TestObjectAgainstRuleset(object, rule)
        else:
            ruleVal = ResolveRuleForObject(object, rule)
        if ruleSet['condition'] == 'AND':
            rtnVal = rtnVal and ruleVal
        else:
            rtnVal = rtnVal or ruleVal
    return rtnVal
# Operator dispatch table for ResolveRuleForObject: maps a rule's
# 'operator' string to a two-argument predicate (object value, rule value).
# String operators use .find(); the falsy-guard branches ('if v1 else ...')
# make empty/None operands behave predictably instead of raising.
switcher = {
    '=': (lambda v1, v2: v1 == v2),
    '==': (lambda v1, v2: v1 == v2),
    '!=': (lambda v1, v2: v1 != v2),
    '=!': (lambda v1, v2: v1 != v2),
    '<': (lambda v1, v2: v1 < v2),
    '<=': (lambda v1, v2: v1 <= v2),
    '=<': (lambda v1, v2: v1 <= v2),
    '>': (lambda v1, v2: v1 > v2),
    '>=': (lambda v1, v2: v1 >= v2),
    '=>': (lambda v1, v2: v1 >= v2),
    'equal': (lambda v1, v2: v1 == v2),
    'not_equal': (lambda v1, v2: v1 != v2),
    'in': (lambda v1, v2: v2.find(v1) >= 0 if v2 else False),
    'in_list': (lambda v1, v2: v2 in v1 if v1 else False),
    'not_in_list': (lambda v1, v2: v2 not in v1 if v1 else True),
    'not_in': (lambda v1, v2: v2.find(v1) == -1 if v2 else True),
    'begins_with': (lambda v1, v2: v1.find(v2) == 0 if v1 else False),
    'not_begins_with': (lambda v1, v2: v1.find(v2) != 0 if v1 else True),
    'contains': (lambda v1, v2: v1.find(v2) >= 0 if v1 else False),
    'not_contains': (lambda v1, v2: v1.find(v2) == -1 if v1 else True),
    'ends_with': (lambda v1, v2: v1.find(v2) == len(v1) - len(v2) if v1 else False),
    'not_ends_with': (lambda v1, v2: v1.find(v2) != len(v1) - len(v2) if v1 else True),
    'is_empty': (lambda v1, v2: False if v1 else True),
    'is_not_empty': (lambda v1, v2: True if v1 else False),
    'is_null': (lambda v1, v2: v1 is None),
    'is_not_null': (lambda v1, v2: v1 is not None),
    'less': (lambda v1, v2: v1 < v2),
    'less_or_equal': (lambda v1, v2: v1 <= v2),
    'greater': (lambda v1, v2: v1 > v2),
    'greater_or_equal': (lambda v1, v2: v1 >= v2),
    'between': (lambda v1, v2: v1 >= v2[0] and v1 <= v2[1]),
    'not_between': (lambda v1, v2: v1 < v2[0] or v1 > v2[1]),
}
def ResolveRuleForObject(object, rule):
    """Evaluate a single leaf rule against *object*.

    The rule 'id' is either a direct key of *object* or one of the
    recognized computed-field names (ItemUtils.*/ShipUtils.* helpers, or
    the 'degrees.'/'percent.' prefixed raw-field conversions).  The value
    is compared against rule['value'] using the predicate selected from
    the module-level `switcher` by rule['operator']; unknown operators and
    predicate exceptions both resolve to False.
    """
    from SFIWikiBotLib import ItemUtils
    from SFIWikiBotLib import ShipUtils
    value1 = None
    if rule['id'] in object:
        value1 = NormalizeString(object[rule['id']])
    elif rule['id'] == 'ItemUtils.IsBeamWeapon':
        value1 = ItemUtils.IsBeamWeapon(object)
    elif rule['id'] == 'ItemUtils.IsItemHidden':
        value1 = ItemUtils.IsItemHidden(object)
    elif rule['id'] == 'ItemUtils.IsItemNprExclusive':
        value1 = ItemUtils.IsItemNprExclusive(object)
    elif rule['id'] == 'ItemUtils.GetItemRange':
        value1 = ItemUtils.GetItemRange(object)
    elif rule['id'] == 'ItemUtils.GetItemDps':
        value1 = ItemUtils.GetItemDps(object)
    elif rule['id'] == 'ItemUtils.GetItemDpsIncludingEffectDamage':
        value1 = ItemUtils.GetItemDpsIncludingEffectDamage(object)
    elif rule['id'] == 'ItemUtils.GetItemDpe':
        value1 = ItemUtils.GetItemDpe(object)
    elif rule['id'] == 'ItemUtils.GetItemDpeIncludingEffectDamage':
        value1 = ItemUtils.GetItemDpeIncludingEffectDamage(object)
    elif rule['id'] == 'ItemUtils.GetWeaponEffectName':
        v = ItemUtils.GetWeaponEffectName(object)
        value1 = NormalizeString(v if v else "None")
    elif rule['id'] == 'ItemUtils.GetShieldEffectName':
        v = ItemUtils.GetShieldEffectName(object)
        value1 = NormalizeString(v if v else "None")
    elif rule['id'] == 'ItemUtils.GetItemEffectTime':
        value1 = ItemUtils.GetItemEffectTime(object)
    elif rule['id'] == 'ItemUtils.GetItemSource':
        value1 = NormalizeString(ItemUtils.GetItemSource(object))
    elif rule['id'] == 'ItemUtils.GetItemSkillName':
        value1 = NormalizeString(ItemUtils.GetItemSkillName(object))
    elif rule['id'] == 'ItemUtils.GetItemAugType':
        value1 = NormalizeString(ItemUtils.GetItemAugType(object))
    elif rule['id'] == 'ItemUtils.GetItemSkillLevel':
        value1 = ItemUtils.GetItemSkillLevel(object)
    elif rule['id'] == 'ItemUtils.GetItemDamageType':
        value1 = NormalizeString(ItemUtils.GetItemDamageType(object) or '')
    elif rule['id'] == 'ItemUtils.GetItemDescription':
        value1 = NormalizeString(ItemUtils.GetItemDescription(object))
    elif rule['id'] == 'ItemUtils.ItemDisplayStatBPLocation':
        value1 = NormalizeString(ItemUtils.ItemDisplayStatBPLocation(object))
    elif rule['id'] == 'ItemUtils.GetItemPurchasePrice':
        value1 = ItemUtils.GetItemPurchasePrice(object)
    elif rule['id'] == 'ShipUtils.GetShipPurchasePrice':
        value1 = ShipUtils.GetShipPurchasePrice(object)
    elif rule['id'] == 'ShipUtils.GetMaxSpeedForShip':
        value1 = ShipUtils.GetMaxSpeedForShip(object)
    elif rule['id'] == 'ShipUtils.ShipCanBeBoughtByPlayers':
        value1 = ShipUtils.ShipCanBeBoughtByPlayers(object)
    elif rule['id'] == 'ShipUtils.GetTypeForShip':
        value1 = NormalizeString(ShipUtils.GetTypeForShip(object))
    elif rule['id'] == 'ShipUtils.IsShipHidden':
        value1 = ShipUtils.IsShipHidden(object)
    elif rule['id'] == 'ShipUtils.GetRaceForShip':
        value1 = NormalizeString(ShipUtils.GetRaceForShip(object))
    elif rule['id'] == 'ItemUtils.GetRaceForItem':
        value1 = NormalizeString(ItemUtils.GetRaceForItem(object))
    elif rule['id'] == 'ItemUtils.GetItemTotalDamagePerVolley':
        value1 = ItemUtils.GetItemTotalDamagePerVolley(object)
    # 'degrees.<field>' exposes a raw field scaled by 30 (turn units -> degrees).
    if 'degrees.' in rule['id'].lower() and value1 is None:
        prop = rule['id'].split('.', 1)[1]
        if prop in object:
            value1 = object[prop] * 30
    # 'percent.<field>' exposes a raw 0-1 field scaled to a percentage.
    if 'percent.' in rule['id'].lower() and value1 is None:
        prop = rule['id'].split('.', 1)[1]
        if prop in object:
            value1 = object[prop] * 100
    value2 = NormalizeString(rule['value'])
    # Unknown operators fall through to a predicate that returns None.
    func = switcher.get(rule['operator'], (lambda v1, v2: None))
    try:
        rtnVal = func(value1, value2)
    except:
        rtnVal = False
    return rtnVal
| 34.660964 | 176 | 0.607709 |
acf41d82f8a3852e4a9c0190bdee32f2d190ec5e | 81,901 | py | Python | datacube_ows/ows_cfg_example.py | GispoCoding/datacube-ows | 4ffe14ccf9f382bff8f4a33fc54729d3e18b670e | [
"Apache-2.0"
] | null | null | null | datacube_ows/ows_cfg_example.py | GispoCoding/datacube-ows | 4ffe14ccf9f382bff8f4a33fc54729d3e18b670e | [
"Apache-2.0"
] | null | null | null | datacube_ows/ows_cfg_example.py | GispoCoding/datacube-ows | 4ffe14ccf9f382bff8f4a33fc54729d3e18b670e | [
"Apache-2.0"
] | null | null | null | # pylint: skip-file
# Example configuration file for datacube_ows.
#
# For detailed formal documentation see:
#
# https://datacube-ows.readthedocs.io/en/latest/configuration.html
#
# OVERVIEW
#
# This file forms the primary documentation for the configuration file format at this stage.
#
# The actual configuration is held in a single serialisable object that can be directly
# declared as a python object or imported from JSON.
#
# WHERE IS CONFIGURATION READ FROM?
#
# Configuration is read by default from the ows_cfg object in datacube_ows/ows_cfg.py
#
# but this can be overridden by setting the $DATACUBE_OWS_CFG environment variable.
#
# $DATACUBE_OWS_CFG is interpreted as follows (first matching alternative applies):
#
# 1. Has a leading slash, e.g. "/opt/odc_ows_cfg/odc_ows_cfg_prod.json"
# Config loaded as json from absolute file path.
#
# 2. Contains a slash, e.g. "configs/prod.json"
# Config loaded as json from relative file path.
#
# 3. Begins with an open brace "{", e.g. "{...}"
# Config loaded directly from the environment variable as json (not recommended)
#
# 4. Ends in ".json", e.g. "cfg_prod.json"
# Config loaded as json from file in working directory.
#
# 5. Contains a dot (.), e.g. "package.sub_package.module.cfg_object_name"
# Imported as python object (expected to be a dictionary).
#      N.B. It is up to you to ensure that the Python file in question is in your Python path.
#
# 6. Valid python object name, e.g. "cfg_prod"
# Imported as python object from named object in datacube_ows/ows_cfg.py
#
# 7. Blank or not set
# Default to import ows_cfg object in datacube_ows/ows_cfg.py as described above.
#
# REUSING CHUNKS OF CONFIGURATION
#
# Often it is desirable to re-use chunks of configuration in multiple places. E.g. a set
# of related data products may share a band index or style definition configurations.
#
# If you are loading config as a Python object, this is trivial, as demonstrated in this
# example file.
#
# If you want to reuse chunks of config in json, or wish to combine bits of json config
# with bits of python config, the following convention applies in both Python and JSON
# configuration:
#
# Any JSON or Python element that forms the full configuration tree or a subset of it,
# can be supplied in any of the following ways:
#
# 1. Directly embed the config content:
# {
# "a_cfg_entry": 1,
# "another_entry": "llama",
# }
#
# 2. Include a python object (by FQN):
# {
# "include": "path.to.module.object",
# "type": "python"
# }
#
# N.B. It is up to you to make sure the included Python file is in your Python Path.
# Relative Python imports are not supported.
#
# 3. Include a JSON file (by absolute or relative file path):
# {
# "include": "path/to/file.json",
# "type": "json"
# }
#
# N.B. Resolution of relative file paths is done in the following order:
# a) Relative to the working directory of the web app.
# b) If a JSON file is being included from another JSON file, relative to
# directory in which the including file resides.
#
# Note that this does not just apply to dictionaries. Either of the above include dictionaries
# could expand to an array, or even to single integer or string.
#
# THIS EXAMPLE FILE
#
# In this example file, there are some reusable code chunks defined at the top. The actual
# config tree is defined as ows_cfg below the reusable chunks.
#
# REUSABLE CONFIG FRAGMENTS - Band alias maps
landsat8_bands = {
    # Native band name -> list of aliases.
    #
    # Alias rules:
    #   * Aliases must be unique within the product.
    #   * Anywhere the configuration refers to a band by name, an alias may
    #     be used instead.
    #   * The native band name is always accepted as a valid alias, whether
    #     or not it is explicitly listed.
    #   * GetFeatureInfo and WCS responses label each band with its first
    #     declared alias (falling back to the native name when none is given).
    #   * Bands omitted from this map cannot be referenced elsewhere in the
    #     configuration and are excluded from GetFeatureInfo output.
    #   * If a product declares no band map at all, every available band is
    #     exposed under its native name only.
    #   * "scale_range" and "function" are reserved words and may not be used
    #     as aliases.  (If a native band happens to have a reserved name,
    #     declare an alias for it and use that alias throughout the config.)
    "red": [],
    "green": [],
    "blue": ["near_blue"],
    "nir": ["near_infrared"],
    "swir1": ["shortwave_infrared_1", "near_shortwave_infrared"],
    "swir2": ["shortwave_infrared_2", "far_shortwave_infrared"],
    "coastal_aerosol": ["far_blue"],
    # Pixel-quality bands belonging to the main data product should also be
    # listed here.
}
sentinel2_bands = {
    # Sentinel-2 band aliases.  NBAR bands keep explicitly prefixed aliases
    # where useful, while the NBART bands also claim the short, unprefixed
    # names (e.g. "blue", "nir_1").
    "nbar_coastal_aerosol": ["nbar_far_blue"],
    "nbar_blue": [],
    "nbar_green": [],
    "nbar_red": [],
    "nbar_red_edge_1": [],
    "nbar_red_edge_2": [],
    "nbar_red_edge_3": [],
    "nbar_nir_1": ["nbar_near_infrared_1"],
    "nbar_nir_2": ["nbar_near_infrared_2"],
    "nbar_swir_2": ["nbar_shortwave_infrared_2"],
    "nbar_swir_3": ["nbar_shortwave_infrared_3"],
    "nbart_coastal_aerosol": ["coastal_aerosol", "nbart_far_blue", "far_blue"],
    "nbart_blue": ["blue"],
    "nbart_green": ["green"],
    "nbart_red": ["red"],
    "nbart_red_edge_1": ["red_edge_1"],
    "nbart_red_edge_2": ["red_edge_2"],
    "nbart_red_edge_3": ["red_edge_3"],
    "nbart_nir_1": ["nir_1", "nbart_near_infrared_1"],
    "nbart_nir_2": ["nir_2", "nbart_near_infrared_2"],
    "nbart_swir_2": ["swir_2", "nbart_shortwave_infrared_2"],
    "nbart_swir_3": ["swir_3", "nbart_shortwave_infrared_3"],
    # Pixel-quality bands in the main data product are listed here too.
    "quality": [],
}
# REUSABLE CONFIG FRAGMENTS - Style definitions
# Examples of styles which are linear combinations of the available spectral bands.
style_rgb = {
    # Machine-readable style name: required, unique within the layer.
    "name": "simple_rgb",
    # Human-readable title: required, unique within the layer.
    "title": "Simple RGB",
    # Longer human-readable description: required, unique within the layer.
    "abstract": "Simple true-colour image, using the red, green and blue bands",
    # Linear-combination styles must declare a "components" section whose
    # keys are "red", "green" and "blue" (plus an optional "alpha").  Each
    # entry maps band names (aliases allowed) to multipliers; keeping each
    # component's multipliers summing to 1.0 preserves overall brightness,
    # although this is not enforced.
    "components": {
        "red": {"red": 1.0},
        "green": {"green": 1.0},
        "blue": {"blue": 1.0},
    },
    # Raw band values in this range are compressed to 8 bits for the output
    # tiles; values outside the range clip to 0 or 255.
    "scale_range": [0.0, 3000.0],
    # The legend block is optional for linear-combination styles, which
    # default to showing no legend.  A legend cannot be auto-generated for
    # this style type, so a PNG url must be supplied whenever "show_legend"
    # is True.  Legend urls are proxied, not shown directly to the user.
    "legend": {
        "show_legend": True,
        "url": "http://example.com/custom_style_image.png",
    },
}
style_rgb_cloudmask = {
    "name": "cloud_masked_rgb",
    "title": "Simple RGB with cloud masking",
    "abstract": "Simple true-colour image, using the red, green and blue bands, with cloud masking",
    "components": {
        "red": {"red": 1.0},
        "green": {"green": 1.0},
        "blue": {"blue": 1.0},
    },
    # Pixel-quality masking example: only pixels whose PQ flags match every
    # listed value (here, "no_cloud" under both the ACCA and Fmask
    # algorithms) are rendered; all other pixels are made transparent.
    "pq_masks": [
        {
            "flags": {
                "cloud_acca": "no_cloud",
                "cloud_fmask": "no_cloud",
            },
        },
    ],
    "scale_range": [0.0, 3000.0],
}
style_rgb_cloud_and_shadowmask = {
    "name": "cloud_and_shadow_masked_rgb",
    "title": "Simple RGB with cloud and cloud shadow masking",
    "abstract": "Simple true-colour image, using the red, green and blue bands, with cloud and cloud shadow masking",
    "components": {
        "red": {"red": 1.0},
        "green": {"green": 1.0},
        "blue": {"blue": 1.0},
    },
    # As style_rgb_cloudmask, but additionally requires the cloud-shadow
    # flags from both algorithms to read "no_cloud_shadow".
    "pq_masks": [
        {
            "flags": {
                "cloud_acca": "no_cloud",
                "cloud_fmask": "no_cloud",
                "cloud_shadow_acca": "no_cloud_shadow",
                "cloud_shadow_fmask": "no_cloud_shadow",
            },
        },
    ],
    "scale_range": [0.0, 3000.0],
}
style_ext_rgb = {
    "name": "extended_rgb",
    "title": "Extended RGB",
    "abstract": "Extended true-colour image, incorporating the coastal aerosol band",
    "components": {
        "red": {"red": 1.0},
        "green": {"green": 1.0},
        # The blue channel blends the blue band with coastal aerosol.
        "blue": {"blue": 0.6, "coastal_aerosol": 0.4},
    },
    "scale_range": [0.0, 3000.0],
}
style_ls8_allband_false_colour = {
    "name": "wideband",
    "title": "Wideband false-colour",
    "abstract": "False-colour image, incorporating all available LS8 spectral bands",
    # Each output channel blends three adjacent spectral bands with weights
    # 0.255 / 0.45 / 0.255, so every LS8 band contributes to the image.
    "components": {
        "red": {"swir2": 0.255, "swir1": 0.45, "nir": 0.255},
        "green": {"nir": 0.255, "red": 0.45, "green": 0.255},
        "blue": {"green": 0.255, "blue": 0.45, "coastal_aerosol": 0.255},
    },
    "scale_range": [0.0, 3000.0],
}
style_infrared_false_colour = {
    "name": "infra_red",
    "title": "False colour multi-band infra-red",
    "abstract": "Simple false-colour image, using the near and short-wave infra-red bands",
    "components": {
        # The special "scale_range" key inside a component overrides the
        # style-level scale_range for that component only.  (If a native
        # band is unluckily named "scale_range", access it via an alias.)
        "red": {
            "swir1": 1.0,
            "scale_range": [5.0, 4000.0],
        },
        "green": {
            "swir2": 1.0,
            "scale_range": [25.0, 4000.0],
        },
        "blue": {
            "nir": 1.0,
            "scale_range": [0.0, 3000.0],
        },
    },
    # No style-level scale_range is needed: every component supplies its own.
}
style_mineral_content = {
    "name": "mineral_content",
    "title": "Multi-band mineral indexes",
    "abstract": "Red: Ferric Iron. Green: Bare soil. Blue: Clay/mica",
    # A component given as a dict containing a "function" key is treated as
    # a callback:
    #   "function" (required) - fully qualified path to a python function.
    #   "args" / "kwargs" (optional) - extra positional / keyword arguments
    #       always passed to the function.
    #   "mapped_bands" (optional, default False) - when true, a band-mapping
    #       function is passed as the "band_mapper" keyword argument so the
    #       callback can resolve band aliases (as used in args/kwargs) to
    #       native band names.
    # The callback receives an xarray Dataset (plus any declared args and
    # kwargs) and returns an xarray DataArray of band data; normalising the
    # output to 0-255 is the function's responsibility.
    "components": {
        "red": {
            "function": "datacube_ows.band_utils.norm_diff",
            "mapped_bands": True,
            "kwargs": {
                "band1": "red",
                "band2": "blue",
                "scale_from": [-0.1, 1.0],
            },
        },
        "green": {
            "function": "datacube_ows.band_utils.norm_diff",
            "mapped_bands": True,
            "kwargs": {
                "band1": "nir",
                "band2": "swir1",
                "scale_from": [-0.1, 1.0],
            },
        },
        "blue": {
            "function": "datacube_ows.band_utils.norm_diff",
            "mapped_bands": True,
            "kwargs": {
                "band1": "swir1",
                "band2": "swir2",
                "scale_from": [-0.1, 1.0],
            },
        },
    },
    # When ANY component uses a callback, every band the callbacks need must
    # be declared under "additional_bands".
    "additional_bands": ["red", "blue", "nir", "swir1", "swir2"],
    # A style-level scale_range may be omitted because every component is a
    # callback (or would otherwise define its own scale_range).
}
# Monochrome single band layers
def _monochrome_style(band, title, abstract):
    """Build a single-band greyscale style definition.

    All three output channels are driven by *band* alone, producing a
    monochrome rendering over the 0-3000 scale range shared by the LS8
    examples in this file.
    """
    return {
        "name": band,
        "title": title,
        "abstract": abstract,
        "components": {
            # Identical weights on every channel => greyscale output.
            "red": {band: 1.0},
            "green": {band: 1.0},
            "blue": {band: 1.0},
        },
        "scale_range": [0.0, 3000.0],
    }


# One greyscale style per LS8 spectral band.  (These were previously seven
# hand-copied dicts; the helper removes the duplication while producing
# identical configuration values under the same names.)
style_pure_ls8_coastal_aerosol = _monochrome_style(
    "coastal_aerosol",
    "Spectral band 1 - Coastal aerosol",
    "Coastal aerosol band, approximately 435nm to 450nm",
)
style_pure_ls8_blue = _monochrome_style(
    "blue",
    "Spectral band 2 - Blue",
    "Blue band, approximately 453nm to 511nm",
)
style_pure_ls8_green = _monochrome_style(
    "green",
    "Spectral band 3 - Green",
    "Green band, approximately 534nm to 588nm",
)
style_pure_ls8_red = _monochrome_style(
    "red",
    "Spectral band 4 - Red",
    "Red band, roughly 637nm to 672nm",
)
style_pure_ls8_nir = _monochrome_style(
    "nir",
    "Spectral band 5 - Near infra-red",
    "Near infra-red band, roughly 853nm to 876nm",
)
style_pure_ls8_swir1 = _monochrome_style(
    "swir1",
    "Spectral band 6 - Short wave infra-red 1",
    "Short wave infra-red band 1, roughly 1575nm to 1647nm",
)
style_pure_ls8_swir2 = _monochrome_style(
    "swir2",
    "Spectral band 7 - Short wave infra-red 2",
    "Short wave infra-red band 2, roughly 2117nm to 2285nm",
)
# Examples of non-linear colour-ramped styles.
style_ndvi = {
    "name": "ndvi",
    "title": "NDVI",
    "abstract": "Normalised Difference Vegetation Index - a derived index that correlates well with the existence of vegetation",
    # The index function returns the continuous value from which the heat map is derived.
    #
    # Two formats are supported:
    # 1. A string containing a fully qualified path to a python function
    #    e.g. "index_function": "datacube_ows.ogc_utils.not_a_real_function_name",
    #
    # 2. A dict containing the following elements:
    #    a) "function" (required): A string containing the fully qualified path to a python function
    #    b) "args" (optional): An array of additional positional arguments that will always be passed to the function.
    #    c) "kwargs" (optional): An array of additional keyword arguments that will always be passed to the function.
    #    d) "mapped_bands" (optional): Boolean (defaults to False). If true, a band mapping function is passed
    #       to the function as a keyword argument named "band_mapper". This is useful if you are passing band aliases
    #       to the function in the args or kwargs. The band_mapper allows the index function to convert band aliases
    #       to band names.
    #
    # The function is assumed to take one argument, an xarray Dataset. (Plus any additional
    # arguments required by the args and kwargs values in format 2, possibly including product_cfg.)
    #
    "index_function": {
        "function": "datacube_ows.band_utils.norm_diff",
        "mapped_bands": True,
        "kwargs": {
            "band1": "nir",
            "band2": "red"
        }
    },
    # List of bands used by this style. A band cannot be passed to the index function unless it is
    # declared here - referencing an undeclared band results in an error. Band aliases can be used here.
    "needed_bands": ["red", "nir"],
    # The color ramp. Values between specified entries have both their alphas and colours
    # interpolated.
    "color_ramp": [
        # Any value less than the first entry will have colour and alpha of the first entry.
        # (i.e. in this example all negative values will be fully transparent (alpha=0.0).)
        {
            "value": -0.0,
            "color": "#8F3F20",
            "alpha": 0.0
        },
        {
            "value": 0.0,
            "color": "#8F3F20",
            "alpha": 1.0
        },
        {
            # An alpha value does not have to be defined;
            # if no alpha is specified, alpha will default to 1.0 (fully opaque)
            "value": 0.1,
            "color": "#A35F18"
        },
        {
            "value": 0.2,
            "color": "#B88512"
        },
        {
            "value": 0.3,
            "color": "#CEAC0E"
        },
        {
            "value": 0.4,
            "color": "#E5D609"
        },
        {
            "value": 0.5,
            "color": "#FFFF0C"
        },
        {
            "value": 0.6,
            "color": "#C3DE09"
        },
        {
            "value": 0.7,
            "color": "#88B808"
        },
        {
            "value": 0.8,
            "color": "#529400"
        },
        {
            "value": 0.9,
            "color": "#237100"
        },
        # Values greater than the last entry will use the colour and alpha of the last entry.
        # (N.B. This will not happen for this example because the index is normalised so that 1.0 is the
        # maximum possible value.)
        {
            "value": 1.0,
            "color": "#114D04"
        }
    ],
    # If true, the calculated index value for the pixel will be included in GetFeatureInfo responses.
    # Defaults to True.
    "include_in_feature_info": True,
    # Legend section is optional for non-linear colour-ramped styles.
    # If not supplied, a legend for the style will be automatically generated from the colour ramp.
    "legend": {
        # Whether or not to display a legend for this style.
        # Defaults to True for non-linear colour-ramped styles.
        "show_legend": True,
        # Instead of using the generated color ramp legend for the style, a URL to an PNG file can
        # be used instead. If 'url' is not supplied, the generated legend is used.
        "url": "http://example.com/custom_style_image.png"
    }
}
# Examples of non-linear colour-ramped style with multi-date support.
style_ndvi_delta = {
    "name": "ndvi_delta",
    "title": "NDVI Delta",
    "abstract": "Normalised Difference Vegetation Index - with delta support",
    "index_function": {
        "function": "datacube_ows.band_utils.norm_diff",
        "mapped_bands": True,
        "kwargs": {
            "band1": "nir",
            "band2": "red"
        }
    },
    "needed_bands": ["red", "nir"],
    # The color ramp for single-date requests - same as the ndvi style example above
    "color_ramp": [
        {
            "value": -0.0,
            "color": "#8F3F20",
            "alpha": 0.0
        },
        {
            "value": 0.0,
            "color": "#8F3F20",
            "alpha": 1.0
        },
        {
            "value": 0.1,
            "color": "#A35F18"
        },
        {
            "value": 0.2,
            "color": "#B88512"
        },
        {
            "value": 0.3,
            "color": "#CEAC0E"
        },
        {
            "value": 0.4,
            "color": "#E5D609"
        },
        {
            "value": 0.5,
            "color": "#FFFF0C"
        },
        {
            "value": 0.6,
            "color": "#C3DE09"
        },
        {
            "value": 0.7,
            "color": "#88B808"
        },
        {
            "value": 0.8,
            "color": "#529400"
        },
        {
            "value": 0.9,
            "color": "#237100"
        },
        {
            "value": 1.0,
            "color": "#114D04"
        }
    ],
    "include_in_feature_info": True,
    "legend": {
        # Show the legend (default True for colour ramp styles)
        "show_legend": True,
        # Example config for colour ramp style auto-legend generation.
        # The range covered by the legend.
        # Defaults to the first and last non transparent (alpha != 0.0)
        # entry in the explicit colour ramp, or the values in the range parameter.
        # It is recommended that values be supplied as integers or strings rather
        # than floating point.
        "begin": "0.0",
        "end": "1.0",
        # Ticks.
        # One of the following alternatives. All the examples below result in the same tick behaviour, given
        # the begin and end values above.
        #
        # 1. Regularly spaced ticks, by size, starting from the begin tick.
        "ticks_every": "0.2",
        # 2. Regularly spaced ticks, by number of ticks, not counting the begin tick, but including the end tick. (int)
        # "tick_count": 5,
        # 3. Explicit ticks
        # "ticks": [ "0.0", "0.2", "0.4", "0.6", "0.8", "1.0"]
        # Default is a tick_count of 1, which means only the begin and end ticks.
        # Legend title. Defaults to the style name.
        "title": "This is not a legend",
        # Units
        # added to title of legend in parenthesis, default is to not display units. To emulate
        # the previous default behaviour use:
        "units": "unitless",
        # decimal_places. 1 for "1.0" style labels, 2 for "1.00" and 0 for "1", etc.
        # (default 1)
        "decimal_places": 1,
        # tick_labels
        # Labels for individual ticks can be customised.
        "tick_labels": {
            # The special entry "default" allows setting
            # a prefix and/or suffix for all labels.
            # Default is no prefix or suffix
            "default": {
                # E.g. this encloses every tick label in parentheses.
                "prefix": "(",
                "suffix": ")",
            },
            # Other entries override the label for individual ticks.
            # If they do not match a tick, as defined by the tick behaviour
            # described above, the entry is ignored. If you are having trouble
            # getting the right tick value, use the "ticks" option to explicitly
            # declare your tick locations and make sure you use strings instead of
            # floats.
            # The default prefix and suffix can be over-ridden.
            "0.0": {
                # E.g. to remove the parentheses for the 0.0 tick
                "prefix": "",
                "suffix": "",
            },
            # Or the label can be changed. Note that the default prefix and suffix
            # are still applied unless explicitly over-ridden.
            # E.g. To display "(max)" for the 1.0 tick:
            "1.0": {
                "label": "max"
            }
        },
        # MatPlotLib rcparams options.
        # Defaults to {} (i.e. matplotlib defaults)
        # See https://matplotlib.org/3.2.2/tutorials/introductory/customizing.html
        "rcParams": {
            "lines.linewidth": 2,
            "font.weight": "bold",
        },
        # Image size (in "inches").
        # Matplotlib's default dpi is 100, so measured in hundreds of pixels unless the dpi
        # is over-ridden by the rcParams above.
        # Default is 4x1.25, i.e. 400x125 pixels
        "width": 4,
        "height": 1.25,
        # strip_location
        # The location and size of the coloured strip, in format:
        # [ left, bottom, width, height ], as passed to the Matplotlib Figure.add_axes function.
        # All values as fractions of the width and height. (i.e. between 0.0 and 1.0)
        # The default is:
        "strip_location": [ 0.05, 0.5, 0.9, 0.15 ]
    } ,
    # Define behaviour(s) for multi-date requests. If not declared, the style only supports single-date requests.
    "multi_date": [
        # A multi-date handler. Different handlers can be declared for different numbers of dates in a request.
        {
            # The count range for which this handler is to be used - a tuple of two ints, the smallest and
            # largest date counts for which this handler will be used. Required.
            "allowed_count_range": [2, 2],
            # A function, expressed in the standard format as described elsewhere in this example file.
            # The function is assumed to take one argument, an xarray Dataset.
            # The function returns an xarray Dataset with a single band, which is the input to the
            # colour ramp defined below.
            "aggregator_function": {
                "function": "datacube_ows.band_utils.multi_date_delta"
            },
            # The multi-date color ramp. May be defined as an explicit colour ramp, as shown above for the single
            # date case; or may be defined with a range and unscaled color ramp as shown here.
            #
            # The range specifies the min and max values for the color ramp. Required if an explicit color
            # ramp is not defined.
            "range": [-1.0, 1.0],
            # The name of a named matplotlib color ramp.
            # Reference here: https://matplotlib.org/examples/color/colormaps_reference.html
            # Only used if an explicit colour ramp is not defined. Optional - defaults to a simple (but
            # kind of ugly) blue-to-red rainbow ramp.
            "mpl_ramp": "RdBu",
            "legend": {
                # Legend only covers the positive part of the ramp.
                "begin": "0.0",
                "end": "1.0"
            },
            # The feature info label for the multi-date index value.
            "feature_info_label": "ndvi_delta"
        }
    ]
}
# Examples of Matplotlib Color-Ramp styles
style_deform = {
    "name": "deform",
    "title": "InSAR Deformation",
    "abstract": "InSAR Derived Deformation Map",
    # Min and max values covered by the colour ramp; required whenever no
    # explicit colour ramp is defined.
    "range": [-110.0, 110.0],
    # A named Matplotlib colour ramp (see the Matplotlib colormaps
    # reference).  Only consulted when no explicit ramp is defined;
    # optional, defaulting to a simple blue-to-red rainbow ramp.
    "mpl_ramp": "RdBu",
    # Include the computed index value in GetFeatureInfo (default True).
    "include_in_feature_info": True,
    # Auto-generated legend: show only the positive half of the range, a
    # tick every 10 mm, and integer tick labels.
    "legend": {
        "ticks_every": "10",
        "begin": "0.0",
        # Appended (in parentheses) to the legend title.
        "units": "mm",
        # Decimal places for tick labels; 0 renders them as ints.
        "decimal_places": 0,
    },
}
style_ndvi_cloudmask = {
    "name": "ndvi_cloudmask",
    "title": "NDVI with cloud masking",
    "abstract": "Normalised Difference Vegetation Index (with cloud masking) - a derived index that correlates well with the existence of vegetation",
    "index_function": {
        "function": "datacube_ows.band_utils.norm_diff",
        "mapped_bands": True,
        "kwargs": {
            "band1": "nir",
            "band2": "red",
        },
    },
    "needed_bands": ["red", "nir"],
    # Supplying a "range" instead of a "color_ramp" selects the default heat
    # map (dark blue through blue, green, yellow and orange to dark red).
    # Index values below the lower limit render transparent; values above
    # the upper limit render dark red.
    "range": [0.0, 1.0],
    # Cloud masking works exactly as it does for linear-combination styles.
    "pq_masks": [
        {
            "flags": {
                "cloud_acca": "no_cloud",
                "cloud_fmask": "no_cloud",
            },
        },
    ],
    # NDVI is already reported in GetFeatureInfo via the unmasked style.
    "include_in_feature_info": False,
}
style_ndwi = {
    "name": "ndwi",
    "title": "NDWI",
    "abstract": "Normalised Difference Water Index - a derived index that correlates well with the existence of water",
    # Normalised difference of the green and near infra-red bands.
    "index_function": {
        "function": "datacube_ows.band_utils.norm_diff",
        "mapped_bands": True,
        "kwargs": {"band1": "green", "band2": "nir"},
    },
    "needed_bands": ["green", "nir"],
    "range": [0.0, 1.0],
}
style_ndbi = {
    "name": "ndbi",
    "title": "NDBI",
    "abstract": "Normalised Difference Buildup Index - a derived index that correlates with the existence of urbanisation",
    # Normalised difference of the short-wave and near infra-red bands.
    "index_function": {
        "function": "datacube_ows.band_utils.norm_diff",
        "mapped_bands": True,
        "kwargs": {"band1": "swir2", "band2": "nir"},
    },
    "needed_bands": ["swir2", "nir"],
    "range": [0.0, 1.0],
}
style_wofs_frequency = {
    "name": "WOfS_frequency",
    # Fixed: the title previously carried a stray leading space
    # (" Wet and Dry Count"), which leaked into GetCapabilities output.
    "title": "Wet and Dry Count",
    "abstract": "WOfS summary showing the frequency of Wetness",
    "needed_bands": ["frequency"],
    # Pass the "frequency" band straight through as the index value.
    "index_function": {
        "function": "datacube_ows.band_utils.single_band",
        "mapped_bands": True,
        "kwargs": {
            "band": "frequency",
        }
    },
    # Should the index_function value be shown as a derived band in GetFeatureInfo responses.
    # Defaults to true for style types with an index function.
    "include_in_feature_info": False,
    "color_ramp": [
        {
            "value": 0.002,
            "color": "#000000",
            "alpha": 0.0
        },
        {
            "value": 0.005,
            "color": "#8e0101",
            "alpha": 0.25
        },
        {
            "value": 0.01,
            "color": "#cf2200",
            "alpha": 0.75
        },
        {
            "value": 0.02,
            "color": "#e38400"
        },
        {
            "value": 0.05,
            "color": "#e3df00"
        },
        {
            "value": 0.1,
            "color": "#a6e300"
        },
        {
            "value": 0.2,
            "color": "#62e300"
        },
        {
            "value": 0.3,
            "color": "#00e32d"
        },
        {
            "value": 0.4,
            "color": "#00e384"
        },
        {
            "value": 0.5,
            "color": "#00e3c8"
        },
        {
            "value": 0.6,
            "color": "#00c5e3"
        },
        {
            "value": 0.7,
            "color": "#0097e3"
        },
        {
            "value": 0.8,
            "color": "#005fe3"
        },
        {
            "value": 0.9,
            "color": "#000fe3"
        },
        {
            "value": 1.0,
            "color": "#5700e3",
        }
    ],
    # Legend formatting: the raw 0.0-1.0 data is presented as a percentage,
    # with a tick every 25% (0.25 raw) relabelled to whole numbers.
    "legend": {
        "units": "%",
        "begin": "0.00",
        "end": "1.00",
        "ticks_every": "0.25",
        "decimal_places": 2,
        # Override the raw tick labels with percentages.
        "tick_labels": {
            "0.00": {"label": "0"},
            "0.25": {"label": "25"},
            "0.50": {"label": "50"},
            "0.75": {"label": "75"},
            "1.00": {"label": "100"},
        }
    }
}
# Mask layers - examples of how to display raw pixel quality data.
# This works by creatively mis-using the colormap styles.
# The index function returns a constant, so the output is a flat single colour, masked by the
# relevant pixel quality flags.
style_cloud_mask = {
    "name": "cloud_mask",
    "title": "Cloud Mask",
    "abstract": "Highlight pixels with cloud.",
    # The index function returns a constant, so the style renders as a flat
    # single colour whose visible area is carved out by the pq_masks below.
    "index_function": {
        "function": "datacube_ows.band_utils.constant",
        "mapped_bands": True,
        "kwargs": {
            "band": "red",
            "const": "0.1",
        },
    },
    "needed_bands": ["red"],
    "range": [0.0, 1.0],
    # pq_masks flags normally describe which areas SHOULD be shown (pixels
    # with any of the declared flag values).  Setting "invert" flips that:
    # show only pixels for which none of the declared flags hold.
    #
    # As written, this shows pixels which are not "not cloud" under either
    # algorithm.  Declaring "cloud" for both flags with "invert" left False
    # would instead show pixels which are not clouds in both metrics.
    "pq_masks": [
        {
            "invert": True,
            "flags": {
                "cloud_acca": "no_cloud",
                "cloud_fmask": "no_cloud",
            },
        },
    ],
    # The auto-generated legend is meaningless for a mask layer, so disable
    # it (alternatively, supply a url to a legend PNG).
    "legend": {
        "show_legend": False,
    },
    # The constant index function causes errors in GetFeatureInfo, and the
    # pixel flags are already reported there anyway.
    "include_in_feature_info": False,
}
# Hybrid style - blends a linear mapping and an colour-ramped index style
# There is no scientific justification for these styles, I just think they look cool. :)
style_rgb_ndvi = {
    "name": "rgb_ndvi",
    "title": "NDVI plus RGB",
    "abstract": "Normalised Difference Vegetation Index (blended with RGB) - a derived index that correlates well with the existence of vegetation",
    # Blend ratio between the linear components and the colour-ramped index:
    # 1.0 is pure RGB, 0.0 is pure colour ramp.
    "component_ratio": 0.6,
    "index_function": {
        "function": "datacube_ows.band_utils.norm_diff",
        "mapped_bands": True,
        "kwargs": {"band1": "nir", "band2": "red"},
    },
    "needed_bands": ["red", "nir"],
    "range": [0.0, 1.0],
    "components": {
        "red": {"red": 1.0},
        "green": {"green": 1.0},
        "blue": {"blue": 1.0},
    },
    "scale_range": [0.0, 3000.0],
    # A "pq_masks" section may be added here, exactly as for the other
    # style types.
}
# Describes a style which uses several bitflags to create a style
style_mangrove = {
    "name": "mangrove",
    "title": "Mangrove Cover",
    "abstract": "",
    # Each key of "value_map" names a bit-flagged band; each list entry
    # describes one rendered category for that band.
    "value_map": {
        "canopy_cover_class": [
            {
                "title": "Woodland",
                "abstract": "(20% - 50% cover)",
                # "and": every listed flag must match for this colour to
                # apply.  "and" and "or" cannot be mixed in a single entry.
                "flags": {
                    "and": {
                        "woodland": True,
                    },
                },
                "color": "#9FFF4C",
                # "mask": True (default False) makes matching areas fully
                # transparent, similar to an extent-mask function or pq
                # masking.
                "mask": True,
            },
            {
                "title": "Open Forest",
                "abstract": "(50% - 80% cover)",
                # "or": any one of the listed flags matching is sufficient.
                "flags": {
                    "or": {
                        "open_forest": True,
                    },
                },
                "color": "#5ECC00",
                # Optional alpha (0.0 - 1.0); defaults to 1.0 (fully opaque).
                "alpha": 0.5,
            },
            {
                "title": "Closed Forest",
                "abstract": "(>80% cover)",
                "flags": {
                    "closed_forest": True,
                },
                "color": "#3B7F00",
            },
        ],
    },
    # Additional masking via a "pq_masks" section works here exactly as for
    # the other style types.
}
# REUSABLE CONFIG FRAGMENTS - resource limit declarations
standard_resource_limits = {
    "wms": {
        # WMS/WMTS resource limits
        #
        # There are two independent resource limits applied to WMS/WMTS requests.  If either
        # limit is exceeded, then either the low-resolution summary product is used if one is defined, otherwise
        # an indicative polygon showing the extent of the data is rendered.
        #
        # The fill-colour of the indicative polygons when either of the wms/wmts resource limits is exceeded.
        # Triplets (rgb) or quadruplets (rgba) of integers 0-255.
        #
        # (The fourth number in an rgba quadruplet represents opacity with 255 being fully opaque and
        # 0 being fully transparent.)
        #
        # Defaults to [150, 180, 200, 160]
        "zoomed_out_fill_colour": [150, 180, 200, 160],
        # WMS/WMTS Resource Limit 1: Min zoom factor
        #
        # The zoom factor is a dimensionless number calculated from the request in a way that is independent
        # of the CRS. A higher zoom factor corresponds to a more zoomed in view.
        #
        # If the zoom factor of the request is less than the minimum zoom factor (i.e. is zoomed out too far)
        # then indicative polygons are rendered instead of accessing the actual data.
        #
        # Defaults to 300.0
        "min_zoom_factor": 500.0,
        # Min zoom factor (above) works well for small-tiled requests, (e.g. 256x256 as sent by Terria).
        # However, for large-tiled requests (e.g. as sent by QGIS), large and intensive queries can still
        # go through to the datacube.
        #
        # max_datasets specifies a maximum number of datasets that a GetMap or GetTile request can retrieve.
        # Indicative polygons are displayed if a request exceeds the limits imposed by EITHER max_datasets
        # OR min_zoom_factor.
        #
        # max_datasets should be set in conjunction with min_zoom_factor so that Terria style 256x256
        # tiled requests respond consistently - you never want to see a mixture of photographic tiles and polygon
        # tiles at a given zoom level.  i.e. max_datasets should be greater than the number of datasets
        # required for most intensive possible photographic query given the min_zoom_factor.
        # Note that the ideal value may vary from product to product depending on the size of the dataset
        # extents for the product.
        # Defaults to zero, which is interpreted as no dataset limit.
        "max_datasets": 10,
        # Dataset cache rules.
        #
        # The number of datasets accessed by a GetMap/GetTile/GetCoverage query can be used to control
        # the cache-control headers returned by the query.
        #
        # Special cases:
        #
        # 1. No dataset_cache_rules element: Never return a cache-control header
        # 2. dataset_cache_rules set to an empty list []:  Return no-cache for all queries.
        # 3. General case: refer to comments embedded in example below.
        "dataset_cache_rules": [
            # Where number of datasets less than the min_datasets element of the first cache rule (0-3 in this example):
            # no-cache.
            {
                # Where number of datasets greater than or equal to the min_datasets value for this rule AND
                # less than the min_datasets of the next rule (4-7 in this example)
                "min_datasets": 4,   # Must be greater than zero.  Blank tiles (0 datasets) are NEVER cached
                # The cache-control max-age for this rule, in seconds.
                "max_age": 86400,    # 86400 seconds = 24 hours
            },
            {
                # Rules must be sorted in ascending order of min_datasets values.
                "min_datasets": 8,
                "max_age": 604800,   # 604800 seconds = 1 week
            },
            # If a resource limit is exceeded, no-cache applies.
            # Summarising the cache-control results for this example:
            #      0-3 datasets: no-cache
            #      4-7 datasets: max-age: 86400
            #     8-10 datasets: max-age: 604800
            #      11+ datasets: no-cache  (over-limit behaviour. Low-resolution summary product or shaded polygons.)
        ]
    },
    "wcs": {
        # wcs::max_datasets is the WCS equivalent of wms::max_datasets.  The main requirement for setting this
        # value is to avoid gateway timeouts on overly large WCS requests (and reduce server load).
        #
        # Defaults to zero, which is interpreted as no dataset limit.
        "max_datasets": 16,
        # dataset_cache_rules can be set independently for WCS requests.  This example omits it, so
        # WCS GetCoverage requests will always return no cache-control header.
    }
}
# MAIN CONFIGURATION OBJECT
ows_cfg = {
    # Config entries in the "global" section apply to all services and all layers/coverages
    "global": {
        # These HTML headers are added to all responses
        # Optional, default {} - no added headers
        "response_headers": {
            "Access-Control-Allow-Origin": "*",   # CORS header (strongly recommended)
        },
        ## Which web service(s) should be implemented by this instance
        # Optional, defaults: wms,wmts: True, wcs: False
        "services": {
            "wms": True,
            "wmts": True,
            "wcs": True
        },
        # Service title - appears e.g. in Terria catalog (required)
        "title": "Open web-services for the Open Data Cube",
        # Service URL.
        # A list of fully qualified URLs that the service can return
        # in the GetCapabilities documents based on the requesting url
        "allowed_urls": [ "http://localhost/odc_ows",
                          "https://localhost/odc_ows",
                          "https://alternateurl.domain.org/odc_ows",
                          "http://127.0.0.1:5000/"],
        # URL that humans can visit to learn more about the service(s) or organization
        # should be fully qualified
        "info_url": "http://opendatacube.org",
        # Abstract - longer description of the service (Note this text is used for both WM(T)S and WCS)
        # Optional - defaults to empty string.
        "abstract": """This web-service serves georectified raster data from our very own special Open Data Cube instance.""",
        # Keywords included for all services and products
        # Optional - defaults to empty list.
        "keywords": [
            "satellite",
            "australia",
            "time-series",
        ],
        # Contact info.
        # Optional but strongly recommended - defaults to blank.
        "contact_info": {
            "person": "Firstname Surname",
            "organisation": "Acme Corporation",
            "position": "CIO (Chief Imaginary Officer)",
            "address": {
                "type": "postal",
                "address": "GPO Box 999",
                "city": "Metropolis",
                "state": "North Arcadia",
                "postcode": "12345",
                "country": "Elbonia",
            },
            "telephone": "+61 2 1234 5678",
            "fax": "+61 2 1234 6789",
            "email": "test@example.com",
        },
        # If fees are charged for the use of the service, these can be described here in free text.
        # If blank or not included, defaults to "none".
        "fees": "",
        # If there are constraints on access to the service, they can be described here in free text.
        # If blank or not included, defaults to "none".
        "access_constraints": "",
        # Supported co-ordinate reference systems. Any coordinate system supported by GDAL and Proj.4J can be used.
        # At least one CRS must be included.  At least one geographic CRS must be included if WCS is active.
        # WGS-84 (EPSG:4326) is strongly recommended, but not required.
        # Web Mercator (EPSG:3857) is strongly recommended, but is only required if WMTS is active.
        "published_CRSs": {
            "EPSG:3857": {  # Web Mercator
                "geographic": False,
                "horizontal_coord": "x",
                "vertical_coord": "y",
            },
            "EPSG:4326": {  # WGS-84
                "geographic": True,
                "vertical_coord_first": True
            },
            "EPSG:3577": {  # GDA-94, internal representation
                "geographic": False,
                "horizontal_coord": "x",
                "vertical_coord": "y",
            },
        },
    },   #### End of "global" section.

    # Config items in the "wms" section apply to the WMS service (and WMTS, which is implemented as a
    # thin wrapper to the WMS code unless stated otherwise) to all WMS/WMTS layers (unless over-ridden).
    "wms": {
        # Provide S3 data URL, bucket name for data_links in GetFeatureinfo responses
        # Note that this feature is currently restricted to data stored in AWS S3.
        # This feature is also fairly specialised to DEA requirements and may not be suited to more general use.
        # All Optional
        "s3_url": "http://data.au",
        "s3_bucket": "s3_bucket_name",
        "s3_aws_zone": "ap-southeast-2",
        # Max tile height/width for wms.  (N.B. Does not apply to WMTS)
        # Optional, defaults to 256x256
        "max_width": 512,
        "max_height": 512,
        # Attribution.  This provides a way to identify the source of the data used in a layer or layers.
        # This entire section is optional.  If provided, it is taken as the
        # default attribution for any layer that does not override it.
        "attribution": {
            # Attribution must contain at least one of ("title", "url" and "logo")
            # A human readable title for the attribution - e.g. the name of the attributed organisation
            "title": "Acme Satellites",
            # The associated URL - e.g. the website of the attributed organisation
            "url": "http://www.acme.com/satellites",
            # Logo image - e.g. for the attributed organisation
            "logo": {
                # Image width in pixels (optional)
                "width": 370,
                # Image height in pixels (optional)
                "height": 73,
                # URL for the logo image. (required if logo specified)
                "url": "https://www.acme.com/satellites/images/acme-370x73.png",
                # Image MIME type for the logo - should match type referenced in the logo url (required if logo specified.)
                "format": "image/png",
            }
        },
        # These define the AuthorityURLs.
        # They represent the authorities that define the "Identifiers" defined layer by layer below.
        # The spec allows AuthorityURLs to be defined anywhere on the Layer hierarchy, but datacube_ows treats them
        # as global entities.
        # Required if identifiers are to be declared for any layer.
        "authorities": {
            # The authorities dictionary maps names to authority urls.
            "auth": "https://authoritative-authority.com",
            "idsrus": "https://www.identifiers-r-us.com",
        }
    },   #### End of "wms" section.

    # Config items in the "wmts" section apply to the WMTS service only.
    # Note that most items in the "wms" section apply to the WMTS service
    # as well as the WMS service.
    #
    # Config items in the "wmts" section apply to all WMTS layers. All
    # entries are optional and the entire section may be omitted.
    "wmts": {
        # Datacube-ows always supports the standard "Google Maps" style
        # EPSG:3857-based tile matrix set.
        # If you require a custom tile matrix set (or sets) you can
        # define them here.
        "tile_matrix_sets": {
            # Example custom tile matrix set
            # Vic Grid (EPSG:3111) GeoCortex compatible tile matrix set
            # The key is the identifier for the Tile Matrix Set in WMTS instance.
            "vicgrid": {
                # The CRS of the Tile Matrix Set
                "crs": "EPSG:3111",
                # The coordinates (in the CRS above) of the upper-left
                # corner of the tile matrix set.
                "matrix_origin": (1786000.0, 3081000.0),
                # The size of tiles (must not exceed the WMS maximum tile size)
                "tile_size": (512, 512),
                # The scale denominators (as defined in the WMTS spec) for
                # the various zoom levels, from zoomed right out to zoomed right in.
                "scale_set": [
                    7559538.928601667,
                    3779769.4643008336,
                    1889884.7321504168,
                    944942.3660752084,
                    472471.1830376042,
                    236235.5915188021,
                    94494.23660752083,
                    47247.11830376041,
                    23623.559151880207,
                    9449.423660752083,
                    4724.711830376042,
                    2362.355915188021,
                    1181.1779575940104,
                    755.9538928601667,
                ],
                # Defaults to (0,0), which means the first tile matrix
                # will have 1 tile (1x1), then doubling each time
                # (then 2x2, 4x4, 8x8, 16x16, etc.)
                #
                # (1, 0) means the width of the first tile matrix
                # is 2**1 = 2
                # So tiles side by side (2x1) (then 4x2, 8x4, 16x8, etc.)
                "matrix_exponent_initial_offsets": (1, 0),
            },
        }
    },

    # Config items in the "wcs" section apply to the WCS service to all WCS coverages
    # (unless over-ridden).
    "wcs": {
        # Supported WCS formats
        # NetCDF and GeoTIFF work "out of the box".  Other formats will require writing a Python function
        # to do the rendering.
        "formats": {
            # Key is the format name, as used in DescribeCoverage XML
            "GeoTIFF": {
                # Writing your own renderers is not documented.
                "renderers": {
                    "1": "datacube_ows.wcs1_utils.get_tiff",
                    "2": "datacube_ows.wcs2_utils.get_tiff",
                },
                # The MIME type of the image, as used in the Http Response.
                "mime": "image/geotiff",
                # The file extension to add to the filename.
                "extension": "tif",
                # Whether or not the file format supports multiple time slices.
                "multi-time": False
            },
            "netCDF": {
                "renderers": {
                    "1": "datacube_ows.wcs1_utils.get_netcdf",
                    "2": "datacube_ows.wcs2_utils.get_netcdf",
                },
                "mime": "application/x-netcdf",
                "extension": "nc",
                "multi-time": True,
            }
        },
        # The wcs:native_format must be declared in wcs:formats dict above.
        # May be over-ridden at the named layer (i.e. coverage)
        # level.
        "native_format": "GeoTIFF",
    },   ###### End of "wcs" section

    # Products published by this datacube_ows instance.
    # The layers section is a list of layer definitions.  Each layer may be either:
    # 1) A folder-layer.  Folder-layers are not named and can contain a list of child layers.  Folder-layers are
    #    only used by WMS and WMTS - WCS does not support a hierarchical index of coverages.
    # 2) A mappable named layer that can be requested in WMS GetMap or WMTS GetTile requests.  A mappable named layer
    #    is also a coverage, that may be requested in WCS DescribeCoverage or WCS GetCoverage requests.
    "layers": [
        {
            # NOTE: This layer is a folder - it is NOT a "named layer" that can be selected in GetMap requests
            # Every layer must have a human-readable title
            "title": "Landsat 8",
            # Top level layers must have a human-readable abstract. The abstract is optional for child-layers - defaulting
            # to that of the parent layer.
            "abstract": "Images from the Landsat 8 satellite",
            # NOTE: Folder-layers do not have a layer "name".
            # Keywords are optional, but can be added at any folder level and are cumulative.
            # A layer combines its own keywords, the keywords of its parent (and grandparent, etc) layers,
            # and any keywords defined in the global section above.
            #
            "keywords": [
                "landsat",
                "landsat8",
            ],
            # Attribution.  This entire section is optional.  If provided, it overrides any
            # attribution defined in the wms section above or any higher layers, and
            # applies to this layer and all child layers under this layer unless itself
            # overridden.
            "attribution": {
                # Attribution must contain at least one of ("title", "url" and "logo")
                # A human readable title for the attribution - e.g. the name of the attributed organisation
                "title": "Digital Earth Australia",
                # The associated URL - e.g. the website of the attributed organisation
                "url": "http://www.ga.gov.au/dea",
                # Logo image - e.g. for the attributed organisation
                "logo": {
                    # Image width in pixels (optional)
                    "width": 370,
                    # Image height in pixels (optional)
                    "height": 73,
                    # URL for the logo image. (required if logo specified)
                    "url": "https://www.ga.gov.au/__data/assets/image/0011/61589/GA-DEA-Logo-Inline-370x73.png",
                    # Image MIME type for the logo - should match type referenced in the logo url (required if logo specified.)
                    "format": "image/png",
                }
            },
            # Folder-type layers include a list of sub-layers
            "layers": [
                {
                    # NOTE: This layer IS a mappable "named layer" that can be selected in GetMap requests
                    # Every layer must have a distinct human-readable title and abstract.
                    "title": "Level 2 DEA NBART Landsat-8 Data",
                    "abstract": "Imagery from DEA's Level 2 NBART Analysis-Ready Data Set",
                    # Mappable layers must have a name - this is the layer name that appears in WMS GetMap
                    # or WMTS GetTile requests and the coverage name that appears in WCS
                    # DescribeCoverage/GetCoverage requests.
                    "name": "ls8_nbart_albers",
                    # The ODC product name for the associated data product
                    "product_name": "ls8_nbart_albers",
                    # Supported bands, mapping native band names to a list of possible aliases.
                    # See reusable band alias maps above for documentation.
                    "bands": landsat8_bands,
                    # Resource limits.
                    # See reusable resource limit declarations above for documentation.
                    "resource_limits": standard_resource_limits,
                    # If "dynamic" is False (the default) then the ranges for the product are cached in memory.
                    # Dynamic products slow down the generation of the GetCapabilities document - use sparingly.
                    "dynamic": False,
                    # The resolution of the time access.  Optional. Allowed values are: "raw" (the default - daily),
                    # "month" (for monthly summary datasets) or "year" (for annual summary datasets)
                    "time_resolution": "raw",
                    "flags": {
                        # Data may include flags that mark which pixels have missing or poor-quality data,
                        # or contain cloud, or cloud-shadow, etc.  This section describes how
                        # datacube_ows handles such flags.  The entire section may be omitted if no
                        # flag masking is to be supported by the layer.
                        #
                        # Items in this section affect WMS/WMTS requests only, unless explicitly stated
                        # otherwise.
                        #
                        # The name of the measurement band for the pixel-quality flags
                        # Pixel-quality bitmasks and flags can be used for image/data masking.
                        #
                        # Required, unless the whole "flags" section is empty or None.
                        #
                        "band": "pixelquality",
                        # Sometimes the pixel quality band is packaged in a separate ODC product
                        # If this is the case, you can specify this product with the "flags::product"
                        # element.  If "flags::band" is set but "flags::product" is omitted, then the
                        # pixel quality band is assumed to be included in the main data product.
                        "product": "ls8_pq_albers",
                        # Flags Fuse func
                        # Determines how multiple dataset arrays are compressed into a single time array for
                        # the PQ layer
                        #
                        # Two formats are supported:
                        # 1. A string containing a fully qualified path to a python function (e.g. as shown below)
                        #
                        # 2. A dict containing the following elements:
                        #    a) "function" (required): A string containing the fully qualified path to a python function
                        #    b) "args" (optional): An array of additional positional arguments that will always be passed to the function.
                        #    c) "kwargs" (optional): An array of additional keyword arguments that will always be passed to the function.
                        #    d) "mapped_bands" (optional): Boolean (defaults to False). If true, a band mapping function is passed
                        #       to the function as a keyword argument named "band_mapper".  This is useful if you are passing band aliases
                        #       to the function in the args or kwargs.  The band_mapper allows the index function to convert band aliases
                        #       to band names.
                        #
                        # Passed directly to the datacube load_data function.  Defaults to None.
                        "fuse_func": "datacube.helpers.ga_pq_fuser",
                        # Flags Ignore time
                        # Doesn't use the time from the data to find a corresponding mask layer
                        # Used when you have a mask layer that doesn't have a time dimension
                        #
                        # Defaults to False
                        "ignore_time": False,
                        # Values of flags listed here are not included in GetFeatureInfo responses.
                        # (defaults to empty list)
                        "ignore_info_flags": [],
                        # Set to true if the pq product dataset extents include nodata regions.
                        #
                        # Default to False.
                        "manual_merge": False,
                    },
                    # The image_processing section must be supplied.
                    "image_processing": {
                        # Extent mask function
                        # Determines what portions of dataset is potentially meaningful data.
                        #
                        # All the formats described above for "flags->fuse_func" are
                        # supported here as well.
                        #
                        # Additionally, multiple extent mask functions can be specified as a list of any of
                        # supported formats.  The result is the intersection of all supplied mask functions.
                        #
                        # The function is assumed to take two arguments, data (an xarray Dataset) and band (a band name).  (Plus any additional
                        # arguments required by the args and kwargs values in format 3, possibly including product_cfg.)
                        #
                        "extent_mask_func": "datacube_ows.ogc_utils.mask_by_val",
                        # Bands to always fetch from the Datacube, even if it is not used by the active style.
                        # Useful for when a particular band is always needed for the extent_mask_func,
                        "always_fetch_bands": [],
                        # Fuse func
                        #
                        # Determines how multiple dataset arrays are compressed into a single time array
                        # All the formats described above for "extent_mask_func" are supported here as well.
                        # (Passed through to datacube load_data() function.)
                        #
                        # Defaults to None.
                        "fuse_func": None,
                        # Set to true if the band product dataset extents include nodata regions.
                        # Defaults to False.
                        "manual_merge": False,
                        # Apply corrections for solar angle, for "Level 1" products.
                        # (Defaults to false - should not be used for NBAR/NBAR-T or other Analysis Ready products)
                        "apply_solar_corrections": False,
                    },
                    # If the WCS section is not supplied, then this named layer will NOT appear as a WCS
                    # coverage (but will still be a layer in WMS and WMTS).
                    "wcs": {
                        # The "native" CRS for WCS.  Must be in the global "published_CRSs" list.
                        # Can be omitted if the product has a single native CRS, as this will be used in preference.
                        "native_crs": "EPSG:3577",
                        # The resolution (x,y) for WCS.  Required for WCS-enabled layers.
                        # This is the number of CRS units (e.g. degrees, metres) per pixel in the horizontal
                        # and vertical directions for the native resolution.
                        # E.g. for EPSG:3577: (25.0, 25.0) for Landsat-8 and (10.0, 10.0) for Sentinel-2
                        "native_resolution": [ 25.0, 25.0 ],
                        # The default bands for a WCS request.
                        # 1. Must be provided if WCS is activated.
                        # 2. Must contain at least one band.
                        # 3. All bands must exist in the band index.
                        # 4. Bands may be referred to by either native name or alias
                        "default_bands": [ "red", "green", "blue" ],
                        # The native format advertised for the coverage.
                        # Must be one of the formats defined
                        # in the global wcs formats section.
                        # Optional: if not supplied defaults to the
                        # globally defined native_format.
                        "native_format": "NetCDF"
                    },
                    # Each key of the identifiers dictionary must match a name from the authorities dictionary
                    # in the global section.  The values are the identifiers defined for this layer by that
                    # authority.
                    "identifiers": {
                        "auth": "ls8_ard",
                        "idsrus": "12345435::0054234::GHW::24356-splunge"
                    },
                    # The urls section provides the values that are included in the FeatureListURLs and
                    # DataURLs sections of a WMS GetCapabilities document.
                    # Multiple of each may be defined per product.
                    #
                    # The entire section, and the "features" and "data" subsections within it, are optional.
                    # The default is an empty list(s).
                    #
                    # Each individual entry must include a url and MIME type format.
                    #
                    # FeatureListURLs point to "a list of the features represented in a Layer".
                    # DataURLs "offer a link to the underlying data represented by a particular layer"
                    "urls": {
                        "features": [
                            {
                                "url": "http://domain.tld/path/to/page.html",
                                "format": "text/html"
                            },
                            {
                                "url": "http://another-domain.tld/path/to/image.png",
                                "format": "image/png"
                            }
                        ],
                        "data": [
                            {
                                "url": "http://abc.xyz/data-link.xml",
                                "format": "application/xml"
                            }
                        ]
                    },
                    # The feature_info section is optional.
                    "feature_info": {
                        # Include an additional list of utc dates in the WMS Get Feature Info.  Defaults to False.
                        # HACK: only used for GSKY non-solar day lookup.
                        "include_utc_dates": False,
                        # Optional: custom data to be included in GetFeatureInfo responses.  Defaults to an empty
                        # dictionary.
                        # Keys are the keys to insert into the GetFeatureInfo response.  Values are function wrappers,
                        # using the same format options available elsewhere in the config.  Specified functions are
                        # expected to be passed a dictionary of band values (as parameter "data") and return any data
                        # that can be serialised to JSON.
                        "include_custom": {
                            "timeseries": {
                                "function": "datacube_ows.ogc_utils.feature_info_url_template",
                                "mapped_bands": False,
                                "kwargs": {
                                    "template": "https://host.domain/path/{data['f_id']:06}.csv"
                                }
                            }
                        }
                    },
                    # The sub_products section is optional.
                    "sub_products": {
                        # A function that extracts the "sub-product" id (e.g. path number) from a dataset.
                        # Function should return a (small) integer.  If None or not specified, the product
                        # has no sub-layers.
                        # All the formats supported for extent_mask_func as described above are supported here.
                        # The function is assumed to take a datacube dataset object and return an integer
                        # sub-product id.
                        "extractor": "datacube_ows.ogc_utils.ls8_subproduct",
                        # A prefix used to describe the sub-layer in the GetCapabilities response.
                        # E.g. sub-layer 109 will be described as "Landsat Path 109"
                        "label": "Landsat Path",
                    },
                    # Style definitions
                    # The "styling" section is required
                    "styling": {
                        # The default_style is the style used when no style is explicitly given in the
                        # request.  If given, it must be the name of a style in the "styles" list.  If
                        # not explicitly defined it defaults to the first style in "styles" list.
                        "default_style": "simple_rgb",
                        # The "styles" list must be explicitly supplied, and must contain at least one
                        # style.  See reusable style definitions above for more documentation on
                        # defining styles.
                        "styles": [
                            style_rgb, style_rgb_cloudmask, style_rgb_cloud_and_shadowmask,
                            style_ext_rgb, style_ls8_allband_false_colour, style_infrared_false_colour,
                            style_pure_ls8_coastal_aerosol, style_pure_ls8_blue,
                            style_pure_ls8_green, style_pure_ls8_red,
                            style_pure_ls8_nir, style_pure_ls8_swir1, style_pure_ls8_swir2,
                            style_ndvi, style_ndvi_cloudmask,
                            style_ndwi, style_ndbi,
                            style_cloud_mask,
                            style_rgb_ndvi
                        ]
                    }
                },   #### End of ls8_nbart_albers product
                {
                    # NOTE: This layer IS a mappable "named layer" that can be selected in GetMap requests
                    "title": "Level 1 USGS Landsat-8 Public Data Set",
                    "abstract": "Imagery from the Level 1 Landsat-8 USGS Public Data Set",
                    "name": "ls8_level1_pds",
                    "product_name": "ls8_level1_usgs",
                    "bands": landsat8_bands,
                    "resource_limits": standard_resource_limits,
                    "flags": {
                        "band": "quality",
                        "ignore_time": False,
                        "ignore_info_flags": [],
                        "manual_merge": True,
                    },
                    "image_processing": {
                        # Extent mask function
                        #
                        # See documentation above.  This is an example of multiple extent_mask_functions.
                        "extent_mask_func": [
                            "datacube_ows.ogc_utils.mask_by_quality",
                            "datacube_ows.ogc_utils.mask_by_val",
                        ],
                        # Bands to always fetch from the Datacube, even if it is not used by the active style.
                        # Useful for when a particular band is always needed for the extent_mask_func, as
                        # is the case here.
                        "always_fetch_bands": [ "quality" ],
                        "fuse_func": None,
                        "manual_merge": True,
                        # Apply corrections for solar angle, for "Level 1" products.
                        # (Defaults to false - should not be used for NBAR/NBAR-T or other Analysis Ready products)
                        "apply_solar_corrections": True
                    },
                    "wcs": {
                        "native_crs": "EPSG:3857",
                        "native_resolution": [ 25.0, 25.0 ],
                        "default_bands": [ "red", "green", "blue" ],
                    },
                    "styling": {
                        "default_style": "simple_rgb",
                        "styles": [
                            style_rgb, style_ext_rgb,
                            style_ls8_allband_false_colour, style_infrared_false_colour,
                            style_pure_ls8_coastal_aerosol, style_pure_ls8_blue,
                            style_pure_ls8_green, style_pure_ls8_red,
                            style_pure_ls8_nir, style_pure_ls8_swir1, style_pure_ls8_swir2,
                            style_ndvi, style_ndwi, style_ndbi,
                            style_rgb_ndvi
                        ]
                    }
                },   ##### End of ls8_level1_pds product definition.
                {
                    # NOTE: This layer IS a mappable "named layer" that can be selected in GetMap requests
                    "title": "WOfS Summary",
                    "abstract": "Water Observations from Space - Summary",
                    "name": "wofs_summary",
                    "product_name": "wofs_summary",
                    "bands": { "frequency": [] },
                    "resource_limits": standard_resource_limits,
                    "flags": None,
                    "image_processing": {
                        "extent_mask_func": "datacube_ows.ogc_utils.mask_by_val",
                        "fuse_func": "datacube_ows.wms_utils.wofls_fuser",
                    },
                    "wcs": {
                        "native_crs": "EPSG:3857",
                        "native_resolution": [ 25.0, 25.0 ],
                        "default_bands": [ "frequency" ],
                    },
                    "styling": {
                        "styles": [
                            style_wofs_frequency
                        ]
                    }
                },   ##### End of wofs_summary product definition.
            ]
        },   ### End of Landsat 8 folder.
        {
            # NOTE: This layer is a folder - it is NOT a "named layer" that can be selected in GetMap requests
            "title": "Sentinel-2 Products",
            "abstract": "Products containing data ultimately derived from ESA's Sentinel-2 satellite.",
            "keywords": [
                "sentinel2",
            ],
            "layers": [
                {
                    # NOTE: This layer IS a mappable "named layer" that can be selected in GetMap requests
                    "title": "Near Real-Time images from Sentinel-2 Satellites",
                    "abstract": "Imagery from the ESA Sentinel2 Satellites",
                    "name": "sentinel2_nrt",
                    # Multi-product layers merge two separate datacube products with similar metadata (i.e.
                    # projections, bands, pixel quality band format, etc.)
                    "multi_product": True,
                    # For multi-product layers, use "product_names" for the list of constituent ODC products.
                    "product_names": ["s2a_nrt_granule", "s2b_nrt_granule"],
                    "bands": sentinel2_bands,
                    "resource_limits": standard_resource_limits,
                    # Near Real Time datasets are being regularly updated - do not cache ranges in memory.
                    "dynamic": True,
                    "flags": {
                        "band": "quality",
                        "ignore_time": False,
                        "ignore_info_flags": [],
                        "manual_merge": False,
                    },
                    "image_processing": {
                        "extent_mask_func": "datacube_ows.ogc_utils.mask_by_val",
                        "always_fetch_bands": [ ],
                        "fuse_func": None,
                        "manual_merge": False,
                        "apply_solar_corrections": False,
                    },
                    "wcs": {
                        "native_crs": "EPSG:3577",
                        "native_resolution": [ 10.0, 10.0 ],
                        "default_bands": [ "red", "green", "blue" ],
                    },
                    "identifiers": {
                        "auth": "s2_nrt_multi",
                    },
                    "urls": {
                        "features": [
                            {
                                "url": "http://domain.tld/path/to/page.html",
                                "format": "text/html"
                            }
                        ],
                        "data": [
                            {
                                "url": "http://abc.xyz/data-link.xml",
                                "format": "application/xml"
                            }
                        ]
                    },
                    "styling": {
                        "default_style": "simple_rgb",
                        "styles": [ style_rgb ],
                    }
                }   ##### End of sentinel2_nrt multi-product definition
            ],
        },   #### End of Sentinel-2 folder
        {
            # NOTE: This layer IS a mappable "named layer" that can be selected in GetMap requests
            # NOTE: Named layers can sit at the same hierarchical level as folder layers.
            "name": "mangrove_cover",
            "title": "Mangrove Canopy Cover",
            "abstract": "Mangrove Canopy Cover - example of bitflag value-mapped style.",
            # BUG FIX: this is a single-product layer (no "multi_product": True), so it
            # must declare "product_name" (singular), as the other single-product layers
            # above do.  It previously declared "product_names", which is only read for
            # multi-product layers.
            "product_name": "mangrove_cover",
            "bands": { "canopy_cover_class": [], "extent": [] },
            "resource_limits": standard_resource_limits,
            "flags": None,
            "image_processing": {
                "extent_mask_func": "datacube_ows.ogc_utils.mask_by_extent_flag",
                "always_fetch_bands": [ "extent" ],
                "fuse_func": None,
                "manual_merge": False,
                "apply_solar_corrections": False,
            },
            "wcs": {
                "native_crs": "EPSG:3577",
                "native_resolution": [ 25.0, 25.0 ],
                "default_bands": [ "canopy_cover_class" ],
            },
            "identifiers": {
                "auth": "mangrove_canopy_cover",
            },
            "urls": {
                "features": [
                    {
                        "url": "http://domain.tld/path/to/page.html",
                        "format": "text/html"
                    }
                ],
                "data": [
                    {
                        "url": "http://abc.xyz/data-link.xml",
                        "format": "application/xml"
                    }
                ]
            },
            "styling": {
                "default_style": "mangrove",
                "styles": [ style_mangrove ],
            }
        }   ##### End of mangrove_cover definition
    ]   ##### End of "layers" list.
}   #### End of example configuration object
| 42.590224 | 150 | 0.534291 |
acf41e419608a8b0b7cc64e90df1509f6a5166bb | 5,612 | py | Python | release-stories.py | menicosia/pivotal-tracker-scripts | 2f71653c000c3603b9ce9d016816ce1af7b1c139 | [
"Apache-2.0"
] | null | null | null | release-stories.py | menicosia/pivotal-tracker-scripts | 2f71653c000c3603b9ce9d016816ce1af7b1c139 | [
"Apache-2.0"
] | 1 | 2019-02-27T22:23:59.000Z | 2019-02-27T22:23:59.000Z | release-stories.py | menicosia/pivotal-tracker-scripts | 2f71653c000c3603b9ce9d016816ce1af7b1c139 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Print out the stories that were completed by tracking a specific label.
import getopt, os, sys
import httplib, urllib
import json
from pprint import pprint, pformat
def isChore(story):
    """Return True if the given Tracker story dict is a chore.

    story: a story dict as returned by the Tracker v5 API (must contain
    a "story_type" key).
    """
    # Direct comparison replaces the original if/else-returning-booleans.
    return story["story_type"] == "chore"
def isFeature(story):
    """Return True if the given Tracker story dict is a feature.

    story: a story dict as returned by the Tracker v5 API (must contain
    a "story_type" key).
    """
    # Direct comparison replaces the original if/else-returning-booleans.
    return story["story_type"] == "feature"
def storyState(story):
    """Return the workflow state (e.g. "accepted", "delivered") of a story dict."""
    return story["current_state"]
class trackerRequest:
    """Encapsulate our interactions with Pivotal Tracker's API.

    On construction, opens an HTTPS connection to www.pivotaltracker.com,
    fetches the metadata for the given project and stores the decoded JSON
    as self.project.
    """
    def __init__(self, token, projectNum):
        # token: Tracker API token, sent in the X-TrackerToken header.
        # projectNum: numeric id of the Tracker project to query.
        self.__token = token
        self.__projectNum = projectNum
        self.__headers = {"Content-Type": "application/json",
                          "X-TrackerToken": self.__token}
        self.__httpPath = "/services/v5/projects/%s" % self.__projectNum
        self.__trackerAPI = httplib.HTTPSConnection("www.pivotaltracker.com")
        self.__trackerAPI.request("GET", self.__httpPath, None, self.__headers)
        response = self.__trackerAPI.getresponse()
        if (200 == response.status):
            self.project = json.loads(response.read())
        else:
            # BUG FIX: the original raised a plain string, which is itself a
            # TypeError on Python >= 2.6.  Raise a real exception instance.
            raise RuntimeError("[ERROR] http error response: %s, %s"
                               % (response.status, response.reason))

    def stories(self, weeksInPast=False, searchLabel=False):
        """Query Tracker to get a set of stories. Specify exactly one of:
        - weeksInPast: a number of weeks; returns all stories completed in
          that iteration
        - searchLabel: a label; returns all stories associated with that label
        Raises ValueError if neither selector is supplied.
        """
        if (weeksInPast):
            lastIter = self.project["current_iteration_number"] - weeksInPast
            self.__trackerAPI.request("GET", "%s/iterations/%s" % (self.__httpPath, lastIter),
                                      None, self.__headers)
        elif (searchLabel):
            searchPath = "search?query=%s" % urllib.quote("label:\"%s\" includedone:true" % searchLabel)
            self.__trackerAPI.request("GET", "%s/%s" % (self.__httpPath, searchPath),
                                      None, self.__headers)
        else:
            # BUG FIX: previously, falling through with neither argument called
            # getresponse() without any request having been issued.
            raise ValueError("stories() requires either weeksInPast or searchLabel")
        response = self.__trackerAPI.getresponse()
        if (200 == response.status):
            output = json.loads(response.read())
        else:
            # BUG FIX: raise a real exception instead of a string (see above).
            raise RuntimeError("[ERROR] http error response: %s, %s"
                               % (response.status, response.reason))
        if (weeksInPast):
            return output["stories"]
        # In Tracker's API, label-search results nest the stories one level
        # deeper than an iteration query does.
        return output["stories"]["stories"]

    def close(self):
        """Close the underlying HTTPS connection."""
        self.__trackerAPI.close()
if __name__=="__main__":
    # Command-line driver: for each project id on the command line, print a
    # Markdown summary of stories either for an iteration N weeks back (-w N)
    # or matching a label (-l LABEL).
    # NOTE(review): os, sys and getopt are used below but are not imported in
    # this chunk -- confirm they are imported elsewhere in the file.
    weeks = None
    label = None
    verbose = False
    if (not os.environ.has_key('TRACKER_API_TOKEN')):
        print >>sys.stderr, "[ERROR] Environment variable not defined: TRACKER_API_TOKEN"
        sys.exit(-1) ;
    try:
        (opts, args) = getopt.getopt(sys.argv[1:], "vw:l:", ["weeks=","label="])
    except getopt.GetoptError:
        (err, why, tb) = sys.exc_info()
        print >>sys.stderr, "[ERROR]: %s" % why
        sys.exit(1)
    # Parse options; any remaining positional args are Tracker project ids.
    for opt, arg in opts:
        if opt == "-v":
            verbose = True
        elif opt in ("-w", "--weeks"):
            weeks = int(arg)
        elif opt in ("-l", "--label"):
            label = arg
    for projectNum in args:
        myRequest = trackerRequest(os.environ['TRACKER_API_TOKEN'], projectNum)
        # NOTE(review): if neither -w nor -l was given, `stories` below is
        # never assigned and the len() call raises NameError -- confirm the
        # intended usage always supplies one of the two flags.
        if (weeks):
            stories = myRequest.stories(weeksInPast=weeks)
        elif (label):
            stories = myRequest.stories(searchLabel=label)
        print "# %s\n" % myRequest.project["name"]
        if (0 == len(stories)):
            print >>sys.stderr, "    No results."
        else:
            for i in stories:
                # Chores and unscheduled stories are deliberately skipped.
                if (isChore(i)):
                    pass ;
                elif ("unscheduled" == storyState(i)):
                    pass ;
                elif (isFeature(i) and ("accepted" == storyState(i) or "delivered" == storyState(i))):
                    # Completed features: plain bullet, labels in italics.
                    if (0 != len(i["labels"])):
                        try:
                            print "- %s (_%s_) [[#%s](%s)]" % (i["name"].encode("ascii", "ignore"),
                                                               ",".join([g["name"].encode("ascii", "ignore")
                                                                         for g in i["labels"]]),
                                                               i["id"], i["url"])
                        except:
                            (err, why, tb) = sys.exc_info() ;
                            print >>sys.stderr, "Error: %s, %s\n%s" % (err, why, i["url"])
                    else:
                        print "- %s [[#%s](%s)]" % (i["name"].encode("ascii", "ignore"), i["id"], i["url"])
                else:
                    # Everything else: bullet prefixed with the story type.
                    if (0 != len(i["labels"])):
                        print "- [%s] %s (_%s_) [[#%s](%s)]" % (i["story_type"].upper(),
                                                                i["name"].encode("ascii", "ignore"),
                                                                ",".join([g["name"] for g in i["labels"]]),
                                                                i["id"], i["url"])
                    else:
                        print "- [%s] %s [[#%s](%s)]" % (i["story_type"].upper(),
                                                         i["name"].encode("ascii", "ignore"),
                                                         i["id"], i["url"])
            print "\n"
        myRequest.close()
| 40.085714 | 108 | 0.482003 |
acf41e49d15bc8ff86159674525b4a3f94f9af1a | 4,081 | py | Python | cfripper/model/utils.py | mansong1/cfripper | 9fb9075bf091e5c529e488e98af32218d7677639 | [
"Apache-2.0"
] | null | null | null | cfripper/model/utils.py | mansong1/cfripper | 9fb9075bf091e5c529e488e98af32218d7677639 | [
"Apache-2.0"
] | null | null | null | cfripper/model/utils.py | mansong1/cfripper | 9fb9075bf091e5c529e488e98af32218d7677639 | [
"Apache-2.0"
] | null | null | null | import json
import logging
import re
from contextlib import suppress
from functools import lru_cache
from typing import Optional
from urllib.parse import unquote
import boto3
import yaml
from cfn_flip import to_json
from pycfmodel.model.resources.properties.policy import Policy
from cfripper.config.regex import REGEX_ARN, REGEX_IAM_ARN, REGEX_STS_ARN
logger = logging.getLogger(__file__)
class InvalidURLException(Exception):
pass
def extract_bucket_name_and_path_from_url(url):
# Remove query string
url = unquote(url).split("?")[0]
bucket_name = None
path = None
# https://bucket.s3.amazonaws.com/path1/path2
match = re.search(r"^https://([^.]+)\.s3\.amazonaws\.com(.*?)$", url)
if match:
bucket_name, path = match.group(1), match.group(2)[1:] # Trim start /
# https://bucket.s3-aws-region.amazonaws.com/path1/path2
match = re.search(r"^https://([^.]+)\.s3-[^\.]+\.amazonaws\.com(.*?)$", url)
if match:
bucket_name, path = match.group(1), match.group(2)[1:] # Trim start /
# https://s3.amazonaws.com/bucket/path1/path2
match = re.search(r"^https://s3\.amazonaws\.com/([^\/]+)(.*?)$", url)
if match:
bucket_name, path = match.group(1), match.group(2)[1:] # Trim start /
# https://s3.aws-region.amazonaws.com/bucket/path1/path2
match = re.search(r"^https://s3\.[^.]+\.amazonaws\.com/([^\/]+)(.*?)$", url)
if match:
bucket_name, path = match.group(1), match.group(2)[1:] # Trim start /
# https://s3-aws-region.amazonaws.com/bucket/path1/path2
match = re.search(r"^https://s3-[^.]+\.amazonaws\.com/([^\/]+)(.*?)$", url)
if match:
bucket_name, path = match.group(1), match.group(2)[1:] # Trim start /
if bucket_name is None and path is None:
raise InvalidURLException(f"Couldn't extract bucket name and path from url: {url}")
logger.info(f"extract_bucket_name_and_path_from_url. returning for {bucket_name} and {path} for {url}")
return bucket_name, path
def convert_json_or_yaml_to_dict(file_content):
    """Parse template text as JSON, falling back to YAML; return None on failure."""
    # Fast path: the content is already valid JSON.
    with suppress(ValueError):
        return json.loads(file_content)
    try:
        # Assume YAML: convert it to JSON with cfn_flip, then parse that.
        return json.loads(to_json(file_content))
    except yaml.YAMLError:
        logger.exception("Could not convert YAML to JSON template")
    except ValueError:
        logger.exception("Could not parse JSON template")
    return None
@lru_cache(maxsize=None)  # NOTE(review): unbounded cache; acceptable only if the set of policy ARNs is small -- confirm
def get_managed_policy(managed_policy_arn):
    """Fetch an IAM managed policy's default version from AWS.

    Returns a pycfmodel ``Policy`` wrapping the default policy version's
    document, or None when the policy reports no DefaultVersionId.
    Performs live AWS IAM API calls via boto3.
    """
    iam_client = boto3.client("iam")
    managed_policy = iam_client.get_policy(PolicyArn=managed_policy_arn)
    version_id = managed_policy.get("Policy", {}).get("DefaultVersionId")
    if version_id:
        policy_version = iam_client.get_policy_version(PolicyArn=managed_policy_arn, VersionId=version_id)
        # Synthesize a name: managed policies themselves are unnamed here.
        return Policy(
            **{
                "PolicyDocument": policy_version["PolicyVersion"]["Document"],
                "PolicyName": f"AutoTransformedManagedPolicy{version_id}",
            }
        )
    return None
def get_aws_service_from_arn(arn: str) -> Optional[str]:
    """Return the service segment of an ARN, or None for a non-ARN string."""
    matched = REGEX_ARN.match(arn)
    return matched.group(1) if matched else None
def get_account_id_from_arn(arn: str) -> Optional[str]:
    """Return the account-id segment of an ARN, or None for a non-ARN string."""
    matched = REGEX_ARN.match(arn)
    return matched.group(3) if matched else None
def get_account_id_from_iam_arn(arn: str) -> Optional[str]:
    """Return the account id of an IAM ARN, or None when it is not one."""
    matched = REGEX_IAM_ARN.match(arn)
    return matched.group(1) if matched else None
def get_account_id_from_sts_arn(arn: str) -> Optional[str]:
    """Return the account id of an STS ARN, or None when it is not one."""
    matched = REGEX_STS_ARN.match(arn)
    return matched.group(1) if matched else None
def get_account_id_from_principal(principal: str) -> Optional[str]:
    """Extract an AWS account id from a principal string.

    Accepts a bare numeric account id, an IAM ARN or an STS ARN; any other
    principal yields None.
    """
    if principal.isnumeric():
        return principal
    service = get_aws_service_from_arn(principal)
    if service == "iam":
        return get_account_id_from_iam_arn(principal)
    if service == "sts":
        return get_account_id_from_sts_arn(principal)
    return None
| 31.152672 | 107 | 0.671894 |
acf41ecfb7f406948a272ab87e0527e775b33433 | 7,141 | py | Python | armi/reactor/blueprints/blockBlueprint.py | youngmit/armi | 67688e4e67d2a217dfc7b1ccfa64028c20b57a5b | [
"Apache-2.0"
] | null | null | null | armi/reactor/blueprints/blockBlueprint.py | youngmit/armi | 67688e4e67d2a217dfc7b1ccfa64028c20b57a5b | [
"Apache-2.0"
] | null | null | null | armi/reactor/blueprints/blockBlueprint.py | youngmit/armi | 67688e4e67d2a217dfc7b1ccfa64028c20b57a5b | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module defines the ARMI input for a block definition, and code for constructing an ARMI ``Block``.
"""
import collections
import yamlize
import armi
from armi import runLog
from armi.reactor import blocks
from armi.reactor import parameters
from armi.reactor.blueprints import componentBlueprint
from armi.reactor.converters import blockConverters
from armi.reactor.locations import AXIAL_CHARS
def _configureGeomOptions():
blockTypes = dict()
pm = armi.getPluginManagerOrFail()
for pluginBlockTypes in pm.hook.defineBlockTypes():
for compType, blockType in pluginBlockTypes:
blockTypes[compType] = blockType
return blockTypes
class BlockBlueprint(yamlize.KeyedList):
"""Input definition for Block."""
item_type = componentBlueprint.ComponentBlueprint
key_attr = componentBlueprint.ComponentBlueprint.name
name = yamlize.Attribute(type=str)
_geomOptions = _configureGeomOptions()
def _getBlockClass(self, outerComponent):
"""
Get the ARMI ``Block`` class for the specified geomType.
Parameters
----------
outerComponent : Component
Largest component in block.
"""
for compCls, blockCls in self._geomOptions.items():
if isinstance(outerComponent, compCls):
return blockCls
raise ValueError(
"Block input for {} has outer component {} which is "
" not a supported Block geometry subclass. Update geometry."
"".format(self.name, outerComponent)
)
def construct(
self, cs, blueprint, axialIndex, axialMeshPoints, height, xsType, materialInput
):
"""
Construct an ARMI ``Block`` to be placed in an ``Assembly``.
Parameters
----------
cs : CaseSettings
CaseSettings object for the appropriate simulation.
blueprint : Blueprints
Blueprints object containing various detailed information, such as nuclides to model
axialIndex : int
The Axial index this block exists within the parent assembly
axialMeshPoints : int
number of mesh points for use in the neutronics kernel
height : float
initial height of the block
xsType : str
String representing the xsType of this block.
materialInput : dict
dict containing material modification names and values
"""
runLog.debug("Constructing block {}".format(self.name))
appliedMatMods = False
components = collections.OrderedDict()
for cDesign in self:
c, compAppliedMatMods = cDesign.construct(blueprint, materialInput)
components[c.name] = c
appliedMatMods |= compAppliedMatMods
if any(materialInput) and not appliedMatMods:
raise ValueError(
"Failure to apply material modifications {} in block {}".format(
materialInput, self.name
)
)
for c in components.values():
c._resolveLinkedDims(components)
boundingComp = sorted(components.values())[-1]
b = self._getBlockClass(boundingComp)("Bxxx{0}".format(AXIAL_CHARS[axialIndex]))
for paramDef in b.p.paramDefs.inCategory(
parameters.Category.assignInBlueprints
):
val = getattr(self, paramDef.name)
if val is not None:
b.p[paramDef.name] = val
b.setType(self.name)
for c in components.values():
b.addComponent(c)
b.p.nPins = b.getNumPins()
b.p.axMesh = _setBlueprintNumberOfAxialMeshes(
axialMeshPoints, cs["axialMeshRefinementFactor"]
)
b.p.height = height
b.p.heightBOL = height # for fuel performance
b.p.xsType = xsType
b.setBuLimitInfo(cs)
b.buildNumberDensityParams(nucNames=blueprint.allNuclidesInProblem)
b = self._mergeComponents(b)
b.verifyBlockDims()
return b
def _mergeComponents(self, b):
solventNamesToMergeInto = set(c.p.mergeWith for c in b if c.p.mergeWith)
if solventNamesToMergeInto:
runLog.warning(
"Component(s) {} in block {} has merged components inside it. The merge was valid at hot "
"temperature, but the merged component only has the basic thermal expansion factors "
"of the component(s) merged into. Expansion properties or dimensions of non hot "
"temperature may not be representative of how the original components would have acted had "
"they not been merged. It is recommended that merging happen right before "
"a physics calculation using a block converter to avoid this."
"".format(solventNamesToMergeInto, b.name),
single=True,
)
for solventName in solventNamesToMergeInto:
soluteNames = []
for c in b:
if c.p.mergeWith == solventName:
soluteNames.append(c.name)
converter = blockConverters.MultipleComponentMerger(
b, soluteNames, solventName
)
b = converter.convert()
return b
# Dynamically expose every Block parameter flagged "assignInBlueprints" as an
# optional yamlize attribute on BlockBlueprint, so users can set those
# parameters directly from blueprint input files.
for paramDef in parameters.forType(blocks.Block).inCategory(
    parameters.Category.assignInBlueprints
):
    setattr(
        BlockBlueprint,
        paramDef.name,
        yamlize.Attribute(name=paramDef.name, default=None),
    )
def _setBlueprintNumberOfAxialMeshes(meshPoints, factor):
"""
Set the blueprint number of axial mesh based on the axial mesh refinement factor.
"""
if factor != 1:
runLog.important(
"An axial mesh refinement factor of {} is applied "
"to blueprint based on setting specification.".format(factor),
single=True,
)
return int(meshPoints) * factor
class BlockKeyedList(yamlize.KeyedList):
    """
    An OrderedDict of BlockBlueprints keyed on the name. Utilizes yamlize for serialization to and from YAML.

    This is used within the ``blocks:`` main entry of the blueprints.
    """

    # yamlize wiring: items are BlockBlueprints, keyed by their ``name`` field.
    item_type = BlockBlueprint
    key_attr = BlockBlueprint.name
class BlockList(yamlize.Sequence):
    """
    A list of BlockBlueprints keyed on the name. Utilizes yamlize for serialization to and from YAML.

    This is used to define the ``blocks:`` attribute of the assembly definitions.
    """

    # yamlize wiring: an ordered sequence (not keyed) of BlockBlueprints.
    item_type = BlockBlueprint
acf41f127ab0633de99ea743fb18aefbcd6966c7 | 3,799 | py | Python | hikyuu/gui/data/ImportPytdxToH5Task.py | lianchaoyun/hikyuu | 213fdadd120b927d9831fe7426206511e0bc3f0b | [
"MIT"
] | null | null | null | hikyuu/gui/data/ImportPytdxToH5Task.py | lianchaoyun/hikyuu | 213fdadd120b927d9831fe7426206511e0bc3f0b | [
"MIT"
] | null | null | null | hikyuu/gui/data/ImportPytdxToH5Task.py | lianchaoyun/hikyuu | 213fdadd120b927d9831fe7426206511e0bc3f0b | [
"MIT"
] | null | null | null | # coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2010-2017 fasiondog/hikyuu
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import sqlite3
import mysql.connector
from pytdx.hq import TdxHq_API
from hikyuu.data.pytdx_to_h5 import import_data as h5_import_data
from hikyuu.data.pytdx_to_mysql import import_data as mysql_import_data
from hikyuu.util import capture_multiprocess_all_logger, get_default_logger
class ProgressBar:
    """Progress callback handed to the pytdx importers.

    Converts each (cur, total) callback into a percentage message pushed onto
    the owning task's result queue.
    """

    def __init__(self, src):
        # src: the owning import task; must expose queue, task_name, market, ktype.
        self.src = src

    def __call__(self, cur, total):
        owner = self.src
        percent = (cur + 1) * 100 // total
        owner.queue.put([owner.task_name, owner.market, owner.ktype, percent, 0])
class ImportPytdxToH5:
    """Multiprocessing task: download K-line data via pytdx and import it into
    either an HDF5-backed SQLite store or MySQL, reporting progress and the
    final record count on ``queue``."""

    def __init__(self, log_queue, queue, config, market, ktype, quotations, ip, port, dest_dir, start_datetime):
        # log_queue: queue for multiprocess log records; queue: task-result queue.
        # market/ktype/quotations select what to download; ip/port address the
        # TDX quote server; dest_dir is the HDF5 output directory.
        self.logger = logging.getLogger(self.__class__.__name__)
        self.task_name = 'IMPORT_KDATA'
        self.queue = queue
        self.log_queue = log_queue
        self.config = config
        self.market = market
        self.ktype = ktype
        self.quotations = quotations
        self.ip = ip
        self.port = port
        self.dest_dir = dest_dir
        self.startDatetime = start_datetime

    def __call__(self):
        # Re-attach logging in the child process.
        capture_multiprocess_all_logger(self.log_queue, get_default_logger().level)
        # Select the storage backend from config: HDF5/SQLite (default) or MySQL.
        if self.config.getboolean('hdf5', 'enable', fallback=True):
            sqlite_file = "{}/stock.db".format(self.config['hdf5']['dir'])
            connect = sqlite3.connect(sqlite_file, timeout=1800)
            import_data = h5_import_data
            self.logger.debug('use hdf5 import kdata')
        else:
            db_config = {
                'user': self.config['mysql']['usr'],
                'password': self.config['mysql']['pwd'],
                'host': self.config['mysql']['host'],
                'port': self.config['mysql']['port']
            }
            connect = mysql.connector.connect(**db_config)
            import_data = mysql_import_data
            self.logger.debug('use mysql import kdata')
        count = 0
        try:
            progress = ProgressBar(self)
            api = TdxHq_API()
            api.connect(self.ip, self.port)
            count = import_data(
                connect, self.market, self.ktype, self.quotations, api, self.dest_dir, self.startDatetime, progress
            )
            # NOTE(review): the literal below is mojibake of a Chinese message
            # ("imported ... record count"); kept byte-for-byte as runtime text.
            self.logger.info("ๅฏผๅฅ {} {} ่ฎฐๅฝๆฐ: {}".format(self.market, self.ktype, count))
        except Exception as e:
            # Errors are logged, not re-raised; the finally block still posts
            # a completion message (with count possibly 0/partial).
            self.logger.error("ImportPytdxToH5Task failed! {}".format(e))
            #self.queue.put([self.task_name, self.market, self.ktype, str(e), count])
        finally:
            connect.commit()
            connect.close()
            # None in the error slot signals normal completion to the consumer.
            self.queue.put([self.task_name, self.market, self.ktype, None, count])
| 41.293478 | 115 | 0.665175 |
acf41f38094f47e010feb83d0ed842db75cf8ec4 | 4,043 | py | Python | src/app_utils.py | TheSin-/terracoin-masternode-tool | 9d670c89a428cdbdc359234c0d1b83ce6ff569a2 | [
"MIT"
] | null | null | null | src/app_utils.py | TheSin-/terracoin-masternode-tool | 9d670c89a428cdbdc359234c0d1b83ce6ff569a2 | [
"MIT"
] | null | null | null | src/app_utils.py | TheSin-/terracoin-masternode-tool | 9d670c89a428cdbdc359234c0d1b83ce6ff569a2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author: Bertrand256
# Created on: 2017-10
import re
import base64
import binascii
from cryptography.fernet import Fernet
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
def extract_app_version(lines):
    """
    Extracts version string from array of lines (content of version.txt file)
    :param lines: iterable of text lines
    :return: version string, or '' when no ``version_str = '...'`` line exists
    """
    for raw_line in lines:
        fields = [fragment.strip() for fragment in raw_line.split('=')]
        # Only a line of the exact shape "version_str = <value>" qualifies.
        if len(fields) != 2 or fields[0].lower() != 'version_str':
            continue
        return fields[1].strip("'")
    return ''
def version_str_to_number(version_str):
    """Convert a dotted version string into a comparable integer.

    Each dot-separated component is zero-padded to 4 digits and the padded
    components are concatenated, e.g. '1.2.3' -> 100020003.  A non-numeric
    suffix on the last component ('22rc1') is stripped; a fully non-numeric
    last component is dropped.
    """
    components = version_str.split('.')
    if components:
        last = components[-1]
        if not last.isdigit():
            # Keep only the leading digits of the last component, if any.
            leading_digits = re.findall(r'^\d+', last)
            if leading_digits:
                components[-1] = leading_digits[0]
            else:
                components.pop()
        return int(''.join(part.zfill(4) for part in components))
def encrypt(input_str, key):
    """Encrypt *input_str* with a Fernet key derived from *key* via
    PBKDF2-HMAC-SHA256 and return the ciphertext as a hex string.

    NOTE(review): the salt is hard-coded and duplicated in decrypt(); the two
    must stay in sync for round-tripping to work.
    """
    salt = b'D9\x82\xbfSibW(\xb1q\xeb\xd1\x84\x118'
    kdf = PBKDF2HMAC(
        algorithm=hashes.SHA256(),
        length=32,
        salt=salt,
        iterations=100000,
        backend=default_backend()
    )
    # Fernet requires a urlsafe-base64-encoded 32-byte key.
    key = base64.urlsafe_b64encode(kdf.derive(key.encode('utf-8')))
    fer = Fernet(key)
    h = fer.encrypt(input_str.encode('utf-8'))
    # Hex-encode so the result is plain ASCII for storage in config files.
    h = h.hex()
    return h
def decrypt(input_str, key):
    """Reverse of encrypt(): hex-decode *input_str*, derive the Fernet key
    from *key* with PBKDF2-HMAC-SHA256 and decrypt.

    Returns '' on any failure (wrong key, malformed input) rather than
    raising -- callers rely on this best-effort behavior.
    """
    try:
        input_str = binascii.unhexlify(input_str)
        salt = b'D9\x82\xbfSibW(\xb1q\xeb\xd1\x84\x118'  # must match encrypt()
        kdf = PBKDF2HMAC(
            algorithm=hashes.SHA256(),
            length=32,
            salt=salt,
            iterations=100000,
            backend=default_backend()
        )
        key = base64.urlsafe_b64encode(kdf.derive(key.encode('utf-8')))
        fer = Fernet(key)
        h = fer.decrypt(input_str)
        h = h.decode('utf-8')
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt.  The deliberate best-effort '' result is kept,
        # but only for real errors.
        return ''
    return h
def seconds_to_human(number_of_seconds, out_seconds=True, out_minutes=True, out_hours=True):
    """
    Converts number of seconds to string representation.
    :param number_of_seconds: number of seconds
    :param out_seconds: False if the seconds part is to be truncated from the output
    :param out_minutes: False if the minutes part is to be truncated from the output
    :param out_hours: False if the hours part is to be truncated from the output
    :return: string representation of the time delta, e.g. '1 day 2 hours'
    """
    def _labeled(count, unit):
        # '2 week' -> '2 weeks'; singular values stay unchanged.
        text = str(count) + ' ' + unit
        return text + 's' if count > 1 else text

    pieces = []
    remaining = number_of_seconds

    if remaining > 604800:  # whole weeks
        weeks = int(remaining / 604800)
        remaining -= weeks * 604800
        pieces.append(_labeled(weeks, 'week'))

    if remaining > 86400:  # whole days
        days = int(remaining / 86400)
        remaining -= days * 86400
        pieces.append(_labeled(days, 'day'))

    if out_hours and remaining > 3600:
        hours = int(remaining / 3600)
        remaining -= hours * 3600
        pieces.append(_labeled(hours, 'hour'))

    if out_minutes and remaining > 60:
        minutes = int(remaining / 60)
        remaining -= minutes * 60
        pieces.append(_labeled(minutes, 'minute'))

    if out_seconds and remaining >= 1:
        # The plural test deliberately uses the (possibly fractional)
        # remainder, matching the original behavior for float inputs.
        text = str(int(remaining)) + ' second'
        if remaining > 1:
            text += 's'
        pieces.append(text)

    return ' '.join(pieces)
| 30.398496 | 92 | 0.611674 |
acf420e70998c9f0852d22864d96ff116687a81e | 10,128 | py | Python | ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/params.py | dawnwish/ambari | accbb0048435de2d08e3d1b8771d966a94b98707 | [
"Apache-2.0"
] | 16 | 2018-05-24T10:28:24.000Z | 2021-08-05T03:13:26.000Z | ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/params.py | chinaworld/ambari-zh | f3b9afcbf0ed708fa5b5995a3acfb9f4131dc019 | [
"Apache-2.0"
] | 1 | 2018-10-22T17:50:00.000Z | 2018-10-22T17:50:00.000Z | ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/params.py | chinaworld/ambari-zh | f3b9afcbf0ed708fa5b5995a3acfb9f4131dc019 | [
"Apache-2.0"
] | 17 | 2018-07-06T08:57:00.000Z | 2021-11-04T11:00:36.000Z | """
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from ambari_commons.constants import AMBARI_SUDO_BINARY
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.is_empty import is_empty
from resource_management.libraries.script.script import Script
import os
import status_params
def get_port_from_url(address):
    """Return the port component (text after the last ':') of *address*.

    Empty addresses are passed through unchanged.
    """
    if is_empty(address):
        return address
    return address.split(':')[-1]
def get_name_from_principal(principal):
    """Extract the short user/service name from a Kerberos principal.

    'name/host@REALM' -> 'name'; 'name@REALM' -> 'name'.  Empty or None
    principals are returned unchanged.
    """
    if not principal:  # return if empty
        return principal
    name_and_host = principal.split('/')
    if len(name_and_host) == 2:
        return name_and_host[0]
    # No host component: strip the realm instead.
    return principal.split('@')[0]
# config object that holds the configurations declared in the -site.xml file
config = Script.get_config()

tmp_dir = Script.get_tmp_dir()

stack_version = default("/commandParams/version", None)
sudo = AMBARI_SUDO_BINARY
security_enabled = status_params.security_enabled

infra_solr_conf = "/etc/ambari-infra-solr/conf"

# Runtime process settings come from status_params (shared with status checks).
infra_solr_port = status_params.infra_solr_port
infra_solr_piddir = status_params.infra_solr_piddir
infra_solr_pidfile = status_params.infra_solr_pidfile

user_group = config['configurations']['cluster-env']['user_group']
fetch_nonlocal_groups = config['configurations']['cluster-env']["fetch_nonlocal_groups"]

# shared configs
java64_home = config['hostLevelParams']['java_home']
java_exec = format("{java64_home}/bin/java")
zookeeper_hosts_list = config['clusterHostInfo']['zookeeper_hosts']
zookeeper_hosts_list.sort()
# get comma separated list of zookeeper hosts from clusterHostInfo
zookeeper_hosts = ",".join(zookeeper_hosts_list)

#####################################
# Solr configs
#####################################

# Only supporting SolrCloud mode - so hardcode those options
solr_cloudmode = 'true'
solr_dir = '/usr/lib/ambari-infra-solr'
solr_client_dir = '/usr/lib/ambari-infra-solr-client'
solr_bindir = solr_dir + '/bin'
cloud_scripts = solr_dir + '/server/scripts/cloud-scripts'

# Pull Solr-specific settings only when the infra-solr-env config type exists.
if "infra-solr-env" in config['configurations']:
  infra_solr_hosts = config['clusterHostInfo']['infra_solr_hosts']
  infra_solr_znode = config['configurations']['infra-solr-env']['infra_solr_znode']
  infra_solr_min_mem = format(config['configurations']['infra-solr-env']['infra_solr_minmem'])
  infra_solr_max_mem = format(config['configurations']['infra-solr-env']['infra_solr_maxmem'])
  infra_solr_instance_count = len(config['clusterHostInfo']['infra_solr_hosts'])
  infra_solr_datadir = format(config['configurations']['infra-solr-env']['infra_solr_datadir'])
  infra_solr_data_resources_dir = os.path.join(infra_solr_datadir, 'resources')
  infra_solr_jmx_port = config['configurations']['infra-solr-env']['infra_solr_jmx_port']
  infra_solr_ssl_enabled = default('configurations/infra-solr-env/infra_solr_ssl_enabled', False)
  infra_solr_keystore_location = config['configurations']['infra-solr-env']['infra_solr_keystore_location']
  infra_solr_keystore_password = config['configurations']['infra-solr-env']['infra_solr_keystore_password']
  infra_solr_keystore_type = config['configurations']['infra-solr-env']['infra_solr_keystore_type']
  infra_solr_truststore_location = config['configurations']['infra-solr-env']['infra_solr_truststore_location']
  infra_solr_truststore_password = config['configurations']['infra-solr-env']['infra_solr_truststore_password']
  infra_solr_truststore_type = config['configurations']['infra-solr-env']['infra_solr_truststore_type']
  infra_solr_user = config['configurations']['infra-solr-env']['infra_solr_user']
  infra_solr_log_dir = config['configurations']['infra-solr-env']['infra_solr_log_dir']
  infra_solr_log = format("{infra_solr_log_dir}/solr-install.log")
  solr_env_content = config['configurations']['infra-solr-env']['content']

zookeeper_port = default('/configurations/zoo.cfg/clientPort', None)
# get comma separated list of zookeeper hosts from clusterHostInfo
# Builds "host1:port,host2:port,..." (no trailing comma).
index = 0
zookeeper_quorum = ""
for host in config['clusterHostInfo']['zookeeper_hosts']:
  zookeeper_quorum += host + ":" + str(zookeeper_port)
  index += 1
  if index < len(config['clusterHostInfo']['zookeeper_hosts']):
    zookeeper_quorum += ","

# Fallback when security is off; overridden from Kerberos principals below.
default_ranger_audit_users = 'nn,hbase,hive,knox,kafka,kms,storm,yarn,nifi'

if security_enabled:
  kinit_path_local = status_params.kinit_path_local
  _hostname_lowercase = config['hostname'].lower()
  infra_solr_jaas_file = infra_solr_conf + '/infra_solr_jaas.conf'
  infra_solr_kerberos_keytab = config['configurations']['infra-solr-env']['infra_solr_kerberos_keytab']
  infra_solr_kerberos_principal = config['configurations']['infra-solr-env']['infra_solr_kerberos_principal'].replace('_HOST',_hostname_lowercase)
  infra_solr_web_kerberos_keytab = config['configurations']['infra-solr-env']['infra_solr_web_kerberos_keytab']
  infra_solr_web_kerberos_principal = config['configurations']['infra-solr-env']['infra_solr_web_kerberos_principal'].replace('_HOST',_hostname_lowercase)
  infra_solr_kerberos_name_rules = config['configurations']['infra-solr-env']['infra_solr_kerberos_name_rules'].replace('$', '\$')
  infra_solr_sasl_user = get_name_from_principal(infra_solr_kerberos_principal)
  kerberos_realm = config['configurations']['kerberos-env']['realm']

  # Collect the audit principals of every Ranger-plugin service; the short
  # names derived from them replace default_ranger_audit_users.
  ranger_audit_principal_conf_key = "xasecure.audit.jaas.Client.option.principal"
  ranger_audit_principals = []
  ranger_audit_principals.append(default('configurations/ranger-hdfs-audit/' + ranger_audit_principal_conf_key, 'nn'))
  ranger_audit_principals.append(default('configurations/ranger-hbase-audit/' + ranger_audit_principal_conf_key, 'hbase'))
  ranger_audit_principals.append(default('configurations/ranger-hive-audit/' + ranger_audit_principal_conf_key, 'hive'))
  ranger_audit_principals.append(default('configurations/ranger-knox-audit/' + ranger_audit_principal_conf_key, 'knox'))
  ranger_audit_principals.append(default('configurations/ranger-kafka-audit/' + ranger_audit_principal_conf_key, 'kafka'))
  ranger_audit_principals.append(default('configurations/ranger-kms-audit/' + ranger_audit_principal_conf_key, 'rangerkms'))
  ranger_audit_principals.append(default('configurations/ranger-storm-audit/' + ranger_audit_principal_conf_key, 'storm'))
  ranger_audit_principals.append(default('configurations/ranger-yarn-audit/' + ranger_audit_principal_conf_key, 'yarn'))
  ranger_audit_principals.append(default('configurations/ranger-nifi-audit/' + ranger_audit_principal_conf_key, 'nifi'))
  ranger_audit_names_from_principals = [ get_name_from_principal(x) for x in ranger_audit_principals ]
  default_ranger_audit_users = ','.join(ranger_audit_names_from_principals)

infra_solr_ranger_audit_service_users = format(config['configurations']['infra-solr-security-json']['infra_solr_ranger_audit_service_users']).split(',')
infra_solr_security_json_content = config['configurations']['infra-solr-security-json']['content']

#Solr log4j
infra_log_maxfilesize = default('configurations/infra-solr-log4j/infra_log_maxfilesize',10)
infra_log_maxbackupindex = default('configurations/infra-solr-log4j/infra_log_maxbackupindex',9)

solr_xml_content = default('configurations/infra-solr-xml/content', None)
solr_log4j_content = default('configurations/infra-solr-log4j/content', None)

smokeuser = config['configurations']['cluster-env']['smokeuser']
smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']

# Collection names used by Ranger and Log Search on this Solr instance.
ranger_solr_collection_name = default('configurations/ranger-env/ranger_solr_collection_name', 'ranger_audits')
logsearch_service_logs_collection = default('configurations/logsearch-properties/logsearch.solr.collection.service.logs', 'hadoop_logs')
logsearch_audit_logs_collection = default('configurations/logsearch-properties/logsearch.solr.collection.audit.logs', 'audit_logs')

# Short service-user names of the components that talk to infra-solr.
ranger_admin_kerberos_service_user = get_name_from_principal(default('configurations/ranger-admin-site/ranger.admin.kerberos.principal', 'rangeradmin'))
atlas_kerberos_service_user = get_name_from_principal(default('configurations/application-properties/atlas.authentication.principal', 'atlas'))
logsearch_kerberos_service_user = get_name_from_principal(default('configurations/logsearch-env/logsearch_kerberos_principal', 'logsearch'))
logfeeder_kerberos_service_user = get_name_from_principal(default('configurations/logfeeder-env/logfeeder_kerberos_principal', 'logfeeder'))
infra_solr_kerberos_service_user = get_name_from_principal(default('configurations/infra-solr-env/infra_solr_kerberos_principal', 'infra-solr'))

# Role names written into security.json for Solr authorization.
infra_solr_role_ranger_admin = default('configurations/infra-solr-security-json/infra_solr_role_ranger_admin', 'ranger_user')
infra_solr_role_ranger_audit = default('configurations/infra-solr-security-json/infra_solr_role_ranger_audit', 'ranger_audit_user')
infra_solr_role_atlas = default('configurations/infra-solr-security-json/infra_solr_role_atlas', 'atlas_user')
infra_solr_role_logsearch = default('configurations/infra-solr-security-json/infra_solr_role_logsearch', 'logsearch_user')
infra_solr_role_logfeeder = default('configurations/infra-solr-security-json/infra_solr_role_logfeeder', 'logfeeder_user')
infra_solr_role_dev = default('configurations/infra-solr-security-json/infra_solr_role_dev', 'dev')
| 59.576471 | 154 | 0.805095 |
acf4217159f0b4ec7a5dccc29d3d781e1ad188b7 | 552 | py | Python | insta/migrations/0004_auto_20190522_0859.py | lilianwaweru/Instagram | 7a4d4dae3f644646c0aebbd8e69ff32bb2a5323f | [
"MIT"
] | null | null | null | insta/migrations/0004_auto_20190522_0859.py | lilianwaweru/Instagram | 7a4d4dae3f644646c0aebbd8e69ff32bb2a5323f | [
"MIT"
] | 3 | 2021-03-19T00:47:33.000Z | 2021-09-08T00:59:44.000Z | insta/migrations/0004_auto_20190522_0859.py | lilianwaweru/Instagram | 7a4d4dae3f644646c0aebbd8e69ff32bb2a5323f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-05-22 08:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: replaces the misspelled ``pictur`` field on the
    # ``comments`` model with a correctly named ``picture`` IntegerField.
    # NOTE(review): the remove-then-add sequence discards any values stored in
    # the old column; new rows start from the default 0.

    dependencies = [
        ('insta', '0003_auto_20190522_0844'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='comments',
            name='pictur',
        ),
        migrations.AddField(
            model_name='comments',
            name='picture',
            field=models.IntegerField(default=0),
        ),
    ]
| 22.08 | 49 | 0.586957 |
acf42364cf09c9fdb1a92e4b2d310516b655e82d | 175 | py | Python | model/settings.py | brunojacobs/ulsdpb | 7beff2e5f086d352258cd128430ec16ebfde7d53 | [
"MIT"
] | 5 | 2020-11-14T09:59:03.000Z | 2021-06-10T14:27:40.000Z | model/settings.py | tanetpongc/ulsdpb | 7beff2e5f086d352258cd128430ec16ebfde7d53 | [
"MIT"
] | null | null | null | model/settings.py | tanetpongc/ulsdpb | 7beff2e5f086d352258cd128430ec16ebfde7d53 | [
"MIT"
] | 1 | 2021-04-28T10:44:04.000Z | 2021-04-28T10:44:04.000Z | NUMBA_NOPYTHON = True
NUMBA_CACHE = True
NUMBA_FASTMATH = False
NUMBA_OPTIONS = {
'nopython': NUMBA_NOPYTHON,
'cache': NUMBA_CACHE,
'fastmath': NUMBA_FASTMATH,
}
| 17.5 | 31 | 0.714286 |
acf423d68ec1624cd17bd3f21766a8b8bf47bfbb | 8,274 | py | Python | src/z3c/authenticator/authentication.py | zopefoundation/z3c.authenticator | 9c14afc20431a5d8c26e965978845fb92e1488ce | [
"ZPL-2.1"
] | null | null | null | src/z3c/authenticator/authentication.py | zopefoundation/z3c.authenticator | 9c14afc20431a5d8c26e965978845fb92e1488ce | [
"ZPL-2.1"
] | 2 | 2021-02-11T06:36:54.000Z | 2021-09-17T06:45:45.000Z | src/z3c/authenticator/authentication.py | zopefoundation/z3c.authenticator | 9c14afc20431a5d8c26e965978845fb92e1488ce | [
"ZPL-2.1"
] | 1 | 2015-04-03T05:42:26.000Z | 2015-04-03T05:42:26.000Z | ##############################################################################
#
# Copyright (c) 2008 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Authentication
"""
import zope.interface
import zope.component
import zope.event
from zope.schema.fieldproperty import FieldProperty
from zope.schema.interfaces import ISourceQueriables
from zope.location.interfaces import ILocation
from zope.component import queryNextUtility
from zope.container import btree
from zope.authentication.interfaces import IAuthentication
from zope.authentication.interfaces import PrincipalLookupError
from zope.authentication.interfaces import IUnauthenticatedPrincipal
from z3c.authenticator import interfaces
from z3c.authenticator import event
@zope.interface.implementer(IAuthentication,
    interfaces.IAuthenticator, ISourceQueriables)
class Authenticator(btree.BTreeContainer):
    """Pluggable authentication utility.

    A BTree container holding (or referencing by name) credentials plugins and
    authenticator plugins.  See z3c.authentication.interfaces.IAuthenticator.
    """

    # Ordered plugin names consulted by authenticate()/getPrincipal().  Each
    # name is resolved first as an item of this container, then as a named
    # utility (see _plugins).
    authenticatorPlugins = ()
    credentialsPlugins = ()

    includeNextUtilityForAuthenticate = FieldProperty(
        interfaces.IAuthenticator['includeNextUtilityForAuthenticate'])

    def _plugins(self, names, interface):
        """Yield (name, plugin) pairs for the given names.

        Contained items win over same-named utilities providing ``interface``;
        names that resolve to nothing are silently skipped.
        """
        for name in names:
            plugin = self.get(name)
            if not interface.providedBy(plugin):
                plugin = zope.component.queryUtility(interface, name,
                    context=self)
            if plugin is not None:
                yield name, plugin

    def getAuthenticatorPlugins(self):
        """Yield the configured (name, IAuthenticatorPlugin) pairs."""
        return self._plugins(self.authenticatorPlugins,
            interfaces.IAuthenticatorPlugin)

    def getCredentialsPlugins(self):
        """Yield the configured (name, ICredentialsPlugin) pairs."""
        return self._plugins(self.credentialsPlugins,
            interfaces.ICredentialsPlugin)

    def authenticate(self, request):
        """Authenticate the request, returning an authenticated principal.

        For each credentials plugin (in order) that can extract credentials
        from the request, every authenticator plugin is tried; the first
        principal wins.  Optionally falls back to the next IAuthentication
        utility.  Returns None when nobody could authenticate the request.
        """
        authenticatorPlugins = [p for n, p in self.getAuthenticatorPlugins()]
        for name, credplugin in self.getCredentialsPlugins():
            credentials = credplugin.extractCredentials(request)
            if credentials is None:
                # do not invoke the auth plugin without credentials
                continue
            for authplugin in authenticatorPlugins:
                if authplugin is None:
                    continue
                principal = authplugin.authenticateCredentials(credentials)
                if principal is None:
                    continue
                # create authenticated principal
                authenticated = interfaces.IAuthenticatedPrincipal(principal)
                # send the IAuthenticatedPrincipalCreated event
                zope.event.notify(event.AuthenticatedPrincipalCreated(
                    self, authenticated, request))
                return authenticated

        if self.includeNextUtilityForAuthenticate:
            next = queryNextUtility(self, IAuthentication)
            if next is not None:
                principal = next.authenticate(request)
                if principal is not None:
                    return principal

        return None

    def getPrincipal(self, id):
        """Return the IFoundPrincipal for ``id``.

        Asks each authenticator plugin in order, then delegates to the next
        IAuthentication utility; raises PrincipalLookupError if nobody knows
        the id.
        """
        for name, authplugin in self.getAuthenticatorPlugins():
            principal = authplugin.queryPrincipal(id)
            if principal is None:
                continue
            # create found principal
            found = interfaces.IFoundPrincipal(principal)
            # send the IFoundPrincipalCreated event
            zope.event.notify(event.FoundPrincipalCreated(self, found))
            return found
        next = queryNextUtility(self, IAuthentication)
        if next is not None:
            return next.getPrincipal(id)
        raise PrincipalLookupError(id)

    def getQueriables(self):
        """Yield (name, IQueriableAuthenticator) for each searchable plugin.

        Part of ISourceQueriables; plugins with no queriable adapter are
        skipped.
        """
        for name, authplugin in self.getAuthenticatorPlugins():
            queriable = zope.component.queryMultiAdapter(
                (authplugin, self), interfaces.IQueriableAuthenticator)
            if queriable is not None:
                yield name, queriable

    def unauthenticatedPrincipal(self):
        """Return the unauthenticated principal or None.

        This allows you to return an unauthenticated principal. This can be
        useful if you don't want to fall back to the global unauthenticated
        principal. The reason: if a global principal is returned, there is no
        event notification involved (like IPrincipalCreated) which would allow
        applying groups, and there is no way to apply local groups to global
        unauthenticated principals returned by the global IAuthentication or
        the fallback implementation. See zope.principalregistry.

        Usage:

        Return an unauthenticated principal within this method if you need to
        apply local groups. This allows applying local groups to the returned
        unauthenticated principal if you use a custom subscriber for
        IPrincipalCreated. Note, the local group must include the global
        unauthenticated principal's id in its principals list. Use the zcml
        directive called unauthenticatedPrincipal to define the global
        unauthenticated principal.
        """
        principal = zope.component.queryUtility(IUnauthenticatedPrincipal)
        if principal is not None:
            zope.event.notify(event.UnauthenticatedPrincipalCreated(self,
                principal))
        return principal

    def unauthorized(self, id, request):
        """Issue authentication challenges for an unauthorized request.

        Challenges credentials plugins grouped by their challengeProtocol;
        plugins without a protocol short-circuit after a successful challenge.
        Falls through to the next IAuthentication utility when no plugin
        challenged.
        """
        challengeProtocol = None
        for name, credplugin in self.getCredentialsPlugins():
            protocol = getattr(credplugin, 'challengeProtocol', None)
            if challengeProtocol is None or protocol == challengeProtocol:
                if credplugin.challenge(request):
                    if protocol is None:
                        return
                    elif challengeProtocol is None:
                        challengeProtocol = protocol
        if challengeProtocol is None:
            next = queryNextUtility(self, IAuthentication)
            if next is not None:
                next.unauthorized(id, request)

    def logout(self, request):
        """Log the request's principal out.

        Mirrors unauthorized(): asks each credentials plugin to clear its
        credentials, honoring challengeProtocol grouping, then defers to the
        next IAuthentication utility when nothing handled the logout.
        """
        challengeProtocol = None
        for name, credplugin in self.getCredentialsPlugins():
            protocol = getattr(credplugin, 'challengeProtocol', None)
            if challengeProtocol is None or protocol == challengeProtocol:
                if credplugin.logout(request):
                    if protocol is None:
                        return
                    elif challengeProtocol is None:
                        challengeProtocol = protocol
        if challengeProtocol is None:
            next = queryNextUtility(self, IAuthentication)
            if next is not None:
                next.logout(request)
@zope.component.adapter(interfaces.ISearchable, interfaces.IAuthenticator)
@zope.interface.implementer(interfaces.IQueriableAuthenticator, ILocation)
class QueriableAuthenticator(object):
    """Queriable adapter around a searchable authenticator plugin.

    Adapts (ISearchable, IAuthenticator) and simply forwards schema-based
    principal searches to the wrapped plugin.  Instances are what
    IAuthenticator.getQueriables hands out.
    """

    def __init__(self, authplugin, pau):
        # Anchor the adapter in the object tree: reuse the plugin's own
        # location when it has one, otherwise hang it off the utility.
        located = ILocation.providedBy(authplugin)
        self.__parent__ = authplugin.__parent__ if located else pau
        self.__name__ = authplugin.__name__ if located else ""
        self.authplugin = authplugin
        self.pau = pau

    def search(self, query, start=None, batch_size=None):
        """Delegate the search to the wrapped plugin, yielding principal ids."""
        for principal_id in self.authplugin.search(query, start, batch_size):
            yield principal_id
| 40.360976 | 79 | 0.645274 |
acf4242a7dd0ce102bf122623624f668922cbf6e | 13,195 | py | Python | telegram-logger.py | small-projects-related-to-telegram/telegram-logger-X | d5f712740a88fadbee7bfef8806d890ac1726ea9 | [
"MIT"
] | null | null | null | telegram-logger.py | small-projects-related-to-telegram/telegram-logger-X | d5f712740a88fadbee7bfef8806d890ac1726ea9 | [
"MIT"
] | null | null | null | telegram-logger.py | small-projects-related-to-telegram/telegram-logger-X | d5f712740a88fadbee7bfef8806d890ac1726ea9 | [
"MIT"
] | 1 | 2021-06-19T23:02:57.000Z | 2021-06-19T23:02:57.000Z | #!/usr/bin/env python3
import re
import sys
import sqlite3
from datetime import datetime
from pathlib import Path
from typing import Optional, Union
import toml
from telethon import TelegramClient, events
from telethon.tl.types import Channel, Chat, DocumentAttributeFilename, MessageMediaWebPage, User
# SQLite database holding every logged message/edit/delete event.
DB_PATH = 'data.sqlite3'

# Runtime configuration; see config.toml for the documented keys.
config = toml.load('config.toml')
api_id = config.get('api_id')
api_hash = config.get('api_hash')
# Chat filtering: empty enabled_chats means "all chats", disabled_chats always
# wins (see is_enabled below).
enabled_chats = config.get('enabled_chats', [])
disabled_chats = config.get('disabled_chats', [])
save_media = config.get('save_media', True)
log_to_file = config.get('log_to_file', False)
log_to_stdout = config.get('log_to_stdout', True)
# NOTE: key is spelled "seperate" in the config format; kept for compatibility.
log_seperate_files = config.get('log_seperate_files', True)
# Colors default to on only for interactive stdout logging.
log_colors = config.get('log_colors', not log_to_file and sys.stdout.isatty())

# At least one output sink must be enabled, otherwise the logger is useless.
if (log_to_file is False) and (log_to_stdout is False):
    print('ERROR: Misconfigured. Needs either log_to_stdout, log_to_file or both to be set to true.')
    sys.exit(1)

# ANSI escape sequences used to colorize log lines; empty strings when colors
# are disabled so the f-strings below degrade to plain text.
if log_colors:
    RESET = '\x1b[0m'
    BOLD = '\x1b[1m'
    DIM = '\x1b[2m'
    RED = '\x1b[31m'
    GREEN = '\x1b[32m'
    YELLOW = '\x1b[33m'
    BLUE = '\x1b[34m'
    MAGENTA = '\x1b[35m'
    CYAN = '\x1b[36m'
    WHITE = '\x1b[37m'
    GRAY = '\x1b[90m'
else:
    RESET = ''
    BOLD = ''
    DIM = ''
    RED = ''
    GREEN = ''
    YELLOW = ''
    BLUE = ''
    MAGENTA = ''
    CYAN = ''
    WHITE = ''
    GRAY = ''

# Interactive login on first run (prompts for phone/code); session is stored
# in telegram-logger.session next to this script.
client = TelegramClient('telegram-logger', api_id, api_hash)
client.start()
def get_log_filename(chat_id):
    """Return the log file Path for *chat_id*.

    With log_seperate_files enabled each chat gets logs/<chat_id>.log
    (logs/unknown.log when the chat is unknown); otherwise everything goes
    into one shared logs/telegram_messages.log.
    """
    if log_seperate_files is True:
        name = f'{chat_id}.log' if chat_id else 'unknown.log'
        return Path('logs', name)
    return Path('logs', 'telegram_messages.log')
def get_display_name(entity: Union[Channel, Chat, User]) -> str:
    """Best human-readable name for a Telegram entity.

    Prefers the public @username; users without one get "first [last]",
    channels/chats fall back to their title.
    """
    handle = getattr(entity, 'username', None)
    if handle:
        return handle
    if not isinstance(entity, User):
        # Channels and group chats carry a title instead of a personal name.
        return entity.title
    name = entity.first_name
    if entity.last_name:
        name = f'{name} {entity.last_name}'
    return name
def is_enabled(chat_id) -> bool:
    """True when events from this chat should be logged.

    A chat on the disabled list is always rejected; otherwise an empty
    enabled_chats whitelist means every chat is accepted.
    """
    if chat_id in disabled_chats:
        return False
    return not enabled_chats or chat_id in enabled_chats
def iso_date(dt) -> str:
    """Format a datetime as 'YYYY-MM-DD HH:MM:SS' (ISO-like, space-separated)."""
    return '{:%Y-%m-%d %H:%M:%S}'.format(dt)
async def get_user(user_id: int, chat_id: Optional[int] = None) -> User:
    """Resolve a Telegram user entity by id, or return None.

    Telethon can only resolve ids it has seen before; on a cache miss
    (ValueError) this progressively warms the cache by fetching the chat's
    participant list, first normally and then aggressively, retrying the
    lookup after each fetch.  Returns None when the user still can't be
    resolved or when user_id is falsy.
    """
    if not user_id:
        return None
    try:
        return await client.get_entity(user_id)
    except ValueError:
        # Unknown to the session cache; without a chat we have no way to
        # discover the user.
        if not chat_id:
            return None
        await client.get_participants(chat_id)
        try:
            return await client.get_entity(user_id)
        except ValueError:
            # Last resort: aggressive participant fetch (slower, larger chats).
            await client.get_participants(chat_id, aggressive=True)
            try:
                return await client.get_entity(user_id)
            except ValueError:
                return None
@client.on(events.NewMessage)
async def on_new_message(event: events.NewMessage.Event) -> None:
    """Log an incoming message to stdout/file and record it in SQLite.

    Also downloads attached media into media/<chat_id>/<message_id>/ when
    save_media is enabled.  Web-page previews are not treated as media.
    """
    msg = event.message
    date = msg.date
    chat = await client.get_entity(msg.peer_id)
    if not is_enabled(chat.id):
        return
    user = await get_user(msg.from_id, chat.id)
    text = msg.message

    chat_display = f'[{get_display_name(chat)} ({chat.id})]'
    msg_display = f'({msg.id})'
    if user:
        user_display = f'<{get_display_name(user)} ({user.id})>'

    out = f'{GRAY}{iso_date(date)}{RESET} {BOLD}{BLUE}MSG{RESET} {BOLD}{GRAY}{chat_display}{RESET} {GRAY}{msg_display}{RESET}'
    if user:
        out += f' {BOLD}{user_display}{RESET}'
    if text:
        out += f' {text}{RESET}'
    if msg.media and not isinstance(msg.media, MessageMediaWebPage):
        # e.g. MessageMediaDocument -> "Document"
        media_type = re.sub(r'^MessageMedia', '', msg.media.__class__.__name__)
        try:
            filename = next(x.file_name for x in msg.media.document.attributes if isinstance(x, DocumentAttributeFilename))
        except (AttributeError, StopIteration):
            filename = None
        if filename:
            # BUGFIX: previously printed the literal "(unknown)" even though
            # the filename was extracted (it is stored in the DB and shown by
            # the delete handler).
            media_display = f'[{media_type}: {filename}]'
        else:
            media_display = f'[{media_type}]'
        out += f' {MAGENTA}{media_display}{RESET}'
    else:
        media_type = None
        filename = None

    if log_to_file:
        logfile = get_log_filename(chat.id if chat else None)
        logfile.parent.mkdir(exist_ok=True)
        with logfile.open('a') as fd:
            fd.write(f'{out}\n')
    if log_to_stdout:
        print(out)

    with sqlite3.connect(DB_PATH) as conn:
        c = conn.cursor()
        c.execute("""
            INSERT INTO event
                (type, date, chat_id, message_id, user_id, text, media_type, media_filename)
            VALUES
                ('new_message', :date, :chat_id, :message_id, :user_id, :text, :media_type, :media_filename)
        """, {
            'date': msg.date.timestamp(),
            'chat_id': chat.id,
            'message_id': msg.id,
            'user_id': user.id if user else None,
            'text': text,
            'media_type': media_type,
            'media_filename': filename,
        })

    if msg.media and not isinstance(msg.media, MessageMediaWebPage) and save_media:
        path = Path('media', str(chat.id), str(msg.id))
        path.mkdir(parents=True, exist_ok=True)
        await client.download_media(msg, path)
@client.on(events.MessageEdited)
async def on_message_edited(event: events.MessageEdited.Event) -> None:
    """Log an edited message as a -old/+new diff and record the new state.

    The previous text/media is looked up from the most recent DB row for the
    same (chat, message); when nothing is known, only the new content is shown.
    """
    msg = event.message
    date = msg.edit_date
    chat = await client.get_entity(msg.peer_id)
    if not is_enabled(chat.id):
        return
    user = await get_user(msg.from_id, chat.id)
    text = msg.message

    # Fetch the last known state of this message for the diff display.
    with sqlite3.connect(DB_PATH) as conn:
        c = conn.cursor()
        c.execute("""
            SELECT
                text, media_type, media_filename
            FROM
                event
            WHERE
                chat_id = :chat_id
                AND message_id = :message_id
            ORDER BY
                rowid DESC
            LIMIT
                1
        """, {'chat_id': chat.id, 'message_id': msg.id})
        row = c.fetchone()
    if row:
        old_text, old_media_type, old_filename = row
    else:
        old_text, old_media_type, old_filename = None, None, None

    # TODO: Find a way to check if media is the same
    #if text == old_text:
    #    # Non-text change (e.g. inline keyboard)
    #    return

    chat_display = f'[{get_display_name(chat)} ({chat.id})]'
    msg_display = f'({msg.id})'
    if user:
        user_display = f'<{get_display_name(user)} ({user.id})>'
    if msg.media and not isinstance(msg.media, MessageMediaWebPage):
        # e.g. MessageMediaPhoto -> "Photo"
        media_type = re.sub(r'^MessageMedia', '', msg.media.__class__.__name__)
        try:
            filename = next(x.file_name for x in msg.media.document.attributes if isinstance(x, DocumentAttributeFilename))
        except (AttributeError, StopIteration):
            filename = None
        if filename:
            # BUGFIX: previously printed the literal "(unknown)" even though
            # the filename was extracted.
            media_display = f'[{media_type}: {filename}]'
        else:
            media_display = f'[{media_type}]'
    else:
        media_type = None
        filename = None

    out = f'{GRAY}{iso_date(date)}{RESET} {BOLD}{YELLOW}EDIT{RESET} {BOLD}{GRAY}{chat_display}{RESET} {GRAY}{msg_display}{RESET}'
    if user:
        out += f' {BOLD}{user_display}{RESET}'
    if old_text or old_media_type:
        # Diff style: "-" line with the old content, "+" line with the new.
        out += '\n-'
        if old_text:
            out += f'{RED}{old_text}{RESET}'
        if old_media_type:
            if old_filename:
                old_media_display = f'[{old_media_type}: {old_filename}]'
            else:
                old_media_display = f'[{old_media_type}]'
            if old_text:
                out += ' '
            out += f'{MAGENTA}{old_media_display}{RESET}'
        out += '\n+'
        if text:
            out += f'{GREEN}{text}{RESET}'
        if msg.media and not isinstance(msg.media, MessageMediaWebPage):
            if text:
                out += ' '
            if filename:
                # BUGFIX: same "(unknown)" placeholder as above.
                media_display = f'[{media_type}: {filename}]'
            else:
                media_display = f'[{media_type}]'
            out += f'{MAGENTA}{media_display}{RESET}'
    else:
        # No previous state known: show only the new content inline.
        if text:
            out += f' {GREEN}{text}{RESET}'
        if msg.media and not isinstance(msg.media, MessageMediaWebPage):
            out += f' {MAGENTA}{media_display}{RESET}'

    if log_to_file:
        logfile = get_log_filename(chat.id if chat else None)
        logfile.parent.mkdir(exist_ok=True)
        with logfile.open('a') as fd:
            fd.write(f'{out}\n')
    if log_to_stdout:
        print(out)

    with sqlite3.connect(DB_PATH) as conn:
        c = conn.cursor()
        c.execute("""
            INSERT INTO event
                (type, date, chat_id, message_id, user_id, text, media_type, media_filename)
            VALUES
                ('message_edited', :date, :chat_id, :message_id, :user_id, :text, :media_type, :media_filename)
        """, {
            # NOTE(review): stores the original send date (msg.date), not the
            # edit date used for display above — looks intentional so edit
            # rows sort with the message, but confirm before changing.
            'date': msg.date.timestamp(),
            'chat_id': chat.id,
            'message_id': msg.id,
            'user_id': user.id if user else None,
            'text': text,
            'media_type': media_type,
            'media_filename': filename,
        })

    if msg.media and not isinstance(msg.media, MessageMediaWebPage) and save_media:
        path = Path('media', str(chat.id), str(msg.id))
        path.mkdir(parents=True, exist_ok=True)
        await client.download_media(msg, path)
@client.on(events.MessageDeleted)
async def on_message_deleted(event: events.MessageDeleted.Event) -> None:
    """Log message deletions, reconstructing content from the local DB.

    Telegram's delete update only carries message ids (and a channel id for
    channels), so the original chat/user/text/media are looked up from the
    most recent stored event for each deleted id.
    """
    msg = event.original_update
    date = datetime.utcnow()
    if getattr(msg, 'channel_id', None):
        chat = await client.get_entity(msg.channel_id)
        if not is_enabled(chat.id):
            return
    else:
        # Non-channel deletions don't tell us the chat; try the DB below.
        chat = None

    if chat:
        chat_display = f'[{get_display_name(chat)} ({chat.id})]'
    for msg_id in event.deleted_ids:
        msg_display = f'({msg_id})'
        # Latest known state of the deleted message.  When the chat is
        # unknown, match any chat via LIKE '%'.
        with sqlite3.connect(DB_PATH) as conn:
            c = conn.cursor()
            c.execute("""
                SELECT
                    chat_id, user_id, text, media_type, media_filename
                FROM
                    event
                WHERE
                    chat_id LIKE :chat_id
                    AND message_id = :message_id
                ORDER BY
                    rowid DESC
                LIMIT
                    1
            """, {
                'chat_id': chat.id if chat else '%',
                'message_id': msg_id,
            })
            row = c.fetchone()
        if row:
            chat_id, user_id, old_text, old_media_type, old_filename = row
        else:
            chat_id, user_id, old_text, old_media_type, old_filename = None, None, None, None, None
        if chat_id and not is_enabled(chat_id):
            return
        if user_id:
            user = await get_user(user_id, chat.id if chat else None)
        else:
            user = None
        if user:
            user_display = f'<{get_display_name(user)} ({user.id})>'

        out = f'{GRAY}{iso_date(date)}{RESET} {BOLD}{RED}DEL{RESET}'
        if chat:
            out += f' {BOLD}{GRAY}{chat_display}{RESET}'
        out += f' {GRAY}{msg_display}{RESET}'
        if user:
            out += f' {RESET}{BOLD}{user_display}'
        if old_text:
            out += f' {RESET}{RED}{old_text}'
        if old_media_type:
            if old_filename:
                old_media_display = f'[{old_media_type}: {old_filename}]'
            else:
                old_media_display = f'[{old_media_type}]'
            if old_text:
                out += ' '
            out += f'{MAGENTA}{old_media_display}{RESET}'
        out += RESET

        if log_to_file:
            logfile = get_log_filename(chat.id if chat else None)
            # BUGFIX: the other handlers create the logs directory before
            # opening; without this, a deletion as the first logged event
            # crashed with FileNotFoundError.
            logfile.parent.mkdir(exist_ok=True)
            with logfile.open('a') as fd:
                fd.write(f'{out}\n')
        if log_to_stdout:
            print(out)

        with sqlite3.connect(DB_PATH) as conn:
            c = conn.cursor()
            c.execute("""
                INSERT INTO event
                    (type, date, chat_id, message_id)
                VALUES
                    ('message_deleted', :date, :chat_id, :message_id)
            """, {
                'date': date.timestamp(),
                'chat_id': chat.id if chat else None,
                'message_id': msg_id,
            })
# --- database schema setup / migration --------------------------------------
# Schema versions are tracked via SQLite's ``user_version`` pragma:
#   0: legacy layout — table named ``events`` without media columns
#   1: current layout — table named ``event`` with media_type/media_filename
with sqlite3.connect(DB_PATH) as conn:
    c = conn.cursor()
    row = c.execute('PRAGMA user_version')
    schema_version = row.fetchone()[0]
    if schema_version < 1:
        # Legacy/fresh path: create the old-schema table if needed, then
        # migrate it in place.
        c.execute("""
            CREATE TABLE IF NOT EXISTS events (
                type TEXT NOT NULL,
                date REAL NOT NULL,
                chat_id INTEGER,
                message_id INTEGER NOT NULL,
                user_id INTEGER,
                text TEXT
            )
        """)
        print('Performing database migration from version 0 to 1')
        c.execute('ALTER TABLE events RENAME TO event')
        c.execute('ALTER TABLE event ADD media_type TEXT')
        c.execute('ALTER TABLE event ADD media_filename TEXT')
        c.execute('PRAGMA user_version = 1')
    else:
        # Already migrated: just ensure the current table exists.
        # BUGFIX: previously the legacy CREATE TABLE ran unconditionally on
        # every start-up, re-creating a stray empty ``events`` table after the
        # rename to ``event``.
        c.execute("""
            CREATE TABLE IF NOT EXISTS event (
                type TEXT NOT NULL,
                date REAL NOT NULL,
                chat_id INTEGER,
                message_id INTEGER NOT NULL,
                user_id INTEGER,
                text TEXT,
                media_type TEXT,
                media_filename TEXT
            )
        """)

print('Listening for messages')
if log_to_file:
    print('Logging to file')
client.run_until_disconnected()
| 29.718468 | 129 | 0.576203 |
acf424549ac649e4039d41075bdb567d69dd79e3 | 37,330 | py | Python | src/oci/golden_gate/models/deployment.py | ezequielramos/oci-python-sdk | cc4235cf217beaf9feed75760e9ce82610222762 | [
"Apache-2.0",
"BSD-3-Clause"
] | 3 | 2020-09-10T22:09:45.000Z | 2021-12-24T17:00:07.000Z | src/oci/golden_gate/models/deployment.py | ezequielramos/oci-python-sdk | cc4235cf217beaf9feed75760e9ce82610222762 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/golden_gate/models/deployment.py | ezequielramos/oci-python-sdk | cc4235cf217beaf9feed75760e9ce82610222762 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class Deployment(object):
"""
A container for your OCI GoldenGate resources, such as the OCI GoldenGate deployment console.
"""
#: A constant which can be used with the lifecycle_state property of a Deployment.
#: This constant has a value of "CREATING"
LIFECYCLE_STATE_CREATING = "CREATING"
#: A constant which can be used with the lifecycle_state property of a Deployment.
#: This constant has a value of "UPDATING"
LIFECYCLE_STATE_UPDATING = "UPDATING"
#: A constant which can be used with the lifecycle_state property of a Deployment.
#: This constant has a value of "ACTIVE"
LIFECYCLE_STATE_ACTIVE = "ACTIVE"
#: A constant which can be used with the lifecycle_state property of a Deployment.
#: This constant has a value of "INACTIVE"
LIFECYCLE_STATE_INACTIVE = "INACTIVE"
#: A constant which can be used with the lifecycle_state property of a Deployment.
#: This constant has a value of "DELETING"
LIFECYCLE_STATE_DELETING = "DELETING"
#: A constant which can be used with the lifecycle_state property of a Deployment.
#: This constant has a value of "DELETED"
LIFECYCLE_STATE_DELETED = "DELETED"
#: A constant which can be used with the lifecycle_state property of a Deployment.
#: This constant has a value of "FAILED"
LIFECYCLE_STATE_FAILED = "FAILED"
#: A constant which can be used with the lifecycle_state property of a Deployment.
#: This constant has a value of "NEEDS_ATTENTION"
LIFECYCLE_STATE_NEEDS_ATTENTION = "NEEDS_ATTENTION"
#: A constant which can be used with the lifecycle_state property of a Deployment.
#: This constant has a value of "IN_PROGRESS"
LIFECYCLE_STATE_IN_PROGRESS = "IN_PROGRESS"
#: A constant which can be used with the lifecycle_state property of a Deployment.
#: This constant has a value of "CANCELING"
LIFECYCLE_STATE_CANCELING = "CANCELING"
#: A constant which can be used with the lifecycle_state property of a Deployment.
#: This constant has a value of "CANCELED"
LIFECYCLE_STATE_CANCELED = "CANCELED"
#: A constant which can be used with the lifecycle_state property of a Deployment.
#: This constant has a value of "SUCCEEDED"
LIFECYCLE_STATE_SUCCEEDED = "SUCCEEDED"
#: A constant which can be used with the lifecycle_sub_state property of a Deployment.
#: This constant has a value of "RECOVERING"
LIFECYCLE_SUB_STATE_RECOVERING = "RECOVERING"
#: A constant which can be used with the lifecycle_sub_state property of a Deployment.
#: This constant has a value of "STARTING"
LIFECYCLE_SUB_STATE_STARTING = "STARTING"
#: A constant which can be used with the lifecycle_sub_state property of a Deployment.
#: This constant has a value of "STOPPING"
LIFECYCLE_SUB_STATE_STOPPING = "STOPPING"
#: A constant which can be used with the lifecycle_sub_state property of a Deployment.
#: This constant has a value of "MOVING"
LIFECYCLE_SUB_STATE_MOVING = "MOVING"
#: A constant which can be used with the lifecycle_sub_state property of a Deployment.
#: This constant has a value of "UPGRADING"
LIFECYCLE_SUB_STATE_UPGRADING = "UPGRADING"
#: A constant which can be used with the lifecycle_sub_state property of a Deployment.
#: This constant has a value of "RESTORING"
LIFECYCLE_SUB_STATE_RESTORING = "RESTORING"
#: A constant which can be used with the lifecycle_sub_state property of a Deployment.
#: This constant has a value of "BACKUP_IN_PROGRESS"
LIFECYCLE_SUB_STATE_BACKUP_IN_PROGRESS = "BACKUP_IN_PROGRESS"
#: A constant which can be used with the license_model property of a Deployment.
#: This constant has a value of "LICENSE_INCLUDED"
LICENSE_MODEL_LICENSE_INCLUDED = "LICENSE_INCLUDED"
#: A constant which can be used with the license_model property of a Deployment.
#: This constant has a value of "BRING_YOUR_OWN_LICENSE"
LICENSE_MODEL_BRING_YOUR_OWN_LICENSE = "BRING_YOUR_OWN_LICENSE"
#: A constant which can be used with the deployment_type property of a Deployment.
#: This constant has a value of "OGG"
DEPLOYMENT_TYPE_OGG = "OGG"
def __init__(self, **kwargs):
"""
Initializes a new Deployment object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param id:
The value to assign to the id property of this Deployment.
:type id: str
:param display_name:
The value to assign to the display_name property of this Deployment.
:type display_name: str
:param description:
The value to assign to the description property of this Deployment.
:type description: str
:param compartment_id:
The value to assign to the compartment_id property of this Deployment.
:type compartment_id: str
:param deployment_backup_id:
The value to assign to the deployment_backup_id property of this Deployment.
:type deployment_backup_id: str
:param time_created:
The value to assign to the time_created property of this Deployment.
:type time_created: datetime
:param time_updated:
The value to assign to the time_updated property of this Deployment.
:type time_updated: datetime
:param lifecycle_state:
The value to assign to the lifecycle_state property of this Deployment.
Allowed values for this property are: "CREATING", "UPDATING", "ACTIVE", "INACTIVE", "DELETING", "DELETED", "FAILED", "NEEDS_ATTENTION", "IN_PROGRESS", "CANCELING", "CANCELED", "SUCCEEDED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type lifecycle_state: str
:param lifecycle_sub_state:
The value to assign to the lifecycle_sub_state property of this Deployment.
Allowed values for this property are: "RECOVERING", "STARTING", "STOPPING", "MOVING", "UPGRADING", "RESTORING", "BACKUP_IN_PROGRESS", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type lifecycle_sub_state: str
:param lifecycle_details:
The value to assign to the lifecycle_details property of this Deployment.
:type lifecycle_details: str
:param freeform_tags:
The value to assign to the freeform_tags property of this Deployment.
:type freeform_tags: dict(str, str)
:param defined_tags:
The value to assign to the defined_tags property of this Deployment.
:type defined_tags: dict(str, dict(str, object))
:param is_healthy:
The value to assign to the is_healthy property of this Deployment.
:type is_healthy: bool
:param subnet_id:
The value to assign to the subnet_id property of this Deployment.
:type subnet_id: str
:param fqdn:
The value to assign to the fqdn property of this Deployment.
:type fqdn: str
:param license_model:
The value to assign to the license_model property of this Deployment.
Allowed values for this property are: "LICENSE_INCLUDED", "BRING_YOUR_OWN_LICENSE", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type license_model: str
:param cpu_core_count:
The value to assign to the cpu_core_count property of this Deployment.
:type cpu_core_count: int
:param is_auto_scaling_enabled:
The value to assign to the is_auto_scaling_enabled property of this Deployment.
:type is_auto_scaling_enabled: bool
:param nsg_ids:
The value to assign to the nsg_ids property of this Deployment.
:type nsg_ids: list[str]
:param is_public:
The value to assign to the is_public property of this Deployment.
:type is_public: bool
:param public_ip_address:
The value to assign to the public_ip_address property of this Deployment.
:type public_ip_address: str
:param private_ip_address:
The value to assign to the private_ip_address property of this Deployment.
:type private_ip_address: str
:param deployment_url:
The value to assign to the deployment_url property of this Deployment.
:type deployment_url: str
:param system_tags:
The value to assign to the system_tags property of this Deployment.
:type system_tags: dict(str, dict(str, object))
:param is_latest_version:
The value to assign to the is_latest_version property of this Deployment.
:type is_latest_version: bool
:param time_upgrade_required:
The value to assign to the time_upgrade_required property of this Deployment.
:type time_upgrade_required: datetime
:param deployment_type:
The value to assign to the deployment_type property of this Deployment.
Allowed values for this property are: "OGG", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type deployment_type: str
:param ogg_data:
The value to assign to the ogg_data property of this Deployment.
:type ogg_data: oci.golden_gate.models.OggDeployment
"""
self.swagger_types = {
'id': 'str',
'display_name': 'str',
'description': 'str',
'compartment_id': 'str',
'deployment_backup_id': 'str',
'time_created': 'datetime',
'time_updated': 'datetime',
'lifecycle_state': 'str',
'lifecycle_sub_state': 'str',
'lifecycle_details': 'str',
'freeform_tags': 'dict(str, str)',
'defined_tags': 'dict(str, dict(str, object))',
'is_healthy': 'bool',
'subnet_id': 'str',
'fqdn': 'str',
'license_model': 'str',
'cpu_core_count': 'int',
'is_auto_scaling_enabled': 'bool',
'nsg_ids': 'list[str]',
'is_public': 'bool',
'public_ip_address': 'str',
'private_ip_address': 'str',
'deployment_url': 'str',
'system_tags': 'dict(str, dict(str, object))',
'is_latest_version': 'bool',
'time_upgrade_required': 'datetime',
'deployment_type': 'str',
'ogg_data': 'OggDeployment'
}
self.attribute_map = {
'id': 'id',
'display_name': 'displayName',
'description': 'description',
'compartment_id': 'compartmentId',
'deployment_backup_id': 'deploymentBackupId',
'time_created': 'timeCreated',
'time_updated': 'timeUpdated',
'lifecycle_state': 'lifecycleState',
'lifecycle_sub_state': 'lifecycleSubState',
'lifecycle_details': 'lifecycleDetails',
'freeform_tags': 'freeformTags',
'defined_tags': 'definedTags',
'is_healthy': 'isHealthy',
'subnet_id': 'subnetId',
'fqdn': 'fqdn',
'license_model': 'licenseModel',
'cpu_core_count': 'cpuCoreCount',
'is_auto_scaling_enabled': 'isAutoScalingEnabled',
'nsg_ids': 'nsgIds',
'is_public': 'isPublic',
'public_ip_address': 'publicIpAddress',
'private_ip_address': 'privateIpAddress',
'deployment_url': 'deploymentUrl',
'system_tags': 'systemTags',
'is_latest_version': 'isLatestVersion',
'time_upgrade_required': 'timeUpgradeRequired',
'deployment_type': 'deploymentType',
'ogg_data': 'oggData'
}
self._id = None
self._display_name = None
self._description = None
self._compartment_id = None
self._deployment_backup_id = None
self._time_created = None
self._time_updated = None
self._lifecycle_state = None
self._lifecycle_sub_state = None
self._lifecycle_details = None
self._freeform_tags = None
self._defined_tags = None
self._is_healthy = None
self._subnet_id = None
self._fqdn = None
self._license_model = None
self._cpu_core_count = None
self._is_auto_scaling_enabled = None
self._nsg_ids = None
self._is_public = None
self._public_ip_address = None
self._private_ip_address = None
self._deployment_url = None
self._system_tags = None
self._is_latest_version = None
self._time_upgrade_required = None
self._deployment_type = None
self._ogg_data = None
@property
def id(self):
    """str: The `OCID`__ of the deployment being referenced (required).

    __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
    """
    return self._id

@id.setter
def id(self, id):
    """Set the id of this Deployment."""
    self._id = id

@property
def display_name(self):
    """str: An object's Display Name."""
    return self._display_name

@display_name.setter
def display_name(self, display_name):
    """Set the display_name of this Deployment."""
    self._display_name = display_name

@property
def description(self):
    """str: Metadata about this specific object."""
    return self._description

@description.setter
def description(self, description):
    """Set the description of this Deployment."""
    self._description = description
@property
def compartment_id(self):
    """str: The `OCID`__ of the compartment being referenced (required).

    __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
    """
    return self._compartment_id

@compartment_id.setter
def compartment_id(self, compartment_id):
    """Set the compartment_id of this Deployment."""
    self._compartment_id = compartment_id

@property
def deployment_backup_id(self):
    """str: The `OCID`__ of the backup being referenced.

    __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
    """
    return self._deployment_backup_id

@deployment_backup_id.setter
def deployment_backup_id(self, deployment_backup_id):
    """Set the deployment_backup_id of this Deployment."""
    self._deployment_backup_id = deployment_backup_id

@property
def time_created(self):
    """datetime: Creation time, `RFC3339`__ format (e.g. `2016-08-25T21:10:29.600Z`).

    __ https://tools.ietf.org/html/rfc3339
    """
    return self._time_created

@time_created.setter
def time_created(self, time_created):
    """Set the time_created of this Deployment."""
    self._time_created = time_created

@property
def time_updated(self):
    """datetime: Last-update time, `RFC3339`__ format (e.g. `2016-08-25T21:10:29.600Z`).

    __ https://tools.ietf.org/html/rfc3339
    """
    return self._time_updated

@time_updated.setter
def time_updated(self, time_updated):
    """Set the time_updated of this Deployment."""
    self._time_updated = time_updated
@property
def lifecycle_state(self):
    """str: Possible lifecycle states.

    One of "CREATING", "UPDATING", "ACTIVE", "INACTIVE", "DELETING",
    "DELETED", "FAILED", "NEEDS_ATTENTION", "IN_PROGRESS", "CANCELING",
    "CANCELED", "SUCCEEDED"; unrecognized service values are surfaced as
    'UNKNOWN_ENUM_VALUE'.
    """
    return self._lifecycle_state

@lifecycle_state.setter
def lifecycle_state(self, lifecycle_state):
    """Set lifecycle_state; unknown values are mapped to 'UNKNOWN_ENUM_VALUE'."""
    allowed = ["CREATING", "UPDATING", "ACTIVE", "INACTIVE", "DELETING", "DELETED", "FAILED", "NEEDS_ATTENTION", "IN_PROGRESS", "CANCELING", "CANCELED", "SUCCEEDED"]
    self._lifecycle_state = (
        lifecycle_state
        if value_allowed_none_or_none_sentinel(lifecycle_state, allowed)
        else 'UNKNOWN_ENUM_VALUE'
    )

@property
def lifecycle_sub_state(self):
    """str: Possible GGS lifecycle sub-states.

    One of "RECOVERING", "STARTING", "STOPPING", "MOVING", "UPGRADING",
    "RESTORING", "BACKUP_IN_PROGRESS"; unrecognized service values are
    surfaced as 'UNKNOWN_ENUM_VALUE'.
    """
    return self._lifecycle_sub_state

@lifecycle_sub_state.setter
def lifecycle_sub_state(self, lifecycle_sub_state):
    """Set lifecycle_sub_state; unknown values are mapped to 'UNKNOWN_ENUM_VALUE'."""
    allowed = ["RECOVERING", "STARTING", "STOPPING", "MOVING", "UPGRADING", "RESTORING", "BACKUP_IN_PROGRESS"]
    self._lifecycle_sub_state = (
        lifecycle_sub_state
        if value_allowed_none_or_none_sentinel(lifecycle_sub_state, allowed)
        else 'UNKNOWN_ENUM_VALUE'
    )
@property
def lifecycle_details(self):
    """str: Detailed description of the object's current state.

    For example, actionable information for a resource in a Failed state.
    """
    return self._lifecycle_details

@lifecycle_details.setter
def lifecycle_details(self, lifecycle_details):
    """Set the lifecycle_details of this Deployment."""
    self._lifecycle_details = lifecycle_details

@property
def freeform_tags(self):
    """dict(str, str): Simple key-value tags without predefined name/type/scope.

    Example: `{\"bar-key\": \"value\"}`
    """
    return self._freeform_tags

@freeform_tags.setter
def freeform_tags(self, freeform_tags):
    """Set the freeform_tags of this Deployment."""
    self._freeform_tags = freeform_tags

@property
def defined_tags(self):
    """dict(str, dict(str, object)): Predefined, namespace-scoped tags.

    Example: `{\"foo-namespace\": {\"bar-key\": \"value\"}}`
    """
    return self._defined_tags

@defined_tags.setter
def defined_tags(self, defined_tags):
    """Set the defined_tags of this Deployment."""
    self._defined_tags = defined_tags

@property
def is_healthy(self):
    """bool: True if all of the aggregate resources are working correctly."""
    return self._is_healthy

@is_healthy.setter
def is_healthy(self, is_healthy):
    """Set the is_healthy of this Deployment."""
    self._is_healthy = is_healthy
@property
def subnet_id(self):
    """str: The `OCID`__ of the subnet being referenced (required).

    __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
    """
    return self._subnet_id

@subnet_id.setter
def subnet_id(self, subnet_id):
    """Set the subnet_id of this Deployment."""
    self._subnet_id = subnet_id

@property
def fqdn(self):
    """str: A three-label Fully Qualified Domain Name (FQDN) for a resource."""
    return self._fqdn

@fqdn.setter
def fqdn(self, fqdn):
    """Set the fqdn of this Deployment."""
    self._fqdn = fqdn
@property
def license_model(self):
    """str: The Oracle license model that applies to a Deployment (required).

    One of "LICENSE_INCLUDED", "BRING_YOUR_OWN_LICENSE"; unrecognized
    service values are surfaced as 'UNKNOWN_ENUM_VALUE'.
    """
    return self._license_model

@license_model.setter
def license_model(self, license_model):
    """Set license_model; unknown values are mapped to 'UNKNOWN_ENUM_VALUE'."""
    allowed = ["LICENSE_INCLUDED", "BRING_YOUR_OWN_LICENSE"]
    self._license_model = (
        license_model
        if value_allowed_none_or_none_sentinel(license_model, allowed)
        else 'UNKNOWN_ENUM_VALUE'
    )
@property
def cpu_core_count(self):
    """int: The minimum number of OCPUs to be made available for this Deployment (required)."""
    return self._cpu_core_count

@cpu_core_count.setter
def cpu_core_count(self, cpu_core_count):
    """Set the cpu_core_count of this Deployment."""
    self._cpu_core_count = cpu_core_count

@property
def is_auto_scaling_enabled(self):
    """bool: Whether auto scaling is enabled for the Deployment's CPU core count (required)."""
    return self._is_auto_scaling_enabled

@is_auto_scaling_enabled.setter
def is_auto_scaling_enabled(self, is_auto_scaling_enabled):
    """Set the is_auto_scaling_enabled of this Deployment."""
    self._is_auto_scaling_enabled = is_auto_scaling_enabled

@property
def nsg_ids(self):
    """list[str]: `Network Security Group`__ OCIDs defining network access for a deployment.

    __ https://docs.cloud.oracle.com/Content/Network/Concepts/networksecuritygroups.htm
    """
    return self._nsg_ids

@nsg_ids.setter
def nsg_ids(self, nsg_ids):
    """Set the nsg_ids of this Deployment."""
    self._nsg_ids = nsg_ids
@property
def is_public(self):
    """bool: True if this object is publicly available."""
    return self._is_public

@is_public.setter
def is_public(self, is_public):
    """Set the is_public of this Deployment."""
    self._is_public = is_public

@property
def public_ip_address(self):
    """str: The public IP address representing the access point for the Deployment."""
    return self._public_ip_address

@public_ip_address.setter
def public_ip_address(self, public_ip_address):
    """Set the public_ip_address of this Deployment."""
    self._public_ip_address = public_ip_address

@property
def private_ip_address(self):
    """str: The private IP address in the customer's VCN for the associated
    endpoint service in the GoldenGate service VCN."""
    return self._private_ip_address

@private_ip_address.setter
def private_ip_address(self, private_ip_address):
    """Set the private_ip_address of this Deployment."""
    self._private_ip_address = private_ip_address

@property
def deployment_url(self):
    """str: The URL of a resource."""
    return self._deployment_url

@deployment_url.setter
def deployment_url(self, deployment_url):
    """Set the deployment_url of this Deployment."""
    self._deployment_url = deployment_url
@property
def system_tags(self):
    """dict(str, dict(str, object)): Tags set by OCI services, predefined and
    namespace-scoped. See `Resource Tags`__.

    Example: `{orcl-cloud: {free-tier-retain: true}}`

    __ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm
    """
    return self._system_tags

@system_tags.setter
def system_tags(self, system_tags):
    """Set the system_tags of this Deployment."""
    self._system_tags = system_tags

@property
def is_latest_version(self):
    """bool: Whether the resource is the latest available version."""
    return self._is_latest_version

@is_latest_version.setter
def is_latest_version(self, is_latest_version):
    """Set the is_latest_version of this Deployment."""
    self._is_latest_version = is_latest_version

@property
def time_upgrade_required(self):
    """datetime: When the current version stops being usable and an upgrade is
    required (typically 6 months after release), `RFC3339`__ format.

    __ https://tools.ietf.org/html/rfc3339
    """
    return self._time_upgrade_required

@time_upgrade_required.setter
def time_upgrade_required(self, time_upgrade_required):
    """Set the time_upgrade_required of this Deployment."""
    self._time_upgrade_required = time_upgrade_required
@property
def deployment_type(self):
    """str: The type of service executed in the Deployment (required).

    Only "OGG" is defined here; unrecognized service values are surfaced
    as 'UNKNOWN_ENUM_VALUE'.  NOTE: "OGG" is retained for backward
    compatibility; the equivalent DATABASE_ORACLE value is preferred.
    """
    return self._deployment_type

@deployment_type.setter
def deployment_type(self, deployment_type):
    """Set deployment_type; unknown values are mapped to 'UNKNOWN_ENUM_VALUE'."""
    allowed = ["OGG"]
    self._deployment_type = (
        deployment_type
        if value_allowed_none_or_none_sentinel(deployment_type, allowed)
        else 'UNKNOWN_ENUM_VALUE'
    )
@property
def ogg_data(self):
    """oci.golden_gate.models.OggDeployment: The ogg_data of this Deployment."""
    return self._ogg_data

@ogg_data.setter
def ogg_data(self, ogg_data):
    """Set the ogg_data of this Deployment."""
    self._ogg_data = ogg_data
def __repr__(self):
    """Render all attributes via the SDK's flat-dict formatter."""
    return formatted_flat_dict(self)

def __eq__(self, other):
    """Models compare equal when every instance attribute matches."""
    return other is not None and self.__dict__ == other.__dict__

def __ne__(self, other):
    """Inverse of :meth:`__eq__`."""
    return not self == other
| 34.953184 | 271 | 0.656014 |
acf4266847f871c1b8280d08536c0e7db9ac800b | 1,797 | py | Python | migrations/d7cd5138bb9b_minor_fixes.py | szkkteam/agrosys | a390332202f7200632d2ff3816e1b0f3cc76f586 | [
"MIT"
] | null | null | null | migrations/d7cd5138bb9b_minor_fixes.py | szkkteam/agrosys | a390332202f7200632d2ff3816e1b0f3cc76f586 | [
"MIT"
] | null | null | null | migrations/d7cd5138bb9b_minor_fixes.py | szkkteam/agrosys | a390332202f7200632d2ff3816e1b0f3cc76f586 | [
"MIT"
] | null | null | null | """minor fixes
Revision ID: d7cd5138bb9b
Revises: 0fed690a57ce
Create Date: 2020-09-18 07:56:14.159782
"""
from alembic import op
import geoalchemy2
import sqlalchemy as sa
import backend
# revision identifiers, used by Alembic.
revision = 'd7cd5138bb9b'        # this migration's identifier
down_revision = '0fed690a57ce'   # parent revision in the migration chain
branch_labels = ()               # no named branches
depends_on = None                # no cross-branch dependency
def upgrade():
    """Create the ``task_harvesting`` joined table for harvesting tasks."""
    op.create_table(
        'task_harvesting',
        sa.Column('id', sa.BigInteger(), nullable=False),
        sa.Column('specific_product_id', sa.BigInteger(), nullable=False),
        sa.ForeignKeyConstraint(
            ['id'], ['task.task_id'],
            name=op.f('fk_task_harvesting_id_task'),
            onupdate='CASCADE', ondelete='CASCADE'),
        sa.ForeignKeyConstraint(
            ['specific_product_id'], ['specific_product.id'],
            name=op.f('fk_task_harvesting_specific_product_id_specific_product')),
        sa.PrimaryKeyConstraint('id', name=op.f('pk_task_harvesting')),
        mysql_charset='utf8',
        mysql_engine='InnoDB',
    )
def downgrade():
    """Drop the ``task_harvesting`` table created in ``upgrade``.

    The auto-generated recreation of PostGIS's ``spatial_ref_sys`` system
    table was removed: ``upgrade`` never drops that table, so recreating it
    here was asymmetric and would fail on any PostGIS database where it
    already exists.  ``spatial_ref_sys`` should be excluded from Alembic's
    autogenerate instead of being managed by migrations.
    """
    op.drop_table('task_harvesting')
| 36.673469 | 148 | 0.71675 |
acf42702a8bc70470582fbe1d96aa23e71dc251b | 2,260 | py | Python | tests/teams/test_team.py | AndersFreund/Office365-REST-Python-Client | 98448bbe0d34dfe3d1a7a2005e0e730b92faae75 | [
"MIT"
] | null | null | null | tests/teams/test_team.py | AndersFreund/Office365-REST-Python-Client | 98448bbe0d34dfe3d1a7a2005e0e730b92faae75 | [
"MIT"
] | null | null | null | tests/teams/test_team.py | AndersFreund/Office365-REST-Python-Client | 98448bbe0d34dfe3d1a7a2005e0e730b92faae75 | [
"MIT"
] | null | null | null | import uuid
from tests.graph_case import GraphTestCase
from office365.directory.group import Group
from office365.directory.group_profile import GroupProfile
def _create_group(client):
    """Queue creation of a unified, mail-enabled group with a random name."""
    profile = GroupProfile("Group_" + uuid.uuid4().hex)
    profile.securityEnabled = False
    profile.mailEnabled = True
    profile.groupTypes = ["Unified"]
    return client.groups.add(profile)
class TestGraphTeam(GraphTestCase):
    """Integration tests for Microsoft Teams via the Graph client."""

    target_group = None  # type: Group

    @classmethod
    def setUpClass(cls):
        super(TestGraphTeam, cls).setUpClass()

    def test1_ensure_team(self):
        self.__class__.target_group = _create_group(self.client).execute_query()
        new_team = self.__class__.target_group.add_team().execute_query_retry()
        self.assertIsNotNone(new_team.id)

    def test3_get_all_teams(self):
        teams = self.client.teams.get_all().execute_query()
        self.assertGreater(len(teams), 0)

    def test4_get_joined_teams(self):
        my_teams = self.client.me.joined_teams.get().execute_query()
        self.assertIsNotNone(my_teams.resource_path)
        self.assertGreater(len(my_teams), 0)

    def test5_get_team(self):
        group_id = self.__class__.target_group.id
        existing_team = self.client.teams[group_id].get().execute_query()
        self.assertIsNotNone(existing_team.resource_url)
        self.assertIsNotNone(existing_team.messagingSettings)
        # NOTE(review): indentation was lost in the source; the un-archive
        # sequence is assumed to run only for archived teams -- confirm.
        if existing_team.properties["isArchived"]:
            existing_team.unarchive()
            self.client.load(existing_team)
            self.client.execute_query()
        self.assertFalse(existing_team.properties["isArchived"])

    def test6_update_team(self):
        team_id = self.__class__.target_group.properties['id']
        team_to_update = self.client.teams[team_id]
        team_to_update.funSettings.allowGiphy = False
        team_to_update.update().execute_query()

    def test7_archive_team(self):
        group_id = self.__class__.target_group.id
        self.client.teams[group_id].archive().execute_query()

    def test8_delete_team(self):
        grp_to_delete = self.__class__.target_group
        grp_to_delete.delete_object(True).execute_query()
| 34.242424 | 80 | 0.712389 |
acf4281bd88e72afd7d9b392721de97414d79a42 | 850 | py | Python | AutoFormula/operations/two.py | GYMS-PKU/HIgh-Frequency-Predictor | aac5efa73d6e15d95d1b99d529dcf639fb8181f4 | [
"Apache-2.0"
] | 1 | 2022-02-24T03:20:26.000Z | 2022-02-24T03:20:26.000Z | AutoFormula/operations/two.py | GYMS-PKU/HIgh-Frequency-Predictor | aac5efa73d6e15d95d1b99d529dcf639fb8181f4 | [
"Apache-2.0"
] | null | null | null | AutoFormula/operations/two.py | GYMS-PKU/HIgh-Frequency-Predictor | aac5efa73d6e15d95d1b99d529dcf639fb8181f4 | [
"Apache-2.0"
] | 2 | 2022-01-15T15:37:34.000Z | 2022-02-24T03:22:23.000Z | # Copyright (c) 2021 Dai HBG
"""
This module defines two-operand (binary) operators.
"""
import numpy as np
import numba as nb
def add(a, b):
    """Element-wise sum of *a* and *b*."""
    return a + b


def minus(a, b):
    """Element-wise difference of *a* and *b*."""
    return a - b
def prod(a, b):
    """Element-wise product of *a* and *b* with NaN/Inf sanitized to 0.

    Bug fix: the original computed the cleaned product into ``c`` but then
    returned a fresh ``a * b``, discarding the NaN/Inf cleanup entirely.
    Return the cleaned array instead.
    """
    c = a * b
    c[np.isnan(c)] = 0
    c[np.isinf(c)] = 0
    return c
def div(a, b):
    """Element-wise ``a / b`` where positions with ``b == 0`` yield 0.

    *b* may be an array of the same shape as *a* or a scalar.  Bug fix:
    the original tested ``type(b) == float`` (missing ints and numpy
    scalars) and still divided by zero for a float zero scalar; use a
    generic scalar check and guard the zero case uniformly.
    """
    s = np.zeros(a.shape)
    if np.isscalar(b):
        if b != 0:
            s = a / b
    else:
        nonzero = b != 0
        s[nonzero] = a[nonzero] / b[nonzero]
    return s
# @nb.jit
def intratsregres(a, b):
    """Daily intraday time-series regression residual of *b* on *a*.

    Returns the residual in (time, day, stock)-transposed layout, i.e.
    ``transpose(1, 0, 2)`` relative to the inputs (same as the original).

    Bug fix: ``transpose`` returns a *view*, so the original's in-place
    ``-=`` demeaning silently mutated the caller's input arrays.  Work on
    float copies instead.
    """
    tmp_a = a.transpose(1, 0, 2).astype(float)  # astype copies the view
    tmp_b = b.transpose(1, 0, 2).astype(float)
    tmp_a -= np.nanmean(tmp_a, axis=0)
    tmp_b -= np.nanmean(tmp_b, axis=0)
    beta = np.nansum(tmp_a * tmp_b, axis=0) / np.nansum(tmp_a ** 2, axis=0)
    return tmp_b - beta * tmp_a
def lt(a, b):
    """Element-wise ``a < b``."""
    return a < b


def le(a, b):
    """Element-wise ``a <= b``."""
    return a <= b


def gt(a, b):
    """Element-wise ``a > b``."""
    return a > b


def ge(a, b):
    """Element-wise ``a >= b``."""
    return a >= b
acf428ff78ddd5b4471ffd7f43d44e58e30220f2 | 636 | py | Python | main/cryptkeeper/views.py | kwikcode/cryptkeeper | fdfb3ae6fd5bb6693148e98e0563bd67b4c3ee4f | [
"MIT"
] | 1 | 2021-10-16T14:53:15.000Z | 2021-10-16T14:53:15.000Z | main/cryptkeeper/views.py | kwikcode/cryptkeeper | fdfb3ae6fd5bb6693148e98e0563bd67b4c3ee4f | [
"MIT"
] | null | null | null | main/cryptkeeper/views.py | kwikcode/cryptkeeper | fdfb3ae6fd5bb6693148e98e0563bd67b4c3ee4f | [
"MIT"
] | null | null | null | from django.http import HttpResponse
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from django.contrib.auth.models import User
from .forms import *
def index(request):
    """Public landing page."""
    return render(request, 'cryptkeeper/index.html')


@login_required
def transactions(request):
    """Transaction listing page (login required)."""
    return render(request, 'cryptkeeper/transactions.html')


@login_required
def transaction_importer(request):
    """Transaction import page (login required)."""
    return render(request, 'cryptkeeper/transaction-importer.html')


@login_required
def hidden(request):
    """Plain-text page that is only reachable when authenticated."""
    return HttpResponse("Must be logged in!")
acf42a72e2098e35132f2b3269e14f0390f62396 | 1,361 | py | Python | uff/setup_config.py | ManuDeBuck/UforaFileFetcher | a2218300333629e1f35a82e9801cb138d1a26ea0 | [
"WTFPL"
] | null | null | null | uff/setup_config.py | ManuDeBuck/UforaFileFetcher | a2218300333629e1f35a82e9801cb138d1a26ea0 | [
"WTFPL"
] | null | null | null | uff/setup_config.py | ManuDeBuck/UforaFileFetcher | a2218300333629e1f35a82e9801cb138d1a26ea0 | [
"WTFPL"
] | null | null | null | import argparse
import json
import inquirer
from inquirer import Path
from uff.brightspace import BrightspaceAPI
from uff.courses import get_courses_list
from uff.ufora_login import get_session
def setup():
    """Interactively build the uff config file (credentials, courses, paths).

    Prompts for Ufora credentials, validates them, lets the user pick the
    courses to sync, and writes everything to the chosen JSON config file.
    """
    username = inquirer.text(message="What's your username")
    password = inquirer.password(message="What's your password")
    session = get_session(username, password)
    if session is None:
        print("Invalid login credentials")
        return
    config_file = inquirer.shortcuts.path(message="Specify config file", path_type=Path.FILE)
    output_directory = inquirer.shortcuts.path(message="Specify output directory", path_type=Path.DIRECTORY)
    brightspace_api = BrightspaceAPI(username, password)
    # `courses` maps course names to course ids -- TODO confirm against get_courses_list.
    courses = get_courses_list(brightspace_api)
    selected = inquirer.checkbox(message="Select courses to sync (press enter when ready)",
                                 choices=courses
                                 )
    chosen_ids = [courses[name] for name in selected]
    config = {
        "output_directory": output_directory,
        "courses": chosen_ids,
        "credentials": {
            "username": username,
            "password": password
        }
    }
    with open(config_file, "w+") as f:
        f.write(json.dumps(config, indent=4))
    print("Setup complete!")
acf42b87a4925952fcebd19bf7ed578b70346e10 | 1,790 | py | Python | game_of_life/game_of_life_v1.py | czeildi/python-tdd-practice | 00d15b8f3a48e03093951d45af7a6eff07689d7f | [
"MIT"
] | null | null | null | game_of_life/game_of_life_v1.py | czeildi/python-tdd-practice | 00d15b8f3a48e03093951d45af7a6eff07689d7f | [
"MIT"
] | 1 | 2017-12-11T12:47:06.000Z | 2017-12-11T12:47:06.000Z | game_of_life/game_of_life_v1.py | czeildi/python-tdd-practice | 00d15b8f3a48e03093951d45af7a6eff07689d7f | [
"MIT"
] | null | null | null | class GameOfLife:
"""implement Conway's game of life"""
def next(self, living_cells = []):
cells_remaining_alive = self.cellsRemainingAlive(living_cells)
borning_cells = self.borningCells(living_cells)
return list(set(cells_remaining_alive + borning_cells))
def cellsRemainingAlive(self, living_cells):
return [c for c in living_cells if self.cellRemainsAlive(living_cells, c)]
def borningCells(self, living_cells):
possible_newborns = self.neighborsOfAnyLiving(living_cells)
return [c for c in possible_newborns if self.cellBorns(living_cells, c)]
def cellRemainsAlive(self, living_cells, cell):
return self.numOfLivingNeighbors(living_cells, cell) in (2, 3)
def cellBorns(self, living_cells, cell):
return self.numOfLivingNeighbors(living_cells, cell) == 3
def neighborsOfAnyLiving(self, living_cells):
neighbor_lists = [self.neighbors(c) for c in living_cells]
return [c for neighbors in neighbor_lists for c in neighbors]
def numOfLivingNeighbors(self, living_cells, cell):
living_neigbors = [c for c in self.neighbors(cell) if c in living_cells]
return len(living_neigbors)
def neighbors(self, cell):
directions = [[1, 0], [0, -1], [0, 1], [-1, 0],
[1, 1], [1, -1], [-1, 1], [-1, -1]]
return [self.neighborInDirection(cell, x, y) for (x, y) in directions]
def neighborInDirection(self, cell, xShift, yShift):
(x, y) = self.coordsOfCell(cell)
return self.cellOfCoords(x + xShift, y + yShift)
def coordsOfCell(self, cell):
return [int(coord) for coord in cell.split(', ')]
def cellOfCoords(self, x, y):
return str(x) + ', ' + str(y)
| 40.681818 | 82 | 0.649721 |
acf42bf26fc198ece32cb7178837a55696b34429 | 1,174 | py | Python | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/lms_xblock/migrations/0001_initial.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | 3 | 2021-12-15T04:58:18.000Z | 2022-02-06T12:15:37.000Z | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/lms_xblock/migrations/0001_initial.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | null | null | null | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/lms_xblock/migrations/0001_initial.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | 1 | 2019-01-02T14:38:50.000Z | 2019-01-02T14:38:50.000Z | import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='XBlockAsidesConfig',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('change_date', models.DateTimeField(auto_now_add=True, verbose_name='Change date')),
('enabled', models.BooleanField(default=False, verbose_name='Enabled')),
('disabled_blocks', models.TextField(default='about course_info static_tab', help_text='Space-separated list of XBlocks on which XBlockAsides should never render.')),
('changed_by', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, editable=False, to=settings.AUTH_USER_MODEL, null=True, verbose_name='Changed by')),
],
options={
'ordering': ('-change_date',),
'abstract': False,
},
),
]
| 41.928571 | 182 | 0.640545 |
acf42d2010bf77addbe4dd5456903467772ee84f | 1,781 | py | Python | nova/ipv6/rfc2462.py | armaan/nova | 22859fccb95502efcb73ecf2bd827c45c0886bd3 | [
"Apache-2.0"
] | 1 | 2021-11-08T10:11:44.000Z | 2021-11-08T10:11:44.000Z | nova/ipv6/rfc2462.py | armaan/nova | 22859fccb95502efcb73ecf2bd827c45c0886bd3 | [
"Apache-2.0"
] | null | null | null | nova/ipv6/rfc2462.py | armaan/nova | 22859fccb95502efcb73ecf2bd827c45c0886bd3 | [
"Apache-2.0"
] | 1 | 2020-05-10T16:36:03.000Z | 2020-05-10T16:36:03.000Z | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""RFC2462 style IPv6 address generation"""
import netaddr
def to_global(prefix, mac, project_id):
    """Build an RFC 2462-style global IPv6 address for a MAC under *prefix*.

    The MAC is expanded to an EUI-64 interface identifier, the
    universal/local bit is toggled via the ``::0200:0:0:0`` mask, and the
    result is OR-ed with the network portion of *prefix*.

    Raises TypeError when the MAC or the prefix cannot be parsed.
    (``project_id`` is accepted for interface compatibility and unused.)
    """
    try:
        eui64_words = netaddr.EUI(mac).eui64().words
        hex_digits = ''.join('%02x' % word for word in eui64_words)
        interface_id = netaddr.IPAddress(int(hex_digits, 16))
        network = netaddr.IPNetwork(prefix).ip
        # ^ binds tighter than |, so the U/L bit flip happens first.
        flipped = interface_id ^ netaddr.IPAddress('::0200:0:0:0')
        return (flipped | network).format()
    except netaddr.AddrFormatError:
        raise TypeError(_('Bad mac for to_global_ipv6: %s') % mac)
    except TypeError:
        raise TypeError(_('Bad prefix for to_global_ipv6: %s') % prefix)
def to_mac(ipv6_address):
    """Recover the 48-bit MAC embedded in an RFC 2462-style IPv6 address."""
    addr = netaddr.IPAddress(ipv6_address)
    low64_mask = netaddr.IPAddress('::ffff:ffff:ffff:ffff')
    ul_bit_mask = netaddr.IPAddress('::0200:0:0:0')
    # Keep the low 64 bits, undo the universal/local bit flip, then drop
    # the two EUI-64 filler octets (words 3 and 4) to get the MAC octets.
    eui64 = netaddr.EUI(int(addr & low64_mask ^ ul_bit_mask)).words
    octets = eui64[0:3] + eui64[5:8]
    return ':'.join('%02x' % octet for octet in octets)
acf42d35ce370ab0efca55f6cd3065c759bcf85f | 1,333 | py | Python | setup.py | jaaamessszzz/BindingSitesFromFragments | 8f8c1ee6bf5f5783e89f3c4b4eb9d0da0ea1e280 | [
"BSD-3-Clause"
] | 7 | 2020-10-06T07:38:59.000Z | 2022-02-28T11:00:44.000Z | setup.py | jaaamessszzz/BindingSitesFromFragments | 8f8c1ee6bf5f5783e89f3c4b4eb9d0da0ea1e280 | [
"BSD-3-Clause"
] | 2 | 2021-06-16T16:04:01.000Z | 2021-08-06T14:52:47.000Z | setup.py | jaaamessszzz/BindingSitesFromFragments | 8f8c1ee6bf5f5783e89f3c4b4eb9d0da0ea1e280 | [
"BSD-3-Clause"
] | 2 | 2020-04-20T21:30:29.000Z | 2021-12-30T18:15:58.000Z | #!/usr/bin/env python3
# encoding: utf-8
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
with open('README.md') as file:
readme = file.read()
# Setup
setup(
name='BindingSitesFromFragments',
version='0.1',
author='James Lucas',
author_email='james.lucas@berkeley.edu',
description='',
long_description=readme,
url='https://github.com/jaaamessszzz/BindingSitesFromFragments',
keywords=[
'Binding Sites',
'Fragments'
],
classifiers=[
'Intended Audience :: Developers',
'Natural Language :: English',
'Programming Language :: Python :: 3',
],
packages=[
'BindingSitesFromFragments',
],
install_requires=[
'docopt',
'matplotlib',
'numpy',
'decorator',
'pubchempy',
'pypdb',
'pandas',
'biopython',
'seaborn',
'multiprocess',
'networkx',
'pathos',
'prody',
'scipy',
'xmltodict',
'appdirs'
],
entry_points={
'console_scripts': [
'bsff = BindingSitesFromFragments.commands.bsff:main',
'bsff_clean = BindingSitesFromFragments.commands.bsff:main'
],
},
include_package_data=True,
zip_safe=False,
) | 22.59322 | 71 | 0.571643 |
acf42d7357fe086c0c8da1af1f8e4f20c8dc2588 | 3,421 | py | Python | tasks/recall.py | vohoaiviet/NTM-tensorflow | 5376c35e8800157b08af44ddec1cb870d2ef3c83 | [
"MIT"
] | 1,140 | 2015-12-14T16:53:10.000Z | 2022-03-31T09:06:06.000Z | tasks/recall.py | vohoaiviet/NTM-tensorflow | 5376c35e8800157b08af44ddec1cb870d2ef3c83 | [
"MIT"
] | 29 | 2015-12-30T23:14:19.000Z | 2019-08-26T21:19:25.000Z | tasks/recall.py | vohoaiviet/NTM-tensorflow | 5376c35e8800157b08af44ddec1cb870d2ef3c83 | [
"MIT"
] | 275 | 2015-12-13T16:44:13.000Z | 2022-02-24T08:35:47.000Z | import os
import time
import numpy as np
import tensorflow as tf
from random import randint
from ntm import NTM
from utils import pprint
from ntm_cell import NTMCell
print_interval = 5
def run(ntm, seq_length, sess, print_=True):
    """Run the recall task once on a trained NTM and report the loss.

    Feeds a randomly generated item sequence plus start/end control symbols,
    fetches the network outputs, read/write attention weights and the loss.
    When ``print_`` is true the true vs. predicted sequences are printed;
    otherwise ``(seq, outputs, read_ws, write_ws, loss)`` is returned.
    """
    # Control symbols occupy the first two channels of the input vector.
    start_symbol = np.zeros([ntm.cell.input_dim], dtype=np.float32)
    start_symbol[0] = 1
    end_symbol = np.zeros([ntm.cell.input_dim], dtype=np.float32)
    end_symbol[1] = 1
    # NOTE(review): generate_recall_sequence is defined with three
    # parameters below but called with two here -- confirm the intended
    # signature; as written this call raises TypeError.
    seq = generate_recall_sequence(seq_length, ntm.cell.input_dim - 2)
    feed_dict = {input_:vec for vec, input_ in zip(seq, ntm.inputs)}
    feed_dict.update(
        {true_output:vec for vec, true_output in zip(seq, ntm.true_outputs)}
    )
    feed_dict.update({
        ntm.start_symbol: start_symbol,
        ntm.end_symbol: end_symbol
    })
    # NOTE(review): input_states pulls 'write_w' and output_states pulls
    # 'read_w', yet they are unpacked below as read_ws and write_ws
    # respectively -- the names look swapped; verify against the NTM model.
    input_states = [state['write_w'] for state in ntm.input_states[seq_length]]
    output_states = [state['read_w'] for state in ntm.get_output_states(seq_length)]
    result = sess.run(ntm.get_outputs(seq_length) + \
                      input_states + output_states + \
                      [ntm.get_loss(seq_length)],
                      feed_dict=feed_dict)
    # Slice the flat fetch list back into outputs / weights / loss.
    is_sz = len(input_states)
    os_sz = len(output_states)
    outputs = result[:seq_length]
    read_ws = result[seq_length:seq_length + is_sz]
    write_ws = result[seq_length + is_sz:seq_length + is_sz + os_sz]
    loss = result[-1]
    if print_:
        np.set_printoptions(suppress=True)
        print(" true output : ")
        pprint(seq)
        print(" predicted output :")
        pprint(np.round(outputs))
        print(" Loss : %f" % loss)
        np.set_printoptions(suppress=False)
    else:
        return seq, outputs, read_ws, write_ws, loss
def train(ntm, config, sess):
    """Train the NTM on the recall task for ``config.epoch`` iterations.

    A random sequence length in [config.min_length, config.max_length] is
    drawn each step. Checkpoints are written every 100 steps and progress is
    logged every ``print_interval`` steps.

    Raises Exception when ``config.checkpoint_dir`` does not exist.
    """
    if not os.path.isdir(config.checkpoint_dir):
        raise Exception(" [!] Directory %s not found" % config.checkpoint_dir)

    # Bug fix: these vectors were previously assigned to ``delim_symbol`` /
    # ``query_symbol`` while the code below indexed the undefined names
    # ``start_symbol`` / ``end_symbol``, raising NameError. Use the same
    # names as run() so the feed_dict is well-defined.
    start_symbol = np.zeros([config.input_dim], dtype=np.float32)
    start_symbol[0] = 1
    end_symbol = np.zeros([config.input_dim], dtype=np.float32)
    end_symbol[1] = 1

    print(" [*] Initialize all variables")
    tf.initialize_all_variables().run()
    print(" [*] Initialization finished")

    start_time = time.time()
    # ``range`` instead of py2-only ``xrange`` (works on both 2 and 3).
    for idx in range(config.epoch):
        seq_length = randint(config.min_length, config.max_length)
        # NOTE(review): generate_recall_sequence is defined with three
        # parameters but called with two here -- confirm intended signature.
        seq = generate_recall_sequence(seq_length, config.input_dim - 2)
        feed_dict = {input_:vec for vec, input_ in zip(seq, ntm.inputs)}
        feed_dict.update(
            {true_output:vec for vec, true_output in zip(seq, ntm.true_outputs)}
        )
        feed_dict.update({
            ntm.start_symbol: start_symbol,
            ntm.end_symbol: end_symbol
        })
        _, cost, step = sess.run([ntm.optims[seq_length],
                                  ntm.get_loss(seq_length),
                                  ntm.global_step], feed_dict=feed_dict)
        if idx % 100 == 0:
            ntm.save(config.checkpoint_dir, 'recall', step)
        if idx % print_interval == 0:
            print("[%5d] %2d: %.2f (%.1fs)" \
                % (idx, seq_length, cost, time.time() - start_time))
    # Bug fix: the completion message previously said "Copy task" although
    # this module trains the recall task.
    print("Training Recall task finished")
def generate_recall_sequence(num_items, item_length, input_dim):
    """Build ``num_items`` random binary items of shape (item_length, input_dim).

    Each item is a 0/1 matrix; its first two feature columns are zeroed out
    because those channels are reserved for the start/end control symbols
    that callers append.

    NOTE(review): callers in this module invoke this function with only two
    arguments -- confirm the intended signature before relying on it.
    """
    items = []
    # ``range`` instead of py2-only ``xrange`` (works on both 2 and 3).
    for _ in range(num_items):
        item = np.random.rand(item_length, input_dim).round()
        # Equivalent to the original clipped slice item[0:item_length+1, 0:2];
        # zero the reserved control-symbol channels on every row.
        item[:, 0:2] = 0
        items.append(item)
    return items
| 31.971963 | 84 | 0.63081 |
acf42ea76eef0ce0863ab1aea25f439ff8aa217b | 1,760 | py | Python | jdcloud_sdk/services/vm/apis/CreateImageRequest.py | lidaobing/jdcloud-sdk-python | f305e8ddd74ab4ad445477744534e7299d4d93fb | [
"Apache-2.0"
] | null | null | null | jdcloud_sdk/services/vm/apis/CreateImageRequest.py | lidaobing/jdcloud-sdk-python | f305e8ddd74ab4ad445477744534e7299d4d93fb | [
"Apache-2.0"
] | null | null | null | jdcloud_sdk/services/vm/apis/CreateImageRequest.py | lidaobing/jdcloud-sdk-python | f305e8ddd74ab4ad445477744534e7299d4d93fb | [
"Apache-2.0"
] | 1 | 2019-03-01T08:44:37.000Z | 2019-03-01T08:44:37.000Z | # coding=utf8
# Copyright 2018-2025 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class CreateImageRequest(JDCloudRequest):
    """Create a private image from a VM instance.

    Constraints (translated from the original Chinese docstring):
    - The instance must be in the ``stopped`` state.
    - If data disks are attached, snapshots of them are created by default,
      producing a full image.
    - The instance must have no unfinished tasks before an image can be made.
    """

    def __init__(self, parameters, header=None, version="v1"):
        # POST to the instance-scoped createImage action endpoint; path
        # placeholders are filled from the parameters object by the SDK core.
        super(CreateImageRequest, self).__init__(
            '/regions/{regionId}/instances/{instanceId}:createImage', 'POST', header, version)
        self.parameters = parameters
class CreateImageParameters(object):
    """Parameter bundle for a CreateImage request."""

    def __init__(self, regionId, instanceId, name, description, ):
        """
        :param regionId: region ID
        :param instanceId: instance ID
        :param name: image name
        :param description: image description
        """
        self.regionId = regionId
        self.instanceId = instanceId
        self.name = name
        self.description = description
        # Optional data-disk list; left unset until setDataDisks() is called.
        self.dataDisks = None

    def setDataDisks(self, dataDisks):
        """
        :param dataDisks: (Optional) data-disk list; when given, snapshots of
            these disks are created together with the image (at most 4 disks).
        """
        self.dataDisks = dataDisks
acf42ef0da13b8828dc72a0df00673e852048ed3 | 25,403 | py | Python | tests/test_views_rate.py | HiveTechies/django-star-ratings | f1a17031f2dd694342451b4c2216b2afc5d17acd | [
"BSD-3-Clause"
] | 1 | 2021-02-18T10:29:01.000Z | 2021-02-18T10:29:01.000Z | tests/test_views_rate.py | HiveTechies/django-star-ratings | f1a17031f2dd694342451b4c2216b2afc5d17acd | [
"BSD-3-Clause"
] | null | null | null | tests/test_views_rate.py | HiveTechies/django-star-ratings | f1a17031f2dd694342451b4c2216b2afc5d17acd | [
"BSD-3-Clause"
] | null | null | null | from __future__ import unicode_literals
import json
import os
import uuid
import pytest
from random import randint
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from .base import BaseFooTest
try:
from django.core.urlresolvers import reverse
except ImportError:
from django.urls import reverse
from django.test import override_settings, Client, TestCase
from model_mommy import mommy
from star_ratings import get_star_ratings_rating_model
from star_ratings.models import UserRating
class BaseTestViewRate:
    """Shared test cases for the star-ratings ``rate`` view.

    Subclasses must provide ``foo_model`` (the rated model) and a static
    ``get_url(obj, extra='')`` building the rate-view URL. Covers anonymous
    rating, create/update/delete of user ratings, the RERATE /
    RERATE_SAME_DELETE / CLEARABLE settings, and both redirect and AJAX
    (JSON) response paths.
    """
    csrf_checks = False
    client = Client(REMOTE_ADDR='127.0.0.1')
    # Helper: POST JSON to ``url``; ``user=`` logs in first, ``xhr=`` adds the
    # X-Requested-With header so the view takes its AJAX branch. Extra kwargs
    # such as ``expect_errors`` are accepted and ignored.
    def post_json(self, url, data, **kwargs):
        if 'user' in kwargs:
            self.client.login(username=kwargs['user'].username, password='password')
        if 'xhr' in kwargs:
            return self.client.post(url, json.dumps(data), content_type='application/json', HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        return self.client.post(url, json.dumps(data), content_type='application/json')
    # Helper: create a user whose password matches the login in post_json.
    def get_user(self, username='username'):
        return get_user_model().objects.create_user(
            username=username,
            first_name='first',
            last_name='last',
            email='example@example.com',
            password='password'
        )
    @override_settings(STAR_RATINGS_ANONYMOUS=False)
    def test_view_is_called_when_nobody_is_logged_in_and_anon_ratings_is_false___user_is_forwarded_to_login(self):
        foo = mommy.make(self.foo_model)
        ratings = get_star_ratings_rating_model().objects.for_instance(foo)
        url = self.get_url(obj=ratings)
        response = self.post_json(url, {'score': 1})
        self.assertRedirects(response, settings.LOGIN_URL + '?next=' + url, fetch_redirect_response=False)
    @override_settings(STAR_RATINGS_ANONYMOUS=True)
    def test_view_is_called_when_nobody_is_logged_in_and_anon_ratings_is_true___rating_is_created(self):
        foo = mommy.make(self.foo_model)
        ratings = get_star_ratings_rating_model().objects.for_instance(foo)
        score = randint(1, 5)
        url = self.get_url(obj=ratings)
        self.post_json(url, {'score': score})
        ct = ContentType.objects.get_for_model(foo)
        # Anonymous ratings are keyed by the client IP set on ``client`` above.
        self.assertTrue(UserRating.objects.filter(rating__object_id=foo.pk, rating__content_type=ct, score=score, ip='127.0.0.1').exists())
    def test_user_is_logged_in_and_doesnt_already_have_a_rating___rating_is_created(self):
        user = self.get_user()
        foo = mommy.make(self.foo_model)
        ratings = get_star_ratings_rating_model().objects.for_instance(foo)
        score = randint(1, 5)
        url = self.get_url(obj=ratings)
        self.post_json(url, {'score': score}, user=user)
        ct = ContentType.objects.get_for_model(foo)
        self.assertTrue(UserRating.objects.filter(user=user, rating__object_id=foo.pk, rating__content_type=ct, score=score).exists())
    def test_user_is_logged_in_and_doesnt_already_have_a_rating_no_next_url_is_given___redirected_to_root(self):
        user = self.get_user()
        foo = mommy.make(self.foo_model)
        ratings = get_star_ratings_rating_model().objects.for_instance(foo)
        score = randint(1, 5)
        url = self.get_url(obj=ratings)
        response = self.post_json(url, {'score': score}, user=user)
        self.assertRedirects(response, '/', fetch_redirect_response=False)
    def test_user_is_logged_in_and_doesnt_already_have_a_rating_next_url_is_given___redirected_to_next(self):
        user = self.get_user()
        foo = mommy.make(self.foo_model)
        ratings = get_star_ratings_rating_model().objects.for_instance(foo)
        score = randint(1, 5)
        url = self.get_url(obj=ratings)
        response = self.post_json(url, {'score': score, 'next': '/foo/bar'}, user=user)
        self.assertRedirects(response, '/foo/bar', fetch_redirect_response=False)
    def test_user_is_logged_in_and_doesnt_already_have_a_rating_request_is_ajax___rating_is_created(self):
        user = self.get_user()
        foo = mommy.make(self.foo_model)
        ratings = get_star_ratings_rating_model().objects.for_instance(foo)
        score = randint(1, 5)
        url = self.get_url(obj=ratings)
        self.post_json(url, {'score': score}, user=user, xhr=True)
        ct = ContentType.objects.get_for_model(foo)
        self.assertTrue(UserRating.objects.filter(user=user, rating__object_id=foo.pk, rating__content_type=ct, score=score).exists())
    def test_user_is_logged_in_and_doesnt_already_have_a_rating_request_is_ajax___response_is_updated_aggregate_data(self):
        user = self.get_user()
        foo = mommy.make(self.foo_model)
        ratings = get_star_ratings_rating_model().objects.for_instance(foo)
        score = randint(1, 5)
        url = self.get_url(obj=ratings)
        response = self.post_json(
            url, {'score': score}, user=user, xhr=True)
        ratings = get_star_ratings_rating_model().objects.get(pk=ratings.pk)
        expected = ratings.to_dict()
        expected['user_rating'] = score
        expected['percentage'] = float(expected['percentage'])
        # response.json() only exists on newer Django test clients; fall back
        # to decoding the raw content on older versions.
        try:
            json_resp = response.json()
        except AttributeError:
            json_resp = json.loads(response.content.decode())
        self.assertEqual(expected, json_resp)
    @override_settings(STAR_RATINGS_RERATE=True)
    def test_user_is_logged_in_already_has_a_rating_rerate_is_true___rating_is_updated(self):
        user = self.get_user()
        foo = mommy.make(self.foo_model)
        ratings = get_star_ratings_rating_model().objects.for_instance(foo)
        rating = mommy.make(UserRating, rating=ratings, score=1, user=user)
        score = randint(2, 5)
        url = self.get_url(obj=ratings)
        self.post_json(url, {'score': score}, user=user)
        rating = UserRating.objects.get(pk=rating.pk)
        self.assertEqual(score, rating.score)
    @override_settings(STAR_RATINGS_RERATE=True)
    def test_user_is_logged_in_already_has_a_rating_rerate_is_true___redirected_to_root(self):
        user = self.get_user()
        foo = mommy.make(self.foo_model)
        ratings = get_star_ratings_rating_model().objects.for_instance(foo)
        mommy.make(UserRating, rating=ratings, score=1, user=user)
        score = randint(2, 5)
        url = self.get_url(obj=ratings)
        response = self.post_json(url, {'score': score}, user=user)
        self.assertRedirects(response, '/', fetch_redirect_response=False)
    @override_settings(STAR_RATINGS_RERATE=True)
    def test_user_is_logged_in_already_has_a_rating_rerate_is_true___redirected_to_next(self):
        user = self.get_user()
        foo = mommy.make(self.foo_model)
        ratings = get_star_ratings_rating_model().objects.for_instance(foo)
        mommy.make(UserRating, rating=ratings, score=1, user=user)
        score = randint(2, 5)
        url = self.get_url(obj=ratings)
        response = self.post_json(url, {'score': score, 'next': '/foo/bar'}, user=user)
        self.assertRedirects(response, '/foo/bar', fetch_redirect_response=False)
    @override_settings(STAR_RATINGS_RERATE=True)
    def test_user_is_logged_in_already_has_a_rating_rerate_is_true_request_is_ajax___rating_is_updated(self):
        user = self.get_user()
        foo = mommy.make(self.foo_model)
        ratings = get_star_ratings_rating_model().objects.for_instance(foo)
        rating = mommy.make(UserRating, rating=ratings, score=1, user=user)
        score = randint(2, 5)
        url = self.get_url(obj=ratings)
        self.post_json(url, {'score': score}, user=user, xhr=True)
        rating = UserRating.objects.get(pk=rating.pk)
        self.assertEqual(score, rating.score)
    @override_settings(STAR_RATINGS_RERATE=True)
    def test_user_is_logged_in_already_has_a_rating_rerate_is_true_request_is_ajax___response_is_updated_aggregate_data(self):
        user = self.get_user()
        foo = mommy.make(self.foo_model)
        ratings = get_star_ratings_rating_model().objects.for_instance(foo)
        mommy.make(UserRating, rating=ratings, score=1, user=user)
        score = randint(2, 5)
        url = self.get_url(obj=ratings)
        response = self.post_json(url, {'score': score}, user=user, xhr=True)
        ratings = get_star_ratings_rating_model().objects.get(pk=ratings.pk)
        expected = ratings.to_dict()
        expected['percentage'] = float(expected['percentage'])
        expected['user_rating'] = score
        try:
            json_resp = response.json()
        except AttributeError:
            json_resp = json.loads(response.content.decode())
        self.assertEqual(expected, json_resp)
    @override_settings(STAR_RATINGS_RERATE=False)
    def test_user_is_logged_in_already_has_a_rating_rerate_is_false___rating_is_not_changed(self):
        user = self.get_user()
        foo = mommy.make(self.foo_model)
        ratings = get_star_ratings_rating_model().objects.for_instance(foo)
        rating = mommy.make(UserRating, rating=ratings, score=1, user=user)
        orig_score = rating.score
        score = randint(2, 5)
        url = self.get_url(obj=ratings)
        self.post_json(url, {'score': score}, user=user)
        rating = UserRating.objects.get(pk=rating.pk)
        self.assertEqual(orig_score, rating.score)
    @override_settings(STAR_RATINGS_RERATE=False)
    def test_user_is_logged_in_already_has_a_rating_rerate_is_false___redirected_to_next(self):
        user = self.get_user()
        foo = mommy.make(self.foo_model)
        ratings = get_star_ratings_rating_model().objects.for_instance(foo)
        mommy.make(UserRating, rating=ratings, score=1, user=user)
        score = randint(2, 5)
        url = self.get_url(obj=ratings)
        response = self.post_json(url, {'score': score, 'next': '/foo/bar'}, user=user)
        self.assertRedirects(response, '/foo/bar', fetch_redirect_response=False)
    @override_settings(STAR_RATINGS_RERATE=False)
    def test_user_is_logged_in_already_has_a_rating_rerate_is_false_request_is_ajax___rating_is_not_changed(self):
        user = self.get_user()
        foo = mommy.make(self.foo_model)
        ratings = get_star_ratings_rating_model().objects.for_instance(foo)
        rating = mommy.make(UserRating, rating=ratings, score=1, user=user)
        orig_score = rating.score
        score = randint(2, 5)
        url = self.get_url(obj=ratings)
        self.post_json(url, {'score': score}, user=user, xhr=True, expect_errors=True)
        rating = UserRating.objects.get(pk=rating.pk)
        self.assertEqual(orig_score, rating.score)
    @override_settings(STAR_RATINGS_RERATE=False)
    def test_user_is_logged_in_already_has_a_rating_rerate_is_false_reqest_is_ajax___response_is_400(self):
        user = self.get_user()
        foo = mommy.make(self.foo_model)
        ratings = get_star_ratings_rating_model().objects.for_instance(foo)
        mommy.make(UserRating, rating=ratings, score=1, user=user)
        score = randint(2, 5)
        url = self.get_url(obj=ratings, extra='?next=/foo/bar')
        response = self.post_json(url, {'score': score}, user=user, xhr=True, expect_errors=True)
        self.assertEqual(400, response.status_code)
    # NOTE(review): the methods below build URLs with reverse() directly
    # instead of self.get_url(); this only works while the subclass URL
    # pattern matches the default -- consider unifying on get_url().
    @override_settings(STAR_RATINGS_RERATE_SAME_DELETE=True)
    def test_user_is_logged_in_already_has_a_rating_rerate_is_delete_score_same__rating_deleted(self):
        user = self.get_user()
        foo = mommy.make(self.foo_model)
        ratings = get_star_ratings_rating_model().objects.for_instance(foo)
        rating = mommy.make(UserRating, rating=ratings, score=1, user=user)
        orig_score = rating.score
        url = reverse('ratings:rate', args=(ratings.content_type_id, ratings.object_id))
        self.post_json(url, {'score': orig_score}, user=user)
        with self.assertRaises(UserRating.DoesNotExist):
            UserRating.objects.get(pk=rating.pk)
    @override_settings(STAR_RATINGS_RERATE_SAME_DELETE=True)
    def test_user_is_logged_in_already_has_a_rating_rerate_is_delete_score_same__redirected_to_next(self):
        user = self.get_user()
        foo = mommy.make(self.foo_model)
        ratings = get_star_ratings_rating_model().objects.for_instance(foo)
        rating = mommy.make(UserRating, rating=ratings, score=1, user=user)
        url = reverse('ratings:rate', args=(ratings.content_type_id, ratings.object_id))
        response = self.post_json(url, {'score': rating.score, 'next': '/foo/bar'}, user=user)
        self.assertRedirects(response, '/foo/bar', fetch_redirect_response=False)
    @override_settings(STAR_RATINGS_RERATE_SAME_DELETE=True)
    def test_user_is_logged_in_already_has_a_rating_rerate_is_delete_request_is_ajax_score_same__rating_deleted(self):
        user = self.get_user()
        foo = mommy.make(self.foo_model)
        ratings = get_star_ratings_rating_model().objects.for_instance(foo)
        rating = mommy.make(UserRating, rating=ratings, score=1, user=user)
        orig_score = rating.score
        url = reverse('ratings:rate', args=(ratings.content_type_id, ratings.object_id))
        self.post_json(url, {'score': orig_score}, user=user, xhr=True, expect_errors=True)
        with self.assertRaises(UserRating.DoesNotExist):
            UserRating.objects.get(pk=rating.pk)
    @override_settings(STAR_RATINGS_RERATE_SAME_DELETE=True)
    def test_user_is_logged_in_already_has_a_rating_rerate_is_delete_reqest_is_ajax_score_same__response_is_200(self):
        user = self.get_user()
        foo = mommy.make(self.foo_model)
        ratings = get_star_ratings_rating_model().objects.for_instance(foo)
        rating = mommy.make(UserRating, rating=ratings, score=1, user=user)
        url = reverse('ratings:rate', args=(ratings.content_type_id, ratings.object_id)) + '?next=/foo/bar'
        response = self.post_json(url, {'score': rating.score}, user=user, xhr=True, expect_errors=True)
        self.assertEqual(200, response.status_code)
    @override_settings(STAR_RATINGS_RERATE_SAME_DELETE=True)
    def test_user_is_logged_in_already_has_a_rating_rerate_is_delete_reqest_is_ajax_score_same__response_empty(self):
        user = self.get_user()
        foo = mommy.make(self.foo_model)
        ratings = get_star_ratings_rating_model().objects.for_instance(foo)
        rating = mommy.make(UserRating, rating=ratings, score=1, user=user)
        # expecting it to be removed
        expected = {'average': 0.0, 'count': 0, 'percentage': 0.0, 'total': 0, 'user_rating': None}
        url = reverse('ratings:rate', args=(ratings.content_type_id, ratings.object_id)) + '?next=/foo/bar'
        response = self.post_json(url, {'score': rating.score}, user=user, xhr=True, expect_errors=True)
        try:
            json_resp = response.json()
        except AttributeError:
            json_resp = json.loads(response.content.decode())
        self.assertEqual(expected, json_resp)
    @override_settings(STAR_RATINGS_RERATE_SAME_DELETE=True)
    def test_user_is_logged_in_already_has_a_rating_rerate_is_delete_reqest_is_ajax_score_same__response_updated(self):
        user = self.get_user()
        other_user = self.get_user(username='other')
        foo = mommy.make(self.foo_model)
        ratings = get_star_ratings_rating_model().objects.for_instance(foo)
        rating = mommy.make(UserRating, rating=ratings, score=1, user=user)
        mommy.make(UserRating, rating=ratings, score=2, user=other_user)
        # expecting it to be removed
        expected = {'average': 2.0, 'count': 1, 'percentage': 40.0, 'total': 2, 'user_rating': None}
        url = reverse('ratings:rate', args=(ratings.content_type_id, ratings.object_id)) + '?next=/foo/bar'
        response = self.post_json(url, {'score': rating.score}, user=user, xhr=True, expect_errors=True)
        try:
            json_resp = response.json()
        except AttributeError:
            json_resp = json.loads(response.content.decode())
        self.assertEqual(expected, json_resp)
    @override_settings(STAR_RATINGS_RERATE_SAME_DELETE=True)
    def test_user_is_logged_in_already_has_a_rating_rerate_is_delete_score_diff__ratingchanged(self):
        user = self.get_user()
        foo = mommy.make(self.foo_model)
        ratings = get_star_ratings_rating_model().objects.for_instance(foo)
        rating = mommy.make(UserRating, rating=ratings, score=1, user=user)
        score = randint(2, 5)
        url = reverse('ratings:rate', args=(ratings.content_type_id, ratings.object_id))
        self.post_json(url, {'score': score}, user=user)
        rating = UserRating.objects.get(pk=rating.pk)
        self.assertEqual(score, rating.score)
    @override_settings(STAR_RATINGS_RERATE_SAME_DELETE=True)
    def test_user_is_logged_in_already_has_a_rating_rerate_is_delete_score_diff__redirected_to_next(self):
        user = self.get_user()
        foo = mommy.make(self.foo_model)
        ratings = get_star_ratings_rating_model().objects.for_instance(foo)
        mommy.make(UserRating, rating=ratings, score=1, user=user)
        score = randint(2, 5)
        url = reverse('ratings:rate', args=(ratings.content_type_id, ratings.object_id))
        response = self.post_json(url, {'score': score, 'next': '/foo/bar'}, user=user)
        self.assertRedirects(response, '/foo/bar', fetch_redirect_response=False)
    @override_settings(STAR_RATINGS_RERATE_SAME_DELETE=True)
    def test_user_is_logged_in_already_has_a_rating_rerate_is_delete_score_diff__rating_changed(self):
        user = self.get_user()
        foo = mommy.make(self.foo_model)
        ratings = get_star_ratings_rating_model().objects.for_instance(foo)
        rating = mommy.make(UserRating, rating=ratings, score=1, user=user)
        score = randint(2, 5)
        url = reverse('ratings:rate', args=(ratings.content_type_id, ratings.object_id))
        self.post_json(url, {'score': score}, user=user, xhr=True, expect_errors=True)
        rating = UserRating.objects.get(pk=rating.pk)
        self.assertEqual(score, rating.score)
    @override_settings(STAR_RATINGS_RERATE_SAME_DELETE=True)
    def test_user_is_logged_in_already_has_a_rating_rerate_is_delete_score_diff__response_is_200(self):
        user = self.get_user()
        foo = mommy.make(self.foo_model)
        ratings = get_star_ratings_rating_model().objects.for_instance(foo)
        mommy.make(UserRating, rating=ratings, score=1, user=user)
        score = randint(2, 5)
        url = reverse('ratings:rate', args=(ratings.content_type_id, ratings.object_id)) + '?next=/foo/bar'
        response = self.post_json(url, {'score': score}, user=user, xhr=True, expect_errors=True)
        self.assertEqual(200, response.status_code)
    @override_settings(STAR_RATINGS_CLEARABLE=True)
    def test_user_is_logged_in_already_has_a_rating__clearable__rating_deleted(self):
        user = self.get_user()
        foo = mommy.make(self.foo_model)
        ratings = get_star_ratings_rating_model().objects.for_instance(foo)
        rating = mommy.make(UserRating, rating=ratings, score=1, user=user)
        url = reverse('ratings:rate', args=(ratings.content_type_id, ratings.object_id))
        self.post_json(url, {'clear': 1}, user=user)
        with self.assertRaises(UserRating.DoesNotExist):
            UserRating.objects.get(pk=rating.pk)
    @override_settings(STAR_RATINGS_CLEARABLE=True)
    def test_user_is_logged_in_already_has_a_rating__clearable__redirected_to_next(self):
        user = self.get_user()
        foo = mommy.make(self.foo_model)
        ratings = get_star_ratings_rating_model().objects.for_instance(foo)
        mommy.make(UserRating, rating=ratings, score=1, user=user)
        url = reverse('ratings:rate', args=(ratings.content_type_id, ratings.object_id))
        response = self.post_json(url, {'clear': 1, 'next': '/foo/bar'}, user=user)
        self.assertRedirects(response, '/foo/bar', fetch_redirect_response=False)
    @override_settings(STAR_RATINGS_CLEARABLE=True)
    def test_user_is_logged_in_already_has_a_rating__clearable__request_is_ajax__rating_deleted(self):
        user = self.get_user()
        foo = mommy.make(self.foo_model)
        ratings = get_star_ratings_rating_model().objects.for_instance(foo)
        rating = mommy.make(UserRating, rating=ratings, score=1, user=user)
        url = reverse('ratings:rate', args=(ratings.content_type_id, ratings.object_id))
        self.post_json(url, {'clear': 1}, user=user, xhr=True, expect_errors=True)
        with self.assertRaises(UserRating.DoesNotExist):
            UserRating.objects.get(pk=rating.pk)
    @override_settings(STAR_RATINGS_CLEARABLE=True)
    def test_user_is_logged_in_already_has_a_rating__clearable_request_is_ajax__response_is_200(self):
        user = self.get_user()
        foo = mommy.make(self.foo_model)
        ratings = get_star_ratings_rating_model().objects.for_instance(foo)
        mommy.make(UserRating, rating=ratings, score=1, user=user)
        url = reverse('ratings:rate', args=(ratings.content_type_id, ratings.object_id)) + '?next=/foo/bar'
        response = self.post_json(url, {'clear': 1}, user=user, xhr=True, expect_errors=True)
        self.assertEqual(200, response.status_code)
    @override_settings(STAR_RATINGS_CLEARABLE=True)
    def test_user_is_logged_in_already_has_a_rating__clearable_request_is_ajax__response_empty(self):
        user = self.get_user()
        foo = mommy.make(self.foo_model)
        ratings = get_star_ratings_rating_model().objects.for_instance(foo)
        mommy.make(UserRating, rating=ratings, score=1, user=user)
        # expecting it to be removed
        expected = {'average': 0.0, 'count': 0, 'percentage': 0.0, 'total': 0, 'user_rating': None}
        url = reverse('ratings:rate', args=(ratings.content_type_id, ratings.object_id)) + '?next=/foo/bar'
        response = self.post_json(url, {'clear': 1}, user=user, xhr=True, expect_errors=True)
        try:
            json_resp = response.json()
        except AttributeError:
            json_resp = json.loads(response.content.decode())
        self.assertEqual(expected, json_resp)
    @override_settings(STAR_RATINGS_CLEARABLE=False)
    def test_user_is_logged_in_already_has_a_rating__clearable__disabled__rating_not_deleted(self):
        user = self.get_user()
        foo = mommy.make(self.foo_model)
        ratings = get_star_ratings_rating_model().objects.for_instance(foo)
        rating = mommy.make(UserRating, rating=ratings, score=1, user=user)
        url = reverse('ratings:rate', args=(ratings.content_type_id, ratings.object_id))
        self.post_json(url, {'clear': 1}, user=user)
        self.assertEqual(UserRating.objects.filter(pk=rating.pk).count(), 1)
@pytest.mark.skipif(os.environ.get('USE_CUSTOM_MODEL', 'false') == 'true', reason='Only run without swapped model.')
@pytest.mark.django_db
class TestViewRateWithStandardURLPattern(BaseTestViewRate, BaseFooTest, TestCase):
    """
    Run TestViewRate with standard URL/no model change.
    """
    # Bug fix: the previous setUp() override only called super().setUp() and
    # then performed the no-op self-assignment ``self.foo_model =
    # self.foo_model``; removing the override leaves behavior unchanged
    # (the inherited setUp still runs via the MRO).

    @staticmethod
    def get_url(obj, extra=''):
        # Standard pattern: integer object IDs are used directly in the URL.
        return reverse('ratings:rate', args=(obj.content_type_id, obj.object_id)) + extra

    def test_url__correct(self):
        foo = mommy.make(self.foo_model)
        ratings = get_star_ratings_rating_model().objects.for_instance(foo)
        self.assertIsInstance(ratings.object_id, int)
        self.assertEqual(
            self.get_url(ratings),
            '/ratings/{}/{}/'.format(ratings.content_type_id, ratings.object_id)
        )

    def test_url_with_extra__correct(self):
        foo = mommy.make(self.foo_model)
        ratings = get_star_ratings_rating_model().objects.for_instance(foo)
        self.assertIsInstance(ratings.object_id, int)
        self.assertEqual(
            self.get_url(ratings, extra='?123'),
            '/ratings/{}/{}/?123'.format(ratings.content_type_id, ratings.object_id)
        )
@pytest.mark.skipif(os.environ.get('USE_CUSTOM_MODEL', 'false') == 'false', reason='Only run when with swapped model.')
@pytest.mark.django_db
@override_settings(STAR_RATINGS_OBJECT_ID_PATTERN='[0-9a-f-]+')
class TestViewRateWithCustomURLPattern(BaseTestViewRate, BaseFooTest, TestCase):
    """
    Run TestViewRate with swapped URL/model change.
    Handles the change in URL.
    """
    @staticmethod
    def get_url(obj, extra=''):
        # UUID object IDs must be stringified before being reversed into the URL.
        return reverse('ratings:rate', args=(obj.content_type_id, str(obj.object_id))) + extra
    def test_url__correct(self):
        foo = mommy.make(self.foo_model)
        ratings = get_star_ratings_rating_model().objects.for_instance(foo)
        self.assertIsInstance(ratings.object_id, uuid.UUID)
        self.assertEqual(
            self.get_url(ratings),
            '/ratings/{}/{}/'.format(ratings.content_type_id, ratings.object_id)
        )
    def test_url_with_extra__correct(self):
        foo = mommy.make(self.foo_model)
        ratings = get_star_ratings_rating_model().objects.for_instance(foo)
        self.assertIsInstance(ratings.object_id, uuid.UUID)
        self.assertEqual(
            self.get_url(ratings, extra='?123'),
            '/ratings/{}/{}/?123'.format(ratings.content_type_id, ratings.object_id)
        )
| 42.910473 | 139 | 0.703618 |
acf42f751f7affedf22942d267dedd8643461996 | 1,378 | py | Python | models/actions.py | k4t0mono/hashtag_analysis | b7386691911467e503b246398774c795bb286440 | [
"BSD-2-Clause"
] | null | null | null | models/actions.py | k4t0mono/hashtag_analysis | b7386691911467e503b246398774c795bb286440 | [
"BSD-2-Clause"
] | null | null | null | models/actions.py | k4t0mono/hashtag_analysis | b7386691911467e503b246398774c795bb286440 | [
"BSD-2-Clause"
] | null | null | null | from sqlalchemy import Column, ForeignKey, Integer, String, DateTime
from sqlalchemy.orm import relationship
from get_tweets import Base
from .User import User
from .Tweet import Tweet
class Retweet(Base):
__tablename__ = 'retweet'
id = Column(String(128), primary_key=True)
created_at = Column(DateTime, nullable=False)
original_tweet_id = Column(String(128), ForeignKey('tweet.id'))
original_tweet = relationship(Tweet, foreign_keys=[original_tweet_id])
user_id = Column(String(128), ForeignKey('user.id'))
user = relationship(User)
class Mention(Base):
__tablename__ = 'mention'
tweet_id = Column(String(128), ForeignKey('tweet.id'), primary_key=True)
tweet = relationship(Tweet)
user_id = Column(String(128), ForeignKey('user.id'), primary_key=True)
user = relationship(User)
class Reply(Base):
__tablename__ = 'reply'
reply_id = Column(String(128), ForeignKey('tweet.id'), primary_key=True)
reply = relationship(Tweet, foreign_keys=[reply_id])
replyee_id = Column(String(128), ForeignKey('tweet.id'), primary_key=True)
replyee = relationship(Tweet, foreign_keys=[replyee_id])
class Hashtag(Base):
__tablename__ = 'hashtags'
tweet_id = Column(String(128), ForeignKey('tweet.id'), primary_key=True)
tweet = relationship(Tweet)
hashtag = Column(String(140), primary_key=True)
| 29.319149 | 78 | 0.722061 |
acf4301317951f72cce74f08857857f573e20285 | 1,030 | py | Python | project_euler/problem_20/sol1.py | MKiperszmid/Python | 6b368e6ab2fa1a839b029fd45e127521bbe76005 | [
"MIT"
] | 1 | 2020-08-28T18:25:45.000Z | 2020-08-28T18:25:45.000Z | project_euler/problem_20/sol1.py | MKiperszmid/Python | 6b368e6ab2fa1a839b029fd45e127521bbe76005 | [
"MIT"
] | 1 | 2020-08-28T18:24:31.000Z | 2020-08-28T19:35:47.000Z | project_euler/problem_20/sol1.py | MKiperszmid/Python | 6b368e6ab2fa1a839b029fd45e127521bbe76005 | [
"MIT"
] | null | null | null | """
n! means n ร (n โ 1) ร ... ร 3 ร 2 ร 1
For example, 10! = 10 ร 9 ร ... ร 3 ร 2 ร 1 = 3628800,
and the sum of the digits in the number 10! is 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
Find the sum of the digits in the number 100!
"""
def factorial(n):
fact = 1
for i in range(1, n + 1):
fact *= i
return fact
def split_and_add(number):
"""Split number digits and add them."""
sum_of_digits = 0
while number > 0:
last_digit = number % 10
sum_of_digits += last_digit
number = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def solution(n):
"""Returns the sum of the digits in the number 100!
>>> solution(100)
648
>>> solution(50)
216
>>> solution(10)
27
>>> solution(5)
3
>>> solution(3)
6
>>> solution(2)
2
>>> solution(1)
1
"""
f = factorial(n)
return split_and_add(f)
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 20.196078 | 78 | 0.552427 |
acf4309ff4ee23908a5d44126b9f642da83a9477 | 3,680 | py | Python | estimagic/tests/dashboard/test_monitoring_app.py | SofiaBadini/estimagic | ff4948dc4175cd690b3a021969c6119a6a619f96 | [
"BSD-3-Clause"
] | null | null | null | estimagic/tests/dashboard/test_monitoring_app.py | SofiaBadini/estimagic | ff4948dc4175cd690b3a021969c6119a6a619f96 | [
"BSD-3-Clause"
] | null | null | null | estimagic/tests/dashboard/test_monitoring_app.py | SofiaBadini/estimagic | ff4948dc4175cd690b3a021969c6119a6a619f96 | [
"BSD-3-Clause"
] | null | null | null | """Test the functions of the monitoring app."""
import webbrowser
from pathlib import Path
import pandas as pd
import pytest
from bokeh.document import Document
from bokeh.io import output_file
from bokeh.io import save
from bokeh.models import ColumnDataSource
import estimagic.dashboard.monitoring_app as monitoring
from estimagic.logging.create_database import load_database
@pytest.fixture()
def database():
database_name = "db1.db"
current_dir_path = Path(__file__).resolve().parent
database_path = current_dir_path / database_name
database = load_database(database_path)
return database
def test_monitoring_app():
"""Integration test that no Error is raised when calling the monitoring app."""
doc = Document()
database_name = "test_db"
current_dir_path = Path(__file__).resolve().parent
session_data = {"last_retrieved": 0, "database_path": current_dir_path / "db1.db"}
monitoring.monitoring_app(
doc=doc, database_name=database_name, session_data=session_data
)
def test_create_bokeh_data_sources(database):
tables = ["criterion_history", "params_history"]
criterion_history, params_history = monitoring._create_bokeh_data_sources(
database=database, tables=tables
)
assert criterion_history.data == {"iteration": [1], "value": [426.5586492569206]}
assert params_history.data == {
"iteration": [1],
"beta_pared": [0.47738201898674737],
"beta_public": [0.22650218067445926],
"beta_gpa": [-0.46745804687921866],
"cutoff_0": [0.0],
"cutoff_1": [2.0],
}
# skip test create_initial_convergence_plots
def test_plot_time_series_with_large_initial_values():
cds = ColumnDataSource({"y": [2e17, 1e16, 1e5], "x": [1, 2, 3]})
title = "Are large initial values shown?"
fig = monitoring._plot_time_series(data=cds, y_keys=["y"], x_name="x", title=title)
title = "Test _plot_time_series can handle large initial values."
output_file("time_series_initial_value.html", title=title)
path = save(obj=fig)
webbrowser.open_new_tab("file://" + path)
def test_map_groups_to_params_group_none():
params = pd.DataFrame()
params["value"] = [0, 1, 2, 3]
params["group"] = None
params["name"] = ["a", "b", "c", "d"]
params.index = ["a", "b", "c", "d"]
expected = {}
res = monitoring._map_groups_to_params(params)
assert expected == res
def test_map_groups_to_params_group_not_none():
params = pd.DataFrame()
params["value"] = [0, 1, 2, 3]
params["group"] = [None, "A", "B", "B"]
params.index = ["a", "b", "c", "d"]
params["name"] = ["a", "b", "c", "d"]
expected = {"A": ["b"], "B": ["c", "d"]}
res = monitoring._map_groups_to_params(params)
assert expected == res
def test_map_groups_to_params_group_int_index():
params = pd.DataFrame()
params["value"] = [0, 1, 2, 3]
params.index = ["0", "1", "2", "3"]
params["name"] = ["0", "1", "2", "3"]
params["group"] = [None, "A", "B", "B"]
expected = {"A": ["1"], "B": ["2", "3"]}
res = monitoring._map_groups_to_params(params)
assert expected == res
def test_map_groups_to_params_group_multi_index():
params = pd.DataFrame()
params["value"] = [0, 1, 2, 3]
params["group"] = [None, "A", "B", "B"]
params["ind1"] = ["beta", "beta", "cutoff", "cutoff"]
params["ind2"] = ["edu", "exp", 1, 2]
params.set_index(["ind1", "ind2"], inplace=True)
params["name"] = ["beta_edu", "beta_exp", "cutoff_1", "cutoff_2"]
expected = {"A": ["beta_exp"], "B": ["cutoff_1", "cutoff_2"]}
res = monitoring._map_groups_to_params(params)
assert expected == res
| 33.454545 | 87 | 0.651359 |
acf430a2ecf8e8fef2d15f77c09aa47a71c4a5d9 | 1,299 | py | Python | gensm_ex1.py | MuAuan/RaspberryPi4_conversation | 8a1f3817df1d6cbb0167791418078e012a437c78 | [
"MIT"
] | null | null | null | gensm_ex1.py | MuAuan/RaspberryPi4_conversation | 8a1f3817df1d6cbb0167791418078e012a437c78 | [
"MIT"
] | 7 | 2020-02-11T13:57:50.000Z | 2020-03-02T23:26:21.000Z | gensm_ex1.py | MuAuan/RaspberryPi4_conversation | 8a1f3817df1d6cbb0167791418078e012a437c78 | [
"MIT"
] | null | null | null | from gensim import models
def sample():
sentence = models.doc2vec.TaggedDocument(words=[u'็ฌ', u'ไปๆฅ', u'ๅ ใใ'], tags=["SENT_0"])
sentence1 = models.doc2vec.TaggedDocument(words=[u'็ซ', u'ๆๆฅ', u'ๅ ใใ'], tags=["SENT_1"])
sentence2 = models.doc2vec.TaggedDocument(words=[u'ไป', u'็ซ', u'้ญ'], tags=["SENT_2"])
sentence3 = models.doc2vec.TaggedDocument(words=[u'้ญ', u'ๆณณใ', u'ๆตท'], tags=["SENT_3"])
sentences = [sentence, sentence1, sentence2, sentence3]
model = models.Doc2Vec(sentences, dm=0, vector_size=300, window=15, alpha=.025, min_alpha=.025, min_count=1, sample=1e-6)
print('\n่จ็ทด้ๅง')
for epoch in range(20):
print('Epoch: {}'.format(epoch + 1))
model.train(sentences, epochs=model.iter, total_examples=model.corpus_count)
model.alpha -= (0.025 - 0.0001) / 19
model.min_alpha = model.alpha
model.save("my_model.doc2vec")
model_loaded = models.Doc2Vec.load('my_model.doc2vec')
# ใใๆๆธใซไผผใฆใใๆๆธใ่กจ็คบ
print ("SENT_0")
print (model.docvecs.most_similar(["SENT_0"]) )
print ("SENT_3")
print (model.docvecs.most_similar(["SENT_3"]) )
print ("SENT_1")
print (model_loaded.docvecs.most_similar(["SENT_1"]) )
# ใใๅ่ชใซ้กไผผใใๅ่ชใๅๅพ
print (model.similar_by_word(u"้ญ"))
if __name__ == '__main__':
sample() | 36.083333 | 125 | 0.655119 |
acf431e3a0fedd13b8cc1c196eb7adf553ecc13c | 45,772 | py | Python | randomizer/handlers/base.py | soulweaver91/pokemon-gc-randomizer | dd65af90ec214986485887084b2d85191aa41db7 | [
"MIT"
] | null | null | null | randomizer/handlers/base.py | soulweaver91/pokemon-gc-randomizer | dd65af90ec214986485887084b2d85191aa41db7 | [
"MIT"
] | null | null | null | randomizer/handlers/base.py | soulweaver91/pokemon-gc-randomizer | dd65af90ec214986485887084b2d85191aa41db7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import datetime
import logging
import math
import os
import random
from struct import unpack, pack
from randomizer import config
from randomizer.constants import BANNER_META_SIZE, BANNER_META_FIRST_OFFSET, IsoRegion
from randomizer.iso.constants import Ability, Move, Type, EvolutionType, PokemonSpecies, VALID_POKEMON_TYPES, Item
from randomizer.iso.fsys import FsysArchive
from randomizer.iso.structs import StatSet, EvoEntry, LevelUpMoveEntry
from randomizer.util import chunked, flatten
RANDOM_BST_MIN = 120
RANDOM_BST_MAX = 800
BASE_STAT_MINIMUM = 15
BASE_STAT_MAXIMUM = 255
class AbstractHandlerMethodError(NotImplementedError):
def __init__(self):
super().__init__('Internal error: Game specific class should implement all abstract game class methods!')
class BasePokemon:
def __init__(self):
self.exp_class = None
self.catch_rate = None
self.gender_ratio = None
self.exp_gain = None
self.base_happiness = None
self.natdex_no = None
self.base_stats = None
self.ev_gain = None
self.type1 = None
self.type2 = None
self.ability1 = None
self.ability2 = None
self.item1 = None
self.item2 = None
self.height = None
self.weight = None
self.species = None
self.tm_compatibility = [False for _ in range(0, 50)]
self.hm_compatibility = [False for _ in range(0, 8)]
self.tutor_compatibility = []
self.egg_moves = []
self.evolution = []
self.level_up_moves = []
self.tm_moves_list = []
def __str__(self):
return "#%03d %s, %s/%s" % (self.natdex_no, self.species.name, self.type1.name, self.type2.name)
def set_base_stats(self, *args):
self.base_stats = StatSet(*args)
def set_ev_gain(self, *args):
self.ev_gain = StatSet(*args)
def set_learn_flags(self, tm_compatibility, hm_compatibility, tutor_compatibility=b''):
self.tm_compatibility = [b == 1 for b in tm_compatibility]
self.hm_compatibility = [b == 1 for b in hm_compatibility]
self.tutor_compatibility = [b == 1 for b in tutor_compatibility]
def set_egg_moves(self, egg_moves_packed):
self.egg_moves = []
for move_entry in list(chunked(2, egg_moves_packed)):
try:
move = Move(unpack('>H', move_entry)[0])
if move != Move.NONE:
self.egg_moves.append(move)
except KeyError:
pass
def set_evolution(self, evolution_data_packed):
self.evolution = []
for evo_entry in list(chunked(6, evolution_data_packed)):
entry = EvoEntry(*unpack('>BBHH', evo_entry))
if entry.type != 0:
self.evolution.append(entry)
def set_level_up_moves(self, level_up_moves_packed):
self.level_up_moves = []
for move_entry in list(chunked(4, level_up_moves_packed)):
entry = LevelUpMoveEntry(*unpack('>BBH', move_entry))
if entry.move != Move.NONE:
self.level_up_moves.append(entry)
pass
def encode_learn_flags(self):
return tuple([b''.join([b'\x01' if b is True else b'\x00' for b in flag_list])
for flag_list in [self.tm_compatibility, self.hm_compatibility, self.tutor_compatibility]])
def encode_egg_moves(self):
egg_moves = b''
for i in range(0, 8):
try:
egg_moves += pack('>H', self.egg_moves[i].value)
except IndexError:
egg_moves += b'\x00\x00'
return egg_moves
def encode_evolution(self):
evolution = b''
for i in range(0, 5):
try:
evolution += pack('>BBHH', self.evolution[i].type.value, self.evolution[i].unknown1,
self.evolution[i].level, self.evolution[i].evolves_to)
except IndexError:
evolution += b'\x00\x00\x00\x00\x00\x00'
return evolution
def encode_level_up_moves(self):
level_up_moves = b''
for i in range(0, 20):
try:
level_up_moves += pack('>BBH', self.level_up_moves[i].level, self.level_up_moves[i].unknown1,
self.level_up_moves[i].move.value)
except IndexError:
level_up_moves += b'\x00\x00\x00\x00'
return level_up_moves
def randomize_base_stats(self, keep_bst, stat_distribution=None, list_index=None):
if self.base_stats.total == 0:
return
if list_index is not None and list_index > 0:
random.shuffle(stat_distribution)
new_bst = self.base_stats.total if keep_bst else round(
min(RANDOM_BST_MAX, max(RANDOM_BST_MIN, random.gauss(
(RANDOM_BST_MAX + RANDOM_BST_MIN) / 2,
(RANDOM_BST_MAX - RANDOM_BST_MIN) / 6)))
)
if stat_distribution is None:
stat_distribution = [max(0, random.gauss(1, config.rng_pkstats_variance)) for _ in range(0, 6)]
multiplier = sum(stat_distribution) / 6 * (new_bst / 6)
new_stats = [min(BASE_STAT_MAXIMUM, max(BASE_STAT_MINIMUM, round(stat * multiplier)))
for stat in stat_distribution]
if config.rng_pkstats_wg_1hp and self.ability1 == Ability.WONDER_GUARD or self.ability2 == Ability.WONDER_GUARD:
new_stats[0] = 1
# Fudge random stats until we're at target BST
while sum(new_stats) > new_bst:
stat_idx = random.randint(0, 5)
if new_stats[stat_idx] > BASE_STAT_MINIMUM and new_stats[stat_idx] != 1:
new_stats[stat_idx] -= 1
while sum(new_stats) < new_bst:
stat_idx = random.randint(0, 5)
if new_stats[stat_idx] < BASE_STAT_MAXIMUM and new_stats[stat_idx] != 1:
new_stats[stat_idx] += 1
self.base_stats = StatSet(*new_stats)
logging.debug('%s\'s base stats are now %s', self.species.name, self.base_stats)
return stat_distribution
def randomize_types(self, previous_stage_types=None):
if previous_stage_types is not None:
self.type1, self.type2 = previous_stage_types
if random.random() < config.rng_pktypes_family_change_ratio / 100:
# Normal type is handled differently:
# - solo Normal type can evolve into solo or dual other type
# - Normal/? type needs to have its first type replaced rather than the second one
# - Non-Normal type cannot be randomized back to Normal
if self.type1 == Type.NORMAL and self.type2 == Type.NORMAL:
return self.randomize_types()
elif self.type1 == Type.NORMAL:
self.type1 = random.choice([t for t in VALID_POKEMON_TYPES if t != Type.NORMAL])
else:
self.type2 = random.choice([t for t in VALID_POKEMON_TYPES if t != self.type2 and t != Type.NORMAL])
else:
self.type1 = random.choice(VALID_POKEMON_TYPES)
if random.random() < config.rng_pktypes_monotype_ratio / 100:
self.type2 = self.type1
else:
# Normal type as the second type sometimes appears as hidden (for example in XD Strategy Memo)
# even though canonically solo types are encoded as matching type 1 and type 2. To fix this,
# Normal/? types are never allowed unless the Pokรฉmon is monotype
self.type2 = random.choice([t for t in VALID_POKEMON_TYPES if t != Type.NORMAL])
logging.debug('%s is now the %s%s%s type', self.species.name,
self.type1.name,
'/' if self.type1 != self.type2 else '',
self.type2.name if self.type1 != self.type2 else '')
return self.type1, self.type2
def randomize_abilities(self, allowed_abilities, previous_stage_abilities=None):
if previous_stage_abilities is not None:
self.ability1, self.ability2 = previous_stage_abilities
if random.random() < config.rng_pkabi_family_change_ratio / 100:
return self.randomize_abilities(allowed_abilities)
else:
self.ability1 = random.choice(allowed_abilities)
if random.random() < config.rng_pkabi_monoabi_ratio / 100:
self.ability2 = Ability.NONE
else:
self.ability2 = random.choice(allowed_abilities)
# Special case: if Wonder Guard is allowed and a Pokรฉmon with it has 1 HP, then that Pokรฉmon should never
# get an alternate ability as it would just be unusable.
if config.rng_pkstats_wg_1hp and (
self.ability1 == Ability.WONDER_GUARD or self.ability2 == Ability.WONDER_GUARD):
self.ability1 = Ability.WONDER_GUARD
self.ability2 = Ability.NONE
if self.ability1 == self.ability2:
self.ability2 = Ability.NONE
logging.debug('%s has now the abilit%s %s%s%s', self.species.name,
'ies' if self.ability2 != Ability.NONE else 'y',
self.ability1.name,
'/' if self.ability2 != Ability.NONE else '',
self.ability2.name if self.ability2 != Ability.NONE else '')
return self.ability1, self.ability2
def select_random_move_from_movepool(self, movepool, allow_all_types, force_same_type, force_offensive):
viable_moves = [
m for m in movepool
if (
not force_offensive or m.power > 0
) and (
allow_all_types
or m.power == 0
or m.type in [Type.NORMAL, self.type1, self.type2]
) and (
not force_same_type
or m.type in [self.type1, self.type2]
) and not (
config.rng_pkmoves_no_dupes
and m.move in [n.move for n in self.level_up_moves]
)
]
return random.choice(viable_moves)
def randomize_moveset(self, movepool):
if config.rng_pkmoves_lv1_fullset:
lv1_move_count = len([m for m in self.level_up_moves if m.move != Move.NONE and m.level == 1])
for i in range(lv1_move_count, 4):
self.level_up_moves.insert(0, LevelUpMoveEntry(1, 0, Move.NONE))
if len(self.level_up_moves) > 20:
self.level_up_moves = self.level_up_moves[0:20]
# Already assigned moves are possibly filtered out
for move in self.level_up_moves:
move.move = Move.NONE
offensive_moves = []
offensive_move_drought = 4 if config.rng_pkmoves_lv1_ensure_damaging else 0
for i, slot in enumerate(self.level_up_moves):
allow_all_types = random.random() < (config.rng_pkmoves_any_type_ratio / 100)
force_offensive = random.random() < (config.rng_pkmoves_min_damaging_ratio / 100)
force_same_type = random.random() < (config.rng_pkmoves_min_own_type_ratio / 100)
try:
if config.rng_pkmoves_ensure_damaging_interval and offensive_move_drought > 3:
offensive_move_drought = 0
move = self.select_random_move_from_movepool(
movepool, allow_all_types, force_same_type, True)
else:
move = self.select_random_move_from_movepool(
movepool, allow_all_types, force_same_type, force_offensive)
except IndexError:
# Restrictions exhausted all available move options, so add one with no restrictions
move = self.select_random_move_from_movepool(
movepool, True, False, False)
slot.move = move.move
if move.power == 0:
offensive_move_drought += 1
else:
offensive_move_drought = 0
# Exclude special damage moves (1 BP) from rearrangement
if move.power >= 10:
offensive_moves.append((i, move))
if config.rng_pkmoves_dmg_progression:
indices = [m[0] for m in offensive_moves]
offensive_moves = sorted(offensive_moves, key=lambda m: m[1].power)
for i, m in enumerate(offensive_moves):
self.level_up_moves[indices[i]].move = m[1].move
logging.debug('%s now learns %s', self.species.name,
', '.join(['%s on level %d' % (m.move.name, m.level) for m in self.level_up_moves]))
def randomize_compatibility(self, moves_data, previous_stage_compat, target_property,
status_ratio, own_type_ratio, normal_type_ratio, other_type_ratio):
for i, move in enumerate(moves_data):
if move.power == 0:
# Status move
ratio = status_ratio
elif move.type == self.type1 or move.type == self.type2:
ratio = own_type_ratio
elif move.type == Type.NORMAL:
ratio = normal_type_ratio
else:
ratio = other_type_ratio
getattr(self, target_property)[i] = random.random() < ratio / 100
# Optionally mark previous stage learnable moves as learnable by this Pokรฉmon too
if previous_stage_compat is not None:
for i in range(len(previous_stage_compat)):
getattr(self, target_property)[i] = getattr(self, target_property)[i] or previous_stage_compat[i]
return getattr(self, target_property)
def randomize_tms(self, tm_data, previous_stage_tms=None):
compatibility = self.randomize_compatibility(tm_data, previous_stage_tms, "tm_compatibility",
config.rng_pktm_min_status_ratio,
config.rng_pktm_min_own_type_ratio,
config.rng_pktm_min_normal_type_ratio,
config.rng_pktm_min_other_type_ratio)
self.update_tm_move_set(tm_data)
logging.debug('%s now learns the following TM moves: %s', self.species.name,
', '.join([tm_data[i].move.name for i, l in enumerate(self.tm_compatibility) if l is True]))
return compatibility
def update_tm_move_set(self, tm_data):
self.tm_moves_list = [tm_data[i].move for i, l in enumerate(self.tm_compatibility) if l is True]
def randomize_item(self):
if random.random() < config.rng_pkitem_ratio / 100:
item = random.choice([i for i in Item if Item.NONE.value < i.value <= Item.TM50.value])
self.item1 = item
self.item2 = item
else:
self.item1 = Item.NONE
self.item2 = Item.NONE
def patch_evolution(self, index, evo_type, level_or_item):
self.evolution[index].type = evo_type
self.evolution[index].level = level_or_item.value if type(level_or_item) == Item else level_or_item
self.evolution[index].item = Item(level_or_item)
def get_legal_moves_at_level(self, level):
return [m.move for m in self.level_up_moves if m.level <= level], self.tm_moves_list
def encode(self):
raise AbstractHandlerMethodError()
class BaseMoveEntry:
def __init__(self):
self.priority = None
self.pp = None
self.type = None
self.targets = None
self.accuracy = None
self.effect_proc = None
self.contact = None
self.protectable = None
self.magic_coat = None
self.snatchable = None
self.mirror_movable = None
self.kings_rock_proc = None
self.sound_move = None
self.hm = None
self.recoil = None
self.power = None
self.effect_type = None
self.name_id = None
self.anim_id = None
self.desc_id = None
self.move = None
def __str__(self):
return "%s (%s), %dBP, %d%%" % (self.move.name, self.type.name, self.power, self.accuracy)
def encode(self):
raise AbstractHandlerMethodError()
def randomize(self, change_type, change_pp, change_power, change_acc):
if change_type and self.move not in [Move.CURSE, Move.STRUGGLE]:
self.type = random.choice(VALID_POKEMON_TYPES)
if change_power and self.power > 10:
self.power = random.randint(2, 18) * 5
if change_acc and self.accuracy > 0:
rand = random.random()
# Linear scale from 30% to 280% that is clamped to 30-100%, meaning about 70% chance of
# normal 100% accuracy and about 30% chance spread evenly among all multiples of 5 between 30% and 100%.
self.accuracy = min(100, 30 + round(rand * 50) * 5)
if change_pp and self.pp > 1:
self.pp = random.randint(1, 8) * 5
class ItemBox:
SIGNATURE = '>BBhHH6sHfff'
def __init__(self, data, idx):
super().__init__()
(
self.type,
self.quantity,
self.angle,
self.room_id,
self.flags,
self.unknown_0x08_0x0D,
item_id,
self.coord_x,
self.coord_y,
self.coord_z
) = unpack(self.SIGNATURE, data)
self.item = Item(item_id)
def encode(self):
return pack(
self.SIGNATURE,
self.type,
self.quantity,
self.angle,
self.room_id,
self.flags,
self.unknown_0x08_0x0D,
self.item.value,
self.coord_x,
self.coord_y,
self.coord_z)
@property
def type_text(self):
if self.type == 0x24:
return 'box'
elif self.type == 0x44:
return 'sparkle'
elif self.type == 0x6C:
return 'ring binder'
else:
return 'unknown'
def randomize(self, allowed_items, berry_reroll_count, random_qty, item_pool):
# Don't ever randomize key items
if self.item not in allowed_items:
return
if item_pool is not None:
item_entry = item_pool.pop()
self.item = item_entry[0]
self.quantity = item_entry[1]
else:
for i in range(berry_reroll_count):
self.item = random.choice(allowed_items)
if self.item.value < Item.CHERI_BERRY.value or self.item.value > Item.STARF_BERRY.value:
break
if random_qty:
self.quantity = 16 - round(math.pow(random.randint(1, 65536), 1/4) * 15/16)
class BaseHandler:
# these should be filled in in the derived handlers
POKEMON_DATA_LIST_LENGTH = 0
MOVE_DATA_LIST_LENGTH = 0
ITEM_BOX_LIST_LENGTH = 0
def __init__(self, iso, region):
self.iso = iso
self.region = region
self.archives = dict()
self.pokemon_data = dict()
self.move_data = dict()
self.item_box_data = dict()
self.tm_data = []
self.tutor_data = []
# Cacophony is banned because it doesn't have a description and as such crashes the Pokรฉmon status screen and
# Strategy Memo. It might not work anyways, and its effect is a duplicate one, so it isn't needed in any case.
self.allowed_abilities = [a for a in list(Ability)
if a not in [Ability.NONE, Ability.CACOPHONY]
and a.name not in [n.upper() for n in config.rng_pkabi_ban]]
self.banned_learnset_moves = [n.upper() for n in config.rng_pkmoves_ban]
self.normal_pokemon = []
self.dol_file = iso.open(b'start.dol')
if config.dump_files:
dump_path = os.path.join(config.working_dir, 'dump', 'start.dol')
try:
with open(dump_path, 'wb') as f:
f.write(self.dol_file.read())
except IOError:
logging.warning('Couldn\'t dump the file %s, skipping dumping.', dump_path)
def open_archives(self):
for archive in self.archive_list:
self.open_archive(archive)
def open_archive(self, name):
self.archives[name] = FsysArchive.from_iso(self.iso, bytes(name))
def write_archives(self):
logging.info('Compressing and writing archive files back into the ISO. Be patient, this may take a while!')
for archive in self.archive_list:
self.write_archive(archive)
def write_archive(self, name):
logging.info('Writing archive %s into the file.' % name.decode('ascii', errors='ignore'))
data = self.archives[name].encode()
self.iso.resizeFile(name, len(data))
self.iso.writeFile(name, 0, data)
logging.debug('Wrote %s (%d bytes) back into the ISO.', name.decode('ascii', errors='ignore'), len(data))
def load_pokemon_data(self):
logging.info('Reading Pokรฉmon data from the archive file.')
try:
common_rel = self.archives[b'common.fsys'].get_file(b'common_rel').data
common_rel.seek(self.pokemon_data_offset)
for i in range(1, self.POKEMON_DATA_LIST_LENGTH + 1):
logging.debug('Reading index %d of %d...', i, self.POKEMON_DATA_LIST_LENGTH)
pkmn = self.pokemon_data[i] = self.make_pokemon_data(common_rel, i)
pkmn.update_tm_move_set(self.tm_data)
logging.debug(
' #%d %s, %s%s%s, %d/%d/%d/%d/%d/%d (BST %d), %s%s%s',
pkmn.natdex_no,
PokemonSpecies(i).name,
pkmn.type1.name,
'/' if pkmn.type1 != pkmn.type2 else '',
pkmn.type2.name if pkmn.type1 != pkmn.type2 else '',
pkmn.base_stats.hp,
pkmn.base_stats.attack,
pkmn.base_stats.defense,
pkmn.base_stats.sp_attack,
pkmn.base_stats.sp_defense,
pkmn.base_stats.speed,
pkmn.base_stats.total,
pkmn.ability1.name,
'/' if pkmn.ability2 != Ability.NONE else '',
pkmn.ability2.name if pkmn.ability2 != Ability.NONE else '',
)
logging.debug(' Learnset: %s', ', '.join([
'%s (%d)' % (m.move.name, m.level) for m in pkmn.level_up_moves]))
logging.debug(' TMs: %s',
', '.join(['TM%02d %s' % (n + 1, self.tm_data[n].move.name)
for n, b in enumerate(pkmn.tm_compatibility) if b is True]))
logging.debug(' HMs (not available): %s',
', '.join(['HM%02d' % (n + 1)
for n, b in enumerate(pkmn.hm_compatibility) if b is True]))
logging.debug(' Egg moves: %s', ', '.join([m.name for m in pkmn.egg_moves]))
for evo in pkmn.evolution:
if evo.type == EvolutionType.NONE:
continue
evo_specifier = ''
if evo.type.param_is_level:
evo_specifier = 'at level %d ' % evo.level
if evo.type.param_is_item:
evo_specifier = 'with item %s ' % evo.item.name
logging.debug(' Evolves to %s %s(%s)',
PokemonSpecies(evo.evolves_to).name, evo_specifier, evo.type.name)
self.normal_pokemon = list(filter(lambda pkmn: 0 < pkmn.natdex_no < 388, self.pokemon_data.values()))
except KeyError as e:
logging.error('Couldn\'t read Pokรฉmon data since the required data file was not loaded.')
raise e
def load_move_data(self):
logging.info('Reading move data from the archive file.')
try:
common_rel = self.archives[b'common.fsys'].get_file(b'common_rel').data
common_rel.seek(self.move_data_offset)
for i in range(1, self.MOVE_DATA_LIST_LENGTH + 1):
logging.debug('Reading index %d of %d...', i, self.MOVE_DATA_LIST_LENGTH)
move = self.move_data[i] = self.make_move_data(common_rel, i)
logging.debug(
' #%d %s, %s, %d BP, %d PP, %d%% accuracy',
i,
self.move_data[i].move.name,
move.type.name,
move.power,
move.pp,
move.accuracy
)
except KeyError as e:
logging.error('Couldn\'t read move data since the required data file was not loaded.')
raise e
def load_tm_data(self):
logging.debug('Reading TM data from the executable binary.')
self.dol_file.seek(self.tm_data_offset)
for i in range(0, 50):
self.dol_file.seek(6, 1)
move = self.move_data[unpack(">H", self.dol_file.read(2))[0]]
self.tm_data.append(move)
logging.debug(' TM%02d contains %s', i + 1, move.move.name)
def write_pokemon_data(self):
logging.debug('Encoding Pokรฉmon data in preparation to be written to the ISO.')
common_rel = self.archives[b'common.fsys'].get_file(b'common_rel').data
common_rel.seek(self.pokemon_data_offset)
for i, pkmn in self.pokemon_data.items():
logging.debug('Encoding index %d of %d...', i, self.POKEMON_DATA_LIST_LENGTH)
common_rel.write(pkmn.encode())
def write_move_data(self):
logging.debug('Encoding move data in preparation to be written to the ISO.')
common_rel = self.archives[b'common.fsys'].get_file(b'common_rel').data
common_rel.seek(self.move_data_offset)
for i, move in self.move_data.items():
logging.debug('Encoding index %d of %d...', i, self.MOVE_DATA_LIST_LENGTH)
common_rel.write(move.encode())
def write_tm_data(self):
logging.info('Writing TM data into the executable binary.')
self.dol_file.seek(self.tm_data_offset)
for m in self.tm_data:
self.dol_file.seek(6, 1)
self.dol_file.write(pack(">H", m.move.value))
def load_trainer_data(self):
raise AbstractHandlerMethodError()
def write_trainer_data(self):
raise AbstractHandlerMethodError()
def load_item_box_data(self):
logging.info('Reading item box data from the archive file.')
try:
common_rel = self.archives[b'common.fsys'].get_file(b'common_rel').data
common_rel.seek(self.item_data_offset)
for i in range(1, self.ITEM_BOX_LIST_LENGTH + 1):
item_box = self.item_box_data[i] = self.make_item_box_data(common_rel, i)
logging.debug(
' #%d %d x %s (%s)',
i,
item_box.quantity,
item_box.item.name,
item_box.type_text
)
except KeyError as e:
logging.error('Couldn\'t read item box data since the required data file was not loaded.')
raise e
def write_item_box_data(self):
logging.debug('Encoding item box data in preparation to be written to the ISO.')
common_rel = self.archives[b'common.fsys'].get_file(b'common_rel').data
common_rel.seek(self.item_data_offset)
for i, item in self.item_box_data.items():
logging.debug('Encoding index %d of %d...', i, self.ITEM_BOX_LIST_LENGTH)
common_rel.write(item.encode())
def get_available_regular_moves(self):
return [m for _, m in self.move_data.items() if m.move not in [Move.STRUGGLE, Move.NONE]
and m.move.value < Move.UNUSED_0x163.value]
def get_game_specific_randomizable_items(self):
return []
def randomize_pokemon_get_root_level_list(self, condition):
return self.get_first_stages() if condition else self.normal_pokemon
def randomize_pokemon_aspect_recur(self, aspect, result_arg_name, pkmn_list, recurse,
previous_result=None, pass_index=False, already_done=set(), **kwargs):
for i, pkmn in enumerate(pkmn_list):
args = {
result_arg_name: previous_result
}
if pass_index and recurse and previous_result is not None:
args['list_index'] = i
randomization_result = getattr(pkmn, 'randomize_' + aspect)(**args, **kwargs)
if recurse:
evolution_targets = [self.pokemon_data[evo.evolves_to.value] for evo in pkmn.evolution
if evo.evolves_to is not PokemonSpecies.NONE
and self.pokemon_data[evo.evolves_to.value].species not in already_done]
self.randomize_pokemon_aspect_recur(
aspect, result_arg_name, evolution_targets,
previous_result=randomization_result, recurse=True, pass_index=pass_index,
already_done=already_done.union(set([p.species for p in pkmn_list])), **kwargs)
def randomize_pokemon_stats(self):
logging.info('Randomizing Pokรฉmon stats.')
self.randomize_pokemon_aspect_recur('base_stats', 'stat_distribution',
self.randomize_pokemon_get_root_level_list(config.rng_pkstats_family),
recurse=config.rng_pkstats_family, pass_index=True,
keep_bst=config.rng_pkstats_retain_bst)
def randomize_pokemon_types(self):
logging.info('Randomizing Pokรฉmon types.')
self.randomize_pokemon_aspect_recur('types', 'previous_stage_types',
self.randomize_pokemon_get_root_level_list(config.rng_pktypes_family),
recurse=config.rng_pktypes_family)
def randomize_pokemon_abilities(self):
logging.info('Randomizing Pokรฉmon abilities.')
self.randomize_pokemon_aspect_recur('abilities', 'previous_stage_abilities',
self.randomize_pokemon_get_root_level_list(config.rng_pkabi_family),
recurse=config.rng_pkabi_family, allowed_abilities=self.allowed_abilities)
def randomize_pokemon_movesets(self):
logging.info('Randomizing Pokรฉmon movesets.')
allowed_moves = [m for m in self.get_available_regular_moves() if m.move.name not in self.banned_learnset_moves]
for pkmn in self.normal_pokemon:
pkmn.randomize_moveset(allowed_moves)
def randomize_pokemon_tms(self):
logging.info('Randomizing Pokรฉmon TM learnsets.')
self.randomize_pokemon_aspect_recur('tms', 'previous_stage_tms',
self.randomize_pokemon_get_root_level_list(config.rng_pktm_family),
recurse=config.rng_pktm_family, tm_data=self.tm_data)
def randomize_pokemon_items(self):
    """Re-roll the wild held item of every regular Pokémon and log the outcome."""
    logging.info('Randomizing Pokémon held items.')
    for pokemon in self.normal_pokemon:
        pokemon.randomize_item()
        held = pokemon.item1
        if held == Item.NONE:
            message = 'Wild %s is no longer holding any item' % pokemon.species.name
        else:
            message = 'Wild %s is now holding %s' % (pokemon.species.name, held.name)
        logging.debug(message)
def randomize_pokemon_evolutions_pass(self, evolvers, evolutions):
    """Assign new evolution targets to every evolver.

    With config.rng_pkevo_shuffle, targets are drawn without replacement
    from a shuffled copy of ``evolutions`` (a permutation of the original
    targets); otherwise each slot independently picks a random member.
    Empty evolution slots (PokemonSpecies.NONE) are left untouched.
    """
    def debug_message(pkmn):
        # Log the (possibly multiple) new evolution targets of one Pokémon.
        new_evos = [e.evolves_to.name for e in pkmn.evolution
                    if e.evolves_to != PokemonSpecies.NONE]
        if len(new_evos) == 0:
            return
        logging.debug('%s now evolves into %s' % (pkmn.species.name, ', '.join(new_evos)))

    if config.rng_pkevo_shuffle:
        # Work on a copy so the caller's list is not consumed.
        pool = list(evolutions)
        random.shuffle(pool)
        for evolver in evolvers:
            for evo in evolver.evolution:
                if evo.evolves_to != PokemonSpecies.NONE:
                    # We should always have the right amount of evolutions to pop from the list.
                    evo.evolves_to = pool.pop().species
            debug_message(evolver)
    else:
        for evolver in evolvers:
            for evo in evolver.evolution:
                if evo.evolves_to != PokemonSpecies.NONE:
                    evo.evolves_to = random.choice(evolutions).species
            debug_message(evolver)
    # NOTE: removed a dead trailing `pass` statement and replaced the manual
    # `[e for e in evolutions]` copy with the idiomatic list() call.
def randomize_pokemon_evolution(self):
    """Randomize which species each Pokémon evolves into.

    In same-stage mode, evolutions are rewired stage by stage starting
    from the first-stage Pokémon; otherwise any Pokémon may evolve into
    any other regular Pokémon.
    """
    logging.info('Randomizing Pokémon evolutions.')
    if not config.rng_pkevo_samestage:
        self.randomize_pokemon_evolutions_pass(self.normal_pokemon, self.normal_pokemon)
        return
    stage = self.get_first_stages()
    while stage:
        next_stage = [
            self.pokemon_data[evo.evolves_to.value]
            for evo in flatten([p.evolution for p in stage])
            if evo.evolves_to != PokemonSpecies.NONE
        ]
        self.randomize_pokemon_evolutions_pass(stage, next_stage)
        stage = next_stage
def randomize_moves(self):
    """Randomize every move's type/PP/power/accuracy as enabled in config."""
    logging.info('Randomizing move data.')
    # The dict keys were never used; iterate the move entries directly.
    for move in self.move_data.values():
        move.randomize(config.rng_move_types, config.rng_move_pp,
                       config.rng_move_power, config.rng_move_accuracy)
def randomize_tms(self):
    """Fill the 50 TM slots with distinct randomly chosen regular moves."""
    # TODO: TM item descriptions should be updated with the newly selected moves' descriptions as well.
    logging.info('Randomizing TM data.')
    self.tm_data = random.sample(self.get_available_regular_moves(), 50)
    for number, entry in enumerate(self.tm_data, start=1):
        logging.debug(' TM%02d now contains %s', number, entry.move.name)
def randomize_trainers(self):
    # Trainer data layout differs per game; concrete handlers must override.
    raise AbstractHandlerMethodError()
def randomize_item_boxes(self):
    """Randomize (or shuffle) the contents of every overworld item box."""
    logging.info('Randomizing item boxes.')
    eligible = [item for item in Item
                if Item.NONE.value < item.value <= Item.TM50.value]
    eligible = eligible + self.get_game_specific_randomizable_items()
    shuffled_contents = None
    if config.rng_items_shuffle:
        # Shuffle mode: redistribute the existing (item, quantity) pairs
        # instead of rolling brand-new contents.
        shuffled_contents = [(box.item, box.quantity)
                             for box in self.item_box_data.values()
                             if box.item in eligible]
        random.shuffle(shuffled_contents)
    for index, box in self.item_box_data.items():
        box.randomize(eligible, config.rng_items_berry_reroll,
                      config.rng_items_random_qty, shuffled_contents)
        logging.debug(
            ' Item %s #%d now contains %d x %s',
            box.type_text,
            index,
            box.quantity,
            box.item.name
        )
def load_game_specific_data(self):
    # Hook: subclasses load any extra per-game data; base does nothing.
    pass

def write_game_specific_data(self):
    # Hook: subclasses write back any extra per-game data; base does nothing.
    pass

def randomize_game_specific_features(self):
    # Hook: subclasses randomize game-exclusive features; base does nothing.
    pass
def randomize_and_write_starter_data(self):
    # Starter storage differs per game; concrete handlers must override.
    raise AbstractHandlerMethodError()

def randomize_and_write_trades_and_gifts(self):
    # Trade/gift Pokémon layout differs per game; concrete handlers must override.
    raise AbstractHandlerMethodError()

def improve_catch_rates(self):
    # Catch-rate tables differ per game; concrete handlers must override.
    raise AbstractHandlerMethodError()
def patch_impossible_evolutions(self):
    """Replace evolution methods that cannot be performed in these games.

    The GameCube games have no trading and no PokéBlocks, so affected
    species are switched to level-up or evolution-stone methods instead.
    """
    logging.info('Patching impossible evolutions.')
    # Plain trade evolution after evolving once
    self.pokemon_data[PokemonSpecies.KADABRA].patch_evolution(0, EvolutionType.LEVEL_UP, 32)
    self.pokemon_data[PokemonSpecies.MACHOKE].patch_evolution(0, EvolutionType.LEVEL_UP, 37)
    self.pokemon_data[PokemonSpecies.GRAVELER].patch_evolution(0, EvolutionType.LEVEL_UP, 37)
    self.pokemon_data[PokemonSpecies.HAUNTER].patch_evolution(0, EvolutionType.LEVEL_UP, 37)
    # Trade evolution with item, no branching
    self.pokemon_data[PokemonSpecies.ONIX].patch_evolution(0, EvolutionType.LEVEL_UP, 30)
    self.pokemon_data[PokemonSpecies.SCYTHER].patch_evolution(0, EvolutionType.LEVEL_UP, 30)
    self.pokemon_data[PokemonSpecies.PORYGON].patch_evolution(0, EvolutionType.LEVEL_UP, 30)
    self.pokemon_data[PokemonSpecies.SEADRA].patch_evolution(0, EvolutionType.LEVEL_UP, 42)
    # Trade evolution with item, with branching
    self.pokemon_data[PokemonSpecies.POLIWHIRL].patch_evolution(1, EvolutionType.STONE_EVOLUTION, Item.SUN_STONE)
    self.pokemon_data[PokemonSpecies.SLOWPOKE].patch_evolution(1, EvolutionType.STONE_EVOLUTION, Item.MOON_STONE)
    self.pokemon_data[PokemonSpecies.CLAMPERL].patch_evolution(0, EvolutionType.STONE_EVOLUTION, Item.SUN_STONE)
    self.pokemon_data[PokemonSpecies.CLAMPERL].patch_evolution(1, EvolutionType.STONE_EVOLUTION, Item.MOON_STONE)
    # High beauty evolution; Orre doesn't have PokéBlocks
    self.pokemon_data[PokemonSpecies.FEEBAS].patch_evolution(0, EvolutionType.LEVEL_UP, 30)
    self.patch_impossible_game_specific_evolutions()
def patch_impossible_game_specific_evolutions(self):
    # Hook: subclasses patch additional game-specific evolutions; base does nothing.
    return

def make_pokemon_data(self, io_in, idx) -> BasePokemon:
    # Factory for one Pokémon entry read from io_in; layout is game-specific.
    raise AbstractHandlerMethodError()

def make_move_data(self, io_in, idx) -> BaseMoveEntry:
    # Factory for one move entry read from io_in; layout is game-specific.
    raise AbstractHandlerMethodError()

def make_item_box_data(self, io_in, idx):
    # Item boxes share a common 0x1C-byte layout across both games.
    return ItemBox(io_in.read(0x1C), idx)
def fix_name_casing(self):
    """Convert all-caps names in common_rel to Title Case, in place.

    Operates directly on big-endian 16-bit code units: the first letter
    of each word keeps its case, while subsequent uppercase letters get
    the 0x0020 lowercase bit set. Assumes the regions returned by
    fixable_name_offsets contain only name strings — TODO confirm per game.
    """
    logging.debug('Fixing name casing.')
    common_rel = self.archives[b'common.fsys'].get_file(b'common_rel').data
    regions = self.fixable_name_offsets
    for region_start, region_end in regions:
        common_rel.seek(region_start)
        should_cap = True  # next letter starts a word and keeps its capital
        while common_rel.tell() < region_end:
            char = unpack('>H', common_rel.read(2))[0]
            # A-Z, À-Ö or Ø-ß (uppercase Latin-1 ranges, excluding the × sign)
            if 0x0040 < char < 0x005C or 0x00BF < char < 0x00D7 or 0x00D7 < char < 0x00E0:
                if not should_cap:
                    # Rewind over the code unit just read and lowercase it in place.
                    common_rel.seek(-2, 1)
                    common_rel.write(pack('>H', (char | 0x0020)))
                should_cap = False
            else:
                # keep using lowercase after an apostrophe (the Farfetch'd case)
                # and after already lowercase letters (like in POKéMON which would otherwise become PokéMon)
                if char in [0x0027] or 0x0060 < char < 0x007C or 0x00DF < char < 0x00F7 or 0x00F7 < char < 0x0100:
                    continue
                should_cap = True
@property
def archive_list(self):
    # Names of the .fsys archives this handler needs to open; per game.
    raise AbstractHandlerMethodError()

# in common.fsys/common_rel
@property
def pokemon_data_offset(self):
    raise AbstractHandlerMethodError()

# in common.fsys/common_rel
@property
def move_data_offset(self):
    raise AbstractHandlerMethodError()

# in common.fsys/common_rel
@property
def item_data_offset(self):
    raise AbstractHandlerMethodError()

# in start.dol
@property
def tm_data_offset(self):
    raise AbstractHandlerMethodError()

# in start.dol
@property
def starter_data_offsets(self):
    raise AbstractHandlerMethodError()

# in common.fsys/common_rel
@property
def fixable_name_offsets(self):
    raise AbstractHandlerMethodError()
def get_first_stages(self):
    """Return the Pokémon that are not the evolution target of any other.

    A species counts as a first stage exactly when no other species
    evolves into it. Order follows self.normal_pokemon.
    """
    pokemon_data = self.normal_pokemon
    # Collect every species that appears as an evolution target once;
    # set membership keeps this O(n) instead of the old O(n^2)
    # list.remove() scan.
    evolved_species = {
        evo.evolves_to
        for pkmn in pokemon_data
        for evo in pkmn.evolution
    }
    return [self.pokemon_data[pkmn.species.value]
            for pkmn in pokemon_data
            if pkmn.species not in evolved_species]
def get_random_starter(self, idx, disallowed=None):
    """Pick the starter Pokémon for slot ``idx``.

    A name fixed in config.rng_starters_fixed takes precedence; otherwise
    a random Pokémon within the configured BST budget is chosen, skipping
    entries in ``disallowed`` (e.g. starters already picked for other slots).

    Raises ValueError if a configured fixed name is not a known species.
    """
    if disallowed is None:
        disallowed = []
    if len(config.rng_starters_fixed) > idx:
        name = config.rng_starters_fixed[idx].upper()
        try:
            return self.pokemon_data[PokemonSpecies[name].value]
        except KeyError:
            raise ValueError("No such Pokémon: %s" % name)
    else:
        # The weakest eligible BST bounds the budget from below, so at least
        # one non-disallowed candidate always remains even with a tiny max.
        min_bst = min([p.base_stats.total for p in self.normal_pokemon
                       if p.base_stats.total > 0 and p not in disallowed])
        max_bst = max(min_bst, config.rng_starters_max_bst)
        # BUG FIX: previously `disallowed` was only used for the min-BST
        # computation, so the same starter could be handed out twice.
        return random.choice([p for p in self.normal_pokemon
                              if p.base_stats.total <= max_bst
                              and p not in disallowed])
def write_banner_name(self, text):
    """Write the randomized game name into every opening.bnr metadata slot.

    ``text`` is written raw next to byte literals, so it is expected to be
    an already-encoded bytes object — TODO confirm with callers.
    Each metadata block holds a short name (0x20 bytes), a long name
    (0x40 bytes at +0x40) and a description (0x80 bytes at +0xC0); every
    field is zeroed before being rewritten.
    """
    from randomizer import PROG_VERSION
    banner = self.iso.open(b'opening.bnr')
    # Multi-language banners contain several metadata blocks back to back.
    banner_meta_count = int(math.floor((banner.size - BANNER_META_FIRST_OFFSET) / BANNER_META_SIZE))
    banner.seek(BANNER_META_FIRST_OFFSET)
    start_pos = BANNER_META_FIRST_OFFSET
    text = text[0:32]  # short-name field is only 0x20 bytes wide
    template = b'Randomized at %s\x0awith Pok\xe9mon GameCube Randomizer v%s'
    date_str = datetime.now().replace(microsecond=0).isoformat(' ').encode('ascii', errors='ignore')
    template_params = (date_str, PROG_VERSION.encode('ascii', errors='ignore'))
    if self.region == IsoRegion.JPN:
        # Japanese banners use Shift-JIS and mention the version first.
        template = "ポケモンゲームキューブランダマイザーv%sで\x0a%sにランダム化しました".encode('shift-jis')
        template_params = (template_params[1], template_params[0])
    for i in range(banner_meta_count):
        # Zero out each field before writing so no stale bytes remain.
        banner.seek(start_pos)
        banner.write(b'\x00' * 0x20)
        banner.seek(start_pos)
        banner.write(text)
        banner.seek(start_pos + 0x40)
        banner.write(b'\x00' * 0x40)
        banner.seek(start_pos + 0x40)
        banner.write(text)
        banner.seek(start_pos + 0xC0)
        banner.write(b'\x00' * 0x80)
        banner.seek(start_pos + 0xC0)
        banner.write(template % template_params)
        start_pos += BANNER_META_SIZE
def write_rom_header_name(self, text):
    """Overwrite the 32-byte game-name field of the ISO header at 0x20."""
    header = self.iso.file
    header.seek(0x20)
    header.write(b'\x00' * 32)
    header.seek(0x20)
    header.write(text[0:32])
def update_banner(self):
    # Hook: subclasses refresh banner graphics/metadata; base does nothing.
    pass
def get_bst_range_for_level(level, bst_min, bst_max):
level_bst_min = min(bst_max - 100, max(bst_min, bst_min + (level - 10) / 65 * (bst_max - bst_min)))
level_bst_max = min(bst_max, max(bst_min, bst_min + (level + 30) / 80 * (bst_max - bst_min)))
return level_bst_min, level_bst_max
def randomize_pokemon(pokemon, pokemon_data, move_data, bst_min, bst_max, is_shadow, shadow_candidates,
                      fixed_species=None):
    """Randomize one trainer Pokémon in place: species, moveset and held item.

    ``pokemon`` is mutated directly. ``pokemon_data``/``move_data`` are the
    global lookup tables; ``bst_min``/``bst_max`` bound the stat budget when
    power progression is enabled; ``shadow_candidates`` is a shared queue of
    unique shadow species consumed when ``is_shadow``; ``fixed_species``
    forces a species instead of rolling one.
    """
    if pokemon.species == PokemonSpecies.NONE:
        return
    # Default bounds are effectively "no restriction".
    level_bst_min = 0
    level_bst_max = 5000
    if config.rng_trainers_power_progression:
        level_bst_min, level_bst_max = get_bst_range_for_level(pokemon.level, bst_min, bst_max)
    if fixed_species:
        pokemon.species = fixed_species
    elif is_shadow and config.rng_trainers_unique_shadow:
        current_bst = 0
        attempts = 0
        # Go through the whole list once. If there are no suitable BST Pokémon,
        # just pick the first one in the queue.
        while (level_bst_min > current_bst or level_bst_max < current_bst) \
                and attempts < len(shadow_candidates):
            attempts += 1
            pokemon.species = shadow_candidates.pop()
            current_bst = pokemon_data[pokemon.species.value].base_stats.total
            if level_bst_min > current_bst or level_bst_max < current_bst:
                # Out of budget: return the species to the front of the queue.
                shadow_candidates.insert(0, pokemon.species)
    else:
        # National dex entries 1..387 are the regular pickable species.
        available_pokemon = [p.species for p in pokemon_data.values() if 0 < p.natdex_no < 388
                             and level_bst_min <= p.base_stats.total <= level_bst_max]
        if len(available_pokemon) == 0:
            # Only happens if the BSTs have a huge gap somewhere. In that case, just pick randomly.
            available_pokemon = [p.species for p in pokemon_data.values() if 0 < p.natdex_no < 388]
        pokemon.species = random.choice(available_pokemon)
    level_up_moves, tm_moves = pokemon_data[pokemon.species.value].get_legal_moves_at_level(pokemon.level)
    if config.rng_trainers_level_up_only:
        # Use the four most recently learned level-up moves.
        pokemon.moves = level_up_moves[-4:]
    else:
        if len(level_up_moves) + len(tm_moves) <= 4:
            pokemon.moves = level_up_moves + tm_moves
        else:
            moves = []
            # Weight level-up moves 4x compared to TM moves.
            pool = tm_moves + level_up_moves * 4
            # Guarantee at least one damaging move whenever the pool has any.
            damaging_moves = [m for m in pool if move_data[m.value].power > 0]
            if len(damaging_moves) > 0:
                moves.append(random.choice(damaging_moves))
            while len(moves) < 4:
                move = random.choice(pool)
                if move not in moves:
                    moves.append(move)
            pokemon.moves = moves
    # Pad to exactly four move slots.
    pokemon.moves = pokemon.moves + [Move.NONE] * max(0, 4 - len(pokemon.moves))
    if config.rng_trainers_item:
        if random.random() < config.rng_trainers_item_ratio / 100:
            pokemon.item = random.choice([i for i in Item if Item.NONE.value < i.value <= Item.TM50.value])
        else:
            pokemon.item = Item.NONE
| 41.686703 | 120 | 0.605217 |
acf43201f43eefece382505232ca3c845d97262a | 812 | py | Python | examples/aws/manage.py | yedpodtrzitko/wagtail-bakery | 0937dde07d1791a3780079a1b6959eb0b9c43d55 | [
"MIT"
] | 1 | 2019-10-06T17:19:07.000Z | 2019-10-06T17:19:07.000Z | examples/aws/manage.py | yedpodtrzitko/wagtail-bakery | 0937dde07d1791a3780079a1b6959eb0b9c43d55 | [
"MIT"
] | null | null | null | examples/aws/manage.py | yedpodtrzitko/wagtail-bakery | 0937dde07d1791a3780079a1b6959eb0b9c43d55 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at this example project's settings before dispatching.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "example.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django  # noqa
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    # Hand the command-line arguments to Django's management dispatcher.
    execute_from_command_line(sys.argv)
| 35.304348 | 77 | 0.641626 |
acf43262a84d0961c057cd00b4af6d903a35c6e0 | 5,478 | py | Python | git-log-multi-all.py | gjost/git-log-multi-all | 0b587987397b3eb1690865227a13f38e42728db3 | [
"MIT"
] | null | null | null | git-log-multi-all.py | gjost/git-log-multi-all | 0b587987397b3eb1690865227a13f38e42728db3 | [
"MIT"
] | null | null | null | git-log-multi-all.py | gjost/git-log-multi-all | 0b587987397b3eb1690865227a13f38e42728db3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# git-log-multi-all.py
#
DESCRIPTION = """Lists commits on all branches within specified period across multiple Git repositories"""
EPILOG = """
Useful for filling out timesheets. Note that dates reflect the time
*commits* were made rather than when the actual work was performed.
Specify a list of Git repositories. The script will list all commits
on all branches in all of the specified repositories during the
specified time. Useful for getting a good picture of activity across
projects.
Filename must contain list of absolute paths to Git repositories you
with to read, one per line. Comments using '#' are understood.
INSTALL
source $VIRTUALENV
pip install blessings dateutil gitpython
EXAMPLES
python timesheets.py $LISTOFREPOS --start=2017-05-01 --end=2017-05-31
python timesheets.py $LISTOFREPOS --month=2017-05
"""
import calendar
from datetime import date,datetime
import os
from blessings import Terminal
import click
import git
from dateutil import parser
TERM = Terminal()
TEMPLATE = '{ts} ' \
'{t.yellow}{commit}{t.normal} ' \
'{t.green}({author}){t.normal} ' \
'{t.yellow}{repo}{t.normal} ' \
'{t.red}[{branch}]{t.normal} ' \
'{subject}'
@click.command()
@click.option('-s','--start', help='Start date (ex: "2017-05-01")')
@click.option('-e','--end', help='End date (ex: "2017-05-31")')
@click.option('-m','--month', help='Month (ex: "2017-05")')
@click.argument('filename')
def main(start, end, month, filename):
    """
    filename: Filename containing list of abs paths to repositories.
    """
    # Either an explicit start/end pair or a whole month must be given.
    if not month and not (start and end):
        raise Exception('Enter start/end dates or a month.')
    start, end = get_start_end(start=start, end=end, month=month)
    click.echo('start: %s' % start)
    click.echo(' end: %s' % end)
    click.echo('')
    commits_by_date = {}
    click.echo('Reading list...')
    repo_paths = get_repos_list(filename)
    click.echo('Gathering data...')
    for path in repo_paths:
        click.echo(path)
        commits = repo_commits(get_repo(path), start, end)
        commits_by_date = assign_to_date(commits, commits_by_date)
    click.echo('')
    for day in sorted(commits_by_date):
        print_day(day, commits_by_date[day], TEMPLATE)
def get_repos_list(filename):
    """Read repository paths from ``filename``, one per line.

    Lines starting with '#' are comments. Blank and whitespace-only lines
    are skipped so stray empty lines don't turn into bogus repo paths
    (the previous version kept them and would later crash git.Repo('')).
    """
    with open(filename, 'r') as f:
        return [
            line.strip()
            for line in f.read().splitlines()
            if line.strip() and not line.lstrip().startswith('#')
        ]
def get_start_end(start=None, end=None, month=None):
    """Resolve the reporting window.

    Either both ``start`` and ``end`` are given (parsed as dates), or
    ``month`` is given and the window spans that whole calendar month.

    @param start: str
    @param end: str
    @param month: str
    """
    if start and end:
        return parser.parse(start), parser.parse(end)
    if month:
        first_day = parser.parse(month)
        last_day_number = calendar.monthrange(first_day.year, first_day.month)[1]
        return first_day, date(first_day.year, first_day.month, last_day_number)
    # Neither form supplied: pass the inputs back unchanged.
    return start, end
def get_repo(path):
    # Thin wrapper around git.Repo; isolated for easier substitution in tests.
    return git.Repo(path)
def repo_commits(repo, since, until):
    """Collect commits on all branches of ``repo`` within [since, until].

    Returns a list of commit dicts, newest first, with keys: repo, commit,
    date, ts, author, branch, subject.

    @param repo: git.Repo
    @param since: datetime
    @param until: datetime
    """
    repo_name = os.path.basename(repo.working_dir)
    # git log --all --since=... --until=... --no-merges --pretty=format:"%h|%ci|%cn|%d|%s"
    raw = repo.git.log(
        '--all',
        '--no-merges',
        '--since=%s' % since.strftime('%Y-%m-%d'),
        '--until=%s' % until.strftime('%Y-%m-%d'),
        '--pretty=format:"%h|%ci|%cn|%d|%s"',
    )
    commits = []
    branch = ''
    for line in raw.splitlines():
        # maxsplit=4 keeps subjects containing '|' intact (the old
        # unbounded split raised ValueError on such commits).
        commit, rawdate, email, refname, subject = line.strip().replace('"', '').split('|', 4)
        # git only prints ref names on the first commit of each branch;
        # carry the last seen name forward to the following commits.
        if refname:
            # strip out parens, keep only final branch name
            branch = refname.strip().replace('(', '').replace(')', '')
        ts = parser.parse(rawdate)
        commits.append({
            'repo': repo_name,
            'commit': commit,
            'date': rawdate,
            'ts': ts,
            'author': email,
            'branch': branch,
            'subject': subject,
        })
    # BUG FIX: sorted() returns a new list and the old code discarded it,
    # so only git's output order was reversed. Sort in place, then reverse
    # for newest-first ordering.
    commits.sort(key=lambda commit: commit['ts'])
    commits.reverse()
    return commits
def assign_to_date(commits, dates):
    """Bucket commits into ``dates`` keyed by '%Y-%m-%d'.

    @param commits: list of commit dicts (each with a 'ts' datetime)
    @param dates: dict of commit lists keyed by date string; mutated in
        place and also returned
    @returns: dict of lists by dates
    """
    for commit in commits:
        day = commit['ts'].strftime('%Y-%m-%d')
        # setdefault never clobbers an existing bucket; the old truthiness
        # check replaced an already-present empty list with a new one.
        dates.setdefault(day, []).append(commit)
    return dates
def print_commits(commits, template):
    """Echo the given commits, newest first, using ``template``."""
    # BUG FIX: sorted() returns a new list and the old code discarded it,
    # only reversing the incoming order. Sort in place before reversing.
    commits.sort(key=lambda commit: commit['ts'])
    commits.reverse()
    for c in commits:
        click.echo(template.format(
            t=TERM,
            commit=c['commit'],
            ts=c['ts'].strftime('%H:%M:%S'),
            repo=c['repo'],
            branch=c['branch'],
            author=c['author'],
            subject=c['subject'],
        ))
def print_day(dstr, commits, template):
    """Echo a separator, the day heading, and that day's commits."""
    day = parser.parse(dstr)
    click.echo('------------------------------------------------------------------------')
    click.echo(day.strftime('%Y-%m-%d %A'))
    print_commits(commits, template)
    click.echo('')
if __name__ == '__main__':
main()
| 28.092308 | 114 | 0.582147 |
acf43289b0d6000681d3b7607197b0bd9a4bb1e6 | 3,355 | py | Python | data/process_data.py | adagloria/DisasterResponse | de6620b2530d414615a49f5ab3f6069698148f2d | [
"MIT"
] | null | null | null | data/process_data.py | adagloria/DisasterResponse | de6620b2530d414615a49f5ab3f6069698148f2d | [
"MIT"
] | null | null | null | data/process_data.py | adagloria/DisasterResponse | de6620b2530d414615a49f5ab3f6069698148f2d | [
"MIT"
] | null | null | null | import sys
import pandas as pd
import numpy as np
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
    '''
    Load the messages and categories CSV files and join them on 'id'.

    Parameters:
    ---------------------------------------------------------
    messages_filepath - path to messages.csv file
    categories_filepath - path to categories.csv file

    Returns:
    ---------------------------------------------------------
    df - merged DataFrame of messages and categories
    '''
    messages = pd.read_csv(messages_filepath)
    categories = pd.read_csv(categories_filepath)
    return pd.merge(messages, categories, on='id')
def clean_data(df):
    '''
    Split the 'categories' column into one numeric column per category,
    name the new columns, and drop duplicate rows.

    Parameters:
    ---------------------------------------------------------
    df - raw merged DataFrame with a 'categories' column of the form
         'related-1;request-0;...'

    Returns:
    ---------------------------------------------------------
    df - clean data Pandas DataFrame
    '''
    # One column per ';'-separated category entry.
    categories = df['categories'].str.split(pat=';', n=-1, expand=True)
    # Column names come from the first row, minus the trailing '-<digit>'.
    first_row = categories.iloc[0]
    categories.columns = [value[:-2] for value in first_row]
    for column in categories:
        # BUG FIX: take the last character of EACH row's value. The old code
        # used categories[column][0][-1], broadcasting the first row's value
        # to the entire column.
        categories[column] = categories[column].str[-1].astype('int')
    # drop the messy categories column in the df
    df = df.drop(columns='categories')
    # add the cleaned categories
    df = pd.concat([df, categories], axis=1, sort=False)
    # BUG FIX: drop_duplicates returns a new frame; the old result was discarded.
    df = df.drop_duplicates()
    return df
def save_data(df, database_filename):
    """
    Save the cleaned DataFrame to an SQLite database (table 'df').

    Parameters:
    ---------------------------------------------------------
    df - Cleaned DataFrame
    database_filename - Database file name, extension included
        (main() passes e.g. 'DisasterResponse.db')
    """
    # BUG FIX: the old code appended a second '.db' to a name that already
    # carried the extension ('DisasterResponse.db' -> 'DisasterResponse.db.db').
    engine = create_engine('sqlite:///{}'.format(database_filename))
    # Replace any existing table so the pipeline can be re-run safely.
    df.to_sql('df', engine, index=False, if_exists='replace')
def main():
    """ETL entry point: load the CSVs, clean them, and store the result."""
    # Expect: script name + messages path + categories path + database path.
    if len(sys.argv) == 4:
        messages_filepath, categories_filepath, database_filepath = sys.argv[1:]
        print('Loading data...\n MESSAGES: {}\n CATEGORIES: {}'.format(messages_filepath, categories_filepath))
        df = load_data(messages_filepath, categories_filepath)
        print('Cleaning data...')
        df = clean_data(df)
        print('Saving data...\n DATABASE: {}'.format(database_filepath))
        save_data(df, database_filepath)
        print('Cleaned data saved to database!')
    else:
        # Wrong argument count: explain the expected invocation.
        print('Please provide the filepaths of the messages and categories '\
              'datasets as the first and second argument respectively, as '\
              'well as the filepath of the database to save the cleaned data '\
              'to as the third argument. \n\nExample: python process_data.py '\
              'disaster_messages.csv disaster_categories.csv '\
              'DisasterResponse.db')
acf4336ee0ae996a80e252a0dde9d6d13b915c1b | 1,429 | py | Python | Adventure6/LinesCirclesAndSpheres.py | AdventuresInMinecraft/code-files | 44ef692e6caa6d8b7a61e2eae0c682f27b0df488 | [
"MIT"
] | 5 | 2017-09-10T17:10:41.000Z | 2021-10-31T21:22:41.000Z | Adventure6/LinesCirclesAndSpheres.py | AdventuresInMinecraft/code-files | 44ef692e6caa6d8b7a61e2eae0c682f27b0df488 | [
"MIT"
] | 2 | 2017-09-11T22:18:14.000Z | 2017-09-12T21:03:23.000Z | Adventure6/LinesCirclesAndSpheres.py | AdventuresInMinecraft/code-files | 44ef692e6caa6d8b7a61e2eae0c682f27b0df488 | [
"MIT"
] | 2 | 2018-12-27T00:05:29.000Z | 2021-03-14T16:13:21.000Z | # Adventure 6: LinesCirclesAndSpheres.py
# From the book: "Adventures in Minecraft", 2nd Edition
# written by David Whale and Martin O'Hanlon, Wiley, 2017
# http://eu.wiley.com/WileyCDA/WileyTitle/productCd-1119439582.html
#
# This program shows how to create lines, spheres and circles in
# Minecraft using the MinecraftDrawing functions in minecraftstuff.
import mcpi.minecraft as minecraft
import mcpi.block as block
import mcpi.minecraftstuff as minecraftstuff
import time
# create the minecraft api (requires a running Minecraft server with the API mod)
mc = minecraft.Minecraft.create()
# create the minecraft drawing object (provides line/circle/sphere primitives)
mcdrawing = minecraftstuff.MinecraftDrawing(mc)
# get the players position — all shapes are anchored to where the player stands
pos = mc.player.getTilePos()
# draw 3 lines: vertical, horizontal and diagonal, in three wool colours
mcdrawing.drawLine(pos.x, pos.y, pos.z,
                   pos.x, pos.y + 20, pos.z,
                   block.WOOL.id, 1)
mcdrawing.drawLine(pos.x, pos.y, pos.z,
                   pos.x + 20, pos.y, pos.z,
                   block.WOOL.id, 2)
mcdrawing.drawLine(pos.x, pos.y, pos.z,
                   pos.x + 20, pos.y + 20, pos.z,
                   block.WOOL.id, 3)
# sleep so the player can move to a different position
time.sleep(5)
# draw a circle above the player (radius 20)
pos = mc.player.getTilePos()
mcdrawing.drawCircle(pos.x, pos.y + 20, pos.z, 20, block.WOOL.id, 4)
time.sleep(5)
# draw a sphere above the player (radius 15)
pos = mc.player.getTilePos()
mcdrawing.drawSphere(pos.x, pos.y + 20, pos.z, 15, block.WOOL.id, 5)
time.sleep(5)
acf4337d487f8db1a373c3507f5a9ab005f8650d | 24,866 | py | Python | gd/api/struct.py | nekitdev/gd.py | b9d5e29c09f953f54b9b648fb677e987d9a8e103 | [
"MIT"
] | 58 | 2020-09-30T16:51:22.000Z | 2022-02-13T17:27:48.000Z | gd/api/struct.py | NeKitDS/gd.py | b9d5e29c09f953f54b9b648fb677e987d9a8e103 | [
"MIT"
] | 30 | 2019-07-29T12:03:41.000Z | 2020-09-15T17:01:37.000Z | gd/api/struct.py | NeKitDS/gd.py | b9d5e29c09f953f54b9b648fb677e987d9a8e103 | [
"MIT"
] | 20 | 2019-12-06T03:16:57.000Z | 2020-09-16T17:45:27.000Z | # type: ignore
# DOCUMENT
from builtins import iter as std_iter
from iters import iter
from gd.api.guidelines import Guidelines
from gd.api.hsv import HSV
from gd.api.recording import Recording, RecordingEntry
from gd.api.utils import get_dir, get_id
from gd.color import Color
from gd.converters import Password, Version
from gd.crypto import decode_base64_str, encode_base64_str, unzip_level_str, zip_level_str
from gd.decorators import cache_by
from gd.enums import (
Easing,
Enum,
Gamemode,
InstantCountComparison,
InternalType,
LevelLength,
LevelType,
PickupItemMode,
PlayerColor,
PortalType,
PulseMode,
PulseType,
Speed,
SpeedChange,
SpeedMagic,
TargetPosCoordinates,
TouchToggleMode,
ZLayer,
)
from gd.index_parser import IndexParser
from gd.iter_utils import is_iterable
from gd.model_backend import (
Base64Field,
BaseField,
BoolField,
EnumField,
FloatField,
IntField,
IterableField,
MappingField,
Model,
ModelField,
ModelIterField,
StrField,
partial,
)
from gd.text_utils import is_level_probably_decoded
from gd.typing import (
TYPE_CHECKING,
Callable,
Dict,
Iterable,
Iterator,
Mapping,
Optional,
Set,
Tuple,
Type,
TypeVar,
Union,
)
__all__ = (
"PORTAL_IDS",
"SPEED_IDS",
"SPEEDS",
"Object",
"ColorChannel",
"Channel",
"Header",
"LevelAPI",
"ColorCollection",
"DEFAULT_COLORS",
)
if TYPE_CHECKING:
    from gd.api.editor import Editor

# Anything color_from() can coerce: a Color, an RGB tuple, a hex string or an int value.
IntoColor = Union[Color, Tuple[int, int, int], str, int]

# Map both Speed portal IDs and SpeedChange object IDs to their
# SpeedMagic value (units travelled per second at that speed).
SPEEDS = {}

for speed in Speed:
    name = speed.name.casefold()
    magic = SpeedMagic.from_name(name)
    speed_change = SpeedChange.from_name(name)
    SPEEDS.update({speed.value: magic.value, speed_change.value: magic.value})

# Do not leak the loop variables at module level.
del speed, name, magic, speed_change

# Object-ID sets used by Object.is_portal() / is_speed() / is_speed_or_portal().
PORTAL_IDS = {portal.value for portal in PortalType}
SPEED_IDS = {speed.value for speed in SpeedChange}
SPEED_AND_PORTAL_IDS = PORTAL_IDS | SPEED_IDS

T = TypeVar("T")
KT = TypeVar("KT")
VT = TypeVar("VT")
KU = TypeVar("KU")
VU = TypeVar("VU")
def color_from(color: IntoColor) -> Color:
    """Coerce ``color`` (Color, int value, hex string or RGB tuple) into a Color.

    Raises ValueError when the input matches none of the known conversions.
    """
    if isinstance(color, Color):
        return color
    if isinstance(color, int):
        return Color(color)
    # Check str before the generic iterable branch: strings are iterable.
    if isinstance(color, str):
        return Color.from_hex(color)
    if is_iterable(color):
        return Color.from_rgb(*color)
    raise ValueError(
        f"Do not know how to convert {color} to color. Known conversions: {IntoColor}."
    )
def map_key_value(
    mapping: Mapping[KT, VT], key_func: Callable[[KT], KU], value_func: Callable[[VT], VU]
) -> Mapping[KU, VU]:
    """Build a new mapping by applying ``key_func`` to keys and ``value_func`` to values."""
    result: Dict[KU, VU] = {}
    for key, value in mapping.items():
        result[key_func(key)] = value_func(value)
    return result
def enum_from_value(value: T, enum_type: Type[Enum]) -> Enum:
    """Look up and return the member of ``enum_type`` matching ``value``."""
    member = enum_type.from_value(value)
    return member
def enum_to_value(enum: Enum) -> T:
    """Unwrap an enum member into its underlying value."""
    return enum.value
class Object(Model):
PARSER = IndexParser(",", map_like=True)
id: int = IntField(index=1, default=0)
x: float = FloatField(index=2, default=0.0)
y: float = FloatField(index=3, default=0.0)
h_flipped: bool = BoolField(index=4)
v_flipped: bool = BoolField(index=5)
rotation: float = FloatField(index=6)
red: int = IntField(index=7, aliases=("r",))
green: int = IntField(index=8, aliases=("g",))
blue: int = IntField(index=9, aliases=("b",))
duration: float = FloatField(index=10)
touch_triggered: bool = BoolField(index=11)
secret_coin_id: int = IntField(index=12)
special_checked: bool = BoolField(index=13)
tint_ground: bool = BoolField(index=14) # deprecated
use_player_color_1: bool = BoolField(index=15)
use_player_color_2: bool = BoolField(index=16)
blending: bool = BoolField(index=17)
# index_18: ... = ?Field(index=18)
# index_19: ... = ?Field(index=19)
editor_layer_1: int = IntField(index=20)
color_1_id: int = IntField(index=21, aliases=("color_1",))
color_2_id: int = IntField(index=22, aliases=("color_2",))
target_color_id: int = IntField(index=23)
z_layer: ZLayer = EnumField(index=24, enum_type=ZLayer, from_field=IntField)
z_order: int = IntField(index=25)
# index_26: ... = ?Field(index=26)
# index_27: ... = ?Field(index=27)
move_x: float = FloatField(index=28)
move_y: float = FloatField(index=29)
easing: Easing = EnumField(index=30, enum_type=Easing, from_field=IntField)
text: str = Base64Field(index=31)
scale: float = FloatField(index=32)
# index_33: ... = ?Field(index=33)
group_parent: bool = BoolField(index=34)
opacity: float = FloatField(index=35)
trigger: bool = BoolField(index=36)
# index_37: ... = ?Field(index=37)
# index_38: ... = ?Field(index=38)
# index_39: ... = ?Field(index=39)
# index_40: ... = ?Field(index=40)
color_1_hsv_enabled: bool = BoolField(index=41)
color_2_hsv_enabled: bool = BoolField(index=42)
color_1_hsv: HSV = ModelField(index=43, model=HSV)
color_2_hsv: HSV = ModelField(index=44, model=HSV)
fade_in_time: float = FloatField(index=45)
hold_time: float = FloatField(index=46)
fade_out_time: float = FloatField(index=47)
pulse_mode: PulseMode = EnumField(index=48, enum_type=PulseMode, from_field=IntField)
copied_color_hsv: HSV = ModelField(index=49, model=HSV)
copied_color_id: int = IntField(index=50)
target_group_id: int = IntField(index=51)
pulse_type: PulseType = EnumField(index=52, enum_type=PulseType, from_field=IntField)
# index_53: ... = ?Field(index=53)
teleport_portal_distance: float = FloatField(index=54)
# index_55: ... = ?Field(index=53)
activate_group: bool = BoolField(index=56)
groups: Set[int] = IterableField(index=57, delim=".", transform=set, from_field=IntField)
lock_to_player_x: bool = BoolField(index=58)
lock_to_player_y: bool = BoolField(index=59)
copy_opacity: bool = BoolField(index=60)
editor_layer_2: int = IntField(index=61)
spawn_triggered: bool = BoolField(index=62)
spawn_duration: float = FloatField(index=63)
do_not_fade: bool = BoolField(index=64)
main_only: bool = BoolField(index=65)
detail_only: bool = BoolField(index=66)
do_not_enter: bool = BoolField(index=67)
degrees: int = IntField(index=68)
full_rotation_times: int = IntField(index=69)
lock_object_rotation: bool = BoolField(index=70)
other_id: int = IntField(
index=71, aliases=("follow_group_id", "target_pos_id", "center_id", "secondary_id"),
)
x_mod: float = FloatField(index=72)
y_mod: float = FloatField(index=73)
# index_74: ... = ?Field(index=74)
strength: float = FloatField(index=75)
animation_id: int = IntField(index=76)
count: int = IntField(index=77)
subtract_count: bool = BoolField(index=78)
pickup_item_mode: PickupItemMode = EnumField(
index=79, enum_type=PickupItemMode, from_field=IntField
)
item_or_block_id: int = IntField(index=80, aliases=("item_id", "block_id", "block_a_id"))
hold_mode: bool = BoolField(index=81)
touch_toggle_mode: TouchToggleMode = EnumField(
index=82, enum_type=TouchToggleMode, from_field=IntField
)
# index_83: ... = ?Field(index=83)
interval: float = FloatField(index=84)
easing_rate: float = FloatField(index=85)
exclusive: bool = BoolField(index=86)
multi_trigger: bool = BoolField(index=87)
comparison: InstantCountComparison = EnumField(
index=88, enum_type=InstantCountComparison, from_field=IntField
)
dual_mode: bool = BoolField(index=89)
speed: float = FloatField(index=90)
follow_y_delay: float = FloatField(index=91)
follow_y_offset: float = FloatField(index=92)
trigger_on_exit: bool = BoolField(index=93)
dynamic_block: bool = BoolField(index=94)
block_b_id: int = IntField(index=95)
disable_glow: bool = BoolField(index=96)
custom_rotation_speed: float = FloatField(index=97)
disable_rotation: float = FloatField(index=98)
multi_activate: bool = BoolField(index=99)
use_target: bool = BoolField(index=100)
target_pos_coordinates: TargetPosCoordinates = EnumField(
index=101, enum_type=TargetPosCoordinates, from_field=IntField
)
editor_disable: bool = BoolField(index=102)
high_detail: bool = BoolField(index=103)
# index_104: ... = ?Field(index=104)
follow_y_max_speed: float = FloatField(index=105)
randomize_start: bool = BoolField(index=106)
animation_speed: float = FloatField(index=107)
linked_group_id: int = IntField(index=108)
... # 2.2 future proofing fields will be added when it gets released
def h_flip(self) -> "Object":
self.h_flipped = not self.h_flipped
def v_flip(self) -> "Object":
self.v_flipped = not self.v_flipped
def set_id(self, directive: str) -> "Object":
self.id = get_id(directive)
return self
def set_z_layer(self, directive: str) -> "Object":
self.z_layer = get_id(get_dir(directive, "layer"), into_enum=True)
return self
def set_easing(self, directive: str) -> "Object":
self.easing = get_id(get_dir(directive, "easing"), into_enum=True)
return self
def add_groups(self, *groups: int) -> "Object":
if self.groups is None:
self.groups = set(groups)
else:
self.groups |= set(groups)
return self
def remove_groups(self, *groups: int) -> "Object":
if self.groups is not None:
self.groups -= set(groups)
return self
def get_pos(self) -> Tuple[float, float]:
return (self.x, self.y)
def set_pos(self, x: float, y: float) -> "Object":
self.x = x
self.y = y
return self
def move(self, x: float = 0.0, y: float = 0.0) -> "Object":
self.x += x
self.y += y
return self
def rotate(self, degrees: float = 0.0) -> "Object":
if self.rotation is None:
self.rotation = degrees
else:
self.rotation += degrees
return self
    def is_checked(self) -> bool:
        """Return whether the ``special_checked`` flag is set on this object."""
        return self.special_checked
    def is_portal(self) -> bool:
        """Return True when this object's id is one of the known portal ids."""
        return self.id in PORTAL_IDS
    def is_speed(self) -> bool:
        """Return True when this object's id is one of the speed-change ids."""
        return self.id in SPEED_IDS
    def is_speed_or_portal(self) -> bool:
        """Return True when this object is either a speed change or a portal."""
        return self.id in SPEED_AND_PORTAL_IDS
class ColorChannel(Model):
    """One color channel of a level: id, RGB components, blending and
    copy settings. Serialised as '_'-separated key/value pairs."""
    PARSER = IndexParser("_", map_like=True)
    red: int = IntField(index=1, default=255, aliases=("r",))
    green: int = IntField(index=2, default=255, aliases=("g",))
    blue: int = IntField(index=3, default=255, aliases=("b",))
    player_color: PlayerColor = EnumField(
        index=4, enum_type=PlayerColor, from_field=IntField, default=PlayerColor.NotUsed
    )
    blending: bool = BoolField(index=5, default=False)
    id: int = IntField(index=6, default=0)
    opacity: float = FloatField(index=7, default=1.0)
    index_8: bool = BoolField(index=8, default=True)
    copied_id: int = IntField(index=9)
    hsv: HSV = ModelField(index=10, model=HSV)
    unknown_red: int = IntField(index=11, default=255, aliases=("unknown_r",))
    unknown_green: int = IntField(index=12, default=255, aliases=("unknown_g",))
    unknown_blue: int = IntField(index=13, default=255, aliases=("unknown_b",))
    index_15: bool = BoolField(index=15, default=True)
    copy_opacity: bool = BoolField(index=17)
    index_18: bool = BoolField(index=18, default=False)
    def __init__(self, directive: Optional[str] = None, **kwargs) -> None:
        """Create a channel; when ``directive`` is given it is resolved to the
        channel id via :meth:`set_id`."""
        super().__init__(**kwargs)
        if directive is not None:
            self.set_id(directive)
    def set_id(self, directive: str) -> "ColorChannel":
        """Resolve ``directive`` to a channel id (via get_dir/get_id) and set
        it; returns self for chaining."""
        self.id = get_id(get_dir(directive, "color"))
        return self
    def get_color(self) -> Color:
        """Return the channel color as a ``Color`` (reads the r/g/b field
        aliases declared above)."""
        return Color.from_rgb(self.r, self.g, self.b)
    def set_color(self, color: IntoColor) -> "ColorChannel":
        """Set r/g/b from anything ``color_from`` accepts (hex int, Color, ...);
        returns self for chaining."""
        new = color_from(color)
        self.r = new.r
        self.g = new.g
        self.b = new.b
        return self
    # read/write access to the whole channel color as a single property
    color = property(get_color, set_color)
# Channels a fresh level ships with: background, ground, line, player 1/2
# and ground 2 (hex values are the in-game defaults).
DEFAULT_COLORS = (
    ColorChannel("BG").set_color(0x287DFF),
    ColorChannel("G").set_color(0x0066FF),
    ColorChannel("Line").set_color(0xFFFFFF),
    ColorChannel("P1").set_color(0x7DFF00),
    ColorChannel("P2").set_color(0x00FFFF),
    ColorChannel("G2").set_color(0x0066FF),
)
# Short alias kept for convenience / backwards compatibility.
Channel = ColorChannel
class ColorCollection(set):
    """A set of ColorChannel objects with id/directive lookup helpers.

    NOTE(review): copy/clone and the wrapped set operators rebuild the result
    via __init__, whose ``use_default`` flag defaults to True — so the default
    channels are re-added even when the source collection was created with
    ``use_default=False``. Verify that this is intended.
    """
    def __init__(
        self, iterable: Optional[Iterable[ColorChannel]] = None, use_default: bool = True,
    ) -> None:
        # start from the built-in default channels unless explicitly disabled
        if use_default:
            super().__init__(DEFAULT_COLORS)
        else:
            super().__init__()
        if iterable is not None:
            super().update(iterable)
    @classmethod
    def new(cls, *args: ColorChannel, use_default: bool = True) -> "ColorCollection":
        """Variadic constructor: ``ColorCollection.new(ch1, ch2, ...)``."""
        return cls(args, use_default=use_default)
    def remove(self, directive_or_id: Union[int, str]) -> None:
        # NOTE: shadows set.remove with discard-by-id semantics (never raises)
        self.discard(self.get(directive_or_id))
    def copy(self) -> "ColorCollection":
        """Return a new collection holding copies of every channel."""
        return self.__class__(channel.copy() for channel in self)
    def clone(self) -> "ColorCollection":
        """Return a new collection holding clones of every channel."""
        return self.__class__(channel.clone() for channel in self)
    # The wrappers below only re-type the built-in set algebra so results stay
    # ColorCollection instead of degrading to a plain set.
    def difference(self, other: Iterable[ColorChannel]) -> "ColorCollection":
        return self.__class__(super().difference(other))
    def intersection(self, other: Iterable[ColorChannel]) -> "ColorCollection":
        return self.__class__(super().intersection(other))
    def symmetric_difference(self, other: Iterable[ColorChannel]) -> "ColorCollection":
        return self.__class__(super().symmetric_difference(other))
    def union(self, other: Iterable[ColorChannel]) -> "ColorCollection":
        return self.__class__(super().union(other))
    def update(self, other: Iterable[ColorChannel]) -> "ColorCollection":
        # unlike set.update, returns self so calls can be chained
        super().update(other)
        return self
    def get(self, directive_or_id: Union[int, str]) -> Optional[ColorChannel]:
        """Find a channel by numeric id, or by a directive string resolved via
        the color helpers.

        NOTE(review): built-in iterators have no ``.get``; this appears to rely
        on a project-specific iterator wrapper being in effect — confirm.
        """
        if isinstance(directive_or_id, str):
            id = get_id(get_dir(directive_or_id, "color"))
        else:
            id = directive_or_id
        return iter(self).get(id=id)
    def __or__(self, other: Set[ColorChannel]) -> "ColorCollection":
        return self.__class__(super().__or__(other))
    def __xor__(self, other: Set[ColorChannel]) -> "ColorCollection":
        return self.__class__(super().__xor__(other))
    def __sub__(self, other: Set[ColorChannel]) -> "ColorCollection":
        return self.__class__(super().__sub__(other))
    def __and__(self, other: Set[ColorChannel]) -> "ColorCollection":
        return self.__class__(super().__and__(other))
    def __ror__(self, other: Set[ColorChannel]) -> "ColorCollection":
        return self.__class__(super().__ror__(other))
    def __rxor__(self, other: Set[ColorChannel]) -> "ColorCollection":
        return self.__class__(super().__rxor__(other))
    def __rsub__(self, other: Set[ColorChannel]) -> "ColorCollection":
        return self.__class__(super().__rsub__(other))
    def __rand__(self, other: Set[ColorChannel]) -> "ColorCollection":
        return self.__class__(super().__rand__(other))
class Header(Model):
    """Level header: audio, start-state flags ('kA*' keys) and the color
    settings ('kS*' keys). Serialised as ','-separated key/value pairs."""
    PARSER = IndexParser(",", map_like=True)
    audio_track: int = IntField(index="kA1")
    gamemode: Gamemode = EnumField(
        index="kA2", enum_type=Gamemode, from_field=IntField, default=Gamemode.CUBE
    )
    minimode: bool = BoolField(index="kA3", default=False)
    speed: Speed = EnumField(
        index="kA4", enum_type=Speed, from_field=IntField, default=Speed.NORMAL
    )
    index_kA5: str = StrField(index="kA5") # need to check this, something to do with blending
    background: int = IntField(index="kA6", default=0)
    ground: int = IntField(index="kA7", default=0)
    dual_mode: bool = BoolField(index="kA8", default=False)
    has_start_pos: bool = BoolField(index="kA9", default=False)
    two_player_mode: bool = BoolField(index="kA10", default=False)
    flip_gravity: bool = BoolField(index="kA11", default=False)
    song_offset: float = FloatField(index="kA13", default=0.0)
    # editor guidelines: '~'-separated mapping of timestamp -> line color value
    guidelines: Guidelines = MappingField(
        index="kA14",
        delim="~",
        transform=Guidelines,
        key_from_field=FloatField,
        value_from_field=FloatField,
        skip_empty=True,
    )
    song_fade_in: bool = BoolField(index="kA15", default=False)
    song_fade_out: bool = BoolField(index="kA16", default=False)
    ground_line: int = IntField(index="kA17", default=0)
    font: int = IntField(index="kA18", default=0)
    # the level's color channels, '|'-separated under kS38
    colors: Set[ColorChannel] = ModelIterField(
        index="kS38",
        model=ColorChannel,
        delim="|",
        transform=partial(ColorCollection, use_default=False),
        factory=ColorCollection,
    )
    color_pages: int = IntField(index="kS39", default=0)
    # flat per-component color keys (kS1-kS20) — presumably an older format;
    # NOTE(review): background/ground declare _b before _g (kS2/kS3, kS5/kS6)
    # while line uses g-then-b (kS8/kS9) — verify the component order per key.
    background_r: int = IntField(index="kS1")
    background_b: int = IntField(index="kS2")
    background_g: int = IntField(index="kS3")
    ground_r: int = IntField(index="kS4")
    ground_b: int = IntField(index="kS5")
    ground_g: int = IntField(index="kS6")
    line_r: int = IntField(index="kS7")
    line_g: int = IntField(index="kS8")
    line_b: int = IntField(index="kS9")
    object_r: int = IntField(index="kS10")
    object_g: int = IntField(index="kS11")
    object_b: int = IntField(index="kS12")
    color_1_r: int = IntField(index="kS13")
    color_1_g: int = IntField(index="kS14")
    color_1_b: int = IntField(index="kS15")
    background_player_color: PlayerColor = EnumField(
        index="kS16", enum_type=PlayerColor, from_field=IntField
    )
    ground_player_color: PlayerColor = EnumField(
        index="kS17", enum_type=PlayerColor, from_field=IntField
    )
    line_player_color: PlayerColor = EnumField(
        index="kS18", enum_type=PlayerColor, from_field=IntField
    )
    object_player_color: PlayerColor = EnumField(
        index="kS19", enum_type=PlayerColor, from_field=IntField
    )
    color_1_player_color: PlayerColor = EnumField(
        index="kS20", enum_type=PlayerColor, from_field=IntField
    )
    # whole-channel models under kS29-kS37 — presumably a pre-kS38 format
    background_color: ColorChannel = ModelField(index="kS29", model=ColorChannel)
    ground_color: ColorChannel = ModelField(index="kS30", model=ColorChannel)
    line_color: ColorChannel = ModelField(index="kS31", model=ColorChannel)
    object_color: ColorChannel = ModelField(index="kS32", model=ColorChannel)
    color_1: ColorChannel = ModelField(index="kS33", model=ColorChannel)
    color_2: ColorChannel = ModelField(index="kS34", model=ColorChannel)
    color_3: ColorChannel = ModelField(index="kS35", model=ColorChannel)
    color_4: ColorChannel = ModelField(index="kS36", model=ColorChannel)
    color_3dl: ColorChannel = ModelField(index="kS37", model=ColorChannel)
class LevelAPI(Model):
    """A level as stored in the local save data, keyed by 'k*'/'kI*' ids,
    with convenience properties for password, data and recording access."""
    ENFORCE_STR = False
    # hidden from repr: both hold large (usually zipped) blobs
    REPR_IGNORE = {"unprocessed_data", "recording_string"}
    id: int = BaseField(index="k1", de=int, ser=int, default=0)
    name: str = BaseField(index="k2", de=str, ser=str, default="Unnamed")
    description: str = BaseField(index="k3", de=decode_base64_str, ser=encode_base64_str)
    # raw level data; may be zipped — use the `data` property for transparent access
    unprocessed_data: str = BaseField(index="k4", de=str, ser=str)
    creator: str = BaseField(index="k5", de=str, ser=str)
    track_id: int = BaseField(index="k8", de=int, ser=int)
    downloads: int = BaseField(index="k11", de=int, ser=int)
    index_k13: bool = BaseField(index="k13", de=bool, ser=bool, default=True)
    verified: bool = BaseField(index="k14", de=bool, ser=bool)
    uploaded: bool = BaseField(index="k15", de=bool, ser=bool)
    version: int = BaseField(index="k16", de=int, ser=int, default=1)
    attempts: int = BaseField(index="k18", de=int, ser=int)
    normal_mode_percentage: int = BaseField(index="k19", de=int, ser=int)
    practice_mode_percentage: int = BaseField(index="k20", de=int, ser=int)
    level_type: LevelType = BaseField(
        index="k21", de=partial(enum_from_value, enum_type=LevelType), ser=enum_to_value
    )
    likes: int = BaseField(index="k22", de=int, ser=int)
    length: LevelLength = BaseField(
        index="k23", de=partial(enum_from_value, enum_type=LevelLength), ser=enum_to_value,
    )
    stars: int = BaseField(index="k26", de=int, ser=int)
    # raw attempt recording; may be zipped — use the `recording` property
    recording_string: str = BaseField(index="k34", de=str, ser=str)
    jumps: int = BaseField(index="k36", de=int, ser=int)
    password_field: Password = BaseField(
        index="k41", de=Password.from_robtop_number, ser=Password.to_robtop_number
    )
    original_id: int = BaseField(index="k42", de=int, ser=int)
    song_id: int = BaseField(index="k45", de=int, ser=int)
    revision: int = BaseField(index="k46", de=int, ser=int)
    index_k47: bool = BaseField(index="k47", de=bool, ser=bool, default=True)
    object_count: int = BaseField(index="k48", de=int, ser=int)
    binary_version: Version = BaseField(
        index="k50", de=Version.from_number, ser=Version.to_number, default=Version(3, 5),
    )
    # NOTE: "coint" is clearly a typo for "coin", but the name is part of the
    # public interface and is kept as-is for compatibility.
    first_coint_acquired: bool = BaseField(index="k61", de=bool, ser=bool)
    second_coin_acquired: bool = BaseField(index="k62", de=bool, ser=bool)
    third_coin_acquired: bool = BaseField(index="k63", de=bool, ser=bool)
    requested_stars: int = BaseField(index="k66", de=int, ser=int)
    extra_string: str = BaseField(index="k67", de=str, ser=str)
    timely_id: int = BaseField(index="k74", de=int, ser=int)
    unlisted: bool = BaseField(index="k79", de=bool, ser=bool)
    editor_seconds: int = BaseField(index="k80", de=int, ser=int)
    copies_seconds: int = BaseField(index="k81", de=int, ser=int)
    folder: int = BaseField(index="k84", de=int, ser=int)
    # editor camera / UI state ('kI*' keys)
    x: float = BaseField(index="kI1", de=float, ser=float)
    y: float = BaseField(index="kI2", de=float, ser=float)
    zoom: float = BaseField(index="kI3", de=float, ser=float)
    build_tab_page: int = BaseField(index="kI4", de=int, ser=int)
    build_tab: int = BaseField(index="kI5", de=int, ser=int)
    build_tab_pages_dict: Dict[int, int] = BaseField(
        index="kI6",
        de=partial(map_key_value, key_func=int, value_func=int),
        ser=partial(map_key_value, key_func=str, value_func=str),
    )
    editor_layer: int = BaseField(index="kI7", de=int, ser=int)
    internal_type: InternalType = BaseField(
        index="kCEK",
        de=partial(enum_from_value, enum_type=InternalType),
        ser=enum_to_value,
        default=InternalType.LEVEL,
    )
    def get_password(self) -> Optional[int]:
        """Return the numeric password, or None when no password is stored."""
        if self.password_field is None:
            return None
        return self.password_field.password
    def set_password(self, password: Optional[int]) -> None:
        """Set the numeric password, creating the Password field when needed."""
        if self.password_field is None:
            self.password_field = Password(password)
        else:
            self.password_field.password = password
    # convenience access to the password stored inside password_field
    password = property(get_password, set_password)
    def get_copyable(self) -> bool:
        """Return whether the level is copyable (False when no password field)."""
        if self.password_field is None:
            return False
        return self.password_field.copyable
    def set_copyable(self, copyable: bool) -> None:
        """Set the copyable flag, creating a password-less Password if needed."""
        if self.password_field is None:
            self.password_field = Password(None, copyable)
        else:
            self.password_field.copyable = copyable
    copyable = property(get_copyable, set_copyable)
    @cache_by("unprocessed_data")
    def get_data(self) -> str:
        """Return the level data, unzipping it when it is stored compressed."""
        unprocessed_data = self.unprocessed_data
        if unprocessed_data is None:
            return ""
        if is_level_probably_decoded(unprocessed_data):
            return unprocessed_data
        else:
            return unzip_level_str(unprocessed_data)
    def set_data(self, data: str) -> None:
        """Store level data, zipping it first when it arrives decoded."""
        if is_level_probably_decoded(data):
            self.unprocessed_data = zip_level_str(data)
        else:
            self.unprocessed_data = data
    data = property(get_data, set_data)
    @cache_by("recording_string")
    def get_recording(self) -> Recording:
        """Return the attempt recording, decoded from its zipped string form."""
        if self.recording_string is None:
            return Recording()
        return Recording.from_string(unzip_level_str(self.recording_string))
    def set_recording(self, recording: Iterable[RecordingEntry]) -> None:
        """Serialise and zip ``recording`` into ``recording_string``."""
        self.recording_string = zip_level_str(Recording.collect_string(recording))
    recording = property(get_recording, set_recording)
    @cache_by("recording_string")
    def iter_recording(self) -> Iterator[RecordingEntry]:
        """Lazily yield recording entries; empty iterator when none stored."""
        if self.recording_string is None:
            return std_iter(())
        return Recording.iter_string(unzip_level_str(self.recording_string))
    def open_editor(self) -> "Editor":
        """Load this level's data into an Editor.

        The import is local, presumably to avoid a circular import.
        """
        from gd.api.editor import Editor
        return Editor.load_from(self, "data")
    def to_dict(self) -> Dict[str, T]:
        """Extend the base dict with the derived password/copyable values."""
        result = super().to_dict()
        result.update(password=self.password, copyable=self.copyable)
        return result
| 35.371266 | 95 | 0.670836 |
acf4346d40c82df3c374a64e9949719ae7b1b999 | 11,377 | py | Python | src/DyldExtractor/converter/slide_info.py | arandomdev/DyldExtractor | 8471c9b6c7c62d97d350d89d1e0af16d01d3630d | [
"MIT"
] | 177 | 2020-11-03T21:02:45.000Z | 2022-03-26T00:01:08.000Z | src/DyldExtractor/converter/slide_info.py | arandomdev/DyldExtractor | 8471c9b6c7c62d97d350d89d1e0af16d01d3630d | [
"MIT"
] | 30 | 2020-11-05T09:41:16.000Z | 2022-03-30T12:27:08.000Z | src/DyldExtractor/converter/slide_info.py | arandomdev/DyldExtractor | 8471c9b6c7c62d97d350d89d1e0af16d01d3630d | [
"MIT"
] | 20 | 2020-11-05T08:19:12.000Z | 2022-03-26T00:01:24.000Z | import struct
from typing import (
Type,
TypeVar,
Union,
)
from DyldExtractor.extraction_context import ExtractionContext
from DyldExtractor.structure import Structure
from DyldExtractor.dyld.dyld_context import DyldContext
from DyldExtractor.dyld.dyld_constants import *
from DyldExtractor.dyld.dyld_structs import (
dyld_cache_mapping_and_slide_info,
dyld_cache_mapping_info,
dyld_cache_slide_info2,
dyld_cache_slide_info3,
dyld_cache_slide_pointer3
)
from DyldExtractor.macho.macho_context import MachOContext
from DyldExtractor.macho.macho_structs import (
segment_command_64
)
# Maps the on-disk slide info version number (first uint32 of the blob) to
# the structure class that parses that version.
_SlideInfoMap = {
	2: dyld_cache_slide_info2,
	3: dyld_cache_slide_info3
}
class _V2Rebaser(object):
	"""Applies version-2 slide info to a MachO image.

	Walks the cache's page_starts table and, for every page overlapping a
	segment of the target image, follows the intra-page delta chain and
	rewrites each pointer with the slide metadata stripped.
	"""
	def __init__(
		self,
		extractionCtx: ExtractionContext,
		mapping: dyld_cache_mapping_info,
		slideInfo: dyld_cache_slide_info2
	) -> None:
		super().__init__()
		self.statusBar = extractionCtx.statusBar
		self.dyldCtx = extractionCtx.dyldCtx
		self.machoCtx = extractionCtx.machoCtx
		self.logger = extractionCtx.logger
		self.mapping = mapping
		self.slideInfo = slideInfo
	def run(self) -> None:
		"""Process all slide info.
		"""
		self.statusBar.update(unit="Slide Info Rebaser")
		# get pageStarts, an array of uint_16
		pageStartOff = self.slideInfo._fileOff_ + self.slideInfo.page_starts_offset
		self.dyldCtx.file.seek(pageStartOff)
		pageStarts = self.dyldCtx.file.read(self.slideInfo.page_starts_count * 2)
		pageStarts = [page[0] for page in struct.iter_unpack("<H", pageStarts)]
		for segment in self.machoCtx.segmentsI:
			self._rebaseSegment(pageStarts, segment.seg)
	def _rebaseSegment(
		self,
		pageStarts: tuple[int],
		segment: segment_command_64
	) -> None:
		"""Process all slide info for a segment"""
		# check if the segment is included in the mapping
		if not (
			segment.vmaddr >= self.mapping.address
			and segment.vmaddr < self.mapping.address + self.mapping.size
		):
			return
		# get the indices of relevent pageStarts
		dataStart = self.mapping.address
		pageSize = self.slideInfo.page_size
		startAddr = segment.vmaddr - dataStart
		startIndex = int(startAddr / pageSize)
		endAddr = ((segment.vmaddr + segment.vmsize) - dataStart) + pageSize
		endIndex = int(endAddr / pageSize)
		# NOTE(review): this guard steps back two pages in one specific case;
		# the v3 rebaser clamps with min(endIndex, len(pageStarts)) instead —
		# confirm which behaviour is intended here.
		if endIndex == len(pageStarts) + 1:
			endIndex -= 2
			pass
		for i in range(startIndex, endIndex):
			page = pageStarts[i]
			if page == DYLD_CACHE_SLIDE_PAGE_ATTR_NO_REBASE:
				pass
			elif page & DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA:
				# chained "extra" pages are not supported; skip with a warning
				pageAddr = (i * pageSize) + self.mapping.address
				self.logger.warning(f"Unable to handle page extras at {hex(pageAddr)}")
			elif (page & DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA) == 0:
				pageOff = (i * pageSize) + self.mapping.fileOffset
				# The page offset are 32bit jumps
				self._rebasePage(pageOff, page * 4)
			self.statusBar.update(status="Rebasing Pages")
		pass
	def _rebasePage(self, pageStart: int, pageOffset: int) -> None:
		"""Process the slide info for a page.
		Args:
			pageStart: the file offset to the page.
			pageOffset: the offset from the pageStart to the first rebase location.
		"""
		deltaMask = self.slideInfo.delta_mask
		valueMask = ~deltaMask
		valueAdd = self.slideInfo.value_add
		# basically __builtin_ctzll(deltaMask) - 2;
		deltaShift = "{0:b}".format(deltaMask)
		deltaShift = len(deltaShift) - len(deltaShift.rstrip("0"))
		deltaShift = deltaShift - 2
		# each pointer encodes the delta (in 4-byte units) to the next one;
		# a delta of 0 terminates the chain
		delta = 1
		while delta != 0:
			loc = pageStart + pageOffset
			rawValue = self.dyldCtx.readFormat(loc, "<Q")[0]
			delta = (rawValue & deltaMask) >> deltaShift
			newValue = rawValue & valueMask
			# NOTE(review): valueMask (~delta_mask) is always non-zero, so this
			# branch is unconditionally taken; dyld's reference implementation
			# conditions the add on the *value* being non-zero — confirm.
			if valueMask != 0:
				newValue += valueAdd
			self.machoCtx.file[loc:loc + 8] = struct.pack("<Q", newValue)
			pageOffset += delta
			pass
		pass
class _V3Rebaser(object):
	"""Applies version-3 (arm64e) slide info to a MachO image."""
	def __init__(
		self,
		extractionCtx: ExtractionContext,
		mapping: dyld_cache_mapping_info,
		slideInfo: dyld_cache_slide_info3
	) -> None:
		super().__init__()
		self.statusBar = extractionCtx.statusBar
		self.dyldCtx = extractionCtx.dyldCtx
		self.machoCtx = extractionCtx.machoCtx
		self.mapping = mapping
		self.slideInfo = slideInfo
	def run(self) -> None:
		"""Rebase every page of every segment the mapping covers."""
		self.statusBar.update(unit="Slide Info Rebaser")
		# the page starts array (uint16 each) directly follows the header
		pageStartsOff = self.slideInfo._fileOff_ + len(self.slideInfo)
		self.dyldCtx.file.seek(pageStartsOff)
		pageStarts = self.dyldCtx.file.read(self.slideInfo.page_starts_count * 2)
		pageStarts = [page[0] for page in struct.iter_unpack("<H", pageStarts)]
		for segment in self.machoCtx.segmentsI:
			self._rebaseSegment(pageStarts, segment.seg)
	def _rebaseSegment(
		self,
		pageStarts: tuple[int],
		segment: segment_command_64
	) -> None:
		"""Rebase all pages overlapping ``segment``."""
		# check if the segment is included in the mapping
		if not (
			segment.vmaddr >= self.mapping.address
			and segment.vmaddr < self.mapping.address + self.mapping.size
		):
			return
		# get the indices of relevent pageStarts
		dataStart = self.mapping.address
		pageSize = self.slideInfo.page_size
		startAddr = segment.vmaddr - dataStart
		startIndex = int(startAddr / pageSize)
		endAddr = ((segment.vmaddr + segment.vmsize) - dataStart) + pageSize
		endIndex = int(endAddr / pageSize)
		endIndex = min(endIndex, len(pageStarts))
		for i in range(startIndex, endIndex):
			page = pageStarts[i]
			if page == DYLD_CACHE_SLIDE_V3_PAGE_ATTR_NO_REBASE:
				continue
			else:
				# the page-start value is the byte offset of the first pointer
				pageOff = (i * pageSize) + self.mapping.fileOffset
				self._rebasePage(pageOff, page)
			self.statusBar.update(status="Rebasing Pages")
		pass
	def _rebasePage(self, pageOffset, delta) -> None:
		"""Walk one page's pointer chain, rewriting each pointer in place.

		Args:
			pageOffset: file offset of the page.
			delta: byte offset from the page start to the first pointer.
		"""
		locOff = pageOffset
		while True:
			locOff += delta
			locInfo = dyld_cache_slide_pointer3(self.dyldCtx.file, locOff)
			# It appears the delta encoded in the pointers are 64bit jumps...
			delta = locInfo.plain.offsetToNextPointer * 8
			if locInfo.auth.authenticated:
				# authenticated (PAC) pointers store an offset from the cache base
				newValue = locInfo.auth.offsetFromSharedCacheBase
				newValue += self.slideInfo.auth_value_add
			else:
				# plain pointers pack the top 8 bits next to the low 43 bits
				value51 = locInfo.plain.pointerValue
				top8Bits = value51 & 0x0007F80000000000
				bottom43Bits = value51 & 0x000007FFFFFFFFFF
				newValue = (top8Bits << 13) | bottom43Bits
			self.machoCtx.file[locOff:locOff + 8] = struct.pack("<Q", newValue)
			# a zero delta terminates the chain (after rewriting the last pointer)
			if delta == 0:
				break
def _getMappingSlidePairs(
	extractionCtx: ExtractionContext
) -> list[tuple[Union[dyld_cache_mapping_info, dyld_cache_slide_info2]]]:
	"""Pair each cache mapping that has slide info with its parsed slide info.

	Supports both the legacy layout (a single blob referenced by
	``slideInfoOffsetUnused``) and the newer per-mapping records
	(``mappingWithSlideOffset``).

	Args:
		extractionCtx: The extraction context (provides the dyld context
			and logger).

	Returns:
		A list of (mapping, slide info) tuples, or None when the slide info
		cannot be located or uses an unsupported version.
	"""
	dyldCtx = extractionCtx.dyldCtx
	logger = extractionCtx.logger
	mappingSlidePairs = []
	if dyldCtx.header.slideInfoOffsetUnused:
		# the version is encoded as the first uint32 field
		slideInfoOff = dyldCtx.header.slideInfoOffsetUnused
		dyldCtx.file.seek(slideInfoOff)
		slideInfoVersion = struct.unpack("<I", dyldCtx.file.read(4))[0]
		if slideInfoVersion not in _SlideInfoMap:
			# f-string: the version is an int, so "+" concatenation with the
			# message string raised TypeError here before
			logger.error(f"Unknown slide info version: {slideInfoVersion}")
			return None
		# Assume that only the second mapping has slide info
		mapping = dyldCtx.mappings[1]
		slideInfo = _SlideInfoMap[slideInfoVersion](dyldCtx.file, slideInfoOff)
		mappingSlidePairs.append((mapping, slideInfo))
	elif dyldCtx.headerContainsField("mappingWithSlideOffset"):
		# slide info is now in different location
		for i in range(dyldCtx.header.mappingWithSlideCount):
			mappingOff = dyldCtx.header.mappingWithSlideOffset
			mappingOff += i * dyld_cache_mapping_and_slide_info.SIZE
			mapping = dyld_cache_mapping_and_slide_info(dyldCtx.file, mappingOff)
			if mapping.slideInfoFileOffset:
				dyldCtx.file.seek(mapping.slideInfoFileOffset)
				slideInfoVersion = struct.unpack("<I", dyldCtx.file.read(4))[0]
				if slideInfoVersion not in _SlideInfoMap:
					logger.error(f"Unknown slide info version: {slideInfoVersion}")
					continue
				slideInfoStruct = _SlideInfoMap[slideInfoVersion]
				slideInfo = slideInfoStruct(dyldCtx.file, mapping.slideInfoFileOffset)
				mappingSlidePairs.append((mapping, slideInfo))
	else:
		logger.error("Unable to get slide info!")
		return None
	return mappingSlidePairs
# Type variable bound to Structure; used by PointerSlider.slideStruct so the
# returned instance keeps the caller-supplied structure type.
StructureT = TypeVar("StructureT", bound=Structure)
class PointerSlider(object):
	"""Decodes slide info for individual pointers, by address or file offset."""
	def __init__(self, extractionCtx: ExtractionContext) -> None:
		"""Provides a way to slide individual pointers.
		"""
		super().__init__()
		self._dyldCtx = extractionCtx.dyldCtx
		self._mappingSlidePairs = _getMappingSlidePairs(extractionCtx)
	def slideAddress(self, address: int) -> int:
		"""Slide and return the pointer at the address.
		Args:
			address: The address of the pointer.
		Returns:
			The slide version of the pointer. This will return None if
			the pointer could not be slid.
		"""
		if not (offset := self._dyldCtx.convertAddr(address)):
			return None
		return self.slideOffset(offset)
	def slideOffset(self, offset: int) -> int:
		"""Slide and return the pointer at the file offset.
		Args:
			offset: The file offset.
		Returns:
			The slide version of the pointer. This will return None if
			the pointer could not be slid.
		"""
		# find the mapping whose file range contains the offset
		for pair in self._mappingSlidePairs:
			mapping = pair[0]
			mappingHighBound = mapping.fileOffset + mapping.size
			if offset >= mapping.fileOffset and offset < mappingHighBound:
				slideInfo = pair[1]
				# regular arm64 pointer
				if slideInfo.version == 2:
					# NOTE(review): masks to the low 36 bits, dropping the packed
					# delta bits — confirm against this cache's delta/value masks.
					return self._dyldCtx.readFormat(offset, "<Q")[0] & 0xfffffffff
				# arm64e pointer
				elif slideInfo.version == 3:
					ptrInfo = dyld_cache_slide_pointer3(self._dyldCtx.file, offset)
					if ptrInfo.auth.authenticated:
						newValue = ptrInfo.auth.offsetFromSharedCacheBase
						return newValue + slideInfo.auth_value_add
					else:
						# rebuild the value from the packed top 8 / low 43 bits
						value51 = ptrInfo.plain.pointerValue
						top8Bits = value51 & 0x0007F80000000000
						bottom43Bits = value51 & 0x000007FFFFFFFFFF
						return (top8Bits << 13) | bottom43Bits
				else:
					return None
		return None
	def slideStruct(
		self,
		address: int,
		structDef: Type[StructureT]
	) -> StructureT:
		"""Read and slide a structure at the address.
		This will use the _pointers_ class property to
		slide the correct variables. If the structure does
		not have this, nothing will be slid.
		Args:
			address: The address of the structure.
			structure: The structure class to fill.
		Return:
			The filled and slid structure.
		"""
		structOff = self._dyldCtx.convertAddr(address)
		structData = structDef(self._dyldCtx.file, structOff)
		# slide each declared pointer field in place
		if ptrNames := getattr(structData, "_pointers_", None):
			for ptrName in ptrNames:
				ptrOff = structOff + getattr(structDef, ptrName).offset
				slidPtr = self.slideOffset(ptrOff)
				setattr(structData, ptrName, slidPtr)
				pass
			pass
		return structData
def processSlideInfo(extractionCtx: ExtractionContext) -> None:
	"""Process and remove rebase info.
	Pointers in the Dyld shared cache don't have the usual rebase info
	found in regular MachO files. Instead they put that info in the pointer
	themselves. This results in pointers that look like this 0x800XXXXXXXXXX.
	This removes that info.
	Args:
		extractionCtx: The extraction context. Its MachO context must be
			writable, as the pointers are rewritten in place.
	"""
	logger = extractionCtx.logger
	# get a list of mapping and slide info
	mappingSlidePairs = _getMappingSlidePairs(extractionCtx)
	if not mappingSlidePairs:
		return
	# Process each pair
	for pair in mappingSlidePairs:
		if pair[1].version == 2:
			_V2Rebaser(extractionCtx, pair[0], pair[1]).run()
		elif pair[1].version == 3:
			_V3Rebaser(extractionCtx, pair[0], pair[1]).run()
		else:
			logger.error("Unknown slide version.")
| 27.480676 | 77 | 0.736398 |
acf43483d538ea38a357eeb0450cc740ef80be01 | 6,451 | py | Python | sentence_suggestion/train_nn_models/sequence_model_trainer.py | LuoDingo/Langauge_model | f10e18ac2c9f31b187f77bc8f927ffb6b8d77d7e | [
"MIT"
] | null | null | null | sentence_suggestion/train_nn_models/sequence_model_trainer.py | LuoDingo/Langauge_model | f10e18ac2c9f31b187f77bc8f927ffb6b8d77d7e | [
"MIT"
] | 2 | 2020-03-24T15:15:23.000Z | 2020-04-16T01:41:17.000Z | sentence_suggestion/train_nn_models/sequence_model_trainer.py | LuoDingo/Langauge_model | f10e18ac2c9f31b187f77bc8f927ffb6b8d77d7e | [
"MIT"
] | null | null | null | import torch
import time
import math
def _epoch_time(start_time):
total = time.time() - start_time
return int(total/60), int(total%60)
def _default_init_weights(model):
for name, param in model.named_parameters():
torch.nn.init.normal_(param.data, mean=0, std=0.01)
class TrainModel():
    """Train, validate and test a keyword-to-sentence sequence model.

    The model is called as ``model(keyword, trg)`` or, when the iterator
    supplies lengths, ``model(keyword, keyword_len, trg)``, and must return
    per-token logits of shape [trg_len, batch_size, output_dim].

    Refactor: the batch forward/flatten/loss logic was previously duplicated
    verbatim across train/evaluate/test; it now lives in ``_batch_loss`` and
    the shared no-grad evaluation loop in ``_run_eval``.
    """

    def __init__(self,
                 model,
                 train_iterator,
                 val_iterator,
                 optimizer,
                 criterion,
                 output_dim,
                 weight_initializer=None):
        """Store the training components.

        Args:
            model: the sequence model (a torch.nn.Module).
            train_iterator: iterator of batches with ``keywords``/``target``.
            val_iterator: like ``train_iterator``, used for validation.
            optimizer: optimizer over ``model``'s parameters.
            criterion: loss over flattened token logits vs. target indices.
            output_dim: size of the output vocabulary (searching space).
            weight_initializer: optional ``model.apply`` function used by
                :meth:`epoch`; defaults to N(0, 0.01) initialisation.
        """
        self.model = model
        self.train_iterator = train_iterator
        self.val_iterator = val_iterator
        self.optimizer = optimizer
        self.criterion = criterion
        self.weight_initializer = weight_initializer
        self.output_dim = output_dim

    def _batch_loss(self, batch):
        """Run the model on one batch and return the criterion loss tensor.

        The first target token (the [CLS]/start token) is cut off and the
        remaining steps flattened, so the criterion sees
        [(trg_len - 1) * batch_size, output_dim] logits.
        """
        if isinstance(batch.keywords, tuple):
            # iterator was built with lengths included
            keyword, keyword_len = batch.keywords
            # keyword = [keywords_len, batch_size]; keyword_len = [batch_size]
        else:
            keyword, keyword_len = batch.keywords, None

        trg = batch.target
        # trg = [trg_len, batch_size]
        trg_len = trg.shape[0]
        batch_size = trg.shape[1]

        if keyword_len is not None:
            prediction = self.model(keyword, keyword_len, trg)
        else:
            prediction = self.model(keyword, trg)
        # prediction = [trg_len, batch_size, output_dim]

        prediction = prediction[1:].view((trg_len - 1) * batch_size, self.output_dim)
        trg = trg[1:].view((trg_len - 1) * batch_size)
        return self.criterion(prediction, trg)

    def train(self, clip):
        """Run one training epoch; returns the mean batch loss.

        Args:
            clip: max gradient norm passed to ``clip_grad_norm_``.
        """
        self.model.train()
        epoch_loss = 0
        for batch in self.train_iterator:
            self.optimizer.zero_grad()
            loss = self._batch_loss(batch)
            loss.backward()
            # guard against exploding gradients
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), clip)
            self.optimizer.step()
            epoch_loss += loss.item()
        return epoch_loss / len(self.train_iterator)

    def _run_eval(self, iterator):
        """Return the mean batch loss over ``iterator``, without gradients."""
        self.model.eval()
        epoch_loss = 0
        with torch.no_grad():
            for batch in iterator:
                epoch_loss += self._batch_loss(batch).item()
        return epoch_loss / len(iterator)

    def evaluate(self):
        """Return the mean loss over the validation iterator."""
        return self._run_eval(self.val_iterator)

    def epoch(self, n_epochs, clip, model_name):
        """Initialise the weights, then train for ``n_epochs`` epochs.

        Saves the model state to ``model_name`` whenever the validation loss
        improves, and prints per-epoch loss/perplexity.
        """
        # initialize weights (`is None` replaces the previous `== None` test)
        if self.weight_initializer is None:
            self.model.apply(_default_init_weights)
        else:
            self.model.apply(self.weight_initializer)

        # keep track of the best model (minimum validation loss)
        best_valid_loss = float('inf')
        for epoch in range(n_epochs):
            start_time = time.time()
            train_loss = self.train(clip)
            valid_loss = self.evaluate()
            epoch_mins, epoch_secs = _epoch_time(start_time)
            if valid_loss < best_valid_loss:
                best_valid_loss = valid_loss
                torch.save(self.model.state_dict(), model_name)
            print(f'Epoch: {epoch+1:02} | Time: {epoch_mins}m {epoch_secs}s')
            print(f'\tTrain Loss: {train_loss:.3f} | Train PPL: {math.exp(train_loss):7.3f}')
            print(f'\t Val. Loss: {valid_loss:.3f} | Val. PPL: {math.exp(valid_loss):7.3f}')

    def test(self, iterator, model_name=None):
        """Return the mean loss over ``iterator``.

        When ``model_name`` is given, that saved state is loaded first.
        """
        if model_name is not None:
            self.model.load_state_dict(torch.load(model_name))
        return self._run_eval(iterator)
| 42.163399 | 101 | 0.562238 |
acf434890251a912a5bca498185ac52633f520ee | 6,685 | py | Python | resolution/controllers/redirect_handler.py | openpermissions/resolution-srv | 92e3110ee115c3997dc5d906f949418c21faf0d6 | [
"Apache-2.0"
] | 3 | 2016-05-03T20:08:12.000Z | 2019-05-20T01:39:36.000Z | resolution/controllers/redirect_handler.py | openpermissions/resolution-srv | 92e3110ee115c3997dc5d906f949418c21faf0d6 | [
"Apache-2.0"
] | 3 | 2016-05-17T09:41:57.000Z | 2016-05-31T10:41:47.000Z | resolution/controllers/redirect_handler.py | openpermissions/resolution-srv | 92e3110ee115c3997dc5d906f949418c21faf0d6 | [
"Apache-2.0"
] | 1 | 2019-05-20T01:39:26.000Z | 2019-05-20T01:39:26.000Z | # -*- coding: utf-8 -*-
# Copyright 2016 Open Permissions Platform Coalition
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
"""Resolve an asset from parameters"""
import logging
import urllib
from chub import API
from koi import base, exceptions
from koi.configure import ssl_server_options
from bass.hubkey import generate_hub_key
from tornado import httpclient, httputil
from tornado.gen import coroutine, Return
from tornado.options import options, define
from tornado.web import RedirectHandler
from hub_key_handler import redirectToAsset, _get_provider_by_name, _get_repository, _get_repos_for_source_id
from memoize import MemoizeCoroutine
# Registers the fallback URL used when a request cannot be resolved to an asset.
define('redirect_to_website', default='http://openpermissions.org/',
       help='The website to which the resolution service redirects for unknown requests')
@MemoizeCoroutine
@coroutine
def _get_providers_by_type_and_id(source_id_type, source_id):
    """ get the matching providers for a given source_id_type and source_id

    Results are memoized per (source_id_type, source_id) to spare the query
    service.

    :param source_id_type: str
    :param source_id: str
    :returns: list of organisations
    :raises: koi.exceptions.HTTPError
    """
    client = API(options.url_query, ssl_options=ssl_server_options())
    try:
        res = yield client.query.licensors.get(source_id_type=source_id_type, source_id=source_id)
        raise Return(res['data'])
    except httpclient.HTTPError as exc:
        # a 404 from the query service means "no providers"; anything else is unexpected
        if exc.code == 404:
            msg = 'No matching providers found'
        else:
            msg = 'Unexpected error ' + exc.message
        raise exceptions.HTTPError(exc.code, msg, source='query')
def _getHostSubDomain(cls):
"""
returns the subdomain portion of the request hostname
eg something.copyrighthub.org would return "something"
"""
subDomain = ""
host = cls.request.headers.get('Host')
host, port = httputil.split_host_and_port(host)
# get the subdomain part
hostparts = host.split('.')
# match subdomain, but only if not in list of ignore_subdomains
if len(hostparts) == 3:
if not hostparts[0] in options.ignored_subdomains \
and hostparts[1] == 'copyrighthub' \
and hostparts[2] == 'org':
subDomain = hostparts[0]
return subDomain
class RedirectHandler(base.BaseHandler):
def initialize(self, **kwargs):
try:
self.version = kwargs['version']
except KeyError:
raise KeyError('App version is required')
@coroutine
def get(self):
"""
Resolve an asset from querystring parameters:
. hubpid = provider id
. hubidt = asset id type
. hubaid = asset id
"""
providerId = self.get_query_argument('hubpid', None)
assetIdType = self.get_query_argument('hubidt', None)
assetId = self.get_query_argument('hubaid', None)
global showJson
showJson = self.get_query_argument('hubjson', None)
# get the subdomain from the request
hostProvider = _getHostSubDomain(self)
# if hostname provider is specified then use it, but check it doesn't
# conflict with any provider passed in the queryString
if hostProvider:
if not providerId:
providerId = hostProvider
else:
if hostProvider.lower() != providerId.lower():
self.render('error.html', errors=['hostname contradicts querystring provider'])
raise Return()
# if our parameters are all missing redirect to default page
if not providerId and not assetIdType and not assetId:
logging.debug("A : redirect to options.redirect_to_website")
self.redirect(options.redirect_to_website)
raise Return()
# if providerId is missing but other two are there then look for multiple providers for asset
if not providerId and assetIdType and assetId:
logging.debug("C : lookup asset")
# search for providers by assetId and assetIdType
providers = yield _get_providers_by_type_and_id(assetIdType, assetId)
if len(providers) == 1:
yield redirectToAsset(self, providers[0], assetIdType, assetId, showJson)
raise Return()
else:
links=[]
# search for all matching assets
assets = yield _get_repos_for_source_id(assetIdType, assetId)
# get provider details for each and build a link
for asset in assets:
repo = yield _get_repository(asset["repository_id"])
provider_name = repo["data"]["organisation"]["name"]
hub_key = generate_hub_key(options.default_resolver_id, options.hub_id, asset["repository_id"], 'asset', asset["entity_id"])
link = {
'name': provider_name,
'href': hub_key
}
links.append(link)
self.render('multiple_providers_template.html', links=links)
raise Return()
# look for just providerId specified
if providerId and not assetIdType and not assetId:
logging.debug("D : show provider landing page")
# get provider info
provider = yield _get_provider_by_name(providerId)
# show the provider's special branded landing page
self.render('provider_template.html', data=provider)
raise Return()
# look for all three parameters specified
if providerId and assetIdType and assetId:
logging.debug("B : all specified")
# look up reference links stuff and redirect
provider = yield _get_provider_by_name(providerId)
logging.debug ('prov ' + str(provider))
yield redirectToAsset(self, provider, assetIdType, assetId, showJson)
else:
# this should never happen so return error if it does
self.render('error.html', errors=['unable to find matching asset from provided identifiers'])
raise Return()
| 38.641618 | 144 | 0.646223 |
acf437b1382a7ac24633a85f0cad2fe385c06f80 | 1,386 | py | Python | api/progress.py | astutespruce/secas-blueprint | 5f742290167df75b871a5043716541b9922f104c | [
"MIT"
] | null | null | null | api/progress.py | astutespruce/secas-blueprint | 5f742290167df75b871a5043716541b9922f104c | [
"MIT"
] | null | null | null | api/progress.py | astutespruce/secas-blueprint | 5f742290167df75b871a5043716541b9922f104c | [
"MIT"
] | null | null | null | import arq
from api.settings import REDIS, JOB_TIMEOUT
JOB_PREFIX = "arq:job-progress:"
EXPIRATION = JOB_TIMEOUT + 3600
async def set_progress(job_id, progress=0, message="", errors=None):
"""Store job progress to redis, and expire after EXPIRATION seconds.
Parameters
----------
job_id : str
progress : int, optional (default 0)
message : str (optional, default '')
short status message, if any
errors : list-like (optional, default None)
list of short error message, if any
"""
error_str = ",".join(errors) if errors else ""
redis = await arq.create_pool(REDIS)
await redis.setex(
f"{JOB_PREFIX}{job_id}", EXPIRATION, f"{progress}|{message}|{error_str}"
)
redis.close()
await redis.wait_closed()
async def get_progress(job_id):
"""Get job progress from redis, or None if the job_id is not found.
Parameters
----------
job_id : str
Returns
-------
(int, str, list)
tuple of progress percent, message, errors
"""
redis = await arq.create_pool(REDIS)
progress = await redis.get(f"{JOB_PREFIX}{job_id}")
redis.close()
await redis.wait_closed()
if progress is None:
return 0, "", []
progress, message, errors = progress.split("|")
errors = errors.split(",") if errors else []
return int(progress), message, errors
| 24.315789 | 80 | 0.631313 |
acf438644ec773b7be5ddab33875a6d19e386800 | 2,909 | py | Python | src/datapane/client/api/__init__.py | philopon/datapane | d7d69865d4def0cbe6eb334acd9edeb829dd67e6 | [
"Apache-2.0"
] | null | null | null | src/datapane/client/api/__init__.py | philopon/datapane | d7d69865d4def0cbe6eb334acd9edeb829dd67e6 | [
"Apache-2.0"
] | null | null | null | src/datapane/client/api/__init__.py | philopon/datapane | d7d69865d4def0cbe6eb334acd9edeb829dd67e6 | [
"Apache-2.0"
] | null | null | null | """# API docs for Datapane Client
These docs describe the Python API for building Datapane Reports, along with additional information on the Datapane Teams API.
Usage docs for Datapane can be found at https://docs.datapane.com
These objects are all available under the `datapane` module, via `import datapane as dp` (they are re-exported from `datapane.client.api`).
### Datapane Reports API
The core reporting APIs are available on both the public and teams plans, these are found in `datapane.client.api.report`, including,
- `datapane.client.api.report.core.Report`
- Layout Blocks
- `datapane.client.api.report.blocks.Page`
- `datapane.client.api.report.blocks.Group`
- `datapane.client.api.report.blocks.Select`
- Data Blocks
- `datapane.client.api.report.blocks.Plot`
- `datapane.client.api.report.blocks.Table`
- `datapane.client.api.report.blocks.DataTable`
- `datapane.client.api.report.blocks.File`
- `datapane.client.api.report.blocks.BigNumber`
- `datapane.client.api.report.blocks.Text`
- `datapane.client.api.report.blocks.Code`
- `datapane.client.api.report.blocks.HTML`
### Datapane Teams
Additional API docs for teams and enterprise features are found in `datapane.client.api.teams` that provide automation and sharing of data analytics workflows
- `datapane.client.api.teams.Blob`
- `datapane.client.api.teams.Variable`
- `datapane.client.api.teams.Script`
- `datapane.client.api.teams.Schedule`
.. note:: These docs describe the latest version of the datapane API available on [pypi](https://pypi.org/project/datapane/)
<a href="https://pypi.org/project/datapane/">
<img src="https://img.shields.io/pypi/v/datapane?color=blue" alt="Latest release" />
</a>
"""
# flake8: noqa F401
# Internal API re-exports
import warnings
from .common import HTTPError, Resource
from .dp_object import DPObjectRef
from .teams import Blob, Run, Schedule, Script, Variable
from .report.blocks import (
BigNumber,
Code,
Group,
DataTable,
Embed,
File,
HTML,
Page,
Plot,
Select,
SelectType,
Text,
Table,
)
from .report.core import Report, ReportType, Visibility
from .runtime import Params, Result, by_datapane, on_datapane, _reset_runtime, _report
from .user import login, logout, ping
from ..utils import IncompatibleVersionError
from ..config import init
from . import templates
################################################################################
# deprecations
# TODO - remove deprecation
class Markdown(Text):
def __init__(self, *a, **kw):
warnings.warn("Deprecated, to be removed in next release, use dp.Text instead.")
super().__init__(*a, **kw)
class Blocks(Group):
def __init__(self, *a, **kw):
warnings.warn("Deprecated, to be removed in next release, use dp.Group instead.")
super().__init__(*a, **kw)
| 32.685393 | 158 | 0.697834 |
acf439b752df89299225bb40e7e04649822d4864 | 4,584 | py | Python | test/functional/mempool_reorg.py | VsyncCrypto/VSX-3.9.5 | 9a30f997bf48bf3d95451930a75619b5da1fef62 | [
"MIT"
] | 29 | 2017-09-11T17:50:29.000Z | 2021-06-30T18:04:49.000Z | test/functional/mempool_reorg.py | VsyncCrypto/VSX-3.9.5 | 9a30f997bf48bf3d95451930a75619b5da1fef62 | [
"MIT"
] | 1 | 2020-09-29T23:18:28.000Z | 2020-09-29T23:18:28.000Z | test/functional/mempool_reorg.py | VsyncCrypto/VSX-3.9.5 | 9a30f997bf48bf3d95451930a75619b5da1fef62 | [
"MIT"
] | 10 | 2017-09-12T19:54:26.000Z | 2019-11-22T02:14:22.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mempool re-org scenarios.
Test re-org scenarios with a mempool that contains transactions
that spend (directly or indirectly) coinbase transactions.
"""
from test_framework.test_framework import VsyncTestFramework
from test_framework.util import *
import time
# Create one-input, one-output, no-fee transaction:
class MempoolCoinbaseTest(VsyncTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [["-checkmempool"]] * 2
def run_test(self):
# Start with a 200 block chain
assert_equal(self.nodes[0].getblockcount(), 200)
# Mine four blocks. After this, nodes[0] blocks
# 101, 102, and 103 are spend-able.
new_blocks = self.nodes[1].generate(4)
self.sync_all()
node0_address = self.nodes[0].getnewaddress()
node1_address = self.nodes[1].getnewaddress()
# Three scenarios for re-orging coinbase spends in the memory pool:
# 1. Direct coinbase spend : spend_101
# 2. Indirect (coinbase spend in chain, child in mempool) : spend_102 and spend_102_1
# 3. Indirect (coinbase and child both in chain) : spend_103 and spend_103_1
# Use invalidatblock to make all of the above coinbase spends invalid (immature coinbase),
# and make sure the mempool code behaves correctly.
b = [ self.nodes[0].getblockhash(n) for n in range(101, 105) ]
coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
spend_101_raw = create_tx(self.nodes[0], coinbase_txids[1], node1_address, 249.99)
spend_102_raw = create_tx(self.nodes[0], coinbase_txids[2], node0_address, 249.99)
spend_103_raw = create_tx(self.nodes[0], coinbase_txids[3], node0_address, 249.99)
# Create a transaction which is time-locked to two blocks in the future
timelock_tx = self.nodes[0].createrawtransaction([{"txid": coinbase_txids[0], "vout": 0}], {node0_address: 249.99})
# Set the time lock
timelock_tx = timelock_tx.replace("ffffffff", "11111191", 1)
timelock_tx = timelock_tx[:-8] + hex(self.nodes[0].getblockcount() + 2)[2:] + "000000"
timelock_tx = self.nodes[0].signrawtransaction(timelock_tx)["hex"]
# This will raise an exception because the timelock transaction is too immature to spend
assert_raises_rpc_error(-26, "non-final", self.nodes[0].sendrawtransaction, timelock_tx)
# Broadcast and mine spend_102 and 103:
spend_102_id = self.nodes[0].sendrawtransaction(spend_102_raw)
spend_103_id = self.nodes[0].sendrawtransaction(spend_103_raw)
self.nodes[0].generate(1)
# Time-locked transaction is still too immature to spend
assert_raises_rpc_error(-26,'non-final', self.nodes[0].sendrawtransaction, timelock_tx)
# Create 102_1 and 103_1:
spend_102_1_raw = create_tx(self.nodes[0], spend_102_id, node1_address, 249.98)
spend_103_1_raw = create_tx(self.nodes[0], spend_103_id, node1_address, 249.98)
# Broadcast and mine 103_1:
spend_103_1_id = self.nodes[0].sendrawtransaction(spend_103_1_raw)
last_block = self.nodes[0].generate(1)
# Time-locked transaction can now be spent
#timelock_tx_id = self.nodes[0].sendrawtransaction(timelock_tx)
# ... now put spend_101 and spend_102_1 in memory pools:
spend_101_id = self.nodes[0].sendrawtransaction(spend_101_raw)
spend_102_1_id = self.nodes[0].sendrawtransaction(spend_102_1_raw)
assert_equal(set(self.nodes[0].getrawmempool()), {spend_101_id, spend_102_1_id})
self.sync_all()
for node in self.nodes:
node.invalidateblock(last_block[0])
# Time-locked transaction is now too immature and has been removed from the mempool
# spend_103_1 has been re-orged out of the chain and is back in the mempool
assert_equal(set(self.nodes[0].getrawmempool()), {spend_101_id, spend_102_1_id, spend_103_1_id})
# Use invalidateblock to re-org back and make all those coinbase spends
# immature/invalid:
for node in self.nodes:
node.invalidateblock(new_blocks[0])
# mempool should be empty.
assert_equal(set(self.nodes[0].getrawmempool()), set())
self.sync_all()
if __name__ == '__main__':
MempoolCoinbaseTest().main()
| 48.252632 | 123 | 0.691099 |
acf43af2b4f7198fe550937864bf6e4b5cf280a8 | 374 | py | Python | 69_my_sqrt.py | FireinDark/leetcode_practice | 6457f5343a90289707d1082211527ec5e6c4674e | [
"MIT"
] | 2 | 2019-04-28T03:06:11.000Z | 2019-04-28T03:06:14.000Z | 69_my_sqrt.py | FireinDark/leetcode_practice | 6457f5343a90289707d1082211527ec5e6c4674e | [
"MIT"
] | null | null | null | 69_my_sqrt.py | FireinDark/leetcode_practice | 6457f5343a90289707d1082211527ec5e6c4674e | [
"MIT"
] | null | null | null |
def my_sqrt(x):
left = 0
right = x
while left <= right:
mid = (left + right) // 2
if (x - mid ** 2) >= 0 and (x - (mid + 1) ** 2) < 0:
return mid
elif mid ** 2 > x:
right = mid - 1
else:
left = mid + 1
return 0
if __name__ == '__main__':
print(my_sqrt(4))
print(my_sqrt(8))
| 18.7 | 60 | 0.419786 |
acf43b0ef3314241f116cc8b7d001d1e25a694bf | 3,254 | py | Python | CMDA/svc/src/EOF/call_EOF.py | cmda-jpl/cmda_webservices | d672a5be00527aced4d53cc413665c690b6800c2 | [
"Apache-2.0"
] | null | null | null | CMDA/svc/src/EOF/call_EOF.py | cmda-jpl/cmda_webservices | d672a5be00527aced4d53cc413665c690b6800c2 | [
"Apache-2.0"
] | null | null | null | CMDA/svc/src/EOF/call_EOF.py | cmda-jpl/cmda_webservices | d672a5be00527aced4d53cc413665c690b6800c2 | [
"Apache-2.0"
] | 1 | 2021-09-03T15:56:44.000Z | 2021-09-03T15:56:44.000Z | # call_EOF.py
import string
import subprocess
import os
from os.path import basename
class call_EOF:
def __init__(self, pFile):
self.pFile = pFile
def display(self):
### print 'current dir: ', os.getcwd()
# inputs: model name, variable name, start-year-mon, end-year-mon, 'start lon, end lon', 'start lat, end lat', 'mon list'
# example: ./octaveWrapper ukmo_hadgem2-a ts 199001 199512 '0,100' '-29,29' '4,5,6,10,12'
#'%g'%self.lon1 + ',' + '%g'%self.lon2 + ' ' + '%g'%self.lat1 + ',' + '%g'%self.lat2 + ' ' + \
'''
inputs = str(self.nVar)
for iVar in range(self.nVar):
inputs = inputs + ' ' + self.models[iVar] + ' ' + self.vars1[iVar] + ' ' + self.pres1[iVar]
inputs = inputs + ' ' + self.months + ' ' + \
self.lon1 + ',' + self.lon2 + ' ' + self.lat1 + ',' + self.lat2 + ' ' + \
self.output_dir
print 'inputs: ', inputs
command = './wrapper ' + inputs
cmd = command.split(' ')
cmdstring = string.join(cmd, ' ')
print 'cmdstring: ', cmdstring
'''
#print 'self.pFile: ', self.pFile
import os
#print "os.path.isfile('./wrapper'): ", os.path.isfile('./wrapper')
command = './wrapper ' + self.pFile
print command
cmd = command.split(' ')
cmdstring = string.join(cmd, ' ')
#print 'cmd: ', cmd
#if 1:
try:
proc=subprocess.Popen(cmd, cwd='.', stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
# wait for the process to finish
stdout_value, stderr_value = proc.communicate()
print 'stdout_value: ', stdout_value
print 'stderr_value: ', stderr_value
if stderr_value.find('error:') >= 0:
return (stderr_value, '')
fst = 'figFile: '
l1 = len(fst)
### print 'l1: ', l1
image_filename = ''
fst2 = 'dataFile: '
l2 = len(fst2)
### print 'l2: ', l2
data_filename = ''
lines = stdout_value.split('\n')
for line in lines:
### print '*****: ', line
if line.find('figFile: ') >= 0:
print '***** line: ', line
image_filename = line[l1:]
if line.find('dataFile: ') >= 0:
print '***** line: ', line
data_filename = line[l2:]
image_filename = os.path.basename(image_filename)
print 'image_filename: ', image_filename
data_filename = os.path.basename(data_filename)
print 'data_filename: ', data_filename
return (stdout_value, image_filename, data_filename)
#if 0:
except OSError, e:
err_mesg = 'The subprocess "%s" returns with an error: %s.' % (cmdstring, e)
return (err_mesg, '', '')
if __name__ == '__main__':
c1 = call_randomForest(\
'3',
'ukmo_hadgem2-a', 'ts', '200',
'ukmo_hadgem2-a', 'clt', '200',
'ukmo_hadgem2-a', 'clt', '200',
'0', '100', '-29', '29', \
'/home/svc/cmac/trunk/services/svc/svc/static/randomForest')
mesg = c1.display()
print 'mesg: ', mesg
| 33.204082 | 129 | 0.515673 |
acf43b0fb0d3d6e309fccddb158fe6e306417df0 | 5,516 | py | Python | xrspatial/terrain.py | SapirLastimoza-Dooley/xarray-spatial | e6f789d5a7e2b5911429a57bc9059e9c1aa5dbfc | [
"MIT"
] | 1 | 2021-02-01T18:03:50.000Z | 2021-02-01T18:03:50.000Z | xrspatial/terrain.py | SapirLastimoza-Dooley/xarray-spatial | e6f789d5a7e2b5911429a57bc9059e9c1aa5dbfc | [
"MIT"
] | null | null | null | xrspatial/terrain.py | SapirLastimoza-Dooley/xarray-spatial | e6f789d5a7e2b5911429a57bc9059e9c1aa5dbfc | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import datashader as ds
from typing import Optional
import xarray as xr
from xarray import DataArray
from .perlin import _perlin
# TODO: add optional name parameter `name='terrain'`
def generate_terrain(x_range: tuple = (0, 500),
y_range: tuple = (0, 500),
width: int = 25,
height: int = 30,
canvas: ds.Canvas = None,
seed: int = 10,
zfactor: int = 4000,
full_extent: Optional[str] = None) -> xr.DataArray:
"""
Generates a pseudo-random terrain which can be helpful
for testing raster functions
Parameters:
----------
x_range: tuple (default = (0, 500))
Range of x values.
x_range: tuple (default = (0, 500))
Range of y values.
width: int (default = 25)
Width of output data array in pixels.
height: int (default = 30)
Height of output data array in pixels.
canvas: ds.Canvas (default = None)
Instance for passing output dimensions / ranges
seed: int (default = 10)
Seed for random number generator.
zfactor: int (default = 4000)
Multipler for z values.
full_extent: str, optional (default = None)
bbox<xmin, ymin, xmax, ymax>. Full extent of coordinate system.
Returns:
----------
terrain: xarray.DataArray
2D array of generated terrain.
Notes:
----------
Algorithm References:
- This was inspired by Michael McHugh's 2016 PyCon Canada talk:
https://www.youtube.com/watch?v=O33YV4ooHSo
- https://www.redblobgames.com/maps/terrain-from-noise/
Examples:
----------
>>> # Imports
>>> import datashader as ds
>>> from datashader.transfer_functions import shade
>>> from xrspatial import generate_terrain
>>> # Create Canvas
>>> cvs = ds.Canvas(plot_width=800,
>>> plot_height=600,
>>> x_range=(-20e6, 20e6),
>>> y_range=(-20e6, 20e6))
>>> # Generate Terrain Data Array
>>> terrain = generate_terrain(canvas = cvs)
>>> print(terrain)
<xarray.DataArray 'terrain' (y: 600, x: 800)>
array([[0., 0., 0., ..., 0., 0., 0.],
[0., 0., 0., ..., 0., 0., 0.],
[0., 0., 0., ..., 0., 0., 0.],
...,
[0., 0., 0., ..., 0., 0., 0.],
[0., 0., 0., ..., 0., 0., 0.],
[0., 0., 0., ..., 0., 0., 0.]])
Coordinates:
* x (x) float64 -1.998e+07 -1.992e+07 ... 1.992e+07 1.997e+07
* y (y) float64 -1.997e+07 -1.99e+07 -1.983e+07 ... 1.99e+07 1.997e+07
Attributes:
res: 1
"""
def _gen_heights(bumps):
out = np.zeros(len(bumps))
for i, b in enumerate(bumps):
x = b[0]
y = b[1]
val = agg.data[y, x]
if val >= 0.33 and val <= 3:
out[i] = 0.1
return out
def _scale(value, old_range, new_range):
d = (value - old_range[0]) / (old_range[1] - old_range[0])
return d * (new_range[1] - new_range[0]) + new_range[0]
mercator_extent = (-np.pi * 6378137, -np.pi * 6378137,
np.pi * 6378137, np.pi * 6378137)
crs_extents = {'3857': mercator_extent}
if isinstance(full_extent, str):
full_extent = crs_extents[full_extent]
elif full_extent is None:
full_extent = (canvas.x_range[0], canvas.y_range[0],
canvas.x_range[1], canvas.y_range[1])
elif not isinstance(full_extent, (list, tuple)) and len(full_extent) != 4:
raise TypeError('full_extent must be tuple(4) or str wkid')
full_xrange = (full_extent[0], full_extent[2])
full_yrange = (full_extent[1], full_extent[3])
x_range_scaled = (_scale(canvas.x_range[0], full_xrange, (0.0, 1.0)),
_scale(canvas.x_range[1], full_xrange, (0.0, 1.0)))
y_range_scaled = (_scale(canvas.y_range[0], full_yrange, (0.0, 1.0)),
_scale(canvas.y_range[1], full_yrange, (0.0, 1.0)))
data = _gen_terrain(canvas.plot_width, canvas.plot_height, seed,
x_range=x_range_scaled, y_range=y_range_scaled)
data = (data - np.min(data))/np.ptp(data)
data[data < 0.3] = 0 # create water
data *= zfactor
# DataArray coords were coming back different from cvs.points...
hack_agg = canvas.points(pd.DataFrame({'x': [], 'y': []}), 'x', 'y')
agg = DataArray(data,
name='terrain',
coords=hack_agg.coords,
dims=hack_agg.dims,
attrs={'res': 1})
return agg
def _gen_terrain(width, height, seed, x_range=None, y_range=None):
if not x_range:
x_range = (0, 1)
if not y_range:
y_range = (0, 1)
# multiplier, (xfreq, yfreq)
NOISE_LAYERS = ((1 / 2**i, (2**i, 2**i)) for i in range(16))
linx = np.linspace(x_range[0], x_range[1], width, endpoint=False)
liny = np.linspace(y_range[0], y_range[1], height, endpoint=False)
x, y = np.meshgrid(linx, liny)
height_map = None
for i, (m, (xfreq, yfreq)) in enumerate(NOISE_LAYERS):
noise = _perlin(x * xfreq, y * yfreq, seed=seed + i) * m
if height_map is None:
height_map = noise
else:
height_map += noise
height_map /= (1.00 + 0.50 + 0.25 + 0.13 + 0.06 + 0.03)
height_map = height_map ** 3
return height_map
| 33.02994 | 78 | 0.548405 |
acf43b346a9945a44b9c72898b7693c539b1ed19 | 5,751 | py | Python | tests/test_providers.py | rheinwerk-verlag/pganonymize | de897b821b3593e3183b7a3799a0d06f095721fc | [
"MIT"
] | 1 | 2022-03-25T14:35:35.000Z | 2022-03-25T14:35:35.000Z | tests/test_providers.py | rheinwerk-verlag/pganonymize | de897b821b3593e3183b7a3799a0d06f095721fc | [
"MIT"
] | 3 | 2021-12-13T16:00:01.000Z | 2022-03-30T09:37:54.000Z | tests/test_providers.py | rheinwerk-verlag/pganonymize | de897b821b3593e3183b7a3799a0d06f095721fc | [
"MIT"
] | null | null | null | import operator
import uuid
from collections import OrderedDict
import pytest
import six
from mock import MagicMock, Mock, patch
from pganonymize import exceptions, providers
def test_register():
registry = providers.ProviderRegistry()
@providers.register('foo', registry=registry)
class FooProvider(providers.Provider):
def alter_value(self, value):
return 'foo'
@providers.register('bar', registry=registry)
class BarProvider(providers.Provider):
def alter_value(self, value):
return 'bar'
assert len(registry._registry) == 2
assert 'foo' in registry._registry
assert 'bar' in registry._registry
class TestProviderRegistry:
def test_constructor(self):
registry = providers.ProviderRegistry()
assert registry._registry == {}
@pytest.mark.parametrize('classes, expected', [
(
OrderedDict([
('foo', Mock(spec=providers.Provider)),
]),
['foo']
),
(
OrderedDict([
('foo', Mock(spec=providers.Provider)),
('bar', Mock(spec=providers.Provider)),
]),
['foo', 'bar']
)
])
def test_register(self, classes, expected):
registry = providers.ProviderRegistry()
for key, cls in classes.items():
registry.register(cls, key)
assert len(registry._registry) == len(classes)
assert list(registry._registry.keys()) == expected
def test_register_raises_exception(self):
registry = providers.ProviderRegistry()
registry.register(Mock(), 'foo1')
registry.register(Mock(), 'foo2')
with pytest.raises(exceptions.ProviderAlreadyRegistered):
registry.register(Mock(), 'foo1')
registry.register(Mock(), 'foo2')
@pytest.mark.parametrize('provider_id, effect', [
('foooooo', pytest.raises(exceptions.InvalidProvider)),
('foobar', pytest.raises(exceptions.InvalidProvider)),
('barr', pytest.raises(exceptions.InvalidProvider)),
('foo', MagicMock()),
('bar', MagicMock()),
('baz', MagicMock()),
('baz.uuid', MagicMock()),
])
def test_get_provider(self, provider_id, effect):
provider = None
registry = providers.ProviderRegistry()
with patch.object(registry, '_registry', {
'foo': Mock(spec=providers.Provider),
'bar': Mock(spec=providers.Provider),
'baz.*': Mock(spec=providers.Provider, regex_match=True)
}):
with effect:
provider = registry.get_provider(provider_id)
if provider is not None:
assert isinstance(provider, providers.Provider)
def test_providers(self):
pass
class TestProvider:
def test_alter_value(self):
provider = providers.Provider()
with pytest.raises(NotImplementedError):
provider.alter_value('Foo')
class TestChoiceProvider:
def test_alter_value(self):
choices = ['Foo', 'Bar', 'Baz']
provider = providers.ChoiceProvider(values=choices)
for choice in choices:
assert provider.alter_value(choice) in choices
class TestClearProvider:
def test_alter_value(self):
provider = providers.ClearProvider()
assert provider.alter_value('Foo') is None
class TestFakeProvider:
@pytest.mark.parametrize('name, function_name', [
('fake.first_name', 'first_name'),
('fake.unique.first_name', 'unique.first_name'),
])
@patch('pganonymize.providers.fake_data')
def test_alter_value(self, mock_fake_data, name, function_name):
provider = providers.FakeProvider(name=name)
provider.alter_value('Foo')
assert operator.attrgetter(function_name)(mock_fake_data).call_count == 1
@pytest.mark.parametrize('name', [
'fake.foo_name'
])
def test_invalid_names(self, name):
provider = providers.FakeProvider(name=name)
with pytest.raises(exceptions.InvalidProviderArgument):
provider.alter_value('Foo')
class TestMaskProvider:
@pytest.mark.parametrize('value, sign, expected', [
('Foo', None, 'XXX'),
('Baaaar', '?', '??????'),
])
def test_alter_value(self, value, sign, expected):
provider = providers.MaskProvider(sign=sign)
assert provider.alter_value(value) == expected
class TestMD5Provider:
def test_alter_value(self):
provider = providers.MD5Provider()
value = provider.alter_value('foo')
assert isinstance(value, six.string_types)
assert len(value) == 32
def test_as_number(self):
provider = providers.MD5Provider(as_number=True)
value = provider.alter_value('foo')
assert isinstance(value, six.integer_types)
assert value == 985560
provider = providers.MD5Provider(as_number=True, as_number_length=8)
value = provider.alter_value('foobarbazadasd')
assert isinstance(value, six.integer_types)
assert value == 45684001
class TestSetProvider:
@pytest.mark.parametrize('kwargs, expected', [
({'value': None}, None),
({'value': 'Bar'}, 'Bar')
])
def test_alter_value(self, kwargs, expected):
provider = providers.SetProvider(**kwargs)
assert provider.alter_value('Foo') == expected
class TestUUID4Provider:
@pytest.mark.parametrize('kwargs, expected', [
({'value': None}, None),
({'value': 'Bar'}, 'Bar')
])
def test_alter_value(self, kwargs, expected):
provider = providers.UUID4Provider(**kwargs)
assert type(provider.alter_value('Foo')) == uuid.UUID
| 30.428571 | 81 | 0.630151 |
acf43d76551866f0e48117022fbbf5aa1876db6f | 45 | py | Python | cde/__init__.py | jalanb/kd | 67a45a16e8e6b689a0120a28ad968bfaa81ac209 | [
"MIT"
] | 4 | 2019-12-11T15:01:26.000Z | 2022-01-18T19:00:49.000Z | cde/__init__.py | jalanb/kd | 67a45a16e8e6b689a0120a28ad968bfaa81ac209 | [
"MIT"
] | 4 | 2019-11-03T23:28:19.000Z | 2021-03-23T03:58:13.000Z | cde/__init__.py | jalanb/cde | 67a45a16e8e6b689a0120a28ad968bfaa81ac209 | [
"MIT"
] | 1 | 2019-09-27T02:00:53.000Z | 2019-09-27T02:00:53.000Z | """cde extends cd"""
__version__ = "0.7.34"
| 11.25 | 22 | 0.6 |
acf43e1ec681d06a23822208aec7b2abc1f416f0 | 253 | py | Python | #exercise2- webscrapping.py | regsevillasibal/FTW3_Webscrapping | ed64cd866858e612e3a99f9144e6967c687e057f | [
"Apache-2.0"
] | null | null | null | #exercise2- webscrapping.py | regsevillasibal/FTW3_Webscrapping | ed64cd866858e612e3a99f9144e6967c687e057f | [
"Apache-2.0"
] | null | null | null | #exercise2- webscrapping.py | regsevillasibal/FTW3_Webscrapping | ed64cd866858e612e3a99f9144e6967c687e057f | [
"Apache-2.0"
] | null | null | null | #exercise2.py webscrapping - opening file
from selenium import webdriver
import os
dirpath = os.getcwd
filepath = dirpath + '/chromedriver'
print('Path to Driver: ' + filepath)
browser = webdriver.Chrome(executable_path = filepath)
# browser.get('... | 23 | 54 | 0.754941 |
acf43e4128b2952e2634b6eec6bed1f34d7904a0 | 222 | py | Python | keywords/try_except_finally.py | janbodnar/Python-Course | 51705ab5a2adef52bcdb99a800e94c0d67144a38 | [
"BSD-2-Clause"
] | 13 | 2017-08-22T12:26:07.000Z | 2021-07-29T16:13:50.000Z | keywords/try_except_finally.py | janbodnar/Python-Course | 51705ab5a2adef52bcdb99a800e94c0d67144a38 | [
"BSD-2-Clause"
] | 1 | 2021-02-08T10:24:33.000Z | 2021-02-08T10:24:33.000Z | keywords/try_except_finally.py | janbodnar/Python-Course | 51705ab5a2adef52bcdb99a800e94c0d67144a38 | [
"BSD-2-Clause"
] | 17 | 2018-08-13T11:10:33.000Z | 2021-07-29T16:14:02.000Z | #!/usr/bin/python
# try_except_finally.py
f = None
try:
f = open('films', 'r')
for i in f:
print (i, end="")
except IOError:
print ("Error reading file")
finally:
if f:
f.close()
| 10.090909 | 31 | 0.527027 |
acf43e79b5e38579176f575133604f4212caee93 | 270 | py | Python | genrl/deep/agents/__init__.py | infinitemugen/genrl | 602587417ce167380c90a726764a3efa4643dc38 | [
"MIT"
] | null | null | null | genrl/deep/agents/__init__.py | infinitemugen/genrl | 602587417ce167380c90a726764a3efa4643dc38 | [
"MIT"
] | null | null | null | genrl/deep/agents/__init__.py | infinitemugen/genrl | 602587417ce167380c90a726764a3efa4643dc38 | [
"MIT"
] | null | null | null | from genrl.deep.agents.ddpg import DDPG
from genrl.deep.agents.dqn import DQN
from genrl.deep.agents.ppo1 import PPO1
from genrl.deep.agents.sac import SAC
from genrl.deep.agents.td3 import TD3
from genrl.deep.agents.vpg import VPG
from genrl.deep.agents.a2c import A2C
| 33.75 | 39 | 0.818519 |
acf43f3b9f7d6c6215452b7f8e0ad0d7bc841074 | 27,169 | py | Python | selfdrive/controls/controlsd.py | menwenliang/dragonpilot | 9d45ebd444415b8a8f5fc6090ea6c9f39ee4f58c | [
"MIT"
] | null | null | null | selfdrive/controls/controlsd.py | menwenliang/dragonpilot | 9d45ebd444415b8a8f5fc6090ea6c9f39ee4f58c | [
"MIT"
] | null | null | null | selfdrive/controls/controlsd.py | menwenliang/dragonpilot | 9d45ebd444415b8a8f5fc6090ea6c9f39ee4f58c | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import os
import math
from cereal import car, log
from common.numpy_fast import clip
from common.realtime import sec_since_boot, config_realtime_process, Priority, Ratekeeper, DT_CTRL
from common.profiler import Profiler
from common.params import Params, put_nonblocking
import cereal.messaging as messaging
from selfdrive.config import Conversions as CV
from selfdrive.swaglog import cloudlog
from selfdrive.boardd.boardd import can_list_to_can_capnp
from selfdrive.car.car_helpers import get_car, get_startup_event, get_one_can
from selfdrive.controls.lib.lane_planner import CAMERA_OFFSET
from selfdrive.controls.lib.drive_helpers import update_v_cruise, initialize_v_cruise
from selfdrive.controls.lib.longcontrol import LongControl, STARTING_TARGET_SPEED
from selfdrive.controls.lib.latcontrol_pid import LatControlPID
from selfdrive.controls.lib.latcontrol_indi import LatControlINDI
from selfdrive.controls.lib.latcontrol_lqr import LatControlLQR
from selfdrive.controls.lib.latcontrol_angle import LatControlAngle
from selfdrive.controls.lib.events import Events, ET
from selfdrive.controls.lib.alertmanager import AlertManager
from selfdrive.controls.lib.vehicle_model import VehicleModel
from selfdrive.controls.lib.longitudinal_planner import LON_MPC_STEP
from selfdrive.locationd.calibrationd import Calibration
from selfdrive.hardware import HARDWARE, TICI
LDW_MIN_SPEED = 31 * CV.MPH_TO_MS
LANE_DEPARTURE_THRESHOLD = 0.1
STEER_ANGLE_SATURATION_TIMEOUT = 1.0 / DT_CTRL
STEER_ANGLE_SATURATION_THRESHOLD = 2.5 # Degrees
SIMULATION = "SIMULATION" in os.environ
NOSENSOR = "NOSENSOR" in os.environ
IGNORE_PROCESSES = set(["rtshield", "uploader", "deleter", "loggerd", "logmessaged", "tombstoned", "logcatd", "proclogd", "clocksd", "updated", "timezoned", "manage_athenad", "dragonConf"])
ThermalStatus = log.DeviceState.ThermalStatus
State = log.ControlsState.OpenpilotState
PandaType = log.PandaState.PandaType
Desire = log.LateralPlan.Desire
LaneChangeState = log.LateralPlan.LaneChangeState
LaneChangeDirection = log.LateralPlan.LaneChangeDirection
EventName = car.CarEvent.EventName
class Controls:
  """Main openpilot control loop (dragonpilot fork).

  Driven by CAN receive at 100Hz: reads sockets into a CarState, derives
  car/driver events, runs the enable/disable state machine, computes
  lateral + longitudinal actuator commands, and publishes car controls
  plus logging messages.
  """

  def __init__(self, sm=None, pm=None, can_sock=None):
    # sm/pm/can_sock are injectable for process replay/tests; when None,
    # real messaging sockets are created below.
    params = Params()
    self.dp_jetson = params.get('dp_jetson') == b'1'
    config_realtime_process(4 if TICI else 3, Priority.CTRL_HIGH)

    # Setup sockets
    self.pm = pm
    if self.pm is None:
      self.pm = messaging.PubMaster(['sendcan', 'controlsState', 'carState',
                                     'carControl', 'carEvents', 'carParams'])

    self.sm = sm
    if self.sm is None:
      # camera/manager liveness checks are skipped on jetson and in simulation
      ignore = ['driverCameraState', 'managerState'] if (self.dp_jetson or SIMULATION) else None
      self.sm = messaging.SubMaster(['deviceState', 'pandaState', 'modelV2', 'liveCalibration',
                                     'driverMonitoringState', 'longitudinalPlan', 'lateralPlan', 'liveLocationKalman',
                                     'roadCameraState', 'driverCameraState', 'managerState', 'liveParameters', 'radarState', 'dragonConf'],
                                    ignore_alive=ignore, ignore_avg_freq=['radarState', 'longitudinalPlan'])

    self.can_sock = can_sock
    if can_sock is None:
      can_timeout = None if os.environ.get('NO_CAN_TIMEOUT', False) else 100
      self.can_sock = messaging.sub_sock('can', timeout=can_timeout)

    # wait for one pandaState and one CAN packet
    self.hw_type = messaging.recv_one(self.sm.sock['pandaState']).pandaState.pandaType
    has_relay = self.hw_type in [PandaType.blackPanda, PandaType.uno, PandaType.dos]
    print("Waiting for CAN messages...")
    get_one_can(self.can_sock)

    # fingerprint the car; blocks until identification completes
    self.CI, self.CP = get_car(self.can_sock, self.pm.sock['sendcan'], has_relay)

    # read params
    self.is_metric = params.get_bool("IsMetric")
    self.is_ldw_enabled = params.get_bool("IsLdwEnabled")
    self.enable_lte_onroad = params.get_bool("EnableLteOnroad")
    community_feature_toggle = params.get_bool("CommunityFeaturesToggle")
    openpilot_enabled_toggle = params.get_bool("OpenpilotEnabledToggle")
    passive = params.get_bool("Passive") or not openpilot_enabled_toggle

    # detect sound card presence and ensure successful init
    sounds_available = HARDWARE.get_sound_card_online()

    car_recognized = self.CP.carName != 'mock'
    fuzzy_fingerprint = self.CP.fuzzyFingerprint
    # If stock camera is disconnected, we loaded car controls and it's not dashcam mode
    controller_available = self.CP.enableCamera and self.CI.CC is not None and not passive and not self.CP.dashcamOnly
    community_feature = self.CP.communityFeature or fuzzy_fingerprint
    community_feature_disallowed = community_feature and (not community_feature_toggle)
    # read_only => never send actuator commands (dashcam-style operation)
    self.read_only = not car_recognized or not controller_available or \
                       self.CP.dashcamOnly or community_feature_disallowed
    if self.read_only:
      self.CP.safetyModel = car.CarParams.SafetyModel.noOutput

    # Write CarParams for radard
    cp_bytes = self.CP.to_bytes()
    params.put("CarParams", cp_bytes)
    put_nonblocking("CarParamsCache", cp_bytes)

    self.CC = car.CarControl.new_message()
    self.AM = AlertManager()
    self.events = Events()

    self.LoC = LongControl(self.CP, self.CI.compute_gb)
    self.VM = VehicleModel(self.CP)

    # lateral controller selection: dp_lqr param overrides the tuning
    # declared in CarParams
    if params.get('dp_lqr') == b'1':
      self.LaC = LatControlLQR(self.CP)
    elif self.CP.steerControlType == car.CarParams.SteerControlType.angle:
      self.LaC = LatControlAngle(self.CP)
    elif self.CP.lateralTuning.which() == 'pid':
      self.LaC = LatControlPID(self.CP)
    elif self.CP.lateralTuning.which() == 'indi':
      self.LaC = LatControlINDI(self.CP)
    elif self.CP.lateralTuning.which() == 'lqr':
      self.LaC = LatControlLQR(self.CP)

    self.initialized = False
    self.state = State.disabled
    self.enabled = False
    self.active = False
    self.can_rcv_error = False
    self.soft_disable_timer = 0
    self.v_cruise_kph = 255
    self.v_cruise_kph_last = 0
    self.mismatch_counter = 0
    self.can_error_counter = 0
    self.last_blinker_frame = 0
    self.saturated_count = 0
    self.distance_traveled = 0
    self.last_functional_fan_frame = 0
    self.events_prev = []
    self.current_alert_types = [ET.PERMANENT]
    self.logged_comm_issue = False

    # TODO: no longer necessary, aside from process replay
    self.sm['liveParameters'].valid = True

    self.startup_event = get_startup_event(car_recognized, controller_available, fuzzy_fingerprint)

    # if not sounds_available:
    #   self.events.add(EventName.soundsUnavailable, static=True)
    if community_feature_disallowed:
      self.events.add(EventName.communityFeatureDisallowed, static=True)
    if not car_recognized:
      self.events.add(EventName.carUnrecognized, static=True)
    elif self.read_only:
      self.events.add(EventName.dashcamMode, static=True)

    # controlsd is driven by can recv, expected at 100Hz
    self.rk = Ratekeeper(100, print_delay_threshold=None)
    self.prof = Profiler(False)  # off by default

    # dp
    self.sm['dragonConf'].dpAtl = False

  def update_events(self, CS):
    """Compute carEvents from carState"""

    self.events.clear()
    self.events.add_from_msg(CS.events)
    self.events.add_from_msg(self.sm['driverMonitoringState'].events)

    # Handle startup event
    if self.startup_event is not None:
      self.events.add(self.startup_event)
      self.startup_event = None

    # Don't add any more events if not initialized
    if not self.initialized:
      self.events.add(EventName.controlsInitializing)
      return

    # Create events for battery, temperature, disk space, and memory
    # if self.sm['deviceState'].batteryPercent < 1 and self.sm['deviceState'].chargingError:
    #   # at zero percent battery, while discharging, OP should not allowed
    #   self.events.add(EventName.lowBattery)
    if self.sm['deviceState'].thermalStatus >= ThermalStatus.red:
      self.events.add(EventName.overheat)
    if self.sm['deviceState'].freeSpacePercent < 7:
      # under 7% of space free no enable allowed
      self.events.add(EventName.outOfSpace)
    if self.sm['deviceState'].memoryUsagePercent > 90:
      self.events.add(EventName.lowMemory)

    # Alert if fan isn't spinning for 5 seconds
    # if self.sm['pandaState'].pandaType in [PandaType.uno, PandaType.dos]:
    #   if self.sm['pandaState'].fanSpeedRpm == 0 and self.sm['deviceState'].fanSpeedPercentDesired > 50:
    #     if (self.sm.frame - self.last_functional_fan_frame) * DT_CTRL > 5.0:
    #       self.events.add(EventName.fanMalfunction)
    #   else:
    #     self.last_functional_fan_frame = self.sm.frame

    # Handle calibration status
    cal_status = self.sm['liveCalibration'].calStatus
    if cal_status != Calibration.CALIBRATED:
      if cal_status == Calibration.UNCALIBRATED:
        self.events.add(EventName.calibrationIncomplete)
      else:
        self.events.add(EventName.calibrationInvalid)

    # Handle lane change
    if self.sm['lateralPlan'].laneChangeState == LaneChangeState.preLaneChange:
      direction = self.sm['lateralPlan'].laneChangeDirection
      if (CS.leftBlindspot and direction == LaneChangeDirection.left) or \
         (CS.rightBlindspot and direction == LaneChangeDirection.right):
        self.events.add(EventName.laneChangeBlocked)
      else:
        if direction == LaneChangeDirection.left:
          self.events.add(EventName.preLaneChangeLeftALC if self.sm['lateralPlan'].dpALCAllowed else EventName.preLaneChangeLeft)
        else:
          self.events.add(EventName.preLaneChangeRightALC if self.sm['lateralPlan'].dpALCAllowed else EventName.preLaneChangeRight)
    elif self.sm['lateralPlan'].laneChangeState in [LaneChangeState.laneChangeStarting,
                                                    LaneChangeState.laneChangeFinishing]:
      self.events.add(EventName.laneChange)

    # dpAtl mode downgrades several hard errors to softer events below
    if self.can_rcv_error or not CS.canValid:
      self.events.add(EventName.pcmDisable if self.sm['dragonConf'].dpAtl else EventName.canError)

    safety_mismatch = self.sm['pandaState'].safetyModel != self.CP.safetyModel or self.sm['pandaState'].safetyParam != self.CP.safetyParam
    if safety_mismatch or self.mismatch_counter >= 200:
      self.events.add(EventName.controlsMismatch)

    if not self.sm['liveParameters'].valid:
      self.events.add(EventName.vehicleModelInvalid)

    if len(self.sm['radarState'].radarErrors):
      self.events.add(EventName.radarFault)
    elif not self.dp_jetson and not self.sm.all_alive_and_valid():
      self.events.add(EventName.commIssue)
      if not self.logged_comm_issue:
        # log only on the first frame of a commIssue streak
        cloudlog.error(f"commIssue - valid: {self.sm.valid} - alive: {self.sm.alive}")
        self.logged_comm_issue = True
    else:
      self.logged_comm_issue = False

    if not self.sm['lateralPlan'].mpcSolutionValid:
      self.events.add(EventName.steerTempUnavailableUserOverride if self.sm['dragonConf'].dpAtl else EventName.plannerError)
    if not self.sm['liveLocationKalman'].sensorsOK and not NOSENSOR:
      if self.sm.frame > 5 / DT_CTRL:  # Give locationd some time to receive all the inputs
        self.events.add(EventName.sensorDataInvalid)
    if not self.sm['liveLocationKalman'].posenetOK:
      self.events.add(EventName.posenetInvalid)
    if not self.sm['liveLocationKalman'].deviceStable:
      self.events.add(EventName.deviceFalling)
    if log.PandaState.FaultType.relayMalfunction in self.sm['pandaState'].faults:
      self.events.add(EventName.relayMalfunction)
    if self.sm['longitudinalPlan'].fcw:
      self.events.add(EventName.fcw)

    # TODO: fix simulator
    if not self.dp_jetson and not SIMULATION:
      # if not NOSENSOR:
      #   if not self.sm['liveLocationKalman'].gpsOK and (self.distance_traveled > 1000) and \
      #      (not TICI or self.enable_lte_onroad):
      #     # Not show in first 1 km to allow for driving out of garage. This event shows after 5 minutes
      #     self.events.add(EventName.noGps)
      if not self.sm.all_alive(['roadCameraState', 'driverCameraState']):
        self.events.add(EventName.cameraMalfunction)
      if self.sm['modelV2'].frameDropPerc > 20:
        self.events.add(EventName.modeldLagging)

      # Check if all manager processes are running
      not_running = set(p.name for p in self.sm['managerState'].processes if not p.running)
      if self.sm.rcv_frame['managerState'] and (not_running - IGNORE_PROCESSES):
        self.events.add(EventName.processNotRunning)

    # Only allow engagement with brake pressed when stopped behind another stopped car
    if not self.sm['dragonConf'].dpAtl and CS.brakePressed and self.sm['longitudinalPlan'].vTargetFuture >= STARTING_TARGET_SPEED \
       and self.CP.openpilotLongitudinalControl and CS.vEgo < 0.3:
      self.events.add(EventName.noTarget)

  def data_sample(self):
    """Receive data from sockets and update carState"""

    # Update carState from CAN
    can_strs = messaging.drain_sock_raw(self.can_sock, wait_for_one=True)
    CS = self.CI.update(self.CC, can_strs, self.sm['dragonConf'])

    self.sm.update(0)

    # flip to initialized once everything is valid, or after a 2s grace period
    all_valid = CS.canValid and self.sm.all_alive_and_valid()
    if not self.initialized and (all_valid or self.sm.frame * DT_CTRL > 2.0):
      self.initialized = True
      Params().put_bool("ControlsReady", True)

    # Check for CAN timeout
    if not can_strs:
      self.can_error_counter += 1
      self.can_rcv_error = True
    else:
      self.can_rcv_error = False

    # When the panda and controlsd do not agree on controls_allowed
    # we want to disengage openpilot. However the status from the panda goes through
    # another socket other than the CAN messages and one can arrive earlier than the other.
    # Therefore we allow a mismatch for two samples, then we trigger the disengagement.
    if not self.enabled:
      self.mismatch_counter = 0

    if not self.sm['dragonConf'].dpAtl and not self.sm['pandaState'].controlsAllowed and self.enabled:
      self.mismatch_counter += 1

    self.distance_traveled += CS.vEgo * DT_CTRL

    return CS

  def state_transition(self, CS):
    """Compute conditional state transitions and execute actions on state transitions"""

    self.v_cruise_kph_last = self.v_cruise_kph

    # if stock cruise is completely disabled, then we can use our own set speed logic
    if not self.CP.enableCruise:
      self.v_cruise_kph = update_v_cruise(self.v_cruise_kph, CS.buttonEvents, self.enabled)
    elif self.CP.enableCruise and CS.cruiseState.enabled:
      self.v_cruise_kph = CS.cruiseState.speed * CV.MS_TO_KPH

    # decrease the soft disable timer at every step, as it's reset on
    # entrance in SOFT_DISABLING state
    self.soft_disable_timer = max(0, self.soft_disable_timer - 1)

    self.current_alert_types = [ET.PERMANENT]

    # ENABLED, PRE ENABLING, SOFT DISABLING
    if self.state != State.disabled:
      # user and immediate disable always have priority in a non-disabled state
      if self.events.any(ET.USER_DISABLE):
        self.state = State.disabled
        self.current_alert_types.append(ET.USER_DISABLE)

      elif self.events.any(ET.IMMEDIATE_DISABLE):
        self.state = State.disabled
        self.current_alert_types.append(ET.IMMEDIATE_DISABLE)

      else:
        # ENABLED
        if self.state == State.enabled:
          if self.events.any(ET.SOFT_DISABLE):
            self.state = State.softDisabling
            self.soft_disable_timer = 300   # 3s
            self.current_alert_types.append(ET.SOFT_DISABLE)

        # SOFT DISABLING
        elif self.state == State.softDisabling:
          if not self.events.any(ET.SOFT_DISABLE):
            # no more soft disabling condition, so go back to ENABLED
            self.state = State.enabled

          elif self.events.any(ET.SOFT_DISABLE) and self.soft_disable_timer > 0:
            self.current_alert_types.append(ET.SOFT_DISABLE)

          elif self.soft_disable_timer <= 0:
            self.state = State.disabled

        # PRE ENABLING
        elif self.state == State.preEnabled:
          if not self.events.any(ET.PRE_ENABLE):
            self.state = State.enabled
          else:
            self.current_alert_types.append(ET.PRE_ENABLE)

    # DISABLED
    elif self.state == State.disabled:
      if self.events.any(ET.ENABLE):
        if self.events.any(ET.NO_ENTRY):
          self.current_alert_types.append(ET.NO_ENTRY)

        else:
          if self.events.any(ET.PRE_ENABLE):
            self.state = State.preEnabled
          else:
            self.state = State.enabled
          self.current_alert_types.append(ET.ENABLE)
          self.v_cruise_kph = initialize_v_cruise(CS.vEgo, CS.buttonEvents, self.v_cruise_kph_last)

    # Check if actuators are enabled
    self.active = self.state == State.enabled or self.state == State.softDisabling
    if self.active:
      self.current_alert_types.append(ET.WARNING)

    # Check if openpilot is engaged
    self.enabled = self.active or self.state == State.preEnabled

  def state_control(self, CS):
    """Given the state, this function returns an actuators packet"""

    # Update VehicleModel
    params = self.sm['liveParameters']
    x = max(params.stiffnessFactor, 0.1)
    sr = max(params.steerRatio, 0.1)
    self.VM.update_params(x, sr)

    lat_plan = self.sm['lateralPlan']
    long_plan = self.sm['longitudinalPlan']

    actuators = car.CarControl.Actuators.new_message()

    if CS.leftBlinker or CS.rightBlinker:
      self.last_blinker_frame = self.sm.frame

    # State specific actions

    if not self.active:
      self.LaC.reset()
      self.LoC.reset(v_pid=CS.vEgo)

    # extrapolate the last longitudinal plan forward by its age
    long_plan_age = DT_CTRL * (self.sm.frame - self.sm.rcv_frame['longitudinalPlan'])
    # no greater than dt mpc + dt, to prevent too high extraps
    dt = min(long_plan_age, LON_MPC_STEP + DT_CTRL) + DT_CTRL
    a_acc_sol = long_plan.aStart + (dt / LON_MPC_STEP) * (long_plan.aTarget - long_plan.aStart)
    v_acc_sol = long_plan.vStart + dt * (a_acc_sol + long_plan.aStart) / 2.0

    # Gas/Brake PID loop
    actuators.gas, actuators.brake = self.LoC.update(self.active, CS, v_acc_sol, long_plan.vTargetFuture, a_acc_sol, self.CP)
    # Steering PID loop and lateral MPC
    actuators.steer, actuators.steeringAngleDeg, lac_log = self.LaC.update(self.active, CS, self.CP, self.VM, params, lat_plan)

    # Check for difference between desired angle and angle for angle based control
    angle_control_saturated = self.CP.steerControlType == car.CarParams.SteerControlType.angle and \
      abs(actuators.steeringAngleDeg - CS.steeringAngleDeg) > STEER_ANGLE_SATURATION_THRESHOLD

    if angle_control_saturated and not CS.steeringPressed and self.active:
      self.saturated_count += 1
    else:
      self.saturated_count = 0

    # Send a "steering required alert" if saturation count has reached the limit
    if (lac_log.saturated and not CS.steeringPressed) or \
       (self.saturated_count > STEER_ANGLE_SATURATION_TIMEOUT):

      if len(lat_plan.dPathPoints):
        # Check if we deviated from the path
        left_deviation = actuators.steer > 0 and lat_plan.dPathPoints[0] < -0.1
        right_deviation = actuators.steer < 0 and lat_plan.dPathPoints[0] > 0.1

        if left_deviation or right_deviation:
          self.events.add(EventName.steerSaturated)

    return actuators, v_acc_sol, a_acc_sol, lac_log

  def publish_logs(self, CS, start_time, actuators, v_acc, a_acc, lac_log):
    """Send actuators and hud commands to the car, send controlsstate and MPC logging"""

    CC = car.CarControl.new_message()
    CC.enabled = self.enabled
    CC.actuators = actuators

    CC.cruiseControl.override = True
    CC.cruiseControl.cancel = not self.CP.enableCruise or (not self.enabled and CS.cruiseState.enabled)

    # Some override values for Honda
    # brake discount removes a sharp nonlinearity
    brake_discount = (1.0 - clip(actuators.brake * 3., 0.0, 1.0))
    speed_override = max(0.0, (self.LoC.v_pid + CS.cruiseState.speedOffset) * brake_discount)
    CC.cruiseControl.speedOverride = float(speed_override if self.CP.enableCruise else 0.0)
    CC.cruiseControl.accelOverride = self.CI.calc_accel_override(CS.aEgo, self.sm['longitudinalPlan'].aTarget, CS.vEgo, self.sm['longitudinalPlan'].vTarget)

    CC.hudControl.setSpeed = float(self.v_cruise_kph * CV.KPH_TO_MS)
    CC.hudControl.speedVisible = self.enabled
    CC.hudControl.lanesVisible = self.enabled
    CC.hudControl.leadVisible = self.sm['longitudinalPlan'].hasLead

    right_lane_visible = self.sm['lateralPlan'].rProb > 0.5
    left_lane_visible = self.sm['lateralPlan'].lProb > 0.5
    CC.hudControl.rightLaneVisible = bool(right_lane_visible)
    CC.hudControl.leftLaneVisible = bool(left_lane_visible)

    recent_blinker = (self.sm.frame - self.last_blinker_frame) * DT_CTRL < 5.0  # 5s blinker cooldown
    ldw_allowed = self.is_ldw_enabled and CS.vEgo > LDW_MIN_SPEED and not recent_blinker \
                    and not self.active and self.sm['liveCalibration'].calStatus == Calibration.CALIBRATED

    meta = self.sm['modelV2'].meta
    if len(meta.desirePrediction) and ldw_allowed:
      l_lane_change_prob = meta.desirePrediction[Desire.laneChangeLeft - 1]
      r_lane_change_prob = meta.desirePrediction[Desire.laneChangeRight - 1]

      # 1.08m half-lane threshold, shifted by the camera mounting offset
      l_lane_close = left_lane_visible and (self.sm['modelV2'].laneLines[1].y[0] > -(1.08 + CAMERA_OFFSET))
      r_lane_close = right_lane_visible and (self.sm['modelV2'].laneLines[2].y[0] < (1.08 - CAMERA_OFFSET))

      CC.hudControl.leftLaneDepart = bool(l_lane_change_prob > LANE_DEPARTURE_THRESHOLD and l_lane_close)
      CC.hudControl.rightLaneDepart = bool(r_lane_change_prob > LANE_DEPARTURE_THRESHOLD and r_lane_close)

    if CC.hudControl.rightLaneDepart or CC.hudControl.leftLaneDepart:
      self.events.add(EventName.ldw)

    clear_event = ET.WARNING if ET.WARNING not in self.current_alert_types else None
    alerts = self.events.create_alerts(self.current_alert_types, [self.CP, self.sm, self.is_metric])
    self.AM.add_many(self.sm.frame, alerts, self.enabled)
    self.AM.process_alerts(self.sm.frame, clear_event)
    CC.hudControl.visualAlert = self.AM.visual_alert

    if not self.read_only and self.initialized:
      # send car controls over can
      can_sends = self.CI.apply(CC)
      self.pm.send('sendcan', can_list_to_can_capnp(can_sends, msgtype='sendcan', valid=CS.canValid))

    force_decel = (self.sm['driverMonitoringState'].awarenessStatus < 0.) or \
                  (self.state == State.softDisabling)

    # Curvature & Steering angle
    params = self.sm['liveParameters']
    lat_plan = self.sm['lateralPlan']

    steer_angle_without_offset = math.radians(CS.steeringAngleDeg - params.angleOffsetAverageDeg)
    curvature = -self.VM.calc_curvature(steer_angle_without_offset, CS.vEgo)
    angle_steers_des = math.degrees(self.VM.get_steer_from_curvature(-lat_plan.curvature, CS.vEgo))
    angle_steers_des += params.angleOffsetDeg

    # controlsState
    dat = messaging.new_message('controlsState')
    dat.valid = CS.canValid
    controlsState = dat.controlsState
    controlsState.alertText1 = self.AM.alert_text_1
    controlsState.alertText2 = self.AM.alert_text_2
    controlsState.alertSize = self.AM.alert_size
    controlsState.alertStatus = self.AM.alert_status
    controlsState.alertBlinkingRate = self.AM.alert_rate
    controlsState.alertType = self.AM.alert_type
    controlsState.alertSound = self.AM.audible_alert
    controlsState.canMonoTimes = list(CS.canMonoTimes)
    controlsState.longitudinalPlanMonoTime = self.sm.logMonoTime['longitudinalPlan']
    controlsState.lateralPlanMonoTime = self.sm.logMonoTime['lateralPlan']
    controlsState.enabled = self.enabled
    controlsState.active = self.active
    controlsState.curvature = curvature
    controlsState.angleSteers = CS.steeringAngleDeg
    controlsState.steeringAngleDesiredDeg = angle_steers_des
    controlsState.state = self.state
    controlsState.engageable = not self.events.any(ET.NO_ENTRY)
    controlsState.longControlState = self.LoC.long_control_state
    controlsState.vPid = float(self.LoC.v_pid)
    controlsState.vCruise = float(self.v_cruise_kph)
    controlsState.upAccelCmd = float(self.LoC.pid.p)
    controlsState.uiAccelCmd = float(self.LoC.pid.i)
    controlsState.ufAccelCmd = float(self.LoC.pid.f)
    controlsState.vTargetLead = float(v_acc)
    controlsState.aTarget = float(a_acc)
    controlsState.cumLagMs = -self.rk.remaining * 1000.
    controlsState.startMonoTime = int(start_time * 1e9)
    controlsState.forceDecel = bool(force_decel)
    controlsState.canErrorCounter = self.can_error_counter

    if self.CP.steerControlType == car.CarParams.SteerControlType.angle:
      controlsState.lateralControlState.angleState = lac_log
    elif self.CP.lateralTuning.which() == 'pid':
      controlsState.lateralControlState.pidState = lac_log
    elif self.CP.lateralTuning.which() == 'lqr':
      controlsState.lateralControlState.lqrState = lac_log
    elif self.CP.lateralTuning.which() == 'indi':
      controlsState.lateralControlState.indiState = lac_log
    self.pm.send('controlsState', dat)

    # carState
    car_events = self.events.to_msg()
    cs_send = messaging.new_message('carState')
    cs_send.valid = CS.canValid
    cs_send.carState = CS
    cs_send.carState.events = car_events
    self.pm.send('carState', cs_send)

    # carEvents - logged every second or on change
    if (self.sm.frame % int(1. / DT_CTRL) == 0) or (self.events.names != self.events_prev):
      ce_send = messaging.new_message('carEvents', len(self.events))
      ce_send.carEvents = car_events
      self.pm.send('carEvents', ce_send)
    self.events_prev = self.events.names.copy()

    # carParams - logged every 50 seconds (> 1 per segment)
    if (self.sm.frame % int(50. / DT_CTRL) == 0):
      cp_send = messaging.new_message('carParams')
      cp_send.carParams = self.CP
      self.pm.send('carParams', cp_send)

    # carControl
    cc_send = messaging.new_message('carControl')
    cc_send.valid = CS.canValid
    cc_send.carControl = CC
    self.pm.send('carControl', cc_send)

    # copy CarControl to pass to CarInterface on the next iteration
    self.CC = CC

  def step(self):
    """Run one 100Hz control iteration: sample, update events, control, publish."""
    start_time = sec_since_boot()
    self.prof.checkpoint("Ratekeeper", ignore=True)

    # Sample data from sockets and get a carState
    CS = self.data_sample()
    self.prof.checkpoint("Sample")

    self.update_events(CS)

    if not self.read_only and self.initialized:
      # Update control state
      self.state_transition(CS)
      self.prof.checkpoint("State transition")

    # Compute actuators (runs PID loops and lateral MPC)
    actuators, v_acc, a_acc, lac_log = self.state_control(CS)
    self.prof.checkpoint("State Control")

    # Publish data
    self.publish_logs(CS, start_time, actuators, v_acc, a_acc, lac_log)
    self.prof.checkpoint("Sent")

  def controlsd_thread(self):
    # main loop; never returns
    while True:
      self.step()
      self.rk.monitor_time()
      self.prof.display()
def main(sm=None, pm=None, logcan=None):
  """Build a Controls instance and run its loop forever.

  The sockets are injectable (sm/pm/logcan) so process replay and tests
  can substitute their own messaging endpoints.
  """
  Controls(sm, pm, logcan).controlsd_thread()


if __name__ == "__main__":
  main()
| 43.193959 | 189 | 0.719901 |
acf43fd3b2b9cfc034b8588bd59cc2abc47ac7f9 | 4,821 | py | Python | tests/test_ext_math.py | zzqcn/sphinx-doc | d7adc8efd7f71bb3f1633bf6cde19273fc95b977 | [
"BSD-2-Clause"
] | null | null | null | tests/test_ext_math.py | zzqcn/sphinx-doc | d7adc8efd7f71bb3f1633bf6cde19273fc95b977 | [
"BSD-2-Clause"
] | null | null | null | tests/test_ext_math.py | zzqcn/sphinx-doc | d7adc8efd7f71bb3f1633bf6cde19273fc95b977 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
test_ext_math
~~~~~~~~~~~~~
Test math extensions.
:copyright: Copyright 2007-2017 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import pytest
from util import SkipTest
@pytest.mark.sphinx(
    'html', testroot='ext-math',
    confoverrides={'extensions': ['sphinx.ext.jsmath'], 'jsmath_path': 'dummy.js'})
def test_jsmath(app, status, warning):
    """jsmath extension: math roles/directives render as plain HTML divs."""
    app.builder.build_all()
    content = (app.outdir / 'math.html').text()

    assert '<div class="math">\na^2 + b^2 = c^2</div>' in content
    assert '<div class="math">\n\\begin{split}a + 1 < b\\end{split}</div>' in content
    # numbered equations get an eqno span with a permalink anchor
    assert (u'<span class="eqno">(1)<a class="headerlink" href="#equation-foo" '
            u'title="Permalink to this equation">\xb6</a></span>'
            u'<div class="math" id="equation-foo">\ne^{i\\pi} = 1</div>' in content)
    assert (u'<span class="eqno">(2)<a class="headerlink" href="#equation-math:0" '
            u'title="Permalink to this equation">\xb6</a></span>'
            u'<div class="math" id="equation-math:0">\n'
            u'e^{ix} = \\cos x + i\\sin x</div>' in content)
    assert '<div class="math">\nn \\in \\mathbb N</div>' in content
    assert '<div class="math">\na + 1 < b</div>' in content
@pytest.mark.sphinx('html', testroot='ext-math-simple',
                    confoverrides={'extensions': ['sphinx.ext.imgmath']})
def test_imgmath_png(app, status, warning):
    """imgmath extension: math is rendered to a PNG image via latex + dvipng."""
    app.builder.build_all()
    if "LaTeX command 'latex' cannot be run" in warning.getvalue():
        raise SkipTest('LaTeX command "latex" is not available')
    if "dvipng command 'dvipng' cannot be run" in warning.getvalue():
        raise SkipTest('dvipng command "dvipng" is not available')

    content = (app.outdir / 'index.html').text()
    # raw strings: the pattern contains regex escapes (\s, \w, \^, \+)
    html = (r'<div class="math">\s*<p>\s*<img src="_images/math/\w+.png"'
            r'\s*alt="a\^2\+b\^2=c\^2"/>\s*</p>\s*</div>')
    assert re.search(html, content, re.S)
@pytest.mark.sphinx('html', testroot='ext-math-simple',
                    confoverrides={'extensions': ['sphinx.ext.imgmath'],
                                   'imgmath_image_format': 'svg'})
def test_imgmath_svg(app, status, warning):
    """imgmath extension: math is rendered to an SVG image via latex + dvisvgm."""
    app.builder.build_all()
    if "LaTeX command 'latex' cannot be run" in warning.getvalue():
        raise SkipTest('LaTeX command "latex" is not available')
    if "dvisvgm command 'dvisvgm' cannot be run" in warning.getvalue():
        raise SkipTest('dvisvgm command "dvisvgm" is not available')

    content = (app.outdir / 'index.html').text()
    # raw strings: the pattern contains regex escapes (\s, \w, \^, \+)
    html = (r'<div class="math">\s*<p>\s*<img src="_images/math/\w+.svg"'
            r'\s*alt="a\^2\+b\^2=c\^2"/>\s*</p>\s*</div>')
    assert re.search(html, content, re.S)
@pytest.mark.sphinx('html', testroot='ext-math',
                    confoverrides={'extensions': ['sphinx.ext.mathjax']})
def test_mathjax_align(app, status, warning):
    """mathjax extension: an align environment survives into the HTML output."""
    app.builder.build_all()

    built_page = (app.outdir / 'index.html').text()
    expected = (r'<div class="math">\s*'
                r'\\\[ \\begin\{align\}\\begin\{aligned\}S \&= \\pi r\^2\\\\'
                r'V \&= \\frac\{4\}\{3\} \\pi r\^3\\end\{aligned\}\\end\{align\} \\\]</div>')
    assert re.search(expected, built_page, re.DOTALL)
@pytest.mark.sphinx('html', testroot='ext-math',
                    confoverrides={'math_number_all': True,
                                   'extensions': ['sphinx.ext.mathjax']})
def test_math_number_all_mathjax(app, status, warning):
    """math_number_all: even unlabeled equations get an eqno span in HTML."""
    app.builder.build_all()

    built_page = (app.outdir / 'index.html').text()
    expected = (r'<div class="math" id="equation-index:0">\s*'
                r'<span class="eqno">\(1\)<a .*>\xb6</a></span>\\\[a\^2\+b\^2=c\^2\\\]</div>')
    assert re.search(expected, built_page, re.DOTALL)
@pytest.mark.sphinx('latex', testroot='ext-math',
                    confoverrides={'extensions': ['sphinx.ext.mathjax']})
def test_math_number_all_latex(app, status, warning):
    """LaTeX builder: math directives and eqref cross-references render as TeX."""
    app.builder.build_all()

    tex_output = (app.outdir / 'test.tex').text()
    expected_patterns = [
        # display equation
        (r'\\begin{equation\*}\s*'
         r'\\begin{split}a\^2\+b\^2=c\^2\\end{split}\s*'
         r'\\end{equation\*}'),
        # inline math
        r'Inline \\\(E=mc\^2\\\)',
        # second display equation
        (r'\\begin{equation\*}\s*'
         r'\\begin{split}e\^{i\\pi}\+1=0\\end{split}\s+'
         r'\\end{equation\*}'),
        # aligned environment
        (r'\\begin{align\*}\\!\\begin{aligned}\s*'
         r'S &= \\pi r\^2\\\\\s*'
         r'V &= \\frac\{4}\{3} \\pi r\^3\\\\\s*'
         r'\\end{aligned}\\end{align\*}'),
        # \eqref cross-reference
        r'Referencing equation \\eqref{equation:math:foo}.',
    ]
    for pattern in expected_patterns:
        assert re.search(pattern, tex_output, re.DOTALL)
| 40.175 | 93 | 0.577059 |
acf43ff11d4e3792469f75ba6b0c6ae79f034088 | 3,632 | py | Python | tensorflow_io/core/python/ops/json_dataset_ops.py | pshiko/io | a1793e6b41ed7a8db572249aba15a8e513a348a5 | [
"Apache-2.0"
] | null | null | null | tensorflow_io/core/python/ops/json_dataset_ops.py | pshiko/io | a1793e6b41ed7a8db572249aba15a8e513a348a5 | [
"Apache-2.0"
] | null | null | null | tensorflow_io/core/python/ops/json_dataset_ops.py | pshiko/io | a1793e6b41ed7a8db572249aba15a8e513a348a5 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""JSONDataset"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import uuid
import tensorflow as tf
from tensorflow_io.core.python.ops import core_ops
class _JSONIODatasetFunction(object):
  """Callable binding a column-read op to its resource, column and spec.

  Instances capture everything needed to read one JSON column, so the
  dataset pipeline only has to supply a (start, stop) index range.
  """

  def __init__(self, function, resource, component, shape, dtype):
    self._function = function
    self._resource = resource
    self._component = component
    self._dtype = dtype
    # First (batch) dimension is unknown at graph-build time.
    self._shape = tf.TensorShape([None]).concatenate(shape[1:])

  def __call__(self, start, stop):
    read_kwargs = dict(
        start=start, stop=stop,
        component=self._component,
        shape=self._shape, dtype=self._dtype)
    return self._function(self._resource, **read_kwargs)
class JSONIODataset(tf.compat.v2.data.Dataset):
  """tf.data.Dataset that streams columns out of a JSON file.

  Each requested column is read in chunks of `capacity` records through a
  readable resource op; per-column datasets are zipped together and
  unbatched so the resulting dataset yields one record at a time.
  """

  def __init__(self,
               filename,
               columns=None,
               mode=None,
               internal=True):
    """JSONIODataset.

    Args:
      filename: path of the JSON file to read.
      columns: optional list of column names; defaults to all columns
        reported by the readable resource.
      mode: optional read mode string forwarded as metadata to the init op.
      internal: must be True; guards against direct construction.
    """
    if not internal:
      raise ValueError("JSONIODataset constructor is private; please use one "
                       "of the factory methods instead (e.g., "
                       "IODataset.from_json())")
    with tf.name_scope("JSONIODataset") as scope:
      capacity = 4096

      metadata = [] if mode is None else ["mode: %s" % mode]
      resource, columns_v = core_ops.io_json_readable_init(
          filename, metadata=metadata,
          container=scope,
          shared_name="%s/%s" % (filename, uuid.uuid4().hex))
      columns = columns if columns is not None else columns_v.numpy()

      columns_dataset = []

      # Build one bound reader per column first, so each carries its own
      # resource/shape/dtype state.
      columns_function = []
      for column in columns:
        shape, dtype = core_ops.io_json_readable_spec(resource, column)
        shape = tf.TensorShape([None if e < 0 else e for e in shape.numpy()])
        dtype = tf.as_dtype(dtype.numpy())
        function = _JSONIODatasetFunction(
            core_ops.io_json_readable_read, resource, column, shape, dtype)
        columns_function.append(function)

      for (column, function) in zip(columns, columns_function):
        column_dataset = tf.compat.v2.data.Dataset.range(
            0, sys.maxsize, capacity)
        # Bind `function` via a default argument: a plain closure would
        # late-bind the loop variable, making every column's map callable
        # point at the LAST column's reader if traced after the loop.
        column_dataset = column_dataset.map(
            lambda index, fn=function: fn(index, index + capacity))
        # Stop once a chunk comes back empty (end of file).
        column_dataset = column_dataset.apply(
            tf.data.experimental.take_while(
                lambda v: tf.greater(tf.shape(v)[0], 0)))
        columns_dataset.append(column_dataset)
      if len(columns_dataset) == 1:
        dataset = columns_dataset[0]
      else:
        dataset = tf.compat.v2.data.Dataset.zip(tuple(columns_dataset))
      # Chunks are batches of `capacity`; unbatch to yield single records.
      dataset = dataset.unbatch()

      self._function = columns_function
      self._dataset = dataset
      super(JSONIODataset, self).__init__(
          self._dataset._variant_tensor)  # pylint: disable=protected-access

  def _inputs(self):
    return []

  @property
  def element_spec(self):
    return self._dataset.element_spec
| 36.686869 | 80 | 0.657489 |
acf44014b83ce00bd9273e65e22c2e33207576ee | 600 | py | Python | aiomatrix/types/responses/server_capabilities.py | Forden/aiomatrix | d258076bae8eb776495b92be46ee9f4baec8d9a6 | [
"MIT"
] | 2 | 2021-10-29T18:07:08.000Z | 2021-11-19T00:25:43.000Z | aiomatrix/types/responses/server_capabilities.py | Forden/aiomatrix | d258076bae8eb776495b92be46ee9f4baec8d9a6 | [
"MIT"
] | 1 | 2022-03-06T11:17:43.000Z | 2022-03-06T11:17:43.000Z | aiomatrix/types/responses/server_capabilities.py | Forden/aiomatrix | d258076bae8eb776495b92be46ee9f4baec8d9a6 | [
"MIT"
] | null | null | null | from typing import Dict
import pydantic
from pydantic import Field
from ..misc import RoomStabilityEnum
class ChangePasswordCapability(pydantic.BaseModel):
    """Matrix `m.change_password` capability payload."""

    # True when the homeserver allows changing passwords via the API.
    enabled: bool
class RoomVersionsCapability(pydantic.BaseModel):
    """Matrix `m.room_versions` capability payload."""

    # Room version used by default when creating new rooms.
    default: str
    # Mapping of room version -> stability (stable/unstable).
    available: Dict[str, RoomStabilityEnum]
class ServerCapabilities(pydantic.BaseModel):
    """Set of optional server capabilities, keyed by their Matrix names.

    Fields default to None when the homeserver omits the capability.
    """

    change_password: ChangePasswordCapability = Field(None, alias='m.change_password')
    room_versions: RoomVersionsCapability = Field(None, alias='m.room_versions')
class ServerCapabilitiesResponse(pydantic.BaseModel):
    """Top-level response body of the /capabilities endpoint."""

    capabilities: ServerCapabilities
| 24 | 86 | 0.803333 |
acf4405272cb419f7c97553c19913c847f651b27 | 304 | py | Python | wsgi/iportalen_django/events/migrations/0037_merge.py | I-sektionen/i-portalen | 1713e5814d40c0da1bf3278d60a561e7d3df3550 | [
"MIT"
] | 4 | 2016-09-21T17:06:01.000Z | 2018-02-06T16:36:44.000Z | wsgi/iportalen_django/events/migrations/0037_merge.py | I-sektionen/i-portalen | 1713e5814d40c0da1bf3278d60a561e7d3df3550 | [
"MIT"
] | 149 | 2016-03-07T23:50:47.000Z | 2022-03-11T23:16:33.000Z | wsgi/iportalen_django/events/migrations/0037_merge.py | I-sektionen/i-portalen | 1713e5814d40c0da1bf3278d60a561e7d3df3550 | [
"MIT"
] | 1 | 2016-03-07T23:02:06.000Z | 2016-03-07T23:02:06.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Merge migration joining the two parallel 0036 branches of `events`.

    Carries no schema operations; it exists only to give the migration
    graph a single leaf again.
    """

    dependencies = [
        ('events', '0036_imageattachment_otherattachment'),
        ('events', '0036_event_finished'),
    ]

    operations = [
    ]
| 19 | 59 | 0.664474 |
acf441181dd7eb65012747f2d9349e24f2a88f70 | 4,496 | py | Python | profiles_api/views.py | Hadraniel/profiles-rest-api | 2e197359a5c711a825518410106130d89f251719 | [
"MIT"
] | null | null | null | profiles_api/views.py | Hadraniel/profiles-rest-api | 2e197359a5c711a825518410106130d89f251719 | [
"MIT"
] | 4 | 2021-03-19T08:46:28.000Z | 2022-02-10T13:52:40.000Z | profiles_api/views.py | Hadraniel/profiles-rest-api | 2e197359a5c711a825518410106130d89f251719 | [
"MIT"
] | null | null | null | from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework import viewsets
from rest_framework.authentication import TokenAuthentication
from rest_framework import filters
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from rest_framework.permissions import IsAuthenticated
from profiles_api import serializers
from profiles_api import models
from profiles_api import permissions
class HelloApiView(APIView):
    """Demonstration APIView exercising each HTTP verb handler."""

    serializer_class = serializers.HelloSerializer

    def get(self, request, format=None):
        """Return a hard-coded list describing APIView features."""
        features = [
            'Uses HTTP methods as function (get, post, patch, put, delete)',
            'Is similar to a traditional Django View',
            'Gives you the most control over your application logic',
            'Is mapped manually to URLs',
        ]
        return Response({'message': 'Hello!', 'an_apiview': features})

    def post(self, request):
        """Create a hello message addressed to the submitted name."""
        serializer = self.serializer_class(data=request.data)
        if not serializer.is_valid():
            # Invalid payload: echo the validation errors back to the client.
            return Response(
                serializer.errors,
                status=status.HTTP_400_BAD_REQUEST,
            )
        name = serializer.validated_data.get('name')
        return Response({'message': f'Hello {name}'})

    def put(self, request, pk=None):
        """Handle updating an object (demo stub)."""
        return Response({'method': 'PUT'})

    def patch(self, request, pk=None):
        """Handle a partial update of an object (demo stub)."""
        return Response({'method': 'PATCH'})

    def delete(self, request, pk=None):
        """Delete an object (demo stub)."""
        return Response({'method': 'DELETE'})
class HelloViewSet(viewsets.ViewSet):
    """Demonstration ViewSet exercising each standard viewset action."""

    serializer_class = serializers.HelloSerializer

    def list(self, request):
        """Return a hard-coded list describing ViewSet features."""
        features = [
            'Uses actions (list, create, retrieve, update, partial_update)',
            'Automatically maps to URLs using Routers',
            'Provides more functionality with less code',
        ]
        return Response({'message': 'Hello', 'a_viewset': features})

    def create(self, request):
        """Create a hello message addressed to the submitted name."""
        serializer = self.serializer_class(data=request.data)
        if not serializer.is_valid():
            # Invalid payload: echo the validation errors back to the client.
            return Response(
                serializer.errors,
                status=status.HTTP_400_BAD_REQUEST,
            )
        name = serializer.validated_data.get('name')
        return Response({'message': f'Hello {name}'})

    def retrieve(self, request, pk=None):
        """Handle getting an object by its ID (demo stub)."""
        return Response({'http_method': 'GET'})

    def update(self, request, pk=None):
        """Handle updating an object (demo stub)."""
        return Response({'http_method': 'PUT'})

    def partial_update(self, request, pk=None):
        """Handle updating part of an object (demo stub)."""
        return Response({'http_method': 'PATCH'})

    def destroy(self, request, pk=None):
        """Handle removing an object (demo stub)."""
        return Response({'http_method': 'DELETE'})
class UserProfileViewSet(viewsets.ModelViewSet):
    """Handle creating and updating profiles.

    Full CRUD over UserProfile; write access is limited to the owning
    profile by the UpdateOwnProfile permission.
    """

    serializer_class = serializers.UserProfileSerializer
    queryset = models.UserProfile.objects.all()
    # Clients authenticate with a token in the Authorization header.
    authentication_classes = (TokenAuthentication,)
    permission_classes = (permissions.UpdateOwnProfile,)
    # Enables ?search= filtering over the fields below.
    filter_backends = (filters.SearchFilter,)
    search_fields = ('name', 'email',)
class UserLoginApiView(ObtainAuthToken):
    """Handle creating user authentication tokens.

    ObtainAuthToken does the work; renderer_classes is set explicitly so
    the endpoint is rendered in the browsable API.
    """

    renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
class UserProfileFeedViewSet(viewsets.ModelViewSet):
    """Handles creating, reading and updating profile feed items."""

    authentication_classes = (TokenAuthentication,)
    serializer_class = serializers.ProfileFeedItemSerializer
    queryset = models.ProfileFeedItem.objects.all()
    # Reading requires login; writing additionally requires owning the item.
    permission_classes = (permissions.UpdateOwnStatus, IsAuthenticated)

    def perform_create(self, serializer):
        """Sets the user profile to the logged in user"""
        # Stamp the authenticated user onto the new feed item.
        serializer.save(user_profile = self.request.user)
| 35.125 | 76 | 0.663034 |
acf4417a9fbb4726fad13970e429c6da80c9edaf | 4,726 | py | Python | labelling_tool/fix_voc.py | abhineet123/animal_detection_ | be0dd60d2b56b267f329b7be71d7f037499f98bc | [
"CC-BY-4.0"
] | 6 | 2020-06-18T16:41:40.000Z | 2022-03-10T07:15:13.000Z | labelling_tool/fix_voc.py | abhineet123/animal_detection_ | be0dd60d2b56b267f329b7be71d7f037499f98bc | [
"CC-BY-4.0"
] | 1 | 2021-08-11T08:42:28.000Z | 2021-08-11T08:42:28.000Z | labelling_tool/fix_voc.py | abhineet123/animal_detection_ | be0dd60d2b56b267f329b7be71d7f037499f98bc | [
"CC-BY-4.0"
] | 1 | 2022-02-25T11:06:17.000Z | 2022-02-25T11:06:17.000Z | import os, sys, glob, re
from libs.pascal_voc_io import PascalVocReader
sys.path.append('..')
from tf_api.utilities import processArguments
def saveBoxesTXT(_type, voc_path, class_dict, out_dir=''):
    """Convert Pascal VOC XML annotations to plain-text box files.

    Args:
        _type: 0 writes mAP-style lines ("label xmin ymin xmax ymax");
            any other value writes YOLO-style lines
            ("class_id x_center y_center w h", normalized). When _type == 1
            a list.txt of the source image paths is written as well.
        voc_path: Directory containing the *.xml annotation files.
        class_dict: Mapping from class name to 0-based class index
            (YOLO ids are written 1-based).
        out_dir: Output directory; defaults to a sibling of voc_path named
            after the output type.

    Returns:
        The output directory path, or None if voc_path is missing or empty.
    """
    _type_str = 'mAP' if _type == 0 else 'yolo'

    if not voc_path or not os.path.isdir(voc_path):
        print('Folder containing the loaded boxes does not exist')
        return None

    files = glob.glob(os.path.join(voc_path, '*.xml'))
    n_files = len(files)
    if n_files == 0:
        print('No loaded boxes found')
        return None

    def convert_to_yolo(size, box):
        # box is (xmin, xmax, ymin, ymax); returns normalized center/size.
        dw = 1. / size[0]
        dh = 1. / size[1]
        x = (box[0] + box[1]) / 2.0
        y = (box[2] + box[3]) / 2.0
        w = box[1] - box[0]
        h = box[3] - box[2]
        return x * dw, y * dh, w * dw, h * dh

    def clamp(x, min_value=0.0, max_value=1.0):
        # Clip x into [min_value, max_value]; hoisted out of the per-box loop.
        return max(min(x, max_value), min_value)

    def getint(fn):
        # Numeric sort key extracted from the file name (0 if none).
        basename = os.path.basename(fn)
        num = re.sub(r"\D", "", basename)  # raw string: "\D" is an invalid escape
        try:
            return int(num)
        except ValueError:
            return 0

    files = sorted(files, key=getint)

    if not out_dir:
        out_dir = os.path.join(os.path.dirname(voc_path), _type_str)
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)

    # list.txt collects the source image paths (YOLO training lists).
    list_file = None
    if _type == 1:
        list_path = os.path.join(out_dir, 'list.txt')
        list_file = open(list_path, 'w')

    print('Loading VOC annotations from {:d} files at {:s}...'.format(n_files, voc_path))
    print('Writing {} annotations to {:s}...'.format(_type_str, out_dir))

    file_id = 0
    n_boxes = 0
    img_width = img_height = 0
    for file in files:
        file_no_ext = os.path.splitext(os.path.basename(file))[0]
        out_file_path = os.path.join(out_dir, '{}.txt'.format(file_no_ext))
        out_file = open(out_file_path, 'w')
        xml_reader = PascalVocReader(file)
        shapes = xml_reader.getShapes()
        img_width = xml_reader.width
        img_height = xml_reader.height
        for shape in shapes:
            label, points, _, _, difficult, bbox_source, id_number, score, _, _ = shape
            # points is the box polygon: corner 0 is top-left, 2 bottom-right.
            xmin, ymin = points[0]
            xmax, ymax = points[2]
            # Clip boxes to the image bounds before writing.
            xmin = int(clamp(xmin, 0, img_width - 1))
            xmax = int(clamp(xmax, 0, img_width - 1))
            ymin = int(clamp(ymin, 0, img_height - 1))
            ymax = int(clamp(ymax, 0, img_height - 1))
            if _type == 0:
                out_file.write('{:s} {:d} {:d} {:d} {:d}\n'.format(label, xmin, ymin, xmax, ymax))
            else:
                # YOLO class ids are written 1-based (class_dict is 0-based).
                class_id = class_dict[label] + 1
                bb = convert_to_yolo((xml_reader.width, xml_reader.height), [xmin, xmax, ymin, ymax])
                out_file.write('{:d} {:f} {:f} {:f} {:f}\n'.format(class_id, bb[0], bb[1], bb[2], bb[3]))
            if _type == 1:
                list_file.write('{:s}\n'.format(xml_reader.filename))
            n_boxes += 1
        file_id += 1
        sys.stdout.write('\rDone {:d}/{:d} files with {:d} boxes ({:d}x{:d})'.format(
            file_id, n_files, n_boxes, img_width, img_height))
        sys.stdout.flush()
        out_file.close()
    if _type == 1:
        list_file.close()
    sys.stdout.write('\n')
    sys.stdout.flush()
    return out_dir
if __name__ == '__main__':
    # Default parameters; overridden in-place from the command line by
    # processArguments. Only list_file, class_names_path, type and out_dir
    # are read below; the remaining keys are accepted but unused here.
    params = {
        'list_file': 'vis_list.txt',
        'class_names_path': '../labelling_tool/data//predefined_classes_orig.txt',
        'type': 0,
        # 'file_name': 'videos/grizzly_bear_video.mp4',
        'out_dir': '',
        'save_dir': '',
        'save_file_name': '',
        'csv_file_name': '',
        'map_folder': '',
        'load_path': '',
        'n_classes': 4,
        'img_ext': 'png',
        'batch_size': 1,
        'show_img': 0,
        'save_video': 1,
        'n_frames': 0,
        'codec': 'H264',
        'fps': 20,
    }
    processArguments(sys.argv[1:], params)
    list_file = params['list_file']
    class_names_path = params['class_names_path']
    _type = params['type']
    out_dir = params['out_dir']

    # Map class name -> 0-based index, in file order.
    class_names = open(class_names_path, 'r').readlines()
    class_dict = {x.strip(): i for (i, x) in enumerate(class_names)}

    # Each line of list_file names a sequence directory that contains an
    # 'annotations' sub-folder of VOC XML files.
    with open(list_file) as f:
        img_paths = f.readlines()
        img_paths = [x.strip() for x in img_paths]

    for img_path in img_paths:
        voc_path = os.path.join(img_path, 'annotations')
        seq_out_dir = out_dir
        if seq_out_dir:
            # Keep each sequence's output separate under the common out_dir.
            seq_name = os.path.basename(img_path)
            seq_out_dir = os.path.join(seq_out_dir, seq_name)
        saveBoxesTXT(_type, voc_path, class_dict, seq_out_dir)
| 30.490323 | 105 | 0.552687 |
acf4426a6ee0c9a691b1457ad1a441b81f3414b4 | 483 | py | Python | tests/core/records/test_document_type.py | pyramidoereb/pyramid_oereb | 764c03e98e01ebc709cd17bd0ffd817bfe318892 | [
"BSD-2-Clause"
] | 2 | 2018-01-23T13:16:12.000Z | 2018-01-26T06:27:29.000Z | tests/core/records/test_document_type.py | camptocamp/pyramid_oereb | 2d33aceb796f0afada6728820fa9d4691f7e273a | [
"BSD-2-Clause"
] | 298 | 2017-08-30T07:12:10.000Z | 2019-01-31T10:52:07.000Z | tests/core/records/test_document_type.py | pyramidoereb/pyramid_oereb | 764c03e98e01ebc709cd17bd0ffd817bfe318892 | [
"BSD-2-Clause"
] | 4 | 2017-12-01T09:51:42.000Z | 2018-11-21T11:02:47.000Z | # -*- coding: utf-8 -*-
from pyramid_oereb.core.records.document_types import DocumentTypeRecord
def test_document_type_init():
record = DocumentTypeRecord(u'code', {u'de': u'Gesetzliche Grundlage'})
assert record.code == u'code'
assert record.title == {
u'de': u'Gesetzliche Grundlage'
}
def test_wrong_types():
record = DocumentTypeRecord({'de': 'titel'}, 'content')
assert isinstance(record.code, dict)
assert isinstance(record.title, str)
| 28.411765 | 75 | 0.691511 |
acf443755c54879d7c5f1fcda762fba6d1843ba0 | 3,729 | py | Python | compute_flops.py | kbui1993/NonconvexNetworkSlimming | 659b7945e932f089c11b8966d3149e59bf677496 | [
"MIT"
] | 3 | 2020-10-22T03:09:30.000Z | 2021-09-30T22:00:19.000Z | compute_flops.py | kbui1993/NonconvexNetworkSlimming | 659b7945e932f089c11b8966d3149e59bf677496 | [
"MIT"
] | null | null | null | compute_flops.py | kbui1993/NonconvexNetworkSlimming | 659b7945e932f089c11b8966d3149e59bf677496 | [
"MIT"
] | null | null | null | import numpy as np
import torch
import torchvision
import torch.nn as nn
from torch.autograd import Variable
def print_model_param_flops(model=None, input_res=224, multiply_adds=True):
    """Estimate the FLOPs of one forward pass of `model`.

    Registers forward hooks on every leaf Conv2d / Linear / BatchNorm2d /
    ReLU / pooling / Upsample module, runs a single forward pass with a
    random (1, 3, input_res, input_res) input, and sums the per-layer
    costs. All hooks are removed afterwards, so repeated calls on the same
    model do not double count (the original left them attached).

    Args:
        model: Network to measure; defaults to torchvision's AlexNet.
        input_res: Spatial resolution of the square dummy input; the model
            is assumed to take 3-channel images.
        multiply_adds: If True, count a multiply-add as 2 FLOPs.

    Returns:
        Total estimated FLOPs (also printed in GFLOPs).
    """
    list_conv = []

    def conv_hook(self, input, output):
        batch_size, input_channels, input_height, input_width = input[0].size()
        output_channels, output_height, output_width = output[0].size()
        # Per output element: one MAC per kernel tap (per input group) + bias.
        kernel_ops = self.kernel_size[0] * self.kernel_size[1] * (self.in_channels / self.groups)
        bias_ops = 1 if self.bias is not None else 0
        flops = (kernel_ops * (2 if multiply_adds else 1) + bias_ops) * \
            output_channels * output_height * output_width * batch_size
        list_conv.append(flops)

    list_linear = []

    def linear_hook(self, input, output):
        batch_size = input[0].size(0) if input[0].dim() == 2 else 1
        weight_ops = self.weight.nelement() * (2 if multiply_adds else 1)
        bias_ops = self.bias.nelement()
        list_linear.append(batch_size * (weight_ops + bias_ops))

    list_bn = []

    def bn_hook(self, input, output):
        # Scale and shift: 2 ops per element.
        list_bn.append(input[0].nelement() * 2)

    list_relu = []

    def relu_hook(self, input, output):
        list_relu.append(input[0].nelement())

    list_pooling = []

    def pooling_hook(self, input, output):
        batch_size, input_channels, input_height, input_width = input[0].size()
        output_channels, output_height, output_width = output[0].size()
        kernel_ops = self.kernel_size * self.kernel_size
        list_pooling.append(
            kernel_ops * output_channels * output_height * output_width * batch_size)

    list_upsample = []

    def upsample_hook(self, input, output):
        # Bilinear upsample: ~12 ops per output element.
        batch_size, input_channels, input_height, input_width = input[0].size()
        output_channels, output_height, output_width = output[0].size()
        list_upsample.append(
            output_height * output_width * output_channels * batch_size * 12)

    # Leaf-module class -> accounting hook.
    hook_map = (
        (torch.nn.Conv2d, conv_hook),
        (torch.nn.Linear, linear_hook),
        (torch.nn.BatchNorm2d, bn_hook),
        (torch.nn.ReLU, relu_hook),
        (torch.nn.MaxPool2d, pooling_hook),
        (torch.nn.AvgPool2d, pooling_hook),
        (torch.nn.Upsample, upsample_hook),
    )
    handles = []

    def register(net):
        # Recurse to leaf modules and attach the matching hook, if any.
        children = list(net.children())
        if not children:
            for cls, hook in hook_map:
                if isinstance(net, cls):
                    handles.append(net.register_forward_hook(hook))
                    break
            return
        for child in children:
            register(child)

    if model is None:
        model = torchvision.models.alexnet()
    register(model)

    dummy = torch.rand(1, 3, input_res, input_res)
    model(dummy)

    # Detach every hook so subsequent calls start from a clean model.
    for handle in handles:
        handle.remove()

    total_flops = (sum(list_conv) + sum(list_linear) + sum(list_bn) +
                   sum(list_relu) + sum(list_pooling) + sum(list_upsample))
    print(' + Number of FLOPs: %.2fG' % (total_flops / 1e9))
    return total_flops
| 35.179245 | 131 | 0.645481 |
acf44457cbf101a3c34086d6e391cb2d428dc48a | 5,341 | py | Python | homeassistant/components/canary/sensor.py | danielkucera/core | b3b9c52df21a8420cc3e8d18b6bfa8b396e156ad | [
"Apache-2.0"
] | 3 | 2017-09-16T23:34:59.000Z | 2021-12-20T11:11:27.000Z | homeassistant/components/canary/sensor.py | danielkucera/core | b3b9c52df21a8420cc3e8d18b6bfa8b396e156ad | [
"Apache-2.0"
] | 45 | 2020-10-15T06:47:06.000Z | 2022-03-31T06:26:16.000Z | homeassistant/components/canary/sensor.py | denics/home-assistant | 53bf972ed7839db564f9c5f257cfb18e00f954eb | [
"Apache-2.0"
] | 2 | 2019-08-04T13:39:43.000Z | 2020-02-07T23:01:23.000Z | """Support for Canary sensors."""
from typing import Callable, List
from canary.api import SensorType
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_SIGNAL_STRENGTH,
DEVICE_CLASS_TEMPERATURE,
PERCENTAGE,
TEMP_CELSIUS,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import HomeAssistantType
from . import CanaryData
from .const import DATA_CANARY, DOMAIN, MANUFACTURER
# Sensor readings are rounded to this many decimal places.
SENSOR_VALUE_PRECISION = 2

# State-attribute key used for the bucketed air-quality level.
ATTR_AIR_QUALITY = "air_quality"

# Define variables to store the device names, as referred to by the Canary API.
# Note: If Canary change the name of any of their devices (which they have done),
# then these variables will need updating, otherwise the sensors will stop working
# and disappear in Home Assistant.
CANARY_PRO = "Canary Pro"
CANARY_FLEX = "Canary Flex"

# Sensor types are defined like so:
# sensor type name, unit_of_measurement, icon, device class, products supported
SENSOR_TYPES = [
    ["temperature", TEMP_CELSIUS, None, DEVICE_CLASS_TEMPERATURE, [CANARY_PRO]],
    ["humidity", PERCENTAGE, None, DEVICE_CLASS_HUMIDITY, [CANARY_PRO]],
    ["air_quality", None, "mdi:weather-windy", None, [CANARY_PRO]],
    ["wifi", "dBm", None, DEVICE_CLASS_SIGNAL_STRENGTH, [CANARY_FLEX]],
    ["battery", PERCENTAGE, None, DEVICE_CLASS_BATTERY, [CANARY_FLEX]],
]

# Textual buckets for the raw air-quality reading (see CanarySensor).
STATE_AIR_QUALITY_NORMAL = "normal"
STATE_AIR_QUALITY_ABNORMAL = "abnormal"
STATE_AIR_QUALITY_VERY_ABNORMAL = "very_abnormal"
async def async_setup_entry(
    hass: HomeAssistantType,
    entry: ConfigEntry,
    async_add_entities: Callable[[List[Entity], bool], None],
) -> None:
    """Set up Canary sensors based on a config entry."""
    data: CanaryData = hass.data[DOMAIN][entry.entry_id][DATA_CANARY]

    # One sensor per (online device, supported sensor type) pair; a type
    # is supported when the device model appears in its products list.
    sensors = [
        CanarySensor(data, sensor_type, location, device)
        for location in data.locations
        for device in location.devices
        if device.is_online
        for sensor_type in SENSOR_TYPES
        if device.device_type.get("name") in sensor_type[4]
    ]

    async_add_entities(sensors, True)
class CanarySensor(Entity):
    """Representation of a Canary sensor."""

    def __init__(self, data, sensor_type, location, device):
        """Initialize the sensor.

        sensor_type is a SENSOR_TYPES entry:
        [name, unit_of_measurement, icon, device class, supported products].
        """
        self._data = data
        self._sensor_type = sensor_type
        self._device_id = device.device_id
        self._device_name = device.name
        self._device_type_name = device.device_type["name"]
        # Last reading; None until update() succeeds.
        self._sensor_value = None

        # e.g. "air_quality" -> "Air Quality"
        sensor_type_name = sensor_type[0].replace("_", " ").title()
        self._name = f"{location.name} {device.name} {sensor_type_name}"

    @property
    def name(self):
        """Return the name of the Canary sensor."""
        return self._name

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._sensor_value

    @property
    def unique_id(self):
        """Return the unique ID of this sensor (device id + type name)."""
        return f"{self._device_id}_{self._sensor_type[0]}"

    @property
    def device_info(self):
        """Return the device_info of the device."""
        return {
            "identifiers": {(DOMAIN, str(self._device_id))},
            "name": self._device_name,
            "model": self._device_type_name,
            "manufacturer": MANUFACTURER,
        }

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement."""
        return self._sensor_type[1]

    @property
    def device_class(self):
        """Device class for the sensor."""
        return self._sensor_type[3]

    @property
    def icon(self):
        """Icon for the sensor."""
        return self._sensor_type[2]

    @property
    def device_state_attributes(self):
        """Return the state attributes.

        For the air-quality sensor, the raw reading is bucketed into a
        textual level; lower readings map to worse air.
        """
        if self._sensor_type[0] == "air_quality" and self._sensor_value is not None:
            air_quality = None
            if self._sensor_value <= 0.4:
                air_quality = STATE_AIR_QUALITY_VERY_ABNORMAL
            elif self._sensor_value <= 0.59:
                air_quality = STATE_AIR_QUALITY_ABNORMAL
            elif self._sensor_value <= 1.0:
                air_quality = STATE_AIR_QUALITY_NORMAL
            # NOTE(review): a reading above 1.0 falls through with
            # air_quality == None — presumably readings are in [0, 1];
            # confirm against the Canary API.
            return {ATTR_AIR_QUALITY: air_quality}

        return None

    def update(self):
        """Get the latest state of the sensor."""
        self._data.update()

        # Map our sensor-type name onto the Canary API SensorType enum.
        canary_sensor_type = None
        if self._sensor_type[0] == "air_quality":
            canary_sensor_type = SensorType.AIR_QUALITY
        elif self._sensor_type[0] == "temperature":
            canary_sensor_type = SensorType.TEMPERATURE
        elif self._sensor_type[0] == "humidity":
            canary_sensor_type = SensorType.HUMIDITY
        elif self._sensor_type[0] == "wifi":
            canary_sensor_type = SensorType.WIFI
        elif self._sensor_type[0] == "battery":
            canary_sensor_type = SensorType.BATTERY

        value = self._data.get_reading(self._device_id, canary_sensor_type)

        if value is not None:
            # Keep the previous value when the API returns nothing.
            self._sensor_value = round(float(value), SENSOR_VALUE_PRECISION)
| 33.591195 | 84 | 0.658304 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.