hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
55b06f25c1d96b1becadf3a363fb778fd324768c | 1,179 | py | Python | VLLV/Models/__init__.py | rzumer/VideoLowLevelVision | 1c15dbe74f0b33d2e74d0a717a22ccbd67cdf269 | [
"MIT"
] | 3 | 2019-04-15T10:00:39.000Z | 2019-04-26T07:51:27.000Z | VLLV/Models/__init__.py | rzumer/VideoLowLevelVision | 1c15dbe74f0b33d2e74d0a717a22ccbd67cdf269 | [
"MIT"
] | null | null | null | VLLV/Models/__init__.py | rzumer/VideoLowLevelVision | 1c15dbe74f0b33d2e74d0a717a22ccbd67cdf269 | [
"MIT"
] | null | null | null | from . import (Srcnn, Espcn, Dcscn, DnCnn, Vdsr, Drcn, Drrn, LapSrn, MemNet,
Edsr, Idn, Rdn, Dbpn, Carn, Rcan, SrGan, Vespcn, Msrn,
SRDenseNet, SRFeat, Gan)
__all__ = ['get_model', 'list_supported_models']
# Registry mapping a lowercase short name to the model class implementing it.
# Keys are the names accepted by get_model(); values are classes from the
# sibling modules imported above.
alias = {
	'srcnn': Srcnn.SRCNN,
	'espcn': Espcn.ESPCN,
	'vdsr': Vdsr.VDSR,
	'drcn': Drcn.DRCN,
	'dncnn': DnCnn.DnCNN,
	'idn': Idn.InformationDistillationNetwork,
	'rdn': Rdn.ResidualDenseNetwork,
	'dcscn': Dcscn.DCSCN,
	'lapsrn': LapSrn.LapSRN,
	'drrn': Drrn.DRRN,
	'memnet': MemNet.MEMNET,
	'dbpn': Dbpn.DBPN,
	'edsr': Edsr.EDSR,
	'srgan': SrGan.SRGAN,
	'carn': Carn.CARN,
	'rcan': Rcan.RCAN,
	'msrn': Msrn.MSRN,
	'vespcn': Vespcn.VESPCN,
	'srdensenet': SRDenseNet.SRDenseNet,
	'srfeat': SRFeat.SRFEAT,
	# GAN variants all live in the Gan module.
	'sgan': Gan.SGAN,
	'gangp': Gan.SGANGP,
	'lsgan': Gan.LSGAN,
	'wgan': Gan.WGAN,
	'wgangp': Gan.WGANGP,
	'rgan': Gan.RGAN,
	'rgangp': Gan.RGANGP,
	'ragan': Gan.RaGAN,
	'ragangp': Gan.RaGANGP,
	'rlsgan': Gan.RLSGAN,
	'ralsgan': Gan.RaLSGAN,
}
def get_model(name):
	"""Return the model class registered under ``name`` in ``alias``.

	The lookup is case-insensitive (all registry keys are lowercase), so
	``get_model('SRCNN')`` and ``get_model('srcnn')`` are equivalent.

	Raises:
		KeyError: if ``name`` is not a supported model alias; the message
			lists the supported names instead of the bare key.
	"""
	try:
		return alias[name.lower()]
	except KeyError:
		raise KeyError('Unknown model name: {!r}. Supported models: {}'.format(
			name, sorted(alias.keys()))) from None
def list_supported_models():
	"""Return a view of all registered model alias names (dict_keys)."""
	return alias.keys()
| 24.5625 | 76 | 0.601357 |
27eb2e33bb4b871e3fe190cbd9999c3d49756982 | 688 | py | Python | configs/selfsup/mocov1/cifar100/r18_4xb64_step_ep1000.py | Westlake-AI/openmixup | ea81250819e740dd823e30cb7ce382d14a3c1b91 | [
"Apache-2.0"
] | 10 | 2021-12-30T10:22:27.000Z | 2022-03-30T02:31:38.000Z | configs/selfsup/mocov1/cifar100/r18_4xb64_step_ep1000.py | Westlake-AI/openmixup | ea81250819e740dd823e30cb7ce382d14a3c1b91 | [
"Apache-2.0"
] | 3 | 2022-01-20T21:02:48.000Z | 2022-03-19T13:49:45.000Z | configs/selfsup/mocov1/cifar100/r18_4xb64_step_ep1000.py | Westlake-AI/openmixup | ea81250819e740dd823e30cb7ce382d14a3c1b91 | [
"Apache-2.0"
] | null | null | null | _base_ = [
'../../_base_/models/mocov1/r18.py',
'../../_base_/datasets/cifar100/mocov1_sz224_bs64.py',
'../../_base_/default_runtime.py',
]
# Gradient accumulation interval: effective batch = 4 GPUs x bs64 x 1 = 256.
update_interval = 1 # total: 4 x bs64 x 1 accumulates = bs256
# Optimizer: SGD with the standard MoCo v1 settings.
optimizer = dict(type='SGD', lr=0.03, weight_decay=1e-4, momentum=0.9)
# Mixed precision (NVIDIA apex). The fp16 dict below takes effect only when
# use_fp16 is switched to True.
use_fp16 = False
fp16 = dict(type='apex', loss_scale=dict(init_scale=512., mode='dynamic'))
# Hook wiring for accumulation / fp16; gradient clipping disabled.
optimizer_config = dict(update_interval=update_interval, use_fp16=use_fp16, grad_clip=None)
# Learning-rate schedule: step decay at epochs 600 and 800.
lr_config = dict(policy='step', step=[600, 800])
# Runtime: train for 1000 epochs.
runner = dict(type='EpochBasedRunner', max_epochs=1000)
| 28.666667 | 91 | 0.719477 |
fe75dbb9d690f3c813ed73f5b0c641dc5f54a13d | 75,056 | py | Python | src/utils.py | uehr/amena | 96e23678a261a4de6896d6d70540f447724018b7 | [
"MIT"
] | 1 | 2021-01-17T13:36:51.000Z | 2021-01-17T13:36:51.000Z | src/utils.py | uehr/amena | 96e23678a261a4de6896d6d70540f447724018b7 | [
"MIT"
] | 1 | 2021-01-19T11:23:42.000Z | 2021-01-19T11:23:42.000Z | src/utils.py | uehr/amena | 96e23678a261a4de6896d6d70540f447724018b7 | [
"MIT"
] | null | null | null | import os
import glob
import cv2
import scipy.misc as misc
from skimage.transform import resize
import numpy as np
from functools import reduce
from operator import mul
import torch
from torch import nn
import re
try:
import cynetworkx as netx
except ImportError:
import networkx as netx
from scipy.ndimage import gaussian_filter
from skimage.feature import canny
import collections
import shutil
import imageio
import copy
from mpl_toolkits.mplot3d import Axes3D
import time
from scipy.interpolate import interp1d
from collections import namedtuple
def path_planning(num_frames, x, y, z, path_type=''):
    """Generate a smooth camera trajectory of ``num_frames`` 3D positions.

    Args:
        num_frames: number of samples along the path.
        x, y, z: per-axis extents/offsets scaling the trajectory.
        path_type: one of
            'straight-line'        -- quadratic spline origin -> (x, y, z),
            'double-straight-line' -- quadratic spline (-x, -y, -z) -> (x, y, z),
            'circle'               -- closed loop scaled by (x, y, z).

    Returns:
        Tuple of three 1-D numpy arrays (xs, ys, zs), each of length
        ``num_frames``.

    Raises:
        ValueError: if ``path_type`` is not one of the supported names.
            (Previously an unknown type crashed with NameError at the
            return statement.)
    """
    if path_type == 'straight-line':
        corner_points = np.array([[0, 0, 0],
                                  [(0 + x) * 0.5, (0 + y) * 0.5, (0 + z) * 0.5],
                                  [x, y, z]])
        xs, ys, zs = _sample_quadratic_spline(corner_points, num_frames)
    elif path_type == 'double-straight-line':
        corner_points = np.array([[-x, -y, -z], [0, 0, 0], [x, y, z]])
        xs, ys, zs = _sample_quadratic_spline(corner_points, num_frames)
    elif path_type == 'circle':
        xs, ys, zs = [], [], []
        # Sweep the phase from -2*pi to 2*pi (bs_shift_val in [-2, 2)).
        for frame_id, bs_shift_val in enumerate(np.arange(-2.0, 2.0, (4. / num_frames))):
            xs += [np.cos(bs_shift_val * np.pi) * 1 * x]
            ys += [np.sin(bs_shift_val * np.pi) * 1 * y]
            zs += [np.cos(bs_shift_val * np.pi / 2.) * 1 * z]
        xs, ys, zs = np.array(xs), np.array(ys), np.array(zs)
    else:
        raise ValueError('Unknown path_type: {!r}'.format(path_type))
    return xs, ys, zs


def _sample_quadratic_spline(corner_points, num_frames):
    """Fit a quadratic spline through ``corner_points`` (Kx3) and sample
    ``num_frames`` evenly spaced points; returns [xs, ys, zs] 1-D arrays."""
    corner_t = np.linspace(0, 1, len(corner_points))
    t = np.linspace(0, 1, num_frames)
    spline = interp1d(corner_t, corner_points, axis=0, kind='quadratic')(t)
    return [coord.squeeze() for coord in np.split(spline, 3, 1)]
def open_small_mask(mask, context, open_iteration, kernel):
    """Grow ``mask`` to absorb small gaps between mask and context.

    Runs ``open_iteration`` rounds of dilate-then-erode (morphological
    closing) on mask+context, then marks as mask every pixel the closed
    result covers beyond the original context.

    Args:
        mask: binary hole-mask tensor; assumes a 1x1xHxW layout (it is
            squeezed and re-expanded with [None, None, ...]) -- TODO confirm.
        context: binary known-region tensor of the same shape.
        open_iteration: number of closing rounds.
        kernel: side length of the square structuring element.

    Returns:
        Float tensor on the same device/shape convention as ``mask``.
    """
    np_mask = mask.cpu().data.numpy().squeeze().astype(np.uint8)
    np_context = context.cpu().data.numpy().squeeze().astype(np.uint8)
    np_input = np_mask + np_context
    for _ in range(open_iteration):
        # dilate then erode == closing; bridges thin gaps of up to ~kernel px.
        np_input = cv2.erode(cv2.dilate(np_input, np.ones((kernel, kernel)), iterations=1), np.ones((kernel, kernel)), iterations=1)
    # Anything the closed result covers that context does not becomes mask.
    np_mask[(np_input - np_context) > 0] = 1
    out_mask = torch.FloatTensor(np_mask).to(mask)[None, None, ...]
    return out_mask
def filter_irrelevant_edge_new(self_edge, comp_edge, other_edges, other_edges_with_id, current_edge_id, context, depth, mesh, context_cc, spdb=False):
    """Keep only the context edges that terminate next to the current edge.

    Other edges that merely pass near ``self_edge``/``comp_edge`` are zeroed
    out; edges that *end* beside the current edge keep their pixels, have the
    depth of their end point recorded in ``end_depth_maps``, and -- when the
    contact is diagonal (bevel) only -- get extended by one neighbour pixel so
    the contact becomes 4-connected.

    Note: ``current_edge_id`` and ``spdb`` are accepted for interface
    compatibility but unused here.

    Returns:
        (other_edges, end_depth_maps, other_edges_info) -- filtered edge map
        restricted to ``context``, per-pixel end depths, and an always-empty
        info list kept for interface compatibility.
    """
    other_edges = other_edges.squeeze().astype(np.uint8)
    other_edges_with_id = other_edges_with_id.squeeze()
    self_edge = self_edge.squeeze()
    # 8-connected vs 4-connected dilations of the current edge.
    dilate_bevel_self_edge = cv2.dilate((self_edge + comp_edge).astype(np.uint8), np.array([[1,1,1],[1,1,1],[1,1,1]]), iterations=1)
    dilate_cross_self_edge = cv2.dilate((self_edge + comp_edge).astype(np.uint8), np.array([[0,1,0],[1,1,1],[0,1,0]]).astype(np.uint8), iterations=1)
    # np.int was removed in NumPy 1.24; the builtin int is the exact equivalent.
    edge_ids = np.unique(other_edges_with_id * context + (-1) * (1 - context)).astype(int)
    end_depth_maps = np.zeros_like(self_edge)
    self_edge_ids = np.sort(np.unique(other_edges_with_id[self_edge > 0]).astype(int))
    self_edge_ids = self_edge_ids[1:] if self_edge_ids.shape[0] > 0 and self_edge_ids[0] == -1 else self_edge_ids
    self_comp_ids = np.sort(np.unique(other_edges_with_id[comp_edge > 0]).astype(int))
    self_comp_ids = self_comp_ids[1:] if self_comp_ids.shape[0] > 0 and self_comp_ids[0] == -1 else self_comp_ids
    # Guard the empty case (previously edge_ids[0] could raise IndexError),
    # matching the guards used for self_edge_ids/self_comp_ids above.
    edge_ids = edge_ids[1:] if edge_ids.shape[0] > 0 and edge_ids[0] == -1 else edge_ids
    other_edges_info = []
    extend_other_edges = np.zeros_like(other_edges)
    filter_self_edge = np.zeros_like(self_edge)
    for self_edge_id in self_edge_ids:
        filter_self_edge[other_edges_with_id == self_edge_id] = 1
    dilate_self_comp_edge = cv2.dilate(comp_edge, kernel=np.ones((3, 3)), iterations=2)
    valid_self_comp_edge = np.zeros_like(comp_edge)
    for self_comp_id in self_comp_ids:
        valid_self_comp_edge[self_comp_id == other_edges_with_id] = 1
    self_comp_edge = dilate_self_comp_edge * valid_self_comp_edge
    filter_self_edge = (filter_self_edge + self_comp_edge).clip(0, 1)
    for edge_id in edge_ids:
        other_edge_locs = (other_edges_with_id == edge_id).astype(np.uint8)
        condition = (other_edge_locs * other_edges * context.astype(np.uint8))
        end_cross_point = dilate_cross_self_edge * condition * (1 - filter_self_edge)
        end_bevel_point = dilate_bevel_self_edge * condition * (1 - filter_self_edge)
        if end_bevel_point.max() != 0:
            # This edge ends beside the current edge: record end depths.
            end_depth_maps[end_bevel_point != 0] = depth[end_bevel_point != 0]
            if end_cross_point.max() == 0:
                # Diagonal-only contact: extend one pixel to make it 4-connected.
                nxs, nys = np.where(end_bevel_point != 0)
                for nx, ny in zip(nxs, nys):
                    bevel_node = [xx for xx in context_cc if xx[0] == nx and xx[1] == ny][0]
                    for ne in mesh.neighbors(bevel_node):
                        if other_edges_with_id[ne[0], ne[1]] > -1 and dilate_cross_self_edge[ne[0], ne[1]] > 0:
                            extend_other_edges[ne[0], ne[1]] = 1
                            break
        else:
            # Edge never terminates near the current edge: drop it entirely.
            other_edges[other_edges_with_id == edge_id] = 0
    other_edges = (other_edges + extend_other_edges).clip(0, 1) * context
    return other_edges, end_depth_maps, other_edges_info
def clean_far_edge_new(input_edge, end_depth_maps, mask, context, global_mesh, info_on_pix, self_edge, inpaint_id, config):
    """Plan near/far edge paths for depth-edge pixels inside the inpaint mask.

    Builds an 8-connected pixel graph over masked edge pixels, splits it into
    connected components, and for each component with one or two anchored end
    points plans a "near" path (npath: along the edge itself) and a parallel
    "far" path (fpath: offset one pixel toward the far side of the depth
    discontinuity).

    Returns:
        (fpath_map, npath_map, False, npaths, fpaths, invalid_edge_ids) --
        per-pixel edge-id maps (-1 where empty), dicts mapping edge id to the
        list of (x, y) path pixels, and ids of edges merged end-to-end.
    """
    mesh = netx.Graph()
    hxs, hys = np.where(input_edge * mask > 0)
    valid_near_edge = (input_edge != 0).astype(np.uint8) * context
    valid_map = mask + context
    invalid_edge_ids = []
    # Build the 8-connected graph over masked edge pixels; neighbours with a
    # known end depth are tagged 'cnt' (anchor/end-point candidates).
    for hx, hy in zip(hxs, hys):
        node = (hx ,hy)
        mesh.add_node((hx, hy))
        eight_nes = [ne for ne in [(hx + 1, hy), (hx - 1, hy), (hx, hy + 1), (hx, hy - 1), \
                    (hx + 1, hy + 1), (hx - 1, hy - 1), (hx - 1, hy + 1), (hx + 1, hy - 1)]\
                        if 0 <= ne[0] < input_edge.shape[0] and 0 <= ne[1] < input_edge.shape[1] and 0 < input_edge[ne[0], ne[1]]] # or end_depth_maps[ne[0], ne[1]] != 0]
        for ne in eight_nes:
            mesh.add_edge(node, ne, length=np.hypot(ne[0] - hx, ne[1] - hy))
            if end_depth_maps[ne[0], ne[1]] != 0:
                mesh.nodes[ne[0], ne[1]]['cnt'] = True
                if end_depth_maps[ne[0], ne[1]] == 0:
                    import pdb; pdb.set_trace()  # debug trap: unreachable by construction
                mesh.nodes[ne[0], ne[1]]['depth'] = end_depth_maps[ne[0], ne[1]]
            elif mask[ne[0], ne[1]] != 1:
                # Border pixel outside the mask: link to 4-neighbours that
                # carry an end depth so the component stays anchored.
                four_nes = [nne for nne in [(ne[0] + 1, ne[1]), (ne[0] - 1, ne[1]), (ne[0], ne[1] + 1), (ne[0], ne[1] - 1)]\
                            if nne[0] < end_depth_maps.shape[0] and nne[0] >= 0 and nne[1] < end_depth_maps.shape[1] and nne[1] >= 0]
                for nne in four_nes:
                    if end_depth_maps[nne[0], nne[1]] != 0:
                        mesh.add_edge(nne, ne, length=np.hypot(nne[0] - ne[0], nne[1] - ne[1]))
                        mesh.nodes[nne[0], nne[1]]['cnt'] = True
                        mesh.nodes[nne[0], nne[1]]['depth'] = end_depth_maps[nne[0], nne[1]]
    # Collect the anchored end points (x, y, depth) of each component.
    ccs = [*netx.connected_components(mesh)]
    end_pts = []
    for cc in ccs:
        end_pts.append(set())
        for node in cc:
            if mesh.nodes[node].get('cnt') is not None:
                end_pts[-1].add((node[0], node[1], mesh.nodes[node]['depth']))
    predef_npaths = [None for _ in range(len(ccs))]
    fpath_map = np.zeros_like(input_edge) - 1
    npath_map = np.zeros_like(input_edge) - 1
    npaths, fpaths = dict(), dict()
    break_flag = False
    end_idx = 0
    # Components may be appended to while iterating (two-end split below),
    # hence the explicit while loop instead of a for loop.
    while end_idx < len(end_pts):
        end_pt, cc = [*zip(end_pts, ccs)][end_idx]
        end_idx += 1
        sorted_end_pt = []
        fpath = []
        iter_fpath = []
        if len(end_pt) > 2 or len(end_pt) == 0:
            if len(end_pt) > 2:
                continue
            continue
        if len(end_pt) == 2:
            # Two anchored ends: try planning from both sides; if the two
            # plans agree (large overlap inside the mask), merge end-to-end,
            # otherwise split the component into two one-ended pieces.
            ravel_end = [*end_pt]
            tmp_sub_mesh = mesh.subgraph(list(cc)).copy()
            tmp_npath = [*netx.shortest_path(tmp_sub_mesh, (ravel_end[0][0], ravel_end[0][1]), (ravel_end[1][0], ravel_end[1][1]), weight='length')]
            fpath_map1, npath_map1, disp_diff1 = plan_path(mesh, info_on_pix, cc, ravel_end[0:1], global_mesh, input_edge, mask, valid_map, inpaint_id, npath_map=None, fpath_map=None, npath=tmp_npath)
            fpath_map2, npath_map2, disp_diff2 = plan_path(mesh, info_on_pix, cc, ravel_end[1:2], global_mesh, input_edge, mask, valid_map, inpaint_id, npath_map=None, fpath_map=None, npath=tmp_npath)
            tmp_disp_diff = [disp_diff1, disp_diff2]
            self_end = []
            edge_len = []
            ds_edge = cv2.dilate(self_edge.astype(np.uint8), np.ones((3, 3)), iterations=1)
            if ds_edge[ravel_end[0][0], ravel_end[0][1]] > 0:
                self_end.append(1)
            else:
                self_end.append(0)
            if ds_edge[ravel_end[1][0], ravel_end[1][1]] > 0:
                self_end.append(1)
            else:
                self_end.append(0)
            edge_len = [np.count_nonzero(npath_map1), np.count_nonzero(npath_map2)]
            # Prefer the end touching self_edge, then the longer planned path.
            sorted_end_pts = [xx[0] for xx in sorted(zip(ravel_end, self_end, edge_len, [disp_diff1, disp_diff2]), key=lambda x: (x[1], x[2]), reverse=True)]
            re_npath_map1, re_fpath_map1 = (npath_map1 != -1).astype(np.uint8), (fpath_map1 != -1).astype(np.uint8)
            re_npath_map2, re_fpath_map2 = (npath_map2 != -1).astype(np.uint8), (fpath_map2 != -1).astype(np.uint8)
            if np.count_nonzero(re_npath_map1 * re_npath_map2 * mask) / \
                (np.count_nonzero((re_npath_map1 + re_npath_map2) * mask) + 1e-6) > 0.5\
                and np.count_nonzero(re_fpath_map1 * re_fpath_map2 * mask) / \
                (np.count_nonzero((re_fpath_map1 + re_fpath_map2) * mask) + 1e-6) > 0.5\
                and tmp_disp_diff[0] != -1 and tmp_disp_diff[1] != -1:
                my_fpath_map, my_npath_map, npath, fpath = \
                    plan_path_e2e(mesh, cc, sorted_end_pts, global_mesh, input_edge, mask, valid_map, inpaint_id, npath_map=None, fpath_map=None)
                npath_map[my_npath_map != -1] = my_npath_map[my_npath_map != -1]
                fpath_map[my_fpath_map != -1] = my_fpath_map[my_fpath_map != -1]
                if len(fpath) > 0:
                    edge_id = global_mesh.nodes[[*sorted_end_pts][0]]['edge_id']
                    fpaths[edge_id] = fpath
                    npaths[edge_id] = npath
                invalid_edge_ids.append(edge_id)
            else:
                # Disagreeing plans: split the npath between the two ends in
                # proportion to their disparity differences, re-queue halves.
                if tmp_disp_diff[0] != -1:
                    ratio_a = tmp_disp_diff[0] / (np.sum(tmp_disp_diff) + 1e-8)
                else:
                    ratio_a = 0
                if tmp_disp_diff[1] != -1:
                    ratio_b = tmp_disp_diff[1] / (np.sum(tmp_disp_diff) + 1e-8)
                else:
                    ratio_b = 0
                npath_len = len(tmp_npath)
                if npath_len > config['depth_edge_dilate_2'] * 2:
                    npath_len = npath_len - (config['depth_edge_dilate_2'] * 1)
                tmp_npath_a = tmp_npath[:int(np.floor(npath_len * ratio_a))]
                tmp_npath_b = tmp_npath[::-1][:int(np.floor(npath_len * ratio_b))]
                tmp_merge = []
                if len(tmp_npath_a) > 0 and sorted_end_pts[0][0] == tmp_npath_a[0][0] and sorted_end_pts[0][1] == tmp_npath_a[0][1]:
                    if len(tmp_npath_a) > 0 and mask[tmp_npath_a[-1][0], tmp_npath_a[-1][1]] > 0:
                        tmp_merge.append([sorted_end_pts[:1], tmp_npath_a])
                    if len(tmp_npath_b) > 0 and mask[tmp_npath_b[-1][0], tmp_npath_b[-1][1]] > 0:
                        tmp_merge.append([sorted_end_pts[1:2], tmp_npath_b])
                elif len(tmp_npath_b) > 0 and sorted_end_pts[0][0] == tmp_npath_b[0][0] and sorted_end_pts[0][1] == tmp_npath_b[0][1]:
                    if len(tmp_npath_b) > 0 and mask[tmp_npath_b[-1][0], tmp_npath_b[-1][1]] > 0:
                        tmp_merge.append([sorted_end_pts[:1], tmp_npath_b])
                    if len(tmp_npath_a) > 0 and mask[tmp_npath_a[-1][0], tmp_npath_a[-1][1]] > 0:
                        tmp_merge.append([sorted_end_pts[1:2], tmp_npath_a])
                for tmp_idx in range(len(tmp_merge)):
                    if len(tmp_merge[tmp_idx][1]) == 0:
                        continue
                    end_pts.append(tmp_merge[tmp_idx][0])
                    ccs.append(set(tmp_merge[tmp_idx][1]))
        if len(end_pt) == 1:
            # Single anchored end: walk the npath to the component periphery,
            # then trace the parallel fpath starting from a 'far' seed pixel.
            sub_mesh = mesh.subgraph(list(cc)).copy()
            pnodes = netx.periphery(sub_mesh)
            if len(end_pt) == 1:
                ends = [*end_pt]
            elif len(sorted_end_pt) == 1:
                ends = [*sorted_end_pt]
            else:
                import pdb; pdb.set_trace()  # debug trap: unreachable by construction
            try:
                edge_id = global_mesh.nodes[ends[0]]['edge_id']
            except:
                import pdb; pdb.set_trace()  # debug trap: end point missing from global mesh
            pnodes = sorted(pnodes,
                            key=lambda x: np.hypot((x[0] - ends[0][0]), (x[1] - ends[0][1])),
                            reverse=True)[0]
            npath = [*netx.shortest_path(sub_mesh, (ends[0][0], ends[0][1]), pnodes, weight='length')]
            for np_node in npath:
                npath_map[np_node[0], np_node[1]] = edge_id
            fpath = []
            if global_mesh.nodes[ends[0]].get('far') is None:
                print("None far")
            else:
                # Pick a 'far' seed just outside the mask (<= 3 dilations away).
                fnodes = global_mesh.nodes[ends[0]].get('far')
                dmask = mask + 0
                did = 0
                while True:
                    did += 1
                    dmask = cv2.dilate(dmask, np.ones((3, 3)), iterations=1)
                    if did > 3:
                        break
                    ffnode = [fnode for fnode in fnodes if (dmask[fnode[0], fnode[1]] > 0 and mask[fnode[0], fnode[1]] == 0 and\
                                                            global_mesh.nodes[fnode].get('inpaint_id') != inpaint_id + 1)]
                    if len(ffnode) > 0:
                        fnode = ffnode[0]
                        break
                if len(ffnode) == 0:
                    continue
                fpath.append((fnode[0], fnode[1]))
                # "Barrel" bookkeeping: rotate a ring of the 8 directions so
                # the near->far offset follows the npath's turns -- presumably
                # keeps the fpath exactly one pixel to the far side; verify.
                barrel_dir = np.array([[1, 0], [1, 1], [0, 1], [-1, 1], [-1, 0], [-1, -1], [0, -1], [1, -1]])
                n2f_dir = (int(fnode[0] - npath[0][0]), int(fnode[1] - npath[0][1]))
                while True:
                    if barrel_dir[0, 0] == n2f_dir[0] and barrel_dir[0, 1] == n2f_dir[1]:
                        n2f_barrel = barrel_dir.copy()
                        break
                    barrel_dir = np.roll(barrel_dir, 1, axis=0)
                for step in range(0, len(npath)):
                    if step == 0:
                        continue
                    elif step == 1:
                        next_dir = (npath[step][0] - npath[step - 1][0], npath[step][1] - npath[step - 1][1])
                        while True:
                            if barrel_dir[0, 0] == next_dir[0] and barrel_dir[0, 1] == next_dir[1]:
                                next_barrel = barrel_dir.copy()
                                break
                            barrel_dir = np.roll(barrel_dir, 1, axis=0)
                        barrel_pair = np.stack((n2f_barrel, next_barrel), axis=0)
                        n2f_dir = (barrel_pair[0, 0, 0], barrel_pair[0, 0, 1])
                    elif step > 1:
                        next_dir = (npath[step][0] - npath[step - 1][0], npath[step][1] - npath[step - 1][1])
                        while True:
                            if barrel_pair[1, 0, 0] == next_dir[0] and barrel_pair[1, 0, 1] == next_dir[1]:
                                next_barrel = barrel_pair.copy()
                                break
                            barrel_pair = np.roll(barrel_pair, 1, axis=1)
                        n2f_dir = (barrel_pair[0, 0, 0], barrel_pair[0, 0, 1])
                    new_locs = []
                    if abs(n2f_dir[0]) == 1:
                        new_locs.append((npath[step][0] + n2f_dir[0], npath[step][1]))
                    if abs(n2f_dir[1]) == 1:
                        new_locs.append((npath[step][0], npath[step][1] + n2f_dir[1]))
                    if len(new_locs) > 1:
                        new_locs = sorted(new_locs, key=lambda xx: np.hypot((xx[0] - fpath[-1][0]), (xx[1] - fpath[-1][1])))
                    break_flag = False
                    for new_loc in new_locs:
                        new_loc_nes = [xx for xx in [(new_loc[0] + 1, new_loc[1]), (new_loc[0] - 1, new_loc[1]),
                                                    (new_loc[0], new_loc[1] + 1), (new_loc[0], new_loc[1] - 1)]\
                                        if xx[0] >= 0 and xx[0] < fpath_map.shape[0] and xx[1] >= 0 and xx[1] < fpath_map.shape[1]]
                        if np.all([(fpath_map[nlne[0], nlne[1]] == -1) for nlne in new_loc_nes]) != True:
                            break
                        if npath_map[new_loc[0], new_loc[1]] != -1:
                            if npath_map[new_loc[0], new_loc[1]] != edge_id:
                                break_flag = True
                                break
                            else:
                                continue
                        if valid_map[new_loc[0], new_loc[1]] == 0:
                            break_flag = True
                            break
                        fpath.append(new_loc)
                    if break_flag is True:
                        break
                # Truncate the npath where the fpath trace stopped early.
                if step != len(npath) - 1:
                    for xx in npath[step:]:
                        if npath_map[xx[0], xx[1]] == edge_id:
                            npath_map[xx[0], xx[1]] = -1
                    npath = npath[:step]
                if len(fpath) > 0:
                    for fp_node in fpath:
                        fpath_map[fp_node[0], fp_node[1]] = edge_id
                    fpaths[edge_id] = fpath
                    npaths[edge_id] = npath
                fpath_map[valid_near_edge != 0] = -1
                if len(fpath) > 0:
                    # Drop fpath pixels that landed on existing context edges.
                    iter_fpath = copy.deepcopy(fpaths[edge_id])
                    for node in iter_fpath:
                        if valid_near_edge[node[0], node[1]] != 0:
                            fpaths[edge_id].remove(node)
    return fpath_map, npath_map, False, npaths, fpaths, invalid_edge_ids
def plan_path_e2e(mesh, cc, end_pts, global_mesh, input_edge, mask, valid_map, inpaint_id, npath_map=None, fpath_map=None):
    """Plan an end-to-end near path and its parallel far path between the two
    anchored end points of one edge component.

    The npath is the shortest path between the two ends inside the component;
    the fpath is traced one pixel toward the far side, seeded from each end's
    'far' neighbours just outside the mask.

    Returns:
        (my_fpath_map, my_npath_map, npath, fpath) -- per-pixel id maps
        (-1 elsewhere) and the two paths as lists of (x, y); the paths are
        empty when no valid far seed exists for either end.
    """
    my_npath_map = np.zeros_like(input_edge) - 1
    my_fpath_map = np.zeros_like(input_edge) - 1
    sub_mesh = mesh.subgraph(list(cc)).copy()
    ends_1, ends_2 = end_pts[0], end_pts[1]
    edge_id = global_mesh.nodes[ends_1]['edge_id']
    npath = [*netx.shortest_path(sub_mesh, (ends_1[0], ends_1[1]), (ends_2[0], ends_2[1]), weight='length')]
    for np_node in npath:
        my_npath_map[np_node[0], np_node[1]] = edge_id
    fpath = []
    if global_mesh.nodes[ends_1].get('far') is None:
        print("None far")
    else:
        # Find a far-side seed outside the mask for each end point.
        fnodes = global_mesh.nodes[ends_1].get('far')
        dmask = mask + 0
        while True:
            dmask = cv2.dilate(dmask, np.ones((3, 3)), iterations=1)
            ffnode = [fnode for fnode in fnodes if (dmask[fnode[0], fnode[1]] > 0 and mask[fnode[0], fnode[1]] == 0 and\
                                                    global_mesh.nodes[fnode].get('inpaint_id') != inpaint_id + 1)]
            if len(ffnode) > 0:
                fnode = ffnode[0]
                break
        e_fnodes = global_mesh.nodes[ends_2].get('far')
        dmask = mask + 0
        while True:
            dmask = cv2.dilate(dmask, np.ones((3, 3)), iterations=1)
            e_ffnode = [e_fnode for e_fnode in e_fnodes if (dmask[e_fnode[0], e_fnode[1]] > 0 and mask[e_fnode[0], e_fnode[1]] == 0 and\
                                                    global_mesh.nodes[e_fnode].get('inpaint_id') != inpaint_id + 1)]
            if len(e_ffnode) > 0:
                e_fnode = e_ffnode[0]
                break
        fpath.append((fnode[0], fnode[1]))
        if len(e_ffnode) == 0 or len(ffnode) == 0:
            return my_npath_map, my_fpath_map, [], []
        # "Barrel" direction ring: rotates the near->far offset in sync with
        # the npath's turns -- presumably keeps the fpath one pixel to the far
        # side throughout; verify against the original 3d-photo-inpainting repo.
        barrel_dir = np.array([[1, 0], [1, 1], [0, 1], [-1, 1], [-1, 0], [-1, -1], [0, -1], [1, -1]])
        n2f_dir = (int(fnode[0] - npath[0][0]), int(fnode[1] - npath[0][1]))
        while True:
            if barrel_dir[0, 0] == n2f_dir[0] and barrel_dir[0, 1] == n2f_dir[1]:
                n2f_barrel = barrel_dir.copy()
                break
            barrel_dir = np.roll(barrel_dir, 1, axis=0)
        for step in range(0, len(npath)):
            if step == 0:
                continue
            elif step == 1:
                next_dir = (npath[step][0] - npath[step - 1][0], npath[step][1] - npath[step - 1][1])
                while True:
                    if barrel_dir[0, 0] == next_dir[0] and barrel_dir[0, 1] == next_dir[1]:
                        next_barrel = barrel_dir.copy()
                        break
                    barrel_dir = np.roll(barrel_dir, 1, axis=0)
                barrel_pair = np.stack((n2f_barrel, next_barrel), axis=0)
                n2f_dir = (barrel_pair[0, 0, 0], barrel_pair[0, 0, 1])
            elif step > 1:
                next_dir = (npath[step][0] - npath[step - 1][0], npath[step][1] - npath[step - 1][1])
                while True:
                    if barrel_pair[1, 0, 0] == next_dir[0] and barrel_pair[1, 0, 1] == next_dir[1]:
                        next_barrel = barrel_pair.copy()
                        break
                    barrel_pair = np.roll(barrel_pair, 1, axis=1)
                n2f_dir = (barrel_pair[0, 0, 0], barrel_pair[0, 0, 1])
            new_locs = []
            if abs(n2f_dir[0]) == 1:
                new_locs.append((npath[step][0] + n2f_dir[0], npath[step][1]))
            if abs(n2f_dir[1]) == 1:
                new_locs.append((npath[step][0], npath[step][1] + n2f_dir[1]))
            if len(new_locs) > 1:
                new_locs = sorted(new_locs, key=lambda xx: np.hypot((xx[0] - fpath[-1][0]), (xx[1] - fpath[-1][1])))
            break_flag = False
            for new_loc in new_locs:
                new_loc_nes = [xx for xx in [(new_loc[0] + 1, new_loc[1]), (new_loc[0] - 1, new_loc[1]),
                                            (new_loc[0], new_loc[1] + 1), (new_loc[0], new_loc[1] - 1)]\
                                if xx[0] >= 0 and xx[0] < my_fpath_map.shape[0] and xx[1] >= 0 and xx[1] < my_fpath_map.shape[1]]
                if fpath_map is not None and np.sum([fpath_map[nlne[0], nlne[1]] for nlne in new_loc_nes]) != 0:
                    break_flag = True
                    break
                if my_npath_map[new_loc[0], new_loc[1]] != -1:
                    continue
                if npath_map is not None and npath_map[new_loc[0], new_loc[1]] != edge_id:
                    break_flag = True
                    break
                fpath.append(new_loc)
            if break_flag is True:
                break
        if (e_fnode[0], e_fnode[1]) not in fpath:
            fpath.append((e_fnode[0], e_fnode[1]))
        # Truncate the npath where the fpath trace stopped early.
        if step != len(npath) - 1:
            for xx in npath[step:]:
                if my_npath_map[xx[0], xx[1]] == edge_id:
                    my_npath_map[xx[0], xx[1]] = -1
            npath = npath[:step]
        if len(fpath) > 0:
            for fp_node in fpath:
                my_fpath_map[fp_node[0], fp_node[1]] = edge_id
    return my_fpath_map, my_npath_map, npath, fpath
def plan_path(mesh, info_on_pix, cc, end_pt, global_mesh, input_edge, mask, valid_map, inpaint_id, npath_map=None, fpath_map=None, npath=None):
    """Plan a near path and its parallel far path from a single anchored end
    point of one edge component.

    If ``npath`` is not given, the npath runs from the end point to the
    farthest periphery node of the component; otherwise the provided path is
    reused (reversed if necessary so it starts at the end point).

    Returns:
        (my_fpath_map, my_npath_map, disp_diff) -- per-pixel id maps (-1
        elsewhere) and the disparity gap measured at the first masked npath
        pixel; disp_diff is -1 when no valid far seed is found within 3
        dilations.
    """
    my_npath_map = np.zeros_like(input_edge) - 1
    my_fpath_map = np.zeros_like(input_edge) - 1
    sub_mesh = mesh.subgraph(list(cc)).copy()
    pnodes = netx.periphery(sub_mesh)
    ends = [*end_pt]
    edge_id = global_mesh.nodes[ends[0]]['edge_id']
    # Farthest periphery node from the end point becomes the npath target.
    pnodes = sorted(pnodes,
                    key=lambda x: np.hypot((x[0] - ends[0][0]), (x[1] - ends[0][1])),
                    reverse=True)[0]
    if npath is None:
        npath = [*netx.shortest_path(sub_mesh, (ends[0][0], ends[0][1]), pnodes, weight='length')]
    else:
        # Reorient the supplied path to start at the requested end point.
        if (ends[0][0], ends[0][1]) == npath[0]:
            npath = npath
        elif (ends[0][0], ends[0][1]) == npath[-1]:
            npath = npath[::-1]
        else:
            import pdb; pdb.set_trace()  # debug trap: end point not on the path
    for np_node in npath:
        my_npath_map[np_node[0], np_node[1]] = edge_id
    fpath = []
    if global_mesh.nodes[ends[0]].get('far') is None:
        print("None far")
    else:
        # Find a far-side seed outside the mask, giving up after 3 dilations.
        fnodes = global_mesh.nodes[ends[0]].get('far')
        dmask = mask + 0
        did = 0
        while True:
            did += 1
            if did > 3:
                return my_fpath_map, my_npath_map, -1
            dmask = cv2.dilate(dmask, np.ones((3, 3)), iterations=1)
            ffnode = [fnode for fnode in fnodes if (dmask[fnode[0], fnode[1]] > 0 and mask[fnode[0], fnode[1]] == 0 and\
                                                    global_mesh.nodes[fnode].get('inpaint_id') != inpaint_id + 1)]
            if len(ffnode) > 0:
                fnode = ffnode[0]
                break
        fpath.append((fnode[0], fnode[1]))
        # Disparity gap between the end point and the first masked npath pixel.
        disp_diff = 0.
        for n_loc in npath:
            if mask[n_loc[0], n_loc[1]] != 0:
                disp_diff = abs(abs(1. / info_on_pix[(n_loc[0], n_loc[1])][0]['depth']) - abs(1. / ends[0][2]))
                break
        # "Barrel" direction ring: rotates the near->far offset in sync with
        # the npath's turns -- presumably keeps the fpath one pixel to the far
        # side throughout; verify against the original 3d-photo-inpainting repo.
        barrel_dir = np.array([[1, 0], [1, 1], [0, 1], [-1, 1], [-1, 0], [-1, -1], [0, -1], [1, -1]])
        n2f_dir = (int(fnode[0] - npath[0][0]), int(fnode[1] - npath[0][1]))
        while True:
            if barrel_dir[0, 0] == n2f_dir[0] and barrel_dir[0, 1] == n2f_dir[1]:
                n2f_barrel = barrel_dir.copy()
                break
            barrel_dir = np.roll(barrel_dir, 1, axis=0)
        for step in range(0, len(npath)):
            if step == 0:
                continue
            elif step == 1:
                next_dir = (npath[step][0] - npath[step - 1][0], npath[step][1] - npath[step - 1][1])
                while True:
                    if barrel_dir[0, 0] == next_dir[0] and barrel_dir[0, 1] == next_dir[1]:
                        next_barrel = barrel_dir.copy()
                        break
                    barrel_dir = np.roll(barrel_dir, 1, axis=0)
                barrel_pair = np.stack((n2f_barrel, next_barrel), axis=0)
                n2f_dir = (barrel_pair[0, 0, 0], barrel_pair[0, 0, 1])
            elif step > 1:
                next_dir = (npath[step][0] - npath[step - 1][0], npath[step][1] - npath[step - 1][1])
                while True:
                    if barrel_pair[1, 0, 0] == next_dir[0] and barrel_pair[1, 0, 1] == next_dir[1]:
                        next_barrel = barrel_pair.copy()
                        break
                    barrel_pair = np.roll(barrel_pair, 1, axis=1)
                n2f_dir = (barrel_pair[0, 0, 0], barrel_pair[0, 0, 1])
            new_locs = []
            if abs(n2f_dir[0]) == 1:
                new_locs.append((npath[step][0] + n2f_dir[0], npath[step][1]))
            if abs(n2f_dir[1]) == 1:
                new_locs.append((npath[step][0], npath[step][1] + n2f_dir[1]))
            if len(new_locs) > 1:
                new_locs = sorted(new_locs, key=lambda xx: np.hypot((xx[0] - fpath[-1][0]), (xx[1] - fpath[-1][1])))
            break_flag = False
            for new_loc in new_locs:
                new_loc_nes = [xx for xx in [(new_loc[0] + 1, new_loc[1]), (new_loc[0] - 1, new_loc[1]),
                                            (new_loc[0], new_loc[1] + 1), (new_loc[0], new_loc[1] - 1)]\
                                if xx[0] >= 0 and xx[0] < my_fpath_map.shape[0] and xx[1] >= 0 and xx[1] < my_fpath_map.shape[1]]
                if fpath_map is not None and np.all([(fpath_map[nlne[0], nlne[1]] == -1) for nlne in new_loc_nes]) != True:
                    break_flag = True
                    break
                if np.all([(my_fpath_map[nlne[0], nlne[1]] == -1) for nlne in new_loc_nes]) != True:
                    break_flag = True
                    break
                if my_npath_map[new_loc[0], new_loc[1]] != -1:
                    continue
                if npath_map is not None and npath_map[new_loc[0], new_loc[1]] != edge_id:
                    break_flag = True
                    break
                if valid_map[new_loc[0], new_loc[1]] == 0:
                    break_flag = True
                    break
                fpath.append(new_loc)
            if break_flag is True:
                break
        # Truncate the npath where the fpath trace stopped early.
        if step != len(npath) - 1:
            for xx in npath[step:]:
                if my_npath_map[xx[0], xx[1]] == edge_id:
                    my_npath_map[xx[0], xx[1]] = -1
            npath = npath[:step]
        if len(fpath) > 0:
            for fp_node in fpath:
                my_fpath_map[fp_node[0], fp_node[1]] = edge_id
    return my_fpath_map, my_npath_map, disp_diff
def refresh_node(old_node, old_feat, new_node, new_feat, mesh, stime=False):
    """Rename ``old_node`` to ``new_node`` in ``mesh``, carrying over edges,
    attributes, and the reciprocal 'far'/'near' back-references.

    Args:
        old_node, new_node: node keys (coordinate tuples) in the graph.
        old_feat, new_feat: attribute dicts; ``old_feat`` wins on conflicts.
        mesh: networkx-style graph, mutated in place.
        stime: when True, return (mesh, None, None) -- placeholder slots kept
            for callers that expect timing values.

    Returns:
        The mutated mesh (plus two None placeholders when ``stime``).
    """
    mesh.add_node(new_node)
    mesh.nodes[new_node].update(new_feat)
    mesh.nodes[new_node].update(old_feat)
    for ne in mesh.neighbors(old_node):
        mesh.add_edge(new_node, ne)
    if mesh.nodes[new_node].get('far') is not None:
        # Iterate over a snapshot: the body removes stale entries from the
        # live list, which previously skipped the element after each removal.
        for far_node in list(mesh.nodes[new_node]['far']):
            if mesh.has_node(far_node) is False:
                mesh.nodes[new_node]['far'].remove(far_node)
                continue
            if mesh.nodes[far_node].get('near') is not None:
                # Repoint the far node's 'near' back-reference at the new key.
                for idx in range(len(mesh.nodes[far_node].get('near'))):
                    if mesh.nodes[far_node]['near'][idx][0] == new_node[0] and mesh.nodes[far_node]['near'][idx][1] == new_node[1]:
                        if len(mesh.nodes[far_node]['near'][idx]) == len(old_node):
                            mesh.nodes[far_node]['near'][idx] = new_node
    if mesh.nodes[new_node].get('near') is not None:
        # Same snapshot trick for the symmetric 'near' list.
        for near_node in list(mesh.nodes[new_node]['near']):
            if mesh.has_node(near_node) is False:
                mesh.nodes[new_node]['near'].remove(near_node)
                continue
            if mesh.nodes[near_node].get('far') is not None:
                for idx in range(len(mesh.nodes[near_node].get('far'))):
                    if mesh.nodes[near_node]['far'][idx][0] == new_node[0] and mesh.nodes[near_node]['far'][idx][1] == new_node[1]:
                        if len(mesh.nodes[near_node]['far'][idx]) == len(old_node):
                            mesh.nodes[near_node]['far'][idx] = new_node
    if new_node != old_node:
        mesh.remove_node(old_node)
    if stime is False:
        return mesh
    else:
        return mesh, None, None
def create_placeholder(context, mask, depth, fpath_map, npath_map, mesh, inpaint_id, edge_ccs, extend_edge_cc, all_edge_maps, self_edge_id):
    """Insert placeholder nodes for masked (to-be-inpainted) pixels into the
    mesh and wire them to their mask/context neighbours.

    Pixels along planned far/near paths (``fpath_map``/``npath_map``) get
    fresh edge ids and 'near'/'far' attribute lists instead of plain edges,
    re-creating the depth-discontinuity bookkeeping inside the hole.

    Returns:
        (mesh, add_node_time, add_edge_time, add_far_near_time) -- the three
        timing values are always 0; they are kept for interface compatibility.
    """
    add_node_time = 0
    add_edge_time = 0
    add_far_near_time = 0
    valid_area = context + mask
    H, W = mesh.graph['H'], mesh.graph['W']
    edge_cc = edge_ccs[self_edge_id]
    num_com = len(edge_cc) + len(extend_edge_cc)
    hxs, hys = np.where(mask > 0)
    # One placeholder node per masked pixel, tagged with the inpaint round.
    for hx, hy in zip(hxs, hys):
        mesh.add_node((hx, hy), inpaint_id=inpaint_id + 1, num_context=num_com)
    # Connect each placeholder to 4-neighbours inside the valid area: other
    # mask pixels directly, context pixels via their (x, y, depth) node key.
    for hx, hy in zip(hxs, hys):
        four_nes = [(x, y) for x, y in [(hx + 1, hy), (hx - 1, hy), (hx, hy + 1), (hx, hy - 1)] if\
                    0 <= x < mesh.graph['H'] and 0 <= y < mesh.graph['W'] and valid_area[x, y] != 0]
        for ne in four_nes:
            if mask[ne[0], ne[1]] != 0:
                if not mesh.has_edge((hx, hy), ne):
                    mesh.add_edge((hx, hy), ne)
            elif depth[ne[0], ne[1]] != 0:
                if mesh.has_node((ne[0], ne[1], depth[ne[0], ne[1]])) and\
                    not mesh.has_edge((hx, hy), (ne[0], ne[1], depth[ne[0], ne[1]])):
                    mesh.add_edge((hx, hy), (ne[0], ne[1], depth[ne[0], ne[1]]))
            else:
                print("Undefined context node.")
                import pdb; pdb.set_trace()  # debug trap: context pixel lacks depth
    near_ids = np.unique(npath_map)
    if near_ids[0] == -1: near_ids = near_ids[1:]
    for near_id in near_ids:
        # Far-path pixels inside the mask: new edge id, 'near' links replace
        # the direct edges across the discontinuity.
        hxs, hys = np.where((fpath_map == near_id) & (mask > 0))
        if hxs.shape[0] > 0:
            mesh.graph['max_edge_id'] = mesh.graph['max_edge_id'] + 1
        else:
            break
        for hx, hy in zip(hxs, hys):
            mesh.nodes[(hx, hy)]['edge_id'] = int(round(mesh.graph['max_edge_id']))
            four_nes = [(x, y) for x, y in [(hx + 1, hy), (hx - 1, hy), (hx, hy + 1), (hx, hy - 1)] if\
                            x < mesh.graph['H'] and x >= 0 and y < mesh.graph['W'] and y >= 0 and npath_map[x, y] == near_id]
            for xx in four_nes:
                xx_n = copy.deepcopy(xx)
                if not mesh.has_node(xx_n):
                    if mesh.has_node((xx_n[0], xx_n[1], depth[xx_n[0], xx_n[1]])):
                        xx_n = (xx_n[0], xx_n[1], depth[xx_n[0], xx_n[1]])
                if mesh.has_edge((hx, hy), xx_n):
                    # pass
                    mesh.remove_edge((hx, hy), xx_n)
                if mesh.nodes[(hx, hy)].get('near') is None:
                    mesh.nodes[(hx, hy)]['near'] = []
                mesh.nodes[(hx, hy)]['near'].append(xx_n)
        # Collect foreign-edge pixels overlapping this near path, so later
        # connection logic can skip them.
        connect_point_exception = set()
        hxs, hys = np.where((npath_map == near_id) & (all_edge_maps > -1))
        for hx, hy in zip(hxs, hys):
            unknown_id = int(round(all_edge_maps[hx, hy]))
            if unknown_id != near_id and unknown_id != self_edge_id:
                unknown_node = set([xx for xx in edge_ccs[unknown_id] if xx[0] == hx and xx[1] == hy])
                connect_point_exception |= unknown_node
        # Near-path pixels inside the mask: new edge id plus 'far' links.
        hxs, hys = np.where((npath_map == near_id) & (mask > 0))
        if hxs.shape[0] > 0:
            mesh.graph['max_edge_id'] = mesh.graph['max_edge_id'] + 1
        else:
            break
        for hx, hy in zip(hxs, hys):
            mesh.nodes[(hx, hy)]['edge_id'] = int(round(mesh.graph['max_edge_id']))
            mesh.nodes[(hx, hy)]['connect_point_id'] = int(round(near_id))
            mesh.nodes[(hx, hy)]['connect_point_exception'] = connect_point_exception
            four_nes = [(x, y) for x, y in [(hx + 1, hy), (hx - 1, hy), (hx, hy + 1), (hx, hy - 1)] if\
                            x < mesh.graph['H'] and x >= 0 and y < mesh.graph['W'] and y >= 0 and fpath_map[x, y] == near_id]
            for xx in four_nes:
                xx_n = copy.deepcopy(xx)
                if not mesh.has_node(xx_n):
                    if mesh.has_node((xx_n[0], xx_n[1], depth[xx_n[0], xx_n[1]])):
                        xx_n = (xx_n[0], xx_n[1], depth[xx_n[0], xx_n[1]])
                if mesh.has_edge((hx, hy), xx_n):
                    mesh.remove_edge((hx, hy), xx_n)
                if mesh.nodes[(hx, hy)].get('far') is None:
                    mesh.nodes[(hx, hy)]['far'] = []
                mesh.nodes[(hx, hy)]['far'].append(xx_n)
    return mesh, add_node_time, add_edge_time, add_far_near_time
def clean_far_edge(mask_edge, mask_edge_with_id, context_edge, mask, info_on_pix, global_mesh, anchor):
    """Derive clean far-side depth edges for the predicted near edges inside a mask.

    For each labelled near-edge component in ``mask_edge_with_id``: walk the
    component as an 8-connected pixel graph, pick the endpoint pair realizing the
    graph diameter whose start touches a context edge, look up the matching
    far-side pixel on ``global_mesh``, and trace the far edge running alongside
    the near edge.

    Args:
        mask_edge: binary map of predicted (near) depth edges inside the mask.
        mask_edge_with_id: the same edges labelled with integer ids (-1 = none).
        context_edge: binary map of depth edges in the known context region.
        mask: binary inpainting mask.  (Each of the first four may be a torch
            tensor, possibly on GPU; all are converted to squeezed numpy arrays.)
        info_on_pix: dict mapping global (x, y) to lists of per-pixel records
            (with 'depth', 'synthesis', ... keys).
        global_mesh: networkx graph over (x, y, depth) nodes of the full scene.
        anchor: crop offsets; anchor[0] is the row offset and anchor[2] the
            column offset from local to global coordinates (indices 1 and 3 are
            unused here).

    Returns:
        Tuple (far_edge, uncleaned_far_edge, far_edge_with_id, near_edge_with_id):
        the far-edge map restricted to the mask, the unrestricted copy, and the
        id-labelled far/near edge maps (-1 where no edge).
    """
    # Normalize possibly-CUDA torch tensors to plain numpy arrays.
    if isinstance(mask_edge, torch.Tensor):
        if mask_edge.is_cuda:
            mask_edge = mask_edge.cpu()
        mask_edge = mask_edge.data
        mask_edge = mask_edge.numpy()
    if isinstance(context_edge, torch.Tensor):
        if context_edge.is_cuda:
            context_edge = context_edge.cpu()
        context_edge = context_edge.data
        context_edge = context_edge.numpy()
    if isinstance(mask, torch.Tensor):
        if mask.is_cuda:
            mask = mask.cpu()
        mask = mask.data
        mask = mask.numpy()
    mask = mask.squeeze()
    mask_edge = mask_edge.squeeze()
    context_edge = context_edge.squeeze()
    valid_near_edge = np.zeros_like(mask_edge)
    far_edge = np.zeros_like(mask_edge)
    far_edge_with_id = np.ones_like(mask_edge) * -1
    near_edge_with_id = np.ones_like(mask_edge) * -1
    uncleaned_far_edge = np.zeros_like(mask_edge)
    # Detect if there is any valid pixel mask_edge, if not ==> return default value
    if mask_edge.sum() == 0:
        return far_edge, uncleaned_far_edge, far_edge_with_id, near_edge_with_id
    mask_edge_ids = dict(collections.Counter(mask_edge_with_id.flatten())).keys()
    for edge_id in mask_edge_ids:
        if edge_id < 0:
            continue
        specific_edge_map = (mask_edge_with_id == edge_id).astype(np.uint8)
        # One labelled edge may still be made of several disconnected pieces.
        _, sub_specific_edge_maps = cv2.connectedComponents(specific_edge_map.astype(np.uint8), connectivity=8)
        for sub_edge_id in range(1, sub_specific_edge_maps.max() + 1):
            specific_edge_map = (sub_specific_edge_maps == sub_edge_id).astype(np.uint8)
            edge_pxs, edge_pys = np.where(specific_edge_map > 0)
            # Build an 8-connected graph over this component's pixels.
            edge_mesh = netx.Graph()
            for edge_px, edge_py in zip(edge_pxs, edge_pys):
                edge_mesh.add_node((edge_px, edge_py))
                for ex in [edge_px-1, edge_px, edge_px+1]:
                    for ey in [edge_py-1, edge_py, edge_py+1]:
                        if edge_px == ex and edge_py == ey:
                            continue
                        if ex < 0 or ex >= specific_edge_map.shape[0] or ey < 0 or ey >= specific_edge_map.shape[1]:
                            continue
                        if specific_edge_map[ex, ey] == 1:
                            if edge_mesh.has_node((ex, ey)):
                                edge_mesh.add_edge((ex, ey), (edge_px, edge_py))
            # Pick two endpoints realizing the graph diameter; start from the one
            # whose 3x3 neighbourhood touches a context edge.
            periphery_nodes = netx.periphery(edge_mesh)
            path_diameter = netx.diameter(edge_mesh)
            start_near_node = None
            for node_s in periphery_nodes:
                for node_e in periphery_nodes:
                    if node_s != node_e:
                        if netx.shortest_path_length(edge_mesh, node_s, node_e) == path_diameter:
                            if np.any(context_edge[node_s[0]-1:node_s[0]+2, node_s[1]-1:node_s[1]+2].flatten()):
                                start_near_node = (node_s[0], node_s[1])
                                end_near_node = (node_e[0], node_e[1])
                                break
                            if np.any(context_edge[node_e[0]-1:node_e[0]+2, node_e[1]-1:node_e[1]+2].flatten()):
                                start_near_node = (node_e[0], node_e[1])
                                end_near_node = (node_s[0], node_s[1])
                                break
                if start_near_node is not None:
                    break
            if start_near_node is None:
                continue
            # Reduce the component to the single shortest path between endpoints.
            new_specific_edge_map = np.zeros_like(mask)
            for path_node in netx.shortest_path(edge_mesh, start_near_node, end_near_node):
                new_specific_edge_map[path_node[0], path_node[1]] = 1
            context_near_pxs, context_near_pys = np.where(context_edge[start_near_node[0]-1:start_near_node[0]+2, start_near_node[1]-1:start_near_node[1]+2] > 0)
            distance = np.abs((context_near_pxs - 1)) + np.abs((context_near_pys - 1))
            if (np.where(distance == distance.min())[0].shape[0]) > 1:
                # Tie-break equally-close context pixels by the largest
                # non-synthesized depth recorded at the global pixel.
                closest_pxs = context_near_pxs[np.where(distance == distance.min())[0]]
                closest_pys = context_near_pys[np.where(distance == distance.min())[0]]
                closest_depths = []
                for closest_px, closest_py in zip(closest_pxs, closest_pys):
                    if info_on_pix.get((closest_px + start_near_node[0] - 1 + anchor[0], closest_py + start_near_node[1] - 1 + anchor[2])) is not None:
                        for info in info_on_pix.get((closest_px + start_near_node[0] - 1 + anchor[0], closest_py + start_near_node[1] - 1 + anchor[2])):
                            if info['synthesis'] is False:
                                closest_depths.append(abs(info['depth']))
                context_near_px, context_near_py = closest_pxs[np.array(closest_depths).argmax()], closest_pys[np.array(closest_depths).argmax()]
            else:
                context_near_px, context_near_py = context_near_pxs[distance.argmin()], context_near_pys[distance.argmin()]
            context_near_node = (start_near_node[0]-1 + context_near_px, start_near_node[1]-1 + context_near_py)
            far_node_list = []
            # Collect the far-side counterparts of the context pixel from the
            # global mesh, mapped back into local (cropped) coordinates.
            global_context_near_node = (context_near_node[0] + anchor[0], context_near_node[1] + anchor[2])
            if info_on_pix.get(global_context_near_node) is not None:
                for info in info_on_pix[global_context_near_node]:
                    if info['synthesis'] is False:
                        context_near_node_3d = (global_context_near_node[0], global_context_near_node[1], info['depth'])
                        if global_mesh.nodes[context_near_node_3d].get('far') is not None:
                            for far_node in global_mesh.nodes[context_near_node_3d].get('far'):
                                far_node = (far_node[0] - anchor[0], far_node[1] - anchor[2], far_node[2])
                                if mask[far_node[0], far_node[1]] == 0:
                                    far_node_list.append([far_node[0], far_node[1]])
            if len(far_node_list) > 0:
                far_nodes_dist = np.sum(np.abs(np.array(far_node_list) - np.array([[edge_px, edge_py]])), axis=1)
                context_far_node = tuple(far_node_list[far_nodes_dist.argmin()])
                corresponding_far_edge = np.zeros_like(mask_edge)
                corresponding_far_edge[context_far_node[0], context_far_node[1]] = 1
                # Ring of pixels hugging the near-edge path (end point excluded)
                # — candidate locations for the far edge.
                surround_map = cv2.dilate(new_specific_edge_map.astype(np.uint8),
                                          np.array([[1,1,1],[1,1,1],[1,1,1]]).astype(np.uint8),
                                          iterations=1)
                specific_edge_map_wo_end_pt = new_specific_edge_map.copy()
                specific_edge_map_wo_end_pt[end_near_node[0], end_near_node[1]] = 0
                surround_map_wo_end_pt = cv2.dilate(specific_edge_map_wo_end_pt.astype(np.uint8),
                                                    np.array([[1,1,1],[1,1,1],[1,1,1]]).astype(np.uint8),
                                                    iterations=1)
                surround_map_wo_end_pt[new_specific_edge_map > 0] = 0
                surround_map_wo_end_pt[context_near_node[0], context_near_node[1]] = 0
                surround_map = surround_map_wo_end_pt.copy()
                _, far_edge_cc = cv2.connectedComponents(surround_map.astype(np.uint8), connectivity=4)
                # Find where the far side enters the surround ring: the context
                # far pixel itself, a 4-neighbour, or a diagonal (bevel) step.
                start_far_node = None
                accompany_far_node = None
                if surround_map[context_far_node[0], context_far_node[1]] == 1:
                    start_far_node = context_far_node
                else:
                    four_nes = [(context_far_node[0] - 1, context_far_node[1]),
                                (context_far_node[0] + 1, context_far_node[1]),
                                (context_far_node[0], context_far_node[1] - 1),
                                (context_far_node[0], context_far_node[1] + 1)]
                    candidate_bevel = []
                    for ne in four_nes:
                        if surround_map[ne[0], ne[1]] == 1:
                            start_far_node = (ne[0], ne[1])
                            break
                        elif (ne[0] != context_near_node[0] or ne[1] != context_near_node[1]) and \
                             (ne[0] != start_near_node[0] or ne[1] != start_near_node[1]):
                            candidate_bevel.append((ne[0], ne[1]))
                    if start_far_node is None:
                        for ne in candidate_bevel:
                            if ne[0] == context_far_node[0]:
                                bevel_xys = [[ne[0] + 1, ne[1]], [ne[0] - 1, ne[1]]]
                            if ne[1] == context_far_node[1]:
                                bevel_xys = [[ne[0], ne[1] + 1], [ne[0], ne[1] - 1]]
                            for bevel_x, bevel_y in bevel_xys:
                                if surround_map[bevel_x, bevel_y] == 1:
                                    start_far_node = (bevel_x, bevel_y)
                                    accompany_far_node = (ne[0], ne[1])
                                    break
                            if start_far_node is not None:
                                break
                if start_far_node is not None:
                    # Keep the connected piece of the ring containing the start
                    # pixel as this edge's far edge, then extend it towards the
                    # near edge's end point through still-valid candidates.
                    for far_edge_id in range(1, far_edge_cc.max() + 1):
                        specific_far_edge = (far_edge_cc == far_edge_id).astype(np.uint8)
                        if specific_far_edge[start_far_node[0], start_far_node[1]] == 1:
                            if accompany_far_node is not None:
                                specific_far_edge[accompany_far_node] = 1
                            far_edge[specific_far_edge > 0] = 1
                            far_edge_with_id[specific_far_edge > 0] = edge_id
                            end_far_candidates = np.zeros_like(far_edge)
                            end_far_candidates[end_near_node[0], end_near_node[1]] = 1
                            end_far_candidates = cv2.dilate(end_far_candidates.astype(np.uint8),
                                                            np.array([[0,1,0],[1,1,1],[0,1,0]]).astype(np.uint8),
                                                            iterations=1)
                            end_far_candidates[end_near_node[0], end_near_node[1]] = 0
                            invalid_nodes = (((far_edge_cc != far_edge_id).astype(np.uint8) * \
                                              (far_edge_cc != 0).astype(np.uint8)).astype(np.uint8) + \
                                             (new_specific_edge_map).astype(np.uint8) + \
                                             (mask == 0).astype(np.uint8)).clip(0, 1)
                            end_far_candidates[invalid_nodes > 0] = 0
                            far_edge[end_far_candidates > 0] = 1
                            far_edge_with_id[end_far_candidates > 0] = edge_id
                    far_edge[context_far_node[0], context_far_node[1]] = 1
                    far_edge_with_id[context_far_node[0], context_far_node[1]] = edge_id
            near_edge_with_id[(mask_edge_with_id == edge_id) > 0] = edge_id
    uncleaned_far_edge = far_edge.copy()
    # The "cleaned" version keeps far edges inside the mask only.
    far_edge[mask == 0] = 0
    return far_edge, uncleaned_far_edge, far_edge_with_id, near_edge_with_id
def get_MiDaS_samples(image_folder, depth_folder, config, specific=None, aft_certain=None):
    """Build the list of per-image sample dicts (paths, intrinsics, target poses).

    Scans ``image_folder`` for files matching ``config['img_format']`` and pairs
    each with its depth file in ``depth_folder``.  ``specific`` restricts the scan
    to one sequence name; ``aft_certain`` skips every sequence until the named one
    is reached.
    """
    seq_names = [os.path.splitext(os.path.basename(fn))[0]
                 for fn in glob.glob(os.path.join(image_folder, '*' + config['img_format']))]
    samples = []
    generic_pose = np.eye(4)
    assert len(config['traj_types']) == len(config['x_shift_range']) ==\
           len(config['y_shift_range']) == len(config['z_shift_range']) == len(config['video_postfix']), \
           "The number of elements in 'traj_types', 'x_shift_range', 'y_shift_range', 'z_shift_range' and \
               'video_postfix' should be equal."
    tgt_pose = [[generic_pose * 1]]
    tgts_poses = []
    # One camera trajectory (list of 4x4 poses) per configured trajectory type.
    for traj_idx in range(len(config['traj_types'])):
        sx, sy, sz = path_planning(config['num_frames'], config['x_shift_range'][traj_idx], config['y_shift_range'][traj_idx],
                                   config['z_shift_range'][traj_idx], path_type=config['traj_types'][traj_idx])
        traj_poses = []
        for shift_x, shift_y, shift_z in zip(sx, sy, sz):
            pose = generic_pose * 1.
            pose[:3, -1] = np.array([shift_x, shift_y, shift_z])
            traj_poses.append(pose)
        tgts_poses.append(traj_poses)
    tgt_pose = generic_pose * 1
    aft_flag = not (aft_certain is not None and len(aft_certain) > 0)
    for seq_dir in seq_names:
        if specific is not None and len(specific) > 0 and specific != seq_dir:
            continue
        if aft_certain is not None and len(aft_certain) > 0:
            if aft_certain == seq_dir:
                aft_flag = True
            if aft_flag is False:
                continue
        sdict = {}
        sdict['depth_fi'] = os.path.join(depth_folder, seq_dir + config['depth_format'])
        sdict['ref_img_fi'] = os.path.join(image_folder, seq_dir + config['img_format'])
        H, W = imageio.imread(sdict['ref_img_fi']).shape[:2]
        # Pinhole intrinsics with focal length max(H, W); normalized below.
        sdict['int_mtx'] = np.array([[max(H, W), 0, W//2], [0, max(H, W), H//2], [0, 0, 1]]).astype(np.float32)
        if sdict['int_mtx'].max() > 1:
            sdict['int_mtx'][0, :] = sdict['int_mtx'][0, :] / float(W)
            sdict['int_mtx'][1, :] = sdict['int_mtx'][1, :] / float(H)
        sdict['ref_pose'] = np.eye(4)
        sdict['tgt_pose'] = tgt_pose
        sdict['tgts_poses'] = tgts_poses
        sdict['video_postfix'] = config['video_postfix']
        sdict['tgt_name'] = [os.path.splitext(os.path.basename(sdict['depth_fi']))[0]]
        sdict['src_pair_name'] = sdict['tgt_name'][0]
        samples.append(sdict)
    return samples
def get_valid_size(imap):
    """Return the tight bounding box of the non-zero region of a 2D map.

    Args:
        imap: 2D array (or squeezable to 1D projections) mask/intensity map.

    Returns:
        Dict with inclusive 'x_min'/'y_min' and exclusive 'x_max'/'y_max'
        bounds such that imap[x_min:x_max, y_min:y_max] covers every non-zero
        entry.  Raises ValueError (empty reduction) if imap is all zeros,
        matching the previous behavior.
    """
    # Project onto each axis once instead of recomputing the sum per bound.
    valid_rows = np.where(imap.sum(1).squeeze() > 0)[0]
    valid_cols = np.where(imap.sum(0).squeeze() > 0)[0]
    size_dict = {'x_max': valid_rows.max() + 1,
                 'y_max': valid_cols.max() + 1,
                 'x_min': valid_rows.min(),
                 'y_min': valid_cols.min()}
    return size_dict
def dilate_valid_size(isize_dict, imap, dilate=(0, 0)):
    """Grow a bounding-box dict (as produced by get_valid_size) by a margin.

    Args:
        isize_dict: dict with 'x_min'/'x_max'/'y_min'/'y_max' bounds.
        imap: 2D map whose shape clamps the dilated bounds.
        dilate: (row_margin, col_margin) added on each side.  (Changed from a
            mutable list default to a tuple; call sites are unaffected.)

    Returns:
        A new, dilated bounds dict clamped to imap's shape; the input dict is
        left unmodified.
    """
    osize_dict = copy.deepcopy(isize_dict)
    osize_dict['x_min'] = max(0, osize_dict['x_min'] - dilate[0])
    osize_dict['x_max'] = min(imap.shape[0], osize_dict['x_max'] + dilate[0])
    # Bug fix: the column lower bound previously used the row margin
    # (dilate[0]); use the column margin so dilation is axis-consistent.
    osize_dict['y_min'] = max(0, osize_dict['y_min'] - dilate[1])
    osize_dict['y_max'] = min(imap.shape[1], osize_dict['y_max'] + dilate[1])
    return osize_dict
def crop_maps_by_size(size, *imaps):
    """Crop every map in *imaps* to the bounds given by the *size* dict.

    Returns a list of independent (copied) crops, one per input map.
    """
    rows = slice(size['x_min'], size['x_max'])
    cols = slice(size['y_min'], size['y_max'])
    return [imap[rows, cols].copy() for imap in imaps]
def smooth_cntsyn_gap(init_depth_map, mask_region, context_region, init_mask_region=None):
    """Diffuse depth from the context into the masked gap.

    Runs two passes of 4-neighbour averaging: each pass averages depth flowing
    in from already-valid neighbours (context plus previously filled pixels)
    and writes it into the still-masked region.  Returns the smoothed map;
    the input depth map is not modified.
    """
    if init_mask_region is not None:
        curr_mask_region = init_mask_region * 1
    else:
        curr_mask_region = mask_region * 0
    depth_map = init_depth_map.copy()
    # (shift, axis) offsets of the four axis-aligned neighbours.
    shifts = [(1, 0), (-1, 0), (1, 1), (-1, 1)]
    for _ in range(2):
        cm_mask = context_region + curr_mask_region
        rolled_depths = [np.roll(depth_map, step, axis) for step, axis in shifts]
        rolled_masks = [np.roll(cm_mask, step, axis) for step, axis in shifts]
        numer = rolled_depths[0] * rolled_masks[0] + rolled_depths[1] * rolled_masks[1] + \
                rolled_depths[2] * rolled_masks[2] + rolled_depths[3] * rolled_masks[3]
        denom = (rolled_masks[0] + rolled_masks[1] + rolled_masks[2] + rolled_masks[3]) + 1e-6
        fluxin_depths = numer / denom
        fluxin_mask = (fluxin_depths != 0) * mask_region
        init_mask = (fluxin_mask * (curr_mask_region >= 0).astype(np.float32) > 0).astype(np.uint8)
        depth_map[init_mask > 0] = fluxin_depths[init_mask > 0]
        if init_mask.shape[-1] > curr_mask_region.shape[-1]:
            curr_mask_region[init_mask.sum(-1, keepdims=True) > 0] = 1
        else:
            curr_mask_region[init_mask > 0] = 1
        depth_map[fluxin_mask > 0] = fluxin_depths[fluxin_mask > 0]
    return depth_map
def read_MiDaS_depth(disp_fi, disp_rescale=10., h=None, w=None):
    """Load a MiDaS disparity file (.npy or image), normalize and lightly blur
    it, optionally resize to (h, w), and invert it into a depth map."""
    if 'npy' in os.path.splitext(disp_fi)[-1]:
        disparity = np.load(disp_fi)
    else:
        disparity = imageio.imread(disp_fi).astype(np.float32)
    # Shift to zero minimum, blur in the normalized range, rescale.
    disparity = disparity - disparity.min()
    disparity = cv2.blur(disparity / disparity.max(), ksize=(3, 3)) * disparity.max()
    disparity = (disparity / disparity.max()) * disp_rescale
    if h is not None and w is not None:
        disparity = resize(disparity / disparity.max(), (h, w), order=1) * disparity.max()
    # Clamp disparity away from zero before inverting to depth.
    return 1. / np.maximum(disparity, 0.05)
def follow_image_aspect_ratio(depth, image):
    """Resize *depth* (nearest-neighbour) so its aspect ratio matches *image*,
    keeping the dominant depth dimension fixed."""
    img_h, img_w = image.shape[:2]
    image_aspect_ratio = img_h / img_w
    depth_h, depth_w = depth.shape[:2]
    depth_aspect_ratio = depth_h / depth_w
    if depth_aspect_ratio > image_aspect_ratio:
        # Depth is taller than the image: keep its height, widen.
        target_h, target_w = depth_h, depth_h / image_aspect_ratio
    else:
        # Depth is wider (or equal): keep its width, grow the height.
        target_h, target_w = depth_w * image_aspect_ratio, depth_w
    # Normalize before resizing so interpolation stays in [0, 1].
    max_depth = depth.max()
    depth = resize(depth / max_depth,
                   (int(target_h), int(target_w)),
                   order=0) * max_depth
    return depth
def depth_resize(depth, origin_size, image_size):
    """Bilinearly resize *depth* to origin_size when valid, else to image_size.

    The map is normalized to [0, 1] before resizing and rescaled back
    afterwards so interpolation does not change the depth range.

    Args:
        depth: 2D depth map.
        origin_size: preferred (H, W); used unless its first entry is 0.
        image_size: fallback (H, W).

    Returns:
        The resized depth map.
    """
    # Fix: use a value comparison instead of `is not 0` — identity comparison
    # with an int literal is implementation-defined and raises a SyntaxWarning
    # on modern Python.  Both original branches were otherwise identical, so
    # they are folded into a single resize with the selected target size.
    target_size = origin_size if origin_size[0] != 0 else image_size
    max_depth = depth.max()
    depth = depth / max_depth
    depth = resize(depth, target_size, order=1, mode='edge')
    depth = depth * max_depth
    return depth
def filter_irrelevant_edge(self_edge, other_edges, other_edges_with_id, current_edge_id, context, edge_ccs, mesh, anchor):
    """Keep only the neighbouring context edges that actually touch *self_edge*.

    Context edges whose dilated footprint never overlaps the (dilated) current
    edge are zeroed out of ``other_edges``.  For each surviving edge piece, an
    info record is collected per contact point (end point map, forbidden
    points, near/far depth difference) and the records are sorted by that
    depth difference, largest first.

    Args:
        self_edge: binary map of the edge currently being inpainted.
        other_edges: binary map of the other (context) depth edges; modified
            and returned.
        other_edges_with_id: integer edge-id labels for ``other_edges``.
        current_edge_id: id of the current edge (unused in the body).
        context: binary context-region map.
        edge_ccs: list mapping edge id -> set of global (x, y, d) edge nodes.
        mesh: networkx graph over global (x, y, d) nodes.
        anchor: crop offsets; here anchor[0]/anchor[1] are subtracted from the
            global coordinates — NOTE(review): clean_far_edge uses
            anchor[0]/anchor[2] for the same purpose; confirm the anchor
            layout expected by callers of this function.

    Returns:
        (other_edges, other_edges_info): the filtered edge map (restricted to
        the context) and the sorted list of per-contact info dicts.
    """
    other_edges = other_edges.squeeze()
    other_edges_with_id = other_edges_with_id.squeeze()
    self_edge = self_edge.squeeze()
    dilate_self_edge = cv2.dilate(self_edge.astype(np.uint8), np.array([[1,1,1],[1,1,1],[1,1,1]]).astype(np.uint8), iterations=1)
    edge_ids = collections.Counter(other_edges_with_id.flatten()).keys()
    other_edges_info = []
    # import ipdb
    # ipdb.set_trace()
    for edge_id in edge_ids:
        edge_id = int(edge_id)
        if edge_id >= 0:
            condition = ((other_edges_with_id == edge_id) * other_edges * context).astype(np.uint8)
            if dilate_self_edge[condition > 0].sum() == 0:
                # This context edge never touches the current edge: drop it.
                other_edges[other_edges_with_id == edge_id] = 0
            else:
                num_condition, condition_labels = cv2.connectedComponents(condition, connectivity=8)
                for condition_id in range(1, num_condition):
                    isolate_condition = ((condition_labels == condition_id) > 0).astype(np.uint8)
                    # Contact groups between the dilated self edge and this piece.
                    num_end_group, end_group = cv2.connectedComponents(((dilate_self_edge * isolate_condition) > 0).astype(np.uint8), connectivity=8)
                    if num_end_group == 1:
                        continue
                    for end_id in range(1, num_end_group):
                        end_pxs, end_pys = np.where((end_group == end_id))
                        end_px, end_py = end_pxs[0], end_pys[0]
                        other_edges_info.append({})
                        other_edges_info[-1]['edge_id'] = edge_id
                        # other_edges_info[-1]['near_depth'] = None
                        other_edges_info[-1]['diff'] = None
                        other_edges_info[-1]['edge_map'] = np.zeros_like(self_edge)
                        other_edges_info[-1]['end_point_map'] = np.zeros_like(self_edge)
                        other_edges_info[-1]['end_point_map'][(end_group == end_id)] = 1
                        other_edges_info[-1]['forbidden_point_map'] = np.zeros_like(self_edge)
                        other_edges_info[-1]['forbidden_point_map'][(end_group != end_id) * (end_group != 0)] = 1
                        other_edges_info[-1]['forbidden_point_map'] = cv2.dilate(other_edges_info[-1]['forbidden_point_map'], kernel=np.array([[1,1,1],[1,1,1],[1,1,1]]), iterations=2)
                        for x in edge_ccs[edge_id]:
                            nx = x[0] - anchor[0]
                            ny = x[1] - anchor[1]
                            if nx == end_px and ny == end_py:
                                # other_edges_info[-1]['near_depth'] = abs(nx)
                                # Disparity gap between the node and its single
                                # far-side neighbour (0 when there isn't one).
                                if mesh.nodes[x].get('far') is not None and len(mesh.nodes[x].get('far')) == 1:
                                    other_edges_info[-1]['diff'] = abs(1./abs([*mesh.nodes[x].get('far')][0][2]) - 1./abs(x[2]))
                                else:
                                    other_edges_info[-1]['diff'] = 0
                            # if end_group[nx, ny] != end_id and end_group[nx, ny] > 0:
                            # continue
                            try:
                                # Nodes outside the crop raise IndexError; skip them.
                                if isolate_condition[nx, ny] == 1:
                                    other_edges_info[-1]['edge_map'][nx, ny] = 1
                            except:
                                pass
    try:
        other_edges_info = sorted(other_edges_info, key=lambda x : x['diff'], reverse=True)
    except:
        # Debug hook: a None 'diff' means an end point never matched a node.
        import pdb
        pdb.set_trace()
    # import pdb
    # pdb.set_trace()
    # other_edges = other_edges[..., None]
    for other_edge in other_edges_info:
        if other_edge['end_point_map'] is None:
            import pdb
            pdb.set_trace()
    other_edges = other_edges * context
    return other_edges, other_edges_info
def require_depth_edge(context_edge, mask):
    """Return True iff the (3x3-dilated) mask overlaps any context edge pixel."""
    grown_mask = cv2.dilate(mask, np.array([[1,1,1],[1,1,1],[1,1,1]]).astype(np.uint8), iterations=1)
    overlap = grown_mask * context_edge
    if overlap.max() == 0:
        return False
    return True
def refine_color_around_edge(mesh, info_on_pix, edge_ccs, config, spdb=False):
    """Smooth the color (and backup depth) of inpainted pixels around depth edges.

    For every edge connected component: grows near-side and far-side bands of
    inpainted (inpaint_id == 1) nodes for up to five rings, snapshots each
    node's depth/color into 'backup_depth'/'backup_color', then iteratively
    averages the backups inward from the band boundary until every flagged
    node has been refined (or 100 iterations pass).  Finally the averaged
    colors are written back to the mesh nodes and to ``info_on_pix``.

    Args:
        mesh: networkx graph over (x, y, depth) nodes with 'H'/'W' graph attrs.
        info_on_pix: dict mapping (x, y) to lists of per-pixel records.
        edge_ccs: list of edge connected components (sets of mesh nodes).
        config: unused here; kept for call-site compatibility.
        spdb: unused debug flag; kept for call-site compatibility.

    Returns:
        (mesh, info_on_pix) with refined colors applied in place.
    """
    H, W = mesh.graph['H'], mesh.graph['W']
    tmp_edge_ccs = copy.deepcopy(edge_ccs)
    for edge_id, edge_cc in enumerate(edge_ccs):
        if len(edge_cc) == 0:
            continue
        # Fix: np.bool was removed in NumPy 1.24; the builtin bool is the
        # supported dtype alias and is behaviorally identical here.
        near_maps = np.zeros((H, W)).astype(bool)
        far_maps = np.zeros((H, W)).astype(bool)
        tmp_far_nodes = set()
        far_nodes = set()
        near_nodes = set()
        end_nodes = set()
        # Ring 0 seeds the near/far bands from the edge itself; rings 1-4 grow
        # both bands one neighbour layer at a time.
        for i in range(5):
            if i == 0:
                for edge_node in edge_cc:
                    if mesh.nodes[edge_node].get('depth_edge_dilate_2_color_flag') is not True:
                        break
                    if mesh.nodes[edge_node].get('inpaint_id') == 1:
                        near_nodes.add(edge_node)
                        tmp_node = mesh.nodes[edge_node].get('far')
                        tmp_node = set(tmp_node) if tmp_node is not None else set()
                        tmp_far_nodes |= tmp_node
                rmv_tmp_far_nodes = set()
                for far_node in tmp_far_nodes:
                    if not(mesh.has_node(far_node) and mesh.nodes[far_node].get('inpaint_id') == 1):
                        rmv_tmp_far_nodes.add(far_node)
                if len(tmp_far_nodes - rmv_tmp_far_nodes) == 0:
                    break
                else:
                    for near_node in near_nodes:
                        near_maps[near_node[0], near_node[1]] = True
                        mesh.nodes[near_node]['refine_rgbd'] = True
                        mesh.nodes[near_node]['backup_depth'] = near_node[2] \
                            if mesh.nodes[near_node].get('real_depth') is None else mesh.nodes[near_node]['real_depth']
                        mesh.nodes[near_node]['backup_color'] = mesh.nodes[near_node]['color']
                    for far_node in tmp_far_nodes:
                        if mesh.has_node(far_node) and mesh.nodes[far_node].get('inpaint_id') == 1:
                            far_nodes.add(far_node)
                            far_maps[far_node[0], far_node[1]] = True
                            mesh.nodes[far_node]['refine_rgbd'] = True
                            mesh.nodes[far_node]['backup_depth'] = far_node[2] \
                                if mesh.nodes[far_node].get('real_depth') is None else mesh.nodes[far_node]['real_depth']
                            mesh.nodes[far_node]['backup_color'] = mesh.nodes[far_node]['color']
                tmp_far_nodes = far_nodes
                tmp_near_nodes = near_nodes
            else:
                tmp_far_nodes = new_tmp_far_nodes
                tmp_near_nodes = new_tmp_near_nodes
            new_tmp_far_nodes = None
            new_tmp_near_nodes = None
            new_tmp_far_nodes = set()
            new_tmp_near_nodes = set()
            for node in tmp_near_nodes:
                for ne_node in mesh.neighbors(node):
                    if far_maps[ne_node[0], ne_node[1]] == False and \
                       near_maps[ne_node[0], ne_node[1]] == False:
                        if mesh.nodes[ne_node].get('inpaint_id') == 1:
                            new_tmp_near_nodes.add(ne_node)
                            near_maps[ne_node[0], ne_node[1]] = True
                            mesh.nodes[ne_node]['refine_rgbd'] = True
                            mesh.nodes[ne_node]['backup_depth'] = ne_node[2] \
                                if mesh.nodes[ne_node].get('real_depth') is None else mesh.nodes[ne_node]['real_depth']
                            mesh.nodes[ne_node]['backup_color'] = mesh.nodes[ne_node]['color']
                        else:
                            # Non-inpainted neighbour: anchor pixel for averaging.
                            mesh.nodes[ne_node]['backup_depth'] = ne_node[2] \
                                if mesh.nodes[ne_node].get('real_depth') is None else mesh.nodes[ne_node]['real_depth']
                            mesh.nodes[ne_node]['backup_color'] = mesh.nodes[ne_node]['color']
                            end_nodes.add(node)
            near_nodes.update(new_tmp_near_nodes)
            for node in tmp_far_nodes:
                for ne_node in mesh.neighbors(node):
                    if far_maps[ne_node[0], ne_node[1]] == False and \
                       near_maps[ne_node[0], ne_node[1]] == False:
                        if mesh.nodes[ne_node].get('inpaint_id') == 1:
                            new_tmp_far_nodes.add(ne_node)
                            far_maps[ne_node[0], ne_node[1]] = True
                            mesh.nodes[ne_node]['refine_rgbd'] = True
                            mesh.nodes[ne_node]['backup_depth'] = ne_node[2] \
                                if mesh.nodes[ne_node].get('real_depth') is None else mesh.nodes[ne_node]['real_depth']
                            mesh.nodes[ne_node]['backup_color'] = mesh.nodes[ne_node]['color']
                        else:
                            mesh.nodes[ne_node]['backup_depth'] = ne_node[2] \
                                if mesh.nodes[ne_node].get('real_depth') is None else mesh.nodes[ne_node]['real_depth']
                            mesh.nodes[ne_node]['backup_color'] = mesh.nodes[ne_node]['color']
                            end_nodes.add(node)
            far_nodes.update(new_tmp_far_nodes)
        if len(far_nodes) == 0:
            tmp_edge_ccs[edge_id] = set()
            continue
        # Nodes on the outermost ring that border unflagged pixels also seed
        # the averaging front.
        for node in new_tmp_far_nodes | new_tmp_near_nodes:
            for ne_node in mesh.neighbors(node):
                if far_maps[ne_node[0], ne_node[1]] == False and near_maps[ne_node[0], ne_node[1]] == False:
                    end_nodes.add(node)
                    mesh.nodes[ne_node]['backup_depth'] = ne_node[2] \
                        if mesh.nodes[ne_node].get('real_depth') is None else mesh.nodes[ne_node]['real_depth']
                    mesh.nodes[ne_node]['backup_color'] = mesh.nodes[ne_node]['color']
        tmp_end_nodes = end_nodes
        refine_nodes = near_nodes | far_nodes
        remain_refine_nodes = copy.deepcopy(refine_nodes)
        accum_idx = 0
        # Propagate averaged backups inward from the band boundary until all
        # flagged nodes are refined (bounded at 100 iterations).
        while len(remain_refine_nodes) > 0:
            accum_idx += 1
            if accum_idx > 100:
                break
            new_tmp_end_nodes = None
            new_tmp_end_nodes = set()
            survive_tmp_end_nodes = set()
            for node in tmp_end_nodes:
                re_depth, re_color, re_count = 0, np.array([0., 0., 0.]), 0
                for ne_node in mesh.neighbors(node):
                    if mesh.nodes[ne_node].get('refine_rgbd') is True:
                        if ne_node not in tmp_end_nodes:
                            new_tmp_end_nodes.add(ne_node)
                    else:
                        try:
                            re_depth += mesh.nodes[ne_node]['backup_depth']
                            re_color += mesh.nodes[ne_node]['backup_color'].astype(np.float32)
                            re_count += 1.
                        except:
                            import pdb; pdb.set_trace()
                if re_count > 0:
                    re_depth = re_depth / re_count
                    re_color = re_color / re_count
                    mesh.nodes[node]['backup_depth'] = re_depth
                    mesh.nodes[node]['backup_color'] = re_color
                    mesh.nodes[node]['refine_rgbd'] = False
                else:
                    # No refined neighbour yet; retry this node next round.
                    survive_tmp_end_nodes.add(node)
            for node in tmp_end_nodes - survive_tmp_end_nodes:
                if node in remain_refine_nodes:
                    remain_refine_nodes.remove(node)
            tmp_end_nodes = new_tmp_end_nodes
        # Commit the averaged colors to the mesh and the per-pixel records.
        for node in refine_nodes:
            if mesh.nodes[node].get('refine_rgbd') is not None:
                mesh.nodes[node].pop('refine_rgbd')
                mesh.nodes[node]['color'] = mesh.nodes[node]['backup_color']
                for info in info_on_pix[(node[0], node[1])]:
                    if info['depth'] == node[2]:
                        info['color'] = mesh.nodes[node]['backup_color']
    return mesh, info_on_pix
def refine_depth_around_edge(mask_depth, far_edge, uncleaned_far_edge, near_edge, mask, all_depth, config):
    """Fill in depth values along the far/near edge bands inside the mask.

    After pruning far/near edges so that each only survives next to its
    counterpart, both bands are dilated ``config['depth_edge_dilate_2']`` times
    (kept mutually exclusive and confined to the mask).  Depth is then grown
    iteratively: each unfilled band pixel takes the mean of its already-valid
    4-neighbours until no pixel changes.

    Args:
        mask_depth: depth map to refine in place (torch tensors accepted and
            converted to numpy, like the edge/mask arguments).
        far_edge, uncleaned_far_edge, near_edge: binary edge maps.
        mask: binary inpainting mask.
        all_depth: auxiliary depth map — NOTE(review): it is multiplied by 0
            before use in both loops below, so its values never contribute;
            presumably a disabled feature — confirm before relying on it.
        config: dict providing 'depth_edge_dilate_2'.

    Returns:
        The refined mask_depth array.
    """
    # Normalize possibly-CUDA torch tensors to plain numpy arrays.
    if isinstance(mask_depth, torch.Tensor):
        if mask_depth.is_cuda:
            mask_depth = mask_depth.cpu()
        mask_depth = mask_depth.data
        mask_depth = mask_depth.numpy()
    if isinstance(far_edge, torch.Tensor):
        if far_edge.is_cuda:
            far_edge = far_edge.cpu()
        far_edge = far_edge.data
        far_edge = far_edge.numpy()
    if isinstance(uncleaned_far_edge, torch.Tensor):
        if uncleaned_far_edge.is_cuda:
            uncleaned_far_edge = uncleaned_far_edge.cpu()
        uncleaned_far_edge = uncleaned_far_edge.data
        uncleaned_far_edge = uncleaned_far_edge.numpy()
    if isinstance(near_edge, torch.Tensor):
        if near_edge.is_cuda:
            near_edge = near_edge.cpu()
        near_edge = near_edge.data
        near_edge = near_edge.numpy()
    if isinstance(mask, torch.Tensor):
        if mask.is_cuda:
            mask = mask.cpu()
        mask = mask.data
        mask = mask.numpy()
    mask = mask.squeeze()
    uncleaned_far_edge = uncleaned_far_edge.squeeze()
    far_edge = far_edge.squeeze()
    near_edge = near_edge.squeeze()
    mask_depth = mask_depth.squeeze()
    # Keep near edges only next to far edges, and vice versa.
    dilate_far_edge = cv2.dilate(uncleaned_far_edge.astype(np.uint8), kernel=np.array([[0,1,0],[1,1,1],[0,1,0]]).astype(np.uint8), iterations=1)
    near_edge[dilate_far_edge == 0] = 0
    dilate_near_edge = cv2.dilate(near_edge.astype(np.uint8), kernel=np.array([[0,1,0],[1,1,1],[0,1,0]]).astype(np.uint8), iterations=1)
    far_edge[dilate_near_edge == 0] = 0
    init_far_edge = far_edge.copy()
    init_near_edge = near_edge.copy()
    # Dilate both bands, keeping them disjoint and inside the mask.
    for i in range(config['depth_edge_dilate_2']):
        init_far_edge = cv2.dilate(init_far_edge, kernel=np.array([[0,1,0],[1,1,1],[0,1,0]]).astype(np.uint8), iterations=1)
        init_far_edge[init_near_edge == 1] = 0
        init_near_edge = cv2.dilate(init_near_edge, kernel=np.array([[0,1,0],[1,1,1],[0,1,0]]).astype(np.uint8), iterations=1)
        init_near_edge[init_far_edge == 1] = 0
    init_far_edge[mask == 0] = 0
    init_near_edge[mask == 0] = 0
    # hole_* == 0 marks band pixels whose depth still needs to be filled.
    hole_far_edge = 1 - init_far_edge
    hole_near_edge = 1 - init_near_edge
    change = None
    while True:
        change = False
        hole_far_edge[init_near_edge == 1] = 0
        hole_near_edge[init_far_edge == 1] = 0
        far_pxs, far_pys = np.where((hole_far_edge == 0) * (init_far_edge == 1) > 0)
        current_hole_far_edge = hole_far_edge.copy()
        for far_px, far_py in zip(far_pxs, far_pys):
            # 3x3 window clamped to the image; the cross element restricts the
            # average to the 4-connected neighbours.
            min_px = max(far_px - 1, 0)
            max_px = min(far_px + 2, mask.shape[0]-1)
            min_py = max(far_py - 1, 0)
            max_py = min(far_py + 2, mask.shape[1]-1)
            hole_far = current_hole_far_edge[min_px: max_px, min_py: max_py]
            tmp_mask = mask[min_px: max_px, min_py: max_py]
            all_depth_patch = all_depth[min_px: max_px, min_py: max_py] * 0
            all_depth_mask = (all_depth_patch != 0).astype(np.uint8)
            cross_element = np.array([[0,1,0],[1,1,1],[0,1,0]])[min_px - (far_px - 1): max_px - (far_px - 1), min_py - (far_py - 1): max_py - (far_py - 1)]
            combine_mask = (tmp_mask + all_depth_mask).clip(0, 1) * hole_far * cross_element
            tmp_patch = combine_mask * (mask_depth[min_px: max_px, min_py: max_py] + all_depth_patch)
            number = np.count_nonzero(tmp_patch)
            if number > 0:
                mask_depth[far_px, far_py] = np.sum(tmp_patch).astype(np.float32) / max(number, 1e-6)
                hole_far_edge[far_px, far_py] = 1
                change = True
        near_pxs, near_pys = np.where((hole_near_edge == 0) * (init_near_edge == 1) > 0)
        current_hole_near_edge = hole_near_edge.copy()
        for near_px, near_py in zip(near_pxs, near_pys):
            min_px = max(near_px - 1, 0)
            max_px = min(near_px + 2, mask.shape[0]-1)
            min_py = max(near_py - 1, 0)
            max_py = min(near_py + 2, mask.shape[1]-1)
            hole_near = current_hole_near_edge[min_px: max_px, min_py: max_py]
            tmp_mask = mask[min_px: max_px, min_py: max_py]
            all_depth_patch = all_depth[min_px: max_px, min_py: max_py] * 0
            all_depth_mask = (all_depth_patch != 0).astype(np.uint8)
            cross_element = np.array([[0,1,0],[1,1,1],[0,1,0]])[min_px - near_px + 1:max_px - near_px + 1, min_py - near_py + 1:max_py - near_py + 1]
            combine_mask = (tmp_mask + all_depth_mask).clip(0, 1) * hole_near * cross_element
            tmp_patch = combine_mask * (mask_depth[min_px: max_px, min_py: max_py] + all_depth_patch)
            number = np.count_nonzero(tmp_patch)
            if number > 0:
                mask_depth[near_px, near_py] = np.sum(tmp_patch) / max(number, 1e-6)
                hole_near_edge[near_px, near_py] = 1
                change = True
        if change is False:
            break
    return mask_depth
def vis_depth_edge_connectivity(depth, config):
    """Label connected depth-discontinuity edges and break them at T-junctions.

    Works in disparity space: a pixel belongs to an edge when the disparity
    difference to a neighbour exceeds ``config['depth_threshold']``.  Only
    positive (near-side) jumps are kept; edges are connected-component
    labelled, pixels with more than two edge neighbours (T-junctions) are
    removed, and the remaining pieces are relabelled.

    Args:
        depth: 2D depth map.
        config: dict providing 'depth_threshold'.

    Returns:
        Integer edge-label map, zero-padded by one pixel on each side
        (so it is (H, W)-shaped again after the interior crop used below).
    """
    disp = 1./depth
    # Signed disparity differences towards each of the four neighbours,
    # cropped to the common interior so all four maps align.
    u_diff = (disp[1:, :] - disp[:-1, :])[:-1, 1:-1]
    b_diff = (disp[:-1, :] - disp[1:, :])[1:, 1:-1]
    l_diff = (disp[:, 1:] - disp[:, :-1])[1:-1, :-1]
    r_diff = (disp[:, :-1] - disp[:, 1:])[1:-1, 1:]
    u_over = (np.abs(u_diff) > config['depth_threshold']).astype(np.float32)
    b_over = (np.abs(b_diff) > config['depth_threshold']).astype(np.float32)
    l_over = (np.abs(l_diff) > config['depth_threshold']).astype(np.float32)
    r_over = (np.abs(r_diff) > config['depth_threshold']).astype(np.float32)
    concat_diff = np.stack([u_diff, b_diff, r_diff, l_diff], axis=-1)
    concat_over = np.stack([u_over, b_over, r_over, l_over], axis=-1)
    over_diff = concat_diff * concat_over
    # Pixels with any positive over-threshold jump; negative-only ones are
    # kept separately (and suppressed when a positive jump also exists).
    pos_over = (over_diff > 0).astype(np.float32).sum(-1).clip(0, 1)
    neg_over = (over_diff < 0).astype(np.float32).sum(-1).clip(0, 1)
    neg_over[(over_diff > 0).astype(np.float32).sum(-1) > 0] = 0
    _, edge_label = cv2.connectedComponents(pos_over.astype(np.uint8), connectivity=8)
    # Remove T-junction pixels: edge pixels with more than two 8-connected
    # edge neighbours split an edge into separate branches.
    T_junction_maps = np.zeros_like(pos_over)
    for edge_id in range(1, edge_label.max() + 1):
        edge_map = (edge_label == edge_id).astype(np.uint8)
        edge_map = np.pad(edge_map, pad_width=((1,1),(1,1)), mode='constant')
        four_direc = np.roll(edge_map, 1, 1) + np.roll(edge_map, -1, 1) + np.roll(edge_map, 1, 0) + np.roll(edge_map, -1, 0)
        eight_direc = np.roll(np.roll(edge_map, 1, 1), 1, 0) + np.roll(np.roll(edge_map, 1, 1), -1, 0) + \
                      np.roll(np.roll(edge_map, -1, 1), 1, 0) + np.roll(np.roll(edge_map, -1, 1), -1, 0)
        eight_direc = (eight_direc + four_direc)[1:-1,1:-1]
        pos_over[eight_direc > 2] = 0
        T_junction_maps[eight_direc > 2] = 1
    _, edge_label = cv2.connectedComponents(pos_over.astype(np.uint8), connectivity=8)
    edge_label = np.pad(edge_label, 1, mode='constant')
    return edge_label
def max_size(mat, value=0):
  """Return (height, width) of the largest rectangle consisting only of
  *value* entries in the 2D matrix *mat*.

  Sweeps row by row, maintaining a histogram of consecutive matching cells
  above each column, and scores each histogram with
  max_rectangle_size.  Returns (0, 0) for an empty matrix.
  """
  if not (mat and mat[0]):
    return (0, 0)
  heights = [(cell == value) for cell in mat[0]]
  best = max_rectangle_size(heights)
  for row in mat[1:]:
    heights = [(1 + h) if cell == value else 0 for h, cell in zip(heights, row)]
    best = max(best, max_rectangle_size(heights), key=get_area)
  return best
def max_rectangle_size(histogram):
  """Return (height, width) of the largest rectangle under *histogram*.

  Classic stack-based largest-rectangle-in-histogram scan: the stack holds
  (start_index, height) pairs of still-open rectangles; a rectangle is
  closed and scored whenever the bar height drops below the stack top.

  Improvements over the previous version: the per-call ``namedtuple`` class
  creation is gone (plain tuples are used) and the area key is computed
  locally instead of via the module-level ``get_area`` helper, making the
  function self-contained.  Behavior and the returned tuples are unchanged.

  Args:
      histogram: sequence of non-negative bar heights.

  Returns:
      (height, width) of a maximal-area rectangle; (0, 0) when empty.
  """
  def _area(size):
    return size[0] * size[1]
  stack = []    # open rectangles as (start_index, height) pairs
  best = (0, 0)
  pos = 0       # current position in the histogram
  for pos, height in enumerate(histogram):
    start = pos   # leftmost index a rectangle of this height can span
    while True:
      if not stack or height > stack[-1][1]:
        stack.append((start, height))   # open a taller rectangle
      if stack and height < stack[-1][1]:
        # Current bar is lower: close the top rectangle and score it.
        top_start, top_height = stack.pop()
        best = max(best, (top_height, pos - top_start), key=_area)
        start = top_start
        continue
      break   # height == stack top: the open rectangle simply continues
  pos += 1
  # Close every rectangle still open at the right edge.
  for start, height in stack:
    best = max(best, (height, pos - start), key=_area)
  return best
def get_area(size):
  """Return the area of a size tuple, i.e. the product of all of its entries."""
  return reduce(mul, size)
def find_anchors(matrix):
  """Locate the largest all-zero rectangle in *matrix*.

  Returns (top, bottom, left, right) bounds with bottom/right exclusive, or
  None (implicitly) if no window of the maximal size is entirely zero.
  """
  grid = [[*row] for row in matrix]
  mh, mw = max_size(grid)
  grid = np.array(grid)
  # Slide an (mh x mw) window and return the first all-zero placement.
  for top in range(grid.shape[0] + 1 - mh):
    for left in range(grid.shape[1] + 1 - mw):
      if grid[top:top + mh, left:left + mw].max() == 0:
        return top, top + mh, left, left + mw
def find_largest_rect(dst_img, bg_color=(128, 128, 128)):
  """Find the largest axis-aligned background rectangle in *dst_img*.

  Pixels equal to *bg_color* (in the first three channels) count as
  background.  Background components touching the image border are kept as
  croppable area; interior background specks are treated as noise.

  Returns:
      (top, bottom, left, right) bounds from find_anchors (bottom/right
      exclusive), or None if find_anchors finds no valid placement.
  """
  valid = np.any(dst_img[..., :3] != bg_color, axis=-1)
  dst_h, dst_w = dst_img.shape[:2]
  ret, labels = cv2.connectedComponents(np.uint8(valid == False))
  red_mat = np.zeros_like(labels)
  # denoise: keep only background components that reach the image border.
  for i in range(1, np.max(labels)+1, 1):
    x, y, w, h = cv2.boundingRect(np.uint8(labels==i))
    # Bug fix: cv2.boundingRect returns (x, y, w, h) with x as the COLUMN
    # offset, so the right edge must be compared against the image width
    # and the bottom edge against the height.  The original swapped
    # dst_h/dst_w, which only behaved correctly for square images.
    if x == 0 or (x+w) == dst_w or y == 0 or (y+h) == dst_h:
      red_mat[labels==i] = 1
  # crop to the largest rectangle free of foreground pixels.
  t, b, l, r = find_anchors(red_mat)
  return t, b, l, r
| 53.803584 | 200 | 0.536186 |
67ad271c3a419f3276011598e3a58615d8ce49b7 | 11,114 | py | Python | scales/mux/sink.py | jeffreythewang/scales | 9b068c6308eb7eb98f629e0542a0b0f28c04c960 | [
"MIT"
] | null | null | null | scales/mux/sink.py | jeffreythewang/scales | 9b068c6308eb7eb98f629e0542a0b0f28c04c960 | [
"MIT"
] | null | null | null | scales/mux/sink.py | jeffreythewang/scales | 9b068c6308eb7eb98f629e0542a0b0f28c04c960 | [
"MIT"
] | null | null | null | from abc import abstractmethod
import logging
import time
from struct import unpack
from cStringIO import StringIO
import gevent
from gevent.queue import Queue
from ..async import (
AsyncResult,
NamedGreenlet
)
from ..constants import ChannelState
from ..message import (
Deadline,
ClientError,
MethodReturnMessage
)
from ..sink import (
ClientMessageSink,
)
from ..varz import (
AggregateTimer,
AverageTimer,
Counter,
Gauge,
Rate,
Source,
VarzBase
)
from ..constants import TransportHeaders
ROOT_LOG = logging.getLogger('scales.mux')
class Tag(object):
  """A mux request tag: a 24-bit identifier carried on each message."""
  KEY = "__Tag"

  def __init__(self, tag):
    """Args:
      tag - The integer tag value (fits in 24 bits).
    """
    self._tag = tag

  def Encode(self):
    """Encode the tag as a big-endian list of three byte values."""
    value = self._tag
    return [(value >> shift) & 0xff for shift in (16, 8, 0)]
class TagPool(object):
  """Manages the pool of tags used to multiplex requests over one connection.

  Free tags are handed out from a reuse set when possible; otherwise the
  pool grows by allocating the next unused tag, up to max_tag.
  """
  POOL_LOGGER = ROOT_LOG.getChild('TagPool')

  class Varz(VarzBase):
    _VARZ_BASE_NAME = 'scales.mux.TagPool'
    _VARZ = {
      'pool_exhausted': Counter,
      'max_tag': Gauge
    }

  def __init__(self, max_tag, service, host):
    """Args:
      max_tag - The largest tag value this pool may hand out.
      service - Service name, used for varz/log scoping.
      host - Endpoint identifier, used for varz/log scoping.
    """
    self._set = set()
    self._next = 1
    self._max_tag = max_tag
    self._varz = self.Varz(Source(service=service, endpoint=host))
    self._log = self.POOL_LOGGER.getChild('[%s.%s]' % (service, host))

  def get(self):
    """Lease a tag from the pool.

    Returns:
      A tag.

    Raises:
      Exception if every tag up to max_tag is already leased.
    """
    # Prefer reusing a previously released tag.
    if self._set:
      return self._set.pop()
    if self._next == self._max_tag - 1:
      self._varz.pool_exhausted()
      raise Exception("No tags left in pool.")
    self._next += 1
    new_tag = self._next
    self._log.debug('Allocating new tag, max is now %d' % new_tag)
    self._varz.max_tag(new_tag)
    return new_tag

  def release(self, tag):
    """Return a previously leased tag to the pool.

    Args:
      tag - The previously leased tag.
    """
    if tag in self._set:
      self._log.warning('Tag %d has been returned more than once!' % tag)
    self._set.add(tag)
class MuxSocketTransportSink(ClientMessageSink):
  """A transport sink for tmux servers.
  This sink supports concurrent requests over its transport: each pending
  request is keyed by a tag leased from a TagPool, outbound writes are
  serialized through a queue consumed by a send greenlet, and a second
  greenlet reads length-prefixed replies off the socket.
  """
  SINK_LOG = ROOT_LOG.getChild('SocketTransportSink')
  # NOTE(review): not referenced within this class body; presumably a shared
  # empty-properties default for subclasses -- confirm before removing.
  _EMPTY_DCT = {}
  # Sentinel reason passed to _Shutdown() by Close() so a deliberate close is
  # logged at debug level rather than looking like a fault.
  _CLOSE_INVOKED = "Close invoked"
  class Varz(VarzBase):
    """
    messages_sent - The number of messages sent over this sink.
    messages_recv - The number of messages received over this sink.
    active - 1 if the sink is open, else 0.
    send_queue_size - The length of the send queue.
    send_time - The aggregate amount of time spent sending data.
    recv_time - The aggregate amount of time spent receiving data.
    send_latency - The average amount of time taken to send a message.
    recv_latency - The average amount of time taken to receive a message
                   (once a response has reached the client).
    transport_latency - The average amount of time taken to perform a full
                        method call transaction (send data, wait for response,
                        read response).
    """
    _VARZ_BASE_NAME = 'scales.thriftmux.SocketTransportSink'
    _VARZ = {
      'messages_sent': Rate,
      'messages_recv': Rate,
      'active': Gauge,
      'send_queue_size': Gauge,
      'send_time': AggregateTimer,
      'recv_time': AggregateTimer,
      'send_latency': AverageTimer,
      'recv_latency': AverageTimer,
      'transport_latency': AverageTimer
    }
  def __init__(self, socket, service):
    """Args:
      socket - An (unopened) socket transport exposing host, port, open(),
               close(), write() and readAll().
      service - The service name; used for logging and varz attribution.
    """
    super(MuxSocketTransportSink, self).__init__()
    self._socket = socket
    self._state = ChannelState.Idle
    self._log = self.SINK_LOG.getChild('[%s.%s:%d]' % (
      service, self._socket.host, self._socket.port))
    self._socket_source = '%s:%d' % (self._socket.host, self._socket.port)
    self._service = service
    self._open_result = None
    self._varz = self.Varz(Source(service=self._service,
      endpoint=self._socket_source))
  def _Init(self):
    # (Re)initialize all per-connection state; invoked by Open().
    self._tag_map = {}
    self._open_result = None
    # Tags are encoded as three bytes on the wire (see Tag.Encode), so the
    # pool's maximum tag is 2**24 - 1.
    self._tag_pool = TagPool((2 ** 24) - 1, self._service, self._socket_source)
    self._greenlets = []
    self._send_queue = Queue()
  @property
  def isActive(self):
    # Idle and Open both count as active; only a Closed sink is inactive.
    return self._state != ChannelState.Closed
  @property
  def state(self):
    # Current ChannelState: Idle until opened, then Open, then Closed.
    return self._state
  def Open(self):
    """Initializes the dispatcher, opening a connection to the remote host.
    This method may only be called once.
    Returns:
      An AsyncResult resolved when the open attempt completes.
    """
    if not self._open_result:
      self._Init()
      self._open_result = AsyncResult()
      self._open_result.SafeLink(self._OpenImpl)
    return self._open_result
  def _SpawnNamedGreenlet(self, name, *args, **kwargs):
    """Spawn a greenlet named after this sink's service and endpoint."""
    return NamedGreenlet.spawn(
      'Scales %s for %s [%s]' % (name, self._service, self._socket_source),
      *args,
      **kwargs)
  def _OpenImpl(self):
    """Open the socket, start the send/recv greenlets, and run the subclass's
    initial connection check.  Any failure shuts the sink down and re-raises
    so Open()'s AsyncResult is faulted.
    """
    try:
      self._log.debug('Opening transport.')
      self._socket.open()
      self._greenlets.append(self._SpawnNamedGreenlet('Recv Loop', self._RecvLoop))
      self._greenlets.append(self._SpawnNamedGreenlet('Send Loop', self._SendLoop))
      self._CheckInitialConnection()
      self._log.debug('Open successful')
      self._state = ChannelState.Open
      self._varz.active(1)
    except Exception as e:
      self._log.error('Exception opening socket')
      self._open_result.set_exception(e)
      self._Shutdown('Open failed')
      raise
  @abstractmethod
  def _CheckInitialConnection(self):
    """Subclass hook: validate the newly opened connection (handshake, etc)."""
    raise NotImplementedError()
  def Close(self):
    """Close the sink without signaling a fault to on_faulted observers."""
    self._Shutdown(self._CLOSE_INVOKED, False)
  def _Shutdown(self, reason, fault=True):
    """Tear the transport down.
    Args:
      reason - The shutdown reason (string or exception); forwarded to any
               requests still awaiting replies as a ClientError.
      fault - If True, notify on_faulted observers with the reason.
    """
    if not self.isActive:
      return
    self._state = ChannelState.Closed
    if reason == self._CLOSE_INVOKED:
      log_fn = self._log.debug
    else:
      log_fn = self._log.warning
    log_fn('Shutting down transport [%s].' % str(reason))
    self._varz.active(0)
    self._socket.close()
    [g.kill(block=False) for g in self._greenlets]
    self._greenlets = []
    if not isinstance(reason, Exception):
      reason = Exception(str(reason))
    if fault:
      self.on_faulted.Set(reason)
    # Fail every request that's still awaiting a reply.
    msg = MethodReturnMessage(error=ClientError(reason))
    for sink_stack, _, _ in self._tag_map.values():
      sink_stack.AsyncProcessResponseMessage(msg)
    # If an Open() is still in flight, fault its AsyncResult too.
    if (self._open_result
        and not self._open_result.ready()
        and isinstance(reason, Exception)):
      self._open_result.set_exception(reason)
    self._tag_map = {}
    self._open_result = None
    self._send_queue = Queue()
  @abstractmethod
  def _OnTimeout(self, tag):
    """Subclass hook: a message with this tag timed out after being sent."""
    raise NotImplementedError()
  def _HandleTimeout(self, msg_properties):
    """Determine if a message has timed out yet (because it waited in the queue
    for too long). If it hasn't, initialize the timeout handler to fire if the
    message times out in transit.
    Args:
      msg_properties - The properties of the message.
    Returns:
      True if the message already timed out and should be discarded unsent,
      else False.
    """
    timeout_event = msg_properties.get(Deadline.EVENT_KEY, None)
    if timeout_event and timeout_event.Get():
      # The message has timed out before it got out of the send queue
      # In this case, we can discard it immediately without even sending it.
      tag = msg_properties.pop(Tag.KEY, 0)
      if tag != 0:
        self._ReleaseTag(tag)
      return True
    elif timeout_event:
      # The event exists but hasn't been signaled yet, hook up a
      # callback so we can be notified on a timeout.
      def timeout_proc():
        # Popping the tag makes timeout vs. reply idempotent: whichever
        # happens first clears Tag.KEY from the properties.
        timeout_tag = msg_properties.pop(Tag.KEY, 0)
        if timeout_tag:
          self._OnTimeout(timeout_tag)
      timeout_event.Subscribe(lambda evt: timeout_proc(), True)
      return False
    else:
      # No event was created, so this will never timeout.
      return False
  def _SendLoop(self):
    """Dispatch messages from the send queue to the remote server.
    Note: Messages in the queue have already been serialized into wire format.
    """
    while self.isActive:
      try:
        payload, dct = self._send_queue.get()
        queue_len = self._send_queue.qsize()
        self._varz.send_queue_size(queue_len)
        # HandleTimeout sets up the transport level timeout handling
        # for this message. If the message times out in transit, this
        # transport will handle sending a Tdiscarded to the server.
        if self._HandleTimeout(dct): continue
        with self._varz.send_time.Measure():
          with self._varz.send_latency.Measure():
            self._socket.write(payload)
        self._varz.messages_sent()
      except Exception as e:
        # Any socket error is fatal to the whole connection.
        self._Shutdown(e)
        break
  def _RecvLoop(self):
    """Dispatch messages from the remote server to their recipient.
    Note: Deserialization and dispatch occurs on a seperate greenlet, this only
    reads the message off the wire.
    """
    while self.isActive:
      try:
        # Frames are prefixed by a 4-byte big-endian size ('!i').
        sz, = unpack('!i', str(self._socket.readAll(4)))
        with self._varz.recv_time.Measure():
          with self._varz.recv_latency.Measure():
            buf = StringIO(self._socket.readAll(sz))
        self._varz.messages_recv()
        gevent.spawn(self._ProcessReply, buf)
      except Exception as e:
        self._Shutdown(e)
        break
  @abstractmethod
  def _ProcessReply(self, stream):
    """Subclass hook: decode one framed reply stream and dispatch it."""
    raise NotImplementedError()
  def _ProcessTaggedReply(self, tag, stream):
    """Deliver a reply stream to the sink stack that was waiting on `tag`."""
    tup = self._ReleaseTag(tag)
    if tup:
      reply_stack, start_time, props = tup
      # Clearing Tag.KEY prevents a late timeout callback from invoking
      # _OnTimeout for a tag that has already been answered.
      props[Tag.KEY] = None
      self._varz.transport_latency(time.time() - start_time)
      stream.seek(0)
      reply_stack.AsyncProcessResponseStream(stream)
  def _ReleaseTag(self, tag):
    """Return a tag to the tag pool.
    Note: Tags are only returned when the server has ACK'd them (or NACK'd) with
    and Rdispatch message (or similar). Client initiated timeouts do NOT return
    tags to the pool.
    Args:
      tag - The tag to return.
    Returns:
      The (sink_stack, start_time, properties) tuple associated with the tag,
      or None if the tag was unknown.
    """
    tup = self._tag_map.pop(tag, None)
    self._tag_pool.release(tag)
    return tup
  @abstractmethod
  def _BuildHeader(self, tag, msg_type, data_len):
    """Subclass hook: build the wire header for a data_len-byte message."""
    raise NotImplementedError()
  def AsyncProcessRequest(self, sink_stack, msg, stream, headers):
    """Serialize and enqueue one request for transmission.
    One-way messages are sent with tag 0 and never tracked; all others lease
    a tag and are recorded in the tag map until a reply (or fault) arrives.
    """
    if self._state == ChannelState.Idle and self._open_result:
      self._log.debug('Waiting for channel to be open')
      self._open_result.wait()
    if ((self._state == ChannelState.Idle and not self._open_result)
        or self._state == ChannelState.Closed) and sink_stack is not None:
      err_msg = MethodReturnMessage(error=Exception('Sink not open.'))
      sink_stack.AsyncProcessResponseMessage(err_msg)
      return
    if not msg.is_one_way:
      tag = self._tag_pool.get()
      msg.properties[Tag.KEY] = tag
      self._tag_map[tag] = (sink_stack, time.time(), msg.properties)
    else:
      tag = 0
    data_len = stream.tell()
    header = self._BuildHeader(tag, headers[TransportHeaders.MessageType], data_len)
    payload = header + stream.getvalue()
    self._send_queue.put((payload, msg.properties))
  def AsyncProcessResponse(self, sink_stack, context, stream, msg):
    # No-op: replies are routed to their waiting sink stacks directly by
    # _ProcessTaggedReply rather than flowing back through this sink.
    pass
| 29.876344 | 84 | 0.669066 |
7b077af5c3502dcb522f2786d54b3bfc2af1a42d | 97 | py | Python | mysite/settings/__init__.py | lasher85/AirCheck | 52b2f78c7d797999df4952b1bcac9f7d2c12b42c | [
"MIT"
] | null | null | null | mysite/settings/__init__.py | lasher85/AirCheck | 52b2f78c7d797999df4952b1bcac9f7d2c12b42c | [
"MIT"
] | 8 | 2019-12-04T23:44:11.000Z | 2022-02-10T08:31:40.000Z | mysite/settings/__init__.py | lasher85/AirCheck | 52b2f78c7d797999df4952b1bcac9f7d2c12b42c | [
"MIT"
] | null | null | null | from .base import *
from .production import *
try:
    # Optional per-developer overrides; absent in most deployments.
    from .development import *
except ImportError:
    # Narrowed from a bare `except:`: only a missing development module is
    # expected here.  A bare except also hid genuine failures (syntax errors
    # in development.py, SystemExit, KeyboardInterrupt), which should surface.
    pass
65a6b4574a218a71efad0c08cb03e791236e175e | 2,278 | py | Python | bindings/python/cntk/tests/attributes_test.py | ArpitSisodia/CNTK | 11732625b3d7b5bca1f447c4790e8c8f10de45e1 | [
"RSA-MD"
] | null | null | null | bindings/python/cntk/tests/attributes_test.py | ArpitSisodia/CNTK | 11732625b3d7b5bca1f447c4790e8c8f10de45e1 | [
"RSA-MD"
] | null | null | null | bindings/python/cntk/tests/attributes_test.py | ArpitSisodia/CNTK | 11732625b3d7b5bca1f447c4790e8c8f10de45e1 | [
"RSA-MD"
] | 1 | 2020-12-24T14:50:54.000Z | 2020-12-24T14:50:54.000Z | # Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
import cntk as C
import numpy as np
def _check(expected, d):
for key in expected:
assert key in d
assert d[key] == expected[key]
for key in d:
assert key in expected
def test_convolution_attributes():
    """A forward convolution node should record the default attribute set."""
    feature = C.input((1, 5, 5))
    weights = np.array([2, -1, -1, 2], dtype=np.float32).reshape(1, 2, 2)
    kernel = C.constant(value=weights)
    conv = C.convolution(kernel, feature, auto_padding=[False])
    expected = {'autoPadding': [False, False, False],
                'sharing': [True, True, True],
                'strides': (1, 1, 1),
                'maxTempMemSizeInSamples': 0,
                'upperPad': (0, 0, 0),
                'lowerPad': (0, 0, 0),
                'transpose': False,
                'outputShape': (0,)}
    _check(expected, conv.root_function.attributes)
def test_convolution_transpose_attributes():
    """A transposed convolution node should record transpose=True plus defaults."""
    feature = C.input((1, 5, 5))
    weights = np.array([2, -1, -1, 2], dtype=np.float32).reshape(1, 2, 2)
    kernel = C.constant(value=weights)
    deconv = C.convolution_transpose(kernel, feature, auto_padding=[False])
    expected = {'autoPadding': [False, False, False],
                'sharing': [True, True, True],
                'strides': (1, 1, 1),
                'maxTempMemSizeInSamples': 0,
                'upperPad': (0, 0, 0),
                'lowerPad': (0, 0, 0),
                'transpose': True,
                'outputShape': (0,)}
    _check(expected, deconv.root_function.attributes)
def test_dropout_attributes():
    """A dropout node should expose its dropout rate as an attribute."""
    node = C.dropout(C.input((1, 5, 5)), 0.5)
    _check({'dropoutRate': 0.5}, node.root_function.attributes)
def test_slice_attributes():
    """Single- and multi-axis slices should record begin/end indices and axes."""
    x = C.input((2, 3))
    single = C.slice(x, 0, 1, 2)
    _check({'endIndex': 2, 'beginIndex': 1, 'axis': ('ordered', 'static', 1)},
           single.root_function.attributes)
    multi = C.slice(x, [0, 1], [1, 0], [2, 2])
    _check({'endIndexVec': [2, 2], 'beginIndexVec': [1, 0],
            'axisVec': [('ordered', 'static', 1), ('ordered', 'static', 0)]},
           multi.root_function.attributes)
| 34 | 126 | 0.55619 |
32d8c2a2e57510782c86c2636d48c26aea47fa28 | 11,339 | py | Python | unitTests/testScripts/TestUtils.py | faichele/NumCpp | 7c8fc50fbe44b80eaa105f0f9258120abddfcec2 | [
"MIT"
] | 1 | 2019-06-17T02:04:04.000Z | 2019-06-17T02:04:04.000Z | unitTests/testScripts/TestUtils.py | lamarrr/NumCpp | a24328e9d8dc472607a09ba50419baf21b1d3142 | [
"MIT"
] | null | null | null | unitTests/testScripts/TestUtils.py | lamarrr/NumCpp | a24328e9d8dc472607a09ba50419baf21b1d3142 | [
"MIT"
] | null | null | null | import numpy as np
from termcolor import colored
import sys
if sys.platform == 'linux':
sys.path.append(r'../lib')
else:
sys.path.append(r'../build/x64/Release')
import NumCpp
####################################################################################
def _report(label, passed):
    """Print a green '\tPASS <label>' or red '\tFAIL <label>' line."""
    if passed:
        print(colored('\tPASS ' + label, 'green'))
    else:
        print(colored('\tFAIL ' + label, 'red'))


def _random_int(dtype, high):
    """Draw a random scalar in [1, high) of the given numpy integer dtype."""
    return np.random.randint(1, high, [1, ], dtype=dtype).item()


def _random_float(dtype, high):
    """Draw a random integral-valued scalar in [1, high) cast to a float dtype
    (np.random.randint has no float dtype support, hence the astype)."""
    return np.random.randint(1, high, [1, ]).astype(dtype).item()


def doTest():
    """Exercise the NumCpp Utils scalar helpers (num2str, sqr, cube, power,
    powerf) against their python equivalents, printing PASS/FAIL per dtype.

    This replaces ~250 lines of per-dtype copy-paste with dtype tables; the
    per-dtype value bounds are kept so results still fit the narrow dtypes.
    """
    int_types = [('int8', np.int8), ('int16', np.int16),
                 ('int32', np.int32), ('int64', np.int64),
                 ('uint8', np.uint8), ('uint16', np.uint16),
                 ('uint32', np.uint32), ('uint64', np.uint64)]
    float_types = [('double', np.double), ('float', np.float32)]

    print(colored('Testing Utils', 'magenta'))

    print(colored('Testing num2str', 'cyan'))
    for label, dtype in int_types:
        value = _random_int(dtype, 100)
        _report(label, NumCpp.num2str(value) == str(value))

    print(colored('Testing sqr', 'cyan'))
    # value**2 must fit the dtype: 11**2 < 127 (int8), 14**2 < 255 (uint8).
    sqr_highs = {'int8': 12, 'uint8': 15}
    for label, dtype in int_types:
        value = _random_int(dtype, sqr_highs.get(label, 100))
        _report(label, NumCpp.sqr(value) == value ** 2)
    for label, dtype in float_types:
        value = _random_float(dtype, 100)
        _report(label, NumCpp.sqr(value) == value ** 2)

    print(colored('Testing cube', 'cyan'))
    # value**3 must fit the dtype: 5**3 < 127, 31**3 < 2**15, 6**3 < 255, etc.
    cube_highs = {'int8': 6, 'int16': 32, 'uint8': 7, 'uint16': 41}
    for label, dtype in int_types:
        value = _random_int(dtype, cube_highs.get(label, 100))
        _report(label, NumCpp.cube(value) == value ** 3)
    for label, dtype in float_types:
        value = _random_float(dtype, 100)
        _report(label, NumCpp.cube(value) == value ** 3)

    print(colored('Testing power', 'cyan'))
    # A single integer exponent in [1, 3] shared by every dtype's check.
    power = np.random.randint(1, 4, dtype=np.uint8).item()
    pow_highs = {'int8': 4, 'uint8': 4}
    for label, dtype in int_types:
        value = _random_int(dtype, pow_highs.get(label, 10))
        _report(label, NumCpp.power(value, power) == value ** power)
    for label, dtype in float_types:
        value = _random_float(dtype, 10)
        _report(label, NumCpp.power(value, power) == value ** power)

    print(colored('Testing powerf', 'cyan'))
    # A single fractional exponent in [0, 10) shared by every dtype's check.
    power = np.random.rand(1).item() * 10
    for label, dtype in int_types:
        value = _random_int(dtype, pow_highs.get(label, 10))
        _report(label, NumCpp.powerf(value, power) == value ** power)
    for label, dtype in float_types:
        value = _random_float(dtype, 10)
        _report(label, NumCpp.powerf(value, power) == value ** power)
####################################################################################
if __name__ == '__main__':
doTest()
| 36.459807 | 84 | 0.566364 |
5dcccca7e3a75453c70d622598c682f1ab37976c | 8,439 | py | Python | text-classification/a06_Transformer/a2_predict_classification.py | sliderSun/pynlp | 0f1aa73f8c4bd3faba18dbb6e402d251e308bb50 | [
"MIT"
] | 71 | 2019-01-28T16:18:37.000Z | 2022-03-24T13:47:08.000Z | text-classification/a06_Transformer/a2_predict_classification.py | sliderSun/pynlp | 0f1aa73f8c4bd3faba18dbb6e402d251e308bb50 | [
"MIT"
] | 3 | 2020-06-16T10:12:50.000Z | 2021-04-08T09:41:35.000Z | text-classification/a06_Transformer/a2_predict_classification.py | sliderSun/pynlp | 0f1aa73f8c4bd3faba18dbb6e402d251e308bb50 | [
"MIT"
] | 39 | 2019-02-14T09:45:17.000Z | 2021-12-11T02:32:09.000Z | # -*- coding: utf-8 -*-
#prediction using model.
#process--->1.load data(X:list of lint,y:int). 2.create session. 3.feed data. 4.predict
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import tensorflow as tf
import numpy as np
from a2_transformer_classification import Transformer
from data_util_zhihu import load_data_predict,load_final_test_data,create_voabulary,create_voabulary_label
from keras.preprocessing.sequence import pad_sequences #to_categorical
import os
import codecs
# configuration -- all hyper-parameters and file paths are exposed as TF flags.
FLAGS=tf.flags.FLAGS
tf.flags.DEFINE_integer("num_classes",1999+3,"number of label") #3 ADDITIONAL TOKEN: _GO,_END,_PAD
tf.flags.DEFINE_float("learning_rate",0.01,"learning rate")
tf.flags.DEFINE_integer("batch_size", 128, "Batch size for training/evaluating.") # batch size 32-->128 #16
tf.flags.DEFINE_integer("decay_steps", 6000, "how many steps before decay learning rate.") # 6000; batch size 32-->128
tf.flags.DEFINE_float("decay_rate", 1.0, "Rate of decay for learning rate.") # 0.87; how much to decay each time
tf.flags.DEFINE_string("ckpt_dir","../checkpoint_transformer_classification/","checkpoint location for the model")
tf.flags.DEFINE_integer("sequence_length",60,"max sentence length") #100-->25
tf.flags.DEFINE_integer("embed_size",512,"embedding size")
tf.flags.DEFINE_boolean("is_training",False,"is traning.true:tranining,false:testing/inference")
#tf.flags.DEFINE_string("cache_path","text_cnn_checkpoint/data_cache.pik","checkpoint location for the model")
#train-zhihu4-only-title-all.txt
tf.flags.DEFINE_string("word2vec_model_path","../zhihu-word2vec-title-desc.bin-512","word2vec's vocabulary and vectors") #zhihu-word2vec.bin-100-->zhihu-word2vec-multilabel-minicount15.bin-100
tf.flags.DEFINE_boolean("multi_label_flag",False,"use multi label or single label.") #set this false. becase we are using it is a sequence of token here.
tf.flags.DEFINE_float("l2_lambda", 0.0001, "l2 regularization")
tf.flags.DEFINE_string("predict_target_file","../checkpoint_transformer_classification/zhihu_result_transformer_classification.csv","target file path for final prediction")
tf.flags.DEFINE_string("predict_source_file",'../test-zhihu-forpredict-title-desc-v6.txt',"target file path for final prediction") #test-zhihu-forpredict-v4only-title.txt
# Transformer architecture hyper-parameters (model width, per-head dims,
# number of attention heads, and encoder layer count).
tf.flags.DEFINE_integer("d_model",512,"hidden size")
tf.flags.DEFINE_integer("d_k",64,"hidden size")
tf.flags.DEFINE_integer("d_v",64,"hidden size")
tf.flags.DEFINE_integer("h",8,"hidden size")
tf.flags.DEFINE_integer("num_layer",1,"hidden size") #6
#1.load data(X:list of lint,y:int). 2.create session. 3.feed data. 4.training (5.validation) ,(6.prediction)
# 1.load data with vocabulary of words and labels
# Special vocabulary tokens; these account for the 3 extra classes above.
_GO="_GO"
_END="_END"
_PAD="_PAD"
def main(_):
    """Restore the trained Transformer classifier from its checkpoint, run
    batched prediction over FLAGS.predict_source_file, and append one
    'question_id,label1,...,label5' row per question to
    FLAGS.predict_target_file.
    """
    # 1.load data with vocabulary of words and labels
    vocabulary_word2index, vocabulary_index2word = create_voabulary(word2vec_model_path=FLAGS.word2vec_model_path,name_scope="transformer_classification") # simple='simple'
    vocab_size = len(vocabulary_word2index)
    print("transformer_classification.vocab_size:", vocab_size)
    vocabulary_word2index_label, vocabulary_index2word_label = create_voabulary_label(name_scope="transformer_classification")
    questionid_question_lists=load_final_test_data(FLAGS.predict_source_file)
    print("list of total questions:",len(questionid_question_lists))
    test= load_data_predict(vocabulary_word2index,vocabulary_word2index_label,questionid_question_lists)
    print("list of total questions2:",len(test))
    testX=[]
    question_id_list=[]
    # Split the (question_id, token-id list) pairs into two parallel lists.
    # NOTE: the loop variable 'tuple' shadows the builtin of the same name.
    for tuple in test:
        question_id,question_string_list=tuple
        question_id_list.append(question_id)
        testX.append(question_string_list)
    # 2.Data preprocessing: Sequence padding
    print("start padding....")
    testX2 = pad_sequences(testX, maxlen=FLAGS.sequence_length, value=0.)  # padding to max length
    print("list of total questions3:", len(testX2))
    print("end padding...")
    # 3.create session.
    config=tf.ConfigProto()
    config.gpu_options.allow_growth=True
    with tf.Session(config=config) as sess:
        # 4.Instantiate Model
        model=Transformer(FLAGS.num_classes, FLAGS.learning_rate, FLAGS.batch_size, FLAGS.decay_steps, FLAGS.decay_rate, FLAGS.sequence_length,
                 vocab_size, FLAGS.embed_size,FLAGS.d_model,FLAGS.d_k,FLAGS.d_v,FLAGS.h,FLAGS.num_layer,FLAGS.is_training,l2_lambda=FLAGS.l2_lambda)
        saver=tf.train.Saver()
        if os.path.exists(FLAGS.ckpt_dir+"checkpoint"):
            print("Restoring Variables from Checkpoint")
            saver.restore(sess,tf.train.latest_checkpoint(FLAGS.ckpt_dir))
        else:
            # Prediction without trained weights is meaningless; bail out.
            print("Can't find the checkpoint.going to stop")
            return
        # 5.feed data, to get logits
        number_of_training_data=len(testX2);print("number_of_training_data:",number_of_training_data)
        index=0
        predict_target_file_f = codecs.open(FLAGS.predict_target_file, 'a', 'utf8')
        # NOTE(review): zipping these two ranges drops the final partial batch,
        # so questions beyond the last multiple of batch_size are never
        # predicted -- confirm whether that is intended.
        for start, end in zip(range(0, number_of_training_data, FLAGS.batch_size),range(FLAGS.batch_size, number_of_training_data+1, FLAGS.batch_size)):
            logits=sess.run(model.logits,feed_dict={model.input_x:testX2[start:end],model.dropout_keep_prob:1}) #logits:[batch_size,self.num_classes]
            question_id_sublist=question_id_list[start:end]
            get_label_using_logits_batch(question_id_sublist, logits, vocabulary_index2word_label, predict_target_file_f)
            # 6. get lable using logtis
            #predicted_labels=get_label_using_logits(logits[0],vocabulary_index2word_label)
            #print(index," ;predicted_labels:",predicted_labels)
            # 7. write question id and labels to file system.
            #write_question_id_with_labels(question_id_list[index],predicted_labels,predict_target_file_f)
            index=index+1
        predict_target_file_f.close()
# get label using logits
def get_label_using_logits_batch(question_id_sublist, logits_batch, vocabulary_index2word_label, f, top_number=5):
    """For every row of logits, write its question id plus the top_number
    highest-scoring labels (best first) to the output file, then flush."""
    print("get_label_using_logits.shape:", np.array(logits_batch).shape)
    for i, logits in enumerate(logits_batch):
        # Ascending argsort -> keep the last top_number -> reverse for best-first.
        top_indices = np.argsort(logits)[-top_number:][::-1]
        label_list = [vocabulary_index2word_label[idx] for idx in top_indices]
        write_question_id_with_labels(question_id_sublist[i], label_list, f)
    f.flush()
# get label using logits
def get_label_using_logits(logits, vocabulary_index2word_label, top_number=5):
    """Return the labels of the top_number highest logits, best first."""
    # Descending argsort truncated to top_number; identical permutation to
    # taking the last top_number of the ascending sort and reversing.
    ranked = np.argsort(logits)[::-1][:top_number]
    return [vocabulary_index2word_label[idx] for idx in ranked]
def process_each_row_get_lable(row, vocabulary_index2word_label, vocabulary_word2index_label, result_list):
    """Pick the best not-yet-used, non-special label for one row of scores.

    :param row: it is a list.length is number of labels. e.g. 2002
    :param vocabulary_index2word_label: index -> label string
    :param vocabulary_word2index_label: label string -> index (used to look up
        the _PAD/_END marker indices)
    :param result_list: labels already emitted; skipped here
    :return: the label string, or None if every candidate was excluded
    """
    # Walk indices from highest score to lowest.
    for index in reversed(np.argsort(row)):
        label = vocabulary_index2word_label[index]
        if (label not in result_list
            and index != vocabulary_word2index_label[_PAD]
            and index != vocabulary_word2index_label[_END]):
            return label
# write question id and labels to file system.
def write_question_id_with_labels(question_id, labels_list, f):
    """Append one CSV row: the question id followed by its comma-joined labels."""
    f.write("%s,%s\n" % (question_id, ",".join(labels_list)))
if __name__ == "__main__":
tf.app.run() | 56.637584 | 197 | 0.747126 |
1dc8c07f1525f3ec68db3bb5ab00ed94840a25c1 | 1,082 | py | Python | sponge-jython/examples/script/py/filters_java.py | mnpas/sponge | 7190f23ae888bbef49d0fbb85157444d6ea48bcd | [
"Apache-2.0"
] | 9 | 2017-12-16T21:48:57.000Z | 2022-01-06T12:22:24.000Z | sponge-jython/examples/script/py/filters_java.py | mnpas/sponge | 7190f23ae888bbef49d0fbb85157444d6ea48bcd | [
"Apache-2.0"
] | 3 | 2020-12-18T11:56:46.000Z | 2022-03-31T18:37:10.000Z | sponge-jython/examples/script/py/filters_java.py | mnpas/sponge | 7190f23ae888bbef49d0fbb85157444d6ea48bcd | [
"Apache-2.0"
] | 2 | 2019-12-29T16:08:32.000Z | 2020-06-15T14:05:34.000Z | """
Sponge Knowledge Base
Using java filters
"""
from java.util import Collections, HashMap
from java.util.concurrent.atomic import AtomicInteger
from org.openksavi.sponge.examples import ShapeFilter
def onInit():
    """Create a thread-safe map of per-event AtomicInteger counters (used only
    by the test assertions) and publish it as the 'eventCounter' variable."""
    global eventCounter
    eventCounter = Collections.synchronizedMap(HashMap())
    for event_name in ("e1", "e2", "e3"):
        eventCounter.put(event_name, AtomicInteger(0))
    sponge.setVariable("eventCounter", eventCounter)
class FilterTrigger(Trigger):
    """Counts every e1/e2/e3 event that reaches the trigger chain."""
    def onConfigure(self):
        """Subscribe this trigger to the three test events."""
        self.withEvents(["e1", "e2", "e3"])
    def onRun(self, event):
        """Log the event and bump the atomic counter for its name."""
        global eventCounter
        self.logger.debug("Processing trigger for event {}", event)
        eventCounter.get(event.name).incrementAndGet()
def onLoad():
    # Register the Java-implemented ShapeFilter class with the engine.
    sponge.enableJava(ShapeFilter)
def onStartup():
    # Schedule the test events; e1 carries no "shape" attribute while e2/e3
    # do, which is what the ShapeFilter discriminates on.
    # NOTE(review): sendAfter(delay, interval) — presumably milliseconds;
    # verify against the Sponge event API.
    sponge.event("e1").sendAfter(100, 100)
    sponge.event("e2").set("shape", "square").sendAfter(200, 100)
    sponge.event("e3").set("shape", "circle").sendAfter(300, 100)
| 30.914286 | 68 | 0.682994 |
616e742ca65e6c7b9941ab7270847836bb6b59e6 | 36,640 | py | Python | swift/common/db_replicator.py | thiagodasilva/swift | 0553d9333ed0045c4d209065b315533a33e5d7d7 | [
"Apache-2.0"
] | null | null | null | swift/common/db_replicator.py | thiagodasilva/swift | 0553d9333ed0045c4d209065b315533a33e5d7d7 | [
"Apache-2.0"
] | null | null | null | swift/common/db_replicator.py | thiagodasilva/swift | 0553d9333ed0045c4d209065b315533a33e5d7d7 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
import math
import time
import shutil
import uuid
import errno
import re
from contextlib import contextmanager
from swift import gettext_ as _
from eventlet import GreenPool, sleep, Timeout
from eventlet.green import subprocess
import swift.common.db
from swift.common.direct_client import quote
from swift.common.utils import get_logger, whataremyips, storage_directory, \
renamer, mkdirs, lock_parent_directory, config_true_value, \
unlink_older_than, dump_recon_cache, rsync_module_interpolation, ismount, \
json, Timestamp
from swift.common import ring
from swift.common.ring.utils import is_local_device
from swift.common.http import HTTP_NOT_FOUND, HTTP_INSUFFICIENT_STORAGE
from swift.common.bufferedhttp import BufferedHTTPConnection
from swift.common.exceptions import DriveNotMounted
from swift.common.daemon import Daemon
from swift.common.swob import Response, HTTPNotFound, HTTPNoContent, \
HTTPAccepted, HTTPBadRequest
DEBUG_TIMINGS_THRESHOLD = 10
def quarantine_db(object_file, server_type):
    """
    Move the directory holding a corrupt database file into the device's
    quarantine area so that replication can later restore a good copy.

    :param object_file: path to corrupt file
    :param server_type: type of file that is corrupt
                        ('container' or 'account')
    """
    source_dir = os.path.dirname(object_file)
    # The quarantine area lives four levels up, beside the datadir:
    # <device>/quarantined/<server_type>s/<hash_dir_name>
    device_root = os.path.join(source_dir, '..', '..', '..', '..')
    target_dir = os.path.abspath(
        os.path.join(device_root, 'quarantined', server_type + 's',
                     os.path.basename(source_dir)))
    try:
        renamer(source_dir, target_dir, fsync=False)
    except OSError as e:
        # A previous quarantine may already occupy the target; anything
        # other than "already exists / not empty" is unexpected.
        if e.errno not in (errno.EEXIST, errno.ENOTEMPTY):
            raise
        # Disambiguate with a random suffix and retry once.
        renamer(source_dir, '%s-%s' % (target_dir, uuid.uuid4().hex),
                fsync=False)
def roundrobin_datadirs(datadirs):
    """
    Generator that walks a set of data dirs round-robin so every device on
    the system is hit evenly, yielding each .db file found at the expected
    partition/suffix/hash depth.  Partitions within a single data dir are
    visited in random order.

    :param datadirs: a list of (path, node_id) to walk
    :returns: A generator of (partition, path_to_db_file, node_id)
    """

    def dbs_in(datadir, node_id):
        # Walk one device's datadir; empty partition dirs are reaped
        # (rmdir'd) as they are encountered.
        partitions = os.listdir(datadir)
        random.shuffle(partitions)
        for partition in partitions:
            part_path = os.path.join(datadir, partition)
            if not os.path.isdir(part_path):
                continue
            suffixes = os.listdir(part_path)
            if not suffixes:
                os.rmdir(part_path)
            for suffix in suffixes:
                suffix_path = os.path.join(part_path, suffix)
                if not os.path.isdir(suffix_path):
                    continue
                for hsh in os.listdir(suffix_path):
                    hash_path = os.path.join(suffix_path, hsh)
                    if not os.path.isdir(hash_path):
                        continue
                    db_path = os.path.join(hash_path, hsh + '.db')
                    if os.path.exists(db_path):
                        yield (partition, db_path, node_id)

    walkers = [dbs_in(path, node_id) for path, node_id in datadirs]
    # Take one item from each still-active walker per pass until all of
    # them are exhausted.
    while walkers:
        for walker in walkers:
            try:
                yield next(walker)
            except StopIteration:
                walkers.remove(walker)
class ReplConnection(BufferedHTTPConnection):
    """
    Helper to simplify REPLICATEing to a remote server.
    """
    def __init__(self, node, partition, hash_, logger):
        """
        :param node: ring node dict; its replication ip/port form the host
        :param partition: partition number used in the request path
        :param hash_: database hash naming the remote resource
        :param logger: logger used to report response-read failures
        """
        self.logger = logger
        self.node = node
        host = "%s:%s" % (node['replication_ip'], node['replication_port'])
        BufferedHTTPConnection.__init__(self, host)
        self.path = '/%s/%s/%s' % (node['device'], partition, hash_)
    def replicate(self, *args):
        """
        Make an HTTP REPLICATE request

        :param args: list of json-encodable objects
        :returns: bufferedhttp response object, or None if the request or
                  the response read failed
        """
        try:
            body = json.dumps(args)
            self.request('REPLICATE', self.path, body,
                         {'Content-Type': 'application/json'})
            response = self.getresponse()
            # Eagerly read the body so callers can use response.data after
            # the connection is done with.
            response.data = response.read()
            return response
        except (Exception, Timeout):
            self.logger.exception(
                _('ERROR reading HTTP response from %s'), self.node)
            return None
class Replicator(Daemon):
"""
Implements the logic for directing db replication.
"""
    def __init__(self, conf, logger=None):
        """
        :param conf: configuration dict (typically the replicator section
                     of the server config)
        :param logger: optional logger; created from conf when omitted
        """
        self.conf = conf
        self.logger = logger or get_logger(conf, log_route='replicator')
        self.root = conf.get('devices', '/srv/node')
        self.mount_check = config_true_value(conf.get('mount_check', 'true'))
        self.bind_ip = conf.get('bind_ip', '0.0.0.0')
        self.port = int(conf.get('bind_port', self.default_port))
        concurrency = int(conf.get('concurrency', 8))
        self.cpool = GreenPool(size=concurrency)
        swift_dir = conf.get('swift_dir', '/etc/swift')
        self.ring = ring.Ring(swift_dir, ring_name=self.server_type)
        self._local_device_ids = set()
        self.per_diff = int(conf.get('per_diff', 1000))
        self.max_diffs = int(conf.get('max_diffs') or 100)
        # 'run_pause' is the legacy name for 'interval'.
        self.interval = int(conf.get('interval') or
                            conf.get('run_pause') or 30)
        self.node_timeout = float(conf.get('node_timeout', 10))
        self.conn_timeout = float(conf.get('conn_timeout', 0.5))
        self.rsync_compress = config_true_value(
            conf.get('rsync_compress', 'no'))
        self.rsync_module = conf.get('rsync_module', '').rstrip('/')
        if not self.rsync_module:
            self.rsync_module = '{replication_ip}::%s' % self.server_type
            # vm_test_mode is deprecated in favor of an explicit
            # rsync_module template; warn but keep honoring it.
            if config_true_value(conf.get('vm_test_mode', 'no')):
                self.logger.warn('Option %(type)s-replicator/vm_test_mode is '
                                 'deprecated and will be removed in a future '
                                 'version. Update your configuration to use '
                                 'option %(type)s-replicator/rsync_module.'
                                 % {'type': self.server_type})
                self.rsync_module += '{replication_port}'
        self.reclaim_age = float(conf.get('reclaim_age', 86400 * 7))
        swift.common.db.DB_PREALLOCATION = \
            config_true_value(conf.get('db_preallocation', 'f'))
        self._zero_stats()
        self.recon_cache_path = conf.get('recon_cache_path',
                                         '/var/cache/swift')
        self.recon_replicator = '%s.recon' % self.server_type
        self.rcache = os.path.join(self.recon_cache_path,
                                   self.recon_replicator)
        # Matches "<root>/<device>" so the device name can be recovered
        # from a db path (see extract_device).
        self.extract_device_re = re.compile('%s%s([^%s]+)' % (
            self.root, os.path.sep, os.path.sep))
def _zero_stats(self):
"""Zero out the stats."""
self.stats = {'attempted': 0, 'success': 0, 'failure': 0, 'ts_repl': 0,
'no_change': 0, 'hashmatch': 0, 'rsync': 0, 'diff': 0,
'remove': 0, 'empty': 0, 'remote_merge': 0,
'start': time.time(), 'diff_capped': 0,
'failure_nodes': {}}
    def _report_stats(self):
        """Report the current stats to the logs and the recon cache."""
        now = time.time()
        # The tiny epsilon guards against division by zero on a pass that
        # finished within the clock resolution.
        self.logger.info(
            _('Attempted to replicate %(count)d dbs in %(time).5f seconds '
              '(%(rate).5f/s)'),
            {'count': self.stats['attempted'],
             'time': now - self.stats['start'],
             'rate': self.stats['attempted'] /
                (now - self.stats['start'] + 0.0000001)})
        self.logger.info(_('Removed %(remove)d dbs') % self.stats)
        self.logger.info(_('%(success)s successes, %(failure)s failures')
                         % self.stats)
        dump_recon_cache(
            {'replication_stats': self.stats,
             'replication_time': now - self.stats['start'],
             'replication_last': now},
            self.rcache, self.logger)
        # One-line summary of the per-strategy counters.
        self.logger.info(' '.join(['%s:%s' % item for item in
                                   self.stats.items() if item[0] in
                       ('no_change', 'hashmatch', 'rsync', 'diff', 'ts_repl',
                        'empty', 'diff_capped')]))
def _add_failure_stats(self, failure_devs_info):
for node, dev in failure_devs_info:
self.stats['failure'] += 1
failure_devs = self.stats['failure_nodes'].setdefault(node, {})
failure_devs.setdefault(dev, 0)
failure_devs[dev] += 1
def _rsync_file(self, db_file, remote_file, whole_file=True,
different_region=False):
"""
Sync a single file using rsync. Used by _rsync_db to handle syncing.
:param db_file: file to be synced
:param remote_file: remote location to sync the DB file to
:param whole-file: if True, uses rsync's --whole-file flag
:param different_region: if True, the destination node is in a
different region
:returns: True if the sync was successful, False otherwise
"""
popen_args = ['rsync', '--quiet', '--no-motd',
'--timeout=%s' % int(math.ceil(self.node_timeout)),
'--contimeout=%s' % int(math.ceil(self.conn_timeout))]
if whole_file:
popen_args.append('--whole-file')
if self.rsync_compress and different_region:
# Allow for compression, but only if the remote node is in
# a different region than the local one.
popen_args.append('--compress')
popen_args.extend([db_file, remote_file])
proc = subprocess.Popen(popen_args)
proc.communicate()
if proc.returncode != 0:
self.logger.error(_('ERROR rsync failed with %(code)s: %(args)s'),
{'code': proc.returncode, 'args': popen_args})
return proc.returncode == 0
    def _rsync_db(self, broker, device, http, local_id,
                  replicate_method='complete_rsync', replicate_timeout=None,
                  different_region=False):
        """
        Sync a whole db using rsync.

        :param broker: DB broker object of DB to be synced
        :param device: device to sync to
        :param http: ReplConnection object
        :param local_id: unique ID of the local database replica
        :param replicate_method: remote operation to perform after rsync
        :param replicate_timeout: timeout to wait in seconds
        :param different_region: if True, the destination node is in a
                                 different region
        :returns: True if both the rsync and the follow-up REPLICATE call
                  succeeded, False otherwise
        """
        rsync_module = rsync_module_interpolation(self.rsync_module, device)
        rsync_path = '%s/tmp/%s' % (device['device'], local_id)
        remote_file = '%s/%s' % (rsync_module, rsync_path)
        mtime = os.path.getmtime(broker.db_file)
        # First pass: unlocked whole-file copy.
        if not self._rsync_file(broker.db_file, remote_file,
                                different_region=different_region):
            return False
        # perform block-level sync if the db was modified during the first sync
        if os.path.exists(broker.db_file + '-journal') or \
                os.path.getmtime(broker.db_file) > mtime:
            # grab a lock so nobody else can modify it
            with broker.lock():
                if not self._rsync_file(broker.db_file, remote_file,
                                        whole_file=False,
                                        different_region=different_region):
                    return False
        # Ask the remote to install (complete_rsync) or merge
        # (rsync_then_merge) the uploaded file.
        with Timeout(replicate_timeout or self.node_timeout):
            response = http.replicate(replicate_method, local_id)
        return response and response.status >= 200 and response.status < 300
    def _usync_db(self, point, broker, http, remote_id, local_id):
        """
        Sync a db by sending all records since the last sync.

        Sends at most ``max_diffs`` batches of ``per_diff`` rows; if rows
        remain after that, the attempt is capped and retried next pass.

        :param point: synchronization high water mark between the replicas
        :param broker: database broker object
        :param http: ReplConnection object for the remote server
        :param remote_id: database id for the remote replica
        :param local_id: database id for the local replica
        :returns: boolean indicating completion and success
        """
        self.stats['diff'] += 1
        self.logger.increment('diffs')
        self.logger.debug('Syncing chunks with %s, starting at %s',
                          http.host, point)
        sync_table = broker.get_syncs()
        objects = broker.get_items_since(point, self.per_diff)
        diffs = 0
        while len(objects) and diffs < self.max_diffs:
            diffs += 1
            with Timeout(self.node_timeout):
                response = http.replicate('merge_items', objects, local_id)
            if not response or response.status >= 300 or response.status < 200:
                if response:
                    self.logger.error(_('ERROR Bad response %(status)s from '
                                        '%(host)s'),
                                      {'status': response.status,
                                       'host': http.host})
                return False
            # replication relies on db order to send the next merge batch in
            # order with no gaps
            point = objects[-1]['ROWID']
            objects = broker.get_items_since(point, self.per_diff)
        if objects:
            # Still behind after max_diffs batches: give up for this pass.
            self.logger.debug(
                'Synchronization for %s has fallen more than '
                '%s rows behind; moving on and will try again next pass.',
                broker, self.max_diffs * self.per_diff)
            self.stats['diff_capped'] += 1
            self.logger.increment('diff_caps')
        else:
            # Fully caught up: exchange sync tables and record the new
            # outgoing sync point for this remote replica.
            with Timeout(self.node_timeout):
                response = http.replicate('merge_syncs', sync_table)
            if response and response.status >= 200 and response.status < 300:
                broker.merge_syncs([{'remote_id': remote_id,
                                     'sync_point': point}],
                                   incoming=False)
                return True
        return False
    def _in_sync(self, rinfo, info, broker, local_sync):
        """
        Determine whether or not two replicas of a databases are considered
        to be in sync.

        Falls through (implicitly returning None, which is falsy) when the
        replicas are not in sync.

        :param rinfo: remote database info
        :param info: local database info
        :param broker: database broker object
        :param local_sync: cached last sync point between replicas
        :returns: boolean indicating whether or not the replicas are in sync
        """
        # The remote already has every local row.
        if max(rinfo['point'], local_sync) >= info['max_row']:
            self.stats['no_change'] += 1
            self.logger.increment('no_changes')
            return True
        # Same content hash: record the remote's point so future passes
        # short-circuit on the check above.
        if rinfo['hash'] == info['hash']:
            self.stats['hashmatch'] += 1
            self.logger.increment('hashmatches')
            broker.merge_syncs([{'remote_id': rinfo['id'],
                                 'sync_point': rinfo['point']}],
                               incoming=False)
            return True
def _http_connect(self, node, partition, db_file):
"""
Make an http_connection using ReplConnection
:param node: node dictionary from the ring
:param partition: partition partition to send in the url
:param db_file: DB file
:returns: ReplConnection object
"""
return ReplConnection(node, partition,
os.path.basename(db_file).split('.', 1)[0],
self.logger)
def _gather_sync_args(self, info):
"""
Convert local replication_info to sync args tuple.
"""
sync_args_order = ('max_row', 'hash', 'id', 'created_at',
'put_timestamp', 'delete_timestamp', 'metadata')
return tuple(info[key] for key in sync_args_order)
    def _repl_to_node(self, node, broker, partition, info,
                      different_region=False):
        """
        Replicate a database to a node.

        :param node: node dictionary from the ring to be replicated to
        :param broker: DB broker for the DB to be replication
        :param partition: partition on the node to replicate to
        :param info: DB info as a dictionary of {'max_row', 'hash', 'id',
                     'created_at', 'put_timestamp', 'delete_timestamp',
                     'metadata'}
        :param different_region: if True, the destination node is in a
                                 different region
        :returns: True if successful, False otherwise
        """
        http = self._http_connect(node, partition, broker.db_file)
        # Ask the remote how its replica compares to ours.
        sync_args = self._gather_sync_args(info)
        with Timeout(self.node_timeout):
            response = http.replicate('sync', *sync_args)
        if not response:
            return False
        return self._handle_sync_response(node, response, info, broker, http,
                                          different_region=different_region)
    def _handle_sync_response(self, node, response, info, broker, http,
                              different_region=False):
        """
        Choose a replication strategy from the remote's 'sync' response:
        rsync a completely missing db, raise on an unmounted remote drive,
        or fall through to remote-merge / row-by-row usync.  Returns an
        implicit None (falsy) for any other response status.

        :returns: True on success, False (or None) otherwise
        :raises DriveNotMounted: if the remote reports insufficient storage
        """
        if response.status == HTTP_NOT_FOUND:  # completely missing, rsync
            self.stats['rsync'] += 1
            self.logger.increment('rsyncs')
            return self._rsync_db(broker, node, http, info['id'],
                                  different_region=different_region)
        elif response.status == HTTP_INSUFFICIENT_STORAGE:
            raise DriveNotMounted()
        elif response.status >= 200 and response.status < 300:
            rinfo = json.loads(response.data)
            local_sync = broker.get_sync(rinfo['id'], incoming=False)
            if self._in_sync(rinfo, info, broker, local_sync):
                return True
            # if the difference in rowids between the two differs by
            # more than 50% and the difference is greater than per_diff,
            # rsync then do a remote merge.
            # NOTE: difference > per_diff stops us from dropping to rsync
            # on smaller containers, who have only a few rows to sync.
            if rinfo['max_row'] / float(info['max_row']) < 0.5 and \
                    info['max_row'] - rinfo['max_row'] > self.per_diff:
                self.stats['remote_merge'] += 1
                self.logger.increment('remote_merges')
                return self._rsync_db(broker, node, http, info['id'],
                                      replicate_method='rsync_then_merge',
                                      replicate_timeout=(info['count'] / 2000),
                                      different_region=different_region)
            # else send diffs over to the remote server
            return self._usync_db(max(rinfo['point'], local_sync),
                                  broker, http, rinfo['id'], info['id'])
    def _post_replicate_hook(self, broker, info, responses):
        """
        Override point for subclasses; the base implementation does nothing.

        :param broker: the container that just replicated
        :param info: pre-replication full info dict
        :param responses: a list of bools indicating success from nodes
        """
        pass
    def _replicate_object(self, partition, object_file, node_id):
        """
        Replicate the db, choosing method based on whether or not it
        already exists on peers.  Misplaced and deleted-and-empty dbs are
        detected here; a db that should not live on this node is removed
        once every peer has a copy.

        :param partition: partition to be replicated to
        :param object_file: DB file name to be replicated
        :param node_id: node id of the node to be replicated to
        """
        start_time = now = time.time()
        self.logger.debug('Replicating db %s', object_file)
        self.stats['attempted'] += 1
        self.logger.increment('attempts')
        shouldbehere = True
        try:
            broker = self.brokerclass(object_file, pending_timeout=30)
            # Purge rows tombstoned long enough ago to be reclaimable.
            broker.reclaim(now - self.reclaim_age,
                           now - (self.reclaim_age * 2))
            info = broker.get_replication_info()
            bpart = self.ring.get_part(
                info['account'], info.get('container'))
            if bpart != int(partition):
                partition = bpart
                # Important to set this false here since the later check only
                # checks if it's on the proper device, not partition.
                shouldbehere = False
                name = '/' + quote(info['account'])
                if 'container' in info:
                    name += '/' + quote(info['container'])
                self.logger.error(
                    'Found %s for %s when it should be on partition %s; will '
                    'replicate out and remove.' % (object_file, name, bpart))
        except (Exception, Timeout) as e:
            # 'no such table' indicates a corrupt sqlite file: quarantine
            # it so a peer can re-replicate a good copy.
            if 'no such table' in str(e):
                self.logger.error(_('Quarantining DB %s'), object_file)
                quarantine_db(broker.db_file, broker.db_type)
            else:
                self.logger.exception(_('ERROR reading db %s'), object_file)
            nodes = self.ring.get_part_nodes(int(partition))
            self._add_failure_stats([(failure_dev['replication_ip'],
                                      failure_dev['device'])
                                     for failure_dev in nodes])
            self.logger.increment('failures')
            return
        # The db is considered deleted if the delete_timestamp value is greater
        # than the put_timestamp, and there are no objects.
        delete_timestamp = Timestamp(info.get('delete_timestamp') or 0)
        put_timestamp = Timestamp(info.get('put_timestamp') or 0)
        if delete_timestamp < (now - self.reclaim_age) and \
                delete_timestamp > put_timestamp and \
                info['count'] in (None, '', 0, '0'):
            if self.report_up_to_date(info):
                self.delete_db(broker)
            self.logger.timing_since('timing', start_time)
            return
        responses = []
        failure_devs_info = set()
        nodes = self.ring.get_part_nodes(int(partition))
        local_dev = None
        for node in nodes:
            if node['id'] == node_id:
                local_dev = node
                break
        if shouldbehere:
            shouldbehere = bool([n for n in nodes if n['id'] == node_id])
        # See Footnote [1] for an explanation of the repl_nodes assignment.
        i = 0
        while i < len(nodes) and nodes[i]['id'] != node_id:
            i += 1
        repl_nodes = nodes[i + 1:] + nodes[:i]
        more_nodes = self.ring.get_more_nodes(int(partition))
        if not local_dev:
            # Check further if local device is a handoff node
            for node in more_nodes:
                if node['id'] == node_id:
                    local_dev = node
                    break
        for node in repl_nodes:
            different_region = False
            if local_dev and local_dev['region'] != node['region']:
                # This additional information will help later if we
                # want to handle syncing to a node in different
                # region with some optimizations.
                different_region = True
            success = False
            try:
                success = self._repl_to_node(node, broker, partition, info,
                                             different_region)
            except DriveNotMounted:
                # Try a handoff node in place of the unmounted one.
                repl_nodes.append(next(more_nodes))
                self.logger.error(_('ERROR Remote drive not mounted %s'), node)
            except (Exception, Timeout):
                self.logger.exception(_('ERROR syncing %(file)s with node'
                                        ' %(node)s'),
                                      {'file': object_file, 'node': node})
            if not success:
                failure_devs_info.add((node['replication_ip'], node['device']))
            self.logger.increment('successes' if success else 'failures')
            responses.append(success)
        try:
            self._post_replicate_hook(broker, info, responses)
        except (Exception, Timeout):
            self.logger.exception('UNHANDLED EXCEPTION: in post replicate '
                                  'hook for %s', broker.db_file)
        if not shouldbehere and all(responses):
            # If the db shouldn't be on this node and has been successfully
            # synced to all of its peers, it can be removed.
            if not self.delete_db(broker):
                failure_devs_info.update(
                    [(failure_dev['replication_ip'], failure_dev['device'])
                     for failure_dev in repl_nodes])
        target_devs_info = set([(target_dev['replication_ip'],
                                 target_dev['device'])
                                for target_dev in repl_nodes])
        self.stats['success'] += len(target_devs_info - failure_devs_info)
        self._add_failure_stats(failure_devs_info)
        self.logger.timing_since('timing', start_time)
    def delete_db(self, broker):
        """
        Remove a database's hash directory from the local device and try to
        reap its (possibly now empty) suffix directory.

        :param broker: broker for the db to remove
        :returns: True on success, False if cleanup of the suffix dir failed
        """
        object_file = broker.db_file
        hash_dir = os.path.dirname(object_file)
        suf_dir = os.path.dirname(hash_dir)
        with lock_parent_directory(object_file):
            shutil.rmtree(hash_dir, True)
        try:
            os.rmdir(suf_dir)
        except OSError as err:
            # A non-empty or already-gone suffix dir is expected; anything
            # else is a real cleanup failure.
            if err.errno not in (errno.ENOENT, errno.ENOTEMPTY):
                self.logger.exception(
                    _('ERROR while trying to clean up %s') % suf_dir)
                return False
        self.stats['remove'] += 1
        device_name = self.extract_device(object_file)
        self.logger.increment('removes.' + device_name)
        return True
def extract_device(self, object_file):
"""
Extract the device name from an object path. Returns "UNKNOWN" if the
path could not be extracted successfully for some reason.
:param object_file: the path to a database file.
"""
match = self.extract_device_re.match(object_file)
if match:
return match.groups()[0]
return "UNKNOWN"
    def report_up_to_date(self, full_info):
        """
        Override point used by _replicate_object to gate deletion of a
        deleted, empty db; the base implementation always allows it.
        """
        return True
    def run_once(self, *args, **kwargs):
        """Run a replication pass once."""
        self._zero_stats()
        dirs = []
        ips = whataremyips(self.bind_ip)
        if not ips:
            self.logger.error(_('ERROR Failed to get my own IPs?'))
            return
        self._local_device_ids = set()
        found_local = False
        # Collect the datadirs of every ring device that lives on this
        # machine, skipping (and counting as failed) unmounted drives.
        for node in self.ring.devs:
            if node and is_local_device(ips, self.port,
                                        node['replication_ip'],
                                        node['replication_port']):
                found_local = True
                if self.mount_check and not ismount(
                        os.path.join(self.root, node['device'])):
                    self._add_failure_stats(
                        [(failure_dev['replication_ip'],
                          failure_dev['device'])
                         for failure_dev in self.ring.devs if failure_dev])
                    self.logger.warn(
                        _('Skipping %(device)s as it is not mounted') % node)
                    continue
                # Reap stale rsync temp files left from aborted transfers.
                unlink_older_than(
                    os.path.join(self.root, node['device'], 'tmp'),
                    time.time() - self.reclaim_age)
                datadir = os.path.join(self.root, node['device'], self.datadir)
                if os.path.isdir(datadir):
                    self._local_device_ids.add(node['id'])
                    dirs.append((datadir, node['id']))
        if not found_local:
            self.logger.error("Can't find itself %s with port %s in ring "
                              "file, not replicating",
                              ", ".join(ips), self.port)
        self.logger.info(_('Beginning replication run'))
        # Fan out one green thread per db, bounded by the coroutine pool.
        for part, object_file, node_id in roundrobin_datadirs(dirs):
            self.cpool.spawn_n(
                self._replicate_object, part, object_file, node_id)
        self.cpool.waitall()
        self.logger.info(_('Replication run OVER'))
        self._report_stats()
def run_forever(self, *args, **kwargs):
"""
Replicate dbs under the given root in an infinite loop.
"""
sleep(random.random() * self.interval)
while True:
begin = time.time()
try:
self.run_once()
except (Exception, Timeout):
self.logger.exception(_('ERROR trying to replicate'))
elapsed = time.time() - begin
if elapsed < self.interval:
sleep(self.interval - elapsed)
class ReplicatorRpc(object):
"""Handle Replication RPC calls. TODO(redbo): document please :)"""
def __init__(self, root, datadir, broker_class, mount_check=True,
logger=None):
self.root = root
self.datadir = datadir
self.broker_class = broker_class
self.mount_check = mount_check
self.logger = logger or get_logger({}, log_route='replicator-rpc')
    def dispatch(self, replicate_args, args):
        """
        Route a REPLICATE request to the matching handler method.

        :param replicate_args: (drive, partition, hash) from the URL
        :param args: json-decoded request body; first element is the verb
        :returns: a swob response object
        """
        if not hasattr(args, 'pop'):
            return HTTPBadRequest(body='Invalid object type')
        op = args.pop(0)
        drive, partition, hsh = replicate_args
        if self.mount_check and not ismount(os.path.join(self.root, drive)):
            return Response(status='507 %s is not mounted' % drive)
        db_file = os.path.join(self.root, drive,
                               storage_directory(self.datadir, partition, hsh),
                               hsh + '.db')
        # The two rsync verbs operate on a tmp file, so they must not
        # require db_file to exist yet.
        if op == 'rsync_then_merge':
            return self.rsync_then_merge(drive, db_file, args)
        if op == 'complete_rsync':
            return self.complete_rsync(drive, db_file, args)
        else:
            # someone might be about to rsync a db to us,
            # make sure there's a tmp dir to receive it.
            mkdirs(os.path.join(self.root, drive, 'tmp'))
            if not os.path.exists(db_file):
                return HTTPNotFound()
            return getattr(self, op)(self.broker_class(db_file), args)
@contextmanager
def debug_timing(self, name):
timemark = time.time()
yield
timespan = time.time() - timemark
if timespan > DEBUG_TIMINGS_THRESHOLD:
self.logger.debug(
'replicator-rpc-sync time for %s: %.02fs' % (
name, timespan))
def _parse_sync_args(self, args):
"""
Convert remote sync args to remote_info dictionary.
"""
(remote_sync, hash_, id_, created_at, put_timestamp,
delete_timestamp, metadata) = args[:7]
remote_metadata = {}
if metadata:
try:
remote_metadata = json.loads(metadata)
except ValueError:
self.logger.error("Unable to decode remote metadata %r",
metadata)
remote_info = {
'point': remote_sync,
'hash': hash_,
'id': id_,
'created_at': created_at,
'put_timestamp': put_timestamp,
'delete_timestamp': delete_timestamp,
'metadata': remote_metadata,
}
return remote_info
    def sync(self, broker, args):
        # Entry point for the REPLICATE 'sync' verb: parse the remote's
        # replication info and reconcile it with the local db.
        remote_info = self._parse_sync_args(args)
        return self._handle_sync_request(broker, remote_info)
    def _get_synced_replication_info(self, broker, remote_info):
        """
        Apply any changes to the broker based on remote_info and return the
        current replication info.  The base implementation applies no
        changes; subclasses may override to sync state first.

        :param broker: the database broker
        :param remote_info: the remote replication info
        :returns: local broker replication info
        """
        return broker.get_replication_info()
    def _handle_sync_request(self, broker, remote_info):
        """
        Update metadata, timestamps, sync points.

        :returns: a Response whose body is the local replication info (with
                  'point' set to the incoming sync point), or HTTPNotFound
                  if the db had to be quarantined
        """
        with self.debug_timing('info'):
            try:
                info = self._get_synced_replication_info(broker, remote_info)
            except (Exception, Timeout) as e:
                # 'no such table' means the sqlite file is corrupt.
                if 'no such table' in str(e):
                    self.logger.error(_("Quarantining DB %s"), broker)
                    quarantine_db(broker.db_file, broker.db_type)
                    return HTTPNotFound()
                raise
        if remote_info['metadata']:
            with self.debug_timing('update_metadata'):
                broker.update_metadata(remote_info['metadata'])
        sync_timestamps = ('created_at', 'put_timestamp', 'delete_timestamp')
        if any(info[ts] != remote_info[ts] for ts in sync_timestamps):
            with self.debug_timing('merge_timestamps'):
                broker.merge_timestamps(*(remote_info[ts] for ts in
                                          sync_timestamps))
        with self.debug_timing('get_sync'):
            info['point'] = broker.get_sync(remote_info['id'])
        # Matching hashes let us advance the incoming sync point directly.
        if remote_info['hash'] == info['hash'] and \
                info['point'] < remote_info['point']:
            with self.debug_timing('merge_syncs'):
                translate = {
                    'remote_id': 'id',
                    'sync_point': 'point',
                }
                data = dict((k, remote_info[v]) for k, v in translate.items())
                broker.merge_syncs([data])
                info['point'] = remote_info['point']
        return Response(json.dumps(info))
    def merge_syncs(self, broker, args):
        # REPLICATE 'merge_syncs': record the remote's sync points locally.
        broker.merge_syncs(args[0])
        return HTTPAccepted()
    def merge_items(self, broker, args):
        # REPLICATE 'merge_items': merge a batch of rows from the remote
        # replica identified by args[1] into the local db.
        broker.merge_items(args[0], args[1])
        return HTTPAccepted()
    def complete_rsync(self, drive, db_file, args):
        """
        Install a freshly rsynced db file (named args[0] in the drive's tmp
        dir) as the live db, refusing if the db already exists locally.
        """
        old_filename = os.path.join(self.root, drive, 'tmp', args[0])
        if os.path.exists(db_file):
            return HTTPNotFound()
        if not os.path.exists(old_filename):
            return HTTPNotFound()
        broker = self.broker_class(old_filename)
        # Give the copy its own replica id before it goes live.
        broker.newid(args[0])
        renamer(old_filename, db_file)
        return HTTPNoContent()
    def rsync_then_merge(self, drive, db_file, args):
        """
        Merge the rows of the existing local db into a freshly rsynced copy
        (named args[0] in the drive's tmp dir), then swap the merged copy
        into place; both files must already exist.
        """
        old_filename = os.path.join(self.root, drive, 'tmp', args[0])
        if not os.path.exists(db_file) or not os.path.exists(old_filename):
            return HTTPNotFound()
        new_broker = self.broker_class(old_filename)
        existing_broker = self.broker_class(db_file)
        point = -1
        objects = existing_broker.get_items_since(point, 1000)
        # Copy local rows into the new db in 1000-row batches, yielding the
        # (eventlet) hub between batches via sleep().
        while len(objects):
            new_broker.merge_items(objects)
            point = objects[-1]['ROWID']
            objects = existing_broker.get_items_since(point, 1000)
            sleep()
        new_broker.newid(args[0])
        renamer(old_filename, db_file)
        return HTTPNoContent()
# Footnote [1]:
# This orders the nodes so that, given nodes a b c, a will contact b then c,
# b will contact c then a, and c will contact a then b -- in other words, each
# node will always contact the next node in the list first.
# This helps in the case where databases are all way out of sync, so each
# node is likely to be sending to a different node than it's receiving from,
# rather than two nodes talking to each other, starving out the third.
# If the third didn't even have a copy and the first two nodes were way out
# of sync, such starvation would mean the third node wouldn't get any copy
# until the first two nodes finally got in sync, which could take a while.
# This new ordering ensures such starvation doesn't occur, making the data
# more durable.
| 43.671037 | 79 | 0.576174 |
a451582f7c7a186ab002c673f19a7ad1bedba895 | 8,032 | py | Python | src/oci/ai_vision/models/image_text_detection_feature.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/ai_vision/models/image_text_detection_feature.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/ai_vision/models/image_text_detection_feature.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from .image_feature import ImageFeature
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ImageTextDetectionFeature(ImageFeature):
"""
Text detection parameters.
"""
#: A constant which can be used with the language property of a ImageTextDetectionFeature.
#: This constant has a value of "ENG"
LANGUAGE_ENG = "ENG"
#: A constant which can be used with the language property of a ImageTextDetectionFeature.
#: This constant has a value of "CES"
LANGUAGE_CES = "CES"
#: A constant which can be used with the language property of a ImageTextDetectionFeature.
#: This constant has a value of "DAN"
LANGUAGE_DAN = "DAN"
#: A constant which can be used with the language property of a ImageTextDetectionFeature.
#: This constant has a value of "NLD"
LANGUAGE_NLD = "NLD"
#: A constant which can be used with the language property of a ImageTextDetectionFeature.
#: This constant has a value of "FIN"
LANGUAGE_FIN = "FIN"
#: A constant which can be used with the language property of a ImageTextDetectionFeature.
#: This constant has a value of "FRA"
LANGUAGE_FRA = "FRA"
#: A constant which can be used with the language property of a ImageTextDetectionFeature.
#: This constant has a value of "DEU"
LANGUAGE_DEU = "DEU"
#: A constant which can be used with the language property of a ImageTextDetectionFeature.
#: This constant has a value of "ELL"
LANGUAGE_ELL = "ELL"
#: A constant which can be used with the language property of a ImageTextDetectionFeature.
#: This constant has a value of "HUN"
LANGUAGE_HUN = "HUN"
#: A constant which can be used with the language property of a ImageTextDetectionFeature.
#: This constant has a value of "ITA"
LANGUAGE_ITA = "ITA"
#: A constant which can be used with the language property of a ImageTextDetectionFeature.
#: This constant has a value of "NOR"
LANGUAGE_NOR = "NOR"
#: A constant which can be used with the language property of a ImageTextDetectionFeature.
#: This constant has a value of "POL"
LANGUAGE_POL = "POL"
#: A constant which can be used with the language property of a ImageTextDetectionFeature.
#: This constant has a value of "POR"
LANGUAGE_POR = "POR"
#: A constant which can be used with the language property of a ImageTextDetectionFeature.
#: This constant has a value of "RON"
LANGUAGE_RON = "RON"
#: A constant which can be used with the language property of a ImageTextDetectionFeature.
#: This constant has a value of "RUS"
LANGUAGE_RUS = "RUS"
#: A constant which can be used with the language property of a ImageTextDetectionFeature.
#: This constant has a value of "SLK"
LANGUAGE_SLK = "SLK"
#: A constant which can be used with the language property of a ImageTextDetectionFeature.
#: This constant has a value of "SPA"
LANGUAGE_SPA = "SPA"
#: A constant which can be used with the language property of a ImageTextDetectionFeature.
#: This constant has a value of "SWE"
LANGUAGE_SWE = "SWE"
#: A constant which can be used with the language property of a ImageTextDetectionFeature.
#: This constant has a value of "TUR"
LANGUAGE_TUR = "TUR"
#: A constant which can be used with the language property of a ImageTextDetectionFeature.
#: This constant has a value of "ARA"
LANGUAGE_ARA = "ARA"
#: A constant which can be used with the language property of a ImageTextDetectionFeature.
#: This constant has a value of "CHI_SIM"
LANGUAGE_CHI_SIM = "CHI_SIM"
#: A constant which can be used with the language property of a ImageTextDetectionFeature.
#: This constant has a value of "HIN"
LANGUAGE_HIN = "HIN"
#: A constant which can be used with the language property of a ImageTextDetectionFeature.
#: This constant has a value of "JPN"
LANGUAGE_JPN = "JPN"
#: A constant which can be used with the language property of a ImageTextDetectionFeature.
#: This constant has a value of "KOR"
LANGUAGE_KOR = "KOR"
#: A constant which can be used with the language property of a ImageTextDetectionFeature.
#: This constant has a value of "OTHERS"
LANGUAGE_OTHERS = "OTHERS"
    def __init__(self, **kwargs):
        """
        Initializes a new ImageTextDetectionFeature object with values from keyword arguments. The default value of the :py:attr:`~oci.ai_vision.models.ImageTextDetectionFeature.feature_type` attribute
        of this class is ``TEXT_DETECTION`` and it should not be changed.

        The following keyword arguments are supported (corresponding to the getters/setters of this class):

        :param feature_type:
            The value to assign to the feature_type property of this ImageTextDetectionFeature.
            Allowed values for this property are: "IMAGE_CLASSIFICATION", "OBJECT_DETECTION", "TEXT_DETECTION", 'UNKNOWN_ENUM_VALUE'.
            Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
        :type feature_type: str

        :param language:
            The value to assign to the language property of this ImageTextDetectionFeature.
            Allowed values for this property are: "ENG", "CES", "DAN", "NLD", "FIN", "FRA", "DEU", "ELL", "HUN", "ITA", "NOR", "POL", "POR", "RON", "RUS", "SLK", "SPA", "SWE", "TUR", "ARA", "CHI_SIM", "HIN", "JPN", "KOR", "OTHERS", 'UNKNOWN_ENUM_VALUE'.
            Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
        :type language: str

        """
        # Attribute-name -> swagger type mapping, consumed by the SDK's
        # generic (de)serialization machinery.
        self.swagger_types = {
            'feature_type': 'str',
            'language': 'str'
        }
        # Attribute-name -> JSON wire-field-name mapping.
        self.attribute_map = {
            'feature_type': 'featureType',
            'language': 'language'
        }
        self._feature_type = None
        self._language = None
        # This subtype is discriminated by featureType; it is pinned to
        # TEXT_DETECTION here (overwriting the None default just above)
        # and, per the docstring, should not be changed by callers.
        self._feature_type = 'TEXT_DETECTION'
    @property
    def language(self):
        """
        Gets the language of this ImageTextDetectionFeature.
        Language of the document image, abbreviated according to ISO 639-2.

        Allowed values for this property are: "ENG", "CES", "DAN", "NLD", "FIN", "FRA", "DEU", "ELL", "HUN", "ITA", "NOR", "POL", "POR", "RON", "RUS", "SLK", "SPA", "SWE", "TUR", "ARA", "CHI_SIM", "HIN", "JPN", "KOR", "OTHERS", 'UNKNOWN_ENUM_VALUE'.
        Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.

        :return: The language of this ImageTextDetectionFeature.
        :rtype: str
        """
        # Plain accessor; value validation/coercion happens in the setter.
        return self._language
    @language.setter
    def language(self, language):
        """
        Sets the language of this ImageTextDetectionFeature.
        Language of the document image, abbreviated according to ISO 639-2.

        :param language: The language of this ImageTextDetectionFeature.
        :type: str
        """
        allowed_values = ["ENG", "CES", "DAN", "NLD", "FIN", "FRA", "DEU", "ELL", "HUN", "ITA", "NOR", "POL", "POR", "RON", "RUS", "SLK", "SPA", "SWE", "TUR", "ARA", "CHI_SIM", "HIN", "JPN", "KOR", "OTHERS"]
        # Values outside the enum are silently coerced (not rejected) to the
        # sentinel, so responses from newer service versions never raise here.
        # value_allowed_none_or_none_sentinel also accepts None as valid.
        if not value_allowed_none_or_none_sentinel(language, allowed_values):
            language = 'UNKNOWN_ENUM_VALUE'
        self._language = language
    def __repr__(self):
        # Debug representation: formatted_flat_dict (oci.util helper imported
        # elsewhere in this file) renders the model's attributes as a flat dict.
        return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| 42.273684 | 253 | 0.678536 |
7d215c940c03177cc0f3c276fc1599cf8d411280 | 56,204 | py | Python | pandas/tests/arithmetic/test_period.py | RakhithJK/pandas | 0eeda645212c240d6cbdef8e3ba4834c3763553b | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 28,899 | 2016-10-13T03:32:12.000Z | 2022-03-31T21:39:05.000Z | pandas/tests/arithmetic/test_period.py | RakhithJK/pandas | 0eeda645212c240d6cbdef8e3ba4834c3763553b | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 31,004 | 2016-10-12T23:22:27.000Z | 2022-03-31T23:17:38.000Z | pandas/tests/arithmetic/test_period.py | RakhithJK/pandas | 0eeda645212c240d6cbdef8e3ba4834c3763553b | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 15,149 | 2016-10-13T03:21:31.000Z | 2022-03-31T18:46:47.000Z | # Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for Period dtype
import operator
import numpy as np
import pytest
from pandas._libs.tslibs import (
IncompatibleFrequency,
Period,
Timestamp,
to_offset,
)
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
PeriodIndex,
Series,
Timedelta,
TimedeltaIndex,
period_range,
)
import pandas._testing as tm
from pandas.core import ops
from pandas.core.arrays import TimedeltaArray
from pandas.tests.arithmetic.common import assert_invalid_comparison
# ------------------------------------------------------------------
# Comparisons
class TestPeriodArrayLikeComparisons:
    # Comparison tests for PeriodDtype vectors fully parametrized over
    # DataFrame/Series/PeriodIndex/PeriodArray. Ideally all comparison
    # tests will eventually end up here.

    def test_compare_zerodim(self, box_with_array):
        # GH#26689 make sure we unbox zero-dimensional arrays
        # For Index/ExtensionArray boxes the comparison result is a plain
        # ndarray of bools rather than the box type itself.
        xbox = (
            box_with_array if box_with_array not in [pd.Index, pd.array] else np.ndarray
        )

        pi = period_range("2000", periods=4)
        # 0-dim ndarray wrapping the first Period
        other = np.array(pi.to_numpy()[0])

        pi = tm.box_expected(pi, box_with_array)
        result = pi <= other
        expected = np.array([True, False, False, False])
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(result, expected)

    @pytest.mark.parametrize(
        "scalar", ["foo", Timestamp.now(), Timedelta(days=4), 9, 9.5]
    )
    def test_compare_invalid_scalar(self, box_with_array, scalar):
        # comparison with scalar that cannot be interpreted as a Period
        pi = period_range("2000", periods=4)
        parr = tm.box_expected(pi, box_with_array)
        # ==/!= give all-False/all-True; ordered comparisons raise TypeError
        assert_invalid_comparison(parr, scalar, box_with_array)

    @pytest.mark.parametrize(
        "other",
        [
            pd.date_range("2000", periods=4).array,
            pd.timedelta_range("1D", periods=4).array,
            np.arange(4),
            np.arange(4).astype(np.float64),
            list(range(4)),
        ],
    )
    def test_compare_invalid_listlike(self, box_with_array, other):
        # listlikes with non-Period contents are invalid comparison operands
        pi = period_range("2000", periods=4)
        parr = tm.box_expected(pi, box_with_array)
        assert_invalid_comparison(parr, other, box_with_array)

    @pytest.mark.parametrize("other_box", [list, np.array, lambda x: x.astype(object)])
    def test_compare_object_dtype(self, box_with_array, other_box):
        # Comparisons against object-dtype/listlike containers of Periods
        # behave like comparisons against a Period vector.
        pi = period_range("2000", periods=5)
        parr = tm.box_expected(pi, box_with_array)

        xbox = np.ndarray if box_with_array in [pd.Index, pd.array] else box_with_array

        # Same ordering: every elementwise ==/<=/>= is True
        other = other_box(pi)

        expected = np.array([True, True, True, True, True])
        expected = tm.box_expected(expected, xbox)

        result = parr == other
        tm.assert_equal(result, expected)
        result = parr <= other
        tm.assert_equal(result, expected)
        result = parr >= other
        tm.assert_equal(result, expected)

        result = parr != other
        tm.assert_equal(result, ~expected)
        result = parr < other
        tm.assert_equal(result, ~expected)
        result = parr > other
        tm.assert_equal(result, ~expected)

        # Reversed ordering: only the middle element matches positionally
        other = other_box(pi[::-1])

        expected = np.array([False, False, True, False, False])
        expected = tm.box_expected(expected, xbox)
        result = parr == other
        tm.assert_equal(result, expected)

        expected = np.array([True, True, True, False, False])
        expected = tm.box_expected(expected, xbox)
        result = parr <= other
        tm.assert_equal(result, expected)

        expected = np.array([False, False, True, True, True])
        expected = tm.box_expected(expected, xbox)
        result = parr >= other
        tm.assert_equal(result, expected)

        expected = np.array([True, True, False, True, True])
        expected = tm.box_expected(expected, xbox)
        result = parr != other
        tm.assert_equal(result, expected)

        expected = np.array([True, True, False, False, False])
        expected = tm.box_expected(expected, xbox)
        result = parr < other
        tm.assert_equal(result, expected)

        expected = np.array([False, False, False, True, True])
        expected = tm.box_expected(expected, xbox)
        result = parr > other
        tm.assert_equal(result, expected)
class TestPeriodIndexComparisons:
    # Comparison tests specific to PeriodIndex (and, in places, PeriodArray
    # via box_with_array parametrization).
    # TODO: parameterize over boxes

    @pytest.mark.parametrize("other", ["2017", Period("2017", freq="D")])
    def test_eq(self, other):
        # A matching string is parsed to a Period for equality purposes.
        idx = PeriodIndex(["2017", "2017", "2018"], freq="D")
        expected = np.array([True, True, False])
        result = idx == other

        tm.assert_numpy_array_equal(result, expected)

    @pytest.mark.parametrize(
        "other",
        [
            2017,
            [2017, 2017, 2017],
            np.array([2017, 2017, 2017]),
            np.array([2017, 2017, 2017], dtype=object),
            pd.Index([2017, 2017, 2017]),
        ],
    )
    def test_eq_integer_disallowed(self, other):
        # match Period semantics by not treating integers as Periods
        idx = PeriodIndex(["2017", "2017", "2018"], freq="D")
        expected = np.array([False, False, False])
        result = idx == other

        tm.assert_numpy_array_equal(result, expected)
        # Ordered comparisons against integers must raise, not coerce.
        msg = "|".join(
            [
                "not supported between instances of 'Period' and 'int'",
                r"Invalid comparison between dtype=period\[D\] and ",
            ]
        )
        with pytest.raises(TypeError, match=msg):
            idx < other

        with pytest.raises(TypeError, match=msg):
            idx > other

        with pytest.raises(TypeError, match=msg):
            idx <= other

        with pytest.raises(TypeError, match=msg):
            idx >= other

    def test_pi_cmp_period(self):
        # Comparison against a scalar Period matches elementwise ndarray cmp.
        idx = period_range("2007-01", periods=20, freq="M")

        result = idx < idx[10]
        exp = idx.values < idx.values[10]
        tm.assert_numpy_array_equal(result, exp)

    # TODO: moved from test_datetime64; de-duplicate with version below
    def test_parr_cmp_period_scalar2(self, box_with_array):
        xbox = (
            box_with_array if box_with_array not in [pd.Index, pd.array] else np.ndarray
        )

        pi = period_range("2000-01-01", periods=10, freq="D")

        val = Period("2000-01-04", freq="D")
        expected = [x > val for x in pi]

        ser = tm.box_expected(pi, box_with_array)
        expected = tm.box_expected(expected, xbox)
        result = ser > val
        tm.assert_equal(result, expected)

        val = pi[5]
        result = ser > val
        expected = [x > val for x in pi]
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(result, expected)

    @pytest.mark.parametrize("freq", ["M", "2M", "3M"])
    def test_parr_cmp_period_scalar(self, freq, box_with_array):
        # GH#13200
        # Full set of comparisons (and their reflected forms) against a
        # scalar Period of matching freq.
        xbox = np.ndarray if box_with_array in [pd.Index, pd.array] else box_with_array

        base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
        base = tm.box_expected(base, box_with_array)
        per = Period("2011-02", freq=freq)

        exp = np.array([False, True, False, False])
        exp = tm.box_expected(exp, xbox)
        tm.assert_equal(base == per, exp)
        tm.assert_equal(per == base, exp)

        exp = np.array([True, False, True, True])
        exp = tm.box_expected(exp, xbox)
        tm.assert_equal(base != per, exp)
        tm.assert_equal(per != base, exp)

        exp = np.array([False, False, True, True])
        exp = tm.box_expected(exp, xbox)
        tm.assert_equal(base > per, exp)
        tm.assert_equal(per < base, exp)

        exp = np.array([True, False, False, False])
        exp = tm.box_expected(exp, xbox)
        tm.assert_equal(base < per, exp)
        tm.assert_equal(per > base, exp)

        exp = np.array([False, True, True, True])
        exp = tm.box_expected(exp, xbox)
        tm.assert_equal(base >= per, exp)
        tm.assert_equal(per <= base, exp)

        exp = np.array([True, True, False, False])
        exp = tm.box_expected(exp, xbox)
        tm.assert_equal(base <= per, exp)
        tm.assert_equal(per >= base, exp)

    @pytest.mark.parametrize("freq", ["M", "2M", "3M"])
    def test_parr_cmp_pi(self, freq, box_with_array):
        # GH#13200
        # Elementwise comparisons between two Period vectors of same freq.
        xbox = np.ndarray if box_with_array in [pd.Index, pd.array] else box_with_array

        base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
        base = tm.box_expected(base, box_with_array)

        # TODO: could also box idx?
        idx = PeriodIndex(["2011-02", "2011-01", "2011-03", "2011-05"], freq=freq)

        exp = np.array([False, False, True, False])
        exp = tm.box_expected(exp, xbox)
        tm.assert_equal(base == idx, exp)

        exp = np.array([True, True, False, True])
        exp = tm.box_expected(exp, xbox)
        tm.assert_equal(base != idx, exp)

        exp = np.array([False, True, False, False])
        exp = tm.box_expected(exp, xbox)
        tm.assert_equal(base > idx, exp)

        exp = np.array([True, False, False, True])
        exp = tm.box_expected(exp, xbox)
        tm.assert_equal(base < idx, exp)

        exp = np.array([False, True, True, False])
        exp = tm.box_expected(exp, xbox)
        tm.assert_equal(base >= idx, exp)

        exp = np.array([True, False, True, True])
        exp = tm.box_expected(exp, xbox)
        tm.assert_equal(base <= idx, exp)

    @pytest.mark.parametrize("freq", ["M", "2M", "3M"])
    def test_parr_cmp_pi_mismatched_freq(self, freq, box_with_array):
        # GH#13200
        # different base freq
        # Ordered comparisons with mismatched freq must raise TypeError.
        base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
        base = tm.box_expected(base, box_with_array)

        msg = rf"Invalid comparison between dtype=period\[{freq}\] and Period"
        with pytest.raises(TypeError, match=msg):
            base <= Period("2011", freq="A")

        with pytest.raises(TypeError, match=msg):
            Period("2011", freq="A") >= base

        # TODO: Could parametrize over boxes for idx?
        idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="A")
        # The reflected (array-first) message names PeriodArray instead.
        rev_msg = r"Invalid comparison between dtype=period\[A-DEC\] and PeriodArray"
        idx_msg = rev_msg if box_with_array in [tm.to_array, pd.array] else msg
        with pytest.raises(TypeError, match=idx_msg):
            base <= idx

        # Different frequency
        msg = rf"Invalid comparison between dtype=period\[{freq}\] and Period"
        with pytest.raises(TypeError, match=msg):
            base <= Period("2011", freq="4M")

        with pytest.raises(TypeError, match=msg):
            Period("2011", freq="4M") >= base

        idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="4M")
        rev_msg = r"Invalid comparison between dtype=period\[4M\] and PeriodArray"
        idx_msg = rev_msg if box_with_array in [tm.to_array, pd.array] else msg
        with pytest.raises(TypeError, match=idx_msg):
            base <= idx

    @pytest.mark.parametrize("freq", ["M", "2M", "3M"])
    def test_pi_cmp_nat(self, freq):
        # NaT entries: never equal to anything, never ordered before/after.
        idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)

        result = idx1 > Period("2011-02", freq=freq)
        exp = np.array([False, False, False, True])
        tm.assert_numpy_array_equal(result, exp)
        result = Period("2011-02", freq=freq) < idx1
        tm.assert_numpy_array_equal(result, exp)

        result = idx1 == Period("NaT", freq=freq)
        exp = np.array([False, False, False, False])
        tm.assert_numpy_array_equal(result, exp)
        result = Period("NaT", freq=freq) == idx1
        tm.assert_numpy_array_equal(result, exp)

        result = idx1 != Period("NaT", freq=freq)
        exp = np.array([True, True, True, True])
        tm.assert_numpy_array_equal(result, exp)
        result = Period("NaT", freq=freq) != idx1
        tm.assert_numpy_array_equal(result, exp)

        idx2 = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq=freq)
        result = idx1 < idx2
        exp = np.array([True, False, False, False])
        tm.assert_numpy_array_equal(result, exp)

        result = idx1 == idx2
        exp = np.array([False, False, False, False])
        tm.assert_numpy_array_equal(result, exp)

        result = idx1 != idx2
        exp = np.array([True, True, True, True])
        tm.assert_numpy_array_equal(result, exp)

        # Even self-comparison is False at the NaT position.
        result = idx1 == idx1
        exp = np.array([True, True, False, True])
        tm.assert_numpy_array_equal(result, exp)

        result = idx1 != idx1
        exp = np.array([False, False, True, False])
        tm.assert_numpy_array_equal(result, exp)

    @pytest.mark.parametrize("freq", ["M", "2M", "3M"])
    def test_pi_cmp_nat_mismatched_freq_raises(self, freq):
        # Mismatched freq: ordered comparison raises; == returns all-False.
        idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)

        diff = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq="4M")
        msg = rf"Invalid comparison between dtype=period\[{freq}\] and PeriodArray"
        with pytest.raises(TypeError, match=msg):
            idx1 > diff

        result = idx1 == diff
        expected = np.array([False, False, False, False], dtype=bool)
        tm.assert_numpy_array_equal(result, expected)

    # TODO: De-duplicate with test_pi_cmp_nat
    @pytest.mark.parametrize("dtype", [object, None])
    def test_comp_nat(self, dtype):
        # NaT semantics hold for both period dtype and object dtype.
        left = PeriodIndex([Period("2011-01-01"), pd.NaT, Period("2011-01-03")])
        right = PeriodIndex([pd.NaT, pd.NaT, Period("2011-01-03")])

        if dtype is not None:
            left = left.astype(dtype)
            right = right.astype(dtype)

        result = left == right
        expected = np.array([False, False, True])
        tm.assert_numpy_array_equal(result, expected)

        result = left != right
        expected = np.array([True, True, False])
        tm.assert_numpy_array_equal(result, expected)

        expected = np.array([False, False, False])
        tm.assert_numpy_array_equal(left == pd.NaT, expected)
        tm.assert_numpy_array_equal(pd.NaT == right, expected)

        expected = np.array([True, True, True])
        tm.assert_numpy_array_equal(left != pd.NaT, expected)
        tm.assert_numpy_array_equal(pd.NaT != left, expected)

        expected = np.array([False, False, False])
        tm.assert_numpy_array_equal(left < pd.NaT, expected)
        tm.assert_numpy_array_equal(pd.NaT > left, expected)
class TestPeriodSeriesComparisons:
    def test_cmp_series_period_series_mixed_freq(self):
        # GH#13200
        # Object-dtype Series of Periods with mixed (A/M) freqs compare
        # elementwise, Period-to-Period.
        left = Series(
            [
                Period("2011", freq="A"),
                Period("2011-02", freq="M"),
                Period("2013", freq="A"),
                Period("2011-04", freq="M"),
            ]
        )
        right = Series(
            [
                Period("2012", freq="A"),
                Period("2011-01", freq="M"),
                Period("2013", freq="A"),
                Period("2011-05", freq="M"),
            ]
        )

        # (operator, expected elementwise result) pairs
        cases = [
            (operator.eq, [False, False, True, False]),
            (operator.ne, [True, True, False, True]),
            (operator.gt, [False, True, False, False]),
            (operator.lt, [True, False, False, True]),
            (operator.ge, [False, True, True, False]),
            (operator.le, [True, False, True, True]),
        ]
        for op, expected in cases:
            tm.assert_series_equal(op(left, right), Series(expected))
class TestPeriodIndexSeriesComparisonConsistency:
    """Test PeriodIndex and Period Series Ops consistency"""

    # TODO: needs parametrization+de-duplication

    def _check(self, values, func, expected):
        # Test PeriodIndex and Period Series Ops consistency
        # Apply ``func`` to both a PeriodIndex and an equivalent Series and
        # require identical results (ndarray vs Series-wrapped).
        idx = PeriodIndex(values)
        result = func(idx)

        # check that we don't pass an unwanted type to tm.assert_equal
        assert isinstance(expected, (pd.Index, np.ndarray))
        tm.assert_equal(result, expected)

        s = Series(values)
        result = func(s)

        exp = Series(expected, name=values.name)
        tm.assert_series_equal(result, exp)

    def test_pi_comp_period(self):
        idx = PeriodIndex(
            ["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx"
        )

        f = lambda x: x == Period("2011-03", freq="M")
        exp = np.array([False, False, True, False], dtype=np.bool_)
        self._check(idx, f, exp)
        f = lambda x: Period("2011-03", freq="M") == x
        self._check(idx, f, exp)

        f = lambda x: x != Period("2011-03", freq="M")
        exp = np.array([True, True, False, True], dtype=np.bool_)
        self._check(idx, f, exp)
        f = lambda x: Period("2011-03", freq="M") != x
        self._check(idx, f, exp)

        f = lambda x: Period("2011-03", freq="M") >= x
        exp = np.array([True, True, True, False], dtype=np.bool_)
        self._check(idx, f, exp)

        f = lambda x: x > Period("2011-03", freq="M")
        exp = np.array([False, False, False, True], dtype=np.bool_)
        self._check(idx, f, exp)

        # NOTE(review): this repeats the `per >= x` check from above verbatim;
        # it was likely intended to cover a different comparison (e.g. x < per).
        # Left unchanged to preserve the test exactly — confirm upstream.
        f = lambda x: Period("2011-03", freq="M") >= x
        exp = np.array([True, True, True, False], dtype=np.bool_)
        self._check(idx, f, exp)

    def test_pi_comp_period_nat(self):
        # Same consistency checks with a NaT entry in the index.
        idx = PeriodIndex(
            ["2011-01", "NaT", "2011-03", "2011-04"], freq="M", name="idx"
        )

        f = lambda x: x == Period("2011-03", freq="M")
        exp = np.array([False, False, True, False], dtype=np.bool_)
        self._check(idx, f, exp)
        f = lambda x: Period("2011-03", freq="M") == x
        self._check(idx, f, exp)

        f = lambda x: x == pd.NaT
        exp = np.array([False, False, False, False], dtype=np.bool_)
        self._check(idx, f, exp)
        f = lambda x: pd.NaT == x
        self._check(idx, f, exp)

        f = lambda x: x != Period("2011-03", freq="M")
        exp = np.array([True, True, False, True], dtype=np.bool_)
        self._check(idx, f, exp)
        f = lambda x: Period("2011-03", freq="M") != x
        self._check(idx, f, exp)

        f = lambda x: x != pd.NaT
        exp = np.array([True, True, True, True], dtype=np.bool_)
        self._check(idx, f, exp)
        f = lambda x: pd.NaT != x
        self._check(idx, f, exp)

        f = lambda x: Period("2011-03", freq="M") >= x
        exp = np.array([True, False, True, False], dtype=np.bool_)
        self._check(idx, f, exp)

        f = lambda x: x < Period("2011-03", freq="M")
        exp = np.array([True, False, False, False], dtype=np.bool_)
        self._check(idx, f, exp)

        # Ordered comparisons against NaT are always False.
        f = lambda x: x > pd.NaT
        exp = np.array([False, False, False, False], dtype=np.bool_)
        self._check(idx, f, exp)

        f = lambda x: pd.NaT >= x
        exp = np.array([False, False, False, False], dtype=np.bool_)
        self._check(idx, f, exp)
# ------------------------------------------------------------------
# Arithmetic
class TestPeriodFrameArithmetic:
    def test_ops_frame_period(self):
        # GH#13043
        # Period-dtype DataFrame columns: scalar and frame subtraction yield
        # object-dtype columns of DateOffset multiples.
        df = pd.DataFrame(
            {
                "A": [Period("2015-01", freq="M"), Period("2015-02", freq="M")],
                "B": [Period("2014-01", freq="M"), Period("2014-02", freq="M")],
            }
        )
        assert df["A"].dtype == "Period[M]"
        assert df["B"].dtype == "Period[M]"

        p = Period("2015-03", freq="M")
        off = p.freq
        # dtype will be object because of original dtype
        exp = pd.DataFrame(
            {
                "A": np.array([2 * off, 1 * off], dtype=object),
                "B": np.array([14 * off, 13 * off], dtype=object),
            }
        )
        tm.assert_frame_equal(p - df, exp)
        # Reversed subtraction negates the offsets elementwise.
        tm.assert_frame_equal(df - p, -1 * exp)

        df2 = pd.DataFrame(
            {
                "A": [Period("2015-05", freq="M"), Period("2015-06", freq="M")],
                "B": [Period("2015-05", freq="M"), Period("2015-06", freq="M")],
            }
        )
        assert df2["A"].dtype == "Period[M]"
        assert df2["B"].dtype == "Period[M]"

        exp = pd.DataFrame(
            {
                "A": np.array([4 * off, 4 * off], dtype=object),
                "B": np.array([16 * off, 16 * off], dtype=object),
            }
        )
        tm.assert_frame_equal(df2 - df, exp)
        tm.assert_frame_equal(df - df2, -1 * exp)
class TestPeriodIndexArithmetic:
# ---------------------------------------------------------------
# __add__/__sub__ with PeriodIndex
# PeriodIndex + other is defined for integers and timedelta-like others
# PeriodIndex - other is defined for integers, timedelta-like others,
# and PeriodIndex (with matching freq)
    def test_parr_add_iadd_parr_raises(self, box_with_array):
        # Adding two Period vectors is undefined and must raise.
        rng = period_range("1/1/2000", freq="D", periods=5)
        other = period_range("1/6/2000", freq="D", periods=5)
        # TODO: parametrize over boxes for other?
        rng = tm.box_expected(rng, box_with_array)

        # An earlier implementation of PeriodIndex addition performed
        # a set operation (union). This has since been changed to
        # raise a TypeError. See GH#14164 and GH#13077 for historical
        # reference.
        msg = r"unsupported operand type\(s\) for \+: .* and .*"
        with pytest.raises(TypeError, match=msg):
            rng + other

        with pytest.raises(TypeError, match=msg):
            rng += other
    def test_pi_sub_isub_pi(self):
        # GH#20049
        # For historical reference see GH#14164, GH#13077.
        # PeriodIndex subtraction originally performed set difference,
        # then changed to raise TypeError before being implemented in GH#20049
        # pi - pi yields an object Index of DateOffset multiples.
        rng = period_range("1/1/2000", freq="D", periods=5)
        other = period_range("1/6/2000", freq="D", periods=5)

        off = rng.freq
        expected = pd.Index([-5 * off] * 5)
        result = rng - other
        tm.assert_index_equal(result, expected)

        # In-place subtraction matches the out-of-place result.
        rng -= other
        tm.assert_index_equal(rng, expected)
    def test_pi_sub_pi_with_nat(self):
        # NaT entries propagate through pi - pi subtraction.
        rng = period_range("1/1/2000", freq="D", periods=5)
        other = rng[1:].insert(0, pd.NaT)
        assert other[1:].equals(rng[1:])

        result = rng - other
        off = rng.freq
        expected = pd.Index([pd.NaT, 0 * off, 0 * off, 0 * off, 0 * off])
        tm.assert_index_equal(result, expected)
    def test_parr_sub_pi_mismatched_freq(self, box_with_array):
        # Subtracting Period vectors with different freqs raises
        # IncompatibleFrequency.
        rng = period_range("1/1/2000", freq="D", periods=5)
        other = period_range("1/6/2000", freq="H", periods=5)
        # TODO: parametrize over boxes for other?

        rng = tm.box_expected(rng, box_with_array)
        msg = r"Input has different freq=[HD] from PeriodArray\(freq=[DH]\)"
        with pytest.raises(IncompatibleFrequency, match=msg):
            rng - other
@pytest.mark.parametrize("n", [1, 2, 3, 4])
def test_sub_n_gt_1_ticks(self, tick_classes, n):
# GH 23878
p1_d = "19910905"
p2_d = "19920406"
p1 = PeriodIndex([p1_d], freq=tick_classes(n))
p2 = PeriodIndex([p2_d], freq=tick_classes(n))
expected = PeriodIndex([p2_d], freq=p2.freq.base) - PeriodIndex(
[p1_d], freq=p1.freq.base
)
tm.assert_index_equal((p2 - p1), expected)
@pytest.mark.parametrize("n", [1, 2, 3, 4])
@pytest.mark.parametrize(
"offset, kwd_name",
[
(pd.offsets.YearEnd, "month"),
(pd.offsets.QuarterEnd, "startingMonth"),
(pd.offsets.MonthEnd, None),
(pd.offsets.Week, "weekday"),
],
)
def test_sub_n_gt_1_offsets(self, offset, kwd_name, n):
# GH 23878
kwds = {kwd_name: 3} if kwd_name is not None else {}
p1_d = "19910905"
p2_d = "19920406"
freq = offset(n, normalize=False, **kwds)
p1 = PeriodIndex([p1_d], freq=freq)
p2 = PeriodIndex([p2_d], freq=freq)
result = p2 - p1
expected = PeriodIndex([p2_d], freq=freq.base) - PeriodIndex(
[p1_d], freq=freq.base
)
tm.assert_index_equal(result, expected)
# -------------------------------------------------------------
# Invalid Operations
@pytest.mark.parametrize("other", [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize("op", [operator.add, ops.radd, operator.sub, ops.rsub])
def test_parr_add_sub_float_raises(self, op, other, box_with_array):
dti = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], freq="D")
pi = dti.to_period("D")
pi = tm.box_expected(pi, box_with_array)
msg = (
r"unsupported operand type\(s\) for [+-]: .* and .*|"
"Concatenation operation is not implemented for NumPy arrays"
)
with pytest.raises(TypeError, match=msg):
op(pi, other)
@pytest.mark.parametrize(
"other",
[
# datetime scalars
Timestamp.now(),
Timestamp.now().to_pydatetime(),
Timestamp.now().to_datetime64(),
# datetime-like arrays
pd.date_range("2016-01-01", periods=3, freq="H"),
pd.date_range("2016-01-01", periods=3, tz="Europe/Brussels"),
pd.date_range("2016-01-01", periods=3, freq="S")._data,
pd.date_range("2016-01-01", periods=3, tz="Asia/Tokyo")._data,
# Miscellaneous invalid types
],
)
def test_parr_add_sub_invalid(self, other, box_with_array):
# GH#23215
rng = period_range("1/1/2000", freq="D", periods=3)
rng = tm.box_expected(rng, box_with_array)
msg = (
r"(:?cannot add PeriodArray and .*)"
r"|(:?cannot subtract .* from (:?a\s)?.*)"
r"|(:?unsupported operand type\(s\) for \+: .* and .*)"
)
with pytest.raises(TypeError, match=msg):
rng + other
with pytest.raises(TypeError, match=msg):
other + rng
with pytest.raises(TypeError, match=msg):
rng - other
with pytest.raises(TypeError, match=msg):
other - rng
# -----------------------------------------------------------------
# __add__/__sub__ with ndarray[datetime64] and ndarray[timedelta64]
    def test_pi_add_sub_td64_array_non_tick_raises(self):
        # timedelta64 arrays only combine with tick-like (fixed-duration)
        # freqs; quarterly is not tick-like, so all four orderings raise.
        rng = period_range("1/1/2000", freq="Q", periods=3)
        tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
        tdarr = tdi.values

        msg = r"Cannot add or subtract timedelta64\[ns\] dtype from period\[Q-DEC\]"
        with pytest.raises(TypeError, match=msg):
            rng + tdarr
        with pytest.raises(TypeError, match=msg):
            tdarr + rng

        with pytest.raises(TypeError, match=msg):
            rng - tdarr
        msg = r"cannot subtract period\[Q-DEC\]-dtype from TimedeltaArray"
        with pytest.raises(TypeError, match=msg):
            tdarr - rng
    def test_pi_add_sub_td64_array_tick(self):
        # PeriodIndex + Timedelta-like is allowed only with
        # tick-like frequencies
        rng = period_range("1/1/2000", freq="90D", periods=3)
        tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
        tdarr = tdi.values

        # Addition is commutative; both TimedeltaIndex and raw td64 ndarray work.
        expected = period_range("12/31/1999", freq="90D", periods=3)
        result = rng + tdi
        tm.assert_index_equal(result, expected)
        result = rng + tdarr
        tm.assert_index_equal(result, expected)
        result = tdi + rng
        tm.assert_index_equal(result, expected)
        result = tdarr + rng
        tm.assert_index_equal(result, expected)

        expected = period_range("1/2/2000", freq="90D", periods=3)

        result = rng - tdi
        tm.assert_index_equal(result, expected)
        result = rng - tdarr
        tm.assert_index_equal(result, expected)

        # Reversed subtraction (td64 - Period) is never defined.
        msg = r"cannot subtract .* from .*"
        with pytest.raises(TypeError, match=msg):
            tdarr - rng

        with pytest.raises(TypeError, match=msg):
            tdi - rng
@pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
@pytest.mark.parametrize("tdi_freq", [None, "H"])
def test_parr_sub_td64array(self, box_with_array, tdi_freq, pi_freq):
box = box_with_array
xbox = box if box not in [pd.array, tm.to_array] else pd.Index
tdi = TimedeltaIndex(["1 hours", "2 hours"], freq=tdi_freq)
dti = Timestamp("2018-03-07 17:16:40") + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
td64obj = tm.box_expected(tdi, box)
if pi_freq == "H":
result = pi - td64obj
expected = (pi.to_timestamp("S") - tdi).to_period(pi_freq)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
# Subtract from scalar
result = pi[0] - td64obj
expected = (pi[0].to_timestamp("S") - tdi).to_period(pi_freq)
expected = tm.box_expected(expected, box)
tm.assert_equal(result, expected)
elif pi_freq == "D":
# Tick, but non-compatible
msg = "Input has different freq=None from PeriodArray"
with pytest.raises(IncompatibleFrequency, match=msg):
pi - td64obj
with pytest.raises(IncompatibleFrequency, match=msg):
pi[0] - td64obj
else:
# With non-Tick freq, we could not add timedelta64 array regardless
# of what its resolution is
msg = "Cannot add or subtract timedelta64"
with pytest.raises(TypeError, match=msg):
pi - td64obj
with pytest.raises(TypeError, match=msg):
pi[0] - td64obj
# -----------------------------------------------------------------
# operations with array/Index of DateOffset objects
@pytest.mark.parametrize("box", [np.array, pd.Index])
def test_pi_add_offset_array(self, box):
# GH#18849
pi = PeriodIndex([Period("2015Q1"), Period("2016Q2")])
offs = box(
[
pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12),
]
)
expected = PeriodIndex([Period("2015Q2"), Period("2015Q4")])
with tm.assert_produces_warning(PerformanceWarning):
res = pi + offs
tm.assert_index_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = offs + pi
tm.assert_index_equal(res2, expected)
unanchored = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
# addition/subtraction ops with incompatible offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
msg = r"Input cannot be converted to Period\(freq=Q-DEC\)"
with pytest.raises(IncompatibleFrequency, match=msg):
with tm.assert_produces_warning(PerformanceWarning):
pi + unanchored
with pytest.raises(IncompatibleFrequency, match=msg):
with tm.assert_produces_warning(PerformanceWarning):
unanchored + pi
@pytest.mark.parametrize("box", [np.array, pd.Index])
def test_pi_sub_offset_array(self, box):
# GH#18824
pi = PeriodIndex([Period("2015Q1"), Period("2016Q2")])
other = box(
[
pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12),
]
)
expected = PeriodIndex([pi[n] - other[n] for n in range(len(pi))])
with tm.assert_produces_warning(PerformanceWarning):
res = pi - other
tm.assert_index_equal(res, expected)
anchored = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
# addition/subtraction ops with anchored offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
msg = r"Input has different freq=-1M from Period\(freq=Q-DEC\)"
with pytest.raises(IncompatibleFrequency, match=msg):
with tm.assert_produces_warning(PerformanceWarning):
pi - anchored
with pytest.raises(IncompatibleFrequency, match=msg):
with tm.assert_produces_warning(PerformanceWarning):
anchored - pi
def test_pi_add_iadd_int(self, one):
# Variants of `one` for #19012
rng = period_range("2000-01-01 09:00", freq="H", periods=10)
result = rng + one
expected = period_range("2000-01-01 10:00", freq="H", periods=10)
tm.assert_index_equal(result, expected)
rng += one
tm.assert_index_equal(rng, expected)
def test_pi_sub_isub_int(self, one):
"""
PeriodIndex.__sub__ and __isub__ with several representations of
the integer 1, e.g. int, np.int64, np.uint8, ...
"""
rng = period_range("2000-01-01 09:00", freq="H", periods=10)
result = rng - one
expected = period_range("2000-01-01 08:00", freq="H", periods=10)
tm.assert_index_equal(result, expected)
rng -= one
tm.assert_index_equal(rng, expected)
@pytest.mark.parametrize("five", [5, np.array(5, dtype=np.int64)])
def test_pi_sub_intlike(self, five):
rng = period_range("2007-01", periods=50)
result = rng - five
exp = rng + (-five)
tm.assert_index_equal(result, exp)
    def test_pi_sub_isub_offset(self):
        # offset
        # DateOffset
        # Subtracting an anchored offset of matching freq shifts the index,
        # both out-of-place and in-place.
        rng = period_range("2014", "2024", freq="A")
        result = rng - pd.offsets.YearEnd(5)
        expected = period_range("2009", "2019", freq="A")
        tm.assert_index_equal(result, expected)
        rng -= pd.offsets.YearEnd(5)
        tm.assert_index_equal(rng, expected)

        rng = period_range("2014-01", "2016-12", freq="M")
        result = rng - pd.offsets.MonthEnd(5)
        expected = period_range("2013-08", "2016-07", freq="M")
        tm.assert_index_equal(result, expected)

        rng -= pd.offsets.MonthEnd(5)
        tm.assert_index_equal(rng, expected)
@pytest.mark.parametrize("transpose", [True, False])
def test_pi_add_offset_n_gt1(self, box_with_array, transpose):
# GH#23215
# add offset to PeriodIndex with freq.n > 1
per = Period("2016-01", freq="2M")
pi = PeriodIndex([per])
expected = PeriodIndex(["2016-03"], freq="2M")
pi = tm.box_expected(pi, box_with_array, transpose=transpose)
expected = tm.box_expected(expected, box_with_array, transpose=transpose)
result = pi + per.freq
tm.assert_equal(result, expected)
result = per.freq + pi
tm.assert_equal(result, expected)
    def test_pi_add_offset_n_gt1_not_divisible(self, box_with_array):
        # GH#23215
        # PeriodIndex with freq.n > 1 add offset with offset.n % freq.n != 0
        # Adding 3 months to a "2M" index lands on 2016-04 even though 3 is
        # not a multiple of the frequency's n=2.
        pi = PeriodIndex(["2016-01"], freq="2M")
        expected = PeriodIndex(["2016-04"], freq="2M")
        pi = tm.box_expected(pi, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        result = pi + to_offset("3M")
        tm.assert_equal(result, expected)
        # addition must be commutative
        result = to_offset("3M") + pi
        tm.assert_equal(result, expected)
# ---------------------------------------------------------------
# __add__/__sub__ with integer arrays
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_pi_add_intarray(self, int_holder, op):
# GH#19959
pi = PeriodIndex([Period("2015Q1"), Period("NaT")])
other = int_holder([4, -1])
result = op(pi, other)
expected = PeriodIndex([Period("2016Q1"), Period("NaT")])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_pi_sub_intarray(self, int_holder):
# GH#19959
pi = PeriodIndex([Period("2015Q1"), Period("NaT")])
other = int_holder([4, -1])
result = pi - other
expected = PeriodIndex([Period("2014Q1"), Period("NaT")])
tm.assert_index_equal(result, expected)
msg = r"bad operand type for unary -: 'PeriodArray'"
with pytest.raises(TypeError, match=msg):
other - pi
    # ---------------------------------------------------------------
    # Timedelta-like (timedelta, timedelta64, Timedelta, Tick)
    # TODO: Some of these are misnomers because of non-Tick DateOffsets
    def test_pi_add_timedeltalike_minute_gt1(self, three_days):
        # GH#23031 adding a time-delta-like offset to a PeriodArray that has
        # minute frequency with n != 1. A more general case is tested below
        # in test_pi_add_timedeltalike_tick_gt1, but here we write out the
        # expected result more explicitly.
        other = three_days
        rng = period_range("2014-05-01", periods=3, freq="2D")
        expected = PeriodIndex(["2014-05-04", "2014-05-06", "2014-05-08"], freq="2D")
        result = rng + other
        tm.assert_index_equal(result, expected)
        # addition is commutative
        result = other + rng
        tm.assert_index_equal(result, expected)
        # subtraction
        expected = PeriodIndex(["2014-04-28", "2014-04-30", "2014-05-02"], freq="2D")
        result = rng - other
        tm.assert_index_equal(result, expected)
        # Reversed subtraction is undefined; depending on the concrete
        # timedelta-like type the failure surfaces as one of two messages.
        msg = (
            r"(:?bad operand type for unary -: 'PeriodArray')"
            r"|(:?cannot subtract PeriodArray from timedelta64\[[hD]\])"
        )
        with pytest.raises(TypeError, match=msg):
            other - rng
@pytest.mark.parametrize("freqstr", ["5ns", "5us", "5ms", "5s", "5T", "5h", "5d"])
def test_pi_add_timedeltalike_tick_gt1(self, three_days, freqstr):
# GH#23031 adding a time-delta-like offset to a PeriodArray that has
# tick-like frequency with n != 1
other = three_days
rng = period_range("2014-05-01", periods=6, freq=freqstr)
expected = period_range(rng[0] + other, periods=6, freq=freqstr)
result = rng + other
tm.assert_index_equal(result, expected)
result = other + rng
tm.assert_index_equal(result, expected)
# subtraction
expected = period_range(rng[0] - other, periods=6, freq=freqstr)
result = rng - other
tm.assert_index_equal(result, expected)
msg = (
r"(:?bad operand type for unary -: 'PeriodArray')"
r"|(:?cannot subtract PeriodArray from timedelta64\[[hD]\])"
)
with pytest.raises(TypeError, match=msg):
other - rng
def test_pi_add_iadd_timedeltalike_daily(self, three_days):
# Tick
other = three_days
rng = period_range("2014-05-01", "2014-05-15", freq="D")
expected = period_range("2014-05-04", "2014-05-18", freq="D")
result = rng + other
tm.assert_index_equal(result, expected)
rng += other
tm.assert_index_equal(rng, expected)
def test_pi_sub_isub_timedeltalike_daily(self, three_days):
# Tick-like 3 Days
other = three_days
rng = period_range("2014-05-01", "2014-05-15", freq="D")
expected = period_range("2014-04-28", "2014-05-12", freq="D")
result = rng - other
tm.assert_index_equal(result, expected)
rng -= other
tm.assert_index_equal(rng, expected)
    def test_pi_add_sub_timedeltalike_freq_mismatch_daily(self, not_daily):
        # Timedelta-likes that do not represent a whole number of days
        # cannot be added to or subtracted from a daily PeriodIndex;
        # all four operator forms must raise IncompatibleFrequency.
        other = not_daily
        rng = period_range("2014-05-01", "2014-05-15", freq="D")
        msg = "Input has different freq(=.+)? from Period.*?\\(freq=D\\)"
        with pytest.raises(IncompatibleFrequency, match=msg):
            rng + other
        with pytest.raises(IncompatibleFrequency, match=msg):
            rng += other
        with pytest.raises(IncompatibleFrequency, match=msg):
            rng - other
        with pytest.raises(IncompatibleFrequency, match=msg):
            rng -= other
def test_pi_add_iadd_timedeltalike_hourly(self, two_hours):
other = two_hours
rng = period_range("2014-01-01 10:00", "2014-01-05 10:00", freq="H")
expected = period_range("2014-01-01 12:00", "2014-01-05 12:00", freq="H")
result = rng + other
tm.assert_index_equal(result, expected)
rng += other
tm.assert_index_equal(rng, expected)
    def test_pi_add_timedeltalike_mismatched_freq_hourly(self, not_hourly):
        # Timedelta-likes that do not represent a whole number of hours
        # cannot be added to an hourly PeriodIndex.
        other = not_hourly
        rng = period_range("2014-01-01 10:00", "2014-01-05 10:00", freq="H")
        msg = "Input has different freq(=.+)? from Period.*?\\(freq=H\\)"
        with pytest.raises(IncompatibleFrequency, match=msg):
            rng + other
        with pytest.raises(IncompatibleFrequency, match=msg):
            rng += other
def test_pi_sub_isub_timedeltalike_hourly(self, two_hours):
other = two_hours
rng = period_range("2014-01-01 10:00", "2014-01-05 10:00", freq="H")
expected = period_range("2014-01-01 08:00", "2014-01-05 08:00", freq="H")
result = rng - other
tm.assert_index_equal(result, expected)
rng -= other
tm.assert_index_equal(rng, expected)
def test_add_iadd_timedeltalike_annual(self):
# offset
# DateOffset
rng = period_range("2014", "2024", freq="A")
result = rng + pd.offsets.YearEnd(5)
expected = period_range("2019", "2029", freq="A")
tm.assert_index_equal(result, expected)
rng += pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
    def test_pi_add_sub_timedeltalike_freq_mismatch_annual(self, mismatched_freq):
        # Offsets/timedeltas incompatible with the annual frequency raise
        # IncompatibleFrequency for +, +=, - and -=.
        other = mismatched_freq
        rng = period_range("2014", "2024", freq="A")
        msg = "Input has different freq(=.+)? from Period.*?\\(freq=A-DEC\\)"
        with pytest.raises(IncompatibleFrequency, match=msg):
            rng + other
        with pytest.raises(IncompatibleFrequency, match=msg):
            rng += other
        with pytest.raises(IncompatibleFrequency, match=msg):
            rng - other
        with pytest.raises(IncompatibleFrequency, match=msg):
            rng -= other
def test_pi_add_iadd_timedeltalike_M(self):
rng = period_range("2014-01", "2016-12", freq="M")
expected = period_range("2014-06", "2017-05", freq="M")
result = rng + pd.offsets.MonthEnd(5)
tm.assert_index_equal(result, expected)
rng += pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
    def test_pi_add_sub_timedeltalike_freq_mismatch_monthly(self, mismatched_freq):
        # Offsets/timedeltas incompatible with the monthly frequency raise
        # IncompatibleFrequency for +, +=, - and -=.
        other = mismatched_freq
        rng = period_range("2014-01", "2016-12", freq="M")
        msg = "Input has different freq(=.+)? from Period.*?\\(freq=M\\)"
        with pytest.raises(IncompatibleFrequency, match=msg):
            rng + other
        with pytest.raises(IncompatibleFrequency, match=msg):
            rng += other
        with pytest.raises(IncompatibleFrequency, match=msg):
            rng - other
        with pytest.raises(IncompatibleFrequency, match=msg):
            rng -= other
@pytest.mark.parametrize("transpose", [True, False])
def test_parr_add_sub_td64_nat(self, box_with_array, transpose):
# GH#23320 special handling for timedelta64("NaT")
pi = period_range("1994-04-01", periods=9, freq="19D")
other = np.timedelta64("NaT")
expected = PeriodIndex(["NaT"] * 9, freq="19D")
obj = tm.box_expected(pi, box_with_array, transpose=transpose)
expected = tm.box_expected(expected, box_with_array, transpose=transpose)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
msg = r"cannot subtract .* from .*"
with pytest.raises(TypeError, match=msg):
other - obj
@pytest.mark.parametrize(
"other",
[
np.array(["NaT"] * 9, dtype="m8[ns]"),
TimedeltaArray._from_sequence(["NaT"] * 9),
],
)
def test_parr_add_sub_tdt64_nat_array(self, box_with_array, other):
pi = period_range("1994-04-01", periods=9, freq="19D")
expected = PeriodIndex(["NaT"] * 9, freq="19D")
obj = tm.box_expected(pi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
msg = r"cannot subtract .* from .*"
with pytest.raises(TypeError, match=msg):
other - obj
# ---------------------------------------------------------------
# Unsorted
def test_parr_add_sub_index(self):
# Check that PeriodArray defers to Index on arithmetic ops
pi = period_range("2000-12-31", periods=3)
parr = pi.array
result = parr - pi
expected = pi - pi
tm.assert_index_equal(result, expected)
def test_parr_add_sub_object_array(self):
pi = period_range("2000-12-31", periods=3, freq="D")
parr = pi.array
other = np.array([Timedelta(days=1), pd.offsets.Day(2), 3])
with tm.assert_produces_warning(PerformanceWarning):
result = parr + other
expected = PeriodIndex(
["2001-01-01", "2001-01-03", "2001-01-05"], freq="D"
).array
tm.assert_equal(result, expected)
with tm.assert_produces_warning(PerformanceWarning):
result = parr - other
expected = PeriodIndex(["2000-12-30"] * 3, freq="D").array
tm.assert_equal(result, expected)
class TestPeriodSeriesArithmetic:
    def test_ops_series_timedelta(self):
        # GH#13043: adding a one-day timedelta-like (Timedelta or Day
        # offset) to a Period[D] Series shifts every element by one period,
        # and the operation is commutative.
        ser = Series(
            [Period("2015-01-01", freq="D"), Period("2015-01-02", freq="D")],
            name="xxx",
        )
        assert ser.dtype == "Period[D]"
        expected = Series(
            [Period("2015-01-02", freq="D"), Period("2015-01-03", freq="D")],
            name="xxx",
        )
        for other in (Timedelta("1 days"), pd.tseries.offsets.Day()):
            tm.assert_series_equal(ser + other, expected)
            tm.assert_series_equal(other + ser, expected)

    def test_ops_series_period(self):
        # GH#13043: subtracting Period objects / Period Series yields an
        # object-dtype Series of DateOffset multiples; subtraction
        # anti-commutes.
        ser = Series(
            [Period("2015-01-01", freq="D"), Period("2015-01-02", freq="D")],
            name="xxx",
        )
        assert ser.dtype == "Period[D]"
        per = Period("2015-01-10", freq="D")
        off = per.freq
        # dtype will be object because of original dtype
        expected = Series([9 * off, 8 * off], name="xxx", dtype=object)
        tm.assert_series_equal(per - ser, expected)
        tm.assert_series_equal(ser - per, -1 * expected)
        s2 = Series(
            [Period("2015-01-05", freq="D"), Period("2015-01-04", freq="D")],
            name="xxx",
        )
        assert s2.dtype == "Period[D]"
        expected = Series([4 * off, 2 * off], name="xxx", dtype=object)
        tm.assert_series_equal(s2 - ser, expected)
        tm.assert_series_equal(ser - s2, -1 * expected)
class TestPeriodIndexSeriesMethods:
    """Test PeriodIndex and Period Series Ops consistency"""
    def _check(self, values, func, expected):
        # Apply ``func`` to both a PeriodIndex and a Series built from
        # ``values`` and verify both containers produce ``expected``.
        idx = PeriodIndex(values)
        result = func(idx)
        tm.assert_equal(result, expected)
        ser = Series(values)
        result = func(ser)
        # the Series result carries over the name of ``values``
        exp = Series(expected, name=values.name)
        tm.assert_series_equal(result, exp)
    def test_pi_ops(self):
        # Integer addition/subtraction shifts by the index frequency (M).
        idx = PeriodIndex(
            ["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx"
        )
        expected = PeriodIndex(
            ["2011-03", "2011-04", "2011-05", "2011-06"], freq="M", name="idx"
        )
        self._check(idx, lambda x: x + 2, expected)
        self._check(idx, lambda x: 2 + x, expected)
        self._check(idx + 2, lambda x: x - 2, idx)
        # Period - PeriodIndex returns an object Index of offset multiples.
        result = idx - Period("2011-01", freq="M")
        off = idx.freq
        exp = pd.Index([0 * off, 1 * off, 2 * off, 3 * off], name="idx")
        tm.assert_index_equal(result, exp)
        result = Period("2011-01", freq="M") - idx
        exp = pd.Index([0 * off, -1 * off, -2 * off, -3 * off], name="idx")
        tm.assert_index_equal(result, exp)
    @pytest.mark.parametrize("ng", ["str", 1.5])
    @pytest.mark.parametrize(
        "func",
        [
            lambda obj, ng: obj + ng,
            lambda obj, ng: ng + obj,
            lambda obj, ng: obj - ng,
            lambda obj, ng: ng - obj,
            lambda obj, ng: np.add(obj, ng),
            lambda obj, ng: np.add(ng, obj),
            lambda obj, ng: np.subtract(obj, ng),
            lambda obj, ng: np.subtract(ng, obj),
        ],
    )
    def test_parr_ops_errors(self, ng, func, box_with_array):
        # Arithmetic with non-numeric scalars (str, float) must raise
        # TypeError regardless of operator direction or ufunc path.
        idx = PeriodIndex(
            ["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx"
        )
        obj = tm.box_expected(idx, box_with_array)
        msg = (
            r"unsupported operand type\(s\)|can only concatenate|"
            r"must be str|object to str implicitly"
        )
        with pytest.raises(TypeError, match=msg):
            func(obj, ng)
    def test_pi_ops_nat(self):
        # NaT entries propagate through integer addition/subtraction.
        idx = PeriodIndex(
            ["2011-01", "2011-02", "NaT", "2011-04"], freq="M", name="idx"
        )
        expected = PeriodIndex(
            ["2011-03", "2011-04", "NaT", "2011-06"], freq="M", name="idx"
        )
        self._check(idx, lambda x: x + 2, expected)
        self._check(idx, lambda x: 2 + x, expected)
        self._check(idx, lambda x: np.add(x, 2), expected)
        self._check(idx + 2, lambda x: x - 2, idx)
        self._check(idx + 2, lambda x: np.subtract(x, 2), idx)
        # freq with mult
        # with freq "2M", adding 3 advances by 3 * 2 months = 6 months
        idx = PeriodIndex(
            ["2011-01", "2011-02", "NaT", "2011-04"], freq="2M", name="idx"
        )
        expected = PeriodIndex(
            ["2011-07", "2011-08", "NaT", "2011-10"], freq="2M", name="idx"
        )
        self._check(idx, lambda x: x + 3, expected)
        self._check(idx, lambda x: 3 + x, expected)
        self._check(idx, lambda x: np.add(x, 3), expected)
        self._check(idx + 3, lambda x: x - 3, idx)
        self._check(idx + 3, lambda x: np.subtract(x, 3), idx)
    def test_pi_ops_array_int(self):
        # Element-wise integer-array addition/subtraction; NaT propagates.
        idx = PeriodIndex(
            ["2011-01", "2011-02", "NaT", "2011-04"], freq="M", name="idx"
        )
        f = lambda x: x + np.array([1, 2, 3, 4])
        exp = PeriodIndex(
            ["2011-02", "2011-04", "NaT", "2011-08"], freq="M", name="idx"
        )
        self._check(idx, f, exp)
        f = lambda x: np.add(x, np.array([4, -1, 1, 2]))
        exp = PeriodIndex(
            ["2011-05", "2011-01", "NaT", "2011-06"], freq="M", name="idx"
        )
        self._check(idx, f, exp)
        f = lambda x: x - np.array([1, 2, 3, 4])
        exp = PeriodIndex(
            ["2010-12", "2010-12", "NaT", "2010-12"], freq="M", name="idx"
        )
        self._check(idx, f, exp)
        f = lambda x: np.subtract(x, np.array([3, 2, 3, -2]))
        exp = PeriodIndex(
            ["2010-10", "2010-12", "NaT", "2011-06"], freq="M", name="idx"
        )
        self._check(idx, f, exp)
    def test_pi_ops_offset(self):
        # Adding/subtracting a Day offset compatible with freq="D".
        idx = PeriodIndex(
            ["2011-01-01", "2011-02-01", "2011-03-01", "2011-04-01"],
            freq="D",
            name="idx",
        )
        f = lambda x: x + pd.offsets.Day()
        exp = PeriodIndex(
            ["2011-01-02", "2011-02-02", "2011-03-02", "2011-04-02"],
            freq="D",
            name="idx",
        )
        self._check(idx, f, exp)
        f = lambda x: x + pd.offsets.Day(2)
        exp = PeriodIndex(
            ["2011-01-03", "2011-02-03", "2011-03-03", "2011-04-03"],
            freq="D",
            name="idx",
        )
        self._check(idx, f, exp)
        f = lambda x: x - pd.offsets.Day(2)
        exp = PeriodIndex(
            ["2010-12-30", "2011-01-30", "2011-02-27", "2011-03-30"],
            freq="D",
            name="idx",
        )
        self._check(idx, f, exp)
    def test_pi_offset_errors(self):
        # Offsets incompatible with the daily frequency raise
        # IncompatibleFrequency for both Index and Series.
        idx = PeriodIndex(
            ["2011-01-01", "2011-02-01", "2011-03-01", "2011-04-01"],
            freq="D",
            name="idx",
        )
        ser = Series(idx)
        # Series op is applied per Period instance, thus error is raised
        # from Period
        for obj in [idx, ser]:
            msg = r"Input has different freq=2H from Period.*?\(freq=D\)"
            with pytest.raises(IncompatibleFrequency, match=msg):
                obj + pd.offsets.Hour(2)
            with pytest.raises(IncompatibleFrequency, match=msg):
                pd.offsets.Hour(2) + obj
            msg = r"Input has different freq=-2H from Period.*?\(freq=D\)"
            with pytest.raises(IncompatibleFrequency, match=msg):
                obj - pd.offsets.Hour(2)
    def test_pi_sub_period(self):
        # GH#13071
        # PeriodIndex - Period (and the reflected op, also via np.subtract)
        # returns an object Index of DateOffset multiples; Period NaT
        # produces an all-NaT TimedeltaIndex.
        idx = PeriodIndex(
            ["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx"
        )
        result = idx - Period("2012-01", freq="M")
        off = idx.freq
        exp = pd.Index([-12 * off, -11 * off, -10 * off, -9 * off], name="idx")
        tm.assert_index_equal(result, exp)
        result = np.subtract(idx, Period("2012-01", freq="M"))
        tm.assert_index_equal(result, exp)
        result = Period("2012-01", freq="M") - idx
        exp = pd.Index([12 * off, 11 * off, 10 * off, 9 * off], name="idx")
        tm.assert_index_equal(result, exp)
        result = np.subtract(Period("2012-01", freq="M"), idx)
        tm.assert_index_equal(result, exp)
        exp = TimedeltaIndex([np.nan, np.nan, np.nan, np.nan], name="idx")
        result = idx - Period("NaT", freq="M")
        tm.assert_index_equal(result, exp)
        assert result.freq == exp.freq
        result = Period("NaT", freq="M") - idx
        tm.assert_index_equal(result, exp)
        assert result.freq == exp.freq
    def test_pi_sub_pdnat(self):
        # GH#13071
        # Subtracting pd.NaT (in either direction) yields an all-NaT
        # TimedeltaIndex.
        idx = PeriodIndex(
            ["2011-01", "2011-02", "NaT", "2011-04"], freq="M", name="idx"
        )
        exp = TimedeltaIndex([pd.NaT] * 4, name="idx")
        tm.assert_index_equal(pd.NaT - idx, exp)
        tm.assert_index_equal(idx - pd.NaT, exp)
    def test_pi_sub_period_nat(self):
        # GH#13071
        # NaT entries inside the index propagate as pd.NaT in the result.
        idx = PeriodIndex(
            ["2011-01", "NaT", "2011-03", "2011-04"], freq="M", name="idx"
        )
        result = idx - Period("2012-01", freq="M")
        off = idx.freq
        exp = pd.Index([-12 * off, pd.NaT, -10 * off, -9 * off], name="idx")
        tm.assert_index_equal(result, exp)
        result = Period("2012-01", freq="M") - idx
        exp = pd.Index([12 * off, pd.NaT, 10 * off, 9 * off], name="idx")
        tm.assert_index_equal(result, exp)
        exp = TimedeltaIndex([np.nan, np.nan, np.nan, np.nan], name="idx")
        tm.assert_index_equal(idx - Period("NaT", freq="M"), exp)
        tm.assert_index_equal(Period("NaT", freq="M") - idx, exp)
    @pytest.mark.parametrize("scalars", ["a", False, 1, 1.0, None])
    def test_comparison_operations(self, scalars):
        # GH 28980
        # Equality comparison against a mismatched scalar returns
        # all-False instead of raising.
        expected = Series([False, False])
        s = Series([Period("2019"), Period("2020")], dtype="period[A-DEC]")
        result = s == scalars
        tm.assert_series_equal(result, expected)
| 36.307494 | 88 | 0.574835 |
5aa53b7f153355eeb48020b9ad7a4001d52891c2 | 20,725 | py | Python | pypureclient/flasharray/FA_2_8/models/host_group_performance_by_array.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 14 | 2018-12-07T18:30:27.000Z | 2022-02-22T09:12:33.000Z | pypureclient/flasharray/FA_2_8/models/host_group_performance_by_array.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 28 | 2019-09-17T21:03:52.000Z | 2022-03-29T22:07:35.000Z | pypureclient/flasharray/FA_2_8/models/host_group_performance_by_array.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 15 | 2020-06-11T15:50:08.000Z | 2022-03-21T09:27:25.000Z | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.8
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_8 import models
class HostGroupPerformanceByArray(object):
    """
    Performance metrics of a host group, broken down per array.

    Attributes:
      swagger_types (dict): The key is attribute name
        and the value is attribute type.
      attribute_map (dict): The key is attribute name
        and the value is json key in definition.
    """
    swagger_types = {
        'bytes_per_mirrored_write': 'int',
        'bytes_per_op': 'int',
        'bytes_per_read': 'int',
        'bytes_per_write': 'int',
        'mirrored_write_bytes_per_sec': 'int',
        'mirrored_writes_per_sec': 'int',
        'qos_rate_limit_usec_per_mirrored_write_op': 'int',
        'qos_rate_limit_usec_per_read_op': 'int',
        'qos_rate_limit_usec_per_write_op': 'int',
        'queue_usec_per_mirrored_write_op': 'int',
        'queue_usec_per_read_op': 'int',
        'queue_usec_per_write_op': 'int',
        'read_bytes_per_sec': 'int',
        'reads_per_sec': 'int',
        'san_usec_per_mirrored_write_op': 'int',
        'san_usec_per_read_op': 'int',
        'san_usec_per_write_op': 'int',
        'service_usec_per_mirrored_write_op': 'int',
        'service_usec_per_read_op': 'int',
        'service_usec_per_write_op': 'int',
        'time': 'int',
        'usec_per_mirrored_write_op': 'int',
        'usec_per_read_op': 'int',
        'usec_per_write_op': 'int',
        'write_bytes_per_sec': 'int',
        'writes_per_sec': 'int',
        'service_usec_per_read_op_cache_reduction': 'float',
        'id': 'str',
        'name': 'str',
        'array': 'Resource'
    }
    attribute_map = {
        'bytes_per_mirrored_write': 'bytes_per_mirrored_write',
        'bytes_per_op': 'bytes_per_op',
        'bytes_per_read': 'bytes_per_read',
        'bytes_per_write': 'bytes_per_write',
        'mirrored_write_bytes_per_sec': 'mirrored_write_bytes_per_sec',
        'mirrored_writes_per_sec': 'mirrored_writes_per_sec',
        'qos_rate_limit_usec_per_mirrored_write_op': 'qos_rate_limit_usec_per_mirrored_write_op',
        'qos_rate_limit_usec_per_read_op': 'qos_rate_limit_usec_per_read_op',
        'qos_rate_limit_usec_per_write_op': 'qos_rate_limit_usec_per_write_op',
        'queue_usec_per_mirrored_write_op': 'queue_usec_per_mirrored_write_op',
        'queue_usec_per_read_op': 'queue_usec_per_read_op',
        'queue_usec_per_write_op': 'queue_usec_per_write_op',
        'read_bytes_per_sec': 'read_bytes_per_sec',
        'reads_per_sec': 'reads_per_sec',
        'san_usec_per_mirrored_write_op': 'san_usec_per_mirrored_write_op',
        'san_usec_per_read_op': 'san_usec_per_read_op',
        'san_usec_per_write_op': 'san_usec_per_write_op',
        'service_usec_per_mirrored_write_op': 'service_usec_per_mirrored_write_op',
        'service_usec_per_read_op': 'service_usec_per_read_op',
        'service_usec_per_write_op': 'service_usec_per_write_op',
        'time': 'time',
        'usec_per_mirrored_write_op': 'usec_per_mirrored_write_op',
        'usec_per_read_op': 'usec_per_read_op',
        'usec_per_write_op': 'usec_per_write_op',
        'write_bytes_per_sec': 'write_bytes_per_sec',
        'writes_per_sec': 'writes_per_sec',
        'service_usec_per_read_op_cache_reduction': 'service_usec_per_read_op_cache_reduction',
        'id': 'id',
        'name': 'name',
        'array': 'array'
    }
    required_args = {
    }
    # Attributes that must be >= 0 when assigned.  `time` (an epoch
    # timestamp), `id`, `name` and `array` carry no range constraint, and
    # `service_usec_per_read_op_cache_reduction` has its own [0.0, 1.0]
    # check in __setattr__.
    _NON_NEGATIVE_ATTRS = frozenset([
        'bytes_per_mirrored_write',
        'bytes_per_op',
        'bytes_per_read',
        'bytes_per_write',
        'mirrored_write_bytes_per_sec',
        'mirrored_writes_per_sec',
        'qos_rate_limit_usec_per_mirrored_write_op',
        'qos_rate_limit_usec_per_read_op',
        'qos_rate_limit_usec_per_write_op',
        'queue_usec_per_mirrored_write_op',
        'queue_usec_per_read_op',
        'queue_usec_per_write_op',
        'read_bytes_per_sec',
        'reads_per_sec',
        'san_usec_per_mirrored_write_op',
        'san_usec_per_read_op',
        'san_usec_per_write_op',
        'service_usec_per_mirrored_write_op',
        'service_usec_per_read_op',
        'service_usec_per_write_op',
        'usec_per_mirrored_write_op',
        'usec_per_read_op',
        'usec_per_write_op',
        'write_bytes_per_sec',
        'writes_per_sec',
    ])
    def __init__(
        self,
        bytes_per_mirrored_write=None,  # type: int
        bytes_per_op=None,  # type: int
        bytes_per_read=None,  # type: int
        bytes_per_write=None,  # type: int
        mirrored_write_bytes_per_sec=None,  # type: int
        mirrored_writes_per_sec=None,  # type: int
        qos_rate_limit_usec_per_mirrored_write_op=None,  # type: int
        qos_rate_limit_usec_per_read_op=None,  # type: int
        qos_rate_limit_usec_per_write_op=None,  # type: int
        queue_usec_per_mirrored_write_op=None,  # type: int
        queue_usec_per_read_op=None,  # type: int
        queue_usec_per_write_op=None,  # type: int
        read_bytes_per_sec=None,  # type: int
        reads_per_sec=None,  # type: int
        san_usec_per_mirrored_write_op=None,  # type: int
        san_usec_per_read_op=None,  # type: int
        san_usec_per_write_op=None,  # type: int
        service_usec_per_mirrored_write_op=None,  # type: int
        service_usec_per_read_op=None,  # type: int
        service_usec_per_write_op=None,  # type: int
        time=None,  # type: int
        usec_per_mirrored_write_op=None,  # type: int
        usec_per_read_op=None,  # type: int
        usec_per_write_op=None,  # type: int
        write_bytes_per_sec=None,  # type: int
        writes_per_sec=None,  # type: int
        service_usec_per_read_op_cache_reduction=None,  # type: float
        id=None,  # type: str
        name=None,  # type: str
        array=None,  # type: models.Resource
    ):
        """
        Keyword args:
            bytes_per_mirrored_write (int): The average I/O size per mirrored write. Measured in bytes.
            bytes_per_op (int): The average I/O size for both read and write (all) operations.
            bytes_per_read (int): The average I/O size per read. Measured in bytes.
            bytes_per_write (int): The average I/O size per write. Measured in bytes.
            mirrored_write_bytes_per_sec (int): The number of mirrored bytes written per second.
            mirrored_writes_per_sec (int): The number of mirrored writes per second.
            qos_rate_limit_usec_per_mirrored_write_op (int): The average time it takes the array to process a mirrored I/O write request. Measured in microseconds.
            qos_rate_limit_usec_per_read_op (int): The average time spent waiting due to QoS rate limiting for a read request. Measured in microseconds.
            qos_rate_limit_usec_per_write_op (int): The average time that a write I/O request spends waiting as a result of the volume reaching its QoS bandwidth limit. Measured in microseconds.
            queue_usec_per_mirrored_write_op (int): The average time that a mirrored write I/O request spends in the array waiting to be served. Measured in microseconds.
            queue_usec_per_read_op (int): The average time that a read I/O request spends in the array waiting to be served. Measured in microseconds.
            queue_usec_per_write_op (int): The average time that a write I/O request spends in the array waiting to be served. Measured in microseconds.
            read_bytes_per_sec (int): The number of bytes read per second.
            reads_per_sec (int): The number of read requests processed per second.
            san_usec_per_mirrored_write_op (int): The average time required to transfer data from the initiator to the array for a mirrored write request. Measured in microseconds.
            san_usec_per_read_op (int): The average time required to transfer data from the array to the initiator for a read request. Measured in microseconds.
            san_usec_per_write_op (int): The average time required to transfer data from the initiator to the array for a write request. Measured in microseconds.
            service_usec_per_mirrored_write_op (int): The average time required for the array to service a mirrored write request. Measured in microseconds.
            service_usec_per_read_op (int): The average time required for the array to service a read request. Measured in microseconds.
            service_usec_per_write_op (int): The average time required for the array to service a write request. Measured in microseconds.
            time (int): The time when the sample performance data was taken. Measured in milliseconds since the UNIX epoch.
            usec_per_mirrored_write_op (int): The average time it takes the array to process a mirrored I/O write request. Measured in microseconds. The average time does not include SAN time, queue time, or QoS rate limit time.
            usec_per_read_op (int): The average time it takes the array to process an I/O read request. Measured in microseconds. The average time does not include SAN time, queue time, or QoS rate limit time.
            usec_per_write_op (int): The average time it takes the array to process an I/O write request. Measured in microseconds. The average time does not include SAN time, queue time, or QoS rate limit time.
            write_bytes_per_sec (int): The number of bytes written per second.
            writes_per_sec (int): The number of write requests processed per second.
            service_usec_per_read_op_cache_reduction (float): The percentage reduction in `service_usec_per_read_op` due to data cache hits. For example, a value of 0.25 indicates that the value of `service_usec_per_read_op` is 25% lower than it would have been without any data cache hits.
            id (str): A globally unique, system-generated ID. The ID cannot be modified and cannot refer to another resource.
            name (str): A user-specified name. The name must be locally unique and can be changed.
            array (Resource): The array on which the performance metrics were recorded.
        """
        # Store only explicitly-provided (non-None) values; absent
        # attributes stay unset so the hasattr() checks in to_dict() skip
        # them.  Each assignment goes through __setattr__, which validates
        # the value.
        _provided = {
            'bytes_per_mirrored_write': bytes_per_mirrored_write,
            'bytes_per_op': bytes_per_op,
            'bytes_per_read': bytes_per_read,
            'bytes_per_write': bytes_per_write,
            'mirrored_write_bytes_per_sec': mirrored_write_bytes_per_sec,
            'mirrored_writes_per_sec': mirrored_writes_per_sec,
            'qos_rate_limit_usec_per_mirrored_write_op': qos_rate_limit_usec_per_mirrored_write_op,
            'qos_rate_limit_usec_per_read_op': qos_rate_limit_usec_per_read_op,
            'qos_rate_limit_usec_per_write_op': qos_rate_limit_usec_per_write_op,
            'queue_usec_per_mirrored_write_op': queue_usec_per_mirrored_write_op,
            'queue_usec_per_read_op': queue_usec_per_read_op,
            'queue_usec_per_write_op': queue_usec_per_write_op,
            'read_bytes_per_sec': read_bytes_per_sec,
            'reads_per_sec': reads_per_sec,
            'san_usec_per_mirrored_write_op': san_usec_per_mirrored_write_op,
            'san_usec_per_read_op': san_usec_per_read_op,
            'san_usec_per_write_op': san_usec_per_write_op,
            'service_usec_per_mirrored_write_op': service_usec_per_mirrored_write_op,
            'service_usec_per_read_op': service_usec_per_read_op,
            'service_usec_per_write_op': service_usec_per_write_op,
            'time': time,
            'usec_per_mirrored_write_op': usec_per_mirrored_write_op,
            'usec_per_read_op': usec_per_read_op,
            'usec_per_write_op': usec_per_write_op,
            'write_bytes_per_sec': write_bytes_per_sec,
            'writes_per_sec': writes_per_sec,
            'service_usec_per_read_op_cache_reduction': service_usec_per_read_op_cache_reduction,
            'id': id,
            'name': name,
            'array': array,
        }
        for _key, _value in _provided.items():
            if _value is not None:
                setattr(self, _key, _value)
    def __setattr__(self, key, value):
        """Validate `key` and `value` before storing the attribute.

        Raises:
            KeyError: If `key` is not a declared model attribute.
            ValueError: If `value` violates the attribute's range constraint.
        """
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `HostGroupPerformanceByArray`".format(key))
        if value is not None:
            # All metric counters/latencies must be non-negative.
            if key in self._NON_NEGATIVE_ATTRS and value < 0:
                raise ValueError("Invalid value for `{}`, must be a value greater than or equal to `0`".format(key))
            # The cache-reduction ratio is a fraction in [0.0, 1.0].
            if key == "service_usec_per_read_op_cache_reduction":
                if value > 1.0:
                    raise ValueError("Invalid value for `service_usec_per_read_op_cache_reduction`, value must be less than or equal to `1.0`")
                if value < 0.0:
                    raise ValueError("Invalid value for `service_usec_per_read_op_cache_reduction`, must be a value greater than or equal to `0.0`")
        self.__dict__[key] = value
    def __getattribute__(self, item):
        """Return the attribute value, hiding unset Property placeholders."""
        value = object.__getattribute__(self, item)
        if isinstance(value, Property):
            # Unset model attributes are class-level Property placeholders;
            # surface them as missing so hasattr()/getattr() behave naturally.
            raise AttributeError
        else:
            return value
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            # Only attributes that were actually set are serialized.
            if hasattr(self, attr):
                value = getattr(self, attr)
                if isinstance(value, list):
                    result[attr] = list(map(
                        lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                        value
                    ))
                elif hasattr(value, "to_dict"):
                    result[attr] = value.to_dict()
                elif isinstance(value, dict):
                    result[attr] = dict(map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict") else item,
                        value.items()
                    ))
                else:
                    result[attr] = value
        if issubclass(HostGroupPerformanceByArray, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, HostGroupPerformanceByArray):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 56.625683 | 294 | 0.664608 |
935d91a6db2e6206e368a6fee42d3f90fa9ebcdd | 9,430 | py | Python | lexsubgen/datasets/wsi.py | myrachins/LexSubGen | 5fe27901fa96e51c94280d0938135907a2e5fd80 | [
"Apache-2.0"
] | 32 | 2020-11-09T04:55:30.000Z | 2022-02-20T10:10:19.000Z | lexsubgen/datasets/wsi.py | myrachins/LexSubGen | 5fe27901fa96e51c94280d0938135907a2e5fd80 | [
"Apache-2.0"
] | 3 | 2021-05-31T08:55:27.000Z | 2021-11-27T18:38:14.000Z | lexsubgen/datasets/wsi.py | myrachins/LexSubGen | 5fe27901fa96e51c94280d0938135907a2e5fd80 | [
"Apache-2.0"
] | 9 | 2020-11-28T11:19:35.000Z | 2022-02-20T12:29:15.000Z | import logging
import os
from itertools import chain
from pathlib import Path
from typing import List, Set, Dict, Tuple
from xml.etree import ElementTree
from word_forms.word_forms import get_word_forms
import nltk
import pandas as pd
from nltk.corpus import wordnet as wn
from lexsubgen.datasets.lexsub import DatasetReader
from lexsubgen.datasets.utils import download_dataset
from lexsubgen.utils.register import CACHE_DIR
from lexsubgen.utils.wsi import SEMEVAL2013URL, SEMEVAL2010URL, SEMEVAL2010TESTURL
# Module-wide logger named after this source file; emit INFO and above.
logger = logging.getLogger(Path(__file__).name)
logger.setLevel(logging.INFO)
def wsi_logging_info(_logger):
    """
    Decorates read_dataset methods for WSI readers.
    Adds logging information about the read dataset.

    The decorated method must return a tuple
    ``(dataset DataFrame, gold labels, gold labels path)`` and be defined on
    an object exposing a ``dataset_name`` attribute.

    Args:
        _logger: an object that allows to call .info method

    Returns: decorator for read_dataset method
    """
    # Local import keeps the module's public import surface unchanged.
    from functools import wraps

    def decorator(wsi_reader_method):
        # functools.wraps preserves the wrapped method's __name__/__doc__,
        # which the previous plain wrapper lost.
        @wraps(wsi_reader_method)
        def wrapper(self, *args, **kwargs):
            _logger.info(f"Reading {self.dataset_name} dataset")
            df, gold_labels, gold_labels_path = wsi_reader_method(self, *args, **kwargs)
            # Log message kept byte-identical (including the historical
            # "ambigious" spelling) so downstream log parsing is unaffected.
            _logger.info(f"Reading done. {self.dataset_name} dataset "
                         f"contains {len(df)} lines, "
                         f"{len(df.group_by.unique())} ambigious words "
                         f"and following {df.pos_tag.unique()} POS tags")
            return df, gold_labels, gold_labels_path
        return wrapper
    return decorator
class WSIDatasetReader(DatasetReader):
    """Common base for Word Sense Induction (WSI) dataset readers.

    Provides the shared cache location, the column layout of the resulting
    dataset DataFrame and a parser for SemEval-style gold-key files.
    """

    # All WSI datasets are cached below this directory.
    data_root_path = Path(CACHE_DIR) / "wsi"
    # Column layout shared by every reader's resulting DataFrame.
    df_columns = ['context_id', 'group_by', 'target_lemma', 'pos_tag', 'sentence', 'target_id']

    def __init__(self, dataset_name: str, data_root_path: str, url: str):
        """Initialize the reader.

        Args:
            dataset_name: human-readable dataset identifier.
            data_root_path: directory the dataset is (down)loaded into.
            url: remote location the dataset is fetched from.
        """
        super(WSIDatasetReader, self).__init__(
            dataset_name=dataset_name,
            data_root_path=data_root_path,
            url=url
        )

    @staticmethod
    def read_gold_labels(gold_labels_path: str) -> Dict:
        """Parse a gold-key file into a ``{context_id: cluster_label}`` dict.

        Each line is expected to be space-separated as
        ``<lemma> <context_id> <label> [...]``; only the first label is kept.

        Args:
            gold_labels_path: path to the gold-key file.

        Returns:
            Mapping from context id to its (first) gold cluster label.
        """
        gold_labels = dict()
        # Pin the encoding so parsing does not depend on the platform's
        # locale default (the previous code used the implicit default).
        with open(gold_labels_path, 'r', encoding='utf-8') as f:
            for instance in f:
                _, context_id, *clusters = instance.strip().split(" ")
                # TODO: gold labels might consist of more than 1 cluster label
                gold_labels[context_id] = clusters[0]
        return gold_labels
class SemEval2013DatasetReader(WSIDatasetReader):
    """Reader for the SemEval-2013 task 13 test set (senseval2 XML format)."""

    dataset_name = "semeval-2013"
    # XML file with all contexts, relative to the extracted archive root.
    inner_path = (
        Path("SemEval-2013-Task-13-test-data")
        / "contexts"
        / "senseval2-format"
        / "semeval-2013-task-13-test-data.senseval2.xml"
    )
    # Gold sense labels, relative to the extracted archive root.
    gold_labels_path = (
        Path("SemEval-2013-Task-13-test-data") / "keys" / "gold" / "all.key"
    )

    def __init__(self):
        super(SemEval2013DatasetReader, self).__init__(
            dataset_name=self.dataset_name,
            data_root_path=self.data_root_path,
            url=SEMEVAL2013URL
        )

    @wsi_logging_info(logger)
    def read_dataset(self) -> Tuple[pd.DataFrame, Dict[str, str], Path]:
        """
        Reads the SemEval-2013 task 13 dataset, stored in xml format where
        for each target word its context and part-of-speech tag are given.

        Returns:
            Tuple of the dataset DataFrame (columns ``df_columns``), the
            gold labels dict and the path to the gold key file.
        """
        data_path = self.data_root_path / self.dataset_name
        gold_path = data_path / self.gold_labels_path
        gold_labels = self.read_gold_labels(gold_path)
        corpus = ElementTree.parse(data_path / self.inner_path).getroot()
        rows = []
        for lexelt in corpus:
            group_by = lexelt.attrib["item"]
            target_lemma, pos_tag = group_by.split(".")
            for instance in lexelt:
                context_id = instance.attrib["id"]
                # The gold labels file does not contain several instances
                # from the dataset; those are skipped.
                if context_id not in gold_labels:
                    continue
                context = instance[0]
                left_raw, target, right_raw = [
                    text.strip() for text in context.itertext()
                ]
                left_tokens = nltk.word_tokenize(left_raw)
                right_tokens = nltk.word_tokenize(right_raw)
                rows.append((
                    context_id,
                    group_by,
                    target_lemma,
                    pos_tag,
                    left_tokens + [target] + right_tokens,
                    len(left_tokens),
                ))
        dataset_df = pd.DataFrame(rows, columns=self.df_columns)
        # Context ids must be unique across the whole dataset.
        assert len(dataset_df.context_id.unique()) == len(dataset_df)
        return dataset_df, gold_labels, gold_path
class SemEval2010DatasetReader(WSIDatasetReader):
    """Reader for the SemEval-2010 WSI test data.

    Contexts live in per-lemma XML files under ``nouns``/``verbs``; the
    target token is located inside each target sentence by matching tokens
    against all known word forms of the target lemma.
    """
    dataset_name = "semeval-2010"
    inner_path = Path(dataset_name) / "test_data"
    gold_labels_path = (
        Path(dataset_name) / "evaluation" / "unsup_eval" / "keys" / "all.key"
    )
    # Extra word forms merged into the matching set for lemmas whose corpus
    # spelling is not produced by `get_word_forms` (presumably irregular or
    # misspelled forms observed in the corpus — confirm against the data).
    lemma2form = {
        "figure": ["figger", "figgered"],
        "straighten": ["half-straightened"],
        "lie": ["lah"],
    }
    def __init__(self, use_surrounding_context: bool = True):
        """Initialize the reader and download the test data if missing.

        Args:
            use_surrounding_context: if True, the sentences surrounding the
                target sentence are prepended/appended to the token list.
        """
        super(SemEval2010DatasetReader, self).__init__(
            dataset_name=self.dataset_name,
            data_root_path=self.data_root_path,
            url=SEMEVAL2010URL
        )
        self.use_surrounding_context = use_surrounding_context
        self.test_data_path = self.data_root_path / self.inner_path
        # The test data is shipped separately from the main archive.
        if not os.path.exists(self.test_data_path):
            download_dataset(
                SEMEVAL2010TESTURL,
                self.data_root_path / self.dataset_name
            )
    @staticmethod
    def _find_target_word_idx(tokens: List[str], word_forms: Set):
        """Return the index of the first token matching any of *word_forms*.

        A token matches when its lowercase form is in *word_forms* or any of
        its WordNet lemmas (over the v/n/a/r POS tags) is.

        Raises:
            ValueError: if no token matches.
        """
        for idx, token in enumerate(tokens):
            token_lower = token.lower()
            # wn._morphy yields candidate lemmas of the token for each POS.
            # NOTE(review): _morphy is a private NLTK API — confirm it is
            # stable across the pinned NLTK version.
            lemmas = {lemma for pos in ['v', 'n', 'a', 'r']
                      for lemma in wn._morphy(token_lower, pos)}
            if lemmas.intersection(word_forms) or token_lower in word_forms:
                return idx
        raise ValueError(f"Target word was not found {tokens}\n{word_forms}")
    @wsi_logging_info(logger)
    def read_dataset(self):
        """Read all noun/verb XML files into a single dataset DataFrame.

        Returns:
            Tuple of the dataset DataFrame (columns ``df_columns``), the
            gold labels dict and the path to the gold key file.
        """
        gold_labels = self.read_gold_labels(
            self.data_root_path / self.gold_labels_path
        )
        # One XML file per target lemma, split by part of speech.
        paths = [
            self.test_data_path / "nouns" / file
            for file in os.listdir(self.test_data_path / "nouns")
        ]
        paths.extend([
            self.test_data_path / "verbs" / file
            for file in os.listdir(self.test_data_path / "verbs")
        ])
        dataset_list = []
        for path in paths:
            corpus = ElementTree.parse(path).getroot()
            # Root tag encodes "<lemma>.<pos>.<...>".
            target_lemma, pos_tag, _ = corpus.tag.split('.')
            group_by = f"{target_lemma}.{pos_tag}"
            lemma2unknown_form = self.lemma2form.get(target_lemma.lower(), [])
            # All spellings under which the target may appear in a sentence.
            target_word_forms = {
                elem
                for _set in get_word_forms(target_lemma.lower()).values()
                for elem in chain(_set, lemma2unknown_form)
            }
            for instance in corpus:
                context_id = instance.tag
                target_sentences = [s.text.strip()
                                    for s in instance.iter("TargetSentence")]
                assert len(target_sentences) == 1, ("Something went wrong. "
                                                    "Number of Target Sentences should be 1")
                target_sentence = target_sentences[0]
                # itertext() yields the optional left context, the target
                # sentence and the optional right context, in order.
                surrounding_context = [s.strip() for s in instance.itertext()]
                if len(surrounding_context) == 3 and surrounding_context[1] == target_sentence:
                    # ['left', 'target', 'right']
                    left_context, right_context = surrounding_context[0], surrounding_context[2]
                elif len(surrounding_context) == 2 and surrounding_context[0] == target_sentence:
                    # ['target', 'right']
                    left_context, right_context = "", surrounding_context[1]
                elif len(surrounding_context) == 2 and surrounding_context[1] == target_sentence:
                    # ['left', 'target']
                    left_context, right_context = surrounding_context[0], ""
                elif len(surrounding_context) == 1 and surrounding_context[0] == target_sentence:
                    # ['target']
                    left_context, right_context = "", ""
                else:
                    # NOTE(review): this message mirrors the assert above but
                    # the actual failure here is an unexpected itertext()
                    # layout, not a sentence count.
                    raise ValueError(
                        "Something went wrong. Number of Target Sentences should be 1"
                    )
                tokens = target_sentence.split()
                target_idx = self._find_target_word_idx(
                    tokens, target_word_forms
                )
                if self.use_surrounding_context:
                    # Prepend/append context tokens and shift the target index
                    # by the length of the left context.
                    l, r = left_context.split(), right_context.split()
                    target_idx += len(l)
                    tokens = l + tokens + r
                dataset_list.append((
                    context_id, group_by, target_lemma, pos_tag, tokens, target_idx
                ))
        dataset_df = pd.DataFrame(dataset_list, columns=self.df_columns)
        # Context ids must be unique across the whole dataset.
        assert len(dataset_df.context_id.unique()) == len(dataset_df)
        return dataset_df, gold_labels, self.data_root_path / self.gold_labels_path
| 39.957627 | 97 | 0.601909 |
8dc6f7ee910faaae53a81fc83cb47ab4d390a849 | 12,214 | py | Python | skcriteria/preprocessing/scalers.py | elcolie/scikit-criteria | 216674d699b60d68fefa98d44afd619943f3bb00 | [
"BSD-3-Clause"
] | null | null | null | skcriteria/preprocessing/scalers.py | elcolie/scikit-criteria | 216674d699b60d68fefa98d44afd619943f3bb00 | [
"BSD-3-Clause"
] | null | null | null | skcriteria/preprocessing/scalers.py | elcolie/scikit-criteria | 216674d699b60d68fefa98d44afd619943f3bb00 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# License: BSD-3 (https://tldrlegal.com/license/bsd-3-clause-license-(revised))
# Copyright (c) 2016-2021, Cabral, Juan; Luczywo, Nadia
# All rights reserved.
# =============================================================================
# DOCS
# =============================================================================
"""Functionalities for scale values based on differrent strategies.
In addition to the Transformers, a collection of an MCDA agnostic functions
are offered to scale an array along an arbitrary axis.
"""
# =============================================================================
# IMPORTS
# =============================================================================
import numpy as np
from numpy import linalg
from ..core import SKCMatrixAndWeightTransformerABC
from ..utils import doc_inherit
# =============================================================================
# STANDAR SCALER
# =============================================================================
def scale_by_stdscore(arr, axis=None):
    r"""Transform the values into standard scores (z-scores).

    Every value is centered by the mean and divided by the standard
    deviation computed over *axis*:

    .. math::

        z = (x - \mu) / \sigma

    Parameters
    ----------
    arr: :py:class:`numpy.ndarray` like.
        A array with values
    axis : :py:class:`int` optional
        Axis along which to operate. By default, flattened input is used.

    Returns
    -------
    :py:class:`numpy.ndarray`
        array of standard scores

    Examples
    --------
    .. code-block:: pycon

        >>> from skcriteria.preprocess import scale_by_stdscore
        >>> mtx = [[1, 2], [3, 4]]

        # scores against the whole array
        >>> scale_by_stdscore(mtx)
        array([[-1.34164079, -0.4472136 ],
               [ 0.4472136 ,  1.34164079]])

        # scores per column
        >>> scale_by_stdscore(mtx, axis=0)
        array([[-1., -1.],
               [ 1.,  1.]])

        # scores per row
        >>> scale_by_stdscore(mtx, axis=1)
        array([[-1.,  1.],
               [-1.,  1.]])
    """
    values = np.asarray(arr, dtype=float)
    # keepdims=True keeps the reduced axis so broadcasting lines up.
    center = np.mean(values, axis=axis, keepdims=True)
    spread = np.std(values, axis=axis, keepdims=True)
    return (values - center) / spread
class StandarScaler(SKCMatrixAndWeightTransformerABC):
    """Standardize the dm by removing the mean and scaling to unit variance.

    The standard score of a sample `x` is calculated as:

        z = (x - u) / s

    where `u` is the mean of the values and `s` their standard deviation.
    """
    # NOTE(review): the class name is missing a "d" ("StandardScaler") but it
    # is public API, so it is left unchanged here.
    @doc_inherit(SKCMatrixAndWeightTransformerABC._transform_weights)
    def _transform_weights(self, weights):
        # Weights form a single vector -> standardize over all entries.
        return scale_by_stdscore(weights, axis=None)
    @doc_inherit(SKCMatrixAndWeightTransformerABC._transform_matrix)
    def _transform_matrix(self, matrix):
        # axis=0: each criterion (column) is standardized independently.
        return scale_by_stdscore(matrix, axis=0)
# =============================================================================
# VECTOR SCALER
# =============================================================================
def scale_by_vector(arr, axis=None):
    r"""Divide the array by norm of values defined vector along an axis.

    Calculates the set of ratios as the square roots of the sum of squared
    responses of a given axis as denominators. If *axis* is *None* sum all
    the array.

    .. math::

        \overline{X}_{ij} =
        \frac{X_{ij}}{\sqrt{\sum\limits_{j=1}^m X_{ij}^{2}}}

    Parameters
    ----------
    arr: :py:class:`numpy.ndarray` like.
        A array with values
    axis : :py:class:`int` optional
        Axis along which to operate. By default, flattened input is used.

    Returns
    -------
    :py:class:`numpy.ndarray`
        array of ratios

    Examples
    --------
    .. code-block:: pycon

        >>> from skcriteria.preprocess import scale_by_vector
        >>> mtx = [[1, 2], [3, 4]]

        # ratios with the vector value of the array
        >>> scale_by_vector(mtx)
        array([[ 0.18257418,  0.36514837],
               [ 0.54772252,  0.73029673]])

        # ratios by column
        >>> scale_by_vector(mtx, axis=0)
        array([[ 0.31622776,  0.44721359],
               [ 0.94868326,  0.89442718]])

        # ratios by row
        >>> scale_by_vector(mtx, axis=1)
        array([[ 0.44721359,  0.89442718],
               [ 0.60000002,  0.80000001]])
    """
    arr = np.asarray(arr, dtype=float)
    # keepdims=True preserves the reduced axis so the division broadcasts
    # against the axis that was normed over. Without it (the previous
    # behavior) the axis=1 case divided the rows by column-aligned norms,
    # contradicting the documented examples above.
    frob = linalg.norm(arr, None, axis=axis, keepdims=True)
    return arr / frob
class VectorScaler(SKCMatrixAndWeightTransformerABC):
    r"""Scaler based on the norm of the vector.

    .. math::

        \overline{X}_{ij} =
        \frac{X_{ij}}{\sqrt{\sum\limits_{j=1}^m X_{ij}^{2}}}

    If the scaler is configured to work with 'matrix' each value
    of each criteria is divided by the norm of the vector defined by the
    values of that criteria.
    On the other hand, if it is configured to work with 'weights',
    each weight is divided by the norm of the vector defined by all the
    weights.
    """
    @doc_inherit(SKCMatrixAndWeightTransformerABC._transform_weights)
    def _transform_weights(self, weights):
        # Weights form a single vector -> one norm over all entries.
        return scale_by_vector(weights, axis=None)
    @doc_inherit(SKCMatrixAndWeightTransformerABC._transform_matrix)
    def _transform_matrix(self, matrix):
        # axis=0: each criterion (column) is scaled by its own norm.
        return scale_by_vector(matrix, axis=0)
# =============================================================================
# MINMAX
# =============================================================================
def scale_by_minmax(arr, axis=None):
    r"""Express every value as a fraction of the range along an axis.

    Each value has the minimum subtracted and is then divided by the total
    range:

    .. math::

        \overline{X}_{ij} =
        \frac{X_{ij} - \min{X_{ij}}}{\max_{X_{ij}} - \min_{X_{ij}}}

    Parameters
    ----------
    arr: :py:class:`numpy.ndarray` like.
        A array with values
    axis : :py:class:`int` optional
        Axis along which to operate. By default, flattened input is used.

    Returns
    -------
    :py:class:`numpy.ndarray`
        array of ratios

    Examples
    --------
    .. code-block:: pycon

        >>> from skcriteria.preprocess import scale_by_minmax
        >>> mtx = [[1, 2], [3, 4]]

        # ratios with the range of the array
        >>> scale_by_minmax(mtx)
        array([[0.        , 0.33333333],
               [0.66666667, 1.        ]])

        # ratios with the range by column
        >>> scale_by_minmax(mtx, axis=0)
        array([[0., 0.],
               [1., 1.]])

        # ratios with the range by row
        >>> scale_by_minmax(mtx, axis=1)
        array([[0., 1.],
               [0., 1.]])
    """
    values = np.asarray(arr, dtype=float)
    # keepdims=True keeps the reduced axis so broadcasting lines up.
    lowest = np.min(values, axis=axis, keepdims=True)
    highest = np.max(values, axis=axis, keepdims=True)
    return (values - lowest) / (highest - lowest)
class MinMaxScaler(SKCMatrixAndWeightTransformerABC):
    r"""Scaler based on the range.

    .. math::

        \overline{X}_{ij} =
        \frac{X_{ij} - \min{X_{ij}}}{\max_{X_{ij}} - \min_{X_{ij}}}

    If the scaler is configured to work with 'matrix', each value of each
    criterion is rescaled by the range of that criterion.
    On the other hand, if it is configured to work with 'weights',
    each weight is rescaled by the range of all the weights.
    """
    @doc_inherit(SKCMatrixAndWeightTransformerABC._transform_weights)
    def _transform_weights(self, weights):
        # Weights form a single vector -> one range over all entries.
        return scale_by_minmax(weights, axis=None)
    @doc_inherit(SKCMatrixAndWeightTransformerABC._transform_matrix)
    def _transform_matrix(self, matrix):
        # axis=0: each criterion (column) is rescaled by its own range.
        return scale_by_minmax(matrix, axis=0)
# =============================================================================
# SUM
# =============================================================================
def scale_by_sum(arr, axis=None):
    r"""Express every value as its share of the sum along an axis.

    .. math::

        \overline{X}_{ij} = \frac{X_{ij}}{\sum\limits_{j=1}^m X_{ij}}

    Parameters
    ----------
    arr: :py:class:`numpy.ndarray` like.
        A array with values
    axis : :py:class:`int` optional
        Axis along which to operate. By default, flattened input is used.

    Returns
    -------
    :py:class:`numpy.ndarray`
        array of ratios

    Examples
    --------
    .. code-block:: pycon

        >>> from skcriteria.preprocess import scale_by_sum
        >>> mtx = [[1, 2], [3, 4]]

        >>> scale_by_sum(mtx)  # ratios with the sum of the array
        array([[ 0.1       ,  0.2       ],
               [ 0.30000001,  0.40000001]])

        # ratios with the sum of the array by column
        >>> scale_by_sum(mtx, axis=0)
        array([[ 0.25      ,  0.33333334],
               [ 0.75      ,  0.66666669]])

        # ratios with the sum of the array by row
        >>> scale_by_sum(mtx, axis=1)
        array([[ 0.33333334,  0.66666669],
               [ 0.42857143,  0.5714286 ]])
    """
    values = np.asarray(arr, dtype=float)
    # keepdims=True keeps the reduced axis so broadcasting lines up.
    totals = values.sum(axis=axis, keepdims=True)
    return values / totals
class SumScaler(SKCMatrixAndWeightTransformerABC):
    r"""Scaler based on the total sum of values.

    .. math::

        \overline{X}_{ij} = \frac{X_{ij}}{\sum\limits_{j=1}^m X_{ij}}

    If the scaler is configured to work with 'matrix', each value of each
    criterion is divided by the total sum of all the values of that
    criterion.
    On the other hand, if it is configured to work with 'weights',
    each weight is divided by the total sum of all the weights.
    """
    @doc_inherit(SKCMatrixAndWeightTransformerABC._transform_weights)
    def _transform_weights(self, weights):
        # Weights form a single vector -> one sum over all entries.
        return scale_by_sum(weights, axis=None)
    @doc_inherit(SKCMatrixAndWeightTransformerABC._transform_matrix)
    def _transform_matrix(self, matrix):
        # axis=0: each criterion (column) is divided by its own sum.
        return scale_by_sum(matrix, axis=0)
# =============================================================================
# MAX
# =============================================================================
def scale_by_max(arr, axis=None):
    r"""Express every value as a ratio of the maximum along an axis.

    .. math::

        \overline{X}_{ij} = \frac{X_{ij}}{\max_{X_{ij}}}

    Parameters
    ----------
    arr: :py:class:`numpy.ndarray` like.
        A array with values
    axis : :py:class:`int` optional
        Axis along which to operate. By default, flattened input is used.

    Returns
    -------
    :py:class:`numpy.ndarray`
        array of ratios

    Examples
    --------
    .. code-block:: pycon

        >>> from skcriteria.preprocess import scale_by_max
        >>> mtx = [[1, 2], [3, 4]]

        # ratios with the max value of the array
        >>> scale_by_max(mtx)
        array([[ 0.25,  0.5 ],
               [ 0.75,  1.  ]])

        # ratios with the max value of the arr by column
        >>> scale_by_max(mtx, axis=0)
        array([[ 0.33333334,  0.5],
               [ 1.        ,  1. ]])

        # ratios with the max value of the array by row
        >>> scale_by_max(mtx, axis=1)
        array([[ 0.5 ,  1.],
               [ 0.75,  1.]])
    """
    values = np.asarray(arr, dtype=float)
    # keepdims=True keeps the reduced axis so broadcasting lines up.
    peaks = values.max(axis=axis, keepdims=True)
    return values / peaks
class MaxScaler(SKCMatrixAndWeightTransformerABC):
    r"""Scaler based on the maximum values.

    .. math::

        \overline{X}_{ij} = \frac{X_{ij}}{\max_{X_{ij}}}

    If the scaler is configured to work with 'matrix', each value of each
    criterion is divided by the maximum value of that criterion.
    On the other hand, if it is configured to work with 'weights',
    each weight is divided by the maximum value of all the weights.
    """
    @doc_inherit(SKCMatrixAndWeightTransformerABC._transform_weights)
    def _transform_weights(self, weights):
        # Weights form a single vector -> one maximum over all entries.
        return scale_by_max(weights, axis=None)
    @doc_inherit(SKCMatrixAndWeightTransformerABC._transform_matrix)
    def _transform_matrix(self, matrix):
        # axis=0: each criterion (column) is divided by its own maximum.
        return scale_by_max(matrix, axis=0)
| 29.150358 | 79 | 0.5542 |
18c9b055a1010eb67b7ea25cecdf00477f1de106 | 12,875 | py | Python | easyp2p/ui/Ui_main_window.py | Ceystyle/easyp2p | 99c32e3ec0ff5a34733f157dd1b53d1aa9bc9edc | [
"MIT"
] | 4 | 2019-07-18T10:58:28.000Z | 2021-11-18T16:57:45.000Z | easyp2p/ui/Ui_main_window.py | Ceystyle/easyp2p | 99c32e3ec0ff5a34733f157dd1b53d1aa9bc9edc | [
"MIT"
] | 1 | 2019-07-05T09:21:47.000Z | 2019-07-05T09:21:47.000Z | easyp2p/ui/Ui_main_window.py | Ceystyle/easyp2p | 99c32e3ec0ff5a34733f157dd1b53d1aa9bc9edc | [
"MIT"
] | 2 | 2019-07-05T08:56:34.000Z | 2020-06-09T10:03:42.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2018-2020 Niko Sandschneider
# Form implementation generated from reading ui file 'easyp2p/ui/main_window.ui'
#
# Created by: PyQt5 UI code generator 5.15.0
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(736, 510)
MainWindow.setContextMenuPolicy(QtCore.Qt.DefaultContextMenu)
MainWindow.setWindowTitle("easyp2p")
MainWindow.setDocumentMode(False)
self.centralWidget = QtWidgets.QWidget(MainWindow)
self.centralWidget.setObjectName("centralWidget")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.centralWidget)
self.verticalLayout_2.setObjectName("verticalLayout_2")
spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_2.addItem(spacerItem)
self.group_box_platform_top = QtWidgets.QGroupBox(self.centralWidget)
self.group_box_platform_top.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
self.group_box_platform_top.setAlignment(QtCore.Qt.AlignCenter)
self.group_box_platform_top.setObjectName("group_box_platform_top")
self.gridLayout_4 = QtWidgets.QGridLayout(self.group_box_platform_top)
self.gridLayout_4.setObjectName("gridLayout_4")
self.verticalLayout_3 = QtWidgets.QVBoxLayout()
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.group_box_platforms = QtWidgets.QGroupBox(self.group_box_platform_top)
self.group_box_platforms.setTitle("")
self.group_box_platforms.setObjectName("group_box_platforms")
self.gridLayout_3 = QtWidgets.QGridLayout(self.group_box_platforms)
self.gridLayout_3.setObjectName("gridLayout_3")
self.gridLayout_2 = QtWidgets.QGridLayout()
self.gridLayout_2.setSpacing(6)
self.gridLayout_2.setObjectName("gridLayout_2")
self.check_box_grupeer = QtWidgets.QCheckBox(self.group_box_platforms)
self.check_box_grupeer.setText("Grupeer")
self.check_box_grupeer.setShortcut("")
self.check_box_grupeer.setObjectName("check_box_grupeer")
self.gridLayout_2.addWidget(self.check_box_grupeer, 6, 0, 1, 1)
self.check_box_dofinance = QtWidgets.QCheckBox(self.group_box_platforms)
self.check_box_dofinance.setText("DoFinance")
self.check_box_dofinance.setShortcut("")
self.check_box_dofinance.setObjectName("check_box_dofinance")
self.gridLayout_2.addWidget(self.check_box_dofinance, 2, 0, 1, 1)
self.check_box_iuvo = QtWidgets.QCheckBox(self.group_box_platforms)
self.check_box_iuvo.setText("Iuvo")
self.check_box_iuvo.setShortcut("")
self.check_box_iuvo.setObjectName("check_box_iuvo")
self.gridLayout_2.addWidget(self.check_box_iuvo, 7, 0, 1, 1)
self.check_box_bondora = QtWidgets.QCheckBox(self.group_box_platforms)
self.check_box_bondora.setText("Bondora")
self.check_box_bondora.setShortcut("")
self.check_box_bondora.setObjectName("check_box_bondora")
self.gridLayout_2.addWidget(self.check_box_bondora, 0, 0, 1, 1)
self.check_box_estateguru = QtWidgets.QCheckBox(self.group_box_platforms)
self.check_box_estateguru.setText("Estateguru")
self.check_box_estateguru.setShortcut("")
self.check_box_estateguru.setObjectName("check_box_estateguru")
self.gridLayout_2.addWidget(self.check_box_estateguru, 4, 0, 1, 1)
self.check_box_mintos = QtWidgets.QCheckBox(self.group_box_platforms)
self.check_box_mintos.setText("Mintos")
self.check_box_mintos.setShortcut("")
self.check_box_mintos.setObjectName("check_box_mintos")
self.gridLayout_2.addWidget(self.check_box_mintos, 8, 0, 1, 1)
self.check_box_peerberry = QtWidgets.QCheckBox(self.group_box_platforms)
self.check_box_peerberry.setText("PeerBerry")
self.check_box_peerberry.setShortcut("")
self.check_box_peerberry.setObjectName("check_box_peerberry")
self.gridLayout_2.addWidget(self.check_box_peerberry, 0, 1, 1, 1)
self.check_box_robocash = QtWidgets.QCheckBox(self.group_box_platforms)
self.check_box_robocash.setText("Robocash")
self.check_box_robocash.setShortcut("")
self.check_box_robocash.setObjectName("check_box_robocash")
self.gridLayout_2.addWidget(self.check_box_robocash, 2, 1, 1, 1)
self.check_box_swaper = QtWidgets.QCheckBox(self.group_box_platforms)
self.check_box_swaper.setText("Swaper")
self.check_box_swaper.setShortcut("")
self.check_box_swaper.setObjectName("check_box_swaper")
self.gridLayout_2.addWidget(self.check_box_swaper, 4, 1, 1, 1)
self.check_box_twino = QtWidgets.QCheckBox(self.group_box_platforms)
self.check_box_twino.setText("Twino")
self.check_box_twino.setShortcut("")
self.check_box_twino.setObjectName("check_box_twino")
self.gridLayout_2.addWidget(self.check_box_twino, 6, 1, 1, 1)
self.check_box_viventor = QtWidgets.QCheckBox(self.group_box_platforms)
self.check_box_viventor.setText("Viventor")
self.check_box_viventor.setObjectName("check_box_viventor")
self.gridLayout_2.addWidget(self.check_box_viventor, 7, 1, 1, 1)
self.check_box_viainvest = QtWidgets.QCheckBox(self.group_box_platforms)
self.check_box_viainvest.setEnabled(True)
self.check_box_viainvest.setText("Viainvest")
self.check_box_viainvest.setObjectName("check_box_viainvest")
self.gridLayout_2.addWidget(self.check_box_viainvest, 8, 1, 1, 1)
self.gridLayout_3.addLayout(self.gridLayout_2, 0, 0, 1, 1)
self.verticalLayout_3.addWidget(self.group_box_platforms)
self.check_box_select_all = QtWidgets.QCheckBox(self.group_box_platform_top)
self.check_box_select_all.setObjectName("check_box_select_all")
self.verticalLayout_3.addWidget(self.check_box_select_all)
self.gridLayout_4.addLayout(self.verticalLayout_3, 0, 0, 1, 1)
self.verticalLayout_2.addWidget(self.group_box_platform_top)
self.horizontalLayout_date_range = QtWidgets.QHBoxLayout()
self.horizontalLayout_date_range.setObjectName("horizontalLayout_date_range")
self.groupBox_start_date = QtWidgets.QGroupBox(self.centralWidget)
self.groupBox_start_date.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
self.groupBox_start_date.setAlignment(QtCore.Qt.AlignCenter)
self.groupBox_start_date.setObjectName("groupBox_start_date")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.groupBox_start_date)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.combo_box_start_month = QtWidgets.QComboBox(self.groupBox_start_date)
self.combo_box_start_month.setObjectName("combo_box_start_month")
self.horizontalLayout_3.addWidget(self.combo_box_start_month)
self.combo_box_start_year = QtWidgets.QComboBox(self.groupBox_start_date)
self.combo_box_start_year.setObjectName("combo_box_start_year")
self.horizontalLayout_3.addWidget(self.combo_box_start_year)
self.verticalLayout_4.addLayout(self.horizontalLayout_3)
self.horizontalLayout_date_range.addWidget(self.groupBox_start_date)
self.groupBox_end_date = QtWidgets.QGroupBox(self.centralWidget)
self.groupBox_end_date.setAlignment(QtCore.Qt.AlignCenter)
self.groupBox_end_date.setObjectName("groupBox_end_date")
self.horizontalLayout_5 = QtWidgets.QHBoxLayout(self.groupBox_end_date)
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.combo_box_end_month = QtWidgets.QComboBox(self.groupBox_end_date)
self.combo_box_end_month.setObjectName("combo_box_end_month")
self.horizontalLayout_4.addWidget(self.combo_box_end_month)
self.combo_box_end_year = QtWidgets.QComboBox(self.groupBox_end_date)
self.combo_box_end_year.setObjectName("combo_box_end_year")
self.horizontalLayout_4.addWidget(self.combo_box_end_year)
self.horizontalLayout_5.addLayout(self.horizontalLayout_4)
self.horizontalLayout_date_range.addWidget(self.groupBox_end_date)
self.verticalLayout_2.addLayout(self.horizontalLayout_date_range)
self.groupBox_5 = QtWidgets.QGroupBox(self.centralWidget)
self.groupBox_5.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
self.groupBox_5.setAlignment(QtCore.Qt.AlignCenter)
self.groupBox_5.setObjectName("groupBox_5")
self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.groupBox_5)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.line_edit_output_file = QtWidgets.QLineEdit(self.groupBox_5)
self.line_edit_output_file.setReadOnly(True)
self.line_edit_output_file.setObjectName("line_edit_output_file")
self.horizontalLayout_6.addWidget(self.line_edit_output_file)
self.push_button_file_chooser = QtWidgets.QPushButton(self.groupBox_5)
self.push_button_file_chooser.setObjectName("push_button_file_chooser")
self.horizontalLayout_6.addWidget(self.push_button_file_chooser)
self.verticalLayout_5.addLayout(self.horizontalLayout_6)
self.verticalLayout_2.addWidget(self.groupBox_5)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.push_button_start = QtWidgets.QPushButton(self.centralWidget)
self.push_button_start.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
self.push_button_start.setObjectName("push_button_start")
self.horizontalLayout.addWidget(self.push_button_start)
self.tool_button_settings = QtWidgets.QToolButton(self.centralWidget)
self.tool_button_settings.setText("...")
self.tool_button_settings.setObjectName("tool_button_settings")
self.horizontalLayout.addWidget(self.tool_button_settings)
self.verticalLayout_2.addLayout(self.horizontalLayout)
MainWindow.setCentralWidget(self.centralWidget)
self.menuBar = QtWidgets.QMenuBar(MainWindow)
self.menuBar.setGeometry(QtCore.QRect(0, 0, 736, 30))
self.menuBar.setObjectName("menuBar")
self.menuLanguage = QtWidgets.QMenu(self.menuBar)
self.menuLanguage.setObjectName("menuLanguage")
MainWindow.setMenuBar(self.menuBar)
self.action_english = QtWidgets.QAction(MainWindow)
self.action_english.setCheckable(True)
self.action_english.setChecked(True)
self.action_english.setObjectName("action_english")
self.action_german = QtWidgets.QAction(MainWindow)
self.action_german.setCheckable(True)
self.action_german.setEnabled(True)
self.action_german.setObjectName("action_german")
self.menuLanguage.addAction(self.action_english)
self.menuLanguage.addAction(self.action_german)
self.menuBar.addAction(self.menuLanguage.menuAction())
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
    """Apply the translated, user-visible strings to every widget.

    All display text is routed through Qt's translate() so it can be
    swapped at runtime via installed translators.
    """
    _tr = QtCore.QCoreApplication.translate
    # (setter, source text) pairs, applied in the original declaration order.
    display_texts = (
        (self.group_box_platform_top.setTitle,
         "For which P2P platforms should the results be loaded?"),
        (self.check_box_select_all.setText, "Select/deselect all"),
        (self.groupBox_start_date.setTitle, "Start date"),
        (self.groupBox_end_date.setTitle, "End date"),
        (self.groupBox_5.setTitle, "Where should the results be saved?"),
        (self.push_button_file_chooser.setText, "Choose File"),
        (self.push_button_start.setText, "Start evaluation"),
        (self.menuLanguage.setTitle, "&Language"),
        (self.action_english.setText, "&English"),
        (self.action_german.setText, "&German"),
    )
    for apply_text, source_text in display_texts:
        apply_text(_tr("MainWindow", source_text))
| 62.198068 | 127 | 0.75301 |
507cb7e7b0bdd8c2784f4bbb2ea85bda2d43880d | 3,178 | py | Python | blog/blog/settings.py | edgardarbeni/curso-induccion-django | e95416690c1ca7fd1e62b481e0ffb57eabf6f8e9 | [
"MIT"
] | null | null | null | blog/blog/settings.py | edgardarbeni/curso-induccion-django | e95416690c1ca7fd1e62b481e0ffb57eabf6f8e9 | [
"MIT"
] | 6 | 2021-03-19T02:55:17.000Z | 2021-09-22T19:05:25.000Z | blog/blog/settings.py | edgardarbeni/curso-induccion-django | e95416690c1ca7fd1e62b481e0ffb57eabf6f8e9 | [
"MIT"
] | null | null | null | """
Django settings for blog project.
Generated by 'django-admin startproject' using Django 3.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load it
# from the environment before any production deployment.
SECRET_KEY = 'l_8l%yqdv$kwrqqx^9935=%yt*!i1^(4(5q^$kaz^507m$2u30'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Empty list: with DEBUG=True Django only serves localhost-style hosts.
ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'weblog',  # the project's blog application
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'blog.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level templates live in <repo>/templates alongside any
        # app-level template directories (APP_DIRS=True).
        'DIRS': [os.path.join(BASE_DIR, "templates")],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'blog.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases

# Development default: file-backed SQLite next to the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/

STATIC_URL = '/static/'
# Target directory for `collectstatic` when deploying.
STATIC_ROOT = os.path.join(BASE_DIR, "static")
| 25.837398 | 91 | 0.695091 |
5cafa32fe2ebad6ae37070befe15306c16a301f8 | 3,512 | py | Python | mglearn/plot_grid_search.py | data-ml/advanced_training | 84d78b60161d08b0f212a14f10f80c6bdc346998 | [
"BSD-2-Clause"
] | 132 | 2016-06-06T17:30:23.000Z | 2021-11-16T13:51:36.000Z | mglearn/plot_grid_search.py | afcarl/advanced_training | 1ef9246e2f70b82295bb3c4dc9a283e32fd427fb | [
"BSD-2-Clause"
] | 1 | 2017-03-08T19:49:13.000Z | 2017-03-08T19:55:03.000Z | mglearn/plot_grid_search.py | afcarl/advanced_training | 1ef9246e2f70b82295bb3c4dc9a283e32fd427fb | [
"BSD-2-Clause"
] | 47 | 2016-06-07T09:39:22.000Z | 2021-09-01T01:45:44.000Z | import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import SVC
try:
from sklearn.model_selection import GridSearchCV, train_test_split
except:
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import train_test_split
from sklearn.datasets import load_iris
def plot_cross_val_selection():
    """Run a small SVC grid search on the iris data and visualise, for a
    subset of the candidate parameter settings, the five per-fold scores,
    their mean, and the best-scoring setting.

    NOTE: relies on ``GridSearchCV.grid_scores_`` (pre-0.18 scikit-learn
    API), consistent with the try/except import fallback at module level.
    """
    dataset = load_iris()
    X_train, _X_heldout, y_train, _y_heldout = train_test_split(
        dataset.data, dataset.target, random_state=0)
    grid = {'C': [0.001, 0.01, 0.1, 1, 10, 100],
            'gamma': [0.001, 0.01, 0.1, 1, 10, 100]}
    search = GridSearchCV(SVC(), grid, cv=5)
    search.fit(X_train, y_train)

    # Plot only the tail of the candidate list to keep the figure readable.
    candidates = search.grid_scores_[15:]
    winner = np.argmax([c.mean_validation_score for c in candidates])

    plt.figure(figsize=(10, 3))
    plt.xlim(-1, len(candidates))
    plt.ylim(0, 1.1)
    for idx, candidate in enumerate(candidates):
        # Per-fold scores (small triangles) and their mean (large triangle);
        # the handles from the final iteration feed the legend below.
        marker_cv, = plt.plot([idx] * 5, candidate.cv_validation_scores, '^',
                              c='gray', markersize=5, alpha=.5)
        marker_mean, = plt.plot(idx, candidate.mean_validation_score, 'v',
                                c='none', alpha=1, markersize=10)
        if idx == winner:
            # Circle the winning parameter combination.
            marker_best, = plt.plot(idx, candidate.mean_validation_score, 'o',
                                    c='red', fillstyle="none", alpha=1,
                                    markersize=20, markeredgewidth=3)
    tick_labels = [str(c.parameters).strip("{}").replace("'", "")
                   for c in candidates]
    plt.xticks(range(len(candidates)), tick_labels, rotation=90)
    plt.ylabel("validation accuracy")
    plt.xlabel("parameter settings")
    plt.legend([marker_cv, marker_mean, marker_best],
               ["cv accuracy", "mean accuracy", "best parameter setting"],
               loc=(1.05, .4))
def plot_grid_search_overview():
    """Draw a box-and-arrow overview of the grid-search workflow:

    data set -> training/test data -> cross validation -> best parameters
    -> retrained model -> final evaluation.

    Fixes over the previous version: removed dead commented-out code and
    unused local bindings; renamed the cryptic ``grr`` coordinate to ``top``.
    The boxes and arrows drawn (and their order) are unchanged.
    """
    plt.figure(figsize=(10, 3))
    axes = plt.gca()
    axes.yaxis.set_visible(False)
    axes.xaxis.set_visible(False)
    axes.set_frame_on(False)

    def draw(ax, text, start, target=None):
        # Draw a rounded box containing *text* at *start* (axes-pixel
        # coordinates). If *target* is a previously drawn annotation, also
        # draw an arrow from the new box to it. Returns the annotation so
        # later boxes can point at this one.
        if target is not None:
            patchB = target.get_bbox_patch()
            end = target.get_position()
        else:
            end = start
            patchB = None
        annotation = ax.annotate(
            text, end, start, xycoords='axes pixels',
            textcoords='axes pixels', size=20,
            arrowprops=dict(arrowstyle="-|>", fc="w", ec="k", patchB=patchB,
                            connectionstyle="arc3,rad=0.0"),
            bbox=dict(boxstyle="round", fc="w"),
            horizontalalignment="center", verticalalignment="center")
        plt.draw()
        return annotation

    step = 100
    top = 400  # y origin in axes pixels; boxes sit at offsets below it
    # Boxes are created sink-first so every arrow can point at an existing box.
    final_evaluation = draw(axes, "final evaluation", (5 * step, top - 3 * step))
    retrained_model = draw(axes, "retrained model", (3 * step, top - 3 * step),
                           final_evaluation)
    best_parameters = draw(axes, "best parameters", (.5 * step, top - 3 * step),
                           retrained_model)
    cross_validation = draw(axes, "cross validation", (.5 * step, top - 2 * step),
                            best_parameters)
    draw(axes, "parameter grid", (0.0, top - 0), cross_validation)
    training_data = draw(axes, "training data", (2 * step, top - step),
                         cross_validation)
    # Training data feeds both cross validation and the retrained model.
    draw(axes, "training data", (2 * step, top - step), retrained_model)
    test_data = draw(axes, "test data", (5 * step, top - step), final_evaluation)
    # The data set splits into both training and test data.
    draw(axes, "data set", (3.5 * step, top - 0.0), training_data)
    draw(axes, "data set", (3.5 * step, top - 0.0), test_data)
    plt.ylim(0, 1)
    plt.xlim(0, 1.5)
| 46.210526 | 142 | 0.61959 |
0a0b45c86afc92462c684ffed94ebc8ca5a4dac2 | 27,591 | py | Python | improver_tests/nowcasting/accumulation/test_Accumulation.py | owena11/improver | ff658db31364c0bdf3af3940736c260e10544705 | [
"BSD-3-Clause"
] | 77 | 2017-04-26T07:47:40.000Z | 2022-03-31T09:40:49.000Z | improver_tests/nowcasting/accumulation/test_Accumulation.py | owena11/improver | ff658db31364c0bdf3af3940736c260e10544705 | [
"BSD-3-Clause"
] | 1,440 | 2017-03-29T10:04:15.000Z | 2022-03-28T10:11:29.000Z | improver_tests/nowcasting/accumulation/test_Accumulation.py | MoseleyS/improver | ca028e3a1c842e3ff00b188c8ea6eaedd0a07149 | [
"BSD-3-Clause"
] | 72 | 2017-03-17T16:53:45.000Z | 2022-02-16T09:41:37.000Z | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2021 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
""" Unit tests for the nowcasting.Accumulation plugin """
import datetime
import unittest
import iris
import numpy as np
from cf_units import Unit
from iris.tests import IrisTest
from improver.nowcasting.accumulation import Accumulation
from improver.synthetic_data.set_up_test_cubes import set_up_variable_cube
from improver.utilities.warnings_handler import ManageWarnings
class rate_cube_set_up(IrisTest):
    """Set up a sequence of precipitation rates cubes for use in testing the
    accumulation plugin functionality."""

    def setUp(self):
        """Set up 11 precipitation rate cubes offset by 1 minute (spanning 10
        minutes), with a shower moving across the array from left to right
        (west to east). A uniform rate of precipitation covers the eastern half
        of the domain. Lighter precipitation covers the north-west quadrant,
        and there is no precipitation in the south-western quadrant. This
        shower is advected eastwards by the optical flow at a uniform rate over
        the period considered. Beyond the western boundary there is no
        precipitation, so precipitation stops in the west as the shower is
        advected east.

        A mask covers two cells towards the centre of the domain to simulate a
        radar quality mask.

        Accumulations are greatest in the right-hand array columns and smallest
        in the left. All accumulations to which a masked cell has contributed
        are returned as masked; note that in the arrays of expected values in
        the tests below those cells that are expected to be masked are given
        a value of np.nan.
        """
        ncells = 10
        # Rates equivalent to 5.4 and 1.8 mm/hr
        rates = np.ones((ncells)) * 5.4
        rates[0 : ncells // 2] = 1.8  # lighter rain in the western half
        rates = rates / 3600.0  # convert mm/hr to mm/s
        datalist = []
        for i in range(ncells):
            # Four identical rows, rolled i cells east to advect the shower.
            data = np.vstack([rates] * 4)
            data = np.roll(data, i, axis=1)
            try:
                # Zero the western columns the shower has already passed:
                # northern rows keep light rain longer than southern rows.
                data[0:2, :i] = 0
                data[2:, : i + ncells // 2] = 0
            except IndexError:
                pass
            # Two-cell quality mask (rows 1-2) that travels with the shower.
            mask = np.zeros((4, ncells))
            mask[1:3, ncells // 2 + i : ncells // 2 + i + 1] = 1
            data = np.ma.MaskedArray(data, mask=mask, dtype=np.float32)
            datalist.append(data)
        # Final, eleventh field: zero rates everywhere and nothing masked.
        datalist.append(
            np.ma.MaskedArray(
                np.zeros((4, ncells)), mask=np.zeros((4, ncells)), dtype=np.float32
            )
        )
        name = "lwe_precipitation_rate"
        units = "mm s-1"
        self.cubes = iris.cube.CubeList()
        for index, data in enumerate(datalist):
            # One cube per minute from 04:00, all sharing the 04:00 forecast
            # reference time.
            cube = set_up_variable_cube(
                data,
                name=name,
                units=units,
                spatial_grid="equalarea",
                time=datetime.datetime(2017, 11, 10, 4, index),
                frt=datetime.datetime(2017, 11, 10, 4, 0),
            )
            self.cubes.append(cube)
        return self.cubes
"""Test class initialisation"""
def test_default(self):
"""Test the default accumulation_units are set when not specified."""
plugin = Accumulation()
self.assertEqual(plugin.accumulation_units, "m")
self.assertEqual(plugin.accumulation_period, None)
def test_units_set(self):
"""Test the accumulation_units are set when specified."""
plugin = Accumulation(accumulation_units="cm")
self.assertEqual(plugin.accumulation_units, "cm")
def test_accumulation_period_set(self):
"""Test the accumulation_period is set when specified."""
plugin = Accumulation(accumulation_period=180)
self.assertEqual(plugin.accumulation_period, 180)
def test_forecast_period_set(self):
"""Test the forecast_period is set when specified."""
plugin = Accumulation(forecast_periods=[60, 120])
self.assertListEqual(plugin.forecast_periods, [60, 120])
class Test__repr__(IrisTest):
"""Test class representation"""
def test_basic(self):
"""Test string representation"""
result = str(
Accumulation(
accumulation_units="cm",
accumulation_period=60,
forecast_periods=[60, 120],
)
)
expected_result = (
"<Accumulation: accumulation_units=cm, "
"accumulation_period=60s, "
"forecast_periods=[60, 120]s>"
)
self.assertEqual(result, expected_result)
class Test_sort_cubes_by_time(rate_cube_set_up):
    """Tests the input cubes are sorted in time ascending order."""

    @staticmethod
    def _time_points(cubes):
        # First (only) time-coordinate point of each cube, in list order.
        return [cube.coord("time").points[0] for cube in cubes]

    def test_returns_cubelist(self):
        """A cubelist is returned."""
        result, _ = Accumulation.sort_cubes_by_time(self.cubes)
        self.assertIsInstance(result, iris.cube.CubeList)

    def test_reorders(self):
        """A cubelist supplied in reverse order comes back time-ascending."""
        ascending = self._time_points(self.cubes)
        self.cubes = self.cubes[::-1]
        descending = self._time_points(self.cubes)
        result, _ = Accumulation.sort_cubes_by_time(self.cubes)
        result_times = self._time_points(result)
        self.assertIsInstance(result, iris.cube.CubeList)
        self.assertEqual(result_times, ascending)
        self.assertNotEqual(result_times, descending)

    def test_times(self):
        """The returned times are the sorted validity times of the inputs,
        as seconds since epoch, one minute apart."""
        expected = [1510286400 + 60 * index for index in range(11)]
        _, times = Accumulation.sort_cubes_by_time(self.cubes)
        self.assertArrayEqual(times, expected)
class Test__check_inputs(rate_cube_set_up):
    """Test the _check_inputs method."""

    def _expected(self):
        # Expected outputs of _check_inputs: the input cubes converted to SI
        # rate units (m/s) plus the 60 s spacing between them. convert_units
        # operates in place, so this must run before _check_inputs is called,
        # mirroring the ordering of the original assertions.
        cubes = self.cubes.copy()
        for cube in cubes:
            cube.convert_units("m/s")
        return cubes, 60

    def test_basic(self):
        """With no options set, the cubes come back in m/s and the time
        interval between them is 60 s."""
        expected_cubes, expected_interval = self._expected()
        cubes, time_interval = Accumulation()._check_inputs(self.cubes)
        self.assertEqual(cubes, expected_cubes)
        self.assertEqual(time_interval, expected_interval)

    def test_specify_accumulation_period(self):
        """Specifying an accumulation period leaves the converted cubes and
        interval unchanged, and the period is retained on the plugin."""
        expected_cubes, expected_interval = self._expected()
        accumulation_period = 60 * 60
        plugin = Accumulation(accumulation_period=accumulation_period)
        cubes, time_interval = plugin._check_inputs(self.cubes)
        self.assertEqual(cubes, expected_cubes)
        self.assertEqual(time_interval, expected_interval)
        self.assertEqual(plugin.accumulation_period, accumulation_period)

    def test_specify_forecast_period(self):
        """Specifying forecast periods leaves the converted cubes and
        interval unchanged, and the periods are retained on the plugin."""
        expected_cubes, expected_interval = self._expected()
        forecast_periods = [600]
        plugin = Accumulation(forecast_periods=forecast_periods)
        cubes, time_interval = plugin._check_inputs(self.cubes)
        self.assertEqual(cubes, expected_cubes)
        self.assertEqual(time_interval, expected_interval)
        self.assertEqual(plugin.forecast_periods, forecast_periods)

    def test_specify_accumulation_period_and_forecast_period(self):
        """Specifying both options still yields the converted cubes and the
        60 s time interval."""
        expected_cubes, expected_interval = self._expected()
        plugin = Accumulation(
            accumulation_period=20 * 60, forecast_periods=np.array([15]) * 60
        )
        cubes, time_interval = plugin._check_inputs(self.cubes)
        self.assertEqual(cubes, expected_cubes)
        self.assertEqual(time_interval, expected_interval)

    def test_raises_exception_for_unevenly_spaced_cubes(self):
        """An irregular time gap between the rates cubes is rejected."""
        # Shift the final cube a minute later, breaking the regular spacing.
        self.cubes[-1].coord("time").points = (
            self.cubes[-1].coord("time").points + 60
        )
        msg = (
            "Accumulation is designed to work with rates "
            "cubes at regular time intervals."
        )
        plugin = Accumulation(accumulation_period=120)
        with self.assertRaisesRegex(ValueError, msg):
            plugin.process(self.cubes)

    def test_raises_exception_for_small_accumulation_period(self):
        """Two cubes ten minutes apart cannot support a five minute
        accumulation period, so a ValueError is raised."""
        msg = (
            "The accumulation_period is less than the time interval "
            "between the rates cubes. The rates cubes provided are "
            "therefore insufficient for computing the accumulation period "
            "requested."
        )
        sparse_cubes = iris.cube.CubeList([self.cubes[0], self.cubes[-1]])
        plugin = Accumulation(
            accumulation_period=5 * 60, forecast_periods=np.array([5]) * 60
        )
        with self.assertRaisesRegex(ValueError, msg):
            plugin.process(sparse_cubes)

    def test_raises_exception_for_impossible_aggregation(self):
        """An accumulation period that cannot be built from whole numbers of
        the input cubes' time interval is rejected."""
        plugin = Accumulation(accumulation_period=119)
        msg = "The specified accumulation period "
        with self.assertRaisesRegex(ValueError, msg):
            plugin._check_inputs(self.cubes)
class Test__get_cube_subsets(rate_cube_set_up):
    """Test the _get_cube_subsets method."""

    def test_basic(self):
        """A five-minute accumulation period ending five minutes into the
        forecast should select the first six rates cubes."""
        expected = self.cubes[:6]
        upper_bound_fp = self.cubes[5].coord("forecast_period").points[0]
        plugin = Accumulation(
            accumulation_period=5 * 60, forecast_periods=np.array([5]) * 60
        )
        result = plugin._get_cube_subsets(self.cubes, upper_bound_fp)
        self.assertEqual(expected, result)
class Test__calculate_accumulation(rate_cube_set_up):
    """Test the _calculate_accumulation method."""

    def test_basic(self):
        """Check the calculations of the accumulations, where an accumulation
        is computed by finding the mean rate between each adjacent pair of
        cubes within the cube_subset and multiplying this mean rate by the
        time_interval, in order to compute an accumulation. In this case,
        as the cube_subset only contains a pair of cubes, then the
        accumulation from this pair will be the same as the total accumulation.
        """
        # Expected one-minute accumulations (mm); np.nan marks the cells the
        # radar-quality mask is expected to leave masked in the output.
        expected_t0 = np.array(
            [
                [0.015, 0.03, 0.03, 0.03, 0.03, 0.06, 0.09, 0.09, 0.09, 0.09],
                [0.015, 0.03, 0.03, 0.03, 0.03, np.nan, np.nan, 0.09, 0.09, 0.09],
                [0.0, 0.0, 0.0, 0.0, 0.0, np.nan, np.nan, 0.09, 0.09, 0.09],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.045, 0.09, 0.09, 0.09, 0.09],
            ]
        )
        expected_mask_t0 = np.array(
            [
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 1, 1, 0, 0, 0],
                [0, 0, 0, 0, 0, 1, 1, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            ]
        )
        time_interval = 60
        result = Accumulation()._calculate_accumulation(self.cubes[:2], time_interval)
        self.assertArrayAlmostEqual(result, expected_t0)
        self.assertArrayAlmostEqual(result.mask, expected_mask_t0)
class Test__set_metadata(rate_cube_set_up):
    """Test the _set_metadata method."""

    def test_basic(self):
        """Check that the metadata is set as expected: an accumulation name
        and unit, time/forecast_period coordinates bounded over the full
        04:00-04:10 input span, and a "sum over time" cell method."""
        expected_name = "lwe_thickness_of_precipitation_amount"
        expected_units = Unit("m")
        expected_time_point = [datetime.datetime(2017, 11, 10, 4, 10)]
        expected_time_bounds = [
            (
                datetime.datetime(2017, 11, 10, 4, 0),
                datetime.datetime(2017, 11, 10, 4, 10),
            )
        ]
        expected_fp_point = 600
        expected_fp_bounds = [[0, 600]]
        expected_cell_method = iris.coords.CellMethod("sum", coords="time")
        result = Accumulation()._set_metadata(self.cubes)
        self.assertEqual(result.name(), expected_name)
        self.assertEqual(result.units, expected_units)
        # Compare time coordinate via its cells so points/bounds come back as
        # datetimes rather than raw numeric values.
        points = [value.point for value in result.coord("time").cells()]
        bounds = [value.bound for value in result.coord("time").cells()]
        self.assertEqual(points, expected_time_point)
        self.assertArrayAlmostEqual(
            result.coord("forecast_period").points, expected_fp_point
        )
        self.assertEqual(bounds, expected_time_bounds)
        self.assertArrayAlmostEqual(
            result.coord("forecast_period").bounds, expected_fp_bounds
        )
        self.assertEqual(result.cell_methods[0], expected_cell_method)
class Test_process(rate_cube_set_up):
    """Tests the process method results in the expected outputs."""

    def setUp(self):
        """Set up forecast periods used for testing."""
        super().setUp()
        # One candidate forecast period per possible accumulation end point,
        # i.e. the validity times of all but the first rates cube.
        self.forecast_periods = [
            cube.coord("forecast_period").points for cube in self.cubes[1:]
        ]

    def test_returns_cubelist(self):
        """Test function returns a cubelist."""
        plugin = Accumulation(
            accumulation_period=60, forecast_periods=self.forecast_periods
        )
        result = plugin.process(self.cubes)
        self.assertIsInstance(result, iris.cube.CubeList)

    def test_accumulation_length(self):
        """Test to check that the length of the accumulation period is
        consistent across all output cubes. Only complete periods are
        required."""
        accumulation_length = 120
        plugin = Accumulation(
            accumulation_period=accumulation_length,
            forecast_periods=self.forecast_periods,
        )
        result = plugin.process(self.cubes)
        for cube in result:
            # The width of the forecast_period bounds equals the period.
            self.assertEqual(
                np.diff(cube.coord("forecast_period").bounds), accumulation_length
            )

    def test_returns_masked_cubes(self):
        """Test function returns a list of masked cubes for masked input
        data."""
        result = Accumulation(forecast_periods=[600]).process(self.cubes)
        self.assertIsInstance(result[0].data, np.ma.MaskedArray)

    def test_default_output_units(self):
        """Test the function returns accumulations in the default units if no
        units are explicitly set, where the default is metres."""
        # Multiply the rates in mm/s by 60 to get accumulation over 1 minute
        # and divide by 1000 to get into metres.
        expected = self.cubes[0].copy(
            data=(0.5 * (self.cubes[0].data + self.cubes[1].data) * 60 / 1000)
        )
        plugin = Accumulation(
            accumulation_period=60, forecast_periods=self.forecast_periods
        )
        result = plugin.process(self.cubes)
        self.assertEqual(result[0].units, "m")
        self.assertArrayAlmostEqual(result[0].data, expected.data)

    def test_default_altered_output_units(self):
        """Test the function returns accumulations in the specified units if
        they are explicitly set. Here the units are set to mm."""
        # Multiply the rates in mm/s by 60 to get accumulation over 1 minute
        expected = self.cubes[0].copy(
            data=(0.5 * (self.cubes[0].data + self.cubes[1].data) * 60)
        )
        plugin = Accumulation(
            accumulation_units="mm",
            accumulation_period=60,
            forecast_periods=self.forecast_periods,
        )
        result = plugin.process(self.cubes)
        self.assertEqual(result[0].units, "mm")
        self.assertArrayAlmostEqual(result[0].data, expected.data)

    @ManageWarnings(
        ignored_messages=["The provided cubes result in a"], warning_types=[UserWarning]
    )
    def test_does_not_use_incomplete_period_data(self):
        """Test function returns only 2 accumulation periods when a 4 minute
        aggregation period is used with 10 minutes of input data. The trailing
        2 cubes are insufficient to create another period and so are discarded.
        A warning is raised by the chunking function and has been tested above,
        so is ignored here.
        """
        plugin = Accumulation(accumulation_period=240, forecast_periods=[240, 480])
        result = plugin.process(self.cubes)
        self.assertEqual(len(result), 2)

    def test_returns_expected_values_5_minutes(self):
        """Test function returns the expected accumulations over a 5 minute
        aggregation period. These are written out long hand to make the
        comparison easy. Check that the number of accumulation cubes returned
        is the expected number."""
        # np.nan marks cells expected to be masked in the output.
        expected_t0 = np.array(
            [
                [0.015, 0.045, 0.075, 0.105, 0.135, 0.18, 0.24, 0.3, 0.36, 0.42],
                [0.015, 0.045, 0.075, 0.105, 0.135,
                 np.nan, np.nan, np.nan, np.nan, np.nan],
                [0.0, 0.0, 0.0, 0.0, 0.0, np.nan, np.nan, np.nan, np.nan, np.nan],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.045, 0.135, 0.225, 0.315, 0.405],
            ]
        )
        expected_t1 = np.array(
            [
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.015, 0.045, 0.075, 0.105, 0.135],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.015, 0.045, 0.075, 0.105, 0.135],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            ]
        )
        expected_mask_t0 = np.array(
            [
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
                [0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            ]
        )
        expected_mask_t1 = np.array(
            [
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            ]
        )
        plugin = Accumulation(
            accumulation_period=300,
            accumulation_units="mm",
            forecast_periods=[300, 600],
        )
        result = plugin.process(self.cubes)
        self.assertArrayAlmostEqual(result[0].data, expected_t0)
        self.assertArrayAlmostEqual(result[1].data, expected_t1)
        self.assertArrayAlmostEqual(result[0].data.mask, expected_mask_t0)
        self.assertArrayAlmostEqual(result[1].data.mask, expected_mask_t1)
        self.assertEqual(len(result), 2)

    def test_returns_expected_values_10_minutes(self):
        """Test function returns the expected accumulations over the complete
        10 minute aggregation period. These are written out long hand to make
        the comparison easy. Note that the test have been constructed such that
        only the top row is expected to show a difference by including the last
        5 minutes of the accumulation, all the other results are the same as
        for the 5 minute test above. Check that the number of accumulation
        cubes returned is the expected number."""
        expected_t0 = np.array(
            [
                [0.015, 0.045, 0.075, 0.105, 0.135, 0.195, 0.285, 0.375, 0.465, 0.555],
                [0.015, 0.045, 0.075, 0.105, 0.135,
                 np.nan, np.nan, np.nan, np.nan, np.nan],
                [0.0, 0.0, 0.0, 0.0, 0.0, np.nan, np.nan, np.nan, np.nan, np.nan],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.045, 0.135, 0.225, 0.315, 0.405],
            ]
        )
        expected_mask_t0 = np.array(
            [
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
                [0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            ]
        )
        plugin = Accumulation(
            accumulation_period=600, accumulation_units="mm", forecast_periods=[600]
        )
        result = plugin.process(self.cubes)
        self.assertArrayAlmostEqual(result[0].data, expected_t0)
        self.assertArrayAlmostEqual(result[0].data.mask, expected_mask_t0)
        self.assertEqual(len(result), 1)

    @ManageWarnings(
        ignored_messages=["The provided cubes result in a"], warning_types=[UserWarning]
    )
    def test_returns_total_accumulation_if_no_period_specified(self):
        """Test function returns a list containing a single accumulation cube
        that is the accumulation over the whole period specified by the rates
        cubes. The results are the same as the 10 minute test above as that is
        the total span of the input rates cubes. Check that the number of
        accumulation cubes returned is the expected number."""
        expected_t0 = np.array(
            [
                [0.015, 0.045, 0.075, 0.105, 0.135, 0.195, 0.285, 0.375, 0.465, 0.555],
                [0.015, 0.045, 0.075, 0.105, 0.135,
                 np.nan, np.nan, np.nan, np.nan, np.nan],
                [0.0, 0.0, 0.0, 0.0, 0.0, np.nan, np.nan, np.nan, np.nan, np.nan],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.045, 0.135, 0.225, 0.315, 0.405],
            ]
        )
        expected_mask_t0 = np.array(
            [
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
                [0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            ]
        )
        plugin = Accumulation(accumulation_units="mm")
        result = plugin.process(self.cubes)
        self.assertArrayAlmostEqual(result[0].data, expected_t0)
        self.assertArrayAlmostEqual(result[0].data.mask, expected_mask_t0)
        self.assertEqual(len(result), 1)

    def test_returns_expected_values_1_minute(self):
        """Test function returns the expected accumulations over a 1 minute
        aggregation period. Check that the number of accumulation cubes
        returned is the expected number."""
        expected_t0 = np.array(
            [
                [0.015, 0.03, 0.03, 0.03, 0.03, 0.06, 0.09, 0.09, 0.09, 0.09],
                [0.015, 0.03, 0.03, 0.03, 0.03, np.nan, np.nan, 0.09, 0.09, 0.09],
                [0.0, 0.0, 0.0, 0.0, 0.0, np.nan, np.nan, 0.09, 0.09, 0.09],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.045, 0.09, 0.09, 0.09, 0.09],
            ]
        )
        # By the eighth period only the far east of the domain still has rain.
        expected_t7 = np.array(
            [
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.015, 0.03, 0.03],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.015, 0.03, 0.03],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            ]
        )
        expected_mask_t0 = np.array(
            [
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 1, 1, 0, 0, 0],
                [0, 0, 0, 0, 0, 1, 1, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            ]
        )
        expected_mask_t7 = np.array(
            [
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            ]
        )
        plugin = Accumulation(
            accumulation_period=60,
            accumulation_units="mm",
            forecast_periods=self.forecast_periods,
        )
        result = plugin.process(self.cubes)
        self.assertArrayAlmostEqual(result[0].data, expected_t0)
        self.assertArrayAlmostEqual(result[7].data, expected_t7)
        self.assertArrayAlmostEqual(result[0].data.mask, expected_mask_t0)
        self.assertArrayAlmostEqual(result[7].data.mask, expected_mask_t7)
        self.assertEqual(len(result), 10)
if __name__ == "__main__":
    # Allow the test module to be executed directly as a script.
    unittest.main()
| 39.247511 | 88 | 0.592295 |
feb8c1bce0fecc53864eb38e57f296a7fc0ec94d | 3,013 | py | Python | Sketches/THF/simplecube/simplecube.py | sparkslabs/kamaelia_orig | 24b5f855a63421a1f7c6c7a35a7f4629ed955316 | [
"Apache-2.0"
] | 12 | 2015-10-20T10:22:01.000Z | 2021-07-19T10:09:44.000Z | Sketches/THF/simplecube/simplecube.py | sparkslabs/kamaelia_orig | 24b5f855a63421a1f7c6c7a35a7f4629ed955316 | [
"Apache-2.0"
] | 2 | 2015-10-20T10:22:55.000Z | 2017-02-13T11:05:25.000Z | Sketches/THF/simplecube/simplecube.py | sparkslabs/kamaelia_orig | 24b5f855a63421a1f7c6c7a35a7f4629ed955316 | [
"Apache-2.0"
] | 6 | 2015-03-09T12:51:59.000Z | 2020-03-01T13:06:21.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pygame
from pygame.locals import *
from OpenGL.GL import *
from OpenGL.GLU import *
def main():
    """Open a 300x300 double-buffered window and render a spinning colour
    cube with immediate-mode OpenGL until the window is closed."""
    pygame.init()
    screen = pygame.display.set_mode((300,300),OPENGL|DOUBLEBUF)
    pygame.display.set_caption('Simple cube')

    # Black clear colour.
    glClearColor(0.0,0.0,0.0,0.0)

    # Depth testing so nearer faces occlude farther ones.
    glClearDepth(1.0)
    glEnable(GL_DEPTH_TEST)

    # Perspective projection: 45 degree FOV, square aspect, near/far planes.
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    gluPerspective(45.0, float(300)/float(300), 0.1, 100.0)

    # Model-view matrix starts as identity.
    glMatrixMode(GL_MODELVIEW)
    glLoadIdentity()
    pygame.display.flip()

    # (colour, four corner vertices) per face, listed in exactly the order
    # the immediate-mode calls are issued.
    faces = [
        ((1.0, 0.0, 0.0), (( 1.0,  1.0,  1.0), ( 1.0, -1.0,  1.0), (-1.0, -1.0,  1.0), (-1.0,  1.0,  1.0))),
        ((0.0, 1.0, 0.0), (( 1.0,  1.0, -1.0), ( 1.0, -1.0, -1.0), (-1.0, -1.0, -1.0), (-1.0,  1.0, -1.0))),
        ((0.0, 0.0, 1.0), (( 1.0,  1.0,  1.0), ( 1.0, -1.0,  1.0), ( 1.0, -1.0, -1.0), ( 1.0,  1.0, -1.0))),
        ((1.0, 0.0, 1.0), ((-1.0,  1.0,  1.0), (-1.0, -1.0,  1.0), (-1.0, -1.0, -1.0), (-1.0,  1.0, -1.0))),
        ((0.0, 1.0, 1.0), (( 1.0,  1.0,  1.0), (-1.0,  1.0,  1.0), (-1.0,  1.0, -1.0), ( 1.0,  1.0, -1.0))),
        ((1.0, 1.0, 0.0), (( 1.0, -1.0,  1.0), (-1.0, -1.0,  1.0), (-1.0, -1.0, -1.0), ( 1.0, -1.0, -1.0))),
    ]

    angle = 0
    while True:
        for event in pygame.event.get():
            if event.type == QUIT:
                return

        # Clear colour and depth buffers for the new frame.
        glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)
        angle += 0.1

        # Push the cube back along -z and spin it about the (1,1,1) axis.
        glPushMatrix()
        glTranslate(0.0, 0.0, -15.0)
        glRotate(angle, 1.0, 1.0, 1.0)

        glBegin(GL_QUADS)
        for colour, corners in faces:
            glColor3f(*colour)
            for vertex in corners:
                glVertex3f(*vertex)
        glEnd()
        glPopMatrix()

        glFlush()
        pygame.display.flip()

if __name__=='__main__': main()
| 27.390909 | 78 | 0.589446 |
78b783081ac77e8fa07bf48d3e801ec1a1692452 | 81,793 | py | Python | nlp/models/nezha.py | zhihao-chen/NLP-experiments | c7512276050f5b8489adb4c745fa970ea8119646 | [
"MIT"
] | 4 | 2021-11-10T03:49:28.000Z | 2022-03-24T02:18:44.000Z | nlp/models/nezha.py | zhihao-chen/NLP-experiments | c7512276050f5b8489adb4c745fa970ea8119646 | [
"MIT"
] | null | null | null | nlp/models/nezha.py | zhihao-chen/NLP-experiments | c7512276050f5b8489adb4c745fa970ea8119646 | [
"MIT"
] | 1 | 2021-11-14T18:01:18.000Z | 2021-11-14T18:01:18.000Z | # -*- coding: utf8 -*-
"""
======================================
Project Name: NLP
File Name: nezha
Author: czh
Create Date: 2021/8/10
--------------------------------------
Change Activity:
======================================
"""
import math
import os
import re
import logging
from abc import ABC
from typing import Callable, Dict, Optional, Union
import torch
from torch import nn
import torch.nn.functional as func
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.configuration_utils import PretrainedConfig
from transformers.modeling_utils import (
PreTrainedModel,
prune_linear_layer,
unwrap_model
)
from transformers.models.bert.modeling_bert import (
BertOutput,
BertPooler,
BertSelfOutput,
BertIntermediate,
BertOnlyMLMHead,
BertOnlyNSPHead,
BertPreTrainingHeads,
BERT_START_DOCSTRING,
BERT_INPUTS_DOCSTRING,
)
from transformers.file_utils import (
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
WEIGHTS_NAME,
cached_path,
hf_bucket_url,
is_offline_mode,
is_remote_url,
add_start_docstrings,
add_start_docstrings_to_model_forward
)
# Module-level logger used by the weight-loading/saving helpers below.
logger = logging.getLogger(__name__)
# Names injected into generated docstrings by transformers' doc decorators.
_CONFIG_FOR_DOC = "NeZhaConfig"
_TOKENIZER_FOR_DOC = "NeZhaTokenizer"
# Checkpoint registries; empty here because no pretrained NeZha checkpoints
# are registered through this module.
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST = []
NEZHA_PRETRAINED_MODEL_ARCHIVE_MAP = {}
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
class NeZhaConfig(PretrainedConfig):
    r"""
    Configuration class to store the configuration of a NeZha model.

    NeZha differs from BERT chiefly in its position handling: instead of a
    learned absolute position embedding table it injects deterministic
    *relative* position encodings inside self-attention
    (see :class:`NeZhaSelfAttention`). ``use_relative_position`` toggles that
    mechanism and ``max_relative_position`` bounds the token distance that is
    distinguished (larger distances are clipped).

    Inherits from :class:`~transformers.PretrainedConfig`; read its
    documentation for the generic save/load machinery.

    Args:
        vocab_size (:obj:`int`, optional, defaults to 30000):
            Vocabulary size — the number of distinct token ids accepted by
            the model's ``input_ids``.
        embedding_size (:obj:`int`, optional, defaults to 128):
            Dimensionality of the vocabulary embeddings.
            (Stored on the config; not read by the modules visible in this
            file, which embed directly at ``hidden_size``.)
        hidden_size (:obj:`int`, optional, defaults to 4096):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (:obj:`int`, optional, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_hidden_groups (:obj:`int`, optional, defaults to 1):
            Number of groups of layers sharing parameters (carried over from
            the ALBERT-style config; stored but not used by the modules in
            this file).
        num_attention_heads (:obj:`int`, optional, defaults to 64):
            Number of attention heads for each attention layer.
        intermediate_size (:obj:`int`, optional, defaults to 16384):
            Dimensionality of the feed-forward ("intermediate") layer.
        inner_group_num (:obj:`int`, optional, defaults to 1):
            Number of inner repetitions of attention and FFN (ALBERT-style
            carry-over; stored but not used here).
        hidden_act (:obj:`str` or :obj:`function`, optional, defaults to "gelu_new"):
            Non-linear activation in the encoder and pooler.
        hidden_dropout_prob (:obj:`float`, optional, defaults to 0):
            Dropout probability for fully connected layers in embeddings,
            encoder and pooler.
        attention_probs_dropout_prob (:obj:`float`, optional, defaults to 0):
            Dropout ratio applied to the attention probabilities.
        max_position_embeddings (:obj:`int`, optional, defaults to 512):
            Maximum sequence length; sizes the precomputed relative-position
            encoding buffer.
        max_relative_position (:obj:`int`, optional, defaults to 64):
            Relative distances beyond this value are clipped.
        type_vocab_size (:obj:`int`, optional, defaults to 2):
            Vocabulary size of ``token_type_ids``.
        initializer_range (:obj:`float`, optional, defaults to 0.02):
            Stddev of the normal initializer for weight matrices.
        layer_norm_eps (:obj:`float`, optional, defaults to 1e-12):
            Epsilon used by the LayerNorm layers.
        classifier_dropout_prob (:obj:`float`, optional, defaults to 0.1):
            Dropout ratio for attached classification heads.
        use_relative_position (:obj:`bool`, optional, defaults to True):
            Whether to use NeZha's functional relative position encoding.

    Attributes:
        pretrained_config_archive_map (Dict[str, str]):
            Registry of available pretrained checkpoints (empty here).
    """
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"
    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        use_relative_position=True,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_relative_position = use_relative_position
        self.classifier_dropout_prob = classifier_dropout_prob
def load_tf_weights_in_nezha(model, tf_checkpoint_path):
    """Load a TensorFlow NeZha checkpoint into a PyTorch ``model``.

    Walks every variable of the TF checkpoint, maps its slash-separated name
    onto the PyTorch module attribute hierarchy, and copies the tensor in
    place. Optimizer-state variables (Adam/LAMB slots, step counters, loss
    scale) are skipped. Requires TensorFlow and NumPy at call time.

    Returns the same ``model`` with its parameters overwritten.
    Raises ``ImportError`` if TensorFlow is not installed, and
    ``AssertionError`` if a checkpoint tensor's shape does not match the
    destination parameter.
    """
    try:
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        # logger.info("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)
    for name, array in zip(names, arrays):
        name = name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
        # which are not required for using pretrained model
        if any(
            n in ["adam_v", "adam_m", "lamb_m", "lamb_v", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1",
                  "global_step", "good_steps", "loss_scale", 'bad_steps']
            for n in name
        ):
            logger.info("Skipping {}".format("/".join(name)))
            continue
        # Resolve the slash-separated TF scope path to a PyTorch attribute,
        # translating TF naming (kernel/gamma/beta) to PyTorch (weight/bias).
        pointer = model
        for m_name in name:
            # Scopes like "layer_3" split into the attribute name and index.
            if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
                scope_names = re.split(r"_(\d+)", m_name)
            else:
                scope_names = [m_name]
            if scope_names[0] == "kernel" or scope_names[0] == "gamma":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
                pointer = getattr(pointer, "bias")
            elif scope_names[0] == "output_weights":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "squad":
                pointer = getattr(pointer, "classifier")
            else:
                try:
                    pointer = getattr(pointer, scope_names[0])
                except AttributeError:
                    logger.info("Skipping {}".format("/".join(name)))
                    # NOTE(review): this `continue` only skips the current path
                    # segment, not the whole variable — mirrors the upstream
                    # transformers loader, but worth confirming it is intended.
                    continue
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]
        if m_name[-11:] == "_embeddings":
            # Embedding scopes point at the module; descend to its weight.
            pointer = getattr(pointer, "weight")
        elif m_name == "kernel":
            # TF dense kernels are (in, out); PyTorch Linear stores (out, in).
            array = np.transpose(array)
        try:
            assert (
                pointer.shape == array.shape
            ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        logger.info("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array)
    return model
class NeZhaEmbeddings(nn.Module):
    """Token + segment embeddings for NeZha, followed by LayerNorm/dropout.

    Unlike BERT there is no absolute position embedding table here: NeZha
    encodes position relatively inside self-attention.
    """

    def __init__(self, config):
        super().__init__()
        self.use_relative_position = config.use_relative_position
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        # Name kept capitalised so TensorFlow checkpoint variables still map.
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids=None, token_type_ids=None, inputs_embeds=None):
        # Either token ids or precomputed embeddings may be supplied.
        if input_ids is not None:
            shape, device = input_ids.size(), input_ids.device
        else:
            shape, device = inputs_embeds.size()[:-1], inputs_embeds.device

        if token_type_ids is None:
            # Default: everything belongs to segment 0.
            token_type_ids = torch.zeros(shape, dtype=torch.long, device=device)
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        summed = inputs_embeds + self.token_type_embeddings(token_type_ids)
        return self.dropout(self.LayerNorm(summed))
class RelativePositionsEncoding(nn.Module):
    """Precomputed sinusoidal encodings of clipped pairwise token distances.

    Builds a ``(length, length, depth)`` buffer whose entry ``[i, j]`` is the
    standard transformer sinusoid for
    ``clamp(j - i, -max_relative_position, max_relative_position)`` shifted
    into the non-negative bucket range.
    """

    def __init__(self, length, depth, max_relative_position=127):
        super().__init__()
        num_buckets = 2 * max_relative_position + 1

        # Signed pairwise distances j - i, clipped and shifted to [0, num_buckets).
        positions = torch.arange(length)
        distances = positions.unsqueeze(0) - positions.unsqueeze(1)
        buckets = torch.clamp(distances, -max_relative_position, max_relative_position) + max_relative_position

        # Sinusoid table over buckets: sin on even dims, cos on odd dims.
        table = torch.zeros(num_buckets, depth)
        pos = torch.arange(0, num_buckets, dtype=torch.float).unsqueeze(1)
        freq = torch.exp(torch.arange(0, depth, 2).float() * (-math.log(10000.0) / depth))
        table[:, 0::2] = torch.sin(pos * freq)
        table[:, 1::2] = torch.cos(pos * freq)

        # Gather one encoding per (i, j) pair. This is numerically identical
        # to the one-hot matmul formulation but never materialises the
        # one-hot matrix.
        self.register_buffer('positions_encoding', table[buckets])

    def forward(self, length):
        # Return the precomputed encodings for the leading length x length block.
        return self.positions_encoding[:length, :length, :]
class NeZhaSelfAttention(nn.Module):
    """Multi-head (self- or cross-) attention with NeZha's functional
    relative-position encodings added both to the attention scores and to
    the attended context values."""

    def __init__(self, config):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads)
            )
        self.output_attentions = config.output_attentions
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        # Deterministic sinusoidal encodings of pairwise token distances,
        # reused for both the key-side and value-side position terms below.
        self.relative_positions_encoding = RelativePositionsEncoding(length=config.max_position_embeddings,
                                                                     depth=self.attention_head_size,
                                                                     max_relative_position=config.max_relative_position)

    def transpose_for_scores(self, x):
        # (batch, seq, all_head) -> (batch, heads, seq, head_size)
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        mixed_query_layer = self.query(hidden_states)
        # If this is instantiated as a cross-attention module, the keys
        # and values come from an encoder; the attention mask needs to be
        # such that the encoder's padding tokens are not attended to.
        if encoder_hidden_states is not None:
            mixed_key_layer = self.key(encoder_hidden_states)
            mixed_value_layer = self.value(encoder_hidden_states)
            attention_mask = encoder_attention_mask
        else:
            mixed_key_layer = self.key(hidden_states)
            mixed_value_layer = self.value(hidden_states)
        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)
        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        batch_size, num_attention_heads, from_seq_length, to_seq_length = attention_scores.size()
        # Key-side relative position term: for each query position, add
        # q . R(i, j) where R is the sinusoidal encoding of distance (j - i).
        relations_keys = self.relative_positions_encoding(to_seq_length)
        # Reshape queries to (from_seq, batch*heads, head_size) so each query
        # row can be matmul'd against its row of relative encodings.
        query_layer_t = query_layer.permute(2, 0, 1, 3)
        query_layer_r = query_layer_t.contiguous().view(from_seq_length, batch_size * num_attention_heads,
                                                        self.attention_head_size)
        key_position_scores = torch.matmul(query_layer_r, relations_keys.permute(0, 2, 1))
        key_position_scores_r = key_position_scores.view(from_seq_length, batch_size,
                                                         num_attention_heads, from_seq_length)
        key_position_scores_r_t = key_position_scores_r.permute(1, 2, 0, 3)
        attention_scores = attention_scores + key_position_scores_r_t
        # Scale by sqrt(d_k) as in standard scaled dot-product attention.
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
            attention_scores = attention_scores + attention_mask
        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)
        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask
        context_layer = torch.matmul(attention_probs, value_layer)
        # Value-side relative position term: weight the relative encodings by
        # the attention probabilities and add them to the context.
        relations_values = self.relative_positions_encoding(to_seq_length)
        attention_probs_t = attention_probs.permute(2, 0, 1, 3)
        attentions_probs_r = attention_probs_t.contiguous().view(from_seq_length, batch_size * num_attention_heads,
                                                                 to_seq_length)
        value_position_scores = torch.matmul(attentions_probs_r, relations_values)
        value_position_scores_r = value_position_scores.view(from_seq_length, batch_size,
                                                             num_attention_heads, self.attention_head_size)
        value_position_scores_r_t = value_position_scores_r.permute(1, 2, 0, 3)
        context_layer = context_layer + value_position_scores_r_t
        # Merge heads back: (batch, heads, seq, head_size) -> (batch, seq, all_head).
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)
        outputs = (context_layer, attention_probs) if self.output_attentions else (context_layer,)
        return outputs
class NeZhaAttention(nn.Module):
    """Attention sublayer: NeZha relative-position self-attention followed by
    the standard BERT residual projection + LayerNorm (``BertSelfOutput``)."""

    def __init__(self, config):
        super().__init__()
        self.self = NeZhaSelfAttention(config)
        self.output = BertSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        """Remove the given attention heads in place; heads pruned on an
        earlier call are silently ignored."""
        if len(heads) == 0:
            return
        keep = torch.ones(self.self.num_attention_heads, self.self.attention_head_size)
        heads = set(heads) - self.pruned_heads  # drop already-pruned heads
        for head in heads:
            # Indices shift left by the number of previously pruned heads
            # that sat before this one.
            offset = sum(1 for h in self.pruned_heads if h < head)
            keep[head - offset] = 0
        keep = keep.view(-1).contiguous().eq(1)
        index = torch.arange(len(keep))[keep].long()

        # Rebuild the projections without the pruned rows/columns.
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Record the new geometry and remember which heads are gone.
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        attn_outputs = self.self(
            hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask
        )
        projected = self.output(attn_outputs[0], hidden_states)
        # Re-attach attention probabilities when the submodule returned them.
        return (projected,) + attn_outputs[1:]
class NeZhaLayer(nn.Module):
    """One transformer block: self-attention, optional cross-attention when
    configured as a decoder, then the feed-forward sublayer."""

    def __init__(self, config):
        super().__init__()
        self.attention = NeZhaAttention(config)
        self.is_decoder = config.is_decoder
        if self.is_decoder:
            self.crossattention = NeZhaAttention(config)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        attn_out = self.attention(hidden_states, attention_mask, head_mask)
        hidden = attn_out[0]
        extras = attn_out[1:]  # self-attention probabilities, if requested

        # Decoder blocks additionally attend over the encoder output.
        if self.is_decoder and encoder_hidden_states is not None:
            cross_out = self.crossattention(
                hidden, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask
            )
            hidden = cross_out[0]
            extras = extras + cross_out[1:]

        # Feed-forward sublayer with residual connection inside BertOutput.
        layer_out = self.output(self.intermediate(hidden), hidden)
        return (layer_out,) + extras
class NeZhaEncoder(nn.Module):
    """Stack of ``NeZhaLayer`` blocks, optionally collecting every layer's
    hidden states and attention maps as configured."""

    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList(NeZhaLayer(config) for _ in range(config.num_hidden_layers))

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        hidden_trace = ()
        attn_trace = ()
        for idx, block in enumerate(self.layer):
            if self.output_hidden_states:
                # Record the state *entering* this layer.
                hidden_trace = hidden_trace + (hidden_states,)
            block_out = block(
                hidden_states, attention_mask, head_mask[idx], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = block_out[0]
            if self.output_attentions:
                attn_trace = attn_trace + (block_out[1],)
        if self.output_hidden_states:
            # Append the final output so the trace has num_layers + 1 entries.
            hidden_trace = hidden_trace + (hidden_states,)

        result = (hidden_states,)
        if self.output_hidden_states:
            result = result + (hidden_trace,)
        if self.output_attentions:
            result = result + (attn_trace,)
        return result  # last-layer hidden state, (all hidden states), (all attentions)
class NeZhaPreTrainedModel(PreTrainedModel, ABC):
""" An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
config_class = NeZhaConfig
pretrained_model_archive_map = NEZHA_PRETRAINED_MODEL_ARCHIVE_MAP
load_tf_weights = load_tf_weights_in_nezha
base_model_prefix = "bert"
def _init_weights(self, module):
""" Initialize the weights """
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
def save_pretrained(
self,
save_directory: Union[str, os.PathLike],
save_config: bool = True,
state_dict: Optional[dict] = None,
save_function: Callable = torch.save,
push_to_hub: bool = False,
**kwargs,
):
if os.path.isfile(save_directory):
logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
return
os.makedirs(save_directory, exist_ok=True)
# Only save the model itself if we are using distributed training
model_to_save = unwrap_model(self)
# Attach architecture to the config
model_to_save.config.architectures = [model_to_save.__class__.__name__]
# Save the config
if save_config:
model_to_save.config.save_pretrained(save_directory)
# Save the model
if state_dict is None:
state_dict = model_to_save.state_dict()
# Handle the case where some state_dict keys shouldn't be saved
if self._keys_to_ignore_on_save is not None:
state_dict = {k: v for k, v in state_dict.items() if k not in self._keys_to_ignore_on_save}
state_dict = {k: v for k, v in state_dict.items() if 'relative_positions' not in k}
# If we save using the predefined names, we can load using `from_pretrained`
output_model_file = os.path.join(save_directory, WEIGHTS_NAME)
save_function(state_dict, output_model_file)
logger.info(f"Model weights saved in {output_model_file}")
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs):
config = kwargs.pop("config", None)
state_dict = kwargs.pop("state_dict", None)
cache_dir = kwargs.pop("cache_dir", None)
from_tf = kwargs.pop("from_tf", False)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
output_loading_info = kwargs.pop("output_loading_info", False)
local_files_only = kwargs.pop("local_files_only", False)
use_auth_token = kwargs.pop("use_auth_token", None)
revision = kwargs.pop("revision", None)
mirror = kwargs.pop("mirror", None)
from_pipeline = kwargs.pop("_from_pipeline", None)
from_auto_class = kwargs.pop("_from_auto", False)
user_agent = {"file_type": "model", "framework": "pytorch", "from_auto_class": from_auto_class}
if from_pipeline is not None:
user_agent["using_pipeline"] = from_pipeline
if is_offline_mode() and not local_files_only:
logger.info("Offline mode: forcing local_files_only=True")
local_files_only = True
# Load config if we don't provide a configuration
if not isinstance(config, PretrainedConfig):
config_path = config if config is not None else pretrained_model_name_or_path
config, model_kwargs = cls.config_class.from_pretrained(
config_path,
*model_args, # noqa
cache_dir=cache_dir,
return_unused_kwargs=True,
force_download=force_download,
resume_download=resume_download,
proxies=proxies,
local_files_only=local_files_only,
use_auth_token=use_auth_token,
revision=revision,
_from_auto=from_auto_class,
_from_pipeline=from_pipeline,
**kwargs,
)
else:
model_kwargs = kwargs
# Load model
if pretrained_model_name_or_path is not None:
pretrained_model_name_or_path = str(pretrained_model_name_or_path)
if os.path.isdir(pretrained_model_name_or_path):
if from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")):
# Load from a TF 1.0 checkpoint in priority if from_tf
archive_file = os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")
elif from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)):
# Load from a TF 2.0 checkpoint in priority if from_tf
archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)
elif os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)):
# Load from a PyTorch checkpoint
archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
else:
raise EnvironmentError(
f"Error no file named {[WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME + '.index']} found in "
f"directory {pretrained_model_name_or_path} or `from_tf` set to False."
)
elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
archive_file = pretrained_model_name_or_path
elif os.path.isfile(pretrained_model_name_or_path + ".index"):
if not from_tf:
raise ValueError(
f"We found a TensorFlow checkpoint at {pretrained_model_name_or_path + '.index'}, please set "
"from_tf to True to load from this checkpoint."
)
archive_file = pretrained_model_name_or_path + ".index"
else:
archive_file = hf_bucket_url(
pretrained_model_name_or_path,
filename=(TF2_WEIGHTS_NAME if from_tf else WEIGHTS_NAME),
revision=revision,
mirror=mirror,
)
try:
# Load from URL or cache if already cached
resolved_archive_file = cached_path(
archive_file,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
use_auth_token=use_auth_token,
user_agent=user_agent,
)
except EnvironmentError as err:
logger.error(err)
msg = (
f"Can't load weights for '{pretrained_model_name_or_path}'. Make sure that:\n\n"
f"- '{pretrained_model_name_or_path}' is a correct model identifier listed "
f"on 'https://huggingface.co/models'\n\n"
f"- or '{pretrained_model_name_or_path}' is the correct path to a directory containing a "
f"file named one of {WEIGHTS_NAME}, {TF2_WEIGHTS_NAME}, {TF_WEIGHTS_NAME}.\n\n"
)
raise EnvironmentError(msg)
if resolved_archive_file == archive_file:
logger.info(f"loading weights file {archive_file}")
else:
logger.info(f"loading weights file {archive_file} from cache at {resolved_archive_file}")
else:
resolved_archive_file = None
config.name_or_path = pretrained_model_name_or_path
# Instantiate model.
model = cls(config, *model_args, **model_kwargs)
if state_dict is None and not from_tf:
try:
state_dict = torch.load(resolved_archive_file, map_location="cpu")
except Exception:
raise OSError(
f"Unable to load weights from pytorch checkpoint file for '{pretrained_model_name_or_path}' "
f"at '{resolved_archive_file}'"
"If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True. "
)
missing_keys = []
unexpected_keys = []
error_msgs = []
if from_tf:
if resolved_archive_file.endswith(".index"):
# Load from a TensorFlow 1.X checkpoint - provided by original authors
model = cls.load_tf_weights(model, config) # Remove the '.index'
else:
# Load from our TensorFlow 2.0 checkpoints
try:
from .modeling_tf_pytorch_utils import load_tf2_checkpoint_in_pytorch_model
model = load_tf2_checkpoint_in_pytorch_model(model, resolved_archive_file, allow_missing_keys=True)
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. "
"Please see https://pytorch.org/ and https://www.tensorflow.org/install/ for "
"installation instructions."
)
raise
else:
# Convert old format to new format if needed from a PyTorch state_dict
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if "gamma" in key:
new_key = key.replace("gamma", "weight")
if "beta" in key:
new_key = key.replace("beta", "bias")
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, "_metadata", None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
# PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants
# so we need to apply the function recursively.
def load(module: nn.Module, prefix=""):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict,
prefix,
local_metadata,
True,
missing_keys,
unexpected_keys,
error_msgs,
)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + ".")
# Make sure we are able to load base models as well as derived models (with heads)
start_prefix = ""
model_to_load = model
has_prefix_module = any(s.startswith(cls.base_model_prefix) for s in state_dict.keys())
if not hasattr(model, cls.base_model_prefix) and has_prefix_module:
start_prefix = cls.base_model_prefix + "."
if hasattr(model, cls.base_model_prefix) and not has_prefix_module:
model_to_load = getattr(model, cls.base_model_prefix)
load(model_to_load, prefix=start_prefix)
if model.__class__.__name__ != model_to_load.__class__.__name__:
base_model_state_dict = model_to_load.state_dict().keys()
head_model_state_dict_without_base_prefix = [
key.split(cls.base_model_prefix + ".")[-1] for key in model.state_dict().keys()
]
missing_keys.extend(head_model_state_dict_without_base_prefix - base_model_state_dict)
# Some models may have keys that are not in the state by design, removing them before needlessly warning
# the user.
if cls._keys_to_ignore_on_load_missing is not None:
for pat in cls._keys_to_ignore_on_load_missing:
missing_keys = [k for k in missing_keys if re.search(pat, k) is None]
if cls._keys_to_ignore_on_load_unexpected is not None:
for pat in cls._keys_to_ignore_on_load_unexpected:
unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None]
if len(unexpected_keys) > 0:
unexpected_keys = [k for k in unexpected_keys if 'relative_positions' not in k]
logger.warning(
f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when "
f"initializing {model.__class__.__name__}: {unexpected_keys}\n"
f"- This IS expected if you are initializing {model.__class__.__name__} from the checkpoint of "
f"a model trained on another task or with another architecture (e.g. initializing a "
f"BertForSequenceClassification model from a BertForPreTraining model).\n- This IS NOT expected if "
f"you are initializing {model.__class__.__name__} from the checkpoint of a model that you expect "
f"to be exactly identical (initializing a BertForSequenceClassification model from a "
f"BertForSequenceClassification model)."
)
else:
logger.info(f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n")
if len(missing_keys) > 0:
missing_keys = [k for k in missing_keys if 'relative_positions' not in k]
logger.warning(
f"Some weights of {model.__class__.__name__} were not initialized from the model "
f"checkpoint at {pretrained_model_name_or_path} "
f"and are newly initialized: {missing_keys}\n"
f"You should probably TRAIN this model on a down-stream task to be able to use it "
f"for predictions and inference."
)
else:
logger.info(
f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint "
f"at {pretrained_model_name_or_path}.\n"
f"If your task is similar to the task the model of the checkpoint was trained on, "
f"you can already use {model.__class__.__name__} for predictions without further training."
)
if len(error_msgs) > 0:
error_msg = "\n\t".join(error_msgs)
raise RuntimeError(f"Error(s) in loading state_dict for {model.__class__.__name__}:\n\t{error_msg}")
# make sure token embedding weights are still tied if needed
model.tie_weights()
# Set model in evaluation mode to deactivate DropOut modules by default
model.eval()
if output_loading_info:
loading_info = {
"missing_keys": missing_keys,
"unexpected_keys": unexpected_keys,
"error_msgs": error_msgs,
}
return model, loading_info
return model
@add_start_docstrings(
    "The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class NeZhaModel(NeZhaPreTrainedModel, ABC):
    """
    The model can behave as an encoder (with only self-attention) as well
    as a decoder, in which case a layer of cross-attention is added between
    the self-attention layers, following the architecture described in `Attention is all you need`_ by Ashish Vaswani,
    Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
    To behave as an decoder the model needs to be initialized with the
    :obj:`is_decoder` argument of the configuration set to :obj:`True`; an
    :obj:`encoder_hidden_states` is expected as an input to the forward pass.
    .. _`Attention is all you need`:
        https://arxiv.org/abs/1706.03762
    """

    def __init__(self, config: NeZhaConfig):
        super().__init__(config)
        self.config = config
        self.embeddings = NeZhaEmbeddings(config)
        self.encoder = NeZhaEncoder(config)
        self.pooler = BertPooler(config)
        # Weight initialization is implemented on the PreTrainedModel base class.
        self.init_weights()

    def get_input_embeddings(self):
        # Token embedding table; used for weight tying and embedding resizing.
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """Prunes heads of the model.

        heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
        See base class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        r"""
        Return:
            :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration
            (:class:`~transformers.BertConfig`) and inputs:
            last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
                Sequence of hidden-states at the output of the last layer of the model.
            pooler_output (:obj:`torch.FloatTensor`: of shape :obj:`(batch_size, hidden_size)`):
                Last layer hidden-state of the first token of the sequence (classification token)
                further processed by a Linear layer and a Tanh activation function.
            hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
                Hidden-states of the model at the output of each layer plus the initial embedding outputs.
            attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
                Attention weights after the attention softmax.

        Examples::

            from transformers import BertModel, BertTokenizer
            import torch

            tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
            model = BertModel.from_pretrained('bert-base-uncased')
            input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)
            outputs = model(input_ids)
            last_hidden_states = outputs[0]
        """
        # Exactly one of input_ids / inputs_embeds must be supplied.
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        # Default masks: attend to everything, single token-type segment.
        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        # FIX: use the inputs' `device` (same device as the masks built above) instead of
        # `self.device`, matching upstream BertModel and keeping all mask tensors colocated.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
            attention_mask, input_shape, device
        )
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length].
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
        embedding_output = self.embeddings(
            input_ids=input_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)
        # add hidden_states and attentions if they are here
        outputs = (sequence_output, pooled_output,) + encoder_outputs[1:]
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions)
@add_start_docstrings(
    """Bert Model with two heads on top as done during the pre-training: a `masked language modeling` head and
    a `next sentence prediction (classification)` head. """,
    BERT_START_DOCSTRING,
)
class NeZhaForPreTraining(NeZhaPreTrainedModel, ABC):
    # NeZha encoder with both pre-training heads: masked-LM scores over the
    # vocabulary and a binary next-sentence-prediction classifier.
    def __init__(self, config: NeZhaConfig):
        super().__init__(config)
        self.bert = NeZhaModel(config)
        self.cls = BertPreTrainingHeads(config)
        self.init_weights()

    def get_output_embeddings(self):
        # MLM decoder projection; exposed so the base class can tie it to the
        # input embeddings.
        return self.cls.predictions.decoder

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        next_sentence_label=None,
    ):
        r"""
        labels (``torch.LongTensor`` of shape ``(batch_size, sequence_length)``, `optional`,
            defaults to :obj:`None`):
            Labels for computing the masked language modeling loss.
            Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring).
            Tokens with indices set to ``-100`` are ignored (masked); the loss is only computed for
            tokens with labels in ``[0, ..., config.vocab_size]``.
        next_sentence_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`, defaults to :obj:`None`):
            Labels for computing the next sequence prediction (classification) loss.
            Indices should be in ``[0, 1]``: ``0`` indicates sequence B is a continuation of
            sequence A, ``1`` indicates sequence B is a random sequence.

        Returns:
            Tuple ``((total_loss,) prediction_scores, seq_relationship_score, (hidden_states), (attentions))``.
            NOTE: ``total_loss`` (sum of MLM loss and NSP loss) is only prepended when BOTH
            ``labels`` AND ``next_sentence_label`` are provided; supplying just one of them
            returns no loss.

        Examples::

            from transformers import BertTokenizer, BertForPreTraining
            import torch

            tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
            model = BertForPreTraining.from_pretrained('bert-base-uncased')
            input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)
            outputs = model(input_ids)
            prediction_scores, seq_relationship_scores = outputs[:2]
        """
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        # outputs[0]: per-token hidden states; outputs[1]: pooled [CLS] state.
        sequence_output, pooled_output = outputs[:2]
        prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
        # add hidden states and attention if they are here
        outputs = (prediction_scores, seq_relationship_score,) + outputs[2:]
        if labels is not None and next_sentence_label is not None:
            # CrossEntropyLoss ignores positions labeled -100 by default.
            loss_fct = CrossEntropyLoss()
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
            next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
            total_loss = masked_lm_loss + next_sentence_loss
            outputs = (total_loss,) + outputs
        return outputs  # (loss), prediction_scores, seq_relationship_score, (hidden_states), (attentions)
@add_start_docstrings("""Bert Model with a `language modeling` head on top. """, BERT_START_DOCSTRING)
class NeZhaForMaskedLM(NeZhaPreTrainedModel, ABC):
    # NeZha encoder with only the masked-language-modeling head.
    def __init__(self, config: NeZhaConfig):
        super().__init__(config)
        self.bert = NeZhaModel(config)
        self.cls = BertOnlyMLMHead(config)
        self.init_weights()

    def get_output_embeddings(self):
        # MLM decoder projection; tied to the input embeddings by the base class.
        return self.cls.predictions.decoder

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        labels=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`,
            defaults to :obj:`None`):
            Labels for computing the masked language modeling loss.
            Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring).
            Tokens with indices set to ``-100`` are ignored (masked); the loss is only computed for
            tokens with labels in ``[0, ..., config.vocab_size]``.
        encoder_hidden_states / encoder_attention_mask:
            Passed through to the underlying :class:`NeZhaModel` for cross-attention when the
            model is configured as a decoder.

        Returns:
            Tuple ``((masked_lm_loss,) prediction_scores, (hidden_states), (attentions))``;
            the loss is only prepended when ``labels`` is provided.

        Examples::

            from transformers import BertTokenizer, BertForMaskedLM
            import torch

            tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
            model = BertForMaskedLM.from_pretrained('bert-base-uncased')
            input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)
            outputs = model(input_ids, masked_lm_labels=input_ids)
            loss, prediction_scores = outputs[:2]
        """
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        sequence_output = outputs[0]
        # Project each token's hidden state to vocabulary logits.
        prediction_scores = self.cls(sequence_output)
        outputs = (prediction_scores,) + outputs[2:]  # Add hidden states and attention if they are here
        # Although this may seem awkward, BertForMaskedLM supports two scenarios:
        # 1. If a tensor that contains the indices of masked labels is provided,
        #    the cross-entropy is the MLM cross-entropy that measures the likelihood
        #    of predictions for masked words.
        # 2. If `lm_labels` is provided we are in a causal scenario where we
        #    try to predict the next token for each input in the decoder.
        if labels is not None:
            loss_fct = CrossEntropyLoss()  # -100 index = padding token
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
            outputs = (masked_lm_loss,) + outputs
        return outputs  # (ltr_lm_loss), (masked_lm_loss), prediction_scores, (hidden_states), (attentions)

    def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
        # Hook called by the generation loop to build the next step's model inputs.
        input_shape = input_ids.shape
        effective_batch_size = input_shape[0]
        # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
        if attention_mask is None:
            attention_mask = input_ids.new_ones(input_shape)
        # if model does not use a causal mask then add a dummy token:
        # a masked (zero-attention) PAD position is appended so the MLM head has a
        # slot to predict the "next" token at.
        if self.config.is_decoder is False:
            assert self.config.pad_token_id is not None, "The PAD token should be defined for generation"
            attention_mask = torch.cat(
                [attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1
            )
            dummy_token = torch.full(
                (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device
            )
            input_ids = torch.cat([input_ids, dummy_token], dim=1)
        return {"input_ids": input_ids, "attention_mask": attention_mask}
@add_start_docstrings(
    """Bert Model with a `next sentence prediction (classification)` head on top. """, BERT_START_DOCSTRING,
)
class NeZhaForNextSentencePrediction(NeZhaPreTrainedModel, ABC):
    # NeZha encoder with the binary next-sentence-prediction head used during
    # pre-training.
    def __init__(self, config: NeZhaConfig):
        super().__init__(config)
        self.bert = NeZhaModel(config)
        self.cls = BertOnlyNSPHead(config)
        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        head_mask=None,
        inputs_embeds=None,
        next_sentence_label=None,
    ):
        r"""
        Score whether sequence B is the true continuation of sequence A.

        next_sentence_label (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`,
            defaults to :obj:`None`):
            Labels for the next sequence prediction (classification) loss. Indices should be
            in ``[0, 1]``: ``0`` indicates sequence B is a continuation of sequence A, ``1``
            indicates sequence B is a random sequence.

        Returns:
            Tuple ``((next_sentence_loss,) seq_relationship_score, (hidden_states), (attentions))``;
            the loss is only prepended when ``next_sentence_label`` is provided.
            ``seq_relationship_score`` has shape ``(batch_size, 2)`` (scores before SoftMax).
        """
        encoder_outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        # The NSP head classifies the pooled [CLS] representation (second tuple item).
        nsp_logits = self.cls(encoder_outputs[1])
        # Keep any extra encoder outputs (hidden states / attentions) in the result.
        result = (nsp_logits,) + encoder_outputs[2:]
        if next_sentence_label is not None:
            ce_loss = CrossEntropyLoss()
            nsp_loss = ce_loss(nsp_logits.view(-1, 2), next_sentence_label.view(-1))
            result = (nsp_loss,) + result
        return result  # (next_sentence_loss), seq_relationship_score, (hidden_states), (attentions)
@add_start_docstrings(
    """Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class NeZhaForSequenceClassification(NeZhaPreTrainedModel, ABC):
    # NeZha encoder with dropout + a single linear layer over the pooled output.
    def __init__(self, config: NeZhaConfig):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = NeZhaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        r"""
        Classify (or regress on) the pooled ``[CLS]`` representation.

        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Labels for computing the sequence classification/regression loss. Indices should
            be in :obj:`[0, ..., config.num_labels - 1]`. If :obj:`config.num_labels == 1` a
            regression loss is computed (mean-square loss); if :obj:`config.num_labels > 1`
            a classification loss is computed (cross-entropy).

        Returns:
            Tuple ``((loss,) logits, (hidden_states), (attentions))``; the loss is only
            prepended when ``labels`` is provided. ``logits`` has shape
            ``(batch_size, config.num_labels)``.
        """
        encoder_outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        # Classify the pooled [CLS] state (second tuple item) after dropout.
        logits = self.classifier(self.dropout(encoder_outputs[1]))
        result = (logits,) + encoder_outputs[2:]
        if labels is not None:
            if self.num_labels == 1:
                # Single output unit -> regression with mean-squared error.
                loss = MSELoss()(logits.view(-1), labels.view(-1))
            else:
                loss = CrossEntropyLoss()(logits.view(-1, self.num_labels), labels.view(-1))
            result = (loss,) + result
        return result  # (loss), logits, (hidden_states), (attentions)
@add_start_docstrings(
    """Bert Model with a multiple choice classification head on top (a linear layer on top of
    the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """,
    BERT_START_DOCSTRING,
)
class NeZhaForMultipleChoice(NeZhaPreTrainedModel, ABC):
    # NeZha encoder with a single-logit head; choices are scored independently
    # and compared with a softmax over the choice dimension.
    def __init__(self, config: NeZhaConfig):
        super().__init__(config)
        self.bert = NeZhaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, 1)
        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        r"""
        Score each candidate sequence and pick among ``num_choices`` options.

        Inputs have shape ``(batch_size, num_choices, sequence_length)``.

        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Labels for the multiple choice classification loss. Indices should be in
            ``[0, ..., num_choices]`` where `num_choices` is the size of the second
            dimension of the input tensors.

        Returns:
            Tuple ``((loss,) reshaped_logits, (hidden_states), (attentions))``; the loss is
            only prepended when ``labels`` is provided. ``reshaped_logits`` has shape
            ``(batch_size, num_choices)`` (scores before SoftMax).
        """
        n_choices = input_ids.shape[1]
        # Fold the choice dimension into the batch so the encoder sees 2D inputs.
        input_ids = input_ids.view(-1, input_ids.size(-1))
        if attention_mask is not None:
            attention_mask = attention_mask.view(-1, attention_mask.size(-1))
        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
        encoder_outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        # One scalar score per (example, choice), from the pooled [CLS] state.
        choice_scores = self.classifier(self.dropout(encoder_outputs[1]))
        # Unfold back to (batch_size, num_choices) for the softmax-style loss.
        reshaped_logits = choice_scores.view(-1, n_choices)
        result = (reshaped_logits,) + encoder_outputs[2:]
        if labels is not None:
            result = (CrossEntropyLoss()(reshaped_logits, labels),) + result
        return result  # (loss), reshaped_logits, (hidden_states), (attentions)
@add_start_docstrings(
    """Bert Model with a token classification head on top (a linear layer on top of
    the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """,
    BERT_START_DOCSTRING,
)
class NeZhaForTokenClassification(NeZhaPreTrainedModel, ABC):
    # NeZha encoder with dropout + a per-token linear classifier, e.g. for NER.
    def __init__(self, config: NeZhaConfig):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = NeZhaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        r"""
        Classify every token of the sequence independently.

        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`,
            defaults to :obj:`None`):
            Labels for computing the token classification loss. Indices should be in
            ``[0, ..., config.num_labels - 1]``.

        Returns:
            Tuple ``((loss,) logits, (hidden_states), (attentions))``; the loss is only
            prepended when ``labels`` is provided. ``logits`` has shape
            ``(batch_size, sequence_length, config.num_labels)`` (scores before SoftMax).
        """
        encoder_outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        # Per-token hidden states (first tuple item) -> dropout -> label logits.
        logits = self.classifier(self.dropout(encoder_outputs[0]))
        result = (logits,) + encoder_outputs[2:]
        if labels is not None:
            ce_loss = CrossEntropyLoss()
            flat_logits = logits.view(-1, self.num_labels)
            flat_labels = labels.view(-1)
            if attention_mask is not None:
                # Score only positions the attention mask marks as real tokens:
                # padded positions get the loss's ignore_index so they drop out.
                keep = attention_mask.view(-1) == 1
                masked_labels = torch.where(
                    keep, flat_labels, torch.tensor(ce_loss.ignore_index).type_as(labels)
                )
                loss = ce_loss(flat_logits, masked_labels)
            else:
                loss = ce_loss(flat_logits, flat_labels)
            result = (loss,) + result
        return result  # (loss), scores, (hidden_states), (attentions)
@add_start_docstrings(
    """Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
    layers on top of the hidden-states output to compute `span start logits` and `span end logits`). """,
    BERT_START_DOCSTRING,
)
class NeZhaForQuestionAnswering(NeZhaPreTrainedModel, ABC):
    def __init__(self, config: NeZhaConfig):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = NeZhaModel(config)
        # Linear head projecting each token's hidden state to span logits;
        # assumes config.num_labels == 2 (start, end) -- TODO confirm.
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        head_mask=None,
        inputs_embeds=None,
        start_positions=None,
        end_positions=None,
    ):
        r"""
        start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`).
            Position outside of the sequence are not taken into account for computing the loss.
        end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`).
            Position outside of the sequence are not taken into account for computing the loss.

    Returns:
        :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration
        (:class:`~transformers.BertConfig`) and inputs:
        loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
            Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
        start_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`):
            Span-start scores (before SoftMax).
        end_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`):
            Span-end scores (before SoftMax).
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.

    Examples::

        from transformers import BertTokenizer, BertForQuestionAnswering
        import torch

        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = BertForQuestionAnswering.from_pretrained('bert-large-uncased-whole-word-masking-finetuned-squad')

        question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
        encoding = tokenizer.encode_plus(question, text)
        input_ids, token_type_ids = encoding["input_ids"], encoding["token_type_ids"]
        start_scores, end_scores = model(torch.tensor([input_ids]), token_type_ids=torch.tensor([token_type_ids]))

        all_tokens = tokenizer.convert_ids_to_tokens(input_ids)
        answer = ' '.join(all_tokens[torch.argmax(start_scores) : torch.argmax(end_scores)+1])
        assert answer == "a nice puppet"
        """
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        sequence_output = outputs[0]
        logits = self.qa_outputs(sequence_output)
        # Split the projection into separate start/end logit tensors and drop
        # the trailing size-1 dimension from each.
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)
        outputs = (start_logits, end_logits,) + outputs[2:]
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            # NOTE(review): clamp_ mutates the caller's position tensors in
            # place; out-of-range positions become ignored_index and are
            # skipped by CrossEntropyLoss below.
            ignored_index = start_logits.size(1)
            start_positions.clamp_(0, ignored_index)
            end_positions.clamp_(0, ignored_index)
            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            # Average the two cross-entropies into a single span loss.
            total_loss = (start_loss + end_loss) / 2
            outputs = (total_loss,) + outputs
        return outputs  # (loss), start_logits, end_logits, (hidden_states), (attentions)
| 49.96518 | 120 | 0.648772 |
e5fd2e0a512ea1d4f119fedf9ee39cd0c1a4d881 | 24,301 | py | Python | experiment_domainadapt_meanteacher.py | Danilum/self-ensemble-visual-domain-adapt | 22d10d9585c5d1f93b5cb02a6cd1576a80d9ba63 | [
"MIT"
] | null | null | null | experiment_domainadapt_meanteacher.py | Danilum/self-ensemble-visual-domain-adapt | 22d10d9585c5d1f93b5cb02a6cd1576a80d9ba63 | [
"MIT"
] | null | null | null | experiment_domainadapt_meanteacher.py | Danilum/self-ensemble-visual-domain-adapt | 22d10d9585c5d1f93b5cb02a6cd1576a80d9ba63 | [
"MIT"
] | null | null | null | """
Incorporates mean teacher, from:
Mean teachers are better role models: Weight-averaged consistency targets improve semi-supervised deep learning results
Antti Tarvainen, Harri Valpola
https://arxiv.org/abs/1703.01780
"""
import click
import torch
@click.command()
# --- experiment and network architecture selection ---
@click.option('--exp', type=click.Choice(['svhn_mnist', 'mnist_svhn',
                                          'svhn_mnist_rgb', 'mnist_svhn_rgb',
                                          'cifar_stl', 'stl_cifar',
                                          'mnist_usps', 'usps_mnist',
                                          'syndigits_svhn',
                                          'synsigns_gtsrb',
                                          ]), default='svhn_mnist',
              help='experiment to run')
@click.option('--arch', type=click.Choice([
    '',
    'mnist-bn-32-64-256',
    'grey-32-64-128-gp', 'grey-32-64-128-gp-wn', 'grey-32-64-128-gp-nonorm',
    'rgb-128-256-down-gp', 'resnet18-32',
    'rgb40-48-96-192-384-gp', 'rgb40-96-192-384-gp',
]), default='', help='network architecture')
# --- loss / mean-teacher configuration ---
@click.option('--loss', type=click.Choice(['var', 'bce']), default='var',
              help='augmentation variance loss function')
@click.option('--double_softmax', is_flag=True, default=False, help='apply softmax twice to compute supervised loss')
@click.option('--confidence_thresh', type=float, default=0.96837722, help='augmentation var loss confidence threshold')
@click.option('--rampup', type=int, default=0, help='ramp-up length')
@click.option('--teacher_alpha', type=float, default=0.99, help='Teacher EMA alpha (decay)')
@click.option('--fix_ema', is_flag=True, default=False, help='Use fixed EMA')
@click.option('--unsup_weight', type=float, default=3.0, help='unsupervised loss weight')
@click.option('--cls_bal_scale', is_flag=True, default=False,
              help='Enable scaling unsupervised loss to counteract class imbalance')
@click.option('--cls_bal_scale_range', type=float, default=0.0,
              help='If not 0, clamp class imbalance scale to between x and 1/x where x is this value')
@click.option('--cls_balance', type=float, default=0.005,
              help='Weight of class balancing component of unsupervised loss')
@click.option('--cls_balance_loss', type=click.Choice(['bce', 'log', 'bug']), default='bce',
              help='Class balancing loss function')
@click.option('--combine_batches', is_flag=True, default=False,
              help='Build batches from both source and target samples')
@click.option('--learning_rate', type=float, default=0.001, help='learning rate (Adam)')
# --- source-domain augmentation ---
@click.option('--standardise_samples', default=False, is_flag=True, help='standardise samples (0 mean unit var)')
@click.option('--src_affine_std', type=float, default=0.1, help='src aug xform: random affine transform std-dev')
@click.option('--src_xlat_range', type=float, default=2.0, help='src aug xform: translation range')
@click.option('--src_hflip', default=False, is_flag=True, help='src aug xform: enable random horizontal flips')
@click.option('--src_intens_flip', is_flag=True, default=False,
              help='src aug colour; enable intensity flip')
@click.option('--src_intens_scale_range', type=str, default='',
              help='src aug colour; intensity scale range `low:high` (-1.5:1.5 for mnist-svhn)')
@click.option('--src_intens_offset_range', type=str, default='',
              help='src aug colour; intensity offset range `low:high` (-0.5:0.5 for mnist-svhn)')
@click.option('--src_gaussian_noise_std', type=float, default=0.1,
              help='std aug: standard deviation of Gaussian noise to add to samples')
# --- target-domain augmentation ---
@click.option('--tgt_affine_std', type=float, default=0.1, help='tgt aug xform: random affine transform std-dev')
@click.option('--tgt_xlat_range', type=float, default=2.0, help='tgt aug xform: translation range')
@click.option('--tgt_hflip', default=False, is_flag=True, help='tgt aug xform: enable random horizontal flips')
@click.option('--tgt_intens_flip', is_flag=True, default=False,
              help='tgt aug colour; enable intensity flip')
@click.option('--tgt_intens_scale_range', type=str, default='',
              help='tgt aug colour; intensity scale range `low:high` (-1.5:1.5 for mnist-svhn)')
@click.option('--tgt_intens_offset_range', type=str, default='',
              help='tgt aug colour; intensity offset range `low:high` (-0.5:0.5 for mnist-svhn)')
@click.option('--tgt_gaussian_noise_std', type=float, default=0.1,
              help='tgt aug: standard deviation of Gaussian noise to add to samples')
# --- training schedule and output ---
@click.option('--num_epochs', type=int, default=10, help='number of epochs')
@click.option('--batch_size', type=int, default=64, help='mini-batch size')
@click.option('--epoch_size', type=click.Choice(['large', 'small', 'target']), default='target',
              help='epoch size is either that of the smallest dataset, the largest, or the target')
@click.option('--seed', type=int, default=0, help='random seed (0 for time-based)')
@click.option('--log_file', type=str, default='', help='log file path (none to disable)')
@click.option('--model_file', type=str, default='', help='model file path')
@click.option('--device', type=str, default='cuda:0', help='Device')
def experiment(exp, arch, loss, double_softmax, confidence_thresh, rampup, teacher_alpha, fix_ema,
               unsup_weight, cls_bal_scale, cls_bal_scale_range, cls_balance, cls_balance_loss,
               combine_batches,
               learning_rate, standardise_samples,
               src_affine_std, src_xlat_range, src_hflip,
               src_intens_flip, src_intens_scale_range, src_intens_offset_range, src_gaussian_noise_std,
               tgt_affine_std, tgt_xlat_range, tgt_hflip,
               tgt_intens_flip, tgt_intens_scale_range, tgt_intens_offset_range, tgt_gaussian_noise_std,
               num_epochs, batch_size, epoch_size, seed,
               log_file, model_file, device):
    """Train a mean-teacher self-ensembling model for unsupervised visual
    domain adaptation (Tarvainen & Valpola, arXiv:1703.01780) on the
    source->target dataset pair selected by ``exp``.

    All arguments are supplied by the click options declared above.
    """
    # Must run before any other locals are created: snapshot of the
    # command-line arguments, logged verbatim further below.
    settings = locals().copy()
    import os
    import sys
    import pickle
    import cmdline_helpers
    # Resolve the log file path: '' -> default per-experiment name,
    # 'none' -> logging disabled.
    if log_file == '':
        log_file = 'output_aug_log_{}.txt'.format(exp)
    elif log_file == 'none':
        log_file = None
    if log_file is not None:
        if os.path.exists(log_file):
            # Refuse to append to an existing log from a previous run.
            print('Output log file {} already exists'.format(log_file))
            return
    use_rampup = rampup > 0
    # Parse the 'low:high' colour-augmentation range strings into bounds.
    src_intens_scale_range_lower, src_intens_scale_range_upper, src_intens_offset_range_lower, src_intens_offset_range_upper = \
        cmdline_helpers.intens_aug_options(src_intens_scale_range, src_intens_offset_range)
    tgt_intens_scale_range_lower, tgt_intens_scale_range_upper, tgt_intens_offset_range_lower, tgt_intens_offset_range_upper = \
        cmdline_helpers.intens_aug_options(tgt_intens_scale_range, tgt_intens_offset_range)
    # Heavy imports are deferred until after the cheap argument checks above.
    import time
    import math
    import numpy as np
    from batchup import data_source, work_pool
    import data_loaders
    import standardisation
    import network_architectures
    import augmentation
    import torch, torch.cuda
    from torch import nn
    from torch.nn import functional as F
    import optim_weight_ema
    torch_device = torch.device(device)
    # Background worker pool used to run data augmentation off the main thread.
    pool = work_pool.WorkerThreadPool(2)
    n_chn = 0
    # Load the labelled source dataset and the unlabelled target dataset.
    if exp == 'svhn_mnist':
        d_source = data_loaders.load_svhn(zero_centre=False, greyscale=True)
        d_target = data_loaders.load_mnist(invert=False, zero_centre=False, pad32=True, val=False)
    elif exp == 'mnist_svhn':
        d_source = data_loaders.load_mnist(invert=False, zero_centre=False, pad32=True)
        d_target = data_loaders.load_svhn(zero_centre=False, greyscale=True, val=False)
    elif exp == 'svhn_mnist_rgb':
        d_source = data_loaders.load_svhn(zero_centre=False, greyscale=False)
        d_target = data_loaders.load_mnist(invert=False, zero_centre=False, pad32=True, val=False, rgb=True)
    elif exp == 'mnist_svhn_rgb':
        d_source = data_loaders.load_mnist(invert=False, zero_centre=False, pad32=True, rgb=True)
        d_target = data_loaders.load_svhn(zero_centre=False, greyscale=False, val=False)
    elif exp == 'cifar_stl':
        d_source = data_loaders.load_cifar10(range_01=False)
        d_target = data_loaders.load_stl(zero_centre=False, val=False)
    elif exp == 'stl_cifar':
        d_source = data_loaders.load_stl(zero_centre=False)
        d_target = data_loaders.load_cifar10(range_01=False, val=False)
    elif exp == 'mnist_usps':
        d_source = data_loaders.load_mnist(zero_centre=False)
        d_target = data_loaders.load_usps(zero_centre=False, scale28=True, val=False)
    elif exp == 'usps_mnist':
        d_source = data_loaders.load_usps(zero_centre=False, scale28=True)
        d_target = data_loaders.load_mnist(zero_centre=False, val=False)
    elif exp == 'syndigits_svhn':
        d_source = data_loaders.load_syn_digits(zero_centre=False)
        d_target = data_loaders.load_svhn(zero_centre=False, val=False)
    elif exp == 'synsigns_gtsrb':
        d_source = data_loaders.load_syn_signs(zero_centre=False)
        d_target = data_loaders.load_gtsrb(zero_centre=False, val=False)
    else:
        print('Unknown experiment type \'{}\''.format(exp))
        return
    # Delete the training ground truths as we should not be using them
    del d_target.train_y
    if standardise_samples:
        standardisation.standardise_dataset(d_source)
        standardisation.standardise_dataset(d_target)
    n_classes = d_source.n_classes
    print('Loaded data')
    # Pick a sensible default architecture for the chosen experiment when
    # none was given on the command line.
    if arch == '':
        if exp in {'mnist_usps', 'usps_mnist'}:
            arch = 'mnist-bn-32-64-256'
        if exp in {'svhn_mnist', 'mnist_svhn'}:
            arch = 'grey-32-64-128-gp'
        if exp in {'cifar_stl', 'stl_cifar', 'syndigits_svhn', 'svhn_mnist_rgb', 'mnist_svhn_rgb'}:
            arch = 'rgb-128-256-down-gp'
        if exp in {'synsigns_gtsrb'}:
            arch = 'rgb40-96-192-384-gp'

    # Sanity-check that the architecture's expected input shape matches the
    # loaded data before building anything.
    net_class, expected_shape = network_architectures.get_net_and_shape_for_architecture(arch)
    if expected_shape != d_source.train_X.shape[1:]:
        print('Architecture {} not compatible with experiment {}; it needs samples of shape {}, '
              'data has samples of shape {}'.format(arch, exp, expected_shape, d_source.train_X.shape[1:]))
        return

    # Two copies of the same network: the student is trained by gradient
    # descent, the teacher tracks the student by EMA of its weights.
    student_net = net_class(n_classes).to(torch_device)
    teacher_net = net_class(n_classes).to(torch_device)
    student_params = list(student_net.parameters())
    teacher_params = list(teacher_net.parameters())
    for param in teacher_params:
        # The teacher is never updated by backprop.
        param.requires_grad = False
    student_optimizer = torch.optim.Adam(student_params, lr=learning_rate)
    if fix_ema:
        teacher_optimizer = optim_weight_ema.EMAWeightOptimizer(teacher_net, student_net, alpha=teacher_alpha)
    else:
        teacher_optimizer = optim_weight_ema.OldWeightEMA(teacher_net, student_net, alpha=teacher_alpha)
    classification_criterion = nn.CrossEntropyLoss()
    print('Built network')
    # Independent augmentation pipelines for the source and target domains
    # (affine jitter, translation, optional flips, intensity and noise).
    src_aug = augmentation.ImageAugmentation(
        src_hflip, src_xlat_range, src_affine_std,
        intens_flip=src_intens_flip,
        intens_scale_range_lower=src_intens_scale_range_lower, intens_scale_range_upper=src_intens_scale_range_upper,
        intens_offset_range_lower=src_intens_offset_range_lower,
        intens_offset_range_upper=src_intens_offset_range_upper,
        gaussian_noise_std=src_gaussian_noise_std
    )
    tgt_aug = augmentation.ImageAugmentation(
        tgt_hflip, tgt_xlat_range, tgt_affine_std,
        intens_flip=tgt_intens_flip,
        intens_scale_range_lower=tgt_intens_scale_range_lower, intens_scale_range_upper=tgt_intens_scale_range_upper,
        intens_offset_range_lower=tgt_intens_offset_range_lower,
        intens_offset_range_upper=tgt_intens_offset_range_upper,
        gaussian_noise_std=tgt_gaussian_noise_std
    )

    # Batch augmentation callback applied by the data pipeline.  augment_pair
    # produces two independently-augmented views (student input, teacher
    # input); in combined mode the source batch also gets paired views.
    if combine_batches:
        def augment(X_sup, y_src, X_tgt):
            X_src_stu, X_src_tea = src_aug.augment_pair(X_sup)
            X_tgt_stu, X_tgt_tea = tgt_aug.augment_pair(X_tgt)
            return X_src_stu, X_src_tea, y_src, X_tgt_stu, X_tgt_tea
    else:
        def augment(X_src, y_src, X_tgt):
            X_src = src_aug.augment(X_src)
            X_tgt_stu, X_tgt_tea = tgt_aug.augment_pair(X_tgt)
            return X_src, y_src, X_tgt_stu, X_tgt_tea

    # Single-element list so the closure below can see epoch-level updates
    # to the ramp-up weight (rebinding a plain variable would not be shared).
    rampup_weight_in_list = [0]
    # Class-balancing loss function selected by --cls_balance_loss
    # ('bce', 'log' or 'bug' -- semantics defined in network_architectures).
    cls_bal_fn = network_architectures.get_cls_bal_function(cls_balance_loss)

    def compute_aug_loss(stu_out, tea_out):
        """Self-ensembling consistency loss between student and teacher
        probability outputs, plus optional class-balance terms.

        Returns ``(unsup_loss, conf_mask_count, unsup_mask_count)``; the
        counts are None in ramp-up mode.
        """
        # Augmentation loss
        if use_rampup:
            # Ramp-up mode: every sample contributes, weighted later by the
            # epoch-level ramp factor in rampup_weight_in_list[0].
            unsup_mask = None
            conf_mask_count = None
            unsup_mask_count = None
        else:
            # Confidence-threshold mode: only samples whose teacher
            # prediction is confident enough contribute.
            conf_tea = torch.max(tea_out, 1)[0]
            unsup_mask = conf_mask = (conf_tea > confidence_thresh).float()
            unsup_mask_count = conf_mask_count = conf_mask.sum()
        if loss == 'bce':
            aug_loss = network_architectures.robust_binary_crossentropy(stu_out, tea_out)
        else:
            # Default 'var' loss: element-wise squared difference.
            d_aug_loss = stu_out - tea_out
            aug_loss = d_aug_loss * d_aug_loss
        # Class balance scaling
        if cls_bal_scale:
            if use_rampup:
                n_samples = float(aug_loss.shape[0])
            else:
                n_samples = unsup_mask.sum()
            avg_pred = n_samples / float(n_classes)
            bal_scale = avg_pred / torch.clamp(tea_out.sum(dim=0), min=1.0)
            if cls_bal_scale_range != 0.0:
                bal_scale = torch.clamp(bal_scale, min=1.0/cls_bal_scale_range, max=cls_bal_scale_range)
            # Scale factors are treated as constants w.r.t. the gradient.
            bal_scale = bal_scale.detach()
            aug_loss = aug_loss * bal_scale[None, :]
        aug_loss = aug_loss.mean(dim=1)
        if use_rampup:
            unsup_loss = aug_loss.mean() * rampup_weight_in_list[0]
        else:
            unsup_loss = (aug_loss * unsup_mask).mean()
        # Class balance loss
        if cls_balance > 0.0:
            # Compute per-sample average predicted probability
            # Average over samples to get average class prediction
            avg_cls_prob = stu_out.mean(dim=0)
            # Penalise deviation from the uniform class distribution.
            equalise_cls_loss = cls_bal_fn(avg_cls_prob, float(1.0 / n_classes))
            equalise_cls_loss = equalise_cls_loss.mean() * n_classes
            if use_rampup:
                equalise_cls_loss = equalise_cls_loss * rampup_weight_in_list[0]
            else:
                if rampup == 0:
                    # Down-weight by the fraction of confident samples.
                    equalise_cls_loss = equalise_cls_loss * unsup_mask.mean(dim=0)
            unsup_loss += equalise_cls_loss * cls_balance
        return unsup_loss, conf_mask_count, unsup_mask_count
    # One training-step function is defined depending on whether source and
    # target samples are pushed through the networks as a single combined
    # batch or as separate batches.  Both return per-batch loss sums (and,
    # outside ramp-up mode, confidence/unsupervised mask counts).
    if combine_batches:
        def f_train(X_src0, X_src1, y_src, X_tgt0, X_tgt1):
            """Single optimisation step on a combined source+target batch.

            X_*0 feed the student, X_*1 the teacher (two augmentations of
            the same underlying samples).
            """
            X_src0 = torch.tensor(X_src0, dtype=torch.float, device=torch_device)
            X_src1 = torch.tensor(X_src1, dtype=torch.float, device=torch_device)
            y_src = torch.tensor(y_src, dtype=torch.long, device=torch_device)
            X_tgt0 = torch.tensor(X_tgt0, dtype=torch.float, device=torch_device)
            X_tgt1 = torch.tensor(X_tgt1, dtype=torch.float, device=torch_device)
            n_samples = X_src0.size()[0]
            n_total = n_samples + X_tgt0.size()[0]
            student_optimizer.zero_grad()
            student_net.train()
            teacher_net.train()
            # Concatenate source and target mini-batches
            X0 = torch.cat([X_src0, X_tgt0], 0)
            X1 = torch.cat([X_src1, X_tgt1], 0)
            student_logits_out = student_net(X0)
            student_prob_out = F.softmax(student_logits_out, dim=1)
            # The first n_samples rows correspond to the (labelled) source.
            src_logits_out = student_logits_out[:n_samples]
            src_prob_out = student_prob_out[:n_samples]
            teacher_logits_out = teacher_net(X1)
            teacher_prob_out = F.softmax(teacher_logits_out, dim=1)
            # Supervised classification loss
            if double_softmax:
                clf_loss = classification_criterion(src_prob_out, y_src)
            else:
                clf_loss = classification_criterion(src_logits_out, y_src)
            unsup_loss, conf_mask_count, unsup_mask_count = compute_aug_loss(student_prob_out, teacher_prob_out)
            loss_expr = clf_loss + unsup_loss * unsup_weight
            loss_expr.backward()
            student_optimizer.step()
            # Teacher weights follow the student via EMA.
            teacher_optimizer.step()
            outputs = [float(clf_loss) * n_samples, float(unsup_loss) * n_total]
            if not use_rampup:
                # Halved because the mask covers both source and target rows.
                mask_count = float(conf_mask_count) * 0.5
                unsup_count = float(unsup_mask_count) * 0.5
                outputs.append(mask_count)
                outputs.append(unsup_count)
            return tuple(outputs)
    else:
        def f_train(X_src, y_src, X_tgt0, X_tgt1):
            """Single optimisation step on separate source/target batches."""
            X_src = torch.tensor(X_src, dtype=torch.float, device=torch_device)
            y_src = torch.tensor(y_src, dtype=torch.long, device=torch_device)
            X_tgt0 = torch.tensor(X_tgt0, dtype=torch.float, device=torch_device)
            X_tgt1 = torch.tensor(X_tgt1, dtype=torch.float, device=torch_device)
            student_optimizer.zero_grad()
            student_net.train()
            teacher_net.train()
            src_logits_out = student_net(X_src)
            student_tgt_logits_out = student_net(X_tgt0)
            student_tgt_prob_out = F.softmax(student_tgt_logits_out, dim=1)
            teacher_tgt_logits_out = teacher_net(X_tgt1)
            teacher_tgt_prob_out = F.softmax(teacher_tgt_logits_out, dim=1)
            # Supervised classification loss
            if double_softmax:
                clf_loss = classification_criterion(F.softmax(src_logits_out, dim=1), y_src)
            else:
                clf_loss = classification_criterion(src_logits_out, y_src)
            # Consistency loss is computed on target samples only here.
            unsup_loss, conf_mask_count, unsup_mask_count = compute_aug_loss(student_tgt_prob_out, teacher_tgt_prob_out)
            loss_expr = clf_loss + unsup_loss * unsup_weight
            loss_expr.backward()
            student_optimizer.step()
            teacher_optimizer.step()
            n_samples = X_src.size()[0]
            outputs = [float(clf_loss) * n_samples, float(unsup_loss) * n_samples]
            if not use_rampup:
                mask_count = float(conf_mask_count)
                unsup_count = float(unsup_mask_count)
                outputs.append(mask_count)
                outputs.append(unsup_count)
            return tuple(outputs)
    print('Compiled training function')

    # Prediction helpers: return (student, teacher) class-probability arrays
    # for a batch.  f_pred_src and f_pred_tgt are currently identical; they
    # are kept separate as hooks for domain-specific evaluation.
    def f_pred_src(X_sup):
        X_var = torch.tensor(X_sup, dtype=torch.float, device=torch_device)
        student_net.eval()
        teacher_net.eval()
        return (F.softmax(student_net(X_var), dim=1).detach().cpu().numpy(),
                F.softmax(teacher_net(X_var), dim=1).detach().cpu().numpy())

    def f_pred_tgt(X_sup):
        X_var = torch.tensor(X_sup, dtype=torch.float, device=torch_device)
        student_net.eval()
        teacher_net.eval()
        return (F.softmax(student_net(X_var), dim=1).detach().cpu().numpy(),
                F.softmax(teacher_net(X_var), dim=1).detach().cpu().numpy())

    # Evaluation helpers: return the (student, teacher) counts of
    # misclassified samples in the batch.
    def f_eval_src(X_sup, y_sup):
        y_pred_prob_stu, y_pred_prob_tea = f_pred_src(X_sup)
        y_pred_stu = np.argmax(y_pred_prob_stu, axis=1)
        y_pred_tea = np.argmax(y_pred_prob_tea, axis=1)
        return (float((y_pred_stu != y_sup).sum()), float((y_pred_tea != y_sup).sum()))

    def f_eval_tgt(X_sup, y_sup):
        y_pred_prob_stu, y_pred_prob_tea = f_pred_tgt(X_sup)
        y_pred_stu = np.argmax(y_pred_prob_stu, axis=1)
        y_pred_tea = np.argmax(y_pred_prob_tea, axis=1)
        return (float((y_pred_stu != y_sup).sum()), float((y_pred_tea != y_sup).sum()))
    print('Compiled evaluation function')
# Setup output
def log(text):
print(text)
if log_file is not None:
with open(log_file, 'a') as f:
f.write(text + '\n')
f.flush()
f.close()
    # NOTE(review): this is also called when log_file is None ('none' was
    # passed) -- verify ensure_containing_dir_exists tolerates None.
    cmdline_helpers.ensure_containing_dir_exists(log_file)
    # Report setttings
    log('Settings: {}'.format(', '.join(['{}={}'.format(key, settings[key]) for key in sorted(list(settings.keys()))])))
    # Report dataset size
    log('Dataset:')
    log('SOURCE Train: X.shape={}, y.shape={}'.format(d_source.train_X.shape, d_source.train_y.shape))
    log('SOURCE Test: X.shape={}, y.shape={}'.format(d_source.test_X.shape, d_source.test_y.shape))
    log('TARGET Train: X.shape={}'.format(d_target.train_X.shape))
    log('TARGET Test: X.shape={}, y.shape={}'.format(d_target.test_X.shape, d_target.test_y.shape))
    print('Training...')
    # Infinite (repeats=-1) data sources zipped together and augmented.
    sup_ds = data_source.ArrayDataSource([d_source.train_X, d_source.train_y], repeats=-1)
    tgt_train_ds = data_source.ArrayDataSource([d_target.train_X], repeats=-1)
    train_ds = data_source.CompositeDataSource([sup_ds, tgt_train_ds]).map(augment)
    train_ds = pool.parallel_data_source(train_ds)
    # Number of samples that defines one "epoch" over the joined streams.
    if epoch_size == 'large':
        n_samples = max(d_source.train_X.shape[0], d_target.train_X.shape[0])
    elif epoch_size == 'small':
        n_samples = min(d_source.train_X.shape[0], d_target.train_X.shape[0])
    elif epoch_size == 'target':
        n_samples = d_target.train_X.shape[0]
    n_train_batches = n_samples // batch_size
    source_test_ds = data_source.ArrayDataSource([d_source.test_X, d_source.test_y])
    target_test_ds = data_source.ArrayDataSource([d_target.test_X, d_target.test_y])
    # seed == 0 means "unseeded": use numpy's global RNG.
    if seed != 0:
        shuffle_rng = np.random.RandomState(seed)
    else:
        shuffle_rng = np.random
    train_batch_iter = train_ds.batch_iterator(batch_size=batch_size, shuffle=shuffle_rng)
    # Best teacher snapshot kept on the CPU as plain numpy arrays.
    best_teacher_model_state = {k: v.cpu().numpy() for k, v in teacher_net.state_dict().items()}
    best_conf_mask_rate = 0.0
    best_src_test_err = 1.0
    for epoch in range(num_epochs):
        t1 = time.time()
        if use_rampup:
            # Sigmoid-shaped ramp: exp(-5 * (1 - epoch/rampup)^2), reaching
            # 1.0 once epoch >= rampup.
            if epoch < rampup:
                p = max(0.0, float(epoch)) / float(rampup)
                p = 1.0 - p
                rampup_value = math.exp(-p * p * 5.0)
            else:
                rampup_value = 1.0
            rampup_weight_in_list[0] = rampup_value
        train_res = data_source.batch_map_mean(f_train, train_batch_iter, n_batches=n_train_batches)
        train_clf_loss = train_res[0]
        if combine_batches:
            unsup_loss_string = 'unsup (both) loss={:.6f}'.format(train_res[1])
        else:
            unsup_loss_string = 'unsup (tgt) loss={:.6f}'.format(train_res[1])
        src_test_err_stu, src_test_err_tea = source_test_ds.batch_map_mean(f_eval_src, batch_size=batch_size * 2)
        tgt_test_err_stu, tgt_test_err_tea = target_test_ds.batch_map_mean(f_eval_tgt, batch_size=batch_size * 2)
        # Model selection: in ramp-up mode keep the teacher with the best
        # source test error; otherwise keep the teacher at the highest
        # confidence-mask rate.
        if use_rampup:
            unsup_loss_string = '{}, rampup={:.3%}'.format(unsup_loss_string, rampup_value)
            if src_test_err_stu < best_src_test_err:
                best_src_test_err = src_test_err_stu
                best_teacher_model_state = {k: v.cpu().numpy() for k, v in teacher_net.state_dict().items()}
                improve = '*** '
            else:
                improve = ''
        else:
            conf_mask_rate = train_res[-2]
            unsup_mask_rate = train_res[-1]
            if conf_mask_rate > best_conf_mask_rate:
                best_conf_mask_rate = conf_mask_rate
                improve = '*** '
                best_teacher_model_state = {k: v.cpu().numpy() for k, v in teacher_net.state_dict().items()}
            else:
                improve = ''
            unsup_loss_string = '{}, conf mask={:.3%}, unsup mask={:.3%}'.format(
                unsup_loss_string, conf_mask_rate, unsup_mask_rate)
        t2 = time.time()
        # NOTE(review): the 'SVHN TEST'/'MNIST TEST' labels are hard-coded
        # and misleading for other experiments; the values are actually the
        # source-test and target-test accuracies.
        log('{}Epoch {} took {:.2f}s: TRAIN clf loss={:.6f}, {}; '
            'SVHN TEST ACCURACY={:.3%}, MNIST TEST student ACCURACY={:.3%}, MNIST TEST teacher ACCURACY={:.3%}'.format(
            improve, epoch, t2 - t1, train_clf_loss, unsup_loss_string, 1.0 - src_test_err_stu, 1.0 - tgt_test_err_stu, 1.0 -tgt_test_err_tea))

    # Save network
    if model_file != '':
        cmdline_helpers.ensure_containing_dir_exists(model_file)
        with open(model_file, 'wb') as f:
            torch.save(best_teacher_model_state, f)


if __name__ == '__main__':
    experiment()
| 45.169145 | 144 | 0.657051 |
af817d8015b475b5dfc39e6a9f691ed1e71c05fc | 53,145 | py | Python | reflred/refldata.py | usnistgov/reductus | abb977f8db41975bb577597e23790b8b58b19d98 | [
"Unlicense"
] | null | null | null | reflred/refldata.py | usnistgov/reductus | abb977f8db41975bb577597e23790b8b58b19d98 | [
"Unlicense"
] | null | null | null | reflred/refldata.py | usnistgov/reductus | abb977f8db41975bb577597e23790b8b58b19d98 | [
"Unlicense"
] | null | null | null | # This program is public domain
"""
Reflectometry data representation.
Need to support collections of data from TOF, monochromatic
and white beam instruments.
Conceptually each data point is a tuple::
incident angles (sample tilt and rotation)
reflected angles (polar angle of detector pixel)
slit distances and openings
detector pixel distance and size
incident/reflected polarization
wavelength distribution
measurement start and duration
monitor and detector counts
sample environment
Reflectometers are either vertical or horizontal geometry.
For vertical geometry (sample surface parallel to gravity),
x refers to horizontal slit opening and horizontal detector
pixels. For horizontal geometry (sample surface perpendicular
to gravity) x refers to vertical slit opening and vertical
detector pixels. Other than gravitational corrections to
resolution and detector pixels, the analysis for the two
instrument types should be identical.
Monochromatic reflectometry files have a single wavelength per
angle and a series of angles. Time-of-flight and polychromatic
reflectometers have multiple wavelengths per angle but usually
one angle per file. In either case a file is a set of
detector frames each with its own wavelength and angles.
Different polarization states will be treated as belonging
to different measurements. These will need to be aligned
before polarization correction can be performed. Multiple
measurements may occur on the same detector. In this
case each measurement should have a separate 'region of
interest' to isolate it from the others, presenting a virtual
detector to the reduction and analysis program.
Some information about the measurements may be missing
from the files, or recorded incorrectly. Changes and
additions to the metadata must be recorded in any reduced
data file format, along with a list of transformations
that went into the reduction.
See notes in properties.py regarding dated values.
"""
__all__ = ['ReflData']
## Proposal for introspection with units
#def get_as(object,**kw):
# if len(kw) != 1: raise Error
# for k,v in kw.items():
# return convert(getattr(object,k),object.__units__[k],v)
#def units(object, k):
# return object.__units__[k]
#
#get_as(detector, distance='m')
#units(detector, 'distance')
#
#class Detector(object):
# __units__ = dict(distance='mm',size='mm',saturation='counts/s')
# ...
#
## Variation: use a units class to represent the units rather than a string
## This means that we save a couple of string lookups when doing conversion
#get_as(detector, distance=metre)
#def get_as(object,**kw):
# if len(kw) != 1: raise Error
# for k,v in kw.items():
# return getattr(object,k)*object.__units__[k]/v
#class Detector(object):
# __units__ = dict(distance=milli*metre,size=milli*metre,saturation=1/second)
## Something similar can be used for uncertainty, preferably stored as variance
import sys
import datetime
import warnings
import json
from io import BytesIO
import numpy as np
from numpy import inf, arctan2, sqrt, sin, cos, pi, radians
from dataflow.lib.exporters import exports_text, exports_json, NumpyEncoder
from .resolution import calc_Qx, calc_Qz, dTdL2dQ
# True on Python 3+; retained for legacy Python 2 compatibility checks.
IS_PY3 = sys.version_info[0] >= 3

# for sample background angle offset: tokens selecting whether a background
# measurement's Qz is derived from the sample angle or the detector angle.
QZ_FROM_SAMPLE = 'sample angle'
QZ_FROM_DETECTOR = 'detector angle'
class Group(object):
    """Base class for metadata groups with a fixed set of attributes.

    Subclasses (decorated with ``@set_fields``) declare their data
    attributes at class level; instances may only assign to those declared
    names, which catches typos when data loaders populate the metadata.
    """
    # Populated by the set_fields decorator on subclasses.
    _fields = ()
    _props = ()

    def __setattr__(self, key, value):
        # Check for class attr when setting; this is because hasattr on
        # a property will return False if getattr on that property raises
        # an exception. This means if you really want to sneak an
        # attribute into the group from your data loader, you will have
        # to populate it on the class rather than on the instance.
        if not key.startswith('_') and not hasattr(self.__class__, key):
            raise AttributeError("Cannot add attribute %s to class %s"
                                 % (key, self.__class__.__name__))
        object.__setattr__(self, key, value)

    def __init__(self, **kw):
        # _set/_str/_toDict are module-level helpers (defined elsewhere in
        # this file) that assign keyword args and render the group.
        _set(self, kw)

    def __str__(self):
        return _str(self)

    def _toDict(self):
        return _toDict(self)
def set_fields(cls):
    """Class decorator that records a group's public attribute names.

    Scans the class dictionary and stores the sorted names of plain data
    attributes in ``cls._fields`` and of properties in ``cls._props``.
    Underscore-prefixed names, callables, and any names declared in an
    optional ``_groups`` sequence of ``(name, type)`` pairs are excluded.
    Returns the class so it can be applied as ``@set_fields``.
    """
    group_names = {name for name, _ftype in getattr(cls, '_groups', ())}
    plain, computed = [], []
    for attr in sorted(cls.__dict__):
        if attr.startswith('_') or attr in group_names:
            continue
        value = cls.__dict__[attr]
        if isinstance(value, property):
            computed.append(attr)
        elif not callable(value):
            plain.append(attr)
    cls._fields = tuple(plain)
    cls._props = tuple(computed)
    return cls
# TODO: attribute documentation and units should be integrated with the
# TODO: definition of the attributes. Value attributes should support
# TODO: unit conversion
@set_fields
class Slit(Group):
    """
    Define a slit for the instrument. This is needed for correct resolution
    calculations and for ab initio footprint calculations.
    distance (inf millimetre)
        Distance from sample. Positive numbers are after the sample,
        negative numbers are before the sample along the beam path.
    offset (4 x 0 millimetre)
        Offset of the slit blades relative to the distance from the sample.
        For vertical geometry, this is left, right, up, down. For horizontal
        geometry this is up, down, left, right. Offset + distance gives the
        distances of the individual blades from the sample, with negative
        numbers occurring before the sample position and positive numbers
        after.
    shape (shape='rectangular')
        Whether we have slit blades ('rectangular') or a circular
        aperture ('circular').
    x (n x inf millimetre)
        Slit opening in the primary direction. For vertical geometry
        this is the horizontal opening, for horizontal geometry this is
        the vertical opening. This may be a constant (fixed slits) or
        of length n for the number of measurements.
    y (n x inf millimetre)
        Slit opening in the secondary direction. This may be a constant
        (fixed slits) or of length n for the number of measurements.
    """
    properties = ['distance','offset','x','y','shape']
    # Per-column metadata (units, variance link) consumed by
    # ReflData.columns and ReflData.apply_mask.
    columns = {"x": {"units": "mm"}}
    distance = inf
    offset = (0., 0., 0., 0.)
    x = inf # type: np.ndarray
    # Demanded (target) opening; presumably used like Sample.angle_x_target
    # to group nominally-equal points when joining -- confirm with loader.
    x_target = None # type: np.ndarray
    y = inf # type: np.ndarray
    y_target = None # type: np.ndarray
    shape = "rectangular" # rectangular or circular
@set_fields
class Environment(Group):
    """
    Define sample environment data for the measurements such as
    temperature (kelvin)
    pressure (pascal)
    relative_humidity (%)
    electric_field (V/m)
    magnetic_field (tesla)
    stress_field (pascal)
    The data may be a constant, a series of values equal to
    the number of scan points, or a series of values and times.
    The average, max and min over all scan points, and the
    value, max and min for a particular scan point may be
    available.
    Some measurements are directional, and will have a polar
    and azimuthal angle associated with them. This may be
    constant for the entire scan, or stored separately with
    each magnitude measurement.
    """
    #: Name of environment variable
    name = ""
    #: Units to report on graphs
    units = ""
    #: Statistics on all measurements (over the whole log, per docstring)
    average = None # type: np.ndarray
    minimum = None # type: np.ndarray
    maximum = None # type: np.ndarray
    #: Magnitude of the measurement
    value = None # type: np.ndarray
    #: Start time for log (seconds)
    start = None # type: np.ndarray
    #: Measurement time relative to start (seconds)
    time = None # type: np.ndarray
@set_fields
class Sample(Group):
    """
    Define the sample geometry. Size and shape are needed for correct
    resolution calculations and for ab initio footprint calculations.
    Angles are needed for correct calculation of Q. Rotation and
    environment are for display to the user.
    description ("")
        Sample description, if available from the file.
    width (inf millimetre)
        Width of the sample in the primary direction. For fixed slits
        the footprint of the beam on the sample decreases with angle
        in this direction.
    length (inf millimetre)
        Width of the sample in the secondary direction. The footprint
        is independent of angle in this direction.
    thickness (inf millimetre)
        Thickness of the sample.
    substrate_sld (10^-6 Angstrom^-2)
        To plot Fresnel reflectivity we need to know the substrate
        scattering length density. The default is to assume silicon.
    shape ('rectangular')
        Shape is 'circular' or 'rectangular'
    angle_x (n x 0 degree)
        Angle between neutron beam and sample surface in the primary
        direction. This may be constant or an array of length n for
        the number of measurements.
    angle_x_target (n x 0 degree)
        Desired angle_x, used by join to select the points that are
        nominally the same in the joined data set.
    angle_y (n x 0 degree)
        Angle between the neutron beam and sample surface in the
        secondary direction. This may be constant or an array of
        length n for the number of measurements. This is known as
        tilt on some instruments.
    rotation (n x 0 degree)
        For off-specular reflectivity the orientation of the patterned
        array on the surface of the sample affects the computed theory.
        This value is not needed for data reduction, but it should be
        reported to the user during reduction and carried through to
        the reduced file for correct analysis.
    environment ({})
        Sample environment data. See Environment class for a list of
        common environment data.
    """
    name = ''
    description = ''
    # Per-column metadata consumed by ReflData.columns and ReflData.apply_mask.
    columns = {"angle_x": {"units": "degrees"}}
    width = inf # mm
    length = inf # mm
    thickness = inf # mm
    shape = 'rectangular' # rectangular or circular or irregular
    angle_x = 0. # degree
    angle_x_target = None # degree
    angle_y = 0. # degree
    rotation = 0. # degree
    substrate_sld = 2.07 # inv A (silicon substrate for neutrons)
    incident_sld = 0. # inv A (air)
    # Extra sample-induced angular broadening; units/semantics not shown
    # here -- presumably degrees, added to the divergence. TODO confirm.
    broadening = 0.
    environment = None # type: Dict[str, Environment]
    # Convenience summaries of the sample environment; units are whatever
    # the loader recorded -- confirm against the specific instrument loader.
    temp_setpoint = None
    temp_avg = None
    magnet_setpoint = None
    magnet_avg = None
    def __init__(self, **kw):
        # Fresh dict per instance so instances do not share environment data.
        self.environment = {}
        Group.__init__(self, **kw)
@set_fields
class Beamstop(Group):
    """
    Define the geometry of the beamstop. This is used by the
    detector class to compute the shadow of the beamstop on the
    detector. The beamstop is assumed to be centered on the
    direct beam regardless of the position of the detector.
    distance (0 millimetre)
        Distance from sample to beamstop. Note: this will need to
        be subtracted from the distance from detector to beamstop.
    shape ('rectangular')
        Shape is 'circular' or 'rectangular'
    width (0 millimetre)
        Width of the beamstop in the primary direction. For circular
        beamstops, this is the diameter.
    length (0 millimetre)
        Width of the beamstop in the secondary direction. For circular
        beamstops, this is the diameter.
    offset (2 x millimetre)
        Offset of the beamstop from the center of the beam.
    ispresent (False)
        True if beamstop is present in the experiment.
    """
    distance = 0. # mm
    width = 0. # mm
    length = 0. # mm
    shape = 'rectangular' # rectangular or circular
    offset = (0., 0.) # mm, (primary, secondary) offset from beam center
    ispresent = False
@set_fields
class Monochromator(Group):
    """
    Monochromator properties.
    wavelength (k nanometre)
        Wavelength for each channel
    wavelength_resolution (k %)
        Wavelength resolution of the beam for each channel using 1-sigma
        gaussian approximation dL, expressed as 100*dL/L. The actual
        wavelength distribution is considerably more complicated, being
        approximately square for multi-sheet monochromators and highly
        skewed on TOF machines.
    """
    # Per-column metadata consumed by ReflData.columns and ReflData.apply_mask.
    columns = {
        "wavelength": {"units": "Angstroms", "variance": "wavelength_resolution"},
    }
    wavelength = None # angstrom
    # NOTE(review): the docstring says resolution is a percentage of L but
    # this comment says angstrom -- units need confirmation against loaders.
    wavelength_resolution = None # angstrom
@set_fields
class Detector(Group):
    """
    Define the detector properties. Note that this defines a virtual
    detector. The real detector may have e.g., multiple beam paths
    incident upon it, and be split into two virtual detectors when
    the file is loaded.
    Direction x refers to the primary direction and y refers to
    the secondary direction. For vertical geometry, the primary
    direction is in the horizontal plane and the secondary direction
    is in the vertical plane. For horizontal geometry these are
    reversed. This allows the reduction software to be simpler,
    but may complicate file loading from formats which store values
    in absolute geometry.
    Geometry
    ========
    dims (2 x pixels)
        Dimensions of the detector, [nx,ny]. For pencil detectors this
        should be [1,1]. For position sensitive detectors, this should be
        [nx,1]. For area detectors, this should be [nx,ny].
    distance (millimetre)
        Distance from the sample to the detector.
    size (2 x millimetre)
        Detector size, [x,y]. Default is 1 mm x 1 mm.
    solid_angle (2 x radian)
        Detector solid angle [x,y], calculated from distance and size.
    center (2 x millimetre)
        Location of the center pixel [x,y] relative to the detector arm.
    width_x (nx x millimetre)
    width_y (ny x millimetre)
        Pixel width in x and y.
    offset_x (nx x millimetre)
    offset_y (ny x millimetre)
        Pixel offset in x and y.
    angle_x (n x degree)
    angle_y (n x degree)
        Angle of the detector arm relative to the main beam in x and y.
        This may be constant or an array of length n for the number of
        measurements in the scan.
    angle_x_offset (nx x degree)
    angle_y_offset (ny x degree)
        Pixel angle relative to detector angle in x and y.
    rotation (degree)
        Angle of rotation of the detector relative to the beam. This
        will affect how vertical integration in the region of interest
        is calculated. Ideally the detector would not be rotated, though
        misalignment can sometimes occur.
    Efficiency
    ==========
    efficiency (nx x ny %)
        Efficiency of the individual pixels; this is an array of the same
        shape as the detector, giving the relative efficiency of each pixel,
        or 1 if the efficiency is unknown.
        TODO: do we need variance?
    saturation (k [rate (counts/second), efficiency (%), uncertainty])
        Given a measurement of a given number of counts versus expected
        number of counts on the detector (e.g., as estimated by scanning
        a narrow slit across the detector to measure the beam profile,
        then measuring increasingly large portions of the beam profile),
        this can be converted to an efficiency correction per count rate
        which can be applied to all data read with this detector. The
        value for deadtime should be a tuple of vectors: count rate,
        efficiency and uncertainty. Below the lowest count rate the detector
        is considered to be 100% efficient (any baseline inefficiency will
        be normalized when comparing the measured reflection to the
        measured beam). Beyond the highest count rate, the detector
        is considered saturated. The normalize counts vector (v,dv) will
        be scaled by 1/(saturation +/- uncertainty).
        Note: There may be separate per pixel and per detector
        saturation levels.
    mask (nx x ny)
        Ignore data when (mask&0xFFFF != 0)
        https://manual.nexusformat.org/classes/base_classes/NXdetector.html
    Measurement
    ===========
    wavelength (k nanometre)
        Wavelength for each channel
    wavelength_resolution (k %)
        Wavelength resolution of the beam for each channel using 1-sigma
        gaussian approximation dL, expressed as 100*dL/L. The actual
        wavelength distribution is considerably more complicated, being
        approximately square for multi-sheet monochromators and highly
        skewed on TOF machines.
    time_of_flight (k+1 millisecond)
        Time boundaries for time-of-flight measurement
    counts (nx x ny x k counts OR n x nx x ny counts OR n x nx x ny x k counts)
        nx x ny detector pixels
        n number of measurements
        k time-of-flight/wavelength channels
    counts_variance (like counts)
    """
    dims = (1, 1) # i,j
    distance = None # mm
    size = (1., 1.) # mm
    center = (0., 0.) # mm
    width_x = 1. # mm
    width_y = 1. # mm
    offset_x = 0. # mm
    offset_y = 0. # mm
    angle_x = 0. # degree
    angle_y = 0. # degree
    # Desired angle_x; presumably used by join like Sample.angle_x_target
    # to group nominally-equal points -- confirm with loader.
    angle_x_target = 0. # degree
    angle_x_offset = 0. # degree
    angle_y_offset = 0. # degree
    rotation = 0. # degree
    efficiency = 1. # proportion
    saturation = inf # counts/sec
    wavelength = None # angstrom
    wavelength_resolution = None # angstrom
    time_of_flight = None # ms
    counts = None
    counts_variance = None
    mask = None
    # Deadtime correction constants; set elsewhere by a deadtime
    # correction step -- semantics not visible in this file chunk.
    deadtime = None
    deadtime_error = None
    # Per-column metadata consumed by ReflData.columns and ReflData.apply_mask.
    columns = {
        "counts": {"units": "counts", "variance": "counts_variance"},
        "angle_x": {"units": "degrees"},
        "wavelength": {"units": "Angstroms", "variance": "wavelength_resolution"},
    }
    @property
    def solid_angle(self):
        """Detector solid angle [x,y] (radians)"""
        # Returns [0, 0] when the sample-detector distance is unknown.
        #return 2*arctan2(np.asarray(self.size)/2., self.distance)
        return (2*arctan2(np.asarray(self.size)/2., self.distance)
                if self.distance is not None
                else np.array([0., 0.]))
@set_fields
class ROI(Group):
    """
    Detector region of interest.
    Defines a rectangular region of interest on the detector which
    is used for defining frames. This can be used for example to
    split a single detector with both polarization states (via
    transmission and reflection off a supermirror) into two virtual
    detectors.
    xlo, xhi (pixels)
    ylo, yhi (pixels)
    """
    # Pixel bounds of the region; None presumably means "unbounded on
    # that side" -- confirm against the code that consumes the ROI.
    xlo = None
    xhi = None
    ylo = None
    yhi = None
@set_fields
class Monitor(Group):
    """
    Define the monitor properties.
    The monitor is essential to the normalization of reflectometry data.
    Reflectometry is the number of neutrons detected divided by the
    number of neutrons incident on the sample. To compute this ratio,
    the incident and detected neutrons must be normalized to the neutron
    rate, either counts per monitor count, counts per second or counts
    per unit of source power (e.g., coulombs of protons incident on the
    detector, or megawatt hours of reactor power).
    counts (n x k counts)
        Number of counts measured. For scanning instruments there is
        a separate count for each of the n measurements. For TOF
        instruments there is a separate count for each of k time
        channels. Counts may be absent, in which case normalization
        must be by time or by monitor. In some circumstances the
        user may generate a counts vector, for example by estimating
        the count rate by other means, in order to combine data
        measured by time with data measured by monitor when the
        monitor values are otherwise unreliable.
    counts_variance (n x k counts)
        Variance is set to the number of counts but scaled during
        monitor saturation and deadtime corrections.
    roi_counts (n x k counts)
        Count against a region of interest (ROI) on the detector.
        **TODO**: ROI is **not** scaled during detector deadtime corrections,
        and there is no correction for detector efficiency.
    roi_variance (n x k counts)
        Variance is set to the number of counts.
    count_time (n seconds)
        Duration of the measurement. For scanning instruments, there is
        a separate duration for each measurement. For TOF, this is a
        single value equal to the duration of the entire measurement.
    time_step (seconds)
        The count_time timer has a reporting unit, e.g. second, or
        millisecond, or in the case of NCNR ICP files, hundredths of
        a minute. The measurement uncertainty for the count time
        is assumed to be uniform over the time_step, centered on
        the reported time, with a gaussian approximation of uncertainty
        being sqrt(time_step/12).
    source_power (n source_power_units)
        The average source power for each measurement. For situations when
        the monitor cannot be trusted (which can happen from time to time on
        some instruments), we can use the number of protons incident on
        the target (proton charge) or the energy of the source (reactor
        power integrated over the duration of each measurement) as a proxy
        for the monitor.
    source_power_units ('coulombs/s' | 'megawatts')
        Units for source power.
    source_power_variance (n source_power_units)
        Variance in the measured source power
    base ('time' | 'monitor' | 'roi' | 'power')
        The measurement rate basis which should be used to normalize
        the data. This is initialized by the file loader, but may
        be overridden during reduction.
    start_time (n seconds)
        For scanning instruments the start of each measurement relative
        to start of the scan. Note that this is not simply sum of the
        count times because there may be motor movement between
        measurements. The start time is required to align the measurement
        values with environment parameters, and for calculation of He3
        polarization. For TOF, this should be zero.
    distance (metre)
        Distance from the sample. This is not used by reduction but
        may be of interest to the user.
    sampled_fraction ([0,1])
        Portion of the neutrons that are sampled by the monitor. If the
        monitor is after the second slit, the monitor value can be used to
        estimate the the counts on the detector, scaled by the sampled
        fraction. Otherwise a full slit scan is required to normalize
        the reflectivity. This is the inverse of the detector to monitor
        ratio used to normalize data on some instruments.
    time_of_flight (k+1 millisecond)
        Time boundaries for the time-of-flight measurement
    The corrected monitor counts field will start as None, but may be
    set by a dead time correction, which scales the monitor according
    to the monitor rate. If for some reason the monitor is flaky, then
    the corrected monitor counts could be set by multiplying time by
    the monitor rate.
    """
    distance = None
    sampled_fraction = None
    counts = None
    counts_variance = None
    roi_counts = None
    roi_variance = None
    start_time = None
    count_time = None
    time_step = 1 # Default to nearest second
    time_of_flight = None
    base = 'monitor'
    source_power = None # No source power recorded
    source_power_units = "MW"
    source_power_variance = 0
    saturation = None
    # Per-column metadata consumed by ReflData.columns and ReflData.apply_mask.
    columns = {
        "counts": {"units": "counts", "variance": "counts_variance"},
        "roi_counts": {"units": "counts", "variance": "roi_variance"},
        "count_time": {"units": "seconds"}
    }
    # Deadtime correction constants; set elsewhere by a deadtime
    # correction step -- semantics not visible in this file chunk.
    deadtime = None
    deadtime_error = None
class Intent(object):
    """
    Intent is one of the following:
    intensity: Normalization scan for computing absolute reflection
    specular: Specular intensity measurement
    background+: Background measurement, sample rotated
    background-: Background measurement, detector offset
    rock qx: Rocking curve with fixed Qz
    rock sample: Rocking curve with fixed detector angle
    rock detector: Rocking curve with fixed sample angle
    unknown: Some other kind of measurement
    detector efficiency: Flood fill
    Not supported:
    alignment: Sample alignment measurement
    area: Measurement of a region of Qx-Qz plane
    slice: Slice through Qx-Qz
    """
    # Canonical intent strings; use these constants rather than literals.
    slit = 'intensity'
    spec = 'specular'
    back = 'background'
    backp = 'background+'
    backm = 'background-'
    rockQ = 'rock qx'
    rock3 = 'rock sample'
    rock4 = 'rock detector'
    none = 'unknown'
    deff = 'detector efficiency'
    time = 'time'
    other = 'other'
    scan = 'scan'
    # All recognized intent strings.
    intents = (slit, spec, back, backp, backm, rockQ, rock3, rock4,
               deff, time, other, scan, none)
    @staticmethod
    def isback(intent):
        """True for any background measurement ('background', '+' or '-')."""
        return intent.startswith('background')
    @staticmethod
    def isspec(intent):
        """True for specular measurements."""
        return intent == Intent.spec
    @staticmethod
    def isrock(intent):
        """True for any rocking curve ('rock qx/sample/detector')."""
        return intent.startswith('rock')
    @staticmethod
    def isslit(intent):
        """True for slit (intensity normalization) scans."""
        return intent == Intent.slit
    @staticmethod
    def isnone(intent):
        """True when the intent is unknown."""
        return intent == Intent.none
    @staticmethod
    def isscan(intent):
        """True for generic parameter scans."""
        return intent == Intent.scan
def infer_intent(data):
    """
    Infer intent from data.

    Returns one of the Intent strings.

    The classification looks at which angles move across the scan:
    neither angle (slit/intensity scan), both or neither moving (specular,
    Qx rock, or background), sample only (sample rock), or detector only
    (detector rock).
    """
    # TODO: doesn't handle alignment scans
    theta_i = data.sample.angle_x
    theta_f = 0.5*data.detector.angle_x  # reflected angle is 2-theta/2
    # Angles that differ by less than ~10% of the divergence count as equal.
    dtheta = 0.1*data.angular_resolution
    n = len(theta_i)
    scan_i = (max(theta_i) - min(theta_i) > dtheta).any()
    scan_f = (max(theta_f) - min(theta_f) > dtheta).any()
    if (abs(theta_i) < dtheta).all() and (abs(theta_f) < dtheta).all():
        # incident and reflected angles are both 0
        intent = Intent.slit
    elif (scan_i and scan_f) or (not scan_i and not scan_f):
        # both theta_i and theta_f are moving, or neither is moving
        if (abs(theta_f - theta_i) < dtheta).all(): # all specular
            intent = Intent.spec
        elif (data.Qz.max() - data.Qz.min() < data.dQ.max()
              and data.Qx.max() - data.Qx.min() > data.dQ.max()):
            # Qz fixed to within resolution while Qx spans more than the
            # resolution: a Qx rocking curve.  Note: the closing parenthesis
            # was previously misplaced, comparing a boolean to dQ.max();
            # it only gave the right answer because dQ.max() > 0.
            intent = Intent.rockQ
        elif np.sum(theta_f - theta_i > dtheta) > 0.9*n: # 90% above
            intent = Intent.backp
        elif np.sum(theta_i - theta_f > dtheta) > 0.9*n: # 90% below
            intent = Intent.backm
        else:
            intent = Intent.none
    elif scan_i:
        # only theta_i is moving
        intent = Intent.rock3
    elif scan_f:
        # only theta_f is moving
        intent = Intent.rock4
    else:
        # never gets here
        intent = Intent.scan
    return intent
@set_fields
class ReflData(Group):
"""
Reflectometry data structure, giving a predictable name space for the
reduction steps regardless of input file format.
"""
_groups = (
("slit1", Slit), ("slit2", Slit), ("slit3", Slit), ("slit4", Slit),
("monochromator", Monochromator),
("detector", Detector),
("sample", Sample),
("monitor", Monitor),
("roi", ROI),
)
#: Sample geometry
sample = None # type: Sample
#: Presample slits
slit1 = None # type: Slit
#: Presample slits
slit2 = None # type: Slit
#: Post sample slits
slit3 = None # type: Slit
#: Post sample slits
slit4 = None # type: Slit
#: Monochromator wavelength
monochromator = None # type: Monochromator
#: Detector geometry, efficiency and counts
detector = None # type: Detector
#: Counts and/or durations
monitor = None # type: Monitor
#: Region of interest on the detector.
roi = None # type: ROI
#: Name of a particular instrument
instrument = "unknown"
#: Whether the scattering plane is horizontal or vertical. The x-y
#: values of the slits, detector, etc. should be relative to the
#: scattering plane, not the lab frame.
geometry = "vertical"
#: Type of radiation (neutron or xray) used to probe the sample.
probe = "unknown"
#: Location of the datafile
path = "unknown"
#: Download location for the file, if available
uri = "unknown"
#: For scanning instruments, the number of measurements.
points = 1
#: For time of flight, the number of time channels. For white
#: beam instruments, the number of analysers.
channels = 1
#: Name of the dataset. This may be a combination of filename and
#: entry number.
name = ""
#: Numeric ID of the dataset. Using "fileNum" from trajectoryData
filenumber = 0
#: Entry identifier if more than one entry per file
entry = ""
#: Description of the entry.
description = ""
#: Starting date and time of the measurement.
date = datetime.datetime(1970, 1, 1)
#: Duration of the measurement.
duration = 0
#: Nominal attenuation as recorded in the data file, or 1.0 if not recorded
attenuation = 1.0
#: '' unpolarized
#: '+' spin up
#: '-' spin down
#: '++','--' non-spin-flip
#: '-+','+-' spin flip
polarization = ""
#: The base for normalization (e.g., 'monitor' or 'time')
normbase = None
#: List of warnings generated when the file was loaded
warnings = None
#: Value label for y-axis on 1-D or colorbar on 2-D plots.
#: Label will change when the value is normalized.
vlabel = 'Intensity'
#: Value units
vunits = 'counts'
#: Value scale ('linear', 'log' or '' for auto)
#: Units will change when the value is normalized.
vscale = '' # type: str
#: X axis label
xlabel = 'unknown intent'
#: X axis units
xunits = ''
#: Value scale ('linear', 'log' or '' for auto)
xscale = ''
#: points excluded from reduction
mask = None
#: Computed 1-sigma angular resolution in degrees
angular_resolution = None # type: np.ndarray
#: For background scans, the choice of Qz for the
#: points according to theta (sample angle), 2theta (detector angle)
#: or qz (Qz value of background computed from sample and detector angle)
#: How to calculate Qz from instrument angles.
#:
#: **actual**
#: calculates Qx and Qz as (x,z)-components of
#: $(\vec k_{\text{out}} - \vec k_\text{in})$ in sample coordinates,
#: **detector**
#: ignores the sample angle and calculates Qz
#: as $(4\pi/\lambda \sin(\theta_\text{detector}/2))$,
#: **sample**
#: ignores the detector angle and calculates Qz
#: as $(4\pi/\lambda \sin(\theta_\text{sample}))$
#: **target**
#: uses the user-supplied Qz_target values
Qz_basis = 'actual'
#: The target Qz value given in the data file; or NaN if not available
Qz_target = None # type: np.ndarray
scan_value = None # type: List[np.ndarray]
scan_label = None # type: List[str]
scan_units = None # type: List[str]
_intent = Intent.none
_v = None
_dv = None
## Data representation for generic plotter as (x,y,z,v) -> (qz,qx,qy,Iq)
## TODO: subclass Data so we get pixel edges calculations
#def _getx(self): return self.Qz
#def _gety(self): return self.Qx
#def _getz(self): return self.Qy
#x,xlabel,xunits = property(_getx),"Qx","inv A"
#y,ylabel,yunits = property(_gety),"Qy","inv A"
#z,zlabel,zunits = property(_getz),"Qz","inv A"
    @property
    def intent(self):
        """Purpose of the measurement (one of the Intent strings)."""
        return self._intent
    @intent.setter
    def intent(self, v):
        # Note: not setting x value with the label since the returned x should
        # correspond to the underlying value even if it has been updated since
        # the measurement intent was set.
        self._intent = v
        # Keep the x-axis label and units in sync with the new intent;
        # the x property dispatches on the same cases.
        if Intent.isspec(v) or Intent.isback(v):
            self.xlabel, self.xunits = "Qz", "1/Ang"
        elif Intent.isrock(v):
            self.xlabel, self.xunits = "Qx", "1/Ang"
        elif Intent.isslit(v):
            #self.xlabel, self.xunits = "angular resolution", "degrees 1-sigma"
            self.xlabel, self.xunits = "slit 1 opening", "mm"
        elif Intent.isscan(v):
            self.xlabel, self.xunits = self.scan_label[0], self.scan_units[0]
        else:
            self.xlabel, self.xunits = "point", ""
@property
def x(self):
# Return different x depending on intent
intent = self.intent
if Intent.isback(intent) or Intent.isspec(intent):
return self.Qz
elif Intent.isrock(intent):
return self.Qx
elif Intent.isslit(intent):
return self.slit1.x
#return self.angular_resolution
elif Intent.isscan(intent):
return self.scan_value[0]
else:
return np.arange(1, len(self.v)+1)
@property
def dx(self):
return self.dQ
@property
def v(self):
return self.detector.counts if self._v is None else self._v
@v.setter
def v(self, v):
self._v = v
@property
def dv(self):
return sqrt(self.detector.counts_variance) if self._dv is None else self._dv
@dv.setter
def dv(self, dv):
self._dv = dv
    @property
    def Ti(self):
        """Incident (sample) angle in degrees."""
        return self.sample.angle_x
    @property
    def Td(self):
        """Detector arm angle in degrees."""
        return self.detector.angle_x
    @property
    def Tf(self):
        """Final (reflected) angle, Td - Ti; None if either is missing."""
        Ti, Td = self.Ti, self.Td
        return Td - Ti if Ti is not None and Td is not None else None
    @property
    def Ti_target(self):
        """Demanded incident angle (used by join to group points)."""
        return self.sample.angle_x_target
    @property
    def Td_target(self):
        """Demanded detector angle (used by join to group points)."""
        return self.detector.angle_x_target
    @property
    def Tf_target(self):
        """Demanded final angle, Td_target - Ti_target; None if missing."""
        Ti, Td = self.Ti_target, self.Td_target
        return Td - Ti if Ti is not None and Td is not None else None
    @property
    def Li(self):
        """Incident wavelength (from the monochromator) in Angstroms."""
        return self.monochromator.wavelength
    @property
    def Ld(self):
        """Detected wavelength (from the detector) in Angstroms."""
        return self.detector.wavelength
@property
def Qz(self):
# Note: specular reflectivity assumes elastic scattering
Li = Ld = self.Ld
#print("Qz_basis", self.Qz_basis, self.Ti.shape, self.Td.shape, self.Ti_target.shape, self.Td_target.shape, Li.shape)
if self.Qz_basis == 'actual':
return calc_Qz(self.Ti, self.Td, Li, Ld)
if self.Qz_basis == 'target':
if self.Qz_target is not None:
return self.Qz_target
return calc_Qz(self.Ti_target, self.Td_target, Li, Ld)
if self.Qz_basis == 'detector':
return calc_Qz(self.Td/2, self.Td, Li, Ld)
if self.Qz_basis == 'sample':
return calc_Qz(self.Ti, 2*self.Ti, Li, Ld)
raise KeyError("Qz basis must be one of [actual, detector, sample, target]")
@property
def Qx(self):
# Note: specular reflectivity assumes elastic scattering
Li = Ld = self.Ld
if self.Qz_basis == 'actual':
return calc_Qx(self.Ti, self.Td, Li, Ld)
if self.Qz_basis == 'target':
return np.zeros_like(self.Td)
#return calc_Qx(self.Ti_target, self.Td_target, Li, Ld)
if self.Qz_basis == 'detector':
return np.zeros_like(self.Td)
if self.Qz_basis == 'sample':
return np.zeros_like(self.Ti)
raise KeyError("Qz basis must be one of [actual, detector, sample, target]")
    @property
    def dQ(self):
        """1-sigma Q resolution from angular and wavelength divergence,
        or None if the angular divergence has not been estimated yet."""
        if self.angular_resolution is None:
            return None
            #raise ValueError("Need to estimate divergence before requesting dQ")
        # TODO: move sample broadening to to the dQ calculation
        T, dT = self.Ti, self.angular_resolution
        L, dL = self.Ld, self.detector.wavelength_resolution
        #print(T.shape, dT.shape, L.shape, dL.shape)
        return dTdL2dQ(T, dT, L, dL)
    @property
    def columns(self):
        """
        Ordered column metadata for export: the core columns (x, v, Qz, ...)
        followed by any populated subgroup columns (sample/angle_x, ...)
        and finally the scan columns.  Each value is a dict with at least
        'label' and usually 'units', 'errorbars' or 'variance' keys.
        """
        from copy import deepcopy
        from collections import OrderedDict
        data_columns = OrderedDict([
            ('x', {'label': self.xlabel, 'units': self.xunits, 'errorbars': 'dx'}),
            ('v', {'label': self.vlabel, 'units': self.vunits, 'errorbars': 'dv'}),
            ('Qz', {'label': 'Qz', 'units': "1/Ang"}),
            ('Qz_target', {'label': 'Target Qz', 'units': '1/Ang'}),
            ('Qx', {'label': 'Qx', 'units': "1/Ang"}),
            ('angular_resolution', {'label': 'Angular Resolution (1-sigma)', 'units': 'degrees'})
        ])
        # TODO: duplicate code in columns, apply_mask and refldata._group
        for subclsnm in ['sample', 'detector', 'monitor', 'slit1', 'slit2', 'slit3', 'slit4', 'monochromator']:
            subcls = getattr(self, subclsnm, None)
            if subcls is None:
                continue
            # deepcopy so the class-level column dicts are never mutated.
            sub_cols = deepcopy(getattr(subcls, 'columns', {}))
            for col in sub_cols.keys():
                # units are defined for the subcolumns, but nothing else... do that here:
                sub_col = sub_cols[col]
                v = getattr(subcls, col, None)
                # Only include columns that actually hold array data.
                if v is not None and hasattr(v, 'size') and v.size > 0:
                    label = "%s/%s" % (subclsnm, col)
                    sub_col['label'] = label
                    data_columns[label] = sub_col
        if self.scan_value is not None:
            for si, sv in enumerate(self.scan_value):
                new_col = {}
                new_label = self.scan_label[si]
                new_col['label'] = new_label
                new_col['is_scan'] = True
                new_col['units'] = self.scan_units[si]
                data_columns[new_label] = new_col
        return data_columns
def apply_mask(self, mask_indices):
"""in-place masking of all data that is maskable"""
def check_array(v):
return isinstance(v, np.ndarray)
def make_mask(v, mask_indices):
mask = np.ones_like(v, dtype="bool")
mask[mask_indices] = False
return mask
for prop in ['_v', '_dv', 'angular_resolution', 'Qz_target']:
v = getattr(self, prop, None)
if check_array(v):
masked_v = v[make_mask(v, mask_indices)]
setattr(self, prop, masked_v)
self.points = len(masked_v)
self.scan_value = [v[make_mask(v, mask_indices)] if check_array(v) else v for v in self.scan_value]
for subclsnm in ['sample', 'detector', 'monitor', 'slit1', 'slit2', 'slit3', 'slit4', 'monochromator']:
subcls = getattr(self, subclsnm, None)
if subcls is None:
continue
sub_cols = getattr(subcls, 'columns', {})
for col in sub_cols.keys():
v = getattr(subcls, col, None)
if check_array(v):
setattr(subcls, col, v[make_mask(v, mask_indices)])
# handle col_target
target_name = col + "_target"
v = getattr(subcls, target_name, None)
if check_array(v):
setattr(subcls, target_name, v[make_mask(v, mask_indices)])
# handle variance
dv_name = sub_cols[col].get('variance', None)
if dv_name is not None:
dv = getattr(subcls, dv_name, None)
if check_array(dv): setattr(subcls, dv_name, dv[make_mask(dv, mask_indices)])
    def __init__(self, **kw):
        # Give every instance its own group objects (sample, detector, ...)
        # before applying keyword overrides via Group.__init__, which
        # rejects unknown attribute names.
        for attr, cls in ReflData._groups:
            setattr(self, attr, cls())
        self.warnings = []
        Group.__init__(self, **kw)
    def __str__(self):
        # Render the top-level fields followed by each named subgroup.
        base = [_str(self, indent=2)]
        others = ["".join(("  ", s, "\n", str(getattr(self, s))))
                  for s, _ in ReflData._groups]
        return "\n".join(base+others)
def todict(self, maxsize=np.inf):
state = _toDict(self, maxsize=maxsize)
groups = {s: _toDict(getattr(self, s), maxsize=maxsize)
for s, _ in ReflData._groups}
state.update(groups)
return state
    def fromdict(self, state):
        """Restore the data from a dict produced by :meth:`todict`."""
        # Restore only declared fields; extra keys in state are ignored.
        props = {k: v for k, v in state.items() if k in self._fields}
        props = _fromDict(props)
        for k, v in props.items():
            setattr(self, k, v)
        # Rebuild each subgroup from its nested dict.
        for attr, cls in ReflData._groups:
            props = _fromDict(state[attr])
            setattr(self, attr, cls(**props))
    def warn(self, msg):
        """Record a warning that should be displayed to the user"""
        # Emit through the warnings machinery and also keep a copy on the
        # dataset so it can travel with the reduced data.
        warnings.warn(msg)
        self.warnings.append(msg)
def get_metadata(self):
"""
Return metadata used by webreduce.
The following are used in webreduce/instruments/ncnr.refl.js::
{
x: [..., xmin, ..., xmax, ...]}
sample: {name: str, description: str}
intent: str
polarization: str
filenumber: int
filename: str
entryname: str
mtime: int
source: str name of data server uri
}
*x* min and max are used for drawing the range indicators.
*sample.description* is displayed when hovering over link.
*source* and *filename* are needed for creating the hdf reader link.
*sample.name > intent > filenumber > polarization* forms the default
tree ordering.
Users can define their own tree organization from the other fields
in the dataset, so we should probably include trajectoryData entries.
Sample environment conditions could also be useful for some
experiments. In practice, though, the defaults are going to be
good enough, and users won't be changing them. Not sure what
happens when a vector field is used as a sort criterion.
"""
# TODO: Load and return minimal metadata for the file browser.
# TODO: Delay loading bulk of the data until file is selected.
# Limit metadata to scalars and small arrays
data = self.todict(maxsize=1000)
# If data['x'] is not a vector or if it was too big, then override
if self.x.ndim > 1 or len(data['x']) == 0 or self.x.ndim > 1:
if Intent.isslit(self.intent):
data['x'] = self.slit1.x.tolist()
else:
data['x'] = self.sample.angle_x.tolist()
return data
    def plot(self, label=None):
        """Plot v vs. x with error bars on the current matplotlib axes.

        *label* defaults to the dataset name plus polarization state.
        """
        if label is None:
            label = self.name+self.polarization
        from matplotlib import pyplot as plt
        # Only draw x error bars once the angular divergence is known.
        xerr = self.dx if self.angular_resolution is not None else None
        x, dx, xunits, xlabel = self.x, xerr, self.xunits, self.xlabel
        #x, dx, xunits, xlabel = self.detector.angle_x, self.angular_resolution, 'detector angle', 'deg'
        plt.errorbar(x, self.v, yerr=self.dv, xerr=xerr, label=label, fmt='.-')
        plt.xlabel("%s (%s)"%(xlabel, xunits) if xunits else xlabel)
        plt.ylabel("%s (%s)"%(self.vlabel, self.vunits) if self.vunits else self.vlabel)
        # Reflectivity spans decades; use a log scale except for slit scans.
        if not Intent.isslit(self.intent):
            plt.yscale('log')
def save(self, filename):
    """Write the column-text export of this dataset to *filename*."""
    with open(filename, 'w') as fid:
        # to_column_text returns a dict; only the rendered text is saved.
        fid.write(self.to_column_text()["value"])
# TODO: split refldata in to ReflBase and PointRefl so PSD doesn't inherit column format
@exports_text("column")
def to_column_text(self):
    """Render the dataset as tab-separated column text.

    Header lines are JSON-encoded '# "key": value' comments (name, entry,
    polarization, and wavelength info when available).  Scan data is
    written one column per entry in self.columns (suffix ".dat");
    non-scan data is written as x/v/dv/dx columns (suffix ".refl").

    Returns a dict with name, entry, file_suffix and the rendered text.
    """
    # Note: subclass this for non-traditional reflectometry measurements
    with BytesIO() as fid:  # numpy.savetxt requires a byte stream
        for n in ['name', 'entry', 'polarization']:
            _write_key_value(fid, n, getattr(self, n))
        wavelength = getattr(self.detector, "wavelength", None)
        wavelength_resolution = getattr(self.detector, "wavelength_resolution", None)
        if wavelength is not None:
            # Scalar header values: only the first array element is used.
            _write_key_value(fid, "wavelength", float(wavelength[0]))
        if wavelength_resolution is not None:
            _write_key_value(fid, "wavelength_resolution", float(wavelength_resolution[0]))
        if Intent.isscan(self.intent):
            _write_key_value(fid, "columns", list(self.columns.keys()))
            _write_key_value(fid, "units", [c.get("units", "") for c in self.columns.values()])
            # add column headers
            header_string = "\t".join(list(self.columns.keys())) + "\n"
            fid.write(header_string.encode('utf-8'))
            # Scanned columns come from scan_value; the rest are fetched
            # by attribute path from self.
            data_arrays = [
                self.scan_value[self.scan_label.index(p)] if v.get('is_scan', False)
                else get_item_from_path(self, p)
                for p, v in self.columns.items()
            ]
            # np.resize pads/truncates every column to the scan length.
            data_arrays = [np.resize(d, self.points) for d in data_arrays]
            # Strings print verbatim; everything else in scientific notation.
            format_string = "\t".join([
                "%s" if d.dtype.kind in ["S", "U"]
                else "%.10e"
                for d in data_arrays
            ]) + "\n"
            for i in range(self.points):
                datarow = format_string % tuple([d[i] for d in data_arrays])
                fid.write(datarow.encode('utf-8'))
            suffix = ".dat"
        else:
            _write_key_value(fid, "columns", [self.xlabel, self.vlabel, "uncertainty", "resolution"])
            _write_key_value(fid, "units", [self.xunits, self.vunits, self.vunits, self.xunits])
            data = np.vstack([self.x, self.v, self.dv, self.dx]).T
            np.savetxt(fid, data, fmt="%.10e")
            suffix = ".refl"
        # Grab the bytes before the BytesIO context closes the buffer.
        value = fid.getvalue()
    return {
        "name": self.name,
        "entry": self.entry,
        "file_suffix": suffix,
        "value": value.decode('utf-8'),
    }
def get_plottable(self):
    """Build the webreduce "nd" plottable structure for this dataset.

    Collects one value array per entry in self.columns (scan columns from
    scan_value, others resolved by attribute path), attaches error bars
    where a column declares an 'errorbars' path, and wraps everything in
    the dict format expected by the web plotter.
    """
    # Note: subclass this for non-traditional reflectometry measurements
    columns = self.columns  # {name: {label: str, units: str, errorbars: str}}
    data_arrays = [
        self.scan_value[self.scan_label.index(p)] if v.get('is_scan', False)
        else get_item_from_path(self, p)
        for p, v in columns.items()]
    # Pad/truncate every column to the number of scan points, then convert
    # to plain lists for JSON serialization.
    data_arrays = [np.resize(d, self.points).tolist() for d in data_arrays]
    datas = {c: {"values": d} for c, d in zip(columns.keys(), data_arrays)}
    # add errorbars:
    for k in columns.keys():
        if 'errorbars' in columns[k]:
            #print('errorbars found for column %s' % (k,))
            errorbars = get_item_from_path(self, columns[k]['errorbars'])
            if errorbars is not None:
                datas[k]["errorbars"] = errorbars.tolist()
            else:
                print(f"===> missing errorbars {columns[k]['errorbars']} for {k}")
    name = getattr(self, "name", "default_name")
    entry = getattr(self, "entry", "default_entry")
    series = [{"label": "%s:%s" % (name, entry)}]
    # Default axes shown by the plotter; users can switch columns there.
    xcol = "x"
    ycol = "v"
    plottable = {
        "type": "nd",
        "title": "%s:%s" % (name, entry),
        "entry": entry,
        "columns": columns,
        "options": {
            "series": series,
            "axes": {
                "xaxis": {"label": "%s(%s)" % (columns[xcol]["label"], columns[xcol]["units"])},
                "yaxis": {"label": "%s(%s)" % (columns[ycol]["label"], columns[ycol]["units"])}
            },
            "xcol": xcol,
            "ycol": ycol,
            "errorbar_width": 0
        },
        "datas": datas
    }
    #print(plottable)
    return plottable
class PSDData(ReflData):
    """PSD data for reflectometer.

    Specializes ReflData for 2-D position-sensitive-detector frames:
    self.v is a (points x pixels) image rather than a 1-D curve.
    """
    def plot(self, label=None):
        """Show the detector image as a log-intensity colormesh."""
        if label is None:
            label = self.name+self.polarization
        from matplotlib import pyplot as plt
        # log of counts; (v == 0) bumps empty bins to 1 so log() is finite.
        data = np.log(self.v + (self.v == 0))
        plt.pcolormesh(data, label=label)
        plt.xlabel("pixel")
        plt.ylabel("%s (%s)"%(self.xlabel, self.xunits))

    def get_axes(self):
        """Return ((x, xlabel), (y, ylabel)) axes for the 2-D image.

        x is always the pixel index; y depends on the measurement intent
        (slit scan -> S1 opening, specular -> Qz, otherwise point number).
        """
        ny, nx = self.v.shape
        x, xlabel = np.arange(1, nx+1), "pixel"
        if Intent.isslit(self.intent):
            y, ylabel = self.slit1.x, "S1"
        elif Intent.isspec(self.intent):
            y, ylabel = self.Qz_target, "Qz"
        else:
            y, ylabel = np.arange(1, ny+1), "point"
        return (x, xlabel), (y, ylabel)

    def get_plottable(self):
        """Build the webreduce "2d" plottable structure for the image."""
        name = getattr(self, "name", "default_name")
        entry = getattr(self, "entry", "default_entry")
        def limits(v, n):
            # Half-bin padding on each side so edge bins are fully drawn.
            low, high = v.min(), v.max()
            delta = (high - low) / max(n-1, 1)
            # TODO: move range cleanup to plotter
            if delta == 0.:
                delta = v[0]/10.
            return low - delta/2, high+delta/2
        data = self.v
        ny, nx = data.shape
        (x, xlabel), (y, ylabel) = self.get_axes()
        #print("data shape", nx, ny)
        xmin, xmax = limits(x, nx)
        ymin, ymax = limits(y, ny)
        # TODO: self.detector.mask
        zmin, zmax = data.min(), data.max()
        # TODO: move range cleanup to plotter
        # Keep the z range strictly positive for the log transform below.
        if zmin <= 0.:
            if (data > 0).any():
                zmin = data[data > 0].min()
            else:
                # NOTE: all-zero frame — the image itself is overwritten
                # with a tiny positive constant here.
                data[:] = zmin = 1e-10
        if zmin >= zmax:
            zmax = 10*zmin
        dims = {
            "xmin": xmin, "xmax": xmax, "xdim": nx,
            "ymin": ymin, "ymax": ymax, "ydim": ny,
            "zmin": zmin, "zmax": zmax,
        }
        # Transpose then flatten so pixels vary fastest for the plotter.
        z = data.T.ravel('C').tolist()
        plottable = {
            #'type': '2d_multi',
            #'dims': {'zmin': zmin, 'zmax': zmax},
            #'datasets': [{'dims': dims, 'data': z}],
            'type': '2d',
            'dims': dims,
            'z': [z],
            'entry': entry,
            'title': "%s:%s" % (name, entry),
            'options': {
                'fixedAspect': {
                    'fixAspect': False,
                    'aspectRatio': 1.0,
                },
            },
            'xlabel': xlabel,
            'ylabel': ylabel,
            'zlabel': 'Intensity (I)',
            'ztransform': 'log',
        }
        #print(plottable)
        return plottable

    # TODO: Define export format for partly reduced PSD data.
    @exports_json("json")
    def to_json_text(self):
        """Export the full data object as JSON-serializable dict."""
        name = getattr(self, "name", "default_name")
        entry = getattr(self, "entry", "default_entry")
        return {
            "name": name,
            "entry": entry,
            "file_suffix": ".dat",
            "value": self._toDict(),
        }

    # Kill column writer for now
    def to_column_text(self):
        # Intentionally disabled: the inherited 1-D column format does not
        # apply to 2-D PSD frames; returns None.
        pass
def get_item_from_path(obj, path):
    """Resolve a slash-separated attribute path against *obj*.

    ``get_item_from_path(obj, "a/b/c")`` returns ``obj.a.b.c``; if any
    component along the path is missing, ``None`` is returned instead.
    """
    parts = path.split("/")
    node = obj
    # Walk the intermediate components; a missing one yields an empty
    # placeholder so the final lookup falls through to None.
    for name in parts[:-1]:
        node = getattr(node, name, {})
    return getattr(node, parts[-1], None)
def _write_key_value(fid, key, value):
    """Write one comment header line of the form '# "key": value' to *fid*.

    The value is JSON-encoded with NumpyEncoder so numpy scalars and
    arrays serialize cleanly.
    """
    value_str = json.dumps(value, cls=NumpyEncoder)
    if IS_PY3:
        # fid is a byte stream (BytesIO / binary file), so encode explicitly.
        fid.write('# "{0}": {1}\n'.format(key, value_str).encode('utf-8'))
    else:
        # Python 2: byte strings can be written directly.
        fid.write('# "%s": %s\n'%(key, value_str))
def _str(object, indent=4):
"""
Helper function: document data object by convert attributes listed in
properties into a string.
"""
props = [a + "=" + str(getattr(object, a)) for a in object._fields]
prefix = " "*indent
return prefix+("\n"+prefix).join(props)
def _toDict(obj, maxsize=np.inf):
    """Convert *obj* into a plain dict of JSON-friendly values.

    Every attribute named in ``obj._fields`` or ``obj._props`` is passed
    through :func:`_toDictItem`; arrays with *maxsize* or more elements
    are elided by that helper.
    """
    names = list(getattr(obj, '_fields', ())) + list(getattr(obj, '_props', ()))
    return {name: _toDictItem(getattr(obj, name), maxsize=maxsize)
            for name in names}
def _toDictItem(obj, maxsize=None):
if isinstance(obj, np.integer):
obj = int(obj)
elif isinstance(obj, np.floating):
obj = float(obj)
elif isinstance(obj, np.ndarray):
obj = obj.tolist() if obj.size < maxsize else [] #[float(obj.min()), float(obj.max())]
elif isinstance(obj, datetime.datetime):
obj = [obj.year, obj.month, obj.day, obj.hour, obj.minute, obj.second]
elif isinstance(obj, (list, tuple)):
obj = [_toDictItem(a, maxsize) for a in obj]
return obj
def _fromDict(props):
# Note: timestamps must have the property named "date"
for name, value in props.items():
if isinstance(value, list) and value:
if all(isinstance(v, (int, float)) for v in value):
props[name] = np.asarray(value)
elif name == 'date':
props[name] = datetime.datetime(*value)
return props
def _set(object, kw):
"""
Helper function: distribute the __init__ keyword parameters to
individual attributes of an object, raising AttributeError if
the class does not define the given attribute.
Example:
def __init__(self, **kw): _set(self,kw)
"""
for k, v in kw.items():
# this will fail with an attribute error for incorrect keys
getattr(object, k)
setattr(object, k, v)
| 37.906562 | 125 | 0.618177 |
7e935e1b5e16b957db9a5d507fa8561cdd5de762 | 1,652 | py | Python | selvbetjening/sadmin2/graph.py | animekita/selvbetjening | fee63d178fbd5ce2976c04d3a4b2dde6d8691892 | [
"MIT"
] | null | null | null | selvbetjening/sadmin2/graph.py | animekita/selvbetjening | fee63d178fbd5ce2976c04d3a4b2dde6d8691892 | [
"MIT"
] | 3 | 2020-02-11T21:54:59.000Z | 2021-06-10T17:35:21.000Z | selvbetjening/sadmin2/graph.py | animekita/selvbetjening | fee63d178fbd5ce2976c04d3a4b2dde6d8691892 | [
"MIT"
] | null | null | null | from datetime import date, timedelta
def diff_in_months(ref_date, date):
    """Number of whole calendar months from *ref_date* to *date*."""
    year_gap = date.year - ref_date.year
    month_gap = date.month - ref_date.month
    return 12 * year_gap + month_gap
def diff_in_weeks(ref_date, date):
    """Approximate number of Monday-based calendar weeks (strftime %W)
    between *ref_date* and *date*, treating each year as exactly 52 weeks."""
    def week_of(d):
        return int(d.strftime('%W'))
    years = date.year - ref_date.year
    return years * 52 + week_of(date) - week_of(ref_date)
def accumulate(data):
    """Return the running (prefix) sums of *data* as a list.

    ``accumulate([1, 2, 3]) -> [1, 3, 6]``; an empty input yields an
    empty list.  Uses itertools.accumulate, which performs the same
    left-fold in C and also works for any type supporting ``+``.
    """
    import itertools
    return list(itertools.accumulate(data))
def insert_prefix(data, axis=False):
    """Prepend an initial zero state to a graph series, in place.

    Data series get a leading ``0``; axis label lists (*axis* True) get a
    leading blank label.  Should be applied to both axis and data so they
    stay aligned.  The mutated list is returned for chaining.
    """
    if axis:
        prefix = " "
    else:
        prefix = 0
    data.insert(0, prefix)
    return data
def generate_month_axis(start_date, end_date):
    """Return one "<Month> <Year>" label per month from *start_date*
    through *end_date*, both inclusive.

    Bug fixes: the original used true division (``/``) for the year
    carry, which produces a float under Python 3 and makes
    ``date(year=...)`` raise TypeError; it also mapped a December offset
    to month 1 while still advancing the year.
    """
    labels = []
    months = diff_in_months(start_date, end_date)
    for x in range(months + 1):
        # 0-based month arithmetic: carry whole years with //, keep 1..12.
        total = start_date.month - 1 + x
        month = date(year=start_date.year + total // 12,
                     month=total % 12 + 1,
                     day=1)
        labels.append(month.strftime("%B %Y"))
    return labels
def generate_week_axis(start_date, end_date):
    """Return one label per week from *start_date* to *end_date*.

    The first week of each month is labelled "<Month> - <week#>"; the
    remaining weeks show just the %W week number.

    Bug fix: the loop used Python 2's ``xrange``, which is a NameError
    under Python 3; replaced with ``range``.
    """
    axis = []
    last_month = None
    for week in range(0, diff_in_weeks(start_date, end_date) + 1):
        current = start_date + timedelta(days=week*7)
        if last_month == current.month:
            axis.append('%s' % current.strftime('%W'))
        else:
            axis.append('%s - %s' % (current.strftime('%B'), current.strftime('%W')))
            last_month = current.month
    return axis
78a9cf4d65f60f0dc979b6f6f00478e1232aa385 | 5,401 | py | Python | zephyr/zmake/zmake/__main__.py | IssacAlegre/chrome-ec | 19c3731dfd5250bfadaa90940f108476444d49b1 | [
"BSD-3-Clause"
] | null | null | null | zephyr/zmake/zmake/__main__.py | IssacAlegre/chrome-ec | 19c3731dfd5250bfadaa90940f108476444d49b1 | [
"BSD-3-Clause"
] | null | null | null | zephyr/zmake/zmake/__main__.py | IssacAlegre/chrome-ec | 19c3731dfd5250bfadaa90940f108476444d49b1 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2020 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The entry point into zmake."""
import argparse
import inspect
import logging
import pathlib
import sys
import zmake.multiproc as multiproc
import zmake.zmake as zm
def call_with_namespace(func, namespace):
    """Invoke *func* with matching arguments pulled from a Namespace.

    Namespace entries are matched to parameter names after translating
    dashes to underscores; entries with no corresponding parameter are
    silently ignored.

    Args:
        func: The callable to call.
        namespace: The namespace to apply to the callable.

    Returns:
        The result of calling the callable.
    """
    accepted = set(inspect.signature(func).parameters)
    kwds = {
        key.replace('-', '_'): value
        for key, value in vars(namespace).items()
        if key.replace('-', '_') in accepted
    }
    return func(**kwds)
# Dictionary used to map log level strings to their corresponding int values.
# Built from the logging module itself so the values always match; insertion
# order (DEBUG..CRITICAL) is preserved for argparse choices.
log_level_map = {
    name: getattr(logging, name)
    for name in ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL')
}
def main(argv=None):
    """The main function.

    Parses the zmake command line, configures logging, then dispatches
    the chosen subcommand to the matching method on a Zmake instance.

    Args:
        argv: Optionally, the command-line to parse, not including argv[0].

    Returns:
        Zero upon success, or non-zero upon failure.
    """
    if argv is None:
        argv = sys.argv[1:]
    parser = argparse.ArgumentParser()
    # Global options shared by every subcommand; they are routed to
    # Zmake's constructor by call_with_namespace below.
    parser.add_argument('--checkout', type=pathlib.Path,
                        help='Path to ChromiumOS checkout')
    parser.add_argument('-D', '--debug', action='store_true', default=False,
                        help='Turn on zmake debugging (e.g. stack trace)')
    parser.add_argument('-j', '--jobs', type=int,
                        help='Degree of multiprogramming to use')
    parser.add_argument('-l', '--log-level', choices=list(log_level_map.keys()),
                        default='WARNING',
                        dest='log_level',
                        help='Set the logging level (default=WARNING)')
    parser.add_argument('-L', '--no-log-label', action='store_true',
                        default=False,
                        help='Turn off logging labels')
    parser.add_argument('--modules-dir',
                        type=pathlib.Path,
                        help='The path to a directory containing all modules '
                        'needed. If unspecified, zmake will assume you have '
                        'a Chrome OS checkout and try locating them in the '
                        'checkout.')
    parser.add_argument('--zephyr-base', type=pathlib.Path,
                        help='Path to Zephyr OS repository')
    sub = parser.add_subparsers(dest='subcommand', help='Subcommand')
    sub.required = True
    # "configure": generate the build directory (optionally build/test too).
    configure = sub.add_parser('configure')
    configure.add_argument(
        '--ignore-unsupported-zephyr-version', action='store_true',
        help="Don't warn about using an unsupported Zephyr version")
    configure.add_argument('-t', '--toolchain', help='Name of toolchain to use')
    configure.add_argument('--bringup', action='store_true',
                           dest='bringup',
                           help='Enable bringup debugging features')
    configure.add_argument('-B', '--build-dir', type=pathlib.Path,
                           help='Build directory')
    configure.add_argument('-b', '--build', action='store_true',
                           dest='build_after_configure',
                           help='Run the build after configuration')
    configure.add_argument('--test', action='store_true',
                           dest='test_after_configure',
                           help='Test the .elf file after configuration')
    configure.add_argument('project_dir', type=pathlib.Path,
                           help='Path to the project to build')
    configure.add_argument('-c', '--coverage', action='store_true',
                           dest='coverage',
                           help='Enable CONFIG_COVERAGE Kconfig.')
    # "build": build a previously configured directory.
    build = sub.add_parser('build')
    build.add_argument('build_dir', type=pathlib.Path,
                       help='The build directory used during configuration')
    build.add_argument('-w', '--fail-on-warnings', action='store_true',
                       help='Exit with code 2 if warnings are detected')
    # "test" / "testall" / "coverage": run tests or gather coverage.
    test = sub.add_parser('test')
    test.add_argument('build_dir', type=pathlib.Path,
                      help='The build directory used during configuration')
    testall = sub.add_parser('testall')
    coverage = sub.add_parser('coverage')
    coverage.add_argument('build_dir', type=pathlib.Path,
                          help='The build directory used during configuration')
    opts = parser.parse_args(argv)

    if opts.no_log_label:
        log_format = '%(message)s'
    else:
        log_format = '%(asctime)s - %(name)s/%(levelname)s: %(message)s'
    logging.basicConfig(format=log_format, level=log_level_map.get(opts.log_level))

    # Hide tracebacks from users unless debugging was requested.
    if not opts.debug:
        sys.tracebacklimit = 0

    try:
        # Construct Zmake from the global options, then look up and invoke
        # the method named after the subcommand ('-' mapped to '_').
        zmake = call_with_namespace(zm.Zmake, opts)
        subcommand_method = getattr(zmake, opts.subcommand.replace('-', '_'))
        result = call_with_namespace(subcommand_method, opts)
        return result
    finally:
        # Always flush/join the multiprocess log readers before exiting.
        multiproc.wait_for_log_end()


if __name__ == '__main__':
    sys.exit(main())
| 37.506944 | 83 | 0.603036 |
07cfa7c073692f7ff81f78defb2e3541ec3803be | 13,313 | py | Python | nf/flows.py | arita37/normalizing-flows | c9896656bfd2007b0c17b801c0fe068560127301 | [
"MIT"
] | 1 | 2019-11-11T05:40:30.000Z | 2019-11-11T05:40:30.000Z | nf/flows.py | arita37/normalizing-flows | c9896656bfd2007b0c17b801c0fe068560127301 | [
"MIT"
] | null | null | null | nf/flows.py | arita37/normalizing-flows | c9896656bfd2007b0c17b801c0fe068560127301 | [
"MIT"
] | null | null | null | import math
import numpy as np
import scipy as sp
import scipy.linalg
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
from nf.utils import unconstrained_RQS
# Supported non-linearities: note that the function must be invertible.
# Maps h -> h'(x), the pointwise derivative needed for the planar-flow
# log-determinant.
#
# Fixes vs. the original:
#  * leaky_relu: the slope for x < 0 was written as -0.01; the derivative
#    of F.leaky_relu (default negative_slope=0.01) is +0.01 there.
#  * `.type(torch.FloatTensor)` silently moved results to CPU float32;
#    `.to(x)` keeps the device and dtype of the input tensor.
functional_derivatives = {
    # d/dx tanh(x) = 1 - tanh(x)^2
    torch.tanh: lambda x: 1 - torch.pow(torch.tanh(x), 2),
    # d/dx leaky_relu(x) = 1 for x > 0, negative_slope (0.01) for x < 0
    F.leaky_relu: lambda x: (x > 0).to(x) + (x < 0).to(x) * 0.01,
    # d/dx elu(x) = 1 for x > 0, exp(x) for x < 0 (alpha = 1)
    F.elu: lambda x: (x > 0).to(x) + (x < 0).to(x) * torch.exp(x),
}
class Planar(nn.Module):
    """
    Planar flow.

        z = f(x) = x + u h(wᵀx + b)

    [Rezende and Mohamed, 2015]
    """
    def __init__(self, dim, nonlinearity=torch.tanh):
        super().__init__()
        self.h = nonlinearity
        # Parameters start as uninitialized storage; reset_parameters
        # fills them below.
        self.w = nn.Parameter(torch.Tensor(dim))
        self.u = nn.Parameter(torch.Tensor(dim))
        self.b = nn.Parameter(torch.Tensor(1))
        self.reset_parameters(dim)

    def reset_parameters(self, dim):
        # Uniform init in [-1/sqrt(dim), 1/sqrt(dim)].
        init.uniform_(self.w, -math.sqrt(1/dim), math.sqrt(1/dim))
        init.uniform_(self.u, -math.sqrt(1/dim), math.sqrt(1/dim))
        init.uniform_(self.b, -math.sqrt(1/dim), math.sqrt(1/dim))

    def forward(self, x):
        """
        Given x, returns z and the log-determinant log|df/dx|.
        """
        if self.h in (F.elu, F.leaky_relu):
            # Monotonic activations with positive derivative: u is used
            # directly.
            u = self.u
        elif self.h == torch.tanh:
            # Reparameterize u so the flow remains invertible for tanh
            # (Rezende & Mohamed, appendix).  NOTE(review): the canonical
            # correction uses w/||w||^2; this code divides by ||w|| once —
            # confirm against the paper.
            scal = torch.log(1+torch.exp(self.w @ self.u)) - self.w @ self.u - 1
            u = self.u + scal * self.w / torch.norm(self.w)
        else:
            raise NotImplementedError("Non-linearity is not supported.")
        lin = torch.unsqueeze(x @ self.w, 1) + self.b
        z = x + u * self.h(lin)
        # phi = h'(wᵀx + b) w;  |det df/dx| = |1 + phiᵀu|
        phi = functional_derivatives[self.h](lin) * self.w
        # 1e-4 keeps the log finite when the determinant is ~0.
        log_det = torch.log(torch.abs(1 + phi @ u) + 1e-4)
        return z, log_det

    def backward(self, z):
        raise NotImplementedError("Planar flow has no algebraic inverse.")
class Radial(nn.Module):
    """
    Radial flow.

        z = f(x) = x + β h(α, r)(x − x0)

    [Rezende and Mohamed 2015]

    Bug fixes vs. the original: ``reset_parameters`` was missing ``self``,
    initialized a nonexistent ``self.z0`` instead of ``self.x0``, and was
    never invoked, leaving all parameters as uninitialized memory.  It is
    now a proper method and is called from ``__init__`` (mirroring Planar).
    """
    def __init__(self, dim):
        super().__init__()
        self.x0 = nn.Parameter(torch.Tensor(dim))
        self.log_alpha = nn.Parameter(torch.Tensor(1))
        self.beta = nn.Parameter(torch.Tensor(1))
        self.reset_parameters(dim)

    def reset_parameters(self, dim):
        # Uniform init in [-1/sqrt(dim), 1/sqrt(dim)], as in Planar.
        init.uniform_(self.x0, -math.sqrt(1/dim), math.sqrt(1/dim))
        init.uniform_(self.log_alpha, -math.sqrt(1/dim), math.sqrt(1/dim))
        init.uniform_(self.beta, -math.sqrt(1/dim), math.sqrt(1/dim))

    def forward(self, x):
        """
        Given x, returns z and the log-determinant log|df/dx|.
        """
        m, n = x.shape
        # NOTE(review): torch.norm here reduces over the entire batch, so
        # r is a single scalar shared by all samples — confirm whether a
        # per-sample norm was intended.
        r = torch.norm(x - self.x0)
        h = 1 / (torch.exp(self.log_alpha) + r)
        # beta is constrained to beta >= -alpha via -alpha + softplus(beta).
        beta = -torch.exp(self.log_alpha) + torch.log(1 + torch.exp(self.beta))
        z = x + beta * h * (x - self.x0)
        log_det = (n - 1) * torch.log(1 + beta * h) + \
                  torch.log(1 + beta * h - \
                            beta * r / (torch.exp(self.log_alpha) + r) ** 2)
        return z, log_det
class FCNN(nn.Module):
    """Simple fully connected network: two hidden tanh layers.

    Used as the default conditioner network for the flows in this module.
    """
    def __init__(self, in_dim, out_dim, hidden_dim):
        super().__init__()
        # Modules are created in the same order as before so seeded
        # parameter initialization is unchanged.
        layers = [
            nn.Linear(in_dim, hidden_dim),
            nn.Tanh(),
            nn.Linear(hidden_dim, hidden_dim),
            nn.Tanh(),
            nn.Linear(hidden_dim, out_dim),
        ]
        self.network = nn.Sequential(*layers)

    def forward(self, x):
        """Apply the network to a batch of inputs."""
        return self.network(x)
class RealNVP(nn.Module):
    """
    Non-volume preserving flow: two alternating affine coupling layers.

    The input is split in half; each half is shifted/scaled by networks
    conditioned on the other half, so the Jacobian is triangular and the
    transform is invertible in closed form.
    [Dinh et. al. 2017]

    NOTE(review): the split assumes dim is even (both halves sized
    dim // 2) — confirm odd dims are never used.
    """
    def __init__(self, dim, hidden_dim = 8, base_network=FCNN):
        super().__init__()
        self.dim = dim
        # t*: translation networks, s*: log-scale networks.
        self.t1 = base_network(dim // 2, dim // 2, hidden_dim)
        self.s1 = base_network(dim // 2, dim // 2, hidden_dim)
        self.t2 = base_network(dim // 2, dim // 2, hidden_dim)
        self.s2 = base_network(dim // 2, dim // 2, hidden_dim)

    def forward(self, x):
        """Map x -> z; returns (z, log|det df/dx|) per sample."""
        lower, upper = x[:,:self.dim // 2], x[:,self.dim // 2:]
        # First coupling: transform upper conditioned on lower.
        t1_transformed = self.t1(lower)
        s1_transformed = self.s1(lower)
        upper = t1_transformed + upper * torch.exp(s1_transformed)
        # Second coupling: transform lower conditioned on (new) upper.
        t2_transformed = self.t2(upper)
        s2_transformed = self.s2(upper)
        lower = t2_transformed + lower * torch.exp(s2_transformed)
        z = torch.cat([lower, upper], dim=1)
        # log-det of an affine coupling is the sum of its log-scales.
        log_det = torch.sum(s1_transformed, dim=1) + \
                  torch.sum(s2_transformed, dim=1)
        return z, log_det

    def backward(self, z):
        """Exact inverse of forward; returns (x, log|det dg/dz|)."""
        lower, upper = z[:,:self.dim // 2], z[:,self.dim // 2:]
        # Undo the couplings in reverse order.
        t2_transformed = self.t2(upper)
        s2_transformed = self.s2(upper)
        lower = (lower - t2_transformed) * torch.exp(-s2_transformed)
        t1_transformed = self.t1(lower)
        s1_transformed = self.s1(lower)
        upper = (upper - t1_transformed) * torch.exp(-s1_transformed)
        x = torch.cat([lower, upper], dim=1)
        log_det = torch.sum(-s1_transformed, dim=1) + \
                  torch.sum(-s2_transformed, dim=1)
        return x, log_det
class MAF(nn.Module):
    """
    Masked auto-regressive flow.

    Dimension i is an affine function of x[:i], so the Jacobian is
    triangular.  The output is flipped so that stacked MAF layers see the
    dimensions in alternating order.
    [Papamakarios et al. 2018]
    """
    def __init__(self, dim, hidden_dim = 8, base_network=FCNN):
        super().__init__()
        self.dim = dim
        self.layers = nn.ModuleList()
        # (mu, alpha) for the first dimension, which has no conditioners.
        self.initial_param = nn.Parameter(torch.Tensor(2))
        # One conditioner network per remaining dimension, taking the
        # first i dimensions and emitting (mu, alpha).
        for i in range(1, dim):
            self.layers += [base_network(i, 2, hidden_dim)]
        self.reset_parameters()

    def reset_parameters(self):
        init.uniform_(self.initial_param, -math.sqrt(0.5), math.sqrt(0.5))

    def forward(self, x):
        """Map x -> z dimension by dimension; returns (z, log_det)."""
        z = torch.zeros_like(x)
        log_det = torch.zeros(z.shape[0])
        for i in range(self.dim):
            if i == 0:
                mu, alpha = self.initial_param[0], self.initial_param[1]
            else:
                out = self.layers[i - 1](x[:, :i])
                mu, alpha = out[:, 0], out[:, 1]
            # Normalizing direction: z_i = (x_i - mu) / exp(alpha).
            z[:, i] = (x[:, i] - mu) / torch.exp(alpha)
            log_det -= alpha
        # Reverse dimension order for the next stacked layer.
        return z.flip(dims=(1,)), log_det

    def backward(self, z):
        """Invert forward; inherently sequential in the data dimension."""
        x = torch.zeros_like(z)
        log_det = torch.zeros(z.shape[0])
        # Undo the permutation applied at the end of forward.
        z = z.flip(dims=(1,))
        for i in range(self.dim):
            if i == 0:
                mu, alpha = self.initial_param[0], self.initial_param[1]
            else:
                # Conditioned on already-reconstructed dimensions of x.
                out = self.layers[i - 1](x[:, :i])
                mu, alpha = out[:, 0], out[:, 1]
            x[:, i] = mu + torch.exp(alpha) * z[:, i]
            log_det += alpha
        return x, log_det
class ActNorm(nn.Module):
    """Activation normalization: a learned per-dimension affine map.

        z = x * exp(log_sigma) + mu

    Parameters start at the identity (mu = 0, log_sigma = 0).
    [Kingma and Dhariwal, 2018.]
    """
    def __init__(self, dim):
        super().__init__()
        self.dim = dim
        self.mu = nn.Parameter(torch.zeros(dim, dtype=torch.float))
        self.log_sigma = nn.Parameter(torch.zeros(dim, dtype=torch.float))

    def forward(self, x):
        """Scale then shift; log-det is the sum of the log scales."""
        scale = torch.exp(self.log_sigma)
        return x * scale + self.mu, self.log_sigma.sum()

    def backward(self, z):
        """Invert the affine map: shift back then divide by the scale."""
        scale = torch.exp(self.log_sigma)
        return (z - self.mu) / scale, -self.log_sigma.sum()
class OneByOneConv(nn.Module):
    """
    Invertible 1x1 convolution, parameterized through a PLU decomposition
    so the log-determinant reduces to sum(log|S|).
    [Kingma and Dhariwal, 2018.]
    """
    def __init__(self, dim):
        super().__init__()
        self.dim = dim
        # Start from a random orthogonal matrix; keep its P (fixed
        # permutation) and learnable L, U, and diagonal S factors.
        W, _ = sp.linalg.qr(np.random.randn(dim, dim))
        P, L, U = sp.linalg.lu(W)
        self.P = torch.tensor(P, dtype = torch.float)
        self.L = nn.Parameter(torch.tensor(L, dtype = torch.float))
        self.S = nn.Parameter(torch.tensor(np.diag(U), dtype = torch.float))
        self.U = nn.Parameter(torch.triu(torch.tensor(U, dtype = torch.float),
                                         diagonal = 1))
        # Cached inverse for repeated backward passes; None until first use.
        # NOTE(review): the cache is never invalidated when L/S/U change
        # during training — confirm backward is only used after training.
        self.W_inv = None

    def forward(self, x):
        # Rebuild W = P L (U + diag(S)) with unit-diagonal L.
        L = torch.tril(self.L, diagonal = -1) + torch.diag(torch.ones(self.dim))
        U = torch.triu(self.U, diagonal = 1)
        z = x @ self.P @ L @ (U + torch.diag(self.S))
        log_det = torch.sum(torch.log(torch.abs(self.S)))
        return z, log_det

    def backward(self, z):
        # Bug fix: `if not self.W_inv:` raised "Boolean value of Tensor
        # with more than one element is ambiguous" on every call after the
        # first (W_inv is a dim x dim tensor); test against None instead.
        if self.W_inv is None:
            L = torch.tril(self.L, diagonal = -1) + \
                torch.diag(torch.ones(self.dim))
            U = torch.triu(self.U, diagonal = 1)
            W = self.P @ L @ (U + torch.diag(self.S))
            self.W_inv = torch.inverse(W)
        x = z @ self.W_inv
        log_det = -torch.sum(torch.log(torch.abs(self.S)))
        return x, log_det
class NSF_AR(nn.Module):
    """
    Neural spline flow, auto-regressive.

    Each dimension is transformed by a monotonic rational-quadratic
    spline with K bins on [-B, B] (identity outside), whose parameters
    are predicted from the preceding dimensions.
    [Durkan et al. 2019]
    """
    def __init__(self, dim, K = 5, B = 3, hidden_dim = 8, base_network = FCNN):
        super().__init__()
        self.dim = dim
        self.K = K
        self.B = B
        self.layers = nn.ModuleList()
        # Spline parameters for dimension 0 (no conditioners): K widths,
        # K heights, K-1 derivatives -> 3K - 1 values.
        self.init_param = nn.Parameter(torch.Tensor(3 * K - 1))
        for i in range(1, dim):
            self.layers += [base_network(i, 3 * K - 1, hidden_dim)]
        self.reset_parameters()

    def reset_parameters(self):
        init.uniform_(self.init_param, - 1 / 2, 1 / 2)

    def forward(self, x):
        """Map x -> z dimension by dimension; returns (z, log_det)."""
        z = torch.zeros_like(x)
        log_det = torch.zeros(z.shape[0])
        for i in range(self.dim):
            if i == 0:
                init_param = self.init_param.expand(x.shape[0], 3 * self.K - 1)
                W, H, D = torch.split(init_param, self.K, dim = 1)
            else:
                out = self.layers[i - 1](x[:, :i])
                # split(…, K) over 3K-1 columns yields chunks [K, K, K-1]:
                # widths, heights, and the K-1 interior derivatives.
                W, H, D = torch.split(out, self.K, dim = 1)
            # Normalize widths/heights to partition the [-B, B] box;
            # softplus keeps the knot derivatives positive (monotonicity).
            W, H = torch.softmax(W, dim = 1), torch.softmax(H, dim = 1)
            W, H = 2 * self.B * W, 2 * self.B * H
            D = F.softplus(D)
            z[:, i], ld = unconstrained_RQS(
                x[:, i], W, H, D, inverse=False, tail_bound=self.B)
            log_det += ld
        return z, log_det

    def backward(self, z):
        """Invert forward; sequential because x[:i] must be rebuilt first."""
        x = torch.zeros_like(z)
        log_det = torch.zeros(x.shape[0])
        for i in range(self.dim):
            if i == 0:
                init_param = self.init_param.expand(x.shape[0], 3 * self.K - 1)
                W, H, D = torch.split(init_param, self.K, dim = 1)
            else:
                out = self.layers[i - 1](x[:, :i])
                W, H, D = torch.split(out, self.K, dim = 1)
            W, H = torch.softmax(W, dim = 1), torch.softmax(H, dim = 1)
            W, H = 2 * self.B * W, 2 * self.B * H
            D = F.softplus(D)
            x[:, i], ld = unconstrained_RQS(
                z[:, i], W, H, D, inverse = True, tail_bound = self.B)
            log_det += ld
        return x, log_det
class NSF_CL(nn.Module):
    """
    Neural spline flow, coupling layer.

    Like RealNVP but with monotonic rational-quadratic splines (K bins on
    [-B, B]) instead of affine transforms: each half of the input is
    spline-transformed with parameters predicted from the other half.
    [Durkan et al. 2019]

    NOTE(review): both halves are sized dim // 2, so dim is assumed even.
    """
    def __init__(self, dim, K = 5, B = 3, hidden_dim = 8, base_network = FCNN):
        super().__init__()
        self.dim = dim
        self.K = K
        self.B = B
        # Each conditioner emits 3K-1 spline parameters per transformed
        # dimension (K widths, K heights, K-1 derivatives).
        self.f1 = base_network(dim // 2, (3 * K - 1) * dim // 2, hidden_dim)
        self.f2 = base_network(dim // 2, (3 * K - 1) * dim // 2, hidden_dim)

    def forward(self, x):
        """Map x -> z via two spline couplings; returns (z, log_det)."""
        log_det = torch.zeros(x.shape[0])
        lower, upper = x[:, :self.dim // 2], x[:, self.dim // 2:]
        # First coupling: transform upper conditioned on lower.
        out = self.f1(lower).reshape(-1, self.dim // 2, 3 * self.K - 1)
        W, H, D = torch.split(out, self.K, dim = 2)
        W, H = torch.softmax(W, dim = 2), torch.softmax(H, dim = 2)
        W, H = 2 * self.B * W, 2 * self.B * H
        D = F.softplus(D)
        upper, ld = unconstrained_RQS(
            upper, W, H, D, inverse=False, tail_bound=self.B)
        log_det += torch.sum(ld, dim = 1)
        # Second coupling: transform lower conditioned on (new) upper.
        out = self.f2(upper).reshape(-1, self.dim // 2, 3 * self.K - 1)
        W, H, D = torch.split(out, self.K, dim = 2)
        W, H = torch.softmax(W, dim = 2), torch.softmax(H, dim = 2)
        W, H = 2 * self.B * W, 2 * self.B * H
        D = F.softplus(D)
        lower, ld = unconstrained_RQS(
            lower, W, H, D, inverse=False, tail_bound=self.B)
        log_det += torch.sum(ld, dim = 1)
        return torch.cat([lower, upper], dim = 1), log_det

    def backward(self, z):
        """Exact inverse of forward: undo the couplings in reverse order."""
        log_det = torch.zeros(z.shape[0])
        lower, upper = z[:, :self.dim // 2], z[:, self.dim // 2:]
        out = self.f2(upper).reshape(-1, self.dim // 2, 3 * self.K - 1)
        W, H, D = torch.split(out, self.K, dim = 2)
        W, H = torch.softmax(W, dim = 2), torch.softmax(H, dim = 2)
        W, H = 2 * self.B * W, 2 * self.B * H
        D = F.softplus(D)
        lower, ld = unconstrained_RQS(
            lower, W, H, D, inverse=True, tail_bound=self.B)
        log_det += torch.sum(ld, dim = 1)
        out = self.f1(lower).reshape(-1, self.dim // 2, 3 * self.K - 1)
        W, H, D = torch.split(out, self.K, dim = 2)
        W, H = torch.softmax(W, dim = 2), torch.softmax(H, dim = 2)
        W, H = 2 * self.B * W, 2 * self.B * H
        D = F.softplus(D)
        upper, ld = unconstrained_RQS(
            upper, W, H, D, inverse = True, tail_bound = self.B)
        log_det += torch.sum(ld, dim = 1)
        return torch.cat([lower, upper], dim = 1), log_det
| 35.312997 | 80 | 0.533539 |
2cdba8314fca4b127afe3ab310e7e1a8ae90a33c | 25,268 | py | Python | geffnet/efficientnet_builder.py | laksh9950/gen-efficientnet-pytorch | b3bc163478737924f508978a6f0c96e07046e025 | [
"Apache-2.0"
] | 1 | 2019-11-18T02:41:44.000Z | 2019-11-18T02:41:44.000Z | geffnet/efficientnet_builder.py | jph00/gen-efficientnet-pytorch | b3bc163478737924f508978a6f0c96e07046e025 | [
"Apache-2.0"
] | null | null | null | geffnet/efficientnet_builder.py | jph00/gen-efficientnet-pytorch | b3bc163478737924f508978a6f0c96e07046e025 | [
"Apache-2.0"
] | 1 | 2020-01-18T20:56:53.000Z | 2020-01-18T20:56:53.000Z | import re
from copy import deepcopy
from .conv2d_layers import *
from geffnet.activations import *
# Default args for PyTorch BN impl; passed as **kwargs to the norm layer.
BN_MOMENTUM_PT_DEFAULT = 0.1
BN_EPS_PT_DEFAULT = 1e-5
BN_ARGS_PT = dict(momentum=BN_MOMENTUM_PT_DEFAULT, eps=BN_EPS_PT_DEFAULT)

# Defaults used for Google/Tensorflow training of mobile networks /w RMSprop as per
# papers and TF reference implementations. PT momentum equiv for TF decay is (1 - TF decay)
# NOTE: momentum varies btw .99 and .9997 depending on source
# .99 in official TF TPU impl
# .9997 (/w .999 in search space) for paper
BN_MOMENTUM_TF_DEFAULT = 1 - 0.99
BN_EPS_TF_DEFAULT = 1e-3
BN_ARGS_TF = dict(momentum=BN_MOMENTUM_TF_DEFAULT, eps=BN_EPS_TF_DEFAULT)
def resolve_bn_args(kwargs):
    """Pop BatchNorm-related entries off *kwargs* and build a bn arg dict.

    Starts from the TF-style defaults when 'bn_tf' is set (the key is
    always popped), otherwise the PyTorch defaults; explicit
    'bn_momentum' / 'bn_eps' entries override the chosen defaults.
    """
    use_tf_defaults = kwargs.pop('bn_tf', False)
    bn_args = (BN_ARGS_TF if use_tf_defaults else BN_ARGS_PT).copy()
    momentum = kwargs.pop('bn_momentum', None)
    if momentum is not None:
        bn_args['momentum'] = momentum
    eps = kwargs.pop('bn_eps', None)
    if eps is not None:
        bn_args['eps'] = eps
    return bn_args
def round_channels(channels, depth_multiplier=1.0, depth_divisor=8, min_depth=None):
    """Round number of filters based on depth multiplier.

    Scales *channels* by *depth_multiplier*, rounds to the nearest
    multiple of *depth_divisor*, and enforces a floor of *min_depth*
    (default: the divisor).  A falsy multiplier returns the input
    unchanged.
    """
    if not depth_multiplier:
        return channels
    scaled = channels * depth_multiplier
    floor = min_depth or depth_divisor
    rounded = max(
        floor,
        int(scaled + depth_divisor / 2) // depth_divisor * depth_divisor)
    # Make sure rounding down never removes more than 10% of the channels.
    if rounded < 0.9 * scaled:
        rounded += depth_divisor
    return rounded
def drop_connect(inputs, training: bool = False, drop_connect_rate: float = 0.):
    """Apply drop connect (stochastic depth) to a NCHW batch.

    During training each example survives with probability
    ``1 - drop_connect_rate`` and survivors are rescaled so the
    expectation is unchanged; at inference the input passes through
    untouched.
    """
    if not training:
        return inputs
    keep_prob = 1 - drop_connect_rate
    # One Bernoulli draw per example, broadcast over C/H/W.
    batch_size = inputs.size()[0]
    mask = keep_prob + torch.rand(
        (batch_size, 1, 1, 1), dtype=inputs.dtype, device=inputs.device)
    mask.floor_()  # binarize to 0/1
    return inputs.div(keep_prob) * mask
class ConvBnAct(nn.Module):
    """Convolution followed by batch normalization and an activation."""

    def __init__(self, in_chs, out_chs, kernel_size,
                 stride=1, pad_type='', act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, norm_kwargs=BN_ARGS_PT):
        super(ConvBnAct, self).__init__()
        # Only stride 1 (same resolution) and 2 (downsample) are supported.
        assert stride in [1, 2]
        self.conv = select_conv2d(in_chs, out_chs, kernel_size, stride=stride, padding=pad_type)
        self.bn1 = norm_layer(out_chs, **norm_kwargs)
        self.act1 = act_layer(inplace=True)

    def forward(self, x):
        """conv -> bn -> act."""
        return self.act1(self.bn1(self.conv(x)))
class DepthwiseSeparableConv(nn.Module):
    """ DepthwiseSeparable block
    Used for DS convs in MobileNet-V1 and in the place of IR blocks with an expansion
    factor of 1.0. This is an alternative to having a IR with optional first pw conv.
    """
    def __init__(self, in_chs, out_chs, dw_kernel_size=3,
                 stride=1, pad_type='', act_layer=nn.ReLU, noskip=False,
                 pw_kernel_size=1, pw_act=False,
                 se_ratio=0., se_gate_fn=sigmoid,
                 norm_layer=nn.BatchNorm2d, norm_kwargs=BN_ARGS_PT, drop_connect_rate=0.):
        super(DepthwiseSeparableConv, self).__init__()
        assert stride in [1, 2]
        self.has_se = se_ratio is not None and se_ratio > 0.
        # Skip connection only when the shapes line up and not disabled.
        self.has_residual = (stride == 1 and in_chs == out_chs) and not noskip
        self.drop_connect_rate = drop_connect_rate

        # Depthwise conv (groups == channels) + BN + activation.
        self.conv_dw = select_conv2d(
            in_chs, in_chs, dw_kernel_size, stride=stride, padding=pad_type, depthwise=True)
        self.bn1 = norm_layer(in_chs, **norm_kwargs)
        self.act1 = act_layer(inplace=True)

        # Squeeze-and-excitation
        if self.has_se:
            self.se = SqueezeExcite(
                in_chs, reduce_chs=max(1, int(in_chs * se_ratio)), act_layer=act_layer, gate_fn=se_gate_fn)
        else:
            # Identity keeps forward() branch-free and torchscript friendly.
            self.se = nn.Identity()

        # Pointwise (1x1) projection; activation optional (pw_act).
        self.conv_pw = select_conv2d(in_chs, out_chs, pw_kernel_size, padding=pad_type)
        self.bn2 = norm_layer(out_chs, **norm_kwargs)
        self.act2 = act_layer(inplace=True) if pw_act else nn.Identity()

    def forward(self, x):
        residual = x

        x = self.conv_dw(x)
        x = self.bn1(x)
        x = self.act1(x)

        x = self.se(x)

        x = self.conv_pw(x)
        x = self.bn2(x)
        x = self.act2(x)

        if self.has_residual:
            # Stochastic depth on the residual branch during training.
            if self.drop_connect_rate > 0.:
                x = drop_connect(x, self.training, self.drop_connect_rate)
            x += residual
        return x
class SqueezeExcite(nn.Module):
    """Squeeze-and-Excitation channel attention.

    Globally pools each channel, passes the result through a small
    bottleneck (reduce -> act -> expand), and rescales the input by the
    gated output.
    """
    __constants__ = ['gate_fn']

    def __init__(self, in_chs, reduce_chs=None, act_layer=nn.ReLU, gate_fn=torch.sigmoid):
        super(SqueezeExcite, self).__init__()
        self.act_layer = act_layer
        self.gate_fn = gate_fn
        bottleneck_chs = reduce_chs or in_chs
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.conv_reduce = nn.Conv2d(in_chs, bottleneck_chs, 1, bias=True)
        self.act1 = act_layer(inplace=True)
        self.conv_expand = nn.Conv2d(bottleneck_chs, in_chs, 1, bias=True)

    def forward(self, x):
        # NOTE: AdaptiveAvgPool2d is kept (rather than view+mean) because
        # the view/mean form breaks ONNX export for TensorRT.
        squeezed = self.avg_pool(x)
        squeezed = self.conv_reduce(squeezed)
        squeezed = self.act1(squeezed)
        squeezed = self.conv_expand(squeezed)
        return x * self.gate_fn(squeezed)
class InvertedResidual(nn.Module):
    """ Inverted residual block w/ optional SE

    MobileNet-V2/EfficientNet style: 1x1 expand -> depthwise conv ->
    optional squeeze-excite -> 1x1 linear projection, with a skip
    connection when shapes allow.
    """
    def __init__(self, in_chs, out_chs, dw_kernel_size=3,
                 stride=1, pad_type='', act_layer=nn.ReLU, noskip=False,
                 exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1,
                 se_ratio=0., se_reduce_mid=False, se_gate_fn=sigmoid,
                 norm_layer=nn.BatchNorm2d, norm_kwargs=BN_ARGS_PT, conv_kwargs={}, drop_connect_rate=0.):
        super(InvertedResidual, self).__init__()
        # Expanded (mid) channel count from the expansion ratio.
        mid_chs: int = int(in_chs * exp_ratio)
        self.has_se = se_ratio is not None and se_ratio > 0.
        self.has_residual = (in_chs == out_chs and stride == 1) and not noskip
        self.drop_connect_rate = drop_connect_rate

        # Point-wise expansion
        self.conv_pw = select_conv2d(in_chs, mid_chs, exp_kernel_size, padding=pad_type, **conv_kwargs)
        self.bn1 = norm_layer(mid_chs, **norm_kwargs)
        self.act1 = act_layer(inplace=True)

        # Depth-wise convolution
        self.conv_dw = select_conv2d(
            mid_chs, mid_chs, dw_kernel_size, stride=stride, padding=pad_type, depthwise=True, **conv_kwargs)
        self.bn2 = norm_layer(mid_chs, **norm_kwargs)
        self.act2 = act_layer(inplace=True)

        # Squeeze-and-excitation
        if self.has_se:
            # se_ratio may be taken relative to the input or the expanded
            # channel count (se_reduce_mid).
            se_base_chs = mid_chs if se_reduce_mid else in_chs
            self.se = SqueezeExcite(
                mid_chs, reduce_chs=max(1, int(se_base_chs * se_ratio)), act_layer=act_layer, gate_fn=se_gate_fn)
        else:
            self.se = nn.Identity()  # for jit.script compat

        # Point-wise linear projection (no activation after bn3).
        self.conv_pwl = select_conv2d(mid_chs, out_chs, pw_kernel_size, padding=pad_type, **conv_kwargs)
        self.bn3 = norm_layer(out_chs, **norm_kwargs)

    def forward(self, x):
        residual = x

        # Point-wise expansion
        x = self.conv_pw(x)
        x = self.bn1(x)
        x = self.act1(x)

        # Depth-wise convolution
        x = self.conv_dw(x)
        x = self.bn2(x)
        x = self.act2(x)

        # Squeeze-and-excitation
        x = self.se(x)

        # Point-wise linear projection
        x = self.conv_pwl(x)
        x = self.bn3(x)

        if self.has_residual:
            # Stochastic depth on the residual branch during training.
            if self.drop_connect_rate > 0.:
                x = drop_connect(x, self.training, self.drop_connect_rate)
            x += residual
        return x
class CondConvResidual(InvertedResidual):
    """ Inverted residual block w/ CondConv routing.

    Same structure as InvertedResidual, but every conv takes per-sample
    expert routing weights computed from a pooled view of the input.
    """
    def __init__(self, in_chs, out_chs, dw_kernel_size=3,
                 stride=1, pad_type='', act_layer=nn.ReLU, noskip=False,
                 exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1,
                 se_ratio=0., se_reduce_mid=False, se_gate_fn=sigmoid,
                 norm_layer=nn.BatchNorm2d, norm_kwargs=BN_ARGS_PT, num_experts=0, drop_connect_rate=0.):
        self.num_experts = num_experts
        # Parent builds the convs; num_experts is threaded through conv_kwargs
        # so select_conv2d produces CondConv layers.
        super(CondConvResidual, self).__init__(
            in_chs, out_chs, dw_kernel_size=dw_kernel_size, stride=stride, pad_type=pad_type,
            act_layer=act_layer, noskip=noskip, exp_ratio=exp_ratio, exp_kernel_size=exp_kernel_size,
            pw_kernel_size=pw_kernel_size, se_ratio=se_ratio, se_reduce_mid=se_reduce_mid, se_gate_fn=se_gate_fn,
            norm_layer=norm_layer, norm_kwargs=norm_kwargs, conv_kwargs=dict(num_experts=self.num_experts),
            drop_connect_rate=drop_connect_rate)
        self.routing_fn = nn.Linear(in_chs, self.num_experts)

    def forward(self, x):
        """Run the block, routing each conv by per-sample expert weights."""
        shortcut = x
        # CondConv routing: pooled input -> linear -> sigmoid weights
        pooled = F.adaptive_avg_pool2d(x, 1).flatten(1)
        routing_weights = torch.sigmoid(self.routing_fn(pooled))
        # Point-wise expansion
        out = self.act1(self.bn1(self.conv_pw(x, routing_weights)))
        # Depth-wise convolution
        out = self.act2(self.bn2(self.conv_dw(out, routing_weights)))
        # Squeeze-and-excitation
        out = self.se(out)
        # Point-wise linear projection
        out = self.bn3(self.conv_pwl(out, routing_weights))
        if self.has_residual:
            if self.drop_connect_rate > 0.:
                out = drop_connect(out, self.training, self.drop_connect_rate)
            out += shortcut
        return out
class EdgeResidual(nn.Module):
    """ EdgeTPU Residual block with expansion convolution followed by pointwise-linear w/ stride."""
    def __init__(self, in_chs, out_chs, exp_kernel_size=3, exp_ratio=1.0, fake_in_chs=0,
                 stride=1, pad_type='', act_layer=nn.ReLU, noskip=False, pw_kernel_size=1,
                 se_ratio=0., se_reduce_mid=False, se_gate_fn=sigmoid,
                 norm_layer=nn.BatchNorm2d, norm_kwargs=BN_ARGS_PT, drop_connect_rate=0.):
        super(EdgeResidual, self).__init__()
        # fake_in_chs works around an input-filter mismatch in the origin EdgeTPU defs
        mid_chs = int(fake_in_chs * exp_ratio) if fake_in_chs > 0 else int(in_chs * exp_ratio)
        self.has_se = se_ratio is not None and se_ratio > 0.
        self.has_residual = (in_chs == out_chs and stride == 1) and not noskip
        self.drop_connect_rate = drop_connect_rate
        # Expansion convolution
        self.conv_exp = select_conv2d(in_chs, mid_chs, exp_kernel_size, padding=pad_type)
        self.bn1 = norm_layer(mid_chs, **norm_kwargs)
        self.act1 = act_layer(inplace=True)
        # Squeeze-and-excitation
        if self.has_se:
            se_base_chs = mid_chs if se_reduce_mid else in_chs
            self.se = SqueezeExcite(
                mid_chs, reduce_chs=max(1, int(se_base_chs * se_ratio)), act_layer=act_layer, gate_fn=se_gate_fn)
        else:
            self.se = nn.Identity()
        # Point-wise linear projection
        self.conv_pwl = select_conv2d(mid_chs, out_chs, pw_kernel_size, stride=stride, padding=pad_type)
        # FIX: honor the configurable norm_layer here (was hard-coded
        # nn.BatchNorm2d, silently ignoring a custom norm_layer for this
        # one layer while bn1 and all sibling blocks respected it).
        self.bn2 = norm_layer(out_chs, **norm_kwargs)

    def forward(self, x):
        """Run the block; adds the input back (with optional drop-connect) when residual applies."""
        residual = x
        # Expansion convolution
        x = self.conv_exp(x)
        x = self.bn1(x)
        x = self.act1(x)
        # Squeeze-and-excitation
        x = self.se(x)
        # Point-wise linear projection
        x = self.conv_pwl(x)
        x = self.bn2(x)
        if self.has_residual:
            if self.drop_connect_rate > 0.:
                x = drop_connect(x, self.training, self.drop_connect_rate)
            x += residual
        return x
class EfficientNetBuilder:
    """ Build Trunk Blocks for Efficient/Mobile Networks

    This ended up being somewhat of a cross between
    https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mnasnet_models.py
    and
    https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/modeling/backbone/fbnet_builder.py
    """
    def __init__(self, channel_multiplier=1.0, channel_divisor=8, channel_min=None,
                 pad_type='', act_layer=None, se_gate_fn=sigmoid, se_reduce_mid=False,
                 norm_layer=nn.BatchNorm2d, norm_kwargs=BN_ARGS_PT, drop_connect_rate=0.):
        self.channel_multiplier = channel_multiplier
        self.channel_divisor = channel_divisor
        self.channel_min = channel_min
        self.pad_type = pad_type
        self.act_layer = act_layer
        self.se_gate_fn = se_gate_fn
        self.se_reduce_mid = se_reduce_mid
        self.norm_layer = norm_layer
        self.norm_kwargs = norm_kwargs
        self.drop_connect_rate = drop_connect_rate
        # updated during build
        self.in_chs = None
        self.block_idx = 0
        self.block_count = 0

    def _round_channels(self, chs):
        """Round a channel count per the multiplier/divisor/min policy."""
        return round_channels(chs, self.channel_multiplier, self.channel_divisor, self.channel_min)

    def _make_block(self, ba):
        """Instantiate one block module from an arg dict (consumes 'block_type').

        Raises:
            ValueError: if the block type is not one of ir/ds/dsa/er/cn.
        """
        bt = ba.pop('block_type')
        ba['in_chs'] = self.in_chs
        ba['out_chs'] = self._round_channels(ba['out_chs'])
        if 'fake_in_chs' in ba and ba['fake_in_chs']:
            # FIXME this is a hack to work around mismatch in origin impl input filters for EdgeTPU
            ba['fake_in_chs'] = self._round_channels(ba['fake_in_chs'])
        ba['norm_layer'] = self.norm_layer
        ba['norm_kwargs'] = self.norm_kwargs
        ba['pad_type'] = self.pad_type
        # block act fn overrides the model default
        ba['act_layer'] = ba['act_layer'] if ba['act_layer'] is not None else self.act_layer
        assert ba['act_layer'] is not None
        if bt == 'ir':
            # drop-connect rate ramps linearly with global block depth
            ba['drop_connect_rate'] = self.drop_connect_rate * self.block_idx / self.block_count
            ba['se_gate_fn'] = self.se_gate_fn
            ba['se_reduce_mid'] = self.se_reduce_mid
            if ba.get('num_experts', 0) > 0:
                block = CondConvResidual(**ba)
            else:
                block = InvertedResidual(**ba)
        elif bt == 'ds' or bt == 'dsa':
            ba['drop_connect_rate'] = self.drop_connect_rate * self.block_idx / self.block_count
            block = DepthwiseSeparableConv(**ba)
        elif bt == 'er':
            ba['drop_connect_rate'] = self.drop_connect_rate * self.block_idx / self.block_count
            ba['se_gate_fn'] = self.se_gate_fn
            ba['se_reduce_mid'] = self.se_reduce_mid
            block = EdgeResidual(**ba)
        elif bt == 'cn':
            block = ConvBnAct(**ba)
        else:
            # FIX: was `assert False, 'Uknkown ...'` -- typo in the message,
            # and assert-based validation disappears under `python -O`.
            raise ValueError('Unknown block type (%s) while building model.' % bt)
        self.in_chs = ba['out_chs']  # update in_chs for arg of next block
        return block

    def _make_stack(self, stack_args):
        """Build one stage (nn.Sequential) from its list of block arg dicts."""
        blocks = []
        # each stack (stage) contains a list of block arguments
        for i, ba in enumerate(stack_args):
            if i >= 1:
                # only the first block in any stack can have a stride > 1
                ba['stride'] = 1
            block = self._make_block(ba)
            blocks.append(block)
            self.block_idx += 1  # incr global idx (across all stacks)
        return nn.Sequential(*blocks)

    def __call__(self, in_chs, block_args):
        """ Build the blocks
        Args:
            in_chs: Number of input-channels passed to first block
            block_args: A list of lists, outer list defines stages, inner
                list contains strings defining block configuration(s)
        Return:
             List of block stacks (each stack wrapped in nn.Sequential)
        """
        self.in_chs = in_chs
        self.block_count = sum(len(x) for x in block_args)  # generator, no temp list
        self.block_idx = 0
        blocks = []
        # outer list of block_args defines the stacks ('stages' by some conventions)
        for stack in block_args:  # removed unused enumerate() index
            assert isinstance(stack, list)
            blocks.append(self._make_stack(stack))
        return blocks
def _parse_ksize(ss):
if ss.isdigit():
return int(ss)
else:
return [int(k) for k in ss.split('.')]
def _decode_block_str(block_str):
""" Decode block definition string
Gets a list of block arg (dicts) through a string notation of arguments.
E.g. ir_r2_k3_s2_e1_i32_o16_se0.25_noskip
All args can exist in any order with the exception of the leading string which
is assumed to indicate the block type.
leading string - block type (
ir = InvertedResidual, ds = DepthwiseSep, dsa = DeptwhiseSep with pw act, cn = ConvBnAct)
r - number of repeat blocks,
k - kernel size,
s - strides (1-9),
e - expansion ratio,
c - output channels,
se - squeeze/excitation ratio
n - activation fn ('re', 'r6', 'hs', or 'sw')
Args:
block_str: a string representation of block arguments.
Returns:
A list of block args (dicts)
Raises:
ValueError: if the string def not properly specified (TODO)
"""
assert isinstance(block_str, str)
ops = block_str.split('_')
block_type = ops[0] # take the block type off the front
ops = ops[1:]
options = {}
noskip = False
for op in ops:
# string options being checked on individual basis, combine if they grow
if op == 'noskip':
noskip = True
elif op.startswith('n'):
# activation fn
key = op[0]
v = op[1:]
if v == 're':
value = get_act_layer('relu')
elif v == 'r6':
value = get_act_layer('relu6')
elif v == 'hs':
value = get_act_layer('hard_swish')
elif v == 'sw':
value = get_act_layer('swish')
else:
continue
options[key] = value
else:
# all numeric options
splits = re.split(r'(\d.*)', op)
if len(splits) >= 2:
key, value = splits[:2]
options[key] = value
# if act_layer is None, the model default (passed to model init) will be used
act_layer = options['n'] if 'n' in options else None
exp_kernel_size = _parse_ksize(options['a']) if 'a' in options else 1
pw_kernel_size = _parse_ksize(options['p']) if 'p' in options else 1
fake_in_chs = int(options['fc']) if 'fc' in options else 0 # FIXME hack to deal with in_chs issue in TPU def
num_repeat = int(options['r'])
# each type of block has different valid arguments, fill accordingly
if block_type == 'ir':
block_args = dict(
block_type=block_type,
dw_kernel_size=_parse_ksize(options['k']),
exp_kernel_size=exp_kernel_size,
pw_kernel_size=pw_kernel_size,
out_chs=int(options['c']),
exp_ratio=float(options['e']),
se_ratio=float(options['se']) if 'se' in options else None,
stride=int(options['s']),
act_layer=act_layer,
noskip=noskip,
)
if 'cc' in options:
block_args['num_experts'] = int(options['cc'])
elif block_type == 'ds' or block_type == 'dsa':
block_args = dict(
block_type=block_type,
dw_kernel_size=_parse_ksize(options['k']),
pw_kernel_size=pw_kernel_size,
out_chs=int(options['c']),
se_ratio=float(options['se']) if 'se' in options else None,
stride=int(options['s']),
act_layer=act_layer,
pw_act=block_type == 'dsa',
noskip=block_type == 'dsa' or noskip,
)
elif block_type == 'er':
block_args = dict(
block_type=block_type,
exp_kernel_size=_parse_ksize(options['k']),
pw_kernel_size=pw_kernel_size,
out_chs=int(options['c']),
exp_ratio=float(options['e']),
fake_in_chs=fake_in_chs,
se_ratio=float(options['se']) if 'se' in options else None,
stride=int(options['s']),
act_layer=act_layer,
noskip=noskip,
)
elif block_type == 'cn':
block_args = dict(
block_type=block_type,
kernel_size=int(options['k']),
out_chs=int(options['c']),
stride=int(options['s']),
act_layer=act_layer,
)
else:
assert False, 'Unknown block type (%s)' % block_type
return block_args, num_repeat
def _scale_stage_depth(stack_args, repeats, depth_multiplier=1.0, depth_trunc='ceil'):
""" Per-stage depth scaling
Scales the block repeats in each stage. This depth scaling impl maintains
compatibility with the EfficientNet scaling method, while allowing sensible
scaling for other models that may have multiple block arg definitions in each stage.
"""
# We scale the total repeat count for each stage, there may be multiple
# block arg defs per stage so we need to sum.
num_repeat = sum(repeats)
if depth_trunc == 'round':
# Truncating to int by rounding allows stages with few repeats to remain
# proportionally smaller for longer. This is a good choice when stage definitions
# include single repeat stages that we'd prefer to keep that way as long as possible
num_repeat_scaled = max(1, round(num_repeat * depth_multiplier))
else:
# The default for EfficientNet truncates repeats to int via 'ceil'.
# Any multiplier > 1.0 will result in an increased depth for every stage.
num_repeat_scaled = int(math.ceil(num_repeat * depth_multiplier))
# Proportionally distribute repeat count scaling to each block definition in the stage.
# Allocation is done in reverse as it results in the first block being less likely to be scaled.
# The first block makes less sense to repeat in most of the arch definitions.
repeats_scaled = []
for r in repeats[::-1]:
rs = max(1, round((r / num_repeat * num_repeat_scaled)))
repeats_scaled.append(rs)
num_repeat -= r
num_repeat_scaled -= rs
repeats_scaled = repeats_scaled[::-1]
# Apply the calculated scaling to each block arg in the stage
sa_scaled = []
for ba, rep in zip(stack_args, repeats_scaled):
sa_scaled.extend([deepcopy(ba) for _ in range(rep)])
return sa_scaled
def decode_arch_def(arch_def, depth_multiplier=1.0, depth_trunc='ceil', experts_multiplier=1):
    """Decode an architecture definition into per-stage lists of block-arg dicts.

    Args:
        arch_def: list of stages; each stage is a list of block definition
            strings (see _decode_block_str for the grammar).
        depth_multiplier: scales the total repeats of each stage.
        depth_trunc: 'ceil' (EfficientNet default) or 'round' truncation mode.
        experts_multiplier: scales CondConv expert counts where present.

    Returns:
        A list with one entry per stage, each a list of block-arg dicts
        already expanded to the scaled repeat counts.
    """
    arch_args = []
    for block_strings in arch_def:  # fixed: dropped unused enumerate() index
        assert isinstance(block_strings, list)
        stack_args = []
        repeats = []
        for block_str in block_strings:
            assert isinstance(block_str, str)
            ba, rep = _decode_block_str(block_str)
            if ba.get('num_experts', 0) > 0 and experts_multiplier > 1:
                ba['num_experts'] *= experts_multiplier
            stack_args.append(ba)
            repeats.append(rep)
        arch_args.append(_scale_stage_depth(stack_args, repeats, depth_multiplier, depth_trunc))
    return arch_args
def initialize_weight_goog(m, n=''):
    # weight init as per Tensorflow Official impl
    # https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mnasnet_model.py
    #
    # m: module to initialize; n: its dotted name, used only to spot the
    # CondConv routing Linear, which gets fan-in + fan-out uniform scaling.
    if isinstance(m, CondConv2d):
        # NOTE(review): this branch must precede nn.Conv2d -- presumably
        # CondConv2d subclasses it; confirm before reordering.
        fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
        init_weight_fn = get_condconv_initializer(
            lambda w: w.data.normal_(0, math.sqrt(2.0 / fan_out)), m.num_experts, m.weight_shape)
        init_weight_fn(m.weight)
        if m.bias is not None:
            m.bias.data.zero_()
    elif isinstance(m, nn.Conv2d):
        # He-style normal init scaled by fan-out
        fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
        m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
        if m.bias is not None:
            m.bias.data.zero_()
    elif isinstance(m, nn.BatchNorm2d):
        m.weight.data.fill_(1.0)
        m.bias.data.zero_()
    elif isinstance(m, nn.Linear):
        fan_out = m.weight.size(0)  # fan-out
        # fan_in stays 0 (range = 1/sqrt(fan_out)) except for the CondConv
        # routing layer, which also counts fan-in per the TF impl.
        fan_in = 0
        if 'routing_fn' in n:
            fan_in = m.weight.size(1)
        init_range = 1.0 / math.sqrt(fan_in + fan_out)
        m.weight.data.uniform_(-init_range, init_range)
        m.bias.data.zero_()
def initialize_weight_default(m, n=''):
    # Default (PyTorch-flavored) weight init: kaiming normal for convs,
    # kaiming uniform for linears; n is accepted for signature parity with
    # initialize_weight_goog but unused here.
    if isinstance(m, CondConv2d):
        # NOTE(review): checked before nn.Conv2d -- presumably a subclass;
        # each expert's kernel gets the kaiming init.
        init_fn = get_condconv_initializer(partial(
            nn.init.kaiming_normal_, mode='fan_out', nonlinearity='relu'), m.num_experts, m.weight_shape)
        init_fn(m.weight)
    elif isinstance(m, nn.Conv2d):
        nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
    elif isinstance(m, nn.BatchNorm2d):
        m.weight.data.fill_(1.0)
        m.bias.data.zero_()
    elif isinstance(m, nn.Linear):
        nn.init.kaiming_uniform_(m.weight, mode='fan_in', nonlinearity='linear')
| 39.114551 | 124 | 0.62969 |
ce0b4ea506c4114a09eb01800eadbcb279805bfe | 1,919 | py | Python | options.py | dungpham98/HiDDeN | ecbe71e290091afca86d0a5df469c30ac5436e0f | [
"MIT"
] | null | null | null | options.py | dungpham98/HiDDeN | ecbe71e290091afca86d0a5df469c30ac5436e0f | [
"MIT"
] | null | null | null | options.py | dungpham98/HiDDeN | ecbe71e290091afca86d0a5df469c30ac5436e0f | [
"MIT"
] | null | null | null | class TrainingOptions:
"""
Configuration options for the training
"""
def __init__(self,
batch_size: int,
number_of_epochs: int,
train_folder: str, validation_folder: str, runs_folder: str,
start_epoch: int, experiment_name: str):
self.batch_size = batch_size
self.number_of_epochs = number_of_epochs
self.train_folder = train_folder
self.validation_folder = validation_folder
self.runs_folder = runs_folder
self.start_epoch = start_epoch
self.experiment_name = experiment_name
class HiDDenConfiguration:
    """Hyper-parameters describing the HiDDeN network topology and loss weights."""

    def __init__(self, H: int, W: int, message_length: int,
                 encoder_blocks: int, encoder_channels: int,
                 decoder_blocks: int, decoder_channels: int,
                 use_discriminator: bool,
                 use_vgg: bool,
                 discriminator_blocks: int, discriminator_channels: int,
                 decoder_loss: float,
                 encoder_loss: float,
                 adversarial_loss: float,
                 blocking_loss: float,
                 enable_fp16: bool = False):
        """Store every supplied hyper-parameter as a same-named attribute."""
        # Cover-image dimensions and embedded message size.
        self.H = H
        self.W = W
        self.message_length = message_length
        # Encoder / decoder topology.
        self.encoder_blocks = encoder_blocks
        self.encoder_channels = encoder_channels
        self.decoder_blocks = decoder_blocks
        self.decoder_channels = decoder_channels
        # Adversary and perceptual-loss switches.
        self.use_discriminator = use_discriminator
        self.use_vgg = use_vgg
        self.discriminator_blocks = discriminator_blocks
        self.discriminator_channels = discriminator_channels
        # Loss-term weights.
        self.decoder_loss = decoder_loss
        self.encoder_loss = encoder_loss
        self.adversarial_loss = adversarial_loss
        self.blocking_loss = blocking_loss
        # Mixed-precision toggle.
        self.enable_fp16 = enable_fp16
f418cbf5ae6251e0528abbbc6878bc474d65f6e4 | 768 | py | Python | modules/math-codes/modules/statistics-and-probability/src/outliers-v1.py | drigols/Studies | 9c293156935b491ded24be6b511daac67fd43538 | [
"MIT"
] | 1 | 2020-09-06T22:17:19.000Z | 2020-09-06T22:17:19.000Z | modules/math-codes/modules/statistics-and-probability/src/outliers-v1.py | drigols/Studies | 9c293156935b491ded24be6b511daac67fd43538 | [
"MIT"
] | null | null | null | modules/math-codes/modules/statistics-and-probability/src/outliers-v1.py | drigols/Studies | 9c293156935b491ded24be6b511daac67fd43538 | [
"MIT"
] | null | null | null | ########################################################
# Rodrigo Leite - drigols #
# Last update: 17/12/2021 #
########################################################
import pandas as pd
from matplotlib import pyplot as plt
df = pd.DataFrame(
{
'Name': ['Dan', 'Joann', 'Pedro', 'Rosie', 'Ethan', 'Vicky', 'Frederic'],
'Salary':[50000, 54000, 50000, 189000, 55000, 40000, 59000],
'Hours':[41, 40, 36, 30, 35, 39, 40],
'Grade':[50, 50, 46, 95, 50, 5,57]
}
)
# Cria um plot/gráfico do tipo(kind) "box" a partir dos salários dos ex-alunos.
df['Salary'].plot(kind='box', title='Salary Distribution', figsize=(10, 8))
plt.savefig('../images/first-boxplot-02.png', format='png')
plt.show()
| 34.909091 | 79 | 0.489583 |
37a1507c324a5e876a2da60a2f53e814facf6789 | 5,818 | py | Python | app.py | gozer/nhobot | 437299f0fa3500af22ae1562f247041c85cc44f5 | [
"MIT"
] | null | null | null | app.py | gozer/nhobot | 437299f0fa3500af22ae1562f247041c85cc44f5 | [
"MIT"
] | null | null | null | app.py | gozer/nhobot | 437299f0fa3500af22ae1562f247041c85cc44f5 | [
"MIT"
] | null | null | null | from flask import Flask, request
import database.mongo_setup as mongo_setup
from database.people import People
from database.messages import Messages
from database.messages_to_send import MessagesToSend as Send
from iam_profile_faker.factory import V2ProfileFactory
import json
import datetime
import pytz
app = Flask(__name__)
app.secret_key = 'SeMO9wbRIu4mbm3zZlmwrNrQYNQd5jQC7wLXzmXh'
message_frequency = {'day': 1, 'week': 7, 'month': 30, 'year': 365}
@app.before_first_request
def main_start():
    """Initialize the global MongoDB connection before the first request is served."""
    mongo_setup.global_init()
@app.route('/')
def hello_world():
    """Trivial liveness endpoint."""
    return 'Hello World!'
@app.route('/addMessage', methods=['GET', 'POST'])
def add_new_message():
    """Create and persist a Messages document from the request parameters.

    Expects message_id, type, category, title, title_link, send_day,
    send_time, frequency and text in the query string or form body.
    Returns a short confirmation string.
    """
    print(request.values)
    message_id = request.values['message_id']
    message_type = request.values['type']
    category = request.values['category']
    title = request.values['title']
    title_link = request.values['title_link']
    send_day = request.values['send_day']
    send_time = request.values['send_time']
    frequency = request.values['frequency']
    text = request.values['text']
    print(message_id)
    print(message_type)
    message = Messages()  # fixed: removed stray trailing semicolon
    message.message_id = message_id
    message.type = message_type
    message.category = category
    message.title = title
    message.title_link = title_link
    message.send_day = send_day
    # NOTE: the request's 'send_time' value maps onto the document's send_hour field.
    message.send_hour = send_time
    message.frequency = frequency
    message.text = text
    message.save()
    return 'added {} {}'.format(message_id, title)
@app.route('/addEmployee', methods=['GET', 'POST'])
def add_new_employee():
    """Fabricate a fake IAM v2 employee profile, persist it, and schedule its messages.

    Uses iam_profile_faker to build a profile, maps selected fields onto a
    People document, then queues the onboarding messages for the new hire
    via add_messages_to_send(). Returns a confirmation string.
    """
    factory = V2ProfileFactory()
    new_emp = factory.create()
    people = People()
    # NOTE(review): json.dumps(...).replace('"', '') is used throughout to
    # strip quotes from scalar values; direct attribute access would be
    # simpler -- confirm the factory always yields these keys before changing.
    people.first_name = json.dumps(new_emp['first_name']['value'], sort_keys=True, indent=4).replace('"', '')
    people.last_name = json.dumps(new_emp['last_name']['value'], sort_keys=True, indent=4).replace('"', '')
    people.email = json.dumps(new_emp['primary_email']['value']).replace('"', '')
    people.city = json.dumps(new_emp['access_information']['hris']['values']['LocationCity']).replace('"', '')
    people.state = json.dumps(new_emp['access_information']['hris']['values']['LocationState']).replace('"', '')
    people.country = json.dumps(new_emp['access_information']['hris']['values']['LocationCountryISO2']).replace('"', '')
    people.timezone = json.dumps(new_emp['timezone']['value']).replace('"', '')
    people.emp_id = json.dumps(new_emp['access_information']['hris']['values']['EmployeeID'], sort_keys=True, indent=4)
    employee_id = json.dumps(new_emp['access_information']['hris']['values']['EmployeeID'], sort_keys=True, indent=4)
    people.slack_handle = find_slack_handle(json.dumps(new_emp['usernames']['values']))
    # Profile creation timestamp is treated as the start date.
    people.start_date = datetime.datetime.strptime(json.dumps(new_emp['created']['value']).replace('"', ''), '%Y-%m-%dT%H:%M:%S').isoformat()
    people.phone = json.dumps(new_emp['phone_numbers']['values'])
    people.manager_id = json.dumps(new_emp['access_information']['hris']['values']['WorkersManagersEmployeeID'])
    people.title = json.dumps(new_emp['access_information']['hris']['values']['businessTitle']).replace('"', '')
    people.picture = json.dumps(new_emp['picture']['value']).replace('"', '')
    people.last_updated = datetime.datetime.strptime(json.dumps(new_emp['last_modified']['value']).replace('"', ''),
                                                     '%Y-%m-%dT%H:%M:%S')
    print(json.dumps(new_emp['first_name']['value'], sort_keys=True, indent=4).replace('"', ''))
    print(json.dumps(new_emp['phone_numbers']['values']))
    # print(json.dumps(new_emp, indent=4))
    # New employees start fully opted in to notifications.
    people.admin_opt_out = False
    people.user_opt_out = False
    people.manager_opt_out = False
    people.save()
    print('employee id = {}'.format(employee_id))
    # Re-fetch the saved document and schedule its onboarding messages.
    newly_added_user = People.objects(emp_id=employee_id)
    print('newly added user = {}'.format(newly_added_user[0].first_name))
    new_person = {}
    for p in newly_added_user:
        new_person['first_name'] = p.first_name
        new_person['last_name'] = p.last_name
        print('{} {} {} {} {} {}'.format(p.first_name, p.last_name, p.emp_id, p.start_date, p.manager_id, p.picture))
        add_messages_to_send(p)
    return 'You added {}'.format(new_person)
def find_slack_handle(socials) -> str:
    """Return the slack username from a social-media mapping.

    Accepts either a dict or a JSON-encoded string of usernames (callers in
    this module pass ``json.dumps(...)`` output). Falls back to a default
    handle when no slack entry exists or the string cannot be parsed.

    :param socials: dict (or JSON string) of social-media usernames
    :return: the slack handle, or the default 'marty331'
    """
    if isinstance(socials, str):
        # FIX: callers pass a JSON *string*; previously 'slack' in socials was
        # a substring test and socials['slack'] then raised TypeError
        # (string indices must be integers). Decode to a dict first.
        try:
            socials = json.loads(socials)
        except ValueError:
            return 'marty331'
    if 'slack' in socials:
        return socials['slack']
    else:
        # Placeholder default handle used when the profile lists no slack name.
        return 'marty331'
def add_messages_to_send(person: People):
    """
    Add each message from the messages table to the messages_to_send table when a new user is added
    :param person: People document the message schedule is generated for
    :return: None
    """
    employee_id = person.emp_id
    start_date = person.start_date
    print('start date ={}'.format(start_date))
    my_timezone = pytz.timezone(person.timezone)
    for m in Messages.objects:
        print(m)
        for x in range(0, m.number_of_sends):
            if x == 0:
                # First send: offset from the start date by the message's base day.
                send_day = m.send_day
            else:
                print('add {}'.format(message_frequency[m.frequency]))
                # Later sends step forward by the frequency interval (in days).
                send_day = send_day + message_frequency[m.frequency]
            send_date_time = start_date + datetime.timedelta(days=send_day)
            # Localize to the employee's timezone, then pin to the send hour.
            send_date_time = my_timezone.localize(send_date_time)
            send_date_time = send_date_time.replace(hour=m.send_hour, minute=0, second=0)
            print('send date time = {}'.format(send_date_time))
            to_send = Send()
            to_send.emp_id = employee_id
            to_send.message_id = m.message_id
            to_send.send_order = x
            to_send.send_dttm = send_date_time
            to_send.last_updated = datetime.datetime.now()
            to_send.save()
if __name__ == '__main__':
    # Development entry point; debug mode must be disabled for production use.
    print('starting app')
    main_start()
    app.debug = True
    app.run()
| 40.685315 | 141 | 0.665693 |
047fd03f595ad1af67e086843b570a3f3ab02b72 | 632 | py | Python | Chapter07/final/form_project/manage.py | PacktPublishing/Web-Development-Projects-with-Django | 531bc4d58d614888cc81b7fd6f8ec859f5a65217 | [
"MIT"
] | 97 | 2021-03-01T12:54:30.000Z | 2022-03-28T02:57:26.000Z | Chapter07/final/form_project/manage.py | PacktPublishing/Web-Development-Projects-with-Django | 531bc4d58d614888cc81b7fd6f8ec859f5a65217 | [
"MIT"
] | 81 | 2020-08-27T04:56:04.000Z | 2022-03-12T00:53:40.000Z | Chapter07/final/form_project/manage.py | PacktPublishing/Web-Development-Projects-with-Django | 531bc4d58d614888cc81b7fd6f8ec859f5a65217 | [
"MIT"
] | 163 | 2020-12-25T14:38:38.000Z | 2022-03-30T10:31:40.000Z | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Point Django at this project's settings and dispatch to its CLI."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'form_project.settings')
    try:
        # Imported lazily so a missing Django install yields the helpful
        # message below instead of a bare ImportError at module import time.
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    # Entry point when invoked directly, e.g. `python manage.py runserver`.
    main()
| 28.727273 | 76 | 0.685127 |
573bab32ce05e80ff152cda09326f2712c6af8b6 | 3,827 | py | Python | test/test_pipeline/components/feature_preprocessing/test_select_rates_regression.py | Louquinze/auto-sklearn | b2ac331c500ebef7becf372802493a7b235f7cec | [
"BSD-3-Clause"
] | 6,390 | 2015-07-11T07:59:51.000Z | 2022-03-31T16:45:15.000Z | test/test_pipeline/components/feature_preprocessing/test_select_rates_regression.py | Louquinze/auto-sklearn | b2ac331c500ebef7becf372802493a7b235f7cec | [
"BSD-3-Clause"
] | 1,276 | 2015-07-29T02:11:29.000Z | 2022-03-31T17:31:34.000Z | test/test_pipeline/components/feature_preprocessing/test_select_rates_regression.py | Louquinze/auto-sklearn | b2ac331c500ebef7becf372802493a7b235f7cec | [
"BSD-3-Clause"
] | 1,313 | 2015-07-20T14:11:39.000Z | 2022-03-25T18:22:48.000Z | import unittest
import numpy as np
import scipy.sparse
import sklearn.preprocessing
from autosklearn.pipeline.components.feature_preprocessing.select_rates_regression import \
SelectRegressionRates
from autosklearn.pipeline.util import _test_preprocessing, get_dataset
class SelectRegressionRatesComponentTest(unittest.TestCase):
    """Unit tests for the SelectRegressionRates feature preprocessor."""

    def test_default_configuration(self):
        # Dense input: feature count shrinks, sample count is preserved.
        transformation, original = _test_preprocessing(SelectRegressionRates)
        self.assertEqual(transformation.shape[0], original.shape[0])
        self.assertEqual(transformation.shape[1], 4)
        self.assertFalse((transformation == 0).all())
        # Sparse input: output stays sparse and keeps half the features.
        transformation, original = _test_preprocessing(
            SelectRegressionRates, make_sparse=True)
        self.assertTrue(scipy.sparse.issparse(transformation))
        self.assertEqual(transformation.shape[0], original.shape[0])
        self.assertEqual(transformation.shape[1], int(original.shape[1] / 2))
        # Makes sure that the features are reduced, not the number of samples
        X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits')
        original_X_train = X_train.copy()
        ss = sklearn.preprocessing.StandardScaler()
        X_train = ss.fit_transform(X_train)
        configuration_space = SelectRegressionRates.get_hyperparameter_search_space()
        default = configuration_space.get_default_configuration()
        preprocessor = SelectRegressionRates(random_state=1,
                                             **{hp_name: default[hp_name]
                                                for hp_name in default
                                                if default[hp_name] is not None})
        transformer = preprocessor.fit(X_train, Y_train)
        transformation, original = transformer.transform(
            X_train), original_X_train
        self.assertEqual(transformation.shape[0], original.shape[0])
        self.assertEqual(transformation.shape[1], 21)

    def test_default_configuration_regression(self):
        # Regression dataset: default config drops exactly one feature.
        transformation, original = _test_preprocessing(
            SelectRegressionRates,
            dataset='boston',
        )
        self.assertEqual(transformation.shape[0], original.shape[0])
        # From 13 to 12 features
        self.assertEqual(transformation.shape[1], 12)
        self.assertFalse((transformation == 0).all())

    def test_preprocessing_dtype_regression(self):
        # The transform must preserve the input dtype for both precisions.
        # Dense
        # np.float32
        X_train, Y_train, X_test, Y_test = get_dataset("boston")
        self.assertEqual(X_train.dtype, np.float32)
        dataset_properties = {'target_type': 'regression'}
        configuration_space = SelectRegressionRates.get_hyperparameter_search_space(
            dataset_properties
        )
        default = configuration_space.get_default_configuration()
        preprocessor = SelectRegressionRates(random_state=1,
                                             **{hp_name: default[hp_name] for hp_name in
                                                default})
        preprocessor.fit(X_train, Y_train)
        Xt = preprocessor.transform(X_train)
        self.assertEqual(Xt.dtype, np.float32)
        # np.float64
        X_train, Y_train, X_test, Y_test = get_dataset("boston")
        X_train = X_train.astype(np.float64)
        configuration_space = SelectRegressionRates.get_hyperparameter_search_space(
            dataset_properties
        )
        default = configuration_space.get_default_configuration()
        preprocessor = SelectRegressionRates(random_state=1,
                                             **{hp_name: default[hp_name] for hp_name in
                                                default})
        preprocessor.fit(X_train, Y_train)
        Xt = preprocessor.transform(X_train)
        self.assertEqual(Xt.dtype, np.float64)
| 44.5 | 91 | 0.652469 |
6e1a46d9c048e96eb734e95ced18507fae5713a4 | 1,022 | py | Python | LocusPocus/scripts/fidibus-filens.py | vpbrendel/AEGeAn | 4b00f0a6aab36a9ab6ea6a62f53c110053aaf96c | [
"0BSD"
] | 21 | 2016-05-19T18:14:41.000Z | 2021-12-21T21:42:33.000Z | LocusPocus/scripts/fidibus-filens.py | BrendelGroup/AEGeAn | 4d59d2f268cf67d1296dd3eea52cf76d7a23c465 | [
"0BSD"
] | 59 | 2016-01-27T20:46:06.000Z | 2021-01-04T14:51:16.000Z | LocusPocus/scripts/fidibus-filens.py | vpbrendel/AEGeAn | 4b00f0a6aab36a9ab6ea6a62f53c110053aaf96c | [
"0BSD"
] | 10 | 2016-01-27T20:41:32.000Z | 2020-01-28T22:45:42.000Z | #!/usr/bin/env python
#
# -----------------------------------------------------------------------------
# Copyright (c) 2015 Indiana University
#
# This file is part of AEGeAn (http://github.com/BrendelGroup/AEGeAn) and is
# licensed under the ISC license: see LICENSE.
# -----------------------------------------------------------------------------
from __future__ import print_function
import argparse
import re
import sys
parser = argparse.ArgumentParser()
parser.add_argument('species', help='4-letter species label')
parser.add_argument('gff3', type=argparse.FileType('r'), default=sys.stdin)
args = parser.parse_args()

# Emit a TSV row (species, locus name, left iil, right iil) for each GFF3
# feature carrying liil=, riil= and Name= attributes.
for line in args.gff3:
    liilmatch = re.search(r'liil=(\d+)', line)
    riilmatch = re.search(r'riil=(\d+)', line)
    namematch = re.search(r'Name=([^;\n]+)', line)
    # FIX: also require a Name= attribute; previously a feature with
    # liil/riil but no Name crashed with AttributeError on namematch.group().
    if not liilmatch or not riilmatch or not namematch:
        continue
    lname = namematch.group(1)
    liil = liilmatch.group(1)
    riil = riilmatch.group(1)
    fields = '\t'.join([args.species, lname, liil, riil])
    print(fields)
26d7d7297177b35e6453e4d5a4edaab0bd0f88b0 | 511 | py | Python | env/lib/python3.8/site-packages/plotly/validators/funnel/marker/_cmax.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 76 | 2020-07-06T14:44:05.000Z | 2022-02-14T15:30:21.000Z | env/lib/python3.8/site-packages/plotly/validators/funnel/marker/_cmax.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 11 | 2020-08-09T02:30:14.000Z | 2022-03-12T00:50:14.000Z | env/lib/python3.8/site-packages/plotly/validators/funnel/marker/_cmax.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 11 | 2020-07-12T16:18:07.000Z | 2022-02-05T16:48:35.000Z | import _plotly_utils.basevalidators
class CmaxValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the ``funnel.marker.cmax`` property (auto-generated style)."""

    def __init__(self, plotly_name="cmax", parent_name="funnel.marker", **kwargs):
        super(CmaxValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "plot"),
            # Setting cmax explicitly implies colorscale auto-ranging is off.
            implied_edits=kwargs.pop("implied_edits", {"cauto": False}),
            role=kwargs.pop("role", "info"),
            **kwargs
        )
3d88f337e938ecea5f0dfe895d916165ec5d3087 | 11,115 | py | Python | tests/components/arcam_fmj/test_media_player.py | domwillcode/home-assistant | f170c80bea70c939c098b5c88320a1c789858958 | [
"Apache-2.0"
] | 1 | 2021-08-23T00:15:33.000Z | 2021-08-23T00:15:33.000Z | tests/components/arcam_fmj/test_media_player.py | domwillcode/home-assistant | f170c80bea70c939c098b5c88320a1c789858958 | [
"Apache-2.0"
] | 5 | 2021-02-08T20:51:16.000Z | 2022-03-12T00:43:18.000Z | tests/components/arcam_fmj/test_media_player.py | klauern/home-assistant-core | c18ba6aec0627e6afb6442c678edb5ff2bb17db6 | [
"Apache-2.0"
] | 1 | 2020-06-09T20:54:05.000Z | 2020-06-09T20:54:05.000Z | """Tests for arcam fmj receivers."""
from math import isclose
from arcam.fmj import DecodeMode2CH, DecodeModeMCH, IncomingAudioFormat, SourceCodes
import pytest
from homeassistant.components.media_player.const import MEDIA_TYPE_MUSIC
from homeassistant.core import HomeAssistant
from .conftest import MOCK_ENTITY_ID, MOCK_HOST, MOCK_NAME, MOCK_PORT, MOCK_UUID
from tests.async_mock import ANY, MagicMock, Mock, PropertyMock, patch
# Service-call config used by tests exercising the optional "turn on" script
# of the media player (see test_turn_on_with_service).
MOCK_TURN_ON = {
    "service": "switch.turn_on",
    "data": {"entity_id": "switch.test"},
}
async def update(player, force_refresh=False):
    """Write the player's state to hass and return the recorded State.

    When ``force_refresh`` is True the entity state is refreshed before being
    written (presumably re-polling the mocked device — confirm against HA docs).
    """
    await player.async_update_ha_state(force_refresh=force_refresh)
    # Read the state back from the state machine so the test sees exactly what
    # Home Assistant recorded, attributes included.
    return player.hass.states.get(player.entity_id)
async def test_properties(player, state):
"""Test standard properties."""
assert player.unique_id == f"{MOCK_UUID}-1"
assert player.device_info == {
"identifiers": {("arcam_fmj", MOCK_HOST, MOCK_PORT)},
"model": "FMJ",
"manufacturer": "Arcam",
}
assert not player.should_poll
async def test_powered_off(hass, player, state):
"""Test properties in powered off state."""
state.get_source.return_value = None
state.get_power.return_value = None
data = await update(player)
assert "source" not in data.attributes
assert data.state == "off"
async def test_powered_on(player, state):
"""Test properties in powered on state."""
state.get_source.return_value = SourceCodes.PVR
state.get_power.return_value = True
data = await update(player)
assert data.attributes["source"] == "PVR"
assert data.state == "on"
async def test_supported_features(player, state):
    """Test the advertised supported-features bitmask."""
    data = await update(player)
    # NOTE(review): 69004 is a raw OR of media-player SUPPORT_* flags; building
    # it from the named constants would make the expectation self-documenting.
    assert data.attributes["supported_features"] == 69004
async def test_turn_on_without_service(player, state):
"""Test turn on service."""
state.get_power.return_value = None
await player.async_turn_on()
state.set_power.assert_not_called()
state.get_power.return_value = False
await player.async_turn_on()
state.set_power.assert_called_with(True)
async def test_turn_on_with_service(hass, state):
"""Test support when turn on service exist."""
from homeassistant.components.arcam_fmj.media_player import ArcamFmj
player = ArcamFmj(state, MOCK_UUID, "dummy", MOCK_TURN_ON)
player.hass = Mock(HomeAssistant)
player.entity_id = MOCK_ENTITY_ID
with patch(
"homeassistant.components.arcam_fmj.media_player.async_call_from_config"
) as async_call_from_config:
state.get_power.return_value = None
await player.async_turn_on()
state.set_power.assert_not_called()
async_call_from_config.assert_called_with(
player.hass,
MOCK_TURN_ON,
variables=None,
blocking=True,
validate_config=False,
)
async def test_turn_off(player, state):
"""Test command to turn off."""
await player.async_turn_off()
state.set_power.assert_called_with(False)
@pytest.mark.parametrize("mute", [True, False])
async def test_mute_volume(player, state, mute):
"""Test mute functionality."""
await player.async_mute_volume(mute)
state.set_mute.assert_called_with(mute)
player.async_write_ha_state.assert_called_with()
async def test_name(player):
"""Test name."""
assert player.name == MOCK_NAME
async def test_update(player, state):
"""Test update."""
await update(player, force_refresh=True)
state.update.assert_called_with()
@pytest.mark.parametrize(
"fmt, result",
[
(None, True),
(IncomingAudioFormat.PCM, True),
(IncomingAudioFormat.ANALOGUE_DIRECT, True),
(IncomingAudioFormat.DOLBY_DIGITAL, False),
],
)
async def test_2ch(player, state, fmt, result):
"""Test selection of 2ch mode."""
state.get_incoming_audio_format.return_value = (fmt, None)
assert player._get_2ch() == result # pylint: disable=W0212
@pytest.mark.parametrize(
"source, value",
[("PVR", SourceCodes.PVR), ("BD", SourceCodes.BD), ("INVALID", None)],
)
async def test_select_source(player, state, source, value):
"""Test selection of source."""
await player.async_select_source(source)
if value:
state.set_source.assert_called_with(value)
else:
state.set_source.assert_not_called()
async def test_source_list(player, state):
"""Test source list."""
state.get_source_list.return_value = [SourceCodes.BD]
data = await update(player)
assert data.attributes["source_list"] == ["BD"]
@pytest.mark.parametrize(
    "mode, mode_sel, mode_2ch, mode_mch",
    [
        ("STEREO", True, DecodeMode2CH.STEREO, None),
        # In multi-channel mode "STEREO" maps to neither decode-mode setter.
        # (A literal duplicate of this case was removed from the original.)
        ("STEREO", False, None, None),
    ],
)
async def test_select_sound_mode(player, state, mode, mode_sel, mode_2ch, mode_mch):
    """Test that selecting a sound mode calls the matching decode-mode setter."""
    player._get_2ch = Mock(return_value=mode_sel)  # pylint: disable=W0212
    await player.async_select_sound_mode(mode)
    if mode_2ch:
        state.set_decode_mode_2ch.assert_called_with(mode_2ch)
    else:
        state.set_decode_mode_2ch.assert_not_called()
    if mode_mch:
        state.set_decode_mode_mch.assert_called_with(mode_mch)
    else:
        state.set_decode_mode_mch.assert_not_called()
async def test_volume_up(player, state):
    """Test the volume-up command is forwarded and state is rewritten."""
    await player.async_volume_up()
    state.inc_volume.assert_called_with()
    player.async_write_ha_state.assert_called_with()
async def test_volume_down(player, state):
    """Test the volume-down command is forwarded and state is rewritten."""
    await player.async_volume_down()
    state.dec_volume.assert_called_with()
    player.async_write_ha_state.assert_called_with()
@pytest.mark.parametrize(
    "mode, mode_sel, mode_2ch, mode_mch",
    [
        ("STEREO", True, DecodeMode2CH.STEREO, None),
        ("STEREO_DOWNMIX", False, None, DecodeModeMCH.STEREO_DOWNMIX),
        (None, False, None, None),
    ],
)
async def test_sound_mode(player, state, mode, mode_sel, mode_2ch, mode_mch):
    """Test the sound_mode property reflects the active decode mode."""
    player._get_2ch = Mock(return_value=mode_sel)  # pylint: disable=W0212
    state.get_decode_mode_2ch.return_value = mode_2ch
    state.get_decode_mode_mch.return_value = mode_mch
    assert player.sound_mode == mode
async def test_sound_mode_list(player, state):
"""Test sound mode list."""
player._get_2ch = Mock(return_value=True) # pylint: disable=W0212
assert sorted(player.sound_mode_list) == sorted([x.name for x in DecodeMode2CH])
player._get_2ch = Mock(return_value=False) # pylint: disable=W0212
assert sorted(player.sound_mode_list) == sorted([x.name for x in DecodeModeMCH])
async def test_sound_mode_zone_x(player, state):
"""Test second zone sound mode."""
state.zn = 2
assert player.sound_mode is None
assert player.sound_mode_list is None
async def test_is_volume_muted(player, state):
"""Test muted."""
state.get_mute.return_value = True
assert player.is_volume_muted is True # pylint: disable=singleton-comparison
state.get_mute.return_value = False
assert player.is_volume_muted is False # pylint: disable=singleton-comparison
state.get_mute.return_value = None
assert player.is_volume_muted is None
async def test_volume_level(player, state):
"""Test volume."""
state.get_volume.return_value = 0
assert isclose(player.volume_level, 0.0)
state.get_volume.return_value = 50
assert isclose(player.volume_level, 50.0 / 99)
state.get_volume.return_value = 99
assert isclose(player.volume_level, 1.0)
state.get_volume.return_value = None
assert player.volume_level is None
@pytest.mark.parametrize("volume, call", [(0.0, 0), (0.5, 50), (1.0, 99)])
async def test_set_volume_level(player, state, volume, call):
"""Test setting volume."""
await player.async_set_volume_level(volume)
state.set_volume.assert_called_with(call)
@pytest.mark.parametrize(
"source, media_content_type",
[
(SourceCodes.DAB, MEDIA_TYPE_MUSIC),
(SourceCodes.FM, MEDIA_TYPE_MUSIC),
(SourceCodes.PVR, None),
(None, None),
],
)
async def test_media_content_type(player, state, source, media_content_type):
"""Test content type deduction."""
state.get_source.return_value = source
assert player.media_content_type == media_content_type
@pytest.mark.parametrize(
"source, dab, rds, channel",
[
(SourceCodes.DAB, "dab", "rds", "dab"),
(SourceCodes.DAB, None, None, None),
(SourceCodes.FM, "dab", "rds", "rds"),
(SourceCodes.FM, None, None, None),
(SourceCodes.PVR, "dab", "rds", None),
],
)
async def test_media_channel(player, state, source, dab, rds, channel):
"""Test media channel."""
state.get_dab_station.return_value = dab
state.get_rds_information.return_value = rds
state.get_source.return_value = source
assert player.media_channel == channel
@pytest.mark.parametrize(
"source, dls, artist",
[
(SourceCodes.DAB, "dls", "dls"),
(SourceCodes.FM, "dls", None),
(SourceCodes.DAB, None, None),
],
)
async def test_media_artist(player, state, source, dls, artist):
"""Test media artist."""
state.get_dls_pdt.return_value = dls
state.get_source.return_value = source
assert player.media_artist == artist
@pytest.mark.parametrize(
"source, channel, title",
[
(SourceCodes.DAB, "channel", "DAB - channel"),
(SourceCodes.DAB, None, "DAB"),
(None, None, None),
],
)
async def test_media_title(player, state, source, channel, title):
"""Test media title."""
from homeassistant.components.arcam_fmj.media_player import ArcamFmj
state.get_source.return_value = source
with patch.object(
ArcamFmj, "media_channel", new_callable=PropertyMock
) as media_channel:
media_channel.return_value = channel
data = await update(player)
if title is None:
assert "media_title" not in data.attributes
else:
assert data.attributes["media_title"] == title
async def test_added_to_hass(player, state):
    """Test that adding the entity to hass starts the client and wires signals."""
    from homeassistant.components.arcam_fmj.const import (
        SIGNAL_CLIENT_DATA,
        SIGNAL_CLIENT_STARTED,
        SIGNAL_CLIENT_STOPPED,
    )

    connectors = {}

    def _connect(signal, fun):
        connectors[signal] = fun

    player.hass = MagicMock()
    # BUG FIX: the Mock attribute is ``side_effect`` (singular).  The original
    # assigned ``side_effects``, which MagicMock silently accepted as a plain
    # attribute, so ``_connect`` was never installed and ``connectors`` stayed
    # empty.
    player.hass.helpers.dispatcher.async_dispatcher_connect.side_effect = _connect

    await player.async_added_to_hass()

    state.start.assert_called_with()
    player.hass.helpers.dispatcher.async_dispatcher_connect.assert_any_call(
        SIGNAL_CLIENT_DATA, ANY
    )
    player.hass.helpers.dispatcher.async_dispatcher_connect.assert_any_call(
        SIGNAL_CLIENT_STARTED, ANY
    )
    player.hass.helpers.dispatcher.async_dispatcher_connect.assert_any_call(
        SIGNAL_CLIENT_STOPPED, ANY
    )
| 31.309859 | 84 | 0.698336 |
7c91aa3a614e001eb5ba45c44c570e34b5aa282a | 3,949 | py | Python | backend/bin/test/test_enrichers/test_tls_enricher.py | anjo-ba/PCAP-Analyzer | ccb13caba9c0c05a7643e63c57575b56ab1233cb | [
"MIT"
] | 4 | 2019-03-29T08:45:36.000Z | 2021-11-11T00:49:36.000Z | backend/bin/test/test_enrichers/test_tls_enricher.py | anjo-ba/PCAP-Analyzer | ccb13caba9c0c05a7643e63c57575b56ab1233cb | [
"MIT"
] | 9 | 2019-04-03T18:10:19.000Z | 2020-08-16T12:13:34.000Z | backend/bin/test/test_enrichers/test_tls_enricher.py | anjo-ba/PCAP-Analyzer | ccb13caba9c0c05a7643e63c57575b56ab1233cb | [
"MIT"
] | 4 | 2019-05-09T15:33:23.000Z | 2022-02-06T08:01:23.000Z | import unittest
from typing import Dict
from main.enrichers.tls_version_enricher import TlsVersionEnricher
from main.helpers.string_helper import enclose_with_quotes
class TestTlsVersionEnricherMethods(unittest.TestCase):
    """Unit tests for TlsVersionEnricher's negotiated-version extraction."""

    @classmethod
    def setUpClass(cls) -> None:
        """Create one enricher plus fixture packets for a TLS 1.2 and a TLS 1.3 stream."""
        cls.tls_version_enricher = TlsVersionEnricher()
        cls.information_dict_tls1_2 = {"traffic_analyzer_stream": 1}
        cls.information_dict_tls1_3 = {"traffic_analyzer_stream": 2}
        cls.packets_tls1_2 = {
            "client_hello_tls1_2": {
                "tls.version": "0x0301",
                "tls.handshake.type": "1",
                "tls.handshake.version": "0x0303",
                "tls.handshake.extensions.supported_version": "0x0301, 0x0302, 0x0303, 0x0304"
            }, "server_hello_tls1_2": {
                "tls.version": "0x0303",
                "tls.handshake.type": "2",
                "tls.handshake.version": "0x0303",
                "tls.handshake.extensions.supported_version": ""
            }, "first_packet_tls1_2": {
                "tls.version": "0x0303",
                "tls.handshake.type": "",
                "tls.handshake.version": "",
                "tls.handshake.extensions.supported_version": ""
            }}
        cls.packets_tls1_3 = {
            "client_hello_tls1_3": {
                "tls.version": "0x0301",
                "tls.handshake.type": "1",
                "tls.handshake.version": "0x0303",
                "tls.handshake.extensions.supported_version": "0x0301, 0x0302, 0x0303, 0x0304"
            }, "server_hello_tls1_3": {
                "tls.version": "0x0303",
                "tls.handshake.type": "2",
                "tls.handshake.version": "0x0303",
                "tls.handshake.extensions.supported_version": "0x0304"
            }, "first_packet_tls1_3": {
                "tls.version": "0x0303",
                "tls.handshake.type": "",
                "tls.handshake.version": "",
                "tls.handshake.extensions.supported_version": ""
            }}

    def test_header(self) -> None:
        """The enricher must expose the expected CSV column header."""
        expected_header = "tls_ssl_version_negotiated"
        self.assertEqual(self.tls_version_enricher.header, expected_header)

    def run_test_packet(self, expected_value: str, packet: Dict[str, str], information_dict) -> None:
        """Feed one packet to the enricher and assert the recorded version.

        An empty expected value is quoted the same way the enricher emits it.
        """
        if expected_value == "":
            expected_tls_ssl_version = enclose_with_quotes(expected_value)
        else:
            # The original '{}'.format(expected_value) was a no-op on a str.
            expected_tls_ssl_version = expected_value
        self.tls_version_enricher.get_information(packet, information_dict)
        self.assertEqual(information_dict["tls_ssl_version_negotiated"], expected_tls_ssl_version)

    def test_get_tls_ssl_version_tls1_2(self) -> None:
        """TLS 1.2 stream: no version until the server hello fixes 0x0303."""
        expected_value = ""
        self.run_test_packet(expected_value, self.packets_tls1_2["client_hello_tls1_2"], self.information_dict_tls1_2)
        expected_value = "0x0303"
        self.run_test_packet(expected_value, self.packets_tls1_2["server_hello_tls1_2"], self.information_dict_tls1_2)
        expected_value = "0x0303"
        self.run_test_packet(expected_value, self.packets_tls1_2["first_packet_tls1_2"], self.information_dict_tls1_2)

    def test_get_tls_ssl_version_tls1_3(self) -> None:
        """TLS 1.3 stream: the supported_versions extension pins 0x0304."""
        expected_value = ""
        self.run_test_packet(expected_value, self.packets_tls1_3["client_hello_tls1_3"], self.information_dict_tls1_3)
        expected_value = "0x0304"
        self.run_test_packet(expected_value, self.packets_tls1_3["server_hello_tls1_3"], self.information_dict_tls1_3)
        expected_value = "0x0304"
        self.run_test_packet(expected_value, self.packets_tls1_3["first_packet_tls1_3"], self.information_dict_tls1_3)
if __name__ == "__main__":
    # Allow running this test module directly (outside the project's runner).
    suite = unittest.TestLoader().loadTestsFromTestCase(TestTlsVersionEnricherMethods)
    unittest.TextTestRunner(verbosity=2).run(suite)
| 48.158537 | 119 | 0.634591 |
6e81eab84a1558fcc45ec90f1a4220b9d57d6be5 | 1,983 | py | Python | Lib/distutils/tests/test_spawn.py | hashiqizaizai/hashiqizaizai.github.io | 7217400802f6b944dfd1e29d4b00d268957ff769 | [
"bzip2-1.0.6"
] | 1 | 2019-05-14T05:05:42.000Z | 2019-05-14T05:05:42.000Z | Lib/distutils/tests/test_spawn.py | hashiqizaizai/hashiqizaizai.github.io | 7217400802f6b944dfd1e29d4b00d268957ff769 | [
"bzip2-1.0.6"
] | null | null | null | Lib/distutils/tests/test_spawn.py | hashiqizaizai/hashiqizaizai.github.io | 7217400802f6b944dfd1e29d4b00d268957ff769 | [
"bzip2-1.0.6"
] | 2 | 2018-07-15T06:35:21.000Z | 2019-05-14T05:05:31.000Z | """Tests for distutils.spawn."""
import unittest
import os
import time
from test.test_support import captured_stdout
from distutils.spawn import _nt_quote_args
from distutils.spawn import spawn, find_executable
from distutils.errors import DistutilsExecError
from distutils.tests import support
class SpawnTestCase(support.TempdirManager,
                    support.LoggingSilencer,
                    unittest.TestCase):
    """Tests for distutils.spawn (Python 2 code: note octal literals like 0777)."""
    def test_nt_quote_args(self):
        """Arguments containing spaces get double-quoted; others pass through."""
        for (args, wanted) in ((['with space', 'nospace'],
                               ['"with space"', 'nospace']),
                               (['nochange', 'nospace'],
                               ['nochange', 'nospace'])):
            res = _nt_quote_args(args)
            self.assertEqual(res, wanted)
    @unittest.skipUnless(os.name in ('nt', 'posix'),
                        'Runs only under posix or nt')
    def test_spawn(self):
        """spawn() raises DistutilsExecError on non-zero exit, succeeds on zero."""
        tmpdir = self.mkdtemp()
        # creating something executable
        # through the shell that returns 1
        if os.name == 'posix':
            exe = os.path.join(tmpdir, 'foo.sh')
            self.write_file(exe, '#!/bin/sh\nexit 1')
            os.chmod(exe, 0777)
        else:
            exe = os.path.join(tmpdir, 'foo.bat')
            self.write_file(exe, 'exit 1')
            os.chmod(exe, 0777)
        self.assertRaises(DistutilsExecError, spawn, [exe])
        # now something that works
        if os.name == 'posix':
            exe = os.path.join(tmpdir, 'foo.sh')
            self.write_file(exe, '#!/bin/sh\nexit 0')
            os.chmod(exe, 0777)
        else:
            exe = os.path.join(tmpdir, 'foo.bat')
            self.write_file(exe, 'exit 0')
            os.chmod(exe, 0777)
        spawn([exe])  # should work without any error
def test_suite():
    """Return this module's suite (distutils.tests discovery convention)."""
    return unittest.makeSuite(SpawnTestCase)
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main(defaultTest="test_suite")
| 32.508197 | 62 | 0.555724 |
986dd0a3e46d255edc056771592808752abbb5c8 | 679 | py | Python | aula078.py | juniorpedroso/CFBCursos | 88657d6aad38de7d41e76499f0ff4d85a02745ae | [
"MIT"
] | null | null | null | aula078.py | juniorpedroso/CFBCursos | 88657d6aad38de7d41e76499f0ff4d85a02745ae | [
"MIT"
] | null | null | null | aula078.py | juniorpedroso/CFBCursos | 88657d6aad38de7d41e76499f0ff4d85a02745ae | [
"MIT"
] | null | null | null | # Aula 78 - Grid
# Lesson script: demonstrates the tkinter grid geometry manager.
from tkinter import *
from tkinter import ttk
# Main application window.
app = Tk()
app.title('Pedroso')
app.geometry('500x300')
# Widgets: a header label, name/age prompts with entry fields, and a button.
# (Widget text is intentionally kept in Portuguese — it is runtime UI text.)
lb_canal = Label(app, text='CFB Cursos')
lb_nome = Label(app, text='Digite seu nome')
lb_idade = Label(app, text='Digite sua idade')
en_nome = Entry(app)
en_idade = Entry(app)
btn = Button(app, text='Canal')
# Header spans both columns of the grid.
lb_canal.grid(column=0, row=0, columnspan=2)
# Lesson example kept as-is (contains intentional typos):
# lb_canal.grid(collumn = 0, row = 0, columnspan = 2, pasy = 15)
# sticky options => w -> left; e -> right; n -> top; s -> bottom
lb_nome.grid(column=0, row=1, sticky='w')
en_nome.grid(column=0, row=2)
lb_idade.grid(column=1, row=1, sticky='w')
en_idade.grid(column=1, row=2)
# NOTE(review): btn is created but never placed with grid(), so it never shows.
app.mainloop()
| 21.21875 | 69 | 0.683358 |
607b64abcdb7af2ecaf00d47d133c69cb76945a1 | 1,903 | py | Python | jobfunnel/resources/enums.py | marchbnr/JobFunnel | 446e9e06790467d6274e7e69cc505e7b3d982e03 | [
"MIT"
] | 2 | 2020-01-05T20:39:07.000Z | 2020-03-23T18:07:04.000Z | jobfunnel/resources/enums.py | marchbnr/JobFunnel | 446e9e06790467d6274e7e69cc505e7b3d982e03 | [
"MIT"
] | 18 | 2019-11-12T21:04:56.000Z | 2020-10-19T19:44:52.000Z | jobfunnel/resources/enums.py | marchbnr/JobFunnel | 446e9e06790467d6274e7e69cc505e7b3d982e03 | [
"MIT"
] | 2 | 2020-01-05T00:37:58.000Z | 2020-01-12T03:32:06.000Z | from enum import Enum
class Locale(Enum):
    """Language/region a scraper, filter, or Main supports for a job website.

    Locale must be set as it defines the code implementation used to form the
    correct GET requests when interacting with a job source.
    """
    # NOTE(review): member values look arbitrary; keep them stable in case they
    # are persisted anywhere outside this module.
    CANADA_ENGLISH = 1
    CANADA_FRENCH = 2
    USA_ENGLISH = 3
    UK_ENGLISH = 4
    FRANCE_FRENCH = 5
class JobStatus(Enum):
    """Job statuses that are built into jobfunnel.

    NOTE: these are the only valid values for entries in 'status' in our CSV,
    so do not rename or renumber members without migrating existing CSVs.
    """
    UNKNOWN = 1
    NEW = 2
    ARCHIVE = 3
    INTERVIEWING = 4
    INTERVIEWED = 5
    REJECTED = 6
    ACCEPTED = 7
    DELETE = 8
    INTERESTED = 9
    APPLIED = 10
    APPLY = 11
    OLD = 12
class JobField(Enum):
    """Fields of a job that we need setters for; passed to Scraper.get(field=...).
    """
    # Numbering starts at 0 (unlike most enums in this module); keep stable.
    TITLE = 0
    COMPANY = 1
    LOCATION = 2
    DESCRIPTION = 3
    KEY_ID = 4
    URL = 5
    LOCALE = 6
    QUERY = 7
    PROVIDER = 8
    STATUS = 9
    SCRAPE_DATE = 10
    SHORT_DESCRIPTION = 11
    POST_DATE = 12
    RAW = 13
    TAGS = 14
    WAGE = 15
    REMOTENESS = 16
class Remoteness(Enum):
    """How remote a job is, from fully in-person to fully remote.
    """
    UNKNOWN = 1  # NOTE: invalid state
    IN_PERSON = 2
    TEMPORARILY_REMOTE = 3  # AKA Cuz' COVID, realistically this is not remote!
    PARTIALLY_REMOTE = 4
    FULLY_REMOTE = 5
    ANY = 6  # wildcard: matches any remoteness level
class DuplicateType(Enum):
    """Ways in which a job can be a duplicate.

    NOTE: we use these to determine what action(s) to take for a duplicate.
    """
    KEY_ID = 0
    EXISTING_TFIDF = 1
    NEW_TFIDF = 2
class Provider(Enum):
    """Supported job-source providers (websites we scrape).
    """
    INDEED = 1
    GLASSDOOR = 2
    MONSTER = 3
class DelayAlgorithm(Enum):
    """Algorithms for computing the delay between scraping requests.
    """
    CONSTANT = 1
    SIGMOID = 2
    LINEAR = 3
| 20.684783 | 79 | 0.619548 |
c7e42ae828640d68900f0a39b4b5d119738fb0e4 | 2,968 | py | Python | bootstrap.py | MiguelCarino/maniwani | 9519b89aeedee40527ba49425964a077d74a7de4 | [
"MIT"
] | 81 | 2018-08-09T12:58:01.000Z | 2022-02-02T05:56:48.000Z | bootstrap.py | MiguelCarino/maniwani | 9519b89aeedee40527ba49425964a077d74a7de4 | [
"MIT"
] | 456 | 2019-12-09T02:28:26.000Z | 2021-08-03T03:28:12.000Z | bootstrap.py | MiguelCarino/maniwani | 9519b89aeedee40527ba49425964a077d74a7de4 | [
"MIT"
] | 13 | 2018-08-11T10:12:01.000Z | 2022-03-10T04:32:05.000Z | from gevent import monkey
monkey.patch_all()
import json
import os
from alembic import command
from alembic.config import Config
from flask.cli import with_appcontext
from model.Board import Board
from model.Slip import gen_slip
from model.Tag import Tag
from model.Media import storage
from shared import app, db
MIGRATION_DIR = "./migrations"
BOOTSTRAP_SETTINGS = "./deploy-configs/bootstrap-config.json"
SECRET_FILE = "./deploy-configs/secret"
def initialize_db():
    """Create all tables for the SQLAlchemy models on the configured database."""
    db.create_all()
def grab_settings():
    """Read and parse the bootstrap JSON configuration.

    Returns:
        The deserialized contents of ``BOOTSTRAP_SETTINGS``.
    """
    # Use a context manager so the file handle is closed promptly instead of
    # leaking until garbage collection (the original never closed it).
    with open(BOOTSTRAP_SETTINGS) as settings_file:
        return json.load(settings_file)
def setup_boards(json_settings):
    """Create Board rows from the "boards" section of the bootstrap config.

    Per-board "threadlimit" and "mimetypes" fall back to the config-wide
    defaults, "extra_mimetypes" are appended to the mimetype whitelist, and an
    optional "rules" file is read from the deploy-configs directory.  Rows are
    added to the session but not committed here (save_db() commits).
    """
    for board_info in json_settings["boards"]:
        name = board_info["name"]
        threadlimit = board_info.get("threadlimit") or json_settings["default_threadlimit"]
        mimetypes = board_info.get("mimetypes")
        if mimetypes is None:
            mimetypes = json_settings["default_mimetypes"]
        extra_mimetypes = board_info.get("extra_mimetypes")
        if extra_mimetypes:
            mimetypes = mimetypes + "|" + extra_mimetypes
        rule_file = board_info.get("rules")
        rules = ""
        if rule_file:
            # Close the rules file promptly instead of leaking the handle
            # (the original used open(...).read() without closing).
            with open(os.path.join("deploy-configs", rule_file)) as rules_handle:
                rules = rules_handle.read()
        board = Board(name=name, max_threads=threadlimit, mimetypes=mimetypes, rules=rules)
        db.session.add(board)
def setup_slips(json_settings):
    """Create login slips (user accounts) from the bootstrap config.

    "is_admin" and "is_mod" default to False when absent.  Unlike the other
    setup helpers, this one commits the session itself.
    """
    for slip_info in json_settings["slips"]:
        username = slip_info["username"]
        password = slip_info["password"]
        is_admin = slip_info.get("is_admin") or False
        is_mod = slip_info.get("is_mod") or False
        slip = gen_slip(username, password)
        slip.is_admin = is_admin
        slip.is_mod = is_mod
        db.session.add(slip)
    db.session.commit()
def setup_tags(json_settings):
    """Create Tag rows (name plus CSS styles) from the bootstrap config.

    Rows are added to the session but not committed here (save_db() commits).
    """
    for tag_info in json_settings["tags"]:
        tag_name = tag_info["tag"]
        bg_style = tag_info["bgstyle"]
        text_style = tag_info["textstyle"]
        tag = Tag(name=tag_name, bg_style=bg_style, text_style=text_style)
        db.session.add(tag)
def write_secret():
    """Generate a fresh random secret and write it to SECRET_FILE.

    Any pre-existing secret file is removed first.
    """
    if os.path.exists(SECRET_FILE):
        os.remove(SECRET_FILE)
    # Use a context manager so the handle is closed (and the data flushed)
    # deterministically; the original leaked the open file object.
    # NOTE(review): this writes the *repr* of the bytes (e.g. "b'\\x..'"),
    # preserved here for compatibility with existing deployments.
    with open(SECRET_FILE, "w+") as secret_file:
        secret_file.write(str(os.urandom(16)))
def setup_storage():
    """Initialize the configured media storage backend."""
    storage.bootstrap()
def save_db():
    """Commit the seeded session and stamp the schema at Alembic "head".

    Stamping records the database as fully migrated, because the schema was
    just created from the current models rather than by running migrations.
    """
    if os.path.exists(MIGRATION_DIR):
        alembic_config = Config(os.path.join(MIGRATION_DIR, "alembic.ini"))
        alembic_config.set_main_option("script_location", MIGRATION_DIR)
        with app.app_context():
            # mark the database as being up to date migration-wise since
            # it was just created
            command.stamp(config=alembic_config, revision="head")
    db.session.commit()
def main():
    """Run the full bootstrap: schema, seed data, storage, secret, commit."""
    initialize_db()
    settings = grab_settings()
    setup_boards(settings)
    setup_slips(settings)
    setup_tags(settings)
    setup_storage()
    write_secret()
    save_db()
if __name__ == "__main__":
main()
| 28 | 91 | 0.674865 |
bebc80e061ccefa6875e315060818c0a8d32fd2b | 9,292 | py | Python | drnet/apps/load_db_icu.py | jameszhou-gl/drnet | d8ff339f802425c3f7896ccd57c8995f69c43d28 | [
"MIT"
] | 46 | 2019-02-03T20:57:51.000Z | 2022-03-28T12:06:45.000Z | drnet/apps/load_db_icu.py | jameszhou-gl/drnet | d8ff339f802425c3f7896ccd57c8995f69c43d28 | [
"MIT"
] | 7 | 2019-12-16T21:27:25.000Z | 2022-02-10T00:11:43.000Z | drnet/apps/load_db_icu.py | jameszhou-gl/drnet | d8ff339f802425c3f7896ccd57c8995f69c43d28 | [
"MIT"
] | 12 | 2019-07-21T20:48:01.000Z | 2022-03-22T02:16:13.000Z | """
Copyright (C) 2019 Patrick Schwab, ETH Zurich
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import print_function
import sys
import numpy as np
from datetime import datetime
from bisect import bisect_right
def timestamp_string_to_datetime(timestamp):
    """Parse ``"YYYY-MM-DD HH:MM:SS[.ffffff]"`` into a naive datetime.

    Args:
        timestamp: Timestamp string with or without fractional seconds.

    Returns:
        The parsed ``datetime`` (no timezone information attached).

    Raises:
        ValueError: If the string matches neither accepted format.
    """
    try:
        return datetime.strptime(timestamp, "%Y-%m-%d %H:%M:%S.%f")
    except ValueError:
        # Narrowed from a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit and masked programming errors.
        # Fallback in case microseconds are missing.
        return datetime.strptime(timestamp, "%Y-%m-%d %H:%M:%S")
def timestamp_from_string(timestamp_string):
    """Convert a timestamp string to integer epoch milliseconds.

    The naive datetime is interpreted in local time via ``time.mktime``, which
    matches the original glibc-only ``strftime("%s")`` behavior while also
    working on platforms (e.g. Windows) where ``%s`` is not a valid strftime
    directive.  Fractional seconds are truncated, as before.
    """
    import time  # local import keeps this fix self-contained
    parsed = timestamp_string_to_datetime(timestamp_string)
    return int(time.mktime(parsed.timetuple())) * 1000
def insert_patients(data_access, per_patient_data, output_directory, blood_gas_key, window_length_in_seconds,
                    db_origin="uzh"):
    """Write per-patient signal windows and blood-gas outcomes to the output DB.

    ``per_patient_data`` maps patient_id -> (windows, blood_gas_values), where
    each window is a dict of signal_name -> 2-column array (timestamp, value)
    or None.  Uses the module-level WINDOW_OFFSET / WINDOWS_TOTAL counters as
    mutable global state to assign unique window ids across calls.
    """
    from drnet.data_access.icu.data_access import DataAccess as OutputDataAccess
    global WINDOW_OFFSET
    global WINDOWS_TOTAL
    windows_data_access = OutputDataAccess(output_directory, is_factual=False)
    all_windows, all_patients, all_outcomes = [], [], []
    for patient_id in per_patient_data.keys():
        windows, blood_gas_values = per_patient_data[patient_id]
        for window, (blood_gas_timestamp, blood_gas_value) in zip(windows, blood_gas_values):
            for signal_name in window.keys():
                sampling_rate = data_access.get_sampling_rate(signal_name)
                if window[signal_name] is None:
                    vals = None
                    timestamps = None
                else:
                    # Column 0 holds timestamps, column 1 the signal values.
                    vals = window[signal_name][:, 1]
                    timestamps = window[signal_name][:, 0]
                all_windows.append((WINDOW_OFFSET, signal_name,
                                    timestamps, vals,
                                    sampling_rate, window_length_in_seconds,
                                    db_origin))
            all_patients.append((patient_id, WINDOW_OFFSET))
            all_outcomes.append((WINDOW_OFFSET, blood_gas_key,
                                 blood_gas_timestamp, blood_gas_value,
                                 "mmHg"))
            WINDOW_OFFSET += 1
    with windows_data_access.db:
        windows_data_access.insert_many_values(OutputDataAccess.TABLE_OUTCOMES, all_outcomes)
        windows_data_access.insert_many_values(OutputDataAccess.TABLE_WINDOWS, all_windows)
        windows_data_access.insert_many_values(OutputDataAccess.TABLE_PATIENTS, all_patients)
        WINDOWS_TOTAL += len(all_outcomes)
        # NOTE(review): ``window`` leaks from the loops above -- this raises
        # NameError if per_patient_data is empty, and the printed patient count
        # assumes every window has the same signal set.  Confirm intended.
        print("[INFO]: Added", len(all_patients) // len(window.keys()) // len(all_outcomes),
              "patients with", len(all_outcomes), "windows (total=", WINDOWS_TOTAL, ").",
              file=sys.stderr)
        all_windows, all_patients, all_outcomes = [], [], []
def get_windows_and_labels_mimic3(data_access, output_directory, patients, required_signals,
                                  window_length_in_seconds=60*60,
                                  normalise=False,
                                  blood_gas_key="pO2(a)/FIO2"):
    """Extract per-blood-draw signal windows from MIMIC-III and persist them.

    For each pO2 blood draw, pairs it with the latest FiO2 lab value inside
    the window to compute pO2/FiO2, collects the preceding window of every
    monitored signal, and hands completed patients to ``insert_patients``.
    A window is kept only if every signal in ``required_signals`` is present.

    NOTE(review): written for Python 2 -- ``map(...)`` results are indexed and
    passed to ``bisect_right``/``np.column_stack`` as lists; on Python 3 these
    would be lazy map objects and fail.  Also, ``normalise`` is unused here.
    """
    print("[INFO]: Generating data points and labels (mimic).", file=sys.stderr)
    all_signals = {
        "cns_spo2-na": data_access.get_spo2_values,
        "cns_etco2-na": data_access.get_etco2_values,
        "cns_fio2-na": data_access.get_fio2_values,
        "cns_peep-na": data_access.get_peep_values,
        "cns_art-mean": data_access.get_meanbp_values,
        "cns_art-sys": data_access.get_sysbp_values,
        "cns_art-dias": data_access.get_diasbp_values,
        "cns_rr-na": data_access.get_rr_values,
        "cns_hr-na": data_access.get_hr_values,
        "cns_tinfinity-a": data_access.get_tempc_values,
        "cns_icp-mean": data_access.get_icp_values,
        "cns_cpp-na": data_access.get_cpp_values,
        "cns_vte-na": data_access.get_tidal_volume_values,
        "cns_ti-na": data_access.get_inspiratory_time_values,
        "cns_rinsp-na": data_access.get_resistance_values,
        "cns_pplateau-na": data_access.get_plateau_pressure_values,
        "cns_ppeak-na": data_access.get_peak_pressure_values,
        "cns_pminimum-na": data_access.get_min_pressure_values,
        "cns_pmean-na": data_access.get_mean_pressure_values,
        "cns_pinsp-na": data_access.get_pinsp_values,
        "cns_ftotal-na": data_access.get_ftotal_values,
    }
    # Get blood draws.
    # For each blood draw get PEEP set, FIO2, etco2, spo2, and Temperature values.
    per_patient_data = {}
    for i, patient_id in enumerate(patients):
        blood_gas = data_access.get_pao2_values(patient_id)
        lab_fio2 = data_access.get_fio2_lab_values(patient_id)
        fio2_timestamps = map(lambda x: timestamp_from_string(x[0]), lab_fio2)
        if len(blood_gas) == 0 or len(lab_fio2) == 0:
            continue
        all_x, all_y = [], []
        for timestamp, blood_gas_value in blood_gas:
            timestamp = timestamp_from_string(timestamp)
            fio2_end_index = bisect_right(fio2_timestamps, timestamp)
            # NOTE(review): timestamps are in milliseconds but this subtracts
            # seconds (no *1000, unlike the signal window below) -- confirm.
            fio2_start_index = bisect_right(fio2_timestamps, timestamp - window_length_in_seconds)
            if fio2_end_index == 0 or fio2_end_index == fio2_start_index:
                continue  # No fio2 value before the pao2 value present.
            try:
                fio2_value = lab_fio2[fio2_end_index - 1][1] / 100.
                adjusted_blood_gas_value = blood_gas_value / fio2_value
            except:
                continue  # Not a valid fio2 or pao2 value (ex. type exception).
            windows, did_find_any = {}, False
            for signal_name, signal_data_fn in all_signals.items():
                signal_data = signal_data_fn(patient_id)
                signal_data = np.column_stack((map(lambda x: timestamp_from_string(x[0]), signal_data),
                                               map(lambda x: x[1], signal_data)))
                # Get closest in each signal.
                timestamps = signal_data[:, 0]
                end_idx = bisect_right(timestamps, timestamp)
                start_idx = bisect_right(timestamps, timestamp - window_length_in_seconds*1000)
                # Save to DB.
                did_find_result = len(signal_data) != 0 and start_idx != 0 and end_idx != start_idx
                if did_find_result:
                    did_find_any = True
                    assert len(signal_data[start_idx:end_idx]) != 0
                    windows[signal_name] = signal_data[start_idx:end_idx]
                else:
                    windows[signal_name] = None
                if not did_find_result and signal_name in required_signals:
                    did_find_any = False
                    break
            if did_find_any:
                all_x.append(windows)
                all_y.append((timestamp, adjusted_blood_gas_value))
        if len(all_x) != 0:
            # Flush each patient immediately to bound memory use.
            per_patient_data[patient_id] = all_x, all_y
            insert_patients(data_access, per_patient_data, output_directory, blood_gas_key, window_length_in_seconds,
                            db_origin="mimic3")
            per_patient_data = {}
        print(i, "of", len(patients), file=sys.stderr)
def run(mimic3_database_directory, output_directory):
    """Extract windows for all ventilated MIMIC-III patients into output_directory.

    Only windows containing all of the required ventilation signals are kept.
    """
    required_signals = {
        "cns_fio2-na", "cns_peep-na",
        "cns_ftotal-na"
    }
    from drnet.data_access.icu.mimic3.data_access import DataAccess as Mimic3DataAccess
    da = Mimic3DataAccess(mimic3_database_directory)
    vps = da.get_ventilated_patients()
    get_windows_and_labels_mimic3(da, output_directory, vps, required_signals)
# CLI entry point: expects a MIMIC-III SQLite directory and an output folder.
if __name__ == '__main__':
    if len(sys.argv) != 3:
        print("USAGE: ./load_db_icu.py {DATABASE_PATH} {OUTPUT_FOLDER}\n"
              " e.g. ./load_db_icu.py ./mimic3 ./data\n"
              " where \n"
              " DATABASE_PATH is the path to the directory containing your MIMIC3 DB in SQLite format \n"
              " (See README.md on how to obtain MIMIC3)\n"
              " OUTPUT_FOLDER is the path to the directory to which you want to save the benchmark DB.\n",
              file=sys.stderr)
    else:
        mimic3_database_directory = sys.argv[1]
        output_directory = sys.argv[2]
        run(mimic3_database_directory, output_directory)
| 45.326829 | 117 | 0.646255 |
9c02901134141c5a7b9e62c7a8aafccc891c6cf3 | 161 | py | Python | mlflow/R/mlflow/inst/examples/python/simple/simple.py | parkerzf/mlflow | 58f70d522d439ab26f777dbd32de77f79c0235bc | [
"Apache-2.0"
] | 1 | 2020-10-02T08:10:13.000Z | 2020-10-02T08:10:13.000Z | mlflow/R/mlflow/inst/examples/python/simple/simple.py | parkerzf/mlflow | 58f70d522d439ab26f777dbd32de77f79c0235bc | [
"Apache-2.0"
] | null | null | null | mlflow/R/mlflow/inst/examples/python/simple/simple.py | parkerzf/mlflow | 58f70d522d439ab26f777dbd32de77f79c0235bc | [
"Apache-2.0"
] | null | null | null | import os
import mlflow
if __name__ == "__main__":
    # Log one parameter and one metric in a fresh MLflow run; the context
    # manager ends the run automatically on exit.
    with mlflow.start_run():
        mlflow.log_param("parameter", 5)
        mlflow.log_metric("metric", 0)
| 20.125 | 40 | 0.652174 |
1710c5bab0ebcb8449c4996da0a480276eae11f8 | 99 | py | Python | intro/introduction/apps.py | cs-fullstack-2019-fall/django-intro1-cw-marcus110379 | 3a95df8732d9032d70929545658a97ac52d81568 | [
"Apache-2.0"
] | null | null | null | intro/introduction/apps.py | cs-fullstack-2019-fall/django-intro1-cw-marcus110379 | 3a95df8732d9032d70929545658a97ac52d81568 | [
"Apache-2.0"
] | 5 | 2021-03-18T23:39:07.000Z | 2021-09-22T18:33:54.000Z | apps/introduction/apps.py | ad-free/lab-friends | a91d69b0ab60d78b5202e9d682f6dafd689963c4 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class IntroductionConfig(AppConfig):
    """Django application configuration for the ``introduction`` app."""
    name = 'introduction'
| 16.5 | 36 | 0.777778 |
d152d50163f31708efcb02d6f0aad9f9b1551428 | 15,585 | py | Python | appengine/chromium_rietveld/codereview/library.py | mcgreevy/chromium-infra | 09064105713603f7bf75c772e8354800a1bfa256 | [
"BSD-3-Clause"
] | 1 | 2018-01-02T05:47:07.000Z | 2018-01-02T05:47:07.000Z | appengine/chromium_rietveld/codereview/library.py | mcgreevy/chromium-infra | 09064105713603f7bf75c772e8354800a1bfa256 | [
"BSD-3-Clause"
] | null | null | null | appengine/chromium_rietveld/codereview/library.py | mcgreevy/chromium-infra | 09064105713603f7bf75c772e8354800a1bfa256 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Django template library for Rietveld."""
import cgi
import math
from xml.etree.ElementTree import Element, SubElement, tostring
from google.appengine.api import memcache
from google.appengine.api import users
import django.template
import django.utils.safestring
from django.core.urlresolvers import reverse
from codereview import auth_utils
from codereview import models
# Registry the template engine uses to discover the tags/filters defined below.
register = django.template.Library()

# Process-local email -> rendered link cache; consulted by
# get_links_for_users() before falling back to memcache and the datastore.
user_cache = {}
def get_links_for_users(user_emails):
  """Return a dictionary of email->link to user page and fill caches.

  Lookup proceeds through three tiers, cheapest first: the module-level
  user_cache dict, memcache, then the datastore.  Emails resolved at a
  deeper tier are written back into the shallower caches.  Emails that
  resolve nowhere keep a default of just the escaped username part.
  """
  link_dict = {}
  remaining_emails = set(user_emails)

  # initialize with email usernames
  for email in remaining_emails:
    nick = email.split('@', 1)[0]
    link_dict[email] = cgi.escape(nick)

  # look in the local cache
  for email in remaining_emails:
    if email in user_cache:
      link_dict[email] = user_cache[email]
  remaining_emails = remaining_emails - set(user_cache)
  if not remaining_emails:
    return link_dict

  # then look in memcache
  memcache_results = memcache.get_multi(remaining_emails,
                                        key_prefix="show_user:")
  for email in memcache_results:
    link_dict[email] = memcache_results[email]
    user_cache[email] = memcache_results[email]
  remaining_emails = remaining_emails - set(memcache_results)
  if not remaining_emails:
    return link_dict

  # and finally hit the datastore
  accounts = models.Account.get_accounts_for_emails(remaining_emails)
  for account in accounts:
    if account and account.user_has_selected_nickname:
      # Only accounts with an explicitly chosen nickname get a real link;
      # others keep the plain escaped-username default set above.
      ret = ('<a href="%s" onMouseOver="M_showUserInfoPopup(this)">%s</a>' %
             (reverse('codereview.views.show_user', args=[account.nickname]),
              cgi.escape(account.nickname)))
      link_dict[account.email] = ret

  # Cache whatever we resolved (including defaults) for 300 seconds.
  datastore_results = dict((e, link_dict[e]) for e in remaining_emails)
  memcache.set_multi(datastore_results, 300, key_prefix='show_user:')
  user_cache.update(datastore_results)
  return link_dict
def get_link_for_user(email):
  """Get a link to a single user's profile page.

  Convenience wrapper around get_links_for_users() for one address.
  """
  return get_links_for_users([email])[email]
def create_login_url(redirect):
  """Create a login url.

  Wraps the built-in users.create_login_url, but rewrites the 'passive'
  query parameter so App Engine always shows the account picker.
  """
  url = users.create_login_url(redirect)
  return url.replace('passive=true', 'passive=false')
@register.filter
def show_user(email, arg=None, _autoescape=None, _memcache_results=None):
  """Render a link to the user's dashboard, with text being the nickname.

  If 'arg' is falsy and 'email' belongs to the currently signed-in user,
  the literal string 'me' is returned instead of a link.
  """
  if isinstance(email, users.User):
    email = email.email()
  if not arg:
    user = auth_utils.get_current_user()
    if user is not None and email == user.email():
      return 'me'
  ret = get_link_for_user(email)
  # Mark safe: get_link_for_user escapes the nickname itself.
  return django.utils.safestring.mark_safe(ret)
@register.filter
def show_reviewers(reviewer_list, required_reviewers, arg=None):
  """Render list of links to each reviewer's dashboard with color.

  'reviewer_list' maps reviewer (email or users.User) -> approval flag
  (True/False/None); the flag controls the span class via
  format_approval_text.  If 'arg' is falsy, the current user's own link
  text is replaced by 'me'.
  """
  email_list = []
  for reviewer, _approval in reviewer_list.items():
    email = reviewer
    if isinstance(email, users.User):
      email = email.email()
    email_list.append(email)
  links = get_links_for_users(email_list)

  if not arg:
    user = auth_utils.get_current_user()
    if user is not None:
      links[user.email()] = 'me'

  formatted_reviewers = [
      format_approval_text(
          models.format_reviewer(r, required_reviewers, links.get), a)
      for r, a in reviewer_list.items()]

  return django.utils.safestring.mark_safe(', '.join(formatted_reviewers))
def format_approval_text(text, approval):
  """Wrap reviewer text in a span colored by approval state.

  A None approval means "no opinion" and the text is returned unchanged;
  truthy -> 'approval' class, falsy -> 'disapproval' class.
  """
  if approval is None:
    return text
  css = 'approval' if approval else 'disapproval'
  return "<span class='" + css + "'>" + text + "</span>"
@register.filter
def show_users(email_list, arg=None):
  """Render a comma-separated list of links to each user's dashboard."""
  plain_emails = [e.email() if isinstance(e, users.User) else e
                  for e in email_list]
  links = get_links_for_users(plain_emails)
  if not arg:
    current_user = auth_utils.get_current_user()
    if current_user is not None:
      links[current_user.email()] = 'me'
  # NOTE: joins on the original entries, matching the historical behavior.
  rendered = ', '.join(links[email] for email in email_list)
  return django.utils.safestring.mark_safe(rendered)
class UrlAppendViewSettingsNode(django.template.Node):
  """Django template tag that appends context, column_width, tab_spaces params.

  This tag should be used after any URL that requires view settings.

  Example:

    <a href='{%url /foo%}{%urlappend_view_settings%}'>

  The tag tries to get the current column width and context from the
  template context and if they're present it returns '?param1&param2',
  otherwise it returns an empty string.
  """

  def __init__(self):
    super(UrlAppendViewSettingsNode, self).__init__()
    # Template variables resolved lazily at render time.
    self.view_context = django.template.Variable('context')
    self.view_colwidth = django.template.Variable('column_width')
    self.view_tabspaces = django.template.Variable('tab_spaces')

  def render(self, context):
    """Returns a HTML fragment."""
    url_params = []

    # -1 acts as a sentinel: "variable absent" -> emit nothing, while an
    # explicit None means "whole file" and emits an empty 'context='.
    current_context = -1
    try:
      current_context = self.view_context.resolve(context)
    except django.template.VariableDoesNotExist:
      pass
    if current_context is None:
      url_params.append('context=')
    elif isinstance(current_context, int) and current_context > 0:
      url_params.append('context=%d' % current_context)

    current_colwidth = None
    try:
      current_colwidth = self.view_colwidth.resolve(context)
    except django.template.VariableDoesNotExist:
      pass
    if current_colwidth is not None:
      url_params.append('column_width=%d' % current_colwidth)

    current_tabspaces = None
    try:
      current_tabspaces = self.view_tabspaces.resolve(context)
    except django.template.VariableDoesNotExist:
      pass
    if current_tabspaces is not None:
      url_params.append('tab_spaces=%d' % current_tabspaces)

    if url_params:
      return '?%s' % '&'.join(url_params)
    return ''
@register.tag
def urlappend_view_settings(_parser, _token):
  """The actual template tag; takes no arguments."""
  return UrlAppendViewSettingsNode()
def get_nickname(email, never_me=False, request=None):
  """Return a nickname for an email address.

  If 'never_me' is True, 'me' is not returned if 'email' belongs to the
  current logged in user. If 'request' is a HttpRequest, it is used to
  cache the nickname returned by models.Account.get_nickname_for_email().
  """
  if isinstance(email, users.User):
    email = email.email()
  if not never_me:
    # With a request, trust request.user; otherwise ask the auth layer.
    if request is not None:
      user = request.user
    else:
      user = auth_utils.get_current_user()
    if user is not None and email == user.email():
      return 'me'

  if request is None:
    # No per-request cache available; hit the account lookup directly.
    return models.Account.get_nickname_for_email(email)

  # _nicknames is injected into request as a cache.
  # TODO(maruel): Use memcache instead.
  # Access to a protected member _nicknames of a client class
  # pylint: disable=W0212
  if getattr(request, '_nicknames', None) is None:
    request._nicknames = {}
  if email in request._nicknames:
    return request._nicknames[email]
  result = models.Account.get_nickname_for_email(email)
  request._nicknames[email] = result
  return result
class CategoriesNode(django.template.Node):
  """Renders divs for categories and their builders.

  Renders divs for categories which are hidden by default. Expanding the top
  level categories displays their subcategories. Expanding the subcategories
  displays its builders as checkboxes.

  If no subcategories are specified in categories_to_builders then expanding
  the top level categories displays its builders as checkboxes.

  Example usage:
    {% output_categories_and_builders default_builders.items %}
  """

  def __init__(self, tryserver, categories_to_builders):
    """Constructor.

    'categories_to_builders' is the name of the template variable that holds a
    dictionary of full category names to their builders. If the full category
    name contains a '|' as a separator then the first part is considered to be
    the top level category and everything afterwards is considered to be the
    subcategory.
    """
    super(CategoriesNode, self).__init__()
    self.tryserver = django.template.Variable(tryserver)
    self.categories_to_builders = django.template.Variable(
        categories_to_builders)

  def render(self, context):
    # Returns an HTML fragment (string); empty string if either template
    # variable cannot be resolved.
    try:
      tryserver = self.tryserver.resolve(context)
      categories_to_builders = self.categories_to_builders.resolve(context)
    except django.template.VariableDoesNotExist:
      return ''

    # Dictionary for quick lookup of top level categories.
    top_level_categories = {}
    # Top level root element to add top level and sub categories to.
    root_elem = Element('a')
    for full_category, builders in sorted(categories_to_builders):
      # '|' splits "top|sub"; everything is flat when there is no '|'.
      categories = full_category.split('|')
      top_level_category = categories[0]
      if not top_level_categories.get(top_level_category):
        top_level_categories[top_level_category] = 1
        # This is the first time encountering this top level category create its
        # anchor and div.
        triangle_anchor_attrib = {
            'id': '%s-%s-builders-pointer' % (tryserver, top_level_category),
            'href': "javascript:M_toggleSection('%s-%s-builders')" % (
                tryserver, top_level_category),
            'class': 'toggled-section closedtriangle'
        }
        triangle_anchor_elem = SubElement(
            parent=root_elem,
            tag='a',
            attrib=triangle_anchor_attrib)
        triangle_anchor_elem.text = top_level_category
        top_level_cat_div_elem = SubElement(
            parent=root_elem,
            tag='div',
            id='%s-%s-builders' % (tryserver, top_level_category),
            style='display:none')
        SubElement(parent=root_elem, tag='br')

      sub_category = categories[1] if len(categories) > 1 else None
      if sub_category:
        # Nested, initially-collapsed block for the subcategory.
        indent_anchor_elem = SubElement(
            parent=top_level_cat_div_elem,
            tag='a',
            style='padding-left:2em')
        triangle_anchor_attrib = {
            'id': '%s-%s-builders-pointer' % (tryserver, full_category),
            'href': "javascript:M_toggleSection('%s-%s-builders')" % (
                tryserver, full_category),
            'class': 'toggled-section closedtriangle',
        }
        triangle_anchor_elem = SubElement(
            parent=indent_anchor_elem,
            tag='a',
            attrib=triangle_anchor_attrib)
        triangle_anchor_elem.text = sub_category
        sub_cat_div_elem = SubElement(
            parent=indent_anchor_elem,
            tag='div',
            id='%s-%s-builders' % (tryserver, full_category),
            style='display:none')

      # One pre-checked checkbox per builder, attached to the subcategory
      # div when present, otherwise to the top level category div.
      for builder in sorted(builders):
        builder_div_attrib = {
            'class': 'trybot-popup-input',
            'style': 'padding-left:2em',
        }
        if sub_category:
          parent = sub_cat_div_elem
        else:
          parent = top_level_cat_div_elem
        builder_div_elem = SubElement(
            parent=parent,
            tag='div',
            attrib=builder_div_attrib)
        builder_checkbox_elem = SubElement(
            parent=builder_div_elem,
            tag='input',
            type='checkbox',
            name='%s:%s' % (tryserver, builder),
            id='cb_%s_%s' % (tryserver, builder),
            checked='checked')
        builder_checkbox_elem.text = builder
      SubElement(parent=top_level_cat_div_elem, tag='br')

    return tostring(root_elem, method='html')
class NicknameNode(django.template.Node):
  """Renders a nickname for a given email address.

  The return value is cached if a HttpRequest is available in a
  'request' template variable.

  The template tag accepts one or two arguments. The first argument is
  the template variable for the email address. If the optional second
  argument evaluates to True, 'me' as nickname is never rendered.

  Example usage:
    {% cached_nickname msg.sender %}
    {% cached_nickname msg.sender True %}
  """

  def __init__(self, email_address, never_me=''):
    """Constructor.

    'email_address' is the name of the template variable that holds an
    email address. If 'never_me' evaluates to True, 'me' won't be returned.
    """
    super(NicknameNode, self).__init__()
    self.email_address = django.template.Variable(email_address)
    # Any non-whitespace second argument turns never_me on.
    self.never_me = bool(never_me.strip())
    # Set to True by the 'nicknames' tag to render an iterable of emails.
    self.is_multi = False

  def render(self, context):
    try:
      email = self.email_address.resolve(context)
    except django.template.VariableDoesNotExist:
      return ''
    request = context.get('request')
    if self.is_multi:
      # Multi mode: 'email' is expected to be an iterable of addresses.
      return ', '.join(get_nickname(e, self.never_me, request) for e in email)
    return get_nickname(email, self.never_me, request)
@register.tag
def nickname(_parser, token):
  """Almost the same as nickname filter but the result is cached."""
  bits = token.split_contents()
  if len(bits) == 3:
    _, email_address, never_me = bits
  elif len(bits) == 2:
    _, email_address = bits
    never_me = ''
  else:
    raise django.template.TemplateSyntaxError(
        "%r requires exactly one or two arguments" % token.contents.split()[0])
  return NicknameNode(email_address, never_me)
@register.tag
def nicknames(parser, token):
  """Wrapper for nickname tag with is_multi flag enabled."""
  multi_node = nickname(parser, token)
  multi_node.is_multi = True
  return multi_node
@register.tag
def output_categories_and_builders(_parser, token):
  """Returns the complete category and builders structure.

  Expects exactly two tag arguments: the tryserver name and the template
  variable holding the category->builders mapping.
  """
  _, tryserver, categories_to_builders = token.split_contents()
  return CategoriesNode(tryserver, categories_to_builders)
@register.filter
def num_drafts(issue, user):
  """Returns number of drafts for given user.

  :param issue: an Issue instance.
  :param user: an User instance or None.
  :returns: Drafts for given object.
  """
  # Thin wrapper: counting logic lives entirely on the issue object.
  return issue.get_num_drafts(user)
@register.filter
def format_duration(seconds):
  """Convert a number of seconds into a compact human readable string.

  Falsy input (0, None, '') is returned unchanged.  Output looks like
  '-1d02h03m' with leading zeros stripped from the first component;
  seconds only appear when the duration is under one minute.
  """
  if not seconds:
    return seconds

  total = int(seconds)
  prefix = ''
  if total < 0:
    prefix = '-'
    total = -total

  minutes, secs = divmod(total, 60)
  hours, minutes = divmod(minutes, 60)
  days, hours = divmod(hours, 24)

  pieces = []
  if days > 0:
    pieces.append('%dd' % days)
  if hours > 0 or days > 0:
    pieces.append('%02dh' % hours)
  if minutes > 0 or hours > 0 or days > 0:
    pieces.append('%02dm' % minutes)
  if secs > 0 and not pieces:
    # Skip seconds unless there's only seconds.
    pieces.append('%02ds' % secs)
  return prefix + ''.join(pieces).lstrip('0')
@register.filter
def sort(value):
  """Return the items of *value* as a new sorted list."""
  items = list(value)
  items.sort()
  return items
| 32.002053 | 80 | 0.694386 |
0e86cf1f1508d7dd02762f7bd8eb0294903cf738 | 2,698 | py | Python | route/give_admin.py | lsh23/openNAMU | 18f780afe5e81ef1f347fe556b6fd98bb4914a53 | [
"BSD-3-Clause"
] | null | null | null | route/give_admin.py | lsh23/openNAMU | 18f780afe5e81ef1f347fe556b6fd98bb4914a53 | [
"BSD-3-Clause"
] | null | null | null | route/give_admin.py | lsh23/openNAMU | 18f780afe5e81ef1f347fe556b6fd98bb4914a53 | [
"BSD-3-Clause"
] | null | null | null | from .tool.func import *
def give_admin_2(conn, name):
    """Grant or revoke an ACL group for user *name*.

    GET renders a <select> of available ACL groups; POST applies the chosen
    group ('X' resets the user back to plain 'user').  Non-owner admins may
    not touch owner accounts or assign the owner group.
    """
    curs = conn.cursor()

    # 1 when the current visitor is the wiki owner; gates owner-only actions.
    owner = admin_check()

    curs.execute(db_change("select acl from user where id = ?"), [name])
    user = curs.fetchall()
    if not user:
        # Target account does not exist.
        return re_error('/error/2')
    else:
        if owner != 1:
            # Non-owners may not modify accounts holding the owner ACL.
            curs.execute(db_change('select name from alist where name = ? and acl = "owner"'), [user[0][0]])
            if curs.fetchall():
                return re_error('/error/3')

        # An admin may not change their own ACL.
        if ip_check() == name:
            return re_error('/error/3')

    if flask.request.method == 'POST':
        # Permission 7 = user-management right; logged with the target name.
        if admin_check(7, 'admin (' + name + ')') != 1:
            return re_error('/error/3')

        if owner != 1:
            # Non-owners may not hand out the owner ACL group.
            curs.execute(db_change('select name from alist where name = ? and acl = "owner"'), [flask.request.form.get('select', None)])
            if curs.fetchall():
                return re_error('/error/3')

        if flask.request.form.get('select', None) == 'X':
            # 'X' means "remove admin group": reset to the plain user ACL.
            curs.execute(db_change("update user set acl = 'user' where id = ?"), [name])
        else:
            curs.execute(db_change("update user set acl = ? where id = ?"), [flask.request.form.get('select', None), name])

        conn.commit()

        return redirect('/admin/' + url_pas(name))
    else:
        if admin_check(7) != 1:
            return re_error('/error/3')

        # Build the <option> list; the user's current group is pre-selected.
        div = '<option value="X">X</option>'

        curs.execute(db_change('select distinct name from alist order by name asc'))
        for data in curs.fetchall():
            if user[0][0] == data[0]:
                div += '<option value="' + data[0] + '" selected="selected">' + data[0] + '</option>'
            else:
                if owner != 1:
                    # Hide owner-level groups from non-owner admins.
                    curs.execute(db_change('select name from alist where name = ? and acl = "owner"'), [data[0]])
                    if not curs.fetchall():
                        div += '<option value="' + data[0] + '">' + data[0] + '</option>'
                else:
                    div += '<option value="' + data[0] + '">' + data[0] + '</option>'

        return easy_minify(flask.render_template(skin_check(),
            imp = [name, wiki_set(), custom(), other2([' (' + load_lang('authorize') + ')', 0])],
            data = '''
                <form method="post">
                    <select name="select">''' + div + '''</select>
                    <hr class=\"main_hr\">
                    <button type="submit">''' + load_lang('save') + '''</button>
                </form>
            ''',
            menu = [['manager', load_lang('return')]]
        ))
90244c77a85be4beb9cdd5190bce19ed1aff602a | 238 | py | Python | usuarios/models.py | ShadowsS01/Cursos-Django | 40ca002d7de34d2d4e192819801e99b62938f1f2 | [
"MIT"
] | null | null | null | usuarios/models.py | ShadowsS01/Cursos-Django | 40ca002d7de34d2d4e192819801e99b62938f1f2 | [
"MIT"
] | null | null | null | usuarios/models.py | ShadowsS01/Cursos-Django | 40ca002d7de34d2d4e192819801e99b62938f1f2 | [
"MIT"
] | null | null | null | from django.db import models
class Usuario(models.Model):
nome = models.CharField(max_length = 50)
email = models.EmailField()
senha = models.CharField(max_length = 64)
def __str__(self) -> str:
return self.nome
| 23.8 | 45 | 0.680672 |
ba27eea6c51bc0d0221743751c085b8cdeae49a5 | 100,279 | py | Python | numpy/lib/tests/test_io.py | PhanatosZou/numpy | ee7fcb790e36d940d4fde69bd1cad5048008064b | [
"BSD-3-Clause"
] | 1 | 2020-01-09T03:37:16.000Z | 2020-01-09T03:37:16.000Z | numpy/lib/tests/test_io.py | PhanatosZou/numpy | ee7fcb790e36d940d4fde69bd1cad5048008064b | [
"BSD-3-Clause"
] | null | null | null | numpy/lib/tests/test_io.py | PhanatosZou/numpy | ee7fcb790e36d940d4fde69bd1cad5048008064b | [
"BSD-3-Clause"
] | null | null | null | import sys
import gzip
import os
import threading
import time
import warnings
import io
import re
import pytest
from tempfile import NamedTemporaryFile
from io import BytesIO, StringIO
from datetime import datetime
import locale
import numpy as np
import numpy.ma as ma
from numpy.lib._iotools import ConverterError, ConversionWarning
from numpy.compat import asbytes, bytes, Path
from numpy.ma.testutils import assert_equal
from numpy.testing import (
assert_warns, assert_, assert_raises_regex, assert_raises,
assert_allclose, assert_array_equal, temppath, tempdir, IS_PYPY,
HAS_REFCOUNT, suppress_warnings, assert_no_gc_cycles, assert_no_warnings
)
from numpy.testing._private.utils import requires_memory
class TextIO(BytesIO):
    """Binary buffer that transparently encodes ``str`` writes.

    Strings handed to the constructor, ``write`` or ``writelines`` are
    encoded to bytes (latin1); reads always return bytes.  This makes it
    easier to emulate files opened in binary mode without explicitly
    converting strings to bytes when setting up test data.
    """

    @staticmethod
    def _coerce(s):
        # Same conversion numpy.compat.asbytes performs: bytes pass
        # through untouched, everything else is stringified + latin1.
        if isinstance(s, bytes):
            return s
        return str(s).encode('latin1')

    def __init__(self, s=""):
        BytesIO.__init__(self, self._coerce(s))

    def write(self, s):
        BytesIO.write(self, self._coerce(s))

    def writelines(self, lines):
        BytesIO.writelines(self, [self._coerce(line) for line in lines])
# Interpreter major/minor version, used by version-dependent test paths.
MAJVER, MINVER = sys.version_info[:2]
# True on platforms whose native pointer size exceeds 32 bits.
IS_64BIT = sys.maxsize > 2**32

# Optional compression modules; these flags guard the tests that need them.
try:
    import bz2
    HAS_BZ2 = True
except ImportError:
    HAS_BZ2 = False
try:
    import lzma
    HAS_LZMA = True
except ImportError:
    HAS_LZMA = False
def strptime(s, fmt=None):
    """Parse *s* according to *fmt* and return a ``datetime``.

    Only the year, month and day fields of the parsed time are kept.
    ``bytes`` input is decoded as latin1 before parsing.
    """
    # isinstance also accepts bytes subclasses, unlike the old type() check.
    if isinstance(s, bytes):
        s = s.decode("latin1")
    return datetime(*time.strptime(s, fmt)[:3])
class RoundtripTest:
    # Mixin base: subclasses override roundtrip() to pick the save function
    # (np.save / np.savez) and add their own result assertions.
    def roundtrip(self, save_func, *args, **kwargs):
        """
        save_func : callable
            Function used to save arrays to file.
        file_on_disk : bool
            If true, store the file on disk, instead of in a
            string buffer.
        save_kwds : dict
            Parameters passed to `save_func`.
        load_kwds : dict
            Parameters passed to `numpy.load`.
        args : tuple of arrays
            Arrays stored to file.

        """
        save_kwds = kwargs.get('save_kwds', {})
        load_kwds = kwargs.get('load_kwds', {"allow_pickle": True})
        file_on_disk = kwargs.get('file_on_disk', False)

        if file_on_disk:
            target_file = NamedTemporaryFile(delete=False)
            load_file = target_file.name
        else:
            target_file = BytesIO()
            load_file = target_file

        try:
            arr = args

            save_func(target_file, *arr, **save_kwds)
            target_file.flush()
            target_file.seek(0)

            if sys.platform == 'win32' and not isinstance(target_file, BytesIO):
                target_file.close()

            arr_reloaded = np.load(load_file, **load_kwds)

            # Stashed on self so the subclass roundtrip() can assert on them.
            self.arr = arr
            self.arr_reloaded = arr_reloaded
        finally:
            if not isinstance(target_file, BytesIO):
                target_file.close()
                # holds an open file descriptor so it can't be deleted on win
                if 'arr_reloaded' in locals():
                    if not isinstance(arr_reloaded, np.lib.npyio.NpzFile):
                        os.remove(target_file.name)

    def check_roundtrips(self, a):
        # Exercise both memory and disk targets, C and Fortran order.
        self.roundtrip(a)
        self.roundtrip(a, file_on_disk=True)
        self.roundtrip(np.asfortranarray(a))
        self.roundtrip(np.asfortranarray(a), file_on_disk=True)
        if a.shape[0] > 1:
            # neither C nor Fortran contiguous for 2D arrays or more
            self.roundtrip(np.asfortranarray(a)[1:])
            self.roundtrip(np.asfortranarray(a)[1:], file_on_disk=True)

    def test_array(self):
        a = np.array([], float)
        self.check_roundtrips(a)

        a = np.array([[1, 2], [3, 4]], float)
        self.check_roundtrips(a)

        a = np.array([[1, 2], [3, 4]], int)
        self.check_roundtrips(a)

        a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.csingle)
        self.check_roundtrips(a)

        a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.cdouble)
        self.check_roundtrips(a)

    def test_array_object(self):
        a = np.array([], object)
        self.check_roundtrips(a)

        a = np.array([[1, 2], [3, 4]], object)
        self.check_roundtrips(a)

    def test_1D(self):
        a = np.array([1, 2, 3, 4], int)
        self.roundtrip(a)

    @pytest.mark.skipif(sys.platform == 'win32', reason="Fails on Win32")
    def test_mmap(self):
        a = np.array([[1, 2.5], [4, 7.3]])
        self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'})

        a = np.asfortranarray([[1, 2.5], [4, 7.3]])
        self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'})

    def test_record(self):
        a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
        self.check_roundtrips(a)

    @pytest.mark.slow
    def test_format_2_0(self):
        # Huge dtype header forces the 2.0 .npy format; the emitted
        # UserWarning is silenced.
        dt = [(("%d" % i) * 100, float) for i in range(500)]
        a = np.ones(1000, dtype=dt)
        with warnings.catch_warnings(record=True):
            warnings.filterwarnings('always', '', UserWarning)
            self.check_roundtrips(a)
class TestSaveLoad(RoundtripTest):
    # Roundtrip through np.save/np.load and compare the single array.
    def roundtrip(self, *args, **kwargs):
        RoundtripTest.roundtrip(self, np.save, *args, **kwargs)
        assert_equal(self.arr[0], self.arr_reloaded)
        assert_equal(self.arr[0].dtype, self.arr_reloaded.dtype)
        assert_equal(self.arr[0].flags.fnc, self.arr_reloaded.flags.fnc)
class TestSavezLoad(RoundtripTest):
    # Roundtrip through np.savez/np.load and compare every stored array.
    def roundtrip(self, *args, **kwargs):
        RoundtripTest.roundtrip(self, np.savez, *args, **kwargs)
        try:
            for n, arr in enumerate(self.arr):
                reloaded = self.arr_reloaded['arr_%d' % n]
                assert_equal(arr, reloaded)
                assert_equal(arr.dtype, reloaded.dtype)
                assert_equal(arr.flags.fnc, reloaded.flags.fnc)
        finally:
            # delete tempfile, must be done here on windows
            if self.arr_reloaded.fid:
                self.arr_reloaded.fid.close()
                os.remove(self.arr_reloaded.fid.name)

    @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform")
    @pytest.mark.slow
    def test_big_arrays(self):
        # > 2**31 elements: zip member larger than the 32-bit size field.
        L = (1 << 31) + 100000
        a = np.empty(L, dtype=np.uint8)
        with temppath(prefix="numpy_test_big_arrays_", suffix=".npz") as tmp:
            np.savez(tmp, a=a)
            del a
            npfile = np.load(tmp)
            a = npfile['a']  # Should succeed
            npfile.close()
            del a  # Avoid pyflakes unused variable warning.

    def test_multiple_arrays(self):
        a = np.array([[1, 2], [3, 4]], float)
        b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
        self.roundtrip(a, b)

    def test_named_arrays(self):
        a = np.array([[1, 2], [3, 4]], float)
        b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
        c = BytesIO()
        np.savez(c, file_a=a, file_b=b)
        c.seek(0)
        l = np.load(c)
        assert_equal(a, l['file_a'])
        assert_equal(b, l['file_b'])

    def test_BagObj(self):
        # Arrays must also be reachable as attributes of the .f bag object.
        a = np.array([[1, 2], [3, 4]], float)
        b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
        c = BytesIO()
        np.savez(c, file_a=a, file_b=b)
        c.seek(0)
        l = np.load(c)
        assert_equal(sorted(dir(l.f)), ['file_a','file_b'])
        assert_equal(a, l.f.file_a)
        assert_equal(b, l.f.file_b)

    def test_savez_filename_clashes(self):
        # Test that issue #852 is fixed
        # and savez functions in multithreaded environment
        def writer(error_list):
            with temppath(suffix='.npz') as tmp:
                arr = np.random.randn(500, 500)
                try:
                    np.savez(tmp, arr=arr)
                except OSError as err:
                    error_list.append(err)

        errors = []
        threads = [threading.Thread(target=writer, args=(errors,))
                   for j in range(3)]
        for t in threads:
            t.start()
        for t in threads:
            t.join()

        if errors:
            raise AssertionError(errors)

    def test_not_closing_opened_fid(self):
        # Test that issue #2178 is fixed:
        # verify could seek on 'loaded' file
        with temppath(suffix='.npz') as tmp:
            with open(tmp, 'wb') as fp:
                np.savez(fp, data='LOVELY LOAD')
            with open(tmp, 'rb', 10000) as fp:
                fp.seek(0)
                assert_(not fp.closed)
                np.load(fp)['data']
                # fp must not get closed by .load
                assert_(not fp.closed)
                fp.seek(0)
                assert_(not fp.closed)

    #FIXME: Is this still true?
    @pytest.mark.skipif(IS_PYPY, reason="Missing context manager on PyPy")
    def test_closing_fid(self):
        # Test that issue #1517 (too many opened files) remains closed
        # It might be a "weak" test since failed to get triggered on
        # e.g. Debian sid of 2012 Jul 05 but was reported to
        # trigger the failure on Ubuntu 10.04:
        # http://projects.scipy.org/numpy/ticket/1517#comment:2
        with temppath(suffix='.npz') as tmp:
            np.savez(tmp, data='LOVELY LOAD')
            # We need to check if the garbage collector can properly close
            # numpy npz file returned by np.load when their reference count
            # goes to zero.  Python 3 running in debug mode raises a
            # ResourceWarning when file closing is left to the garbage
            # collector, so we catch the warnings.  Because ResourceWarning
            # is unknown in Python < 3.x, we take the easy way out and
            # catch all warnings.
            with suppress_warnings() as sup:
                sup.filter(Warning)  # TODO: specify exact message
                for i in range(1, 1025):
                    try:
                        np.load(tmp)["data"]
                    except Exception as e:
                        msg = "Failed to load data from a file: %s" % e
                        raise AssertionError(msg)

    def test_closing_zipfile_after_load(self):
        # Check that zipfile owns file and can close it.  This needs to
        # pass a file name to load for the test. On windows failure will
        # cause a second error will be raised when the attempt to remove
        # the open file is made.
        prefix = 'numpy_test_closing_zipfile_after_load_'
        with temppath(suffix='.npz', prefix=prefix) as tmp:
            np.savez(tmp, lab='place holder')
            data = np.load(tmp)
            fp = data.zip.fp
            data.close()
            assert_(fp.closed)
class TestSaveTxt:
    def test_array(self):
        """2-D save with a single float format and with an int format."""
        a = np.array([[1, 2], [3, 4]], float)
        fmt = "%.18e"
        c = BytesIO()
        np.savetxt(c, a, fmt=fmt)
        c.seek(0)
        assert_equal(c.readlines(),
                     [asbytes((fmt + ' ' + fmt + '\n') % (1, 2)),
                      asbytes((fmt + ' ' + fmt + '\n') % (3, 4))])

        a = np.array([[1, 2], [3, 4]], int)
        c = BytesIO()
        np.savetxt(c, a, fmt='%d')
        c.seek(0)
        assert_equal(c.readlines(), [b'1 2\n', b'3 4\n'])
    def test_1D(self):
        """1-D input is written one value per line."""
        a = np.array([1, 2, 3, 4], int)
        c = BytesIO()
        np.savetxt(c, a, fmt='%d')
        c.seek(0)
        lines = c.readlines()
        assert_equal(lines, [b'1\n', b'2\n', b'3\n', b'4\n'])
    def test_0D_3D(self):
        """savetxt rejects 0-D and 3-D arrays with ValueError."""
        c = BytesIO()
        assert_raises(ValueError, np.savetxt, c, np.array(1))
        assert_raises(ValueError, np.savetxt, c, np.array([[[1], [2]]]))
    def test_structured(self):
        """Structured arrays are flattened to one column per field."""
        a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
        c = BytesIO()
        np.savetxt(c, a, fmt='%d')
        c.seek(0)
        assert_equal(c.readlines(), [b'1 2\n', b'3 4\n'])
    def test_structured_padded(self):
        """Multi-field view (with padding between fields) saves cleanly."""
        # gh-13297
        a = np.array([(1, 2, 3),(4, 5, 6)], dtype=[
            ('foo', 'i4'), ('bar', 'i4'), ('baz', 'i4')
        ])
        c = BytesIO()
        np.savetxt(c, a[['foo', 'baz']], fmt='%d')
        c.seek(0)
        assert_equal(c.readlines(), [b'1 3\n', b'4 6\n'])
    @pytest.mark.skipif(Path is None, reason="No pathlib.Path")
    def test_multifield_view(self):
        """np.save/np.load roundtrip of a multi-field view of a structured array."""
        a = np.ones(1, dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'f4')])
        v = a[['x', 'z']]
        with temppath(suffix='.npy') as path:
            path = Path(path)
            np.save(path, v)
            data = np.load(path)
            assert_array_equal(data, v)
    def test_delimiter(self):
        """A custom delimiter separates the columns."""
        a = np.array([[1., 2.], [3., 4.]])
        c = BytesIO()
        np.savetxt(c, a, delimiter=',', fmt='%d')
        c.seek(0)
        assert_equal(c.readlines(), [b'1,2\n', b'3,4\n'])
    def test_format(self):
        """fmt as list, as a single multi-column string, and invalid fmt."""
        a = np.array([(1, 2), (3, 4)])
        c = BytesIO()
        # Sequence of formats
        np.savetxt(c, a, fmt=['%02d', '%3.1f'])
        c.seek(0)
        assert_equal(c.readlines(), [b'01 2.0\n', b'03 4.0\n'])

        # A single multiformat string
        c = BytesIO()
        np.savetxt(c, a, fmt='%02d : %3.1f')
        c.seek(0)
        lines = c.readlines()
        assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n'])

        # Specify delimiter, should be overridden
        c = BytesIO()
        np.savetxt(c, a, fmt='%02d : %3.1f', delimiter=',')
        c.seek(0)
        lines = c.readlines()
        assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n'])

        # Bad fmt, should raise a ValueError
        c = BytesIO()
        assert_raises(ValueError, np.savetxt, c, a, fmt=99)
    def test_header_footer(self):
        """header/footer keywords, with default and custom comment prefixes."""
        # Test the functionality of the header and footer keyword argument.

        c = BytesIO()
        a = np.array([(1, 2), (3, 4)], dtype=int)
        test_header_footer = 'Test header / footer'
        # Test the header keyword argument
        np.savetxt(c, a, fmt='%1d', header=test_header_footer)
        c.seek(0)
        assert_equal(c.read(),
                     asbytes('# ' + test_header_footer + '\n1 2\n3 4\n'))
        # Test the footer keyword argument
        c = BytesIO()
        np.savetxt(c, a, fmt='%1d', footer=test_header_footer)
        c.seek(0)
        assert_equal(c.read(),
                     asbytes('1 2\n3 4\n# ' + test_header_footer + '\n'))
        # Test the commentstr keyword argument used on the header
        c = BytesIO()
        commentstr = '% '
        np.savetxt(c, a, fmt='%1d',
                   header=test_header_footer, comments=commentstr)
        c.seek(0)
        assert_equal(c.read(),
                     asbytes(commentstr + test_header_footer + '\n' + '1 2\n3 4\n'))
        # Test the commentstr keyword argument used on the footer
        c = BytesIO()
        commentstr = '% '
        np.savetxt(c, a, fmt='%1d',
                   footer=test_header_footer, comments=commentstr)
        c.seek(0)
        assert_equal(c.read(),
                     asbytes('1 2\n3 4\n' + commentstr + test_header_footer + '\n'))
    def test_file_roundtrip(self):
        """savetxt -> loadtxt through a real file path recovers the data."""
        with temppath() as name:
            a = np.array([(1, 2), (3, 4)])
            np.savetxt(name, a)
            b = np.loadtxt(name)
            assert_array_equal(a, b)
    def test_complex_arrays(self):
        """Complex data with the three supported fmt spellings."""
        ncols = 2
        nrows = 2
        a = np.zeros((ncols, nrows), dtype=np.complex128)
        re = np.pi
        im = np.e
        a[:] = re + 1.0j * im

        # One format only
        c = BytesIO()
        np.savetxt(c, a, fmt=' %+.3e')
        c.seek(0)
        lines = c.readlines()
        assert_equal(
            lines,
            [b' ( +3.142e+00+ +2.718e+00j)  ( +3.142e+00+ +2.718e+00j)\n',
             b' ( +3.142e+00+ +2.718e+00j)  ( +3.142e+00+ +2.718e+00j)\n'])

        # One format for each real and imaginary part
        c = BytesIO()
        np.savetxt(c, a, fmt='  %+.3e' * 2 * ncols)
        c.seek(0)
        lines = c.readlines()
        assert_equal(
            lines,
            [b'  +3.142e+00  +2.718e+00  +3.142e+00  +2.718e+00\n',
             b'  +3.142e+00  +2.718e+00  +3.142e+00  +2.718e+00\n'])

        # One format for each complex number
        c = BytesIO()
        np.savetxt(c, a, fmt=['(%.3e%+.3ej)'] * ncols)
        c.seek(0)
        lines = c.readlines()
        assert_equal(
            lines,
            [b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n',
             b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n'])
    def test_complex_negative_exponent(self):
        """Negative imaginary parts must not render as 'x+-yj'."""
        # Previous to 1.15, some formats generated x+-yj, gh 7895
        ncols = 2
        nrows = 2
        a = np.zeros((ncols, nrows), dtype=np.complex128)
        re = np.pi
        im = np.e
        a[:] = re - 1.0j * im
        c = BytesIO()
        np.savetxt(c, a, fmt='%.3e')
        c.seek(0)
        lines = c.readlines()
        assert_equal(
            lines,
            [b' (3.142e+00-2.718e+00j)  (3.142e+00-2.718e+00j)\n',
             b' (3.142e+00-2.718e+00j)  (3.142e+00-2.718e+00j)\n'])
    def test_custom_writer(self):
        """Any object with a write() method is accepted as the destination."""

        class CustomWriter(list):
            def write(self, text):
                self.extend(text.split(b'\n'))

        w = CustomWriter()
        a = np.array([(1, 2), (3, 4)])
        np.savetxt(w, a)
        b = np.loadtxt(w)
        assert_array_equal(a, b)
def test_unicode(self):
utf8 = b'\xcf\x96'.decode('UTF-8')
a = np.array([utf8], dtype=np.unicode_)
with tempdir() as tmpdir:
# set encoding as on windows it may not be unicode even on py3
np.savetxt(os.path.join(tmpdir, 'test.csv'), a, fmt=['%s'],
encoding='UTF-8')
    def test_unicode_roundtrip(self):
        """Round-trip a unicode array through savetxt/loadtxt for plain and
        compressed files using a non-default encoding (UTF-16-LE)."""
        utf8 = b'\xcf\x96'.decode('UTF-8')
        a = np.array([utf8], dtype=np.unicode_)
        # our gz wrapper support encoding
        suffixes = ['', '.gz']
        # stdlib 2 versions do not support encoding
        if MAJVER > 2:
            if HAS_BZ2:
                suffixes.append('.bz2')
            if HAS_LZMA:
                suffixes.extend(['.xz', '.lzma'])
        with tempdir() as tmpdir:
            for suffix in suffixes:
                np.savetxt(os.path.join(tmpdir, 'test.csv' + suffix), a,
                           fmt=['%s'], encoding='UTF-16-LE')
                b = np.loadtxt(os.path.join(tmpdir, 'test.csv' + suffix),
                               encoding='UTF-16-LE', dtype=np.unicode_)
                assert_array_equal(a, b)
def test_unicode_bytestream(self):
utf8 = b'\xcf\x96'.decode('UTF-8')
a = np.array([utf8], dtype=np.unicode_)
s = BytesIO()
np.savetxt(s, a, fmt=['%s'], encoding='UTF-8')
s.seek(0)
assert_equal(s.read().decode('UTF-8'), utf8 + '\n')
def test_unicode_stringstream(self):
utf8 = b'\xcf\x96'.decode('UTF-8')
a = np.array([utf8], dtype=np.unicode_)
s = StringIO()
np.savetxt(s, a, fmt=['%s'], encoding='UTF-8')
s.seek(0)
assert_equal(s.read(), utf8 + '\n')
@pytest.mark.parametrize("fmt", [u"%f", b"%f"])
@pytest.mark.parametrize("iotype", [StringIO, BytesIO])
def test_unicode_and_bytes_fmt(self, fmt, iotype):
# string type of fmt should not matter, see also gh-4053
a = np.array([1.])
s = iotype()
np.savetxt(s, a, fmt=fmt)
s.seek(0)
if iotype is StringIO:
assert_equal(s.read(), u"%f\n" % 1.)
else:
assert_equal(s.read(), b"%f\n" % 1.)
    @pytest.mark.skipif(sys.platform=='win32',
                        reason="large files cause problems")
    @pytest.mark.slow
    @requires_memory(free_bytes=7e9)
    def test_large_zip(self):
        """Stress test: savez must produce a zip archive whose member
        exceeds 4GB (i.e. zip64 must be used)."""
        # The test takes at least 6GB of memory, writes a file larger than 4GB
        test_data = np.asarray([np.random.rand(np.random.randint(50,100),4)
                               for i in range(800000)])
        with tempdir() as tmpdir:
            np.savez(os.path.join(tmpdir, 'test.npz'), test_data=test_data)
class LoadTxtBase:
    """Shared test cases for the text loaders.

    Subclasses must set ``loadfunc`` (np.loadtxt or np.genfromtxt); every
    test below is exercised once per loader.
    """
    def check_compressed(self, fopen, suffixes):
        """Write UTF-32-LE data via ``fopen`` for each suffix and verify the
        loader reads it back both by path and from an open file object."""
        # Test that we can load data from a compressed file
        wanted = np.arange(6).reshape((2, 3))
        linesep = ('\n', '\r\n', '\r')
        for sep in linesep:
            data = '0 1 2' + sep + '3 4 5'
            for suffix in suffixes:
                with temppath(suffix=suffix) as name:
                    with fopen(name, mode='wt', encoding='UTF-32-LE') as f:
                        f.write(data)
                    res = self.loadfunc(name, encoding='UTF-32-LE')
                    assert_array_equal(res, wanted)
                    with fopen(name, "rt", encoding='UTF-32-LE') as f:
                        res = self.loadfunc(f)
                    assert_array_equal(res, wanted)
    # Python2 .open does not support encoding
    @pytest.mark.skipif(MAJVER == 2, reason="Needs Python version >= 3")
    def test_compressed_gzip(self):
        """Loaders must read gzip-compressed text."""
        self.check_compressed(gzip.open, ('.gz',))
    @pytest.mark.skipif(not HAS_BZ2, reason="Needs bz2")
    @pytest.mark.skipif(MAJVER == 2, reason="Needs Python version >= 3")
    def test_compressed_bz2(self):
        """Loaders must read bz2-compressed text."""
        self.check_compressed(bz2.open, ('.bz2',))
    @pytest.mark.skipif(not HAS_LZMA, reason="Needs lzma")
    @pytest.mark.skipif(MAJVER == 2, reason="Needs Python version >= 3")
    def test_compressed_lzma(self):
        """Loaders must read xz/lzma-compressed text."""
        self.check_compressed(lzma.open, ('.xz', '.lzma'))
    def test_encoding(self):
        """The ``encoding`` argument decodes non-UTF-8 input files."""
        with temppath() as path:
            with open(path, "wb") as f:
                f.write('0.\n1.\n2.'.encode("UTF-16"))
            x = self.loadfunc(path, encoding="UTF-16")
            assert_array_equal(x, [0., 1., 2.])
    def test_stringload(self):
        """Non-ASCII string data survives loading with an explicit encoding."""
        # umlaute
        nonascii = b'\xc3\xb6\xc3\xbc\xc3\xb6'.decode("UTF-8")
        with temppath() as path:
            with open(path, "wb") as f:
                f.write(nonascii.encode("UTF-16"))
            x = self.loadfunc(path, encoding="UTF-16", dtype=np.unicode_)
            assert_array_equal(x, nonascii)
    def test_binary_decode(self):
        """A raw byte stream is decoded with the given encoding before parse."""
        utf16 = b'\xff\xfeh\x04 \x00i\x04 \x00j\x04'
        v = self.loadfunc(BytesIO(utf16), dtype=np.unicode_, encoding='UTF-16')
        assert_array_equal(v, np.array(utf16.decode('UTF-16').split()))
    def test_converters_decode(self):
        """Without an encoding, converters receive raw bytes to decode."""
        # test converters that decode strings
        c = TextIO()
        c.write(b'\xcf\x96')
        c.seek(0)
        x = self.loadfunc(c, dtype=np.unicode_,
                          converters={0: lambda x: x.decode('UTF-8')})
        a = np.array([b'\xcf\x96'.decode('UTF-8')])
        assert_array_equal(x, a)
    def test_converters_nodecode(self):
        """With an encoding set, converters receive native str values."""
        # test native string converters enabled by setting an encoding
        utf8 = b'\xcf\x96'.decode('UTF-8')
        with temppath() as path:
            with io.open(path, 'wt', encoding='UTF-8') as f:
                f.write(utf8)
            x = self.loadfunc(path, dtype=np.unicode_,
                              converters={0: lambda x: x + 't'},
                              encoding='UTF-8')
            a = np.array([utf8 + 't'])
            assert_array_equal(x, a)
class TestLoadTxt(LoadTxtBase):
    """Tests specific to np.loadtxt (plus the shared LoadTxtBase suite)."""
    loadfunc = staticmethod(np.loadtxt)
    def setup(self):
        """Shrink the parser chunk size so chunk-boundary code paths run."""
        # lower chunksize for testing
        self.orig_chunk = np.lib.npyio._loadtxt_chunksize
        np.lib.npyio._loadtxt_chunksize = 1
    def teardown(self):
        """Restore the original chunk size."""
        np.lib.npyio._loadtxt_chunksize = self.orig_chunk
    def test_record(self):
        """Structured dtypes produce record-style arrays."""
        c = TextIO()
        c.write('1 2\n3 4')
        c.seek(0)
        x = np.loadtxt(c, dtype=[('x', np.int32), ('y', np.int32)])
        a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
        assert_array_equal(x, a)
        d = TextIO()
        d.write('M 64.0 75.0\nF 25.0 60.0')
        d.seek(0)
        mydescriptor = {'names': ('gender', 'age', 'weight'),
                        'formats': ('S1', 'i4', 'f4')}
        b = np.array([('M', 64.0, 75.0),
                      ('F', 25.0, 60.0)], dtype=mydescriptor)
        y = np.loadtxt(d, dtype=mydescriptor)
        assert_array_equal(y, b)
    def test_array(self):
        """Plain 2-D arrays load with both int and float dtypes."""
        c = TextIO()
        c.write('1 2\n3 4')
        c.seek(0)
        x = np.loadtxt(c, dtype=int)
        a = np.array([[1, 2], [3, 4]], int)
        assert_array_equal(x, a)
        c.seek(0)
        x = np.loadtxt(c, dtype=float)
        a = np.array([[1, 2], [3, 4]], float)
        assert_array_equal(x, a)
    def test_1D(self):
        """Single-column and single-row inputs both squeeze to 1-D."""
        c = TextIO()
        c.write('1\n2\n3\n4\n')
        c.seek(0)
        x = np.loadtxt(c, dtype=int)
        a = np.array([1, 2, 3, 4], int)
        assert_array_equal(x, a)
        c = TextIO()
        c.write('1,2,3,4\n')
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',')
        a = np.array([1, 2, 3, 4], int)
        assert_array_equal(x, a)
    def test_missing(self):
        """A converter can supply a fill value for empty fields."""
        c = TextIO()
        c.write('1,2,3,,5\n')
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',',
                       converters={3: lambda s: int(s or - 999)})
        a = np.array([1, 2, 3, -999, 5], int)
        assert_array_equal(x, a)
    def test_converters_with_usecols(self):
        """Converters are keyed by original column index, not usecols index."""
        c = TextIO()
        c.write('1,2,3,,5\n6,7,8,9,10\n')
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',',
                       converters={3: lambda s: int(s or - 999)},
                       usecols=(1, 3,))
        a = np.array([[2, -999], [7, 9]], int)
        assert_array_equal(x, a)
    def test_comments_unicode(self):
        """``comments`` may be a unicode string."""
        c = TextIO()
        c.write('# comment\n1,2,3,5\n')
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',',
                       comments=u'#')
        a = np.array([1, 2, 3, 5], int)
        assert_array_equal(x, a)
    def test_comments_byte(self):
        """``comments`` may be a bytes object."""
        c = TextIO()
        c.write('# comment\n1,2,3,5\n')
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',',
                       comments=b'#')
        a = np.array([1, 2, 3, 5], int)
        assert_array_equal(x, a)
    def test_comments_multiple(self):
        """A list of comment tokens strips every one of them."""
        c = TextIO()
        c.write('# comment\n1,2,3\n@ comment2\n4,5,6 // comment3')
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',',
                       comments=['#', '@', '//'])
        a = np.array([[1, 2, 3], [4, 5, 6]], int)
        assert_array_equal(x, a)
    def test_comments_multi_chars(self):
        """A multi-character comment token is treated as one string."""
        c = TextIO()
        c.write('/* comment\n1,2,3,5\n')
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',',
                       comments='/*')
        a = np.array([1, 2, 3, 5], int)
        assert_array_equal(x, a)
        # Check that '/*' is not transformed to ['/', '*']
        c = TextIO()
        c.write('*/ comment\n1,2,3,5\n')
        c.seek(0)
        assert_raises(ValueError, np.loadtxt, c, dtype=int, delimiter=',',
                      comments='/*')
    def test_skiprows(self):
        """skiprows drops leading lines whether or not they are comments."""
        c = TextIO()
        c.write('comment\n1,2,3,5\n')
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',',
                       skiprows=1)
        a = np.array([1, 2, 3, 5], int)
        assert_array_equal(x, a)
        c = TextIO()
        c.write('# comment\n1,2,3,5\n')
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',',
                       skiprows=1)
        a = np.array([1, 2, 3, 5], int)
        assert_array_equal(x, a)
    def test_usecols(self):
        """usecols accepts tuples, arrays, scalars, and any __index__ object;
        non-integers must raise TypeError."""
        a = np.array([[1, 2], [3, 4]], float)
        c = BytesIO()
        np.savetxt(c, a)
        c.seek(0)
        x = np.loadtxt(c, dtype=float, usecols=(1,))
        assert_array_equal(x, a[:, 1])
        a = np.array([[1, 2, 3], [3, 4, 5]], float)
        c = BytesIO()
        np.savetxt(c, a)
        c.seek(0)
        x = np.loadtxt(c, dtype=float, usecols=(1, 2))
        assert_array_equal(x, a[:, 1:])
        # Testing with arrays instead of tuples.
        c.seek(0)
        x = np.loadtxt(c, dtype=float, usecols=np.array([1, 2]))
        assert_array_equal(x, a[:, 1:])
        # Testing with an integer instead of a sequence
        for int_type in [int, np.int8, np.int16,
                         np.int32, np.int64, np.uint8, np.uint16,
                         np.uint32, np.uint64]:
            to_read = int_type(1)
            c.seek(0)
            x = np.loadtxt(c, dtype=float, usecols=to_read)
            assert_array_equal(x, a[:, 1])
        # Testing with some crazy custom integer type
        class CrazyInt:
            def __index__(self):
                return 1
        crazy_int = CrazyInt()
        c.seek(0)
        x = np.loadtxt(c, dtype=float, usecols=crazy_int)
        assert_array_equal(x, a[:, 1])
        c.seek(0)
        x = np.loadtxt(c, dtype=float, usecols=(crazy_int,))
        assert_array_equal(x, a[:, 1])
        # Checking with dtypes defined converters.
        data = '''JOE 70.1 25.3
                BOB 60.5 27.9
                '''
        c = TextIO(data)
        names = ['stid', 'temp']
        dtypes = ['S4', 'f8']
        arr = np.loadtxt(c, usecols=(0, 2), dtype=list(zip(names, dtypes)))
        assert_equal(arr['stid'], [b"JOE", b"BOB"])
        assert_equal(arr['temp'], [25.3, 27.9])
        # Testing non-ints in usecols
        c.seek(0)
        bogus_idx = 1.5
        assert_raises_regex(
            TypeError,
            '^usecols must be.*%s' % type(bogus_idx),
            np.loadtxt, c, usecols=bogus_idx
            )
        assert_raises_regex(
            TypeError,
            '^usecols must be.*%s' % type(bogus_idx),
            np.loadtxt, c, usecols=[0, bogus_idx, 0]
            )
    def test_fancy_dtype(self):
        """Nested structured dtypes parse correctly."""
        c = TextIO()
        c.write('1,2,3.0\n4,5,6.0\n')
        c.seek(0)
        dt = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
        x = np.loadtxt(c, dtype=dt, delimiter=',')
        a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dt)
        assert_array_equal(x, a)
    def test_shaped_dtype(self):
        """A field with a 2-D sub-array shape consumes multiple columns."""
        c = TextIO("aaaa  1.0  8.0  1 2 3 4 5 6")
        dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
                       ('block', int, (2, 3))])
        x = np.loadtxt(c, dtype=dt)
        a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])],
                     dtype=dt)
        assert_array_equal(x, a)
    def test_3d_shaped_dtype(self):
        """A field with a 3-D sub-array shape also works."""
        c = TextIO("aaaa  1.0  8.0  1 2 3 4 5 6 7 8 9 10 11 12")
        dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
                       ('block', int, (2, 2, 3))])
        x = np.loadtxt(c, dtype=dt)
        a = np.array([('aaaa', 1.0, 8.0,
                       [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])],
                     dtype=dt)
        assert_array_equal(x, a)
    def test_str_dtype(self):
        """str and bytes dtypes both load string data (gh-8033)."""
        # see gh-8033
        c = ["str1", "str2"]
        for dt in (str, np.bytes_):
            a = np.array(["str1", "str2"], dtype=dt)
            x = np.loadtxt(c, dtype=dt)
            assert_array_equal(x, a)
    def test_empty_file(self):
        """An empty input yields an empty array (with a warning filtered)."""
        with suppress_warnings() as sup:
            sup.filter(message="loadtxt: Empty input file:")
            c = TextIO()
            x = np.loadtxt(c)
            assert_equal(x.shape, (0,))
            x = np.loadtxt(c, dtype=np.int64)
            assert_equal(x.shape, (0,))
            assert_(x.dtype == np.int64)
    def test_unused_converter(self):
        """Converters for columns excluded by usecols are ignored."""
        c = TextIO()
        c.writelines(['1 21\n', '3 42\n'])
        c.seek(0)
        data = np.loadtxt(c, usecols=(1,),
                          converters={0: lambda s: int(s, 16)})
        assert_array_equal(data, [21, 42])
        c.seek(0)
        data = np.loadtxt(c, usecols=(1,),
                          converters={1: lambda s: int(s, 16)})
        assert_array_equal(data, [33, 66])
    def test_dtype_with_object(self):
        """An object field can be filled through a converter."""
        # Test using an explicit dtype with an object
        data = """ 1; 2001-01-01
                   2; 2002-01-31 """
        ndtype = [('idx', int), ('code', object)]
        func = lambda s: strptime(s.strip(), "%Y-%m-%d")
        converters = {1: func}
        test = np.loadtxt(TextIO(data), delimiter=";", dtype=ndtype,
                          converters=converters)
        control = np.array(
            [(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))],
            dtype=ndtype)
        assert_equal(test, control)
    def test_uint64_type(self):
        """Values above int64 range load correctly as uint64."""
        tgt = (9223372043271415339, 9223372043271415853)
        c = TextIO()
        c.write("%s %s" % tgt)
        c.seek(0)
        res = np.loadtxt(c, dtype=np.uint64)
        assert_equal(res, tgt)
    def test_int64_type(self):
        """Extreme int64 values round-trip."""
        tgt = (-9223372036854775807, 9223372036854775807)
        c = TextIO()
        c.write("%s %s" % tgt)
        c.seek(0)
        res = np.loadtxt(c, dtype=np.int64)
        assert_equal(res, tgt)
    def test_from_float_hex(self):
        """Hexadecimal float literals (float.hex) are parsed."""
        # IEEE doubles and floats only, otherwise the float32
        # conversion may fail.
        tgt = np.logspace(-10, 10, 5).astype(np.float32)
        tgt = np.hstack((tgt, -tgt)).astype(float)
        inp = '\n'.join(map(float.hex, tgt))
        c = TextIO()
        c.write(inp)
        for dt in [float, np.float32]:
            c.seek(0)
            res = np.loadtxt(c, dtype=dt)
            assert_equal(res, tgt, err_msg="%s" % dt)
    def test_from_complex(self):
        """Python complex repr strings are parsed."""
        tgt = (complex(1, 1), complex(1, -1))
        c = TextIO()
        c.write("%s %s" % tgt)
        c.seek(0)
        res = np.loadtxt(c, dtype=complex)
        assert_equal(res, tgt)
    def test_complex_misformatted(self):
        """Legacy malformed complex output ('x+-yj', gh-7895) still loads."""
        # test for backward compatibility
        # some complex formats used to generate x+-yj
        a = np.zeros((2, 2), dtype=np.complex128)
        re = np.pi
        im = np.e
        a[:] = re - 1.0j * im
        c = BytesIO()
        np.savetxt(c, a, fmt='%.16e')
        c.seek(0)
        txt = c.read()
        c.seek(0)
        # misformat the sign on the imaginary part, gh 7895
        txt_bad = txt.replace(b'e+00-', b'e00+-')
        assert_(txt_bad != txt)
        c.write(txt_bad)
        c.seek(0)
        res = np.loadtxt(c, dtype=complex)
        assert_equal(res, a)
    def test_universal_newline(self):
        """Bare '\\r' line endings are handled via universal newlines."""
        with temppath() as name:
            with open(name, 'w') as f:
                f.write('1 21\r3 42\r')
            data = np.loadtxt(name)
        assert_array_equal(data, [[1, 21], [3, 42]])
    def test_empty_field_after_tab(self):
        """A trailing tab produces an empty (stripped) string field."""
        c = TextIO()
        c.write('1 \t2 \t3\tstart \n4\t5\t6\t  \n7\t8\t9.5\t')
        c.seek(0)
        dt = {'names': ('x', 'y', 'z', 'comment'),
              'formats': ('<i4', '<i4', '<f4', '|S8')}
        x = np.loadtxt(c, dtype=dt, delimiter='\t')
        a = np.array([b'start ', b'  ', b''])
        assert_array_equal(x['comment'], a)
    def test_structure_unpack(self):
        """unpack=True splits a structured array into per-field arrays."""
        txt = TextIO("M 21 72\nF 35 58")
        dt = {'names': ('a', 'b', 'c'), 'formats': ('|S1', '<i4', '<f4')}
        a, b, c = np.loadtxt(txt, dtype=dt, unpack=True)
        assert_(a.dtype.str == '|S1')
        assert_(b.dtype.str == '<i4')
        assert_(c.dtype.str == '<f4')
        assert_array_equal(a, np.array([b'M', b'F']))
        assert_array_equal(b, np.array([21, 35]))
        assert_array_equal(c, np.array([72.,  58.]))
    def test_ndmin_keyword(self):
        """ndmin forces a minimum dimensionality; invalid values raise."""
        c = TextIO()
        c.write('1,2,3\n4,5,6')
        c.seek(0)
        assert_raises(ValueError, np.loadtxt, c, ndmin=3)
        c.seek(0)
        assert_raises(ValueError, np.loadtxt, c, ndmin=1.5)
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',', ndmin=1)
        a = np.array([[1, 2, 3], [4, 5, 6]])
        assert_array_equal(x, a)
        d = TextIO()
        d.write('0,1,2')
        d.seek(0)
        x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=2)
        assert_(x.shape == (1, 3))
        d.seek(0)
        x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=1)
        assert_(x.shape == (3,))
        d.seek(0)
        x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=0)
        assert_(x.shape == (3,))
        e = TextIO()
        e.write('0\n1\n2')
        e.seek(0)
        x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=2)
        assert_(x.shape == (3, 1))
        e.seek(0)
        x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=1)
        assert_(x.shape == (3,))
        e.seek(0)
        x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=0)
        assert_(x.shape == (3,))
        # Test ndmin kw with empty file.
        with suppress_warnings() as sup:
            sup.filter(message="loadtxt: Empty input file:")
            f = TextIO()
            assert_(np.loadtxt(f, ndmin=2).shape == (0, 1,))
            assert_(np.loadtxt(f, ndmin=1).shape == (0,))
    def test_generator_source(self):
        """Any iterable of lines (e.g. a generator) is a valid source."""
        def count():
            for i in range(10):
                yield "%d" % i
        res = np.loadtxt(count())
        assert_array_equal(res, np.arange(10))
    def test_bad_line(self):
        """A row with the wrong column count raises, naming the line number."""
        c = TextIO()
        c.write('1 2 3\n4 5 6\n2 3')
        c.seek(0)
        # Check for exception and that exception contains line number
        assert_raises_regex(ValueError, "3", np.loadtxt, c)
    def test_none_as_string(self):
        """The literal text 'None' loads as a string when the dtype asks
        for one (gh-5155)."""
        # gh-5155, None should work as string when format demands it
        c = TextIO()
        c.write('100,foo,200\n300,None,400')
        c.seek(0)
        dt = np.dtype([('x', int), ('a', 'S10'), ('y', int)])
        np.loadtxt(c, delimiter=',', dtype=dt, comments=None)  # Should succeed
    @pytest.mark.skipif(locale.getpreferredencoding() == 'ANSI_X3.4-1968',
                        reason="Wrong preferred encoding")
    def test_binary_load(self):
        """Loading from a binary file handle decodes via ``encoding``; the
        historic latin1 fallback for dtype='S' is preserved."""
        butf8 = b"5,6,7,\xc3\x95scarscar\n\r15,2,3,hello\n\r"\
                b"20,2,3,\xc3\x95scar\n\r"
        sutf8 = butf8.decode("UTF-8").replace("\r", "").splitlines()
        with temppath() as path:
            with open(path, "wb") as f:
                f.write(butf8)
            with open(path, "rb") as f:
                x = np.loadtxt(f, encoding="UTF-8", dtype=np.unicode_)
            assert_array_equal(x, sutf8)
            # test broken latin1 conversion people now rely on
            with open(path, "rb") as f:
                x = np.loadtxt(f, encoding="UTF-8", dtype="S")
            x = [b'5,6,7,\xc3\x95scarscar', b'15,2,3,hello', b'20,2,3,\xc3\x95scar']
            assert_array_equal(x, np.array(x, dtype="S"))
    def test_max_rows(self):
        """max_rows limits how many data rows are read."""
        c = TextIO()
        c.write('1,2,3,5\n4,5,7,8\n2,1,4,5')
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',',
                       max_rows=1)
        a = np.array([1, 2, 3, 5], int)
        assert_array_equal(x, a)
    def test_max_rows_with_skiprows(self):
        """max_rows counts rows after skiprows has been applied."""
        c = TextIO()
        c.write('comments\n1,2,3,5\n4,5,7,8\n2,1,4,5')
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',',
                       skiprows=1, max_rows=1)
        a = np.array([1, 2, 3, 5], int)
        assert_array_equal(x, a)
        c = TextIO()
        c.write('comment\n1,2,3,5\n4,5,7,8\n2,1,4,5')
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',',
                       skiprows=1, max_rows=2)
        a = np.array([[1, 2, 3, 5], [4, 5, 7, 8]], int)
        assert_array_equal(x, a)
    def test_max_rows_with_read_continuation(self):
        """After a max_rows read the stream position allows continuing."""
        c = TextIO()
        c.write('1,2,3,5\n4,5,7,8\n2,1,4,5')
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',',
                       max_rows=2)
        a = np.array([[1, 2, 3, 5], [4, 5, 7, 8]], int)
        assert_array_equal(x, a)
        # test continuation
        x = np.loadtxt(c, dtype=int, delimiter=',')
        a = np.array([2,1,4,5], int)
        assert_array_equal(x, a)
    def test_max_rows_larger(self):
        """max_rows larger than the file simply reads everything."""
        #test max_rows > num rows
        c = TextIO()
        c.write('comment\n1,2,3,5\n4,5,7,8\n2,1,4,5')
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',',
                       skiprows=1, max_rows=6)
        a = np.array([[1, 2, 3, 5], [4, 5, 7, 8], [2, 1, 4, 5]], int)
        assert_array_equal(x, a)
class Testfromregex:
    """Tests for np.fromregex: building structured arrays from regex groups."""
    def test_record(self):
        """Float + 3-char string groups fill a structured dtype."""
        c = TextIO()
        c.write('1.312 foo\n1.534 bar\n4.444 qux')
        c.seek(0)
        dt = [('num', np.float64), ('val', 'S3')]
        x = np.fromregex(c, r"([0-9.]+)\s+(...)", dt)
        a = np.array([(1.312, 'foo'), (1.534, 'bar'), (4.444, 'qux')],
                     dtype=dt)
        assert_array_equal(x, a)
    def test_record_2(self):
        """Integer group into an int32 field."""
        c = TextIO()
        c.write('1312 foo\n1534 bar\n4444 qux')
        c.seek(0)
        dt = [('num', np.int32), ('val', 'S3')]
        x = np.fromregex(c, r"(\d+)\s+(...)", dt)
        a = np.array([(1312, 'foo'), (1534, 'bar'), (4444, 'qux')],
                     dtype=dt)
        assert_array_equal(x, a)
    def test_record_3(self):
        """Non-capturing text is discarded; a single group gives one field."""
        c = TextIO()
        c.write('1312 foo\n1534 bar\n4444 qux')
        c.seek(0)
        dt = [('num', np.float64)]
        x = np.fromregex(c, r"(\d+)\s+...", dt)
        a = np.array([(1312,), (1534,), (4444,)], dtype=dt)
        assert_array_equal(x, a)
    def test_record_unicode(self):
        """Unicode patterns/data work with an explicit encoding, both as a
        pattern string and as a precompiled regex."""
        utf8 = b'\xcf\x96'
        with temppath() as path:
            with open(path, 'wb') as f:
                f.write(b'1.312 foo' + utf8 + b' \n1.534 bar\n4.444 qux')
            dt = [('num', np.float64), ('val', 'U4')]
            x = np.fromregex(path, r"(?u)([0-9.]+)\s+(\w+)", dt, encoding='UTF-8')
            a = np.array([(1.312, 'foo' + utf8.decode('UTF-8')), (1.534, 'bar'),
                          (4.444, 'qux')], dtype=dt)
            assert_array_equal(x, a)
            regexp = re.compile(r"([0-9.]+)\s+(\w+)", re.UNICODE)
            x = np.fromregex(path, regexp, dt, encoding='UTF-8')
            assert_array_equal(x, a)
    def test_compiled_bytes(self):
        """A precompiled bytes regex works against a bytes stream."""
        regexp = re.compile(b'(\\d)')
        c = BytesIO(b'123')
        dt = [('num', np.float64)]
        a = np.array([1, 2, 3], dtype=dt)
        x = np.fromregex(c, regexp, dt)
        assert_array_equal(x, a)
#####--------------------------------------------------------------------------
class TestFromTxt(LoadTxtBase):
loadfunc = staticmethod(np.genfromtxt)
    def test_record(self):
        """genfromtxt with an explicit structured dtype (tuple and dict forms)."""
        # Test w/ explicit dtype
        data = TextIO('1 2\n3 4')
        test = np.genfromtxt(data, dtype=[('x', np.int32), ('y', np.int32)])
        control = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
        assert_equal(test, control)
        #
        data = TextIO('M 64.0 75.0\nF 25.0 60.0')
        descriptor = {'names': ('gender', 'age', 'weight'),
                      'formats': ('S1', 'i4', 'f4')}
        control = np.array([('M', 64.0, 75.0), ('F', 25.0, 60.0)],
                           dtype=descriptor)
        test = np.genfromtxt(data, dtype=descriptor)
        assert_equal(test, control)
    def test_array(self):
        """Plain ndarray output for int and float dtypes."""
        # Test outputting a standard ndarray
        data = TextIO('1 2\n3 4')
        control = np.array([[1, 2], [3, 4]], dtype=int)
        test = np.genfromtxt(data, dtype=int)
        assert_array_equal(test, control)
        #
        data.seek(0)
        control = np.array([[1, 2], [3, 4]], dtype=float)
        test = np.loadtxt(data, dtype=float)
        assert_array_equal(test, control)
    def test_1D(self):
        """Single-column and single-row inputs both squeeze to 1-D."""
        # Test squeezing to 1D
        control = np.array([1, 2, 3, 4], int)
        #
        data = TextIO('1\n2\n3\n4\n')
        test = np.genfromtxt(data, dtype=int)
        assert_array_equal(test, control)
        #
        data = TextIO('1,2,3,4\n')
        test = np.genfromtxt(data, dtype=int, delimiter=',')
        assert_array_equal(test, control)
    def test_comments(self):
        """Comments are stripped both on their own line and inline."""
        # Test the stripping of comments
        control = np.array([1, 2, 3, 5], int)
        # Comment on its own line
        data = TextIO('# comment\n1,2,3,5\n')
        test = np.genfromtxt(data, dtype=int, delimiter=',', comments='#')
        assert_equal(test, control)
        # Comment at the end of a line
        data = TextIO('1,2,3,5# comment\n')
        test = np.genfromtxt(data, dtype=int, delimiter=',', comments='#')
        assert_equal(test, control)
    def test_skiprows(self):
        """Header-skipping: genfromtxt's ``skip_header`` and loadtxt's
        ``skiprows`` both drop leading lines."""
        # Test row skipping
        control = np.array([1, 2, 3, 5], int)
        kwargs = dict(dtype=int, delimiter=',')
        #
        data = TextIO('comment\n1,2,3,5\n')
        test = np.genfromtxt(data, skip_header=1, **kwargs)
        assert_equal(test, control)
        #
        data = TextIO('# comment\n1,2,3,5\n')
        test = np.loadtxt(data, skiprows=1, **kwargs)
        assert_equal(test, control)
    def test_skip_footer(self):
        """skip_footer drops trailing lines (including the short last row)."""
        data = ["# %i" % i for i in range(1, 6)]
        data.append("A, B, C")
        data.extend(["%i,%3.1f,%03s" % (i, i, i) for i in range(51)])
        data[-1] = "99,99"
        kwargs = dict(delimiter=",", names=True, skip_header=5, skip_footer=10)
        test = np.genfromtxt(TextIO("\n".join(data)), **kwargs)
        ctrl = np.array([("%f" % i, "%f" % i, "%f" % i) for i in range(41)],
                        dtype=[(_, float) for _ in "ABC"])
        assert_equal(test, ctrl)
    def test_skip_footer_with_invalid(self):
        """Interaction of skip_footer with invalid (short) rows: raises when
        invalid rows remain, drops them when invalid_raise=False."""
        with suppress_warnings() as sup:
            sup.filter(ConversionWarning)
            basestr = '1 1\n2 2\n3 3\n4 4\n5  \n6  \n7  \n'
            # Footer too small to get rid of all invalid values
            assert_raises(ValueError, np.genfromtxt,
                          TextIO(basestr), skip_footer=1)
            a = np.genfromtxt(
                TextIO(basestr), skip_footer=1, invalid_raise=False)
            assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]))
            #
            a = np.genfromtxt(TextIO(basestr), skip_footer=3)
            assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]))
            #
            basestr = '1 1\n2  \n3 3\n4 4\n5  \n6 6\n7 7\n'
            a = np.genfromtxt(
                TextIO(basestr), skip_footer=1, invalid_raise=False)
            assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.], [6., 6.]]))
            a = np.genfromtxt(
                TextIO(basestr), skip_footer=3, invalid_raise=False)
            assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.]]))
    def test_header(self):
        """names=True reads the header line as field names (dtype=None emits
        a VisibleDeprecationWarning)."""
        # Test retrieving a header
        data = TextIO('gender age weight\nM 64.0 75.0\nF 25.0 60.0')
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
            test = np.genfromtxt(data, dtype=None, names=True)
            assert_(w[0].category is np.VisibleDeprecationWarning)
        control = {'gender': np.array([b'M', b'F']),
                   'age': np.array([64.0, 25.0]),
                   'weight': np.array([75.0, 60.0])}
        assert_equal(test['gender'], control['gender'])
        assert_equal(test['age'], control['age'])
        assert_equal(test['weight'], control['weight'])
    def test_auto_dtype(self):
        """dtype=None infers a per-column dtype (bytes/int/float/complex/bool)
        with default names f0..fN."""
        # Test the automatic definition of the output dtype
        data = TextIO('A 64 75.0 3+4j True\nBCD 25 60.0 5+6j False')
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
            test = np.genfromtxt(data, dtype=None)
            assert_(w[0].category is np.VisibleDeprecationWarning)
        control = [np.array([b'A', b'BCD']),
                   np.array([64, 25]),
                   np.array([75.0, 60.0]),
                   np.array([3 + 4j, 5 + 6j]),
                   np.array([True, False]), ]
        assert_equal(test.dtype.names, ['f0', 'f1', 'f2', 'f3', 'f4'])
        for (i, ctrl) in enumerate(control):
            assert_equal(test['f%i' % i], ctrl)
    def test_auto_dtype_uniform(self):
        """If every column infers to the same dtype, a plain 2-D array results."""
        # Tests whether the output dtype can be uniformized
        data = TextIO('1 2 3 4\n5 6 7 8\n')
        test = np.genfromtxt(data, dtype=None)
        control = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
        assert_equal(test, control)
    def test_fancy_dtype(self):
        """Nested structured dtypes are preserved."""
        # Check that a nested dtype isn't MIA
        data = TextIO('1,2,3.0\n4,5,6.0\n')
        fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
        test = np.genfromtxt(data, dtype=fancydtype, delimiter=',')
        control = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype)
        assert_equal(test, control)
    def test_names_overwrite(self):
        """The ``names`` argument overrides the names in an explicit dtype."""
        # Test overwriting the names of the dtype
        descriptor = {'names': ('g', 'a', 'w'),
                      'formats': ('S1', 'i4', 'f4')}
        data = TextIO(b'M 64.0 75.0\nF 25.0 60.0')
        names = ('gender', 'age', 'weight')
        test = np.genfromtxt(data, dtype=descriptor, names=names)
        descriptor['names'] = names
        control = np.array([('M', 64.0, 75.0),
                            ('F', 25.0, 60.0)], dtype=descriptor)
        assert_equal(test, control)
    def test_commented_header(self):
        """Field names can be recovered from a commented-out header line;
        the comment token (with or without a trailing space) is dropped."""
        # Check that names can be retrieved even if the line is commented out.
        data = TextIO("""
#gender age weight
M 21 72.100000
F 35 58.330000
M 33 21.99
""")
        # The # is part of the first name and should be deleted automatically.
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
            test = np.genfromtxt(data, names=True, dtype=None)
            assert_(w[0].category is np.VisibleDeprecationWarning)
        ctrl = np.array([('M', 21, 72.1), ('F', 35, 58.33), ('M', 33, 21.99)],
                        dtype=[('gender', '|S1'), ('age', int), ('weight', float)])
        assert_equal(test, ctrl)
        # Ditto, but we should get rid of the first element
        data = TextIO(b"""
# gender age weight
M 21 72.100000
F 35 58.330000
M 33 21.99
""")
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
            test = np.genfromtxt(data, names=True, dtype=None)
            assert_(w[0].category is np.VisibleDeprecationWarning)
        assert_equal(test, ctrl)
    def test_names_and_comments_none(self):
        """names=True must still work when comments=None (gh-10780)."""
        # Tests case when names is true but comments is None (gh-10780)
        data = TextIO('col1 col2\n 1 2\n 3 4')
        test = np.genfromtxt(data, dtype=(int, int), comments=None, names=True)
        control = np.array([(1, 2), (3, 4)], dtype=[('col1', int), ('col2', int)])
        assert_equal(test, control)
    def test_file_is_closed_on_error(self):
        """genfromtxt must close the file it opened even when decoding fails
        (gh-13200); an unclosed handle would surface as a ResourceWarning."""
        # gh-13200
        with tempdir() as tmpdir:
            fpath = os.path.join(tmpdir, "test.csv")
            with open(fpath, "wb") as f:
                f.write(u'\N{GREEK PI SYMBOL}'.encode('utf8'))
            # ResourceWarnings are emitted from a destructor, so won't be
            # detected by regular propagation to errors.
            with assert_no_warnings():
                with pytest.raises(UnicodeDecodeError):
                    np.genfromtxt(fpath, encoding="ascii")
    def test_autonames_and_usecols(self):
        """usecols may select columns *by name* when names=True."""
        # Tests names and usecols
        data = TextIO('A B C D\n aaaa 121 45 9.1')
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
            test = np.genfromtxt(data, usecols=('A', 'C', 'D'),
                                 names=True, dtype=None)
            assert_(w[0].category is np.VisibleDeprecationWarning)
        control = np.array(('aaaa', 45, 9.1),
                           dtype=[('A', '|S4'), ('C', int), ('D', float)])
        assert_equal(test, control)
    def test_converters_with_usecols(self):
        """Converters are keyed by original column index, not usecols index."""
        # Test the combination user-defined converters and usecol
        data = TextIO('1,2,3,,5\n6,7,8,9,10\n')
        test = np.genfromtxt(data, dtype=int, delimiter=',',
                             converters={3: lambda s: int(s or - 999)},
                             usecols=(1, 3,))
        control = np.array([[2, -999], [7, 9]], int)
        assert_equal(test, control)
    def test_converters_with_usecols_and_names(self):
        """Converters may be keyed by field *name* when names are in use."""
        # Tests names and usecols
        data = TextIO('A B C D\n aaaa 121 45 9.1')
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
            test = np.genfromtxt(data, usecols=('A', 'C', 'D'), names=True,
                                 dtype=None,
                                 converters={'C': lambda s: 2 * int(s)})
            assert_(w[0].category is np.VisibleDeprecationWarning)
        control = np.array(('aaaa', 90, 9.1),
                           dtype=[('A', '|S4'), ('C', int), ('D', float)])
        assert_equal(test, control)
    def test_converters_cornercases(self):
        """A converter returning datetime objects yields an object column."""
        # Test the conversion to datetime.
        converter = {
            'date': lambda s: strptime(s, '%Y-%m-%d %H:%M:%SZ')}
        data = TextIO('2009-02-03 12:00:00Z, 72214.0')
        test = np.genfromtxt(data, delimiter=',', dtype=None,
                             names=['date', 'stid'], converters=converter)
        control = np.array((datetime(2009, 2, 3), 72214.),
                           dtype=[('date', np.object_), ('stid', float)])
        assert_equal(test, control)
    def test_converters_cornercases2(self):
        """A converter returning datetime64 yields a datetime64 column."""
        # Test the conversion to datetime64.
        converter = {
            'date': lambda s: np.datetime64(strptime(s, '%Y-%m-%d %H:%M:%SZ'))}
        data = TextIO('2009-02-03 12:00:00Z, 72214.0')
        test = np.genfromtxt(data, delimiter=',', dtype=None,
                             names=['date', 'stid'], converters=converter)
        control = np.array((datetime(2009, 2, 3), 72214.),
                           dtype=[('date', 'datetime64[us]'), ('stid', float)])
        assert_equal(test, control)
    def test_unused_converter(self):
        """Converters for columns not selected by usecols are ignored."""
        # Test whether unused converters are forgotten
        data = TextIO("1 21\n  3 42\n")
        test = np.genfromtxt(data, usecols=(1,),
                             converters={0: lambda s: int(s, 16)})
        assert_equal(test, [21, 42])
        #
        data.seek(0)
        test = np.genfromtxt(data, usecols=(1,),
                             converters={1: lambda s: int(s, 16)})
        assert_equal(test, [33, 66])
    def test_invalid_converter(self):
        """A converter that raises on some inputs triggers ConverterError."""
        strip_rand = lambda x: float((b'r' in x.lower() and x.split()[-1]) or
                                     (b'r' not in x.lower() and x.strip() or 0.0))
        strip_per = lambda x: float((b'%' in x.lower() and x.split()[0]) or
                                    (b'%' not in x.lower() and x.strip() or 0.0))
        s = TextIO("D01N01,10/1/2003 ,1 %,R 75,400,600\r\n"
                   "L24U05,12/5/2003, 2 %,1,300, 150.5\r\n"
                   "D02N03,10/10/2004,R 1,,7,145.55")
        kwargs = dict(
            converters={2: strip_per, 3: strip_rand}, delimiter=",",
            dtype=None)
        assert_raises(ConverterError, np.genfromtxt, s, **kwargs)
    def test_tricky_converter_bug1666(self):
        """Regression test for gh-1666: a converter stripping a leading char."""
        # Test some corner cases
        s = TextIO('q1,2\nq3,4')
        cnv = lambda s: float(s[1:])
        test = np.genfromtxt(s, delimiter=',', converters={0: cnv})
        control = np.array([[1., 2.], [3., 4.]])
        assert_equal(test, control)
    def test_dtype_with_converters(self):
        """A converter may override the declared dtype for its column."""
        dstr = "2009; 23; 46"
        test = np.genfromtxt(TextIO(dstr,),
                             delimiter=";", dtype=float, converters={0: bytes})
        control = np.array([('2009', 23., 46)],
                           dtype=[('f0', '|S4'), ('f1', float), ('f2', float)])
        assert_equal(test, control)
        test = np.genfromtxt(TextIO(dstr,),
                             delimiter=";", dtype=float, converters={0: float})
        control = np.array([2009., 23., 46],)
        assert_equal(test, control)
    def test_dtype_with_converters_and_usecols(self):
        """recfromcsv with explicit dtype, converters, and usecols combined."""
        dstr = "1,5,-1,1:1\n2,8,-1,1:n\n3,3,-2,m:n\n"
        # Map the textual category in the last column to a small int code.
        dmap = {'1:1':0, '1:n':1, 'm:1':2, 'm:n':3}
        dtyp = [('e1','i4'),('e2','i4'),('e3','i2'),('n', 'i1')]
        conv = {0: int, 1: int, 2: int, 3: lambda r: dmap[r.decode()]}
        test = np.recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',',
                             names=None, converters=conv)
        control = np.rec.array([(1,5,-1,0), (2,8,-1,1), (3,3,-2,3)], dtype=dtyp)
        assert_equal(test, control)
        dtyp = [('e1','i4'),('e2','i4'),('n', 'i1')]
        test = np.recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',',
                             usecols=(0,1,3), names=None, converters=conv)
        control = np.rec.array([(1,5,0), (2,8,1), (3,3,3)], dtype=dtyp)
        assert_equal(test, control)
    def test_dtype_with_object(self):
        """Object fields work via converters; nested dtypes (including empty
        nests) raise NotImplementedError."""
        # Test using an explicit dtype with an object
        data = """ 1; 2001-01-01
                   2; 2002-01-31 """
        ndtype = [('idx', int), ('code', object)]
        func = lambda s: strptime(s.strip(), "%Y-%m-%d")
        converters = {1: func}
        test = np.genfromtxt(TextIO(data), delimiter=";", dtype=ndtype,
                             converters=converters)
        control = np.array(
            [(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))],
            dtype=ndtype)
        assert_equal(test, control)
        ndtype = [('nest', [('idx', int), ('code', object)])]
        with assert_raises_regex(NotImplementedError,
                                 'Nested fields.* not supported.*'):
            test = np.genfromtxt(TextIO(data), delimiter=";",
                                 dtype=ndtype, converters=converters)
        # nested but empty fields also aren't supported
        ndtype = [('idx', int), ('code', object), ('nest', [])]
        with assert_raises_regex(NotImplementedError,
                                 'Nested fields.* not supported.*'):
            test = np.genfromtxt(TextIO(data), delimiter=";",
                                 dtype=ndtype, converters=converters)
    def test_userconverters_with_explicit_dtype(self):
        """User converters may widen a standard dtype to a string field."""
        # Test user_converters w/ explicit (standard) dtype
        data = TextIO('skip,skip,2001-01-01,1.0,skip')
        test = np.genfromtxt(data, delimiter=",", names=None, dtype=float,
                             usecols=(2, 3), converters={2: bytes})
        control = np.array([('2001-01-01', 1.)],
                           dtype=[('', '|S10'), ('', float)])
        assert_equal(test, control)
def test_utf8_userconverters_with_explicit_dtype(self):
utf8 = b'\xcf\x96'
with temppath() as path:
with open(path, 'wb') as f:
f.write(b'skip,skip,2001-01-01' + utf8 + b',1.0,skip')
test = np.genfromtxt(path, delimiter=",", names=None, dtype=float,
usecols=(2, 3), converters={2: np.compat.unicode},
encoding='UTF-8')
control = np.array([('2001-01-01' + utf8.decode('UTF-8'), 1.)],
dtype=[('', '|U11'), ('', float)])
assert_equal(test, control)
def test_spacedelimiter(self):
# Test space delimiter
data = TextIO("1 2 3 4 5\n6 7 8 9 10")
test = np.genfromtxt(data)
control = np.array([[1., 2., 3., 4., 5.],
[6., 7., 8., 9., 10.]])
assert_equal(test, control)
def test_integer_delimiter(self):
# Test using an integer for delimiter
data = " 1 2 3\n 4 5 67\n890123 4"
test = np.genfromtxt(TextIO(data), delimiter=3)
control = np.array([[1, 2, 3], [4, 5, 67], [890, 123, 4]])
assert_equal(test, control)
def test_missing(self):
data = TextIO('1,2,3,,5\n')
test = np.genfromtxt(data, dtype=int, delimiter=',',
converters={3: lambda s: int(s or - 999)})
control = np.array([1, 2, 3, -999, 5], int)
assert_equal(test, control)
def test_missing_with_tabs(self):
# Test w/ a delimiter tab
txt = "1\t2\t3\n\t2\t\n1\t\t3"
test = np.genfromtxt(TextIO(txt), delimiter="\t",
usemask=True,)
ctrl_d = np.array([(1, 2, 3), (np.nan, 2, np.nan), (1, np.nan, 3)],)
ctrl_m = np.array([(0, 0, 0), (1, 0, 1), (0, 1, 0)], dtype=bool)
assert_equal(test.data, ctrl_d)
assert_equal(test.mask, ctrl_m)
def test_usecols(self):
# Test the selection of columns
# Select 1 column
control = np.array([[1, 2], [3, 4]], float)
data = TextIO()
np.savetxt(data, control)
data.seek(0)
test = np.genfromtxt(data, dtype=float, usecols=(1,))
assert_equal(test, control[:, 1])
#
control = np.array([[1, 2, 3], [3, 4, 5]], float)
data = TextIO()
np.savetxt(data, control)
data.seek(0)
test = np.genfromtxt(data, dtype=float, usecols=(1, 2))
assert_equal(test, control[:, 1:])
# Testing with arrays instead of tuples.
data.seek(0)
test = np.genfromtxt(data, dtype=float, usecols=np.array([1, 2]))
assert_equal(test, control[:, 1:])
def test_usecols_as_css(self):
# Test giving usecols with a comma-separated string
data = "1 2 3\n4 5 6"
test = np.genfromtxt(TextIO(data),
names="a, b, c", usecols="a, c")
ctrl = np.array([(1, 3), (4, 6)], dtype=[(_, float) for _ in "ac"])
assert_equal(test, ctrl)
def test_usecols_with_structured_dtype(self):
# Test usecols with an explicit structured dtype
data = TextIO("JOE 70.1 25.3\nBOB 60.5 27.9")
names = ['stid', 'temp']
dtypes = ['S4', 'f8']
test = np.genfromtxt(
data, usecols=(0, 2), dtype=list(zip(names, dtypes)))
assert_equal(test['stid'], [b"JOE", b"BOB"])
assert_equal(test['temp'], [25.3, 27.9])
def test_usecols_with_integer(self):
# Test usecols with an integer
test = np.genfromtxt(TextIO(b"1 2 3\n4 5 6"), usecols=0)
assert_equal(test, np.array([1., 4.]))
def test_usecols_with_named_columns(self):
# Test usecols with named columns
ctrl = np.array([(1, 3), (4, 6)], dtype=[('a', float), ('c', float)])
data = "1 2 3\n4 5 6"
kwargs = dict(names="a, b, c")
test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs)
assert_equal(test, ctrl)
test = np.genfromtxt(TextIO(data),
usecols=('a', 'c'), **kwargs)
assert_equal(test, ctrl)
def test_empty_file(self):
# Test that an empty file raises the proper warning.
with suppress_warnings() as sup:
sup.filter(message="genfromtxt: Empty input file:")
data = TextIO()
test = np.genfromtxt(data)
assert_equal(test, np.array([]))
# when skip_header > 0
test = np.genfromtxt(data, skip_header=1)
assert_equal(test, np.array([]))
def test_fancy_dtype_alt(self):
# Check that a nested dtype isn't MIA
data = TextIO('1,2,3.0\n4,5,6.0\n')
fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
test = np.genfromtxt(data, dtype=fancydtype, delimiter=',', usemask=True)
control = ma.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype)
assert_equal(test, control)
def test_shaped_dtype(self):
c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6")
dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
('block', int, (2, 3))])
x = np.genfromtxt(c, dtype=dt)
a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])],
dtype=dt)
assert_array_equal(x, a)
def test_withmissing(self):
data = TextIO('A,B\n0,1\n2,N/A')
kwargs = dict(delimiter=",", missing_values="N/A", names=True)
test = np.genfromtxt(data, dtype=None, usemask=True, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', int), ('B', int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
#
data.seek(0)
test = np.genfromtxt(data, usemask=True, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', float), ('B', float)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
def test_user_missing_values(self):
data = "A, B, C\n0, 0., 0j\n1, N/A, 1j\n-9, 2.2, N/A\n3, -99, 3j"
basekwargs = dict(dtype=None, delimiter=",", names=True,)
mdtype = [('A', int), ('B', float), ('C', complex)]
#
test = np.genfromtxt(TextIO(data), missing_values="N/A",
**basekwargs)
control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
(-9, 2.2, -999j), (3, -99, 3j)],
mask=[(0, 0, 0), (0, 1, 0), (0, 0, 1), (0, 0, 0)],
dtype=mdtype)
assert_equal(test, control)
#
basekwargs['dtype'] = mdtype
test = np.genfromtxt(TextIO(data),
missing_values={0: -9, 1: -99, 2: -999j}, usemask=True, **basekwargs)
control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
(-9, 2.2, -999j), (3, -99, 3j)],
mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
dtype=mdtype)
assert_equal(test, control)
#
test = np.genfromtxt(TextIO(data),
missing_values={0: -9, 'B': -99, 'C': -999j},
usemask=True,
**basekwargs)
control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
(-9, 2.2, -999j), (3, -99, 3j)],
mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
dtype=mdtype)
assert_equal(test, control)
def test_user_filling_values(self):
# Test with missing and filling values
ctrl = np.array([(0, 3), (4, -999)], dtype=[('a', int), ('b', int)])
data = "N/A, 2, 3\n4, ,???"
kwargs = dict(delimiter=",",
dtype=int,
names="a,b,c",
missing_values={0: "N/A", 'b': " ", 2: "???"},
filling_values={0: 0, 'b': 0, 2: -999})
test = np.genfromtxt(TextIO(data), **kwargs)
ctrl = np.array([(0, 2, 3), (4, 0, -999)],
dtype=[(_, int) for _ in "abc"])
assert_equal(test, ctrl)
#
test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs)
ctrl = np.array([(0, 3), (4, -999)], dtype=[(_, int) for _ in "ac"])
assert_equal(test, ctrl)
data2 = "1,2,*,4\n5,*,7,8\n"
test = np.genfromtxt(TextIO(data2), delimiter=',', dtype=int,
missing_values="*", filling_values=0)
ctrl = np.array([[1, 2, 0, 4], [5, 0, 7, 8]])
assert_equal(test, ctrl)
test = np.genfromtxt(TextIO(data2), delimiter=',', dtype=int,
missing_values="*", filling_values=-1)
ctrl = np.array([[1, 2, -1, 4], [5, -1, 7, 8]])
assert_equal(test, ctrl)
def test_withmissing_float(self):
data = TextIO('A,B\n0,1.5\n2,-999.00')
test = np.genfromtxt(data, dtype=None, delimiter=',',
missing_values='-999.0', names=True, usemask=True)
control = ma.array([(0, 1.5), (2, -1.)],
mask=[(False, False), (False, True)],
dtype=[('A', int), ('B', float)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
def test_with_masked_column_uniform(self):
# Test masked column
data = TextIO('1 2 3\n4 5 6\n')
test = np.genfromtxt(data, dtype=None,
missing_values='2,5', usemask=True)
control = ma.array([[1, 2, 3], [4, 5, 6]], mask=[[0, 1, 0], [0, 1, 0]])
assert_equal(test, control)
def test_with_masked_column_various(self):
# Test masked column
data = TextIO('True 2 3\nFalse 5 6\n')
test = np.genfromtxt(data, dtype=None,
missing_values='2,5', usemask=True)
control = ma.array([(1, 2, 3), (0, 5, 6)],
mask=[(0, 1, 0), (0, 1, 0)],
dtype=[('f0', bool), ('f1', bool), ('f2', int)])
assert_equal(test, control)
def test_invalid_raise(self):
# Test invalid raise
data = ["1, 1, 1, 1, 1"] * 50
for i in range(5):
data[10 * i] = "2, 2, 2, 2 2"
data.insert(0, "a, b, c, d, e")
mdata = TextIO("\n".join(data))
#
kwargs = dict(delimiter=",", dtype=None, names=True)
# XXX: is there a better way to get the return value of the
# callable in assert_warns ?
ret = {}
def f(_ret={}):
_ret['mtest'] = np.genfromtxt(mdata, invalid_raise=False, **kwargs)
assert_warns(ConversionWarning, f, _ret=ret)
mtest = ret['mtest']
assert_equal(len(mtest), 45)
assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'abcde']))
#
mdata.seek(0)
assert_raises(ValueError, np.genfromtxt, mdata,
delimiter=",", names=True)
def test_invalid_raise_with_usecols(self):
# Test invalid_raise with usecols
data = ["1, 1, 1, 1, 1"] * 50
for i in range(5):
data[10 * i] = "2, 2, 2, 2 2"
data.insert(0, "a, b, c, d, e")
mdata = TextIO("\n".join(data))
kwargs = dict(delimiter=",", dtype=None, names=True,
invalid_raise=False)
# XXX: is there a better way to get the return value of the
# callable in assert_warns ?
ret = {}
def f(_ret={}):
_ret['mtest'] = np.genfromtxt(mdata, usecols=(0, 4), **kwargs)
assert_warns(ConversionWarning, f, _ret=ret)
mtest = ret['mtest']
assert_equal(len(mtest), 45)
assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'ae']))
#
mdata.seek(0)
mtest = np.genfromtxt(mdata, usecols=(0, 1), **kwargs)
assert_equal(len(mtest), 50)
control = np.ones(50, dtype=[(_, int) for _ in 'ab'])
control[[10 * _ for _ in range(5)]] = (2, 2)
assert_equal(mtest, control)
def test_inconsistent_dtype(self):
# Test inconsistent dtype
data = ["1, 1, 1, 1, -1.1"] * 50
mdata = TextIO("\n".join(data))
converters = {4: lambda x: "(%s)" % x.decode()}
kwargs = dict(delimiter=",", converters=converters,
dtype=[(_, int) for _ in 'abcde'],)
assert_raises(ValueError, np.genfromtxt, mdata, **kwargs)
def test_default_field_format(self):
# Test default format
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.genfromtxt(TextIO(data),
delimiter=",", dtype=None, defaultfmt="f%02i")
ctrl = np.array([(0, 1, 2.3), (4, 5, 6.7)],
dtype=[("f00", int), ("f01", int), ("f02", float)])
assert_equal(mtest, ctrl)
def test_single_dtype_wo_names(self):
# Test single dtype w/o names
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.genfromtxt(TextIO(data),
delimiter=",", dtype=float, defaultfmt="f%02i")
ctrl = np.array([[0., 1., 2.3], [4., 5., 6.7]], dtype=float)
assert_equal(mtest, ctrl)
def test_single_dtype_w_explicit_names(self):
# Test single dtype w explicit names
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.genfromtxt(TextIO(data),
delimiter=",", dtype=float, names="a, b, c")
ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)],
dtype=[(_, float) for _ in "abc"])
assert_equal(mtest, ctrl)
def test_single_dtype_w_implicit_names(self):
# Test single dtype w implicit names
data = "a, b, c\n0, 1, 2.3\n4, 5, 6.7"
mtest = np.genfromtxt(TextIO(data),
delimiter=",", dtype=float, names=True)
ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)],
dtype=[(_, float) for _ in "abc"])
assert_equal(mtest, ctrl)
def test_easy_structured_dtype(self):
# Test easy structured dtype
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.genfromtxt(TextIO(data), delimiter=",",
dtype=(int, float, float), defaultfmt="f_%02i")
ctrl = np.array([(0, 1., 2.3), (4, 5., 6.7)],
dtype=[("f_00", int), ("f_01", float), ("f_02", float)])
assert_equal(mtest, ctrl)
def test_autostrip(self):
# Test autostrip
data = "01/01/2003 , 1.3, abcde"
kwargs = dict(delimiter=",", dtype=None)
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
mtest = np.genfromtxt(TextIO(data), **kwargs)
assert_(w[0].category is np.VisibleDeprecationWarning)
ctrl = np.array([('01/01/2003 ', 1.3, ' abcde')],
dtype=[('f0', '|S12'), ('f1', float), ('f2', '|S8')])
assert_equal(mtest, ctrl)
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
mtest = np.genfromtxt(TextIO(data), autostrip=True, **kwargs)
assert_(w[0].category is np.VisibleDeprecationWarning)
ctrl = np.array([('01/01/2003', 1.3, 'abcde')],
dtype=[('f0', '|S10'), ('f1', float), ('f2', '|S5')])
assert_equal(mtest, ctrl)
def test_replace_space(self):
# Test the 'replace_space' option
txt = "A.A, B (B), C:C\n1, 2, 3.14"
# Test default: replace ' ' by '_' and delete non-alphanum chars
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=None)
ctrl_dtype = [("AA", int), ("B_B", int), ("CC", float)]
ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
assert_equal(test, ctrl)
# Test: no replace, no delete
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=None,
replace_space='', deletechars='')
ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", float)]
ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
assert_equal(test, ctrl)
# Test: no delete (spaces are replaced by _)
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=None,
deletechars='')
ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", float)]
ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
assert_equal(test, ctrl)
def test_replace_space_known_dtype(self):
# Test the 'replace_space' (and related) options when dtype != None
txt = "A.A, B (B), C:C\n1, 2, 3"
# Test default: replace ' ' by '_' and delete non-alphanum chars
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=int)
ctrl_dtype = [("AA", int), ("B_B", int), ("CC", int)]
ctrl = np.array((1, 2, 3), dtype=ctrl_dtype)
assert_equal(test, ctrl)
# Test: no replace, no delete
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=int,
replace_space='', deletechars='')
ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", int)]
ctrl = np.array((1, 2, 3), dtype=ctrl_dtype)
assert_equal(test, ctrl)
# Test: no delete (spaces are replaced by _)
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=int,
deletechars='')
ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", int)]
ctrl = np.array((1, 2, 3), dtype=ctrl_dtype)
assert_equal(test, ctrl)
def test_incomplete_names(self):
    # Test w/ incomplete names: a missing entry in `names` must be
    # auto-filled with the default 'f%i' pattern.
    data = "A,,C\n0,1,2\n3,4,5"
    kwargs = dict(delimiter=",", names=True)
    # w/ dtype=None
    ctrl = np.array([(0, 1, 2), (3, 4, 5)],
                    dtype=[(_, int) for _ in ('A', 'f0', 'C')])
    test = np.genfromtxt(TextIO(data), dtype=None, **kwargs)
    assert_equal(test, ctrl)
    # w/ default dtype
    ctrl = np.array([(0, 1, 2), (3, 4, 5)],
                    dtype=[(_, float) for _ in ('A', 'f0', 'C')])
    test = np.genfromtxt(TextIO(data), **kwargs)
    # BUG FIX: the default-dtype result was previously computed but
    # never checked, so this half of the test silently did nothing.
    assert_equal(test, ctrl)
def test_names_auto_completion(self):
    # When fewer names than columns are given, the remaining fields
    # must be auto-named 'f0', 'f1', ...
    data = "1 2 3\n 4 5 6"
    expected = np.array([(1, 2, 3), (4, 5, 6)],
                        dtype=[('a', int), ('f0', float), ('f1', int)])
    result = np.genfromtxt(TextIO(data),
                           dtype=(int, float, int), names="a")
    assert_equal(result, expected)
def test_names_with_usecols_bug1636(self):
# Make sure we pick up the right names w/ usecols
data = "A,B,C,D,E\n0,1,2,3,4\n0,1,2,3,4\n0,1,2,3,4"
ctrl_names = ("A", "C", "E")
test = np.genfromtxt(TextIO(data),
dtype=(int, int, int), delimiter=",",
usecols=(0, 2, 4), names=True)
assert_equal(test.dtype.names, ctrl_names)
#
test = np.genfromtxt(TextIO(data),
dtype=(int, int, int), delimiter=",",
usecols=("A", "C", "E"), names=True)
assert_equal(test.dtype.names, ctrl_names)
#
test = np.genfromtxt(TextIO(data),
dtype=int, delimiter=",",
usecols=("A", "C", "E"), names=True)
assert_equal(test.dtype.names, ctrl_names)
def test_fixed_width_names(self):
# Test fix-width w/ names
data = " A B C\n 0 1 2.3\n 45 67 9."
kwargs = dict(delimiter=(5, 5, 4), names=True, dtype=None)
ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)],
dtype=[('A', int), ('B', int), ('C', float)])
test = np.genfromtxt(TextIO(data), **kwargs)
assert_equal(test, ctrl)
#
kwargs = dict(delimiter=5, names=True, dtype=None)
ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)],
dtype=[('A', int), ('B', int), ('C', float)])
test = np.genfromtxt(TextIO(data), **kwargs)
assert_equal(test, ctrl)
def test_filling_values(self):
# Test missing values
data = b"1, 2, 3\n1, , 5\n0, 6, \n"
kwargs = dict(delimiter=",", dtype=None, filling_values=-999)
ctrl = np.array([[1, 2, 3], [1, -999, 5], [0, 6, -999]], dtype=int)
test = np.genfromtxt(TextIO(data), **kwargs)
assert_equal(test, ctrl)
def test_comments_is_none(self):
# Github issue 329 (None was previously being converted to 'None').
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
test = np.genfromtxt(TextIO("test1,testNonetherestofthedata"),
dtype=None, comments=None, delimiter=',')
assert_(w[0].category is np.VisibleDeprecationWarning)
assert_equal(test[1], b'testNonetherestofthedata')
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
test = np.genfromtxt(TextIO("test1, testNonetherestofthedata"),
dtype=None, comments=None, delimiter=',')
assert_(w[0].category is np.VisibleDeprecationWarning)
assert_equal(test[1], b' testNonetherestofthedata')
def test_latin1(self):
latin1 = b'\xf6\xfc\xf6'
norm = b"norm1,norm2,norm3\n"
enc = b"test1,testNonethe" + latin1 + b",test3\n"
s = norm + enc + norm
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
test = np.genfromtxt(TextIO(s),
dtype=None, comments=None, delimiter=',')
assert_(w[0].category is np.VisibleDeprecationWarning)
assert_equal(test[1, 0], b"test1")
assert_equal(test[1, 1], b"testNonethe" + latin1)
assert_equal(test[1, 2], b"test3")
test = np.genfromtxt(TextIO(s),
dtype=None, comments=None, delimiter=',',
encoding='latin1')
assert_equal(test[1, 0], u"test1")
assert_equal(test[1, 1], u"testNonethe" + latin1.decode('latin1'))
assert_equal(test[1, 2], u"test3")
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
test = np.genfromtxt(TextIO(b"0,testNonethe" + latin1),
dtype=None, comments=None, delimiter=',')
assert_(w[0].category is np.VisibleDeprecationWarning)
assert_equal(test['f0'], 0)
assert_equal(test['f1'], b"testNonethe" + latin1)
def test_binary_decode_autodtype(self):
utf16 = b'\xff\xfeh\x04 \x00i\x04 \x00j\x04'
v = self.loadfunc(BytesIO(utf16), dtype=None, encoding='UTF-16')
assert_array_equal(v, np.array(utf16.decode('UTF-16').split()))
def test_utf8_byte_encoding(self):
utf8 = b"\xcf\x96"
norm = b"norm1,norm2,norm3\n"
enc = b"test1,testNonethe" + utf8 + b",test3\n"
s = norm + enc + norm
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
test = np.genfromtxt(TextIO(s),
dtype=None, comments=None, delimiter=',')
assert_(w[0].category is np.VisibleDeprecationWarning)
ctl = np.array([
[b'norm1', b'norm2', b'norm3'],
[b'test1', b'testNonethe' + utf8, b'test3'],
[b'norm1', b'norm2', b'norm3']])
assert_array_equal(test, ctl)
def test_utf8_file(self):
utf8 = b"\xcf\x96"
with temppath() as path:
with open(path, "wb") as f:
f.write((b"test1,testNonethe" + utf8 + b",test3\n") * 2)
test = np.genfromtxt(path, dtype=None, comments=None,
delimiter=',', encoding="UTF-8")
ctl = np.array([
["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"],
["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"]],
dtype=np.unicode_)
assert_array_equal(test, ctl)
# test a mixed dtype
with open(path, "wb") as f:
f.write(b"0,testNonethe" + utf8)
test = np.genfromtxt(path, dtype=None, comments=None,
delimiter=',', encoding="UTF-8")
assert_equal(test['f0'], 0)
assert_equal(test['f1'], "testNonethe" + utf8.decode("UTF-8"))
def test_utf8_file_nodtype_unicode(self):
# bytes encoding with non-latin1 -> unicode upcast
utf8 = u'\u03d6'
latin1 = u'\xf6\xfc\xf6'
# skip test if cannot encode utf8 test string with preferred
# encoding. The preferred encoding is assumed to be the default
# encoding of io.open. Will need to change this for PyTest, maybe
# using pytest.mark.xfail(raises=***).
try:
encoding = locale.getpreferredencoding()
utf8.encode(encoding)
except (UnicodeError, ImportError):
pytest.skip('Skipping test_utf8_file_nodtype_unicode, '
'unable to encode utf8 in preferred encoding')
with temppath() as path:
with io.open(path, "wt") as f:
f.write(u"norm1,norm2,norm3\n")
f.write(u"norm1," + latin1 + u",norm3\n")
f.write(u"test1,testNonethe" + utf8 + u",test3\n")
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '',
np.VisibleDeprecationWarning)
test = np.genfromtxt(path, dtype=None, comments=None,
delimiter=',')
# Check for warning when encoding not specified.
assert_(w[0].category is np.VisibleDeprecationWarning)
ctl = np.array([
["norm1", "norm2", "norm3"],
["norm1", latin1, "norm3"],
["test1", "testNonethe" + utf8, "test3"]],
dtype=np.unicode_)
assert_array_equal(test, ctl)
def test_recfromtxt(self):
#
data = TextIO('A,B\n0,1\n2,3')
kwargs = dict(delimiter=",", missing_values="N/A", names=True)
test = np.recfromtxt(data, **kwargs)
control = np.array([(0, 1), (2, 3)],
dtype=[('A', int), ('B', int)])
assert_(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = TextIO('A,B\n0,1\n2,N/A')
test = np.recfromtxt(data, dtype=None, usemask=True, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', int), ('B', int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(test.A, [0, 2])
def test_recfromcsv(self):
#
data = TextIO('A,B\n0,1\n2,3')
kwargs = dict(missing_values="N/A", names=True, case_sensitive=True)
test = np.recfromcsv(data, dtype=None, **kwargs)
control = np.array([(0, 1), (2, 3)],
dtype=[('A', int), ('B', int)])
assert_(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = TextIO('A,B\n0,1\n2,N/A')
test = np.recfromcsv(data, dtype=None, usemask=True, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', int), ('B', int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(test.A, [0, 2])
#
data = TextIO('A,B\n0,1\n2,3')
test = np.recfromcsv(data, missing_values='N/A',)
control = np.array([(0, 1), (2, 3)],
dtype=[('a', int), ('b', int)])
assert_(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = TextIO('A,B\n0,1\n2,3')
dtype = [('a', int), ('b', float)]
test = np.recfromcsv(data, missing_values='N/A', dtype=dtype)
control = np.array([(0, 1), (2, 3)],
dtype=dtype)
assert_(isinstance(test, np.recarray))
assert_equal(test, control)
#gh-10394
data = TextIO('color\n"red"\n"blue"')
test = np.recfromcsv(data, converters={0: lambda x: x.strip(b'\"')})
control = np.array([('red',), ('blue',)], dtype=[('color', (bytes, 4))])
assert_equal(test.dtype, control.dtype)
assert_equal(test, control)
def test_max_rows(self):
# Test the `max_rows` keyword argument.
data = '1 2\n3 4\n5 6\n7 8\n9 10\n'
txt = TextIO(data)
a1 = np.genfromtxt(txt, max_rows=3)
a2 = np.genfromtxt(txt)
assert_equal(a1, [[1, 2], [3, 4], [5, 6]])
assert_equal(a2, [[7, 8], [9, 10]])
# max_rows must be at least 1.
assert_raises(ValueError, np.genfromtxt, TextIO(data), max_rows=0)
# An input with several invalid rows.
data = '1 1\n2 2\n0 \n3 3\n4 4\n5 \n6 \n7 \n'
test = np.genfromtxt(TextIO(data), max_rows=2)
control = np.array([[1., 1.], [2., 2.]])
assert_equal(test, control)
# Test keywords conflict
assert_raises(ValueError, np.genfromtxt, TextIO(data), skip_footer=1,
max_rows=4)
# Test with invalid value
assert_raises(ValueError, np.genfromtxt, TextIO(data), max_rows=4)
# Test with invalid not raise
with suppress_warnings() as sup:
sup.filter(ConversionWarning)
test = np.genfromtxt(TextIO(data), max_rows=4, invalid_raise=False)
control = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])
assert_equal(test, control)
test = np.genfromtxt(TextIO(data), max_rows=5, invalid_raise=False)
control = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])
assert_equal(test, control)
# Structured array with field names.
data = 'a b\n#c d\n1 1\n2 2\n#0 \n3 3\n4 4\n5 5\n'
# Test with header, names and comments
txt = TextIO(data)
test = np.genfromtxt(txt, skip_header=1, max_rows=3, names=True)
control = np.array([(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)],
dtype=[('c', '<f8'), ('d', '<f8')])
assert_equal(test, control)
# To continue reading the same "file", don't use skip_header or
# names, and use the previously determined dtype.
test = np.genfromtxt(txt, max_rows=None, dtype=test.dtype)
control = np.array([(4.0, 4.0), (5.0, 5.0)],
dtype=[('c', '<f8'), ('d', '<f8')])
assert_equal(test, control)
def test_gft_using_filename(self):
# Test that we can load data from a filename as well as a file
# object
tgt = np.arange(6).reshape((2, 3))
linesep = ('\n', '\r\n', '\r')
for sep in linesep:
data = '0 1 2' + sep + '3 4 5'
with temppath() as name:
with open(name, 'w') as f:
f.write(data)
res = np.genfromtxt(name)
assert_array_equal(res, tgt)
def test_gft_from_gzip(self):
# Test that we can load data from a gzipped file
wanted = np.arange(6).reshape((2, 3))
linesep = ('\n', '\r\n', '\r')
for sep in linesep:
data = '0 1 2' + sep + '3 4 5'
s = BytesIO()
with gzip.GzipFile(fileobj=s, mode='w') as g:
g.write(asbytes(data))
with temppath(suffix='.gz2') as name:
with open(name, 'w') as f:
f.write(data)
assert_array_equal(np.genfromtxt(name), wanted)
def test_gft_using_generator(self):
# gft doesn't work with unicode.
def count():
for i in range(10):
yield asbytes("%d" % i)
res = np.genfromtxt(count())
assert_array_equal(res, np.arange(10))
def test_auto_dtype_largeint(self):
    # Regression test for numpy/numpy#5635 whereby large integers could
    # cause OverflowErrors.
    # Test the automatic definition of the output dtype
    #
    # 2**66 = 73786976294838206464 => should convert to float
    # 2**34 = 17179869184 => should convert to int64
    # 2**10 = 1024 => should convert to int (int32 on 32-bit systems,
    # int64 on 64-bit systems)
    data = TextIO('73786976294838206464 17179869184 1024')
    test = np.genfromtxt(data, dtype=None)
    assert_equal(test.dtype.names, ['f0', 'f1', 'f2'])
    # Each column gets the narrowest dtype able to hold its value.
    assert_(test.dtype['f0'] == float)
    assert_(test.dtype['f1'] == np.int64)
    # np.integer compares equal to the platform default integer type.
    assert_(test.dtype['f2'] == np.integer)
    assert_allclose(test['f0'], 73786976294838206464.)
    assert_equal(test['f1'], 17179869184)
    assert_equal(test['f2'], 1024)
@pytest.mark.skipif(Path is None, reason="No pathlib.Path")
class TestPathUsage:
    # Test that pathlib.Path can be used
    def test_loadtxt(self):
        # loadtxt accepts a Path pointing at a savetxt-written file.
        with temppath(suffix='.txt') as path:
            path = Path(path)
            a = np.array([[1.1, 2], [3, 4]])
            np.savetxt(path, a)
            x = np.loadtxt(path)
            assert_array_equal(x, a)

    def test_save_load(self):
        # Test that pathlib.Path instances can be used with save.
        with temppath(suffix='.npy') as path:
            path = Path(path)
            a = np.array([[1, 2], [3, 4]], int)
            np.save(path, a)
            data = np.load(path)
            assert_array_equal(data, a)

    def test_save_load_memmap(self):
        # Test that pathlib.Path instances can be loaded mem-mapped.
        with temppath(suffix='.npy') as path:
            path = Path(path)
            a = np.array([[1, 2], [3, 4]], int)
            np.save(path, a)
            data = np.load(path, mmap_mode='r')
            assert_array_equal(data, a)
            # close the mem-mapped file
            del data

    def test_save_load_memmap_readwrite(self):
        # Test that pathlib.Path instances can be written mem-mapped.
        with temppath(suffix='.npy') as path:
            path = Path(path)
            a = np.array([[1, 2], [3, 4]], int)
            np.save(path, a)
            b = np.load(path, mmap_mode='r+')
            # Mutate both the in-memory array and the memmap so the
            # final comparison checks the write actually hit the file.
            a[0][0] = 5
            b[0][0] = 5
            del b  # closes the file
            data = np.load(path)
            assert_array_equal(data, a)

    def test_savez_load(self):
        # Test that pathlib.Path instances can be used with savez.
        with temppath(suffix='.npz') as path:
            path = Path(path)
            np.savez(path, lab='place holder')
            with np.load(path) as data:
                assert_array_equal(data['lab'], 'place holder')

    def test_savez_compressed_load(self):
        # Test that pathlib.Path instances can be used with savez.
        with temppath(suffix='.npz') as path:
            path = Path(path)
            np.savez_compressed(path, lab='place holder')
            data = np.load(path)
            assert_array_equal(data['lab'], 'place holder')
            data.close()

    def test_genfromtxt(self):
        # genfromtxt round-trips an array written with savetxt via Path.
        with temppath(suffix='.txt') as path:
            path = Path(path)
            a = np.array([(1, 2), (3, 4)])
            np.savetxt(path, a)
            data = np.genfromtxt(path)
            assert_array_equal(a, data)

    def test_ndfromtxt(self):
        # Test outputting a standard ndarray
        with temppath(suffix='.txt') as path:
            path = Path(path)
            with path.open('w') as f:
                f.write(u'1 2\n3 4')
            control = np.array([[1, 2], [3, 4]], dtype=int)
            test = np.genfromtxt(path, dtype=int)
            assert_array_equal(test, control)

    def test_mafromtxt(self):
        # From `test_fancy_dtype_alt` above
        with temppath(suffix='.txt') as path:
            path = Path(path)
            with path.open('w') as f:
                f.write(u'1,2,3.0\n4,5,6.0\n')
            test = np.genfromtxt(path, delimiter=',', usemask=True)
            control = ma.array([(1.0, 2.0, 3.0), (4.0, 5.0, 6.0)])
            assert_equal(test, control)

    def test_recfromtxt(self):
        # recfromtxt accepts a Path and returns a recarray.
        with temppath(suffix='.txt') as path:
            path = Path(path)
            with path.open('w') as f:
                f.write(u'A,B\n0,1\n2,3')
            kwargs = dict(delimiter=",", missing_values="N/A", names=True)
            test = np.recfromtxt(path, **kwargs)
            control = np.array([(0, 1), (2, 3)],
                               dtype=[('A', int), ('B', int)])
            assert_(isinstance(test, np.recarray))
            assert_equal(test, control)

    def test_recfromcsv(self):
        # recfromcsv accepts a Path and returns a recarray.
        with temppath(suffix='.txt') as path:
            path = Path(path)
            with path.open('w') as f:
                f.write(u'A,B\n0,1\n2,3')
            kwargs = dict(missing_values="N/A", names=True, case_sensitive=True)
            test = np.recfromcsv(path, dtype=None, **kwargs)
            control = np.array([(0, 1), (2, 3)],
                               dtype=[('A', int), ('B', int)])
            assert_(isinstance(test, np.recarray))
            assert_equal(test, control)
def test_gzip_load():
    # Round-trip an array through np.save into a gzip stream and back.
    # FIX: use context managers so both GzipFile handles are closed
    # (the originals were leaked, and the writer relied on close() for
    # the gzip trailer only via garbage collection ordering).
    a = np.random.random((5, 5))

    s = BytesIO()
    with gzip.GzipFile(fileobj=s, mode="w") as f:
        np.save(f, a)

    s.seek(0)
    with gzip.GzipFile(fileobj=s, mode="r") as f:
        assert_array_equal(np.load(f), a)
# These next two classes encode the minimal API needed to save()/load() arrays.
# The `test_ducktyping` ensures they work correctly
class JustWriter:
    """Minimal writable file-like wrapper: exposes only write()/flush().

    Used to verify that np.save() needs nothing beyond this API.
    """

    def __init__(self, base):
        self.base = base

    def write(self, s):
        """Forward the data to the wrapped stream."""
        return self.base.write(s)

    def flush(self):
        """Flush the wrapped stream."""
        return self.base.flush()
class JustReader:
    """Minimal readable file-like wrapper: exposes only read()/seek().

    Used to verify that np.load() needs nothing beyond this API.
    """

    def __init__(self, base):
        self.base = base

    def read(self, n):
        """Read up to *n* bytes from the wrapped stream."""
        return self.base.read(n)

    def seek(self, off, whence=0):
        """Reposition the wrapped stream."""
        return self.base.seek(off, whence)
def test_ducktyping():
    # save()/load() must work through objects exposing only the minimal
    # stream API implemented by JustWriter/JustReader.
    arr = np.random.random((5, 5))
    buf = BytesIO()

    writer = JustWriter(buf)
    np.save(writer, arr)
    writer.flush()

    buf.seek(0)
    reader = JustReader(buf)
    assert_array_equal(np.load(reader), arr)
def test_gzip_loadtxt():
    # Thanks to another windows brokenness, we can't use
    # NamedTemporaryFile: a file created from this function cannot be
    # reopened by another open call. So we first put the gzipped string
    # of the test reference array, write it to a securely opened file,
    # which is then read from by the loadtxt function
    # FIX: close the GzipFile writer via a context manager so the gzip
    # trailer is guaranteed to be flushed before the buffer is read.
    s = BytesIO()
    with gzip.GzipFile(fileobj=s, mode='w') as g:
        g.write(b'1 2 3\n')

    s.seek(0)
    with temppath(suffix='.gz') as name:
        with open(name, 'wb') as f:
            f.write(s.read())
        res = np.loadtxt(name)
    s.close()

    assert_array_equal(res, [1, 2, 3])
def test_gzip_loadtxt_from_string():
    # loadtxt must accept an already-open GzipFile object.
    # FIX: manage both GzipFile handles with context managers; the
    # original leaked the reader and depended on close() ordering for
    # the writer's gzip trailer.
    s = BytesIO()
    with gzip.GzipFile(fileobj=s, mode="w") as f:
        f.write(b'1 2 3\n')

    s.seek(0)
    with gzip.GzipFile(fileobj=s, mode="r") as f:
        assert_array_equal(np.loadtxt(f), [1, 2, 3])
def test_npzfile_dict():
    # NpzFile should behave like a mapping (membership, keys(), items(),
    # iteration).  FIX: use np.load as a context manager so the NpzFile
    # (and its underlying zip handle) is closed deterministically; the
    # original leaked it.
    s = BytesIO()
    x = np.zeros((3, 3))
    y = np.zeros((3, 3))
    np.savez(s, x=x, y=y)
    s.seek(0)

    with np.load(s) as z:
        assert_('x' in z)
        assert_('y' in z)
        assert_('x' in z.keys())
        assert_('y' in z.keys())

        for f, a in z.items():
            assert_(f in ['x', 'y'])
            assert_equal(a.shape, (3, 3))

        assert_(len(z.items()) == 2)

        for f in z:
            assert_(f in ['x', 'y'])

        assert_('x' in z.keys())
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_load_refcount():
    # Check that objects returned by np.load are directly freed based on
    # their refcount, rather than needing the gc to collect them.
    f = BytesIO()
    np.savez(f, [1, 2, 3])
    f.seek(0)

    # Loading an .npz must not create reference cycles.
    with assert_no_gc_cycles():
        np.load(f)

    f.seek(0)
    dt = [("a", 'u1', 2), ("b", 'u1', 2)]
    # loadtxt with a structured dtype must likewise stay cycle-free.
    with assert_no_gc_cycles():
        x = np.loadtxt(TextIO("0 1 2 3"), dtype=dt)
        assert_equal(x, np.array([((0, 1), (2, 3))], dtype=dt))
| 38.64316 | 97 | 0.521106 |
0e5abe2f5ccd169a6fb25914bd7f747325aec507 | 10,643 | py | Python | pandas/tests/indexes/timedeltas/test_ops.py | hkennyv/pandas | 31875eb3d8a56f359c2f529f86b867572d5dfeb1 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | 1 | 2020-04-26T22:11:21.000Z | 2020-04-26T22:11:21.000Z | pandas/tests/indexes/timedeltas/test_ops.py | hkennyv/pandas | 31875eb3d8a56f359c2f529f86b867572d5dfeb1 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/tests/indexes/timedeltas/test_ops.py | hkennyv/pandas | 31875eb3d8a56f359c2f529f86b867572d5dfeb1 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | from datetime import timedelta
import numpy as np
import pytest
import pandas as pd
from pandas import Series, TimedeltaIndex, timedelta_range
import pandas._testing as tm
from pandas.tseries.offsets import DateOffset, Day, Hour
class TestTimedeltaIndexOps:
    """Index-level operations on TimedeltaIndex: value_counts/unique,
    ordering, duplicate handling, NaT behavior, equality, and the freq
    setter on the backing TimedeltaArray (``idx._data``)."""

    def test_value_counts_unique(self):
        # GH 7735
        idx = timedelta_range("1 days 09:00:00", freq="H", periods=10)
        # create repeated values, 'n'th element is repeated by n+1 times
        idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
        exp_idx = timedelta_range("1 days 18:00:00", freq="-1H", periods=10)
        expected = Series(range(10, 0, -1), index=exp_idx, dtype="int64")
        for obj in [idx, Series(idx)]:
            tm.assert_series_equal(obj.value_counts(), expected)
        expected = timedelta_range("1 days 09:00:00", freq="H", periods=10)
        tm.assert_index_equal(idx.unique(), expected)
        idx = TimedeltaIndex(
            [
                "1 days 09:00:00",
                "1 days 09:00:00",
                "1 days 09:00:00",
                "1 days 08:00:00",
                "1 days 08:00:00",
                pd.NaT,
            ]
        )
        # value_counts drops NaT by default ...
        exp_idx = TimedeltaIndex(["1 days 09:00:00", "1 days 08:00:00"])
        expected = Series([3, 2], index=exp_idx)
        for obj in [idx, Series(idx)]:
            tm.assert_series_equal(obj.value_counts(), expected)
        # ... and includes it with dropna=False.
        exp_idx = TimedeltaIndex(["1 days 09:00:00", "1 days 08:00:00", pd.NaT])
        expected = Series([3, 2, 1], index=exp_idx)
        for obj in [idx, Series(idx)]:
            tm.assert_series_equal(obj.value_counts(dropna=False), expected)
        tm.assert_index_equal(idx.unique(), exp_idx)
    def test_nonunique_contains(self):
        # GH 9512: `in` must work on indexes with duplicate values.
        for idx in map(
            TimedeltaIndex,
            (
                [0, 1, 0],
                [0, 0, -1],
                [0, -1, -1],
                ["00:01:00", "00:01:00", "00:02:00"],
                ["00:01:00", "00:01:00", "00:00:01"],
            ),
        ):
            assert idx[0] in idx
    def test_unknown_attribute(self):
        # see gh-9680: attribute access must not be shadowed by the index.
        tdi = pd.timedelta_range(start=0, periods=10, freq="1s")
        ts = pd.Series(np.random.normal(size=10), index=tdi)
        assert "foo" not in ts.__dict__.keys()
        msg = "'Series' object has no attribute 'foo'"
        with pytest.raises(AttributeError, match=msg):
            ts.foo
    def test_order(self):
        # GH 10295: sort_values must preserve/derive freq correctly.
        idx1 = TimedeltaIndex(["1 day", "2 day", "3 day"], freq="D", name="idx")
        idx2 = TimedeltaIndex(["1 hour", "2 hour", "3 hour"], freq="H", name="idx")
        for idx in [idx1, idx2]:
            ordered = idx.sort_values()
            tm.assert_index_equal(ordered, idx)
            assert ordered.freq == idx.freq
            ordered = idx.sort_values(ascending=False)
            expected = idx[::-1]
            tm.assert_index_equal(ordered, expected)
            assert ordered.freq == expected.freq
            # descending sort negates the freq step
            assert ordered.freq.n == -1
            ordered, indexer = idx.sort_values(return_indexer=True)
            tm.assert_index_equal(ordered, idx)
            tm.assert_numpy_array_equal(indexer, np.array([0, 1, 2]), check_dtype=False)
            assert ordered.freq == idx.freq
            ordered, indexer = idx.sort_values(return_indexer=True, ascending=False)
            tm.assert_index_equal(ordered, idx[::-1])
            assert ordered.freq == expected.freq
            assert ordered.freq.n == -1
        # Unordered input with duplicates: result freq must be None.
        idx1 = TimedeltaIndex(
            ["1 hour", "3 hour", "5 hour", "2 hour ", "1 hour"], name="idx1"
        )
        exp1 = TimedeltaIndex(
            ["1 hour", "1 hour", "2 hour", "3 hour", "5 hour"], name="idx1"
        )
        idx2 = TimedeltaIndex(
            ["1 day", "3 day", "5 day", "2 day", "1 day"], name="idx2"
        )
        for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
            ordered = idx.sort_values()
            tm.assert_index_equal(ordered, expected)
            assert ordered.freq is None
            ordered = idx.sort_values(ascending=False)
            tm.assert_index_equal(ordered, expected[::-1])
            assert ordered.freq is None
            ordered, indexer = idx.sort_values(return_indexer=True)
            tm.assert_index_equal(ordered, expected)
            exp = np.array([0, 4, 3, 1, 2])
            tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
            assert ordered.freq is None
            ordered, indexer = idx.sort_values(return_indexer=True, ascending=False)
            tm.assert_index_equal(ordered, expected[::-1])
            exp = np.array([2, 1, 3, 4, 0])
            tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
            assert ordered.freq is None
    def test_drop_duplicates_metadata(self, freq_sample):
        # GH 10115: drop_duplicates keeps freq on an already-unique index.
        idx = pd.timedelta_range("1 day", periods=10, freq=freq_sample, name="idx")
        result = idx.drop_duplicates()
        tm.assert_index_equal(idx, result)
        assert idx.freq == result.freq
        idx_dup = idx.append(idx)
        assert idx_dup.freq is None  # freq is reset
        result = idx_dup.drop_duplicates()
        expected = idx._with_freq(None)
        tm.assert_index_equal(expected, result)
        assert result.freq is None
    @pytest.mark.parametrize(
        "keep, expected, index",
        [
            ("first", np.concatenate(([False] * 10, [True] * 5)), np.arange(0, 10)),
            ("last", np.concatenate(([True] * 5, [False] * 10)), np.arange(5, 15)),
            (
                False,
                np.concatenate(([True] * 5, [False] * 5, [True] * 5)),
                np.arange(5, 10),
            ),
        ],
    )
    def test_drop_duplicates(self, freq_sample, keep, expected, index):
        # to check Index/Series compat
        idx = pd.timedelta_range("1 day", periods=10, freq=freq_sample, name="idx")
        idx = idx.append(idx[:5])
        tm.assert_numpy_array_equal(idx.duplicated(keep=keep), expected)
        expected = idx[~expected]
        result = idx.drop_duplicates(keep=keep)
        tm.assert_index_equal(result, expected)
        result = Series(idx).drop_duplicates(keep=keep)
        tm.assert_series_equal(result, Series(expected, index=index))
    def test_infer_freq(self, freq_sample):
        # GH#11018: freq="infer" must recover the frequency from raw i8s.
        idx = pd.timedelta_range("1", freq=freq_sample, periods=10)
        result = pd.TimedeltaIndex(idx.asi8, freq="infer")
        tm.assert_index_equal(idx, result)
        assert result.freq == freq_sample
    def test_repeat(self):
        # repeat() and np.repeat agree, and both drop the freq.
        index = pd.timedelta_range("1 days", periods=2, freq="D")
        exp = pd.TimedeltaIndex(["1 days", "1 days", "2 days", "2 days"])
        for res in [index.repeat(2), np.repeat(index, 2)]:
            tm.assert_index_equal(res, exp)
            assert res.freq is None
        index = TimedeltaIndex(["1 days", "NaT", "3 days"])
        exp = TimedeltaIndex(
            [
                "1 days",
                "1 days",
                "1 days",
                "NaT",
                "NaT",
                "NaT",
                "3 days",
                "3 days",
                "3 days",
            ]
        )
        for res in [index.repeat(3), np.repeat(index, 3)]:
            tm.assert_index_equal(res, exp)
            assert res.freq is None
    def test_nat(self):
        # NaT bookkeeping: _na_value, _can_hold_na, _isnan, hasnans, _nan_idxs.
        assert pd.TimedeltaIndex._na_value is pd.NaT
        assert pd.TimedeltaIndex([])._na_value is pd.NaT
        idx = pd.TimedeltaIndex(["1 days", "2 days"])
        assert idx._can_hold_na
        tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
        assert idx.hasnans is False
        tm.assert_numpy_array_equal(idx._nan_idxs, np.array([], dtype=np.intp))
        idx = pd.TimedeltaIndex(["1 days", "NaT"])
        assert idx._can_hold_na
        tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
        assert idx.hasnans is True
        tm.assert_numpy_array_equal(idx._nan_idxs, np.array([1], dtype=np.intp))
    def test_equals(self):
        # GH 13107
        idx = pd.TimedeltaIndex(["1 days", "2 days", "NaT"])
        assert idx.equals(idx)
        assert idx.equals(idx.copy())
        assert idx.equals(idx.astype(object))
        assert idx.astype(object).equals(idx)
        assert idx.astype(object).equals(idx.astype(object))
        assert not idx.equals(list(idx))
        assert not idx.equals(pd.Series(idx))
        idx2 = pd.TimedeltaIndex(["2 days", "1 days", "NaT"])
        assert not idx.equals(idx2)
        assert not idx.equals(idx2.copy())
        assert not idx.equals(idx2.astype(object))
        assert not idx.astype(object).equals(idx2)
        assert not idx.astype(object).equals(idx2.astype(object))
        assert not idx.equals(list(idx2))
        assert not idx.equals(pd.Series(idx2))
        # Check that we dont raise OverflowError on comparisons outside the
        # implementation range
        oob = pd.Index([timedelta(days=10 ** 6)] * 3, dtype=object)
        assert not idx.equals(oob)
        assert not idx2.equals(oob)
        # FIXME: oob.apply(np.timedelta64) incorrectly overflows
        oob2 = pd.Index([np.timedelta64(x) for x in oob], dtype=object)
        assert not idx.equals(oob2)
        assert not idx2.equals(oob2)
    @pytest.mark.parametrize("values", [["0 days", "2 days", "4 days"], []])
    @pytest.mark.parametrize("freq", ["2D", Day(2), "48H", Hour(48)])
    def test_freq_setter(self, values, freq):
        # GH 20678
        idx = TimedeltaIndex(values)
        # can set to an offset, converting from string if necessary
        idx._data.freq = freq
        assert idx.freq == freq
        assert isinstance(idx.freq, DateOffset)
        # can reset to None
        idx._data.freq = None
        assert idx.freq is None
    def test_freq_setter_errors(self):
        # GH 20678
        idx = TimedeltaIndex(["0 days", "2 days", "4 days"])
        # setting with an incompatible freq
        msg = (
            "Inferred frequency 2D from passed values does not conform to "
            "passed frequency 5D"
        )
        with pytest.raises(ValueError, match=msg):
            idx._data.freq = "5D"
        # setting with a non-fixed frequency
        msg = r"<2 \* BusinessDays> is a non-fixed frequency"
        with pytest.raises(ValueError, match=msg):
            idx._data.freq = "2B"
        # setting with non-freq string
        with pytest.raises(ValueError, match="Invalid frequency"):
            idx._data.freq = "foo"
| 36.324232 | 88 | 0.573804 |
be37b022b1aec1a75fd5eff3b017f93d3ebeda10 | 3,568 | py | Python | p2p/exchange/manager.py | dendisuhubdy/trinity | 001664781259c7dd0779a0ef6f822451b608ded4 | [
"MIT"
] | 1 | 2021-04-07T07:33:28.000Z | 2021-04-07T07:33:28.000Z | p2p/exchange/manager.py | dendisuhubdy/trinity | 001664781259c7dd0779a0ef6f822451b608ded4 | [
"MIT"
] | null | null | null | p2p/exchange/manager.py | dendisuhubdy/trinity | 001664781259c7dd0779a0ef6f822451b608ded4 | [
"MIT"
] | null | null | null | import asyncio
from concurrent import futures
import logging
from typing import (
Callable,
TypeVar,
)
from eth_utils import (
ValidationError,
)
from p2p.abc import ConnectionAPI
from p2p.exceptions import PeerConnectionLost
from .abc import (
ExchangeManagerAPI,
NormalizerAPI,
PerformanceTrackerAPI,
ResponseCandidateStreamAPI,
)
from .typing import TRequestCommand, TResponseCommand
TResult = TypeVar('TResult')
class ExchangeManager(ExchangeManagerAPI[TRequestCommand, TResponseCommand, TResult]):
    """Drives a single request/response exchange over a peer connection:
    sends the request via the response stream, filters candidate payloads,
    normalizes and validates the first matching one, and records timing
    stats on the tracker."""
    logger = logging.getLogger('p2p.exchange.ExchangeManager')
    # Stream of candidate response payloads; set in __init__.
    _response_stream: ResponseCandidateStreamAPI[TRequestCommand, TResponseCommand] = None
    def __init__(self,
                 connection: ConnectionAPI,
                 response_stream: ResponseCandidateStreamAPI[TRequestCommand, TResponseCommand],
                 ) -> None:
        self._connection = connection
        self._response_stream = response_stream
    async def get_result(
            self,
            request: TRequestCommand,
            normalizer: NormalizerAPI[TResponseCommand, TResult],
            validate_result: Callable[[TResult], None],
            payload_validator: Callable[[TResponseCommand], None],
            tracker: PerformanceTrackerAPI[TRequestCommand, TResult],
            timeout: float = None) -> TResult:
        """Send ``request`` and return the first normalized, validated
        result.

        Raises PeerConnectionLost if the stream is dead before the request
        is sent, or if it closes without yielding a valid response.
        ValidationError from either validator skips the candidate payload
        rather than failing the call.
        """
        stream = self._response_stream
        if not stream.is_alive:
            raise PeerConnectionLost(
                f"Response stream closed before sending request to {self._connection}"
            )
        loop = asyncio.get_event_loop()
        # Slow normalizers are offloaded to a thread so the event loop is
        # not blocked while decoding large responses.
        with futures.ThreadPoolExecutor() as executor:
            async for payload in stream.payload_candidates(request, tracker, timeout=timeout):
                try:
                    payload_validator(payload)
                    if normalizer.is_normalization_slow:
                        result = await loop.run_in_executor(
                            executor,
                            normalizer.normalize_result,
                            payload
                        )
                    else:
                        result = normalizer.normalize_result(payload)
                    validate_result(result)
                except ValidationError as err:
                    self.logger.debug(
                        "Response validation failed for pending %s request from connection %s: %s",
                        stream.response_cmd_name,
                        self._connection,
                        err,
                    )
                    # If this response was just for the wrong request, we'll
                    # catch the right one later. Otherwise, this request will
                    # eventually time out.
                    continue
                else:
                    tracker.record_response(
                        stream.last_response_time,
                        request,
                        result,
                    )
                    # Mark the exchange finished so the stream can accept
                    # the next request.
                    stream.complete_request()
                    return result
        raise PeerConnectionLost(f"Response stream of {self._connection} was apparently closed")
    @property
    def service(self) -> ResponseCandidateStreamAPI[TRequestCommand, TResponseCommand]:
        """
        The service that needs to be running for calls to execute properly.
        """
        return self._response_stream
    @property
    def is_requesting(self) -> bool:
        # True while a request sent through the stream is still pending.
        return self._response_stream is not None and self._response_stream.is_pending
| 33.980952 | 99 | 0.589406 |
ddd956e5c3167cd42b79fefa50d9d1b6df083bad | 3,163 | py | Python | music_player/music_player/settings.py | yashrahurikar23/drf-spotify-houseparty-app | 150842a52671b9f31cb878c85667c7849ff3c10f | [
"MIT"
] | null | null | null | music_player/music_player/settings.py | yashrahurikar23/drf-spotify-houseparty-app | 150842a52671b9f31cb878c85667c7849ff3c10f | [
"MIT"
] | null | null | null | music_player/music_player/settings.py | yashrahurikar23/drf-spotify-houseparty-app | 150842a52671b9f31cb878c85667c7849ff3c10f | [
"MIT"
] | null | null | null | """
Django settings for music_player project.
Generated by 'django-admin startproject' using Django 3.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is hard-coded and committed to version control;
# load it from an environment variable before any deployment.
SECRET_KEY = '(e!)id+%u%f*d15g%9y8tzcjxe!&==w+_y*m1(37w5kg=fbn4t'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty list is fine while DEBUG=True; must list real hosts in production.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'api.apps.ApiConfig',
    'frontend.apps.FrontendConfig',
    'rest_framework'
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'music_player.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'music_player.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# Development default: file-backed SQLite next to the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
e9c882c00d145d573ab2ef725685cc79448f178f | 783 | py | Python | project/db/models/blogs.py | samsonosiomwan/Devs-Prime-Api | 7b43078bb1f848f17f85e8bb94292d1b776eee92 | [
"MIT"
] | null | null | null | project/db/models/blogs.py | samsonosiomwan/Devs-Prime-Api | 7b43078bb1f848f17f85e8bb94292d1b776eee92 | [
"MIT"
] | 1 | 2021-10-21T22:13:56.000Z | 2021-10-21T22:13:57.000Z | project/db/models/blogs.py | Favourkass/Devsprime-api | 2414a2541efeb76b6a7ebb26c2d05a3bfead153c | [
"MIT"
] | null | null | null | from django.db import models
import uuid
from django.conf import settings
from .user import User
class Blog(models.Model):
    """A blog post authored by a user, with optional cover image."""
    # Fallback cover image used when the author does not supply one.
    DEFAULT_COVER_IMG_URL = 'https://res.cloudinary.com/devsprime/image/upload/v1623419362/Blogs/blog_ompj6m.jpg'
    # UUID primary key, generated server-side and immutable.
    id = models.UUIDField(unique=True, primary_key=True,
                          default=uuid.uuid4, editable=False)
    # Author; deleting the user cascades to their posts.
    user_id = models.ForeignKey(User, on_delete=models.CASCADE)
    title = models.CharField(max_length=255, unique=True)
    cover_img = models.URLField(default=DEFAULT_COVER_IMG_URL)
    short_desc = models.TextField(blank=True)
    detail = models.TextField()
    # Timestamps maintained automatically on insert / every save.
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    def __str__(self):
        # Human-readable label in the admin and shell.
        return self.title
2d54c0d597d485791d552aae5b679dffacd5d2e5 | 1,158 | py | Python | applications/ATOM/utils/compute_profile.py | jonesholger/lbann | 3214f189a1438565d695542e076c4fa8e7332d34 | [
"Apache-2.0"
] | 194 | 2016-07-19T15:40:21.000Z | 2022-03-19T08:06:10.000Z | applications/ATOM/utils/compute_profile.py | jonesholger/lbann | 3214f189a1438565d695542e076c4fa8e7332d34 | [
"Apache-2.0"
] | 1,021 | 2016-07-19T12:56:31.000Z | 2022-03-29T00:41:47.000Z | applications/ATOM/utils/compute_profile.py | jonesholger/lbann | 3214f189a1438565d695542e076c4fa8e7332d34 | [
"Apache-2.0"
] | 74 | 2016-07-28T18:24:00.000Z | 2022-01-24T19:41:04.000Z | import sys
# Compute a sequence-length profile of a comma-delimited data file:
# writes "length count" pairs to output_fn (for plotting) and reports
# the longest sequence found. Lengths exclude the <bos>/<eos> tokens.
if len(sys.argv) != 3:
    print('usage:')
    print('  ' + sys.argv[0] + ' input_fn output_fn')
    print('function:')
    print('  writes data for plotting num_sequences as a function')
    print('  of sequence length to "output_fn"; prints length')
    print('  of longest sequence to cout (add two for <bos>, <eos>)')
    print('delimiter:')
    print('  is hard-coded for comma\n')
    sys.exit(9)  # sys.exit is safe under `python -m` / frozen builds; bare exit() is not

longest = 0
longest_seq = ''
longest_line_num = 0
data = {}

# Context managers close both files even on error; the original leaked
# the input handle entirely and closed the output manually.
with open(sys.argv[1]) as infile:
    infile.readline()  # discard header
    for j, line in enumerate(infile, start=1):
        if j % 1000 == 0:
            # // keeps the progress message as '1K', '2K', ... instead of
            # the unintended '1.0K' float output of the original.
            print(str(j // 1000) + 'K lines processed')
        seq = line.split(',')[0]  # first comma-delimited field is the sequence
        x = len(seq)
        data[x] = data.get(x, 0) + 1
        if x > longest:
            longest = x
            longest_seq = seq
            longest_line_num = j - 1  # 0-based line number within the data rows

with open(sys.argv[2], 'w') as out:
    # Emit (length, count) pairs sorted by ascending length.
    for length, count in sorted(data.items()):
        out.write(str(length) + ' ' + str(count) + '\n')

print('\noutput written to: ', sys.argv[2] + '\n')
print('\nlongest sequence length: ' + str(longest))
print('line number of longest:  ' + str(longest_line_num))
# The original mistakenly labeled this line 'longest sequence length'.
print('longest sequence:        ' + longest_seq)
ec80fe97a457f81ae79726b1a19934210a7e93b5 | 38,270 | py | Python | tools/scons/scons-local-1.2.0/SCons/SConf.py | rohankumardubey/node | d49d53fd499f7cf68fdfcc7d0c9d401e4e4407fb | [
"MIT"
] | 48 | 2015-01-09T20:39:35.000Z | 2021-12-21T21:17:52.000Z | tools/scons/scons-local-1.2.0/SCons/SConf.py | jdunck/node | d1f69ef35dac810530df8249d523add168e09f03 | [
"MIT"
] | 2 | 2016-02-05T10:27:37.000Z | 2019-01-22T16:22:51.000Z | tools/scons/scons-local-1.2.0/SCons/SConf.py | jdunck/node | d1f69ef35dac810530df8249d523add168e09f03 | [
"MIT"
] | 8 | 2015-01-12T17:14:36.000Z | 2018-09-15T14:10:27.000Z | """SCons.SConf
Autoconf-like configuration support.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/SConf.py 3842 2008/12/20 22:59:52 scons"
import os
import re
import string
import StringIO
import sys
import traceback
import types
import SCons.Action
import SCons.Builder
import SCons.Errors
import SCons.Job
import SCons.Node.FS
import SCons.Taskmaster
import SCons.Util
import SCons.Warnings
import SCons.Conftest
from SCons.Debug import Trace
# Turn off the Conftest error logging
SCons.Conftest.LogInputFiles = 0
SCons.Conftest.LogErrorMessages = 0
# Set
build_type = None
build_types = ['clean', 'help']
def SetBuildType(type):
    """Record the current build type in the module global ``build_type``.

    Expected values are those in ``build_types`` ('clean', 'help').
    The parameter name shadows the ``type`` builtin; kept for API
    compatibility.
    """
    global build_type
    build_type = type
# to be set, if we are in dry-run mode
dryrun = 0
AUTO=0 # use SCons dependency scanning for up-to-date checks
FORCE=1 # force all tests to be rebuilt
CACHE=2 # force all tests to be taken from cache (raise an error, if necessary)
cache_mode = AUTO
def SetCacheMode(mode):
"""Set the Configure cache mode. mode must be one of "auto", "force",
or "cache"."""
global cache_mode
if mode == "auto":
cache_mode = AUTO
elif mode == "force":
cache_mode = FORCE
elif mode == "cache":
cache_mode = CACHE
else:
raise ValueError, "SCons.SConf.SetCacheMode: Unknown mode " + mode
progress_display = SCons.Util.display # will be overwritten by SCons.Script
def SetProgressDisplay(display):
"""Set the progress display to use (called from SCons.Script)"""
global progress_display
progress_display = display
SConfFS = None
_ac_build_counter = 0 # incremented, whenever TryBuild is called
_ac_config_logs = {} # all config.log files created in this build
_ac_config_hs = {} # all config.h files created in this build
sconf_global = None # current sconf object
def _createConfigH(target, source, env):
    """Builder action: write the config.h target wrapped in an include
    guard derived from the target's path (non-identifier chars -> '_')."""
    t = open(str(target[0]), "w")
    defname = re.sub('[^A-Za-z0-9_]', '_', string.upper(str(target[0])))
    t.write("""#ifndef %(DEFNAME)s_SEEN
#define %(DEFNAME)s_SEEN
""" % {'DEFNAME' : defname})
    # The body is the accumulated #define text collected during the tests.
    t.write(source[0].get_contents())
    t.write("""
#endif /* %(DEFNAME)s_SEEN */
""" % {'DEFNAME' : defname})
    t.close()
def _stringConfigH(target, source, env):
    """Pretty-print string for the config.h builder action."""
    return "scons: Configure: creating " + str(target[0])
def CreateConfigHBuilder(env):
    """Called just before the building targets phase begins."""
    # Nothing to do unless some SConf run registered a config.h file.
    if len(_ac_config_hs) == 0:
        return
    action = SCons.Action.Action(_createConfigH,
                                 _stringConfigH)
    sconfigHBld = SCons.Builder.Builder(action=action)
    env.Append( BUILDERS={'SConfigHBuilder':sconfigHBld} )
    # Schedule one build per registered config.h, feeding it the
    # collected #define text as a Value node.
    for k in _ac_config_hs.keys():
        env.SConfigHBuilder(k, env.Value(_ac_config_hs[k]))
class SConfWarning(SCons.Warnings.Warning):
    """Warning category for configure-context problems."""
    pass
SCons.Warnings.enableWarningClass(SConfWarning)
# some error definitions
class SConfError(SCons.Errors.UserError):
    """Base class for all errors raised from a Configure context."""
    def __init__(self,msg):
        SCons.Errors.UserError.__init__(self,msg)
class ConfigureDryRunError(SConfError):
    """Raised when a file or directory needs to be updated during a Configure
    process, but the user requested a dry-run"""
    def __init__(self,target):
        # Distinguish the directory-creation case from the test-update case
        # purely for the error message.
        if not isinstance(target, SCons.Node.FS.File):
            msg = 'Cannot create configure directory "%s" within a dry-run.' % str(target)
        else:
            msg = 'Cannot update configure test "%s" within a dry-run.' % str(target)
        SConfError.__init__(self,msg)
class ConfigureCacheError(SConfError):
    """Raised when a use explicitely requested the cache feature, but the test
    is run the first time."""
    def __init__(self,target):
        SConfError.__init__(self, '"%s" is not yet built and cache is forced.' % str(target))
# define actions for building text files
# define actions for building text files
def _createSource( target, source, env ):
    """Builder action: dump the source Value node's text into the target."""
    fd = open(str(target[0]), "w")
    fd.write(source[0].get_contents())
    fd.close()
def _stringSource( target, source, env ):
    """Pretty-print string: target name plus the source text, with each
    line prefixed by ' |' for the configure log."""
    return (str(target[0]) + ' <-\n  |' +
            string.replace( source[0].get_contents(),
                            '\n', "\n  |" ) )
# python 2.2 introduces types.BooleanType
BooleanTypes = [types.IntType]
if hasattr(types, 'BooleanType'): BooleanTypes.append(types.BooleanType)
class SConfBuildInfo(SCons.Node.FS.FileBuildInfo):
    """
    Special build info for targets of configure tests. Additional members
    are result (did the builder succeed last time?) and string, which
    contains messages of the original build phase.
    """
    result = None # -> 0/None -> no error, != 0 error
    string = None # the stdout / stderr output when building the target
    def set_build_result(self, result, string):
        # Record the outcome and captured output of the configure build.
        self.result = result
        self.string = string
class Streamer:
    """
    'Sniffer' for a file-like writable object. Similar to the unix tool tee.
    Everything written is copied both to the wrapped stream (if any) and
    to an internal buffer retrievable via getvalue().
    """
    def __init__(self, orig):
        # orig may be None, in which case output is only buffered.
        self.orig = orig
        self.s = StringIO.StringIO()
    def write(self, str):
        # (parameter name shadows the str builtin; kept for compatibility)
        if self.orig:
            self.orig.write(str)
        self.s.write(str)
    def writelines(self, lines):
        # Note: unlike file.writelines, this appends a newline per line.
        for l in lines:
            self.write(l + '\n')
    def getvalue(self):
        """
        Return everything written to orig since the Streamer was created.
        """
        return self.s.getvalue()
    def flush(self):
        if self.orig:
            self.orig.flush()
        self.s.flush()
class SConfBuildTask(SCons.Taskmaster.Task):
    """
    This is almost the same as SCons.Script.BuildTask. Handles SConfErrors
    correctly and knows about the current cache_mode.
    """
    def display(self, message):
        # Write a line to the active configure log, if one is open.
        if sconf_global.logstream:
            sconf_global.logstream.write("scons: Configure: " + message + "\n")
    def display_cached_string(self, bi):
        """
        Logs the original builder messages, given the SConfBuildInfo instance
        bi.
        """
        if not isinstance(bi, SConfBuildInfo):
            SCons.Warnings.warn(SConfWarning,
              "The stored build information has an unexpected class: %s" % bi.__class__)
        else:
            self.display("The original builder output was:\n" +
                         string.replace("  |" + str(bi.string),
                                        "\n", "\n  |"))
    def failed(self):
        # check, if the reason was a ConfigureDryRunError or a
        # ConfigureCacheError and if yes, reraise the exception
        exc_type = self.exc_info()[0]
        if issubclass(exc_type, SConfError):
            raise
        elif issubclass(exc_type, SCons.Errors.BuildError):
            # we ignore Build Errors (occurs, when a test doesn't pass)
            # Clear the exception to prevent the contained traceback
            # to build a reference cycle.
            self.exc_clear()
        else:
            self.display('Caught exception while building "%s":\n' %
                         self.targets[0])
            try:
                excepthook = sys.excepthook
            except AttributeError:
                # Earlier versions of Python don't have sys.excepthook...
                def excepthook(type, value, tb):
                    traceback.print_tb(tb)
                    print type, value
            apply(excepthook, self.exc_info())
        return SCons.Taskmaster.Task.failed(self)
    def collect_node_states(self):
        # returns (is_up_to_date, cached_error, cachable)
        # where is_up_to_date is 1, if the node(s) are up_to_date
        #       cached_error  is 1, if the node(s) are up_to_date, but the
        #                           build will fail
        #       cachable      is 0, if some nodes are not in our cache
        T = 0
        changed = False
        cached_error = False
        cachable = True
        for t in self.targets:
            if T: Trace('%s' % (t))
            bi = t.get_stored_info().binfo
            if isinstance(bi, SConfBuildInfo):
                if T: Trace(': SConfBuildInfo')
                if cache_mode == CACHE:
                    # Forced-cache mode: trust the stored result outright.
                    t.set_state(SCons.Node.up_to_date)
                    if T: Trace(': set_state(up_to-date)')
                else:
                    if T: Trace(': get_state() %s' % t.get_state())
                    if T: Trace(': changed() %s' % t.changed())
                    if (t.get_state() != SCons.Node.up_to_date and t.changed()):
                        changed = True
                    if T: Trace(': changed %s' % changed)
                    cached_error = cached_error or bi.result
            else:
                if T: Trace(': else')
                # the node hasn't been built in a SConf context or doesn't
                # exist
                cachable = False
                changed = ( t.get_state() != SCons.Node.up_to_date )
                if T: Trace(': changed %s' % changed)
            if T: Trace('\n')
        return (not changed, cached_error, cachable)
    def execute(self):
        # Build one configure-test target, honoring cache_mode/dryrun and
        # persisting the outcome (as SConfBuildInfo) in the sconsign file.
        if not self.targets[0].has_builder():
            return
        sconf = sconf_global
        is_up_to_date, cached_error, cachable = self.collect_node_states()
        if cache_mode == CACHE and not cachable:
            raise ConfigureCacheError(self.targets[0])
        elif cache_mode == FORCE:
            is_up_to_date = 0
        if cached_error and is_up_to_date:
            self.display("Building \"%s\" failed in a previous run and all "
                         "its sources are up to date." % str(self.targets[0]))
            binfo = self.targets[0].get_stored_info().binfo
            self.display_cached_string(binfo)
            raise SCons.Errors.BuildError # will be 'caught' in self.failed
        elif is_up_to_date:
            self.display("\"%s\" is up to date." % str(self.targets[0]))
            binfo = self.targets[0].get_stored_info().binfo
            self.display_cached_string(binfo)
        elif dryrun:
            raise ConfigureDryRunError(self.targets[0])
        else:
            # note stdout and stderr are the same here
            s = sys.stdout = sys.stderr = Streamer(sys.stdout)
            try:
                env = self.targets[0].get_build_env()
                env['PSTDOUT'] = env['PSTDERR'] = s
                try:
                    sconf.cached = 0
                    self.targets[0].build()
                finally:
                    # Always restore the real streams, even if build() raised.
                    sys.stdout = sys.stderr = env['PSTDOUT'] = \
                                 env['PSTDERR'] = sconf.logstream
            except KeyboardInterrupt:
                raise
            except SystemExit:
                exc_value = sys.exc_info()[1]
                raise SCons.Errors.ExplicitExit(self.targets[0],exc_value.code)
            except Exception, e:
                # Record the failure (with captured output) before re-raising.
                for t in self.targets:
                    binfo = t.get_binfo()
                    binfo.__class__ = SConfBuildInfo
                    binfo.set_build_result(1, s.getvalue())
                    sconsign_entry = SCons.SConsign.SConsignEntry()
                    sconsign_entry.binfo = binfo
                    #sconsign_entry.ninfo = self.get_ninfo()
                    # We'd like to do this as follows:
                    #    t.store_info(binfo)
                    # However, we need to store it as an SConfBuildInfo
                    # object, and store_info() will turn it into a
                    # regular FileNodeInfo if the target is itself a
                    # regular File.
                    sconsign = t.dir.sconsign()
                    sconsign.set_entry(t.name, sconsign_entry)
                    sconsign.merge()
                raise e
            else:
                # Record the success (with captured output) the same way.
                for t in self.targets:
                    binfo = t.get_binfo()
                    binfo.__class__ = SConfBuildInfo
                    binfo.set_build_result(0, s.getvalue())
                    sconsign_entry = SCons.SConsign.SConsignEntry()
                    sconsign_entry.binfo = binfo
                    #sconsign_entry.ninfo = self.get_ninfo()
                    # We'd like to do this as follows:
                    #    t.store_info(binfo)
                    # However, we need to store it as an SConfBuildInfo
                    # object, and store_info() will turn it into a
                    # regular FileNodeInfo if the target is itself a
                    # regular File.
                    sconsign = t.dir.sconsign()
                    sconsign.set_entry(t.name, sconsign_entry)
                    sconsign.merge()
class SConfBase:
    """This is simply a class to represent a configure context. After
    creating a SConf object, you can call any tests. After finished with your
    tests, be sure to call the Finish() method, which returns the modified
    environment.
    Some words about caching: In most cases, it is not necessary to cache
    Test results explicitely. Instead, we use the scons dependency checking
    mechanism. For example, if one wants to compile a test program
    (SConf.TryLink), the compiler is only called, if the program dependencies
    have changed. However, if the program could not be compiled in a former
    SConf run, we need to explicitely cache this error.
    """

    def __init__(self, env, custom_tests = {}, conf_dir='$CONFIGUREDIR',
                 log_file='$CONFIGURELOG', config_h = None, _depth = 0):
        """Constructor. Pass additional tests in the custom_tests-dictinary,
        e.g. custom_tests={'CheckPrivate':MyPrivateTest}, where MyPrivateTest
        defines a custom test.
        Note also the conf_dir and log_file arguments (you may want to
        build tests in the VariantDir, not in the SourceDir)
        """
        global SConfFS
        if not SConfFS:
            # Fall back to a fresh FS rooted at the environment's top dir.
            SConfFS = SCons.Node.FS.default_fs or \
                      SCons.Node.FS.FS(env.fs.pathTop)
        if not sconf_global is None:
            # NOTE(review): Python-2 tuple-raise form; raises UserError.
            raise (SCons.Errors.UserError,
                   "Only one SConf object may be active at one time")
        self.env = env
        if log_file != None:
            log_file = SConfFS.File(env.subst(log_file))
        self.logfile = log_file
        self.logstream = None
        self.lastTarget = None
        self.depth = _depth
        self.cached = 0 # will be set, if all test results are cached
        # add default tests
        default_tests = {
            'CheckCC'            : CheckCC,
            'CheckCXX'           : CheckCXX,
            'CheckSHCC'          : CheckSHCC,
            'CheckSHCXX'         : CheckSHCXX,
            'CheckFunc'          : CheckFunc,
            'CheckType'          : CheckType,
            'CheckTypeSize'      : CheckTypeSize,
            'CheckDeclaration'   : CheckDeclaration,
            'CheckHeader'        : CheckHeader,
            'CheckCHeader'       : CheckCHeader,
            'CheckCXXHeader'     : CheckCXXHeader,
            'CheckLib'           : CheckLib,
            'CheckLibWithHeader' : CheckLibWithHeader,
        }
        self.AddTests(default_tests)
        self.AddTests(custom_tests)
        self.confdir = SConfFS.Dir(env.subst(conf_dir))
        if not config_h is None:
            config_h = SConfFS.File(config_h)
        self.config_h = config_h
        self._startup()

    def Finish(self):
        """Call this method after finished with your tests:
        env = sconf.Finish()
        """
        self._shutdown()
        return self.env

    def Define(self, name, value = None, comment = None):
        """
        Define a pre processor symbol name, with the optional given value in the
        current config header.
        If value is None (default), then #define name is written. If value is not
        none, then #define name value is written.
        comment is a string which will be put as a C comment in the
        header, to explain the meaning of the value (appropriate C comments /* and
        */ will be put automatically."""
        lines = []
        if comment:
            comment_str = "/* %s */" % comment
            lines.append(comment_str)
        if value is not None:
            define_str = "#define %s %s" % (name, value)
        else:
            define_str = "#define %s" % name
        lines.append(define_str)
        lines.append('')
        # Accumulate text; the config header file is written at shutdown.
        self.config_h_text = self.config_h_text + string.join(lines, '\n')

    def BuildNodes(self, nodes):
        """
        Tries to build the given nodes immediately. Returns 1 on success,
        0 on error.
        """
        if self.logstream != None:
            # override stdout / stderr to write in log file
            oldStdout = sys.stdout
            sys.stdout = self.logstream
            oldStderr = sys.stderr
            sys.stderr = self.logstream

        # the engine assumes the current path is the SConstruct directory ...
        old_fs_dir = SConfFS.getcwd()
        old_os_dir = os.getcwd()
        SConfFS.chdir(SConfFS.Top, change_os_dir=1)

        # Because we take responsibility here for writing out our
        # own .sconsign info (see SConfBuildTask.execute(), above),
        # we override the store_info() method with a null place-holder
        # so we really control how it gets written.
        for n in nodes:
            n.store_info = n.do_not_store_info

        ret = 1

        try:
            # ToDo: use user options for calc
            save_max_drift = SConfFS.get_max_drift()
            SConfFS.set_max_drift(0)
            tm = SCons.Taskmaster.Taskmaster(nodes, SConfBuildTask)
            # we don't want to build tests in parallel
            jobs = SCons.Job.Jobs(1, tm )
            jobs.run()
            for n in nodes:
                state = n.get_state()
                if (state != SCons.Node.executed and
                    state != SCons.Node.up_to_date):
                    # the node could not be built. we return 0 in this case
                    ret = 0
        finally:
            # Always restore drift setting, working directories and streams.
            SConfFS.set_max_drift(save_max_drift)
            os.chdir(old_os_dir)
            SConfFS.chdir(old_fs_dir, change_os_dir=0)
            if self.logstream != None:
                # restore stdout / stderr
                sys.stdout = oldStdout
                sys.stderr = oldStderr
        return ret

    def pspawn_wrapper(self, sh, escape, cmd, args, env):
        """Wrapper function for handling piped spawns.
        This looks to the calling interface (in Action.py) like a "normal"
        spawn, but associates the call with the PSPAWN variable from
        the construction environment and with the streams to which we
        want the output logged.  This gets slid into the construction
        environment as the SPAWN variable so Action.py doesn't have to
        know or care whether it's spawning a piped command or not.
        """
        return self.pspawn(sh, escape, cmd, args, env, self.logstream, self.logstream)

    def TryBuild(self, builder, text = None, extension = ""):
        """Low level TryBuild implementation. Normally you don't need to
        call that - you can use TryCompile / TryLink / TryRun instead
        """
        global _ac_build_counter

        # Make sure we have a PSPAWN value, and save the current
        # SPAWN value.
        try:
            self.pspawn = self.env['PSPAWN']
        except KeyError:
            raise SCons.Errors.UserError('Missing PSPAWN construction variable.')
        try:
            save_spawn = self.env['SPAWN']
        except KeyError:
            raise SCons.Errors.UserError('Missing SPAWN construction variable.')

        nodesToBeBuilt = []

        # Each conftest gets a unique file name so results can be cached.
        f = "conftest_" + str(_ac_build_counter)
        pref = self.env.subst( builder.builder.prefix )
        suff = self.env.subst( builder.builder.suffix )
        target = self.confdir.File(pref + f + suff)

        try:
            # Slide our wrapper into the construction environment as
            # the SPAWN function.
            self.env['SPAWN'] = self.pspawn_wrapper
            sourcetext = self.env.Value(text)

            if text != None:
                textFile = self.confdir.File(f + extension)
                textFileNode = self.env.SConfSourceBuilder(target=textFile,
                                                           source=sourcetext)
                nodesToBeBuilt.extend(textFileNode)
                source = textFileNode
            else:
                source = None

            nodes = builder(target = target, source = source)
            if not SCons.Util.is_List(nodes):
                nodes = [nodes]
            nodesToBeBuilt.extend(nodes)

            result = self.BuildNodes(nodesToBeBuilt)

        finally:
            # Restore the user's SPAWN even if the build blew up.
            self.env['SPAWN'] = save_spawn

        _ac_build_counter = _ac_build_counter + 1
        if result:
            self.lastTarget = nodes[0]
        else:
            self.lastTarget = None

        return result

    def TryAction(self, action, text = None, extension = ""):
        """Tries to execute the given action with optional source file
        contents <text> and optional source file extension <extension>,
        Returns the status (0 : failed, 1 : ok) and the contents of the
        output file.
        """
        builder = SCons.Builder.Builder(action=action)
        # Temporarily register the builder so TryBuild can find it by name.
        self.env.Append( BUILDERS = {'SConfActionBuilder' : builder} )
        ok = self.TryBuild(self.env.SConfActionBuilder, text, extension)
        del self.env['BUILDERS']['SConfActionBuilder']
        if ok:
            outputStr = self.lastTarget.get_contents()
            return (1, outputStr)
        return (0, "")

    def TryCompile( self, text, extension):
        """Compiles the program given in text to an env.Object, using extension
        as file extension (e.g. '.c'). Returns 1, if compilation was
        successful, 0 otherwise. The target is saved in self.lastTarget (for
        further processing).
        """
        return self.TryBuild(self.env.Object, text, extension)

    def TryLink( self, text, extension ):
        """Compiles the program given in text to an executable env.Program,
        using extension as file extension (e.g. '.c'). Returns 1, if
        compilation was successful, 0 otherwise. The target is saved in
        self.lastTarget (for further processing).
        """
        return self.TryBuild(self.env.Program, text, extension )

    def TryRun(self, text, extension ):
        """Compiles and runs the program given in text, using extension
        as file extension (e.g. '.c'). Returns (1, outputStr) on success,
        (0, '') otherwise. The target (a file containing the program's stdout)
        is saved in self.lastTarget (for further processing).
        """
        ok = self.TryLink(text, extension)
        if( ok ):
            prog = self.lastTarget
            pname = str(prog)
            output = SConfFS.File(pname+'.out')
            # Run the compiled program and redirect its stdout into a file.
            node = self.env.Command(output, prog, [ [ pname, ">", "${TARGET}"] ])
            ok = self.BuildNodes(node)
            if ok:
                outputStr = output.get_contents()
                return( 1, outputStr)
        return (0, "")

    class TestWrapper:
        """A wrapper around Tests (to ensure sanity)"""
        def __init__(self, test, sconf):
            self.test = test
            self.sconf = sconf
        def __call__(self, *args, **kw):
            if not self.sconf.active:
                # NOTE(review): Python-2 tuple-raise form; raises UserError.
                raise (SCons.Errors.UserError,
                       "Test called after sconf.Finish()")
            context = CheckContext(self.sconf)
            ret = apply(self.test, (context,) + args, kw)
            if not self.sconf.config_h is None:
                self.sconf.config_h_text = self.sconf.config_h_text + context.config_h
            # Emit a fallback message if the test never called Result().
            context.Result("error: no result")
            return ret

    def AddTest(self, test_name, test_instance):
        """Adds test_class to this SConf instance. It can be called with
        self.test_name(...)"""
        setattr(self, test_name, SConfBase.TestWrapper(test_instance, self))

    def AddTests(self, tests):
        """Adds all the tests given in the tests dictionary to this SConf
        instance
        """
        for name in tests.keys():
            self.AddTest(name, tests[name])

    def _createDir( self, node ):
        # Create the configure directory (or just check it in dry-run mode).
        dirName = str(node)
        if dryrun:
            if not os.path.isdir( dirName ):
                raise ConfigureDryRunError(dirName)
        else:
            if not os.path.isdir( dirName ):
                os.makedirs( dirName )
                node._exists = 1

    def _startup(self):
        """Private method. Set up logstream, and set the environment
        variables necessary for a piped build
        """
        global _ac_config_logs
        global sconf_global
        global SConfFS

        self.lastEnvFs = self.env.fs
        self.env.fs = SConfFS
        self._createDir(self.confdir)
        self.confdir.up().add_ignore( [self.confdir] )

        if self.logfile != None and not dryrun:
            # truncate logfile, if SConf.Configure is called for the first time
            # in a build
            if _ac_config_logs.has_key(self.logfile):
                log_mode = "a"
            else:
                _ac_config_logs[self.logfile] = None
                log_mode = "w"
            fp = open(str(self.logfile), log_mode)
            self.logstream = SCons.Util.Unbuffered(fp)
            # logfile may stay in a build directory, so we tell
            # the build system not to override it with a eventually
            # existing file with the same name in the source directory
            self.logfile.dir.add_ignore( [self.logfile] )

            # Record which SConscript line invoked Configure() in the log.
            tb = traceback.extract_stack()[-3-self.depth]
            old_fs_dir = SConfFS.getcwd()
            SConfFS.chdir(SConfFS.Top, change_os_dir=0)
            self.logstream.write('file %s,line %d:\n\tConfigure(confdir = %s)\n' %
                                 (tb[0], tb[1], str(self.confdir)) )
            SConfFS.chdir(old_fs_dir)
        else:
            self.logstream = None
        # we use a special builder to create source files from TEXT
        action = SCons.Action.Action(_createSource,
                                     _stringSource)
        sconfSrcBld = SCons.Builder.Builder(action=action)
        self.env.Append( BUILDERS={'SConfSourceBuilder':sconfSrcBld} )
        self.config_h_text = _ac_config_hs.get(self.config_h, "")
        self.active = 1
        # only one SConf instance should be active at a time ...
        sconf_global = self

    def _shutdown(self):
        """Private method. Reset to non-piped spawn"""
        global sconf_global, _ac_config_hs

        if not self.active:
            # NOTE(review): Python-2 "raise E, msg" statement form.
            raise SCons.Errors.UserError, "Finish may be called only once!"
        if self.logstream != None and not dryrun:
            self.logstream.write("\n")
            self.logstream.close()
            self.logstream = None
        # remove the SConfSourceBuilder from the environment
        blds = self.env['BUILDERS']
        del blds['SConfSourceBuilder']
        self.env.Replace( BUILDERS=blds )
        self.active = 0
        sconf_global = None
        if not self.config_h is None:
            # Stash accumulated header text; written out at build shutdown.
            _ac_config_hs[self.config_h] = self.config_h_text
        self.env.fs = self.lastEnvFs
class CheckContext:
    """Provides a context for configure tests. Defines how a test writes to the
    screen and log file.
    A typical test is just a callable with an instance of CheckContext as
    first argument:
    def CheckCustom(context, ...)
    context.Message('Checking my weird test ... ')
    ret = myWeirdTestFunction(...)
    context.Result(ret)
    Often, myWeirdTestFunction will be one of
    context.TryCompile/context.TryLink/context.TryRun. The results of
    those are cached, for they are only rebuild, if the dependencies have
    changed.
    """

    def __init__(self, sconf):
        """Constructor. Pass the corresponding SConf instance."""
        self.sconf = sconf
        self.did_show_result = 0

        # for Conftest.py:
        self.vardict = {}
        self.havedict = {}
        self.headerfilename = None
        self.config_h = "" # config_h text will be stored here
        # we don't regenerate the config.h file after each test. That means,
        # that tests won't be able to include the config.h file, and so
        # they can't do an #ifdef HAVE_XXX_H. This shouldn't be a major
        # issue, though. If it turns out, that we need to include config.h
        # in tests, we must ensure, that the dependencies are worked out
        # correctly. Note that we can't use Conftest.py's support for config.h,
        # cause we will need to specify a builder for the config.h file ...

    def Message(self, text):
        """Inform about what we are doing right now, e.g.
        'Checking for SOMETHING ... '
        """
        self.Display(text)
        self.sconf.cached = 1
        self.did_show_result = 0

    def Result(self, res):
        """Inform about the result of the test. res may be an integer or a
        string. In case of an integer, the written text will be 'ok' or
        'failed'.
        The result is only displayed when self.did_show_result is not set.
        """
        if type(res) in BooleanTypes:
            if res:
                text = "yes"
            else:
                text = "no"
        elif type(res) == types.StringType:
            text = res
        else:
            # NOTE(review): Python-2 "raise E, msg" statement form.
            raise TypeError, "Expected string, int or bool, got " + str(type(res))

        if self.did_show_result == 0:
            # Didn't show result yet, do it now.
            self.Display(text + "\n")
            self.did_show_result = 1

    # Thin pass-throughs to the owning SConf instance.
    def TryBuild(self, *args, **kw):
        return apply(self.sconf.TryBuild, args, kw)

    def TryAction(self, *args, **kw):
        return apply(self.sconf.TryAction, args, kw)

    def TryCompile(self, *args, **kw):
        return apply(self.sconf.TryCompile, args, kw)

    def TryLink(self, *args, **kw):
        return apply(self.sconf.TryLink, args, kw)

    def TryRun(self, *args, **kw):
        return apply(self.sconf.TryRun, args, kw)

    def __getattr__( self, attr ):
        # Delegate 'env' and 'lastTarget' lookups to the SConf instance.
        if( attr == 'env' ):
            return self.sconf.env
        elif( attr == 'lastTarget' ):
            return self.sconf.lastTarget
        else:
            # NOTE(review): Python-2 "raise E, msg" statement form.
            raise AttributeError, "CheckContext instance has no attribute '%s'" % attr

    #### Stuff used by Conftest.py (look there for explanations).

    def BuildProg(self, text, ext):
        self.sconf.cached = 1
        # TODO: should use self.vardict for $CC, $CPPFLAGS, etc.
        return not self.TryBuild(self.env.Program, text, ext)

    def CompileProg(self, text, ext):
        self.sconf.cached = 1
        # TODO: should use self.vardict for $CC, $CPPFLAGS, etc.
        return not self.TryBuild(self.env.Object, text, ext)

    def CompileSharedObject(self, text, ext):
        self.sconf.cached = 1
        # TODO: should use self.vardict for $SHCC, $CPPFLAGS, etc.
        return not self.TryBuild(self.env.SharedObject, text, ext)

    def RunProg(self, text, ext):
        self.sconf.cached = 1
        # TODO: should use self.vardict for $CC, $CPPFLAGS, etc.
        st, out = self.TryRun(text, ext)
        return not st, out

    def AppendLIBS(self, lib_name_list):
        oldLIBS = self.env.get( 'LIBS', [] )
        self.env.Append(LIBS = lib_name_list)
        return oldLIBS

    def SetLIBS(self, val):
        oldLIBS = self.env.get( 'LIBS', [] )
        self.env.Replace(LIBS = val)
        return oldLIBS

    def Display(self, msg):
        """Print a progress message to the screen and the log."""
        if self.sconf.cached:
            # We assume that Display is called twice for each test here
            # once for the Checking for ... message and once for the result.
            # The self.sconf.cached flag can only be set between those calls
            msg = "(cached) " + msg
            self.sconf.cached = 0
        progress_display(msg, append_newline=0)
        self.Log("scons: Configure: " + msg + "\n")

    def Log(self, msg):
        """Write msg to the configure log stream, if logging is enabled."""
        if self.sconf.logstream != None:
            self.sconf.logstream.write(msg)

    #### End of stuff used by Conftest.py.
def SConf(*args, **kw):
    """Factory for configure contexts.

    Returns a new SConfBase unless configuration is disabled for the
    active build type (the module-level ``build_type``), in which case a
    null object that absorbs all calls is returned instead.
    """
    if not kw.get(build_type, True):
        return SCons.Util.Null()
    # Account for this extra stack frame when locating the caller.
    kw['_depth'] = kw.get('_depth', 0) + 1
    # Strip the per-build-type enable/disable flags before construction.
    for bt in build_types:
        try:
            del kw[bt]
        except KeyError:
            pass
    return SConfBase(*args, **kw)
def CheckFunc(context, function_name, header = None, language = None):
    """Configure check: true when *function_name* is available."""
    failed = SCons.Conftest.CheckFunc(context, function_name,
                                      header = header, language = language)
    context.did_show_result = 1
    return not failed
def CheckType(context, type_name, includes = "", language = None):
    """Configure check: true when the type *type_name* exists."""
    failed = SCons.Conftest.CheckType(context, type_name, header = includes,
                                      language = language)
    context.did_show_result = 1
    return not failed
def CheckTypeSize(context, type_name, includes = "", language = None, expect = None):
    """Configure check: returns the size of *type_name* (0 on failure).

    Note the raw Conftest result is returned directly, unlike the other
    Check* wrappers which invert a failure flag.
    """
    size = SCons.Conftest.CheckTypeSize(context, type_name, header = includes,
                                        language = language, expect = expect)
    context.did_show_result = 1
    return size
def CheckDeclaration(context, declaration, includes = "", language = None):
    """Configure check: true when *declaration* is declared."""
    failed = SCons.Conftest.CheckDeclaration(context, declaration,
                                             includes = includes,
                                             language = language)
    context.did_show_result = 1
    return not failed
def createIncludesFromHeaders(headers, leaveLast, include_quotes = '""'):
    """Build C '#include' statements from a header (or list of headers).

    Used by CheckHeader and CheckLibWithHeader.  When leaveLast is true the
    final header is returned separately instead of being included.
    Returns (include_text, last_header_or_None).
    """
    if not SCons.Util.is_List(headers):
        headers = [headers]
    if leaveLast:
        lastHeader = headers[-1]
        headers = headers[:-1]
    else:
        lastHeader = None
    include_lines = ["#include %s%s%s\n"
                     % (include_quotes[0], hdr, include_quotes[1])
                     for hdr in headers]
    return string.join(include_lines, ''), lastHeader
def CheckHeader(context, header, include_quotes = '<>', language = None):
    """
    A test for a C or C++ header file.
    """
    prog_prefix, hdr_to_check = createIncludesFromHeaders(header, 1,
                                                          include_quotes)
    failed = SCons.Conftest.CheckHeader(context, hdr_to_check, prog_prefix,
                                        language = language,
                                        include_quotes = include_quotes)
    context.did_show_result = 1
    return not failed
def CheckCC(context):
    """Configure check: true when the C compiler works."""
    return not SCons.Conftest.CheckCC(context)
def CheckCXX(context):
    """Configure check: true when the C++ compiler works."""
    return not SCons.Conftest.CheckCXX(context)
def CheckSHCC(context):
    """Configure check: true when the shared-object C compiler works."""
    return not SCons.Conftest.CheckSHCC(context)
def CheckSHCXX(context):
    """Configure check: true when the shared-object C++ compiler works."""
    return not SCons.Conftest.CheckSHCXX(context)
# Bram: Make this function obsolete? CheckHeader() is more generic.
def CheckCHeader(context, header, include_quotes = '""'):
    """
    A test for a C header file.
    """
    # Delegate to the generic header check, pinning the language to C.
    return CheckHeader(context, header, include_quotes, "C")
# Bram: Make this function obsolete? CheckHeader() is more generic.
def CheckCXXHeader(context, header, include_quotes = '""'):
    """
    A test for a C++ header file.
    """
    # Delegate to the generic header check, pinning the language to C++.
    return CheckHeader(context, header, include_quotes, "C++")
def CheckLib(context, library = None, symbol = "main",
             header = None, language = None, autoadd = 1):
    """
    A test for a library. See also CheckLibWithHeader.
    Note that library may also be None to test whether the given symbol
    compiles without flags.
    """
    # Normalize to a non-empty list: a bare value is wrapped, and an
    # empty list degenerates to "no library at all".
    if not SCons.Util.is_List(library):
        library = [library]
    elif library == []:
        library = [None]
    # ToDo: accept path for the library
    failed = SCons.Conftest.CheckLib(context, library, symbol, header = header,
                                     language = language, autoadd = autoadd)
    context.did_show_result = 1
    return not failed
# XXX
# Bram: Can only include one header and can't use #ifdef HAVE_HEADER_H.
def CheckLibWithHeader(context, libs, header, language,
                       call = None, autoadd = 1):
    # ToDo: accept path for library. Support system header files.
    """
    Another (more sophisticated) test for a library.
    Checks, if library and header is available for language (may be 'C'
    or 'CXX'). Call maybe be a valid expression _with_ a trailing ';'.
    As in CheckLib, we support library=None, to test if the call compiles
    without extra link flags.
    """
    prog_prefix, dummy = createIncludesFromHeaders(header, 0)
    # Normalize to a non-empty list (see CheckLib).
    if not SCons.Util.is_List(libs):
        libs = [libs]
    elif libs == []:
        libs = [None]
    failed = SCons.Conftest.CheckLib(context, libs, None, prog_prefix,
                                     call = call, language = language,
                                     autoadd = autoadd)
    context.did_show_result = 1
    return not failed
| 37.778875 | 96 | 0.595088 |
015ab75d01d8493fdf14a1258350473b1e8a3919 | 751 | py | Python | setup.py | CharliePW/Covid-Clock | af94542952a74906625337729cd80cdbffa0d36b | [
"MIT"
] | null | null | null | setup.py | CharliePW/Covid-Clock | af94542952a74906625337729cd80cdbffa0d36b | [
"MIT"
] | null | null | null | setup.py | CharliePW/Covid-Clock | af94542952a74906625337729cd80cdbffa0d36b | [
"MIT"
] | null | null | null | import setuptools
# Read the long description for PyPI from the project README.
with open("README.md", "r") as fh:
    long_description = fh.read()

setuptools.setup(
    name = "Covid-Clock-pkg-cep234",
    version = "0.0.1",
    author = "Charles Pearman-Wright",
    author_email = "cep234@exeter.ac.uk",
    description = "A webpage that can set announces recent covid news, covid cases, the weather and sets alarms",
    long_description = long_description,
    # BUG FIX: the setuptools keyword is 'long_description_content_type';
    # the original 'long_description_content' is not a recognized argument,
    # so the README was not rendered as Markdown.
    long_description_content_type = "text/markdown",
    url = "https://github.com/CharliePW/Covid-Clock",
    packages = setuptools.find_packages(),
    classifiers = [
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        # BUG FIX: corrected trove classifier spelling ("OS Independent");
        # PyPI rejects uploads that contain unknown classifiers.
        "Operating System :: OS Independent",
    ],
    python_requires = ">= 3.9",
)
7e6f91019689a97ceb88a1e93ad66c6db5c5fba3 | 666 | py | Python | pyqt5_module/pyqt5_lesson1.py | kenwaldek/python | e6aaf5616a456a4fb91889c0617bd6511f1a223e | [
"MIT"
] | 1 | 2019-02-24T09:57:16.000Z | 2019-02-24T09:57:16.000Z | pyqt5_module/pyqt5_lesson1.py | kenwaldek/python | e6aaf5616a456a4fb91889c0617bd6511f1a223e | [
"MIT"
] | null | null | null | pyqt5_module/pyqt5_lesson1.py | kenwaldek/python | e6aaf5616a456a4fb91889c0617bd6511f1a223e | [
"MIT"
] | 4 | 2017-05-21T15:34:53.000Z | 2018-09-25T06:56:15.000Z | #! /usr/bin/env python3
# -*- coding:utf-8 -*-
###############################################################
# kenwaldek GPL-license
# Title: PyQt5 lesson 1 Version: 1.0
# Date: 08-01-17 Language: python3
# Description: pyqt5 simple example of empty window
# pythonprogramming.net from PyQt4 to PyQt5
###############################################################
# do something
import sys

from PyQt5.QtWidgets import QApplication, QWidget

# The QApplication must exist before any widget; sys.argv lets Qt
# consume its standard command-line options (e.g. -style).
app = QApplication(sys.argv)

# A bare QWidget acts as an empty top-level window.
window = QWidget()
window.setGeometry(50, 50, 500, 300)  # x, y, width, height in pixels
window.setWindowTitle('pyQt Tuts')
window.show()

# Enter the Qt event loop and exit the process with Qt's return code.
sys.exit(app.exec_())
33a1cfdd2bb289ca4ff5f9e0123a5f236449dbce | 3,362 | py | Python | inventory/views.py | CNicox/inventory | 6a85e3155a7215182f892bbc712f49f85db5d8f8 | [
"Unlicense"
] | 1 | 2022-01-11T13:51:35.000Z | 2022-01-11T13:51:35.000Z | inventory/views.py | CNicox/inventory | 6a85e3155a7215182f892bbc712f49f85db5d8f8 | [
"Unlicense"
] | null | null | null | inventory/views.py | CNicox/inventory | 6a85e3155a7215182f892bbc712f49f85db5d8f8 | [
"Unlicense"
] | null | null | null | from django.shortcuts import render, redirect
from django.http import HttpResponse
from .models import *
from django.views.generic import ListView, TemplateView
from django.contrib.auth.backends import BaseBackend
from .forms import *
from django.contrib.auth import login
from django.contrib import messages
from django.contrib.auth.forms import AuthenticationForm # add this
from django.contrib.auth import login, authenticate # add this
# create your views here
# class MyBackend(BaseBackend):
# def authenticate(self, request, username=None, password=None):
class IndexView(ListView):
template_name = 'inventory/index.html'
model = Item
context_object_name = 'items'
paginate_by = 3
# ne rabotaet ne ebu pochemu
# update: rabotaet (ebu pochemu)
class RegistrationView(TemplateView):
template_name = 'registration/create_user.html'
def get(self, request):
form = UserCreationForm()
return render(request, self.template_name, {'form': form})
def post(self, request):
form = UserCreationForm(request.POST)
if form.is_valid():
form.clean_password2()
form.save()
return redirect('/inventory/registration/')
args = {'form': form}
return render(request, self.template_name, args)
class ChangePasswordView(TemplateView):
template_name = '/inventory/change_password.html'
def get(self, request):
form = UserChangeForm()
return render(request, self.template_name, {'form': form})
def post(self, request):
form = UserChangeForm(request.POST)
if form.is_valid():
form.clean_password()
form.save()
return redirect('/inventory/change-password/')
args = {'form': form}
return render(request, self.template_name, args)
def login_request(request):
if request.method == "POST":
form = AuthForm(request, data=request.POST)
if form.is_valid():
email = form.cleaned_data.get('email')
print(email)
password = form.cleaned_data.get('password')
user = authenticate(email=email, password=password)
if user is not None:
login(request, user)
messages.info(request, f"You are now logged in as {email}.")
return redirect("/inventory/index/")
else:
messages.error(request, "Invalid username or password.")
else:
messages.error(request, "Invalid username or password.")
form = AuthForm()
print(f'user {email} is authenticated')
return render(request=request, template_name="registration/login.html", context={"form": form})
# def registration_form_post(request):
# form = UserCreationForm()
# # if form.is_valid():
# # form.save()
# return render(request, 'registration/create_user.html', {'form': form})
'''
if request.method == 'POST':
form = registration_form(request.POST)
if form.is_valid():
email = form.cleaned_data['email']
password1 = form.cleaned_data['password1']
password2 = form.cleaned_data['password2']
print(email, password1, password2)
'''
# this incase generic doesn't work for some reason
# def index(request):
# return render(request, "inventory/index.html")
| 31.420561 | 99 | 0.653778 |
70c9c88a72ac966be243fd35c19d9d6705b9b2c3 | 437 | py | Python | 14.classes/0.basic_classes.py | Tazri/Python | f7ca625800229c8a7e20b64810d6e162ccb6b09f | [
"DOC"
] | null | null | null | 14.classes/0.basic_classes.py | Tazri/Python | f7ca625800229c8a7e20b64810d6e162ccb6b09f | [
"DOC"
] | null | null | null | 14.classes/0.basic_classes.py | Tazri/Python | f7ca625800229c8a7e20b64810d6e162ccb6b09f | [
"DOC"
] | null | null | null | # create simple class which name is Point
from operator import pos
class Point :
x = 40;
y = 43;
# create Point object
position = Point();
print(">>> Position <<<");
print("position.x : ",position.x);
print("position.y : ",position.y);
# changing position property
position.x = 0;
position.y = 0;
print("\n\n>>> After Change Position Properties <<<");
print("position.x : ",position.x);
print("position.y : ",position.y);
| 19 | 54 | 0.652174 |
819b95776ca69d600b2acf57094194988767c5e0 | 11,426 | py | Python | goatools/parsers/ncbi_gene_file_reader.py | cinaljess/goatools | 8cd603df88fcb49da970e37c445cdb1bdd17ee01 | [
"BSD-2-Clause"
] | 1 | 2020-03-12T13:53:11.000Z | 2020-03-12T13:53:11.000Z | goatools/parsers/ncbi_gene_file_reader.py | cinaljess/goatools | 8cd603df88fcb49da970e37c445cdb1bdd17ee01 | [
"BSD-2-Clause"
] | null | null | null | goatools/parsers/ncbi_gene_file_reader.py | cinaljess/goatools | 8cd603df88fcb49da970e37c445cdb1bdd17ee01 | [
"BSD-2-Clause"
] | 1 | 2022-03-17T03:14:32.000Z | 2022-03-17T03:14:32.000Z | """Reads an NCBI Gene tsv file."""
from __future__ import print_function
import sys
import re
from collections import namedtuple
from collections import OrderedDict
__copyright__ = "Copyright (C) 2016-2018, DV Klopfenstein, H Tang, All rights reserved."
__author__ = "DV Klopfenstein"
#pylint: disable=line-too-long,too-many-instance-attributes,unnecessary-lambda
class NCBIgeneFileReader(object):
"""Reads an NCBI Gene tsv file.
Generate the NCBI gene file by following these steps:
1) Open a browser at: https://www.ncbi.nlm.nih.gov/gene
2) Type search text. Example:
genetype protein coding[Properties] AND "3702"[Taxonomy ID] AND alive[property]
3) Press the "Search" button.
4) From the pull down menu: "Send to" -> File
"""
# ints=None, floats=None, hdr_ex=None, log=sys.stdout):
#def __init__(self, sep, ints, floats, hdr_ex, log):
    def __init__(self, fin, sep="\t", **kwargs_dict):
        """Set up a reader for NCBI Gene tsv/csv file *fin*.

        Keyword args: log (stream or None), ints/floats (extra headers to
        coerce), hdr_ex (substring that marks the header line).
        """
        # Optional progress stream; pass log=None to silence messages.
        self.log = kwargs_dict.get('log', sys.stdout)
        # Column headers whose values are converted to int.
        self.int_hdrs = [
            'tax_id', 'GeneID', 'CurrentID', # NCBI Gene
            'start_position_on_the_genomic_accession', # NCBI Gene
            'end_position_on_the_genomic_accession', # NCBI Gene
            'exon_count', # NCBI Gene
            'OMIM', # NCBI Gene
            'Start', 'start', 'End', 'end', # Cluster
            'Len', 'len', 'Length', 'length', # cluster
            'Qty', 'qty', '# Genes'] # Cluster
        if 'ints' in kwargs_dict:
            ints = kwargs_dict['ints']
            if len(ints) != 0:
                self.int_hdrs.extend(ints)
            else:
                # NOTE(review): passing ints=[] discards ALL default int
                # headers -- confirm this is the intended semantics.
                self.int_hdrs = []
        self.float_hdrs = ['Density', 'density', 'MinDensity'] # Cluster
        # These are formated for expected sorting: eg. Chr "09", "10"
        self.strpat_hdrs = {'Chr':'{:>2}', 'chromosome':'{:>2}'}
        if 'floats' in kwargs_dict:
            self.float_hdrs.extend(kwargs_dict['floats'])
        self.idxs_float = [] # run() inits proper values
        self.idxs_int = [] # run() inits proper values
        self.idxs_strpat = [] # run() inits proper values
        # Data Members used by all functions
        self.fin = fin
        self.hdr2idx = None
        self.len = 0
        self.sep = self._get_sep(fin, sep)
        self.hdr_ex = kwargs_dict.get('hdr_ex', None)
        # Data Members used by various functions
        self.ret_list = [] # tbl2list
        self.hdrs_usr = [] # tbl2sublist tbl2list
        self.usr_max_idx = None
        # list: Return the one item (a list of items) of interest to the user.
        # sublist: Return the items (a list of lists) of interest to the user.
        # lists: Return all items (a list of lists) read from the tsv/csv file.
        self.fncs = {
            'list': lambda fld: self.ret_list.extend([fld[hdr_i[1]] for hdr_i in self.hdrs_usr]),
            'sublist': lambda fld: self.ret_list.append([fld[hdr_i[1]] for hdr_i in self.hdrs_usr]),
            'lists': lambda fld: self.ret_list.append(fld)
        }
    def get_h2i(self, hdrs_usr):
        """Scan the file for its header line; return the header->index
        OrderedDict, or None if no header line was found."""
        with open(self.fin) as fin_stream:
            for line in fin_stream:
                line = line.rstrip('\r\n') # chomp
                if not self.hdr2idx:
                    # Stop as soon as the header line has been consumed.
                    if self.do_hdr(line, hdrs_usr):
                        return self.hdr2idx
        return None
    def do_hdr(self, line, hdrs_usr):
        """Initialize self.h2i.  Returns True when *line* was consumed as
        the header line, False otherwise."""
        # If there is no header hint, consider the first line the header.
        if self.hdr_ex is None:
            self._init_hdr(line, hdrs_usr)
            return True
        # If there is a header hint, examine each beginning line until header hint is found.
        elif self.hdr_ex in line:
            self._init_hdr(line, hdrs_usr)
            return True
        return False
    def run(self, fnc_name, hdrs_usr):
        """Read csv/tsv file and return specified data in a list of lists.

        fnc_name selects the accumulator: 'list', 'sublist' or 'lists'
        (see the self.fncs mapping built in __init__).
        """
        fnc = self.fncs[fnc_name]
        with open(self.fin) as fin_stream:
            for lnum, line in enumerate(fin_stream):
                line = line.rstrip('\r\n') # chomp
                # Obtain Data if headers have been collected from the first line
                if self.hdr2idx:
                    self._init_data_line(fnc, lnum, line)
                # Obtain the header
                else:
                    self.do_hdr(line, hdrs_usr)
        if self.log is not None:
            self.log.write(" {:9} data READ: {}\n".format(len(self.ret_list), self.fin))
        return self.ret_list, self.hdr2idx
    def get_nts(self):
        """Read the csv/tsv file and return its data rows as a list of
        namedtuples (field names derived from the header line)."""
        data = []
        nt_obj = None
        with open(self.fin) as fin_stream:
            for lnum, line in enumerate(fin_stream, 1):
                try:
                    line = line.rstrip('\r\n') # chomp
                    # Obtain Data if headers have been collected from the first line
                    if nt_obj is not None:
                        flds = re.split(self.sep, line)
                        self.convert_ints_floats(flds)
                        # Column 6 ('Aliases') holds a comma-separated list.
                        flds[6] = [s.strip() for s in flds[6].split(',')]
                        ntdata = nt_obj._make(flds)
                        data.append(ntdata)
                    # Obtain the header
                    else:
                        nt_obj = self._init_nt_hdr(line)
                except RuntimeError:
                    # Print headers
                    #if nt_obj is not None:
                    #    sys.stdout.write("{HDRS}\n".format(HDRS='\n'.join(nt_obj._fields)))
                    flds = re.split(self.sep, line)
                    print(len(flds), "FIELDS")
                    print(flds)
                    #raise Exception("{FIN}({LNUM}): {LINE}\n".format(
                    #    FIN=self.fin, LNUM=lnum, LINE=line))
                    # JUST SKIP LINES WITH INCOMPLETE DATA, BUT PRINT ERROR MESSAGE
                    sys.stdout.write("**ERROR: {FIN}({LNUM}): {LINE}\n".format(
                        FIN=self.fin, LNUM=lnum, LINE=line))
        if self.log is not None:
            self.log.write(" {:9} lines READ: {}\n".format(len(data), self.fin))
        return data
    def hdr_xform(self, hdrs):
        """Transform NCBI Gene header fields into valid namedtuple fields."""
        xform = []
        hdrs = self.replace_nulls(hdrs)
        for hdr in hdrs:
            # Same character sanitization as _init_nt_hdr, per header.
            hdr = hdr.replace('.', '_')
            hdr = hdr.replace(' ', '_')
            hdr = hdr.replace('#', 'N')
            hdr = hdr.replace('-', '_')
            hdr = hdr.replace('"', '')
            xform.append(hdr)
        return xform
    def _init_nt_hdr(self, line):
        """Convert headers into valid namedtuple fields and return the
        namedtuple class for data rows."""
        # Sanitize characters that are illegal in namedtuple field names.
        line = line.replace('.', '_')
        line = line.replace(' ', '_')
        line = line.replace('#', 'N')
        line = line.replace('-', '_')
        line = line.replace('"', '')
        #line = re.sub(r"_$", r"", line)
        hdrs = re.split(self.sep, line)
        if '' in hdrs:
            hdrs = NCBIgeneFileReader.replace_nulls(hdrs)
        # Init indexes which will be converted to int or float
        self.idxs_int = [idx for idx, hdr in enumerate(hdrs) if hdr in self.int_hdrs]
        self.idxs_float = [idx for idx, hdr in enumerate(hdrs) if hdr in self.float_hdrs]
        # get_nts() assumes column 6 is the comma-separated Aliases field.
        assert hdrs[6] == 'Aliases'
        return namedtuple('ntncbi', ' '.join(hdrs))
    @staticmethod
    def _get_sep(fin, sep):
        """Uses extension(.tsv, .csv) to determine separator.

        Returns a regex pattern string (raw r'\t' / r','); falls back to
        the caller-supplied *sep* for any other extension.
        """
        if '.tsv' in fin:
            return r'\t'
        elif '.csv' in fin:
            return r','
        else:
            return sep
@staticmethod
def replace_nulls(hdrs):
"""Replace '' in hdrs."""
ret = []
idx = 0
for hdr in hdrs:
if hdr == '':
ret.append("no_hdr{}".format(idx))
else:
ret.append(hdr)
return ret
def _init_data_line(self, fnc, lnum, line):
"""Process Data line."""
fld = re.split(self.sep, line)
# Lines may contain different numbers of items.
# The line should have all columns requested by the user.
if self.usr_max_idx < len(fld):
self.convert_ints_floats(fld)
fnc(fld)
else:
for fld in enumerate(zip(self.hdr2idx.keys(), fld)):
print(fld)
for hdr in self.hdrs_usr:
print(hdr)
print('# ITEMS ON A LINE:', len(fld))
print('MAX USR IDX:', self.usr_max_idx)
raise Exception("ERROR ON LINE {} IN {}".format(lnum+1, self.fin))
def convert_ints_floats(self, flds):
"""Convert strings to ints and floats, if so specified."""
for idx in self.idxs_float:
flds[idx] = float(flds[idx])
for idx in self.idxs_int:
dig = flds[idx]
#print 'idx={} ({}) {}'.format(idx, flds[idx], flds) # DVK
flds[idx] = int(flds[idx]) if dig.isdigit() else dig
for idx in self.idxs_strpat:
hdr = self.hdr2idx.items()[idx][0]
pat = self.strpat_hdrs[hdr]
flds[idx] = pat.format(flds[idx])
def _init_hdr(self, line, hdrs_usr):
"""Initialize self.hdr2idx, self.len, self.idxs_float, and self.idxs_int"""
self.hdr2idx = OrderedDict([(v.strip(), i) for i, v in enumerate(re.split(self.sep, line))])
self.len = len(self.hdr2idx)
# If user is requesting specific data fields...
if hdrs_usr is not None:
# Loop through the user headers
for usr_hdr in hdrs_usr:
# If the user header is contained in the file....
if usr_hdr in self.hdr2idx:
# Add the user header and the field index to a list
self.hdrs_usr.append([usr_hdr, self.hdr2idx[usr_hdr]])
else:
raise Exception("NO COLUMN({}) FOUND:\n HDR={}\n".format(
hdrs_usr, '\n HDR='.join(self.hdr2idx.keys())))
usr_hdrs = [E[0] for E in self.hdrs_usr] if self.hdrs_usr else self.hdr2idx
self._init_idxs_float(usr_hdrs)
self._init_idxs_int(usr_hdrs)
self._init_idxs_strpat(usr_hdrs)
self.usr_max_idx = max(E[1] for E in self.hdrs_usr) if self.hdrs_usr else len(self.hdr2idx)-1
def _init_idxs_float(self, usr_hdrs):
"""List of indexes whose values will be floats."""
self.idxs_float = [
Idx for Hdr, Idx in self.hdr2idx.items() if Hdr in usr_hdrs and Hdr in self.float_hdrs]
def _init_idxs_int(self, usr_hdrs):
"""List of indexes whose values will be ints."""
self.idxs_int = [
Idx for Hdr, Idx in self.hdr2idx.items() if Hdr in usr_hdrs and Hdr in self.int_hdrs]
def _init_idxs_strpat(self, usr_hdrs):
"""List of indexes whose values will be strings."""
strpat = self.strpat_hdrs.keys()
self.idxs_strpat = [
Idx for Hdr, Idx in self.hdr2idx.items() if Hdr in usr_hdrs and Hdr in strpat]
# Copyright (C) 2016-2018, DV Klopfenstein, H Tang, All rights reserved.
| 42.794007 | 101 | 0.549799 |
994269d36cd8540b0a38dbad0d82863e95d90236 | 41,944 | py | Python | pandas/tools/tests/test_pivot.py | betoesquivel/PyData29-DataAnalyticsWithAWSLambda | 318d1f595e4079544159a0f4802277dc5b25cb47 | [
"MIT"
] | 4 | 2016-12-06T20:22:28.000Z | 2018-05-04T09:51:45.000Z | pandas/tools/tests/test_pivot.py | betoesquivel/PyData29-DataAnalyticsWithAWSLambda | 318d1f595e4079544159a0f4802277dc5b25cb47 | [
"MIT"
] | null | null | null | pandas/tools/tests/test_pivot.py | betoesquivel/PyData29-DataAnalyticsWithAWSLambda | 318d1f595e4079544159a0f4802277dc5b25cb47 | [
"MIT"
] | 1 | 2021-11-05T22:17:01.000Z | 2021-11-05T22:17:01.000Z | from datetime import datetime, date, timedelta
import numpy as np
from numpy.testing import assert_equal
import pandas as pd
from pandas import DataFrame, Series, Index, MultiIndex, Grouper
from pandas.tools.merge import concat
from pandas.tools.pivot import pivot_table, crosstab
from pandas.compat import range, u, product
import pandas.util.testing as tm
class TestPivotTable(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
def test_pivot_table(self):
index = ['A', 'B']
columns = 'C'
table = pivot_table(self.data, values='D',
index=index, columns=columns)
table2 = self.data.pivot_table(
values='D', index=index, columns=columns)
tm.assert_frame_equal(table, table2)
# this works
pivot_table(self.data, values='D', index=index)
if len(index) > 1:
self.assertEqual(table.index.names, tuple(index))
else:
self.assertEqual(table.index.name, index[0])
if len(columns) > 1:
self.assertEqual(table.columns.names, columns)
else:
self.assertEqual(table.columns.name, columns[0])
expected = self.data.groupby(
index + [columns])['D'].agg(np.mean).unstack()
tm.assert_frame_equal(table, expected)
def test_pivot_table_nocols(self):
df = DataFrame({'rows': ['a', 'b', 'c'],
'cols': ['x', 'y', 'z'],
'values': [1, 2, 3]})
rs = df.pivot_table(columns='cols', aggfunc=np.sum)
xp = df.pivot_table(index='cols', aggfunc=np.sum).T
tm.assert_frame_equal(rs, xp)
rs = df.pivot_table(columns='cols', aggfunc={'values': 'mean'})
xp = df.pivot_table(index='cols', aggfunc={'values': 'mean'}).T
tm.assert_frame_equal(rs, xp)
def test_pivot_table_dropna(self):
df = DataFrame({'amount': {0: 60000, 1: 100000, 2: 50000, 3: 30000},
'customer': {0: 'A', 1: 'A', 2: 'B', 3: 'C'},
'month': {0: 201307, 1: 201309, 2: 201308, 3: 201310},
'product': {0: 'a', 1: 'b', 2: 'c', 3: 'd'},
'quantity': {0: 2000000, 1: 500000,
2: 1000000, 3: 1000000}})
pv_col = df.pivot_table('quantity', 'month', [
'customer', 'product'], dropna=False)
pv_ind = df.pivot_table(
'quantity', ['customer', 'product'], 'month', dropna=False)
m = MultiIndex.from_tuples([(u('A'), u('a')),
(u('A'), u('b')),
(u('A'), u('c')),
(u('A'), u('d')),
(u('B'), u('a')),
(u('B'), u('b')),
(u('B'), u('c')),
(u('B'), u('d')),
(u('C'), u('a')),
(u('C'), u('b')),
(u('C'), u('c')),
(u('C'), u('d'))])
assert_equal(pv_col.columns.values, m.values)
assert_equal(pv_ind.index.values, m.values)
def test_pass_array(self):
result = self.data.pivot_table(
'D', index=self.data.A, columns=self.data.C)
expected = self.data.pivot_table('D', index='A', columns='C')
tm.assert_frame_equal(result, expected)
def test_pass_function(self):
result = self.data.pivot_table('D', index=lambda x: x // 5,
columns=self.data.C)
expected = self.data.pivot_table('D', index=self.data.index // 5,
columns='C')
tm.assert_frame_equal(result, expected)
def test_pivot_table_multiple(self):
index = ['A', 'B']
columns = 'C'
table = pivot_table(self.data, index=index, columns=columns)
expected = self.data.groupby(index + [columns]).agg(np.mean).unstack()
tm.assert_frame_equal(table, expected)
def test_pivot_dtypes(self):
# can convert dtypes
f = DataFrame({'a': ['cat', 'bat', 'cat', 'bat'], 'v': [
1, 2, 3, 4], 'i': ['a', 'b', 'a', 'b']})
self.assertEqual(f.dtypes['v'], 'int64')
z = pivot_table(f, values='v', index=['a'], columns=[
'i'], fill_value=0, aggfunc=np.sum)
result = z.get_dtype_counts()
expected = Series(dict(int64=2))
tm.assert_series_equal(result, expected)
# cannot convert dtypes
f = DataFrame({'a': ['cat', 'bat', 'cat', 'bat'], 'v': [
1.5, 2.5, 3.5, 4.5], 'i': ['a', 'b', 'a', 'b']})
self.assertEqual(f.dtypes['v'], 'float64')
z = pivot_table(f, values='v', index=['a'], columns=[
'i'], fill_value=0, aggfunc=np.mean)
result = z.get_dtype_counts()
expected = Series(dict(float64=2))
tm.assert_series_equal(result, expected)
def test_pivot_multi_values(self):
result = pivot_table(self.data, values=['D', 'E'],
index='A', columns=['B', 'C'], fill_value=0)
expected = pivot_table(self.data.drop(['F'], axis=1),
index='A', columns=['B', 'C'], fill_value=0)
tm.assert_frame_equal(result, expected)
def test_pivot_multi_functions(self):
f = lambda func: pivot_table(self.data, values=['D', 'E'],
index=['A', 'B'], columns='C',
aggfunc=func)
result = f([np.mean, np.std])
means = f(np.mean)
stds = f(np.std)
expected = concat([means, stds], keys=['mean', 'std'], axis=1)
tm.assert_frame_equal(result, expected)
# margins not supported??
f = lambda func: pivot_table(self.data, values=['D', 'E'],
index=['A', 'B'], columns='C',
aggfunc=func, margins=True)
result = f([np.mean, np.std])
means = f(np.mean)
stds = f(np.std)
expected = concat([means, stds], keys=['mean', 'std'], axis=1)
tm.assert_frame_equal(result, expected)
def test_pivot_index_with_nan(self):
# GH 3588
nan = np.nan
df = DataFrame({'a': ['R1', 'R2', nan, 'R4'],
'b': ['C1', 'C2', 'C3', 'C4'],
'c': [10, 15, 17, 20]})
result = df.pivot('a', 'b', 'c')
expected = DataFrame([[nan, nan, 17, nan], [10, nan, nan, nan],
[nan, 15, nan, nan], [nan, nan, nan, 20]],
index=Index([nan, 'R1', 'R2', 'R4'], name='a'),
columns=Index(['C1', 'C2', 'C3', 'C4'], name='b'))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(df.pivot('b', 'a', 'c'), expected.T)
# GH9491
df = DataFrame({'a': pd.date_range('2014-02-01', periods=6, freq='D'),
'c': 100 + np.arange(6)})
df['b'] = df['a'] - pd.Timestamp('2014-02-02')
df.loc[1, 'a'] = df.loc[3, 'a'] = nan
df.loc[1, 'b'] = df.loc[4, 'b'] = nan
pv = df.pivot('a', 'b', 'c')
self.assertEqual(pv.notnull().values.sum(), len(df))
for _, row in df.iterrows():
self.assertEqual(pv.loc[row['a'], row['b']], row['c'])
tm.assert_frame_equal(df.pivot('b', 'a', 'c'), pv.T)
def test_pivot_with_tz(self):
# GH 5878
df = DataFrame({'dt1': [datetime(2013, 1, 1, 9, 0),
datetime(2013, 1, 2, 9, 0),
datetime(2013, 1, 1, 9, 0),
datetime(2013, 1, 2, 9, 0)],
'dt2': [datetime(2014, 1, 1, 9, 0),
datetime(2014, 1, 1, 9, 0),
datetime(2014, 1, 2, 9, 0),
datetime(2014, 1, 2, 9, 0)],
'data1': np.arange(4, dtype='int64'),
'data2': np.arange(4, dtype='int64')})
df['dt1'] = df['dt1'].apply(lambda d: pd.Timestamp(d, tz='US/Pacific'))
df['dt2'] = df['dt2'].apply(lambda d: pd.Timestamp(d, tz='Asia/Tokyo'))
exp_col1 = Index(['data1', 'data1', 'data2', 'data2'])
exp_col2 = pd.DatetimeIndex(['2014/01/01 09:00',
'2014/01/02 09:00'] * 2,
name='dt2', tz='Asia/Tokyo')
exp_col = pd.MultiIndex.from_arrays([exp_col1, exp_col2])
expected = DataFrame([[0, 2, 0, 2], [1, 3, 1, 3]],
index=pd.DatetimeIndex(['2013/01/01 09:00',
'2013/01/02 09:00'],
name='dt1',
tz='US/Pacific'),
columns=exp_col)
pv = df.pivot(index='dt1', columns='dt2')
tm.assert_frame_equal(pv, expected)
expected = DataFrame([[0, 2], [1, 3]],
index=pd.DatetimeIndex(['2013/01/01 09:00',
'2013/01/02 09:00'],
name='dt1',
tz='US/Pacific'),
columns=pd.DatetimeIndex(['2014/01/01 09:00',
'2014/01/02 09:00'],
name='dt2',
tz='Asia/Tokyo'))
pv = df.pivot(index='dt1', columns='dt2', values='data1')
tm.assert_frame_equal(pv, expected)
def test_pivot_periods(self):
df = DataFrame({'p1': [pd.Period('2013-01-01', 'D'),
pd.Period('2013-01-02', 'D'),
pd.Period('2013-01-01', 'D'),
pd.Period('2013-01-02', 'D')],
'p2': [pd.Period('2013-01', 'M'),
pd.Period('2013-01', 'M'),
pd.Period('2013-02', 'M'),
pd.Period('2013-02', 'M')],
'data1': np.arange(4, dtype='int64'),
'data2': np.arange(4, dtype='int64')})
exp_col1 = Index(['data1', 'data1', 'data2', 'data2'])
exp_col2 = pd.PeriodIndex(['2013-01', '2013-02'] * 2,
name='p2', freq='M')
exp_col = pd.MultiIndex.from_arrays([exp_col1, exp_col2])
expected = DataFrame([[0, 2, 0, 2], [1, 3, 1, 3]],
index=pd.PeriodIndex(['2013-01-01', '2013-01-02'],
name='p1', freq='D'),
columns=exp_col)
pv = df.pivot(index='p1', columns='p2')
tm.assert_frame_equal(pv, expected)
expected = DataFrame([[0, 2], [1, 3]],
index=pd.PeriodIndex(['2013-01-01', '2013-01-02'],
name='p1', freq='D'),
columns=pd.PeriodIndex(['2013-01', '2013-02'],
name='p2', freq='M'))
pv = df.pivot(index='p1', columns='p2', values='data1')
tm.assert_frame_equal(pv, expected)
def test_margins(self):
def _check_output(result, values_col, index=['A', 'B'],
columns=['C'],
margins_col='All'):
col_margins = result.ix[:-1, margins_col]
expected_col_margins = self.data.groupby(index)[values_col].mean()
tm.assert_series_equal(col_margins, expected_col_margins,
check_names=False)
self.assertEqual(col_margins.name, margins_col)
result = result.sortlevel()
index_margins = result.ix[(margins_col, '')].iloc[:-1]
expected_ix_margins = self.data.groupby(columns)[values_col].mean()
tm.assert_series_equal(index_margins, expected_ix_margins,
check_names=False)
self.assertEqual(index_margins.name, (margins_col, ''))
grand_total_margins = result.loc[(margins_col, ''), margins_col]
expected_total_margins = self.data[values_col].mean()
self.assertEqual(grand_total_margins, expected_total_margins)
# column specified
result = self.data.pivot_table(values='D', index=['A', 'B'],
columns='C',
margins=True, aggfunc=np.mean)
_check_output(result, 'D')
# Set a different margins_name (not 'All')
result = self.data.pivot_table(values='D', index=['A', 'B'],
columns='C',
margins=True, aggfunc=np.mean,
margins_name='Totals')
_check_output(result, 'D', margins_col='Totals')
# no column specified
table = self.data.pivot_table(index=['A', 'B'], columns='C',
margins=True, aggfunc=np.mean)
for value_col in table.columns.levels[0]:
_check_output(table[value_col], value_col)
# no col
# to help with a buglet
self.data.columns = [k * 2 for k in self.data.columns]
table = self.data.pivot_table(index=['AA', 'BB'], margins=True,
aggfunc=np.mean)
for value_col in table.columns:
totals = table.loc[('All', ''), value_col]
self.assertEqual(totals, self.data[value_col].mean())
# no rows
rtable = self.data.pivot_table(columns=['AA', 'BB'], margins=True,
aggfunc=np.mean)
tm.assertIsInstance(rtable, Series)
table = self.data.pivot_table(index=['AA', 'BB'], margins=True,
aggfunc='mean')
for item in ['DD', 'EE', 'FF']:
totals = table.loc[('All', ''), item]
self.assertEqual(totals, self.data[item].mean())
# issue number #8349: pivot_table with margins and dictionary aggfunc
data = [
{'JOB': 'Worker', 'NAME': 'Bob', 'YEAR': 2013,
'MONTH': 12, 'DAYS': 3, 'SALARY': 17},
{'JOB': 'Employ', 'NAME':
'Mary', 'YEAR': 2013, 'MONTH': 12, 'DAYS': 5, 'SALARY': 23},
{'JOB': 'Worker', 'NAME': 'Bob', 'YEAR': 2014,
'MONTH': 1, 'DAYS': 10, 'SALARY': 100},
{'JOB': 'Worker', 'NAME': 'Bob', 'YEAR': 2014,
'MONTH': 1, 'DAYS': 11, 'SALARY': 110},
{'JOB': 'Employ', 'NAME': 'Mary', 'YEAR': 2014,
'MONTH': 1, 'DAYS': 15, 'SALARY': 200},
{'JOB': 'Worker', 'NAME': 'Bob', 'YEAR': 2014,
'MONTH': 2, 'DAYS': 8, 'SALARY': 80},
{'JOB': 'Employ', 'NAME': 'Mary', 'YEAR': 2014,
'MONTH': 2, 'DAYS': 5, 'SALARY': 190},
]
df = DataFrame(data)
df = df.set_index(['JOB', 'NAME', 'YEAR', 'MONTH'], drop=False,
append=False)
result = df.pivot_table(index=['JOB', 'NAME'],
columns=['YEAR', 'MONTH'],
values=['DAYS', 'SALARY'],
aggfunc={'DAYS': 'mean', 'SALARY': 'sum'},
margins=True)
expected = df.pivot_table(index=['JOB', 'NAME'],
columns=['YEAR', 'MONTH'], values=['DAYS'],
aggfunc='mean', margins=True)
tm.assert_frame_equal(result['DAYS'], expected['DAYS'])
expected = df.pivot_table(index=['JOB', 'NAME'],
columns=['YEAR', 'MONTH'], values=['SALARY'],
aggfunc='sum', margins=True)
tm.assert_frame_equal(result['SALARY'], expected['SALARY'])
def test_pivot_integer_columns(self):
# caused by upstream bug in unstack
d = date.min
data = list(product(['foo', 'bar'], ['A', 'B', 'C'], ['x1', 'x2'],
[d + timedelta(i)
for i in range(20)], [1.0]))
df = DataFrame(data)
table = df.pivot_table(values=4, index=[0, 1, 3], columns=[2])
df2 = df.rename(columns=str)
table2 = df2.pivot_table(
values='4', index=['0', '1', '3'], columns=['2'])
tm.assert_frame_equal(table, table2, check_names=False)
def test_pivot_no_level_overlap(self):
# GH #1181
data = DataFrame({'a': ['a', 'a', 'a', 'a', 'b', 'b', 'b', 'b'] * 2,
'b': [0, 0, 0, 0, 1, 1, 1, 1] * 2,
'c': (['foo'] * 4 + ['bar'] * 4) * 2,
'value': np.random.randn(16)})
table = data.pivot_table('value', index='a', columns=['b', 'c'])
grouped = data.groupby(['a', 'b', 'c'])['value'].mean()
expected = grouped.unstack('b').unstack('c').dropna(axis=1, how='all')
tm.assert_frame_equal(table, expected)
def test_pivot_columns_lexsorted(self):
n = 10000
dtype = np.dtype([
("Index", object),
("Symbol", object),
("Year", int),
("Month", int),
("Day", int),
("Quantity", int),
("Price", float),
])
products = np.array([
('SP500', 'ADBE'),
('SP500', 'NVDA'),
('SP500', 'ORCL'),
('NDQ100', 'AAPL'),
('NDQ100', 'MSFT'),
('NDQ100', 'GOOG'),
('FTSE', 'DGE.L'),
('FTSE', 'TSCO.L'),
('FTSE', 'GSK.L'),
], dtype=[('Index', object), ('Symbol', object)])
items = np.empty(n, dtype=dtype)
iproduct = np.random.randint(0, len(products), n)
items['Index'] = products['Index'][iproduct]
items['Symbol'] = products['Symbol'][iproduct]
dr = pd.date_range(date(2000, 1, 1),
date(2010, 12, 31))
dates = dr[np.random.randint(0, len(dr), n)]
items['Year'] = dates.year
items['Month'] = dates.month
items['Day'] = dates.day
items['Price'] = np.random.lognormal(4.0, 2.0, n)
df = DataFrame(items)
pivoted = df.pivot_table('Price', index=['Month', 'Day'],
columns=['Index', 'Symbol', 'Year'],
aggfunc='mean')
self.assertTrue(pivoted.columns.is_monotonic)
def test_pivot_complex_aggfunc(self):
f = {'D': ['std'], 'E': ['sum']}
expected = self.data.groupby(['A', 'B']).agg(f).unstack('B')
result = self.data.pivot_table(index='A', columns='B', aggfunc=f)
tm.assert_frame_equal(result, expected)
    def test_margins_no_values_no_cols(self):
        # Regression test on pivot table: no values or cols passed.
        result = self.data[['A', 'B']].pivot_table(
            index=['A', 'B'], aggfunc=len, margins=True)
        # The 'All' margin (last entry) must equal the sum of the per-group
        # counts that precede it.
        result_list = result.tolist()
        self.assertEqual(sum(result_list[:-1]), result_list[-1])
def test_margins_no_values_two_rows(self):
# Regression test on pivot table: no values passed but rows are a
# multi-index
result = self.data[['A', 'B', 'C']].pivot_table(
index=['A', 'B'], columns='C', aggfunc=len, margins=True)
self.assertEqual(result.All.tolist(), [3.0, 1.0, 4.0, 3.0, 11.0])
def test_margins_no_values_one_row_one_col(self):
# Regression test on pivot table: no values passed but row and col
# defined
result = self.data[['A', 'B']].pivot_table(
index='A', columns='B', aggfunc=len, margins=True)
self.assertEqual(result.All.tolist(), [4.0, 7.0, 11.0])
def test_margins_no_values_two_row_two_cols(self):
# Regression test on pivot table: no values passed but rows and cols
# are multi-indexed
self.data['D'] = ['a', 'b', 'c', 'd',
'e', 'f', 'g', 'h', 'i', 'j', 'k']
result = self.data[['A', 'B', 'C', 'D']].pivot_table(
index=['A', 'B'], columns=['C', 'D'], aggfunc=len, margins=True)
self.assertEqual(result.All.tolist(), [3.0, 1.0, 4.0, 3.0, 11.0])
def test_pivot_table_with_margins_set_margin_name(self):
# GH 3335
for margin_name in ['foo', 'one', 666, None, ['a', 'b']]:
with self.assertRaises(ValueError):
# multi-index index
pivot_table(self.data, values='D', index=['A', 'B'],
columns=['C'], margins=True,
margins_name=margin_name)
with self.assertRaises(ValueError):
# multi-index column
pivot_table(self.data, values='D', index=['C'],
columns=['A', 'B'], margins=True,
margins_name=margin_name)
with self.assertRaises(ValueError):
# non-multi-index index/column
pivot_table(self.data, values='D', index=['A'],
columns=['B'], margins=True,
margins_name=margin_name)
def test_pivot_timegrouper(self):
df = DataFrame({
'Branch': 'A A A A A A A B'.split(),
'Buyer': 'Carl Mark Carl Carl Joe Joe Joe Carl'.split(),
'Quantity': [1, 3, 5, 1, 8, 1, 9, 3],
'Date': [datetime(2013, 1, 1),
datetime(2013, 1, 1),
datetime(2013, 10, 1),
datetime(2013, 10, 2),
datetime(2013, 10, 1),
datetime(2013, 10, 2),
datetime(2013, 12, 2),
datetime(2013, 12, 2), ]}).set_index('Date')
expected = DataFrame(np.array([10, 18, 3], dtype='int64')
.reshape(1, 3),
index=[datetime(2013, 12, 31)],
columns='Carl Joe Mark'.split())
expected.index.name = 'Date'
expected.columns.name = 'Buyer'
result = pivot_table(df, index=Grouper(freq='A'), columns='Buyer',
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer', columns=Grouper(freq='A'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
expected = DataFrame(np.array([1, np.nan, 3, 9, 18, np.nan])
.reshape(2, 3),
index=[datetime(2013, 1, 1),
datetime(2013, 7, 1)],
columns='Carl Joe Mark'.split())
expected.index.name = 'Date'
expected.columns.name = 'Buyer'
result = pivot_table(df, index=Grouper(freq='6MS'), columns='Buyer',
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer', columns=Grouper(freq='6MS'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
# passing the name
df = df.reset_index()
result = pivot_table(df, index=Grouper(freq='6MS', key='Date'),
columns='Buyer',
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer',
columns=Grouper(freq='6MS', key='Date'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
self.assertRaises(KeyError, lambda: pivot_table(
df, index=Grouper(freq='6MS', key='foo'),
columns='Buyer', values='Quantity', aggfunc=np.sum))
self.assertRaises(KeyError, lambda: pivot_table(
df, index='Buyer',
columns=Grouper(freq='6MS', key='foo'),
values='Quantity', aggfunc=np.sum))
# passing the level
df = df.set_index('Date')
result = pivot_table(df, index=Grouper(freq='6MS', level='Date'),
columns='Buyer', values='Quantity',
aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index='Buyer',
columns=Grouper(freq='6MS', level='Date'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
self.assertRaises(ValueError, lambda: pivot_table(
df, index=Grouper(freq='6MS', level='foo'),
columns='Buyer', values='Quantity', aggfunc=np.sum))
self.assertRaises(ValueError, lambda: pivot_table(
df, index='Buyer',
columns=Grouper(freq='6MS', level='foo'),
values='Quantity', aggfunc=np.sum))
# double grouper
df = DataFrame({
'Branch': 'A A A A A A A B'.split(),
'Buyer': 'Carl Mark Carl Carl Joe Joe Joe Carl'.split(),
'Quantity': [1, 3, 5, 1, 8, 1, 9, 3],
'Date': [datetime(2013, 11, 1, 13, 0), datetime(2013, 9, 1, 13, 5),
datetime(2013, 10, 1, 20, 0),
datetime(2013, 10, 2, 10, 0),
datetime(2013, 11, 1, 20, 0),
datetime(2013, 10, 2, 10, 0),
datetime(2013, 10, 2, 12, 0),
datetime(2013, 12, 5, 14, 0)],
'PayDay': [datetime(2013, 10, 4, 0, 0),
datetime(2013, 10, 15, 13, 5),
datetime(2013, 9, 5, 20, 0),
datetime(2013, 11, 2, 10, 0),
datetime(2013, 10, 7, 20, 0),
datetime(2013, 9, 5, 10, 0),
datetime(2013, 12, 30, 12, 0),
datetime(2013, 11, 20, 14, 0), ]})
result = pivot_table(df, index=Grouper(freq='M', key='Date'),
columns=Grouper(freq='M', key='PayDay'),
values='Quantity', aggfunc=np.sum)
expected = DataFrame(np.array([np.nan, 3, np.nan, np.nan,
6, np.nan, 1, 9,
np.nan, 9, np.nan, np.nan, np.nan,
np.nan, 3, np.nan]).reshape(4, 4),
index=[datetime(2013, 9, 30),
datetime(2013, 10, 31),
datetime(2013, 11, 30),
datetime(2013, 12, 31)],
columns=[datetime(2013, 9, 30),
datetime(2013, 10, 31),
datetime(2013, 11, 30),
datetime(2013, 12, 31)])
expected.index.name = 'Date'
expected.columns.name = 'PayDay'
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=Grouper(freq='M', key='PayDay'),
columns=Grouper(freq='M', key='Date'),
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
tuples = [(datetime(2013, 9, 30), datetime(2013, 10, 31)),
(datetime(2013, 10, 31),
datetime(2013, 9, 30)),
(datetime(2013, 10, 31),
datetime(2013, 11, 30)),
(datetime(2013, 10, 31),
datetime(2013, 12, 31)),
(datetime(2013, 11, 30),
datetime(2013, 10, 31)),
(datetime(2013, 12, 31), datetime(2013, 11, 30)), ]
idx = MultiIndex.from_tuples(tuples, names=['Date', 'PayDay'])
expected = DataFrame(np.array([3, np.nan, 6, np.nan, 1, np.nan,
9, np.nan, 9, np.nan,
np.nan, 3]).reshape(6, 2),
index=idx, columns=['A', 'B'])
expected.columns.name = 'Branch'
result = pivot_table(
df, index=[Grouper(freq='M', key='Date'),
Grouper(freq='M', key='PayDay')], columns=['Branch'],
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=['Branch'],
columns=[Grouper(freq='M', key='Date'),
Grouper(freq='M', key='PayDay')],
values='Quantity', aggfunc=np.sum)
tm.assert_frame_equal(result, expected.T)
def test_pivot_datetime_tz(self):
dates1 = ['2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00',
'2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00']
dates2 = ['2013-01-01 15:00:00', '2013-01-01 15:00:00',
'2013-01-01 15:00:00',
'2013-02-01 15:00:00', '2013-02-01 15:00:00',
'2013-02-01 15:00:00']
df = DataFrame({'label': ['a', 'a', 'a', 'b', 'b', 'b'],
'dt1': dates1, 'dt2': dates2,
'value1': np.arange(6, dtype='int64'),
'value2': [1, 2] * 3})
df['dt1'] = df['dt1'].apply(lambda d: pd.Timestamp(d, tz='US/Pacific'))
df['dt2'] = df['dt2'].apply(lambda d: pd.Timestamp(d, tz='Asia/Tokyo'))
exp_idx = pd.DatetimeIndex(['2011-07-19 07:00:00',
'2011-07-19 08:00:00',
'2011-07-19 09:00:00'],
tz='US/Pacific', name='dt1')
exp_col1 = Index(['value1', 'value1'])
exp_col2 = Index(['a', 'b'], name='label')
exp_col = MultiIndex.from_arrays([exp_col1, exp_col2])
expected = DataFrame([[0, 3], [1, 4], [2, 5]],
index=exp_idx, columns=exp_col)
result = pivot_table(df, index=['dt1'], columns=[
'label'], values=['value1'])
tm.assert_frame_equal(result, expected)
exp_col1 = Index(['sum', 'sum', 'sum', 'sum',
'mean', 'mean', 'mean', 'mean'])
exp_col2 = Index(['value1', 'value1', 'value2', 'value2'] * 2)
exp_col3 = pd.DatetimeIndex(['2013-01-01 15:00:00',
'2013-02-01 15:00:00'] * 4,
tz='Asia/Tokyo', name='dt2')
exp_col = MultiIndex.from_arrays([exp_col1, exp_col2, exp_col3])
expected = DataFrame(np.array([[0, 3, 1, 2, 0, 3, 1, 2],
[1, 4, 2, 1, 1, 4, 2, 1],
[2, 5, 1, 2, 2, 5, 1, 2]],
dtype='int64'),
index=exp_idx,
columns=exp_col)
result = pivot_table(df, index=['dt1'], columns=['dt2'],
values=['value1', 'value2'],
aggfunc=[np.sum, np.mean])
tm.assert_frame_equal(result, expected)
def test_pivot_dtaccessor(self):
# GH 8103
dates1 = ['2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00',
'2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00']
dates2 = ['2013-01-01 15:00:00', '2013-01-01 15:00:00',
'2013-01-01 15:00:00',
'2013-02-01 15:00:00', '2013-02-01 15:00:00',
'2013-02-01 15:00:00']
df = DataFrame({'label': ['a', 'a', 'a', 'b', 'b', 'b'],
'dt1': dates1, 'dt2': dates2,
'value1': np.arange(6, dtype='int64'),
'value2': [1, 2] * 3})
df['dt1'] = df['dt1'].apply(lambda d: pd.Timestamp(d))
df['dt2'] = df['dt2'].apply(lambda d: pd.Timestamp(d))
result = pivot_table(df, index='label', columns=df['dt1'].dt.hour,
values='value1')
exp_idx = Index(['a', 'b'], name='label')
expected = DataFrame({7: [0, 3], 8: [1, 4], 9: [2, 5]},
index=exp_idx,
columns=Index([7, 8, 9], name='dt1'))
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=df['dt2'].dt.month,
columns=df['dt1'].dt.hour,
values='value1')
expected = DataFrame({7: [0, 3], 8: [1, 4], 9: [2, 5]},
index=Index([1, 2], name='dt2'),
columns=Index([7, 8, 9], name='dt1'))
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=df['dt2'].dt.year.values,
columns=[df['dt1'].dt.hour, df['dt2'].dt.month],
values='value1')
exp_col = MultiIndex.from_arrays(
[[7, 7, 8, 8, 9, 9], [1, 2] * 3], names=['dt1', 'dt2'])
expected = DataFrame(np.array([[0, 3, 1, 4, 2, 5]], dtype='int64'),
index=[2013], columns=exp_col)
tm.assert_frame_equal(result, expected)
result = pivot_table(df, index=np.array(['X', 'X', 'X',
'X', 'Y', 'Y']),
columns=[df['dt1'].dt.hour, df['dt2'].dt.month],
values='value1')
expected = DataFrame(np.array([[0, 3, 1, np.nan, 2, np.nan],
[np.nan, np.nan, np.nan,
4, np.nan, 5]]),
index=['X', 'Y'], columns=exp_col)
tm.assert_frame_equal(result, expected)
def test_pivot_table_with_iterator_values(self):
# GH 12017
aggs = {'D': 'sum', 'E': 'mean'}
pivot_values_list = pd.pivot_table(
self.data, index=['A'], values=list(aggs.keys()), aggfunc=aggs,
)
pivot_values_keys = pd.pivot_table(
self.data, index=['A'], values=aggs.keys(), aggfunc=aggs,
)
tm.assert_frame_equal(pivot_values_keys, pivot_values_list)
agg_values_gen = (value for value in aggs.keys())
pivot_values_gen = pd.pivot_table(
self.data, index=['A'], values=agg_values_gen, aggfunc=aggs,
)
tm.assert_frame_equal(pivot_values_gen, pivot_values_list)
class TestCrosstab(tm.TestCase):
def setUp(self):
df = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
self.df = df.append(df, ignore_index=True)
def test_crosstab_single(self):
df = self.df
result = crosstab(df['A'], df['C'])
expected = df.groupby(['A', 'C']).size().unstack()
tm.assert_frame_equal(result, expected.fillna(0).astype(np.int64))
def test_crosstab_multiple(self):
df = self.df
result = crosstab(df['A'], [df['B'], df['C']])
expected = df.groupby(['A', 'B', 'C']).size()
expected = expected.unstack(
'B').unstack('C').fillna(0).astype(np.int64)
tm.assert_frame_equal(result, expected)
result = crosstab([df['B'], df['C']], df['A'])
expected = df.groupby(['B', 'C', 'A']).size()
expected = expected.unstack('A').fillna(0).astype(np.int64)
tm.assert_frame_equal(result, expected)
def test_crosstab_ndarray(self):
a = np.random.randint(0, 5, size=100)
b = np.random.randint(0, 3, size=100)
c = np.random.randint(0, 10, size=100)
df = DataFrame({'a': a, 'b': b, 'c': c})
result = crosstab(a, [b, c], rownames=['a'], colnames=('b', 'c'))
expected = crosstab(df['a'], [df['b'], df['c']])
tm.assert_frame_equal(result, expected)
result = crosstab([b, c], a, colnames=['a'], rownames=('b', 'c'))
expected = crosstab([df['b'], df['c']], df['a'])
tm.assert_frame_equal(result, expected)
# assign arbitrary names
result = crosstab(self.df['A'].values, self.df['C'].values)
self.assertEqual(result.index.name, 'row_0')
self.assertEqual(result.columns.name, 'col_0')
def test_crosstab_margins(self):
a = np.random.randint(0, 7, size=100)
b = np.random.randint(0, 3, size=100)
c = np.random.randint(0, 5, size=100)
df = DataFrame({'a': a, 'b': b, 'c': c})
result = crosstab(a, [b, c], rownames=['a'], colnames=('b', 'c'),
margins=True)
self.assertEqual(result.index.names, ('a',))
self.assertEqual(result.columns.names, ['b', 'c'])
all_cols = result['All', '']
exp_cols = df.groupby(['a']).size().astype('i8')
exp_cols = exp_cols.append(Series([len(df)], index=['All']))
exp_cols.name = ('All', '')
tm.assert_series_equal(all_cols, exp_cols)
all_rows = result.ix['All']
exp_rows = df.groupby(['b', 'c']).size().astype('i8')
exp_rows = exp_rows.append(Series([len(df)], index=[('All', '')]))
exp_rows.name = 'All'
exp_rows = exp_rows.reindex(all_rows.index)
exp_rows = exp_rows.fillna(0).astype(np.int64)
tm.assert_series_equal(all_rows, exp_rows)
def test_crosstab_pass_values(self):
a = np.random.randint(0, 7, size=100)
b = np.random.randint(0, 3, size=100)
c = np.random.randint(0, 5, size=100)
values = np.random.randn(100)
table = crosstab([a, b], c, values, aggfunc=np.sum,
rownames=['foo', 'bar'], colnames=['baz'])
df = DataFrame({'foo': a, 'bar': b, 'baz': c, 'values': values})
expected = df.pivot_table('values', index=['foo', 'bar'],
columns='baz', aggfunc=np.sum)
tm.assert_frame_equal(table, expected)
def test_crosstab_dropna(self):
# GH 3820
a = np.array(['foo', 'foo', 'foo', 'bar',
'bar', 'foo', 'foo'], dtype=object)
b = np.array(['one', 'one', 'two', 'one',
'two', 'two', 'two'], dtype=object)
c = np.array(['dull', 'dull', 'dull', 'dull',
'dull', 'shiny', 'shiny'], dtype=object)
res = crosstab(a, [b, c], rownames=['a'],
colnames=['b', 'c'], dropna=False)
m = MultiIndex.from_tuples([('one', 'dull'), ('one', 'shiny'),
('two', 'dull'), ('two', 'shiny')])
assert_equal(res.columns.values, m.values)
def test_categorical_margins(self):
# GH 10989
df = pd.DataFrame({'x': np.arange(8),
'y': np.arange(8) // 4,
'z': np.arange(8) % 2})
expected = pd.DataFrame([[1.0, 2.0, 1.5], [5, 6, 5.5], [3, 4, 3.5]])
expected.index = Index([0, 1, 'All'], name='y')
expected.columns = Index([0, 1, 'All'], name='z')
data = df.copy()
table = data.pivot_table('x', 'y', 'z', margins=True)
tm.assert_frame_equal(table, expected)
data = df.copy()
data.y = data.y.astype('category')
data.z = data.z.astype('category')
table = data.pivot_table('x', 'y', 'z', margins=True)
tm.assert_frame_equal(table, expected)
def test_crosstab_no_overlap(self):
# GS 10291
s1 = pd.Series([1, 2, 3], index=[1, 2, 3])
s2 = pd.Series([4, 5, 6], index=[4, 5, 6])
actual = crosstab(s1, s2)
expected = pd.DataFrame()
tm.assert_frame_equal(actual, expected)
if __name__ == '__main__':
    # Run this module's tests directly under nose: '-x' stops at the first
    # failure, '--pdb'/'--pdb-failure' drop into the debugger on error.
    import nose
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
                   exit=False)
| 44.479321 | 79 | 0.461091 |
556b0c9ed1615e1ad8a5f773d4d2054b90c2ddc5 | 582 | py | Python | strategies/strategy.py | math2001/pacman | 01c998bed3ca278e8dded30122e569a213a7b45a | [
"MIT"
] | null | null | null | strategies/strategy.py | math2001/pacman | 01c998bed3ca278e8dded30122e569a213a7b45a | [
"MIT"
] | null | null | null | strategies/strategy.py | math2001/pacman | 01c998bed3ca278e8dded30122e569a213a7b45a | [
"MIT"
] | null | null | null | from scene import Scene
class Strategy(Scene):
    """Base class for game strategies.

    A strategy mirrors the Scene interface (render, update, handle_event,
    done) without ever being *activated* as a scene; concrete strategies
    override whichever hooks they need. Default hooks are no-ops.
    """

    def __init__(self, tiles, pacman, ghosts):
        # Keep references to the world objects the strategy acts on.
        self.tiles, self.pacman, self.ghosts = tiles, pacman, ghosts

    def render(self, surface, rect, rfc):
        """Draw debug information (no-op by default)."""
        pass

    def update(self, ufc):
        """Advance the strategy by one update tick (no-op by default)."""
        pass

    def handle_event(self, e):
        """React to an incoming event (no-op by default)."""
        pass

    def done(self):
        """Cleanup hook (no-op by default)."""
        pass
501cfd68d25c8e9f148a4433514f6fcf3ac80d49 | 944 | py | Python | tests/test_tag_inheritance.py | phonkee/django-wrapper-tag | 61059e33c8c77ebfa2fd73db730777173d100fef | [
"MIT"
] | 1 | 2016-10-03T19:56:18.000Z | 2016-10-03T19:56:18.000Z | tests/test_tag_inheritance.py | phonkee/django-wrapper-tag | 61059e33c8c77ebfa2fd73db730777173d100fef | [
"MIT"
] | 7 | 2020-02-11T21:54:38.000Z | 2022-02-10T07:36:07.000Z | tests/test_tag_inheritance.py | phonkee/django-wrapper-tag | 61059e33c8c77ebfa2fd73db730777173d100fef | [
"MIT"
] | 1 | 2019-02-25T09:47:00.000Z | 2019-02-25T09:47:00.000Z | """
test_django-wrapper-tag
------------
Tests for `django-wrapper-tag` inheritance of tags module.
"""
from django.test import TestCase
from wrapper_tag import arguments
from wrapper_tag import tag
class TestInheritanceTag(TestCase):
    """Checks that wrapper_tag collects `arguments` across an inheritance chain."""

    def test_something(self):
        """A tag subclass must see Keyword arguments from all its ancestors."""
        # Two plain base classes, each declaring two Keyword arguments.
        class FirstTag(object):
            title11 = arguments.Keyword()
            title12 = arguments.Keyword()

        class SecondTag(FirstTag):
            title21 = arguments.Keyword()
            title22 = arguments.Keyword()

        # tag.Tag is mixed in here — presumably its metaclass is what
        # gathers the declared arguments; TODO confirm against wrapper_tag.
        class ThirdTag(SecondTag, tag.Tag):
            title31 = arguments.Keyword()
            title32 = arguments.Keyword()

        class FourthTag(ThirdTag):
            title41 = arguments.Keyword()
            title42 = arguments.Keyword()

            @classmethod
            def contribute_to_class(cls, name):
                pass

        # 4 classes x 2 Keyword attributes each = 8 collected arguments.
        self.assertEqual(len(FourthTag.arguments), 8, 'inheritance is probably not working')
| 25.513514 | 92 | 0.623941 |
84eb076aca21d9dab584791252be9b6bb4559627 | 8,184 | py | Python | uq_benchmark_2019/news/data_utils_from_hendrycks.py | yick2232/google-research | 99021ebda945e232abdcc592f2cea1375b3c84f7 | [
"Apache-2.0"
] | 11 | 2020-01-29T07:25:04.000Z | 2022-03-05T16:01:21.000Z | uq_benchmark_2019/news/data_utils_from_hendrycks.py | RubensZimbres/google-research | 562c7c6ef959cb3cb382b1b660ccc45e8f5289c4 | [
"Apache-2.0"
] | 13 | 2020-01-28T22:19:53.000Z | 2022-02-10T00:39:26.000Z | uq_benchmark_2019/news/data_utils_from_hendrycks.py | RubensZimbres/google-research | 562c7c6ef959cb3cb382b1b660ccc45e8f5289c4 | [
"Apache-2.0"
] | 2 | 2019-12-07T19:01:03.000Z | 2020-03-19T16:53:04.000Z | # coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for preprocessing text.
Functions are copied and modified based on
https://raw.githubusercontent.com/hendrycks/error-detection/master/NLP/Categorization/20%20Newsgroups.ipynb
by Dan Hendrycks
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
import numpy as np
import tensorflow as tf
# disable pylint for keeping the original code from Hendrycks.
# pylint: disable=bare-except
# pylint: disable=invalid-name
# pylint: disable=g-explicit-length-test
# pylint: disable=invalid-unary-operand-type
# pylint: disable=dangerous-default-value
def load_data(filename, stop_words=[]):
  """Load the raw dataset.

  Each line is expected to start with a one- or two-digit integer label
  followed by the document text. Non-word characters are squashed to single
  spaces, and any word appearing in `stop_words` is dropped (the default is
  to keep everything).

  Args:
    filename: path readable by tf.gfile.
    stop_words: words to remove from every example.

  Returns:
    (texts, labels) where texts is a list of cleaned strings and labels is
    an int numpy array.
  """
  texts = []
  labels = []
  with tf.gfile.Open(filename, 'r') as f:
    for line in f:
      line = re.sub(r'\W+', ' ', line).strip()
      # A space at position 1 means the label is a single digit.
      if line[1] == ' ':
        label, text = line[0], line[1:]
      else:
        label, text = line[:2], line[2:]
      texts.append(' '.join(w for w in text.split() if w not in stop_words))
      labels.append(label)
  return texts, np.array(labels, dtype=int)
def get_vocab(dataset):
  """Count words and build the vocabulary.

  Args:
    dataset: iterable of strings; each string is one whitespace-separated
      example.

  Returns:
    collections.OrderedDict mapping word -> count, ordered from most
    frequent to least frequent. Ties keep first-occurrence order (sorted()
    is stable and dict insertion order follows the corpus).
  """
  # Counter replaces the original two hand-written passes (one to zero the
  # dict, one to count) with a single counting pass.
  counts = collections.Counter(
      word for example in dataset for word in example.split())
  # Sort from greatest to least by count.
  return collections.OrderedDict(
      sorted(counts.items(), key=lambda item: item[1], reverse=True))
def text_to_rank(dataset, _vocab, desired_vocab_size=15000):
  """Encode words to ids.

  Args:
    dataset: the text from load_data
    _vocab: a _ordered_ dictionary of vocab words and counts from get_vocab
    desired_vocab_size: the desired vocabulary size. words no longer in vocab
      become unk

  Returns:
    the text corpus with words mapped to their vocab rank, with all
    sufficiently infrequent (or unknown) words mapped to unk; unk has rank
    desired_vocab_size. (the infrequent word cutoff is determined by
    desired_vocab_size)
  """
  unk = desired_vocab_size  # id of the UUUNNNKKK token
  vocab_ordered = list(_vocab)
  # Count of the last word that still fits in the desired vocabulary: any
  # word occurring less often is mapped to unk, and the >= test below also
  # demotes words tied with this count but ranked past the cutoff.
  count_cutoff = _vocab[vocab_ordered[desired_vocab_size - 1]]
  # Ranks start at 1 so that id 0 stays free for a future padding symbol.
  word_to_rank = {word: i + 1 for i, word in enumerate(vocab_ordered)}

  encoded = []
  for example in dataset:
    ids = []
    for word in example.split():
      rank = word_to_rank.get(word)
      # A word keeps its rank only if it is frequent enough *and* its rank
      # fits inside the desired vocabulary; everything else becomes unk.
      # (This replaces the original bare `except:` fallback, which silently
      # swallowed every exception just to handle out-of-vocabulary words.)
      if rank is not None and _vocab[word] >= count_cutoff and rank < unk:
        ids.append(rank)
      else:
        ids.append(unk)
    encoded.append(ids)
  return encoded
def pad_sequences(sequences,
                  maxlen=None,
                  dtype='int32',
                  padding='pre',
                  truncating='pre',
                  value=0.):
  """Pad (and/or truncate) every sequence to a common length.

  When maxlen is given, longer sequences are truncated to maxlen, cutting
  either the beginning ('pre', default) or the end ('post'). Shorter
  sequences are padded with `value`, again either before ('pre', default)
  or after ('post') the data. Empty sequences come out as all-padding rows.

  Args:
    sequences: list of lists where each element is a sequence
    maxlen: int, maximum length; defaults to the longest sequence
    dtype: type to cast the resulting array to
    padding: 'pre' or 'post', pad before or after each sequence
    truncating: 'pre' or 'post', drop the beginning or end of long sequences
    value: float, the padding value

  Returns:
    numpy array with dimensions (number_of_sequences, maxlen).
  """
  num_samples = len(sequences)
  if maxlen is None:
    maxlen = np.max([len(seq) for seq in sequences])

  # Infer the per-timestep shape from the first non-empty sequence; the
  # main loop below verifies every other sequence is consistent with it.
  sample_shape = tuple()
  for seq in sequences:
    if len(seq) > 0:
      sample_shape = np.asarray(seq).shape[1:]
      break

  out = np.full((num_samples, maxlen) + sample_shape, value).astype(dtype)
  for idx, seq in enumerate(sequences):
    if len(seq) == 0:
      continue  # empty list: row stays entirely padding
    if truncating == 'pre':
      kept = seq[-maxlen:]
    elif truncating == 'post':
      kept = seq[:maxlen]
    else:
      raise ValueError('Truncating type "%s" not understood' % truncating)

    # Check that `kept` has the expected per-timestep shape.
    kept = np.asarray(kept, dtype=dtype)
    if kept.shape[1:] != sample_shape:
      raise ValueError(
          'Shape of sample %s of sequence at position %s is different from expected shape %s'
          % (kept.shape[1:], idx, sample_shape))

    if padding == 'post':
      out[idx, :len(kept)] = kept
    elif padding == 'pre':
      out[idx, -len(kept):] = kept
    else:
      raise ValueError('Padding type "%s" not understood' % padding)
  return out
def partion_data_in_two(dataset, dataset_labels, in_sample_labels, oos_labels):
  """Split a dataset into in-distribution and out-of-sample parts by label.

  (Name kept as-is, typo included, for backwards compatibility.)

  Args:
    dataset: the text from text_to_rank (indexable by a boolean numpy mask)
    dataset_labels: numpy int array of labels, aligned with dataset
    in_sample_labels: labels the network will/did train on
    oos_labels: the complement of in_sample_labels (never-seen classes)

  Returns:
    (in_sample_examples, in_sample_labels, oos_examples, oos_labels)
  """
  data = dataset[:]  # aliasing safeguard
  labels = dataset_labels
  # Boolean masks selecting rows whose label belongs to each label set;
  # np.isin replaces the original per-label logical_or accumulation.
  in_mask = np.isin(labels, list(in_sample_labels))
  oos_mask = np.isin(labels, list(oos_labels))
  return data[in_mask], labels[in_mask], data[oos_mask], labels[oos_mask]
# our network trains only on a subset of classes, say 6, but class number 7
# might still be an in-sample label: we need to squish the labels to be in
# {0,...,5}
def relabel_in_sample_labels(labels):
  """Densely relabel in-distribution labels, e.g. 1,3,5 -> 0,1,2.

  Args:
    labels: 1-D numpy array of integer class labels.

  Returns:
    numpy int array of the same shape where each label is replaced by its
    rank among the sorted distinct labels.
  """
  # Build the label -> rank mapping once instead of calling list.index()
  # for every element (the original was O(n * k) in n labels, k classes).
  label_to_rank = {
      label: rank
      for rank, label in enumerate(sorted(set(labels.tolist())))
  }
  relabeled = np.zeros(labels.shape, dtype=int)
  for i, label in enumerate(labels.tolist()):
    relabeled[i] = label_to_rank[label]
  return relabeled
| 33.540984 | 107 | 0.690982 |
d5c6102251eb6a1130e03d5b62e5b58fe34d8741 | 1,872 | py | Python | src/predict_video.py | irfanmustafas/TeethClassifierCNN | 8c58b50162b3f9eb7f12251cbca9fcbd4d6c43b7 | [
"MIT"
] | 1 | 2018-12-05T01:49:54.000Z | 2018-12-05T01:49:54.000Z | src/predict_video.py | irfanmustafas/TeethClassifierCNN | 8c58b50162b3f9eb7f12251cbca9fcbd4d6c43b7 | [
"MIT"
] | null | null | null | src/predict_video.py | irfanmustafas/TeethClassifierCNN | 8c58b50162b3f9eb7f12251cbca9fcbd4d6c43b7 | [
"MIT"
] | null | null | null | import numpy as np
import sys
import caffe
import glob
import uuid
import cv2
from util import transform_img
from mouth_detector_dlib import mouth_detector
from caffe.proto import caffe_pb2
import os
import shutil
from util import histogram_equalization
import math
from teeth_cnn import teeth_cnn
# --- Camera setup ---------------------------------------------------------
cv2.namedWindow("preview")
vc = cv2.VideoCapture(0)
# Property ids 3/4 are presumably frame width/height
# (CAP_PROP_FRAME_WIDTH/HEIGHT) -- TODO confirm; requests a 200x200 capture.
vc.set(3,200)
vc.set(4,200)
#vc.set(5,100)
if vc.isOpened(): # try to get the first frame
    rval, frame = vc.read()
else:
    rval = False
# Pipeline objects: dlib-based mouth localization feeding the teeth CNN.
mouth_detector_instance = mouth_detector()
teeth_cnn_instance = teeth_cnn()
# Text metrics and an anchor point; computed but never used below.
size = cv2.getTextSize("Showing teeth", cv2.FONT_HERSHEY_PLAIN, 2, 1)[0]
x,y = (50,250)
# --- Main loop: classify each captured frame and draw an overlay ----------
while rval:
    rval, frame = vc.read()
    copy_frame = frame.copy()
    # predict() returns the class label, probabilities and the detected
    # mouth bounding box corners (xf, yf)-(wf, hf).
    result,prob,xf,yf,wf,hf = teeth_cnn_instance.predict(copy_frame,mouth_detector_instance)
    print prob
    if result is not None:
        if(result == 1):
            # Positive ("teeth") class: green box plus probability banner.
            # prob is indexed [0][1] -- presumably [batch][class]; confirm.
            cv2.rectangle(frame, (xf,yf),(wf,hf),(0,255,0),4,0)
            prob_round = prob[0][1]*100
            print prob_round
            cv2.rectangle(frame, (xf-2,yf-25),(wf+2,yf),(0,255,0),-1,0)
            cv2.rectangle(frame, (xf-2,hf),(xf+((wf-xf)/2),hf+25),(0,255,0),-1,0)
            cv2.putText(frame, "Teeth!!",(xf,hf+14),cv2.FONT_HERSHEY_PLAIN,1.2,0,2)
            cv2.putText(frame, str(prob_round)+"%",(xf,yf-10),cv2.FONT_HERSHEY_PLAIN,1.2,0,2)
            #out.write(frame)
            print "SHOWING TEETH!!!"
        elif(result==0):
            # Negative class: same overlay drawn in gray.
            cv2.rectangle(frame, (xf,yf),(wf,hf),(64,64,64),4,0)
            prob_round = prob[0][1]*100
            print prob_round
            cv2.rectangle(frame, (xf-2,yf-25),(wf+2,yf),(64,64,64),-1,0)
            cv2.rectangle(frame, (xf-2,hf),(xf+((wf-xf)/2),hf+25),(64,64,64),-1,0)
            cv2.putText(frame, "Teeth??",(xf,hf+14),cv2.FONT_HERSHEY_PLAIN,1.2,0,2)
            cv2.putText(frame, str(prob_round)+"%",(xf,yf-10),cv2.FONT_HERSHEY_PLAIN,1.2,0,2)
    cv2.imshow("preview", frame)
    cv2.waitKey(1)
cv2.destroyWindow("preview")
12c88281619be7d6995b19fa1aa88957efe8eeb7 | 4,588 | py | Python | contrib/testgen/gen_base58_test_vectors.py | ROZ-MOFUMOFU-ME/susucoin | af7616171f814313f8bdea43a3186ffaec9770f8 | [
"MIT"
] | 15 | 2018-06-12T07:48:13.000Z | 2020-01-17T10:10:56.000Z | contrib/testgen/gen_base58_test_vectors.py | ROZ-MOFUMOFU-ME/susucoin | af7616171f814313f8bdea43a3186ffaec9770f8 | [
"MIT"
] | 12 | 2018-06-11T16:13:59.000Z | 2018-12-19T15:31:12.000Z | contrib/testgen/gen_base58_test_vectors.py | ROZ-MOFUMOFU-ME/susucoin | af7616171f814313f8bdea43a3186ffaec9770f8 | [
"MIT"
] | 16 | 2018-06-26T21:56:40.000Z | 2020-12-21T15:59:02.000Z | #!/usr/bin/env python3
# Copyright (c) 2012-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Generate valid and invalid base58 address and private key test vectors.
Usage:
gen_base58_test_vectors.py valid 50 > ../../src/test/data/base58_keys_valid.json
gen_base58_test_vectors.py invalid 50 > ../../src/test/data/base58_keys_invalid.json
'''
# 2012 Wladimir J. van der Laan
# Released under MIT License
import os
from itertools import islice
from base58 import b58encode_chk, b58decode_chk, b58chars
import random
from binascii import b2a_hex
# Key-type version bytes: each constant is the single prefix byte placed in
# front of the payload before Base58Check encoding (see `templates` below).
PUBKEY_ADDRESS = 63
SCRIPT_ADDRESS = 5
PUBKEY_ADDRESS_TEST = 114
SCRIPT_ADDRESS_TEST = 192
PRIVKEY = 128
PRIVKEY_TEST = 231

# Keys of the metadata object emitted for each valid vector; paired
# positionally with the 4th element of every template below.
metadata_keys = ['isPrivkey', 'isTestnet', 'addrType', 'isCompressed']
# templates for valid sequences
templates = [
    # prefix, payload_size, suffix, metadata
    # None = N/A
    ((PUBKEY_ADDRESS,), 20, (), (False, False, 'pubkey', None)),
    ((SCRIPT_ADDRESS,), 20, (), (False, False, 'script', None)),
    ((PUBKEY_ADDRESS_TEST,), 20, (), (False, True, 'pubkey', None)),
    ((SCRIPT_ADDRESS_TEST,), 20, (), (False, True, 'script', None)),
    ((PRIVKEY,), 32, (), (True, False, None, False)),
    # The trailing 0x01 suffix corresponds to isCompressed=True keys.
    ((PRIVKEY,), 32, (1,), (True, False, None, True)),
    ((PRIVKEY_TEST,), 32, (), (True, True, None, False)),
    ((PRIVKEY_TEST,), 32, (1,), (True, True, None, True))
]
def is_valid(v):
    '''Return True if v Base58Check-decodes and matches one known template'''
    decoded = b58decode_chk(v)
    if decoded is None:
        return False  # bad encoding or checksum
    for prefix_bytes, payload_size, suffix_bytes, _ in templates:
        prefix = bytearray(prefix_bytes)
        suffix = bytearray(suffix_bytes)
        ends_match = decoded.startswith(prefix) and decoded.endswith(suffix)
        if ends_match and len(decoded) == len(prefix) + payload_size + len(suffix):
            return True
    return False
def gen_valid_vectors():
    '''Endlessly yield (base58, payload_hex, metadata) triples that must decode'''
    while True:
        for prefix_bytes, payload_size, suffix_bytes, meta_values in templates:
            prefix = bytearray(prefix_bytes)
            payload = bytearray(os.urandom(payload_size))
            suffix = bytearray(suffix_bytes)
            encoded = b58encode_chk(prefix + payload + suffix)
            assert is_valid(encoded)
            # Drop metadata fields marked None (= not applicable).
            metadata = {key: val
                        for key, val in zip(metadata_keys, meta_values)
                        if val is not None}
            hexrepr = b2a_hex(payload)
            if isinstance(hexrepr, bytes):
                hexrepr = hexrepr.decode('utf8')
            yield (encoded, hexrepr, metadata)
def gen_invalid_vector(template, corrupt_prefix, randomize_payload_size, corrupt_suffix):
    '''Encode a vector from template, optionally corrupting each part'''
    # Each flag swaps the templated part for random bytes / a random length.
    prefix = os.urandom(1) if corrupt_prefix else bytearray(template[0])
    payload = (os.urandom(max(int(random.expovariate(0.5)), 50))
               if randomize_payload_size else os.urandom(template[1]))
    suffix = (os.urandom(len(template[2]))
              if corrupt_suffix else bytearray(template[2]))
    return b58encode_chk(prefix + payload + suffix)
def randbool(p = 0.5):
    '''Bernoulli draw: True with probability p'''
    draw = random.random()
    return draw < p
def gen_invalid_vectors():
    '''Yield 1-tuples of strings that must NOT validate'''
    # Hand-picked edge cases first: empty string and a single junk char.
    yield ("",)
    yield ("x",)
    # Then endless randomized corruption: invalid prefix, invalid payload
    # length, randomized suffix, corrupt checksum, or a single-character
    # line corruption applied on top.
    while True:
        for template in templates:
            candidate = gen_invalid_vector(
                template, randbool(0.2), randbool(0.2), randbool(0.2))
            if random.randint(0, 10) < 1:
                # Line corruption: append or substitute one base58 char.
                if randbool():
                    candidate += random.choice(b58chars)
                else:
                    pos = random.randint(0, len(candidate))
                    candidate = (candidate[0:pos] + random.choice(b58chars) +
                                 candidate[pos+1:])
            # The corruption is probabilistic, so double-check before
            # labelling the vector invalid.
            if not is_valid(candidate):
                yield (candidate,)
if __name__ == '__main__':
    import sys
    import json
    iters = {'valid':gen_valid_vectors, 'invalid':gen_invalid_vectors}
    # argv[1] selects the generator; only a *missing* argument falls back to
    # 'valid' (an unrecognized mode would raise an uncaught KeyError).
    try:
        uiter = iters[sys.argv[1]]
    except IndexError:
        uiter = gen_valid_vectors
    # argv[2] is the number of vectors; defaults to 0 (empty output).
    try:
        count = int(sys.argv[2])
    except IndexError:
        count = 0
    # Both generators are infinite, so islice bounds the output.
    data = list(islice(uiter(), count))
    json.dump(data, sys.stdout, sort_keys=True, indent=4)
    sys.stdout.write('\n')
| 34.496241 | 91 | 0.613775 |
5d6d32960d714d1de9f90a675ef231f91d0cfb79 | 443 | py | Python | catkin_ws/devel/lib/python2.7/dist-packages/carla_msgs/msg/__init__.py | udeto/carla_autoware_final | 479e53f916be1cffa2524eb854bc3d1c47471bc1 | [
"MIT"
] | 2 | 2021-02-03T07:22:12.000Z | 2021-06-24T15:10:52.000Z | catkin_ws/devel/lib/python2.7/dist-packages/carla_msgs/msg/__init__.py | udeto/carla_autoware_final | 479e53f916be1cffa2524eb854bc3d1c47471bc1 | [
"MIT"
] | null | null | null | catkin_ws/devel/lib/python2.7/dist-packages/carla_msgs/msg/__init__.py | udeto/carla_autoware_final | 479e53f916be1cffa2524eb854bc3d1c47471bc1 | [
"MIT"
] | 2 | 2020-08-24T09:16:31.000Z | 2020-12-08T06:18:23.000Z | from ._CarlaActorInfo import *
from ._CarlaActorList import *
from ._CarlaCollisionEvent import *
from ._CarlaControl import *
from ._CarlaEgoVehicleControl import *
from ._CarlaEgoVehicleInfo import *
from ._CarlaEgoVehicleInfoWheel import *
from ._CarlaEgoVehicleStatus import *
from ._CarlaLaneInvasionEvent import *
from ._CarlaMapInfo import *
from ._CarlaStatus import *
from ._CarlaWalkerControl import *
from ._CarlaWorldInfo import *
| 31.642857 | 40 | 0.823928 |
ab24042197de9bf802d95aafe8d8b0ab4bc00138 | 60 | py | Python | Baisc_Calculator/Square.py | cy275/Statistics_Calculator | c98dec271df98465a87180a170a786bcf817800c | [
"MIT"
] | null | null | null | Baisc_Calculator/Square.py | cy275/Statistics_Calculator | c98dec271df98465a87180a170a786bcf817800c | [
"MIT"
] | null | null | null | Baisc_Calculator/Square.py | cy275/Statistics_Calculator | c98dec271df98465a87180a170a786bcf817800c | [
"MIT"
] | null | null | null | def square(a):
a = float(a)
b = a ** 2
return b
| 12 | 16 | 0.45 |
10792c775ae1f307f700a0520e067d2049c12046 | 2,811 | py | Python | model-optimizer/extensions/middle/TensorIteratorOutput_test.py | JOCh1958/openvino | 070201feeec5550b7cf8ec5a0ffd72dc879750be | [
"Apache-2.0"
] | 1 | 2021-04-06T03:32:12.000Z | 2021-04-06T03:32:12.000Z | model-optimizer/extensions/middle/TensorIteratorOutput_test.py | JOCh1958/openvino | 070201feeec5550b7cf8ec5a0ffd72dc879750be | [
"Apache-2.0"
] | 28 | 2021-09-24T09:29:02.000Z | 2022-03-28T13:20:46.000Z | model-optimizer/extensions/middle/TensorIteratorOutput_test.py | JOCh1958/openvino | 070201feeec5550b7cf8ec5a0ffd72dc879750be | [
"Apache-2.0"
] | 1 | 2020-08-30T11:48:03.000Z | 2020-08-30T11:48:03.000Z | # Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import unittest
import numpy as np
from extensions.middle.TensorIteratorOutput import SmartOutputMatcher
from mo.utils.ir_engine.compare_graphs import compare_graphs
from mo.utils.unittest.graph import build_graph_with_attrs
class SmartOutputMatcherTests(unittest.TestCase):
def test(self):
pattern_matcher = SmartOutputMatcher()
pattern = pattern_matcher.pattern()
graph = build_graph_with_attrs(nodes_with_attrs=pattern['nodes'], edges_with_attrs=pattern['edges'],
# update_edge_attrs=None,
new_nodes_with_attrs=[('index', {'kind': 'data'}),
('value', {'kind': 'data'}),
('ta_size', {'kind': 'data'}),
],
new_edges_with_attrs=[('index', 'TensorArrayWrite', {'in':1}),
('value', 'TensorArrayWrite', {'in': 2}),
('ta_size', 'TensorArray')
],
update_nodes_attributes=[('WriteEnter_data', {'value': np.array([1, 1])}),
('start_data', {'value': np.array([0])}),
('delta_data', {'value': np.array([1])}),
])
pattern_matcher.find_and_replace_pattern(graph)
graph_ref = build_graph_with_attrs(
nodes_with_attrs=[
('TensorIteratorOutput', {'kind': 'op', 'op': 'TensorIteratorOutput'}),
('TensorArrayGather_data', {'kind': 'data'}),
('index', {'kind': 'data'}),
('value', {'kind': 'data'}),
('ta_size', {'kind': 'data'}), ],
edges_with_attrs=[('ta_size', 'TensorIteratorOutput', {'in': 0}),
('index', 'TensorIteratorOutput', {'in': 2}),
('value', 'TensorIteratorOutput', {'in': 1}),
('TensorIteratorOutput', 'TensorArrayGather_data')],
update_edge_attrs=None,
new_nodes_with_attrs=[],
new_edges_with_attrs=[],
)
(flag, resp) = compare_graphs(graph, graph_ref, 'TensorArrayGather_data', check_op_attrs=True)
self.assertTrue(flag, resp) | 54.057692 | 113 | 0.44148 |
a0823d14952a5496bef78662882645d6421894ad | 4,242 | py | Python | authorized_keys/management/commands/expireusers.py | tzwenn/moalinna | 5723b773325452162a719870f27ea0db1ebf0b5a | [
"MIT"
] | 2 | 2021-01-09T09:56:40.000Z | 2021-01-25T14:34:58.000Z | authorized_keys/management/commands/expireusers.py | tzwenn/moalinna | 5723b773325452162a719870f27ea0db1ebf0b5a | [
"MIT"
] | 6 | 2021-01-09T10:31:06.000Z | 2021-02-03T09:51:35.000Z | authorized_keys/management/commands/expireusers.py | tzwenn/moalinna | 5723b773325452162a719870f27ea0db1ebf0b5a | [
"MIT"
] | 1 | 2021-01-11T05:54:57.000Z | 2021-01-11T05:54:57.000Z | from django.conf import settings
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand
from abc import ABCMeta, abstractmethod
import datetime
import inspect
import logging
import ldap3
logger = logging.getLogger(__name__)
class UserDirectory(object, metaclass=ABCMeta):
"""Abstract base class for a user lookup backend"""
@abstractmethod
def all_users(self):
"""Yields tuples (uid, is_expired, date)"""
pass
@abstractmethod
def is_expired(self, uid):
"""Is the User with this uid expired or non-existent?"""
pass
def __iter__(self):
return self.all_users()
class LDAP(UserDirectory):
"""Check if users are expired via LDAP. Highly customized, definitely requires adjustment"""
# On my particular LDAP system multiple values are used for non-expiering accounts
# You may want to extend this list
_eternal_dates = [
None,
datetime.datetime(1601, 1, 1, 0, 0, tzinfo=datetime.timezone.utc),
datetime.datetime(9999, 12, 31, 23, 59, 59, 999999, tzinfo=datetime.timezone.utc),
]
def __init__(self):
super().__init__()
self.server = ldap3.Server(settings.LDAP_SERVER, use_ssl=settings.LDAP_USE_SSL)
self.conn = ldap3.Connection(self.server, user=settings.LDAP_USER, password=settings.LDAP_PASSWORD,
authentication=settings.LDAP_AUTH_METHOD, client_strategy='SYNC',
auto_referrals=True, check_names=True)
if settings.LDAP_USE_TLS:
self.conn.start_tls()
if not self.conn.bind():
raise Exception("Could not bind LDAP connection: {}".format(self.conn.result))
def _is_date_expired(self, expirationDate):
return expirationDate not in self._eternal_dates and expirationDate < datetime.datetime.now(datetime.timezone.utc)
def all_users(self):
paged_entries = self.conn.extend.standard.paged_search(
settings.LDAP_SEARCH_BASE, '(uid=*)',
attributes=['uid', 'accountExpires'])
for entry_dict in paged_entries:
attributes = entry_dict['attributes']
# uid is a list. Here, we cover None and empty lists
if not attributes.get('uid'):
continue
date = attributes.get('accountExpires')
yield attributes['uid'][0], self._is_date_expired(date), date
def is_expired(self, uid):
search_pattern = '(uid={})'.format(ldap3.utils.conv.escape_filter_chars(uid))
if not self.conn.search(settings.LDAP_SEARCH_BASE, search_pattern, attributes=['accountExpires']):
return True
entry = self.conn.entries[0]
return 'accountExpires' in entry and self._is_date_expired(entry.accountExpires.value)
class Command(BaseCommand):
help = 'Marks expired users accounts as inactive'
requires_migrations_checks = True
def deactivate_user(self, user, reason):
logger.warning('Deactivate user {} ({})'.format(user.username, reason))
if not self.dryrun:
user.is_active = False
user.save()
def fetch_and_expire(self, directory):
# For reducing traffic to directory and DB, we so a local join
global_users = {uid: (is_expired, date) for uid, is_expired, date in directory}
if not global_users:
logger.warning("Empty directory fetched. Not expiring anyone.")
return
logger.info("Matching agains {} users in directory".format(len(global_users)))
# Superusers cannot expire. Staff can, if option set
local_users = User.objects.filter(is_active=True,is_superuser=False)
if not self.expire_staff:
local_users = local_users.filter(is_staff=False)
for user in local_users:
if user.username not in global_users:
self.deactivate_user(user, 'Not found in global directory')
elif global_users[user.username][0]:
self.deactivate_user(user, 'Expired in global directory on {}'.format(global_users[user.username][1]))
def add_arguments(self, parser):
parser.add_argument('-d', '--dryrun', action='store_true',
help='show which users are expired, but do not apply change')
parser.add_argument('--expire-staff', action='store_true',
help='allow to deactivate users with staff status in django')
def handle(self, *args, **options):
self.dryrun = options["dryrun"]
self.expire_staff = options["expire_staff"]
self.fetch_and_expire(LDAP())
| 35.057851 | 116 | 0.724422 |
5cb62278c581f3ea53bffb022e92a7cb04ab9413 | 30,957 | bzl | Python | packages/typescript/index.bzl | jakebiesinger-storyhealth/rules_nodejs | 8df86ccb799e4f9f3c4b26174f09b58a89ef3639 | [
"Apache-2.0"
] | null | null | null | packages/typescript/index.bzl | jakebiesinger-storyhealth/rules_nodejs | 8df86ccb799e4f9f3c4b26174f09b58a89ef3639 | [
"Apache-2.0"
] | null | null | null | packages/typescript/index.bzl | jakebiesinger-storyhealth/rules_nodejs | 8df86ccb799e4f9f3c4b26174f09b58a89ef3639 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Public API surface is re-exported here.
Users should not load files under "/internal"
"""
load("@build_bazel_rules_nodejs//internal/node:node.bzl", "nodejs_binary")
load("//packages/typescript/internal:ts_config.bzl", "write_tsconfig", _ts_config = "ts_config")
load("//packages/typescript/internal:ts_project.bzl", _ts_project = "ts_project")
load("//packages/typescript/internal:tslib.bzl", _lib = "lib")
load("//packages/typescript/internal:validate_options.bzl", "validate_options")
load("@build_bazel_rules_nodejs//:index.bzl", "js_library")
load("@bazel_skylib//lib:partial.bzl", "partial")
load("@bazel_skylib//rules:build_test.bzl", "build_test")
# If adding rules here also add to index.docs.bzl
ts_config = _ts_config
# Copied from aspect_bazel_lib
# https://github.com/aspect-build/bazel-lib/blob/main/lib/private/utils.bzl#L73-L82
# TODO(6.0): depend on that library and remove this copy
def _to_label(param):
    """Converts a string to a Label. If Label is supplied, the same label is returned.

    Args:
        param: a string representing a label or a Label

    Returns:
        a Label
    """
    param_type = type(param)
    if param_type == "string":
        # Absolute labels: "@repo//pkg:target" parses directly; a bare
        # "//pkg:target" is anchored to the current workspace first.
        if param.startswith("@"):
            return Label(param)
        if param.startswith("//"):
            return Label("@" + param)

        # resolve the relative label from the current package
        # if 'param' is in another workspace, then this would return the label relative to that workspace, eg:
        # `Label("@my//foo:bar").relative("@other//baz:bill") == Label("@other//baz:bill")`
        if param.startswith(":"):
            param = param[1:]

        if native.package_name():
            return Label("@//" + native.package_name()).relative(param)
        else:
            # At the repository root there is no package to resolve against.
            return Label("@//:" + param)
    elif param_type == "Label":
        return param
    else:
        fail("Expected 'string' or 'Label' but got '%s'" % param_type)
# copied from aspect_bazel_lib, see comment above
def _is_external_label(param):
    """Returns True if the given Label (or stringy version of a label) represents a target outside of the workspace

    Args:
        param: a string or label

    Returns:
        a bool
    """
    # Labels in the main workspace resolve to an empty workspace_root, so a
    # non-empty value means the label points into an external repository.
    return len(_to_label(param).workspace_root) > 0
_DEFAULT_TYPESCRIPT_PACKAGE = (
# BEGIN-INTERNAL
"@npm" +
# END-INTERNAL
"//typescript"
)
def ts_project(
        name = "tsconfig",
        tsconfig = None,
        srcs = None,
        args = [],
        deps = [],
        extends = None,
        allow_js = False,
        declaration = False,
        source_map = False,
        declaration_map = False,
        resolve_json_module = None,
        preserve_jsx = False,
        composite = False,
        incremental = False,
        emit_declaration_only = False,
        transpiler = None,
        ts_build_info_file = None,
        tsc = None,
        typescript_package = _DEFAULT_TYPESCRIPT_PACKAGE,
        typescript_require_path = "typescript",
        validate = True,
        supports_workers = False,
        declaration_dir = None,
        out_dir = None,
        root_dir = None,
        link_workspace_root = False,
        **kwargs):
    """Compiles one TypeScript project using `tsc --project`

    This is a drop-in replacement for the `tsc` rule automatically generated for the "typescript"
    package, typically loaded from `@npm//typescript:index.bzl`. Unlike bare `tsc`, this rule understands
    the Bazel interop mechanism (Providers) so that this rule works with others that produce or consume
    TypeScript typings (`.d.ts` files).

    Unlike `ts_library`, this rule is the thinnest possible layer of Bazel interoperability on top
    of the TypeScript compiler. It shifts the burden of configuring TypeScript into the tsconfig.json file.
    See https://github.com/bazelbuild/rules_nodejs/blob/master/docs/TypeScript.md#alternatives
    for more details about the trade-offs between the two rules.

    Some TypeScript options affect which files are emitted, and Bazel wants to know these ahead-of-time.
    So several options from the tsconfig file must be mirrored as attributes to ts_project.
    See https://www.typescriptlang.org/v2/en/tsconfig for a listing of the TypeScript options.

    Any code that works with `tsc` should work with `ts_project` with a few caveats:

    - `ts_project` always produces some output files, or else Bazel would never run it.
      Therefore you shouldn't use it with TypeScript's `noEmit` option.
      See `tsc_test` under the Alternatives section above.
    - Bazel requires that the `outDir` (and `declarationDir`) be set to
      `bazel-out/[target architecture]/bin/path/to/package`
      so we override whatever settings appear in your tsconfig.
    - Bazel expects that each output is produced by a single rule.
      Thus if you have two `ts_project` rules with overlapping sources (the same `.ts` file
      appears in more than one) then you get an error about conflicting `.js` output
      files if you try to build both together.
      Worse, if you build them separately then the output directory will contain whichever
      one you happened to build most recently. This is highly discouraged.

    As a thin wrapper, this rule doesn't try to compensate for behavior of the TypeScript compiler.
    See https://github.com/bazelbuild/rules_nodejs/wiki/Debugging-problems-with-ts_project for notes
    that may help you debug issues.

    > Note: in order for TypeScript to resolve relative references to the bazel-out folder,
    > we recommend that the base tsconfig contain a rootDirs section that includes all
    > possible locations they may appear.
    >
    > We hope this will not be needed in some future release of TypeScript.
    > Follow https://github.com/microsoft/TypeScript/issues/37378 for more info.
    >
    > For example, if the base tsconfig file relative to the workspace root is
    > `path/to/tsconfig.json` then you should configure like:
    >
    > ```
    > "compilerOptions": {
    >     "rootDirs": [
    >         ".",
    >         "../../bazel-out/host/bin/path/to",
    >         "../../bazel-out/darwin-fastbuild/bin/path/to",
    >         "../../bazel-out/darwin_arm64-fastbuild/bin/path/to",
    >         "../../bazel-out/k8-fastbuild/bin/path/to",
    >         "../../bazel-out/x64_windows-fastbuild/bin/path/to",
    >         "../../bazel-out/darwin-dbg/bin/path/to",
    >         "../../bazel-out/k8-dbg/bin/path/to",
    >         "../../bazel-out/x64_windows-dbg/bin/path/to",
    >     ]
    > }
    > ```
    >
    > See some related discussion including both "rootDirs" and "paths" for a monorepo setup
    > using custom import paths:
    > https://github.com/bazelbuild/rules_nodejs/issues/2298

    ### Issues when running non-sandboxed

    When using a non-sandboxed spawn strategy (which is the default on Windows), you may
    observe these problems which require workarounds:

    1) Bazel deletes outputs from the previous execution before running `tsc`.
       This causes a problem with TypeScript's incremental mode: if the `.tsbuildinfo` file
       is not known to be an output of the rule, then Bazel will leave it in the output
       directory, and when `tsc` runs, it may see that the outputs written by the prior
       invocation are up-to-date and skip the emit of these files. This will cause Bazel
       to intermittently fail with an error that some outputs were not written.
       This is why we depend on `composite` and/or `incremental` attributes to be provided,
       so we can tell Bazel to expect a `.tsbuildinfo` output to ensure it is deleted before a
       subsequent compilation.
       At present, we don't do anything useful with the `.tsbuildinfo` output, and this rule
       does not actually have incremental behavior. Deleting the file is actually
       counter-productive in terms of TypeScript compile performance.
       Follow https://github.com/bazelbuild/rules_nodejs/issues/1726

    2) When using Project References, TypeScript will expect to verify that the outputs of referenced
       projects are up-to-date with respect to their inputs.
       (This is true even without using the `--build` option).
       When using a non-sandboxed spawn strategy, `tsc` can read the sources from other `ts_project`
       rules in your project, and will expect that the `tsconfig.json` file for those references will
       indicate where the outputs were written. However the `outDir` is determined by this Bazel rule so
       it cannot be known from reading the `tsconfig.json` file.
       This problem is manifested as a TypeScript diagnostic like
       `error TS6305: Output file '/path/to/execroot/a.d.ts' has not been built from source file '/path/to/execroot/a.ts'.`
       As a workaround, you can give the Windows "fastbuild" output directory as the `outDir` in your tsconfig file.
       On other platforms, the value isn't read so it does no harm.
       See https://github.com/bazelbuild/rules_nodejs/tree/stable/packages/typescript/test/ts_project as an example.
       We hope this will be fixed in a future release of TypeScript;
       follow https://github.com/microsoft/TypeScript/issues/37378

    3) When TypeScript encounters an import statement, it adds the source file resolved by that reference
       to the program. However you may have included that source file in a different project, so this causes
       the problem mentioned above where a source file is in multiple programs.
       (Note, if you use Project References this is not the case, TS will know the referenced
       file is part of the other program.)
       This will result in duplicate emit for the same file, which produces an error
       since the files written to the output tree are read-only.
       Workarounds include using Project References, or simply grouping the whole compilation
       into one program (if this doesn't exceed your time budget).

    Args:
        name: A name for the target.
            We recommend you use the basename (no `.json` extension) of the tsconfig file that should be compiled.
        srcs: List of labels of TypeScript source files to be provided to the compiler.
            If absent, the default is set as follows:

            - Include `**/*.ts[x]` (all TypeScript files in the package).
            - If `allow_js` is set, include `**/*.js[x]` (all JavaScript files in the package).
            - If `resolve_json_module` is set, include `**/*.json` (all JSON files in the package), but exclude `**/package.json`, `**/package-lock.json`, and `**/tsconfig*.json`.
        deps: List of labels of other rules that produce TypeScript typings (.d.ts files)
        tsconfig: Label of the tsconfig.json file to use for the compilation
            To support "chaining" of more than one extended config, this label could be a target that
            provides `TsConfigInfo` such as `ts_config`.

            By default, we assume the tsconfig file is "tsconfig.json" in the same folder as the ts_project rule.

            EXPERIMENTAL: generated tsconfig

            Instead of a label, you can pass a dictionary of tsconfig keys.

            In this case, a tsconfig.json file will be generated for this compilation, in the following way:

            - all top-level keys will be copied by converting the dict to json.
              So `tsconfig = {"compilerOptions": {"declaration": True}}`
              will result in a generated `tsconfig.json` with `{"compilerOptions": {"declaration": true}}`
            - each file in srcs will be converted to a relative path in the `files` section.
            - the `extends` attribute will be converted to a relative path

            Note that you can mix and match attributes and compilerOptions properties, so these are equivalent:

            ```
            ts_project(
                tsconfig = {
                    "compilerOptions": {
                        "declaration": True,
                    },
                },
            )
            ```
            and
            ```
            ts_project(
                declaration = True,
            )
            ```
        extends: Label of the tsconfig file referenced in the `extends` section of tsconfig
            To support "chaining" of more than one extended config, this label could be a target that
            provides `TsConfigInfo` such as `ts_config`.
        args: List of strings of additional command-line arguments to pass to tsc.
        transpiler: A custom transpiler tool to run that produces the JavaScript outputs instead of `tsc`.

            This attribute accepts a rule or macro with this signature:
            `name, srcs, js_outs, map_outs, **kwargs`
            where the `**kwargs` attribute propagates the tags, visibility, and testonly attributes from `ts_project`.

            If you need to pass additional attributes to the transpiler rule, you can use a
            [partial](https://github.com/bazelbuild/bazel-skylib/blob/main/lib/partial.bzl)
            to bind those arguments at the "make site", then pass that partial to this attribute where it
            will be called with the remaining arguments.
            See the packages/typescript/test/ts_project/swc directory for an example.

            When a custom transpiler is used, then the `ts_project` macro expands to these targets:

            - `[name]` - the default target is a `js_library` which can be included in the `deps` of downstream rules.
                Note that it will successfully build *even if there are typecheck failures* because the `tsc` binary
                is not needed to produce the default outputs.
                This is considered a feature, as it allows you to have a faster development mode where type-checking
                is not on the critical path.
            - `[name]_typecheck` - provides typings (`.d.ts` files) as the default output,
                therefore building this target always causes the typechecker to run.
            - `[name]_typecheck_test` - a
                [`build_test`](https://github.com/bazelbuild/bazel-skylib/blob/main/rules/build_test.bzl)
                target which simply depends on the `[name]_typecheck` target.
                This ensures that typechecking will be run under `bazel test` with
                [`--build_tests_only`](https://docs.bazel.build/versions/main/user-manual.html#flag--build_tests_only).
            - `[name]_typings` - internal target which runs the binary from the `tsc` attribute
            - Any additional target(s) the custom transpiler rule/macro produces.
                Some rules produce one target per TypeScript input file.

            By default, `ts_project` expects `.js` outputs to be written in the same action
            that does the type-checking to produce `.d.ts` outputs.
            This is the simplest configuration, however `tsc` is slower than alternatives.
            It also means developers must wait for the type-checking in the developer loop.

            In theory, Persistent Workers (via the `supports_workers` attribute) remedies the
            slow compilation time, however it adds additional complexity because the worker process
            can only see one set of dependencies, and so it cannot be shared between different
            `ts_project` rules. That attribute is documented as experimental, and may never graduate
            to a better support contract.
        tsc: Label of the TypeScript compiler binary to run.

            For example, `tsc = "@my_deps//typescript/bin:tsc"`
            Or you can pass a custom compiler binary instead.

            One possible compiler is the Angular compiler, provided by the
            `@angular/compiler-cli` package as the `ngc` binary, which can be set typically with
            `tsc = "@npm//@angular/compiler-cli/bin:ngc"`
            Note that you'll also need to pass `.html` and `.css` files to the `srcs` of the `ts_project`
            so that they're declared as inputs for the Angular compiler to read them.

            An example can be found in the rules_nodejs repo under `packages/typescript/test/ts_project/ngc`.

            > To use the `ngc` program from Angular versions prior to 11, you'll need a fix for
            > https://github.com/angular/angular/issues/36290
            > To apply the fix, you can use the patch-package package to apply this patch:
            > https://gist.github.com/alexeagle/ba44b2601bd7c953d29c6e8ec44d1ef9
        typescript_package: Label of the package containing all data deps of tsc.

            For example, `typescript_package = "@my_deps//typescript"`
        typescript_require_path: Module name which resolves to typescript_package when required

            For example, `typescript_require_path = "typescript"`
        validate: boolean; whether to check that the tsconfig JSON settings match the attributes on this target.
            Set this to `False` to skip running our validator, in case you have a legitimate reason for these to differ,
            e.g. you have a setting enabled just for the editor but you want different behavior when Bazel runs `tsc`.
        supports_workers: Experimental! Use only with caution.

            Allows you to enable the Bazel Persistent Workers strategy for this project.
            See https://docs.bazel.build/versions/main/persistent-workers.html

            This requires that the tsc binary support a `--watch` option.

            NOTE: this does not work on Windows yet.
            We will silently fallback to non-worker mode on Windows regardless of the value of this attribute.
            Follow https://github.com/bazelbuild/rules_nodejs/issues/2277 for progress on this feature.
        root_dir: a string specifying a subdirectory under the input package which should be considered the
            root directory of all the input files.
            Equivalent to the TypeScript --rootDir option.
            By default it is '.', meaning the source directory where the BUILD file lives.
        out_dir: a string specifying a subdirectory under the bazel-out folder where outputs are written.
            Equivalent to the TypeScript --outDir option.
            Note that Bazel always requires outputs be written under a subdirectory matching the input package,
            so if your rule appears in path/to/my/package/BUILD.bazel and out_dir = "foo" then the .js files
            will appear in bazel-out/[arch]/bin/path/to/my/package/foo/*.js.
            By default the out_dir is '.', meaning the packages folder in bazel-out.
        allow_js: boolean; Specifies whether TypeScript will read .js and .jsx files. When used with declaration,
            TypeScript will generate .d.ts files from .js files.
        resolve_json_module: None | boolean; Specifies whether TypeScript will read .json files. Defaults to None.
            If set to True or False and tsconfig is a dict, resolveJsonModule is set in the generated config file.
            If set to None and tsconfig is a dict, resolveJsonModule is unset in the generated config and typescript
            default or extended tsconfig value will be load bearing.
        declaration_dir: a string specifying a subdirectory under the bazel-out folder where generated declaration
            outputs are written. Equivalent to the TypeScript --declarationDir option.
            By default declarations are written to the out_dir.
        declaration: if the `declaration` bit is set in the tsconfig.
            Instructs Bazel to expect a `.d.ts` output for each `.ts` source.
        source_map: if the `sourceMap` bit is set in the tsconfig.
            Instructs Bazel to expect a `.js.map` output for each `.ts` source.
        declaration_map: if the `declarationMap` bit is set in the tsconfig.
            Instructs Bazel to expect a `.d.ts.map` output for each `.ts` source.
        preserve_jsx: if the `jsx` value is set to "preserve" in the tsconfig.
            Instructs Bazel to expect a `.jsx` or `.jsx.map` output for each `.tsx` source.
        composite: if the `composite` bit is set in the tsconfig.
            Instructs Bazel to expect a `.tsbuildinfo` output and a `.d.ts` output for each `.ts` source.
        incremental: if the `incremental` bit is set in the tsconfig.
            Instructs Bazel to expect a `.tsbuildinfo` output.
        emit_declaration_only: if the `emitDeclarationOnly` bit is set in the tsconfig.
            Instructs Bazel *not* to expect `.js` or `.js.map` outputs for `.ts` sources.
        ts_build_info_file: the user-specified value of `tsBuildInfoFile` from the tsconfig.
            Helps Bazel to predict the path where the .tsbuildinfo output is written.
        link_workspace_root: Link the workspace root to the bin_dir to support absolute requires like 'my_wksp/path/to/file'.
            If source files need to be required then they can be copied to the bin_dir with copy_to_bin.
        **kwargs: passed through to underlying rule, allows eg. visibility, tags
    """
    if srcs == None:
        # Default srcs: glob the package, honoring allow_js/resolve_json_module
        # per the defaults documented in the docstring above.
        include = ["**/*.ts", "**/*.tsx"]
        exclude = []
        if allow_js == True:
            include.extend(["**/*.js", "**/*.jsx"])
        if resolve_json_module == True:
            include.append("**/*.json")
            exclude.extend(["**/package.json", "**/package-lock.json", "**/tsconfig*.json"])
        srcs = native.glob(include, exclude)
    tsc_deps = deps

    if type(extends) == type([]):
        fail("As of rules_nodejs 3.0, extends should have a single value, not a list.\n" +
             "Use a ts_config rule to group together a chain of extended tsconfigs.")

    # Attributes propagated verbatim to every target this macro declares.
    common_kwargs = {
        "tags": kwargs.get("tags", []),
        "visibility": kwargs.get("visibility", None),
        "testonly": kwargs.get("testonly", None),
    }

    if type(tsconfig) == type(dict()):
        # EXPERIMENTAL path: tsconfig supplied as a dict; generate a tsconfig.json.
        # Copy attributes <-> tsconfig properties
        # TODO: fail if compilerOptions includes a conflict with an attribute?
        compiler_options = tsconfig.setdefault("compilerOptions", {})
        source_map = compiler_options.setdefault("sourceMap", source_map)
        declaration = compiler_options.setdefault("declaration", declaration)
        declaration_map = compiler_options.setdefault("declarationMap", declaration_map)
        emit_declaration_only = compiler_options.setdefault("emitDeclarationOnly", emit_declaration_only)
        allow_js = compiler_options.setdefault("allowJs", allow_js)
        if resolve_json_module != None:
            resolve_json_module = compiler_options.setdefault("resolveJsonModule", resolve_json_module)

        # These options are always passed on the tsc command line so don't include them
        # in the tsconfig. At best they're redundant, but at worst we'll have a conflict
        if "outDir" in compiler_options.keys():
            out_dir = compiler_options.pop("outDir")
        if "declarationDir" in compiler_options.keys():
            declaration_dir = compiler_options.pop("declarationDir")
        if "rootDir" in compiler_options.keys():
            root_dir = compiler_options.pop("rootDir")

        # FIXME: need to remove keys that have a None value?
        write_tsconfig(
            name = "_gen_tsconfig_%s" % name,
            config = tsconfig,
            files = srcs,
            extends = Label("%s//%s:%s" % (native.repository_name(), native.package_name(), name)).relative(extends) if extends else None,
            out = "tsconfig_%s.json" % name,
            allow_js = allow_js,
            resolve_json_module = resolve_json_module,
        )

        # From here, tsconfig becomes a file, the same as if the
        # user supplied a tsconfig.json InputArtifact
        tsconfig = "tsconfig_%s.json" % name
    else:
        if tsconfig == None:
            tsconfig = "tsconfig.json"
        if validate:
            # Declare a validation target that fails the build if the tsconfig
            # JSON disagrees with the mirrored attributes on this target.
            validate_options(
                name = "_validate_%s_options" % name,
                target = "//%s:%s" % (native.package_name(), name),
                declaration = declaration,
                source_map = source_map,
                declaration_map = declaration_map,
                preserve_jsx = preserve_jsx,
                composite = composite,
                incremental = incremental,
                ts_build_info_file = ts_build_info_file,
                emit_declaration_only = emit_declaration_only,
                resolve_json_module = resolve_json_module,
                allow_js = allow_js,
                tsconfig = tsconfig,
                extends = extends,
                has_local_deps = len([d for d in deps if not _is_external_label(d)]) > 0,
                **common_kwargs
            )
            # The validator runs as a dep of the compilation so it always executes.
            tsc_deps = tsc_deps + ["_validate_%s_options" % name]

    if supports_workers:
        # Wrap the compiler in a nodejs_binary driven by the worker adapter so it
        # can be run under Bazel's persistent-worker strategy.
        tsc_worker = "%s_worker" % name
        nodejs_binary(
            name = tsc_worker,
            data = [
                # BEGIN-INTERNAL
                # Users get this dependency transitively from @bazel/typescript
                # but that's our own code, so we don't.
                # TODO: remove protobuf dependency once rules_typescript also uses
                # worker package
                "@npm//protobufjs",
                # END-INTERNAL
                Label(typescript_package),
                Label("//packages/typescript/internal/worker:filegroup"),
                # BEGIN-INTERNAL
                # this is not needed when package since @bazel/typescript lists
                # @bazel/worker as its dependency hence has access to it.
                # this only needed when ts_project is run from the source.
                Label("//packages/worker:library"),
                # END-INTERNAL
                tsconfig,
            ],
            entry_point = Label("//packages/typescript/internal/worker:worker_adapter"),
            templated_args = [
                "--typescript_require_path",
                typescript_require_path,
            ],
        )
        tsc = ":" + tsc_worker

    # Predict the output files Bazel should expect from the tsc action,
    # based on the mirrored tsconfig attributes.
    typings_out_dir = declaration_dir if declaration_dir else out_dir
    tsbuildinfo_path = ts_build_info_file if ts_build_info_file else name + ".tsbuildinfo"
    js_outs = _lib.calculate_js_outs(srcs, out_dir, root_dir, allow_js, preserve_jsx, emit_declaration_only)
    map_outs = _lib.calculate_map_outs(srcs, out_dir, root_dir, source_map, preserve_jsx, emit_declaration_only)
    typings_outs = _lib.calculate_typings_outs(srcs, typings_out_dir, root_dir, declaration, composite, allow_js)
    typing_maps_outs = _lib.calculate_typing_maps_outs(srcs, typings_out_dir, root_dir, declaration_map, allow_js)

    tsc_js_outs = []
    tsc_map_outs = []
    if not transpiler:
        # No custom transpiler: tsc itself emits the .js/.js.map outputs,
        # and the main target is the ts_project rule.
        tsc_js_outs = js_outs
        tsc_map_outs = map_outs
        tsc_target_name = name
    else:
        # To stitch together a tree of ts_project where transpiler is a separate rule,
        # we have to produce a few targets
        tsc_target_name = "%s_typings" % name
        transpile_target_name = "%s_transpile" % name
        typecheck_target_name = "%s_typecheck" % name
        test_target_name = "%s_typecheck_test" % name

        transpile_srcs = [s for s in srcs if _lib.is_ts_src(s, allow_js)]
        if (len(transpile_srcs) != len(js_outs)):
            fail("ERROR: illegal state: transpile_srcs has length {} but js_outs has length {}".format(
                len(transpile_srcs),
                len(js_outs),
            ))

        if type(transpiler) == "function" or type(transpiler) == "rule":
            transpiler(
                name = transpile_target_name,
                srcs = transpile_srcs,
                js_outs = js_outs,
                map_outs = map_outs,
                **common_kwargs
            )
        elif partial.is_instance(transpiler):
            partial.call(
                transpiler,
                name = transpile_target_name,
                srcs = transpile_srcs,
                js_outs = js_outs,
                map_outs = map_outs,
                **common_kwargs
            )
        else:
            fail("transpiler attribute should be a rule/macro, a skylib partial, or the string 'tsc'. Got " + type(transpiler))

        # Users should build this target to get a failed build when typechecking fails
        native.filegroup(
            name = typecheck_target_name,
            srcs = [tsc_target_name],
            # This causes the DeclarationInfo to be produced, which in turn triggers the tsc action to typecheck
            output_group = "types",
            **common_kwargs
        )

        # Ensures the target above gets built under `bazel test --build_tests_only`
        build_test(
            name = test_target_name,
            targets = [typecheck_target_name],
            **common_kwargs
        )

        # Default target produced by the macro gives the js and map outs, with the transitive dependencies.
        js_library(
            name = name,
            srcs = js_outs + map_outs,
            # Include the tsc target so that this js_library can be a valid dep for downstream ts_project
            # or other DeclarationInfo-aware rules.
            deps = deps + [tsc_target_name],
            **common_kwargs
        )

    # The underlying rule that runs tsc (or the worker / custom compiler).
    _ts_project(
        name = tsc_target_name,
        srcs = srcs,
        args = args,
        deps = tsc_deps,
        tsconfig = tsconfig,
        allow_js = allow_js,
        extends = extends,
        incremental = incremental,
        preserve_jsx = preserve_jsx,
        composite = composite,
        declaration = declaration,
        declaration_dir = declaration_dir,
        source_map = source_map,
        declaration_map = declaration_map,
        out_dir = out_dir,
        root_dir = root_dir,
        js_outs = tsc_js_outs,
        map_outs = tsc_map_outs,
        typings_outs = typings_outs,
        typing_maps_outs = typing_maps_outs,
        buildinfo_out = tsbuildinfo_path if composite or incremental else None,
        emit_declaration_only = emit_declaration_only,
        tsc = tsc,
        link_workspace_root = link_workspace_root,
        supports_workers = supports_workers,
        transpile = not transpiler,
        **kwargs
    )
| 49.610577 | 179 | 0.653487 |
b540efba89fbdceddb157defeba6f8b42f9bfe22 | 1,037 | py | Python | tests/test_jewellerybox.py | Jarino/pycgp | c3281c0deff3388cb7cfd79339b84391f6499ae1 | [
"MIT"
] | 4 | 2018-02-08T17:34:58.000Z | 2021-09-20T01:37:32.000Z | tests/test_jewellerybox.py | Jarino/pycgp | c3281c0deff3388cb7cfd79339b84391f6499ae1 | [
"MIT"
] | 1 | 2018-02-09T09:59:09.000Z | 2018-02-09T10:52:26.000Z | tests/test_jewellerybox.py | Jarino/pycgp | c3281c0deff3388cb7cfd79339b84391f6499ae1 | [
"MIT"
] | 1 | 2018-12-12T03:51:33.000Z | 2018-12-12T03:51:33.000Z | """ Test suite for jewellery box """
from pycgp.gems import GemSingleGene, JewelleryBox
from pycgp.individual import Individual
def test_match(jewellerybox: JewelleryBox, individual: Individual):
    """ Matching an individual against the box yields the expected gem """
    assert hash(jewellerybox.match(individual)) == 413
def test_add_to_full(individual: Individual, jewellerybox: JewelleryBox):
    """ Should replace the gem with least value """
    # there are already two individuals in jewellerybox (from conftest.py)
    jewellerybox.max_size = 2
    # individual fixture has no fitness
    individual.fitness = 100
    # get the smallest value currently in the box (expected value comes from
    # the conftest.py fixtures — confirm there if this assertion changes)
    min_value = min(jewellerybox.gems.values())
    assert min_value == 5
    # add another gem, built from a fitter clone of the fixture individual
    better_ind = Individual(individual.genes[:], individual.bounds, individual.params)
    better_ind.fitness = 30
    new_gem = GemSingleGene(better_ind, individual, 7)
    jewellerybox.add(new_gem)
    # the box is full (max_size = 2), so adding should have evicted the
    # least-valuable gem, raising the minimum value in the box
    min_value = min(jewellerybox.gems.values())
    assert min_value == 10
| 27.289474 | 86 | 0.717454 |
8b8856a97f0e894481c41e4c520c6cb1808ab4cf | 4,324 | py | Python | userbot/modules/dogbin.py | razalfaaindi/rwszay | e4772d6a218b43ac8b721e5ff35d0ad853b78805 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/modules/dogbin.py | razalfaaindi/rwszay | e4772d6a218b43ac8b721e5ff35d0ad853b78805 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/modules/dogbin.py | razalfaaindi/rwszay | e4772d6a218b43ac8b721e5ff35d0ad853b78805 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
#
""" Userbot module containing commands for interacting with dogbin(https://del.dog)"""
from requests import get, post, exceptions
import os
from userbot import BOTLOG, BOTLOG_CHATID, CMD_HELP, TEMP_DOWNLOAD_DIRECTORY
from userbot.events import register
DOGBIN_URL = "https://del.dog/"
@register(outgoing=True, pattern=r"^.paste(?: |$)([\s\S]*)")
async def paste(pstl):
    """ For .paste command, pastes the text directly to dogbin.

    Takes either inline text after the command or a replied-to message
    (text or downloadable file), POSTs it to del.dog, and edits the
    command message with the resulting URL(s).
    """
    dogbin_final_url = ""
    match = pstl.pattern_match.group(1).strip()
    reply_id = pstl.reply_to_msg_id
    # Nothing to paste: no inline text and no replied-to message.
    if not match and not reply_id:
        return await pstl.edit("`Elon Musk said I cannot paste void.`")
    if match:
        message = match
    elif reply_id:
        message = (await pstl.get_reply_message())
        if message.media:
            # Replied-to media: download the file and paste its decoded contents.
            downloaded_file_name = await pstl.client.download_media(
                message,
                TEMP_DOWNLOAD_DIRECTORY,
            )
            m_list = None
            with open(downloaded_file_name, "rb") as fd:
                m_list = fd.readlines()
            message = ""
            for m in m_list:
                message += m.decode("UTF-8")
            # Remove the temporary download once its text has been read.
            os.remove(downloaded_file_name)
        else:
            # Plain text reply: paste the message text itself.
            message = message.message
    # Dogbin
    await pstl.edit("`Pasting text . . .`")
    resp = post(DOGBIN_URL + "documents", data=message.encode('utf-8'))
    if resp.status_code == 200:
        response = resp.json()
        key = response['key']
        dogbin_final_url = DOGBIN_URL + key
        # 'isUrl' in the response indicates dogbin shortened a URL rather
        # than storing a plain paste.
        if response['isUrl']:
            reply_text = ("`Pasted successfully!`\n\n"
                          f"`Shortened URL:` {dogbin_final_url}\n\n"
                          "`Original(non-shortened) URLs`\n"
                          f"`Dogbin URL`: {DOGBIN_URL}v/{key}\n")
        else:
            reply_text = ("`Pasted successfully!`\n\n"
                          f"`Dogbin URL`: {dogbin_final_url}")
    else:
        reply_text = ("`Failed to reach Dogbin`")
    await pstl.edit(reply_text)
    if BOTLOG:
        # Record the command in the bot's log chat, if configured.
        await pstl.client.send_message(
            BOTLOG_CHATID,
            "Paste query was executed successfully",
        )
@register(outgoing=True, pattern="^.getpaste(?: |$)(.*)")
async def get_dogbin_content(dog_url):
    """ For .getpaste command, fetches the content of a dogbin URL.

    Accepts the URL either inline after the command or from a replied-to
    message, strips the del.dog prefix to recover the paste key, and edits
    the command message with the raw paste content.
    """
    textx = await dog_url.get_reply_message()
    message = dog_url.pattern_match.group(1)
    await dog_url.edit("`Getting dogbin content...`")
    if textx:
        # A replied-to message takes precedence over inline text.
        message = str(textx.message)
    # Strip any recognized del.dog URL prefix to recover the bare paste key.
    format_normal = f'{DOGBIN_URL}'
    format_view = f'{DOGBIN_URL}v/'
    if message.startswith(format_view):
        message = message[len(format_view):]
    elif message.startswith(format_normal):
        message = message[len(format_normal):]
    elif message.startswith("del.dog/"):
        message = message[len("del.dog/"):]
    else:
        return await dog_url.edit("`Is that even a dogbin url?`")
    resp = get(f'{DOGBIN_URL}raw/{message}')
    # Report request failures to the user instead of raising.
    try:
        resp.raise_for_status()
    except exceptions.HTTPError as HTTPErr:
        await dog_url.edit(
            "Request returned an unsuccessful status code.\n\n" + str(HTTPErr))
        return
    except exceptions.Timeout as TimeoutErr:
        await dog_url.edit("Request timed out." + str(TimeoutErr))
        return
    except exceptions.TooManyRedirects as RedirectsErr:
        await dog_url.edit(
            "Request exceeded the configured number of maximum redirections." +
            str(RedirectsErr))
        return
    reply_text = ("`Fetched dogbin URL content successfully!`"
                  "\n\n`Content:` " + resp.text)
    await dog_url.edit(reply_text)
    if BOTLOG:
        # Record the command in the bot's log chat, if configured.
        await dog_url.client.send_message(
            BOTLOG_CHATID,
            "Get dogbin content query was executed successfully",
        )
# Register this module's commands in the bot's help index.
CMD_HELP.update({
    "dogbin":
    ">`.paste <text/reply>`"
    "\nUsage: Create a paste or a shortened url using dogbin (https://del.dog/)"
    "\n\n>`.getpaste`"
    "\nUsage: Gets the content of a paste or shortened url from dogbin (https://del.dog/)"
})
| 33.261538 | 90 | 0.612396 |
637b7c84fb49487d7f58887ccf6c4829f7fd92e6 | 1,729 | py | Python | komics/commands/assemble.py | FreBio/komics | 3af2d968f7864d5f3767983b236660f268f8d0c0 | [
"OLDAP-2.2.1"
] | 2 | 2021-10-04T14:10:32.000Z | 2021-11-10T11:59:39.000Z | komics/commands/assemble.py | FreBio/komics | 3af2d968f7864d5f3767983b236660f268f8d0c0 | [
"OLDAP-2.2.1"
] | 2 | 2021-10-20T11:21:22.000Z | 2022-03-29T13:47:28.000Z | komics/commands/assemble.py | FreBio/komics | 3af2d968f7864d5f3767983b236660f268f8d0c0 | [
"OLDAP-2.2.1"
] | 1 | 2020-06-11T10:05:32.000Z | 2020-06-11T10:05:32.000Z | import sys
import komics
import argparse
def main():
    """Entry point for ``komics assemble``.

    Parses the command-line options, resolves the CSB3 sequence(s) used to
    identify minicircle contigs, then constructs and runs the
    ``komics.assemble.Megahit`` wrapper.
    """
    parser = argparse.ArgumentParser(
        prog='komics',
        description='Assembles contigs using MEGAHIT',
        usage = 'komics assemble [options] <out> <reads1> <reads2>')
    parser.add_argument('out', help='Prefix used for labeling FASTQ files', metavar='out')
    parser.add_argument('reads1', help='FASTQ file w/ first-in-pair reads', metavar='reads1')
    # Fix: metavar was 'reads1', which mislabeled this argument in --help output.
    parser.add_argument('reads2', help='FASTQ file w/ second-in-pair reads', metavar='reads2')
    parser.add_argument('--threads', type=int, help='Number of threads [%(default)s]', default=1, metavar='INT')
    # Fix: the k-mer and length options were missing type=int, so values given
    # on the command line arrived as strings while the defaults were ints.
    parser.add_argument('--kmin', type=int, help='Minimum k-mer (must be odd number) [%(default)s]', default=39, metavar='INT')
    parser.add_argument('--kmax', type=int, help='Maximum k-mer (must be odd number) [%(default)s]', default=119, metavar='INT')
    parser.add_argument('--kstep', type=int, help='Steps between k-mers (must be even number) [%(default)s]', default=10, metavar='INT')
    parser.add_argument('--length', type=int, help='Minimum length (bp) of contigs to be kept [%(default)s]', default=400, metavar='INT')
    parser.add_argument('--csb3', type=str, help='Specificy one or more Conserved Sequence Block 3 (CSB3) sequences, used for identifying minicircles. When nothing is provided, the following two CSB3-mers are used by default: "GGGGTTGGTGT|GGGGTTGATGT".', metavar='STR')
    # Parse once (previously argv was parsed twice, redundantly).
    options = parser.parse_args()
    # Fall back to the two default CSB3 11-mers when the user supplies none.
    if options.csb3 is None:
        csb3mer = 'GGGGTTGGTGT|GGGGTTGATGT'
    else:
        csb3mer = options.csb3
    ka = komics.assemble.Megahit(
        options.out,
        options.reads1,
        options.reads2,
        csb3mer,
        options.threads,
        options.kmin,
        options.kmax,
        options.kstep,
        options.length,
    )
    ka.run()
| 44.333333 | 266 | 0.717178 |
9bed4fd21676890f7257d1c2be96e8f2cbe486aa | 2,870 | py | Python | nowcasting_dataset/consts.py | JanEbbing/nowcasting_dataset | f907054a457987e6f6dbb13bfb65fc5359c6f680 | [
"MIT"
] | null | null | null | nowcasting_dataset/consts.py | JanEbbing/nowcasting_dataset | f907054a457987e6f6dbb13bfb65fc5359c6f680 | [
"MIT"
] | null | null | null | nowcasting_dataset/consts.py | JanEbbing/nowcasting_dataset | f907054a457987e6f6dbb13bfb65fc5359c6f680 | [
"MIT"
] | null | null | null | """ Constants that can be imported when needed """
from pathlib import Path
from typing import Union
import numpy as np
import xarray as xr
# Satellite data
# TODO: Issue #423: Remove this?
SAT_FILENAME = "gs://" + str(
    Path("solar-pv-nowcasting-data")
    / "satellite/EUMETSAT/SEVIRI_RSS/OSGB36/all_zarr_int16_single_timestep.zarr"
)
# Typing
# Alias for code that accepts either an xarray DataArray or a plain numpy array.
Array = Union[xr.DataArray, np.ndarray]
# --- PV (photovoltaic) system dimension/coordinate names used in batches ---
PV_SYSTEM_ID: str = "pv_system_id"
PV_SYSTEM_ROW_NUMBER = "pv_system_row_number"
PV_SYSTEM_X_COORDS = "pv_system_x_coords"
PV_SYSTEM_Y_COORDS = "pv_system_y_coords"
SUN_AZIMUTH_ANGLE = "sun_azimuth_angle"
SUN_ELEVATION_ANGLE = "sun_elevation_angle"
PV_YIELD = "pv_yield"
PV_DATETIME_INDEX = "pv_datetime_index"
DEFAULT_N_PV_SYSTEMS_PER_EXAMPLE = 2048
# --- GSP names (presumably Grid Supply Point — confirm with project docs) ---
GSP_ID: str = "gsp_id"
GSP_YIELD = "gsp_yield"
GSP_X_COORDS = "gsp_x_coords"
GSP_Y_COORDS = "gsp_y_coords"
GSP_DATETIME_INDEX = "gsp_datetime_index"
N_GSPS = 338
DEFAULT_N_GSP_PER_EXAMPLE = 32
OBJECT_AT_CENTER = "object_at_center"
# Trig-encoded time-of-day / day-of-year feature names.
DATETIME_FEATURE_NAMES = (
    "hour_of_day_sin",
    "hour_of_day_cos",
    "day_of_year_sin",
    "day_of_year_cos",
)
# --- Satellite array/coordinate names ---
SATELLITE_DATA = "sat_data"
SATELLITE_Y_COORDS = "sat_y_coords"
SATELLITE_X_COORDS = "sat_x_coords"
SATELLITE_DATETIME_INDEX = "sat_datetime_index"
# --- NWP (numerical weather prediction) array/coordinate names ---
NWP_TARGET_TIME = "nwp_target_time"
NWP_DATA = "nwp"
NWP_X_COORDS = "nwp_x_coords"
NWP_Y_COORDS = "nwp_y_coords"
# Example-centre coordinates (OSGB projection, per the name — TODO confirm).
X_CENTERS_OSGB = "x_centers_osgb"
Y_CENTERS_OSGB = "y_centers_osgb"
# --- Topography array/coordinate names ---
TOPOGRAPHIC_DATA = "topo_data"
TOPOGRAPHIC_X_COORDS = "topo_x_coords"
TOPOGRAPHIC_Y_COORDS = "topo_y_coords"
# "Safe" default NWP variable names:
NWP_VARIABLE_NAMES = (
    "t",
    "dswrf",
    "prate",
    "r",
    "sde",
    "si10",
    "vis",
    "lcc",
    "mcc",
    "hcc",
)
# A complete set of NWP variable names. Not all are currently used.
FULL_NWP_VARIABLE_NAMES = (
    "cdcb",
    "lcc",
    "mcc",
    "hcc",
    "sde",
    "hcct",
    "dswrf",
    "dlwrf",
    "h",
    "t",
    "r",
    "dpt",
    "vis",
    "si10",
    "wdir10",
    "prmsl",
    "prate",
)
# SEVIRI satellite channel names.
SAT_VARIABLE_NAMES = (
    "HRV",
    "IR_016",
    "IR_039",
    "IR_087",
    "IR_097",
    "IR_108",
    "IR_120",
    "IR_134",
    "VIS006",
    "VIS008",
    "WV_062",
    "WV_073",
)
# Keys every example/batch must contain by default (datetime features appended below).
DEFAULT_REQUIRED_KEYS = [
    NWP_DATA,
    NWP_X_COORDS,
    NWP_Y_COORDS,
    SATELLITE_DATA,
    SATELLITE_X_COORDS,
    SATELLITE_Y_COORDS,
    PV_YIELD,
    PV_SYSTEM_ID,
    PV_SYSTEM_ROW_NUMBER,
    PV_SYSTEM_X_COORDS,
    PV_SYSTEM_Y_COORDS,
    X_CENTERS_OSGB,
    Y_CENTERS_OSGB,
    GSP_ID,
    GSP_YIELD,
    GSP_X_COORDS,
    GSP_Y_COORDS,
    GSP_DATETIME_INDEX,
    TOPOGRAPHIC_DATA,
    TOPOGRAPHIC_Y_COORDS,
    TOPOGRAPHIC_X_COORDS,
] + list(DATETIME_FEATURE_NAMES)
T0_DT = "t0_dt"
SPATIAL_AND_TEMPORAL_LOCATIONS_OF_EACH_EXAMPLE_FILENAME = (
    "spatial_and_temporal_locations_of_each_example.csv"
)
LOG_LEVELS = ("DEBUG", "INFO", "WARNING", "ERROR")
| 20.797101 | 80 | 0.697213 |
83399868f5f6851fbc6675e827830813f4aa4ab8 | 3,748 | py | Python | pyfitnesspal/settings.py | gord1anknot/pyfitnesspal | cf0b224a2b12797876a3601678d84172c6dddf00 | [
"Apache-2.0"
] | 2 | 2018-06-08T16:05:53.000Z | 2018-12-22T16:07:33.000Z | pyfitnesspal/settings.py | gord1anknot/pyfitnesspal | cf0b224a2b12797876a3601678d84172c6dddf00 | [
"Apache-2.0"
] | null | null | null | pyfitnesspal/settings.py | gord1anknot/pyfitnesspal | cf0b224a2b12797876a3601678d84172c6dddf00 | [
"Apache-2.0"
] | 1 | 2018-11-08T10:53:15.000Z | 2018-11-08T10:53:15.000Z | # Copyright 2015 Bradley Rowe
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Django settings for pyfitnesspal project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# NOTE: raises KeyError at import time if SECRET_KEY is missing from the environment.
SECRET_KEY = os.environ['SECRET_KEY']
# DEBUG is driven by DJANGO_ENV: any value starting with 'prod'
# (case-insensitive) turns debugging off.
if 'DJANGO_ENV' in os.environ and\
        os.environ['DJANGO_ENV'].lower().startswith('prod'):
    DEBUG = False
    TEMPLATE_DEBUG = False
    ALLOWED_HOSTS = []
else:
    DEBUG = True
    TEMPLATE_DEBUG = True
    ALLOWED_HOSTS = []
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'main',
    'crispy_forms',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'pyfitnesspal.urls'
WSGI_APPLICATION = 'pyfitnesspal.wsgi.application'
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {}
# Parse database configuration from $DATABASE_URL
import dj_database_url
DATABASES['default'] = dj_database_url.config()
# This does a test to see which db is in use, as sqlite3
# might be used locally during development.
# Enable Connection Pooling (if desired)
# if 'ENGINE' in DATABASES['default'] and \
#         DATABASES['default']['ENGINE'] == \
#         'django.db.backends.postgresql_psycopg2':
#     DATABASES['default']['ENGINE'] = 'django_postgrespool'
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Heroku Django config snippet: allow all host headers
# NOTE(review): this unconditionally overrides the per-environment
# ALLOWED_HOSTS set above — confirm '*' is really intended in production.
ALLOWED_HOSTS = ['*']
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
# Static asset configuration
STATIC_ROOT = 'staticfiles'
STATIC_URL = '/static/'
STATICFILES_DIRS = (
    # Commercial libraries not available on GitHub or via Bower
    # could go into 'libraries'
    os.path.join(BASE_DIR, 'static/libraries'),
    # Custom frontend assets in css/js subfolders
    os.path.join(BASE_DIR, 'static/custom'),
    # Bower controlled assets
    os.path.join(BASE_DIR, 'static/bower_components'),
)
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
TEMPLATE_DIRS = (
    os.path.join(BASE_DIR, 'templates'),
)
LOGIN_REDIRECT_URL = 'pyfitnesspal_home'
LOGIN_URL = 'pyfitnesspal_login'
LOGOUT_URL = 'pyfitnesspal_logout'
CRISPY_TEMPLATE_PACK = 'bootstrap3'
| 30.471545 | 74 | 0.740662 |
3684e7e877da009447ab9b71ef594b8a51f1fd82 | 1,736 | py | Python | ticketViewer/TicketModel.py | kunwarvidhan/Zendesk-Coding-Challenge-2021 | f6b34fd0d7e21937ed6af6e0e0397409a1c48106 | [
"MIT"
] | null | null | null | ticketViewer/TicketModel.py | kunwarvidhan/Zendesk-Coding-Challenge-2021 | f6b34fd0d7e21937ed6af6e0e0397409a1c48106 | [
"MIT"
] | null | null | null | ticketViewer/TicketModel.py | kunwarvidhan/Zendesk-Coding-Challenge-2021 | f6b34fd0d7e21937ed6af6e0e0397409a1c48106 | [
"MIT"
] | null | null | null | import time
import sys
import requests
class TicketModel:
    """Model object: fetches tickets from the Zendesk API and caches them."""

    def __init__(self):
        """Create the model with no tickets loaded yet."""
        self.__tickets = []

    def __update_tickets(self, tickets):
        """Overwrite the cached ticket list with a freshly fetched one."""
        self.__tickets = tickets

    def retrieve_ticket(self, url, auth):
        """Download every page of tickets from *url* using HTTP Basic auth.

        Each page request gets two attempts; after the second consecutive
        failure the process exits. On success the cached ticket list is
        replaced with all downloaded tickets.
        """
        print('Connecting to ' + url + '...')
        session = requests.Session()
        collected = []
        while url:
            failures = 0
            while True:
                try:
                    response = session.get(url, headers={'Authorization': 'Basic %s' % auth})
                except requests.exceptions.RequestException as msg:
                    print(msg)
                    time.sleep(0.3)
                    print('Reconnecting...')
                    failures += 1
                    if failures >= 2:
                        print('Unable to connect to ' + url +
                              '. The API is currently not available, please try later ')
                        sys.exit()
                else:
                    break
            payload = response.json()
            collected.extend(payload['tickets'])
            url = payload['next_page']
        session.close()
        self.__update_tickets(collected)
        print('Connecting successfully')

    def get_tickets(self):
        """Return the cached ticket list for the controller."""
        return self.__tickets
| 32.754717 | 100 | 0.570853 |
52b380abbfc036d3abd0d94a7b431d357a2c83f0 | 1,698 | py | Python | packstack/installer/utils/shortcuts.py | strider/packstack | 8157722953234d00a2dc4ca6bca940464e70a614 | [
"Apache-2.0"
] | null | null | null | packstack/installer/utils/shortcuts.py | strider/packstack | 8157722953234d00a2dc4ca6bca940464e70a614 | [
"Apache-2.0"
] | null | null | null | packstack/installer/utils/shortcuts.py | strider/packstack | 8157722953234d00a2dc4ca6bca940464e70a614 | [
"Apache-2.0"
] | 1 | 2020-08-31T21:25:37.000Z | 2020-08-31T21:25:37.000Z | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import grp
import os
import pwd
def host_iter(config):
    """Yield (option_name, host) pairs for every *_HOST / *_HOSTS option.

    A value may carry a '/suffix' which is stripped off; *_HOSTS values are
    comma-separated lists. Empty host strings are skipped.
    """
    # .items() instead of the Python-2-only .iteritems(): identical iteration
    # semantics here, and the function now works on both Python 2 and 3.
    for key, value in config.items():
        if key.endswith("_HOST"):
            host = value.split('/')[0]
            if host:
                yield key, host
        if key.endswith("_HOSTS"):
            for chunk in value.split(","):
                host = chunk.strip().split('/')[0]
                if host:
                    yield key, host
def hosts(config):
    """Return the set of unique hosts referenced by *config*."""
    return {host for _key, host in host_iter(config)}
def get_current_user():
    """Return the (uid, gid) of the invoking user.

    Resolves the login name when possible; falls back to the effective
    uid/gid when there is no controlling terminal (os.getlogin -> OSError)
    or the login name has no passwd entry (pwd.getpwnam -> KeyError).
    """
    try:
        user = pwd.getpwnam(os.getlogin())
        uid, gid = user.pw_uid, user.pw_gid
    # BUG FIX: pwd.getpwnam raises KeyError (not OSError) for an unknown
    # name, which previously escaped this handler.
    except (OSError, KeyError):
        # in case program is run by a script
        uid, gid = os.getuid(), os.getgid()
    return uid, gid
def get_current_username():
    """Return (user name, group name) for the current user."""
    uid, gid = get_current_user()
    return pwd.getpwuid(uid).pw_name, grp.getgrgid(gid).gr_name
def split_hosts(hosts_string):
    """Split a comma-separated host string into a set, dropping blanks."""
    trimmed = (part.strip() for part in hosts_string.split(','))
    return {part for part in trimmed if part}
| 26.53125 | 69 | 0.621908 |
77fb66fd0863454ea0b7e27061b5af8cd71e71ae | 4,109 | py | Python | huaweicloud-sdk-as/huaweicloudsdkas/v1/model/lbaas_listeners.py | githubmilesma/huaweicloud-sdk-python-v3 | 9d9449ed68a609ca65f0aa50b5b2a1c28445bf03 | [
"Apache-2.0"
] | 1 | 2021-04-16T07:59:28.000Z | 2021-04-16T07:59:28.000Z | huaweicloud-sdk-as/huaweicloudsdkas/v1/model/lbaas_listeners.py | Lencof/huaweicloud-sdk-python-v3 | d13dc4e2830a83e295be6e4de021999b3376e34e | [
"Apache-2.0"
] | null | null | null | huaweicloud-sdk-as/huaweicloudsdkas/v1/model/lbaas_listeners.py | Lencof/huaweicloud-sdk-python-v3 | d13dc4e2830a83e295be6e4de021999b3376e34e | [
"Apache-2.0"
] | 1 | 2022-01-17T02:24:18.000Z | 2022-01-17T02:24:18.000Z | # coding: utf-8
import pprint
import re
import six
class LbaasListeners:
    """SDK model describing one load-balancer listener backend binding.

    Attributes:
        openapi_types (dict): maps attribute name to attribute type.
        attribute_map (dict): maps attribute name to its JSON key.
    """

    sensitive_list = []

    openapi_types = {
        'pool_id': 'str',
        'protocol_port': 'int',
        'weight': 'int'
    }

    attribute_map = {
        'pool_id': 'pool_id',
        'protocol_port': 'protocol_port',
        'weight': 'weight'
    }

    def __init__(self, pool_id=None, protocol_port=None, weight=None):
        """LbaasListeners - a model defined in huaweicloud sdk.

        weight is only assigned through its setter when explicitly given.
        """
        self._pool_id = None
        self._protocol_port = None
        self._weight = None
        self.discriminator = None
        self.pool_id = pool_id
        self.protocol_port = protocol_port
        if weight is not None:
            self.weight = weight

    @property
    def pool_id(self):
        """str: ID of the backend server group."""
        return self._pool_id

    @pool_id.setter
    def pool_id(self, pool_id):
        self._pool_id = pool_id

    @property
    def protocol_port(self):
        """int: backend port the servers listen on, range [1, 65535]."""
        return self._protocol_port

    @protocol_port.setter
    def protocol_port(self, protocol_port):
        self._protocol_port = protocol_port

    @property
    def weight(self):
        """int: share of requests dispatched to this backend, range [0, 1], default 1."""
        return self._weight

    @weight.setter
    def weight(self, weight):
        self._weight = weight

    def to_dict(self):
        """Serialize the model into a plain dict, masking sensitive fields."""
        def serialize(value):
            # One-level conversion, mirroring generated-SDK semantics.
            if isinstance(value, list):
                return [item.to_dict() if hasattr(item, "to_dict") else item
                        for item in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {key: (val.to_dict() if hasattr(val, "to_dict") else val)
                        for key, val in value.items()}
            return value

        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            is_plain = (not isinstance(value, (list, dict))
                        and not hasattr(value, "to_dict"))
            if is_plain and attr in self.sensitive_list:
                # Masking only applies to plain (non-container) values.
                result[attr] = "****"
            else:
                result[attr] = serialize(value)
        return result

    def to_str(self):
        """Return a pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Models are equal when they are the same type with equal state."""
        return isinstance(other, LbaasListeners) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| 25.208589 | 74 | 0.554149 |
92ce14c261592eb062be660d1a06447c0685fe38 | 3,703 | py | Python | clamav.py | Rafiot/viper-modules | 812642effecf8ae64bce76fa0f72116e0c2c81cd | [
"BSD-3-Clause"
] | 5 | 2019-12-20T09:42:41.000Z | 2021-04-30T07:05:00.000Z | clamav.py | SubSpaceManeuvers/viper-modules | c8f19c6d4e0e976e2ad8730e0862c2250e3acdd5 | [
"BSD-3-Clause"
] | 7 | 2019-11-25T13:13:15.000Z | 2020-09-09T09:04:46.000Z | clamav.py | SubSpaceManeuvers/viper-modules | c8f19c6d4e0e976e2ad8730e0862c2250e3acdd5 | [
"BSD-3-Clause"
] | 10 | 2019-11-20T04:57:51.000Z | 2021-01-21T18:51:47.000Z | # -*- coding: utf-8 -*-
# This file is part of Viper - https://github.com/viper-framework/viper
# See the file 'LICENSE' for copying permission.
try:
import pyclamd
HAVE_CLAMD = True
except ImportError:
HAVE_CLAMD = False
import os
from viper.common.abstracts import Module
from viper.core.database import Database, Malware
from viper.core.session import __sessions__
from viper.core.storage import get_sample_path
class ClamAV(Module):
    """Viper module: scan the open session's file (or all stored samples)
    with a local ClamAV daemon via pyclamd, optionally tagging detections."""
    cmd = 'clamav'
    description = 'Scan file from local ClamAV daemon'
    authors = ['neriberto']

    def __init__(self):
        super(ClamAV, self).__init__()
        self.parser.add_argument('-s', '--socket', help='Specify an unix socket (default: Clamd Unix Socket)')
        self.parser.add_argument('-a', '--all', action='store_true', help='Scan all files')
        self.parser.add_argument('-t', '--tag', action='store_true', help='Tag file(s) with the signature name when detect as malware')
        self.db = Database()

    def run(self):
        """Entry point: validate dependencies/daemon, then dispatch the scan."""
        super(ClamAV, self).run()
        if self.args is None:
            return
        if not HAVE_CLAMD:
            self.log('error', "Missing dependency, install pyclamd (`pip install pyclamd`)")
            return
        if not self.Connect():
            self.log('error', 'Daemon is not responding!')
            return
        if self.args.all:
            self.ScanAll()
        elif __sessions__.is_set():
            self.ScanFile(__sessions__.current.file)
        else:
            self.log('error', 'No open session')

    def ScanAll(self):
        """Scan every non-empty sample stored in the Viper database."""
        samples = self.db.find(key='all')
        for sample in samples:
            if sample.size == 0:
                continue
            self.ScanFile(sample)

    def Connect(self):
        """Open a clamd unix-socket connection; return the daemon's ping() result.

        Uses the socket from --socket when given, otherwise pyclamd's default
        socket (self.socket is then set to the display name 'Clamav').
        """
        self.daemon = None
        self.socket = self.args.socket
        try:
            if self.socket is not None:
                self.daemon = pyclamd.ClamdUnixSocket(self.socket)
                self.log('info', 'Using socket {0} to scan'.format(self.socket))
            else:
                self.daemon = pyclamd.ClamdUnixSocket()
                self.socket = 'Clamav'
            return self.daemon.ping()
        except Exception as ex:
            msg = 'Daemon connection failure, {0}'.format(ex)
            # BUG FIX: the log level was misspelled 'error,' (stray comma).
            self.log('error', msg)
            return False

    def ScanFile(self, sample):
        """Stream one sample to clamd and report (and optionally tag) the result."""
        if isinstance(sample, Malware):
            sample_path = get_sample_path(sample.sha256)
        else:
            sample_path = sample.path
        if not os.path.exists(sample_path):
            self.log('error', 'The file does not exists at path {0}'.format(sample_path))
            return
        try:
            if self.daemon.ping():
                with open(sample_path, 'rb') as fd:
                    results = self.daemon.scan_stream(fd.read())
            else:
                self.log('error', "Unable to connect to the daemon")
                # BUG FIX: previously fell through and hit a NameError on
                # 'results' below; bail out here like the exception path does.
                return
        except Exception as ex:
            msg = 'Unable to scan file {0} with antivirus daemon, {1}'.format(sample.sha256, ex)
            self.log('error', msg)
            return
        found = None
        name = None
        if results:
            # scan_stream result maps entries to (status, signature) pairs,
            # judging by the indexing below — confirm against pyclamd docs.
            for item in results:
                found = results[item][0]
                name = results[item][1]
        if found == 'ERROR':
            self.log('error', "Check permissions of the binary folder, {0}".format(name))
        else:
            if name is not None:
                if self.args.tag:
                    self.db.add_tags(sample.sha256, name)
            else:
                name = 'Threat not found!'
            self.log('info', "{0} identify {1} as : {2}".format(self.socket, sample.sha256, name))
| 32.482456 | 135 | 0.565487 |
92090221db0004b84ef4a8dc46e83cc5a75ec4bc | 1,472 | py | Python | src/pypyr/shapefunctions.py | joelphillips/pypyramid | be1b4760235d859755771e55c003396e02b72f91 | [
"BSD-3-Clause"
] | 1 | 2015-01-01T16:26:16.000Z | 2015-01-01T16:26:16.000Z | src/pypyr/shapefunctions.py | joelphillips/pypyramid | be1b4760235d859755771e55c003396e02b72f91 | [
"BSD-3-Clause"
] | null | null | null | src/pypyr/shapefunctions.py | joelphillips/pypyramid | be1b4760235d859755771e55c003396e02b72f91 | [
"BSD-3-Clause"
] | null | null | null | '''
The Pyramidal approximation spaces. See the paper "Numerical integration for high order pyramidal elements"
For each space, we build first build the Helmholtz decomposition, then map to the finite pyramid.
Created on Aug 17, 2010
@author: joel
'''
from pypyr.mappings import derham3dweights, psijac, psiinvjac, psidet, psi
from pypyr.diffforms import DiffForm, CatDiffForm, MapDiffForm, derham
from pypyr.functions import QSpace, ZeroFns
# Weights for mapping each form degree through the pyramid map psi (built
# from psi's jacobian, inverse jacobian and determinant). Sliced as w[1:],
# w[2:], w[3:] by the higher-degree form constructors below.
w = derham3dweights(psijac, psiinvjac, psidet)
def R1GradFreeI(k):
    """Gradient-free component of the degree-k 1-form space on the infinite pyramid."""
    q_x = QSpace(k - 1, k, k + 1)
    zero_x = ZeroFns(q_x.nfns)
    q_y = QSpace(k, k - 1, k + 1)
    zero_y = ZeroFns(q_y.nfns)
    # Two vector-component blocks: (Q, 0, 0) and (0, Q, 0).
    x_block = DiffForm([q_x, zero_x, zero_x], derham[1:])
    y_block = DiffForm([zero_y, q_y, zero_y], derham[1:])
    return CatDiffForm([x_block, y_block])
def R2CurlFreeI(k):
    """Curl-free component of the degree-k 2-form space on the infinite pyramid."""
    q = QSpace(k - 1, k - 1, k + 2)
    zero = ZeroFns(q.nfns)
    # Only the last vector component carries functions: (0, 0, Q).
    return DiffForm([zero, zero, q], derham[2:])
def R0Forms(k):
    """Degree-k 0-form space, mapped to the finite pyramid via psi."""
    interior = DiffForm([QSpace(k, k, k)], derham)
    return MapDiffForm(interior, psi, w)
def R1Forms(k):
    """Degree-k 1-form space on the finite pyramid.

    Built from the gradient-free interior part plus D() of the 0-forms
    (D() is presumably the exterior derivative — confirm in diffforms).
    """
    derived = DiffForm([QSpace(k, k, k, 1)], derham).D()
    combined = CatDiffForm([R1GradFreeI(k), derived])
    return MapDiffForm(combined, psi, w[1:])
def R2Forms(k):
    """Degree-k 2-form space on the finite pyramid (curl-free part plus D() of the 1-form part)."""
    combined = CatDiffForm([R2CurlFreeI(k), R1GradFreeI(k).D()])
    return MapDiffForm(combined, psi, w[2:])
def R3Forms(k):
    """Degree-k 3-form (volume) space on the finite pyramid."""
    interior = DiffForm([QSpace(k - 1, k - 1, k + 3)], [])
    return MapDiffForm(interior, psi, w[3:])
def R2FormsDivFree(k):
    """Divergence-free subspace of the degree-k 2-forms on the finite pyramid."""
    derived = R1GradFreeI(k).D()
    return MapDiffForm(derived, psi, w[2:])
| 29.44 | 108 | 0.662364 |
14c7c434712494ee16c87381bf7a5f04a3a1435c | 7,583 | py | Python | vispy/visuals/tests/test_mesh.py | theGiallo/vispy | 081a1216e336e8c2c9aa4c61b926ba771bb5479f | [
"BSD-3-Clause"
] | null | null | null | vispy/visuals/tests/test_mesh.py | theGiallo/vispy | 081a1216e336e8c2c9aa4c61b926ba771bb5479f | [
"BSD-3-Clause"
] | 1 | 2021-06-04T13:48:46.000Z | 2021-06-05T10:57:33.000Z | vispy/visuals/tests/test_mesh.py | theGiallo/vispy | 081a1216e336e8c2c9aa4c61b926ba771bb5479f | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import numpy as np
from vispy import scene
from vispy.geometry import create_cube, create_sphere
from vispy.testing import (TestingCanvas, requires_application,
run_tests_if_main, requires_pyopengl)
from vispy.visuals.filters import WireframeFilter
import pytest
@requires_pyopengl()
def test_mesh_color():
    """Setting Mesh.color must not clobber the vertex data (regression test)."""
    cube_vertices, _filled, outline = create_cube()
    mesh = scene.visuals.Mesh(cube_vertices['position'], outline,
                              color='black', mode='lines')
    # Changing the color used to reset the vertex buffer to None.
    mesh.color = (0.1, 0.3, 0.7, 0.9)
    np.testing.assert_allclose(mesh.color.rgba, (0.1, 0.3, 0.7, 0.9))
    np.testing.assert_allclose(cube_vertices['position'],
                               mesh.mesh_data.get_vertices())
@requires_pyopengl()
@requires_application()
@pytest.mark.parametrize('shading', [None, 'flat', 'smooth'])
def test_mesh_shading_filter(shading):
    """A shaded sphere renders a brightness gradient; unshaded renders flat colors."""
    size = (45, 40)
    with TestingCanvas(size=size, bgcolor="k") as c:
        v = c.central_widget.add_view(border_width=0)
        # Create visual
        mdata = create_sphere(20, 40, radius=20)
        mesh = scene.visuals.Mesh(meshdata=mdata,
                                  shading=shading,
                                  color=(0.1, 0.3, 0.7, 0.9))
        v.add(mesh)
        from vispy.visuals.transforms import STTransform
        mesh.transform = STTransform(translate=(20, 20))
        mesh.transforms.scene_transform = STTransform(scale=(1, 1, 0.01))
        rendered = c.render()[..., 0]  # R channel only
        if shading in ("flat", "smooth"):
            # there should be a gradient, not solid colors
            assert np.unique(rendered).size >= 28
            # sphere/circle starts "dark" on the left and gets brighter
            # then hits a bright spot and decreases after
            invest_row = rendered[23].astype(np.float64)
            # overall, we should be increasing brightness up to a "bright spot"
            assert (np.diff(invest_row[:29]) >= -1).all()
        else:
            # no shading: only background plus the flat mesh color
            assert np.unique(rendered).size == 2
@requires_pyopengl()
def test_mesh_bounds():
    """Mesh.bounds gives (-1, 1) per axis for the unit cube; a dropped axis collapses to 0."""
    cube_vertices, _filled, outline = create_cube()
    mesh3d = scene.visuals.Mesh(cube_vertices['position'], outline,
                                color='black', mode='lines')
    for axis_index in range(3):
        np.testing.assert_allclose(mesh3d.bounds(axis_index), (-1.0, 1.0))
    # Project the cube onto 2D: x/y keep their extent, z collapses to zero.
    mesh2d = scene.visuals.Mesh(cube_vertices['position'][:, :2], outline,
                                color='black', mode='lines')
    for axis_index in range(2):
        np.testing.assert_allclose(mesh2d.bounds(axis_index), (-1.0, 1.0))
    np.testing.assert_allclose(mesh2d.bounds(2), (0.0, 0.0))
@requires_pyopengl()
@requires_application()
def test_mesh_wireframe_filter():
    """Toggling WireframeFilter (and its wireframe_only/faces_only modes)
    must produce four mutually distinct renderings."""
    size = (45, 40)
    with TestingCanvas(size=size, bgcolor="k") as c:
        v = c.central_widget.add_view(border_width=0)
        # Create visual
        mdata = create_sphere(20, 40, radius=20)
        mesh = scene.visuals.Mesh(meshdata=mdata,
                                  shading=None,
                                  color=(0.1, 0.3, 0.7, 0.9))
        wireframe_filter = WireframeFilter(color='red')
        mesh.attach(wireframe_filter)
        v.add(mesh)
        from vispy.visuals.transforms import STTransform
        mesh.transform = STTransform(translate=(20, 20))
        mesh.transforms.scene_transform = STTransform(scale=(1, 1, 0.01))
        rendered_with_wf = c.render()
        # The red wireframe adds many distinct red values over the flat mesh.
        assert np.unique(rendered_with_wf[..., 0]).size >= 50
        wireframe_filter.enabled = False
        rendered_wo_wf = c.render()
        # the result should be completely different
        # assert not allclose
        pytest.raises(AssertionError, np.testing.assert_allclose,
                      rendered_with_wf, rendered_wo_wf)
        wireframe_filter.enabled = True
        wireframe_filter.wireframe_only = True
        rendered_with_wf_only = c.render()
        # the result should be different from the two cases above
        pytest.raises(AssertionError, np.testing.assert_allclose,
                      rendered_with_wf_only, rendered_with_wf)
        pytest.raises(AssertionError, np.testing.assert_allclose,
                      rendered_with_wf_only, rendered_wo_wf)
        wireframe_filter.enabled = True
        wireframe_filter.wireframe_only = False
        wireframe_filter.faces_only = True
        rendered_with_faces_only = c.render()
        # the result should be different from the cases above
        pytest.raises(AssertionError, np.testing.assert_allclose,
                      rendered_with_faces_only, rendered_with_wf)
        pytest.raises(AssertionError, np.testing.assert_allclose,
                      rendered_with_faces_only, rendered_wo_wf)
        pytest.raises(AssertionError, np.testing.assert_allclose,
                      rendered_with_faces_only, rendered_with_wf_only)
@requires_pyopengl()
@requires_application()
def test_mesh_normals():
    """MeshNormals visuals (face and vertex) must add visible red/green
    markers on top of an otherwise dark mesh rendering."""
    size = (45, 40)
    with TestingCanvas(size=size, bgcolor="k") as c:
        v = c.central_widget.add_view(border_width=0)
        # Create visual
        mdata = create_sphere(20, 40, radius=20)
        mesh = scene.visuals.Mesh(meshdata=mdata,
                                  shading=None,
                                  color=(0.1, 0.1, 0.1, 1.0))
        v.add(mesh)
        from vispy.visuals.transforms import STTransform
        local_transform = STTransform(translate=(20, 20))
        scene_transform = STTransform(scale=(1, 1, 0.01))
        mesh.transform = local_transform
        mesh.transforms.scene_transform = scene_transform
        rendered_without_normals = c.render()
        # The color should be of low intensity.
        assert np.all((rendered_without_normals[..., 0:3]) < 32)
        face_normals = scene.visuals.MeshNormals(mdata, primitive="face",
                                                 color=(1, 0, 0))
        face_normals.parent = mesh
        # XXX: This seems to be repeated. Could this be set on a higher level?
        face_normals.transforms.scene_transform = scene_transform
        rendered_with_face_normals = c.render()
        face_normals.parent = None
        # There should be some pixels with brighter red.
        assert np.sum(rendered_with_face_normals[..., 0] > 128) > 64
        pytest.raises(AssertionError, np.testing.assert_allclose,
                      rendered_without_normals, rendered_with_face_normals)
        vertex_normals = scene.visuals.MeshNormals(mdata, primitive="vertex",
                                                   color=(0, 1, 0))
        vertex_normals.parent = mesh
        # XXX: Same as above.
        vertex_normals.transforms.scene_transform = scene_transform
        rendered_with_vertex_normals = c.render()
        vertex_normals.parent = None
        # There should be some pixels with brighter green.
        assert np.sum(rendered_with_vertex_normals[..., 1] > 128) > 64
        pytest.raises(AssertionError, np.testing.assert_allclose,
                      rendered_without_normals, rendered_with_vertex_normals)
        pytest.raises(AssertionError, np.testing.assert_allclose,
                      rendered_with_face_normals, rendered_with_vertex_normals)
# vispy helper: execute this module's tests when the file is run as a script.
run_tests_if_main()
| 40.989189 | 79 | 0.632731 |
9f5f89113bc6f1696aa59fc5348d159c42ef7989 | 3,018 | py | Python | tools/vid2img_kinetics.py | eynaij/X-Temporal | 7dde0457b10be703e70329312ea55b14d7364f88 | [
"MIT"
] | 1 | 2020-11-05T03:05:30.000Z | 2020-11-05T03:05:30.000Z | tools/vid2img_kinetics.py | eynaij/X-Temporal | 7dde0457b10be703e70329312ea55b14d7364f88 | [
"MIT"
] | null | null | null | tools/vid2img_kinetics.py | eynaij/X-Temporal | 7dde0457b10be703e70329312ea55b14d7364f88 | [
"MIT"
] | 1 | 2020-11-05T03:14:59.000Z | 2020-11-05T03:14:59.000Z | # Code for "TSM: Temporal Shift Module for Efficient Video Understanding"
# arXiv:1811.08383
# Ji Lin*, Chuang Gan, Song Han
# {jilin, songhan}@mit.edu, ganchuang@csail.mit.edu
from __future__ import print_function, division
import os
import sys
import subprocess
from multiprocessing import Pool
from tqdm import tqdm
import glob
# Size of the multiprocessing pool used by class_process (tune per machine).
n_thread = 100
def vid2jpg(file_name, class_path, dst_class_path):
    """Extract JPEG frames from one video with ffmpeg.

    file_name: path to the source video; the frame directory name is the
        file's stem (anything up to a '003/' path component is stripped).
    class_path: unused here (kept for interface compatibility).
    dst_class_path: directory that receives one frame sub-directory per video.

    Videos whose frame directory already contains img_00001.jpg are skipped;
    half-finished directories are deleted and re-created first.
    """
    import shlex
    import shutil

    name, ext = os.path.splitext(file_name)
    name = name.split('003/')[-1]
    print(name)
    dst_directory_path = os.path.join(dst_class_path, name)
    video_file_path = file_name
    try:
        if os.path.exists(dst_directory_path):
            if not os.path.exists(os.path.join(
                    dst_directory_path, 'img_00001.jpg')):
                # Incomplete conversion: wipe and redo. shutil.rmtree replaces
                # the shell 'rm -r', which broke on spaces/metacharacters in
                # the path and was injectable.
                shutil.rmtree(dst_directory_path)
                print('remove {}'.format(dst_directory_path))
                os.mkdir(dst_directory_path)
            else:
                print('*** convert has been done: {}'.format(dst_directory_path))
                return
        else:
            # Replaces os.system('mkdir -p %s' % ...), same nested-dir creation
            # without going through a shell.
            os.makedirs(dst_directory_path)
    except BaseException:
        # Mirror the original best-effort behaviour: any setup failure skips
        # this video silently.
        return
    # shlex.quote protects against paths containing spaces or shell metacharacters.
    out_pattern = os.path.join(dst_directory_path, 'img_%05d.jpg')
    cmd = 'ffmpeg -i {} -threads 1 -vf scale=-1:331 -q:v 0 {}'.format(
        shlex.quote(video_file_path), shlex.quote(out_pattern))
    subprocess.call(cmd, shell=True,
                    stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
def class_process(dir_path, dst_dir_path, class_name):
    """Convert every */*.mp4 under dir_path using a multiprocessing pool.

    class_name is only used for progress output; dir_path / dst_dir_path are
    used directly (the per-class sub-directory logic is commented out).
    """
    print('*' * 20, class_name, '*' * 20)
    # class_path = os.path.join(dir_path, class_name)
    class_path = dir_path
    if not os.path.isdir(class_path):
        print('*** is not a dir {}'.format(class_path))
        return
    # dst_class_path = os.path.join(dst_dir_path, class_name)
    dst_class_path = dst_dir_path
    if not os.path.exists(dst_class_path):
        os.mkdir(dst_class_path)
    # BUG FIX: removed the dead `vid_list = sorted(os.listdir(class_path))`,
    # whose result was immediately overwritten by the glob below.
    vid_list = glob.glob(class_path + '/*/*mp4')
    p = Pool(n_thread)
    from functools import partial
    worker = partial(
        vid2jpg,
        class_path=class_path,
        dst_class_path=dst_class_path)
    # tqdm drives the lazy imap_unordered iterator and shows progress.
    for _ in tqdm(p.imap_unordered(worker, vid_list), total=len(vid_list)):
        pass
    p.close()
    p.join()
    print('\n')
if __name__ == "__main__":
    # Usage: python vid2img_kinetics.py <video_root> <frame_root>
    dir_path = sys.argv[1]
    dst_dir_path = sys.argv[2]
    class_list = sorted(os.listdir(dir_path))
    for class_name in class_list:
        class_process(dir_path, dst_dir_path, class_name)
    # NOTE(review): class_process ignores class_name except for logging, so the
    # loop above already scans everything; this extra 'test' pass repeats the
    # same scan (finished directories are skipped) — confirm intent.
    class_name = 'test'
    class_process(dir_path, dst_dir_path, class_name)
| 31.113402 | 91 | 0.633863 |
00d932f0551fe04bafe7e4b0f8fcf5c7da193da4 | 2,997 | py | Python | ee/clickhouse/sql/dead_letter_queue.py | msnitish/posthog | cb86113f568e72eedcb64b5fd00c313d21e72f90 | [
"MIT"
] | null | null | null | ee/clickhouse/sql/dead_letter_queue.py | msnitish/posthog | cb86113f568e72eedcb64b5fd00c313d21e72f90 | [
"MIT"
] | null | null | null | ee/clickhouse/sql/dead_letter_queue.py | msnitish/posthog | cb86113f568e72eedcb64b5fd00c313d21e72f90 | [
"MIT"
] | null | null | null | from ee.clickhouse.sql.clickhouse import KAFKA_COLUMNS, kafka_engine, ttl_period
from ee.clickhouse.sql.table_engines import ReplacingMergeTree
from ee.kafka_client.topics import KAFKA_DEAD_LETTER_QUEUE
from posthog.settings import CLICKHOUSE_CLUSTER, CLICKHOUSE_DATABASE
# We pipe our Kafka dead letter queue into CH for easier analysis and longer retention
# This allows us to explore errors and replay events with ease
DEAD_LETTER_QUEUE_TABLE = "events_dead_letter_queue"
DEAD_LETTER_QUEUE_TABLE_BASE_SQL = """
CREATE TABLE IF NOT EXISTS {table_name} ON CLUSTER '{cluster}'
(
id UUID,
event_uuid UUID,
event VARCHAR,
properties VARCHAR,
distinct_id VARCHAR,
team_id Int64,
elements_chain VARCHAR,
created_at DateTime64(6, 'UTC'),
ip VARCHAR,
site_url VARCHAR,
now DateTime64(6, 'UTC'),
raw_payload VARCHAR,
error_timestamp DateTime64(6, 'UTC'),
error_location VARCHAR,
error VARCHAR,
tags Array(VARCHAR)
{extra_fields}
) ENGINE = {engine}
"""
DEAD_LETTER_QUEUE_TABLE_ENGINE = lambda: ReplacingMergeTree(DEAD_LETTER_QUEUE_TABLE, ver="_timestamp")
DEAD_LETTER_QUEUE_TABLE_SQL = lambda: (
DEAD_LETTER_QUEUE_TABLE_BASE_SQL
+ """ORDER BY (id, event_uuid, distinct_id, team_id)
{ttl_period}
SETTINGS index_granularity=512
"""
).format(
table_name=DEAD_LETTER_QUEUE_TABLE,
cluster=CLICKHOUSE_CLUSTER,
extra_fields=KAFKA_COLUMNS,
engine=DEAD_LETTER_QUEUE_TABLE_ENGINE(),
ttl_period=ttl_period("_timestamp", 4), # 4 weeks
)
KAFKA_DEAD_LETTER_QUEUE_TABLE_SQL = lambda: DEAD_LETTER_QUEUE_TABLE_BASE_SQL.format(
table_name="kafka_" + DEAD_LETTER_QUEUE_TABLE,
cluster=CLICKHOUSE_CLUSTER,
engine=kafka_engine(topic=KAFKA_DEAD_LETTER_QUEUE),
extra_fields="",
)
DEAD_LETTER_QUEUE_TABLE_MV_SQL = """
CREATE MATERIALIZED VIEW IF NOT EXISTS {table_name}_mv ON CLUSTER '{cluster}'
TO {database}.{table_name}
AS SELECT
id,
event_uuid,
event,
properties,
distinct_id,
team_id,
elements_chain,
created_at,
ip,
site_url,
now,
raw_payload,
error_timestamp,
error_location,
error,
tags,
_timestamp,
_offset
FROM {database}.kafka_{table_name}
""".format(
table_name=DEAD_LETTER_QUEUE_TABLE, cluster=CLICKHOUSE_CLUSTER, database=CLICKHOUSE_DATABASE,
)
INSERT_DEAD_LETTER_QUEUE_EVENT_SQL = """
INSERT INTO events_dead_letter_queue
SELECT
%(id)s,
%(event_uuid)s,
%(event)s,
%(properties)s,
%(distinct_id)s,
%(team_id)s,
%(elements_chain)s,
%(created_at)s,
%(ip)s,
%(site_url)s,
%(now)s,
%(raw_payload)s,
%(error_timestamp)s,
%(error_location)s,
%(error)s,
['some_tag'],
0,
now()
"""
TRUNCATE_DEAD_LETTER_QUEUE_TABLE_SQL = (
f"TRUNCATE TABLE IF EXISTS {DEAD_LETTER_QUEUE_TABLE} ON CLUSTER '{CLICKHOUSE_CLUSTER}'"
)
DROP_KAFKA_DEAD_LETTER_QUEUE_TABLE_SQL = (
f"DROP TABLE IF EXISTS kafka_{DEAD_LETTER_QUEUE_TABLE} ON CLUSTER '{CLICKHOUSE_CLUSTER}'"
)
TRUNCATE_DEAD_LETTER_QUEUE_TABLE_MV_SQL = (
f"TRUNCATE TABLE IF EXISTS {DEAD_LETTER_QUEUE_TABLE}_mv ON CLUSTER '{CLICKHOUSE_CLUSTER}'"
)
| 25.836207 | 102 | 0.771772 |
4ff62b509ab1490e8621aab8b331350b929a7ecc | 669 | py | Python | python/hackerrank/AlternatingCharacters.py | leewalter/coding | 2afd9dbfc1ecb94def35b953f4195a310d6953c9 | [
"Apache-2.0"
] | null | null | null | python/hackerrank/AlternatingCharacters.py | leewalter/coding | 2afd9dbfc1ecb94def35b953f4195a310d6953c9 | [
"Apache-2.0"
] | null | null | null | python/hackerrank/AlternatingCharacters.py | leewalter/coding | 2afd9dbfc1ecb94def35b953f4195a310d6953c9 | [
"Apache-2.0"
] | 1 | 2020-08-29T17:12:52.000Z | 2020-08-29T17:12:52.000Z | '''
https://www.hackerrank.com/snippets/fde7efcb-163f-4574-a896-4944d37edb3e/quietwalters-snippet-from-alternating-characters
'''
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the alternatingCharacters function below.
def alternatingCharacters(s):
    """Return the minimum number of deletions so that no two adjacent
    characters of ``s`` are equal.

    Every pair of equal neighbours requires exactly one deletion, so the
    answer is simply the number of adjacent equal pairs. Replaces the
    original index-based loop with the idiomatic zip/sum pairwise scan.
    """
    # zip(s, s[1:]) yields each character with its successor; count equal pairs.
    return sum(1 for a, b in zip(s, s[1:]) if a == b)
if __name__ == '__main__':
    # HackerRank harness: read the number of queries, then one string per
    # query, and write each answer on its own line to OUTPUT_PATH.
    output_file = open(os.environ['OUTPUT_PATH'], 'w')
    query_count = int(input())
    for _ in range(query_count):
        line = input()
        output_file.write(str(alternatingCharacters(line)) + '\n')
    output_file.close()
| 18.583333 | 121 | 0.647235 |
36c55c1568bb20cb0a8890b1d8da917f4cd141fc | 3,304 | py | Python | tests/unit/test_accessor.py | matthewgdv/pathmagic | fe5788e02cfa7397cc0ef45ea7a0c5549ca4f261 | [
"MIT"
] | null | null | null | tests/unit/test_accessor.py | matthewgdv/pathmagic | fe5788e02cfa7397cc0ef45ea7a0c5549ca4f261 | [
"MIT"
] | 1 | 2021-02-08T10:48:05.000Z | 2021-02-08T10:48:05.000Z | tests/unit/test_accessor.py | matthewgdv/pathmagic | fe5788e02cfa7397cc0ef45ea7a0c5549ca4f261 | [
"MIT"
] | null | null | null | import pytest
from pathmagic import Dir, File
from pathmagic.accessor import Name, AmbiguityError
from tests.conftest import unnecessary, abstract, untestable
class TestAccessor:
def test___call__(self, temp_root: Dir, temp_dir: Dir, temp_file: File): # synced
file_name, = temp_root.files()
assert file_name == temp_file.name
dir_name, = temp_root.dirs()
assert dir_name == temp_dir.name
def test___len__(self, temp_root: Dir): # synced
assert len(temp_root.files) == 0
temp_root.new_file('test', 'json')
assert len(temp_root.files) == 1
assert len(temp_root.dirs) == 0
temp_root.new_dir('test')
assert len(temp_root.dirs) == 1
def test___iter__(self, temp_root: Dir, temp_dir: Dir, temp_file: File): # synced
assert set(temp_root.files) == {temp_file}
assert set(temp_root.dirs) == {temp_dir}
def test___contains__(self, temp_root: Dir, temp_dir: Dir, temp_file: File): # synced
home = Dir.from_home()
assert (
temp_file in temp_root.files
and temp_dir in temp_root.dirs
and temp_root not in home.dirs
and temp_root not in home.files
)
@abstract
def test___getitem__(self, temp_root: Dir, temp_dir: Dir, temp_file: File): # synced
assert True
@unnecessary
def test___setitem__(self): # synced
assert True
def test___delitem__(self, temp_root: Dir, temp_dir: Dir, temp_file: File): # synced
assert temp_dir.path.exists() and temp_file.path.exists()
del temp_root.files[temp_file.name]
del temp_root.dirs[temp_dir.name]
assert not temp_dir.path.exists() and not temp_file.path.exists()
@untestable
def test___getattribute__(self): # synced
assert True
@untestable
def test___getattr__(self, temp_dir: Dir): # synced
assert True
@abstract
def test__synchronize_(self): # synced
assert True
@untestable
def test__acquire_(self): # synced
assert True
class TestFileAccessor:
def test___getitem__(self, temp_root: Dir, temp_file: File): # synced
assert temp_root.files[temp_file.name] is temp_file
def test__synchronize_(self, temp_root: Dir): # synced
(temp_root.path / 'test.txt').touch()
assert 'test.txt' not in temp_root.files._items_
temp_root.files._synchronize_()
assert 'test.txt' in temp_root.files._items_
class TestDirAccessor:
def test___getitem__(self, temp_root: Dir, temp_dir: Dir): # synced
assert temp_root.dirs[temp_dir.name] is temp_dir
def test__synchronize_(self, temp_root: Dir): # synced
(temp_root.path / 'test').mkdir()
assert 'test' not in temp_root.dirs._items_
temp_root.dirs._synchronize_()
assert 'test' in temp_root.dirs._items_
class TestAmbiguityError:
pass
class TestName:
def test_access(self, temp_root: Dir, temp_file: File): # synced
with pytest.raises(AmbiguityError):
Name(clean_name='test', raw_names=['testing.txt', 'testing.json'], accessor=temp_root.files).access()
assert Name(clean_name='test', raw_names=['testing.txt'], accessor=temp_root.files).access() is temp_file
| 32.392157 | 113 | 0.666768 |
261e7f2c6aa1d506f3dd658d97795a3a5c435d61 | 2,480 | py | Python | Database Initialization/Creators/consistsof_init.py | georgevangelou/Bringing_yoU_Spiti | 6b98c6f1179e8e3dd3f41ff0b3d373eb1d8643a5 | [
"Apache-2.0"
] | 2 | 2021-03-05T19:36:24.000Z | 2021-03-08T19:52:06.000Z | Database Initialization/Creators/consistsof_init.py | n-roussos/Bringing_yoU_Spiti | cc0946d0b8a66bb3b9d7b988622fe262a68ba3fb | [
"Apache-2.0"
] | null | null | null | Database Initialization/Creators/consistsof_init.py | n-roussos/Bringing_yoU_Spiti | cc0946d0b8a66bb3b9d7b988622fe262a68ba3fb | [
"Apache-2.0"
] | 1 | 2021-03-05T19:58:51.000Z | 2021-03-05T19:58:51.000Z | import random
random.seed(3900)
import bus_stops_init
STOPS = bus_stops_init.BUS_STOPS
stopsDic = bus_stops_init.bustopsDic
import lines_init
SIZE = lines_init.LINES
linesDic = lines_init.linesDic
EXTRAS = lines_init.NAMES_EXTRAS
def getRandStop(currentStops, finalStop):
    """Draw a random stop id in [1, STOPS] by rejection sampling, skipping
    ids already present in ``currentStops`` and the line's ``finalStop``."""
    while True:
        candidate = random.randint(1, STOPS)
        if candidate not in currentStops and candidate != finalStop:
            return candidate
# Inclusive bounds for the number of stops on a line's normal variant.
STOPS_PER_LINE = [15, 25]
#insert_str = "INSERT INTO consists_of(line_id, bus_stop_id, i/i, eta_from_itinerary_start) VALUES "
insert_str = "INSERT INTO consists_of(line_id, bus_stop_id, sequence_index) VALUES "
# line_id -> [[stop ids], [sequence indices], [etas]] (the eta list stays empty here)
conofDic = {}
for i in range(SIZE):
    # Resolve the ids of the line's declared first and last stops by name.
    initialStop_Name = linesDic[i*len(EXTRAS)+1][1]
    finalStop_Name = linesDic[i*len(EXTRAS)+1][2]
    initialStop = -1
    finalStop = -1
    for k in range(STOPS):
        if stopsDic[k][1]==initialStop_Name:
            initialStop = k+1
        elif stopsDic[k][1]==finalStop_Name:
            finalStop = k+1
    # Build the normal line: start stop, random distinct middle stops, final stop.
    stops_of_line = [initialStop]
    NormLineSize = random.randint(STOPS_PER_LINE[0], STOPS_PER_LINE[1])
    ExprLineSize = random.randint(int(NormLineSize/4), int(NormLineSize/2))
    for j in range(NormLineSize-2): #NORMAL LINE
        stops_of_line.append(getRandStop(stops_of_line, finalStop))
    stops_of_line.append(finalStop)
    # The express variant is the normal line with random middle stops removed
    # (first and last stop are always preserved).
    stops_of_express = stops_of_line.copy()
    for j in range(NormLineSize-ExprLineSize): #EXPRESS LINE
        removed = random.randint(1, len(stops_of_express)-2)
        stops_of_express.pop(removed)
    #(line_id, bus_stop_id, i/i, eta_from_itinerary_start)
    conofDic[1+i*len(EXTRAS) + 0] = [[],[],[]] #[line] --> [[stops], [i/i], [eta]]
    conofDic[1+i*len(EXTRAS) + 1] = [[],[],[]] #[line] --> [[stops], [i/i], [eta]]
    for w in range(len(stops_of_express)):
        insert_str += "(" + str(1 + i*len(EXTRAS) + 0) + "," + str(stops_of_express[w]) + "," + str(1 + w) + ")," # TODO: should the ETAs perhaps not be NULL?
        conofDic[1+i*len(EXTRAS) + 0][0].append(stops_of_express[w])
        conofDic[1+i*len(EXTRAS) + 0][1].append(w)
    for w in range(len(stops_of_line)):
        insert_str += "(" + str(1 + i*len(EXTRAS) + 1) + "," + str(stops_of_line[w]) + "," + str(1 + w) + "),"
        conofDic[1+i*len(EXTRAS) + 1][0].append(stops_of_line[w])
        conofDic[1+i*len(EXTRAS) + 1][1].append(w)
# Replace the trailing comma with the statement terminator and append the
# finished INSERT to the shared SQL script file.
insert_str = insert_str[:-1] + ";"
with open("strings.txt", 'a') as f: f.write("\n\n" + insert_str)
ee0253ace1355b321ea73b50fd394dcd96a03122 | 13,218 | py | Python | mars/dataframe/groupby/tests/test_groupby.py | xccheng/mars | 8146d1b7d3f3bc2a652c414a336a2f884a06a108 | [
"Apache-2.0"
] | 1 | 2020-11-05T05:53:00.000Z | 2020-11-05T05:53:00.000Z | mars/dataframe/groupby/tests/test_groupby.py | xccheng/mars | 8146d1b7d3f3bc2a652c414a336a2f884a06a108 | [
"Apache-2.0"
] | null | null | null | mars/dataframe/groupby/tests/test_groupby.py | xccheng/mars | 8146d1b7d3f3bc2a652c414a336a2f884a06a108 | [
"Apache-2.0"
] | null | null | null | # Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
from collections import OrderedDict
import mars.dataframe as md
from mars import opcodes
from mars.core import OutputType
from mars.dataframe.core import DataFrameGroupBy, SeriesGroupBy, DataFrame
from mars.dataframe.groupby.core import DataFrameGroupByOperand, DataFrameShuffleProxy
from mars.dataframe.groupby.aggregation import DataFrameGroupByAgg
from mars.dataframe.groupby.getitem import GroupByIndex
from mars.operands import OperandStage
from mars.tests.core import TestBase
class Test(TestBase):
def testGroupBy(self):
df = pd.DataFrame({'a': [3, 4, 5, 3, 5, 4, 1, 2, 3],
'b': [1, 3, 4, 5, 6, 5, 4, 4, 4]})
mdf = md.DataFrame(df, chunk_size=2)
with self.assertRaises(KeyError):
mdf.groupby('c2')
with self.assertRaises(KeyError):
mdf.groupby(['b', 'c2'])
grouped = mdf.groupby('b')
self.assertIsInstance(grouped, DataFrameGroupBy)
self.assertIsInstance(grouped.op, DataFrameGroupByOperand)
self.assertEqual(list(grouped.key_dtypes.index), ['b'])
grouped = grouped.tiles()
self.assertEqual(len(grouped.chunks), 5)
for chunk in grouped.chunks:
self.assertIsInstance(chunk.op, DataFrameGroupByOperand)
series = pd.Series([3, 4, 5, 3, 5, 4, 1, 2, 3])
ms = md.Series(series, chunk_size=3)
grouped = ms.groupby(lambda x: x + 1)
self.assertIsInstance(grouped, SeriesGroupBy)
self.assertIsInstance(grouped.op, DataFrameGroupByOperand)
grouped = grouped.tiles()
self.assertEqual(len(grouped.chunks), 3)
for chunk in grouped.chunks:
self.assertIsInstance(chunk.op, DataFrameGroupByOperand)
with self.assertRaises(TypeError):
ms.groupby(lambda x: x + 1, as_index=False)
def testGroupByGetItem(self):
df1 = pd.DataFrame({'a': [3, 4, 5, 3, 5, 4, 1, 2, 3],
'b': [1, 3, 4, 5, 6, 5, 4, 4, 4],
'c': list('aabaaddce')})
mdf = md.DataFrame(df1, chunk_size=3)
r = mdf.groupby('b')[['a', 'b']].tiles()
self.assertIsInstance(r, DataFrameGroupBy)
self.assertIsInstance(r.op, GroupByIndex)
self.assertEqual(r.selection, ['a', 'b'])
self.assertEqual(list(r.key_dtypes.index), ['b'])
self.assertEqual(len(r.chunks), 3)
r = mdf.groupby('b').a.tiles()
self.assertIsInstance(r, SeriesGroupBy)
self.assertIsInstance(r.op, GroupByIndex)
self.assertEqual(r.name, 'a')
self.assertEqual(list(r.key_dtypes.index), ['b'])
self.assertEqual(len(r.chunks), 3)
with self.assertRaises(IndexError):
getattr(mdf.groupby('b')[['a', 'b']], 'a')
def testGroupByAgg(self):
df = pd.DataFrame({'a': np.random.choice([2, 3, 4], size=(20,)),
'b': np.random.choice([2, 3, 4], size=(20,))})
mdf = md.DataFrame(df, chunk_size=3)
r = mdf.groupby('a').agg('sum', method='tree')
self.assertIsInstance(r.op, DataFrameGroupByAgg)
self.assertIsInstance(r, DataFrame)
self.assertEqual(r.op.method, 'tree')
r = r.tiles()
self.assertEqual(len(r.chunks), 1)
self.assertEqual(r.chunks[0].op.stage, OperandStage.agg)
self.assertEqual(len(r.chunks[0].inputs), 1)
self.assertEqual(len(r.chunks[0].inputs[0].inputs), 2)
df = pd.DataFrame({'c1': range(10),
'c2': np.random.choice(['a', 'b', 'c'], (10,)),
'c3': np.random.rand(10)})
mdf = md.DataFrame(df, chunk_size=2)
r = mdf.groupby('c2').sum(method='shuffle')
self.assertIsInstance(r.op, DataFrameGroupByAgg)
self.assertIsInstance(r, DataFrame)
r = r.tiles()
self.assertEqual(len(r.chunks), 5)
for chunk in r.chunks:
self.assertIsInstance(chunk.op, DataFrameGroupByAgg)
self.assertEqual(chunk.op.stage, OperandStage.agg)
self.assertIsInstance(chunk.inputs[0].op, DataFrameGroupByOperand)
self.assertEqual(chunk.inputs[0].op.stage, OperandStage.reduce)
self.assertIsInstance(chunk.inputs[0].inputs[0].op, DataFrameShuffleProxy)
self.assertIsInstance(chunk.inputs[0].inputs[0].inputs[0].op, DataFrameGroupByOperand)
self.assertEqual(chunk.inputs[0].inputs[0].inputs[0].op.stage, OperandStage.map)
agg_chunk = chunk.inputs[0].inputs[0].inputs[0].inputs[0]
self.assertEqual(agg_chunk.op.stage, OperandStage.map)
# test unknown method
with self.assertRaises(ValueError):
mdf.groupby('c2').sum(method='not_exist')
def testGroupByApply(self):
df1 = pd.DataFrame({'a': [3, 4, 5, 3, 5, 4, 1, 2, 3],
'b': [1, 3, 4, 5, 6, 5, 4, 4, 4],
'c': list('aabaaddce')})
def apply_df(df):
return df.sort_index()
def apply_series(s):
return s.sort_index()
mdf = md.DataFrame(df1, chunk_size=3)
applied = mdf.groupby('b').apply(apply_df).tiles()
pd.testing.assert_series_equal(applied.dtypes, df1.dtypes)
self.assertEqual(applied.shape, (np.nan, 3))
self.assertEqual(applied.op._op_type_, opcodes.APPLY)
self.assertEqual(applied.op.output_types[0], OutputType.dataframe)
self.assertEqual(len(applied.chunks), 3)
self.assertEqual(applied.chunks[0].shape, (np.nan, 3))
pd.testing.assert_series_equal(applied.chunks[0].dtypes, df1.dtypes)
applied = mdf.groupby('b').apply(lambda df: df.a).tiles()
self.assertEqual(applied.dtype, df1.a.dtype)
self.assertEqual(applied.shape, (np.nan,))
self.assertEqual(applied.op._op_type_, opcodes.APPLY)
self.assertEqual(applied.op.output_types[0], OutputType.series)
self.assertEqual(len(applied.chunks), 3)
self.assertEqual(applied.chunks[0].shape, (np.nan,))
self.assertEqual(applied.chunks[0].dtype, df1.a.dtype)
applied = mdf.groupby('b').apply(lambda df: df.a.sum()).tiles()
self.assertEqual(applied.dtype, df1.a.dtype)
self.assertEqual(applied.shape, (np.nan,))
self.assertEqual(applied.op._op_type_, opcodes.APPLY)
self.assertEqual(applied.op.output_types[0], OutputType.series)
self.assertEqual(len(applied.chunks), 3)
self.assertEqual(applied.chunks[0].shape, (np.nan,))
self.assertEqual(applied.chunks[0].dtype, df1.a.dtype)
series1 = pd.Series([3, 4, 5, 3, 5, 4, 1, 2, 3])
ms1 = md.Series(series1, chunk_size=3)
applied = ms1.groupby(lambda x: x % 3).apply(apply_series).tiles()
self.assertEqual(applied.dtype, series1.dtype)
self.assertEqual(applied.shape, (np.nan,))
self.assertEqual(applied.op._op_type_, opcodes.APPLY)
self.assertEqual(applied.op.output_types[0], OutputType.series)
self.assertEqual(len(applied.chunks), 3)
self.assertEqual(applied.chunks[0].shape, (np.nan,))
self.assertEqual(applied.chunks[0].dtype, series1.dtype)
def testGroupByTransform(self):
df1 = pd.DataFrame({
'a': [3, 4, 5, 3, 5, 4, 1, 2, 3],
'b': [1, 3, 4, 5, 6, 5, 4, 4, 4],
'c': list('aabaaddce'),
'd': [3, 4, 5, 3, 5, 4, 1, 2, 3],
'e': [1, 3, 4, 5, 6, 5, 4, 4, 4],
'f': list('aabaaddce'),
})
def transform_df(df):
return df.sort_index()
mdf = md.DataFrame(df1, chunk_size=3)
with self.assertRaises(TypeError):
mdf.groupby('b').transform(['cummax', 'cumcount'])
r = mdf.groupby('b').transform(transform_df).tiles()
self.assertListEqual(r.dtypes.index.tolist(), list('acdef'))
self.assertEqual(r.shape, (9, 5))
self.assertEqual(r.op._op_type_, opcodes.TRANSFORM)
self.assertEqual(r.op.output_types[0], OutputType.dataframe)
self.assertEqual(len(r.chunks), 3)
self.assertEqual(r.chunks[0].shape, (np.nan, 5))
self.assertListEqual(r.chunks[0].dtypes.index.tolist(), list('acdef'))
r = mdf.groupby('b').transform(['cummax', 'cumcount'], _call_agg=True).tiles()
self.assertEqual(r.shape, (np.nan, 6))
self.assertEqual(r.op._op_type_, opcodes.TRANSFORM)
self.assertEqual(r.op.output_types[0], OutputType.dataframe)
self.assertEqual(len(r.chunks), 3)
self.assertEqual(r.chunks[0].shape, (np.nan, 6))
agg_dict = OrderedDict([('d', 'cummax'), ('b', 'cumsum')])
r = mdf.groupby('b').transform(agg_dict, _call_agg=True).tiles()
self.assertEqual(r.shape, (np.nan, 2))
self.assertEqual(r.op._op_type_, opcodes.TRANSFORM)
self.assertEqual(r.op.output_types[0], OutputType.dataframe)
self.assertEqual(len(r.chunks), 3)
self.assertEqual(r.chunks[0].shape, (np.nan, 2))
agg_list = ['sum', lambda s: s.sum()]
r = mdf.groupby('b').transform(agg_list, _call_agg=True).tiles()
self.assertEqual(r.shape, (np.nan, 10))
self.assertEqual(r.op._op_type_, opcodes.TRANSFORM)
self.assertEqual(r.op.output_types[0], OutputType.dataframe)
self.assertEqual(len(r.chunks), 3)
self.assertEqual(r.chunks[0].shape, (np.nan, 10))
series1 = pd.Series([3, 4, 5, 3, 5, 4, 1, 2, 3])
ms1 = md.Series(series1, chunk_size=3)
r = ms1.groupby(lambda x: x % 3).transform(lambda x: x + 1).tiles()
self.assertEqual(r.dtype, series1.dtype)
self.assertEqual(r.shape, series1.shape)
self.assertEqual(r.op._op_type_, opcodes.TRANSFORM)
self.assertEqual(r.op.output_types[0], OutputType.series)
self.assertEqual(len(r.chunks), 3)
self.assertEqual(r.chunks[0].shape, (np.nan,))
self.assertEqual(r.chunks[0].dtype, series1.dtype)
r = ms1.groupby(lambda x: x % 3).transform('cummax', _call_agg=True).tiles()
self.assertEqual(r.shape, (np.nan,))
self.assertEqual(r.op._op_type_, opcodes.TRANSFORM)
self.assertEqual(r.op.output_types[0], OutputType.series)
self.assertEqual(len(r.chunks), 3)
self.assertEqual(r.chunks[0].shape, (np.nan,))
agg_list = ['cummax', 'cumcount']
r = ms1.groupby(lambda x: x % 3).transform(agg_list, _call_agg=True).tiles()
self.assertEqual(r.shape, (np.nan, 2))
self.assertEqual(r.op._op_type_, opcodes.TRANSFORM)
self.assertEqual(r.op.output_types[0], OutputType.dataframe)
self.assertEqual(len(r.chunks), 3)
self.assertEqual(r.chunks[0].shape, (np.nan, 2))
def testGroupByCum(self):
df1 = pd.DataFrame({'a': [3, 5, 2, 7, 1, 2, 4, 6, 2, 4],
'b': [8, 3, 4, 1, 8, 2, 2, 2, 2, 3],
'c': [1, 8, 8, 5, 3, 5, 0, 0, 5, 4]})
mdf = md.DataFrame(df1, chunk_size=3)
for fun in ['cummin', 'cummax', 'cumprod', 'cumsum']:
r = getattr(mdf.groupby('b'), fun)().tiles()
self.assertEqual(r.op.output_types[0], OutputType.dataframe)
self.assertEqual(len(r.chunks), 4)
self.assertEqual(r.shape, (len(df1), 2))
self.assertEqual(r.chunks[0].shape, (np.nan, 2))
pd.testing.assert_index_equal(r.chunks[0].columns_value.to_pandas(), pd.Index(['a', 'c']))
r = getattr(mdf.groupby('b'), fun)(axis=1).tiles()
self.assertEqual(r.op.output_types[0], OutputType.dataframe)
self.assertEqual(len(r.chunks), 4)
self.assertEqual(r.shape, (len(df1), 3))
self.assertEqual(r.chunks[0].shape, (np.nan, 3))
pd.testing.assert_index_equal(r.chunks[0].columns_value.to_pandas(), df1.columns)
r = mdf.groupby('b').cumcount().tiles()
self.assertEqual(r.op.output_types[0], OutputType.series)
self.assertEqual(len(r.chunks), 4)
self.assertEqual(r.shape, (len(df1),))
self.assertEqual(r.chunks[0].shape, (np.nan,))
series1 = pd.Series([2, 2, 5, 7, 3, 7, 8, 8, 5, 6])
ms1 = md.Series(series1, chunk_size=3)
for fun in ['cummin', 'cummax', 'cumprod', 'cumsum', 'cumcount']:
r = getattr(ms1.groupby(lambda x: x % 2), fun)().tiles()
self.assertEqual(r.op.output_types[0], OutputType.series)
self.assertEqual(len(r.chunks), 4)
self.assertEqual(r.shape, (len(series1),))
self.assertEqual(r.chunks[0].shape, (np.nan,))
| 44.959184 | 102 | 0.611893 |
4388485df16a9b9463b0ac06422f702f6669034f | 8,679 | py | Python | packages/python/plotly/plotly/graph_objs/scattermapbox/marker/colorbar/title/__init__.py | pragyagarg642/plotly.py | 141aa6dcb3f838b2102db6ecc9ae1bdb70daf20b | [
"MIT"
] | 2 | 2020-04-11T19:28:30.000Z | 2020-05-04T03:16:20.000Z | packages/python/plotly/plotly/graph_objs/scattermapbox/marker/colorbar/title/__init__.py | pragyagarg642/plotly.py | 141aa6dcb3f838b2102db6ecc9ae1bdb70daf20b | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/graph_objs/scattermapbox/marker/colorbar/title/__init__.py | pragyagarg642/plotly.py | 141aa6dcb3f838b2102db6ecc9ae1bdb70daf20b | [
"MIT"
] | null | null | null | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Font(_BaseTraceHierarchyType):
    # NOTE(review): the repetitive validator/property structure suggests this
    # module is machine-generated (plotly graph_objs codegen) — confirm before
    # editing by hand, as regeneration would overwrite manual changes.
    # color
    # -----
    @property
    def color(self):
        """
        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color:
                aliceblue, antiquewhite, aqua, aquamarine, azure,
                beige, bisque, black, blanchedalmond, blue,
                blueviolet, brown, burlywood, cadetblue,
                chartreuse, chocolate, coral, cornflowerblue,
                cornsilk, crimson, cyan, darkblue, darkcyan,
                darkgoldenrod, darkgray, darkgrey, darkgreen,
                darkkhaki, darkmagenta, darkolivegreen, darkorange,
                darkorchid, darkred, darksalmon, darkseagreen,
                darkslateblue, darkslategray, darkslategrey,
                darkturquoise, darkviolet, deeppink, deepskyblue,
                dimgray, dimgrey, dodgerblue, firebrick,
                floralwhite, forestgreen, fuchsia, gainsboro,
                ghostwhite, gold, goldenrod, gray, grey, green,
                greenyellow, honeydew, hotpink, indianred, indigo,
                ivory, khaki, lavender, lavenderblush, lawngreen,
                lemonchiffon, lightblue, lightcoral, lightcyan,
                lightgoldenrodyellow, lightgray, lightgrey,
                lightgreen, lightpink, lightsalmon, lightseagreen,
                lightskyblue, lightslategray, lightslategrey,
                lightsteelblue, lightyellow, lime, limegreen,
                linen, magenta, maroon, mediumaquamarine,
                mediumblue, mediumorchid, mediumpurple,
                mediumseagreen, mediumslateblue, mediumspringgreen,
                mediumturquoise, mediumvioletred, midnightblue,
                mintcream, mistyrose, moccasin, navajowhite, navy,
                oldlace, olive, olivedrab, orange, orangered,
                orchid, palegoldenrod, palegreen, paleturquoise,
                palevioletred, papayawhip, peachpuff, peru, pink,
                plum, powderblue, purple, red, rosybrown,
                royalblue, rebeccapurple, saddlebrown, salmon,
                sandybrown, seagreen, seashell, sienna, silver,
                skyblue, slateblue, slategray, slategrey, snow,
                springgreen, steelblue, tan, teal, thistle, tomato,
                turquoise, violet, wheat, white, whitesmoke,
                yellow, yellowgreen
        Returns
        -------
        str
        """
        return self["color"]
    @color.setter
    def color(self, val):
        self["color"] = val
    # family
    # ------
    @property
    def family(self):
        """
        HTML font family - the typeface that will be applied by the web
        browser. The web browser will only be able to apply a font if
        it is available on the system which it operates. Provide
        multiple font families, separated by commas, to indicate the
        preference in which to apply fonts if they aren't available on
        the system. The Chart Studio Cloud (at https://chart-
        studio.plotly.com or on-premise) generates images on a server,
        where only a select number of fonts are installed and
        supported. These include "Arial", "Balto", "Courier New",
        "Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
        One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
        Narrow", "Raleway", "Times New Roman".
        The 'family' property is a string and must be specified as:
          - A non-empty string
        Returns
        -------
        str
        """
        return self["family"]
    @family.setter
    def family(self, val):
        self["family"] = val
    # size
    # ----
    @property
    def size(self):
        """
        The 'size' property is a number and may be specified as:
          - An int or float in the interval [1, inf]
        Returns
        -------
        int|float
        """
        return self["size"]
    @size.setter
    def size(self, val):
        self["size"] = val
    # property parent name
    # --------------------
    @property
    def _parent_path_str(self):
        # Dotted path of the parent node in the figure hierarchy.
        return "scattermapbox.marker.colorbar.title"
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        color
        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        size
        """
    def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
        """
        Construct a new Font object
        Sets this color bar's title font. Note that the title's font
        used to be set by the now deprecated `titlefont` attribute.
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.scattermapbox.
            marker.colorbar.title.Font`
        color
        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        size
        Returns
        -------
        Font
        """
        super(Font, self).__init__("font")
        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.scattermapbox.marker.colorbar.title.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattermapbox.marker.colorbar.title.Font`"""
            )
        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        # Import validators
        # -----------------
        from plotly.validators.scattermapbox.marker.colorbar.title import font as v_font
        # Initialize validators
        # ---------------------
        self._validators["color"] = v_font.ColorValidator()
        self._validators["family"] = v_font.FamilyValidator()
        self._validators["size"] = v_font.SizeValidator()
        # Populate data dict with properties
        # ----------------------------------
        _v = arg.pop("color", None)
        self["color"] = color if color is not None else _v
        _v = arg.pop("family", None)
        self["family"] = family if family is not None else _v
        _v = arg.pop("size", None)
        self["size"] = size if size is not None else _v
        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))
        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
| 37.571429 | 88 | 0.572186 |
fc4857bf0aff4178b507e74759703614be85862f | 23,190 | py | Python | desktop/core/ext-py/Twisted/twisted/conch/test/test_telnet.py | civascu/hue | 82f2de44789ff5a981ed725175bae7944832d1e9 | [
"Apache-2.0"
] | 19 | 2015-05-01T19:59:03.000Z | 2021-12-09T08:03:16.000Z | desktop/core/ext-py/Twisted/twisted/conch/test/test_telnet.py | civascu/hue | 82f2de44789ff5a981ed725175bae7944832d1e9 | [
"Apache-2.0"
] | 1 | 2018-01-03T15:26:49.000Z | 2018-01-03T15:26:49.000Z | desktop/core/ext-py/Twisted/twisted/conch/test/test_telnet.py | civascu/hue | 82f2de44789ff5a981ed725175bae7944832d1e9 | [
"Apache-2.0"
] | 30 | 2015-03-25T19:40:07.000Z | 2021-05-28T22:59:26.000Z | # -*- test-case-name: twisted.conch.test.test_telnet -*-
# Copyright (c) 2001-2007 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.conch.telnet}.
"""
from zope.interface import implements
from twisted.internet import defer
from twisted.conch import telnet
from twisted.trial import unittest
from twisted.test import proto_helpers
class TestProtocol:
    """
    An L{telnet.ITelnetProtocol} implementation that records everything the
    transport delivers to it (application bytes, command callbacks,
    subnegotiation payloads, and option enable/disable decisions) so tests
    can assert on it.
    """
    implements(telnet.ITelnetProtocol)
    # Options this side will agree to enable locally/remotely; tests override
    # these per-case.
    localEnableable = ()
    remoteEnableable = ()
    def __init__(self):
        self.bytes = ''
        self.subcmd = ''
        self.calls = []
        self.enabledLocal = []
        self.enabledRemote = []
        self.disabledLocal = []
        self.disabledRemote = []
    def makeConnection(self, transport):
        # Register a subnegotiation handler for option \x12 and route all
        # simple telnet commands into self.calls.
        d = transport.negotiationMap = {}
        d['\x12'] = self.neg_TEST_COMMAND
        d = transport.commandMap = transport.commandMap.copy()
        for cmd in ('NOP', 'DM', 'BRK', 'IP', 'AO', 'AYT', 'EC', 'EL', 'GA'):
            # cmd=cmd binds the loop variable at definition time; without the
            # default every lambda would see the final value of cmd.
            d[getattr(telnet, cmd)] = lambda arg, cmd=cmd: self.calls.append(cmd)
    def dataReceived(self, bytes):
        # Accumulate application-level bytes for later inspection.
        self.bytes += bytes
    def connectionLost(self, reason):
        pass
    def neg_TEST_COMMAND(self, payload):
        # Record the subnegotiation payload delivered for option \x12.
        self.subcmd = payload
    def enableLocal(self, option):
        # Agree only to options listed in localEnableable.
        if option in self.localEnableable:
            self.enabledLocal.append(option)
            return True
        return False
    def disableLocal(self, option):
        self.disabledLocal.append(option)
    def enableRemote(self, option):
        # Agree only to options listed in remoteEnableable.
        if option in self.remoteEnableable:
            self.enabledRemote.append(option)
            return True
        return False
    def disableRemote(self, option):
        self.disabledRemote.append(option)
class TelnetTransportTestCase(unittest.TestCase):
"""
Tests for L{telnet.TelnetTransport}.
"""
    def setUp(self):
        """
        Create a L{telnet.TelnetTransport} wrapping a L{TestProtocol} and
        connect it to a string transport so written bytes can be inspected.
        """
        self.p = telnet.TelnetTransport(TestProtocol)
        self.t = proto_helpers.StringTransport()
        self.p.makeConnection(self.t)
    def testRegularBytes(self):
        """
        Bytes with no telnet significance pass through to the application
        protocol unchanged.
        """
        # Just send a bunch of bytes. None of these do anything
        # with telnet. They should pass right through to the
        # application layer.
        h = self.p.protocol
        L = ["here are some bytes la la la",
             "some more arrive here",
             "lots of bytes to play with",
             "la la la",
             "ta de da",
             "dum"]
        for b in L:
            self.p.dataReceived(b)
        self.assertEquals(h.bytes, ''.join(L))
    def testNewlineHandling(self):
        """
        CR LF is translated to \\n and CR NUL to \\r on receipt.
        """
        # Send various kinds of newlines and make sure they get translated
        # into \n.
        h = self.p.protocol
        L = ["here is the first line\r\n",
             "here is the second line\r\0",
             "here is the third line\r\n",
             "here is the last line\r\0"]
        for b in L:
            self.p.dataReceived(b)
        self.assertEquals(h.bytes, L[0][:-2] + '\n' +
                          L[1][:-2] + '\r' +
                          L[2][:-2] + '\n' +
                          L[3][:-2] + '\r')
    def testIACEscape(self):
        """
        A doubled IAC (\\xff\\xff) is an escaped literal \\xff and reaches
        the application as a single byte.
        """
        # Send a bunch of bytes and a couple quoted \xFFs. Unquoted,
        # \xFF is a telnet command. Quoted, one of them from each pair
        # should be passed through to the application layer.
        h = self.p.protocol
        L = ["here are some bytes\xff\xff with an embedded IAC",
             "and here is a test of a border escape\xff",
             "\xff did you get that IAC?"]
        for b in L:
            self.p.dataReceived(b)
        self.assertEquals(h.bytes, ''.join(L).replace('\xff\xff', '\xff'))
    def _simpleCommandTest(self, cmdName):
        """
        Deliver IAC followed by the named single-byte command embedded in
        other bytes, and assert the protocol's handler was invoked once and
        the command bytes were stripped from the application stream.
        """
        # Send a single simple telnet command and make sure
        # it gets noticed and the appropriate method gets
        # called.
        h = self.p.protocol
        cmd = telnet.IAC + getattr(telnet, cmdName)
        L = ["Here's some bytes, tra la la",
             "But ono!" + cmd + " an interrupt"]
        for b in L:
            self.p.dataReceived(b)
        self.assertEquals(h.calls, [cmdName])
        self.assertEquals(h.bytes, ''.join(L).replace(cmd, ''))
    def testInterrupt(self):
        """IAC IP is dispatched to the protocol as an 'IP' command."""
        self._simpleCommandTest("IP")
    def testNoOperation(self):
        """IAC NOP is dispatched to the protocol as a 'NOP' command."""
        self._simpleCommandTest("NOP")
    def testDataMark(self):
        """IAC DM is dispatched to the protocol as a 'DM' command."""
        self._simpleCommandTest("DM")
    def testBreak(self):
        """IAC BRK is dispatched to the protocol as a 'BRK' command."""
        self._simpleCommandTest("BRK")
    def testAbortOutput(self):
        """IAC AO is dispatched to the protocol as an 'AO' command."""
        self._simpleCommandTest("AO")
    def testAreYouThere(self):
        """IAC AYT is dispatched to the protocol as an 'AYT' command."""
        self._simpleCommandTest("AYT")
    def testEraseCharacter(self):
        """IAC EC is dispatched to the protocol as an 'EC' command."""
        self._simpleCommandTest("EC")
    def testEraseLine(self):
        """IAC EL is dispatched to the protocol as an 'EL' command."""
        self._simpleCommandTest("EL")
    def testGoAhead(self):
        """IAC GA is dispatched to the protocol as a 'GA' command."""
        self._simpleCommandTest("GA")
    def testSubnegotiation(self):
        """
        An IAC SB <option> <payload> IAC SE sequence delivers the payload,
        split into individual bytes, to the registered negotiation handler.
        """
        # Send a subnegotiation command and make sure it gets
        # parsed and that the correct method is called.
        h = self.p.protocol
        cmd = telnet.IAC + telnet.SB + '\x12hello world' + telnet.IAC + telnet.SE
        L = ["These are some bytes but soon" + cmd,
             "there will be some more"]
        for b in L:
            self.p.dataReceived(b)
        self.assertEquals(h.bytes, ''.join(L).replace(cmd, ''))
        self.assertEquals(h.subcmd, list("hello world"))
    def testSubnegotiationWithEmbeddedSE(self):
        """
        A bare SE byte inside the payload (not preceded by IAC) does not
        terminate the subnegotiation; it is delivered as payload data.
        """
        # Send a subnegotiation command with an embedded SE. Make sure
        # that SE gets passed to the correct method.
        h = self.p.protocol
        cmd = (telnet.IAC + telnet.SB +
               '\x12' + telnet.SE +
               telnet.IAC + telnet.SE)
        L = ["Some bytes are here" + cmd + "and here",
             "and here"]
        for b in L:
            self.p.dataReceived(b)
        self.assertEquals(h.bytes, ''.join(L).replace(cmd, ''))
        self.assertEquals(h.subcmd, [telnet.SE])
    def testBoundarySubnegotiation(self):
        """
        Subnegotiation parsing is insensitive to where the byte stream is
        split: the sequence is delivered correctly for every split point.
        """
        # Send a subnegotiation command. Split it at every possible byte boundary
        # and make sure it always gets parsed and that it is passed to the correct
        # method.
        cmd = (telnet.IAC + telnet.SB +
               '\x12' + telnet.SE + 'hello' +
               telnet.IAC + telnet.SE)
        for i in range(len(cmd)):
            # Fresh protocol per split so recorded state doesn't accumulate.
            h = self.p.protocol = TestProtocol()
            h.makeConnection(self.p)
            a, b = cmd[:i], cmd[i:]
            L = ["first part" + a,
                 b + "last part"]
            for bytes in L:
                self.p.dataReceived(bytes)
            self.assertEquals(h.bytes, ''.join(L).replace(cmd, ''))
            self.assertEquals(h.subcmd, [telnet.SE] + list('hello'))
def _enabledHelper(self, o, eL=[], eR=[], dL=[], dR=[]):
self.assertEquals(o.enabledLocal, eL)
self.assertEquals(o.enabledRemote, eR)
self.assertEquals(o.disabledLocal, dL)
self.assertEquals(o.disabledRemote, dR)
def testRefuseWill(self):
    # Try to enable an option. The server should refuse to enable it.
    cmd = telnet.IAC + telnet.WILL + '\x12'
    bytes = "surrounding bytes" + cmd + "to spice things up"
    self.p.dataReceived(bytes)
    # Application data still flows; the WILL is consumed and answered
    # with DONT (refusal), and no enable/disable callbacks fire.
    self.assertEquals(self.p.protocol.bytes, bytes.replace(cmd, ''))
    self.assertEquals(self.t.value(), telnet.IAC + telnet.DONT + '\x12')
    self._enabledHelper(self.p.protocol)

def testRefuseDo(self):
    # Try to enable an option. The server should refuse to enable it.
    cmd = telnet.IAC + telnet.DO + '\x12'
    bytes = "surrounding bytes" + cmd + "to spice things up"
    self.p.dataReceived(bytes)
    # DO is refused with WONT.
    self.assertEquals(self.p.protocol.bytes, bytes.replace(cmd, ''))
    self.assertEquals(self.t.value(), telnet.IAC + telnet.WONT + '\x12')
    self._enabledHelper(self.p.protocol)

def testAcceptDo(self):
    # Try to enable an option. The option is in our allowEnable
    # list, so we will allow it to be enabled.
    cmd = telnet.IAC + telnet.DO + '\x19'
    bytes = 'padding' + cmd + 'trailer'
    h = self.p.protocol
    h.localEnableable = ('\x19',)
    self.p.dataReceived(bytes)
    # Acceptance is acknowledged with WILL and recorded as locally enabled.
    self.assertEquals(self.t.value(), telnet.IAC + telnet.WILL + '\x19')
    self._enabledHelper(h, eL=['\x19'])

def testAcceptWill(self):
    # Same as testAcceptDo, but reversed.
    cmd = telnet.IAC + telnet.WILL + '\x91'
    bytes = 'header' + cmd + 'padding'
    h = self.p.protocol
    h.remoteEnableable = ('\x91',)
    self.p.dataReceived(bytes)
    # Acceptance is acknowledged with DO and recorded as remotely enabled.
    self.assertEquals(self.t.value(), telnet.IAC + telnet.DO + '\x91')
    self._enabledHelper(h, eR=['\x91'])
def testAcceptWont(self):
    # Try to disable an option. The server must allow any option to
    # be disabled at any time. Make sure it disables it and sends
    # back an acknowledgement of this.
    cmd = telnet.IAC + telnet.WONT + '\x29'
    # Jimmy it - after these two lines, the server will be in a state
    # such that it believes the option to have been previously enabled
    # via normal negotiation.
    s = self.p.getOptionState('\x29')
    s.him.state = 'yes'
    bytes = "fiddle dee" + cmd
    self.p.dataReceived(bytes)
    self.assertEquals(self.p.protocol.bytes, bytes.replace(cmd, ''))
    self.assertEquals(self.t.value(), telnet.IAC + telnet.DONT + '\x29')
    self.assertEquals(s.him.state, 'no')
    self._enabledHelper(self.p.protocol, dR=['\x29'])

def testAcceptDont(self):
    # Try to disable an option. The server must allow any option to
    # be disabled at any time. Make sure it disables it and sends
    # back an acknowledgement of this.
    cmd = telnet.IAC + telnet.DONT + '\x29'
    # Jimmy it - after these two lines, the server will be in a state
    # such that it believes the option to have been previously enabled
    # via normal negotiation.
    s = self.p.getOptionState('\x29')
    s.us.state = 'yes'
    bytes = "fiddle dum " + cmd
    self.p.dataReceived(bytes)
    self.assertEquals(self.p.protocol.bytes, bytes.replace(cmd, ''))
    self.assertEquals(self.t.value(), telnet.IAC + telnet.WONT + '\x29')
    self.assertEquals(s.us.state, 'no')
    self._enabledHelper(self.p.protocol, dL=['\x29'])
def testIgnoreWont(self):
    # Try to disable an option. The option is already disabled. The
    # server should send nothing in response to this.
    cmd = telnet.IAC + telnet.WONT + '\x47'
    bytes = "dum de dum" + cmd + "tra la la"
    self.p.dataReceived(bytes)
    self.assertEquals(self.p.protocol.bytes, bytes.replace(cmd, ''))
    self.assertEquals(self.t.value(), '')
    self._enabledHelper(self.p.protocol)

def testIgnoreDont(self):
    # Try to disable an option. The option is already disabled. The
    # server should send nothing in response to this. Doing so could
    # lead to a negotiation loop.
    cmd = telnet.IAC + telnet.DONT + '\x47'
    bytes = "dum de dum" + cmd + "tra la la"
    self.p.dataReceived(bytes)
    self.assertEquals(self.p.protocol.bytes, bytes.replace(cmd, ''))
    self.assertEquals(self.t.value(), '')
    self._enabledHelper(self.p.protocol)

def testIgnoreWill(self):
    # Try to enable an option. The option is already enabled. The
    # server should send nothing in response to this. Doing so could
    # lead to a negotiation loop.
    cmd = telnet.IAC + telnet.WILL + '\x56'
    # Jimmy it - after these two lines, the server will be in a state
    # such that it believes the option to have been previously enabled
    # via normal negotiation.
    s = self.p.getOptionState('\x56')
    s.him.state = 'yes'
    bytes = "tra la la" + cmd + "dum de dum"
    self.p.dataReceived(bytes)
    self.assertEquals(self.p.protocol.bytes, bytes.replace(cmd, ''))
    self.assertEquals(self.t.value(), '')
    self._enabledHelper(self.p.protocol)

def testIgnoreDo(self):
    # Try to enable an option. The option is already enabled. The
    # server should send nothing in response to this. Doing so could
    # lead to a negotiation loop.
    cmd = telnet.IAC + telnet.DO + '\x56'
    # Jimmy it - after these two lines, the server will be in a state
    # such that it believes the option to have been previously enabled
    # via normal negotiation.
    s = self.p.getOptionState('\x56')
    s.us.state = 'yes'
    bytes = "tra la la" + cmd + "dum de dum"
    self.p.dataReceived(bytes)
    self.assertEquals(self.p.protocol.bytes, bytes.replace(cmd, ''))
    self.assertEquals(self.t.value(), '')
    self._enabledHelper(self.p.protocol)
def testAcceptedEnableRequest(self):
    # Try to enable an option through the user-level API. This
    # returns a Deferred that fires when negotiation about the option
    # finishes. Make sure it fires, make sure state gets updated
    # properly, make sure the result indicates the option was enabled.
    d = self.p.do('\x42')
    h = self.p.protocol
    h.remoteEnableable = ('\x42',)
    self.assertEquals(self.t.value(), telnet.IAC + telnet.DO + '\x42')
    # Peer agrees; the Deferred should fire with True.
    self.p.dataReceived(telnet.IAC + telnet.WILL + '\x42')
    d.addCallback(self.assertEquals, True)
    d.addCallback(lambda _: self._enabledHelper(h, eR=['\x42']))
    return d

def testRefusedEnableRequest(self):
    # Try to enable an option through the user-level API. This
    # returns a Deferred that fires when negotiation about the option
    # finishes. Make sure it fires, make sure state gets updated
    # properly, make sure the result indicates the option was refused.
    d = self.p.do('\x42')
    self.assertEquals(self.t.value(), telnet.IAC + telnet.DO + '\x42')
    # Peer refuses; the Deferred should errback with OptionRefused.
    self.p.dataReceived(telnet.IAC + telnet.WONT + '\x42')
    d = self.assertFailure(d, telnet.OptionRefused)
    d.addCallback(lambda _: self._enabledHelper(self.p.protocol))
    return d

def testAcceptedDisableRequest(self):
    # Try to disable an option through the user-level API. This
    # returns a Deferred that fires when negotiation about the option
    # finishes. Make sure it fires, make sure state gets updated
    # properly, make sure the result indicates the option was disabled.
    s = self.p.getOptionState('\x42')
    s.him.state = 'yes'
    d = self.p.dont('\x42')
    self.assertEquals(self.t.value(), telnet.IAC + telnet.DONT + '\x42')
    self.p.dataReceived(telnet.IAC + telnet.WONT + '\x42')
    d.addCallback(self.assertEquals, True)
    d.addCallback(lambda _: self._enabledHelper(self.p.protocol,
                                                dR=['\x42']))
    return d
def testNegotiationBlocksFurtherNegotiation(self):
    # Try to disable an option, then immediately try to enable it, then
    # immediately try to disable it. Ensure that the 2nd and 3rd calls
    # fail quickly with the right exception.
    s = self.p.getOptionState('\x24')
    s.him.state = 'yes'
    d2 = self.p.dont('\x24')  # fires after the first line of _final

    def _do(x):
        # While the DONT above is outstanding, do() must fail fast.
        d = self.p.do('\x24')
        return self.assertFailure(d, telnet.AlreadyNegotiating)

    def _dont(x):
        # Likewise for a second dont().
        d = self.p.dont('\x24')
        return self.assertFailure(d, telnet.AlreadyNegotiating)

    def _final(x):
        self.p.dataReceived(telnet.IAC + telnet.WONT + '\x24')
        # an assertion that only passes if d2 has fired
        self._enabledHelper(self.p.protocol, dR=['\x24'])
        # Make sure we allow this
        self.p.protocol.remoteEnableable = ('\x24',)
        d = self.p.do('\x24')
        self.p.dataReceived(telnet.IAC + telnet.WILL + '\x24')
        d.addCallback(self.assertEquals, True)
        d.addCallback(lambda _: self._enabledHelper(self.p.protocol,
                                                    eR=['\x24'],
                                                    dR=['\x24']))
        return d

    d = _do(None)
    d.addCallback(_dont)
    d.addCallback(_final)
    return d

def testSuperfluousDisableRequestRaises(self):
    # Try to disable a disabled option. Make sure it fails properly.
    d = self.p.dont('\xab')
    return self.assertFailure(d, telnet.AlreadyDisabled)

def testSuperfluousEnableRequestRaises(self):
    # Try to enable an already-enabled option. Make sure it fails properly.
    s = self.p.getOptionState('\xab')
    s.him.state = 'yes'
    d = self.p.do('\xab')
    return self.assertFailure(d, telnet.AlreadyEnabled)
def testLostConnectionFailsDeferreds(self):
    # Negotiations still outstanding when the connection is lost must
    # all errback with the connection-lost reason.
    d1 = self.p.do('\x12')
    d2 = self.p.do('\x23')
    d3 = self.p.do('\x34')

    class TestException(Exception):
        pass

    self.p.connectionLost(TestException("Total failure!"))
    d1 = self.assertFailure(d1, TestException)
    d2 = self.assertFailure(d2, TestException)
    d3 = self.assertFailure(d3, TestException)
    return defer.gatherResults([d1, d2, d3])
class TestTelnet(telnet.Telnet):
    """
    A trivial extension of the telnet protocol class useful to unit tests.

    Every interesting callback appends a tagged tuple to C{self.events} so
    tests can assert the exact sequence of parser events.
    """
    def __init__(self):
        telnet.Telnet.__init__(self)
        # List of (tag, ...) tuples recorded by the callbacks below.
        self.events = []

    def applicationDataReceived(self, bytes):
        """
        Record the given data in C{self.events}.
        """
        self.events.append(('bytes', bytes))

    def unhandledCommand(self, command, bytes):
        """
        Record the given command in C{self.events}.
        """
        self.events.append(('command', command, bytes))

    def unhandledSubnegotiation(self, command, bytes):
        """
        Record the given subnegotiation command in C{self.events}.
        """
        self.events.append(('negotiate', command, bytes))
class TelnetTests(unittest.TestCase):
    """
    Tests for L{telnet.Telnet}.

    L{telnet.Telnet} implements the TELNET protocol (RFC 854), including option
    and suboption negotiation, and option state tracking.
    """
    def setUp(self):
        """
        Create an unconnected L{telnet.Telnet} to be used by tests.
        """
        self.protocol = TestTelnet()

    def test_enableLocal(self):
        """
        L{telnet.Telnet.enableLocal} should reject all options, since
        L{telnet.Telnet} does not know how to implement any options.
        """
        self.assertFalse(self.protocol.enableLocal('\0'))

    def test_enableRemote(self):
        """
        L{telnet.Telnet.enableRemote} should reject all options, since
        L{telnet.Telnet} does not know how to implement any options.
        """
        self.assertFalse(self.protocol.enableRemote('\0'))

    def test_disableLocal(self):
        """
        It is an error for L{telnet.Telnet.disableLocal} to be called, since
        L{telnet.Telnet.enableLocal} will never allow any options to be enabled
        locally. If a subclass overrides enableLocal, it must also override
        disableLocal.
        """
        self.assertRaises(NotImplementedError, self.protocol.disableLocal, '\0')

    def test_disableRemote(self):
        """
        It is an error for L{telnet.Telnet.disableRemote} to be called, since
        L{telnet.Telnet.enableRemote} will never allow any options to be
        enabled remotely. If a subclass overrides enableRemote, it must also
        override disableRemote.
        """
        self.assertRaises(NotImplementedError, self.protocol.disableRemote, '\0')

    def _deliver(self, bytes, *expected):
        """
        Pass the given bytes to the protocol's C{dataReceived} method and
        assert that the given events occur.
        """
        # Rebind the event list so only events produced by this delivery
        # are compared against C{expected}.
        received = self.protocol.events = []
        self.protocol.dataReceived(bytes)
        self.assertEqual(received, list(expected))

    def test_oneApplicationDataByte(self):
        """
        One application-data byte in the default state gets delivered right
        away.
        """
        self._deliver('a', ('bytes', 'a'))

    def test_twoApplicationDataBytes(self):
        """
        Two application-data bytes in the default state get delivered
        together.
        """
        self._deliver('bc', ('bytes', 'bc'))

    def test_threeApplicationDataBytes(self):
        """
        Three application-data bytes followed by a control byte get
        delivered, but the control byte doesn't.
        """
        self._deliver('def' + telnet.IAC, ('bytes', 'def'))

    def test_escapedControl(self):
        """
        IAC in the escaped state gets delivered and so does another
        application-data byte following it.
        """
        # A lone IAC produces no events; the subsequent IAC completes the
        # IAC IAC escape, yielding a literal IAC byte plus 'g'.
        self._deliver(telnet.IAC)
        self._deliver(telnet.IAC + 'g', ('bytes', telnet.IAC + 'g'))

    def test_carriageReturn(self):
        """
        A carriage return only puts the protocol into the newline state. A
        linefeed in the newline state causes just the newline to be
        delivered. A nul in the newline state causes a carriage return to
        be delivered. An IAC in the newline state causes a carriage return
        to be delivered and puts the protocol into the escaped state.
        Anything else causes a carriage return and that thing to be
        delivered.
        """
        self._deliver('\r')
        self._deliver('\n', ('bytes', '\n'))
        self._deliver('\r\n', ('bytes', '\n'))

        self._deliver('\r')
        self._deliver('\0', ('bytes', '\r'))
        self._deliver('\r\0', ('bytes', '\r'))

        self._deliver('\r')
        self._deliver('a', ('bytes', '\ra'))
        self._deliver('\ra', ('bytes', '\ra'))

        self._deliver('\r')
        self._deliver(
            telnet.IAC + telnet.IAC + 'x', ('bytes', '\r' + telnet.IAC + 'x'))

    def test_applicationDataBeforeSimpleCommand(self):
        """
        Application bytes received before a command are delivered before the
        command is processed.
        """
        self._deliver(
            'x' + telnet.IAC + telnet.NOP,
            ('bytes', 'x'), ('command', telnet.NOP, None))

    def test_applicationDataBeforeCommand(self):
        """
        Application bytes received before a WILL/WONT/DO/DONT are delivered
        before the command is processed.
        """
        # Empty the command map so WILL falls through to unhandledCommand
        # and is recorded by TestTelnet.
        self.protocol.commandMap = {}
        self._deliver(
            'y' + telnet.IAC + telnet.WILL + '\x00',
            ('bytes', 'y'), ('command', telnet.WILL, '\x00'))

    def test_applicationDataBeforeSubnegotiation(self):
        """
        Application bytes received before a subnegotiation command are
        delivered before the negotiation is processed.
        """
        self._deliver(
            'z' + telnet.IAC + telnet.SB + 'Qx' + telnet.IAC + telnet.SE,
            ('bytes', 'z'), ('negotiate', 'Q', ['x']))
| 34.153166 | 82 | 0.596809 |
d698a0f4167f19f1d4cd7d928ae59e1c2b8897df | 14,965 | py | Python | chia/cmds/show.py | santiagoferreira33/mainchia | 16917701fd93cebab25bf054cf7c17967052ef2e | [
"Apache-2.0"
] | 3 | 2021-06-16T05:12:13.000Z | 2021-08-14T01:26:54.000Z | chia/cmds/show.py | santiagoferreira33/mainchia | 16917701fd93cebab25bf054cf7c17967052ef2e | [
"Apache-2.0"
] | 28 | 2021-07-13T21:07:14.000Z | 2022-03-29T21:10:38.000Z | chia/cmds/show.py | santiagoferreira33/mainchia | 16917701fd93cebab25bf054cf7c17967052ef2e | [
"Apache-2.0"
] | 2 | 2021-05-18T15:33:58.000Z | 2021-05-28T21:15:09.000Z | from typing import Any
import click
async def show_async(
    rpc_port: int,
    state: bool,
    show_connections: bool,
    exit_node: bool,
    add_connection: str,
    remove_connection: str,
    block_header_hash_by_height: str,
    block_by_header_hash: str,
) -> None:
    """Talk to a running full node over its RPC interface and print the
    requested information or perform the requested action.

    Each parameter mirrors one ``chia show`` CLI option; False / empty
    string means "not requested".  Any RPC/connection error is printed
    rather than raised, and the RPC client is always cleaned up.
    """
    import aiohttp
    import time
    import traceback
    from time import localtime, struct_time
    from typing import List, Optional

    from chia.consensus.block_record import BlockRecord
    from chia.rpc.full_node_rpc_client import FullNodeRpcClient
    from chia.server.outbound_message import NodeType
    from chia.types.full_block import FullBlock
    from chia.util.bech32m import encode_puzzle_hash
    from chia.util.byte_types import hexstr_to_bytes
    from chia.util.config import load_config
    from chia.util.default_root import DEFAULT_ROOT_PATH
    from chia.util.ints import uint16
    from chia.util.misc import format_bytes

    # Fix: initialize so the cleanup below cannot raise NameError when
    # FullNodeRpcClient.create itself fails.
    client: Optional[FullNodeRpcClient] = None
    try:
        config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
        self_hostname = config["self_hostname"]
        if rpc_port is None:
            rpc_port = config["full_node"]["rpc_port"]
        client = await FullNodeRpcClient.create(self_hostname, uint16(rpc_port), DEFAULT_ROOT_PATH, config)

        if state:
            blockchain_state = await client.get_blockchain_state()
            if blockchain_state is None:
                print("There is no blockchain found yet. Try again shortly")
                return None
            peak: Optional[BlockRecord] = blockchain_state["peak"]
            difficulty = blockchain_state["difficulty"]
            sub_slot_iters = blockchain_state["sub_slot_iters"]
            synced = blockchain_state["sync"]["synced"]
            sync_mode = blockchain_state["sync"]["sync_mode"]
            total_iters = peak.total_iters if peak is not None else 0
            num_blocks: int = 10  # how many recent blocks to list below

            if sync_mode:
                sync_max_block = blockchain_state["sync"]["sync_tip_height"]
                sync_current_block = blockchain_state["sync"]["sync_progress_height"]
                print(
                    "Current Blockchain Status: Full Node syncing to block",
                    sync_max_block,
                    "\nCurrently synced to block:",
                    sync_current_block,
                )
            if synced:
                print("Current Blockchain Status: Full Node Synced")
                print("\nPeak: Hash:", peak.header_hash if peak is not None else "")
            elif peak is not None:
                print(f"Current Blockchain Status: Not Synced. Peak height: {peak.height}")
            else:
                print("\nSearching for an initial chain\n")
                print("You may be able to expedite with 'chia show -a host:port' using a known node.\n")

            if peak is not None:
                # The peak itself may not be a transaction block (and thus
                # carries no timestamp); walk back to the nearest one.
                if peak.is_transaction_block:
                    peak_time = peak.timestamp
                else:
                    peak_hash = peak.header_hash
                    curr = await client.get_block_record(peak_hash)
                    while curr is not None and not curr.is_transaction_block:
                        curr = await client.get_block_record(curr.prev_hash)
                    peak_time = curr.timestamp
                peak_time_struct = struct_time(localtime(peak_time))
                print(
                    " Time:",
                    f"{time.strftime('%a %b %d %Y %T %Z', peak_time_struct)}",
                    f" Height: {peak.height:>10}\n",
                )
                print("Estimated network space: ", end="")
                print(format_bytes(blockchain_state["space"]))
                print(f"Current difficulty: {difficulty}")
                print(f"Current VDF sub_slot_iters: {sub_slot_iters}")
                print("Total iterations since the start of the blockchain:", total_iters)
                print("")
                print(" Height: | Hash:")
                # List the last `num_blocks` blocks, newest first.
                added_blocks: List[BlockRecord] = []
                curr = await client.get_block_record(peak.header_hash)
                while curr is not None and len(added_blocks) < num_blocks and curr.height > 0:
                    added_blocks.append(curr)
                    curr = await client.get_block_record(curr.prev_hash)
                for b in added_blocks:
                    print(f"{b.height:>9} | {b.header_hash}")
            else:
                print("Blockchain has no blocks yet")

            # if called together with show_connections, leave a blank line
            if show_connections:
                print("")
        if show_connections:
            connections = await client.get_connections()
            print("Connections:")
            # NOTE(review): the column spacing in this header appears
            # collapsed relative to the fixed-width fields used in con_str
            # below - confirm against the intended layout.
            print(
                "Type IP Ports NodeID Last Connect"
                + " MiB Up|Dwn"
            )
            for con in connections:
                last_connect_tuple = struct_time(localtime(con["last_message_time"]))
                last_connect = time.strftime("%b %d %T", last_connect_tuple)
                mb_down = con["bytes_read"] / (1024 * 1024)
                mb_up = con["bytes_written"] / (1024 * 1024)
                host = con["peer_host"]
                # Strip IPv6 brackets
                if host[0] == "[":
                    host = host[1:39]
                # Nodetype length is 9 because INTRODUCER will be deprecated
                if NodeType(con["type"]) is NodeType.FULL_NODE:
                    peak_height = con["peak_height"]
                    peak_hash = con["peak_hash"]
                    if peak_hash is None:
                        peak_hash = "No Info"
                    if peak_height is None:
                        peak_height = 0
                    con_str = (
                        f"{NodeType(con['type']).name:9} {host:38} "
                        f"{con['peer_port']:5}/{con['peer_server_port']:<5}"
                        f" {con['node_id'].hex()[:8]}... "
                        f"{last_connect} "
                        f"{mb_up:7.1f}|{mb_down:<7.1f}"
                        f"\n "
                        f"-SB Height: {peak_height:8.0f} -Hash: {peak_hash[2:10]}..."
                    )
                else:
                    con_str = (
                        f"{NodeType(con['type']).name:9} {host:38} "
                        f"{con['peer_port']:5}/{con['peer_server_port']:<5}"
                        f" {con['node_id'].hex()[:8]}... "
                        f"{last_connect} "
                        f"{mb_up:7.1f}|{mb_down:<7.1f}"
                    )
                print(con_str)
            # if called together with state, leave a blank line
            if state:
                print("")
        if exit_node:
            node_stop = await client.stop_node()
            print(node_stop, "Node stopped")
        if add_connection:
            if ":" not in add_connection:
                print("Enter a valid IP and port in the following format: 10.5.4.3:8000")
            else:
                # Split on the LAST colon so IPv6 addresses keep their colons.
                ip, port = (
                    ":".join(add_connection.split(":")[:-1]),
                    add_connection.split(":")[-1],
                )
                print(f"Connecting to {ip}, {port}")
                try:
                    await client.open_connection(ip, int(port))
                except Exception:
                    print(f"Failed to connect to {ip}:{port}")
        if remove_connection:
            result_txt = ""
            if len(remove_connection) != 8:
                result_txt = "Invalid NodeID. Do not include '.'"
            else:
                connections = await client.get_connections()
                for con in connections:
                    if remove_connection == con["node_id"].hex()[:8]:
                        print("Attempting to disconnect", "NodeID", remove_connection)
                        try:
                            await client.close_connection(con["node_id"])
                        except Exception:
                            result_txt = f"Failed to disconnect NodeID {remove_connection}"
                        else:
                            # Fix: these two f-strings were previously two
                            # separate statements (no parentheses), so the
                            # second one was a no-op and the peer host never
                            # appeared in the message.
                            result_txt = (
                                f"NodeID {remove_connection}... {NodeType(con['type']).name} "
                                f"{con['peer_host']} disconnected"
                            )
                    elif result_txt == "":
                        result_txt = f"NodeID {remove_connection}... not found"
            print(result_txt)
        if block_header_hash_by_height != "":
            block_header = await client.get_block_record_by_height(block_header_hash_by_height)
            if block_header is not None:
                print(f"Header hash of block {block_header_hash_by_height}: " f"{block_header.header_hash.hex()}")
            else:
                print("Block height", block_header_hash_by_height, "not found")
        if block_by_header_hash != "":
            block: Optional[BlockRecord] = await client.get_block_record(hexstr_to_bytes(block_by_header_hash))
            full_block: Optional[FullBlock] = await client.get_block(hexstr_to_bytes(block_by_header_hash))
            # Would like to have a verbose flag for this
            if block is not None:
                assert full_block is not None
                prev_b = await client.get_block_record(block.prev_hash)
                # Difficulty is the weight gained over the previous block.
                if prev_b is not None:
                    difficulty = block.weight - prev_b.weight
                else:
                    difficulty = block.weight
                if block.is_transaction_block:
                    assert full_block.transactions_info is not None
                    block_time = struct_time(
                        localtime(
                            full_block.foliage_transaction_block.timestamp
                            if full_block.foliage_transaction_block
                            else None
                        )
                    )
                    block_time_string = time.strftime("%a %b %d %Y %T %Z", block_time)
                    cost = str(full_block.transactions_info.cost)
                    tx_filter_hash = "Not a transaction block"
                    if full_block.foliage_transaction_block:
                        tx_filter_hash = full_block.foliage_transaction_block.filter_hash
                    fees: Any = block.fees
                else:
                    block_time_string = "Not a transaction block"
                    cost = "Not a transaction block"
                    tx_filter_hash = "Not a transaction block"
                    fees = "Not a transaction block"
                address_prefix = config["network_overrides"]["config"][config["selected_network"]]["address_prefix"]
                farmer_address = encode_puzzle_hash(block.farmer_puzzle_hash, address_prefix)
                pool_address = encode_puzzle_hash(block.pool_puzzle_hash, address_prefix)
                pool_pk = (
                    full_block.reward_chain_block.proof_of_space.pool_public_key
                    if full_block.reward_chain_block.proof_of_space.pool_public_key is not None
                    else "Pay to pool puzzle hash"
                )
                print(
                    f"Block Height {block.height}\n"
                    f"Header Hash 0x{block.header_hash.hex()}\n"
                    f"Timestamp {block_time_string}\n"
                    f"Weight {block.weight}\n"
                    f"Previous Block 0x{block.prev_hash.hex()}\n"
                    f"Difficulty {difficulty}\n"
                    f"Sub-slot iters {block.sub_slot_iters}\n"
                    f"Cost {cost}\n"
                    f"Total VDF Iterations {block.total_iters}\n"
                    f"Is a Transaction Block?{block.is_transaction_block}\n"
                    f"Deficit {block.deficit}\n"
                    f"PoSpace 'k' Size {full_block.reward_chain_block.proof_of_space.size}\n"
                    f"Plot Public Key 0x{full_block.reward_chain_block.proof_of_space.plot_public_key}\n"
                    f"Pool Public Key {pool_pk}\n"
                    f"Tx Filter Hash {tx_filter_hash}\n"
                    f"Farmer Address {farmer_address}\n"
                    f"Pool Address {pool_address}\n"
                    f"Fees Amount {fees}\n"
                )
            else:
                # Fix: this message previously printed the unrelated
                # block_header_hash_by_height argument.
                print("Block with header hash", block_by_header_hash, "not found")
    except Exception as e:
        if isinstance(e, aiohttp.ClientConnectorError):
            print(f"Connection error. Check if full node rpc is running at {rpc_port}")
            print("This is normal if full node is still starting up")
        else:
            tb = traceback.format_exc()
            print(f"Exception from 'show' {tb}")

    # Always release the RPC client if it was created.
    if client is not None:
        client.close()
        await client.await_closed()
@click.command("show", short_help="Show node information")
@click.option(
    "-p",
    "--rpc-port",
    help=(
        "Set the port where the Full Node is hosting the RPC interface. "
        "See the rpc_port under full_node in config.yaml"
    ),
    type=int,
    default=None,
)
@click.option(
    "-wp",
    "--wallet-rpc-port",
    help="Set the port where the Wallet is hosting the RPC interface. See the rpc_port under wallet in config.yaml",
    type=int,
    default=None,
)
@click.option("-s", "--state", help="Show the current state of the blockchain", is_flag=True, type=bool, default=False)
@click.option(
    "-c", "--connections", help="List nodes connected to this Full Node", is_flag=True, type=bool, default=False
)
@click.option("-e", "--exit-node", help="Shut down the running Full Node", is_flag=True, default=False)
@click.option("-a", "--add-connection", help="Connect to another Full Node by ip:port", type=str, default="")
@click.option(
    "-r", "--remove-connection", help="Remove a Node by the first 8 characters of NodeID", type=str, default=""
)
@click.option(
    "-bh", "--block-header-hash-by-height", help="Look up a block header hash by block height", type=str, default=""
)
@click.option("-b", "--block-by-header-hash", help="Look up a block by block header hash", type=str, default="")
def show_cmd(
    rpc_port: int,
    wallet_rpc_port: int,
    state: bool,
    connections: bool,
    exit_node: bool,
    add_connection: str,
    remove_connection: str,
    block_header_hash_by_height: str,
    block_by_header_hash: str,
) -> None:
    # CLI entry point for `chia show`: bridges the Click options above to
    # the async implementation in show_async.  (Comment, not a docstring,
    # because Click would surface a docstring as the command's help text.)
    # NOTE(review): wallet_rpc_port is accepted but never forwarded to
    # show_async - confirm whether the option is vestigial or missing a hookup.
    import asyncio

    asyncio.run(
        show_async(
            rpc_port,
            state,
            connections,
            exit_node,
            add_connection,
            remove_connection,
            block_header_hash_by_height,
            block_by_header_hash,
        )
    )
60bd1df0f4de5ae4ea668c915b47625d96e1f746 | 549 | py | Python | users/migrations/0027_auto_20190910_1643.py | dhanupandey12/Blog | fcd274b7249c255786b46cf81d6e949a903e9a53 | [
"MIT"
] | null | null | null | users/migrations/0027_auto_20190910_1643.py | dhanupandey12/Blog | fcd274b7249c255786b46cf81d6e949a903e9a53 | [
"MIT"
] | null | null | null | users/migrations/0027_auto_20190910_1643.py | dhanupandey12/Blog | fcd274b7249c255786b46cf81d6e949a903e9a53 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.5 on 2019-09-10 11:13
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):

    # NOTE(review): atomic=False disables wrapping this migration in a
    # transaction - presumably required by the deployed database backend
    # for this ALTER (e.g. SQLite table rebuilds); confirm before changing.
    atomic = False

    dependencies = [
        ('users', '0026_auto_20190910_1638'),
    ]

    operations = [
        # Redefine users.Fdata.user as a OneToOneField with reverse accessor
        # 'utem'; CASCADE deletes the row when the referenced user is deleted.
        migrations.AlterField(
            model_name='fdata',
            name='user',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='utem', to=settings.AUTH_USER_MODEL),
        ),
    ]
892d58bced7a94c8ffa269c5a5131735fe183099 | 4,246 | py | Python | drafthorse/models/trade.py | olf42/python-drafthorse | 0ef0326556ad0336236fd8d91cb32e8343ebd592 | [
"Apache-2.0"
] | null | null | null | drafthorse/models/trade.py | olf42/python-drafthorse | 0ef0326556ad0336236fd8d91cb32e8343ebd592 | [
"Apache-2.0"
] | null | null | null | drafthorse/models/trade.py | olf42/python-drafthorse | 0ef0326556ad0336236fd8d91cb32e8343ebd592 | [
"Apache-2.0"
] | null | null | null | from . import BASIC, COMFORT, EXTENDED, NS_RAM, NS_FERD_1p0
from .accounting import (ApplicableTradeTax, AppliedTradeTax,
BillingSpecifiedPeriod, MonetarySummation,
ReceivableAccountingAccount, TradeAllowanceCharge)
from .delivery import TradeDelivery
from .elements import Element
from .fields import CurrencyField, Field, MultiField, StringField
from .party import (BuyerTradeParty, EndUserTradeParty, InvoiceeTradeParty,
PayeeTradeParty, SellerTradeParty)
from .payment import PaymentMeans, PaymentTerms
from .references import (AdditionalReferencedDocument,
BuyerOrderReferencedDocument,
ContractReferencedDocument,
CustomerOrderReferencedDocument)
from .tradelines import LineItem
class DeliveryTerms(Element):
    """Delivery-terms element serialized as ``ApplicableTradeDeliveryTerms``
    in the RAM namespace; its code field is EXTENDED-profile only."""
    type_code = StringField(NS_RAM, "DeliveryTypeCode", required=False,
                            profile=EXTENDED, _d="Lieferbedingung (Code)")

    class Meta:
        namespace = NS_RAM
        tag = "ApplicableTradeDeliveryTerms"
class TradeAgreement(Element):
    """Header-level trade agreement (``ApplicableSupplyChainTradeAgreement``):
    the contracting parties plus the documents referenced by the invoice."""
    buyer_reference = StringField(NS_RAM, "BuyerReference", required=False,
                                  profile=COMFORT, _d="Referenz des Käufers")
    seller = Field(SellerTradeParty, required=True, _d="Detailinformationen zum Verkäufer")
    buyer = Field(BuyerTradeParty, required=True)
    end_user = Field(EndUserTradeParty, required=False, _d="Abweichender Endverbraucher")
    delivery_terms = Field(DeliveryTerms, required=False, profile=EXTENDED)
    buyer_order = Field(BuyerOrderReferencedDocument, required=False, profile=COMFORT)
    customer_order = Field(CustomerOrderReferencedDocument, required=False, profile=COMFORT)
    contract = Field(ContractReferencedDocument, required=False, profile=COMFORT)
    additional_references = MultiField(AdditionalReferencedDocument, required=False,
                                       profile=COMFORT)

    class Meta:
        namespace = NS_RAM
        tag = "ApplicableSupplyChainTradeAgreement"
class LogisticsServiceCharge(Element):
    """A logistics service charge (``SpecifiedLogisticsServiceCharge``):
    description, charged amount, and the taxes applied to it."""
    description = StringField(NS_RAM, "Description", required=True, profile=COMFORT,
                              _d="Identifikation der Servicegebühr")
    applied_amount = CurrencyField(NS_RAM, "AppliedAmount", required=True,
                                   profile=COMFORT, _d="Betrag der Servicegebühr")
    trade_tax = MultiField(AppliedTradeTax, required=False, profile=COMFORT)

    class Meta:
        namespace = NS_RAM
        tag = "SpecifiedLogisticsServiceCharge"
class TradeSettlement(Element):
    """Header-level settlement data (``ApplicableSupplyChainTradeSettlement``):
    payment details, taxes, allowances/charges, and the monetary totals."""
    payment_reference = StringField(NS_RAM, "PaymentReference")
    currency_code = StringField(NS_RAM, "InvoiceCurrencyCode")
    invoicee = Field(InvoiceeTradeParty, required=False, profile=COMFORT,
                     _d="Rechnungsempfänger")
    payee = Field(PayeeTradeParty, required=False, profile=COMFORT,
                  _d="Zahlungsempfänger")
    payment_means = Field(PaymentMeans)
    trade_tax = MultiField(ApplicableTradeTax)
    period = Field(BillingSpecifiedPeriod, required=False, profile=COMFORT)
    allowance_charge = MultiField(TradeAllowanceCharge, required=False, profile=COMFORT,
                                  _d="Schalter für Zu-/Abschlag")
    service_charge = MultiField(LogisticsServiceCharge, required=False, profile=COMFORT)
    terms = MultiField(PaymentTerms, required=False, profile=COMFORT)
    monetary_summation = Field(MonetarySummation, required=True, profile=BASIC,
                               _d="Detailinformation zu Belegsummen")
    accounting_account = Field(ReceivableAccountingAccount, required=False, profile=EXTENDED,
                               _d="Detailinformationen zur Buchungsreferenz")

    class Meta:
        namespace = NS_RAM
        tag = "ApplicableSupplyChainTradeSettlement"
class TradeTransaction(Element):
    """Root transaction element (``SpecifiedSupplyChainTradeTransaction``,
    note: FERD 1.0 namespace): agreement, delivery, settlement and the
    individual line items of the document."""
    agreement = Field(TradeAgreement, required=True)
    delivery = Field(TradeDelivery, required=True)
    settlement = Field(TradeSettlement, required=True)
    items = MultiField(LineItem, required=True)

    class Meta:
        namespace = NS_FERD_1p0
        tag = "SpecifiedSupplyChainTradeTransaction"
ddb264931c11feb420a91eea58b5663963d7a29e | 2,365 | py | Python | curlylint/rules/tabindex_no_positive/tabindex_no_positive.py | adamchainz/curlylint | b64ec22effafffc6a1371e544c560e6bfc24b56e | [
"MIT"
] | 155 | 2020-03-22T14:06:31.000Z | 2022-03-20T15:41:03.000Z | curlylint/rules/tabindex_no_positive/tabindex_no_positive.py | adamchainz/curlylint | b64ec22effafffc6a1371e544c560e6bfc24b56e | [
"MIT"
] | 57 | 2020-06-22T13:33:32.000Z | 2022-03-30T11:44:33.000Z | curlylint/rules/tabindex_no_positive/tabindex_no_positive.py | adamchainz/curlylint | b64ec22effafffc6a1371e544c560e6bfc24b56e | [
"MIT"
] | 22 | 2020-08-11T19:51:48.000Z | 2022-03-29T08:42:28.000Z | from curlylint import ast
from curlylint.check_node import CheckNode, build_tree
from curlylint.issue import Issue
# Rule identifier, also used as the issue code attached to reported Issues.
TABINDEX_NO_POSITIVE = "tabindex_no_positive"

# Rule metadata consumed by the curlylint rule registry and documentation
# site; "schema" constrains the rule's configuration value (only `true`).
RULE = {
    "id": "tabindex_no_positive",
    "type": "accessibility",
    "docs": {
        "description": "Prevents using positive `tabindex` values, which are very easy to misuse with problematic consequences for keyboard users.",
        "url": "https://www.curlylint.org/docs/rules/tabindex_no_positive",
        "impact": "Serious",
        "tags": ["cat.language", "wcag2a"],
        "resources": [
            "[WHATWG HTML Standard, The autofocus attribute](https://html.spec.whatwg.org/multipage/interaction.html#attr-fe-autofocus)",
            "[The accessibility of HTML 5 autofocus](https://www.brucelawson.co.uk/2009/the-accessibility-of-html-5-autofocus/)",
            "[MDN: input `autofocus` attribute usage considerations](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/input#htmlattrdefautofocus)",
        ],
    },
    "schema": {
        "$schema": "http://json-schema.org/draft/2019-09/schema#",
        "oneOf": [
            {
                "const": True,
                "title": "Avoid positive `tabindex` values, change the order of elements on the page instead.",
                "examples": [True],
            }
        ],
    },
}
def find_valid(node, file):
    """Recursively walk *node*'s subtree and return a list of Issues, one
    per element whose ``tabindex`` attribute is a positive integer.

    Subtrees below an offending element are not searched further (matching
    the previous behavior of returning at the first hit on a branch).
    """
    is_elt = isinstance(node.value, ast.Element)
    if is_elt:
        attributes = []
        if getattr(node.value, "opening_tag", None):
            attributes = {}
            for n in node.value.opening_tag.attributes.nodes:
                # Attribute values keep surrounding quotes in the AST;
                # strip them before comparing.
                attributes[str(n.name)] = str(n.value).strip("\"'")

        if "tabindex" in attributes:
            # Fix: non-numeric values such as template expressions
            # (tabindex="{{ i }}") previously crashed with ValueError;
            # treat them as non-positive instead.
            try:
                tabindex = int(attributes["tabindex"])
            except ValueError:
                tabindex = None
            if tabindex is not None and tabindex > 0:
                return [
                    Issue.from_node(
                        file,
                        node,
                        "Avoid positive `tabindex` values, change the order of elements on the page instead",
                        "tabindex_no_positive",
                    )
                ]

    if not node.children:
        return []

    return sum((find_valid(child, file) for child in node.children), [])
def tabindex_no_positive(file, config):
    """Rule entry point: check *file* and return a list of issues."""
    # Cheap pre-filter: if the source cannot contain a `tabindex`
    # attribute at all, skip building the check tree entirely.
    if "tabindex" not in file.source.lower():
        return []
    root = CheckNode(None)
    build_tree(root, file.tree)
    return find_valid(root, file)
| 34.275362 | 156 | 0.594503 |
1d2d5a983a54eea11b57e4bdc6b96c6b65d47e48 | 1,050 | py | Python | jp.atcoder/abc224/abc224_e/26783521.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | 1 | 2022-02-09T03:06:25.000Z | 2022-02-09T03:06:25.000Z | jp.atcoder/abc224/abc224_e/26783521.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | 1 | 2022-02-05T22:53:18.000Z | 2022-02-09T01:29:30.000Z | jp.atcoder/abc224/abc224_e/26783521.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | null | null | null | import sys
import typing
import numba as nb
import numpy as np
@nb.njit((nb.i8, nb.i8, nb.i8[:, :]), cache=True)
def solve(h: int, w: int, rca: np.ndarray) -> typing.NoReturn:
    # For each cell (r, c, a), compute the length of the longest chain of
    # moves starting there, where each move stays in the same row or column
    # and goes to a strictly larger value — TODO confirm against the
    # problem statement (ABC224 E).
    n = len(rca)
    dist = np.zeros(n, np.int64)
    # Best chain length achieved so far in each row / column, taken over
    # cells whose value is strictly larger than the one being processed
    # (-1 so that a cell with no larger neighbour gets dist 0).
    rmax = np.full(h, -1, np.int64)
    cmax = np.full(w, -1, np.int64)
    # Process cells in decreasing order of value; the stable mergesort
    # keeps equal values contiguous so they can be handled as one group.
    order = np.argsort(rca[:, 2], kind='mergesort')[::-1]
    s = 0
    prev = -1
    for i in range(n):
        if rca[order[i], 2] != prev:
            # Value changed: fold the previous equal-value group into the
            # row/column maxima.  Deferring this update prevents cells of
            # equal value from chaining off each other.
            for j in range(s, i):
                r, c, a = rca[order[j]]
                rmax[r] = max(rmax[r], dist[order[j]])
                cmax[c] = max(cmax[c], dist[order[j]])
            s = i
        r, c, a = rca[order[i]]
        # Best extension through this cell's row or column, plus one move.
        dist[order[i]] = max(rmax[r], cmax[c]) + 1
        prev = a
    # One answer per input cell, in the original input order.
    for d in dist:
        print(d)
def main() -> typing.NoReturn:
    """Read the problem input from stdin and delegate to ``solve``.

    Input format: first line ``h w n``; then ``n`` triples
    ``(row, column, value)``, whitespace separated.
    """
    h, w, n = map(int, input().split())
    tokens = sys.stdin.read().split()
    rca = np.array(tokens, dtype=np.int64).reshape(n, 3)
    # Convert 1-based (row, column) coordinates to 0-based.
    rca[:, :2] -= 1
    solve(h, w, rca)


main()
| 22.826087 | 63 | 0.464762 |
54d7ebb15352fe37218fd899dc5554eaf1ff576d | 114,815 | py | Python | src/sage/rings/number_field/number_field_ideal.py | velasjk3/SageMath | 7fb1c29714fb7c9b93e640e658586a664f939791 | [
"BSL-1.0"
] | null | null | null | src/sage/rings/number_field/number_field_ideal.py | velasjk3/SageMath | 7fb1c29714fb7c9b93e640e658586a664f939791 | [
"BSL-1.0"
] | null | null | null | src/sage/rings/number_field/number_field_ideal.py | velasjk3/SageMath | 7fb1c29714fb7c9b93e640e658586a664f939791 | [
"BSL-1.0"
] | null | null | null | """
Number Field Ideals
AUTHORS:
- Steven Sivek (2005-05-16)
- William Stein (2007-09-06): vastly improved the doctesting
- William Stein and John Cremona (2007-01-28): new class
NumberFieldFractionalIdeal now used for all except the 0 ideal
- Radoslav Kirov and Alyson Deines (2010-06-22):
prime_to_S_part, is_S_unit, is_S_integral
We test that pickling works::
sage: K.<a> = NumberField(x^2 - 5)
sage: I = K.ideal(2/(5+a))
sage: I == loads(dumps(I))
True
"""
# ****************************************************************************
# Copyright (C) 2004 William Stein <wstein@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# https://www.gnu.org/licenses/
# ****************************************************************************
SMALL_DISC = 1000000
import sage.misc.latex as latex
import sage.rings.rational_field as rational_field
import sage.rings.integer_ring as integer_ring
from sage.arith.all import kronecker_symbol, gcd
import sage.misc.misc as misc
from sage.rings.finite_rings.finite_field_constructor import FiniteField
from sage.rings.ideal import Ideal_generic
from sage.misc.all import prod
from sage.misc.mrange import xmrange_iter
from sage.misc.cachefunc import cached_method
from sage.structure.element import MultiplicativeGroupElement
from sage.structure.factorization import Factorization
from sage.structure.sequence import Sequence
from sage.structure.proof.proof import get_flag
from sage.structure.richcmp import richcmp
QQ = rational_field.RationalField()
ZZ = integer_ring.IntegerRing()
class NumberFieldIdeal(Ideal_generic):
"""
An ideal of a number field.
"""
def __init__(self, field, gens, coerce=True):
"""
INPUT:
- ``field`` - a number field
- ``x`` - a list of NumberFieldElements belonging to the field
EXAMPLES::
sage: K.<i> = NumberField(x^2 + 1)
sage: K.ideal(7)
Fractional ideal (7)
Initialization from PARI::
sage: K.ideal(pari(7))
Fractional ideal (7)
sage: K.ideal(pari(4), pari(4 + 2*i))
Fractional ideal (2)
sage: K.ideal(pari("i + 2"))
Fractional ideal (i + 2)
sage: K.ideal(pari("[3,0;0,3]"))
Fractional ideal (3)
sage: F = pari(K).idealprimedec(5)
sage: K.ideal(F[0])
Fractional ideal (2*i + 1)
TESTS:
Check that _pari_prime is set when initializing from a PARI
prime ideal::
sage: K.ideal(pari(K).idealprimedec(5)[0])._pari_prime
[5, [-2, 1]~, 1, 1, [2, -1; 1, 2]]
Number fields defined by non-monic and non-integral
polynomials are supported (:trac:`252`)::
sage: K.<a> = NumberField(2*x^2 - 1/3)
sage: I = K.ideal(a); I
Fractional ideal (a)
sage: I.norm()
1/6
"""
from .number_field import NumberField_generic
if not isinstance(field, NumberField_generic):
raise TypeError("field (=%s) must be a number field." % field)
if len(gens) == 1 and isinstance(gens[0], (list, tuple)):
gens = gens[0]
from sage.libs.pari.all import pari_gen
if len(gens) == 1 and isinstance(gens[0], pari_gen):
# Init from PARI
gens = gens[0]
if gens.type() == "t_MAT":
# Assume columns are generators
gens = [field(x, check=False) for x in field.pari_zk() * gens]
elif gens.type() == "t_VEC":
# Assume prime ideal form
self._pari_prime = gens
gens = [ZZ(gens.pr_get_p()), field(gens.pr_get_gen())]
else:
# Assume one element of the field
gens = [field(gens, check=False)]
if len(gens)==0:
raise ValueError("gens must have length at least 1 (zero ideal is not a fractional ideal)")
Ideal_generic.__init__(self, field, gens, coerce)
if field.absolute_degree() == 2:
self.quadratic_form = self._quadratic_form
def _magma_init_(self, magma):
"""
Return Magma version of this ideal.
INPUT:
- ``magma`` - a Magma interpreter
OUTPUT: MagmaElement corresponding to this ideal.
EXAMPLES::
sage: K.<a> = NumberField(x^3 + 2) # optional - magma
sage: I = K.ideal(5) # optional - magma
sage: I._magma_init_(magma) # optional - magma
'(_sage_[...]![5, 0, 0]) * _sage_[...]'
"""
O = magma(self.number_field().maximal_order())
g = self.gens()[0]
ans = magma(g) * O
for g in self.gens()[1:]:
ans += magma(g) * O
return '+'.join('%s * %s' % (g._magma_init_(magma), O.name())
for g in self.gens())
def __hash__(self):
"""
EXAMPLES::
sage: NumberField(x^2 + 1, 'a').ideal(7).__hash__() # random
7806919040325273549
"""
try:
return self._hash
except AttributeError:
# At some point in the future (e.g., for relative extensions),
# we'll likely have to consider other hashes.
self._hash = hash(self.pari_hnf())
return self._hash
def _latex_(self):
r"""
EXAMPLES::
sage: K.<a> = NumberField(x^2 + 23)
sage: K.ideal([2, 1/2*a - 1/2])._latex_()
'\\left(2, \\frac{1}{2} a - \\frac{1}{2}\\right)'
sage: latex(K.ideal([2, 1/2*a - 1/2]))
\left(2, \frac{1}{2} a - \frac{1}{2}\right)
The gens are reduced only if the norm of the discriminant of
the defining polynomial is at most
sage.rings.number_field.number_field_ideal.SMALL_DISC::
sage: K.<a> = NumberField(x^2 + 902384092834); K
Number Field in a with defining polynomial x^2 + 902384092834
sage: I = K.factor(19)[0][0]; I._latex_()
'\\left(19\\right)'
We can make the generators reduced by increasing SMALL_DISC.
We had better also set proof to False, or computing reduced
gens could take too long::
sage: proof.number_field(False)
sage: sage.rings.number_field.number_field_ideal.SMALL_DISC = 10^20
sage: K.<a> = NumberField(x^4 + 3*x^2 - 17)
sage: K.ideal([17*a,17,17,17*a])._latex_()
'\\left(17\\right)'
TESTS:
Reset SMALL_DISC for continued testing::
sage: sage.rings.number_field.number_field_ideal.SMALL_DISC = 1000000
"""
return '\\left(%s\\right)' % (", ".join(map(latex.latex, self._gens_repr())))
def _richcmp_(self, other, op):
"""
Compare an ideal of a number field to something else.
REMARK:
By default, comparing ideals is the same as comparing
their generator list. But of course, different generators
can give rise to the same ideal. And this can easily
be detected using Hermite normal form.
EXAMPLES::
sage: K.<a> = NumberField(x^2 + 3); K
Number Field in a with defining polynomial x^2 + 3
sage: f = K.factor(15); f
(Fractional ideal (1/2*a + 3/2))^2 * (Fractional ideal (5))
sage: (f[0][0] < f[1][0])
True
sage: (f[0][0] == f[0][0])
True
sage: (f[1][0] > f[0][0])
True
sage: f[1][0] == 5
True
sage: f[1][0] == GF(7)(5)
False
TESTS::
sage: L.<b> = NumberField(x^8-x^4+1)
sage: F_2 = L.fractional_ideal(b^2-1)
sage: F_4 = L.fractional_ideal(b^4-1)
sage: F_2 == F_4
True
"""
if not isinstance(other, NumberFieldIdeal):
return NotImplemented
return richcmp(self.pari_hnf().sage(), other.pari_hnf().sage(), op)
def _mul_(self, other):
"""
Returns the product of self and other.
This is implemented by just calling pari to do the multiplication.
EXAMPLES::
sage: K.<I>=QQ[i]
sage: A = K.ideal([5, 2 + I])
sage: B = K.ideal([13, 5 + 12*I])
sage: A*B
Fractional ideal (4*I - 7)
sage: (K.ideal(3 + I) * K.ideal(7 + I)).gens()
(10*I + 20,)
TESTS:
Make sure that :trac:`13958` is fixed::
sage: I = QuadraticField(-5).ideal(2).factor()[0][0]
sage: I = I * I * I; I.ngens() == 2
True
sage: I = I^301; I.ngens() == 2
True
"""
if self.ngens() == 1 and other.ngens() == 1:
return self.ring().ideal(self.gen(0) * other.gen(0))
K=self.ring()
K_pari=K.pari_nf()
return K.ideal(K_pari.idealmul(self, other))
def coordinates(self, x):
r"""
Returns the coordinate vector of `x` with respect to this ideal.
INPUT:
``x`` -- an element of the number field (or ring of integers) of this ideal.
OUTPUT:
List giving the coordinates of `x` with respect to the integral basis
of the ideal. In general this will be a vector of
rationals; it will consist of integers if and only if `x`
is in the ideal.
AUTHOR: John Cremona 2008-10-31
ALGORITHM:
Uses linear algebra.
Provides simpler implementations for ``_contains_()``,
``is_integral()`` and ``smallest_integer()``.
EXAMPLES::
sage: K.<i> = QuadraticField(-1)
sage: I = K.ideal(7+3*i)
sage: Ibasis = I.integral_basis(); Ibasis
[58, i + 41]
sage: a = 23-14*i
sage: acoords = I.coordinates(a); acoords
(597/58, -14)
sage: sum([Ibasis[j]*acoords[j] for j in range(2)]) == a
True
sage: b = 123+456*i
sage: bcoords = I.coordinates(b); bcoords
(-18573/58, 456)
sage: sum([Ibasis[j]*bcoords[j] for j in range(2)]) == b
True
sage: J = K.ideal(0)
sage: J.coordinates(0)
()
sage: J.coordinates(1)
Traceback (most recent call last):
...
TypeError: vector is not in free module
"""
K = self.number_field()
V, from_V, to_V = K.absolute_vector_space()
try:
return self.free_module().coordinate_vector(to_V(K(x)))
except ArithmeticError as e:
raise TypeError(e)
def _contains_(self, x):
"""
Return True if x is an element of this ideal.
This function is called (indirectly) when the ``in`` operator is used.
EXAMPLES::
sage: K.<a> = NumberField(x^2 + 23); K
Number Field in a with defining polynomial x^2 + 23
sage: I = K.factor(13)[0][0]; I
Fractional ideal (13, 1/2*a + 9/2)
sage: I._contains_(a)
False
sage: a in I
False
sage: 13 in I
True
sage: 13/2 in I
False
sage: a + 9 in I
True
sage: J = K.ideal(0)
sage: 0 in J
True
sage: 1 in J
False
sage: K.<a> = NumberField(x^4 + 3); K
Number Field in a with defining polynomial x^4 + 3
sage: I = K.factor(13)[0][0]
sage: I # random sign in output
Fractional ideal (-2*a^2 - 1)
sage: 2/3 in I
False
sage: 1 in I
False
sage: 13 in I
True
sage: 1 in I*I^(-1)
True
sage: I # random sign in output
Fractional ideal (-2*a^2 - 1)
sage: K.<y>=NumberField(x^2-3)
sage: L.<z>=K.extension(x^2-5)
sage: 0 in L.ideal(0)
True
sage: 1 in L.ideal(0)
False
"""
return self.coordinates(x).denominator() == 1
def __elements_from_hnf(self, hnf):
"""
Convert a PARI Hermite normal form matrix to a list of
NumberFieldElements.
EXAMPLES::
sage: K.<a> = NumberField(x^3 + 389); K
Number Field in a with defining polynomial x^3 + 389
sage: I = K.factor(17)[0][0]
sage: I # random sign in generator
Fractional ideal (-100*a^2 + 730*a - 5329)
sage: hnf = I.pari_hnf(); hnf
[17, 0, 13; 0, 17, 8; 0, 0, 1]
sage: I._NumberFieldIdeal__elements_from_hnf(hnf)
[17, 17*a, a^2 + 8*a + 13]
sage: I._NumberFieldIdeal__elements_from_hnf(hnf^(-1))
[1/17, 1/17*a, a^2 - 8/17*a - 13/17]
"""
K = self.number_field()
return [K(x, check=False) for x in K.pari_zk() * hnf]
def __repr__(self):
"""
Return the string representation of this number field ideal.
.. note::
Only the zero ideal actually has type NumberFieldIdeal; all
others have type NumberFieldFractionalIdeal. So this function
will only ever be called on the zero ideal.
EXAMPLES::
sage: K.<a> = NumberField(x^3-2)
sage: I = K.ideal(0); I
Ideal (0) of Number Field in a with defining polynomial x^3 - 2
sage: type(I)
<class 'sage.rings.number_field.number_field_ideal.NumberFieldIdeal'>
sage: I = K.ideal(1); I
Fractional ideal (1)
sage: type(I)
<class 'sage.rings.number_field.number_field_ideal.NumberFieldFractionalIdeal'>
sage: I = K.ideal(a); I
Fractional ideal (a)
sage: type(I)
<class 'sage.rings.number_field.number_field_ideal.NumberFieldFractionalIdeal'>
sage: I = K.ideal(1/a); I
Fractional ideal (1/2*a^2)
sage: type(I)
<class 'sage.rings.number_field.number_field_ideal.NumberFieldFractionalIdeal'>
"""
return "Ideal %s of %s" % (self._repr_short(), self.number_field())
def _repr_short(self):
"""
Compact string representation of this ideal. When the norm of
the discriminant of the defining polynomial of the number field
is less than
sage.rings.number_field.number_field_ideal.SMALL_DISC
then display reduced generators. Otherwise display two
generators.
EXAMPLES::
sage: K.<a> = NumberField(x^4 + 389); K
Number Field in a with defining polynomial x^4 + 389
sage: I = K.factor(17)[0][0]; I
Fractional ideal (17, a^2 + 6)
sage: I._repr_short()
'(17, a^2 + 6)'
We use reduced gens, because the discriminant is small::
sage: K.<a> = NumberField(x^2 + 17); K
Number Field in a with defining polynomial x^2 + 17
sage: I = K.factor(17)[0][0]; I
Fractional ideal (a)
Here the discriminant is 'large', so the gens aren't reduced::
sage: sage.rings.number_field.number_field_ideal.SMALL_DISC
1000000
sage: K.<a> = NumberField(x^2 + 902384094); K
Number Field in a with defining polynomial x^2 + 902384094
sage: I = K.factor(19)[0][0]; I
Fractional ideal (19, a + 5)
sage: I.gens_reduced()
(19, a + 5)
"""
return '(%s)' % (', '.join(map(str, self._gens_repr())))
def _gens_repr(self):
"""
Returns tuple of generators to be used for printing this number
field ideal. The gens are reduced only if the absolute value of
the norm of the discriminant of the defining polynomial is at
most sage.rings.number_field.number_field_ideal.SMALL_DISC.
EXAMPLES::
sage: sage.rings.number_field.number_field_ideal.SMALL_DISC
1000000
sage: K.<a> = NumberField(x^4 + 3*x^2 - 17)
sage: K.discriminant() # too big
-1612688
sage: I = K.ideal([17*a*(2*a-2),17*a*(2*a-3)]); I._gens_repr()
(289, 17*a)
sage: I.gens_reduced()
(17*a,)
"""
# If the discriminant is small, it is easy to find nice gens.
# Otherwise it is potentially very hard.
try:
if abs(self.number_field().defining_polynomial().discriminant().norm()) <= SMALL_DISC:
return self.gens_reduced()
except TypeError:
# In some cases with relative extensions, computing the
# discriminant of the defining polynomial is not
# supported.
pass
# Return two generators unless the second one is zero
two_gens = self.gens_two()
if two_gens[1]:
return two_gens
else:
return (two_gens[0],)
def __pari__(self):
"""
Returns PARI Hermite Normal Form representations of this
ideal.
EXAMPLES::
sage: K.<w> = NumberField(x^2 + 23)
sage: I = K.class_group().0.ideal(); I
Fractional ideal (2, 1/2*w - 1/2)
sage: I.__pari__()
[2, 0; 0, 1]
"""
return self.pari_hnf()
def _pari_init_(self):
"""
Returns self in PARI Hermite Normal Form as a string
EXAMPLES::
sage: K.<w> = NumberField(x^2 + 23)
sage: I = K.class_group().0.ideal()
sage: I._pari_init_()
'[2, 0; 0, 1]'
"""
return str(self.__pari__())
def pari_hnf(self):
"""
Return PARI's representation of this ideal in Hermite normal form.
EXAMPLES::
sage: R.<x> = PolynomialRing(QQ)
sage: K.<a> = NumberField(x^3 - 2)
sage: I = K.ideal(2/(5+a))
sage: I.pari_hnf()
[2, 0, 50/127; 0, 2, 244/127; 0, 0, 2/127]
"""
try:
return self.__pari_hnf
except AttributeError:
nf = self.number_field().pari_nf()
self.__pari_hnf = nf.idealhnf(0)
hnflist = [ nf.idealhnf(x) for x in self.gens() ]
for ideal in hnflist:
self.__pari_hnf = nf.idealadd(self.__pari_hnf, ideal)
return self.__pari_hnf
@cached_method
def basis(self):
r"""
Return a basis for this ideal viewed as a `\ZZ` -module.
OUTPUT:
An immutable sequence of elements of this ideal (note: their
parent is the number field) forming a basis for this ideal.
EXAMPLES::
sage: K.<z> = CyclotomicField(7)
sage: I = K.factor(11)[0][0]
sage: I.basis() # warning -- choice of basis can be somewhat random
[11, 11*z, 11*z^2, z^3 + 5*z^2 + 4*z + 10, z^4 + z^2 + z + 5, z^5 + z^4 + z^3 + 2*z^2 + 6*z + 5]
An example of a non-integral ideal.::
sage: J = 1/I
sage: J # warning -- choice of generators can be somewhat random
Fractional ideal (2/11*z^5 + 2/11*z^4 + 3/11*z^3 + 2/11)
sage: J.basis() # warning -- choice of basis can be somewhat random
[1, z, z^2, 1/11*z^3 + 7/11*z^2 + 6/11*z + 10/11, 1/11*z^4 + 1/11*z^2 + 1/11*z + 7/11, 1/11*z^5 + 1/11*z^4 + 1/11*z^3 + 2/11*z^2 + 8/11*z + 7/11]
Number fields defined by non-monic and non-integral
polynomials are supported (:trac:`252`)::
sage: K.<a> = NumberField(2*x^2 - 1/3)
sage: K.ideal(a).basis()
[1, a]
"""
hnf = self.pari_hnf()
v = self.__elements_from_hnf(hnf)
return Sequence(v, immutable=True)
@cached_method
def free_module(self):
r"""
Return the free `\ZZ`-module contained in the vector space
associated to the ambient number field, that corresponds
to this ideal.
EXAMPLES::
sage: K.<z> = CyclotomicField(7)
sage: I = K.factor(11)[0][0]; I
Fractional ideal (-3*z^4 - 2*z^3 - 2*z^2 - 2)
sage: A = I.free_module()
sage: A # warning -- choice of basis can be somewhat random
Free module of degree 6 and rank 6 over Integer Ring
User basis matrix:
[11 0 0 0 0 0]
[ 0 11 0 0 0 0]
[ 0 0 11 0 0 0]
[10 4 5 1 0 0]
[ 5 1 1 0 1 0]
[ 5 6 2 1 1 1]
However, the actual `\ZZ`-module is not at all random::
sage: A.basis_matrix().change_ring(ZZ).echelon_form()
[ 1 0 0 5 1 1]
[ 0 1 0 1 1 7]
[ 0 0 1 7 6 10]
[ 0 0 0 11 0 0]
[ 0 0 0 0 11 0]
[ 0 0 0 0 0 11]
The ideal doesn't have to be integral::
sage: J = I^(-1)
sage: B = J.free_module()
sage: B.echelonized_basis_matrix()
[ 1/11 0 0 7/11 1/11 1/11]
[ 0 1/11 0 1/11 1/11 5/11]
[ 0 0 1/11 5/11 4/11 10/11]
[ 0 0 0 1 0 0]
[ 0 0 0 0 1 0]
[ 0 0 0 0 0 1]
This also works for relative extensions::
sage: K.<a,b> = NumberField([x^2 + 1, x^2 + 2])
sage: I = K.fractional_ideal(4)
sage: I.free_module()
Free module of degree 4 and rank 4 over Integer Ring
User basis matrix:
[ 4 0 0 0]
[ -3 7 -1 1]
[ 3 7 1 1]
[ 0 -10 0 -2]
sage: J = I^(-1); J.free_module()
Free module of degree 4 and rank 4 over Integer Ring
User basis matrix:
[ 1/4 0 0 0]
[-3/16 7/16 -1/16 1/16]
[ 3/16 7/16 1/16 1/16]
[ 0 -5/8 0 -1/8]
An example of intersecting ideals by intersecting free modules.::
sage: K.<a> = NumberField(x^3 + x^2 - 2*x + 8)
sage: I = K.factor(2)
sage: p1 = I[0][0]; p2 = I[1][0]
sage: N = p1.free_module().intersection(p2.free_module()); N
Free module of degree 3 and rank 3 over Integer Ring
Echelon basis matrix:
[ 1 1/2 1/2]
[ 0 1 1]
[ 0 0 2]
sage: N.index_in(p1.free_module()).abs()
2
TESTS:
Sage can find the free module associated to quite large ideals
quickly (see :trac:`4627`)::
sage: y = polygen(ZZ)
sage: M.<a> = NumberField(y^20 - 2*y^19 + 10*y^17 - 15*y^16 + 40*y^14 - 64*y^13 + 46*y^12 + 8*y^11 - 32*y^10 + 8*y^9 + 46*y^8 - 64*y^7 + 40*y^6 - 15*y^4 + 10*y^3 - 2*y + 1)
sage: M.ideal(prod(prime_range(6000, 6200))).free_module()
Free module of degree 20 and rank 20 over Integer Ring
User basis matrix:
20 x 20 dense matrix over Rational Field
"""
return basis_to_module(self.basis(), self.number_field())
def reduce_equiv(self):
"""
Return a small ideal that is equivalent to self in the group
of fractional ideals modulo principal ideals. Very often (but
not always) if self is principal then this function returns
the unit ideal.
ALGORITHM: Calls :pari:`idealred` function.
EXAMPLES::
sage: K.<w> = NumberField(x^2 + 23)
sage: I = ideal(w*23^5); I
Fractional ideal (6436343*w)
sage: I.reduce_equiv()
Fractional ideal (1)
sage: I = K.class_group().0.ideal()^10; I
Fractional ideal (1024, 1/2*w + 979/2)
sage: I.reduce_equiv()
Fractional ideal (2, 1/2*w - 1/2)
"""
K = self.number_field()
P = K.pari_nf()
hnf = P.idealred(self.pari_hnf())
gens = self.__elements_from_hnf(hnf)
return K.ideal(gens)
def gens_reduced(self, proof=None):
r"""
Express this ideal in terms of at most two generators, and one
if possible.
This function indirectly uses ``bnfisprincipal``, so set
``proof=True`` if you want to prove correctness (which *is* the
default).
EXAMPLES::
sage: R.<x> = PolynomialRing(QQ)
sage: K.<a> = NumberField(x^2 + 5)
sage: K.ideal(0).gens_reduced()
(0,)
sage: J = K.ideal([a+2, 9])
sage: J.gens()
(a + 2, 9)
sage: J.gens_reduced() # random sign
(a + 2,)
sage: K.ideal([a+2, 3]).gens_reduced()
(3, a + 2)
TESTS::
sage: len(J.gens_reduced()) == 1
True
sage: all(j.parent() is K for j in J.gens())
True
sage: all(j.parent() is K for j in J.gens_reduced())
True
sage: K.<a> = NumberField(x^4 + 10*x^2 + 20)
sage: J = K.prime_above(5)
sage: J.is_principal()
False
sage: J.gens_reduced()
(5, a)
sage: all(j.parent() is K for j in J.gens())
True
sage: all(j.parent() is K for j in J.gens_reduced())
True
Make sure this works with large ideals (:trac:`11836`)::
sage: R.<x> = QQ['x']
sage: L.<b> = NumberField(x^10 - 10*x^8 - 20*x^7 + 165*x^6 - 12*x^5 - 760*x^3 + 2220*x^2 + 5280*x + 7744)
sage: z_x = -96698852571685/2145672615243325696*b^9 + 2472249905907/195061146840302336*b^8 + 916693155514421/2145672615243325696*b^7 + 1348520950997779/2145672615243325696*b^6 - 82344497086595/12191321677518896*b^5 + 2627122040194919/536418153810831424*b^4 - 452199105143745/48765286710075584*b^3 + 4317002771457621/536418153810831424*b^2 + 2050725777454935/67052269226353928*b + 3711967683469209/3047830419379724
sage: P = EllipticCurve(L, '57a1').lift_x(z_x) * 3
sage: ideal = L.fractional_ideal(P[0], P[1])
sage: ideal.is_principal(proof=False)
True
sage: len(ideal.gens_reduced(proof=False))
1
"""
if len(self.gens()) <= 1:
self._is_principal = True
self._reduced_generators = self.gens()
return self._reduced_generators
self._cache_bnfisprincipal(proof=proof, gens=True)
return self._reduced_generators
def gens_two(self):
r"""
Express this ideal using exactly two generators, the first of
which is a generator for the intersection of the ideal with `Q`.
ALGORITHM: uses PARI's :pari:`idealtwoelt` function, which runs in
randomized polynomial time and is very fast in practice.
EXAMPLES::
sage: R.<x> = PolynomialRing(QQ)
sage: K.<a> = NumberField(x^2 + 5)
sage: J = K.ideal([a+2, 9])
sage: J.gens()
(a + 2, 9)
sage: J.gens_two()
(9, a + 2)
sage: K.ideal([a+5, a+8]).gens_two()
(3, a + 2)
sage: K.ideal(0).gens_two()
(0, 0)
The second generator is zero if and only if the ideal is
generated by a rational, in contrast to the PARI function
:pari:`idealtwoelt`::
sage: I = K.ideal(12)
sage: pari(K).idealtwoelt(I) # Note that second element is not zero
[12, [0, 12]~]
sage: I.gens_two()
(12, 0)
"""
try:
return self.__two_generators
except AttributeError:
pass
K = self.number_field()
if self.is_zero():
self.__two_generators = (K.zero(), K.zero())
else:
HNF = self.pari_hnf()
# Check whether the ideal is generated by an integer, i.e.
# whether HNF is a multiple of the identity matrix
if HNF.gequal(HNF[0,0]):
a = HNF[0,0]
alpha = 0
else:
a, alpha = K.pari_nf().idealtwoelt(HNF)
self.__two_generators = (K(a), K(alpha))
return self.__two_generators
def integral_basis(self):
r"""
Return a list of generators for this ideal as a `\ZZ`-module.
EXAMPLES::
sage: R.<x> = PolynomialRing(QQ)
sage: K.<i> = NumberField(x^2 + 1)
sage: J = K.ideal(i+1)
sage: J.integral_basis()
[2, i + 1]
"""
hnf = self.pari_hnf()
return self.__elements_from_hnf(hnf)
def integral_split(self):
r"""
Return a tuple `(I, d)`, where `I` is an integral ideal, and `d` is the
smallest positive integer such that this ideal is equal to `I/d`.
EXAMPLES::
sage: R.<x> = PolynomialRing(QQ)
sage: K.<a> = NumberField(x^2-5)
sage: I = K.ideal(2/(5+a))
sage: I.is_integral()
False
sage: J,d = I.integral_split()
sage: J
Fractional ideal (-1/2*a + 5/2)
sage: J.is_integral()
True
sage: d
5
sage: I == J/d
True
"""
try:
return self.__integral_split
except AttributeError:
if self.is_integral():
self.__integral_split = (self, ZZ(1))
else:
factors = self.factor()
denom_list = [p_e for p_e in factors if p_e[1] < 0]
denominator = prod([ p.smallest_integer()**(-e)
for (p,e) in denom_list ])
## Get a list of the primes dividing the denominator
plist = [ p.smallest_integer() for (p,e) in denom_list ]
for p in plist:
while denominator % p == 0 and (self*(denominator/p)).is_integral():
denominator //= p
self.__integral_split = (self*denominator, denominator)
return self.__integral_split
def intersection(self, other):
r"""
Return the intersection of self and other.
EXAMPLES::
sage: K.<a> = QuadraticField(-11)
sage: p = K.ideal((a + 1)/2); q = K.ideal((a + 3)/2)
sage: p.intersection(q) == q.intersection(p) == K.ideal(a-2)
True
An example with non-principal ideals::
sage: L.<a> = NumberField(x^3 - 7)
sage: p = L.ideal(a^2 + a + 1, 2)
sage: q = L.ideal(a+1)
sage: p.intersection(q) == L.ideal(8, 2*a + 2)
True
A relative example::
sage: L.<a,b> = NumberField([x^2 + 11, x^2 - 5])
sage: A = L.ideal([15, (-3/2*b + 7/2)*a - 8])
sage: B = L.ideal([6, (-1/2*b + 1)*a - b - 5/2])
sage: A.intersection(B) == L.ideal(-1/2*a - 3/2*b - 1)
True
TESTS:
Test that this works with non-integral ideals (:trac:`10767`)::
sage: K = QuadraticField(-2)
sage: I = K.ideal(1/2)
sage: I.intersection(I)
Fractional ideal (1/2)
"""
L = self.number_field()
other = L.ideal(other)
nf = L.pari_nf()
hnf = nf.idealintersect(self.pari_hnf(), other.pari_hnf())
I = L.ideal(self._NumberFieldIdeal__elements_from_hnf(hnf))
I.__pari_hnf = hnf
return I
def is_integral(self):
"""
Return True if this ideal is integral.
EXAMPLES::
sage: R.<x> = PolynomialRing(QQ)
sage: K.<a> = NumberField(x^5-x+1)
sage: K.ideal(a).is_integral()
True
sage: (K.ideal(1) / (3*a+1)).is_integral()
False
"""
try:
return self.__is_integral
except AttributeError:
one = self.number_field().ideal(1)
self.__is_integral = all(a in one for a in self.integral_basis())
return self.__is_integral
def is_maximal(self):
"""
Return True if this ideal is maximal. This is equivalent to
self being prime and nonzero.
EXAMPLES::
sage: K.<a> = NumberField(x^3 + 3); K
Number Field in a with defining polynomial x^3 + 3
sage: K.ideal(5).is_maximal()
False
sage: K.ideal(7).is_maximal()
True
"""
return self.is_prime() and not self.is_zero()
def is_prime(self):
"""
Return True if this ideal is prime.
EXAMPLES::
sage: K.<a> = NumberField(x^2 - 17); K
Number Field in a with defining polynomial x^2 - 17
sage: K.ideal(5).is_prime() # inert prime
True
sage: K.ideal(13).is_prime() # split
False
sage: K.ideal(17).is_prime() # ramified
False
"""
try:
return self._pari_prime is not None
except AttributeError:
F = self.factor() # factorization with caching
if len(F) != 1 or F[0][1] != 1:
self._pari_prime = None
else:
self._pari_prime = F[0][0]._pari_prime
return self._pari_prime is not None
def pari_prime(self):
r"""
Returns a PARI prime ideal corresponding to the ideal ``self``.
INPUT:
- ``self`` - a prime ideal.
OUTPUT: a PARI "prime ideal", i.e. a five-component vector `[p,a,e,f,b]`
representing the prime ideal `p O_K + a O_K`, `e`, `f` as usual, `a` as
vector of components on the integral basis, `b` Lenstra's constant.
EXAMPLES::
sage: K.<i> = QuadraticField(-1)
sage: K.ideal(3).pari_prime()
[3, [3, 0]~, 1, 2, 1]
sage: K.ideal(2+i).pari_prime()
[5, [2, 1]~, 1, 1, [-2, -1; 1, -2]]
sage: K.ideal(2).pari_prime()
Traceback (most recent call last):
...
ValueError: Fractional ideal (2) is not a prime ideal
"""
if not self.is_prime():
raise ValueError("%s is not a prime ideal" % self)
return self._pari_prime
    def _cache_bnfisprincipal(self, proof=None, gens=False):
        r"""
        This function is essentially the implementation of
        :meth:`is_principal`, :meth:`gens_reduced` and
        :meth:`ideal_class_log`.

        INPUT:

        - ``self`` -- an ideal

        - ``proof`` -- proof flag. If ``proof=False``, assume GRH.

        - ``gens`` -- (default: False) if True, also computes the reduced
          generators of the ideal.

        OUTPUT:

        None. This function simply caches the results: it sets
        ``_ideal_class_log`` (see :meth:`ideal_class_log`),
        ``_is_principal`` (see :meth:`is_principal`) and
        ``_reduced_generators``.

        TESTS:

        Check that no warnings are triggered from PARI/GP (see :trac:`30801`)::

            sage: K.<a> = NumberField(x^2 - x + 112941801)
            sage: I = K.ideal((112941823, a + 49942513))
            sage: I.is_principal()
            False
        """
        # Since pari_bnf() is cached, this call to pari_bnf() should not
        # influence the run-time much. Also, this simplifies the handling
        # of the proof flag: if we computed bnfisprincipal() in the past
        # with proof=False, then we do not need to recompute the result.
        # We just need to check correctness of pari_bnf().
        proof = get_flag(proof, "number_field")
        bnf = self.number_field().pari_bnf(proof)
        # If we already have _reduced_generators, no need to compute them again
        if hasattr(self, "_reduced_generators"):
            gens = False
        # Is there something to do?
        if hasattr(self, "_ideal_class_log") and not gens:
            self._is_principal = not any(self._ideal_class_log)
            return
        if not gens:
            # Flag 0: only the class-group components, no generator.
            v = bnf.bnfisprincipal(self.pari_hnf(), 0)
            self._ideal_class_log = list(v)
            self._is_principal = not any(self._ideal_class_log)
        else:
            # TODO: this is a bit of a waste. We ask bnfisprincipal to compute the compact form and then
            # convert this compact form back into an expanded form.
            # (though calling with 3 instead of 5 most likely triggers an error with memory allocation failure)
            v = bnf.bnfisprincipal(self.pari_hnf(), 5)
            e = v[0]
            t = v[1]
            # Expand the compact (factored) form of the generator.
            t = bnf.nfbasistoalg(bnf.nffactorback(t))
            self._ideal_class_log = list(e)
            self._is_principal = not any(self._ideal_class_log)
            # NOTE(review): the two branches below must sit inside this
            # ``else`` (i.e. the gens=True path), since ``t`` is only
            # defined here; ``elif gens:`` is then always taken for a
            # non-principal ideal — confirm against upstream indentation.
            if self._is_principal:
                g = self.number_field()(t)
                self._reduced_generators = (g,)
            elif gens:
                # Non-principal ideal
                self._reduced_generators = self.gens_two()
def is_principal(self, proof=None):
r"""
Return True if this ideal is principal.
Since it uses the PARI method :pari:`bnfisprincipal`, specify
``proof=True`` (this is the default setting) to prove the correctness
of the output.
EXAMPLES::
sage: K = QuadraticField(-119,'a')
sage: P = K.factor(2)[1][0]
sage: P.is_principal()
False
sage: I = P^5
sage: I.is_principal()
True
sage: I # random
Fractional ideal (-1/2*a + 3/2)
sage: P = K.ideal([2]).factor()[1][0]
sage: I = P^5
sage: I.is_principal()
True
"""
if len(self.gens()) <= 1:
self._is_principal = True
self._reduced_generators = self.gens()
return self._is_principal
self._cache_bnfisprincipal(proof)
return self._is_principal
def ideal_class_log(self, proof=None):
r"""
Return the output of PARI's :pari:`bnfisprincipal` for this ideal,
i.e. a vector expressing the class of this ideal in terms of a
set of generators for the class group.
Since it uses the PARI method :pari:`bnfisprincipal`, specify
``proof=True`` (this is the default setting) to prove the correctness
of the output.
EXAMPLES:
When the class number is 1, the result is always the empty list::
sage: K.<a> = QuadraticField(-163)
sage: J = K.primes_above(random_prime(10^6))[0]
sage: J.ideal_class_log()
[]
An example with class group of order 2. The first ideal is
not principal, the second one is::
sage: K.<a> = QuadraticField(-5)
sage: J = K.ideal(23).factor()[0][0]
sage: J.ideal_class_log()
[1]
sage: (J^10).ideal_class_log()
[0]
An example with a more complicated class group::
sage: K.<a, b> = NumberField([x^3 - x + 1, x^2 + 26])
sage: K.class_group()
Class group of order 18 with structure C6 x C3 of Number Field in a with defining polynomial x^3 - x + 1 over its base field
sage: K.primes_above(7)[0].ideal_class_log() # random
[1, 2]
"""
self._cache_bnfisprincipal(proof)
return self._ideal_class_log
def S_ideal_class_log(self, S):
r"""
S-class group version of :meth:`ideal_class_log`.
EXAMPLES::
sage: K.<a> = QuadraticField(-14)
sage: S = K.primes_above(2)
sage: I = K.ideal(3, a + 1)
sage: I.S_ideal_class_log(S)
[1]
sage: I.S_ideal_class_log([])
[3]
TESTS::
sage: K.<a> = QuadraticField(-974)
sage: S = K.primes_above(2)
sage: G = K.S_class_group(S)
sage: I0 = G.0.ideal(); I1 = G.1.ideal()
sage: for p in prime_range(100):
....: for P in K.primes_above(p):
....: v = P.S_ideal_class_log(S)
....: assert(G(P) == G(I0^v[0] * I1^v[1]))
"""
from sage.modules.free_module_element import vector
from sage.rings.finite_rings.integer_mod_ring import Zmod
v = vector(ZZ, self.ideal_class_log())
if all(P.is_principal() for P in S):
L = v.list()
invs = self.number_field().class_group().invariants()
else:
M = self.number_field()._S_class_group_quotient_matrix(tuple(S))
L = (v * M).list()
D = self.number_field()._S_class_group_and_units(tuple(S))[1]
invs = [x[1] for x in D]
return [Zmod(invs[i])(L[i]) for i in range(len(L))]
def is_zero(self):
"""
Return True iff self is the zero ideal
Note that `(0)` is a ``NumberFieldIdeal``, not a
``NumberFieldFractionalIdeal``.
EXAMPLES::
sage: K.<a> = NumberField(x^2 + 2); K
Number Field in a with defining polynomial x^2 + 2
sage: K.ideal(3).is_zero()
False
sage: I=K.ideal(0); I.is_zero()
True
sage: I
Ideal (0) of Number Field in a with defining polynomial x^2 + 2
"""
return self == self.number_field().ideal(0)
def norm(self):
"""
Return the norm of this fractional ideal as a rational number.
EXAMPLES::
sage: K.<a> = NumberField(x^4 + 23); K
Number Field in a with defining polynomial x^4 + 23
sage: I = K.ideal(19); I
Fractional ideal (19)
sage: factor(I.norm())
19^4
sage: F = I.factor()
sage: F[0][0].norm().factor()
19^2
"""
try:
return self._norm
except AttributeError:
pass
self._norm = QQ(self.number_field().pari_nf().idealnorm(self.pari_hnf()))
return self._norm
# synonyms (using terminology of relative number fields)
def absolute_norm(self):
"""
A synonym for norm.
EXAMPLES::
sage: K.<i> = NumberField(x^2 + 1)
sage: K.ideal(1 + 2*i).absolute_norm()
5
"""
return self.norm()
def relative_norm(self):
"""
A synonym for norm.
EXAMPLES::
sage: K.<i> = NumberField(x^2 + 1)
sage: K.ideal(1 + 2*i).relative_norm()
5
"""
return self.norm()
def absolute_ramification_index(self):
"""
A synonym for ramification_index.
EXAMPLES::
sage: K.<i> = NumberField(x^2 + 1)
sage: K.ideal(1 + i).absolute_ramification_index()
2
"""
return self.ramification_index()
def relative_ramification_index(self):
"""
A synonym for ramification_index.
EXAMPLES::
sage: K.<i> = NumberField(x^2 + 1)
sage: K.ideal(1 + i).relative_ramification_index()
2
"""
return self.ramification_index()
def number_field(self):
"""
Return the number field that this is a fractional ideal in.
EXAMPLES::
sage: K.<a> = NumberField(x^2 + 2); K
Number Field in a with defining polynomial x^2 + 2
sage: K.ideal(3).number_field()
Number Field in a with defining polynomial x^2 + 2
sage: K.ideal(0).number_field() # not tested (not implemented)
Number Field in a with defining polynomial x^2 + 2
"""
return self.ring()
def smallest_integer(self):
r"""
Return the smallest non-negative integer in `I \cap \ZZ`,
where `I` is this ideal. If `I = 0`, returns 0.
EXAMPLES::
sage: R.<x> = PolynomialRing(QQ)
sage: K.<a> = NumberField(x^2+6)
sage: I = K.ideal([4,a])/7; I
Fractional ideal (2/7, 1/7*a)
sage: I.smallest_integer()
2
TESTS::
sage: K.<i> = QuadraticField(-1)
sage: P1, P2 = [P for P,e in K.factor(13)]
sage: all((P1^i*P2^j).smallest_integer() == 13^max(i,j,0) for i in range(-3,3) for j in range(-3,3))
True
sage: I = K.ideal(0)
sage: I.smallest_integer()
0
See :trac:`4392`::
sage: K.<a>=QuadraticField(-5)
sage: I=K.ideal(7)
sage: I.smallest_integer()
7
sage: K.<z> = CyclotomicField(13)
sage: a = K([-8, -4, -4, -6, 3, -4, 8, 0, 7, 4, 1, 2])
sage: I = K.ideal(a)
sage: I.smallest_integer()
146196692151
sage: I.norm()
1315770229359
sage: I.norm() / I.smallest_integer()
9
"""
if self.is_zero():
return ZZ(0)
# There is no need for caching since pari_hnf() is already cached.
q = self.pari_hnf()[0,0] # PARI integer or rational
return ZZ(q.numerator())
#Old code by John Cremona, 2008-10-30, using the new coordinates()
#function instead of factorization.
#
#Idea: We write 1 as a Q-linear combination of the Z-basis of self,
#and return the denominator of this vector.
#
#self.__smallest_integer = self.coordinates(1).denominator()
#return self.__smallest_integer
def valuation(self, p):
r"""
Return the valuation of self at ``p``.
INPUT:
- ``p`` -- a prime ideal `\mathfrak{p}` of this number field.
OUTPUT:
(integer) The valuation of this fractional ideal at the prime
`\mathfrak{p}`. If `\mathfrak{p}` is not prime, raise a
ValueError.
EXAMPLES::
sage: K.<a> = NumberField(x^5 + 2); K
Number Field in a with defining polynomial x^5 + 2
sage: i = K.ideal(38); i
Fractional ideal (38)
sage: i.valuation(K.factor(19)[0][0])
1
sage: i.valuation(K.factor(2)[0][0])
5
sage: i.valuation(K.factor(3)[0][0])
0
sage: i.valuation(0)
Traceback (most recent call last):
...
ValueError: p (= Ideal (0) of Number Field in a with defining polynomial x^5 + 2) must be nonzero
sage: K.ideal(0).valuation(K.factor(2)[0][0])
+Infinity
"""
if not isinstance(p, NumberFieldIdeal):
p = self.number_field().ideal(p)
if not p:
raise ValueError("p (= %s) must be nonzero" % p)
if not p.is_prime():
raise ValueError("p (= %s) must be a prime" % p)
if p.ring() != self.number_field():
raise ValueError("p (= %s) must be an ideal in %s" % self.number_field())
nf = self.number_field().pari_nf()
return nf.idealval(self.pari_hnf(), p.pari_prime()).sage()
def decomposition_group(self):
r"""
Return the decomposition group of self, as a subset of the
automorphism group of the number field of self. Raises an
error if the field isn't Galois. See the decomposition_group
method of the ``GaloisGroup_v2`` class for further examples
and doctests.
EXAMPLES::
sage: QuadraticField(-23, 'w').primes_above(7)[0].decomposition_group()
Subgroup generated by [(1,2)] of (Galois group 2T1 (S2) with order 2 of x^2 + 23)
"""
return self.number_field().galois_group().decomposition_group(self)
def ramification_group(self, v):
r"""
Return the `v`'th ramification group of self, i.e. the set of
elements `s` of the Galois group of the number field of self
(which we assume is Galois) such that `s` acts trivially
modulo the `(v+1)`'st power of self. See the
ramification_group method of the ``GaloisGroup`` class for
further examples and doctests.
EXAMPLES::
sage: QuadraticField(-23, 'w').primes_above(23)[0].ramification_group(0)
Subgroup generated by [(1,2)] of (Galois group 2T1 (S2) with order 2 of x^2 + 23)
sage: QuadraticField(-23, 'w').primes_above(23)[0].ramification_group(1)
Subgroup generated by [()] of (Galois group 2T1 (S2) with order 2 of x^2 + 23)
"""
return self.number_field().galois_group().ramification_group(self, v)
def inertia_group(self):
r"""
Return the inertia group of self, i.e. the set of elements s of the
Galois group of the number field of self (which we assume is Galois)
such that s acts trivially modulo self. This is the same as the 0th
ramification group of self. See the inertia_group method of the
``GaloisGroup_v2`` class for further examples and doctests.
EXAMPLES::
sage: QuadraticField(-23, 'w').primes_above(23)[0].inertia_group()
Subgroup generated by [(1,2)] of (Galois group 2T1 (S2) with order 2 of x^2 + 23)
"""
return self.ramification_group(0)
def random_element(self, *args, **kwds):
r"""
Return a random element of this order.
INPUT:
- ``*args``, ``*kwds`` - Parameters passed to the random integer
function. See the documentation of ``ZZ.random_element()`` for
details.
OUTPUT:
A random element of this fractional ideal, computed as a random
`\ZZ`-linear combination of the basis.
EXAMPLES::
sage: K.<a> = NumberField(x^3 + 2)
sage: I = K.ideal(1-a)
sage: I.random_element() # random output
-a^2 - a - 19
sage: I.random_element(distribution="uniform") # random output
a^2 - 2*a - 8
sage: I.random_element(-30,30) # random output
-7*a^2 - 17*a - 75
sage: I.random_element(-100, 200).is_integral()
True
sage: I.random_element(-30,30).parent() is K
True
A relative example::
sage: K.<a, b> = NumberField([x^2 + 2, x^2 + 1000*x + 1])
sage: I = K.ideal(1-a)
sage: I.random_element() # random output
17/500002*a^3 + 737253/250001*a^2 - 1494505893/500002*a + 752473260/250001
sage: I.random_element().is_integral()
True
sage: I.random_element(-100, 200).parent() is K
True
"""
if self.number_field().is_absolute():
basis = self.basis()
else:
basis = self.absolute_ideal().basis()
return self.number_field()(sum([ZZ.random_element(*args, **kwds)*a for a in basis]))
def artin_symbol(self):
r"""
Return the Artin symbol `( K / \QQ, P)`, where `K` is the
number field of `P` =self. This is the unique element `s` of
the decomposition group of `P` such that `s(x) = x^p \pmod{P}`
where `p` is the residue characteristic of `P`. (Here `P`
(self) should be prime and unramified.)
See the ``artin_symbol`` method of the ``GaloisGroup_v2``
class for further documentation and examples.
EXAMPLES::
sage: QuadraticField(-23, 'w').primes_above(7)[0].artin_symbol()
(1,2)
"""
return self.number_field().galois_group().artin_symbol(self)
    def residue_symbol(self, e, m, check=True):
        r"""
        The m-th power residue symbol for an element e and the proper ideal.

        .. MATH:: \left(\frac{\alpha}{\mathbf{P}}\right) \equiv \alpha^{\frac{N(\mathbf{P})-1}{m}} \operatorname{mod} \mathbf{P}

        .. note:: accepts m=1, in which case returns 1

        .. note:: can also be called for an element from sage.rings.number_field_element.residue_symbol

        .. note:: e is coerced into the number field of self

        .. note:: if m=2, e is an integer, and self.number_field() has absolute degree 1 (i.e. it is a copy of the rationals), then this calls kronecker_symbol, which is implemented using GMP.

        INPUT:

        - ``e`` - element of the number field

        - ``m`` - positive integer

        OUTPUT:

        - an m-th root of unity in the number field

        EXAMPLES:

        Quadratic Residue (7 is not a square modulo 11)::

            sage: K.<a> = NumberField(x - 1)
            sage: K.ideal(11).residue_symbol(7,2)
            -1

        Cubic Residue::

            sage: K.<w> = NumberField(x^2 - x + 1)
            sage: K.ideal(17).residue_symbol(w^2 + 3,3)
            -w

        The field must contain the m-th roots of unity::

            sage: K.<w> = NumberField(x^2 - x + 1)
            sage: K.ideal(17).residue_symbol(w^2 + 3,5)
            Traceback (most recent call last):
            ...
            ValueError: The residue symbol to that power is not defined for the number field
        """
        K = self.ring()
        # Fast path: a quadratic symbol of a rational integer over QQ is a
        # Kronecker symbol, computed in GMP.  Fall through silently if ``e``
        # is not an integer.
        if m == 2 and K.absolute_degree() == 1:
            try:
                ze = ZZ(e)
                zp = self.smallest_integer()
            except TypeError:
                pass
            else:
                return kronecker_symbol(ze, zp)
        # Optional input validation (skipped on recursive calls below).
        if check:
            if self.is_trivial():
                raise ValueError("Ideal must be proper")
            if m < 1:
                raise ValueError("Power must be positive")
            if not self.is_coprime(e):
                raise ValueError("Element is not coprime to the ideal")
            if not self.is_coprime(m):
                raise ValueError("Ideal is not coprime to the power")
        primroot = K.primitive_root_of_unity()
        rootorder = primroot.multiplicative_order()
        # The m-th power symbol only makes sense if K contains the m-th roots
        # of unity, i.e. m divides the order of the root-of-unity group.
        if check:
            if rootorder % m:
                raise ValueError("The residue symbol to that power is not defined for the number field")
        # Multiplicativity: for a non-prime ideal, take the product of the
        # symbols over its prime factorization (checks already done above).
        if not self.is_prime():
            return prod(Q.residue_symbol(e,m,check=False)**i for Q, i in self.factor())
        k = self.residue_field()
        try:
            r = k(e)
        except TypeError:
            raise ValueError("Element and ideal must be in a common number field")
        # Euler-criterion analogue: e^((N(P)-1)/m) mod P is an m-th root of
        # unity in the residue field.
        r = k(r**((k.order()-1)/m))
        resroot = primroot**(rootorder/m)
        from sage.groups.generic import discrete_log
        # Lift back to a root of unity in K by matching discrete logarithms
        # with respect to the image of a primitive m-th root of unity.
        j = discrete_log(k(r), k(resroot), ord=m)
        return resroot**j
    def _quadratic_form(self):
        r"""
        If this is a quadratic extension over `\QQ`, return the binary
        quadratic form associated with this ideal.

        EXAMPLES::

            sage: K.<a> = QuadraticField(23)
            sage: K.ideal(a).quadratic_form()
            23*x^2 - y^2

            sage: K.<a> = QuadraticField(-5)
            sage: K.class_group().order()
            2
            sage: A = K.class_group().gen()
            sage: A.ideal().quadratic_form().reduced_form()
            2*x^2 + 2*x*y + 3*y^2
            sage: (A^2).ideal().quadratic_form().reduced_form()
            x^2 + 5*y^2

            sage: K.<a> = QuadraticField(-40)
            sage: K.class_group().order()
            2
            sage: A = K.class_group().gen()
            sage: A.ideal().quadratic_form().reduced_form()
            2*x^2 + 5*y^2
            sage: (A^2).ideal().quadratic_form().reduced_form()
            x^2 + 10*y^2

        One more check::

            sage: K = QuadraticField(-79)
            sage: A = K.class_group().gen()
            sage: [(A**i).ideal().quadratic_form().discriminant()
            ....:  for i in range(5)]
            [-79, -79, -79, -79, -79]

        This is not defined for higher-degree extensions::

            sage: x = var('x')
            sage: K.<a> = NumberField(x**3-x-1)
            sage: K.ideal(a)._quadratic_form()
            Traceback (most recent call last):
            ...
            ValueError: not defined for ideals in number fields of degree > 2 over Q.

        REFERENCES:

        - [Coh1993]_
        """
        K = self.number_field()
        if K.degree() == 2:
            from sage.quadratic_forms.binary_qf import BinaryQF
            # Obtain a ZZ-basis (alpha, beta) of the ideal; a principal ideal
            # needs to be expanded against an integral basis of O_K first.
            gens = self.gens_reduced()
            if len(gens) == 1:
                u, v = K.ring_of_integers().basis()
                alpha, beta = gens[0] * u, gens[0] * v
            else:
                alpha, beta = gens
            # Orient the basis: swap if the "determinant" of (alpha, beta)
            # relative to the field generator has the wrong sign, so that the
            # resulting form is consistently oriented.
            if QQ((beta * alpha.galois_conjugate() - alpha * beta.galois_conjugate()) / K.gen()) < 0:
                alpha, beta = beta, alpha
            # Coefficients of the form N(alpha*x + beta*y) / N(I); the exact
            # divisions are performed over ZZ.
            N = self.norm()
            a = alpha.norm() // N
            b = ZZ(alpha * beta.galois_conjugate() +
                   beta * alpha.galois_conjugate()) // N
            c = beta.norm() // N
            return BinaryQF([a, b, c])
        raise ValueError("not defined for ideals in number fields of degree > 2 over Q.")
def basis_to_module(B, K):
    r"""
    Given a basis `B` of elements for a `\ZZ`-submodule of a number
    field `K`, return the corresponding `\ZZ`-submodule.

    EXAMPLES::

        sage: K.<w> = NumberField(x^4 + 1)
        sage: from sage.rings.number_field.number_field_ideal import basis_to_module
        sage: basis_to_module([K.0, K.0^2 + 3], K)
        Free module of degree 4 and rank 2 over Integer Ring
        User basis matrix:
        [0 1 0 0]
        [3 0 1 0]
    """
    # Map each basis element into the absolute vector space of K, then span.
    V, _, to_V = K.absolute_vector_space()
    ambient = ZZ ** V.dimension()
    return ambient.span_of_basis([to_V(K(b)) for b in B])
def is_NumberFieldIdeal(x):
    """
    Return ``True`` if ``x`` is an ideal of a number field.

    EXAMPLES::

        sage: from sage.rings.number_field.number_field_ideal import is_NumberFieldIdeal
        sage: is_NumberFieldIdeal(2/3)
        False
        sage: is_NumberFieldIdeal(ideal(5))
        False
        sage: k.<a> = NumberField(x^2 + 2)
        sage: I = k.ideal([a + 1]); I
        Fractional ideal (a + 1)
        sage: is_NumberFieldIdeal(I)
        True
        sage: Z = k.ideal(0); Z
        Ideal (0) of Number Field in a with defining polynomial x^2 + 2
        sage: is_NumberFieldIdeal(Z)
        True
    """
    # Both integral and fractional number field ideals derive from
    # NumberFieldIdeal, so a single isinstance check suffices.
    return isinstance(x, NumberFieldIdeal)
class NumberFieldFractionalIdeal(MultiplicativeGroupElement, NumberFieldIdeal):
r"""
A fractional ideal in a number field.
EXAMPLES::
sage: R.<x> = PolynomialRing(QQ)
sage: K.<a> = NumberField(x^3 - 2)
sage: I = K.ideal(2/(5+a))
sage: J = I^2
sage: Jinv = I^(-2)
sage: J*Jinv
Fractional ideal (1)
"""
def __init__(self, field, gens, coerce=True):
"""
INPUT:
field -- a number field
x -- a list of NumberFieldElements of the field, not all zero
EXAMPLES::
sage: NumberField(x^2 + 1, 'a').ideal(7)
Fractional ideal (7)
"""
from .number_field import NumberField_generic
if not isinstance(field, NumberField_generic):
raise TypeError("field (=%s) must be a number field." % field)
if not gens:
raise ValueError("gens must have length at least 1 (zero ideal is not a fractional ideal)")
if len(gens) == 1 and isinstance(gens[0], (list, tuple)):
gens = gens[0]
if misc.exists(gens,bool)[0]:
NumberFieldIdeal.__init__(self, field, gens)
else:
raise ValueError("gens must have a nonzero element (zero ideal is not a fractional ideal)")
def __repr__(self):
"""
Return the string representation of this number field fractional ideal.
.. note::
Only the zero ideal actually has type NumberFieldIdeal; all
others have type NumberFieldFractionalIdeal.
EXAMPLES::
sage: K.<a>=NumberField(x^2+5)
sage: I = K.ideal([2,1+a]); I
Fractional ideal (2, a + 1)
sage: type(I)
<class 'sage.rings.number_field.number_field_ideal.NumberFieldFractionalIdeal'>
"""
return "Fractional ideal %s" % self._repr_short()
def divides(self, other):
"""
Returns True if this ideal divides other and False otherwise.
EXAMPLES::
sage: K.<a> = CyclotomicField(11); K
Cyclotomic Field of order 11 and degree 10
sage: I = K.factor(31)[0][0]; I
Fractional ideal (31, a^5 + 10*a^4 - a^3 + a^2 + 9*a - 1)
sage: I.divides(I)
True
sage: I.divides(31)
True
sage: I.divides(29)
False
"""
if not isinstance(other, NumberFieldIdeal):
other = self.number_field().ideal(other)
return (other / self).is_integral()
def factor(self):
"""
Factorization of this ideal in terms of prime ideals.
EXAMPLES::
sage: K.<a> = NumberField(x^4 + 23); K
Number Field in a with defining polynomial x^4 + 23
sage: I = K.ideal(19); I
Fractional ideal (19)
sage: F = I.factor(); F
(Fractional ideal (19, 1/2*a^2 + a - 17/2)) * (Fractional ideal (19, 1/2*a^2 - a - 17/2))
sage: type(F)
<class 'sage.structure.factorization.Factorization'>
sage: list(F)
[(Fractional ideal (19, 1/2*a^2 + a - 17/2), 1), (Fractional ideal (19, 1/2*a^2 - a - 17/2), 1)]
sage: F.prod()
Fractional ideal (19)
TESTS:
Number fields defined by non-monic and non-integral
polynomials are supported (:trac:`252`);
the representation depends on the PARI version::
sage: F.<a> = NumberField(2*x^3 + x + 1)
sage: fact = F.factor(2)
sage: (fact[0][1], fact[1][1])
(2, 1)
sage: fact[0][0] == F.ideal(2*a^2 + 1)
True
sage: fact[1][0] == F.ideal(-2*a^2)
True
sage: [p[0].norm() for p in fact]
[2, 2]
"""
try:
return self.__factorization
except AttributeError:
K = self.number_field()
F = K.pari_nf().idealfactor(self.pari_hnf())
A = []
for j in range(0, len(F[0])):
I = K.ideal(F[j,0])
A.append((I,ZZ(F[j,1])))
self.__factorization = Factorization(A)
return self.__factorization
def prime_factors(self):
"""
Return a list of the prime ideal factors of self
OUTPUT:
list -- list of prime ideals (a new list is returned
each time this function is called)
EXAMPLES::
sage: K.<w> = NumberField(x^2 + 23)
sage: I = ideal(w+1)
sage: I.prime_factors()
[Fractional ideal (2, 1/2*w - 1/2), Fractional ideal (2, 1/2*w + 1/2), Fractional ideal (3, 1/2*w + 1/2)]
"""
return [x[0] for x in self.factor()]
support = prime_factors
def _div_(self, other):
"""
Return the quotient self / other.
EXAMPLES::
sage: R.<x> = PolynomialRing(QQ)
sage: K.<a> = NumberField(x^2 - 5)
sage: I = K.ideal(2/(5+a))
sage: J = K.ideal(17+a)
sage: I/J
Fractional ideal (-11/1420*a + 9/284)
sage: (I/J) * J == I
True
"""
K = self.ring()
if self.ngens() == 1 and other.ngens() == 1:
return K.ideal(self.gen(0) / other.gen(0))
return K.ideal(K.pari_nf().idealdiv(self, other))
    def __invert__(self):
        """
        Return the multiplicative inverse of self.  Call with ~self.

        EXAMPLES::

            sage: R.<x> = PolynomialRing(QQ)
            sage: K.<a> = NumberField(x^3 - 2)
            sage: I = K.ideal(2/(5+a))
            sage: ~I
            Fractional ideal (1/2*a + 5/2)
            sage: 1/I
            Fractional ideal (1/2*a + 5/2)
            sage: (1/I) * I
            Fractional ideal (1)
        """
        # Compute the HNF of (1)/I at the PARI level.
        nf = self.number_field().pari_nf()
        hnf = nf.idealdiv(self.number_field().ideal(1).pari_hnf(),
                          self.pari_hnf())
        # Turn the HNF back into generators; the mangled name is used because
        # __elements_from_hnf is private to the NumberFieldIdeal base class.
        I = self.number_field().ideal(NumberFieldIdeal._NumberFieldIdeal__elements_from_hnf(self,hnf))
        # NOTE(review): due to Python name mangling inside this subclass, the
        # next line sets ``I._NumberFieldFractionalIdeal__pari_hnf``, whereas
        # a cache attribute created in the base class would be mangled as
        # ``_NumberFieldIdeal__pari_hnf`` — confirm this pre-seeding of the
        # HNF cache actually takes effect.
        I.__pari_hnf = hnf
        return I
def is_maximal(self):
"""
Return True if this ideal is maximal. This is equivalent to
self being prime, since it is nonzero.
EXAMPLES::
sage: K.<a> = NumberField(x^3 + 3); K
Number Field in a with defining polynomial x^3 + 3
sage: K.ideal(5).is_maximal()
False
sage: K.ideal(7).is_maximal()
True
"""
return self.is_prime()
def is_trivial(self, proof=None):
"""
Returns True if this is a trivial ideal.
EXAMPLES::
sage: F.<a> = QuadraticField(-5)
sage: I = F.ideal(3)
sage: I.is_trivial()
False
sage: J = F.ideal(5)
sage: J.is_trivial()
False
sage: (I+J).is_trivial()
True
"""
return self == self.number_field().ideal(1)
def ramification_index(self):
r"""
Return the ramification index of this fractional ideal,
assuming it is prime. Otherwise, raise a ValueError.
The ramification index is the power of this prime appearing in
the factorization of the prime in `\ZZ` that this prime lies
over.
EXAMPLES::
sage: K.<a> = NumberField(x^2 + 2); K
Number Field in a with defining polynomial x^2 + 2
sage: f = K.factor(2); f
(Fractional ideal (a))^2
sage: f[0][0].ramification_index()
2
sage: K.ideal(13).ramification_index()
1
sage: K.ideal(17).ramification_index()
Traceback (most recent call last):
...
ValueError: Fractional ideal (17) is not a prime ideal
"""
return ZZ(self.pari_prime().pr_get_e())
    def reduce(self, f):
        r"""
        Return the canonical reduction of the element of `f` modulo the ideal
        `I` (=self).  This is an element of `R` (the ring of integers of the
        number field) that is equivalent modulo `I` to `f`.

        An error is raised if this fractional ideal is not integral or
        the element `f` is not integral.

        INPUT:

        - ``f`` - an integral element of the number field

        OUTPUT:

        An integral element `g`, such that `f - g` belongs to the ideal self
        and such that `g` is a canonical reduced representative of the coset
        `f + I` (`I` =self) as described in the ``residues`` function, namely an integral element with coordinates `(r_0, \dots,r_{n-1})`, where:

        - `r_i` is reduced modulo `d_i`

        - `d_i = b_i[i]`, with `{b_0, b_1, \dots, b_n}` HNF basis
          of the ideal self.

        .. note::

           The reduced element `g` is not necessarily small.  To get a
           small `g` use the method ``small_residue``.

        EXAMPLES::

            sage: k.<a> = NumberField(x^3 + 11)
            sage: I = k.ideal(5, a^2 - a + 1)
            sage: c = 4*a + 9
            sage: I.reduce(c)
            a^2 - 2*a
            sage: c - I.reduce(c) in I
            True

        The reduced element is in the list of canonical representatives
        returned by the ``residues`` method:

        ::

            sage: I.reduce(c) in list(I.residues())
            True

        The reduced element does not necessarily have smaller norm (use
        ``small_residue`` for that)

        ::

            sage: c.norm()
            25
            sage: (I.reduce(c)).norm()
            209
            sage: (I.small_residue(c)).norm()
            10

        Sometimes the canonical reduced representative of `1` won't be `1`
        (it depends on the choice of basis for the ring of integers):

        ::

            sage: k.<a> = NumberField(x^2 + 23)
            sage: I = k.ideal(3)
            sage: I.reduce(3*a + 1)
            -3/2*a - 1/2
            sage: k.ring_of_integers().basis()
            [1/2*a + 1/2, a]

        AUTHOR: Maite Aranes.
        """
        if not self.is_integral():
            raise ValueError("reduce only defined for integral ideals")
        R = self.number_field().maximal_order()
        if not (f in R):
            raise TypeError("reduce only defined for integral elements")
        Rbasis = R.basis()
        n = len(Rbasis)
        from sage.matrix.all import MatrixSpace
        # Express the ideal basis in coordinates w.r.t. the integral basis and
        # put it in Hermite normal form; the diagonal entries d_i are the
        # moduli for each coordinate.
        M = MatrixSpace(ZZ,n)([R.coordinates(y) for y in self.basis()])
        D = M.hermite_form()
        d = [D[i,i] for i in range(n)]
        v = R.coordinates(f)
        # Reduce coordinate i modulo d_i, working top-down so that subtracting
        # q*D[i] only affects coordinates >= i (D is upper triangular).
        for i in range(n):
            q, r = ZZ(v[i]).quo_rem(d[i])#v is a vector of rationals, we want division of integers
            # Choose the representative in the balanced range (-d_i/2, d_i/2].
            if 2*r > d[i]:
                q = q + 1
            v = v - q*D[i]
        # Reassemble the reduced coordinates into a field element.
        return sum([v[i]*Rbasis[i] for i in range(n)])
    def residues(self):
        r"""
        Return a iterator through a complete list of residues modulo this integral ideal.

        An error is raised if this fractional ideal is not integral.

        OUTPUT:

        An iterator through a complete list of residues modulo the integral
        ideal self.  This list is the set of canonical reduced representatives
        given by all integral elements with coordinates `(r_0, \dots,r_{n-1})`,
        where:

        - `r_i` is reduced modulo `d_i`

        - `d_i = b_i[i]`, with `{b_0, b_1, \dots, b_n}` HNF basis
          of the ideal.

        AUTHOR: John Cremona (modified by Maite Aranes)

        EXAMPLES::

            sage: K.<i>=NumberField(x^2+1)
            sage: res = K.ideal(2).residues(); res
            xmrange_iter([[0, 1], [0, 1]], <function ...<lambda> at 0x...>)
            sage: list(res)
            [0, i, 1, i + 1]
            sage: list(K.ideal(2+i).residues())
            [-2*i, -i, 0, i, 2*i]
            sage: list(K.ideal(i).residues())
            [0]
            sage: I = K.ideal(3+6*i)
            sage: reps=I.residues()
            sage: len(list(reps)) == I.norm()
            True
            sage: all(r == s or not (r-s) in I for r in reps for s in reps) # long time (6s on sage.math, 2011)
            True
            sage: K.<a> = NumberField(x^3-10)
            sage: I = K.ideal(a-1)
            sage: len(list(I.residues())) == I.norm()
            True
            sage: K.<z> = CyclotomicField(11)
            sage: len(list(K.primes_above(3)[0].residues())) == 3**5 # long time (5s on sage.math, 2011)
            True
        """
        if not self.is_integral():
            raise ValueError("residues only defined for integral ideals")
        R = self.number_field().maximal_order()
        Rbasis = R.basis()
        n = len(Rbasis)
        from sage.matrix.all import MatrixSpace
        # HNF of the ideal basis in coordinates w.r.t. the integral basis;
        # the diagonal entries d_i give the size of each coordinate range.
        M = MatrixSpace(ZZ, n)([R.coordinates(_) for _ in self.basis()])
        D = M.hermite_form()
        d = [D[i, i] for i in range(n)]
        # Balanced range of length d_i centred near 0, matching the canonical
        # representatives produced by reduce().
        coord_ranges = [list(range((-di+2)//2,(di+2)//2)) for di in d]
        # Map each coordinate vector back to a field element.
        combo = lambda c: sum(c[i] * Rbasis[i] for i in range(n))
        return xmrange_iter(coord_ranges, combo)
def invertible_residues(self, reduce=True):
r"""
Returns a iterator through a list of invertible residues
modulo this integral ideal.
An error is raised if this fractional ideal is not integral.
INPUT:
- ``reduce`` - bool. If True (default), use ``small_residue`` to get
small representatives of the residues.
OUTPUT:
- An iterator through a list of invertible residues modulo this ideal
`I`, i.e. a list of elements in the ring of integers `R` representing
the elements of `(R/I)^*`.
ALGORITHM: Use :pari:`idealstar` to find the group structure and
generators of the multiplicative group modulo the ideal.
EXAMPLES::
sage: K.<i>=NumberField(x^2+1)
sage: ires = K.ideal(2).invertible_residues(); ires
xmrange_iter([[0, 1]], <function ...<lambda> at 0x...>)
sage: list(ires)
[1, -i]
sage: list(K.ideal(2+i).invertible_residues())
[1, 2, 4, 3]
sage: list(K.ideal(i).residues())
[0]
sage: list(K.ideal(i).invertible_residues())
[1]
sage: I = K.ideal(3+6*i)
sage: units=I.invertible_residues()
sage: len(list(units))==I.euler_phi()
True
sage: K.<a> = NumberField(x^3-10)
sage: I = K.ideal(a-1)
sage: len(list(I.invertible_residues())) == I.euler_phi()
True
sage: K.<z> = CyclotomicField(10)
sage: len(list(K.primes_above(3)[0].invertible_residues()))
80
TESTS:
Check that the integrality is not lost, cf. :trac:`30801`::
sage: K.<a> = NumberField(x^2 + x + 1)
sage: all(x.is_integral() for x in K.ideal(8).invertible_residues())
True
AUTHOR: John Cremona
"""
return self.invertible_residues_mod(subgp_gens=None, reduce=reduce)
    def invertible_residues_mod(self, subgp_gens=[], reduce=True):
        r"""
        Return a iterator through a list of representatives for the invertible
        residues modulo this integral ideal, modulo the subgroup generated by
        the elements in the list ``subgp_gens``.

        INPUT:

        - ``subgp_gens`` - either None or a list of elements of the number
          field of self.  These need not be integral, but should be coprime to
          the ideal self.  If the list is empty or None, the function returns
          an iterator through a list of representatives for the invertible
          residues modulo the integral ideal self.

        - ``reduce`` - bool.  If True (default), use ``small_residues`` to
          get small representatives of the residues.

        .. note::

           See also invertible_residues() for a simpler version without the subgroup.

        OUTPUT:

        - An iterator through a list of representatives for the invertible
          residues modulo self and modulo the group generated by
          ``subgp_gens``, i.e. a list of elements in the ring of integers `R`
          representing the elements of `(R/I)^*/U`, where `I` is this ideal and
          `U` is the subgroup of `(R/I)^*` generated by ``subgp_gens``.

        EXAMPLES:

        ::

            sage: k.<a> = NumberField(x^2 +23)
            sage: I = k.ideal(a)
            sage: list(I.invertible_residues_mod([-1]))
            [1, 5, 2, 10, 4, 20, 8, 17, 16, 11, 9]
            sage: list(I.invertible_residues_mod([1/2]))
            [1, 5]
            sage: list(I.invertible_residues_mod([23]))
            Traceback (most recent call last):
            ...
            TypeError: the element must be invertible mod the ideal

        ::

            sage: K.<a> = NumberField(x^3-10)
            sage: I = K.ideal(a-1)
            sage: len(list(I.invertible_residues_mod([]))) == I.euler_phi()
            True
            sage: I = K.ideal(1)
            sage: list(I.invertible_residues_mod([]))
            [1]

        ::

            sage: K.<z> = CyclotomicField(10)
            sage: len(list(K.primes_above(3)[0].invertible_residues_mod([])))
            80

        AUTHOR: Maite Aranes.
        """
        # Trivial group: the unit ideal has exactly one residue class.
        if self.norm() == 1:
            return xmrange_iter([[1]], lambda l: l[0])
        # Structure of (O_K/I)^*: invariants and generators (flag=2 computes
        # the generators).
        G = self.idealstar(2)
        invs = G.invariants()
        g = G.gens_values()
        n = G.ngens()
        from sage.matrix.all import Matrix, diagonal_matrix
        # Relation matrix of the quotient group: the diagonal gives the
        # relations of (O_K/I)^*, and each subgroup generator adds a row of
        # its discrete logarithm.
        M = diagonal_matrix(ZZ, invs)
        if subgp_gens:
            Units = Matrix(ZZ, [self.ideallog(_) for _ in subgp_gens])
            M = M.stack(Units)
        # Smith normal form A = U*M*V exposes the cyclic structure of the
        # quotient; V^-1 converts Smith coordinates back to the original
        # generators.
        A, U, V = M.smith_form()
        V = V.inverse()
        new_basis = [prod([g[j]**(V[i, j] % invs[j]) for j in range(n)]) for i in range(n)]
        # Enumerate exponent vectors bounded by the Smith diagonal, optionally
        # shrinking each representative with small_residue.
        if reduce:
            combo = lambda c: self.small_residue(prod(new_basis[i] ** c[i]
                                                      for i in range(n)))
        else:
            combo = lambda c: prod(new_basis[i] ** c[i] for i in range(n))
        coord_ranges = [list(range(A[i, i])) for i in range(n)]
        return xmrange_iter(coord_ranges, combo)
def denominator(self):
r"""
Return the denominator ideal of this fractional ideal. Each
fractional ideal has a unique expression as `N/D` where `N`,
`D` are coprime integral ideals; the denominator is `D`.
EXAMPLES::
sage: K.<i>=NumberField(x^2+1)
sage: I = K.ideal((3+4*i)/5); I
Fractional ideal (4/5*i + 3/5)
sage: I.denominator()
Fractional ideal (2*i + 1)
sage: I.numerator()
Fractional ideal (-i - 2)
sage: I.numerator().is_integral() and I.denominator().is_integral()
True
sage: I.numerator() + I.denominator() == K.unit_ideal()
True
sage: I.numerator()/I.denominator() == I
True
"""
try:
return self._denom_ideal
except AttributeError:
pass
self._denom_ideal = (self + self.number_field().unit_ideal())**(-1)
return self._denom_ideal
def numerator(self):
r"""
Return the numerator ideal of this fractional ideal.
Each fractional ideal has a unique expression as `N/D` where `N`,
`D` are coprime integral ideals. The numerator is `N`.
EXAMPLES::
sage: K.<i>=NumberField(x^2+1)
sage: I = K.ideal((3+4*i)/5); I
Fractional ideal (4/5*i + 3/5)
sage: I.denominator()
Fractional ideal (2*i + 1)
sage: I.numerator()
Fractional ideal (-i - 2)
sage: I.numerator().is_integral() and I.denominator().is_integral()
True
sage: I.numerator() + I.denominator() == K.unit_ideal()
True
sage: I.numerator()/I.denominator() == I
True
"""
try:
return self._num_ideal
except AttributeError:
pass
self._num_ideal = self * self.denominator()
return self._num_ideal
def is_coprime(self, other):
"""
Returns True if this ideal is coprime to the other, else False.
INPUT:
- ``other`` -- another ideal of the same field, or generators
of an ideal.
OUTPUT:
True if self and other are coprime, else False.
.. note::
This function works for fractional ideals as well as
integral ideals.
AUTHOR: John Cremona
EXAMPLES::
sage: K.<i>=NumberField(x^2+1)
sage: I = K.ideal(2+i)
sage: J = K.ideal(2-i)
sage: I.is_coprime(J)
True
sage: (I^-1).is_coprime(J^3)
True
sage: I.is_coprime(5)
False
sage: I.is_coprime(6+i)
True
See :trac:`4536`::
sage: E.<a> = NumberField(x^5 + 7*x^4 + 18*x^2 + x - 3)
sage: OE = E.ring_of_integers()
sage: i,j,k = [u[0] for u in factor(3*OE)]
sage: (i/j).is_coprime(j/k)
False
sage: (j/k).is_coprime(j/k)
False
sage: F.<a, b> = NumberField([x^2 - 2, x^2 - 3])
sage: F.ideal(3 - a*b).is_coprime(F.ideal(3))
False
"""
# Catch invalid inputs by making sure that we can make an ideal out of other.
K = self.number_field()
one = K.unit_ideal()
other = K.ideal(other)
if self.is_integral() and other.is_integral():
if gcd(ZZ(self.absolute_norm()), ZZ(other.absolute_norm())) == 1:
return True
else:
return self+other == one
# This special case is necessary since the zero ideal is not a
# fractional ideal!
if other.absolute_norm() == 0:
return self == one
D1 = self.denominator()
N1 = self.numerator()
D2 = other.denominator()
N2 = other.numerator()
return N1+N2==one and N1+D2==one and D1+N2==one and D1+D2==one
def idealcoprime(self, J):
"""
Returns l such that l*self is coprime to J.
INPUT:
- ``J`` - another integral ideal of the same field as self, which must also be integral.
OUTPUT:
- ``l`` - an element such that l*self is coprime to the ideal J
TODO: Extend the implementation to non-integral ideals.
EXAMPLES::
sage: k.<a> = NumberField(x^2 + 23)
sage: A = k.ideal(a+1)
sage: B = k.ideal(3)
sage: A.is_coprime(B)
False
sage: lam = A.idealcoprime(B)
sage: lam # representation depends, not tested
-1/6*a + 1/6
sage: (lam*A).is_coprime(B)
True
ALGORITHM: Uses Pari function :pari:`idealcoprime`.
TESTS:
Check the above doctests, where the representation
depends on the PARI version::
sage: k.<a> = NumberField(x^2 + 23)
sage: A = k.ideal(a+1)
sage: B = k.ideal(3)
sage: lam = A.idealcoprime(B)
sage: lam in (-1/6*a + 1/6, 1/6*a - 1/6)
True
"""
if not (self.is_integral() and J.is_integral()):
raise ValueError("Both ideals must be integral.")
k = self.number_field()
# Catch invalid inputs by making sure that J is an ideal of the same field as self:
assert k == J.number_field()
l = k.pari_nf().idealcoprime(self.pari_hnf(), J.pari_hnf())
return k(l)
def small_residue(self, f):
r"""
Given an element `f` of the ambient number field, returns an
element `g` such that `f - g` belongs to the ideal self (which
must be integral), and `g` is small.
.. note::
The reduced representative returned is not uniquely determined.
ALGORITHM: Uses Pari function :pari:`nfeltreduce`.
EXAMPLES:
::
sage: k.<a> = NumberField(x^2 + 5)
sage: I = k.ideal(a)
sage: I.small_residue(14)
4
::
sage: K.<a> = NumberField(x^5 + 7*x^4 + 18*x^2 + x - 3)
sage: I = K.ideal(5)
sage: I.small_residue(a^2 -13)
a^2 + 5*a - 3
"""
if not self.is_integral():
raise ValueError("The ideal must be integral")
k = self.number_field()
return k(k.pari_nf().nfeltreduce(f, self.pari_hnf()))
def _pari_bid_(self, flag=1):
"""
Returns the pari structure ``bid`` associated to the ideal self.
INPUT:
- ``flag`` - when flag=2 it computes the generators of the group
`(O_K/I)^*`, which takes more time. By default
flag=1 (no generators are computed).
OUTPUT:
- The pari special structure ``bid``.
EXAMPLES::
sage: k.<a> = NumberField(x^4 + 13)
sage: I = k.ideal(2, a^2 + 1)
sage: hasattr(I, '_bid')
False
sage: bid = I._pari_bid_()
sage: hasattr(I, '_bid')
True
sage: bid.getattr('clgp')
[2, [2]]
"""
from sage.libs.pari.all import PariError
try:
bid = self._bid
if flag == 2:
# Try to access generators, we get PariError if this fails.
bid.bid_get_gen()
except (AttributeError, PariError):
k = self.number_field()
bid = k.pari_nf().idealstar(self.pari_hnf(), flag)
self._bid = bid
return bid
def idealstar(self, flag=1):
r"""
Returns the finite abelian group `(O_K/I)^*`, where I is the ideal self
of the number field K, and `O_K` is the ring of integers of K.
INPUT:
- ``flag`` (int default 1) -- when ``flag`` =2, it also
computes the generators of the group `(O_K/I)^*`, which
takes more time. By default ``flag`` =1 (no generators are
computed). In both cases the special pari structure ``bid``
is computed as well. If ``flag`` =0 (deprecated) it computes
only the group structure of `(O_K/I)^*` (with generators)
and not the special ``bid`` structure.
OUTPUT:
The finite abelian group `(O_K/I)^*`.
.. note::
Uses the pari function :pari:`idealstar`. The pari function outputs
a special ``bid`` structure which is stored in the internal
field ``_bid`` of the ideal (when flag=1,2). The special structure
``bid`` is used in the pari function :pari:`ideallog`
to compute discrete logarithms.
EXAMPLES::
sage: k.<a> = NumberField(x^3 - 11)
sage: A = k.ideal(5)
sage: G = A.idealstar(); G
Multiplicative Abelian group isomorphic to C24 x C4
sage: G.gens()
(f0, f1)
sage: G = A.idealstar(2)
sage: G.gens()
(f0, f1)
sage: G.gens_values() # random output
(2*a^2 - 1, 2*a^2 + 2*a - 2)
sage: all(G.gen(i).value() in k for i in range(G.ngens()))
True
TESTS::
sage: k.<a> = NumberField(x^2 + 1)
sage: k.ideal(a+1).idealstar(2)
Trivial Abelian group
ALGORITHM: Uses Pari function :pari:`idealstar`
"""
k = self.number_field()
if flag==0 and not hasattr(self, '_bid'):
G = k.pari_nf().idealstar(self.pari_hnf(), 0)
else:
G = self._pari_bid_(flag)
inv = [ZZ(c) for c in G.bid_get_cyc()]
if flag == 2 or flag == 0:
from sage.groups.abelian_gps.values import AbelianGroupWithValues
g = G.bid_get_gen()
AG = AbelianGroupWithValues(tuple(map(k, g)), inv, values_group=k)
else:
from sage.groups.abelian_gps.abelian_group import AbelianGroup
AG = AbelianGroup(inv)
return AG
    def ideallog(self, x, gens=None, check=True):
        r"""
        Return the discrete logarithm of ``x`` with respect to the
        generators given in the ``bid`` structure of this ideal, or with
        respect to the generators ``gens`` if these are given.

        INPUT:

        - ``x`` -- a non-zero element of the number field of ``self``,
          which must have valuation equal to 0 at all prime ideals in
          the support of the ideal ``self``.

        - ``gens`` -- a list of elements of the number field which
          generate `(R/I)^*`, where `R` is the ring of integers of the
          field and `I` is this ideal, or ``None``.  If ``None``, use
          the generators calculated by :meth:`~idealstar`.

        - ``check`` -- if ``True``, do a consistency check on the
          results.  Ignored if ``gens`` is ``None``.

        OUTPUT:

        - ``l`` -- a list of non-negative integers `(x_i)` such that
          `x = \prod_i g_i^{x_i}` in `(R/I)^*`, where `x_i` are the
          generators, and the list `(x_i)` is lexicographically minimal
          with respect to this requirement.  If the `x_i` generate
          independent cyclic factors of order `d_i`, as is the case for
          the default generators calculated by :meth:`~idealstar`, this
          just means that `0 \le x_i < d_i`.

        A ``ValueError`` will be raised if the elements specified in
        ``gens`` do not in fact generate the unit group (even if the
        element `x` is in the subgroup they generate).

        EXAMPLES::

            sage: k.<a> = NumberField(x^3 - 11)
            sage: A = k.ideal(5)
            sage: G = A.idealstar(2)
            sage: l = A.ideallog(a^2 +3)
            sage: r = G(l).value()
            sage: (a^2 + 3) - r in A
            True

        Examples with custom generators::

            sage: K.<a> = NumberField(x^2 - 7)
            sage: I = K.ideal(17)
            sage: I.ideallog(a + 7, [1+a, 2])
            [10, 3]
            sage: I.ideallog(a + 7, [2, 1+a])
            [0, 118]

        A non-example::

            sage: I.ideallog(a + 7, [2])
            Traceback (most recent call last):
            ...
            ValueError: Given elements do not generate unit group -- they generate a subgroup of index 36

        ALGORITHM: Uses the PARI function :pari:`ideallog`, and (if
        ``gens`` is not ``None``) a Hermite normal form calculation to
        express the result in terms of the generators ``gens``.
        """
        # Sanitise input: x must be a unit modulo self, i.e. have
        # valuation 0 at every prime in the support of the ideal.
        k = self.number_field()
        if not all(k(x).valuation(p) == 0 for p, e in self.factor()):
            raise TypeError("the element must be invertible mod the ideal")
        # Calculate the ideal log w.r.t. the standard generators.  It is
        # important to call _pari_bid_() with flag=2 so a fixed basis of
        # generators is used: the logarithm depends on the choice of basis.
        L = [ZZ(_) for _ in k.pari_nf().ideallog(x, self._pari_bid_(2))]
        if gens is None:
            return L
        # Otherwise translate the answer in terms of the given gens.
        G = self.idealstar(2)
        invs = G.invariants()
        from sage.matrix.all import matrix, identity_matrix, zero_matrix, diagonal_matrix, block_matrix
        # We use Hermite normal form twice: once to express the standard
        # generators in terms of the new ones (independently of x) and once to
        # reduce the resulting logarithm of x so it is lexicographically
        # minimal.
        mat = matrix(ZZ, [self.ideallog(_) for _ in gens]).augment(identity_matrix(ZZ, len(gens)))
        mat = mat.stack( diagonal_matrix(ZZ, invs).augment(zero_matrix(ZZ, len(invs), len(gens))))
        hmat = mat.hermite_form()
        # The top-left corner must be the identity, otherwise gens only
        # generate a proper subgroup, of index det(A).
        A = hmat[0:len(invs), 0:len(invs)]
        if A != identity_matrix(len(invs)):
            raise ValueError("Given elements do not generate unit group -- they generate a subgroup of index %s" % A.det())
        # B expresses the standard generators in terms of gens; C is the
        # matrix of relations satisfied by gens.
        B = hmat[0:len(invs), len(invs):]
        C = hmat[len(invs):, len(invs):]
        # M is the log of x w.r.t. gens (not yet reduced); a second HNF
        # reduces it modulo the relations C to the lexicographically
        # minimal representative.
        M = (matrix(ZZ, L) * B)
        N = block_matrix(2, 2, [[identity_matrix(1), M], [zero_matrix(len(gens), 1), C]], subdivide=False)
        ans = N.hermite_form()[0, 1:].list()
        if check:
            # Verify: prod(gens[i]^ans[i]) must agree with x modulo the
            # ideal (after clearing the denominator of x mod the norm).
            from sage.rings.all import Zmod
            t = 1
            for i in range(len(ans)):
                t = self.reduce(t * gens[i]**ans[i])
            assert t == self.reduce(x * x.denominator() * (~Zmod(self.norm())(x.denominator())).lift())
        return ans
def element_1_mod(self, other):
r"""
Returns an element `r` in this ideal such that `1-r` is in other
An error is raised if either ideal is not integral of if they
are not coprime.
INPUT:
- ``other`` -- another ideal of the same field, or generators
of an ideal.
OUTPUT:
An element `r` of the ideal self such that `1-r` is in the ideal other
AUTHOR: Maite Aranes (modified to use PARI's :pari:`idealaddtoone` by Francis Clarke)
EXAMPLES::
sage: K.<a> = NumberField(x^3-2)
sage: A = K.ideal(a+1); A; A.norm()
Fractional ideal (a + 1)
3
sage: B = K.ideal(a^2-4*a+2); B; B.norm()
Fractional ideal (a^2 - 4*a + 2)
68
sage: r = A.element_1_mod(B); r
-33
sage: r in A
True
sage: 1-r in B
True
TESTS::
sage: K.<a> = NumberField(x^3-2)
sage: A = K.ideal(a+1)
sage: B = K.ideal(a^2-4*a+1); B; B.norm()
Fractional ideal (a^2 - 4*a + 1)
99
sage: A.element_1_mod(B)
Traceback (most recent call last):
...
TypeError: Fractional ideal (a + 1), Fractional ideal (a^2 - 4*a + 1) are not coprime ideals
sage: B = K.ideal(1/a); B
Fractional ideal (1/2*a^2)
sage: A.element_1_mod(B)
Traceback (most recent call last):
...
TypeError: Fractional ideal (1/2*a^2) is not an integral ideal
"""
if not self.is_integral():
raise TypeError("%s is not an integral ideal" % self)
# Catch invalid inputs by making sure that we can make an ideal out of other.
K = self.number_field()
other = K.ideal(other)
if not other.is_integral():
raise TypeError("%s is not an integral ideal" % other)
if not self.is_coprime(other):
raise TypeError("%s, %s are not coprime ideals" % (self, other))
bnf = K.pari_bnf()
r = bnf.idealaddtoone(self.pari_hnf(), other.pari_hnf())[0]
return K(r)
def euler_phi(self):
r"""
Returns the Euler `\varphi`-function of this integral ideal.
This is the order of the multiplicative group of the quotient
modulo the ideal.
An error is raised if the ideal is not integral.
EXAMPLES::
sage: K.<i>=NumberField(x^2+1)
sage: I = K.ideal(2+i)
sage: [r for r in I.residues() if I.is_coprime(r)]
[-2*i, -i, i, 2*i]
sage: I.euler_phi()
4
sage: J = I^3
sage: J.euler_phi()
100
sage: len([r for r in J.residues() if J.is_coprime(r)])
100
sage: J = K.ideal(3-2*i)
sage: I.is_coprime(J)
True
sage: I.euler_phi()*J.euler_phi() == (I*J).euler_phi()
True
sage: L.<b> = K.extension(x^2 - 7)
sage: L.ideal(3).euler_phi()
64
"""
if not self.is_integral():
raise ValueError("euler_phi only defined for integral ideals")
return prod([(np-1)*np**(e-1) \
for np,e in [(p.absolute_norm(),e) \
for p,e in self.factor()]])
def prime_to_S_part(self,S):
r"""
Return the part of this fractional ideal which is coprime to
the prime ideals in the list ``S``.
.. NOTE::
This function assumes that `S` is a list of prime ideals,
but does not check this. This function will fail if `S` is
not a list of prime ideals.
INPUT:
- `S` -- a list of prime ideals
OUTPUT:
A fractional ideal coprime to the primes in `S`, whose prime
factorization is that of ``self`` with the primes in `S`
removed.
EXAMPLES::
sage: K.<a> = NumberField(x^2-23)
sage: I = K.ideal(24)
sage: S = [K.ideal(-a+5),K.ideal(5)]
sage: I.prime_to_S_part(S)
Fractional ideal (3)
sage: J = K.ideal(15)
sage: J.prime_to_S_part(S)
Fractional ideal (3)
sage: K.<a> = NumberField(x^5-23)
sage: I = K.ideal(24)
sage: S = [K.ideal(15161*a^4 + 28383*a^3 + 53135*a^2 + 99478*a + 186250),K.ideal(2*a^4 + 3*a^3 + 4*a^2 + 15*a + 11), K.ideal(101)]
sage: I.prime_to_S_part(S)
Fractional ideal (24)
"""
a = self
for p in S:
n = a.valuation(p)
a = a*p**(-n)
return a
def is_S_unit(self,S):
r"""
Return True if this fractional ideal is a unit with respect to the list of primes ``S``.
INPUT:
- `S` - a list of prime ideals (not checked if they are
indeed prime).
.. note::
This function assumes that `S` is a list of prime ideals,
but does not check this. This function will fail if `S` is
not a list of prime ideals.
OUTPUT:
True, if the ideal is an `S`-unit: that is, if the valuations of
the ideal at all primes not in `S` are zero. False, otherwise.
EXAMPLES::
sage: K.<a> = NumberField(x^2+23)
sage: I = K.ideal(2)
sage: P = I.factor()[0][0]
sage: I.is_S_unit([P])
False
"""
return self.prime_to_S_part(S).is_trivial()
def is_S_integral(self,S):
r"""
Return True if this fractional ideal is integral with respect to the list of primes ``S``.
INPUT:
- `S` - a list of prime ideals (not checked if they are indeed
prime).
.. note::
This function assumes that `S` is a list of prime ideals,
but does not check this. This function will fail if `S` is
not a list of prime ideals.
OUTPUT:
True, if the ideal is `S`-integral: that is, if the valuations
of the ideal at all primes not in `S` are non-negative. False,
otherwise.
EXAMPLES::
sage: K.<a> = NumberField(x^2+23)
sage: I = K.ideal(1/2)
sage: P = K.ideal(2,1/2*a - 1/2)
sage: I.is_S_integral([P])
False
sage: J = K.ideal(1/5)
sage: J.is_S_integral([K.ideal(5)])
True
"""
if self.is_integral():
return True
return self.prime_to_S_part(S).is_integral()
def prime_to_idealM_part(self, M):
r"""
Version for integral ideals of the ``prime_to_m_part`` function over `\ZZ`.
Returns the largest divisor of self that is coprime to the ideal ``M``.
INPUT:
- ``M`` -- an integral ideal of the same field, or generators of an ideal
OUTPUT:
An ideal which is the largest divisor of self that is coprime to `M`.
AUTHOR: Maite Aranes
EXAMPLES::
sage: k.<a> = NumberField(x^2 + 23)
sage: I = k.ideal(a+1)
sage: M = k.ideal(2, 1/2*a - 1/2)
sage: J = I.prime_to_idealM_part(M); J
Fractional ideal (12, 1/2*a + 13/2)
sage: J.is_coprime(M)
True
sage: J = I.prime_to_idealM_part(2); J
Fractional ideal (3, 1/2*a + 1/2)
sage: J.is_coprime(M)
True
"""
# Catch invalid inputs by making sure that we can make an ideal out of M.
k = self.number_field()
M = k.ideal(M)
if not self.is_integral or not M.is_integral():
raise TypeError("prime_to_idealM_part defined only for integral ideals")
if self.is_coprime(M):
return self
G = self + M
I = self
while not G.is_trivial():
I = I/G
G = I + G
return I
    def _p_quotient(self, p):
        """
        Internal helper used, for example, to compute the quotient of
        the ring of integers by a prime ideal.

        INPUT:

        - ``p`` -- a prime number contained in ``self``.

        OUTPUT:

        - ``V`` -- a vector space of characteristic `p`

        - ``quo`` -- a partially defined quotient homomorphism from the
          ambient number field to ``V``

        - ``lift`` -- a section of ``quo``.

        EXAMPLES::

            sage: K.<i> = NumberField(x^2 + 1); O = K.maximal_order()
            sage: I = K.factor(3)[0][0]
            sage: Q, quo, lift = I._p_quotient(3); Q
            Vector space quotient V/W of dimension 2 over Finite Field of size 3 where
            V: Vector space of dimension 2 over Finite Field of size 3
            W: Vector space of degree 2 and dimension 0 over Finite Field of size 3
            Basis matrix:
            []

        We do an example with a split prime and show both the quo
        and lift maps::

            sage: K.<i> = NumberField(x^2 + 1); O = K.maximal_order()
            sage: I = K.factor(5)[0][0]
            sage: Q,quo,lift = I._p_quotient(5)
            sage: lift(quo(i))
            3
            sage: lift(quo(i)) - i in I
            True
            sage: quo(lift(Q.0))
            (1)
        """
        # All the work is done by the module-level helper.
        return quotient_char_p(self, p)
def residue_field(self, names=None):
r"""
Return the residue class field of this fractional ideal, which
must be prime.
EXAMPLES::
sage: K.<a> = NumberField(x^3-7)
sage: P = K.ideal(29).factor()[0][0]
sage: P.residue_field()
Residue field in abar of Fractional ideal (2*a^2 + 3*a - 10)
sage: P.residue_field('z')
Residue field in z of Fractional ideal (2*a^2 + 3*a - 10)
Another example::
sage: K.<a> = NumberField(x^3-7)
sage: P = K.ideal(389).factor()[0][0]; P
Fractional ideal (389, a^2 - 44*a - 9)
sage: P.residue_class_degree()
2
sage: P.residue_field()
Residue field in abar of Fractional ideal (389, a^2 - 44*a - 9)
sage: P.residue_field('z')
Residue field in z of Fractional ideal (389, a^2 - 44*a - 9)
sage: FF.<w> = P.residue_field()
sage: FF
Residue field in w of Fractional ideal (389, a^2 - 44*a - 9)
sage: FF((a+1)^390)
36
sage: FF(a)
w
An example of reduction maps to the residue field: these are
defined on the whole valuation ring, i.e. the subring of the
number field consisting of elements with non-negative
valuation. This shows that the issue raised in :trac:`1951`
has been fixed::
sage: K.<i> = NumberField(x^2 + 1)
sage: P1, P2 = [g[0] for g in K.factor(5)]; (P1,P2)
(Fractional ideal (-i - 2), Fractional ideal (2*i + 1))
sage: a = 1/(1+2*i)
sage: F1, F2 = [g.residue_field() for g in [P1,P2]]; (F1,F2)
(Residue field of Fractional ideal (-i - 2), Residue field of Fractional ideal (2*i + 1))
sage: a.valuation(P1)
0
sage: F1(i/7)
4
sage: F1(a)
3
sage: a.valuation(P2)
-1
sage: F2(a)
Traceback (most recent call last):
...
ZeroDivisionError: Cannot reduce field element -2/5*i + 1/5 modulo Fractional ideal (2*i + 1): it has negative valuation
An example with a relative number field::
sage: L.<a,b> = NumberField([x^2 + 1, x^2 - 5])
sage: p = L.ideal((-1/2*b - 1/2)*a + 1/2*b - 1/2)
sage: R = p.residue_field(); R
Residue field in abar of Fractional ideal ((-1/2*b - 1/2)*a + 1/2*b - 1/2)
sage: R.cardinality()
9
sage: R(17)
2
sage: R((a + b)/17)
abar
sage: R(1/b)
2*abar
We verify that :trac:`8721` is fixed::
sage: L.<a, b> = NumberField([x^2 - 3, x^2 - 5])
sage: L.ideal(a).residue_field()
Residue field in abar of Fractional ideal (a)
"""
if not self.is_prime():
raise ValueError("The ideal must be prime")
return self.number_field().residue_field(self, names = names)
def residue_class_degree(self):
r"""
Return the residue class degree of this fractional ideal,
assuming it is prime. Otherwise, raise a ValueError.
The residue class degree of a prime ideal `I` is the degree of
the extension `O_K/I` of its prime subfield.
EXAMPLES::
sage: K.<a> = NumberField(x^5 + 2); K
Number Field in a with defining polynomial x^5 + 2
sage: f = K.factor(19); f
(Fractional ideal (a^2 + a - 3)) * (Fractional ideal (2*a^4 + a^2 - 2*a + 1)) * (Fractional ideal (a^2 + a - 1))
sage: [i.residue_class_degree() for i, _ in f]
[2, 2, 1]
"""
return ZZ(self.pari_prime().pr_get_f())
def ray_class_number(self):
r"""
Return the order of the ray class group modulo this ideal. This is a
wrapper around Pari's :pari:`bnrclassno` function.
EXAMPLES::
sage: K.<z> = QuadraticField(-23)
sage: p = K.primes_above(3)[0]
sage: p.ray_class_number()
3
sage: x = polygen(K)
sage: L.<w> = K.extension(x^3 - z)
sage: I = L.ideal(5)
sage: I.ray_class_number()
5184
"""
bid = self._pari_bid_()
return ZZ(self.number_field().pari_bnf().bnrclassno(bid))
def is_NumberFieldFractionalIdeal(x):
    """
    Return ``True`` if ``x`` is a fractional ideal of a number field.

    Note that the zero ideal is *not* a fractional ideal, so this
    returns ``False`` for it.

    EXAMPLES::

        sage: from sage.rings.number_field.number_field_ideal import is_NumberFieldFractionalIdeal
        sage: is_NumberFieldFractionalIdeal(2/3)
        False
        sage: is_NumberFieldFractionalIdeal(ideal(5))
        False
        sage: k.<a> = NumberField(x^2 + 2)
        sage: I = k.ideal([a + 1]); I
        Fractional ideal (a + 1)
        sage: is_NumberFieldFractionalIdeal(I)
        True
        sage: Z = k.ideal(0); Z
        Ideal (0) of Number Field in a with defining polynomial x^2 + 2
        sage: is_NumberFieldFractionalIdeal(Z)
        False
    """
    return isinstance(x, NumberFieldFractionalIdeal)
class QuotientMap:
    """
    Hold the data needed by quotient maps from number field orders to
    residue fields.

    These are only partial maps: the exact domain is the appropriate
    valuation ring.  For examples, see
    :meth:`~sage.rings.number_field.number_field_ideal.NumberFieldFractionalIdeal.residue_field`.
    """
    def __init__(self, K, M_OK_change, Q, I):
        """
        Initialize this QuotientMap.

        INPUT:

        - ``K`` -- the ambient number field
        - ``M_OK_change`` -- change-of-basis matrix into coordinates on
          the ring of integers
        - ``Q`` -- the target quotient vector space
        - ``I`` -- the ideal being quotiented by

        EXAMPLES::

            sage: K.<a> = NumberField(x^3 + 4)
            sage: f = K.ideal(1 + a^2/2).residue_field().reduction_map(); f # indirect doctest
            Partially defined reduction map:
            From: Number Field in a with defining polynomial x^3 + 4
            To: Residue field of Fractional ideal (1/2*a^2 + 1)
        """
        self.__K = K
        self.__I = I
        self.__Q = Q
        self.__M_OK_change = M_OK_change
        # Keep the coordinate isomorphism K <-> Q^n for use in __call__.
        self.__L, self.__from_L, self.__to_L = K.absolute_vector_space()

    def __call__(self, x):
        """
        Apply this QuotientMap to an element of the number field.

        INPUT:

        - ``x`` -- an element of the field

        EXAMPLES::

            sage: K.<a> = NumberField(x^3 + 4)
            sage: f = K.ideal(1 + a^2/2).residue_field().reduction_map()
            sage: f(a)
            2
        """
        # Coordinates of x in the ambient vector space, converted to
        # coordinates with respect to the basis of O_K.
        coords = self.__to_L(x)
        return self.__Q(list(coords * self.__M_OK_change))

    def __repr__(self):
        """
        Return a string representation of this QuotientMap.

        EXAMPLES::

            sage: K.<a> = NumberField(x^3 + 4)
            sage: f = K.ideal(1 + a^2/2).residue_field().reduction_map()
            sage: repr(f)
            'Partially defined reduction map:\n  From: Number Field in a with defining polynomial x^3 + 4\n  To:   Residue field of Fractional ideal (1/2*a^2 + 1)'
        """
        return "Partially defined quotient map from %s to an explicit vector space representation for the quotient of the ring of integers by (p,I) for the ideal I=%s." % (self.__K, self.__I)
class LiftMap:
    """
    Hold the data needed by lifting maps from residue fields to number
    field orders.
    """
    def __init__(self, OK, M_OK_map, Q, I):
        """
        Initialize this LiftMap.

        INPUT:

        - ``OK`` -- the ring of integers being lifted into
        - ``M_OK_map`` -- basis matrix of ``OK`` as a ZZ-module
        - ``Q`` -- the quotient vector space being lifted from
        - ``I`` -- the ideal that was quotiented by

        EXAMPLES::

            sage: K.<a> = NumberField(x^3 + 4)
            sage: I = K.ideal(1 + a^2/2)
            sage: f = I.residue_field().lift_map()
        """
        self.__OK = OK
        self.__I = I
        self.__Q = Q
        self.__M_OK_map = M_OK_map
        # Absolute generator of the field, used to rebuild elements from
        # their coefficient vectors.
        self.__Kgen = OK.number_field().absolute_generator()

    def __call__(self, x):
        """
        Apply this LiftMap to an element of the residue field.

        EXAMPLES::

            sage: K.<a> = NumberField(x^3 + 4)
            sage: R = K.ideal(1 + a^2/2).residue_field()
            sage: f = R.lift_map()
            sage: f(R(a/17))
            1

        A relative example, which used to fail but is fixed by :trac:`8721`::

            sage: L.<a, b> = NumberField([x^2 + 1, x^2 - 5])
            sage: p = L.ideal(2*a + 3)
            sage: V, to_V, from_V = p._p_quotient(13)
            sage: from_V(V.0)
            (-1/2*b + 7/2)*a - 1/2*b + 3/2
        """
        # Lift from the quotient to OK tensor F_p, then to ZZ^n (= OK).
        residue_lift = self.__Q.lift(x)
        integer_vector = residue_lift.lift()
        # Write the result back in terms of powers of the generator of K.
        coeffs = (integer_vector * self.__M_OK_map).list()
        gen = self.__Kgen
        return self.__OK(sum(c * gen ** i for i, c in enumerate(coeffs)))

    def __repr__(self):
        """
        Return a string representation of this LiftMap.

        EXAMPLES::

            sage: K.<a> = NumberField(x^3 + 4)
            sage: R = K.ideal(1 + a^2/2).residue_field()
            sage: repr(R.lift_map())
            'Lifting map:\n  From: Residue field of Fractional ideal (1/2*a^2 + 1)\n  To:   Maximal Order in Number Field in a with defining polynomial x^3 + 4'
        """
        return "Lifting map to %s from quotient of integers by %s" % (self.__OK, self.__I)
def quotient_char_p(I, p):
    r"""
    Given an integral ideal `I` that contains a prime number `p`, compute
    a vector space `V = (O_K \mod p) / (I \mod p)`, along with a
    homomorphism `O_K \to V` and a section `V \to O_K`.

    EXAMPLES::

        sage: from sage.rings.number_field.number_field_ideal import quotient_char_p
        sage: K.<i> = NumberField(x^2 + 1); O = K.maximal_order(); I = K.fractional_ideal(15)
        sage: quotient_char_p(I, 5)[0]
        Vector space quotient V/W of dimension 2 over Finite Field of size 5 where
        V: Vector space of dimension 2 over Finite Field of size 5
        W: Vector space of degree 2 and dimension 0 over Finite Field of size 5
        Basis matrix:
        []
        sage: I = K.factor(13)[0][0]; I
        Fractional ideal (-3*i - 2)
        sage: I.residue_class_degree()
        1
        sage: quotient_char_p(I, 13)[0]
        Vector space quotient V/W of dimension 1 over Finite Field of size 13 where
        V: Vector space of dimension 2 over Finite Field of size 13
        W: Vector space of degree 2 and dimension 1 over Finite Field of size 13
        Basis matrix:
        [1 8]
    """
    if not I.is_integral():
        raise ValueError("I must be an integral ideal.")

    K = I.number_field()
    # In the long run only a p-maximal order is really needed here.
    OK = K.maximal_order()

    # We compute with OK / I viewed as a quotient of two F_p vector
    # spaces.  Step 1: write each ZZ-basis vector of I in terms of the
    # ZZ-basis of OK.
    basis_mat = OK.free_module().basis_matrix()
    to_OK_coords = basis_mat**(-1)
    ideal_rows = I.free_module().basis_matrix() * to_OK_coords

    # Step 2: identify "OK mod p" with GF(p)^n and reduce the rows
    # computed above mod p to get "I mod p" inside it.
    GFp = FiniteField(p)
    ambient = GFp**K.absolute_degree()
    image_of_I = ambient.span(ideal_rows.change_ring(GFp).row_space())

    # Step 3: the quotient of these two F_p vector spaces, together with
    # the partial reduction and lifting maps built from the same data.
    Q = ambient.quotient(image_of_I)
    return Q, QuotientMap(K, to_OK_coords, Q, I), LiftMap(OK, basis_mat, Q, I)
| 33.818851 | 425 | 0.531385 |
443fb2c4e1cfe8dcc35fe9ae22809c35084f95ea | 11,910 | py | Python | QUANTAXIS/__init__.py | qingduyu/QUANTAXIS | 1ac7def710d5c170b4980f5926bb97816f452867 | [
"MIT"
] | 1 | 2019-01-01T14:01:29.000Z | 2019-01-01T14:01:29.000Z | QUANTAXIS/__init__.py | qingduyu/QUANTAXIS | 1ac7def710d5c170b4980f5926bb97816f452867 | [
"MIT"
] | null | null | null | QUANTAXIS/__init__.py | qingduyu/QUANTAXIS | 1ac7def710d5c170b4980f5926bb97816f452867 | [
"MIT"
] | 2 | 2018-11-30T07:52:14.000Z | 2021-05-28T23:00:20.000Z | #coding :utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2018 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
QUANTAXIS
Quantitative Financial Strategy Framework
by yutiansut
2017/4/8
"""
__version__ = '1.1.8.dev5'
__author__ = 'yutiansut'
logo = ' \n \
```````````````````````````````````````````````````````````````````````````````````````````````````````````````````````` \n \
``########`````##````````##``````````##`````````####````````##```##########````````#``````##``````###```##`````######`` \n \
`##``````## ```##````````##`````````####````````##`##```````##```````##```````````###``````##````##`````##```##`````##` \n \
##````````##```##````````##````````##`##````````##``##``````##```````##``````````####```````#```##``````##```##``````## \n \
##````````##```##````````##```````##```##```````##```##`````##```````##`````````##`##```````##`##```````##````##``````` \n \
##````````##```##````````##``````##`````##``````##````##````##```````##````````##``###```````###````````##`````##`````` \n \
##````````##```##````````##``````##``````##`````##`````##```##```````##```````##````##```````###````````##``````###```` \n \
##````````##```##````````##`````##````````##````##``````##``##```````##``````##``````##`````##`##```````##````````##``` \n \
##````````##```##````````##````#############````##```````##`##```````##`````###########`````##``##``````##`````````##`` \n \
###```````##```##````````##```##```````````##```##```````##`##```````##````##`````````##```##```##``````##```##`````##` \n \
`##``````###````##``````###``##`````````````##``##````````####```````##```##``````````##``###````##`````##````##`````## \n \
``#########``````########```##``````````````###`##``````````##```````##``##````````````##`##``````##````##`````###``### \n \
````````#####`````````````````````````````````````````````````````````````````````````````````````````````````````##`` \n \
``````````````````````````````````````````````````````````````````````````````````````````````````````````````````````` \n \
``````````````````````````Copyright``yutiansut``2018``````QUANTITATIVE FINANCIAL FRAMEWORK````````````````````````````` \n \
``````````````````````````````````````````````````````````````````````````````````````````````````````````````````````` \n \
```````````````````````````````````````````````````````````````````````````````````````````````````````````````````````` \n \
```````````````````````````````````````````````````````````````````````````````````````````````````````````````````````` \n '
# fetch methods
from QUANTAXIS.QAFetch.Fetcher import QA_quotation # 统一的获取接口
from QUANTAXIS.QAFetch import (QA_fetch_get_stock_day, QA_fetch_get_trade_date, QA_fetch_get_stock_min, QA_fetch_get_stock_xdxr,
QA_fetch_get_stock_indicator, QA_fetch_get_stock_realtime, QA_fetch_get_stock_transaction,
QA_fetch_get_index_day, QA_fetch_get_index_min, QA_fetch_get_stock_list, QA_fetch_get_stock_info,
QA_fetch_get_stock_block, QA_fetch_get_stock_transaction_realtime, QA_fetch_get_security_bars,
QA_fetch_get_future_day, QA_fetch_get_future_min, QA_fetch_get_future_list, QA_fetch_get_future_transaction,
QA_fetch_get_future_transaction_realtime, QA_fetch_get_future_realtime, QA_fetch_get_bond_list, QA_fetch_get_index_list,
QA_fetch_get_hkfund_list, QA_fetch_get_hkfund_day, QA_fetch_get_hkfund_min,
QA_fetch_get_hkindex_list, QA_fetch_get_hkindex_day, QA_fetch_get_hkindex_min,
QA_fetch_get_hkstock_list, QA_fetch_get_hkstock_day, QA_fetch_get_hkstock_min,
QA_fetch_get_usstock_list, QA_fetch_get_usstock_day, QA_fetch_get_usstock_min,
QA_fetch_get_option_list, QA_fetch_get_option_day, QA_fetch_get_option_min,
QA_fetch_get_macroindex_list, QA_fetch_get_macroindex_day, QA_fetch_get_macroindex_min,
QA_fetch_get_exchangerate_list, QA_fetch_get_exchangerate_day, QA_fetch_get_exchangerate_min,
QA_fetch_get_globalfuture_list, QA_fetch_get_globalfuture_day, QA_fetch_get_globalfuture_min)
from QUANTAXIS.QAFetch.QAQuery import (QA_fetch_trade_date, QA_fetch_account, QA_fetch_financial_report,
QA_fetch_stock_day, QA_fetch_stock_min,
QA_fetch_index_day, QA_fetch_index_min, QA_fetch_index_list,
QA_fetch_future_min, QA_fetch_future_day, QA_fetch_future_list,
QA_fetch_future_tick, QA_fetch_stock_list, QA_fetch_stock_full, QA_fetch_stock_xdxr,
QA_fetch_backtest_info, QA_fetch_backtest_history, QA_fetch_stock_block, QA_fetch_stock_info,
QA_fetch_stock_name, QA_fetch_quotation, QA_fetch_quotations)
from QUANTAXIS.QAFetch.QACrawler import QA_fetch_get_sh_margin, QA_fetch_get_sz_margin
from QUANTAXIS.QAFetch.QAQuery_Advance import *
# save
from QUANTAXIS.QASU.main import (QA_SU_save_stock_list, QA_SU_save_stock_day, QA_SU_save_index_day, QA_SU_save_index_min, QA_SU_save_stock_info_tushare,
QA_SU_save_stock_min, QA_SU_save_stock_xdxr, QA_SU_save_stock_info, QA_SU_save_stock_min_5, QA_SU_save_index_list, QA_SU_save_future_list,
QA_SU_save_stock_block, QA_SU_save_etf_day, QA_SU_save_etf_min, QA_SU_save_financialfiles)
# from QUANTAXIS.QASU.save_backtest import (
# QA_SU_save_account_message, QA_SU_save_backtest_message, QA_SU_save_account_to_csv)
from QUANTAXIS.QASU.user import (QA_user_sign_in, QA_user_sign_up)
from QUANTAXIS.QASU.save_strategy import QA_SU_save_strategy
# event driver
# market
from QUANTAXIS.QAMarket import (QA_Order, QA_OrderQueue, QA_OrderHandler,
QA_Market, QA_Dealer,
QA_RandomBroker, QA_SimulatedBroker, QA_RealBroker, QA_BacktestBroker)
# Account,Risk,Portfolio,User,Strategy
from QUANTAXIS.QAARP.QAAccount import QA_Account
from QUANTAXIS.QAARP.QAPortfolio import QA_Portfolio, QA_PortfolioView
from QUANTAXIS.QAARP.QARisk import QA_Performance, QA_Risk
from QUANTAXIS.QAARP.QAUser import QA_User
from QUANTAXIS.QAARP.QAStrategy import QA_Strategy
# Backtest
from QUANTAXIS.QAApplication.QABacktest import QA_Backtest
from QUANTAXIS.QAApplication.QAAnalysis import QA_backtest_analysis_backtest
from QUANTAXIS.QAApplication.QAResult import backtest_result_analyzer
# ENGINE
from QUANTAXIS.QAEngine import QA_Thread, QA_Event, QA_Worker, QA_Task, QA_Engine
# Data
from QUANTAXIS.QAData import (QA_data_tick_resample, QA_data_day_resample, QA_data_min_resample, QA_data_calc_marketvalue, QA_data_marketvalue,
QA_data_make_qfq, QA_data_stock_to_fq, QA_data_make_hfq,
QA_DataStruct_Stock_day, QA_DataStruct_Stock_min,
QA_DataStruct_Future_day, QA_DataStruct_Future_min,
QA_DataStruct_Index_day, QA_DataStruct_Index_min, QA_DataStruct_Indicators, QA_DataStruct_Stock_realtime,
QA_DataStruct_Stock_transaction, QA_DataStruct_Stock_block, QA_DataStruct_Series, QA_DataStruct_Financial,
from_tushare, QDS_StockMinWarpper, QDS_StockDayWarpper, QDS_IndexDayWarpper, QDS_IndexMinWarpper)
from QUANTAXIS.QAData.dsmethods import *
# Analysis
from QUANTAXIS.QAAnalysis import *
# Setting
from QUANTAXIS.QASetting.QALocalize import qa_path, setting_path, cache_path, download_path, log_path
# Util
from QUANTAXIS.QAUtil import (QA_util_date_stamp, QA_util_time_stamp, QA_util_ms_stamp, QA_util_date_valid, QA_util_calc_time,
QA_util_realtime, QA_util_id2date, QA_util_is_trade, QA_util_get_date_index, QA_util_get_last_day, QA_util_get_next_day, QA_util_get_order_datetime, QA_util_get_trade_datetime,
QA_util_get_index_date, QA_util_select_hours, QA_util_date_gap, QA_util_time_gap, QA_util_get_last_datetime, QA_util_get_next_datetime,
QA_util_select_min, QA_util_time_delay, QA_util_time_now, QA_util_date_str2int,
QA_util_date_int2str, QA_util_date_today, QA_util_to_datetime,
QA_util_sql_mongo_setting, QA_util_sql_async_mongo_setting, QA_util_sql_mongo_sort_ASCENDING, QA_util_sql_mongo_sort_DESCENDING,
QA_util_log_debug, QA_util_log_expection, QA_util_log_info,
QA_util_cfg_initial, QA_util_get_cfg,
QA_Setting, DATABASE, info_ip_list, stock_ip_list, future_ip_list,
QA_util_web_ping, QA_util_send_mail,
trade_date_sse, QA_util_if_trade, QA_util_if_tradetime,
QA_util_get_real_datelist, QA_util_get_real_date,
QA_util_get_trade_range, QA_util_get_trade_gap,
QA_util_save_csv, QA_util_code_tostr, QA_util_code_tolist,
QA_util_dict_remove_key,
QA_util_multi_demension_list, QA_util_diff_list,
QA_util_to_json_from_pandas, QA_util_to_list_from_numpy, QA_util_to_list_from_pandas, QA_util_to_pandas_from_json, QA_util_to_pandas_from_list,
QA_util_mongo_initial, QA_util_mongo_status, QA_util_mongo_infos,
QA_util_make_min_index, QA_util_make_hour_index,
QA_util_random_with_topic, QA_util_file_md5,
MARKET_TYPE, ORDER_STATUS, TRADE_STATUS, MARKET_ERROR, AMOUNT_MODEL, ORDER_DIRECTION, ORDER_MODEL, ORDER_EVENT,
MARKET_EVENT, ENGINE_EVENT, RUNNING_ENVIRONMENT, FREQUENCE, BROKER_EVENT, BROKER_TYPE, DATASOURCE, OUTPUT_FORMAT) # QAPARAMETER
from QUANTAXIS.QAIndicator import *
#from QUANTAXIS.QAFetch.QATdx_adv import bat
from QUANTAXIS.QAWeb import SigninHandler, SignupHandler, SimulateSocketHandler, StockdayHandler, StockminHandler, RealtimeSocketHandler, QABaseHandler, QAWebSocketHandler
from QUANTAXIS.QAWeb.QA_Web import main
# CMD and Cli
import QUANTAXIS.QACmd
from QUANTAXIS.QACmd import QA_cmd
import argparse
# check
import sys
# Hard gate on the interpreter version: this package is only validated on
# CPython 3.4-3.8, so refuse to continue loading on any other version.
if sys.version_info.major != 3 or sys.version_info.minor not in [4, 5, 6, 7, 8]:
    print('wrong version, should be 3.4/3.5/3.6/3.7/3.8 version')
    sys.exit()
#QA_util_log_info('Welcome to QUANTAXIS, the Version is {}'.format(__version__))
# QA_util_log_info(logo)
| 65.801105 | 206 | 0.590512 |
0ba75752f6302b4ac69edd74e3a2785329277326 | 4,056 | py | Python | skimage/io/tests/test_mpl_imshow.py | jjhelmus/scikit-image | b9b5fde0821fe8bcece2528b30d012c65c64ad6f | [
"BSD-3-Clause"
] | 2 | 2017-03-30T11:22:11.000Z | 2019-03-03T05:18:01.000Z | skimage/io/tests/test_mpl_imshow.py | jjhelmus/scikit-image | b9b5fde0821fe8bcece2528b30d012c65c64ad6f | [
"BSD-3-Clause"
] | null | null | null | skimage/io/tests/test_mpl_imshow.py | jjhelmus/scikit-image | b9b5fde0821fe8bcece2528b30d012c65c64ad6f | [
"BSD-3-Clause"
] | 1 | 2019-12-17T14:53:28.000Z | 2019-12-17T14:53:28.000Z | from __future__ import division
import numpy as np
from skimage import io
from skimage._shared._warnings import expected_warnings
import matplotlib.pyplot as plt
def setup():
    """Reset skimage's I/O plugin registry before the tests in this module run."""
    io.reset_plugins()
# test images. Note that they don't have their full range for their dtype,
# but we still expect the display range to equal the full dtype range.
im8 = np.array([[0, 64], [128, 240]], np.uint8)  # uint8 values inside [0, 255]
im16 = im8.astype(np.uint16) * 256               # same pattern scaled to uint16
im64 = im8.astype(np.uint64)                     # non-standard integer dtype
imf = im8 / 255                                  # float image inside [0, 1]
im_lo = imf / 1000                               # low dynamic range (max ~1e-3)
im_hi = imf + 10                                 # values above the standard [0, 1] range
def n_subplots(ax_im):
    """Return the number of subplots in the figure containing an ``AxesImage``.

    Parameters
    ----------
    ax_im : matplotlib.pyplot.AxesImage object
        The input ``AxesImage``.

    Returns
    -------
    n : int
        The number of subplots in the corresponding figure.

    Notes
    -----
    A count of two indicates that a colorbar was drawn next to the image,
    while a plain imshow produces a single subplot.
    """
    figure = ax_im.get_figure()
    return len(figure.get_axes())
def test_uint8():
    """uint8 input displays in gray over the full 8-bit range, no colorbar."""
    plt.figure()
    warning_patterns = ["tight_layout : Falling back to Agg|\A\Z",
                        "CObject type is marked|\A\Z"]
    with expected_warnings(warning_patterns):
        shown = io.imshow(im8)
    # Gray colormap, full dtype range, a single axes, and no colorbar.
    assert shown.cmap.name == 'gray'
    assert shown.get_clim() == (0, 255)
    assert n_subplots(shown) == 1
    assert shown.colorbar is None
def test_uint16():
    """uint16 input displays in gray over the full 16-bit range, no colorbar."""
    plt.figure()
    warning_patterns = ["tight_layout : Falling back to Agg|\A\Z",
                        "CObject type is marked|\A\Z"]
    with expected_warnings(warning_patterns):
        shown = io.imshow(im16)
    assert shown.cmap.name == 'gray'
    assert shown.get_clim() == (0, 65535)
    assert n_subplots(shown) == 1
    assert shown.colorbar is None
def test_float():
    """Float input in [0, 1] displays in gray with (0, 1) limits, no colorbar."""
    plt.figure()
    warning_patterns = ["tight_layout : Falling back to Agg|\A\Z",
                        "CObject type is marked|\A\Z"]
    with expected_warnings(warning_patterns):
        shown = io.imshow(imf)
    assert shown.cmap.name == 'gray'
    assert shown.get_clim() == (0, 1)
    assert n_subplots(shown) == 1
    assert shown.colorbar is None
def test_low_dynamic_range():
    """A low-dynamic-range image warns and is stretched to its own min/max."""
    warning_patterns = ["Low image dynamic range|CObject type is marked",
                        "tight_layout : Falling back to Agg|\A\Z"]
    with expected_warnings(warning_patterns):
        shown = io.imshow(im_lo)
    assert shown.get_clim() == (im_lo.min(), im_lo.max())
    # The rescaled display is accompanied by a colorbar.
    assert shown.colorbar is not None
def test_outside_standard_range():
    """Values above [0, 1] warn, rescale to the data range, and get a colorbar."""
    plt.figure()
    # Warning raised by matplotlib on Windows:
    # "The CObject type is marked Pending Deprecation in Python 2.7.
    # Please use capsule objects instead."
    # Ref: https://docs.python.org/2/c-api/cobject.html
    warning_patterns = ["out of standard range|CObject type is marked",
                        "tight_layout : Falling back to Agg|\A\Z"]
    with expected_warnings(warning_patterns):
        shown = io.imshow(im_hi)
    assert shown.get_clim() == (im_hi.min(), im_hi.max())
    assert n_subplots(shown) == 2
    assert shown.colorbar is not None
def test_nonstandard_type():
    """A non-standard integer dtype warns and rescales to its own min/max."""
    plt.figure()
    # Warning raised by matplotlib on Windows:
    # "The CObject type is marked Pending Deprecation in Python 2.7.
    # Please use capsule objects instead."
    # Ref: https://docs.python.org/2/c-api/cobject.html
    warning_patterns = ["Low image dynamic range|CObject type is marked",
                        "tight_layout : Falling back to Agg|\A\Z"]
    with expected_warnings(warning_patterns):
        shown = io.imshow(im64)
    assert shown.get_clim() == (im64.min(), im64.max())
    assert n_subplots(shown) == 2
    assert shown.colorbar is not None
def test_signed_image():
    """Signed float data displays with limits symmetric about zero."""
    plt.figure()
    signed = np.array([[-0.5, -0.2], [0.1, 0.4]])
    warning_patterns = ["tight_layout : Falling back to Agg|\A\Z",
                        "CObject type is marked|\A\Z"]
    with expected_warnings(warning_patterns):
        shown = io.imshow(signed)
    assert shown.get_clim() == (-0.5, 0.5)
    assert n_subplots(shown) == 2
    assert shown.colorbar is not None
if __name__ == '__main__':
    # Run this module's tests via numpy's test harness when executed directly.
    np.testing.run_module_suite()
| 31.6875 | 79 | 0.636588 |
0cddb725f46c96a7220d36b3440adef3382d045e | 288 | py | Python | examples/2.py | CBORT-NCBIB/oct-cbort | 7f2bc525bb3f5b3bcf2e41622129c87ee710161a | [
"MIT"
] | 2 | 2021-12-16T00:03:19.000Z | 2022-02-21T10:58:39.000Z | examples/2.py | CBORT-NCBIB/oct-cbort | 7f2bc525bb3f5b3bcf2e41622129c87ee710161a | [
"MIT"
] | null | null | null | examples/2.py | CBORT-NCBIB/oct-cbort | 7f2bc525bb3f5b3bcf2e41622129c87ee710161a | [
"MIT"
] | 2 | 2021-11-19T02:32:50.000Z | 2021-12-16T00:05:43.000Z | import os
if os.name == 'nt':
os.system("python -m oct examples//data//2_BL_Catheter1_rat_clot_ramp_struct_ps tomo+struct+ps+proj mgh 1")
elif os.name == 'posix':
os.system("python -m oct examples//data//2_BL_Catheter1_rat_clot_ramp_struct_ps tomo+struct+angio+ps+proj mgh 1")
| 32 | 117 | 0.743056 |
db30e9bf5ca81cd6a83d6eafb4142c9bb37a6c4e | 1,815 | py | Python | test_code/grader_relu.py | pl80tech/CarND-Traffic-Sign-Classifier-Project | aac1de9d2d24cb0b4280c7733579a101a13ee2fa | [
"MIT"
] | 2 | 2018-09-24T04:43:31.000Z | 2018-09-24T04:44:13.000Z | test_code/grader_relu.py | pl80tech/CarND-Traffic-Sign-Classifier-Project | aac1de9d2d24cb0b4280c7733579a101a13ee2fa | [
"MIT"
] | null | null | null | test_code/grader_relu.py | pl80tech/CarND-Traffic-Sign-Classifier-Project | aac1de9d2d24cb0b4280c7733579a101a13ee2fa | [
"MIT"
] | 1 | 2018-10-11T05:16:47.000Z | 2018-10-11T05:16:47.000Z | import numpy as np
from tensorflow.python.framework.errors import FailedPreconditionError
import re
def get_result(output):
    """Grade a student's ReLU output against the expected activations.

    Parameters
    ----------
    output : array-like
        Values produced by the student's session; expected to be a (3, 2)
        array of non-negative floats.

    Returns
    -------
    dict
        Keys ``correct`` (bool), ``feedback`` (str) and ``comment`` (str).
    """
    answer = np.array([
        [5.11000013, 8.44000053],
        [0., 0.],
        [24.01000214, 38.23999786]])

    # Default to a failing grade; fields are overwritten by the checks below.
    # Bug fix: these messages were plain strings containing literal {...}
    # placeholders; they must be f-strings so the actual values appear
    # (the comment string also carried a stray trailing ')').
    result = {
        'correct': False,
        'feedback': f'That\'s the wrong answer. It should print {answer}',
        'comment': ''}

    output_shape = np.shape(output)
    answer_shape = np.shape(answer)

    if output_shape != answer_shape:
        result['feedback'] = 'Output is the wrong type or wrong dimension.'
        result['comment'] = f'Output shape is {output_shape}, answer shape is {answer_shape}'
    elif (0 > output).sum():
        # Any negative entry means ReLU was not applied to the hidden layer.
        result['feedback'] = 'Output contains negative numbers.'
        result['comment'] = 'Are you applying ReLU to hidden_layer?'
    else:
        if np.allclose(output, answer):
            result['correct'] = True
            result['feedback'] = 'You got it!  That\'s how you use a ReLU.'
    return result
def run_grader(output):
    """Grade *output* with :func:`get_result` and print the feedback lines."""
    if not np.any(output):
        # Nothing (or all zeros) was supplied — remind the student to bind
        # their session result to the expected variable name.
        print("Don't forget to complete all tasks and name your session variable output")
        return
    try:
        # Get grade result information
        result = get_result(output)
    except Exception as err:
        # Default error result
        result = {
            'correct': False,
            'feedback': 'Something went wrong with your submission:',
            'comment': str(err)}
    print(result.get('feedback'))
    print(result.get('comment'))
if __name__ == "__main__":
run_grader(output) | 26.691176 | 93 | 0.563636 |
04c71d4e40c2ded8265246f75fda1105eccae0f1 | 4,379 | py | Python | test/functional/wallet_keypool_topup.py | tokyocoin-project/tokyocoin | 48cbabf73f0687cc04b6658cf69aba65aa1b997d | [
"MIT"
] | null | null | null | test/functional/wallet_keypool_topup.py | tokyocoin-project/tokyocoin | 48cbabf73f0687cc04b6658cf69aba65aa1b997d | [
"MIT"
] | null | null | null | test/functional/wallet_keypool_topup.py | tokyocoin-project/tokyocoin | 48cbabf73f0687cc04b6658cf69aba65aa1b997d | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2017-2019 The Tokyocoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test HD Wallet keypool restore function.
Two nodes. Node1 is under test. Node0 is providing transactions and generating blocks.
- Start node1, shutdown and backup wallet.
- Generate 110 keys (enough to drain the keypool). Store key 90 (in the initial keypool) and key 110 (beyond the initial keypool). Send funds to key 90 and key 110.
- Stop node1, clear the datadir, move wallet file back into the datadir and restart node1.
- connect node1 to node0. Verify that they sync and node1 receives its funds."""
import os
import shutil
from test_framework.test_framework import TokyocoinTestFramework
from test_framework.util import (
assert_equal,
)
class KeypoolRestoreTest(TokyocoinTestFramework):
    """Functional test that a restored wallet backup tops up its keypool.

    Node 0 provides funds and blocks.  For each address type, one of nodes
    1-3 is backed up, drained past its initial keypool (110 keys generated),
    funded at key 90 (inside the backup's pool) and key 110 (beyond it),
    then restored from the backup and checked for the full balance.
    """

    def set_test_params(self):
        # Node 0 uses defaults; nodes 1-3 each run with a 100-key keypool.
        self.setup_clean_chain = True
        self.num_nodes = 4
        self.extra_args = [[], ['-keypool=100'], ['-keypool=100'], ['-keypool=100']]

    def skip_test_if_missing_module(self):
        # Every step of this test requires wallet support.
        self.skip_if_no_wallet()

    def run_test(self):
        wallet_path = os.path.join(self.nodes[1].datadir, self.chain, "wallets", self.default_wallet_name, self.wallet_data_filename)
        wallet_backup_path = os.path.join(self.nodes[1].datadir, "wallet.bak")
        # Mature coinbase outputs so node 0 has spendable funds.
        self.nodes[0].generate(101)
        self.log.info("Make backup of wallet")
        self.stop_node(1)
        shutil.copyfile(wallet_path, wallet_backup_path)
        self.start_node(1, self.extra_args[1])
        self.connect_nodes(0, 1)
        self.connect_nodes(0, 2)
        self.connect_nodes(0, 3)
        for i, output_type in enumerate(["legacy", "p2sh-segwit", "bech32"]):
            self.log.info("Generate keys for wallet with address type: {}".format(output_type))
            idx = i+1
            # Draw 90 keys (still inside the backup's initial keypool)...
            for _ in range(90):
                addr_oldpool = self.nodes[idx].getnewaddress(address_type=output_type)
            # ...then 20 more, so the last address (key 110) is beyond it.
            for _ in range(20):
                addr_extpool = self.nodes[idx].getnewaddress(address_type=output_type)
            # Make sure we're creating the outputs we expect
            address_details = self.nodes[idx].validateaddress(addr_extpool)
            if i == 0:
                assert not address_details["isscript"] and not address_details["iswitness"]
            elif i == 1:
                assert address_details["isscript"] and not address_details["iswitness"]
            else:
                assert not address_details["isscript"] and address_details["iswitness"]
            self.log.info("Send funds to wallet")
            self.nodes[0].sendtoaddress(addr_oldpool, 10)
            self.nodes[0].generate(1)
            self.nodes[0].sendtoaddress(addr_extpool, 5)
            self.nodes[0].generate(1)
            self.sync_blocks()
            self.log.info("Restart node with wallet backup")
            self.stop_node(idx)
            # Restore the pre-drain wallet file in place and restart.
            shutil.copyfile(wallet_backup_path, wallet_path)
            self.start_node(idx, self.extra_args[idx])
            self.connect_nodes(0, idx)
            self.sync_all()
            self.log.info("Verify keypool is restored and balance is correct")
            # 10 + 5 coins must be recovered even though key 110 was outside
            # the backed-up keypool (the keypool must have topped up).
            assert_equal(self.nodes[idx].getbalance(), 15)
            assert_equal(self.nodes[idx].listtransactions()[0]['category'], "receive")
            # Check that we have marked all keys up to the used keypool key as used
            if self.options.descriptors:
                if output_type == 'legacy':
                    assert_equal(self.nodes[idx].getaddressinfo(self.nodes[idx].getnewaddress(address_type=output_type))['hdkeypath'], "m/44'/1'/0'/0/110")
                elif output_type == 'p2sh-segwit':
                    assert_equal(self.nodes[idx].getaddressinfo(self.nodes[idx].getnewaddress(address_type=output_type))['hdkeypath'], "m/49'/1'/0'/0/110")
                elif output_type == 'bech32':
                    assert_equal(self.nodes[idx].getaddressinfo(self.nodes[idx].getnewaddress(address_type=output_type))['hdkeypath'], "m/84'/1'/0'/0/110")
            else:
                assert_equal(self.nodes[idx].getaddressinfo(self.nodes[idx].getnewaddress(address_type=output_type))['hdkeypath'], "m/0'/0'/110'")
if __name__ == '__main__':
    # Standard functional-test entry point.
    KeypoolRestoreTest().main()
| 46.585106 | 164 | 0.654259 |
5c54627a793c4e208697803920447312b8b56a56 | 6,092 | py | Python | model/g_p.py | MatthewAbugeja/lmbn | 6e18e752f95614ef37d5a767672cb81e82708d94 | [
"MIT"
] | 45 | 2021-01-25T15:42:59.000Z | 2022-03-22T07:08:40.000Z | model/g_p.py | MatthewAbugeja/lmbn | 6e18e752f95614ef37d5a767672cb81e82708d94 | [
"MIT"
] | 12 | 2021-01-29T05:00:35.000Z | 2022-02-11T01:12:00.000Z | model/g_p.py | MatthewAbugeja/lmbn | 6e18e752f95614ef37d5a767672cb81e82708d94 | [
"MIT"
] | 13 | 2021-01-27T06:38:32.000Z | 2022-03-06T05:35:28.000Z | import copy
import torch
from torch import nn
import torch.nn.functional as F
import random
import math
from .osnet import osnet_x1_0, OSBlock
from .attention import BatchDrop, BatchRandomErasing, PAM_Module, CAM_Module, SE_Module, Dual_Module
from .bnneck import BNNeck, BNNeck3
from torch.autograd import Variable
class G_P(nn.Module):
    """Two-branch (global + partial) re-ID network on an OSNet-x1.0 backbone.

    A shared stem runs OSNet up to the first conv3 block; two deep-copied
    tails (conv3 remainder through conv5) then form independent global and
    partial branches.  The global branch is max-pooled (optionally after
    BatchDrop); the partial branch yields a global descriptor plus two
    horizontal stripes.  Each of the four pooled features feeds its own
    BNNeck3 head.
    """

    def __init__(self, args):
        """Build the network.

        Parameters
        ----------
        args : argparse.Namespace
            Must provide ``num_classes``, ``feats``, ``drop_block``,
            ``h_ratio``, ``w_ratio`` and ``activation_map``.
        """
        super(G_P, self).__init__()
        osnet = osnet_x1_0(pretrained=True)
        # Shared stem: everything up to and including the first conv3 block.
        self.backone = nn.Sequential(
            osnet.conv1,
            osnet.maxpool,
            osnet.conv2,
            osnet.conv3[0]
        )
        # Remainder of conv3, deep-copied into each branch below.
        conv3 = osnet.conv3[1:]
        # downsample_conv4 = osnet._make_layer(OSBlock, 2, 384, 512, True)
        # downsample_conv4[:2].load_state_dict(osnet.conv4[:2].state_dict())
        self.global_branch = nn.Sequential(copy.deepcopy(
            conv3), copy.deepcopy(osnet.conv4), copy.deepcopy(osnet.conv5))
        self.partial_branch = nn.Sequential(copy.deepcopy(
            conv3), copy.deepcopy(osnet.conv4), copy.deepcopy(osnet.conv5))
        # self.channel_branch = nn.Sequential(copy.deepcopy(
        #     conv3), copy.deepcopy(osnet.conv4), copy.deepcopy(osnet.conv5))
        # if args.pool == 'max':
        #     pool2d = nn.AdaptiveMaxPool2d
        # elif args.pool == 'avg':
        #     pool2d = nn.AdaptiveAvgPool2d
        # else:
        #     raise Exception()
        self.global_pooling = nn.AdaptiveMaxPool2d((1, 1))
        # (2, 1) output splits the partial feature map into two horizontal stripes.
        self.partial_pooling = nn.AdaptiveAvgPool2d((2, 1))
        # self.channel_pooling = nn.AdaptiveAvgPool2d((1, 1))
        # One BNNeck3 head (512 -> args.feats) per pooled feature; deep-copied
        # so the four heads have independent weights.
        reduction = BNNeck3(512, args.num_classes,
                            args.feats, return_f=True)
        self.reduction_0 = copy.deepcopy(reduction)
        self.reduction_1 = copy.deepcopy(reduction)
        self.reduction_2 = copy.deepcopy(reduction)
        self.reduction_3 = copy.deepcopy(reduction)
        # self.shared = nn.Sequential(nn.Conv2d(
        #     self.chs, args.feats, 1, bias=False), nn.BatchNorm2d(args.feats), nn.ReLU(True))
        # self.weights_init_kaiming(self.shared)
        # self.reduction_ch_0 = BNNeck(
        #     args.feats, args.num_classes, return_f=True)
        # self.reduction_ch_1 = BNNeck(
        #     args.feats, args.num_classes, return_f=True)
        # if args.drop_block:
        #     print('Using batch random erasing block.')
        #     self.batch_drop_block = BatchRandomErasing()
        if args.drop_block:
            print('Using batch drop block.')
            self.batch_drop_block = BatchDrop(
                h_ratio=args.h_ratio, w_ratio=args.w_ratio)
        else:
            self.batch_drop_block = None
        self.activation_map = args.activation_map

    def forward(self, x):
        """Run both branches on a batch of images.

        Returns
        -------
        Training mode: ``([cls_glo, cls_p0, cls_p1, cls_p2], [feat_glo, feat_p0])``
        — classifier outputs from the four BNNeck3 heads plus two features.
        Eval mode: a single tensor of the four BN features stacked on dim 2.
        """
        # if self.batch_drop_block is not None:
        #     x = self.batch_drop_block(x)
        x = self.backone(x)
        glo = self.global_branch(x)
        par = self.partial_branch(x)
        # cha = self.channel_branch(x)
        # if self.activation_map:
        #     _, _, h_par, _ = par.size()
        #     fmap_p0 = par[:, :, :h_par // 2, :]
        #     fmap_p1 = par[:, :, h_par // 2:, :]
        #     fmap_c0 = cha[:, :self.chs, :, :]
        #     fmap_c1 = cha[:, self.chs:, :, :]
        #     print('activation_map')
        #     return glo, fmap_c0, fmap_c1, fmap_p0, fmap_p1
        # BatchDrop is applied to the global branch only, before pooling.
        if self.batch_drop_block is not None:
            glo = self.batch_drop_block(glo)
        glo = self.global_pooling(glo)  # shape: (batch, 512, 1, 1) — 512 matches the BNNeck3 input above (old comment said 2048)
        g_par = self.global_pooling(par)  # shape: (batch, 512, 1, 1)
        p_par = self.partial_pooling(par)  # shape: (batch, 512, 2, 1) — two stripes
        # cha = self.channel_pooling(cha)
        # Upper and lower horizontal stripes of the partial branch.
        p0 = p_par[:, :, 0:1, :]
        p1 = p_par[:, :, 1:2, :]
        f_glo = self.reduction_0(glo)
        f_p0 = self.reduction_1(g_par)
        f_p1 = self.reduction_2(p0)
        f_p2 = self.reduction_3(p1)
        ################
        # c0 = cha[:, :self.chs, :, :]
        # c1 = cha[:, self.chs:, :, :]
        # c0 = self.shared(c0)
        # c1 = self.shared(c1)
        # f_c0 = self.reduction_ch_0(c0)
        # f_c1 = self.reduction_ch_1(c1)
        ################
        fea = [f_glo[-1], f_p0[-1]]
        if not self.training:
            return torch.stack([f_glo[0], f_p0[0], f_p1[0], f_p2[0]], dim=2)
        return [f_glo[1], f_p0[1], f_p1[1], f_p2[1]], fea

    def weights_init_kaiming(self, m):
        # Kaiming initialization helper; not called within this class —
        # presumably intended for Module.apply() by callers (confirm).
        classname = m.__class__.__name__
        if classname.find('Linear') != -1:
            nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out')
            nn.init.constant_(m.bias, 0.0)
        elif classname.find('Conv') != -1:
            nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
            if m.bias is not None:
                nn.init.constant_(m.bias, 0.0)
        elif classname.find('BatchNorm') != -1:
            if m.affine:
                nn.init.constant_(m.weight, 1.0)
                nn.init.constant_(m.bias, 0.0)
if __name__ == '__main__':
    # Smoke-test the model's forward pass before training it.
    import argparse
    parser = argparse.ArgumentParser(description='MGN')
    parser.add_argument('--num_classes', type=int, default=751, help='')
    parser.add_argument('--bnneck', type=bool, default=True)
    parser.add_argument('--pool', type=str, default='max')
    parser.add_argument('--feats', type=int, default=512)
    parser.add_argument('--drop_block', type=bool, default=True)
    # Bug fix: G_P.__init__ reads args.h_ratio and args.activation_map, but
    # neither was registered, so model construction crashed with
    # AttributeError. Defaults: 0.33 is a conventional BatchDrop height
    # ratio — adjust if the project uses a different value.
    parser.add_argument('--h_ratio', type=float, default=0.33, help='')
    parser.add_argument('--w_ratio', type=float, default=1.0, help='')
    parser.add_argument('--activation_map', type=bool, default=False)
    args = parser.parse_args()

    # Bug fix: the original instantiated MCMP_n, which does not exist in
    # this module; G_P is the model defined above.
    net = G_P(args)
    print(net)

    # One dummy batch: 8 RGB images at the 384x128 re-ID input resolution.
    input = Variable(torch.FloatTensor(8, 3, 384, 128))
    net.eval()
    output = net(input)
    print('net output size:')
    print(output.shape)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.