repo_name stringlengths 7 65 | path stringlengths 5 185 | copies stringlengths 1 4 | size stringlengths 4 6 | content stringlengths 977 990k | license stringclasses 14 values | hash stringlengths 32 32 | line_mean float64 7.18 99.4 | line_max int64 31 999 | alpha_frac float64 0.25 0.95 | ratio float64 1.5 7.84 | autogenerated bool 1 class | config_or_test bool 2 classes | has_no_keywords bool 2 classes | has_few_assignments bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
neuropoly/spinalcordtoolbox | spinalcordtoolbox/scripts/sct_dmri_separate_b0_and_dwi.py | 1 | 10782 | #!/usr/bin/env python
#########################################################################################
#
# Separate b=0 and DW images from diffusion dataset.
#
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2013 Polytechnique Montreal <www.neuro.polymtl.ca>
# Author: Julien Cohen-Adad
# Modified: 2014-08-14
#
# About the license: see the file LICENSE.TXT
#########################################################################################
import sys
import math
import time
import os
import numpy as np
from spinalcordtoolbox.image import Image, generate_output_file, convert
from spinalcordtoolbox.utils.shell import SCTArgumentParser, Metavar, ActionCreateFolder
from spinalcordtoolbox.utils.sys import init_sct, printv, set_loglevel
from spinalcordtoolbox.utils.fs import tmp_create, copy, extract_fname, rmtree
from spinalcordtoolbox.scripts.sct_image import split_data, concat_data
class Param:
    """Default parameters for b=0/DWI separation."""

    def __init__(self):
        # Debug flag (0 = off)
        self.debug = 0
        # Whether to also write the temporal mean of each group (1 = yes)
        self.average = 1
        # Whether to clean up the temporary folder at the end (1 = yes)
        self.remove_temp_files = 1
        # Verbosity level
        self.verbose = 1
        # b-value threshold (s/mm2): volumes below it are treated as b=0,
        # in case user does not have min bvalues at 0.
        self.bval_min = 100
def get_parser():
    """Build the command-line argument parser for sct_dmri_separate_b0_and_dwi.

    :return: an SCTArgumentParser configured with all CLI options.
    """
    param_default = Param()
    parser = SCTArgumentParser(
        description="Separate b=0 and DW images from diffusion dataset. The output files will have a suffix "
                    "(_b0 and _dwi) appended to the input file name."
    )
    mandatory = parser.add_argument_group("\nMANDATORY ARGUMENTS")
    mandatory.add_argument(
        '-i',
        metavar=Metavar.file,
        required=True,
        help="Diffusion data. Example: dmri.nii.gz"
    )
    mandatory.add_argument(
        '-bvec',
        metavar=Metavar.file,
        required=True,
        help="Bvecs file. Example: bvecs.txt"
    )
    optional = parser.add_argument_group("\nOPTIONAL ARGUMENTS")
    optional.add_argument(
        "-h",
        "--help",
        action="help",
        help="Show this help message and exit."
    )
    optional.add_argument(
        '-a',
        type=int,
        choices=(0, 1),
        default=param_default.average,
        help="Average b=0 and DWI data. 0 = no, 1 = yes"
    )
    optional.add_argument(
        '-bval',
        metavar=Metavar.file,
        default="",
        help='bvals file. Used to identify low b-values (in case different from 0). Example: bvals.txt',
    )
    optional.add_argument(
        '-bvalmin',
        type=float,
        metavar=Metavar.float,
        help='B-value threshold (in s/mm2) below which data is considered as b=0. Example: 50.0',
    )
    optional.add_argument(
        '-ofolder',
        metavar=Metavar.folder,
        action=ActionCreateFolder,
        default='./',
        help='Output folder. Example: dmri_separate_results/',
    )
    optional.add_argument(
        "-r",
        # BUGFIX: this option previously used string choices ('0', '1') with an int
        # default, so an explicitly passed "-r 1" produced the str '1', and the
        # `remove_temp_files == 1` check in main() silently failed (temp files kept).
        # Parsing as int (like '-a' above) makes the comparison work in all cases.
        type=int,
        choices=(0, 1),
        default=param_default.remove_temp_files,
        help="Remove temporary files. 0 = no, 1 = yes"
    )
    optional.add_argument(
        '-v',
        metavar=Metavar.int,
        type=int,
        choices=[0, 1, 2],
        default=1,
        # Values [0, 1, 2] map to logging levels [WARNING, INFO, DEBUG], but are also used as "if verbose == #" in API
        help="Verbosity. 0: Display only errors/warnings, 1: Errors/warnings + info messages, 2: Debug mode")
    return parser
# MAIN
# ==========================================================================================
def main(argv=None):
    """Separate b=0 and DW volumes from a 4D diffusion dataset.

    The input is split along the T dimension; volumes classified as b=0 (via
    bvec norms or bvals, see identify_b0) are concatenated into <input>_b0 and
    the remaining volumes into <input>_dwi. With -a 1 (the default), the
    temporal mean of each group is also written (<...>_b0_mean, <...>_dwi_mean).

    :param argv: command-line arguments (excluding the program name); None reads sys.argv.
    :return: tuple of absolute output paths (fname_b0, fname_b0_mean, fname_dwi, fname_dwi_mean)
    """
    parser = get_parser()
    arguments = parser.parse_args(argv)
    verbose = arguments.v
    set_loglevel(verbose=verbose)
    # initialize parameters
    param = Param()
    fname_data = arguments.i
    fname_bvecs = arguments.bvec
    average = arguments.a
    # NOTE(review): the parser declares -r with string choices ('0', '1') but an int
    # default, so an explicitly passed "-r 1" arrives here as the str '1' and the
    # `remove_temp_files == 1` comparison below is then False (temp folder is kept).
    remove_temp_files = arguments.r
    path_out = arguments.ofolder
    fname_bvals = arguments.bval
    if arguments.bvalmin:
        param.bval_min = arguments.bvalmin
    # Initialization
    start_time = time.time()
    # printv(arguments)
    printv('\nInput parameters:', verbose)
    printv('  input file ............' + fname_data, verbose)
    printv('  bvecs file ............' + fname_bvecs, verbose)
    printv('  bvals file ............' + fname_bvals, verbose)
    printv('  average ...............' + str(average), verbose)
    # Get full path (needed because we chdir into a temp folder below)
    fname_data = os.path.abspath(fname_data)
    fname_bvecs = os.path.abspath(fname_bvecs)
    if fname_bvals:
        fname_bvals = os.path.abspath(fname_bvals)
    # Extract path, file and extension
    path_data, file_data, ext_data = extract_fname(fname_data)
    # create temporary folder
    path_tmp = tmp_create(basename="dmri_separate")
    # copy files into tmp folder and convert to nifti
    printv('\nCopy files into temporary folder...', verbose)
    ext = '.nii'
    dmri_name = 'dmri'
    b0_name = file_data + '_b0'
    b0_mean_name = b0_name + '_mean'
    dwi_name = file_data + '_dwi'
    dwi_mean_name = dwi_name + '_mean'
    im_dmri = convert(Image(fname_data))
    im_dmri.save(os.path.join(path_tmp, dmri_name + ext), mutable=True, verbose=verbose)
    copy(fname_bvecs, os.path.join(path_tmp, "bvecs"), verbose=verbose)
    # go to tmp folder
    curdir = os.getcwd()
    os.chdir(path_tmp)
    # Get size of data
    printv('\nGet dimensions data...', verbose)
    nx, ny, nz, nt, px, py, pz, pt = im_dmri.dim
    printv('.. ' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz) + ' x ' + str(nt), verbose)
    # Identify b=0 and DWI images
    # NOTE(review): this bare printv of the bvals path looks like leftover debug output.
    printv(fname_bvals)
    index_b0, index_dwi, nb_b0, nb_dwi = identify_b0(fname_bvecs, fname_bvals, param.bval_min, verbose)
    # Split into T dimension (one 3D file per volume, named dmri_T####.nii)
    printv('\nSplit along T dimension...', verbose)
    im_dmri_split_list = split_data(im_dmri, 3)
    for im_d in im_dmri_split_list:
        im_d.save()
    # Merge b=0 images
    printv('\nMerge b=0...', verbose)
    fname_in_list_b0 = []
    for it in range(nb_b0):
        fname_in_list_b0.append(dmri_name + '_T' + str(index_b0[it]).zfill(4) + ext)
    im_in_list_b0 = [Image(fname) for fname in fname_in_list_b0]
    concat_data(im_in_list_b0, 3).save(b0_name + ext)
    # Average b=0 images
    if average:
        printv('\nAverage b=0...', verbose)
        img = Image(b0_name + ext)
        out = img.copy()
        dim_idx = 3
        if len(np.shape(img.data)) < dim_idx + 1:
            raise ValueError("Expecting image with 4 dimensions!")
        out.data = np.mean(out.data, dim_idx)
        out.save(path=b0_mean_name + ext)
    # Merge DWI
    fname_in_list_dwi = []
    for it in range(nb_dwi):
        fname_in_list_dwi.append(dmri_name + '_T' + str(index_dwi[it]).zfill(4) + ext)
    im_in_list_dwi = [Image(fname) for fname in fname_in_list_dwi]
    concat_data(im_in_list_dwi, 3).save(dwi_name + ext)
    # Average DWI images
    if average:
        printv('\nAverage DWI...', verbose)
        img = Image(dwi_name + ext)
        out = img.copy()
        dim_idx = 3
        if len(np.shape(img.data)) < dim_idx + 1:
            raise ValueError("Expecting image with 4 dimensions!")
        out.data = np.mean(out.data, dim_idx)
        out.save(path=dwi_mean_name + ext)
    # come back to the original working directory
    os.chdir(curdir)
    # Generate output files (moved from tmp folder to the requested output folder)
    fname_b0 = os.path.abspath(os.path.join(path_out, b0_name + ext_data))
    fname_dwi = os.path.abspath(os.path.join(path_out, dwi_name + ext_data))
    fname_b0_mean = os.path.abspath(os.path.join(path_out, b0_mean_name + ext_data))
    fname_dwi_mean = os.path.abspath(os.path.join(path_out, dwi_mean_name + ext_data))
    printv('\nGenerate output files...', verbose)
    generate_output_file(os.path.join(path_tmp, b0_name + ext), fname_b0, verbose=verbose)
    generate_output_file(os.path.join(path_tmp, dwi_name + ext), fname_dwi, verbose=verbose)
    if average:
        generate_output_file(os.path.join(path_tmp, b0_mean_name + ext), fname_b0_mean, verbose=verbose)
        generate_output_file(os.path.join(path_tmp, dwi_mean_name + ext), fname_dwi_mean, verbose=verbose)
    # Remove temporary files
    if remove_temp_files == 1:
        printv('\nRemove temporary files...', verbose)
        rmtree(path_tmp, verbose=verbose)
    # display elapsed time
    elapsed_time = time.time() - start_time
    printv('\nFinished! Elapsed time: ' + str(int(np.round(elapsed_time))) + 's', verbose)
    return fname_b0, fname_b0_mean, fname_dwi, fname_dwi_mean
# ==========================================================================================
# identify b=0 and DW images
# ==========================================================================================
def identify_b0(fname_bvecs, fname_bvals, bval_min, verbose):
    """Classify diffusion volumes as b=0 or DWI.

    When no bvals file is given, a volume is b=0 if the norm of its bvec is
    (numerically) zero; otherwise, a volume is b=0 if its bval is below bval_min.

    :param fname_bvecs: path to the bvecs file (expected nx3; 3xn is transposed with a warning).
    :param fname_bvals: path to the bvals file, or '' to classify from bvecs only.
    :param bval_min: threshold (s/mm2) below which a bval is considered b=0.
    :param verbose: verbosity level passed through to printv.
    :return: (index_b0, index_dwi, nb_b0, nb_dwi)
    """
    printv('\nIdentify b=0 and DWI images...', verbose)
    index_b0 = []
    index_dwi = []
    # if bval is not provided
    if not fname_bvals:
        # Parse the bvecs file: one row of floats per line
        with open(fname_bvecs) as f:
            bvecs = [[float(v) for v in line.split()] for line in f]
        # Check if bvecs file is nx3
        if len(bvecs[0]) != 3:
            printv('  WARNING: bvecs file is 3xn instead of nx3. Consider using sct_dmri_transpose_bvecs.', verbose, 'warning')
            printv('  Transpose bvecs...', verbose)
            # transpose bvecs
            bvecs = list(zip(*bvecs))
        # A (near-)zero gradient direction marks a b=0 volume
        for it, bvec in enumerate(bvecs):
            if math.sqrt(math.fsum([i ** 2 for i in bvec])) < 0.01:
                index_b0.append(it)
            else:
                index_dwi.append(it)
    # if bval is provided
    else:
        # Delegate parsing of bvals/bvecs to dipy
        from dipy.io import read_bvals_bvecs
        bvals, bvecs = read_bvals_bvecs(fname_bvals, fname_bvecs)
        printv('\nIdentify b=0 and DWI images...', verbose)
        for it, bval in enumerate(bvals):
            if bval < bval_min:
                index_b0.append(it)
            else:
                index_dwi.append(it)
    # check if no b=0 images were detected
    if not index_b0:
        printv('ERROR: no b=0 images detected. Maybe you are using non-null low bvals? in that case use flag -bvalmin. Exit program.', 1, 'error')
        sys.exit(2)
    # display stuff
    nb_b0 = len(index_b0)
    nb_dwi = len(index_dwi)
    printv('  Number of b=0: ' + str(nb_b0) + ' ' + str(index_b0), verbose)
    printv('  Number of DWI: ' + str(nb_dwi) + ' ' + str(index_dwi), verbose)
    # return
    return index_b0, index_dwi, nb_b0, nb_dwi
if __name__ == "__main__":
    # CLI entry point: initialize the SCT environment, then run with sys.argv.
    init_sct()
    main(sys.argv[1:])
| mit | 082f7e04b83c743f76c5e49115308058 | 32.69375 | 146 | 0.572436 | 3.352612 | false | false | false | false |
neuropoly/spinalcordtoolbox | spinalcordtoolbox/scripts/sct_resample.py | 1 | 5075 | #!/usr/bin/env python
#########################################################################################
#
# Resample data using nibabel.
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2014 Polytechnique Montreal <www.neuro.polymtl.ca>
# Authors: Julien Cohen-Adad, Sara Dupont
#
# About the license: see the file LICENSE.TXT
#########################################################################################
# TODO: add possiblity to resample to destination image
import os
import sys
from spinalcordtoolbox.utils import SCTArgumentParser, Metavar, init_sct, printv, set_loglevel
import spinalcordtoolbox.resampling
class Param:
    """Runtime parameters for sct_resample, filled in by main() from CLI arguments."""

    # The constructor
    def __init__(self):
        # Input/output file names (set from -i / -o)
        self.fname_data = ''
        self.fname_out = ''
        # Target size and how to interpret it: 'factor', 'mm' or 'vox'
        self.new_size = ''
        self.new_size_type = ''
        # Interpolation method (set from -x)
        self.interpolation = 'linear'
        # Optional reference image to resample to (set from -ref)
        self.ref = None
        # Mapping from interpolation name to spline order
        self.x_to_order = {'nn': 0, 'linear': 1, 'spline': 2}
        # How to fill the points outside the boundaries of the input, possible options: constant, nearest, reflect or wrap
        # constant put the superior edges to 0, wrap does something weird with the superior edges, nearest and reflect are fine
        self.mode = 'reflect'
        self.file_suffix = '_resampled'  # output suffix
        self.verbose = 1
# Module-level parameter instance; main() mutates it before calling resample_file().
param = Param()
def get_parser():
    """Assemble and return the command-line parser for sct_resample."""
    arg_parser = SCTArgumentParser(
        description="Anisotropic resampling of 3D or 4D data."
    )
    grp_mandatory = arg_parser.add_argument_group("\nMANDATORY ARGUMENTS")
    grp_mandatory.add_argument(
        '-i',
        metavar=Metavar.file,
        required=True,
        help="Image to segment. Can be 3D or 4D. (Cannot be 2D) Example: dwi.nii.gz"
    )
    # One (and only one) of the three sizing options below is expected; this is
    # enforced in main(), not by argparse.
    grp_size = arg_parser.add_argument_group(
        "\nTYPE OF THE NEW SIZE INPUT: with a factor of resampling, in mm or in number of voxels\n"
        "Please choose only one of the 3 options"
    )
    grp_size.add_argument(
        '-f',
        metavar=Metavar.str,
        help="R|Resampling factor in each dimensions (x,y,z). Separate with 'x'. Example: 0.5x0.5x1\n"
             "For 2x upsampling, set to 2. For 2x downsampling set to 0.5"
    )
    grp_size.add_argument(
        '-mm',
        metavar=Metavar.str,
        help="New resolution in mm. Separate dimension with 'x'. Example: 0.1x0.1x5"
    )
    grp_size.add_argument(
        '-vox',
        metavar=Metavar.str,
        help="Resampling size in number of voxels in each dimensions (x,y,z). Separate with 'x'."
    )
    grp_optional = arg_parser.add_argument_group("\nOPTIONAL ARGUMENTS")
    grp_optional.add_argument(
        "-h",
        "--help",
        action="help",
        help="Show this help message and exit."
    )
    grp_optional.add_argument(
        '-ref',
        metavar=Metavar.file,
        help="Reference image to resample input image to. Uses world coordinates."
    )
    grp_optional.add_argument(
        '-x',
        choices=['nn', 'linear', 'spline'],
        default='linear',
        help="Interpolation method."
    )
    grp_optional.add_argument(
        '-o',
        metavar=Metavar.file,
        help="Output file name. Example: dwi_resampled.nii.gz"
    )
    grp_optional.add_argument(
        '-v',
        metavar=Metavar.int,
        type=int,
        choices=[0, 1, 2],
        default=1,
        # Values [0, 1, 2] map to logging levels [WARNING, INFO, DEBUG], but are also used as "if verbose == #" in API
        help="Verbosity. 0: Display only errors/warnings, 1: Errors/warnings + info messages, 2: Debug mode"
    )
    return arg_parser
def main(argv=None):
    """Entry point for sct_resample: parse arguments and resample the input image.

    Exactly one of -f / -mm / -vox / -ref must be provided; otherwise the parser
    errors out (printing usage to stderr and exiting with status 2).

    :param argv: command-line arguments (excluding the program name); None reads sys.argv.
    """
    parser = get_parser()
    arguments = parser.parse_args(argv)
    verbose = arguments.v
    set_loglevel(verbose=verbose)
    param.fname_data = arguments.i
    # Count how many sizing options were given. The previous if/elif chain could
    # only ever count one, which made the "ONLY one" check below unreachable and
    # silently ignored conflicting flags (e.g. '-f ... -mm ...').
    size_options = [(arguments.f, 'factor'), (arguments.mm, 'mm'), (arguments.vox, 'vox')]
    n_given = sum(value is not None for value, _ in size_options) + (arguments.ref is not None)
    if n_given == 0:
        # parser.error() prints the message to stderr and raises SystemExit(2);
        # the previous printv() wrapper around it was never reached.
        parser.error('ERROR: you need to specify one of those three arguments : -f, -mm or -vox')
    if n_given > 1:
        parser.error('ERROR: you need to specify ONLY one of those three arguments : -f, -mm or -vox')
    if arguments.ref is not None:
        param.ref = arguments.ref
    else:
        for value, size_type in size_options:
            if value is not None:
                param.new_size = value
                param.new_size_type = size_type
    if arguments.o is not None:
        param.fname_out = arguments.o
    # -x is restricted by the parser to {'nn', 'linear', 'spline'}; use it as-is.
    # (The previous code cast length-1 values to int, but no allowed choice has
    # length 1, so that branch was dead code — and int('nn') would raise anyway.)
    param.interpolation = arguments.x
    spinalcordtoolbox.resampling.resample_file(param.fname_data, param.fname_out, param.new_size, param.new_size_type,
                                               param.interpolation, param.verbose, fname_ref=param.ref)
if __name__ == "__main__":
    # CLI entry point: initialize the SCT environment, then run with sys.argv.
    init_sct()
    main(sys.argv[1:])
| mit | 2fa202c86dbe05da99dfdb74ef6a4657 | 31.532051 | 145 | 0.579507 | 3.745387 | false | false | false | false |
neuropoly/spinalcordtoolbox | spinalcordtoolbox/scripts/sct_register_to_template.py | 1 | 64537 | #!/usr/bin/env python
#########################################################################################
#
# Register anatomical image to the template using the spinal cord centerline/segmentation.
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2013 Polytechnique Montreal <www.neuro.polymtl.ca>
# Authors: Benjamin De Leener, Julien Cohen-Adad, Augustin Roux
#
# About the license: see the file LICENSE.TXT
#########################################################################################
# TODO: for -ref subject, crop data, otherwise registration is too long
# TODO: testing script for all cases
# TODO: enable vertebral alignment with -ref subject
import sys
import os
import time
import numpy as np
from spinalcordtoolbox.metadata import get_file_label
from spinalcordtoolbox.image import Image, add_suffix, generate_output_file, concat_warp2d
from spinalcordtoolbox.centerline.core import ParamCenterline, get_centerline
from spinalcordtoolbox.reports.qc import generate_qc
from spinalcordtoolbox.resampling import resample_file
from spinalcordtoolbox.math import dilate, binarize
from spinalcordtoolbox.registration.register import *
from spinalcordtoolbox.registration.landmarks import *
from spinalcordtoolbox.types import Coordinate
from spinalcordtoolbox.utils.fs import (copy, extract_fname, check_file_exist, rmtree,
cache_save, cache_signature, cache_valid)
from spinalcordtoolbox.utils.shell import (SCTArgumentParser, ActionCreateFolder, Metavar, list_type,
printv, display_viewer_syntax)
from spinalcordtoolbox.utils.sys import set_loglevel, init_sct
from spinalcordtoolbox import __data_dir__
import spinalcordtoolbox.image as msct_image
import spinalcordtoolbox.labels as sct_labels
from spinalcordtoolbox.scripts import sct_apply_transfo
from spinalcordtoolbox.scripts.sct_image import split_data
class Param:
    """Container for the default parameters of sct_register_to_template."""

    # The constructor
    def __init__(self):
        self.debug = 0
        self.remove_temp_files = 1  # remove temporary files
        # needed by the register() function of sct_register_multimodal
        self.fname_mask = ''
        # needed by the register() function of sct_register_multimodal
        self.padding = 10
        self.verbose = 1  # verbose
        # default template: the PAM50 shipped with SCT's data directory
        self.path_template = os.path.join(__data_dir__, 'PAM50')
        self.path_qc = None
        self.zsubsample = '0.25'
        self.rot_src = None
        self.rot_dest = None
# get default parameters
# Module-level default registration steps. These are displayed by the -param help
# text in get_parser() and re-built (possibly overridden by -param) in main().
# Note: step0 is used as pre-registration
step0 = Paramreg(step='0', type='label', dof='Tx_Ty_Tz_Sz')  # if ref=template, we only need translations and z-scaling because the cord is already straight
step1 = Paramreg(step='1', type='imseg', algo='centermassrot', rot_method='pcahog')
step2 = Paramreg(step='2', type='seg', algo='bsplinesyn', metric='MeanSquares', iter='3', smooth='1', slicewise='0')
paramregmulti = ParamregMultiStep([step0, step1, step2])
# PARSER
# ==========================================================================================
def get_parser():
    """Build the command-line argument parser for sct_register_to_template.

    The default values shown in the -param help text are read from the
    module-level `paramregmulti` steps.
    """
    param = Param()
    parser = SCTArgumentParser(
        description=(
            "Register an anatomical image to the spinal cord MRI template (default: PAM50).\n"
            "\n"
            "The registration process includes three main registration steps:\n"
            " 1. straightening of the image using the spinal cord segmentation (see sct_straighten_spinalcord for "
            "details);\n"
            " 2. vertebral alignment between the image and the template, using labels along the spine;\n"
            " 3. iterative slice-wise non-linear registration (see sct_register_multimodal for details)\n"
            "\n"
            "To register a subject to the template, try the default command:\n"
            " sct_register_to_template -i data.nii.gz -s data_seg.nii.gz -l data_labels.nii.gz\n"
            "\n"
            "If this default command does not produce satisfactory results, the '-param' "
            "argument should be tweaked according to the tips given here:\n"
            " https://spinalcordtoolbox.com/en/latest/user_section/command-line.html#sct-register-multimodal\n"
            "\n"
            "The default registration method brings the subject image to the template, which can be problematic with "
            "highly non-isotropic images as it would induce large interpolation errors during the straightening "
            "procedure. Although the default method is recommended, you may want to register the template to the "
            "subject (instead of the subject to the template) by skipping the straightening procedure. To do so, use "
            "the parameter '-ref subject'. Example below:\n"
            " sct_register_to_template -i data.nii.gz -s data_seg.nii.gz -l data_labels.nii.gz -ref subject -param "
            "step=1,type=seg,algo=centermassrot,smooth=0:step=2,type=seg,algo=columnwise,smooth=0,smoothWarpXY=2\n"
            "\n"
            "Vertebral alignment (step 2) consists in aligning the vertebrae between the subject and the template. "
            "Two types of labels are possible:\n"
            " - Vertebrae mid-body labels, created at the center of the spinal cord using the parameter '-l';\n"
            " - Posterior edge of the intervertebral discs, using the parameter '-ldisc'.\n"
            "\n"
            "If only one label is provided, a simple translation will be applied between the subject label and the "
            "template label. No scaling will be performed. \n"
            "\n"
            "If two labels are provided, a linear transformation (translation + rotation + superior-inferior linear "
            "scaling) will be applied. The strategy here is to defined labels that cover the region of interest. For "
            "example, if you are interested in studying C2 to C6 levels, then provide one label at C2 and another at "
            "C6. However, note that if the two labels are very far apart (e.g. C2 and T12), there might be a "
            "mis-alignment of discs because a subject''s intervertebral discs distance might differ from that of the "
            "template.\n"
            "\n"
            "If more than two labels (only with the parameter '-disc') are used, a non-linear registration will be "
            "applied to align the each intervertebral disc between the subject and the template, as described in "
            "sct_straighten_spinalcord. This the most accurate and preferred method. This feature does not work with "
            "the parameter '-ref subject', where only a rigid registration is performed.\n"
            "\n"
            "More information about label creation can be found at "
            "https://www.icloud.com/keynote/0th8lcatyVPkM_W14zpjynr5g#SCT%%5FCourse%%5F20200121 (p47)"
        )
    )
    mandatory = parser.add_argument_group("\nMANDATORY ARGUMENTS")
    mandatory.add_argument(
        '-i',
        metavar=Metavar.file,
        required=True,
        help="Input anatomical image. Example: anat.nii.gz"
    )
    mandatory.add_argument(
        '-s',
        metavar=Metavar.file,
        required=True,
        help="Spinal cord segmentation. Example: anat_seg.nii.gz"
    )
    optional = parser.add_argument_group("\nOPTIONAL ARGUMENTS")
    optional.add_argument(
        "-h",
        "--help",
        action="help",
        help="Show this help message and exit."
    )
    optional.add_argument(
        '-s-template-id',
        metavar=Metavar.int,
        type=int,
        help="Segmentation file ID to use for registration. The ID is an integer indicated in the file "
             "'template/info_label.txt'. This 'info_label.txt' file corresponds to the template indicated by the flag "
             "'-t'. By default, the spinal cord segmentation is used (ID=3), but if available, a different segmentation"
             " such as white matter segmentation could produce better registration results.",
        default=3
    )
    optional.add_argument(
        '-l',
        metavar=Metavar.file,
        help="R|One or two labels (preferred) located at the center of the spinal cord, on the mid-vertebral slice. "
             "Example: anat_labels.nii.gz\n"
             "For more information about label creation, please see: "
             "https://www.icloud.com/keynote/0th8lcatyVPkM_W14zpjynr5g#SCT%%5FCourse%%5F20200121 (p47)"
    )
    optional.add_argument(
        '-ldisc',
        metavar=Metavar.file,
        help="R|File containing disc labels. Labels can be located either at the posterior edge "
             "of the intervertebral discs, or at the orthogonal projection of each disc onto "
             "the spinal cord (e.g.: the file 'xxx_seg_labeled_discs.nii.gz' output by sct_label_vertebrae).\n"
             "If you are using more than 2 labels, all discs covering the region of interest should be provided. "
             "E.g., if you are interested in levels C2 to C7, then you should provide disc labels 2,3,4,5,6,7. "
             "For more information about label creation, please refer to "
             "https://www.icloud.com/keynote/0th8lcatyVPkM_W14zpjynr5g#SCT%%5FCourse%%5F20200121 (p47)"
    )
    optional.add_argument(
        '-lspinal',
        metavar=Metavar.file,
        help="R|Labels located in the center of the spinal cord, at the superior-inferior level corresponding to the "
             "mid-point of the spinal level. Example: anat_labels.nii.gz\n"
             "Each label is a single voxel, which value corresponds to the spinal level (e.g.: 2 for spinal level 2). "
             "If you are using more than 2 labels, all spinal levels covering the region of interest should be "
             "provided (e.g., if you are interested in levels C2 to C7, then you should provide spinal level labels "
             "2,3,4,5,6,7)."
    )
    optional.add_argument(
        '-ofolder',
        metavar=Metavar.folder,
        action=ActionCreateFolder,
        help="Output folder."
    )
    optional.add_argument(
        '-t',
        metavar=Metavar.folder,
        default=param.path_template,
        help="Path to template"
    )
    optional.add_argument(
        '-c',
        choices=['t1', 't2', 't2s'],
        default='t2',
        help="Contrast to use for registration."
    )
    optional.add_argument(
        '-ref',
        choices=['template', 'subject'],
        default='template',
        help="Reference for registration: template: subject->template, subject: template->subject."
    )
    optional.add_argument(
        '-param',
        metavar=Metavar.list,
        type=list_type(':', str),
        help=(f"R|Parameters for registration (see sct_register_multimodal). Default:"
              f"\n"
              f"step=0\n"
              f"  - type={paramregmulti.steps['0'].type}\n"
              f"  - dof={paramregmulti.steps['0'].dof}\n"
              f"\n"
              f"step=1\n"
              f"  - type={paramregmulti.steps['1'].type}\n"
              f"  - algo={paramregmulti.steps['1'].algo}\n"
              f"  - metric={paramregmulti.steps['1'].metric}\n"
              f"  - iter={paramregmulti.steps['1'].iter}\n"
              f"  - smooth={paramregmulti.steps['1'].smooth}\n"
              f"  - gradStep={paramregmulti.steps['1'].gradStep}\n"
              f"  - slicewise={paramregmulti.steps['1'].slicewise}\n"
              f"  - smoothWarpXY={paramregmulti.steps['1'].smoothWarpXY}\n"
              f"  - pca_eigenratio_th={paramregmulti.steps['1'].pca_eigenratio_th}\n"
              f"\n"
              f"step=2\n"
              f"  - type={paramregmulti.steps['2'].type}\n"
              f"  - algo={paramregmulti.steps['2'].algo}\n"
              f"  - metric={paramregmulti.steps['2'].metric}\n"
              f"  - iter={paramregmulti.steps['2'].iter}\n"
              f"  - smooth={paramregmulti.steps['2'].smooth}\n"
              f"  - gradStep={paramregmulti.steps['2'].gradStep}\n"
              f"  - slicewise={paramregmulti.steps['2'].slicewise}\n"
              f"  - smoothWarpXY={paramregmulti.steps['2'].smoothWarpXY}\n"
              # BUGFIX: the step=2 listing previously displayed steps['1'].pca_eigenratio_th
              # (copy-paste from the step=1 block above); it now shows step 2's own value.
              f"  - pca_eigenratio_th={paramregmulti.steps['2'].pca_eigenratio_th}")
    )
    optional.add_argument(
        '-centerline-algo',
        choices=['polyfit', 'bspline', 'linear', 'nurbs'],
        default=ParamCenterline().algo_fitting,
        help="Algorithm for centerline fitting (when straightening the spinal cord)."
    )
    optional.add_argument(
        '-centerline-smooth',
        metavar=Metavar.int,
        type=int,
        default=ParamCenterline().smooth,
        help="Degree of smoothing for centerline fitting. Only use with -centerline-algo {bspline, linear}."
    )
    optional.add_argument(
        '-qc',
        metavar=Metavar.folder,
        action=ActionCreateFolder,
        default=param.path_qc,
        help="The path where the quality control generated content will be saved."
    )
    optional.add_argument(
        '-qc-dataset',
        metavar=Metavar.str,
        help="If provided, this string will be mentioned in the QC report as the dataset the process was run on."
    )
    optional.add_argument(
        '-qc-subject',
        metavar=Metavar.str,
        help="If provided, this string will be mentioned in the QC report as the subject the process was run on."
    )
    optional.add_argument(
        '-igt',
        metavar=Metavar.file,
        help="File name of ground-truth template cord segmentation (binary nifti)."
    )
    optional.add_argument(
        '-r',
        metavar=Metavar.int,
        type=int,
        choices=[0, 1],
        default=param.remove_temp_files,
        help="Whether to remove temporary files. 0 = no, 1 = yes"
    )
    optional.add_argument(
        '-v',
        metavar=Metavar.int,
        type=int,
        choices=[0, 1, 2],
        default=1,
        # Values [0, 1, 2] map to logging levels [WARNING, INFO, DEBUG], but are also used as "if verbose == #" in API
        help="Verbosity. 0: Display only errors/warnings, 1: Errors/warnings + info messages, 2: Debug mode"
    )
    return parser
# MAIN
# ==========================================================================================
def main(argv=None):
parser = get_parser()
arguments = parser.parse_args(argv)
verbose = arguments.v
set_loglevel(verbose=verbose)
# initializations
param = Param()
fname_data = arguments.i
fname_seg = arguments.s
if arguments.l is not None:
fname_landmarks = arguments.l
label_type = 'body'
elif arguments.ldisc is not None:
fname_landmarks = arguments.ldisc
label_type = 'disc'
elif arguments.lspinal is not None:
fname_landmarks = arguments.lspinal
label_type = 'spinal'
else:
printv('ERROR: Labels should be provided.', 1, 'error')
if arguments.ofolder is not None:
path_output = arguments.ofolder
else:
path_output = ''
param.path_qc = arguments.qc
path_template = arguments.t
contrast_template = arguments.c
ref = arguments.ref
param.remove_temp_files = arguments.r
param.verbose = verbose # TODO: not clean, unify verbose or param.verbose in code, but not both
param_centerline = ParamCenterline(
algo_fitting=arguments.centerline_algo,
smooth=arguments.centerline_smooth)
# registration parameters
if arguments.param is not None:
# reset parameters but keep step=0 (might be overwritten if user specified step=0)
paramregmulti = ParamregMultiStep([step0])
if ref == 'subject':
paramregmulti.steps['0'].dof = 'Tx_Ty_Tz_Rx_Ry_Rz_Sz'
# add user parameters
for paramStep in arguments.param:
paramregmulti.addStep(paramStep)
else:
paramregmulti = ParamregMultiStep([step0, step1, step2])
# if ref=subject, initialize registration using different affine parameters
if ref == 'subject':
paramregmulti.steps['0'].dof = 'Tx_Ty_Tz_Rx_Ry_Rz_Sz'
# initialize other parameters
zsubsample = param.zsubsample
# retrieve template file names
if label_type == 'spinal':
# point-wise spinal level labels
file_template_labeling = get_file_label(os.path.join(path_template, 'template'), id_label=14)
elif label_type == 'disc':
# point-wise intervertebral disc labels
file_template_labeling = get_file_label(os.path.join(path_template, 'template'), id_label=10)
else:
# spinal cord mask with discrete vertebral levels
file_template_labeling = get_file_label(os.path.join(path_template, 'template'), id_label=7)
id_label_dct = {'T1': 0, 'T2': 1, 'T2S': 2}
file_template = get_file_label(os.path.join(path_template, 'template'), id_label=id_label_dct[contrast_template.upper()]) # label = *-weighted template
file_template_seg = get_file_label(os.path.join(path_template, 'template'), id_label=arguments.s_template_id)
# start timer
start_time = time.time()
# get fname of the template + template objects
fname_template = os.path.join(path_template, 'template', file_template)
fname_template_labeling = os.path.join(path_template, 'template', file_template_labeling)
fname_template_seg = os.path.join(path_template, 'template', file_template_seg)
# check file existence
# TODO: no need to do that!
printv('\nCheck template files...')
check_file_exist(fname_template, verbose)
check_file_exist(fname_template_labeling, verbose)
check_file_exist(fname_template_seg, verbose)
path_data, file_data, ext_data = extract_fname(fname_data)
# printv(arguments)
printv('\nCheck parameters:', verbose)
printv(' Data: ' + fname_data, verbose)
printv(' Landmarks: ' + fname_landmarks, verbose)
printv(' Segmentation: ' + fname_seg, verbose)
printv(' Path template: ' + path_template, verbose)
printv(' Remove temp files: ' + str(param.remove_temp_files), verbose)
# check input labels
labels = check_labels(fname_landmarks, label_type=label_type)
level_alignment = False
if len(labels) > 2 and label_type in ['disc', 'spinal']:
level_alignment = True
path_tmp = tmp_create(basename="register_to_template")
# set temporary file names
ftmp_data = 'data.nii'
ftmp_seg = 'seg.nii.gz'
ftmp_label = 'label.nii.gz'
ftmp_template = 'template.nii'
ftmp_template_seg = 'template_seg.nii.gz'
ftmp_template_label = 'template_label.nii.gz'
# copy files to temporary folder
printv('\nCopying input data to tmp folder and convert to nii...', verbose)
Image(fname_data).save(os.path.join(path_tmp, ftmp_data))
Image(fname_seg).save(os.path.join(path_tmp, ftmp_seg))
Image(fname_landmarks).save(os.path.join(path_tmp, ftmp_label))
Image(fname_template).save(os.path.join(path_tmp, ftmp_template))
Image(fname_template_seg).save(os.path.join(path_tmp, ftmp_template_seg))
Image(fname_template_labeling).save(os.path.join(path_tmp, ftmp_template_label))
# go to tmp folder
curdir = os.getcwd()
os.chdir(path_tmp)
# Generate labels from template vertebral labeling
if label_type == 'body':
printv('\nGenerate labels from template vertebral labeling', verbose)
ftmp_template_label_, ftmp_template_label = ftmp_template_label, add_suffix(ftmp_template_label, "_body")
sct_labels.label_vertebrae(Image(ftmp_template_label_)).save(path=ftmp_template_label)
# check if provided labels are available in the template
printv('\nCheck if provided labels are available in the template', verbose)
image_label_template = Image(ftmp_template_label)
labels_template = image_label_template.getNonZeroCoordinates(sorting='value')
if labels[-1].value > labels_template[-1].value:
printv('ERROR: Wrong landmarks input. Labels must have correspondence in template space. \nLabel max '
'provided: ' + str(labels[-1].value) + '\nLabel max from template: ' +
str(labels_template[-1].value), verbose, 'error')
# if only one label is present, force affine transformation to be Tx,Ty,Tz only (no scaling)
if len(labels) == 1:
paramregmulti.steps['0'].dof = 'Tx_Ty_Tz'
printv('WARNING: Only one label is present. Forcing initial transformation to: ' + paramregmulti.steps['0'].dof,
1, 'warning')
# Project labels onto the spinal cord centerline because later, an affine transformation is estimated between the
# template's labels (centered in the cord) and the subject's labels (assumed to be centered in the cord).
# If labels are not centered, mis-registration errors are observed (see issue #1826)
ftmp_label = project_labels_on_spinalcord(ftmp_label, ftmp_seg, param_centerline)
# binarize segmentation (in case it has values below 0 caused by manual editing)
printv('\nBinarize segmentation', verbose)
ftmp_seg_, ftmp_seg = ftmp_seg, add_suffix(ftmp_seg, "_bin")
img = Image(ftmp_seg_)
out = img.copy()
out.data = binarize(out.data, 0.5)
out.save(path=ftmp_seg)
# Switch between modes: subject->template or template->subject
if ref == 'template':
# resample data to 1mm isotropic
printv('\nResample data to 1mm isotropic...', verbose)
resample_file(ftmp_data, add_suffix(ftmp_data, '_1mm'), '1.0x1.0x1.0', 'mm', 'linear', verbose)
ftmp_data = add_suffix(ftmp_data, '_1mm')
resample_file(ftmp_seg, add_suffix(ftmp_seg, '_1mm'), '1.0x1.0x1.0', 'mm', 'linear', verbose)
ftmp_seg = add_suffix(ftmp_seg, '_1mm')
# N.B. resampling of labels is more complicated, because they are single-point labels, therefore resampling
# with nearest neighbour can make them disappear.
resample_labels(ftmp_label, ftmp_data, add_suffix(ftmp_label, '_1mm'))
ftmp_label = add_suffix(ftmp_label, '_1mm')
# Change orientation of input images to RPI
printv('\nChange orientation of input images to RPI...', verbose)
img_tmp_data = Image(ftmp_data).change_orientation("RPI")
ftmp_data = add_suffix(img_tmp_data.absolutepath, "_rpi")
img_tmp_data.save(path=ftmp_data, mutable=True)
img_tmp_seg = Image(ftmp_seg).change_orientation("RPI")
ftmp_seg = add_suffix(img_tmp_seg.absolutepath, "_rpi")
img_tmp_seg.save(path=ftmp_seg, mutable=True)
img_tmp_label = Image(ftmp_label).change_orientation("RPI")
ftmp_label = add_suffix(img_tmp_label.absolutepath, "_rpi")
img_tmp_label.save(ftmp_label, mutable=True)
ftmp_seg_, ftmp_seg = ftmp_seg, add_suffix(ftmp_seg, '_crop')
if level_alignment:
# cropping the segmentation based on the label coverage to ensure good registration with level alignment
# See https://github.com/spinalcordtoolbox/spinalcordtoolbox/pull/1669 for details
image_labels = Image(ftmp_label)
coordinates_labels = image_labels.getNonZeroCoordinates(sorting='z')
nx, ny, nz, nt, px, py, pz, pt = image_labels.dim
offset_crop = 10.0 * pz # cropping the image 10 mm above and below the highest and lowest label
cropping_slices = [coordinates_labels[0].z - offset_crop, coordinates_labels[-1].z + offset_crop]
# make sure that the cropping slices do not extend outside of the slice range (issue #1811)
if cropping_slices[0] < 0:
cropping_slices[0] = 0
if cropping_slices[1] > nz:
cropping_slices[1] = nz
msct_image.spatial_crop(Image(ftmp_seg_), dict(((2, np.int32(np.round(cropping_slices))),))).save(ftmp_seg)
else:
# if we do not align the vertebral levels, we crop the segmentation from top to bottom
im_seg_rpi = Image(ftmp_seg_)
bottom = 0
for data in msct_image.SlicerOneAxis(im_seg_rpi, "IS"):
if (data != 0).any():
break
bottom += 1
top = im_seg_rpi.data.shape[2]
for data in msct_image.SlicerOneAxis(im_seg_rpi, "SI"):
if (data != 0).any():
break
top -= 1
msct_image.spatial_crop(im_seg_rpi, dict(((2, (bottom, top)),))).save(ftmp_seg)
# straighten segmentation
printv('\nStraighten the spinal cord using centerline/segmentation...', verbose)
# check if warp_curve2straight and warp_straight2curve already exist (i.e. no need to do it another time)
fn_warp_curve2straight = os.path.join(curdir, "warp_curve2straight.nii.gz")
fn_warp_straight2curve = os.path.join(curdir, "warp_straight2curve.nii.gz")
fn_straight_ref = os.path.join(curdir, "straight_ref.nii.gz")
cache_input_files = [ftmp_seg]
if level_alignment:
cache_input_files += [
ftmp_template_seg,
ftmp_label,
ftmp_template_label,
]
cache_sig = cache_signature(
input_files=cache_input_files,
)
cachefile = os.path.join(curdir, "straightening.cache")
if cache_valid(cachefile, cache_sig) and os.path.isfile(fn_warp_curve2straight) and os.path.isfile(fn_warp_straight2curve) and os.path.isfile(fn_straight_ref):
printv('Reusing existing warping field which seems to be valid', verbose, 'warning')
copy(fn_warp_curve2straight, 'warp_curve2straight.nii.gz')
copy(fn_warp_straight2curve, 'warp_straight2curve.nii.gz')
copy(fn_straight_ref, 'straight_ref.nii.gz')
# apply straightening
sct_apply_transfo.main(argv=[
'-i', ftmp_seg,
'-w', 'warp_curve2straight.nii.gz',
'-d', 'straight_ref.nii.gz',
'-o', add_suffix(ftmp_seg, '_straight')])
else:
from spinalcordtoolbox.straightening import SpinalCordStraightener
sc_straight = SpinalCordStraightener(ftmp_seg, ftmp_seg)
sc_straight.param_centerline = param_centerline
sc_straight.output_filename = add_suffix(ftmp_seg, '_straight')
sc_straight.path_output = './'
sc_straight.qc = '0'
sc_straight.remove_temp_files = param.remove_temp_files
sc_straight.verbose = verbose
if level_alignment:
sc_straight.centerline_reference_filename = ftmp_template_seg
sc_straight.use_straight_reference = True
sc_straight.discs_input_filename = ftmp_label
sc_straight.discs_ref_filename = ftmp_template_label
sc_straight.straighten()
cache_save(cachefile, cache_sig)
# N.B. DO NOT UPDATE VARIABLE ftmp_seg BECAUSE TEMPORARY USED LATER
# re-define warping field using non-cropped space (to avoid issue #367)
dimensionality = len(Image(ftmp_data).hdr.get_data_shape())
cmd = ['isct_ComposeMultiTransform', f"{dimensionality}", 'warp_straight2curve.nii.gz', '-R', ftmp_data, 'warp_straight2curve.nii.gz']
status, output = run_proc(cmd, verbose=verbose, is_sct_binary=True)
if status != 0:
raise RuntimeError(f"Subprocess call {cmd} returned non-zero: {output}")
if level_alignment:
copy('warp_curve2straight.nii.gz', 'warp_curve2straightAffine.nii.gz')
else:
# Label preparation:
# --------------------------------------------------------------------------------
# Remove unused label on template. Keep only label present in the input label image
printv('\nRemove unused label on template. Keep only label present in the input label image...', verbose)
sct_labels.remove_missing_labels(Image(ftmp_template_label), Image(ftmp_label)).save(path=ftmp_template_label)
# Dilating the input label so they can be straighten without losing them
printv('\nDilating input labels using 3vox ball radius')
dilate(Image(ftmp_label), 3, 'ball').save(add_suffix(ftmp_label, '_dilate'))
ftmp_label = add_suffix(ftmp_label, '_dilate')
# Apply straightening to labels
printv('\nApply straightening to labels...', verbose)
sct_apply_transfo.main(argv=[
'-i', ftmp_label,
'-o', add_suffix(ftmp_label, '_straight'),
'-d', add_suffix(ftmp_seg, '_straight'),
'-w', 'warp_curve2straight.nii.gz',
'-x', 'nn'])
ftmp_label = add_suffix(ftmp_label, '_straight')
# Compute rigid transformation straight landmarks --> template landmarks
printv('\nEstimate transformation for step #0...', verbose)
try:
register_landmarks(ftmp_label, ftmp_template_label, paramregmulti.steps['0'].dof,
fname_affine='straight2templateAffine.txt', verbose=verbose)
except RuntimeError:
raise('Input labels do not seem to be at the right place. Please check the position of the labels. '
'See documentation for more details: https://www.icloud.com/keynote/0th8lcatyVPkM_W14zpjynr5g#SCT%5FCourse%5F20200121 (p47)')
# Concatenate transformations: curve --> straight --> affine
printv('\nConcatenate transformations: curve --> straight --> affine...', verbose)
dimensionality = len(Image("template.nii").hdr.get_data_shape())
cmd = ['isct_ComposeMultiTransform', f"{dimensionality}", 'warp_curve2straightAffine.nii.gz', '-R', 'template.nii', 'straight2templateAffine.txt', 'warp_curve2straight.nii.gz']
status, output = run_proc(cmd, verbose=verbose, is_sct_binary=True)
if status != 0:
raise RuntimeError(f"Subprocess call {cmd} returned non-zero: {output}")
# Apply transformation
printv('\nApply transformation...', verbose)
sct_apply_transfo.main(argv=[
'-i', ftmp_data,
'-o', add_suffix(ftmp_data, '_straightAffine'),
'-d', ftmp_template,
'-w', 'warp_curve2straightAffine.nii.gz'])
ftmp_data = add_suffix(ftmp_data, '_straightAffine')
sct_apply_transfo.main(argv=[
'-i', ftmp_seg,
'-o', add_suffix(ftmp_seg, '_straightAffine'),
'-d', ftmp_template,
'-w', 'warp_curve2straightAffine.nii.gz',
'-x', 'linear'])
ftmp_seg = add_suffix(ftmp_seg, '_straightAffine')
"""
# Benjamin: Issue from Allan Martin, about the z=0 slice that is screwed up, caused by the affine transform.
# Solution found: remove slices below and above landmarks to avoid rotation effects
points_straight = []
for coord in landmark_template:
points_straight.append(coord.z)
min_point, max_point = int(np.round(np.min(points_straight))), int(np.round(np.max(points_straight)))
ftmp_seg_, ftmp_seg = ftmp_seg, add_suffix(ftmp_seg, '_black')
msct_image.spatial_crop(Image(ftmp_seg_), dict(((2, (min_point,max_point)),))).save(ftmp_seg)
"""
# open segmentation
im = Image(ftmp_seg)
im_new = msct_image.empty_like(im)
# binarize
im_new.data = im.data > 0.5
# find min-max of anat2template (for subsequent cropping)
zmin_template, zmax_template = msct_image.find_zmin_zmax(im_new, threshold=0.5)
# save binarized segmentation
im_new.save(add_suffix(ftmp_seg, '_bin')) # unused?
# crop template in z-direction (for faster processing)
# TODO: refactor to use python module instead of doing i/o
printv('\nCrop data in template space (for faster processing)...', verbose)
ftmp_template_, ftmp_template = ftmp_template, add_suffix(ftmp_template, '_crop')
msct_image.spatial_crop(Image(ftmp_template_), dict(((2, (zmin_template, zmax_template)),))).save(ftmp_template)
ftmp_template_seg_, ftmp_template_seg = ftmp_template_seg, add_suffix(ftmp_template_seg, '_crop')
msct_image.spatial_crop(Image(ftmp_template_seg_), dict(((2, (zmin_template, zmax_template)),))).save(ftmp_template_seg)
ftmp_data_, ftmp_data = ftmp_data, add_suffix(ftmp_data, '_crop')
msct_image.spatial_crop(Image(ftmp_data_), dict(((2, (zmin_template, zmax_template)),))).save(ftmp_data)
ftmp_seg_, ftmp_seg = ftmp_seg, add_suffix(ftmp_seg, '_crop')
msct_image.spatial_crop(Image(ftmp_seg_), dict(((2, (zmin_template, zmax_template)),))).save(ftmp_seg)
# sub-sample in z-direction
# TODO: refactor to use python module instead of doing i/o
printv('\nSub-sample in z-direction (for faster processing)...', verbose)
run_proc(['sct_resample', '-i', ftmp_template, '-o', add_suffix(ftmp_template, '_sub'), '-f', '1x1x' + zsubsample], verbose)
ftmp_template = add_suffix(ftmp_template, '_sub')
run_proc(['sct_resample', '-i', ftmp_template_seg, '-o', add_suffix(ftmp_template_seg, '_sub'), '-f', '1x1x' + zsubsample], verbose)
ftmp_template_seg = add_suffix(ftmp_template_seg, '_sub')
run_proc(['sct_resample', '-i', ftmp_data, '-o', add_suffix(ftmp_data, '_sub'), '-f', '1x1x' + zsubsample], verbose)
ftmp_data = add_suffix(ftmp_data, '_sub')
run_proc(['sct_resample', '-i', ftmp_seg, '-o', add_suffix(ftmp_seg, '_sub'), '-f', '1x1x' + zsubsample], verbose)
ftmp_seg = add_suffix(ftmp_seg, '_sub')
# Registration straight spinal cord to template
printv('\nRegister straight spinal cord to template...', verbose)
# TODO: find a way to input initwarp, corresponding to straightening warp
# Set the angle of the template orientation to 0 (destination image)
for key in list(paramregmulti.steps.keys()):
paramregmulti.steps[key].rot_dest = 0
fname_src2dest, fname_dest2src, warp_forward, warp_inverse = register_wrapper(
ftmp_data, ftmp_template, param, paramregmulti, fname_src_seg=ftmp_seg, fname_dest_seg=ftmp_template_seg,
same_space=True)
# Concatenate transformations: anat --> template
printv('\nConcatenate transformations: anat --> template...', verbose)
dimensionality = len(Image("template.nii").hdr.get_data_shape())
cmd = ['isct_ComposeMultiTransform', f"{dimensionality}", 'warp_anat2template.nii.gz', '-R', 'template.nii', warp_forward, 'warp_curve2straightAffine.nii.gz']
status, output = run_proc(cmd, verbose=verbose, is_sct_binary=True)
if status != 0:
raise RuntimeError(f"Subprocess call {cmd} returned non-zero: {output}")
# Concatenate transformations: template --> anat
printv('\nConcatenate transformations: template --> anat...', verbose)
# TODO: make sure the commented code below is consistent with the new implementation
# warp_inverse.reverse()
if level_alignment:
dimensionality = len(Image("data.nii").hdr.get_data_shape())
cmd = ['isct_ComposeMultiTransform', f"{dimensionality}", 'warp_template2anat.nii.gz', '-R', 'data.nii', 'warp_straight2curve.nii.gz', warp_inverse]
status, output = run_proc(cmd, verbose=verbose, is_sct_binary=True)
if status != 0:
raise RuntimeError(f"Subprocess call {cmd} returned non-zero: {output}")
else:
dimensionality = len(Image("data.nii").hdr.get_data_shape())
cmd = ['isct_ComposeMultiTransform', f"{dimensionality}", 'warp_template2anat.nii.gz', '-R', 'data.nii', 'warp_straight2curve.nii.gz', '-i', 'straight2templateAffine.txt', warp_inverse]
status, output = run_proc(cmd, verbose=verbose, is_sct_binary=True)
if status != 0:
raise RuntimeError(f"Subprocess call {cmd} returned non-zero: {output}")
# register template->subject
elif ref == 'subject':
# Change orientation of input images to RPI
printv('\nChange orientation of input images to RPI...', verbose)
img_tmp_data = Image(ftmp_data).change_orientation("RPI")
ftmp_data = add_suffix(img_tmp_data.absolutepath, "_rpi")
img_tmp_data.save(path=ftmp_data, mutable=True)
img_tmp_seg = Image(ftmp_seg).change_orientation("RPI")
ftmp_seg = add_suffix(img_tmp_seg.absolutepath, "_rpi")
img_tmp_seg.save(path=ftmp_seg, mutable=True)
img_tmp_label = Image(ftmp_label).change_orientation("RPI")
ftmp_label = add_suffix(img_tmp_label.absolutepath, "_rpi")
img_tmp_label.save(ftmp_label, mutable=True)
# Remove unused label on template. Keep only label present in the input label image
printv('\nRemove unused label on template. Keep only label present in the input label image...', verbose)
sct_labels.remove_missing_labels(Image(ftmp_template_label), Image(ftmp_label)).save(path=ftmp_template_label)
# Add one label because at least 3 orthogonal labels are required to estimate an affine transformation.
add_orthogonal_label(ftmp_label)
add_orthogonal_label(ftmp_template_label)
# Set the angle of the template orientation to 0 (source image)
for key in list(paramregmulti.steps.keys()):
paramregmulti.steps[key].rot_src = 0
fname_src2dest, fname_dest2src, warp_forward, warp_inverse = register_wrapper(
ftmp_template, ftmp_data, param, paramregmulti, fname_src_seg=ftmp_template_seg, fname_dest_seg=ftmp_seg,
fname_src_label=ftmp_template_label, fname_dest_label=ftmp_label, same_space=False)
# Renaming for code compatibility
os.rename(warp_forward, 'warp_template2anat.nii.gz')
os.rename(warp_inverse, 'warp_anat2template.nii.gz')
# Apply warping fields to anat and template
run_proc(['sct_apply_transfo', '-i', 'template.nii', '-o', 'template2anat.nii.gz', '-d', 'data.nii', '-w', 'warp_template2anat.nii.gz', '-crop', '0'], verbose)
run_proc(['sct_apply_transfo', '-i', 'data.nii', '-o', 'anat2template.nii.gz', '-d', 'template.nii', '-w', 'warp_anat2template.nii.gz', '-crop', '0'], verbose)
# come back
os.chdir(curdir)
# Generate output files
printv('\nGenerate output files...', verbose)
fname_template2anat = os.path.join(path_output, 'template2anat' + ext_data)
fname_anat2template = os.path.join(path_output, 'anat2template' + ext_data)
generate_output_file(os.path.join(path_tmp, "warp_template2anat.nii.gz"), os.path.join(path_output, "warp_template2anat.nii.gz"), verbose=verbose)
generate_output_file(os.path.join(path_tmp, "warp_anat2template.nii.gz"), os.path.join(path_output, "warp_anat2template.nii.gz"), verbose=verbose)
generate_output_file(os.path.join(path_tmp, "template2anat.nii.gz"), fname_template2anat, verbose=verbose)
generate_output_file(os.path.join(path_tmp, "anat2template.nii.gz"), fname_anat2template, verbose=verbose)
if ref == 'template':
# copy straightening files in case subsequent SCT functions need them
generate_output_file(os.path.join(path_tmp, "warp_curve2straight.nii.gz"), os.path.join(path_output, "warp_curve2straight.nii.gz"), verbose=verbose)
generate_output_file(os.path.join(path_tmp, "warp_straight2curve.nii.gz"), os.path.join(path_output, "warp_straight2curve.nii.gz"), verbose=verbose)
generate_output_file(os.path.join(path_tmp, "straight_ref.nii.gz"), os.path.join(path_output, "straight_ref.nii.gz"), verbose=verbose)
# Delete temporary files
if param.remove_temp_files:
printv('\nDelete temporary files...', verbose)
rmtree(path_tmp, verbose=verbose)
# display elapsed time
elapsed_time = time.time() - start_time
printv('\nFinished! Elapsed time: ' + str(int(np.round(elapsed_time))) + 's', verbose)
qc_dataset = arguments.qc_dataset
qc_subject = arguments.qc_subject
if param.path_qc is not None:
generate_qc(fname_data, fname_in2=fname_template2anat, fname_seg=fname_seg, args=argv,
path_qc=os.path.abspath(param.path_qc), dataset=qc_dataset, subject=qc_subject,
process='sct_register_to_template')
display_viewer_syntax([fname_data, fname_template2anat], verbose=verbose)
display_viewer_syntax([fname_template, fname_anat2template], verbose=verbose)
def add_orthogonal_label(fname_label):
    """
    Add one label of value=99 at the axial slice that contains the label with the lowest value,
    offset by 5 mm along the x-axis (the code divides 5.0 by the x pixel size, in RPI orientation).
    NOTE(review): an earlier version of this docstring said "10 pixels to the right"; the inline
    TODO below suggests a 10-pixel offset is still intended — confirm before relying on the
    exact offset value.

    :param fname_label: file name of the label image; it is modified and overwritten in place
    :return: None (the label file on disk is overwritten)
    """
    im_label = Image(fname_label)
    orient_orig = im_label.orientation
    # For some reasons (#3304) calling self.change_orientation() replaces self.absolutepath with Null so we need to
    # save it.
    path_label = im_label.absolutepath
    im_label.change_orientation('RPI')
    coord_label = im_label.getCoordinatesAveragedByValue()  # N.B. landmarks are sorted by value
    # Create the new label by duplicating the label with the lowest value
    from copy import deepcopy
    new_label = deepcopy(coord_label[0])
    # Move it 5 mm along +x. NOTE(review): the original comment said "orientation is RAS", but the
    # image was just reoriented to RPI above, so +x here is RPI's x-axis — confirm intended direction.
    nx, ny, nz, nt, px, py, pz, pt = im_label.dim
    new_label.x = np.round(coord_label[0].x + 5.0 / px)  # TODO change to 10 pixels
    # assign value 99
    new_label.value = 99
    # Add to existing image (single-voxel label)
    im_label.data[int(new_label.x), int(new_label.y), int(new_label.z)] = new_label.value
    # Overwrite label file, restoring the original orientation first
    im_label.change_orientation(orient_orig)
    im_label.save(path_label)
def project_labels_on_spinalcord(fname_label, fname_seg, param_centerline):
    """
    Project labels orthogonally on the spinal cord centerline. The algorithm works by finding the smallest distance
    between each label and the spinal cord center of mass.
    :param fname_label: file name of labels
    :param fname_seg: file name of cord segmentation (could also be of centerline)
    :param param_centerline: centerline fitting parameters passed to get_centerline()
    :return: file name of projected labels
    """
    # build output name
    fname_label_projected = add_suffix(fname_label, "_projected")
    # open labels and segmentation, working in RPI orientation
    im_label = Image(fname_label).change_orientation("RPI")
    im_seg = Image(fname_seg)
    native_orient = im_seg.orientation
    im_seg.change_orientation("RPI")
    # smooth centerline and return fitted coordinates in voxel space
    _, arr_ctl, _, _ = get_centerline(im_seg, param_centerline)
    ctl_x, ctl_y, ctl_z = arr_ctl
    n_points = len(ctl_x)
    # convert each centerline point from voxel into physical coordinates
    ctl_phys = [im_seg.transfo_pix2phys([[ctl_x[k], ctl_y[k], ctl_z[k]]])[0] for k in range(n_points)]
    phys_x = [pt[0] for pt in ctl_phys]
    phys_y = [pt[1] for pt in ctl_phys]
    phys_z = [pt[2] for pt in ctl_phys]
    # one averaged coordinate per label value
    labels = im_label.getCoordinatesAveragedByValue()
    # initialize image of projected labels. Note that we use the space of the seg (not label).
    im_label_projected = msct_image.zeros_like(im_seg, dtype=np.uint8)
    # loop across label values
    for label in labels:
        # label position in physical coordinates
        lx, ly, lz = im_label.transfo_pix2phys([[label.x, label.y, label.z]])[0]
        # Euclidean distance from the label to every centerline point
        distances = [np.linalg.norm([phys_x[k] - lx, phys_y[k] - ly, phys_z[k] - lz])
                     for k in range(n_points)]
        # index of the closest centerline point
        idx_closest = np.argmin(distances)
        # closest centerline point, converted back to voxel space
        vx, vy, vz = im_seg.transfo_phys2pix([[phys_x[idx_closest],
                                               phys_y[idx_closest],
                                               phys_z[idx_closest]]])[0]
        # write the projected label at that centerline location
        im_label_projected.data[vx, vy, vz] = label.value
    # re-orient projected labels to native orientation and save
    im_label_projected.change_orientation(native_orient).save(fname_label_projected)
    return fname_label_projected
# Resample labels
# ==========================================================================================
def resample_labels(fname_labels, fname_dest, fname_output):
    """
    Re-create point labels in the space of a resampled image. Each label coordinate is rescaled
    by the ratio between the input and destination matrix sizes.
    IMPORTANT: this function assumes that the origin and FOV of the two images are the SAME.
    """
    # get matrix dimensions of input and destination files
    nx, ny, nz, _, _, _, _, _ = Image(fname_labels).dim
    nxd, nyd, nzd, _, _, _, _, _ = Image(fname_dest).dim
    # per-axis scaling: input matrix size / destination matrix size
    factors = (float(nx) / nxd, float(ny) / nyd, float(nz) / nzd)
    new_labels = []
    for x, y, z, v in Image(fname_labels).getNonZeroCoordinates():
        scaled = [int(np.round(int(c) / f)) for c, f in zip((x, y, z), factors)]
        new_labels.append(Coordinate(scaled + [int(float(v))]))
    # write the rescaled labels into an empty image in the destination space
    sct_labels.create_labels_empty(Image(fname_dest).change_type('uint8'), new_labels).save(path=fname_output)
def check_labels(fname_landmarks, label_type='body'):
    """
    Make sure input labels are consistent.

    Checks performed:
    - the file contains at least one label (an empty file would crash downstream code
      that indexes into the returned list)
    - for label_type='body', no more than two labels are present
    - every label value is an integer
    - no two labels share the same value

    Parameters
    ----------
    fname_landmarks: file name of input labels
    label_type: 'body', 'disc', 'spinal'

    Returns
    -------
    labels: list of non-zero coordinates, sorted by value
    """
    printv('\nCheck input labels...')
    # open label file
    image_label = Image(fname_landmarks)
    # get all labels sorted by value (callers rely on this ordering, e.g. labels[-1] is the max)
    labels = image_label.getNonZeroCoordinates(sorting='value')
    # at least one label is required, otherwise downstream processing fails with an obscure error
    if len(labels) == 0:
        printv('ERROR: Label file ' + fname_landmarks + ' does not contain any label.', 1, 'error')
    # for 'body' labels, at most two labels are allowed
    if label_type == 'body' and len(labels) > 2:
        printv('ERROR: Label file has ' + str(len(labels)) + ' label(s). It must contain one or two labels.', 1,
               'error')
    # check if labels are integer
    for label in labels:
        if not int(label.value) == label.value:
            printv('ERROR: Label should be integer.', 1, 'error')
    # check if there are duplicates in label values (set-based check avoids the O(n^2) scan)
    list_values = [label.value for label in labels]
    if len(set(list_values)) != len(list_values):
        printv('ERROR: Found two labels with same value.', 1, 'error')
    return labels
def register_wrapper(fname_src, fname_dest, param, paramregmulti, fname_src_seg='', fname_dest_seg='', fname_src_label='',
                     fname_dest_label='', fname_mask='', fname_initwarp='', fname_initwarpinv='', identity=False,
                     interp='linear', fname_output='', fname_output_warp='', path_out='', same_space=False):
    """
    Wrapper for image registration.

    Runs the multi-step registration defined by `paramregmulti` inside a temporary folder,
    concatenates the per-step warping fields into a single forward field (src->dest) and a
    single inverse field (dest->src) with isct_ComposeMultiTransform, applies both fields,
    and copies the resulting files back to the output folder.

    :param fname_src:
    :param fname_dest:
    :param param: Class Param(): See definition in sct_register_multimodal
    :param paramregmulti: Class ParamregMultiStep(): See definition in this file
    :param fname_src_seg:
    :param fname_dest_seg:
    :param fname_src_label:
    :param fname_dest_label:
    :param fname_mask:
    :param fname_initwarp: str: File name of initial transformation
    :param fname_initwarpinv: str: File name of initial inverse transformation. If empty while
                             fname_initwarp is set, no inverse warping field is generated.
    :param identity: bool: If True, replace all steps with a single 0-iteration transformation
    :param interp: str: Interpolation used when applying the final concatenated warping fields
    :param fname_output:
    :param fname_output_warp:
    :param path_out:
    :param same_space: Bool: Source and destination images are in the same physical space (i.e. same coordinates).
    :return: fname_src2dest, fname_dest2src, fname_output_warp, fname_output_warpinv
             (the last three may be None when no inverse field is generated)
    """
    # TODO: move interp inside param.
    # TODO: merge param inside paramregmulti by having a "global" sets of parameters that apply to all steps
    # Extract path, file and extension
    path_src, file_src, ext_src = extract_fname(fname_src)
    path_dest, file_dest, ext_dest = extract_fname(fname_dest)
    # check if source and destination images have the same name (related to issue #373)
    # If so, change names to avoid conflict of result files and warns the user
    suffix_src, suffix_dest = '_reg', '_reg'
    if file_src == file_dest:
        suffix_src, suffix_dest = '_src_reg', '_dest_reg'
    # define output folder and file name
    if fname_output == '':
        path_out = '' if not path_out else path_out  # output in user's current directory
        file_out = file_src + suffix_src
        file_out_inv = file_dest + suffix_dest
        ext_out = ext_src
    else:
        path, file_out, ext_out = extract_fname(fname_output)
        path_out = path if not path_out else path_out
        file_out_inv = file_out + '_inv'
    # create temporary folder
    path_tmp = tmp_create(basename="register")
    printv('\nCopying input data to tmp folder and convert to nii...', param.verbose)
    Image(fname_src).save(os.path.join(path_tmp, "src.nii"))
    Image(fname_dest).save(os.path.join(path_tmp, "dest.nii"))
    if fname_src_seg:
        Image(fname_src_seg).save(os.path.join(path_tmp, "src_seg.nii"))
    if fname_dest_seg:
        Image(fname_dest_seg).save(os.path.join(path_tmp, "dest_seg.nii"))
    if fname_src_label:
        # N.B. when a source label is provided, a destination label is assumed to be provided too
        Image(fname_src_label).save(os.path.join(path_tmp, "src_label.nii"))
        Image(fname_dest_label).save(os.path.join(path_tmp, "dest_label.nii"))
    if fname_mask != '':
        Image(fname_mask).save(os.path.join(path_tmp, "mask.nii.gz"))
    # go to tmp folder (restored via os.chdir(curdir) before generating output files)
    curdir = os.getcwd()
    os.chdir(path_tmp)
    # reorient destination to RPI
    Image('dest.nii').change_orientation("RPI").save('dest_RPI.nii')
    if fname_dest_seg:
        Image('dest_seg.nii').change_orientation("RPI").save('dest_seg_RPI.nii')
    if fname_dest_label:
        Image('dest_label.nii').change_orientation("RPI").save('dest_label_RPI.nii')
    if fname_mask:
        # TODO: change output name
        Image('mask.nii.gz').change_orientation("RPI").save('mask.nii.gz')
    if identity:
        # overwrite paramregmulti and only do one identity transformation
        step0 = Paramreg(step='0', type='im', algo='syn', metric='MI', iter='0', shrink='1', smooth='0', gradStep='0.5')
        paramregmulti = ParamregMultiStep([step0])
    # initialize list of warping fields
    # NOTE: the *_winv lists hold fields that must be inverted by isct_ComposeMultiTransform
    # (passed after '-i'); they are filtered out of the regular lists at concatenation time.
    warp_forward = []
    warp_forward_winv = []
    warp_inverse = []
    warp_inverse_winv = []
    generate_warpinv = 1
    # initial warping is specified, update list of warping fields and skip step=0
    if fname_initwarp:
        printv('\nSkip step=0 and replace with initial transformations: ', param.verbose)
        printv('  ' + fname_initwarp, param.verbose)
        # copy(fname_initwarp, 'warp_forward_0.nii.gz')
        warp_forward.append(fname_initwarp)
        start_step = 1
        if fname_initwarpinv:
            warp_inverse.append(fname_initwarpinv)
        else:
            printv('\nWARNING: No initial inverse warping field was specified, therefore the registration will be '
                   'src->dest only, and the inverse warping field will NOT be generated.', param.verbose, 'warning')
            generate_warpinv = 0
    else:
        # when src and dest share the same physical space, step 0 (label/affine init) is skipped
        if same_space:
            start_step = 1
        else:
            start_step = 0
    # loop across registration steps
    for i_step in range(start_step, len(paramregmulti.steps)):
        step = paramregmulti.steps[str(i_step)]
        printv('\n--\nESTIMATE TRANSFORMATION FOR STEP #' + str(i_step), param.verbose)
        # identify which is the src and dest
        if step.type == 'im':
            src = ['src.nii']
            dest = ['dest_RPI.nii']
            interp_step = ['spline']
        elif step.type == 'seg':
            src = ['src_seg.nii']
            dest = ['dest_seg_RPI.nii']
            interp_step = ['nn']
        elif step.type == 'imseg':
            src = ['src.nii', 'src_seg.nii']
            dest = ['dest_RPI.nii', 'dest_seg_RPI.nii']
            interp_step = ['spline', 'nn']
        elif step.type == 'label':
            src = ['src_label.nii']
            dest = ['dest_label_RPI.nii']
            interp_step = ['nn']
        else:
            printv('ERROR: Wrong image type: {}'.format(step.type), 1, 'error')
        # if step>0, apply warp_forward_concat to the src image to be used
        if (not same_space and i_step > 0) or (same_space and i_step > 1):
            printv('\nApply transformation from previous step', param.verbose)
            for ifile in range(len(src)):
                sct_apply_transfo.main(argv=[
                    '-i', src[ifile],
                    '-d', dest[ifile],
                    '-o', add_suffix(src[ifile], '_reg'),
                    '-x', interp_step[ifile],
                    '-w'] + warp_forward
                )
                src[ifile] = add_suffix(src[ifile], '_reg')
        # register src --> dest
        warp_forward_out, warp_inverse_out = register(src=src, dest=dest, step=step, param=param)
        # deal with transformations with "-" as prefix. They should be inverted with calling isct_ComposeMultiTransform.
        if warp_forward_out[0] == "-":
            warp_forward_out = warp_forward_out[1:]
            warp_forward_winv.append(warp_forward_out)
        if warp_inverse_out[0] == "-":
            warp_inverse_out = warp_inverse_out[1:]
            warp_inverse_winv.append(warp_inverse_out)
        # update list of forward/inverse transformations
        # (forward fields append at the end; inverse fields prepend so composition order is reversed)
        warp_forward.append(warp_forward_out)
        warp_inverse.insert(0, warp_inverse_out)
    # Concatenate transformations
    printv('\nConcatenate transformations...', param.verbose)
    # if a warping field needs to be inverted, remove it from warp_forward
    warp_forward = [f for f in warp_forward if f not in warp_forward_winv]
    dimensionality = len(Image("dest.nii").hdr.get_data_shape())
    cmd = ['isct_ComposeMultiTransform', f"{dimensionality}", 'warp_src2dest.nii.gz', '-R', 'dest.nii']
    if warp_forward_winv:
        cmd.append('-i')
        cmd += reversed(warp_forward_winv)
    if warp_forward:
        cmd += reversed(warp_forward)
    status, output = run_proc(cmd, is_sct_binary=True)
    if status != 0:
        raise RuntimeError(f"Subprocess call {cmd} returned non-zero: {output}")
    # if an inverse warping field needs to be inverted, remove it from warp_inverse_winv
    warp_inverse = [f for f in warp_inverse if f not in warp_inverse_winv]
    cmd = ['isct_ComposeMultiTransform', f"{dimensionality}", 'warp_dest2src.nii.gz', '-R', 'src.nii']
    # NOTE(review): `dimensionality` is re-computed below AFTER it has already been used in `cmd`
    # above; the value is identical (same "dest.nii"), so this line appears redundant — confirm.
    dimensionality = len(Image("dest.nii").hdr.get_data_shape())
    if warp_inverse_winv:
        cmd.append('-i')
        cmd += reversed(warp_inverse_winv)
    if warp_inverse:
        cmd += reversed(warp_inverse)
    status, output = run_proc(cmd, is_sct_binary=True)
    if status != 0:
        raise RuntimeError(f"Subprocess call {cmd} returned non-zero: {output}")
    # TODO: make the following code optional (or move it to sct_register_multimodal)
    # Apply warping field to src data
    printv('\nApply transfo source --> dest...', param.verbose)
    sct_apply_transfo.main(argv=[
        '-i', 'src.nii',
        '-d', 'dest.nii',
        '-w', 'warp_src2dest.nii.gz',
        '-o', 'src_reg.nii',
        '-x', interp])
    if generate_warpinv:
        printv('\nApply transfo dest --> source...', param.verbose)
        sct_apply_transfo.main(argv=[
            '-i', 'dest.nii',
            '-d', 'src.nii',
            '-w', 'warp_dest2src.nii.gz',
            '-o', 'dest_reg.nii',
            '-x', interp])
    # come back
    os.chdir(curdir)
    # Generate output files
    # ------------------------------------------------------------------------------------------------------------------
    printv('\nGenerate output files...', param.verbose)
    # generate src -> dest output files
    fname_src2dest = os.path.join(path_out, file_out + ext_out)
    generate_output_file(os.path.join(path_tmp, "src_reg.nii"), fname_src2dest, param.verbose)
    if fname_output_warp == '':
        fname_output_warp = os.path.join(path_out, 'warp_' + file_src + '2' + file_dest + '.nii.gz')
    generate_output_file(os.path.join(path_tmp, "warp_src2dest.nii.gz"), fname_output_warp, param.verbose)
    # generate dest -> src output files
    if generate_warpinv:
        fname_dest2src = os.path.join(path_out, file_out_inv + ext_dest)
        generate_output_file(os.path.join(path_tmp, "dest_reg.nii"), fname_dest2src, param.verbose)
        fname_output_warpinv = os.path.join(path_out, 'warp_' + file_dest + '2' + file_src + '.nii.gz')
        generate_output_file(os.path.join(path_tmp, "warp_dest2src.nii.gz"), fname_output_warpinv, param.verbose)
    else:
        # we skip generating files if there is no inverse warping field (i.e. we're doing a one-way registration)
        fname_dest2src = None
        fname_output_warpinv = None
    # Delete temporary files
    if param.remove_temp_files:
        printv('\nRemove temporary files...', param.verbose)
        rmtree(path_tmp, verbose=param.verbose)
    return fname_src2dest, fname_dest2src, fname_output_warp, fname_output_warpinv
# register images
# ==========================================================================================
def register(src, dest, step, param):
    """
    Register src onto dest image. Output affine transformations that need to be inverted will have the prefix "-".

    Dispatches to one registration backend depending on ``step.type`` / ``step.algo``
    (label-based, slice-regularized, 3D ANTs, 2D slicewise ANTs, or slicewise transforms),
    then checks that the expected warping fields exist and renames them with a
    step-indexed name.

    :param src: source image filename(s); a list, reduced to a single filename unless step.type == 'imseg'
    :param dest: destination image filename(s); same convention as src
    :param step: registration step object carrying all algorithm parameters (type, algo, metric, iter, ...)
    :param param: global parameters (verbose, fname_mask, padding, remove_temp_files, ...)
    :return: (warp_forward, warp_inverse) filenames. The inverse name is prefixed with "-"
             for affine-like transforms, meaning it must be inverted by the caller.
    """
    # initiate default parameters of antsRegistration transformation
    # keys: algo name (lowercase); values: extra ANTs parameters appended to the transform spec
    ants_registration_params = {'rigid': '', 'affine': '', 'compositeaffine': '', 'similarity': '', 'translation': '',
                                'bspline': ',10', 'gaussiandisplacementfield': ',3,0',
                                'bsplinedisplacementfield': ',5,10', 'syn': ',3,0', 'bsplinesyn': ',1,3'}
    output = ''  # default output if problem
    # If the input type is either im or seg, we can convert the input list into a string for improved code clarity
    if not step.type == 'imseg':
        src = src[0]
        dest = dest[0]
    # display arguments
    printv('Registration parameters:', param.verbose)
    printv(' type ........... ' + step.type, param.verbose)
    printv(' algo ........... ' + step.algo, param.verbose)
    printv(' slicewise ...... ' + step.slicewise, param.verbose)
    printv(' metric ......... ' + step.metric, param.verbose)
    printv(' samplStrategy .. ' + step.samplingStrategy, param.verbose)
    printv(' samplPercent ... ' + step.samplingPercentage, param.verbose)
    printv(' iter ........... ' + step.iter, param.verbose)
    printv(' smooth ......... ' + step.smooth, param.verbose)
    printv(' laplacian ...... ' + step.laplacian, param.verbose)
    printv(' shrink ......... ' + step.shrink, param.verbose)
    printv(' gradStep ....... ' + step.gradStep, param.verbose)
    printv(' deformation .... ' + step.deformation, param.verbose)
    printv(' init ........... ' + step.init, param.verbose)
    printv(' poly ........... ' + step.poly, param.verbose)
    printv(' filter_size .... ' + str(step.filter_size), param.verbose)
    printv(' dof ............ ' + step.dof, param.verbose)
    printv(' smoothWarpXY ... ' + step.smoothWarpXY, param.verbose)
    printv(' rot_method ..... ' + step.rot_method, param.verbose)
    # set metricSize
    if step.metric == 'MI':
        metricSize = '32'  # corresponds to number of bins
    else:
        metricSize = '4'  # corresponds to radius (for CC, MeanSquares...)
    # set masking
    if param.fname_mask:
        fname_mask = 'mask.nii.gz'
        masking = ['-x', 'mask.nii.gz']
    else:
        fname_mask = ''
        masking = []
    # # landmark-based registration
    if step.type in ['label']:
        warp_forward_out, warp_inverse_out = register_step_label(
            src=src,
            dest=dest,
            step=step,
            verbose=param.verbose,
        )
    elif step.algo == 'slicereg':
        warp_forward_out, warp_inverse_out, _ = register_step_ants_slice_regularized_registration(
            src=src,
            dest=dest,
            step=step,
            metricSize=metricSize,
            fname_mask=fname_mask,
            verbose=param.verbose,
        )
    # ANTS 3d
    elif step.algo.lower() in ants_registration_params and step.slicewise == '0':  # FIXME [AJ]
        warp_forward_out, warp_inverse_out = register_step_ants_registration(
            src=src,
            dest=dest,
            step=step,
            masking=masking,
            ants_registration_params=ants_registration_params,
            padding=param.padding,
            metricSize=metricSize,
            verbose=param.verbose,
        )
    # ANTS 2d
    elif step.algo.lower() in ants_registration_params and step.slicewise == '1':  # FIXME [AJ]
        warp_forward_out, warp_inverse_out = register_step_slicewise_ants(
            src=src,
            dest=dest,
            step=step,
            ants_registration_params=ants_registration_params,
            fname_mask=fname_mask,
            remove_temp_files=param.remove_temp_files,
            verbose=param.verbose,
        )
    # slice-wise transfo
    elif step.algo in ['centermass', 'centermassrot', 'columnwise']:
        # check if user provided a mask-- if so, inform it will be ignored
        if fname_mask:
            printv('\nWARNING: algo ' + step.algo + ' will ignore the provided mask.\n', 1, 'warning')
        warp_forward_out, warp_inverse_out = register_step_slicewise(
            src=src,
            dest=dest,
            step=step,
            ants_registration_params=ants_registration_params,
            remove_temp_files=param.remove_temp_files,
            verbose=param.verbose,
        )
    else:
        # printv with 'error' raises, so no backend fell through silently
        printv('\nERROR: algo ' + step.algo + ' does not exist. Exit program\n', 1, 'error')

    # Sanity-check that the backend produced the expected warping fields on disk
    if not os.path.isfile(warp_forward_out):
        # no forward warping field for rigid and affine
        printv('\nERROR: file ' + warp_forward_out + ' doesn\'t exist (or is not a file).\n' + output +
               '\nERROR: ANTs failed. Exit program.\n', 1, 'error')
    elif not os.path.isfile(warp_inverse_out) and \
            step.algo not in ['rigid', 'affine', 'translation'] and \
            step.type not in ['label']:
        # no inverse warping field for rigid and affine
        printv('\nERROR: file ' + warp_inverse_out + ' doesn\'t exist (or is not a file).\n' + output +
               '\nERROR: ANTs failed. Exit program.\n', 1, 'error')
    else:
        # rename warping fields
        if (step.algo.lower() in ['rigid', 'affine', 'translation'] and
                step.slicewise == '0'):
            # if ANTs is used with affine/rigid --> outputs .mat file
            warp_forward = 'warp_forward_' + str(step.step) + '.mat'
            os.rename(warp_forward_out, warp_forward)
            # "-" prefix signals to the caller that this transform must be inverted
            warp_inverse = '-warp_forward_' + str(step.step) + '.mat'
        elif step.type in ['label']:
            # if label-based registration is used --> outputs .txt file
            warp_forward = 'warp_forward_' + str(step.step) + '.txt'
            os.rename(warp_forward_out, warp_forward)
            warp_inverse = '-warp_forward_' + str(step.step) + '.txt'
        else:
            warp_forward = 'warp_forward_' + str(step.step) + '.nii.gz'
            warp_inverse = 'warp_inverse_' + str(step.step) + '.nii.gz'
            os.rename(warp_forward_out, warp_forward)
            os.rename(warp_inverse_out, warp_inverse)
    # NOTE(review): if the error branches above did not raise (printv 'error' is
    # expected to raise/exit — TODO confirm), warp_forward would be unbound here.
    return warp_forward, warp_inverse
# Script entry point: initialize SCT (logging/environment) then run the CLI
# with the user-supplied arguments (program name stripped).
if __name__ == "__main__":
    init_sct()
    main(sys.argv[1:])
| mit | 20d16d03c3a85c59901e405eb29d9ed1 | 47.414854 | 197 | 0.620249 | 3.565777 | false | false | false | false |
neuropoly/spinalcordtoolbox | spinalcordtoolbox/scripts/sct_process_segmentation.py | 1 | 19795 | #!/usr/bin/env python
#########################################################################################
#
# Perform various types of processing from the spinal cord segmentation (e.g. extract centerline, compute CSA, etc.).
# (extract_centerline) extract the spinal cord centerline from the segmentation. Output file is an image in the same
# space as the segmentation.
#
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2014 Polytechnique Montreal <www.neuro.polymtl.ca>
# Author: Benjamin De Leener, Julien Touati, Gabriel Mangeat
# Modified: 2014-07-20 by jcohenadad
#
# About the license: see the file LICENSE.TXT
#########################################################################################
# TODO: the import of scipy.misc imsave was moved to the specific cases (orth and ellipse) in order to avoid issue #62. This has to be cleaned in the future.
import sys
import os
import logging
import numpy as np
from matplotlib.ticker import MaxNLocator
from spinalcordtoolbox.aggregate_slicewise import aggregate_per_slice_or_level, save_as_csv, func_wa, func_std, \
func_sum, merge_dict
from spinalcordtoolbox.process_seg import compute_shape
from spinalcordtoolbox.scripts import sct_maths
from spinalcordtoolbox.csa_pmj import get_slices_for_pmj_distance
from spinalcordtoolbox.centerline.core import ParamCenterline
from spinalcordtoolbox.image import add_suffix, splitext
from spinalcordtoolbox.reports.qc import generate_qc
from spinalcordtoolbox.utils.shell import SCTArgumentParser, Metavar, ActionCreateFolder, parse_num_list, display_open
from spinalcordtoolbox.utils.sys import init_sct, set_loglevel
from spinalcordtoolbox.utils.fs import get_absolute_path
logger = logging.getLogger(__name__)
def get_parser():
    """
    :return: Returns the parser with the command line documentation contained in it.
    """
    # Initialize the parser
    parser = SCTArgumentParser(
        description=(
            "Compute the following morphometric measures based on the spinal cord segmentation:\n"
            "  - area [mm^2]: Cross-sectional area, measured by counting pixels in each slice. Partial volume can be "
            "accounted for by inputing a mask comprising values within [0,1].\n"
            "  - angle_AP, angle_RL: Estimated angle between the cord centerline and the axial slice. This angle is "
            "used to correct for morphometric information.\n"
            "  - diameter_AP, diameter_RL: Finds the major and minor axes of the cord and measure their length.\n"
            "  - eccentricity: Eccentricity of the ellipse that has the same second-moments as the spinal cord. "
            "The eccentricity is the ratio of the focal distance (distance between focal points) over the major axis "
            "length. The value is in the interval [0, 1). When it is 0, the ellipse becomes a circle.\n"
            "  - orientation: angle (in degrees) between the AP axis of the spinal cord and the AP axis of the "
            "image\n"
            "  - solidity: CSA(spinal_cord) / CSA_convex(spinal_cord). If perfect ellipse, it should be one. This "
            "metric is interesting for detecting non-convex shape (e.g., in case of strong compression)\n"
            "  - length: Length of the segmentation, computed by summing the slice thickness (corrected for the "
            "centerline angle at each slice) across the specified superior-inferior region.\n"
            "\n"
            "To select the region to compute metrics over, choose one of the following arguments:\n"
            "  1. '-z': Select axial slices based on slice index.\n"
            "  2. '-pmj' + '-pmj-distance' + '-pmj-extent': Select axial slices based on distance from pontomedullary "
            "junction.\n"
            "     (For options 1 and 2, you can also add '-perslice' to compute metrics for each axial slice, rather "
            "than averaging.)\n"
            "  3. '-vert' + '-vertfile': Select a region based on vertebral labels instead of individual slices.\n"
            "     (For option 3, you can also add '-perlevel' to compute metrics for each vertebral level, rather "
            "than averaging.)"
        )
    )

    mandatory = parser.add_argument_group("\nMANDATORY ARGUMENTS")
    mandatory.add_argument(
        '-i',
        metavar=Metavar.file,
        required=True,
        # BUGFIX: added the missing space between the two adjacent string literals;
        # the help text previously rendered as "...segmentation.Example: seg.nii.gz"
        help="Mask to compute morphometrics from. Could be binary or weighted. E.g., spinal cord segmentation. "
             "Example: seg.nii.gz"
    )

    optional = parser.add_argument_group("\nOPTIONAL ARGUMENTS")
    optional.add_argument(
        "-h",
        "--help",
        action="help",
        help="Show this help message and exit."
    )
    optional.add_argument(
        '-o',
        metavar=Metavar.file,
        help="Output file name (add extension). Default: csa.csv."
    )
    optional.add_argument(
        '-append',
        metavar=Metavar.int,
        type=int,
        choices=[0, 1],
        default=0,
        help="Append results as a new line in the output csv file instead of overwriting it."
    )
    optional.add_argument(
        '-z',
        metavar=Metavar.str,
        type=str,
        help="Slice range to compute the metrics across. Example: 5:23"
    )
    optional.add_argument(
        '-perslice',
        metavar=Metavar.int,
        type=int,
        choices=[0, 1],
        default=0,
        help="Set to 1 to output one metric per slice instead of a single output metric. Please note that when "
             "methods ml or map is used, outputing a single metric per slice and then averaging them all is not the "
             "same as outputting a single metric at once across all slices."
    )
    optional.add_argument(
        '-vert',
        metavar=Metavar.str,
        help="Vertebral levels to compute the metrics across. Example: 2:9 for C2 to T2."
    )
    optional.add_argument(
        '-vertfile',
        metavar=Metavar.str,
        default='./label/template/PAM50_levels.nii.gz',
        help="R|Vertebral labeling file. Only use with flag -vert.\n"
             "The input and the vertebral labelling file must in the same voxel coordinate system "
             "and must match the dimensions between each other. "
    )
    optional.add_argument(
        '-perlevel',
        metavar=Metavar.int,
        type=int,
        choices=[0, 1],
        default=0,
        help="Set to 1 to output one metric per vertebral level instead of a single output metric. This flag needs "
             "to be used with flag -vert."
    )
    optional.add_argument(
        '-r',
        metavar=Metavar.int,
        type=int,
        choices=[0, 1],
        default=1,
        help="Removes temporary folder used for the algorithm at the end of execution."
    )
    optional.add_argument(
        '-angle-corr',
        metavar=Metavar.int,
        type=int,
        choices=[0, 1],
        default=1,
        help="Angle correction for computing morphometric measures. When angle correction is used, the cord within "
             "the slice is stretched/expanded by a factor corresponding to the cosine of the angle between the "
             "centerline and the axial plane. If the cord is already quasi-orthogonal to the slab, you can set "
             "-angle-corr to 0."
    )
    optional.add_argument(
        '-centerline-algo',
        choices=['polyfit', 'bspline', 'linear', 'nurbs'],
        default='bspline',
        help="Algorithm for centerline fitting. Only relevant with -angle-corr 1."
    )
    optional.add_argument(
        '-centerline-smooth',
        metavar=Metavar.int,
        type=int,
        default=30,
        help="Degree of smoothing for centerline fitting. Only use with -centerline-algo {bspline, linear}."
    )
    optional.add_argument(
        '-pmj',
        metavar=Metavar.file,
        help="Ponto-Medullary Junction (PMJ) label file. "
             "Example: pmj.nii.gz"
    )
    optional.add_argument(
        '-pmj-distance',
        type=float,
        metavar=Metavar.float,
        help="Distance (mm) from Ponto-Medullary Junction (PMJ) to the center of the mask used to compute morphometric "
             "measures. (To be used with flag '-pmj'.)"
    )
    optional.add_argument(
        '-pmj-extent',
        type=float,
        metavar=Metavar.float,
        default=20,
        help="Extent (in mm) for the mask used to compute morphometric measures. Each slice covered by the mask is "
             "included in the calculation. (To be used with flag '-pmj' and '-pmj-distance'.)"
    )
    optional.add_argument(
        '-qc',
        metavar=Metavar.folder,
        action=ActionCreateFolder,
        help="The path where the quality control generated content will be saved."
             " The QC report is only available for PMJ-based CSA (with flag '-pmj')."
    )
    optional.add_argument(
        '-qc-image',
        metavar=Metavar.str,
        help="Input image to display in QC report. Typically, it would be the "
             "source anatomical image used to generate the spinal cord "
             "segmentation. This flag is mandatory if using flag '-qc'."
    )
    optional.add_argument(
        '-qc-dataset',
        metavar=Metavar.str,
        help="If provided, this string will be mentioned in the QC report as the dataset the process was run on."
    )
    optional.add_argument(
        '-qc-subject',
        metavar=Metavar.str,
        help="If provided, this string will be mentioned in the QC report as the subject the process was run on."
    )
    optional.add_argument(
        '-v',
        metavar=Metavar.int,
        type=int,
        choices=[0, 1, 2],
        default=1,
        # Values [0, 1, 2] map to logging levels [WARNING, INFO, DEBUG], but are also used as "if verbose == #" in API
        help="Verbosity. 0: Display only errors/warnings, 1: Errors/warnings + info messages, 2: Debug mode"
    )

    return parser
def _make_figure(metric, fit_results):
    """
    Make a graph showing CSA and angles per slice.

    Renders to a temporary PNG file using the non-interactive Agg backend
    (no display required) and returns the file path.

    :param metric: Dictionary of metrics; keys are tuples whose first element is the
                   slice index, values are dicts holding 'MEAN(area)', 'MEAN(angle_AP)'
                   and 'MEAN(angle_RL)'.
    :param fit_results: class centerline.core.FitResults(), or None when angle
                        correction was disabled (-angle-corr 0).
    :return: image object
    """
    import tempfile
    from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
    from matplotlib.figure import Figure

    fname_img = tempfile.NamedTemporaryFile().name + '.png'

    # Collect per-slice values, then sort all series by slice index so lines plot cleanly
    z, csa, angle_ap, angle_rl = [], [], [], []
    for key, value in metric.items():
        z.append(key[0])
        csa.append(value['MEAN(area)'])
        angle_ap.append(value['MEAN(angle_AP)'])
        angle_rl.append(value['MEAN(angle_RL)'])
    z_ord = np.argsort(z)
    z, csa, angle_ap, angle_rl = (
        [np.array(x)[z_ord] for x in (z, csa, angle_ap, angle_rl)]
    )

    # Make figure
    fig = Figure(figsize=(8, 7), tight_layout=True)  # 640x700 pix
    FigureCanvas(fig)

    # If -angle-corr was set to 1, fit_results exists and centerline fitting results are displayed
    if fit_results is not None:
        # Panel 1/3: CSA per slice
        ax = fig.add_subplot(311)
        ax.plot(z, csa, 'k')
        ax.plot(z, csa, 'k.')
        ax.grid(True)
        ax.set_ylabel('CSA [$mm^2$]')
        ax.set_xticklabels([])
        ax.xaxis.set_major_locator(MaxNLocator(integer=True))

        # Panel 2/3: angle about AP (blue) and RL (red) axes
        ax = fig.add_subplot(312)
        ax.grid(True)
        ax.plot(z, angle_ap, 'b', label='_nolegend_')
        ax.plot(z, angle_ap, 'b.')
        ax.plot(z, angle_rl, 'r', label='_nolegend_')
        ax.plot(z, angle_rl, 'r.')
        ax.legend(['Rotation about AP axis', 'Rotation about RL axis'])
        ax.set_ylabel('Angle [$deg$]')
        ax.set_xticklabels([])
        ax.xaxis.set_major_locator(MaxNLocator(integer=True))

        # Panel 3/3: measured centerline points vs fitted centerline
        ax = fig.add_subplot(313)
        ax.grid(True)
        # find a way to condense the following lines
        # keep only the fit samples whose reference z falls within the plotted slices
        zmean_list, xmean_list, xfit_list, ymean_list, yfit_list, zref_list = [], [], [], [], [], []
        for i, value in enumerate(fit_results.data.zref):
            if value in z:
                zmean_list.append(fit_results.data.zmean[i])
                xmean_list.append(fit_results.data.xmean[i])
                xfit_list.append(fit_results.data.xfit[i])
                ymean_list.append(fit_results.data.ymean[i])
                yfit_list.append(fit_results.data.yfit[i])
                zref_list.append(fit_results.data.zref[i])
        ax.plot(zmean_list, xmean_list, 'b.', label='_nolegend_')
        ax.plot(zref_list, xfit_list, 'b')
        ax.plot(zmean_list, ymean_list, 'r.', label='_nolegend_')
        ax.plot(zref_list, yfit_list, 'r')
        ax.legend(['Fitted (RL)', 'Fitted (AP)'])
        ax.set_ylabel('Centerline [$vox$]')
        ax.xaxis.set_major_locator(MaxNLocator(integer=True))
    else:
        # No fit results: single panel with CSA only
        ax = fig.add_subplot(111)
        ax.plot(z, csa, 'k')
        ax.plot(z, csa, 'k.')
        ax.grid(True)
        ax.set_ylabel('CSA [$mm^2$]')

    ax.set_xlabel('Slice (Inferior-Superior direction)')
    fig.savefig(fname_img)

    return fname_img
def main(argv=None):
    """
    Entry point for sct_process_segmentation: parse CLI arguments, compute shape
    metrics from the segmentation, aggregate per slice/level/PMJ-distance, save a
    CSV, and optionally generate a QC report (PMJ-based CSA only).

    :param argv: list of CLI arguments (without the program name); None lets
                 argparse read sys.argv.
    """
    parser = get_parser()
    arguments = parser.parse_args(argv)
    verbose = arguments.v
    set_loglevel(verbose=verbose)

    # Initialization
    slices = ''
    group_funcs = (('MEAN', func_wa), ('STD', func_std))  # functions to perform when aggregating metrics along S-I

    fname_segmentation = get_absolute_path(arguments.i)

    # Resolve optional arguments to local variables (empty string / None / 0 defaults)
    if arguments.o is not None:
        file_out = os.path.abspath(arguments.o)
    else:
        file_out = ''
    if arguments.append is not None:
        append = arguments.append
    else:
        append = 0
    if arguments.vert is not None:
        vert_levels = arguments.vert
        fname_vert_levels = arguments.vertfile
    else:
        vert_levels = ''
        fname_vert_levels = ''
    remove_temp_files = arguments.r
    if arguments.perlevel is not None:
        perlevel = arguments.perlevel
    else:
        perlevel = None
    if arguments.z is not None:
        slices = arguments.z
    if arguments.perslice is not None:
        perslice = arguments.perslice
    else:
        perslice = None
    angle_correction = arguments.angle_corr
    param_centerline = ParamCenterline(
        algo_fitting=arguments.centerline_algo,
        smooth=arguments.centerline_smooth,
        minmax=True)
    if arguments.pmj is not None:
        fname_pmj = get_absolute_path(arguments.pmj)
    else:
        fname_pmj = None
    if arguments.pmj_distance is not None:
        distance_pmj = arguments.pmj_distance
    else:
        distance_pmj = None
    extent_mask = arguments.pmj_extent
    path_qc = arguments.qc
    qc_dataset = arguments.qc_dataset
    qc_subject = arguments.qc_subject

    # '-pmj' and '-pmj-distance' must be given together (both None or both set)
    mutually_inclusive_args = (fname_pmj, distance_pmj)
    is_pmj_none, is_distance_none = [arg is None for arg in mutually_inclusive_args]
    if not (is_pmj_none == is_distance_none):
        # NOTE(review): parser.error() raises SystemExit itself, so the `raise` is
        # redundant (and unreachable as a raise of its None return) — TODO confirm/simplify
        raise parser.error("Both '-pmj' and '-pmj-distance' are required in order to process segmentation from PMJ.")

    # update fields
    metrics_agg = {}
    if not file_out:
        file_out = 'csa.csv'

    # Compute per-slice shape metrics from the segmentation
    metrics, fit_results = compute_shape(fname_segmentation,
                                         angle_correction=angle_correction,
                                         param_centerline=param_centerline,
                                         verbose=verbose)

    # PMJ-based slice selection: derive the slice range from the distance to the PMJ label
    if fname_pmj is not None:
        im_ctl, mask, slices, centerline = get_slices_for_pmj_distance(fname_segmentation, fname_pmj,
                                                                       distance_pmj, extent_mask,
                                                                       param_centerline=param_centerline,
                                                                       verbose=verbose)

        # Save array of the centerline in a .csv file if verbose == 2
        if verbose == 2:
            fname_ctl_csv, _ = splitext(add_suffix(arguments.i, '_centerline_extrapolated'))
            np.savetxt(fname_ctl_csv + '.csv', centerline, delimiter=",")

    # Aggregate each metric along the S-I axis per the requested grouping
    for key in metrics:
        if key == 'length':
            # For computing cord length, slice-wise length needs to be summed across slices
            metrics_agg[key] = aggregate_per_slice_or_level(metrics[key], slices=parse_num_list(slices),
                                                            levels=parse_num_list(vert_levels),
                                                            distance_pmj=distance_pmj, perslice=perslice,
                                                            perlevel=perlevel, vert_level=fname_vert_levels,
                                                            group_funcs=(('SUM', func_sum),))
        else:
            # For other metrics, we compute the average and standard deviation across slices
            metrics_agg[key] = aggregate_per_slice_or_level(metrics[key], slices=parse_num_list(slices),
                                                            levels=parse_num_list(vert_levels),
                                                            distance_pmj=distance_pmj, perslice=perslice,
                                                            perlevel=perlevel, vert_level=fname_vert_levels,
                                                            group_funcs=group_funcs)
    metrics_agg_merged = merge_dict(metrics_agg)
    save_as_csv(metrics_agg_merged, file_out, fname_in=fname_segmentation, append=append)

    # QC report (only for PMJ-based CSA)
    if path_qc is not None:
        if fname_pmj is not None:
            if arguments.qc_image is not None:
                fname_mask_out = add_suffix(arguments.i, '_mask_csa')
                fname_ctl = add_suffix(arguments.i, '_centerline_extrapolated')
                fname_ctl_smooth = add_suffix(fname_ctl, '_smooth')
                # unless in debug mode (verbose == 2), write QC intermediates in a temp folder
                if verbose != 2:
                    from spinalcordtoolbox.utils.fs import tmp_create
                    path_tmp = tmp_create()
                    fname_mask_out = os.path.join(path_tmp, fname_mask_out)
                    fname_ctl = os.path.join(path_tmp, fname_ctl)
                    fname_ctl_smooth = os.path.join(path_tmp, fname_ctl_smooth)
                # Save mask
                mask.save(fname_mask_out)
                # Save extrapolated centerline
                im_ctl.save(fname_ctl)
                # Generated centerline smoothed in RL direction for visualization (and QC report)
                sct_maths.main(['-i', fname_ctl, '-smooth', '10,1,1', '-o', fname_ctl_smooth])
                generate_qc(fname_in1=get_absolute_path(arguments.qc_image),
                            # NB: For this QC figure, the centerline has to be first in the list in order for the centerline
                            # to be properly layered underneath the PMJ + mask. However, Sagittal.get_center_spit
                            # is called during QC, and it uses `fname_seg[-1]` to center the slices. `fname_mask_out`
                            # doesn't work for this, so we have to repeat `fname_ctl_smooth` at the end of the list.
                            fname_seg=[fname_ctl_smooth, fname_pmj, fname_mask_out, fname_ctl_smooth],
                            args=sys.argv[1:],
                            path_qc=os.path.abspath(path_qc),
                            dataset=qc_dataset,
                            subject=qc_subject,
                            process='sct_process_segmentation')
            else:
                # NOTE(review): same redundant `raise` around parser.error() as above
                raise parser.error('-qc-image is required to display QC report.')
        else:
            logger.warning('QC report only available for PMJ-based CSA. QC report not generated.')

    display_open(file_out)
# Script entry point: initialize SCT (logging/environment) then run the CLI
# with the user-supplied arguments (program name stripped).
if __name__ == "__main__":
    init_sct()
    main(sys.argv[1:])
| mit | eb5618c4ffd2b1d6b5a84f8e4dc2780d | 42.891353 | 157 | 0.588633 | 3.863947 | false | false | false | false |
neuropoly/spinalcordtoolbox | spinalcordtoolbox/testing/create_test_data.py | 1 | 13593 | # -*- coding: utf-8
# Collection of functions to create data for testing
import numpy as np
import numpy.matlib
from numpy.polynomial import Polynomial as P
from datetime import datetime
import itertools
from skimage.transform import rotate
from random import uniform
import copy
import nibabel as nib
from spinalcordtoolbox.image import Image, concat_data
from spinalcordtoolbox.resampling import resample_nib
from spinalcordtoolbox.centerline.curve_fitting import bspline, polyfit_1d
# TODO: retrieve os.environ['SCT_DEBUG']
DEBUG = False # Save img_sub
def dummy_blob(size_arr=(9, 9, 9), pixdim=(1, 1, 1), coordvox=None):
    """
    Create an image with non-null voxels at coordinates specified by coordvox.

    :param size_arr: tuple: (nx, ny, nz) image dimensions in voxels
    :param pixdim: tuple: (px, py, pz) voxel size
    :param coordvox: If None: will create a single voxel in the middle of the FOV.
                     If tuple: (x,y,z): Create single voxel at specified coordinate
                     If list of tuples: [(x1,y1,z1), (x2,y2,z2)]: Create multiple voxels.
    :return: Image object
    :raises ValueError: if coordvox is neither None, a tuple, nor a list.
    """
    data = np.zeros(size_arr)
    # if not specified, voxel coordinate is set at the middle of the volume
    if coordvox is None:
        coordvox = tuple([round(i / 2) for i in size_arr])
    # Normalize the single-coordinate case so both cases share one assignment path.
    # BUGFIX: previously, the None case fell through an elif chain without ever
    # setting the voxel, so the default call returned an all-zero image.
    if isinstance(coordvox, tuple):
        coordvox = [coordvox]
    if isinstance(coordvox, list):
        for icoord in coordvox:
            data[icoord] = 1
    else:
        # BUGFIX: the exception was previously instantiated but never raised
        raise ValueError("Wrong type for coordvox")
    # Create image with default orientation LPI
    affine = np.eye(4)
    affine[0:3, 0:3] = affine[0:3, 0:3] * pixdim
    nii = nib.nifti1.Nifti1Image(data, affine)
    img = Image(data, hdr=nii.header, dim=nii.header.get_data_shape())
    return img
def dummy_centerline(size_arr=(9, 9, 9), pixdim=(1, 1, 1), subsampling=1, dilate_ctl=0, hasnan=False, zeroslice=None,
                     outlier=None, orientation='RPI', debug=False):
    """
    Create a dummy Image centerline of small size. Return the full and sub-sampled version along z. Voxel resolution
    on fully-sampled data is 1x1x1 mm (so, 2x undersampled data along z would have resolution of 1x1x2 mm).

    :param size_arr: tuple: (nx, ny, nz)
    :param pixdim: tuple: (px, py, pz)
    :param subsampling: int >=1. Subsampling factor along z. 1: no subsampling. 2: centerline defined every other z.
    :param dilate_ctl: Dilation of centerline. E.g., if dilate_ctl=1, result will be a square of 3x3 per slice.
                       if dilate_ctl=0, result will be a single pixel per slice.
    :param hasnan: Bool: Image has non-numerical values: nan, inf. In this case, do not subsample.
    :param zeroslice: list int: zero all slices listed in this param (default: no slice zeroed)
    :param outlier: list int: replace the current point with an outlier at the corner of the image for the slices listed
    :param orientation:
    :param debug: Bool: Write temp files
    :return: (img, img_sub, arr_ctl): full Image, z-subsampled Image, and 3xN centerline coordinate array
    """
    # avoid mutable default arguments: normalize None to an empty list
    if zeroslice is None:
        zeroslice = []
    if outlier is None:
        outlier = []
    nx, ny, nz = size_arr
    # create regularized curve, within X-Z plane, located at y=ny/4, passing through the following points:
    x = np.array([round(nx/4.), round(nx/2.), round(3*nx/4.)])
    z = np.array([0, round(nz/2.), nz-1])
    # we use bspline (instead of poly) in order to avoid bad extrapolation at edges
    # see: https://github.com/spinalcordtoolbox/spinalcordtoolbox/pull/2754
    xfit, _ = bspline(z, x, range(nz), 10)
    data = np.zeros((nx, ny, nz))
    # BUGFIX: np.int was removed in NumPy >= 1.24; use the builtin int instead
    arr_ctl = np.array([xfit.astype(int),
                        [round(ny / 4.)] * len(range(nz)),
                        range(nz)], dtype=np.uint16)
    # Loop across dilation of centerline. E.g., if dilate_ctl=1, result will be a square of 3x3 per slice.
    for ixiy_ctl in itertools.product(range(-dilate_ctl, dilate_ctl+1, 1), range(-dilate_ctl, dilate_ctl+1, 1)):
        data[(arr_ctl[0] + ixiy_ctl[0]).tolist(),
             (arr_ctl[1] + ixiy_ctl[1]).tolist(),
             arr_ctl[2].tolist()] = 1
    # Zero specified slices
    # BUGFIX: was `if zeroslice is not []`, an identity comparison that is always True
    if zeroslice:
        data[:, :, zeroslice] = 0
    # Add outlier
    if outlier:
        # First, zero all the slice
        data[:, :, outlier] = 0
        # Then, add point in the corner
        data[0, 0, outlier] = 1
    # Create image with default orientation LPI
    affine = np.eye(4)
    affine[0:3, 0:3] = affine[0:3, 0:3] * pixdim
    nii = nib.nifti1.Nifti1Image(data, affine)
    img = Image(data, hdr=nii.header, dim=nii.header.get_data_shape())
    # subsample data
    img_sub = img.copy()
    img_sub.data = np.zeros((nx, ny, nz))
    for iz in range(0, nz, subsampling):
        img_sub.data[..., iz] = data[..., iz]
    # Add non-numerical values at the top corner of the image
    if hasnan:
        img.data[0, 0, 0] = np.nan
        img.data[1, 0, 0] = np.inf
    # Update orientation
    img.change_orientation(orientation)
    img_sub.change_orientation(orientation)
    if debug:
        img_sub.save('tmp_dummy_seg_'+datetime.now().strftime("%Y%m%d%H%M%S%f")+'.nii.gz')
    return img, img_sub, arr_ctl
def dummy_segmentation(size_arr=(256, 256, 256), pixdim=(1, 1, 1), dtype=np.float64, orientation='LPI',
                       shape='rectangle', angle_RL=0, angle_AP=0, angle_IS=0, radius_RL=5.0, radius_AP=3.0,
                       degree=2, interleaved=False, zeroslice=None, debug=False):
    """Create a dummy Image with a ellipse or ones running from top to bottom in the 3rd dimension, and rotate the image
    to make sure that compute_csa and compute_shape properly estimate the centerline angle.

    :param size_arr: tuple: (nx, ny, nz)
    :param pixdim: tuple: (px, py, pz)
    :param dtype: Numpy dtype.
    :param orientation: Orientation of the image. Default: LPI
    :param shape: {'rectangle', 'ellipse'}
    :param angle_RL: int: angle around RL axis (in deg)
    :param angle_AP: int: angle around AP axis (in deg)
    :param angle_IS: int: angle around IS axis (in deg)
    :param radius_RL: float: 1st radius. With a, b = 50.0, 30.0 (in mm), theoretical CSA of ellipse is 4712.4
    :param radius_AP: float: 2nd radius
    :param degree: int: degree of polynomial fit
    :param interleaved: bool: create a dummy segmentation simulating interleaved acquisition
    :param zeroslice: list int: zero all slices listed in this param (default: no slice zeroed)
    :param debug: Write temp files for debug
    :return: img: Image object
    """
    # avoid mutable default arguments: normalize None to an empty list
    if zeroslice is None:
        zeroslice = []
    # Initialization
    padding = 15  # Padding size (isotropic) to avoid edge effect during rotation
    # Create a 3d array, with dimensions corresponding to x: RL, y: AP, z: IS
    nx, ny, nz = [int(size_arr[i] * pixdim[i]) for i in range(3)]
    data = np.zeros((nx, ny, nz))
    xx, yy = np.mgrid[:nx, :ny]
    # Create a dummy segmentation using polynomial function
    # create regularized curve, within Y-Z plane (A-P), located at x=nx/2:
    x = [round(nx / 2.)] * len(range(nz))
    # and passing through the following points:
    # y = np.array([round(ny / 4.), round(ny / 2.), round(3 * ny / 4.)])  # oblique curve (changing AP points across SI)
    y = [round(ny / 2.), round(ny / 2.), round(ny / 2.)]  # straight curve (same location of AP across SI)
    z = np.array([0, round(nz / 2.), nz - 1])
    # we use poly (instead of bspline) in order to allow change of scalar for each term of polynomial function
    p = np.polynomial.Polynomial.fit(z, y, deg=degree)
    # create two polynomial fits, by choosing random scalar for each term of both polynomial functions and then
    # interleave these two fits (one for odd slices, second one for even slices)
    if interleaved:
        p_even = copy.copy(p)
        p_odd = copy.copy(p)
        # choose random scalar for each term of polynomial function
        # even slices
        p_even.coef = [element * uniform(0.5, 1) for element in p_even.coef]
        # odd slices
        p_odd.coef = [element * uniform(0.5, 1) for element in p_odd.coef]
        # performs two polynomial fits - one will serve for even slices, second one for odd slices
        yfit_even = np.round(p_even(range(nz)))
        yfit_odd = np.round(p_odd(range(nz)))
        # combine even and odd polynomial fits
        yfit = np.zeros(nz)
        yfit[0:nz:2] = yfit_even[0:nz:2]
        yfit[1:nz:2] = yfit_odd[1:nz:2]
    # IF INTERLEAVED=FALSE, perform only one polynomial fit without modification of term's scalars
    else:
        yfit = np.round(p(range(nz)))  # has to be rounded for correct float -> int conversion in next step
    # BUGFIX: np.int was removed in NumPy >= 1.24; use the builtin int instead
    yfit = yfit.astype(int)
    # loop across slices and add object
    for iz in range(nz):
        if shape == 'rectangle':  # theoretical CSA: (a*2+1)(b*2+1)
            data[:, :, iz] = ((abs(xx - x[iz]) <= radius_RL) & (abs(yy - yfit[iz]) <= radius_AP)) * 1
        if shape == 'ellipse':
            data[:, :, iz] = (((xx - x[iz]) / radius_RL) ** 2 + ((yy - yfit[iz]) / radius_AP) ** 2 <= 1) * 1
    # Pad to avoid edge effect during rotation
    data = np.pad(data, padding, 'reflect')
    # ROTATION ABOUT IS AXIS
    # rotate (in deg), and re-grid using linear interpolation
    data_rotIS = rotate(data, angle_IS, resize=False, center=None, order=1, mode='constant', cval=0, clip=False,
                        preserve_range=False)
    # ROTATION ABOUT RL AXIS
    # Swap x-z axes (to make a rotation within y-z plane, because rotate will apply rotation on the first 2 dims)
    data_rotIS_swap = data_rotIS.swapaxes(0, 2)
    # rotate (in deg), and re-grid using linear interpolation
    data_rotIS_swap_rotRL = rotate(data_rotIS_swap, angle_RL, resize=False, center=None, order=1, mode='constant',
                                   cval=0, clip=False, preserve_range=False)
    # swap back
    data_rotIS_rotRL = data_rotIS_swap_rotRL.swapaxes(0, 2)
    # ROTATION ABOUT AP AXIS
    # Swap y-z axes (to make a rotation within x-z plane)
    data_rotIS_rotRL_swap = data_rotIS_rotRL.swapaxes(1, 2)
    # rotate (in deg), and re-grid using linear interpolation
    data_rotIS_rotRL_swap_rotAP = rotate(data_rotIS_rotRL_swap, angle_AP, resize=False, center=None, order=1,
                                         mode='constant', cval=0, clip=False, preserve_range=False)
    # swap back
    data_rot = data_rotIS_rotRL_swap_rotAP.swapaxes(1, 2)
    # Crop image (to remove padding)
    data_rot_crop = data_rot[padding:nx+padding, padding:ny+padding, padding:nz+padding]
    # Zero specified slices
    # BUGFIX: was `if zeroslice is not []`, an identity comparison that is always True
    if zeroslice:
        data_rot_crop[:, :, zeroslice] = 0
    # Create nibabel object
    xform = np.eye(4)
    for i in range(3):
        xform[i][i] = 1  # in [mm]
    nii = nib.nifti1.Nifti1Image(data_rot_crop.astype('float32'), xform)
    # resample to desired resolution
    nii_r = resample_nib(nii, new_size=pixdim, new_size_type='mm', interpolation='linear')
    # Create Image object. Default orientation is LPI.
    # For debugging add .save() at the end of the command below
    # BUGFIX: nii_r.get_data() was removed in nibabel >= 5; np.asanyarray(nii_r.dataobj)
    # is the documented dtype-preserving replacement
    img = Image(np.asanyarray(nii_r.dataobj), hdr=nii_r.header, dim=nii_r.header.get_data_shape())
    # Update orientation
    img.change_orientation(orientation)
    if debug:
        img.save('tmp_dummy_seg_'+datetime.now().strftime("%Y%m%d%H%M%S%f")+'.nii.gz')
    return img
def dummy_segmentation_4d(vol_num=10, create_bvecs=False, size_arr=(256, 256, 256), pixdim=(1, 1, 1), dtype=np.float64,
                          orientation='LPI', shape='rectangle', angle_RL=0, angle_AP=0, angle_IS=0, radius_RL=5.0,
                          radius_AP=3.0, degree=2, interleaved=False, zeroslice=None, debug=False):
    """
    Create a dummy 4D segmentation (dMRI/fMRI) and dummy bvecs file (optional)
    :param vol_num: int: number of volumes in 4D data
    :param create_bvecs: bool: create dummy bvecs file (necessary e.g. for sct_dmri_moco)
    :param zeroslice: list of int: slices to zero out; defaults to no slices.
                      (None sentinel instead of a mutable [] default.)
    other parameters are same as in dummy_segmentation function
    :return: Image object
    """
    # Avoid the shared-mutable-default pitfall; behavior is unchanged for
    # callers that rely on the previous [] default.
    if zeroslice is None:
        zeroslice = []
    img_list = []
    # Loop across individual volumes of 4D data
    for volume in range(vol_num):
        # set debug=True in line below for saving individual volumes into individual nii files
        img_list.append(dummy_segmentation(size_arr=size_arr, pixdim=pixdim, dtype=dtype, orientation=orientation,
                                           shape=shape, angle_RL=angle_RL, angle_AP=angle_AP, angle_IS=angle_IS,
                                           radius_RL=radius_RL, radius_AP=radius_AP, degree=degree, zeroslice=zeroslice,
                                           interleaved=interleaved, debug=False))
    # Concatenate individual 3D images into 4D data
    img_4d = concat_data(img_list, 3)
    if debug:
        out_name = datetime.now().strftime("%Y%m%d%H%M%S%f")
        file_4d_data = 'tmp_dummy_4d_' + out_name + '.nii.gz'
        img_4d.save(file_4d_data, verbose=0)
    # Create a dummy bvecs file (necessary e.g. for sct_dmri_moco)
    if create_bvecs:
        n_b0 = 1  # number of b0
        n_dwi = vol_num - n_b0  # number of dwi
        bvecs_dummy = ['', '', '']
        bvec_b0 = np.array([[0.0, 0.0, 0.0]] * n_b0)
        # NOTE(review): a single random direction is sampled and replicated for
        # every DWI volume (list multiplication repeats one triple) — presumably
        # acceptable for dummy data; confirm if per-volume directions are needed.
        bvec_dwi = np.array([[uniform(0, 1), uniform(0, 1), uniform(0, 1)]] * n_dwi)
        bvec = np.concatenate((bvec_b0, bvec_dwi), axis=0)
        # Concatenate bvecs
        for i in (0, 1, 2):
            bvecs_dummy[i] += ' '.join(str(v) for v in map(lambda n: '%.16f' % n, bvec[:, i]))
            bvecs_dummy[i] += ' '
        bvecs_concat = '\n'.join(str(v) for v in bvecs_dummy)  # transform list into lines of strings
        if debug:
            # Context manager guarantees the handle is closed even on error.
            with open('tmp_dummy_4d_' + out_name + '.bvec', 'w') as new_f:
                new_f.write(bvecs_concat)
    return img_4d
| mit | 261d5edaed945bd19dd55b3690f7d83b | 47.031802 | 120 | 0.627161 | 3.213475 | false | false | false | false |
neuropoly/spinalcordtoolbox | spinalcordtoolbox/deepseg_gm/deepseg_gm.py | 1 | 10598 | # coding: utf-8
# This is the interface API for the deepseg_gm model
# that implements the model for the Spinal Cord Gray Matter Segmentation.
#
# Reference paper:
# Perone, C. S., Calabrese, E., & Cohen-Adad, J. (2017).
# Spinal cord gray matter segmentation using deep dilated convolutions.
# URL: https://arxiv.org/abs/1710.01269
import warnings
import json
import os
import sys
import io
import nibabel as nib
import numpy as np
# Avoid Keras logging: Keras writes banner text to stderr at import time,
# so stderr is temporarily redirected to an in-memory buffer while it loads.
original_stderr = sys.stderr
if sys.hexversion < 0x03000000:
    sys.stderr = io.BytesIO()  # Python 2: bytes buffer
else:
    # Python 3: text wrapper so writes of str succeed
    sys.stderr = io.TextIOWrapper(io.BytesIO(), sys.stderr.encoding)
try:
    from keras import backend as K
except Exception as e:
    # Restore stderr before re-raising so the import failure stays visible.
    sys.stderr = original_stderr
    raise
else:
    sys.stderr = original_stderr
from spinalcordtoolbox import resampling, __data_dir__
from . import model
# Suppress warnings and TensorFlow logging
warnings.simplefilter(action='ignore', category=FutureWarning)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Axial slices larger than this in either in-plane dimension are
# center-cropped to SMALL_INPUT_SIZE x SMALL_INPUT_SIZE before inference.
SMALL_INPUT_SIZE = 200
# Number of slices fed to the network per predict() batch.
BATCH_SIZE = 4
def check_backend():
    """Return the name of the active Keras backend.

    Prints a console warning when the backend is anything other than
    Tensorflow, which is the only recommended configuration.
    """
    backend_name = K.backend()
    if backend_name != 'tensorflow':
        print("\nWARNING: you're using a Keras backend different than\n"
              "Tensorflow, which is not recommended. Please verify\n"
              "your configuration file according to: https://keras.io/backend/\n"
              "to make sure you're using Tensorflow Keras backend.\n")
    return backend_name
class DataResource(object):
    """Resolve resource files (e.g. trained models) under the SCT data dir.

    All paths are rooted at ``__data_dir__/<dirname>``.
    """

    def __init__(self, dirname):
        """Anchor the resource at ``__data_dir__/dirname``.

        :param dirname: the root directory name.
        """
        self.data_root = os.path.join(__data_dir__, dirname)

    def get_file_path(self, filename):
        """Return the absolute path of ``filename`` inside the resource root.

        :param filename: the filename.
        """
        return os.path.join(self.data_root, filename)
class CroppedRegion(object):
    """Book-keeping for a center crop so that it can later be undone.

    Records the original (x, y) extent plus the crop origin and size; these
    determine the zero-padding required to restore a cropped 2D slice to its
    original footprint.
    """

    def __init__(self, original_shape, starts, crops):
        """Record the crop geometry.

        :param original_shape: the original volume shape.
        :param starts: crop beginning (x, y).
        :param crops: the crops (x, y).
        """
        self.originalx = original_shape[0]
        self.originaly = original_shape[1]
        self.startx = starts[0]
        self.starty = starts[1]
        self.cropx = crops[0]
        self.cropy = crops[1]

    def pad(self, image):
        """Zero-pad ``image`` back to the original (y, x) footprint.

        :param image: the cropped image to pad.
        :return: padded image.
        """
        left = self.startx
        right = self.originalx - self.startx - self.cropx
        top = self.starty
        bottom = self.originaly - self.starty - self.cropy
        return np.pad(image,
                      ((top, bottom), (left, right)),
                      mode="constant")
class StandardizationTransform(object):
    """In-place standardization with fixed statistics.

    Subtracts ``mean`` and divides by ``std``; the input volume is modified
    in place and also returned.
    """

    def __init__(self, mean, std):
        """Store the standardization statistics.

        :param mean: value subtracted from every voxel.
        :param std: value every voxel is divided by.
        """
        self.mean = mean
        self.std = std

    def __call__(self, volume):
        """Standardize ``volume`` in place and return it.

        :param volume: the volume to be normalized.
        """
        volume -= self.mean
        volume /= self.std
        return volume
class VolumeStandardizationTransform(object):
    """In-place standardization using statistics estimated from the volume.

    The mean and standard deviation are computed from the volume itself; the
    input is modified in place and also returned.
    """

    def __call__(self, volume):
        """Standardize ``volume`` in place and return it.

        :param volume: the volume to be normalized.
        """
        mu = volume.mean()
        sigma = volume.std()
        volume -= mu
        volume /= sigma
        return volume
def crop_center(img, cropx, cropy):
    """Crop the center of a 2D image.

    :param img: image to be cropped.
    :param cropx: x-coord of the crop.
    :param cropy: y-coord of the crop.
    :return: (cropped image, cropped region)
    """
    height, width = img.shape
    x0 = width // 2 - cropx // 2
    y0 = height // 2 - cropy // 2

    if min(x0, y0) < 0:
        raise RuntimeError("Negative crop.")

    region = CroppedRegion((width, height), (x0, y0), (cropx, cropy))
    return img[y0:y0 + cropy, x0:x0 + cropx], region
def threshold_predictions(predictions, thr=0.999):
    """Binarize ``predictions`` at ``thr``.

    Values less than or equal to ``thr`` become 0, values above it become 1.

    NOTE: ``predictions[:]`` on an ndarray is a *view*, so the input array is
    modified in place as well — downstream code (segment_file) relies on this.

    :param thr: the threshold (if None, no threshold will
                be applied).
    :return: thresholded predictions
    """
    if thr is None:
        return predictions[:]
    out = predictions[:]  # view on the input: thresholding is in-place
    out[out <= thr] = 0
    out[out > thr] = 1
    return out
def segment_volume(ninput_volume, model_name,
                   threshold=0.999, use_tta=False):
    """Segment a nifti volume.

    :param ninput_volume: the input volume (a nibabel image).
    :param model_name: the name of the model to use.
    :param threshold: threshold to be applied in predictions.
    :param use_tta: whether TTA (test-time augmentation)
                    should be used or not.
    :return: segmented slices.
    """
    gmseg_model_challenge = DataResource('deepseg_gm_models')
    model_path, metadata_path = model.MODELS[model_name]

    # Load the metadata (e.g. number of filters) shipped with the weights.
    metadata_abs_path = gmseg_model_challenge.get_file_path(metadata_path)
    with open(metadata_abs_path) as fp:
        metadata = json.load(fp)

    # The network was trained on 200x200 crops: smaller volumes are fed
    # whole, larger ones are center-cropped per slice.
    volume_size = np.array(ninput_volume.shape[0:2])
    small_input = (volume_size <= SMALL_INPUT_SIZE).any()
    if small_input:
        # Smaller than the trained net, don't crop
        net_input_size = volume_size
    else:
        # larger sizer, crop at 200x200
        net_input_size = (SMALL_INPUT_SIZE, SMALL_INPUT_SIZE)

    deepgmseg_model = model.create_model(metadata['filters'],
                                         net_input_size)
    model_abs_path = gmseg_model_challenge.get_file_path(model_path)
    deepgmseg_model.load_weights(model_abs_path)

    # nibabel's Image.get_data() is deprecated (removed in nibabel 5.0);
    # np.asanyarray(img.dataobj) is the documented equivalent.
    volume_data = np.asanyarray(ninput_volume.dataobj)
    axial_slices = []
    crops = []

    # Collect the axial slices (cropping when needed) for batched inference.
    for slice_num in range(volume_data.shape[2]):
        data = volume_data[..., slice_num]
        if not small_input:
            data, cropreg = crop_center(data, SMALL_INPUT_SIZE,
                                        SMALL_INPUT_SIZE)
            crops.append(cropreg)
        axial_slices.append(data)

    axial_slices = np.asarray(axial_slices, dtype=np.float32)
    axial_slices = np.expand_dims(axial_slices, axis=3)

    # Standardize with statistics estimated from the volume itself.
    normalization = VolumeStandardizationTransform()
    axial_slices = normalization(axial_slices)

    if use_tta:
        # Test-time augmentation: average predictions over 8 randomly
        # intensity-shifted copies plus the original volume.
        pred_sampled = []
        for i in range(8):
            sampled_value = np.random.uniform(high=2.0)
            sampled_axial_slices = axial_slices + sampled_value
            preds = deepgmseg_model.predict(sampled_axial_slices,
                                            batch_size=BATCH_SIZE,
                                            verbose=True)
            pred_sampled.append(preds)
        preds = deepgmseg_model.predict(axial_slices, batch_size=BATCH_SIZE,
                                        verbose=True)
        pred_sampled.append(preds)
        pred_sampled = np.asarray(pred_sampled)
        pred_sampled = np.mean(pred_sampled, axis=0)
        preds = threshold_predictions(pred_sampled, threshold)
    else:
        preds = deepgmseg_model.predict(axial_slices, batch_size=BATCH_SIZE,
                                        verbose=True)
        preds = threshold_predictions(preds, threshold)

    pred_slices = []

    # Un-cropping
    for slice_num in range(preds.shape[0]):
        pred_slice = preds[slice_num][..., 0]
        if not small_input:
            pred_slice = crops[slice_num].pad(pred_slice)
        pred_slices.append(pred_slice)

    pred_slices = np.asarray(pred_slices, dtype=np.uint8)
    pred_slices = np.transpose(pred_slices, (1, 2, 0))
    return pred_slices
def segment_file(input_filename, output_filename,
                 model_name, threshold, verbosity,
                 use_tta):
    """Segment a volume file.

    :param input_filename: the input filename.
    :param output_filename: the output filename.
    :param model_name: the name of model to use.
    :param threshold: threshold to apply in predictions (if None,
                      no threshold will be applied)
    :param verbosity: the verbosity level.
    :param use_tta: whether it should use TTA (test-time augmentation)
                    or not.
    :return: the output filename.
    """
    # Resample to the in-plane resolution the model expects (0.25 x 0.25 mm),
    # keeping the original through-plane spacing.
    nii_original = nib.load(input_filename)
    pixdim = nii_original.header["pixdim"][3]
    target_resample = [0.25, 0.25, pixdim]
    nii_resampled = resampling.resample_nib(
        nii_original, new_size=target_resample, new_size_type='mm', interpolation='linear')
    pred_slices = segment_volume(nii_resampled, model_name, threshold,
                                 use_tta)
    # Resample the predicted mask back to the input image's resolution.
    original_res = [
        nii_original.header["pixdim"][1],
        nii_original.header["pixdim"][2],
        nii_original.header["pixdim"][3]]
    volume_affine = nii_resampled.affine
    volume_header = nii_resampled.header
    nii_segmentation = nib.Nifti1Image(pred_slices, volume_affine,
                                       volume_header)
    nii_resampled_original = resampling.resample_nib(
        nii_segmentation, new_size=original_res, new_size_type='mm', interpolation='linear')
    res_data = nii_resampled_original.get_data()
    # Threshold after resampling, only if specified
    # NOTE(review): threshold_predictions() operates on a *view* of res_data,
    # and get_data() presumably returns the array cached inside
    # nii_resampled_original, so the thresholding is reflected by nib.save()
    # below. Replacing the view with a copy (or get_data() with get_fdata())
    # would silently disable the thresholding unless the thresholded array is
    # saved explicitly — confirm before changing.
    if threshold is not None:
        res_data = threshold_predictions(res_data, 0.5)
    nib.save(nii_resampled_original, output_filename)
    return output_filename
| mit | 0555bb6784481136306cf9b184d87e47 | 30.825826 | 92 | 0.618041 | 3.810859 | false | false | false | false |
neuropoly/spinalcordtoolbox | spinalcordtoolbox/scripts/sct_compute_hausdorff_distance.py | 1 | 25171 | #!/usr/bin/env python
#
# Thinning with the Zhang-Suen algorithm (1984) --> code taken from https://github.com/linbojin/Skeletonization-by-Zhang-Suen-Thinning-Algorithm
# Computation of the distances between two skeleton
# ---------------------------------------------------------------------------------------
# Copyright (c) 2013 Polytechnique Montreal <www.neuro.polymtl.ca>
# Authors: Sara Dupont
# CREATED: 2015-07-15
#
# About the license: see the file LICENSE.TXT
#########################################################################################
import sys
import os
import numpy as np
from spinalcordtoolbox.image import Image, add_suffix, empty_like, change_orientation
from spinalcordtoolbox.utils.shell import SCTArgumentParser, Metavar
from spinalcordtoolbox.utils.sys import init_sct, run_proc, printv, set_loglevel
from spinalcordtoolbox.utils.fs import tmp_create, copy, extract_fname
from spinalcordtoolbox.math import binarize
# TODO: display results ==> not only max : with a violin plot of h1 and h2 distribution ? see dev/straightening --> seaborn.violinplot
# TODO: add the option Hyberbolic Hausdorff's distance : see choi and seidel paper
# ----------------------------------------------------------------------------------------------------------------------
# PARAM ----------------------------------------------------------------------------------------------------------------
class Param:
    """Container for the script's tunable processing parameters."""

    def __init__(self):
        self.debug = 0        # non-zero enables the debug code path in main()
        self.thinning = True  # skeletonize inputs before measuring distances
        self.verbose = 1      # verbosity level forwarded to printv()
# ----------------------------------------------------------------------------------------------------------------------
# THINNING -------------------------------------------------------------------------------------------------------------
class Thinning:
    """Topological thinning (skeletonization) of a binary image.

    Implements the Zhang-Suen (1984) iterative thinning algorithm; 3D volumes
    are thinned slice-wise in the IRP orientation.
    Code adapted from
    https://github.com/linbojin/Skeletonization-by-Zhang-Suen-Thinning-Algorithm
    """

    def __init__(self, im, v=1):
        """Thin ``im`` (2D or 3D Image) and store the result in
        ``self.thinned_image``. The input's data is binarized in place.

        :param im: Image to thin.
        :param v: verbosity level.
        """
        printv('Thinning ... ', v, 'normal')
        self.image = im
        self.image.data = bin_data(self.image.data)
        self.dim_im = len(self.image.data.shape)

        if self.dim_im == 2:
            self.thinned_image = empty_like(self.image)
            self.thinned_image.data = self.zhang_suen(self.image.data)
            self.thinned_image.absolutepath = add_suffix(self.image.absolutepath, "_thinned")

        elif self.dim_im == 3:
            if not self.image.orientation == 'IRP':
                printv('-- changing orientation ...')
                self.image.change_orientation('IRP')

            # Thin each axial (first-axis) slice independently.
            thinned_data = np.asarray([self.zhang_suen(im_slice) for im_slice in self.image.data])
            self.thinned_image = empty_like(self.image)
            self.thinned_image.data = thinned_data
            self.thinned_image.absolutepath = add_suffix(self.image.absolutepath, "_thinned")

    # ------------------------------------------------------------------------------------------------------------------
    def get_neighbours(self, x, y, image):
        """
        Return 8-neighbours of image point P1(x,y), in a clockwise order
        code from https://github.com/linbojin/Skeletonization-by-Zhang-Suen-Thinning-Algorithm
        :param x:
        :param y:
        :param image:
        :return:
        """
        x_1, y_1, x1, y1 = x - 1, y - 1, x + 1, y + 1
        neighbours = [image[x_1][y], image[x_1][y1], image[x][y1], image[x1][y1],    # P2,P3,P4,P5
                      image[x1][y], image[x1][y_1], image[x][y_1], image[x_1][y_1]]  # P6,P7,P8,P9
        return neighbours

    # ------------------------------------------------------------------------------------------------------------------
    def transitions(self, neighbours):
        """
        No. of 0,1 patterns (transitions from 0 to 1) in the ordered sequence
        code from https://github.com/linbojin/Skeletonization-by-Zhang-Suen-Thinning-Algorithm
        :param neighbours:
        :return:
        """
        n = neighbours + neighbours[0:1]  # P2, P3, ... , P8, P9, P2
        # Builtin sum() instead of np.sum(): passing a generator to np.sum()
        # is deprecated NumPy usage and falls back to the builtin anyway.
        s = sum((n1, n2) == (0, 1) for n1, n2 in zip(n, n[1:]))  # (P2,P3), (P3,P4), ... , (P8,P9), (P9,P2)
        return s

    # ------------------------------------------------------------------------------------------------------------------
    def zhang_suen(self, image):
        """
        the Zhang-Suen Thinning Algorithm
        code from https://github.com/linbojin/Skeletonization-by-Zhang-Suen-Thinning-Algorithm
        :param image: 2D binary (0/1) array.
        :return: thinned 2D array.
        """
        image_thinned = image.copy()  # deepcopy to protect the original image
        changing1 = changing2 = 1  # the points to be removed (set as 0)
        while changing1 or changing2:  # iterates until no further changes occur in the image
            # Step 1
            changing1 = []
            last_idx = len(image_thinned) - 1  # renamed from 'max' (shadowed the builtin)
            pass_list = [1, last_idx]
            # NOTE(review): pass_list is derived from the number of rows but is
            # used to skip both x (row) and y (column) coordinates — presumably
            # assumes square slices; confirm behavior for non-square inputs.
            for x, y in non_zero_coord(image_thinned):
                if x not in pass_list and y not in pass_list:
                    P2, P3, P4, P5, P6, P7, P8, P9 = n = self.get_neighbours(x, y, image_thinned)
                    if (2 <= sum(n) <= 6 and            # Condition 1: 2<= N(P1) <= 6
                            P2 * P4 * P6 == 0 and       # Condition 3
                            P4 * P6 * P8 == 0 and       # Condition 4
                            self.transitions(n) == 1):  # Condition 2: S(P1)=1
                        changing1.append((x, y))
            for x, y in changing1:
                image_thinned[x][y] = 0
            # Step 2
            changing2 = []
            for x, y in non_zero_coord(image_thinned):
                if x not in pass_list and y not in pass_list:
                    P2, P3, P4, P5, P6, P7, P8, P9 = n = self.get_neighbours(x, y, image_thinned)
                    if (2 <= sum(n) <= 6 and            # Condition 1
                            P2 * P4 * P8 == 0 and       # Condition 3
                            P2 * P6 * P8 == 0 and       # Condition 4
                            self.transitions(n) == 1):  # Condition 2
                        changing2.append((x, y))
            for x, y in changing2:
                image_thinned[x][y] = 0
        return image_thinned
# ----------------------------------------------------------------------------------------------------------------------
# HAUSDORFF'S DISTANCE -------------------------------------------------------------------------------------------------
class HausdorffDistance:
    """2D Hausdorff distance between two binary images (in pixels)."""

    def __init__(self, data1, data2, v=1):
        """
        the hausdorff distance between two sets is the maximum of the distances from a point in any of the sets to the nearest point in the other set

        :param data1: first binary 2D array.
        :param data2: second binary 2D array.
        :param v: verbosity level.
        """
        printv('Computing 2D Hausdorff\'s distance ... ', v, 'normal')

        self.data1 = bin_data(data1)
        self.data2 = bin_data(data2)

        self.min_distances_1 = self.relative_hausdorff_dist(self.data1, self.data2, v)
        self.min_distances_2 = self.relative_hausdorff_dist(self.data2, self.data1, v)

        # relatives hausdorff's distances in pixel
        self.h1 = np.max(self.min_distances_1)
        self.h2 = np.max(self.min_distances_2)

        # Hausdorff's distance in pixel
        self.H = max(self.h1, self.h2)

    # ------------------------------------------------------------------------------------------------------------------
    def relative_hausdorff_dist(self, dat1, dat2, v=1):
        """For each non-zero pixel of ``dat1``, store (at that pixel's
        position) the Euclidean distance to the nearest non-zero pixel of
        ``dat2``.

        Vectorized with NumPy broadcasting instead of nested Python loops
        (same values, much faster for dense masks).

        :return: array of dat1's shape holding the per-pixel minimum distances.
        """
        h = np.zeros(dat1.shape)
        pts1 = np.argwhere(dat1 > 0)
        pts2 = np.argwhere(dat2 > 0)
        if len(pts1) != 0 and len(pts2) != 0:
            # Pairwise Euclidean distances: (n1, n2) matrix via broadcasting.
            diff = pts1[:, np.newaxis, :] - pts2[np.newaxis, :, :]
            dists = np.sqrt((diff ** 2).sum(axis=2))
            h[pts1[:, 0], pts1[:, 1]] = dists.min(axis=1)
        else:
            printv('Warning: an image is empty', v, 'warning')
        return h
# ----------------------------------------------------------------------------------------------------------------------
# COMPUTE DISTANCES ----------------------------------------------------------------------------------------------------
class ComputeDistances:
    """Orchestrates thinning and slice-wise Hausdorff distance computation
    between one or two images, and formats the textual result in ``self.res``.
    """

    def __init__(self, im1, im2=None, param=None):
        """
        :param im1: first Image (mandatory).
        :param im2: optional second Image; when None, consecutive slices of
                    im1 are compared to each other instead.
        :param param: Param instance holding thinning/verbosity settings.
        """
        self.im1 = im1
        self.im2 = im2
        self.dim_im = len(self.im1.data.shape)
        self.dim_pix = 0
        self.distances = None
        self.res = ''
        self.param = param
        self.dist1_distribution = None
        self.dist2_distribution = None

        # 3D inputs are processed slice-wise in the IRP orientation.
        if self.dim_im == 3:
            self.orientation1 = self.im1.orientation
            if self.orientation1 != 'IRP':
                self.im1.change_orientation('IRP')
                self.im1.save(path=add_suffix(self.im1.absolutepath, "_irp"), mutable=True)
            if self.im2 is not None:
                self.orientation2 = self.im2.orientation
                if self.orientation2 != 'IRP':
                    self.im2.change_orientation('IRP')
                    self.im2.save(path=add_suffix(self.im2.absolutepath, "_irp"), mutable=True)

        # Optional skeletonization before measuring distances.
        if self.param.thinning:
            self.thinning1 = Thinning(self.im1, self.param.verbose)
            self.thinning1.thinned_image.save()
            if self.im2 is not None:
                self.thinning2 = Thinning(self.im2, self.param.verbose)
                self.thinning2.thinned_image.save()

        if self.dim_im == 2 and self.im2 is not None:
            self.compute_dist_2im_2d()

        if self.dim_im == 3:
            if self.im2 is None:
                self.compute_dist_1im_3d()
            else:
                self.compute_dist_2im_3d()

        # Collect the non-zero per-pixel minimum distances for reporting.
        if self.dim_im == 2 and self.distances is not None:
            self.dist1_distribution = self.distances.min_distances_1[np.nonzero(self.distances.min_distances_1)]
            self.dist2_distribution = self.distances.min_distances_2[np.nonzero(self.distances.min_distances_2)]
        if self.dim_im == 3:
            self.dist1_distribution = []
            self.dist2_distribution = []

            for d in self.distances:
                if np.nonzero(d.min_distances_1)[0].size:  # Exist non zero values
                    self.dist1_distribution.append(d.min_distances_1[np.nonzero(d.min_distances_1)])
                else:  # all values are zero
                    self.dist1_distribution.append(0)
                if np.nonzero(d.min_distances_2)[0].size:  # Exist non zero values
                    self.dist2_distribution.append(d.min_distances_2[np.nonzero(d.min_distances_2)])
                else:  # all values are zero
                    self.dist2_distribution.append(0)

            self.res = 'Hausdorff\'s distance - First relative Hausdorff\'s distance median - Second relative Hausdorff\'s distance median(all in mm)\n'
            for i, d in enumerate(self.distances):
                med1 = np.median(self.dist1_distribution[i])
                med2 = np.median(self.dist2_distribution[i])
                if self.im2 is None:
                    self.res += 'Slice ' + str(i) + ' - slice ' + str(i + 1) + ': ' + str(d.H * self.dim_pix) + ' - ' + str(med1 * self.dim_pix) + ' - ' + str(med2 * self.dim_pix) + ' \n'
                else:
                    self.res += 'Slice ' + str(i) + ': ' + str(d.H * self.dim_pix) + ' - ' + str(med1 * self.dim_pix) + ' - ' + str(med2 * self.dim_pix) + ' \n'

        printv('-----------------------------------------------------------------------------\n' +
               self.res, self.param.verbose, 'normal')

        if self.param.verbose == 2:
            self.show_results()

    # ------------------------------------------------------------------------------------------------------------------
    def compute_dist_2im_2d(self):
        """Hausdorff distance between two 2D images (pixel sizes must match)."""
        nx1, ny1, nz1, nt1, px1, py1, pz1, pt1 = self.im1.dim
        nx2, ny2, nz2, nt2, px2, py2, pz2, pt2 = self.im2.dim

        assert np.isclose(px1, px2) and np.isclose(py1, py2) and np.isclose(px1, py1)
        self.dim_pix = py1

        if self.param.thinning:
            dat1 = self.thinning1.thinned_image.data
            dat2 = self.thinning2.thinned_image.data
        else:
            dat1 = bin_data(self.im1.data)
            dat2 = bin_data(self.im2.data)

        self.distances = HausdorffDistance(dat1, dat2, self.param.verbose)
        self.res = 'Hausdorff\'s distance : ' + str(self.distances.H * self.dim_pix) + ' mm\n\n' \
                   'First relative Hausdorff\'s distance : ' + str(self.distances.h1 * self.dim_pix) + ' mm\n' \
                   'Second relative Hausdorff\'s distance : ' + str(self.distances.h2 * self.dim_pix) + ' mm'

    # ------------------------------------------------------------------------------------------------------------------
    def compute_dist_1im_3d(self):
        """Hausdorff distances between consecutive slices of a single volume."""
        nx1, ny1, nz1, nt1, px1, py1, pz1, pt1 = self.im1.dim
        self.dim_pix = py1

        if self.param.thinning:
            dat1 = self.thinning1.thinned_image.data
        else:
            dat1 = bin_data(self.im1.data)

        self.distances = []
        for i, dat_slice in enumerate(dat1[:-1]):
            self.distances.append(HausdorffDistance(bin_data(dat_slice), bin_data(dat1[i + 1]), self.param.verbose))

    # ------------------------------------------------------------------------------------------------------------------
    def compute_dist_2im_3d(self):
        """Slice-wise Hausdorff distances between two volumes of equal depth."""
        nx1, ny1, nz1, nt1, px1, py1, pz1, pt1 = self.im1.dim
        nx2, ny2, nz2, nt2, px2, py2, pz2, pt2 = self.im2.dim
        # assert np.round(pz1, 5) == np.round(pz2, 5) and np.round(py1, 5) == np.round(py2, 5)
        assert nx1 == nx2
        self.dim_pix = py1

        if self.param.thinning:
            dat1 = self.thinning1.thinned_image.data
            dat2 = self.thinning2.thinned_image.data
        else:
            dat1 = bin_data(self.im1.data)
            dat2 = bin_data(self.im2.data)

        self.distances = []
        for slice1, slice2 in zip(dat1, dat2):
            self.distances.append(HausdorffDistance(slice1, slice2, self.param.verbose))

    # ------------------------------------------------------------------------------------------------------------------
    def show_results(self):
        """Render a per-slice violin plot of the distance distributions and
        save it to 'violin_plot.png'.
        """
        import seaborn as sns
        import matplotlib.pyplot as plt
        import pandas as pd
        # NOTE: the original plt.hold(True) call was removed — pyplot.hold()
        # no longer exists in matplotlib >= 3.0, where "hold" behavior is
        # always on.
        sns.set(style="whitegrid", palette="pastel", color_codes=True)
        plt.figure(figsize=(35, 20))

        data_dist = {"distances": [], "image": [], "slice": []}

        if self.dim_im == 2:
            data_dist["distances"].append([dist * self.dim_pix for dist in self.dist1_distribution])
            data_dist["image"].append(len(self.dist1_distribution) * [1])
            data_dist["slice"].append(len(self.dist1_distribution) * [0])

            data_dist["distances"].append([dist * self.dim_pix for dist in self.dist2_distribution])
            data_dist["image"].append(len(self.dist2_distribution) * [2])
            data_dist["slice"].append(len(self.dist2_distribution) * [0])

        if self.dim_im == 3:
            for i in range(len(self.distances)):
                data_dist["distances"].append([dist * self.dim_pix for dist in self.dist1_distribution[i]])
                data_dist["image"].append(len(self.dist1_distribution[i]) * [1])
                data_dist["slice"].append(len(self.dist1_distribution[i]) * [i])
                data_dist["distances"].append([dist * self.dim_pix for dist in self.dist2_distribution[i]])
                data_dist["image"].append(len(self.dist2_distribution[i]) * [2])
                data_dist["slice"].append(len(self.dist2_distribution[i]) * [i])

        for k in data_dist.keys():  # flatten the lists in data_dist
            data_dist[k] = [item for sublist in data_dist[k] for item in sublist]
        data_dist = pd.DataFrame(data_dist)
        sns.violinplot(x="slice", y="distances", hue="image", data=data_dist, split=True, inner="point", cut=0)
        plt.savefig('violin_plot.png')
        # plt.show()
# ----------------------------------------------------------------------------------------------------------------------
def bin_data(data):
    """Binarize ``data``: strictly-positive values become 1, others 0.

    Accepts any array-like (the original implementation required an ndarray;
    a plain list raised TypeError on ``data > 0``).

    :param data: array-like of numbers.
    :return: int ndarray of the same shape.
    """
    return (np.asarray(data) > 0).astype(int)
# ----------------------------------------------------------------------------------------------------------------------
def resample_image(fname, suffix='_resampled.nii.gz', binary=False, npx=0.3, npy=0.3, thr=0.0, interpolation='spline'):
    """
    Resampling function: add a padding, resample, crop the padding
    :param fname: name of the image file to be resampled
    :param suffix: suffix added to the original fname after resampling
    :param binary: boolean, image is binary or not
    :param npx: new pixel size in the x direction
    :param npy: new pixel size in the y direction
    :param thr: if the image is binary, it will be thresholded at thr (default=0) after the resampling
    :param interpolation: type of interpolation used for the resampling
    :return: file name after resampling (or original fname if it was already in the correct resolution)
    """
    im_in = Image(fname)
    orientation = im_in.orientation
    # Work in RPI so in-plane pixel sizes (px, py) are consistent.
    if orientation != 'RPI':
        im_in.change_orientation('RPI')
        fname = add_suffix(im_in.absolutepath, "_rpi")
        im_in.save(path=fname, mutable=True)
    nx, ny, nz, nt, px, py, pz, pt = im_in.dim
    # Only resample when the in-plane resolution differs from the target.
    if np.round(px, 2) != np.round(npx, 2) or np.round(py, 2) != np.round(npy, 2):
        name_resample = extract_fname(fname)[1] + suffix
        if binary:
            # Nearest-neighbor keeps binary masks binary.
            interpolation = 'nn'
        if nz == 1:
            # when data is 2d: we convert it to a 3d image in order to avoid conversion problem with 2d data
            # TODO: check if this above problem is still present (now that we are using nibabel instead of nipy)
            run_proc(['sct_image', '-i', ','.join([fname, fname]), '-concat', 'z', '-o', fname])
        run_proc(['sct_resample', '-i', fname, '-mm', str(npx) + 'x' + str(npy) + 'x' + str(pz), '-o', name_resample, '-x', interpolation])
        if nz == 1:  # when input data was 2d: re-convert data 3d-->2d
            run_proc(['sct_image', '-i', name_resample, '-split', 'z'])
            im_split = Image(name_resample.split('.nii.gz')[0] + '_Z0000.nii.gz')
            im_split.save(name_resample)
        if binary:
            # Re-binarize after resampling interpolation.
            img = Image(name_resample)
            img.data = binarize(img.data, thr)
            img.save()
        # Restore the caller's original orientation.
        if orientation != 'RPI':
            img = Image(name_resample)
            img.change_orientation(orientation)
            name_resample = add_suffix(img.absolutepath, "_{}".format(orientation.lower()))
            img.save(path=name_resample, mutable=True)
        return name_resample
    else:
        if orientation != 'RPI':
            fname = add_suffix(fname, "_RPI")
            im_in = change_orientation(im_in, orientation).save(fname)
        printv('Image resolution already ' + str(npx) + 'x' + str(npy) + 'xpz')
        return fname
# ----------------------------------------------------------------------------------------------------------------------
def non_zero_coord(data):
    """Return the coordinates of strictly-positive elements of ``data``.

    Works for arrays of any dimensionality (the original implementation only
    handled 2D/3D and raised UnboundLocalError otherwise). Coordinates are
    returned as tuples in row-major (C) order, matching ``nonzero()``.

    :param data: array-like.
    :return: list of coordinate tuples.
    """
    return [tuple(coord) for coord in np.argwhere(np.asarray(data) > 0)]
def get_parser():
    """Build and return the command-line argument parser for this script."""
    parser = SCTArgumentParser(
        description='Compute the Hausdorff\'s distance between two binary images which can be thinned (ie skeletonized).'
                    ' If only one image is inputted, it will be only thinned'
    )
    mandatoryArguments = parser.add_argument_group("\nMANDATORY ARGUMENTS")
    mandatoryArguments.add_argument(
        "-i",
        required=True,
        help='First Image on which you want to find the skeleton Example: t2star_manual_gmseg.nii.gz',
        metavar=Metavar.file,
    )
    optional = parser.add_argument_group("\nOPTIONAL ARGUMENTS")
    optional.add_argument(
        "-h",
        "--help",
        action="help",
        help="show this help message and exit")
    optional.add_argument(
        "-d",
        help='Second Image on which you want to find the skeleton Example: t2star_manual_gmseg.nii.gz',
        metavar=Metavar.file,
        required=False,
        default=None)
    optional.add_argument(
        "-thinning",
        type=int,
        help="Thinning : find the skeleton of the binary images using the Zhang-Suen algorithm (1984) and use it to "
             "compute the hausdorff's distance",
        required=False,
        default=1,
        choices=(0, 1))
    optional.add_argument(
        "-resampling",
        type=float,
        help="pixel size in mm to resample to Example: 0.5",
        metavar=Metavar.float,
        required=False,
        default=0.1)
    optional.add_argument(
        "-o",
        help='Name of the output file Example: my_hausdorff_dist.txt',
        metavar=Metavar.str,
        required=False,
        default='hausdorff_distance.txt')
    optional.add_argument(
        '-v',
        metavar=Metavar.int,
        type=int,
        choices=[0, 1, 2],
        default=1,
        # Values [0, 1, 2] map to logging levels [WARNING, INFO, DEBUG], but are also used as "if verbose == #" in API
        help="Verbosity. 0: Display only errors/warnings, 1: Errors/warnings + info messages, 2: Debug mode")
    return parser
def main(argv=None):
    """Command-line entry point.

    Parses arguments, copies the input(s) into a temporary directory,
    resamples them to the requested in-plane resolution, runs the distance
    computation, saves the thinned image(s) next to the originals, and
    writes the textual results to the output file.

    :param argv: list of command-line arguments (defaults to sys.argv[1:]).
    """
    parser = get_parser()
    arguments = parser.parse_args(argv)
    verbose = arguments.v
    set_loglevel(verbose=verbose)
    param = Param()
    if param.debug:
        printv('\n*** WARNING: DEBUG MODE ON ***\n')
    else:
        input_fname = arguments.i
        input_second_fname = ''
        output_fname = 'hausdorff_distance.txt'
        resample_to = 0.1
        if arguments.d is not None:
            input_second_fname = arguments.d
        if arguments.thinning is not None:
            param.thinning = bool(arguments.thinning)
        if arguments.resampling is not None:
            resample_to = arguments.resampling
        if arguments.o is not None:
            output_fname = arguments.o
        param.verbose = verbose
        # Stage the input image(s) in a temporary working directory.
        tmp_dir = tmp_create()
        im1_name = "im1.nii.gz"
        copy(input_fname, os.path.join(tmp_dir, im1_name))
        if input_second_fname != '':
            im2_name = 'im2.nii.gz'
            copy(input_second_fname, os.path.join(tmp_dir, im2_name))
        else:
            im2_name = None
        curdir = os.getcwd()
        os.chdir(tmp_dir)
        # now = time.time()
        # Binarize (threshold 0.5) while resampling to the working resolution.
        input_im1 = Image(resample_image(im1_name, binary=True, thr=0.5, npx=resample_to, npy=resample_to))
        input_im1.absolutepath = os.path.basename(input_fname)
        if im2_name is not None:
            input_im2 = Image(resample_image(im2_name, binary=True, thr=0.5, npx=resample_to, npy=resample_to))
            input_im2.absolutepath = os.path.basename(input_second_fname)
        else:
            input_im2 = None
        computation = ComputeDistances(input_im1, im2=input_im2, param=param)
        # TODO change back the orientation of the thinned image
        if param.thinning:
            computation.thinning1.thinned_image.save(
                os.path.join(curdir, add_suffix(os.path.basename(input_fname), '_thinned')))
            if im2_name is not None:
                computation.thinning2.thinned_image.save(
                    os.path.join(curdir, add_suffix(os.path.basename(input_second_fname), '_thinned')))
        os.chdir(curdir)
        # Write the formatted results next to the current directory.
        res_fic = open(output_fname, 'w')
        res_fic.write(computation.res)
        res_fic.write('\n\nInput 1: ' + input_fname)
        res_fic.write('\nInput 2: ' + input_second_fname)
        res_fic.close()
        # printv('Total time: ', time.time() - now)
# Script entry point: initialize the SCT environment, then run main() with
# the command-line arguments.
if __name__ == "__main__":
    init_sct()
    main(sys.argv[1:])
| mit | 100383ded5a95c9131f0b596e8b9550f | 43.237258 | 191 | 0.512415 | 3.620685 | false | false | false | false |
reviewboard/reviewboard | reviewboard/reviews/ui/text.py | 1 | 13698 | import logging
from django.template.loader import render_to_string
from django.utils.encoding import force_bytes
from django.utils.safestring import mark_safe
from djblets.cache.backend import cache_memoize
from pygments import highlight
from pygments.lexers import (ClassNotFound, guess_lexer_for_filename,
TextLexer)
from reviewboard.attachments.mimetypes import TextMimetype
from reviewboard.attachments.models import FileAttachment
from reviewboard.diffviewer.chunk_generator import (NoWrapperHtmlFormatter,
RawDiffChunkGenerator)
from reviewboard.diffviewer.diffutils import get_chunks_in_range
from reviewboard.reviews.ui.base import FileAttachmentReviewUI
logger = logging.getLogger(__name__)
class TextBasedReviewUI(FileAttachmentReviewUI):
"""A Review UI for text-based files.
This renders the text file, applying syntax highlighting, and allows users
to comment on one or more lines.
"""
name = 'Text'
object_key = 'text'
supported_mimetypes = TextMimetype.supported_mimetypes
template_name = 'reviews/ui/text.html'
comment_thumbnail_template_name = 'reviews/ui/text_comment_thumbnail.html'
can_render_text = False
supports_diffing = True
source_chunk_generator_cls = RawDiffChunkGenerator
rendered_chunk_generator_cls = RawDiffChunkGenerator
extra_css_classes = []
js_model_class = 'RB.TextBasedReviewable'
js_view_class = 'RB.TextBasedReviewableView'
def get_js_model_data(self):
data = super(TextBasedReviewUI, self).get_js_model_data()
data['hasRenderedView'] = self.can_render_text
if self.can_render_text:
data['viewMode'] = 'rendered'
else:
data['viewMode'] = 'source'
return data
def get_extra_context(self, request):
context = {}
diff_type_mismatch = False
if self.diff_against_obj:
diff_against_review_ui = self.diff_against_obj.review_ui
context.update({
'diff_caption': self.diff_against_obj.caption,
'diff_filename': self.diff_against_obj.filename,
'diff_revision': self.diff_against_obj.attachment_revision,
})
if type(self) != type(diff_against_review_ui):
diff_type_mismatch = True
else:
chunk_generator = self._get_source_diff_chunk_generator()
context['source_chunks'] = chunk_generator.get_chunks()
chunk_generator = self._get_rendered_diff_chunk_generator()
context['rendered_chunks'] = chunk_generator.get_chunks()
else:
file_line_list = [
mark_safe(line)
for line in self.get_text_lines()
]
rendered_line_list = [
mark_safe(line)
for line in self.get_rendered_lines()
]
context.update({
'text_lines': file_line_list,
'rendered_lines': rendered_line_list,
})
if self.obj.attachment_history is not None:
num_revisions = FileAttachment.objects.filter(
attachment_history=self.obj.attachment_history).count()
else:
num_revisions = 1
context.update({
'filename': self.obj.filename,
'revision': self.obj.attachment_revision,
'is_diff': self.diff_against_obj is not None,
'num_revisions': num_revisions,
'diff_type_mismatch': diff_type_mismatch,
})
return context
def get_text(self):
"""Return the file contents as a string.
This will fetch the file and then cache it for future renders.
"""
return cache_memoize('text-attachment-%d-string' % self.obj.pk,
self._get_text_uncached)
def get_text_lines(self):
"""Return the file contents as syntax-highlighted lines.
This will fetch the file, render it however appropriate for the review
UI, and split it into reviewable lines. It will then cache it for
future renders.
"""
return cache_memoize('text-attachment-%d-lines' % self.obj.pk,
lambda: list(self.generate_highlighted_text()))
def get_rendered_lines(self):
"""Returns the file contents as a render, based on the raw text.
If a subclass sets ``can_render_text = True`` and implements
``generate_render``, then this will render the contents in some
specialized form, cache it as a list of lines, and return it.
"""
if self.can_render_text:
return cache_memoize(
'text-attachment-%d-rendered' % self.obj.pk,
lambda: list(self.generate_render()))
else:
return []
def _get_text_uncached(self):
"""Return the text from the file."""
self.obj.file.open()
with self.obj.file as f:
data = f.read()
return data
def generate_highlighted_text(self):
"""Generates syntax-highlighted text for the file.
This will render the text file to HTML, applying any syntax
highlighting that's appropriate. The contents will be split into
reviewable lines and will be cached for future renders.
"""
data = self.get_text()
lexer = self.get_source_lexer(self.obj.filename, data)
lines = highlight(data, lexer, NoWrapperHtmlFormatter()).splitlines()
return [
'<pre>%s</pre>' % line
for line in lines
]
def get_source_lexer(self, filename, data):
"""Returns the lexer that should be used for the text.
By default, this will attempt to guess the lexer based on the
filename, falling back to a plain-text lexer.
Subclasses can override this to choose a more specific lexer.
"""
try:
return guess_lexer_for_filename(filename, data)
except ClassNotFound:
return TextLexer()
def generate_render(self):
"""Generates a render of the text.
By default, this won't do anything. Subclasses should override it
to turn the raw text into some form of rendered content. For
example, rendering Markdown.
"""
raise NotImplementedError
def serialize_comments(self, comments):
"""Return a dictionary of the comments for this file attachment."""
result = {}
for comment in comments:
try:
key = '%s-%s' % (comment.extra_data['beginLineNum'],
comment.extra_data['endLineNum'])
except KeyError:
# It's possible this comment was made before the review UI
# was provided, meaning it has no data. If this is the case,
# ignore this particular comment, since it doesn't have a
# line region.
continue
result.setdefault(key, []).append(self.serialize_comment(comment))
return result
def get_comment_thumbnail(self, comment):
"""Generates and returns a thumbnail representing this comment.
This will find the appropriate lines the comment applies to and
return it as HTML suited for rendering in reviews.
"""
try:
begin_line_num = int(comment.extra_data['beginLineNum'])
end_line_num = int(comment.extra_data['endLineNum'])
view_mode = comment.extra_data['viewMode']
except (KeyError, ValueError):
# This may be a comment from before we had review UIs. Or,
# corrupted data. Either way, don't display anything.
return None
return cache_memoize(
'text-review-ui-comment-thumbnail-%s-%s' % (self.obj.pk,
comment.pk),
lambda: self.render_comment_thumbnail(comment, begin_line_num,
end_line_num, view_mode))
def render_comment_thumbnail(self, comment, begin_line_num, end_line_num,
view_mode):
"""Renders the content of a comment thumbnail.
This will, by default, call render() and then pull out the lines
that were commented on.
Subclasses can override to do more specialized thumbnail rendering.
"""
if view_mode not in ('source', 'rendered'):
logger.warning('Unexpected view mode "%s" when rendering '
'comment thumbnail.',
view_mode)
return ''
context = {
'is_diff': self.diff_against_obj is not None,
'review_ui': self,
'revision': self.obj.attachment_revision,
}
if self.diff_against_obj:
if view_mode == 'source':
chunk_generator = self._get_source_diff_chunk_generator()
elif view_mode == 'rendered':
chunk_generator = self._get_rendered_diff_chunk_generator()
chunks = get_chunks_in_range(chunk_generator.get_chunks(),
begin_line_num,
end_line_num - begin_line_num + 1)
context.update({
'chunks': chunks,
'diff_revision': self.diff_against_obj.attachment_revision,
})
else:
try:
if view_mode == 'source':
lines = self.get_text_lines()
elif view_mode == 'rendered':
lines = self.get_rendered_lines()
except Exception as e:
logger.error('Unable to generate text attachment comment '
'thumbnail for comment %s: %s',
comment, e)
return ''
# Grab only the lines we care about.
#
# The line numbers are stored 1-indexed, so normalize to 0.
lines = lines[begin_line_num - 1:end_line_num]
context['lines'] = [
{
'line_num': begin_line_num + i,
'text': mark_safe(line),
}
for i, line in enumerate(lines)
]
return render_to_string(
template_name=self.comment_thumbnail_template_name,
context=context)
def get_comment_link_url(self, comment):
"""Returns the URL to the file and line commented on.
This will link to the correct file, view mode, and line for the
given comment.
"""
base_url = super(TextBasedReviewUI, self).get_comment_link_url(comment)
try:
begin_line_num = int(comment.extra_data['beginLineNum'])
view_mode = comment.extra_data['viewMode']
except (KeyError, ValueError):
# This may be a comment from before we had review UIs. Or,
# corrupted data. Either way, just return the default.
return base_url
return '%s#%s/line%s' % (base_url, view_mode, begin_line_num)
def _get_diff_chunk_generator(self, chunk_generator_cls, orig, modified):
"""Return a chunk generator showing a diff for the text.
The chunk generator will diff the text of this attachment against
the text of the attachment being diffed against.
This is used both for displaying the file attachment and
rendering the thumbnail.
Args:
chunk_generator_cls (type):
The chunk generator to instantiate. This should be a subclass
of :py:class:`~reviewboard.diffviewer.chunk_generator
.RawDiffChunkGenerator`.
orig (bytes or list of bytes):
The original file content to diff against.
modified (bytes or list of bytes):
The new file content.
Returns:
reviewboard.diffviewer.chunk_generator.RawDiffChunkGenerator:
The chunk generator used to diff source or rendered text.
"""
assert self.diff_against_obj
return chunk_generator_cls(
old=orig,
new=modified,
orig_filename=self.obj.filename,
modified_filename=self.diff_against_obj.filename)
def _get_source_diff_chunk_generator(self):
"""Return a chunk generator for diffing source text.
Returns:
reviewboard.diffviewer.chunk_generator.RawDiffChunkGenerator:
The chunk generator used to diff source text.
"""
return self._get_diff_chunk_generator(
self.source_chunk_generator_cls,
force_bytes(self.diff_against_obj.review_ui.get_text()),
force_bytes(self.get_text()))
def _get_rendered_diff_chunk_generator(self):
"""Return a chunk generator for diffing rendered text.
Returns:
reviewboard.diffviewer.chunk_generator.RawDiffChunkGenerator:
The chunk generator used to diff rendered text.
"""
diff_against_review_ui = self.diff_against_obj.review_ui
return self._get_diff_chunk_generator(
self.rendered_chunk_generator_cls,
[
force_bytes(line)
for line in diff_against_review_ui.get_rendered_lines()
],
[
force_bytes(line)
for line in self.get_rendered_lines()
]
)
| mit | cd5845a887e21c8ad7b2580fbe95a2ad | 35.528 | 79 | 0.579501 | 4.523778 | false | false | false | false |
reviewboard/reviewboard | reviewboard/diffviewer/forms.py | 1 | 16962 | """Forms for uploading diffs."""
from functools import partial
from dateutil.parser import isoparse
from django import forms
from django.core.exceptions import ValidationError
from django.utils.encoding import force_str
from django.utils.translation import gettext, gettext_lazy as _
from reviewboard.diffviewer.commit_utils import (deserialize_validation_info,
get_file_exists_in_history)
from reviewboard.diffviewer.differ import DiffCompatVersion
from reviewboard.diffviewer.diffutils import check_diff_size
from reviewboard.diffviewer.filediff_creator import create_filediffs
from reviewboard.diffviewer.models import DiffCommit, DiffSet
from reviewboard.diffviewer.validators import (COMMIT_ID_LENGTH,
validate_commit_id)
class BaseCommitValidationForm(forms.Form):
"""A form mixin for handling validation metadata for commits."""
validation_info = forms.CharField(
label=_('Validation metadata'),
help_text=_('Validation metadata generated by the diff commit '
'validation resource.'),
widget=forms.HiddenInput,
required=False)
def clean_validation_info(self):
"""Clean the validation_info field.
This method ensures that if the field is supplied that it parses as
base64-encoded JSON.
Returns:
dict:
The parsed validation information.
Raises:
django.core.exceptions.ValidationError:
The value could not be parsed.
"""
validation_info = self.cleaned_data.get('validation_info', '').strip()
if not validation_info:
return {}
try:
return deserialize_validation_info(validation_info)
except (TypeError, ValueError) as e:
raise ValidationError(
gettext(
'Could not parse validation info "%(validation_info)s": '
'%(exc)s'
) % {
'exc': e,
'validation_info': validation_info,
})
class UploadCommitForm(BaseCommitValidationForm):
"""The form for uploading a diff and creating a DiffCommit."""
diff = forms.FileField(
label=_('Diff'),
help_text=_('The new diff to upload.'))
parent_diff = forms.FileField(
label=_('Parent diff'),
help_text=_('An optional diff that the main diff is based on. '
'This is usually used for distributed revision control '
'systems (Git, Mercurial, etc.).'),
required=False)
commit_id = forms.CharField(
label=_('Commit ID'),
help_text=_('The ID of this commit.'),
max_length=COMMIT_ID_LENGTH,
validators=[validate_commit_id])
parent_id = forms.CharField(
label=_('Parent commit ID'),
help_text=_('The ID of the parent commit.'),
max_length=COMMIT_ID_LENGTH,
validators=[validate_commit_id])
commit_message = forms.CharField(
label=_('Description'),
help_text=_('The commit message.'))
author_name = forms.CharField(
label=_('Author name'),
help_text=_('The name of the author of this commit.'),
max_length=DiffCommit.NAME_MAX_LENGTH)
author_email = forms.CharField(
label=_('Author e-mail address'),
help_text=_('The e-mail address of the author of this commit.'),
max_length=DiffCommit.EMAIL_MAX_LENGTH,
widget=forms.EmailInput)
author_date = forms.CharField(
label=_('Author date'),
help_text=_('The date and time this commit was authored.'))
committer_name = forms.CharField(
label=_('Committer name'),
help_text=_('The name of the committer of this commit.'),
max_length=DiffCommit.NAME_MAX_LENGTH,
required=True)
committer_email = forms.CharField(
label=_('Committer e-mail address'),
help_text=_('The e-mail address of the committer of this commit.'),
max_length=DiffCommit.EMAIL_MAX_LENGTH,
widget=forms.EmailInput,
required=True)
committer_date = forms.CharField(
label=_('Committer date'),
help_text=_('The date and time this commit was committed.'),
required=True)
def __init__(self, diffset, request=None, *args, **kwargs):
"""Initialize the form.
Args:
diffset (reviewboard.diffviewer.models.diffset.DiffSet):
The DiffSet to attach the created DiffCommit to.
request (django.http.HttpRequest, optional):
The HTTP request from the client.
*args (tuple):
Additional positional arguments.
**kwargs (dict):
Additional keyword arguments.
"""
super(UploadCommitForm, self).__init__(*args, **kwargs)
if not diffset.repository.scmtool_class.commits_have_committer:
del self.fields['committer_date']
del self.fields['committer_email']
del self.fields['committer_name']
self.diffset = diffset
self.request = request
def create(self):
"""Create the DiffCommit.
Returns:
reviewboard.diffviewer.models.diffcommit.DiffCommit:
The created DiffCommit.
"""
assert self.is_valid()
return DiffCommit.objects.create_from_upload(
request=self.request,
validation_info=self.cleaned_data['validation_info'],
diffset=self.diffset,
repository=self.diffset.repository,
base_commit_id=self.diffset.base_commit_id,
diff_file=self.cleaned_data['diff'],
parent_diff_file=self.cleaned_data.get('parent_diff'),
commit_message=self.cleaned_data['commit_message'],
commit_id=self.cleaned_data['commit_id'],
parent_id=self.cleaned_data['parent_id'],
author_name=self.cleaned_data['author_name'],
author_email=self.cleaned_data['author_email'],
author_date=self.cleaned_data['author_date'],
committer_name=self.cleaned_data.get('committer_name'),
committer_email=self.cleaned_data.get('committer_email'),
committer_date=self.cleaned_data.get('committer_date'))
def clean(self):
"""Clean the form.
Returns:
dict:
The cleaned form data.
Raises:
django.core.exceptions.ValidationError:
The form data was not valid.
"""
super(UploadCommitForm, self).clean()
if self.diffset.history_id is not None:
# A diffset will have a history attached if and only if it has been
# published, in which case we cannot attach further commits to it.
raise ValidationError(gettext(
'Cannot upload commits to a published diff.'))
if (self.diffset.commit_count and
'validation_info' not in self.cleaned_data and
'validation_info' not in self.errors):
# If validation_info is present in `errors`, it will not be in
# self.cleaned_data. We do not want to report it missing if it
# failed validation for another reason.
self._errors['validation_info'] = self.error_class([
self.fields['validation_info'].error_messages['required'],
])
return self.cleaned_data
def clean_author_date(self):
"""Parse the date and time in the author_date field.
Returns:
datetime.datetime:
The parsed date and time.
"""
try:
return isoparse(self.cleaned_data['author_date'])
except ValueError:
raise ValidationError(gettext(
'This date must be in ISO 8601 format.'))
def clean_committer_date(self):
"""Parse the date and time in the committer_date field.
Returns:
datetime.datetime:
The parsed date and time.
"""
try:
return isoparse(self.cleaned_data['committer_date'])
except ValueError:
raise ValidationError(gettext(
'This date must be in ISO 8601 format.'))
class UploadDiffForm(forms.Form):
"""The form for uploading a diff and creating a DiffSet."""
path = forms.FileField(
label=_('Diff'),
help_text=_('The new diff to upload.'))
parent_diff_path = forms.FileField(
label=_('Parent Diff'),
help_text=_('An optional diff that the main diff is based on. '
'This is usually used for distributed revision control '
'systems (Git, Mercurial, etc.).'),
required=False)
basedir = forms.CharField(
label=_('Base Directory'),
help_text=_('The absolute path in the repository the diff was '
'generated in.'))
base_commit_id = forms.CharField(
label=_('Base Commit ID'),
help_text=_('The ID/revision this change is built upon.'),
required=False)
def __init__(self, repository, request=None, *args, **kwargs):
"""Initialize the form.
Args:
repository (reviewboard.scmtools.models.Repository):
The repository the diff will be uploaded against.
request (django.http.HttpRequest, optional):
The HTTP request from the client.
*args (tuple):
Additional positional arguments.
**kwrgs (dict):
Additional keyword arguments.
"""
super(UploadDiffForm, self).__init__(*args, **kwargs)
self.repository = repository
self.request = request
if repository.diffs_use_absolute_paths:
# This SCMTool uses absolute paths, so there's no need to ask
# the user for the base directory.
del(self.fields['basedir'])
def clean_base_commit_id(self):
"""Clean the ``base_commit_id`` field.
Returns:
unicode:
The ``base_commit_id`` field stripped of leading and trailing
whitespace, or ``None`` if that value would be empty.
"""
return self.cleaned_data['base_commit_id'].strip() or None
def clean_basedir(self):
"""Clean the ``basedir`` field.
Returns:
unicode:
The basedir field as a unicode string with leading and trailing
whitespace removed.
"""
if self.repository.diffs_use_absolute_paths:
return ''
return force_str(self.cleaned_data['basedir'].strip())
def create(self, diffset_history=None):
"""Create the DiffSet.
Args:
diffset_history (reviewboard.diffviewer.models.diffset_history.
DiffSetHistory):
The DiffSet history to attach the created DiffSet to.
Returns:
reviewboard.diffviewer.models.diffset.DiffSet:
The created DiffSet.
"""
assert self.is_valid()
return DiffSet.objects.create_from_upload(
repository=self.repository,
diffset_history=diffset_history,
diff_file=self.cleaned_data['path'],
parent_diff_file=self.cleaned_data.get('parent_diff_path'),
basedir=self.cleaned_data.get('basedir', ''),
base_commit_id=self.cleaned_data['base_commit_id'],
request=self.request)
class ValidateCommitForm(BaseCommitValidationForm):
"""A form for validating of DiffCommits."""
diff = forms.FileField(
label=_('Diff'),
help_text=_('The new diff to upload.'))
parent_diff = forms.FileField(
label=_('Parent diff'),
help_text=_('An optional diff that the main diff is based on. '
'This is usually used for distributed revision control '
'systems (Git, Mercurial, etc.).'),
required=False)
commit_id = forms.CharField(
label=_('Commit ID'),
help_text=_('The ID of this commit.'),
max_length=COMMIT_ID_LENGTH,
validators=[validate_commit_id])
parent_id = forms.CharField(
label=_('Parent commit ID'),
help_text=_('The ID of the parent commit.'),
max_length=COMMIT_ID_LENGTH,
validators=[validate_commit_id])
base_commit_id = forms.CharField(
label=_('Base commit ID'),
help_text=_('The base commit ID that the commits are based off of.'),
required=False)
def __init__(self, repository, request=None, *args, **kwargs):
"""Initialize the form.
Args:
repository (reviewboard.scmtools.models.Repository):
The repository against which the diff is being validated.
request (django.http.HttpRequest, optional):
The HTTP request from the client.
*args (tuple):
Additional positional arguments to pass to the base
class initializer.
**kwargs (dict):
Additional keyword arguments to pass to the base class
initializer.
"""
super(ValidateCommitForm, self).__init__(*args, **kwargs)
self.repository = repository
self.request = request
def clean(self):
"""Clean the form.
Returns:
dict:
The cleaned form data.
Raises:
django.core.exceptions.ValidationError:
The form data was not valid.
"""
super(ValidateCommitForm, self).clean()
validation_info = self.cleaned_data.get('validation_info')
if validation_info:
errors = []
parent_id = self.cleaned_data.get('parent_id')
commit_id = self.cleaned_data.get('commit_id')
if commit_id and commit_id in validation_info:
errors.append(gettext('This commit was already validated.'))
elif parent_id and parent_id not in validation_info:
errors.append(gettext('The parent commit was not validated.'))
if errors:
self._errors['validation_info'] = self.error_class(errors)
self.cleaned_data.pop('validation_info')
return self.cleaned_data
def validate_diff(self):
"""Validate the DiffCommit.
This will attempt to parse the given diff (and optionally parent
diff) into :py:class:`FileDiffs
<reviewboard.diffviewer.models.filediff.FileDiff>`. This will not
result in anything being committed to the database.
Returns:
tuple:
A 2-tuple containing the following:
* A list of the created FileDiffs.
* A list of the parent FileDiffs, or ``None``.
Raises:
reviewboard.diffviewer.errors.DiffParserError:
The diff could not be parsed.
reviewboard.diffviewer.errors.DiffTooBigError:
The diff was too big.
reviewboard.diffviewer.errors.EmptyDiffError:
The diff did not contain any changes.
reviewboard.scmtools.errors.FileNotFoundError:
A file was not found in the repository.
reviewboard.scmtools.errors.SCMError:
An error occurred within the SCMTool.
"""
assert self.is_valid()
diff_file = self.cleaned_data['diff']
parent_diff_file = self.cleaned_data.get('parent_diff')
validation_info = self.cleaned_data.get('validation_info')
check_diff_size(diff_file, parent_diff_file)
if parent_diff_file:
parent_diff_file_contents = parent_diff_file.read()
else:
parent_diff_file_contents = None
base_commit_id = self.cleaned_data['base_commit_id']
diffset = DiffSet(name='diff',
revision=0,
basedir='',
repository=self.repository,
diffcompat=DiffCompatVersion.DEFAULT,
base_commit_id=base_commit_id)
get_file_exists = partial(get_file_exists_in_history,
validation_info or {},
self.repository,
self.cleaned_data['parent_id'])
return create_filediffs(
diff_file_contents=diff_file.read(),
parent_diff_file_contents=parent_diff_file_contents,
repository=self.repository,
basedir='',
base_commit_id=base_commit_id,
get_file_exists=get_file_exists,
diffset=diffset,
request=self.request,
diffcommit=None,
validate_only=True)
| mit | 3191a67e96886f7b0567e6d46a966ad3 | 33.758197 | 79 | 0.584719 | 4.621798 | false | false | false | false |
reviewboard/reviewboard | reviewboard/oauth/admin.py | 1 | 5321 | """Django model administration for OAuth2 applications."""
import importlib
from django.contrib.admin.options import IS_POPUP_VAR
from django.contrib.admin.utils import flatten_fieldsets
from django.utils.translation import gettext_lazy as _
from djblets.forms.fieldsets import filter_fieldsets
from reviewboard.admin import ModelAdmin, admin_site
from reviewboard.oauth.forms import (ApplicationChangeForm,
ApplicationCreationForm)
from reviewboard.oauth.models import Application
class ApplicationAdmin(ModelAdmin):
"""The model admin for the OAuth application model.
The default model admin provided by django-oauth-toolkit does not provide
help text for the majority of the fields, so this admin uses a custom form
which does provide the help text.
"""
form = ApplicationChangeForm
add_form = ApplicationCreationForm
raw_id_fields = ('local_site',)
fieldsets = (
(_('General Settings'), {
'fields': ('name',
'enabled',
'user',
'redirect_uris',),
}),
(_('Client Settings'), {
'fields': ('client_id',
'client_secret',
'client_type'),
}),
(_('Authorization Settings'), {
'fields': ('authorization_grant_type',
'skip_authorization',
'local_site',),
}),
(_('Internal State'), {
'description': _(
'<p>This is advanced state that should not be modified unless '
'something is wrong.</p>'
),
'fields': ('original_user',
'extra_data'),
'classes': ('collapse',),
}),
)
add_fieldsets = tuple(filter_fieldsets(
form=add_form,
fieldsets=fieldsets,
exclude_collapsed=False,
))
def get_fieldsets(self, request, obj=None):
"""Return the appropriate fieldset.
Args:
request (django.http.HttpRequest):
The current HTTP request.
obj (reviewboard.oauth.models.Application, optional):
The application being edited, if it already exists.
Returns:
tuple:
The fieldset for either changing an Application (i.e., when
``obj is not None``) or the fieldset for creating an Application.
"""
if obj is None:
return self.add_fieldsets
return super(ApplicationAdmin, self).get_fieldsets(request, obj=obj)
def get_form(self, request, obj=None, **kwargs):
"""Return the form class to use.
This method mostly delegates to the superclass, but hints that we
should use :py:attr:`add_form` (and its fields) when we are creating
the Application.
Args:
request (django.http.HttpRequest):
The current HTTP request.
obj (reviewboard.oauth.models.Application, optional):
The application being edited, if it exists.
Returns:
type:
The form class to use.
"""
if obj is None:
kwargs = kwargs.copy()
kwargs['form'] = self.add_form
kwargs['fields'] = flatten_fieldsets(self.add_fieldsets)
return super(ApplicationAdmin, self).get_form(request, obj=obj,
**kwargs)
def response_add(self, request, obj, post_url_continue=None):
"""Return the response for the ``add_view`` stage.
This method will redirect the user to the change form after creating
the application. We do this because the ``client_secret`` and
``client_id`` fields are generated by saving the form and it is likely
the user will want to view and/or copy them after creating this
Application.
Args:
request (django.http.HttpRequest):
The current HTTP request.
obj (reviewboard.oauth.models.Application):
The application that was created.
post_url_continue (unicode, optional):
The next URL to go to.
Returns:
django.http.HttpResponse:
A response redirecting the user to the change form.
"""
if ('_addanother' not in request.POST and
IS_POPUP_VAR not in request.POST):
# request.POST is immutable on modern versions of Django. The
# pattern used within Django for this exact situation is to copy
# the dictionary and then modify it.
request.POST = request.POST.copy()
request.POST['_continue'] = 1
return super(ApplicationAdmin, self).response_add(
request,
obj,
post_url_continue=post_url_continue,
)
# Ensure that the oauth2_provider admin modules is loaded so that we can
# replace their admin registration with our own. If we do not do this, we can't
# guarantee that it will be registered before we try to unregister it during
# unit tests.
importlib.import_module('oauth2_provider.admin')
admin_site.unregister(Application)
admin_site.register(Application, ApplicationAdmin)
| mit | d4850d1a9f7c8138ba251819b254be79 | 33.329032 | 79 | 0.588987 | 4.846084 | false | false | false | false |
reviewboard/reviewboard | reviewboard/webapi/resources/watched_review_request.py | 1 | 3491 | from djblets.util.decorators import augment_method_from
from reviewboard.webapi.decorators import webapi_check_local_site
from reviewboard.webapi.resources import resources
from reviewboard.webapi.resources.base_watched_object import \
BaseWatchedObjectResource
class WatchedReviewRequestResource(BaseWatchedObjectResource):
"""Lists and manipulates entries for review requests watched by the user.
These are requests that the user has starred in their Dashboard.
This resource can be used for listing existing review requests and adding
new review requests to watch.
Each item in the resource is an association between the user and the
review request. The entries in the list are not the review requests
themselves, but rather an entry that represents this association by
listing the association's ID (which can be used for removing the
association) and linking to the review request.
"""
name = 'watched_review_request'
uri_name = 'review-requests'
profile_field = 'starred_review_requests'
star_function = 'star_review_request'
unstar_function = 'unstar_review_request'
@property
def watched_resource(self):
"""Return the watched resource.
This is implemented as a property in order to work around
a circular reference issue.
"""
return resources.review_request
@webapi_check_local_site
@augment_method_from(BaseWatchedObjectResource)
def get(self, *args, **kwargs):
"""Redirects to the review request being watched.
Rather than returning a body with the entry, performing an HTTP GET
on this resource will redirect the client to the actual review request
being watched.
Clients must properly handle :http:`302` and expect this redirect
to happen.
"""
pass
@webapi_check_local_site
@augment_method_from(BaseWatchedObjectResource)
def get_list(self, *args, **kwargs):
"""Retrieves the list of watched review requests.
Each entry in the list consists of a numeric ID that represents the
entry for the watched review request. This is not necessarily the ID
of the review request itself. It's used for looking up the resource
of the watched item so that it can be removed.
"""
pass
@webapi_check_local_site
@augment_method_from(BaseWatchedObjectResource)
def create(self, *args, **kwargs):
"""Marks a review request as being watched.
The ID of the review group must be passed as ``object_id``, and will
store that review group in the list.
"""
pass
@webapi_check_local_site
@augment_method_from(BaseWatchedObjectResource)
def delete(self, *args, **kwargs):
"""Deletes a watched review request entry.
This is the same effect as unstarring a review request. It does
not actually delete the review request, just the entry in the list.
"""
pass
def serialize_object(self, obj, *args, **kwargs):
return {
'id': obj.display_id,
self.item_result_key: obj,
}
def get_watched_object(self, queryset, obj_id, local_site_name=None,
*args, **kwargs):
if local_site_name:
return queryset.get(local_id=obj_id)
else:
return queryset.get(pk=obj_id)
watched_review_request_resource = WatchedReviewRequestResource()
| mit | 43b3823f975da28d9be50f5e339f9cfd | 34.989691 | 78 | 0.679175 | 4.27295 | false | false | false | false |
reviewboard/reviewboard | reviewboard/site/context_processors.py | 1 | 1972 | from django.contrib.auth.context_processors import PermLookupDict, PermWrapper
from reviewboard.site.models import LocalSite
class AllPermsLookupDict(PermLookupDict):
    """A permission lookup dictionary that is Local Site-aware.

    This extends Django's permission lookup to check permissions against
    the wrapper's Local Site (when one is set) in addition to the standard
    global permissions.
    """

    def __init__(self, user, app_label, perms_wrapper):
        """Initialize the lookup dictionary.

        Args:
            user (django.contrib.auth.models.User):
                The user whose permissions are being looked up.

            app_label (str):
                The Django app label to scope permission lookups to.

            perms_wrapper (AllPermsWrapper):
                The wrapper providing the Local Site for lookups.
        """
        super(AllPermsLookupDict, self).__init__(user, app_label)
        self.perms_wrapper = perms_wrapper

    def __repr__(self):
        """Return all of the user's permissions on the Local Site."""
        return str(self.user.get_all_permissions(
            self.perms_wrapper.get_local_site()))

    def __getitem__(self, perm_name):
        """Return whether the user has the given permission."""
        return self.user.has_perm('%s.%s' % (self.app_label, perm_name),
                                  self.perms_wrapper.get_local_site())

    def __nonzero__(self):
        # Python 2 legacy alias for __bool__. The parent class no longer
        # defines __nonzero__ on Python 3, so delegating to
        # super().__nonzero__() (as this previously did) would raise
        # AttributeError if this were ever called. Delegate to __bool__
        # instead.
        return self.__bool__()

    def __bool__(self):
        """Return whether the user has any permissions for the app."""
        return super(AllPermsLookupDict, self).__bool__()
class AllPermsWrapper(PermWrapper):
    """A template permissions wrapper that is Local Site-aware.

    This wraps a user's permissions so that lookups consider the named
    Local Site (fetched lazily) in addition to global permissions.
    """

    def __init__(self, user, local_site_name):
        """Initialize the wrapper for a user and Local Site name."""
        super(AllPermsWrapper, self).__init__(user)

        self.local_site_name = local_site_name
        self.local_site = None

    def __getitem__(self, app_label):
        """Return a Local Site-aware lookup for the given app label."""
        return AllPermsLookupDict(self.user, app_label, self)

    def get_local_site(self):
        """Return the LocalSite for this wrapper, fetching it lazily."""
        name = self.local_site_name

        if name is None:
            return None

        if not self.local_site:
            # Fetch and memoize the Local Site on first access.
            self.local_site = LocalSite.objects.get(name=name)

        return self.local_site
def localsite(request):
    """Returns context variables useful to Local Sites.

    This provides the name of the Local Site (``local_site_name``), and
    a permissions variable used for accessing user permissions (``perm``).

    ``perm`` overrides the permissions provided by the Django auth framework.
    These permissions cover Local Sites along with the standard global
    permissions.
    """
    # The middleware stashes the Local Site name on the request, if any.
    name = getattr(request, '_local_site_name', None)
    perms = AllPermsWrapper(request.user, name)

    return {
        'local_site_name': name,
        'perms': perms,
    }
| mit | 8305db4ffb6fcdf80e183d0f8da73c13 | 30.806452 | 78 | 0.64858 | 3.8591 | false | false | false | false |
reviewboard/reviewboard | reviewboard/reviews/views/mixins.py | 1 | 11984 | """Mixins for review request views."""
from __future__ import annotations
import logging
from datetime import datetime
from typing import List, Optional
from django.db.models import Q
from django.http import Http404, HttpRequest, HttpResponse
from django.shortcuts import get_object_or_404, render
from django.template.defaultfilters import date
from django.utils.formats import localize
from django.utils.html import format_html
from django.utils.safestring import SafeString, mark_safe
from django.utils.timezone import localtime
from django.utils.translation import gettext
from djblets.views.generic.base import (CheckRequestMethodViewMixin,
PrePostDispatchViewMixin)
from typing_extensions import TypedDict
from reviewboard.accounts.mixins import CheckLoginRequiredViewMixin
from reviewboard.attachments.models import FileAttachment
from reviewboard.diffviewer.models import DiffSet
from reviewboard.reviews.models import ReviewRequest, ReviewRequestDraft
from reviewboard.reviews.models.base_review_request_details import \
BaseReviewRequestDetails
from reviewboard.reviews.models.review_request import ReviewRequestCloseInfo
from reviewboard.site.mixins import CheckLocalSiteAccessViewMixin
from reviewboard.site.models import LocalSite
logger = logging.getLogger(__name__)
class ReviewRequestViewMixin(CheckRequestMethodViewMixin,
                             CheckLoginRequiredViewMixin,
                             CheckLocalSiteAccessViewMixin,
                             PrePostDispatchViewMixin):
    """Common functionality for all review request-related pages.

    This performs checks to ensure that the user has access to the page,
    returning an error page if not. It also provides common functionality
    for fetching a review request for the given page, returning suitable
    context for the template, and generating an image used to represent
    the site when posting to social media sites.
    """

    permission_denied_template_name = \
        'reviews/review_request_permission_denied.html'

    class StatusExtraInfo(TypedDict):
        """Extra info to include in the status rendering.

        This is used in :py:method:`get_review_request_status_html`.
        """

        #: A text string to display to the user.
        #:
        #: Type:
        #:     str
        text: str

        #: The timestamp associated with this info block.
        #:
        #: Type:
        #:     datetime.datetime
        timestamp: Optional[datetime]

        #: Extra info to format into the ``text`` string.
        #:
        #: Type:
        #:     dict
        extra_vars: dict

    def pre_dispatch(
        self,
        request: HttpRequest,
        review_request_id: int,
        *args,
        **kwargs,
    ) -> Optional[HttpResponse]:
        """Look up objects and permissions before dispatching the request.

        This will first look up the review request, returning an error page
        if it's not accessible. It will then store the review request before
        calling the handler for the HTTP request.

        Args:
            request (django.http.HttpRequest):
                The HTTP request from the client.

            review_request_id (int):
                The ID of the review request being accessed.

            *args (tuple):
                Positional arguments to pass to the handler.

            **kwargs (dict):
                Keyword arguments to pass to the handler.

                These will be arguments provided by the URL pattern.

        Returns:
            django.http.HttpResponse:
            The resulting HTTP response to send to the client, if there's
            a Permission Denied.
        """
        self.review_request = self.get_review_request(
            review_request_id=review_request_id,
            local_site=self.local_site)

        if not self.review_request.is_accessible_by(request.user):
            return self.render_permission_denied(request)

        return None

    def render_permission_denied(
        self,
        request: HttpRequest,
    ) -> HttpResponse:
        """Render a Permission Denied page.

        This will be shown to the user if they're not able to view the
        review request.

        Args:
            request (django.http.HttpRequest):
                The HTTP request from the client.

        Returns:
            django.http.HttpResponse:
            The resulting HTTP response to send to the client.
        """
        return render(request,
                      self.permission_denied_template_name,
                      status=403)

    def get_review_request(
        self,
        review_request_id: int,
        local_site: Optional[LocalSite] = None,
    ) -> ReviewRequest:
        """Return the review request for the given display ID.

        Args:
            review_request_id (int):
                The review request's display ID.

            local_site (reviewboard.site.models.LocalSite):
                The Local Site the review request is on.

        Returns:
            reviewboard.reviews.models.review_request.ReviewRequest:
            The review request for the given display ID and Local Site.

        Raises:
            django.http.Http404:
                The review request could not be found.
        """
        q = ReviewRequest.objects.all()

        if local_site:
            # Local Site review requests are keyed by their site-scoped
            # local_id, not the primary key.
            q = q.filter(local_site=local_site,
                         local_id=review_request_id)
        else:
            q = q.filter(pk=review_request_id)

        q = q.select_related('submitter', 'repository')

        return get_object_or_404(q)

    def get_diff(
        self,
        revision: Optional[int] = None,
        draft: Optional[ReviewRequestDraft] = None,
    ) -> DiffSet:
        """Return a diff on the review request matching the given criteria.

        If a draft is provided, and ``revision`` is either ``None`` or matches
        the revision on the draft's DiffSet, that DiffSet will be returned.

        Args:
            revision (int, optional):
                The revision of the diff to retrieve. If not provided, the
                latest DiffSet will be returned.

            draft (reviewboard.reviews.models.review_request_draft.
                   ReviewRequestDraft, optional):
                The draft of the review request.

        Returns:
            reviewboard.diffviewer.models.diffset.DiffSet:
            The resulting DiffSet.

        Raises:
            django.http.Http404:
                The diff does not exist.
        """
        # Normalize the revision, since it might come in as a string.
        if revision:
            revision = int(revision)

        # This will try to grab the diff associated with a draft if the review
        # request has an associated draft and is either the revision being
        # requested or no revision is being requested.
        if (draft and draft.diffset_id and
            (revision is None or draft.diffset.revision == revision)):
            return draft.diffset

        query = Q(history=self.review_request.diffset_history_id)

        # Grab a revision if requested.
        if revision is not None:
            query = query & Q(revision=revision)

        try:
            return DiffSet.objects.filter(query).latest()
        except DiffSet.DoesNotExist:
            raise Http404

    def get_social_page_image_url(
        self,
        file_attachments: List[FileAttachment],
    ) -> Optional[str]:
        """Return the URL to an image used for social media sharing.

        This will look for the first attachment in a list of attachments that
        can be used to represent the review request on social media sites and
        chat services. If a suitable attachment is found, its URL will be
        returned.

        Args:
            file_attachments (list of reviewboard.attachments.models.
                              FileAttachment):
                A list of file attachments used on a review request.

        Returns:
            str:
            The URL to the first image file attachment, if found, or ``None``
            if no suitable attachments were found.
        """
        for file_attachment in file_attachments:
            if file_attachment.mimetype.startswith('image/'):
                return file_attachment.get_absolute_url()

        return None

    def get_review_request_status_html(
        self,
        review_request_details: BaseReviewRequestDetails,
        close_info: ReviewRequestCloseInfo,
        extra_info: Optional[
            List[ReviewRequestViewMixin.StatusExtraInfo]] = None,
    ) -> SafeString:
        """Return HTML describing the current status of a review request.

        This will return a description of the submitted, discarded, or open
        state for the review request, for use in the rendering of the page.

        Args:
            review_request_details (reviewboard.reviews.models
                                    .base_review_request_details
                                    .BaseReviewRequestDetails):
                The review request or draft being viewed.

            close_info (dict):
                A dictionary of information on the closed state of the
                review request.

            extra_info (list of dict, optional):
                A list of dictionaries showing additional status information.

                Each must have a ``text`` field containing a format string
                using ``{keyword}``-formatted variables, a ``timestamp`` field
                (which will be normalized to the local timestamp), and an
                optional ``extra_vars`` for the format string.

        Returns:
            django.utils.safestring.SafeString:
            The status text as HTML for the page.
        """
        # Avoid a shared mutable default argument by normalizing here.
        if extra_info is None:
            extra_info = []

        review_request = self.review_request
        status = review_request.status

        if status == ReviewRequest.SUBMITTED:
            timestamp = close_info['timestamp']

            if timestamp:
                text = gettext('Created {created_time} and submitted '
                               '{timestamp}')
            else:
                text = gettext('Created {created_time} and submitted')
        elif status == ReviewRequest.DISCARDED:
            timestamp = close_info['timestamp']

            if timestamp:
                text = gettext('Created {created_time} and discarded '
                               '{timestamp}')
            else:
                text = gettext('Created {created_time} and discarded')
        elif status == ReviewRequest.PENDING_REVIEW:
            text = gettext('Created {created_time} and updated {timestamp}')
            timestamp = review_request_details.last_updated
        else:
            # NOTE: The previous implementation referenced an undefined
            # ``request`` name here, raising a NameError whenever this
            # branch was hit. Use the request stored on the view instead.
            logger.error('Unexpected review request status %r for '
                         'review request %s',
                         status, review_request.display_id,
                         extra={'request': self.request})

            return mark_safe('')

        parts = [
            {
                'text': text,
                'timestamp': timestamp,
                'extra_vars': {
                    'created_time': date(localtime(review_request.time_added)),
                },
            },
        ] + extra_info

        html_parts = []

        for part in parts:
            if part['timestamp']:
                timestamp = localtime(part['timestamp'])
                timestamp_html = format_html(
                    '<time class="timesince" datetime="{0}">{1}</time>',
                    timestamp.isoformat(),
                    localize(timestamp))
            else:
                timestamp_html = ''

            html_parts.append(format_html(
                part['text'],
                timestamp=timestamp_html,
                **part.get('extra_vars', {})))

        return mark_safe(' — '.join(html_parts))
| mit | 778649051dff176c4a2ee0bf3bec6fcf | 34.351032 | 79 | 0.597213 | 4.937783 | false | false | false | false |
reviewboard/reviewboard | reviewboard/webapi/resources/base_file_attachment_comment.py | 1 | 4652 | """Base class for file attachment comment resources."""
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Q
from django.template.defaultfilters import timesince
from djblets.util.decorators import augment_method_from
from djblets.webapi.fields import ResourceFieldType, StringFieldType
from reviewboard.reviews.models import FileAttachmentComment
from reviewboard.webapi.base import WebAPIResource
from reviewboard.webapi.decorators import webapi_check_local_site
from reviewboard.webapi.resources import resources
from reviewboard.webapi.resources.base_comment import BaseCommentResource
class BaseFileAttachmentCommentResource(BaseCommentResource):
    """Base class for file attachment comment resources.

    Provides common fields and functionality for all file attachment comment
    resources. The list of comments cannot be modified from this resource.
    """

    added_in = '1.6'

    model = FileAttachmentComment
    name = 'file_attachment_comment'

    # Resource-specific fields, merged with the common comment fields from
    # BaseCommentResource.
    fields = dict({
        'diff_against_file_attachment': {
            'type': ResourceFieldType,
            'resource': 'reviewboard.webapi.resources.file_attachment.'
                        'FileAttachmentResource',
            'description': 'The file changes were made against in a diff.',
            'added_in': '2.0',
        },
        'file_attachment': {
            'type': ResourceFieldType,
            'resource': 'reviewboard.webapi.resources.file_attachment.'
                        'FileAttachmentResource',
            'description': 'The file the comment was made on.',
        },
        'link_text': {
            'type': StringFieldType,
            'description': 'The text used to describe a link to the file. '
                           'This may differ depending on the comment.',
            'added_in': '1.7.10',
        },
        'review_url': {
            'type': StringFieldType,
            'description': 'The URL to the review UI for the comment on this '
                           'file attachment.',
            'added_in': '1.7.10',
        },
        'thumbnail_html': {
            'type': StringFieldType,
            'description': 'The HTML representing a thumbnail, if any, for '
                           'this comment.',
            'added_in': '1.7.10',
        },
    }, **BaseCommentResource.fields)

    uri_object_key = 'comment_id'

    # This resource is read-only; comments are created through other
    # resources.
    allowed_methods = ('GET',)

    def get_queryset(self, request, review_request_id=None, *args, **kwargs):
        """Return a queryset for FileAttachmentComment models.

        Args:
            request (django.http.HttpRequest):
                The HTTP request from the client.

            review_request_id (int, optional):
                The review request ID used to filter the results. If set,
                only comments from the given review request that are public
                or owned by the requesting user will be included.

            *args (tuple):
                Additional positional arguments.

            **kwargs (dict):
                Additional keyword arguments.

        Returns:
            django.db.models.query.QuerySet:
            A queryset for FileAttachmentComment models.
        """
        # Only comments that have been attached to a review.
        q = Q(review__isnull=False)

        if review_request_id is not None:
            try:
                review_request = resources.review_request.get_object(
                    request, review_request_id=review_request_id,
                    *args, **kwargs)
            except ObjectDoesNotExist:
                raise self.model.DoesNotExist

            # Include comments on both active and inactive (removed) file
            # attachments for this review request.
            q &= (Q(file_attachment__review_request=review_request) |
                  Q(file_attachment__inactive_review_request=review_request))

        return self.model.objects.filter(q)

    def serialize_link_text_field(self, obj, **kwargs):
        """Return the text used to describe a link to the file."""
        return obj.get_link_text()

    def serialize_public_field(self, obj, **kwargs):
        """Return whether the comment's review has been published."""
        return obj.review.get().public

    def serialize_review_url_field(self, obj, **kwargs):
        """Return the URL to the review UI for this comment."""
        return obj.get_review_url()

    def serialize_thumbnail_html_field(self, obj, **kwargs):
        """Return the HTML thumbnail representing this comment, if any."""
        return obj.thumbnail

    def serialize_timesince_field(self, obj, **kwargs):
        """Return a human-readable "time since" string for the comment."""
        return timesince(obj.timestamp)

    def serialize_user_field(self, obj, **kwargs):
        """Return the user who filed the review owning this comment."""
        return obj.review.get().user

    @webapi_check_local_site
    @augment_method_from(WebAPIResource)
    def get(self, *args, **kwargs):
        """Returns information on the comment.

        This contains the comment text, time the comment was made,
        and the file the comment was made on, amongst other information.
        """
        pass
| mit | e353735cd585d819220d6fa83d19fe3a | 35.629921 | 78 | 0.616509 | 4.516505 | false | false | false | false |
reviewboard/reviewboard | reviewboard/accounts/decorators.py | 1 | 3020 | from functools import wraps
from urllib.parse import quote
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect
from djblets.siteconfig.models import SiteConfiguration
from djblets.util.decorators import simple_decorator
from reviewboard.accounts.privacy import is_consent_missing
from reviewboard.site.urlresolvers import local_site_reverse
@simple_decorator
def check_login_required(view_func):
    """Check whether the user needs to log in.

    This is a view decorator that checks whether login is required on this
    installation and, if so, checks if the user is logged in. If login is
    required and the user is not logged in, they're redirected to the login
    link.
    """
    def _wrapped_view(*args, **kwargs):
        siteconfig = SiteConfiguration.objects.get_current()

        if siteconfig.get("auth_require_sitewide_login"):
            handler = login_required(view_func)
        else:
            handler = view_func

        return handler(*args, **kwargs)

    return _wrapped_view
def valid_prefs_required(view_func=None, disable_consent_checks=None):
    """Check whether the profile object exists.

    Several views assume that the user profile object exists, and will break if
    it doesn't. This decorator will ensure that the profile exists before the
    view code runs.

    If the user is not logged in, this will do nothing. That allows it to
    be used with @check_login_required.

    Args:
        view_func (callable, optional):
            The view to decorate.

            If this is not specified, this function returns a decorator that
            accepts a view to decorate.

        disable_consent_checks (callable, optional):
            A callable that will determine whether or not consent checks should
            be disabled.

    Returns:
        callable:
        If ``view_func`` was provided, this returns a decorated version of that
        view. Otherwise a decorator is returned.
    """
    def decorator(view_func):
        @wraps(view_func)
        def decorated(request, *args, **kwargs):
            user = request.user

            if user.is_authenticated:
                # Fetching the profile creates it if missing, so this call
                # must always happen for authenticated users.
                profile, is_new = user.get_profile(return_is_new=True)
                siteconfig = SiteConfiguration.objects.get_current()

                consent_checks_disabled = (
                    callable(disable_consent_checks) and
                    disable_consent_checks(request))

                if (siteconfig.get('privacy_enable_user_consent') and
                    not consent_checks_disabled and
                    (is_new or is_consent_missing(user))):
                    # Send the user to the preferences page to record
                    # consent, returning to the original URL afterward.
                    prefs_url = local_site_reverse('user-preferences',
                                                   request=request)

                    return HttpResponseRedirect(
                        '%s?next=%s'
                        % (prefs_url, quote(request.get_full_path()))
                    )

            return view_func(request, *args, **kwargs)

        return decorated

    if view_func is not None:
        return decorator(view_func)

    return decorator
| mit | 2de6ae88a14a7c2038c7f373f7c70468 | 34.116279 | 79 | 0.634437 | 4.589666 | false | true | false | false |
reviewboard/reviewboard | reviewboard/notifications/managers.py | 1 | 2090 | from django.db.models import Manager, Q
class WebHookTargetManager(Manager):
    """Manages WebHookTarget models.

    This provides a utility function for querying WebHookTargets for a
    given event.
    """

    def for_event(self, event, local_site_id=None, repository_id=None):
        """Return a list of matching webhook targets for the given event.

        Args:
            event (unicode):
                The event name to match. May not be the special
                ``ALL_EVENTS`` value.

            local_site_id (int, optional):
                The ID of the Local Site to restrict the results to.

            repository_id (int, optional):
                The ID of the repository the event applies to, if any.

        Returns:
            list:
            The enabled webhook targets that apply to the event.

        Raises:
            ValueError:
                ``event`` was the ``ALL_EVENTS`` wildcard.
        """
        if event == self.model.ALL_EVENTS:
            raise ValueError('"%s" is not a valid event choice' % event)

        q = Q(enabled=True) & Q(local_site=local_site_id)

        if repository_id is None:
            q &= (Q(apply_to=self.model.APPLY_TO_ALL) |
                  Q(apply_to=self.model.APPLY_TO_NO_REPOS))
        else:
            q &= (Q(apply_to=self.model.APPLY_TO_ALL) |
                  (Q(apply_to=self.model.APPLY_TO_SELECTED_REPOS) &
                   Q(repositories=repository_id)))

        return [
            target
            for target in self.filter(q)
            if event in target.events or self.model.ALL_EVENTS in target.events
        ]

    def for_local_site(self, local_site=None):
        """Return a list of webhooks on the local site.

        Args:
            local_site (reviewboard.site.models.LocalSite):
                An optional local site.

        Returns:
            django.db.models.query.QuerySet:
            A queryset matching all accessible webhooks.
        """
        return self.filter(local_site=local_site)

    def can_create(self, user, local_site=None):
        """Return whether the user can create webhooks on the local site.

        Args:
            user (django.contrib.auth.models.User):
                The user to check for permissions.

            local_site (reviewboard.site.models.LocalSite):
                The current local site, if it exists.

        Returns:
            bool:
            Whether or not the use can create a webhook on the local site.
        """
        # Coerce to bool so this never returns None (the short-circuited
        # expression would otherwise leak None when local_site is None,
        # despite the documented bool return type).
        return bool(user.is_superuser or
                    (user.is_authenticated and
                     local_site and
                     local_site.is_mutable_by(user)))
| mit | 8bbbfb88c02dfae680717b5a8dbb83b8 | 32.709677 | 79 | 0.58134 | 4.074074 | false | false | false | false |
reviewboard/reviewboard | reviewboard/diffviewer/models/diffcommit.py | 1 | 8673 | """DiffCommit model definition."""
from dateutil.tz import tzoffset
from django.db import models
from django.utils import timezone
from django.utils.functional import cached_property
from django.utils.translation import gettext_lazy as _
from djblets.db.fields import JSONField
from reviewboard.diffviewer.diffutils import get_total_line_counts
from reviewboard.diffviewer.managers import DiffCommitManager
from reviewboard.diffviewer.models.diffset import DiffSet
from reviewboard.diffviewer.validators import (COMMIT_ID_LENGTH,
validate_commit_id)
class DiffCommit(models.Model):
    """A representation of a commit from a version control system.

    A DiffSet on a Review Request that represents a commit history will have
    one or more DiffCommits. Each DiffCommit will have one or more associated
    FileDiffs (which also belong to the parent DiffSet).

    The information stored herein is intended to fully represent the state of
    a single commit in that history. The series of DiffCommits can be used to
    re-create the original series of commits posted for review.
    """

    #: The maximum length of the author_name and committer_name fields.
    NAME_MAX_LENGTH = 256

    #: The maximum length of the author_email and committer_email fields.
    EMAIL_MAX_LENGTH = 256

    #: The date format that this model uses.
    ISO_DATE_FORMAT = '%Y-%m-%d %H:%M:%S%z'

    filename = models.CharField(
        _('File Name'),
        max_length=256,
        help_text=_('The original file name of the diff.'))
    diffset = models.ForeignKey(DiffSet,
                                on_delete=models.CASCADE,
                                related_name='commits')

    author_name = models.CharField(
        _('Author Name'),
        max_length=NAME_MAX_LENGTH,
        help_text=_('The name of the commit author.'))
    author_email = models.CharField(
        _('Author Email'),
        max_length=EMAIL_MAX_LENGTH,
        help_text=_('The e-mail address of the commit author.'))
    author_date_utc = models.DateTimeField(
        _('Author Date'),
        help_text=_('The date the commit was authored in UTC.'))
    # Offsets are stored in seconds east of UTC so that the original local
    # timezone of the commit can be reconstructed.
    author_date_offset = models.IntegerField(
        _('Author Date UTC Offset'),
        help_text=_("The author's UTC offset."))

    committer_name = models.CharField(
        _('Committer Name'),
        max_length=NAME_MAX_LENGTH,
        help_text=_('The name of the committer (if applicable).'),
        null=True,
        blank=True)
    committer_email = models.CharField(
        _('Committer Email'),
        max_length=EMAIL_MAX_LENGTH,
        help_text=_('The e-mail address of the committer (if applicable).'),
        null=True,
        blank=True)
    committer_date_utc = models.DateTimeField(
        _('Committer Date'),
        help_text=_('The date the commit was committed in UTC '
                    '(if applicable).'),
        null=True,
        blank=True)
    committer_date_offset = models.IntegerField(
        _('Committer Date UTC Offset'),
        help_text=_("The committer's UTC offset (if applicable)."),
        null=True,
        blank=True)

    commit_message = models.TextField(
        _('Description'),
        help_text=_('The commit message.'))
    commit_id = models.CharField(
        _('Commit ID'),
        max_length=COMMIT_ID_LENGTH,
        validators=[validate_commit_id],
        help_text=_('The unique identifier of the commit.'))
    parent_id = models.CharField(
        _('Parent ID'),
        max_length=COMMIT_ID_LENGTH,
        validators=[validate_commit_id],
        help_text=_('The unique identifier of the parent commit.'))

    #: A timestamp used for generating HTTP caching headers.
    last_modified = models.DateTimeField(
        _('Last Modified'),
        default=timezone.now)

    extra_data = JSONField(null=True)

    objects = DiffCommitManager()

    @property
    def author(self):
        """The author's name and e-mail address.

        This is formatted as :samp:`{author_name} <{author_email}>`.
        """
        return self._format_user(self.author_name, self.author_email)

    @property
    def author_date(self):
        """The author date in its original timezone."""
        tz = tzoffset(None, self.author_date_offset)
        return self.author_date_utc.astimezone(tz)

    @author_date.setter
    def author_date(self, value):
        """Set the author date.

        Args:
            value (datetime.datetime):
                The date to set.

                NOTE(review): this appears to assume a timezone-aware
                datetime; a naive one would make ``utcoffset()`` return
                ``None`` and raise AttributeError — confirm with callers.
        """
        self.author_date_utc = value

        if value is not None:
            self.author_date_offset = value.utcoffset().total_seconds()
        else:
            self.author_date_offset = None

    @property
    def committer(self):
        """The committer's name and e-mail address (if applicable).

        This will be formatted as :samp:`{committer_name} <{committer_email}>`
        if both :py:attr:`committer_name` and :py:attr:`committer_email` are
        set. Otherwise, it be whichever is defined. If neither are defined,
        this will be ``None``.
        """
        return self._format_user(self.committer_name, self.committer_email)

    @property
    def committer_date(self):
        """The committer date in its original timezone.

        If the commit has no committer, this will be ``None``.
        """
        if self.committer_date_offset is None:
            return None

        tz = tzoffset(None, self.committer_date_offset)
        return self.committer_date_utc.astimezone(tz)

    @committer_date.setter
    def committer_date(self, value):
        """Set the committer date.

        Args:
            value (datetime.datetime):
                The date to set.
        """
        self.committer_date_utc = value

        if value is not None:
            self.committer_date_offset = value.utcoffset().total_seconds()
        else:
            self.committer_date_offset = None

    @cached_property
    def summary(self):
        """The first line of the commit message."""
        summary = self.commit_message

        if summary:
            summary = summary.split('\n', 1)[0].strip()

        return summary

    @cached_property
    def summary_truncated(self):
        """The first line of the commit message, truncated to 80 characters."""
        summary = self.summary

        if len(summary) > 80:
            # Truncate to 80 total characters, including the ellipsis.
            summary = summary[:77] + '...'

        return summary

    def serialize(self):
        """Serialize to a dictionary.

        Returns:
            dict:
            A dictionary representing this commit.
        """
        return {
            'author_name': self.author_name,
            'commit_id': self.commit_id,
            'commit_message': self.commit_message,
            'id': self.pk,
            'parent_id': self.parent_id,
        }

    def get_total_line_counts(self):
        """Return the total line counts of all child FileDiffs.

        Returns:
            dict:
            A dictionary with the following keys:

            * ``raw_insert_count``
            * ``raw_delete_count``
            * ``insert_count``
            * ``delete_count``
            * ``replace_count``
            * ``equal_count``
            * ``total_line_count``

            Each entry maps to the sum of that line count type for all child
            :py:class:`FileDiffs
            <reviewboard.diffviewer.models.filediff.FileDiff>`.
        """
        return get_total_line_counts(self.files.all())

    def __str__(self):
        """Return a human-readable representation of the commit.

        Returns:
            unicode:
            The commit ID and its summary (if available).
        """
        if self.summary:
            return '%s: %s' % (self.commit_id, self.summary)

        return self.commit_id

    def _format_user(self, name, email):
        """Format a name and e-mail address.

        Args:
            name (unicode):
                The user's name.

            email (unicode):
                The user's e-mail address.

        Returns:
            unicode:
            A pretty representation of the user and e-mail, or ``None`` if
            neither are defined.
        """
        if name and email:
            return '%s <%s>' % (name, email)
        elif name:
            return name
        elif email:
            return email

        return None

    class Meta:
        app_label = 'diffviewer'
        db_table = 'diffviewer_diffcommit'
        verbose_name = _('Diff Commit')
        verbose_name_plural = _('Diff Commits')
        unique_together = ('diffset', 'commit_id')
        ordering = ('pk',)
| mit | 316ebabe83c4daa914feb3ef6cbbe88c | 30.423913 | 79 | 0.58907 | 4.306356 | false | false | false | false |
reviewboard/reviewboard | reviewboard/scmtools/plastic.py | 1 | 12357 | import logging
import os
import re
import subprocess
from tempfile import mkstemp
from django.utils.translation import gettext_lazy as _
from djblets.util.filesystem import is_exe_in_path
from reviewboard.scmtools.core import (SCMTool, ChangeSet,
HEAD, PRE_CREATION)
from reviewboard.scmtools.errors import (SCMError, FileNotFoundError,
RepositoryNotFoundError)
from reviewboard.diffviewer.parser import DiffParser
logger = logging.getLogger(__name__)
class PlasticTool(SCMTool):
    """SCMTool backend for Plastic SCM repositories."""

    scmtool_id = 'plastic'
    name = "Plastic SCM"
    diffs_use_absolute_paths = True
    supports_pending_changesets = True
    field_help_text = {
        'path': _('The Plastic repository spec in the form of '
                  '[repo]@[hostname]:[port].'),
    }
    dependencies = {
        'executables': ['cm'],
    }

    # This pattern is matched against the configured repository path, which
    # is a text string, so it must be a str pattern. (It was previously a
    # bytes pattern, which can never match a str and raised TypeError.)
    REP_RE = re.compile(r'^(?P<reponame>.*)@(?P<hostname>.*):(?P<port>\d+)$')

    # These patterns are matched against raw 'cm' subprocess output, which
    # is bytes.
    CS_RE = re.compile(br'^(?P<csid>\d+) (?P<user>[^\s]+) (?P<revid>\d+) '
                       br'(?P<file>.*)$')
    REPOLIST_RE = re.compile(br'^\s*\d+\s*(?P<reponame>[^\s]+)\s*.*:.*$')

    #: Sentinel revision used by the client for files not yet in Plastic.
    UNKNOWN_REV = "rev:revid:-1"

    def __init__(self, repository):
        """Initialize the tool for the given repository."""
        super(PlasticTool, self).__init__(repository)

        self.reponame, self.hostname, self.port = \
            self.parse_repository(repository.path)

        self.client = PlasticClient(repository.path, self.reponame,
                                    self.hostname, self.port)

    def get_changeset(self, changesetid, allow_empty=False):
        """Return the pending changeset with the given ID.

        Raises:
            reviewboard.scmtools.errors.SCMError:
                The changeset data could not be parsed.
        """
        logger.debug('Plastic: get_changeset %s', changesetid)

        changesetdata = self.client.get_changeset(changesetid)
        logger.debug('Plastic: changesetdata %s', changesetdata)

        # Changeset data is in the form of multiple lines of:
        # <changesetid> <user> <revid> <file spec>
        #
        # We assume the user and comment will be the same for each item, so
        # read it out of the first.
        #
        changeset = ChangeSet()
        changeset.changenum = changesetid

        split = changesetdata.split(b'\n')
        m = self.CS_RE.match(split[0])
        revid = m.group("revid")
        changeset.username = m.group("user")
        changeset.summary = self.client.get_changeset_comment(changesetid,
                                                              revid)
        logger.debug('Plastic: changeset user %s summary %s',
                     changeset.username, changeset.summary)

        # The expected changeset ID, encoded for comparison against the
        # bytes subprocess output. (Comparing bytes against str would
        # always be unequal and spuriously raise SCMError.)
        expected_csid = str(changesetid).encode('utf-8')

        for line in split:
            if line:
                m = self.CS_RE.match(line)

                if not m:
                    logger.debug('Plastic: bad re %s failed to match %s',
                                 self.CS_RE, line)
                    raise SCMError("Error looking up changeset")

                if m.group('csid') != expected_csid:
                    logger.debug('Plastic: csid %s != %s',
                                 m.group('csid'), changesetid)
                    raise SCMError('The server returned a changeset ID that '
                                   'was not requested')

                logger.debug('Plastic: adding file %s',
                             m.group('file'))

                # Append the filename itself. (Using += here would extend
                # the list with the individual bytes of the filename.)
                changeset.files.append(m.group('file'))

        return changeset

    def get_file(self, path, revision=HEAD, **kwargs):
        """Return the contents of the file at the given revision."""
        logger.debug('Plastic: get_file %s revision %s', path, revision)

        if revision == PRE_CREATION:
            return b''

        # Check for new files
        if revision == self.UNKNOWN_REV:
            return b''

        return self.client.get_file(path, revision)

    def file_exists(self, path, revision=HEAD, **kwargs):
        """Return whether the file exists at the given revision."""
        logger.debug('Plastic: file_exists %s revision %s', path, revision)

        if revision == PRE_CREATION:
            return True

        # Check for new files
        if revision == self.UNKNOWN_REV:
            return True

        try:
            return self.client.get_file(path, revision)
        except FileNotFoundError:
            return False

    def parse_diff_revision(self, filename, revision, *args, **kwargs):
        """Parse and return a filename and revision from a diff.

        Args:
            filename (bytes):
                The filename as represented in the diff.

            revision (bytes):
                The revision as represented in the diff.

            *args (tuple, unused):
                Unused positional arguments.

            **kwargs (dict, unused):
                Unused keyword arguments.

        Returns:
            tuple:
            A tuple containing two items:

            1. The normalized filename as a byte string.
            2. The normalized revision as a byte string or a
               :py:class:`~reviewboard.scmtools.core.Revision`.
        """
        assert isinstance(filename, bytes), (
            'filename must be a byte string, not %s' % type(filename))
        assert isinstance(revision, bytes), (
            'revision must be a byte string, not %s' % type(revision))

        logger.debug('Plastic: parse_diff_revision file %s revision %s',
                     filename.decode('utf-8'),
                     revision.decode('utf-8'))

        if revision == b'PRE-CREATION':
            revision = PRE_CREATION

        return filename, revision

    def get_parser(self, data):
        """Return a diff parser for the given diff data."""
        return PlasticDiffParser(data)

    @classmethod
    def parse_repository(cls, path):
        """Parse a repository path into (reponame, hostname, port).

        Raises:
            reviewboard.scmtools.errors.RepositoryNotFoundError:
                The path did not match the expected spec format.
        """
        m = cls.REP_RE.match(path)

        if m:
            repopath = m.group("reponame")
            hostname = m.group("hostname")
            port = m.group("port")

            return repopath, hostname, port
        else:
            raise RepositoryNotFoundError()

    @classmethod
    def check_repository(cls, path, **kwargs):
        """Perform checks on a repository to test its validity.

        This checks if a repository exists and can be connected to.

        A failed result is returned as an exception. The exception may contain
        extra information, such as a human-readable description of the problem.
        If the repository is valid and can be connected to, no exception will
        be thrown.

        Args:
            path (unicode):
                The repository path.

            **kwargs (dict, unused):
                Additional settings for the repository.

        Raises:
            reviewboard.scmtools.errors.RepositoryNotFoundError:
                The repository at the given path could not be found.

            reviewboard.scmtools.errors.SCMError:
                There was a general error communicating with Perforce.
        """
        m = cls.REP_RE.match(path)

        if not m:
            raise RepositoryNotFoundError()

        # Can't use 'cm checkconnection' here as it only checks the
        # pre-configured server
        server = "%s:%s" % (m.group("hostname"), m.group("port"))
        reponame = m.group("reponame")

        logger.debug('Plastic: Checking repository %s@%s',
                     reponame, server)

        repositories = PlasticClient.get_repositories(server)

        split = repositories.splitlines()

        for rep in split:
            m = cls.REPOLIST_RE.match(rep)

            # The repository listing is bytes; decode before comparing
            # against the str repository name from the path spec.
            if m and m.group("reponame").decode('utf-8') == reponame:
                break
        else:
            raise RepositoryNotFoundError()
class PlasticDiffParser(DiffParser):
    """
    This class is able to parse diffs created with the plastic client
    support in post-review.
    """

    # As the diff creation is based on the Perforce code, so this is based
    # on the PerforceDiffParser (specifically, the binary file markers)
    BINARY_RE = re.compile(br'^==== ([^\s]+) \(([^\)]+)\) ==([ACIMR])==$')

    def __init__(self, data):
        super(PlasticDiffParser, self).__init__(data)

    def parse_diff_header(self, linenum, info):
        """Parse a diff header line, handling Plastic's binary markers.

        Returns the line number following the parsed header. Falls back to
        the base DiffParser behavior for non-binary headers.
        """
        m = self.BINARY_RE.match(self.lines[linenum])

        if m:
            # Groups: (1) filename, (2) revision info, (3) change type.
            info['origFile'] = m.group(1)
            info['origInfo'] = m.group(2)
            info['newFile'] = m.group(1)
            info['newInfo'] = ""
            linenum += 1

            if (linenum < len(self.lines) and
                (self.lines[linenum].startswith(b"Binary files ") or
                 self.lines[linenum].startswith(b"Files "))):
                info['binary'] = True
                linenum += 1

            # In this case, this *is* our diff header. We don't want to
            # let the next line's real diff header be a part of this one,
            # so return now
            return linenum

        return super(PlasticDiffParser, self).parse_diff_header(linenum, info)
class PlasticClient(object):
    """Thin wrapper around the Plastic SCM ``cm`` command-line client.

    This shells out to ``cm`` to fetch file contents, changeset details,
    and repository listings from a Plastic server.
    """

    def __init__(self, repository, reponame, hostname, port):
        """Initialize the client.

        Args:
            repository (object):
                The repository this client operates on. Not stored; kept
                for interface compatibility with callers.

            reponame (unicode):
                The name of the Plastic repository.

            hostname (unicode):
                The Plastic server hostname.

            port (unicode or int):
                The Plastic server port.

        Raises:
            ImportError:
                The ``cm`` executable could not be found in the path.
        """
        if not is_exe_in_path('cm'):
            # This is technically not the right kind of error, but it's the
            # pattern we use with all the other tools.
            raise ImportError

        self.reponame = reponame
        self.hostname = hostname
        self.port = port

    def get_file(self, path, revision):
        """Return the contents of a file at a given revision.

        Args:
            path (unicode):
                The repository path of the file (used for logging only).

            revision (unicode):
                The revision spec to fetch.

        Returns:
            bytes:
                The raw file contents.

        Raises:
            SCMError:
                The ``cm`` tool reported an error.
        """
        logger.debug('Plastic: get_file %s rev %s', path, revision)

        repo = "rep:%s@repserver:%s:%s" % (self.reponame, self.hostname,
                                           self.port)

        # Work around a plastic bug, where 'cm cat --file=blah' gets an
        # extra newline, but plain 'cm cat' doesn't
        fd, tmpfile = mkstemp()
        os.close(fd)

        try:
            p = subprocess.Popen(
                ['cm', 'cat', revision + '@' + repo, '--file=' + tmpfile],
                stderr=subprocess.PIPE, stdout=subprocess.PIPE,
                close_fds=(os.name != 'nt'))

            # Decode stderr rather than calling str() on the bytes, which
            # would produce a "b'...'" repr (and would never be falsy, so
            # the stdout fallback below would never trigger).
            errmsg = p.stderr.read().decode('utf-8', errors='replace')
            failure = p.wait()

            if failure:
                if not errmsg:
                    errmsg = p.stdout.read().decode('utf-8',
                                                    errors='replace')

                raise SCMError(errmsg)

            with open(tmpfile, 'rb') as readtmp:
                contents = readtmp.read()
        finally:
            # Always remove the temporary file, even when 'cm' fails, so
            # error paths don't leak temp files.
            os.unlink(tmpfile)

        return contents

    def get_changeset(self, changesetid):
        """Return the revisions making up a changeset.

        Args:
            changesetid (int or unicode):
                The ID of the changeset to look up.

        Returns:
            bytes:
                The raw ``cm find revs`` output, one revision per line.

        Raises:
            SCMError:
                The ``cm`` tool reported an error.
        """
        logger.debug('Plastic: get_changeset %s', changesetid)

        repo = "rep:%s@repserver:%s:%s" % (self.reponame, self.hostname,
                                           self.port)

        contents, errmsg, failure = self._run_cm(
            ['cm', 'find', 'revs', 'where',
             'changeset=' + str(changesetid), 'on',
             'repository', '\'' + repo + '\'',
             '--format={changeset} {owner} {id} {item}',
             '--nototal'])

        if failure:
            raise SCMError(errmsg.decode('utf-8', errors='replace'))

        return contents

    def get_changeset_comment(self, changesetid, revid):
        """Return the comment associated with a changeset.

        Args:
            changesetid (int or unicode):
                The ID of the changeset to look up.

            revid (object, unused):
                Unused; kept for interface compatibility with callers.

        Returns:
            bytes:
                The raw comment text from ``cm find changesets``.

        Raises:
            SCMError:
                The ``cm`` tool reported an error.
        """
        logger.debug('Plastic: get_changeset_comment %s', changesetid)

        repo = "rep:%s@repserver:%s:%s" % (self.reponame, self.hostname,
                                           self.port)

        contents, errmsg, failure = self._run_cm(
            ['cm', 'find', 'changesets', 'where',
             'changesetid=' + str(changesetid),
             'on', 'repository', '\'' + repo + '\'',
             '--format={comment}', '--nototal'])

        if failure:
            raise SCMError(errmsg.decode('utf-8', errors='replace'))

        return contents

    @classmethod
    def get_repositories(cls, server):
        """Return the list of repositories on a server.

        Args:
            server (unicode):
                The ``hostname:port`` of the Plastic server.

        Returns:
            bytes:
                The raw ``cm listrepositories`` output.

        Raises:
            SCMError:
                The ``cm`` tool reported an error.
        """
        logger.debug('Plastic: get_repositories %s', server)

        repositories, errmsg, failure = cls._run_cm(
            ['cm', 'listrepositories', server])

        if failure:
            # 'cm' sometimes reports errors on stdout instead of stderr.
            # The output is bytes, so compare against a bytes prefix.
            if not errmsg and repositories.startswith(b'Error:'):
                error = repositories
            else:
                error = errmsg

            raise SCMError(error.decode('utf-8', errors='replace'))

        return repositories

    @staticmethod
    def _run_cm(args):
        """Run the ``cm`` client and return its output.

        Args:
            args (list of unicode):
                The full command line to run.

        Returns:
            tuple:
                A 3-tuple of the standard output (bytes), standard error
                (bytes), and the process exit code (int).
        """
        p = subprocess.Popen(args,
                             stderr=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             close_fds=(os.name != 'nt'))
        contents = p.stdout.read()
        errmsg = p.stderr.read()
        failure = p.wait()

        return contents, errmsg, failure
| mit | c3daa03459f50ab0bef5000ae2b5652d | 32.947802 | 79 | 0.540908 | 4.299582 | false | false | false | false |
reviewboard/reviewboard | reviewboard/webapi/resources/oauth_token.py | 1 | 11990 | """An API for managing OAuth2 tokens."""
from django.db.models.query import Q
from django.utils.translation import gettext_lazy as _
from djblets.util.decorators import augment_method_from
from djblets.webapi.decorators import (webapi_login_required,
webapi_request_fields,
webapi_response_errors)
from djblets.webapi.oauth2_scopes import get_scope_dictionary
from djblets.webapi.errors import DOES_NOT_EXIST, INVALID_FORM_DATA
from djblets.webapi.fields import ListFieldType, StringFieldType
from oauth2_provider.models import AccessToken
from reviewboard.oauth.features import oauth2_service_feature
from reviewboard.webapi.base import WebAPIResource
from reviewboard.webapi.decorators import webapi_check_local_site
class OAuthTokenResource(WebAPIResource):
    """An API resource for managing OAuth2 tokens.

    This resource allows callers to list, update, or delete their existing
    tokens.
    """

    model = AccessToken
    name = 'oauth_token'
    verbose_name = _('OAuth2 Tokens')
    uri_object_key = 'oauth_token_id'
    item_result_key = 'oauth_token'
    required_features = [oauth2_service_feature]
    allowed_methods = ('GET', 'PUT', 'DELETE')
    api_token_access_allowed = False
    oauth2_token_access_allowed = False
    added_in = '3.0'

    fields = {
        'application': {
            'type': StringFieldType,
            'description': 'The name of the application this token is for.',
        },
        'expires': {
            'type': StringFieldType,
            'description': 'When this token is set to expire.',
        },
        'scope': {
            'type': ListFieldType,
            'items': {
                'type': StringFieldType,
            },
            'description': 'The scopes this token has access to.',
        },
        'token': {
            'type': StringFieldType,
            'description': 'The access token.',
        },
    }

    def serialize_application_field(self, obj, *args, **kwargs):
        """Serialize the application field.

        Args:
            obj (oauth2_provider.models.AccessToken):
                The token that is being serialized.

            *args (tuple):
                Ignored positional arguments.

            **kwargs (dict):
                Ignored keyword arguments.

        Returns:
            unicode:
            The name of the application the access token has access to.
        """
        return obj.application.name

    def serialize_expires_field(self, obj, *args, **kwargs):
        """Serialize the expires field.

        Args:
            obj (oauth2_provider.models.AccessToken):
                The token that is being serialized.

            *args (tuple):
                Ignored positional arguments.

            **kwargs (dict):
                Ignored keyword arguments.

        Returns:
            unicode:
            The expiry date of the token, in ISO-8601 format.
        """
        return obj.expires.isoformat()

    def serialize_scope_field(self, obj, *args, **kwargs):
        """Serialize the scope field.

        Args:
            obj (oauth2_provider.models.AccessToken):
                The token that is being serialized.

            *args (tuple):
                Ignored positional arguments.

            **kwargs (dict):
                Ignored keyword arguments.

        Returns:
            list of unicode:
            The list of scopes the token has.
        """
        return obj.scope.split()

    def get_queryset(self, request, *args, **kwargs):
        """Return the queryset for the request.

        Args:
            request (django.http.HttpRequest):
                The current HTTP request.

            local_site (reviewboard.site.models.LocalSite, optional):
                The current LocalSite, if any.

        Returns:
            django.db.models.query.QuerySet:
            The tokens the user has access to.
        """
        if not request.user.is_authenticated:
            return AccessToken.objects.none()

        q = Q(application__local_site=request.local_site)

        # Superusers can see every token on the site; everyone else only
        # sees their own.
        if not request.user.is_superuser:
            q &= Q(user=request.user)

        return (
            AccessToken.objects
            .filter(q)
            .select_related('application')
        )

    def has_access_permissions(self, request, obj, *args, **kwargs):
        """Return whether or not the user has access permissions.

        A user has this permission if they own the token or are a superuser.

        Args:
            request (django.http.HttpRequest):
                The current HTTP request.

            obj (oauth2_provider.models.AccessToken):
                The token in question.

        Returns:
            bool:
            Whether or not the user has permission.
        """
        return (request.user.is_authenticated and
                (obj.user_id == request.user.pk or
                 request.user.is_superuser))

    def has_modify_permissions(self, request, obj, *args, **kwargs):
        """Return whether or not the user has modification permissions.

        A user has this permission if they own the token or are a superuser.

        Args:
            request (django.http.HttpRequest):
                The current HTTP request.

            obj (oauth2_provider.models.AccessToken):
                The token in question.

        Returns:
            bool:
            Whether or not the user has permission.
        """
        return self.has_access_permissions(request, obj, *args, **kwargs)

    def has_delete_permissions(self, request, obj, *args, **kwargs):
        """Return whether or not the user has deletion permissions.

        A user has this permission if they own the token or are a superuser.

        Args:
            request (django.http.HttpRequest):
                The current HTTP request.

            obj (oauth2_provider.models.AccessToken):
                The token in question.

        Returns:
            bool:
            Whether or not the user has permission.
        """
        return self.has_access_permissions(request, obj, *args, **kwargs)

    @webapi_login_required
    @augment_method_from(WebAPIResource)
    def get(self, *args, **kwargs):
        """Retrieves information on a particular OAuth2 token.

        This can only be accessed by the owner of the tokens or superusers
        """
        pass

    @webapi_login_required
    @augment_method_from(WebAPIResource)
    def get_list(self, *args, **kwargs):
        """Retrieve a list of information about an OAuth2 token.

        If accessing this API on a Local Site, the results will be limited
        to those associated with that site. Otherwise, it will be limited to
        those associated with no Local Site.

        This can only be accessed by the owner of the tokens or superusers.
        """
        pass

    @augment_method_from(WebAPIResource)
    def delete(self, *args, **kwargs):
        """Delete the OAuth2 token, invalidating all clients using it.

        The OAuth token will be removed from the user's account, and will no
        longer be usable for authentication.

        After deletion, this will return a :http:`204`.
        """
        pass

    @webapi_login_required
    @webapi_check_local_site
    @webapi_response_errors(DOES_NOT_EXIST)
    @webapi_request_fields(
        optional={
            'add_scopes': {
                'type': StringFieldType,
                'description': 'A comma-separated list of scopes to add.',
            },
            'remove_scopes': {
                'type': StringFieldType,
                'description': 'A comma-separated list of scopes to remove.',
            },
            'scopes': {
                'type': StringFieldType,
                'description': 'A comma-separated list of scopes to override '
                               'the current set with.\n\n'
                               'This field cannot be provided if either '
                               'add_scopes or remove_scopes is provided.',
            },
        },
    )
    def update(self, request, local_site=None, add_scopes=None,
               remove_scopes=None, scopes=None, *args, **kwargs):
        """Update the scope of an OAuth2 token.

        This resource allows a user to either (1) add and remove scopes or (2)
        replace the set of scopes with a new set.
        """
        try:
            access_token = self.get_object(request, *args, **kwargs)
        except AccessToken.DoesNotExist:
            return DOES_NOT_EXIST

        if not self.has_modify_permissions(request, access_token, *args,
                                           **kwargs):
            return self.get_no_access_error(request)

        if ((add_scopes is not None or remove_scopes is not None) and
            scopes is not None):
            return INVALID_FORM_DATA, {
                'fields': {
                    'scopes': [
                        'This field cannot be provided if either add_scopes '
                        'or remove_scopes is provided.',
                    ],
                },
            }

        field_errors = {}
        valid_scopes = get_scope_dictionary()

        if scopes is not None:
            scopes = self._validate_scopes(valid_scopes, scopes, 'scopes',
                                           field_errors)
        elif add_scopes is not None or remove_scopes is not None:
            add_scopes = self._validate_scopes(valid_scopes,
                                               add_scopes,
                                               'add_scopes',
                                               field_errors)
            remove_scopes = self._validate_scopes(valid_scopes,
                                                  remove_scopes,
                                                  'remove_scopes',
                                                  field_errors)

        if field_errors:
            return INVALID_FORM_DATA, {
                'fields': field_errors,
            }

        if scopes is not None:
            access_token.scope = ' '.join(scopes)
            access_token.save(update_fields=('scope',))
        elif add_scopes is not None or remove_scopes is not None:
            # Split on arbitrary whitespace (rather than a single space)
            # for consistency with serialize_scope_field(), and so that an
            # empty stored scope doesn't produce a stray '' entry that
            # would be joined back into the saved scope string.
            current_scopes = set(access_token.scope.split())

            if add_scopes:
                current_scopes.update(add_scopes)

            if remove_scopes:
                current_scopes.difference_update(remove_scopes)

            access_token.scope = ' '.join(current_scopes)
            access_token.save(update_fields=('scope',))

        return 200, {
            self.item_result_key: access_token,
        }

    def _validate_scopes(self, valid_scopes, scopes, field, field_errors):
        """Validate the given set of scopes against known valid scopes.

        Args:
            valid_scopes (dict):
                The scope dictionary.

            scopes (unicode):
                The comma-separated list of scopes to validate.

            field (unicode):
                The name of the field that is being validated.

            field_errors (dict):
                A mapping of field names to errors.

                An error message will be added to ``field_errors[field]`` for
                each invalid scope.

        Returns:
            list:
            The list of scopes, if they are all valid, or ``None`` otherwise.
        """
        if scopes is None:
            return None

        scopes = scopes.split(',')
        invalid_scopes = {
            scope
            for scope in scopes
            if scope not in valid_scopes
        }

        if invalid_scopes:
            field_errors[field] = [
                'The scope "%s" is invalid.' % scope
                for scope in invalid_scopes
            ]

            return None

        return scopes


oauth_token_resource = OAuthTokenResource()
| mit | 7b6072a25887fdae6bbd2659901814dd | 31.581522 | 78 | 0.550292 | 4.750396 | false | false | false | false |
reviewboard/reviewboard | reviewboard/diffviewer/tests/test_diff_renderer.py | 1 | 5193 | from django.http import HttpResponse
from django.test import RequestFactory
from djblets.cache.backend import cache_memoize
from kgb import SpyAgency
from reviewboard.diffviewer.errors import UserVisibleError
from reviewboard.diffviewer.models import FileDiff
from reviewboard.diffviewer.renderers import DiffRenderer
from reviewboard.testing import TestCase
class DiffRendererTests(SpyAgency, TestCase):
    """Unit tests for DiffRenderer."""

    def test_construction_with_invalid_chunks(self):
        """Testing DiffRenderer construction with invalid chunks"""
        diff_file = {
            'chunks': [{}],
            'filediff': None,
            'interfilediff': None,
            'force_interdiff': False,
            'chunks_loaded': True,
        }

        # Any chunk index outside [0, len(chunks)) must be rejected at
        # render time.
        renderer = DiffRenderer(diff_file, chunk_index=-1)

        with self.assertRaises(UserVisibleError):
            renderer.render_to_string_uncached(None)

        renderer = DiffRenderer(diff_file, chunk_index=1)

        with self.assertRaises(UserVisibleError):
            renderer.render_to_string_uncached(None)

    def test_construction_with_valid_chunks(self):
        """Testing DiffRenderer construction with valid chunks"""
        diff_file = {
            'chunks': [{}],
            'chunks_loaded': True,
        }

        # Should not assert.
        renderer = DiffRenderer(diff_file, chunk_index=0)
        self.spy_on(renderer.render_to_string, call_original=False)
        self.spy_on(renderer.make_context, call_original=False)

        renderer.render_to_string_uncached(None)
        self.assertEqual(renderer.num_chunks, 1)
        self.assertEqual(renderer.chunk_index, 0)

    def test_render_to_response(self):
        """Testing DiffRenderer.render_to_response"""
        diff_file = {
            'chunks': [{}]
        }

        renderer = DiffRenderer(diff_file)
        self.spy_on(renderer.render_to_string,
                    call_fake=lambda self, request: 'Foo')

        request_factory = RequestFactory()
        request = request_factory.get('/')
        response = renderer.render_to_response(request)

        self.assertTrue(renderer.render_to_string.called)
        self.assertIsInstance(response, HttpResponse)
        self.assertEqual(response.content, b'Foo')

    def test_render_to_string(self):
        """Testing DiffRenderer.render_to_string"""
        diff_file = {
            'chunks': [{}]
        }

        renderer = DiffRenderer(diff_file)
        self.spy_on(renderer.render_to_string_uncached,
                    call_fake=lambda self, request: 'Foo')
        self.spy_on(renderer.make_cache_key,
                    call_fake=lambda self: 'my-cache-key')
        self.spy_on(cache_memoize)

        request_factory = RequestFactory()
        request = request_factory.get('/')
        response = renderer.render_to_response(request)

        # The cached path must build a cache key and go through
        # cache_memoize.
        self.assertEqual(response.content, b'Foo')
        self.assertTrue(renderer.render_to_string_uncached.called)
        self.assertTrue(renderer.make_cache_key.called)
        self.assertTrue(cache_memoize.spy.called)

    def test_render_to_string_uncached(self):
        """Testing DiffRenderer.render_to_string_uncached"""
        diff_file = {
            'chunks': [{}]
        }

        # Providing lines_of_context bypasses the cache entirely.
        renderer = DiffRenderer(diff_file, lines_of_context=[5, 5])
        self.spy_on(renderer.render_to_string_uncached,
                    call_fake=lambda self, request: 'Foo')
        self.spy_on(renderer.make_cache_key,
                    call_fake=lambda self: 'my-cache-key')
        self.spy_on(cache_memoize)

        request_factory = RequestFactory()
        request = request_factory.get('/')
        response = renderer.render_to_response(request)

        self.assertEqual(response.content, b'Foo')
        self.assertTrue(renderer.render_to_string_uncached.called)
        self.assertFalse(renderer.make_cache_key.called)
        self.assertFalse(cache_memoize.spy.called)

    def test_make_context_with_chunk_index(self):
        """Testing DiffRenderer.make_context with chunk_index"""
        diff_file = {
            'newfile': True,
            'interfilediff': None,
            'filediff': FileDiff(),
            'chunks': [
                {
                    'lines': [],
                    'meta': {},
                    'change': 'insert',
                },
                {
                    # This is not how lines really look, but it's fine for
                    # current usage tests.
                    'lines': range(10),
                    'meta': {},
                    'change': 'replace',
                },
                {
                    'lines': [],
                    'meta': {},
                    'change': 'delete',
                }
            ],
        }

        renderer = DiffRenderer(diff_file, chunk_index=1)
        context = renderer.make_context()

        # Rendering a single chunk replaces the file's chunk list with just
        # the requested chunk.
        self.assertEqual(context['standalone'], True)
        self.assertEqual(context['file'], diff_file)
        self.assertEqual(len(diff_file['chunks']), 1)

        chunk = diff_file['chunks'][0]
        self.assertEqual(chunk['change'], 'replace')
| mit | ce6e2d68dda83aeb8cb9a687cb2914b4 | 34.568493 | 75 | 0.580204 | 4.218522 | false | true | false | false |
reviewboard/reviewboard | reviewboard/diffviewer/parser.py | 1 | 67820 | import io
import logging
import re
import weakref
from copy import deepcopy
from django.utils.encoding import force_bytes
from django.utils.translation import gettext as _
from djblets.util.properties import AliasProperty, TypedProperty
from pydiffx import DiffType, DiffX
from pydiffx.errors import DiffXParseError
from reviewboard.deprecation import RemovedInReviewBoard60Warning
from reviewboard.diffviewer.errors import DiffParserError
from reviewboard.scmtools.core import HEAD, PRE_CREATION, Revision, UNKNOWN
logger = logging.getLogger(__name__)
class ParsedDiff(object):
    """Top-level information parsed from a diff.

    This holds data about the diff as a whole, along with the list of
    changes (commits) parsed from it, each of which holds its own files.
    Parsers may also attach arbitrary extra data, which ends up in
    :py:attr:`DiffSet.extra_data
    <reviewboard.diffviewer.models.diffset.DiffSet.extra_data>`.

    The structure is flexible enough to represent a variety of diff
    formats, including DiffX files. It's intended for internal use and for
    subclasses of :py:class:`BaseDiffParser`.

    Version Added:
        4.0.5

    Attributes:
        changes (list of ParsedDiffChange):
            The changes parsed from this diff. There should always be at
            least one.

        extra_data (dict):
            Extra data to store along with the diff, copied into
            :py:attr:`DiffSet.extra_data
            <reviewboard.diffviewer.models.diffset.DiffSet.extra_data>`.

        parser (BaseDiffParser):
            The diff parser that produced this result.

        uses_commit_ids_as_revisions (bool):
            Whether commit IDs stand in for file revisions when no
            explicit revision is available for a file (e.g. a file absent
            from a parent diff uses the parent diff's parent commit ID).
    """

    def __init__(self, parser, uses_commit_ids_as_revisions=False):
        """Initialize the parsed diff information.

        Args:
            parser (BaseDiffParser):
                The diff parser that produced this result.

            uses_commit_ids_as_revisions (bool, optional):
                Whether commit IDs stand in for file revisions.
                See :py:attr:`ParsedDiff.uses_commit_ids_as_revisions`.
        """
        self.changes = []
        self.extra_data = {}
        self.parser = parser
        self.uses_commit_ids_as_revisions = uses_commit_ids_as_revisions
class ParsedDiffChange(object):
    """A single change (commit) parsed from a diff.

    Each change groups a set of parsed files together with extra data that
    is stored in :py:attr:`DiffCommit.extra_data
    <reviewboard.diffviewer.models.diffcommit.DiffCommit.extra_data>`.

    Traditional diffs produce exactly one of these; DiffX files may
    contain several (though only single-change diffs can currently be
    processed when consuming these results).

    Version Added:
        4.0.5

    Attributes:
        extra_data (dict):
            Extra data to store along with the change, copied into
            :py:attr:`DiffCommit.extra_data
            <reviewboard.diffviewer.models.diffcommit.DiffCommit
            .extra_data>`.

        files (list of ParsedDiffFile):
            The files parsed for this change. There should always be at
            least one.
    """

    #: The ID of the commit, parsed from the diff.
    #:
    #: This may be ``None``.
    #:
    #: Type:
    #:     bytes
    commit_id = TypedProperty(bytes)

    #: The ID of the primary parent commit, parsed from the diff.
    #:
    #: This may be ``None``.
    #:
    #: Type:
    #:     bytes
    parent_commit_id = TypedProperty(bytes)

    def __init__(self, parsed_diff):
        """Initialize and register this change with its parent diff.

        Args:
            parsed_diff (ParsedDiff):
                The parent parsed diff information.
        """
        assert parsed_diff is not None

        self.extra_data = {}
        self.files = []

        # Hold the parent weakly to avoid a reference cycle between the
        # diff and its changes.
        self._parent = weakref.ref(parsed_diff)

        parsed_diff.changes.append(self)

    @property
    def parent_parsed_diff(self):
        """The parent diff object, if still alive.

        Type:
            ParsedDiff
        """
        parent_ref = self._parent

        if parent_ref:
            return parent_ref()

        return None
class ParsedDiffFile(object):
    """A parsed file from a diff.

    This stores information on a single file represented in a diff, including
    the contents of that file's diff, as parsed by :py:class:`DiffParser` or
    one of its subclasses.

    Parsers should set the attributes on this based on the contents of the
    diff, and should add any data found in the diff.

    This class is meant to be used internally and by subclasses of
    :py:class:`BaseDiffParser`.

    Version Changed:
        4.0.6:
        Added :py:attr:`old_symlink_target` and py:attr:`new_symlink_target`.

    Version Changed:
        4.0.5:
        Diff parsers that manually construct instances must pass in
        ``parsed_diff_change`` instead of ``parser`` when constructing the
        object, and must call :py:meth:`discard` after construction if the
        file isn't wanted in the results.

    Attributes:
        binary (bool);
            Whether this represents a binary file.

        copied (bool):
            Whether this represents a file that has been copied. The file
            may or may not be modified in the process.

        deleted (bool):
            Whether this represents a file that has been deleted.

        delete_count (int):
            The number of delete (``-``) lines found in the file.

        insert_count (int):
            The number of insert (``+``) lines found in the file.

        is_symlink (bool):
            Whether this represents a file that is a symbolic link to another
            file.

        moved (bool):
            Whether this represents a file that has been moved/renamed. The
            file may or may not be modified in the process.

        parser (BaseDiffParser):
            The diff parser that parsed this file.

        skip (bool):
            Whether this file should be skipped by the parser. If any of the
            parser methods set this, the file will stop parsing and will be
            excluded from results.
    """

    #: The parsed original name of the file.
    #:
    #: Type:
    #:     bytes
    orig_filename = TypedProperty(bytes)

    #: The parsed file details of the original file.
    #:
    #: This will usually be a revision.
    #:
    #: Type:
    #:     bytes or reviewboard.scmtools.core.Revision
    orig_file_details = TypedProperty((bytes, Revision))

    #: The parsed modified name of the file.
    #:
    #: This may be the same as :py:attr:`orig_filename`.
    #:
    #: Type:
    #:     bytes
    modified_filename = TypedProperty(bytes)

    #: The parsed file details of the modified file.
    #:
    #: This will usually be a revision.
    #:
    #: Type:
    #:     bytes or reviewboard.scmtools.core.Revision
    modified_file_details = TypedProperty((bytes, Revision))

    #: The parsed value for an Index header.
    #:
    #: If present in the diff, this usually contains a filename, but may
    #: contain other content as well, depending on the variation of the diff
    #: format.
    #:
    #: Type:
    #:     bytes
    index_header_value = TypedProperty(bytes)

    #: The old target for a symlink.
    #:
    #: Version Added:
    #:     4.0.6
    #:
    #: Type:
    #:     bytes
    old_symlink_target = TypedProperty(bytes)

    #: The new target for a symlink.
    #:
    #: Version Added:
    #:     4.0.6
    #:
    #: Type:
    #:     bytes
    new_symlink_target = TypedProperty(bytes)

    #: The old UNIX mode for the file.
    #:
    #: This is stored as a string (presumably an octal mode string like
    #: ``"100644"`` -- confirm against SCM implementations), matching the
    #: ``TypedProperty(str)`` declaration below.
    #:
    #: Version Added:
    #:     4.0.6
    #:
    #: Type:
    #:     str
    old_unix_mode = TypedProperty(str)

    #: The new UNIX mode for the file.
    #:
    #: This is stored as a string, matching the ``TypedProperty(str)``
    #: declaration below.
    #:
    #: Version Added:
    #:     4.0.6
    #:
    #: Type:
    #:     str
    new_unix_mode = TypedProperty(str)

    #: The parsed original name of the file.
    #:
    #: Deprecated:
    #:     4.0:
    #:     Use :py:attr:`orig_filename` instead.
    origFile = AliasProperty('orig_filename',
                             convert_to_func=force_bytes,
                             deprecated=True,
                             deprecation_warning=RemovedInReviewBoard60Warning)

    #: The parsed file details of the original file.
    #:
    #: Deprecated:
    #:     4.0:
    #:     Use :py:attr:`orig_file_details` instead.
    origInfo = AliasProperty('orig_file_details',
                             convert_to_func=force_bytes,
                             deprecated=True,
                             deprecation_warning=RemovedInReviewBoard60Warning)

    #: The parsed original name of the file.
    #:
    #: Deprecated:
    #:     4.0:
    #:     Use :py:attr:`modified_filename` instead.
    newFile = AliasProperty('modified_filename',
                            convert_to_func=force_bytes,
                            deprecated=True,
                            deprecation_warning=RemovedInReviewBoard60Warning)

    #: The parsed file details of the modified file.
    #:
    #: Deprecated:
    #:     4.0:
    #:     Use :py:attr:`modified_file_details` instead.
    newInfo = AliasProperty('modified_file_details',
                            convert_to_func=force_bytes,
                            deprecated=True,
                            deprecation_warning=RemovedInReviewBoard60Warning)

    #: The parsed value for an Index header.
    #:
    #: Deprecated:
    #:     4.0:
    #:     Use :py:attr:`index_header_value` instead.
    index = AliasProperty('index_header_value',
                          convert_to_func=force_bytes,
                          deprecated=True,
                          deprecation_warning=RemovedInReviewBoard60Warning)

    def __init__(self, parser=None, parsed_diff_change=None, **kwargs):
        """Initialize the parsed file information.

        Version Changed:
            4.0.5:
            Added the ``parsed_diff_change`` argument (which will be required
            in Review Board 6.0).

            Deprecated the ``parser`` argument (which will be removed in
            Review Board 6.0).

        Args:
            parser (reviewboard.diffviewer.parser.BaseDiffParser, optional):
                The diff parser that parsed this file.

                This is deprecated and will be remoed in Review Board 6.0.

            parsed_diff_change (ParsedDiffChange, optional):
                The diff change that owns this file.

                This will be required in Review Board 6.0.
        """
        if parsed_diff_change is None:
            RemovedInReviewBoard60Warning.warn(
                'Diff parsers must pass a ParsedDiffChange as the '
                'parsed_diff_change= parameter when creating a '
                'ParsedDiffFile. They should no longer pass a parser= '
                'parameter. This will be mandatory in Review Board 6.0.')

        if parsed_diff_change is not None:
            # Register with the owning change, derive the parser from the
            # parent diff, and keep only a weak reference to the change to
            # avoid a reference cycle.
            parsed_diff_change.files.append(self)
            parser = parsed_diff_change.parent_parsed_diff.parser
            parsed_diff_change = weakref.ref(parsed_diff_change)

        self._parent = parsed_diff_change
        self.parser = parser
        self.binary = False
        self.deleted = False
        self.moved = False
        self.copied = False
        self.is_symlink = False
        self.insert_count = 0
        self.delete_count = 0
        self.skip = False
        self.extra_data = {}

        # Buffer accumulating this file's diff content until finalize()
        # is called.
        self._data_io = io.BytesIO()
        self._data = None

        # Keys explicitly set through the deprecated dict-style API.
        self._deprecated_info = {}

    @property
    def parent_parsed_diff_change(self):
        """The parent change object.

        Version Added:
            4.0.5

        Type:
            ParsedDiffChange
        """
        if self._parent:
            return self._parent()

        return None

    def __setitem__(self, key, value):
        """Set information on the parsed file from a diff.

        This is a legacy implementation used to help diff parsers retain
        compatibility with the old dictionary-based ways of setting parsed
        file information. Callers should be updated to set attributes instead.

        Deprecated:
            4.0:
            This will be removed in Review Board 6.0.

        Args:
            key (str):
                The key to set.

            value (object):
                The value to set.
        """
        self._warn_old_usage_deprecation()

        self._deprecated_info[key] = value
        setattr(self, key, value)

    def __getitem__(self, key):
        """Return information on the parsed file from a diff.

        This is a legacy implementation used to help diff parsers retain
        compatibility with the old dictionary-based ways of setting parsed
        file information. Callers should be updated to access attributes
        instead.

        Deprecated:
            4.0:
            This will be removed in Review Board 6.0.

        Args:
            key (str):
                The key to retrieve.

        Returns:
            object:
            The resulting value.

        Raises:
            KeyError:
                The key is invalid.
        """
        self._warn_old_usage_deprecation()

        return self._deprecated_info[key]

    def __contains__(self, key):
        """Return whether an old parsed file key has been explicitly set.

        This is a legacy implementation used to help diff parsers retain
        compatibility with the old dictionary-based ways of setting parsed
        file information. Callers should be updated to check attribute values
        instead.

        Deprecated:
            4.0:
            This will be removed in Review Board 6.0.

        Args:
            key (str):
                The key to check.

        Returns:
            bool:
            ``True`` if the key has been explicitly set by a diff parser.
            ``False`` if it has not.
        """
        self._warn_old_usage_deprecation()

        return key in self._deprecated_info

    def set(self, key, value):
        """Set information on the parsed file from a diff.

        This is a legacy implementation used to help diff parsers retain
        compatibility with the old dictionary-based ways of setting parsed
        file information. Callers should be updated to set attributes instead.

        Deprecated:
            4.0:
            This will be removed in Review Board 6.0.

        Args:
            key (str):
                The key to set.

            value (object):
                The value to set.
        """
        self._warn_old_usage_deprecation()

        self._deprecated_info[key] = value
        setattr(self, key, value)

    def get(self, key, default=None):
        """Return information on the parsed file from a diff.

        This is a legacy implementation used to help diff parsers retain
        compatibility with the old dictionary-based ways of setting parsed
        file information. Callers should be updated to access attributes
        instead.

        Deprecated:
            4.0:
            This will be removed in Review Board 6.0.

        Args:
            key (str):
                The key to retrieve.

            default (object, optional):
                The default value to return.

        Returns:
            object:
            The resulting value.
        """
        self._warn_old_usage_deprecation()

        return self._deprecated_info.get(key, default)

    def update(self, items):
        """Update information on the parsed file from a diff.

        This is a legacy implementation used to help diff parsers retain
        compatibility with the old dictionary-based ways of setting parsed
        file information. Callers should be updated to set individual
        attributes instead.

        Deprecated:
            4.0:
            This will be removed in Review Board 6.0.

        Args:
            items (dict):
                The keys and values to set.
        """
        self._warn_old_usage_deprecation()

        for key, value in items.items():
            self._deprecated_info[key] = value
            setattr(self, key, value)

    @property
    def data(self):
        """The data for this diff.

        This must be accessed after :py:meth:`finalize` has been called.
        """
        if self._data is None:
            raise ValueError('ParsedDiffFile.data cannot be accessed until '
                             'finalize() is called.')

        return self._data

    def discard(self):
        """Discard this from the parent change.

        This will remove it from the list of files. It's intended for use
        when a diff parser is populating the diff but then determines the
        file is no longer needed.

        Version Added:
            4.0.5
        """
        assert self.parent_parsed_diff_change

        self.parent_parsed_diff_change.files.remove(self)

    def finalize(self):
        """Finalize the parsed diff.

        This makes the diff data available to consumers and closes the buffer
        for writing.
        """
        self._data = self._data_io.getvalue()
        self._data_io.close()

    def prepend_data(self, data):
        """Prepend data to the buffer.

        Args:
            data (bytes):
                The data to prepend.
        """
        if data:
            # BytesIO has no efficient prepend, so rebuild the buffer with
            # the new data first.
            new_data_io = io.BytesIO()
            new_data_io.write(data)
            new_data_io.write(self._data_io.getvalue())

            self._data_io.close()
            self._data_io = new_data_io

    def append_data(self, data):
        """Append data to the buffer.

        Args:
            data (bytes):
                The data to append.
        """
        if data:
            self._data_io.write(data)

    def _warn_old_usage_deprecation(self):
        """Warn that a DiffParser is populating information in an old way."""
        if self.parser is None:
            message = (
                'Diff parsers must be updated to populate attributes on a '
                'ParsedDiffFile, instead of setting the information in a '
                'dictionary. This will be required in Review Board 6.0.'
            )
        else:
            message = (
                '%r must be updated to populate attributes on a '
                'ParsedDiffFile, instead of setting the information in a '
                'dictionary. This will be required in Review Board 6.0.'
                % type(self.parser)
            )

        RemovedInReviewBoard60Warning.warn(message, stacklevel=3)
class BaseDiffParser(object):
    """Base class for a diff parser.

    This is a low-level, basic foundational interface for a diff parser. It
    validates the type of the incoming data and declares the methods that
    subclasses must implement.

    Most SCM implementations will want to either subclass
    :py:class:`DiffParser` or use :py:class:`DiffXParser`.

    Version Added:
        4.0.5

    Attributes:
        data (bytes):
            The diff data being parsed.

        uses_commit_ids_as_revisions (bool):
            Whether commit IDs are used as file revisions.

            See :py:attr:`ParsedDiff.uses_commit_ids_as_revisions`.
    """

    def __init__(self, data, uses_commit_ids_as_revisions=False):
        """Initialize the parser.

        Args:
            data (bytes):
                The diff content to parse.

            uses_commit_ids_as_revisions (bool):
                Whether commit IDs are used as file revisions.

                See :py:attr:`ParsedDiff.uses_commit_ids_as_revisions`.

        Raises:
            TypeError:
                The provided ``data`` argument was not a ``bytes`` type.
        """
        if not isinstance(data, bytes):
            message = (_('%s expects bytes values for "data", not %s')
                       % (type(self).__name__, type(data)))

            raise TypeError(message)

        self.data = data
        self.uses_commit_ids_as_revisions = uses_commit_ids_as_revisions

    def parse_diff(self):
        """Parse the diff.

        Subclasses must implement this to parse :py:attr:`data` and return
        a representation of the diff file and its content.

        Returns:
            ParsedDiff:
            The resulting parsed diff information.

        Raises:
            NotImplementedError:
                This wasn't implemented by a subclass.

            reviewboard.diffviewer.errors.DiffParserError:
                There was an error parsing part of the diff. This may be a
                corrupted diff, or an error in the parsing implementation.
                Details are in the error message.
        """
        raise NotImplementedError

    def raw_diff(self, diffset_or_commit):
        """Return a raw diff as a string.

        Subclasses must implement this to take a DiffSet or DiffCommit and
        generate a new, single diff file representing all the changes made,
        suitable for serving up to other tools or processes.

        Args:
            diffset_or_commit (reviewboard.diffviewer.models.diffset.DiffSet
                               or
                               reviewboard.diffviewer.models.diffcommit
                               .DiffCommit):
                The DiffSet or DiffCommit to render.

                For a DiffSet, only the cumulative diff's file contents
                will be returned. For a DiffCommit, only that commit's file
                contents will be returned.

        Returns:
            bytes:
            The diff composed of all the component FileDiffs.

        Raises:
            NotImplementedError:
                This wasn't implemented by a subclass.

            TypeError:
                The provided ``diffset_or_commit`` wasn't of a supported
                type.
        """
        raise NotImplementedError

    def normalize_diff_filename(self, filename):
        """Normalize filenames in diffs.

        The result is suitable for populating in
        :py:attr:`FileDiff.source_file
        <reviewboard.diffviewer.models.filediff.FileDiff.source_file>` or
        :py:attr:`FileDiff.dest_file
        <reviewboard.diffviewer.models.filediff.FileDiff.dest_file>`, or
        for presenting a filename to the UI.

        By default this strips a single leading slash, which might occur
        due to differences in various diffing methods or APIs. Subclasses
        can override this to provide additional methods of normalization.

        Args:
            filename (unicode):
                The filename to normalize.

        Returns:
            unicode:
            The normalized filename.
        """
        if not filename.startswith('/'):
            return filename

        return filename[1:]
class DiffParser(BaseDiffParser):
"""Parses diff files, allowing subclasses to specialize parsing behavior.
This class provides the base functionality for parsing Unified Diff files.
It looks for common information present in many variations of diffs,
such as ``Index:`` lines, in order to extract files and their modified
content from a diff.
Subclasses can extend the parsing behavior to extract additional metadata
or handle special representations of changes. They may want to override the
following methods:
* :py:meth:`parse_special_header`
* :py:meth:`parse_diff_header`
* :py:meth:`parse_filename_header`
* :py:meth:`parse_after_headers`
* :py:meth:`get_orig_commit_id`
* :py:meth:`normalize_diff_filename`
"""
#: A separator string below an Index header.
#:
#: This is commonly found immediately below an ``Index:`` header, meant
#: to help locate the beginning of the metadata or changes made to a file.
#:
#: Its presence and location is not guaranteed.
INDEX_SEP = b'=' * 67
    def __init__(self, data, **kwargs):
        """Initialize the parser.

        Version Changed:
            4.0.5:
                Added ``**kwargs``.

        Args:
            data (bytes):
                The diff content to parse.

            **kwargs (dict):
                Keyword arguments to pass to the parent class.

                Version Added:
                    4.0.5

        Raises:
            TypeError:
                The provided ``data`` argument was not a ``bytes`` type.
        """
        # NOTE(review): local import — presumably avoids a circular import
        # between this module and diffutils; confirm before hoisting it.
        from reviewboard.diffviewer.diffutils import split_line_endings

        super(DiffParser, self).__init__(data, **kwargs)

        self.base_commit_id = None
        self.new_commit_id = None

        # Pre-split the diff into lines once; all of the parse_* methods
        # index into self.lines by 0-based line number.
        self.lines = split_line_endings(data)

        # A single default change is created up front. parse() populates
        # its files, and parse_diff() fills in commit information.
        self.parsed_diff = ParsedDiff(
            parser=self,
            uses_commit_ids_as_revisions=self.uses_commit_ids_as_revisions)
        self.parsed_diff_change = ParsedDiffChange(
            parsed_diff=self.parsed_diff)
    def parse_diff(self):
        """Parse the diff.

        Subclasses should override this if working with a diff format that
        extracts more than one change from a diff.

        Version Added:
            4.0.5:
                Historically, :py:meth:`parse` was the main method used to
                parse a diff. That's now used exclusively to parse a list of
                files for the default :py:attr:`parsed_diff_change`. The old
                method is around for compatibility, but is no longer called
                directly outside of this class.

        Returns:
            ParsedDiff:
            The resulting parsed diff information.

        Raises:
            reviewboard.diffviewer.errors.DiffParserError:
                There was an error parsing part of the diff. This may be a
                corrupted diff, or an error in the parsing implementation.
                Details are in the error message.
        """
        class_name = type(self).__name__
        logger.debug('%s.parse_diff: Beginning parse of diff, size = %s',
                     class_name, len(self.data))

        parsed_diff_files = self.parse()
        parsed_diff_change = self.parsed_diff_change

        # Attach any files that a subclass did not already assign to a
        # specific change to the default change.
        for parsed_diff_file in parsed_diff_files:
            if parsed_diff_file.parent_parsed_diff_change is None:
                parsed_diff_change.files.append(parsed_diff_file)

        # Legacy support: older parsers provide the parent commit ID through
        # get_orig_commit_id() rather than setting it on the change directly.
        if parsed_diff_change.parent_commit_id is None:
            parent_commit_id = self.get_orig_commit_id()

            if parent_commit_id is not None:
                parsed_diff_change.parent_commit_id = parent_commit_id
                self.parsed_diff.uses_commit_ids_as_revisions = True

                RemovedInReviewBoard60Warning.warn(
                    '%s.get_orig_commit_id() will no longer be supported in '
                    'Review Board 6.0. Please set the commit ID in '
                    'self.parsed_diff_change.parent_commit_id, and set '
                    'parsed_diff_change.uses_commit_ids_as_revisions = True.'
                    % type(self).__name__
                )

        logger.debug('%s.parse_diff: Finished parsing diff.', class_name)

        return self.parsed_diff
    def parse(self):
        """Parse the diff and return a list of files.

        This will parse the content of the file, returning any files that
        were found.

        Version Changed:
            4.0.5:
                Historically, this was the main method used to parse a diff.
                It's now used exclusively to parse a list of files for the
                default :py:attr:`parsed_diff_change`, and
                :py:meth:`parse_diff` is the main method used to parse a
                diff. This method is around for compatibility, but is no
                longer called directly outside of this class.

        Returns:
            list of ParsedDiffFile:
            The resulting list of files.

        Raises:
            reviewboard.diffviewer.errors.DiffParserError:
                There was an error parsing part of the diff. This may be a
                corrupted diff, or an error in the parsing implementation.
                Details are in the error message.
        """
        preamble = io.BytesIO()
        self.files = []
        parsed_file = None
        i = 0

        # Go through each line in the diff, looking for diff headers.
        while i < len(self.lines):
            next_linenum, new_file = self.parse_change_header(i)

            if new_file:
                # This line is the start of a new file diff.
                #
                # First, finalize the last one.
                if self.files:
                    self.files[-1].finalize()

                parsed_file = new_file

                # We need to prepend the preamble, if we have one.
                parsed_file.prepend_data(preamble.getvalue())

                preamble.close()
                preamble = io.BytesIO()
                self.files.append(parsed_file)

                i = next_linenum
            else:
                if parsed_file:
                    i = self.parse_diff_line(i, parsed_file)
                else:
                    # No file has started yet. Accumulate this line in the
                    # preamble so it can be attached to the next file found.
                    preamble.write(self.lines[i])
                    preamble.write(b'\n')
                    i += 1

        if self.files:
            self.files[-1].finalize()

        preamble.close()

        return self.files
def parse_diff_line(self, linenum, parsed_file):
"""Parse a line of data in a diff.
This will append the line to the parsed file's data, and if the
content represents active changes to a file, its insert/delete counts
will be updated to reflect them.
Args:
linenum (int):
The 0-based line number.
parsed_file (ParsedDiffFile):
The current parsed diff file info.
Returns:
int:
The next line number to parse.
"""
line = self.lines[linenum]
if (parsed_file.orig_filename is not None and
parsed_file.modified_filename is not None):
if line.startswith(b'-'):
parsed_file.delete_count += 1
elif line.startswith(b'+'):
parsed_file.insert_count += 1
parsed_file.append_data(line)
parsed_file.append_data(b'\n')
return linenum + 1
    def parse_change_header(self, linenum):
        """Parse a header before a change to a file.

        This will attempt to parse the following information, starting at
        the specified line in the diff:

        1. Any special file headers (such as ``Index:`` lines) through
           :py:meth:`parse_special_header`
        2. A standard Unified Diff file header (through
           :py:meth:`parse_diff_header`)
        3. Any content after the header (through
           :py:meth:`parse_after_headers`)

        If the special or diff headers are able to populate the original and
        modified filenames and revisions/file details, and none of the
        methods above mark the file as skipped (by setting
        :py:attr:`ParsedDiffFile.skip`), then this will finish by appending
        all parsed data and returning a parsed file entry.

        Subclasses that need to control parsing logic should override one or
        more of the above methods.

        Args:
            linenum (int):
                The line number to begin parsing.

        Returns:
            tuple:
            A tuple containing the following:

            1. The next line number to parse
            2. The populated :py:class:`ParsedDiffFile` instance for this
               file, or ``None`` if the file was skipped

        Raises:
            reviewboard.diffviewer.errors.DiffParserError:
                There was an error parsing the change header. This may be
                a corrupted diff, or an error in the parsing implementation.
                Details are in the error message.
        """
        parsed_file = \
            ParsedDiffFile(parsed_diff_change=self.parsed_diff_change)
        start = linenum

        linenum = self.parse_special_header(linenum, parsed_file)
        linenum = self.parse_diff_header(linenum, parsed_file)

        # The file is unusable if a subclass explicitly marked it as skipped
        # or if the headers failed to produce both filenames and both
        # revision/details values.
        skip = (
            parsed_file.skip or
            parsed_file.orig_filename is None or
            parsed_file.orig_file_details is None or
            parsed_file.modified_filename is None or
            parsed_file.modified_file_details is None
        )

        if not skip:
            # If we have enough information to represent a header, build the
            # file to return.
            if linenum < len(self.lines):
                linenum = self.parse_after_headers(linenum, parsed_file)
                skip = parsed_file.skip

        if skip:
            parsed_file.discard()
            parsed_file = None
        else:
            # The header is part of the diff, so make sure it gets in the
            # diff content.
            for line in self.lines[start:linenum]:
                parsed_file.append_data(line)
                parsed_file.append_data(b'\n')

        return linenum, parsed_file
def parse_special_header(self, linenum, parsed_file):
"""Parse a special diff header marking the start of a new file's info.
This attempts to locate an ``Index:`` line at the specified line
number, which usually indicates the beginning of file's information in
a diff (for Unified Diff variants that support it). By default, this
method expects the line to be found at ``linenum``.
If present, the value found immediately after the ``Index:`` will be
stored in :py:attr:`ParsedDiffFile.index_header_value`, allowing
subclasses to make a determination based on its contents (which may
vary between types of diffs, but should include at least a filename.
If the ``Index:`` line is not present, this won't do anything by
default.
Subclasses can override this to parse additional information before the
standard diff header. They may also set :py:attr:`ParsedFileDiff.skip`
to skip the rest of this file and begin parsing a new entry at the
returned line number.
Args:
linenum (int):
The line number to begin parsing.
parsed_file (ParsedDiffFile):
The file currently being parsed.
Returns:
int:
The next line number to parse.
Raises:
reviewboard.diffviewer.errors.DiffParserError:
There was an error parsing the special header. This may be
a corrupted diff, or an error in the parsing implementation.
Details are in the error message.
"""
try:
index_line = self.lines[linenum]
is_index = index_line.startswith(b'Index: ')
except IndexError:
is_index = False
if is_index:
# Try to find the "====" line.
temp_linenum = linenum + 1
while temp_linenum + 1 < len(self.lines):
line = self.lines[temp_linenum]
if line == self.INDEX_SEP:
# We found the line. This is looking like a valid diff
# for CVS, Subversion, and other systems. Try to parse
# the data from the line.
try:
parsed_file.index_header_value = \
index_line.split(None, 1)[1]
# Set these for backwards-compatibility.
#
# This should be removed in Review Board 6.0.
parsed_file._deprecated_info['index'] = \
parsed_file.index_header_value
except ValueError:
raise DiffParserError('Malformed Index line', linenum)
linenum = temp_linenum + 1
break
elif line.startswith((b'---', b'+++')):
# We never found that line, but we did hit the start of
# a diff file. We can't treat the "Index:" line as special
# in this case.
break
temp_linenum += 1
return linenum
    def parse_diff_header(self, linenum, parsed_file):
        """Parse a standard header before changes made to a file.

        This attempts to parse the ``---`` (original) and ``+++`` (modified)
        file lines, which are usually present right before any changes to
        the file. By default, this method expects the ``---`` line to be
        found at ``linenum``.

        If found, this will populate
        :py:attr:`ParsedDiffFile.orig_filename`,
        :py:attr:`ParsedDiffFile.orig_file_details`,
        :py:attr:`ParsedDiffFile.modified_filename`, and
        :py:attr:`ParsedDiffFile.modified_file_details`.

        This calls out to :py:meth:`parse_filename_header` to help parse
        the contents immediately after the ``---`` or ``+++``.

        Subclasses can override this to parse these lines differently, or
        to process the results of these lines (such as converting special
        filenames to states like "deleted" or "new file"). They may also set
        :py:class:`ParsedFileDiff.skip` to skip the rest of this file and
        begin parsing a new entry at the returned line number.

        Args:
            linenum (int):
                The line number to begin parsing.

            parsed_file (ParsedDiffFile):
                The file currently being parsed.

        Returns:
            int:
                The next line number to parse.

        Raises:
            reviewboard.diffviewer.errors.DiffParserError:
                There was an error parsing the diff header. This may be a
                corrupted diff, or an error in the parsing implementation.
                Details are in the error message.
        """
        try:
            line1 = self.lines[linenum]
            line2 = self.lines[linenum + 1]

            is_diff_header = (
                # Unified diff headers
                (line1.startswith(b'--- ') and line2.startswith(b'+++ ')) or

                # Context diff headers. A "*** ... ****" line is a
                # context-diff hunk header rather than a file header, so
                # it's excluded here.
                (line1.startswith(b'*** ') and line2.startswith(b'--- ') and
                 not line1.endswith(b' ****'))
            )
        except IndexError:
            is_diff_header = False

        if is_diff_header:
            # This is a unified or context diff header. Parse the
            # file and extra info.
            try:
                # The [4:] skips past the "--- "/"*** " marker.
                (parsed_file.orig_filename,
                 parsed_file.orig_file_details) = \
                    self.parse_filename_header(self.lines[linenum][4:],
                                               linenum)
                linenum += 1

                (parsed_file.modified_filename,
                 parsed_file.modified_file_details) = \
                    self.parse_filename_header(self.lines[linenum][4:],
                                               linenum)

                # Set these for backwards-compatibility.
                #
                # This should be removed in Review Board 6.0.
                parsed_file._deprecated_info['origFile'] = \
                    parsed_file.orig_filename
                parsed_file._deprecated_info['origInfo'] = \
                    parsed_file.orig_file_details
                parsed_file._deprecated_info['newFile'] = \
                    parsed_file.modified_filename
                parsed_file._deprecated_info['newInfo'] = \
                    parsed_file.modified_file_details

                linenum += 1
            except ValueError:
                raise DiffParserError(
                    'The diff file is missing revision information',
                    linenum)

        return linenum
def parse_after_headers(self, linenum, parsed_file):
"""Parse information after a diff header but before diff data.
This attempts to parse the information found after
:py:meth:`parse_diff_headers` is called, but before gathering any lines
that are part of the diff contents. It's intended for the few diff
formats that may place content at this location.
By default, this does nothing.
Subclasses can override this to provide custom parsing of any lines
that may exist here. They may also set :py:class:`ParsedFileDiff.skip`
to skip the rest of this file and begin parsing a new entry at the
returned line number.
Args:
linenum (int):
The line number to begin parsing.
parsed_file (ParsedDiffFile):
The file currently being parsed.
Returns:
int:
The next line number to parse.
Raises:
reviewboard.diffviewer.errors.DiffParserError:
There was an error parsing the diff header. This may be a
corrupted diff, or an error in the parsing implementation.
Details are in the error message.
"""
return linenum
def parse_filename_header(self, s, linenum):
"""Parse the filename found in a diff filename line.
This parses the value after a ``---`` or ``+++`` indicator (or a
special variant handled by a subclass), normalizing the filename and
any following file details, and returning both for processing and
storage.
Often times, the file details will be a revision for the original
file, but this is not guaranteed, and is up to the variation of the
diff format.
By default, this will assume that a filename and file details are
separated by either a single tab, or two or more spaces. If neither
are found, this will fail to parse.
This must parse only the provided value, and cannot parse subsequent
lines.
Subclasses can override this behavior to parse these lines another
way, or to normalize filenames (handling escaping or filenames with
spaces as needed by that particular diff variation).
Args:
s (bytes):
The value to parse.
linenum (int):
The line number containing the value to parse.
Returns:
tuple:
A tuple containing:
1. The filename (as bytes)
2. The additional file information (as bytes)
Raises:
reviewboard.diffviewer.errors.DiffParserError:
There was an error parsing the diff header. This may be a
corrupted diff, or an error in the parsing implementation.
Details are in the error message.
"""
if b'\t' in s:
# There's a \t separating the filename and info. This is the
# best case scenario, since it allows for filenames with spaces
# without much work.
return s.split(b'\t', 1)
# There's spaces being used to separate the filename and info.
# This is technically wrong, so all we can do is assume that
# 1) the filename won't have multiple consecutive spaces, and
# 2) there's at least 2 spaces separating the filename and info.
if b' ' in s:
return re.split(br' +', s, 1)
raise DiffParserError('No valid separator after the filename was '
'found in the diff header',
linenum)
def raw_diff(self, diffset_or_commit):
"""Return a raw diff as a string.
This takes a DiffSet or DiffCommit and generates a new, single diff
file that represents all the changes made. It's used to regenerate
a diff and serve it up for other tools or processes to use.
Subclasses can override this to provide any special logic for building
the diff.
Args:
diffset_or_commit (reviewboard.diffviewer.models.diffset.DiffSet or
reviewboard.diffviewer.models.diffcommit
.DiffCommit):
The DiffSet or DiffCommit to render.
If passing in a DiffSet, only the cumulative diff's file
contents will be returned.
If passing in a DiffCommit, only that commit's file contents
will be returned.
Returns:
bytes:
The diff composed of all the component FileDiffs.
Raises:
TypeError:
The provided ``diffset_or_commit`` wasn't of a supported type.
"""
if hasattr(diffset_or_commit, 'cumulative_files'):
# This will be a DiffSet.
filediffs = diffset_or_commit.cumulative_files
elif hasattr(diffset_or_commit, 'files'):
# This will be a DiffCommit.
filediffs = diffset_or_commit.files.all()
else:
raise TypeError('%r is not a valid value. Please pass a DiffSet '
'or DiffCommit.'
% diffset_or_commit)
return b''.join(
filediff.diff
for filediff in filediffs
)
def get_orig_commit_id(self):
"""Return the commit ID of the original revision for the diff.
By default, this returns ``None``. Subclasses would override this if
they work with repositories that always look up changes to a file by
the ID of the commit that made the changes instead of a per-file
revision or ID.
Non-``None`` values returned by this method will override the values
being stored in :py:attr:`FileDiff.source_revision
<reviewboard.diffviewer.models.filediff.FileDiff.source_revision>`.
Implementations would likely want to parse out the commit ID from
some prior header and return it here. By the time this is called, all
files will have been parsed already.
Returns:
bytes:
The commit ID used to override the source revision of any created
:py:class:`~reviewboard.diffviewer.models.filediff.FileDiff`
instances.
"""
return None
class DiffXParser(BaseDiffParser):
"""Parser for DiffX files.
This will parse files conforming to the DiffX_ standard, storing the
diff content provided in each file section, as well as all the information
available in each DiffX section (options, preamble, metadata) as
``extra_data``. This allows the diffs to be re-built on download.
This parser is sufficient for most any DiffX need, but subclasses can
be created that augment the stored ``extra_data`` for any of the parsed
objects.
.. _DiffX: https://diffx.org
Version Added:
4.0.5:
This is experimental in 4.0.x, with plans to make it stable for 5.0.
The API may change during this time.
"""
    def parse_diff(self):
        """Parse the diff.

        This will parse the content of the DiffX file, returning a
        representation of the diff file and its content.

        Returns:
            ParsedDiff:
            The resulting parsed diff information.

        Raises:
            reviewboard.diffviewer.errors.DiffParserError:
                There was an error parsing part of the diff. This may be a
                corrupted diff, or an error in the parsing implementation.
                Details are in the error message.
        """
        class_name = type(self).__name__
        logger.debug('%s.parse_diff: Beginning parse of diff, size = %s',
                     class_name, len(self.data))

        try:
            diffx = DiffX.from_bytes(self.data)
        except DiffXParseError as e:
            # Surface pydiffx parse failures as the error type callers of
            # diff parsers expect.
            raise DiffParserError(str(e))

        # DiffX "op" values that represent a moved file.
        MOVED_OPS = {
            'move',
            'move-modify',
        }

        # DiffX "op" values that represent a copied file.
        COPIED_OPS = {
            'copy',
            'copy-modify',
        }

        # Process the main DiffX file information.
        parsed_diff = ParsedDiff(parser=self)
        parsed_diff.uses_commit_ids_as_revisions = \
            self.uses_commit_ids_as_revisions

        extra_data_diffx = {}
        self._store_options(extra_data_diffx, diffx)
        self._store_preamble(extra_data_diffx, diffx)
        self._store_meta(extra_data_diffx, diffx)

        if extra_data_diffx:
            parsed_diff.extra_data['diffx'] = extra_data_diffx

        # Process each change in the DiffX file.
        for change_num, diffx_change in enumerate(diffx.changes, start=1):
            parsed_diff_change = ParsedDiffChange(parsed_diff=parsed_diff)

            # Extract information and populate the ParsedDiffChange.
            change_meta = diffx_change.meta
            commit_id = change_meta.get('id')
            parent_ids = change_meta.get('parent ids')

            if commit_id is not None:
                parsed_diff_change.commit_id = commit_id.encode('utf-8')

            if parent_ids:
                # Only the first parent is tracked as the parent commit.
                parsed_diff_change.parent_commit_id = \
                    parent_ids[0].encode('utf-8')

            extra_data_change = {}
            self._store_options(extra_data_change, diffx_change)
            self._store_preamble(extra_data_change, diffx_change)
            self._store_meta(extra_data_change, diffx_change)

            if extra_data_change:
                parsed_diff_change.extra_data['diffx'] = extra_data_change

            # Process each file in the DiffX change.
            for file_num, diffx_file in enumerate(diffx_change.files, start=1):
                parsed_diff_file = ParsedDiffFile(
                    parsed_diff_change=parsed_diff_change)

                extra_data_file = {}
                self._store_options(extra_data_file, diffx_file)
                self._store_meta(extra_data_file, diffx_file)
                self._store_options(extra_data_file, diffx_file.diff_section,
                                    key='diff_options')

                if extra_data_file:
                    parsed_diff_file.extra_data['diffx'] = extra_data_file

                # Extract information needed to populate the ParsedDiffFile.
                file_meta = diffx_file.meta
                diff_data = diffx_file.diff
                path_info = file_meta.get('path')
                revision_info = file_meta.get('revision', {})
                stats_info = file_meta.get('stats')

                # Files default to a plain modification when no op is given.
                op = file_meta.get('op', 'modify')

                # Parse the file path information.
                if isinstance(path_info, dict):
                    # If the file is a dictionary, both keys are required.
                    try:
                        orig_filename = path_info['old']
                        modified_filename = path_info['new']
                    except KeyError as e:
                        raise DiffParserError(
                            _('Missing the "path.%(key)s" key in change '
                              '%(change_num)s, file %(file_num)s')
                            % {
                                'key': e.args[0],
                                'change_num': change_num,
                                'file_num': file_num,
                            })
                elif isinstance(path_info, str):
                    # If the file is a string, both filenames are the same.
                    orig_filename = path_info
                    modified_filename = path_info
                else:
                    raise DiffParserError(
                        _('Unexpected type %(type)s for "path" key in change '
                          '%(change_num)s, file %(file_num)s')
                        % {
                            'change_num': change_num,
                            'file_num': file_num,
                            'type': type(path_info),
                        })

                # Parse the revision information.
                if isinstance(revision_info, dict):
                    if 'old' in revision_info:
                        orig_revision = Revision(revision_info['old'])
                    else:
                        # Without an explicit old revision, newly-created
                        # files are pre-creation; anything else has an
                        # unknown revision.
                        if op == 'create':
                            orig_revision = PRE_CREATION
                        else:
                            orig_revision = UNKNOWN

                    if 'new' in revision_info:
                        modified_revision = Revision(revision_info['new'])
                    else:
                        modified_revision = HEAD
                else:
                    raise DiffParserError(
                        _('Unexpected type %(type)s for "revision" key in '
                          'change %(change_num)s, file %(file_num)s')
                        % {
                            'change_num': change_num,
                            'file_num': file_num,
                            'type': type(revision_info),
                        })

                # Grab the insert/delete statistics.
                if (not stats_info or
                    'insertions' not in stats_info or
                    'deletions' not in stats_info):
                    # This DiffX is lacking stats. We'll need to generate
                    # it now.
                    #
                    # If there's a problem with the diff, then this could
                    # still fail, so we'll still need to default the values
                    # to 0 below.
                    diffx_file.generate_stats()
                    stats_info = diffx_file.meta.get('stats') or {}

                # We can now populate the ParsedDiffFile.
                parsed_diff_file.orig_filename = orig_filename.encode('utf-8')
                parsed_diff_file.orig_file_details = orig_revision
                parsed_diff_file.modified_filename = \
                    modified_filename.encode('utf-8')
                parsed_diff_file.modified_file_details = modified_revision
                parsed_diff_file.binary = \
                    (diffx_file.diff_type == DiffType.BINARY)
                parsed_diff_file.is_symlink = \
                    (file_meta.get('type') == 'symlink')
                parsed_diff_file.deleted = (op == 'delete')
                parsed_diff_file.moved = op in MOVED_OPS
                parsed_diff_file.copied = op in COPIED_OPS
                parsed_diff_file.insert_count = stats_info.get('insertions', 0)
                parsed_diff_file.delete_count = stats_info.get('deletions', 0)

                try:
                    parsed_diff_file.extra_data['encoding'] = \
                        extra_data_file['diff_options']['encoding']
                except KeyError:
                    # An explicit encoding wasn't set.
                    pass

                # If this represents a symlink, set the information.
                if parsed_diff_file.is_symlink:
                    symlink_target = file_meta.get('symlink target')

                    if isinstance(symlink_target, dict):
                        old_symlink_target = symlink_target.get('old')
                        new_symlink_target = symlink_target.get('new')
                    elif isinstance(symlink_target, str):
                        old_symlink_target = symlink_target
                        new_symlink_target = symlink_target
                    else:
                        logger.warning('Unexpected symlink target type (%r) '
                                       'found in diff %r',
                                       symlink_target, self.data)
                        old_symlink_target = None
                        new_symlink_target = None

                    if old_symlink_target or new_symlink_target:
                        if old_symlink_target:
                            old_symlink_target = \
                                old_symlink_target.encode('utf-8')

                        if new_symlink_target:
                            new_symlink_target = \
                                new_symlink_target.encode('utf-8')

                        # Only the side(s) relevant to the op are recorded.
                        if op == 'create':
                            parsed_diff_file.new_symlink_target = \
                                new_symlink_target
                        elif op == 'delete':
                            parsed_diff_file.old_symlink_target = \
                                old_symlink_target
                        else:
                            parsed_diff_file.old_symlink_target = \
                                old_symlink_target
                            parsed_diff_file.new_symlink_target = \
                                new_symlink_target

                # If there are UNIX file modes, set them.
                unix_mode = file_meta.get('unix file mode')

                if unix_mode is not None:
                    if isinstance(unix_mode, dict):
                        old_unix_mode = unix_mode.get('old')
                        new_unix_mode = unix_mode.get('new')
                    elif isinstance(unix_mode, str):
                        old_unix_mode = unix_mode
                        new_unix_mode = unix_mode
                    else:
                        logger.warning('Unexpected UNIX file mode (%r) '
                                       'found in diff %r',
                                       unix_mode, self.data)
                        old_unix_mode = None
                        new_unix_mode = None

                    if old_unix_mode or new_unix_mode:
                        if op == 'create':
                            parsed_diff_file.new_unix_mode = new_unix_mode
                        elif op == 'delete':
                            parsed_diff_file.old_unix_mode = old_unix_mode
                        else:
                            parsed_diff_file.new_unix_mode = new_unix_mode
                            parsed_diff_file.old_unix_mode = old_unix_mode

                parsed_diff_file.append_data(diff_data)
                parsed_diff_file.finalize()

        logger.debug('%s.parse_diff: Finished parsing diff.', class_name)

        return parsed_diff
    def raw_diff(self, diffset_or_commit):
        """Return a raw diff as a string.

        This takes a
        :py:class:`~reviewboard.diffviewer.models.diffset.DiffSet` or
        :py:class:`~reviewboard.diffviewer.models.diffcommit.DiffCommit` and
        generates a new, single DiffX file that represents all the changes
        made, based on the previously-stored DiffX information in
        ``extra_data`` dictionaries. It's used to regenerate a DiffX and
        serve it up for other tools or processes to use.

        Args:
            diffset_or_commit (reviewboard.diffviewer.models.diffset.DiffSet
                               or
                               reviewboard.diffviewer.models.diffcommit
                               .DiffCommit):
                The DiffSet or DiffCommit to render.

                If passing in a DiffSet, the full uploaded DiffX file
                contents will be returned.

                If passing in a DiffCommit, a new DiffX representing only
                that commit's contents will be returned. This will lack the
                main preamble or metadata, or any other changes previously
                in the DiffX file.

        Returns:
            bytes:
            The resulting DiffX file contents.

        Raises:
            TypeError:
                The provided ``diffset_or_commit`` value wasn't of a
                supported type.
        """
        if hasattr(diffset_or_commit, 'cumulative_files'):
            # This will be a DiffSet.
            #
            # We'll pull out all the commits and files at once, to reduce
            # query counts.
            #
            # We also will be very careful about not assuming keys that are
            # present will necessarily be dictionaries. Be a bit careful and
            # default anything falsy to an empty dictionary, here and below.
            diffset = diffset_or_commit
            diffx_main_info = diffset.extra_data.get('diffx') or {}
            diffcommits = diffset.commits.prefetch_related('files')

            # A DiffSet with per-commit history serializes one change per
            # commit; otherwise a single change wraps the cumulative files.
            if diffcommits:
                changes = [
                    {
                        'extra_data': diffcommit.extra_data,
                        'files': diffcommit.files.all(),
                    }
                    for diffcommit in diffcommits
                ]
            else:
                changes = [
                    {
                        'extra_data': diffset.extra_data.get(
                            'change_extra_data', {}),
                        'files': diffset.cumulative_files,
                    },
                ]
        elif hasattr(diffset_or_commit, 'files'):
            # This will be a DiffCommit.
            #
            # We'll still need to pull out the DiffSet and grab the encoding,
            # if one is specified, since this will impact the DiffCommit's
            # change section.
            diffcommit = diffset_or_commit
            changes = [
                {
                    'extra_data': diffcommit.extra_data,
                    'files': diffcommit.files.all(),
                },
            ]

            diffset_diffx_info = \
                diffcommit.diffset.extra_data.get('diffx') or {}
            diffset_diffx_options = diffset_diffx_info.get('options') or {}
            main_encoding = diffset_diffx_options.get('encoding')

            diffx_main_info = {}

            if main_encoding:
                diffx_main_info['options'] = {
                    'encoding': main_encoding,
                }
        else:
            raise TypeError('%r is not a valid value. Please pass a DiffSet '
                            'or DiffCommit.'
                            % diffset_or_commit)

        # Rebuild the DiffX document from the stored extra_data state.
        diffx = DiffX()
        self._load_options(diffx, diffx_main_info)
        self._load_preamble(diffx, diffx_main_info)
        self._load_meta(diffx, diffx_main_info)

        for change in changes:
            diffx_change_info = change['extra_data'].get('diffx', {})
            diffx_change = diffx.add_change()
            self._load_options(diffx_change, diffx_change_info)
            self._load_preamble(diffx_change, diffx_change_info)
            self._load_meta(diffx_change, diffx_change_info)

            for filediff in change['files']:
                diffx_file_info = filediff.extra_data.get('diffx') or {}
                diffx_file = diffx_change.add_file()
                self._load_options(diffx_file, diffx_file_info)
                self._load_meta(diffx_file, diffx_file_info)

                if filediff.diff:
                    diffx_file.diff = filediff.diff
                    self._load_options(diffx_file.diff_section,
                                       diffx_file_info,
                                       key='diff_options')

        return diffx.to_bytes()
def _store_options(self, extra_data, diffx_section, key='options'):
"""Store options for a section in extra_data.
Options will be stored only if not empty.
Args:
extra_data (dict):
The dictionary in which to store option data.
diffx_section (pydiffx.dom.objects.BaseDiffXSection):
The section containing the options to store.
key (unicode, optional):
The name of the key to use in ``extra_data``.
"""
if diffx_section.options:
extra_data[key] = deepcopy(diffx_section.options)
def _store_preamble(self, extra_data, diffx_section):
"""Store preamble options and text for a section in extra_data.
Preamble text will only be stored if not empty. Options will only
be stored if neither is empty.
Args:
extra_data (dict):
The dictionary in which to store preamble information.
diffx_section (pydiffx.dom.objects.BaseDiffXSection):
The section containing the preamble options and text to store.
"""
if diffx_section.preamble:
extra_data['preamble'] = diffx_section.preamble
self._store_options(extra_data, diffx_section.preamble_section,
key='preamble_options')
def _store_meta(self, extra_data, diffx_section):
"""Store metadata options and content for a section in extra_data.
Metadata will only be stored if not empty. Options will only be
stored if neither is empty.
Args:
extra_data (dict):
The dictionary in which to store metadata information.
diffx_section (pydiffx.dom.objects.BaseDiffXSection):
The section containing the metadata options and content to
store.
"""
if diffx_section.meta:
extra_data['metadata'] = deepcopy(diffx_section.meta)
self._store_options(extra_data, diffx_section.meta_section,
key='metadata_options')
    def _load_options(self, diffx_section, extra_data, key='options'):
        """Load options from extra_data into a section.

        If nothing is stored under ``key``, the section's existing options
        are left untouched.

        Args:
            diffx_section (pydiffx.dom.objects.BaseDiffXSection):
                The section to store the options in.

            extra_data (dict):
                The dictionary from which to load option data.

            key (unicode, optional):
                The name of the key to use in ``extra_data``.
        """
        options = extra_data.get(key)
        if options:
            # Replace, rather than merge into, any existing options so the
            # section reflects exactly what was serialized.
            diffx_section.options.clear()
            diffx_section.options.update(options)
    def _load_preamble(self, diffx_section, extra_data):
        """Load a preamble and options from extra_data into a section.

        If no preamble text was stored, the section's preamble is left
        untouched. Preamble options are loaded via :py:meth:`_load_options`.

        Args:
            diffx_section (pydiffx.dom.objects.BaseDiffXSection):
                The section to store the preamble in.

            extra_data (dict):
                The dictionary from which to load preamble data.
        """
        preamble = extra_data.get('preamble')
        if preamble:
            diffx_section.preamble = preamble
        self._load_options(diffx_section.preamble_section,
                           extra_data,
                           key='preamble_options')
def _load_meta(self, diffx_section, extra_data):
"""Load metadata and options from extra_data into a section.
Args:
extra_data (dict):
The dictionary in which to load metadata information.
diffx_section (pydiffx.dom.objects.BaseDiffXSection):
The section to store the metadata in.
key (unicode, optional):
The name of the key to use in ``extra_data``.
"""
preamble = extra_data.get('metadata')
if preamble:
diffx_section.meta = preamble
self._load_options(diffx_section.meta_section,
extra_data,
key='metadata_options')
| mit | 59b569c7e244fa46769f80886564a0a6 | 34.713533 | 79 | 0.55808 | 4.687262 | false | false | false | false |
reviewboard/reviewboard | reviewboard/webapi/resources/review_general_comment.py | 1 | 4426 | from django.core.exceptions import ObjectDoesNotExist
from djblets.util.decorators import augment_method_from
from djblets.webapi.decorators import (webapi_login_required,
webapi_response_errors,
webapi_request_fields)
from djblets.webapi.errors import (DOES_NOT_EXIST, INVALID_FORM_DATA,
NOT_LOGGED_IN, PERMISSION_DENIED)
from reviewboard.webapi.decorators import webapi_check_local_site
from reviewboard.webapi.resources import resources
from reviewboard.webapi.resources.base_review_general_comment import \
BaseReviewGeneralCommentResource
class ReviewGeneralCommentResource(BaseReviewGeneralCommentResource):
    """Provides information on general comments made on a review.
    If the review is a draft, then comments can be added, deleted, or
    changed on this list. However, if the review is already published,
    then no changes can be made.
    """
    allowed_methods = ('GET', 'POST', 'PUT', 'DELETE')
    policy_id = 'review_general_comment'
    # Serialized comments are parented under their review in payloads/URLs.
    model_parent_key = 'review'
    def get_queryset(self, request, review_id, *args, **kwargs):
        """Return the queryset of general comments on the given review."""
        q = super(ReviewGeneralCommentResource, self).get_queryset(
            request, *args, **kwargs)
        return q.filter(review=review_id)
    @webapi_check_local_site
    @webapi_login_required
    @webapi_response_errors(DOES_NOT_EXIST, INVALID_FORM_DATA,
                            PERMISSION_DENIED, NOT_LOGGED_IN)
    @webapi_request_fields(
        required=BaseReviewGeneralCommentResource.REQUIRED_CREATE_FIELDS,
        optional=BaseReviewGeneralCommentResource.OPTIONAL_CREATE_FIELDS,
        allow_unknown=True
    )
    def create(self, request, *args, **kwargs):
        """Creates a general comment on a review.
        This will create a new comment on a review. The comment contains text
        only.
        Extra data can be stored for later lookup. See
        :ref:`webapi2.0-extra-data` for more information.
        """
        try:
            review = resources.review.get_object(request, *args, **kwargs)
        except ObjectDoesNotExist:
            return DOES_NOT_EXIST
        # Only users with modify access to the review may add comments.
        if not resources.review.has_modify_permissions(request, review):
            return self.get_no_access_error(request)
        return self.create_comment(fields=(),
                                   review=review,
                                   comments_m2m=review.general_comments,
                                   **kwargs)
    @webapi_check_local_site
    @webapi_login_required
    @webapi_response_errors(DOES_NOT_EXIST, NOT_LOGGED_IN, PERMISSION_DENIED)
    @webapi_request_fields(
        optional=BaseReviewGeneralCommentResource.OPTIONAL_UPDATE_FIELDS,
        allow_unknown=True
    )
    def update(self, request, *args, **kwargs):
        """Updates a general comment.
        This can update the text or region of an existing comment. It
        can only be done for comments that are part of a draft review.
        Extra data can be stored for later lookup. See
        :ref:`webapi2.0-extra-data` for more information.
        """
        try:
            # Resolve the full parent chain; any missing piece is a 404.
            resources.review_request.get_object(request, *args, **kwargs)
            review = resources.review.get_object(request, *args, **kwargs)
            general_comment = self.get_object(request, *args, **kwargs)
        except ObjectDoesNotExist:
            return DOES_NOT_EXIST
        return self.update_comment(request=request,
                                   review=review,
                                   comment=general_comment,
                                   **kwargs)
    @webapi_check_local_site
    @augment_method_from(BaseReviewGeneralCommentResource)
    def delete(self, *args, **kwargs):
        """Deletes the comment.
        This will remove the comment from the review. This cannot be undone.
        Only comments on draft reviews can be deleted. Attempting to delete
        a published comment will return a Permission Denied error.
        Instead of a payload response on success, this will return :http:`204`.
        """
        pass
    @webapi_check_local_site
    @augment_method_from(BaseReviewGeneralCommentResource)
    def get_list(self, *args, **kwargs):
        """Returns the list of general comments made on a review."""
        pass
review_general_comment_resource = ReviewGeneralCommentResource()
| mit | 9c829966c8987a859770355f2068233d | 38.873874 | 79 | 0.640533 | 4.301263 | false | false | false | false |
reviewboard/reviewboard | reviewboard/webapi/tests/test_diffcommit.py | 1 | 11787 | """Unit tests for the DiffCommitResource."""
from django.contrib.auth.models import User
from djblets.features.testing import override_feature_checks
from djblets.testing.decorators import add_fixtures
from djblets.webapi.errors import PERMISSION_DENIED
from djblets.webapi.testing.decorators import webapi_test_template
from reviewboard.reviews.models import ReviewRequest
from reviewboard.webapi.resources import resources
from reviewboard.webapi.tests.base import BaseWebAPITestCase
from reviewboard.webapi.tests.mimetypes import (diffcommit_item_mimetype,
diffcommit_list_mimetype)
from reviewboard.webapi.tests.mixins import (BasicTestsMetaclass,
ReviewRequestChildItemMixin,
ReviewRequestChildListMixin)
from reviewboard.webapi.tests.mixins_extra_data import ExtraDataItemMixin
from reviewboard.webapi.tests.urls import (get_diffcommit_item_url,
get_diffcommit_list_url)
def compare_diffcommit(self, item_rsp, item):
    """Assert that a serialized DiffCommit matches the original model.

    Args:
        item_rsp (dict):
            The serialized response payload.

        item (reviewboard.diffviewer.models.diffcommit.DiffCommit):
            The DiffCommit to compare against.

    Raises:
        AssertionError:
            A field in the serialized response does not match the
            corresponding field on the DiffCommit.
    """
    # Each pair maps a response key to the expected model value; checked
    # in a fixed order so failure messages are deterministic.
    expected_fields = (
        ('id', item.pk),
        ('commit_id', item.commit_id),
        ('parent_id', item.parent_id),
        ('commit_message', item.commit_message),
        ('author_name', item.author_name),
        ('author_email', item.author_email),
        ('committer_name', item.committer_name),
        ('committer_email', item.committer_email),
    )

    for rsp_key, expected in expected_fields:
        self.assertEqual(item_rsp[rsp_key], expected)
class ResourceListTests(ReviewRequestChildListMixin, BaseWebAPITestCase,
                        metaclass=BasicTestsMetaclass):
    """Tests for DiffCommitResource list resource."""
    fixtures = ['test_users', 'test_scmtools']
    sample_api_url = 'review-request/<id>/diffs/<revision>/commits/'
    resource = resources.diffcommit
    # Field-by-field comparison helper shared with the item test suite.
    compare_item = compare_diffcommit
    def setup_http_not_allowed_list_test(self, user):
        """Return a list URL for the mixin's disallowed-method tests."""
        repository = self.create_repository(tool_name='Git')
        review_request = self.create_review_request(
            repository=repository,
            create_with_history=True,
            public=True)
        diffset = self.create_diffset(review_request=review_request)
        return get_diffcommit_list_url(review_request, diffset.revision)
    def setup_review_request_child_test(self, review_request):
        """Return (URL, mimetype) for the review-request child tests."""
        # Mark the review request as commit-history-capable before adding
        # the diffset.
        review_request.extra_data = review_request.extra_data or {}
        review_request.extra_data[
            ReviewRequest._CREATED_WITH_HISTORY_EXTRA_DATA_KEY] = True
        review_request.save(update_fields=('extra_data',))
        diffset = self.create_diffset(review_request=review_request)
        return (get_diffcommit_list_url(review_request,
                                        diffset.revision),
                diffcommit_list_mimetype)
    #
    # HTTP GET tests
    #
    def setup_basic_get_test(self, user, with_local_site, local_site_name,
                             populate_items):
        """Return (URL, mimetype, items) for the mixin's basic GET tests."""
        repository = self.create_repository(tool_name='Git')
        review_request = self.create_review_request(
            with_local_site=with_local_site,
            repository=repository,
            public=True)
        diffset = self.create_diffset(review_request=review_request)
        items = []
        if populate_items:
            items.append(self.create_diffcommit(diffset=diffset,
                                                repository=repository))
        return (get_diffcommit_list_url(review_request,
                                        diffset.revision,
                                        local_site_name=local_site_name),
                diffcommit_list_mimetype,
                items)
class ResourceItemTests(ExtraDataItemMixin, ReviewRequestChildItemMixin,
                        BaseWebAPITestCase, metaclass=BasicTestsMetaclass):
    """Tests for DiffCommitResource item resource."""
    fixtures = ['test_users', 'test_scmtools']
    sample_api_url = \
        'review-request/<id>/diffs/<revision>/commits/<commit-id>/'
    resource = resources.diffcommit
    # Field-by-field comparison helper shared with the list test suite.
    compare_item = compare_diffcommit
    def setup_review_request_child_test(self, review_request):
        """Return (URL, mimetype) for the review-request child tests."""
        diffset = self.create_diffset(review_request=review_request)
        # Mark the review request as commit-history-capable.
        review_request.extra_data[
            ReviewRequest._CREATED_WITH_HISTORY_EXTRA_DATA_KEY] = True
        review_request.save(update_fields=('extra_data',))
        commit = self.create_diffcommit(diffset=diffset,
                                        repository=review_request.repository)
        return (get_diffcommit_item_url(review_request,
                                        diffset.revision,
                                        commit.commit_id),
                diffcommit_item_mimetype)
    def setup_http_not_allowed_item_test(self, user):
        """Return an item URL for the mixin's disallowed-method tests."""
        repository = self.create_repository(tool_name='Git')
        review_request = self.create_review_request(
            repository=repository,
            public=True)
        diffset = self.create_diffset(review_request=review_request)
        commit = self.create_diffcommit(diffset=diffset,
                                        repository=repository)
        return get_diffcommit_item_url(review_request, diffset.revision,
                                       commit.commit_id)
    #
    # HTTP GET tests
    #
    def setup_basic_get_test(self, user, with_local_site, local_site_name):
        """Return (URL, mimetype, item) for the mixin's basic GET tests."""
        repository = self.create_repository(tool_name='Git')
        review_request = self.create_review_request(
            repository=repository,
            submitter=user,
            with_local_site=with_local_site)
        diffset = self.create_diffset(review_request)
        commit = self.create_diffcommit(diffset=diffset,
                                        repository=repository)
        return (get_diffcommit_item_url(review_request, diffset.revision,
                                        commit.commit_id, local_site_name),
                diffcommit_item_mimetype,
                commit)
    @webapi_test_template
    def test_get_patch(self):
        """Testing the GET <URL> API with Accept: text/x-patch"""
        url = self.setup_basic_get_test(self.user,
                                        with_local_site=False,
                                        local_site_name=None)[0]
        with override_feature_checks(self.override_features):
            rsp = self.api_get(url,
                               expected_mimetype='text/x-patch',
                               expected_json=False,
                               HTTP_ACCEPT='text/x-patch')
        self.assertEqual(self.DEFAULT_GIT_FILEDIFF_DATA_DIFF, rsp)
    @add_fixtures(['test_site'])
    @webapi_test_template
    def test_get_patch_local_site(self):
        """Testing the GET <URL> API with Accept: text/x-patch on a Local Site
        """
        url = self.setup_basic_get_test(
            User.objects.get(username='doc'),
            with_local_site=True,
            local_site_name=self.local_site_name)[0]
        self.client.login(username='doc', password='doc')
        with override_feature_checks(self.override_features):
            rsp = self.api_get(url,
                               expected_mimetype='text/x-patch',
                               expected_json=False,
                               HTTP_ACCEPT='text/x-patch')
        self.assertEqual(self.DEFAULT_GIT_FILEDIFF_DATA_DIFF, rsp)
    @add_fixtures(['test_site'])
    @webapi_test_template
    def test_get_patch_local_site_no_access(self):
        """Testing the GET <URL> API with Accept: text/x-patch on a Local Site
        without access
        """
        url = self.setup_basic_get_test(
            User.objects.get(username='doc'),
            with_local_site=True,
            local_site_name=self.local_site_name)[0]
        # Note: no login here, so the request is made without site access.
        with override_feature_checks(self.override_features):
            rsp = self.api_get(url,
                               expected_status=403,
                               HTTP_ACCEPT='text/x-patch')
        self.assertEqual(rsp['stat'], 'fail')
        self.assertEqual(rsp['err']['code'], PERMISSION_DENIED.code)
    @webapi_test_template
    def test_get_patch_private_repository(self):
        """Testing the GET <URL> API with Accept: text/x-patch on a private
        repository
        """
        doc = User.objects.get(username='doc')
        repository = self.create_repository(tool_name='Git', public=False)
        repository.users.add(doc)
        review_request = self.create_review_request(repository=repository,
                                                    submitter=doc)
        diffset = self.create_diffset(review_request)
        commit = self.create_diffcommit(diffset=diffset, repository=repository)
        self.client.login(username='doc', password='doc')
        with override_feature_checks(self.override_features):
            rsp = self.api_get(
                get_diffcommit_item_url(review_request, diffset.revision,
                                        commit.commit_id),
                expected_mimetype='text/x-patch',
                expected_json=False,
                HTTP_ACCEPT='text/x-patch')
        self.assertEqual(self.DEFAULT_GIT_FILEDIFF_DATA_DIFF, rsp)
    @webapi_test_template
    def test_get_patch_private_repository_no_access(self):
        """Testing the GET <URL> API with Accept: text/x-patch on a private
        repository without access
        """
        doc = User.objects.get(username='doc')
        repository = self.create_repository(tool_name='Git', public=False)
        repository.users.set([doc])
        review_request = self.create_review_request(repository=repository,
                                                    submitter=doc)
        diffset = self.create_diffset(review_request)
        commit = self.create_diffcommit(diffset=diffset, repository=repository)
        # Note: no login here, so the requester lacks repository access.
        with override_feature_checks(self.override_features):
            rsp = self.api_get(
                get_diffcommit_item_url(review_request, diffset.revision,
                                        commit.commit_id),
                expected_status=403)
        self.assertEqual(rsp['stat'], 'fail')
        self.assertEqual(rsp['err']['code'], PERMISSION_DENIED.code)
    #
    # HTTP PUT tests
    #
    def setup_basic_put_test(self, user, with_local_site, local_site_name,
                             put_valid_data):
        """Return the (URL, mimetype, data, item, objs) PUT test fixture."""
        repository = self.create_repository(tool_name='Git')
        review_request = self.create_review_request(
            repository=repository,
            submitter=user,
            with_local_site=with_local_site)
        diffset = self.create_diffset(review_request)
        commit = self.create_diffcommit(diffset=diffset,
                                        repository=repository)
        return (get_diffcommit_item_url(review_request,
                                        diffset.revision,
                                        commit.commit_id,
                                        local_site_name=local_site_name),
                diffcommit_item_mimetype,
                {},
                commit,
                [])
    def check_put_result(self, user, item_rsp, item):
        """Verify the PUT response payload against the stored commit."""
        self.compare_item(item_rsp, item)
| mit | 1c38851f0f3459bccd1d468f4bdbd404 | 39.927083 | 79 | 0.591414 | 4.279956 | false | true | false | false |
reviewboard/reviewboard | reviewboard/hostingsvcs/beanstalk.py | 1 | 18155 | import json
import logging
import os
from collections import defaultdict
from urllib.error import HTTPError, URLError
from urllib.parse import quote
from django import forms
from django.http import HttpResponse
from django.urls import path
from django.utils.translation import gettext_lazy as _
from django.views.decorators.http import require_POST
from reviewboard.admin.server import get_server_url
from reviewboard.hostingsvcs.forms import (HostingServiceAuthForm,
HostingServiceForm)
from reviewboard.hostingsvcs.hook_utils import (close_all_review_requests,
get_review_request_id)
from reviewboard.hostingsvcs.service import HostingService
from reviewboard.scmtools.crypto_utils import (decrypt_password,
encrypt_password)
from reviewboard.scmtools.errors import FileNotFoundError
from reviewboard.scmtools.svn.utils import (collapse_svn_keywords,
has_expanded_svn_keywords)
logger = logging.getLogger(__name__)
class BeanstalkAuthForm(HostingServiceAuthForm):
    """Authentication form for the Beanstalk hosting service.
    This replaces some of the help text to make setup a bit easier.
    """
    class Meta:
        # The stock "password" field actually stores a Beanstalk API access
        # token, so relabel it and explain where tokens come from.
        labels = {
            'hosting_account_password': 'Access token',
        }
        help_texts = {
            'hosting_account_username': _(
                'Your Beanstalk username. This is case-sensitive and will '
                'not be your Beanstalk e-mail address.'
            ),
            'hosting_account_password': _(
                'A pre-generated access token used to log into your account '
                'via an API. You can generate these on Beanstalk by clicking '
                'your name in the top-right of any page, clicking "Access '
                'Tokens," and then clicking "Generate a token."'
            ),
        }
class BeanstalkForm(HostingServiceForm):
    """Repository configuration form for the Beanstalk hosting service.

    The two fields below are substituted into the URL templates in
    ``Beanstalk.repository_fields``.
    """
    beanstalk_account_domain = forms.CharField(
        label=_('Beanstalk account domain'),
        max_length=64,
        required=True,
        widget=forms.TextInput(attrs={'size': '60'}),
        help_text=_('This is the <tt>domain</tt> part of '
                    '<tt>domain.beanstalkapp.com</tt>'))
    beanstalk_repo_name = forms.CharField(
        label=_('Repository name'),
        max_length=64,
        required=True,
        widget=forms.TextInput(attrs={'size': '60'}))
class BeanstalkHookViews(object):
    """Container class for hook views."""
    @staticmethod
    @require_POST
    def process_post_receive_hook(request, local_site_name=None,
                                  repository_id=None,
                                  hosting_service_id=None,
                                  *args, **kwargs):
        """Close review requests as submitted automatically after a push.
        Args:
            request (django.http.HttpRequest):
                The request from the Beanstalk webhook.
            local_site_name (unicode, optional):
                The local site name, if available.
            repository_id (int, optional):
                The pk of the repository, if available.
            hosting_service_id (unicode, optional):
                The name of the hosting service.
            *args (list):
                Additional postitional arguments.
            **kwargs (dict):
                Additional keyword arguments.
        Returns:
            django.http.HttpResponse:
            The HTTP response (:http:`415` if the payload is missing or not
            valid JSON).
        """
        try:
            server_url = get_server_url(request=request)
            # Beanstalk sends git payloads under "payload" and SVN payloads
            # under "commit"; dispatch accordingly.
            if 'payload' in request.POST:
                payload = json.loads(request.POST['payload'])
                BeanstalkHookViews._close_git_review_requests(
                    payload, server_url, local_site_name, repository_id,
                    hosting_service_id)
            else:
                payload = json.loads(request.POST['commit'])
                BeanstalkHookViews._close_svn_review_request(
                    payload, server_url, local_site_name, repository_id,
                    hosting_service_id)
        except KeyError as e:
            logger.error('There is no JSON payload in the POST request.: %s',
                         e,
                         extra={'request': request})
            return HttpResponse(status=415)
        except ValueError as e:
            logger.error('The payload is not in JSON format: %s', e,
                         extra={'request': request})
            return HttpResponse(status=415)
        return HttpResponse()
    @staticmethod
    def _close_git_review_requests(payload,
                                   server_url,
                                   local_site_name,
                                   repository_id,
                                   hosting_service_id):
        """Close all review requests for the git repository.
        A git payload may contain multiple commits. If a commit's commit
        message does not contain a review request ID, it closes based on
        it's commit id.
        This is called for its side effects only and returns nothing.
        Args:
            payload (dict):
                The decoded webhook payload.
            server_url (unicode):
                The current server URL.
            local_site_name (unicode):
                The local site name, if available.
            repository_id (int):
                The pk of the repository, if available.
            hosting_service_id (unicode):
                The name of the hosting service.
        """
        review_id_to_commits_map = defaultdict(list)
        branch_name = payload.get('branch')
        if not branch_name:
            # Fixed: previously this returned the (empty) internal map,
            # inconsistent with the normal path, which returns None. The
            # caller ignores the return value either way.
            return
        commits = payload.get('commits', [])
        for commit in commits:
            commit_hash = commit.get('id')
            commit_message = commit.get('message')
            review_request_id = get_review_request_id(
                commit_message, server_url, commit_hash)
            # Describe the commit as "<branch> (<short hash>)".
            commit_entry = '%s (%s)' % (branch_name, commit_hash[:7])
            review_id_to_commits_map[review_request_id].append(commit_entry)
        close_all_review_requests(review_id_to_commits_map, local_site_name,
                                  repository_id, hosting_service_id)
    @staticmethod
    def _close_svn_review_request(payload,
                                  server_url,
                                  local_site_name,
                                  repository_id,
                                  hosting_service_id):
        """Close the review request for an SVN repository.
        The SVN payload contains one commit. If the commit's message does not
        contain a review request ID, this will not close any review requests.
        Args:
            payload (dict):
                The decoded webhook payload.
            server_url (unicode):
                The current server URL.
            local_site_name (unicode):
                The local site name, if available.
            repository_id (int):
                The pk of the repository, if available.
            hosting_service_id (unicode):
                The name of the hosting service.
        """
        review_id_to_commits_map = defaultdict(list)
        commit_message = payload.get('message')
        branch_name = payload.get('changeset_url', 'SVN Repository')
        # NOTE(review): this assumes 'revision' is always present in SVN
        # payloads; a missing revision would raise TypeError here.
        revision = '%s %d' % ('Revision: ', payload.get('revision'))
        review_request_id = get_review_request_id(commit_message, server_url,
                                                  None)
        commit_entry = '%s (%s)' % (branch_name, revision)
        review_id_to_commits_map[review_request_id].append(commit_entry)
        close_all_review_requests(review_id_to_commits_map, local_site_name,
                                  repository_id, hosting_service_id)
class Beanstalk(HostingService):
    """Hosting service support for Beanstalk.
    Beanstalk is a source hosting service that supports Git and Subversion
    repositories. It's available at http://beanstalkapp.com/.
    """
    name = 'Beanstalk'
    hosting_service_id = 'beanstalk'
    needs_authorization = True
    supports_bug_trackers = False
    supports_repositories = True
    supported_scmtools = ['Git', 'Subversion']
    form = BeanstalkForm
    auth_form = BeanstalkAuthForm
    # URL templates filled in from BeanstalkForm's cleaned data.
    repository_fields = {
        'Git': {
            'path': 'git@%(beanstalk_account_domain)s'
                    '.beanstalkapp.com:/%(beanstalk_account_domain)s/'
                    '%(beanstalk_repo_name)s.git',
            'mirror_path': 'https://%(beanstalk_account_domain)s'
                           '.git.beanstalkapp.com/%(beanstalk_repo_name)s.git',
        },
        'Subversion': {
            'path': 'https://%(beanstalk_account_domain)s'
                    '.svn.beanstalkapp.com/%(beanstalk_repo_name)s/',
        },
    }
    # Per-repository webhook endpoint for closing review requests on push.
    repository_url_patterns = [
        path('hooks/post-receive/',
             BeanstalkHookViews.process_post_receive_hook),
    ]
    def check_repository(self, beanstalk_account_domain=None,
                         beanstalk_repo_name=None, *args, **kwargs):
        """Checks the validity of a repository.
        This will perform an API request against Beanstalk to get
        information on the repository. This will throw an exception if
        the repository was not found, and return cleanly if it was found.
        """
        self._api_get_repository(beanstalk_account_domain, beanstalk_repo_name)
    def authorize(self, username, password, hosting_url,
                  local_site_name=None, *args, **kwargs):
        """Authorizes the Beanstalk repository.
        Beanstalk uses HTTP Basic Auth for the API, so this will store the
        provided password, encrypted, for use in later API requests.
        """
        self.account.data['password'] = encrypt_password(password)
        self.account.save()
    def is_authorized(self):
        """Determines if the account has supported authorization tokens.
        This just checks if there's a password set on the account.
        """
        return self.account.data.get('password', None) is not None
    def get_password(self):
        """Returns the password for this account.
        This is needed for API calls and for Subversion.
        """
        return decrypt_password(self.account.data['password'])
    def get_file(self, repository, path, revision, base_commit_id=None,
                 *args, **kwargs):
        """Fetches a file from Beanstalk.
        This will perform an API request to fetch the contents of a file.
        If using Git, this will expect a base commit ID to be provided.
        """
        try:
            contents = self._api_get_node(repository, path, revision,
                                          base_commit_id, contents=True)
        except URLError:
            # HTTPError subclasses URLError, so 404s are caught here too.
            raise FileNotFoundError(path, revision)
        # On Subversion repositories, we may need to expand properties within
        # the file, like ``$Id$``. We only want to do this if we see keywords.
        if repository.tool.name == 'Subversion':
            contents = self._normalize_svn_file_content(
                repository, contents, path, revision)
        return contents
    def get_file_exists(self, repository, path, revision, base_commit_id=None,
                        *args, **kwargs):
        """Determines if a file exists.
        This will perform an API request to fetch the metadata for a file.
        If using Git, this will expect a base commit ID to be provided.
        """
        try:
            self._api_get_node(repository, path, revision, base_commit_id)
            return True
        except (HTTPError, URLError, FileNotFoundError):
            return False
    def normalize_patch(self, repository, patch, filename, revision):
        """Normalize a diff/patch file before it's applied.
        If working with a Subversion repository, then diffs being put up
        for review may have expanded keywords in them. This may occur if
        the file was diffed against a repository that did not (at that time)
        list those keywords in the ``svn:keywords`` property. We need to
        collapse these down.
        For non-Subversion repositories, the default behavior of the
        repository backend is used.
        Args:
            repository (reviewboard.scmtools.models.Repository):
                The repository the patch is meant to apply to.
            patch (bytes):
                The diff/patch file to normalize.
            filename (unicode):
                The name of the file being changed in the diff.
            revision (unicode):
                The revision of the file being changed in the diff.
        Returns:
            bytes:
            The resulting diff/patch file.
        """
        if repository.tool.name == 'Subversion':
            return self._normalize_svn_file_content(
                repository, patch, filename, revision)
        else:
            return super(Beanstalk, self).normalize_patch(
                repository, patch, filename, revision)
    def _normalize_svn_file_content(self, repository, contents, path,
                                    revision):
        """Post-process a file pertaining to a Subversion repository.
        This is common code that handles collapsing keywords for files fetched
        from or diffed against a Subversion repository.
        Args:
            repository (reviewboard.scmtools.models.Repository):
                The repository the content is for.
            contents (bytes):
                The file content to normalize.
            path (unicode):
                The path to the file.
            revision (unicode):
                The revision of the file.
        Returns:
            bytes:
            The resulting file.
        """
        if has_expanded_svn_keywords(contents):
            try:
                props = self._api_get_svn_props(repository, path, revision)
            except URLError:
                # Best-effort: if the props lookup fails, return the
                # contents unmodified rather than erroring out.
                props = None
            if props and 'svn:keywords' in props:
                contents = collapse_svn_keywords(
                    contents,
                    props['svn:keywords'].encode('utf-8'))
        return contents
    def _api_get_repository(self, account_domain, repository_name):
        """Fetch repository information from the Beanstalk API."""
        url = self._build_api_url(account_domain,
                                  'repositories/%s.json' % repository_name)
        return self._api_get(url)
    def _api_get_node(self, repository, path, revision, base_commit_id,
                      contents=False):
        """Fetch file metadata or raw contents from the Beanstalk API.

        Returns metadata (parsed JSON) by default, or raw bytes when
        ``contents`` is set. Raises on HTTP/URL errors from the API.
        """
        # Unless we're fetching raw content, we optimistically want to
        # grab the metadata for the file. That's going to be a lot smaller
        # than the file contents in most cases. However, we can only do that
        # with a base_commit_id. If we don't have that, we fall back on
        # fetching the full file contents.
        is_git = (repository.tool.name == 'Git')
        if is_git and (contents or not base_commit_id):
            url_path = ('blob?id=%s&name=%s'
                        % (quote(revision), quote(os.path.basename(path))))
            raw_content = True
        else:
            if is_git:
                expected_revision = base_commit_id
            else:
                expected_revision = revision
            url_path = ('node.json?path=%s&revision=%s'
                        % (quote(path), quote(expected_revision)))
            if contents:
                url_path += '&contents=true'
            raw_content = False
        url = self._build_api_url(
            self._get_repository_account_domain(repository),
            'repositories/%s/%s'
            % (repository.extra_data['beanstalk_repo_name'], url_path))
        result = self._api_get(url, raw_content=raw_content)
        if not raw_content and contents:
            result = result['contents'].encode('utf-8')
        return result
    def _api_get_svn_props(self, repository, path, revision):
        """Return the SVN properties for a file in the repository.
        This will query for all SVN properties set for a particular file,
        returning them as a dictionary mapping property names to values.
        Args:
            repository (reviewboard.scmtools.models.Repository):
                The Subversion repository containing the file.
            path (unicode):
                The path to the file to retrieve properties for.
            revision (unicode):
                The revision of the file.
        Returns:
            dict:
            A mapping of property names to values.
        """
        url = self._build_api_url(
            self._get_repository_account_domain(repository),
            'repositories/%s/props.json?path=%s&revision=%s'
            % (repository.extra_data['beanstalk_repo_name'],
               quote(path), quote(revision)))
        result = self._api_get(url)
        return result.get('svn_properties', {})
    def _build_api_url(self, account_domain, url):
        """Return a full Beanstalk API URL for the given account and path."""
        return 'https://%s.beanstalkapp.com/api/%s' % (account_domain, url)
    def _get_repository_account_domain(self, repository):
        """Return the Beanstalk account domain stored for a repository."""
        return repository.extra_data['beanstalk_account_domain']
    def _api_get(self, url, raw_content=False):
        """Perform an authenticated GET against the Beanstalk API.

        Returns raw bytes when ``raw_content`` is set, or parsed JSON
        otherwise. API error responses are re-raised with the error
        messages from the response body when available.
        """
        try:
            response = self.client.http_get(
                url,
                username=self.account.username,
                password=self.get_password())
            if raw_content:
                return response.data
            else:
                return response.json
        except HTTPError as e:
            data = e.read()
            try:
                rsp = json.loads(data)
            except Exception:
                rsp = None
            if rsp and 'errors' in rsp:
                raise Exception('; '.join(rsp['errors']))
            else:
                raise Exception(str(e))
| mit | a44e9e29202701e9b0ffaeb550738830 | 35.31 | 79 | 0.570366 | 4.57766 | false | false | false | false |
reviewboard/reviewboard | reviewboard/webapi/resources/root_general_comment.py | 1 | 5492 | """Root general comments API resource.
Version Added:
5.0
"""
from django.db.models import Q
from djblets.util.decorators import augment_method_from
from djblets.webapi.decorators import webapi_request_fields
from djblets.webapi.fields import (BooleanFieldType,
DateTimeFieldType,
StringFieldType)
from reviewboard.accounts.models import User
from reviewboard.reviews.models import GeneralComment
from reviewboard.webapi.decorators import webapi_check_local_site
from reviewboard.webapi.resources.base_review_general_comment import \
BaseReviewGeneralCommentResource
class RootGeneralCommentResource(BaseReviewGeneralCommentResource):
    """Provide information on general comments.

    This is a top level endpoint that allows you to list and query all
    general comments in the system, across different review requests.

    Version Added:
        5.0
    """

    added_in = '5.0'
    allowed_methods = ('GET',)
    model = GeneralComment
    uri_template_name = 'all_general_comment'

    def get_queryset(self, request, is_list=False, *args, **kwargs):
        """Return a queryset for GeneralComment models.

        By default, this returns all general comments that are accessible
        to the requester.

        The queryset can be further filtered by one or more arguments
        in the URL. These are listed in :py:meth:`get_list`.

        Args:
            request (django.http.HttpRequest):
                The HTTP request from the client.

            is_list (bool, unused):
                Whether the coming HTTP request is request for list resources.

            *args (tuple):
                Additional positional arguments.

            **kwargs (dict):
                Additional keyword arguments.

        Returns:
            django.db.models.query.QuerySet:
                A queryset for GeneralComment models.
        """
        local_site = request.local_site
        q = Q()

        if 'is-reply' in request.GET:
            val = request.GET.get('is-reply')

            # NOTE(review): ``val`` is the raw query-string value, so any
            # non-empty string (including 'false' or '0') is truthy here.
            # Clients must omit the parameter or pass an empty value to
            # select non-replies -- confirm this matches the documented
            # API contract for the ``is-reply`` field below.
            q &= Q(reply_to_id__isnull=(not val))

        if 'last-updated-from' in request.GET:
            q &= Q(timestamp__gte=request.GET.get('last-updated-from'))

        if 'last-updated-to' in request.GET:
            # Note the asymmetry: the upper bound is exclusive (__lt)
            # while the lower bound above is inclusive (__gte).
            q &= Q(timestamp__lt=request.GET.get('last-updated-to'))

        if 'review-id' in request.GET:
            q &= Q(review=request.GET.get('review-id'))

        if 'review-request-id' in request.GET:
            review_request_id = request.GET.get('review-request-id')

            if local_site is None:
                q &= Q(review__review_request=review_request_id)
            else:
                # On a Local Site, the ID in the URL is the site-local ID,
                # so match on both the local ID and the site itself.
                q &= (Q(review__review_request__local_id=review_request_id) &
                      Q(review__review_request__local_site=local_site))

        if 'user' in request.GET:
            # Resolve the username to a primary key up front so the main
            # query filters on the user's ID.
            user = list((
                User.objects
                .filter(username=request.GET.get('user'))
                .values_list('pk', flat=True)
            ))

            if user:
                q &= Q(review__user=user[0])
            else:
                # No such user exists: short-circuit to an empty queryset.
                return self.model.objects.none()

        return self.model.objects.accessible(request.user,
                                             extra_query=q,
                                             local_site=local_site)

    @webapi_check_local_site
    @webapi_request_fields(
        optional={
            'is-reply': {
                'type': BooleanFieldType,
                'description': 'Determine whether to return general '
                               'comments that are replies or not.',
            },
            'last-updated-to': {
                'type': DateTimeFieldType,
                'description': "The date/time that all general "
                               "comments must be last updated before. This is "
                               "compared against the general "
                               "comment's ``timestamp`` field. This must be "
                               "a valid :term:`date/time format`.",
            },
            'last-updated-from': {
                'type': DateTimeFieldType,
                'description': "The earliest date/time the general "
                               "comments could be last updated. This is "
                               "compared against the general "
                               "comment's ``timestamp`` field. This must "
                               "be a valid :term:`date/time format`.",
            },
            'review-id': {
                'type': StringFieldType,
                'description': 'The review ID that the general '
                               'comments must be belonged to.',
            },
            'review-request-id': {
                'type': StringFieldType,
                'description': 'The review request ID that the general '
                               'comments must be belonged to.',
            },
            'user': {
                'type': StringFieldType,
                'description': 'The username of the user that the general '
                               'comments must be owned by.',
            },
        },
        allow_unknown=True
    )
    @augment_method_from(BaseReviewGeneralCommentResource)
    def get_list(self, *args, **kwargs):
        """Return the list of general comments."""
        pass
# Module-level singleton instance of the resource.
root_general_comment_resource = RootGeneralCommentResource()
| mit | f4f1f89a9466c262df32bfceb2df79e3 | 35.370861 | 79 | 0.539694 | 4.682012 | false | false | false | false |
reviewboard/reviewboard | reviewboard/accounts/pages.py | 1 | 4183 | import logging
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from djblets.configforms.mixins import DynamicConfigPageMixin
from djblets.configforms.pages import ConfigPage
from djblets.configforms.registry import ConfigPageRegistry
from djblets.registries.errors import ItemLookupError
from djblets.registries.mixins import ExceptionFreeGetterMixin
from reviewboard.admin.server import build_server_url
from reviewboard.accounts.forms.pages import (AccountSettingsForm,
APITokensForm,
AvatarSettingsForm,
ChangePasswordForm,
PrivacyForm,
GroupsForm,
OAuthApplicationsForm,
OAuthTokensForm,
ProfileForm)
logger = logging.getLogger(__name__)
class AccountPageRegistry(ExceptionFreeGetterMixin, ConfigPageRegistry):
    """A registry for managing account pages."""

    lookup_attrs = ('page_id',)

    def get_defaults(self):
        """Return the default page classes.

        Returns:
            type: The page classes, as subclasses of :py:class:`AccountPage`.
        """
        return (PrivacyPage, ProfilePage, AccountSettingsPage, GroupsPage,
                AuthenticationPage, OAuth2Page)

    def unregister(self, page_class):
        """Unregister the page class.

        Args:
            page_class (type):
                The page class to unregister.

        Raises:
            ItemLookupError:
                This exception is raised if the specified page class cannot
                be found.
        """
        try:
            # Modern zero-argument super(), replacing the legacy
            # super(AccountPageRegistry, self) form.
            super().unregister(page_class)
        except ItemLookupError as e:
            logger.error(e)

            # Re-raise with a bare ``raise`` so the original traceback is
            # propagated unchanged (the previous ``raise e`` appended this
            # frame's re-raise site to the traceback).
            raise
class AccountPage(DynamicConfigPageMixin, ConfigPage):
    """Base class for a page of forms in the My Account page.

    Each AccountPage is represented in the My Account page by an entry
    in the navigation sidebar. When the user has navigated to that page,
    any forms shown on the page will be displayed.

    Extensions can provide custom pages in order to offer per-user
    customization.
    """

    registry = AccountPageRegistry()

    @classmethod
    def get_absolute_url(cls):
        """Return the absolute URL of the page.

        Returns:
            unicode:
            The absolute URL of the page.
        """
        assert cls.page_id

        # Anchor the page's ID onto the absolute URL of the My Account
        # page, producing a direct link to this page's section.
        base_url = build_server_url(reverse('user-preferences'))

        return '%s#%s' % (base_url, cls.page_id)
class AccountSettingsPage(AccountPage):
    """A page containing the primary settings the user can customize."""

    #: The unique ID of this page within the My Account UI.
    page_id = 'settings'

    #: The title shown in the navigation sidebar.
    page_title = _('Settings')

    #: The forms rendered on this page.
    form_classes = [AccountSettingsForm]
class AuthenticationPage(AccountPage):
    """A page containing authentication-related forms.

    By default, this just shows the Change Password form, but extensions
    can provide additional forms for display.
    """

    #: The unique ID of this page within the My Account UI.
    page_id = 'authentication'

    #: The title shown in the navigation sidebar.
    page_title = _('Authentication')

    #: The forms rendered on this page.
    form_classes = [ChangePasswordForm, APITokensForm, OAuthTokensForm]
class ProfilePage(AccountPage):
    """A page containing settings for the user's profile."""

    #: The unique ID of this page within the My Account UI.
    page_id = 'profile'

    #: The title shown in the navigation sidebar.
    page_title = _('Profile')

    #: The forms rendered on this page.
    form_classes = [ProfileForm, AvatarSettingsForm]
class GroupsPage(AccountPage):
    """A page containing a filterable list of groups to join."""

    #: The unique ID of this page within the My Account UI.
    page_id = 'groups'

    #: The title shown in the navigation sidebar.
    page_title = _('Groups')

    #: The forms rendered on this page.
    form_classes = [GroupsForm]
class OAuth2Page(AccountPage):
    """A page containing a list of OAuth2 applications to manage."""

    #: The unique ID of this page within the My Account UI.
    page_id = 'oauth2'

    #: The title shown in the navigation sidebar.
    #:
    #: NOTE(review): unlike the sibling pages, this title is not wrapped
    #: in ``_()`` -- it is a product name, but confirm it was left
    #: untranslated intentionally.
    page_title = 'OAuth2 Applications'

    #: The forms rendered on this page.
    form_classes = [OAuthApplicationsForm]
class PrivacyPage(AccountPage):
    """A page containing information on a user's privacy rights."""

    #: The unique ID of this page within the My Account UI.
    page_id = 'privacy'

    #: The title shown in the navigation sidebar.
    page_title = _('My Privacy Rights')

    #: The forms rendered on this page.
    form_classes = [PrivacyForm]
| mit | 5594bbe5ce27b1164b628a62ea772ad9 | 29.532847 | 77 | 0.619651 | 4.715896 | false | true | false | false |
reviewboard/reviewboard | reviewboard/reviews/tests/test_root_view.py | 1 | 2709 | """Unit tests for reviewboard.reviews.views.RootView."""
from djblets.testing.decorators import add_fixtures
from reviewboard.site.urlresolvers import local_site_reverse
from reviewboard.testing import TestCase
class RootViewTests(TestCase):
    """Unit tests for reviewboard.reviews.views.RootView."""

    fixtures = ['test_users']

    def test_with_anonymous_with_private_access(self):
        """Testing RootView with anonymous user with anonymous access not
        allowed
        """
        # With site-wide login required, an anonymous hit on the root URL
        # must redirect to the login page, preserving '/' in ?next=.
        with self.siteconfig_settings({'auth_require_sitewide_login': True},
                                      reload_settings=False):
            response = self.client.get(local_site_reverse('root'))
            self.assertRedirects(response, '/account/login/?next=/')

    def test_with_anonymous_with_public_access(self):
        """Testing RootView with anonymous user with anonymous access allowed
        """
        # Anonymous users land on the "All Review Requests" page.
        response = self.client.get(local_site_reverse('root'))
        self.assertRedirects(response, '/r/')

    def test_with_logged_in(self):
        """Testing RootView with authenticated user"""
        # Authenticated users are sent to their dashboard instead.
        self.assertTrue(self.client.login(username='doc', password='doc'))

        response = self.client.get(local_site_reverse('root'))
        self.assertRedirects(response, '/dashboard/')

    @add_fixtures(['test_site'])
    def test_with_anonymous_with_local_site_private(self):
        """Testing RootView with anonymous user with private Local Site"""
        # A private Local Site requires login; the site-prefixed URL is
        # carried through in ?next=.
        response = self.client.get(
            local_site_reverse('root', local_site_name=self.local_site_name))
        self.assertRedirects(response,
                             '/account/login/?next=/s/%s/'
                             % self.local_site_name)

    @add_fixtures(['test_site'])
    def test_with_anonymous_with_local_site_public(self):
        """Testing RootView with anonymous user with public Local Site"""
        # Flip the fixture's Local Site to public so anonymous access is
        # permitted for this test.
        local_site = self.get_local_site(name=self.local_site_name)
        local_site.public = True
        local_site.save()

        response = self.client.get(local_site_reverse('root',
                                                      local_site=local_site))
        self.assertRedirects(response, '/s/%s/r/' % self.local_site_name)

    @add_fixtures(['test_site'])
    def test_with_logged_in_with_local_site(self):
        """Testing RootView with authenticated user with Local Site"""
        self.assertTrue(self.client.login(username='doc', password='doc'))

        response = self.client.get(
            local_site_reverse('root', local_site_name=self.local_site_name))
        self.assertRedirects(response,
                             '/s/%s/dashboard/' % self.local_site_name)
| mit | 48289702cbd4e1d0bd8b38ab819cbd64 | 37.7 | 77 | 0.628276 | 4.2 | false | true | false | false |
reviewboard/reviewboard | reviewboard/search/search_backends/whoosh.py | 1 | 2205 | """A backend for the Whoosh search engine."""
import os
from django import forms
from django.conf import settings
from django.core.exceptions import ValidationError
from django.utils.translation import gettext_lazy as _
from reviewboard.search.search_backends.base import (SearchBackend,
SearchBackendForm)
class WhooshConfigForm(SearchBackendForm):
    """A form for configuring the Whoosh search backend."""

    search_index_file = forms.CharField(
        label=_("Search index directory"),
        help_text=_("The directory that search index data should be stored "
                    "in."),
        widget=forms.TextInput(attrs={'size': '80'}))

    def clean_search_index_file(self):
        """Clean the search_index_file field.

        The configured path is stripped of surrounding whitespace. A
        non-empty path must be absolute and, if it already exists on disk,
        must be writable by the web server.

        Returns:
            unicode:
            The cleaned search index path.

        Raises:
            django.core.exceptions.ValidationError:
                The path is relative, or exists but is not writable.
        """
        path = self.cleaned_data['search_index_file'].strip()

        # An empty value is allowed; there's nothing further to validate.
        if not path:
            return path

        if not os.path.isabs(path):
            raise ValidationError(
                _("The search index path must be absolute."))

        if os.path.exists(path) and not os.access(path, os.W_OK):
            raise ValidationError(
                _('The search index path is not writable. Make sure the '
                  'web server has write access to it and its parent '
                  'directory.'))

        return path
class WhooshBackend(SearchBackend):
    """The Whoosh search backend."""

    search_backend_id = 'whoosh'
    name = _('Whoosh')
    haystack_backend_name = 'haystack.backends.whoosh_backend.WhooshEngine'
    config_form_class = WhooshConfigForm
    form_field_map = {
        'search_index_file': 'PATH',
    }

    @property
    def default_settings(self):
        """The default settings for the backend.

        This is dynamic, in order to account for a change to
        ``SITE_DATA_DIR``. In production, this value shouldn't change, but
        it does in unit tests.
        """
        # Compute the index location from the current site data directory
        # each time, rather than caching it at class definition.
        index_path = os.path.join(settings.SITE_DATA_DIR, 'search-index')

        return {
            'PATH': index_path,
            'STORAGE': 'file',
        }
| mit | 4839e492004943f38ff84861a15b254d | 31.910448 | 77 | 0.598186 | 4.366337 | false | true | false | false |
reviewboard/reviewboard | reviewboard/reviews/views/attachments.py | 1 | 5268 | """Views for reviewing file attachments (and legacy screenshots)."""
import logging
from typing import Optional
from django.db.models import Q
from django.http import Http404, HttpRequest, HttpResponse
from django.shortcuts import get_object_or_404
from django.views.generic.base import View
from reviewboard.accounts.mixins import UserProfileRequiredViewMixin
from reviewboard.attachments.models import FileAttachment
from reviewboard.reviews.models import Screenshot
from reviewboard.reviews.ui.base import FileAttachmentReviewUI
from reviewboard.reviews.ui.screenshot import LegacyScreenshotReviewUI
from reviewboard.reviews.views.mixins import ReviewRequestViewMixin
logger = logging.getLogger(__name__)
class ReviewFileAttachmentView(ReviewRequestViewMixin,
                               UserProfileRequiredViewMixin,
                               View):
    """Displays a file attachment with a review UI."""

    def get(
        self,
        request: HttpRequest,
        file_attachment_id: int,
        file_attachment_diff_id: Optional[int] = None,
        *args,
        **kwargs,
    ) -> HttpResponse:
        """Handle a HTTP GET request.

        Args:
            request (django.http.HttpRequest):
                The HTTP request from the client.

            file_attachment_id (int):
                The ID of the file attachment to review.

            file_attachment_diff_id (int, optional):
                The ID of the file attachment to diff against.

            *args (tuple):
                Positional arguments passed to the handler.

            **kwargs (dict):
                Keyword arguments passed to the handler.

        Returns:
            django.http.HttpResponse:
                The resulting HTTP response from the handler.
        """
        review_request = self.review_request
        draft = review_request.get_draft(request.user)

        # Make sure the attachment returned is part of either the review
        # request or an accessible draft.
        review_request_q = (Q(review_request=review_request) |
                            Q(inactive_review_request=review_request))

        if draft:
            review_request_q |= Q(drafts=draft) | Q(inactive_drafts=draft)

        file_attachment = get_object_or_404(
            FileAttachment,
            Q(pk=file_attachment_id) & review_request_q)

        review_ui = file_attachment.review_ui

        if not review_ui:
            # Fall back to the generic file attachment review UI when the
            # attachment doesn't provide a specialized one.
            review_ui = FileAttachmentReviewUI(review_request, file_attachment)

        if file_attachment_diff_id:
            # The revision to diff against must share the attachment
            # history and be accessible on this review request too.
            file_attachment_revision = get_object_or_404(
                FileAttachment,
                Q(pk=file_attachment_diff_id) &
                Q(attachment_history=file_attachment.attachment_history) &
                review_request_q)
            review_ui.set_diff_against(file_attachment_revision)

        try:
            is_enabled_for = review_ui.is_enabled_for(
                user=request.user,
                review_request=review_request,
                file_attachment=file_attachment)
        except Exception as e:
            # A broken (possibly third-party) review UI shouldn't take
            # down the page: log the failure and treat it as disabled.
            logger.error('Error when calling is_enabled_for for '
                         'FileAttachmentReviewUI %r: %s',
                         review_ui, e, exc_info=True,
                         extra={'request': request})
            is_enabled_for = False

        if review_ui and is_enabled_for:
            return review_ui.render_to_response(request)
        else:
            raise Http404
class ReviewScreenshotView(ReviewRequestViewMixin,
                           UserProfileRequiredViewMixin,
                           View):
    """Displays a review UI for a screenshot.

    Screenshots are a legacy feature, predating file attachments. While they
    can't be created anymore, this view does allow for reviewing screenshots
    uploaded in old versions.
    """

    def get(
        self,
        request: HttpRequest,
        screenshot_id: int,
        *args,
        **kwargs,
    ) -> HttpResponse:
        """Handle a HTTP GET request.

        Args:
            request (django.http.HttpRequest):
                The HTTP request from the client.

            screenshot_id (int):
                The ID of the screenshot to review.

            *args (tuple):
                Positional arguments passed to the handler.

            **kwargs (dict):
                Keyword arguments passed to the handler.

        Returns:
            django.http.HttpResponse:
                The resulting HTTP response from the handler.
        """
        review_request = self.review_request
        user_draft = review_request.get_draft(request.user)

        # Only allow screenshots attached to this review request, or to a
        # draft of it that the requesting user can access.
        accessible_q = (Q(review_request=review_request) |
                        Q(inactive_review_request=review_request))

        if user_draft:
            accessible_q |= (Q(drafts=user_draft) |
                             Q(inactive_drafts=user_draft))

        screenshot = get_object_or_404(Screenshot,
                                       Q(pk=screenshot_id) & accessible_q)

        return LegacyScreenshotReviewUI(
            review_request, screenshot).render_to_response(request)
| mit | 807207aec92f291725fd3c06fe2c1872 | 33.207792 | 79 | 0.604214 | 4.65371 | false | false | false | false |
reviewboard/reviewboard | reviewboard/scmtools/models.py | 1 | 51341 | import logging
import uuid
from functools import wraps
from importlib import import_module
from time import time
from urllib.parse import quote
from django.contrib.auth.models import User
from django.core.cache import cache
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.db import IntegrityError, models
from django.db.models import Q
from django.utils import timezone
from django.utils.translation import gettext, gettext_lazy as _
from djblets.cache.backend import cache_memoize, make_cache_key
from djblets.db.fields import JSONField
from djblets.log import log_timed
from djblets.util.decorators import cached_property
from reviewboard.deprecation import RemovedInReviewBoard60Warning
from reviewboard.hostingsvcs.errors import MissingHostingServiceError
from reviewboard.hostingsvcs.models import HostingServiceAccount
from reviewboard.hostingsvcs.service import get_hosting_service
from reviewboard.scmtools import scmtools_registry
from reviewboard.scmtools.core import FileLookupContext
from reviewboard.scmtools.crypto_utils import (decrypt_password,
encrypt_password)
from reviewboard.scmtools.managers import RepositoryManager, ToolManager
from reviewboard.scmtools.signals import (checked_file_exists,
checking_file_exists,
fetched_file, fetching_file)
from reviewboard.site.models import LocalSite
logger = logging.getLogger(__name__)
def _deprecated_proxy_property(property_name):
"""Implement a proxy property for tools.
The Tool class includes a number of properties that are proxied from the
scmtool class, and we'd like to transition away from them in favor of
having users get that information directly from the scmtool instead.
Version Added:
5.0
Args:
property_name (str):
The name of the property to proxy.
Returns:
property:
A property that will proxy the value and raise a warning.
"""
@wraps(property_name)
def _wrapped(tool):
RemovedInReviewBoard60Warning.warn(
'The Tool.%s property is deprecated and will be removed in '
'Review Board 6.0. Use the property on the repository or '
'SCMTool class instead.'
% property_name)
return getattr(tool.scmtool_class, property_name)
return property(_wrapped)
class Tool(models.Model):
    """A configured source code management tool.

    Each :py:class:`~reviewboard.scmtools.core.SCMTool` used by repositories
    must have a corresponding :py:class:`Tool` entry. These provide information
    on the capabilities of the tool, and accessors to construct a tool for
    a repository.

    Deprecated:
        5.0:
        This model is now obsolete. Any usage of this should be updated to use
        equivalent methods on the Repository or SCMTool instead.
    """

    #: The display name of the tool.
    name = models.CharField(max_length=32, unique=True)

    #: The dotted Python path to the SCMTool subclass.
    class_name = models.CharField(max_length=128, unique=True)

    objects = ToolManager()

    # Templates can't access variables on a class properly. It'll attempt to
    # instantiate the class, which will fail without the necessary parameters.
    # So, we use these as convenient wrappers to do what the template can't do.

    #: Whether or not the SCMTool supports review requests with history.
    #:
    #: See :py:attr:`SCMTool.supports_history
    #: <reviewboard.scmtools.core.SCMTool.supports_history>` for details.
    supports_history = _deprecated_proxy_property('supports_history')

    #: Whether custom URL masks can be defined to fetching file contents.
    #:
    #: See :py:attr:`SCMTool.supports_raw_file_urls
    #: <reviewboard.scmtools.core.SCMTool.supports_raw_file_urls>` for details.
    supports_raw_file_urls = _deprecated_proxy_property(
        'supports_raw_file_urls')

    #: Whether ticket-based authentication is supported.
    #:
    #: See :py:attr:`SCMTool.supports_ticket_auth
    #: <reviewboard.scmtools.core.SCMTool.supports_ticket_auth>` for details.
    supports_ticket_auth = _deprecated_proxy_property('supports_ticket_auth')

    #: Whether server-side pending changesets are supported.
    #:
    #: See :py:attr:`SCMTool.supports_pending_changesets
    #: <reviewboard.scmtools.core.SCMTool.supports_pending_changesets>` for
    #: details.
    supports_pending_changesets = _deprecated_proxy_property(
        'supports_pending_changesets')

    #: Overridden help text for the configuration form fields.
    #:
    #: See :py:attr:`SCMTool.field_help_text
    #: <reviewboard.scmtools.core.SCMTool.field_help_text>` for details.
    #:
    #: Unlike the proxies above, this does not emit a deprecation warning
    #: on access.
    field_help_text = property(
        lambda x: x.scmtool_class.field_help_text)

    @property
    def scmtool_id(self):
        """The unique ID for the SCMTool.

        Type:
            unicode
        """
        return self.scmtool_class.scmtool_id

    def get_scmtool_class(self):
        """Return the configured SCMTool class.

        Returns:
            type:
            The subclass of :py:class:`~reviewboard.scmtools.core.SCMTool`
            backed by this Tool entry.

        Raises:
            django.core.exceptions.ImproperlyConfigured:
                The SCMTool could not be found.
        """
        # The class lookup is cached on the instance so the import cost is
        # only paid once per Tool object.
        if not hasattr(self, '_scmtool_class'):
            path = self.class_name

            # Split "package.module.ClassName" into the module path and the
            # class attribute name at the final dot.
            i = path.rfind('.')
            module, attr = path[:i], path[i + 1:]

            try:
                mod = import_module(module)
            except ImportError as e:
                raise ImproperlyConfigured(
                    'Error importing SCM Tool %s: "%s"' % (module, e))

            try:
                self._scmtool_class = getattr(mod, attr)
            except AttributeError:
                raise ImproperlyConfigured(
                    'Module "%s" does not define a "%s" SCM Tool'
                    % (module, attr))

        return self._scmtool_class

    #: Property access to the cached SCMTool class.
    scmtool_class = property(get_scmtool_class)

    def __str__(self):
        """Return the name of the tool.

        Returns:
            unicode:
            The name of the tool.
        """
        return self.name

    class Meta:
        db_table = 'scmtools_tool'
        ordering = ('name',)
        verbose_name = _('Tool')
        verbose_name_plural = _('Tools')
class Repository(models.Model):
"""A configured external source code repository.
Each configured Repository entry represents a source code repository that
Review Board can communicate with as part of the diff uploading and
viewing process.
Repositories are backed by a
:py:class:`~reviewboard.scmtools.core.SCMTool`, which functions as a client
for the type of repository and can fetch files, load lists of commits and
branches, and more.
Access control is managed by a combination of the :py:attr:`public`,
:py:attr:`users`, and :py:attr:`groups` fields. :py:attr:`public` controls
whether a repository is publicly-accessible by all users on the server.
When ``False``, only users explicitly listed in :py:attr:`users` or users
who are members of the groups listed in :py:attr:`groups` will be able
to access the repository or view review requests posted against it.
"""
#: The amount of time branches are cached, in seconds.
#:
#: Branches are cached for 5 minutes.
BRANCHES_CACHE_PERIOD = 60 * 5
#: The short period of time to cache commit information, in seconds.
#:
#: Some commit information (such as retrieving the latest commits in a
#: repository) should result in information cached only for a short
#: period of time. This is set to cache for 5 minutes.
COMMITS_CACHE_PERIOD_SHORT = 60 * 5
#: The long period of time to cache commit information, in seconds.
#:
#: Commit information that is unlikely to change should be kept around
#: for a longer period of time. This is set to cache for 1 day.
COMMITS_CACHE_PERIOD_LONG = 60 * 60 * 24 # 1 day
#: The fallback encoding for text-based files in repositories.
#:
#: This is used if the file isn't valid UTF-8, and if the repository
#: doesn't specify a list of encodings.
FALLBACK_ENCODING = 'iso-8859-15'
#: The error message used to indicate that a repository name conflicts.
NAME_CONFLICT_ERROR = _('A repository with this name already exists')
#: The error message used to indicate that a repository path conflicts.
PATH_CONFLICT_ERROR = _('A repository with this path already exists')
#: The prefix used to indicate an encrypted password.
#:
#: This is used to indicate whether a stored password is in encrypted
#: form or plain text form.
ENCRYPTED_PASSWORD_PREFIX = '\t'
name = models.CharField(_('Name'), max_length=255)
path = models.CharField(_('Path'), max_length=255)
mirror_path = models.CharField(max_length=255, blank=True)
raw_file_url = models.CharField(
_('Raw file URL mask'),
max_length=255,
blank=True)
username = models.CharField(max_length=32, blank=True)
encrypted_password = models.CharField(max_length=128, blank=True,
db_column='password')
extra_data = JSONField(null=True)
tool = models.ForeignKey(Tool, on_delete=models.CASCADE,
related_name='repositories')
scmtool_id = models.CharField(max_length=255, null=True, blank=True)
hosting_account = models.ForeignKey(
HostingServiceAccount,
on_delete=models.CASCADE,
related_name='repositories',
verbose_name=_('Hosting service account'),
blank=True,
null=True)
bug_tracker = models.CharField(
_('Bug tracker URL'),
max_length=256,
blank=True,
help_text=_("This should be the full path to a bug in the bug tracker "
"for this repository, using '%s' in place of the bug ID."))
encoding = models.CharField(
max_length=32,
blank=True,
help_text=_("The encoding used for files in this repository. This is "
"an advanced setting and should only be used if you're "
"sure you need it."))
visible = models.BooleanField(
_('Show this repository'),
default=True,
help_text=_('Use this to control whether or not a repository is '
'shown when creating new review requests. Existing '
'review requests are unaffected.'))
archived = models.BooleanField(
_('Archived'),
default=False,
help_text=_("Archived repositories do not show up in lists of "
"repositories, and aren't open to new review requests."))
archived_timestamp = models.DateTimeField(null=True, blank=True)
# Access control
local_site = models.ForeignKey(LocalSite,
on_delete=models.CASCADE,
verbose_name=_('Local site'),
blank=True,
null=True)
public = models.BooleanField(
_('publicly accessible'),
default=True,
help_text=_('Review requests and files on public repositories are '
'visible to anyone. Private repositories must explicitly '
'list the users and groups that can access them.'))
users = models.ManyToManyField(
User,
limit_choices_to={'is_active': True},
blank=True,
related_name='repositories',
verbose_name=_('Users with access'),
help_text=_('A list of users with explicit access to the repository.'))
review_groups = models.ManyToManyField(
'reviews.Group',
limit_choices_to={'invite_only': True},
blank=True,
related_name='repositories',
verbose_name=_('Review groups with access'),
help_text=_('A list of invite-only review groups whose members have '
'explicit access to the repository.'))
hooks_uuid = models.CharField(
_('Hooks UUID'),
max_length=32,
null=True,
blank=True,
help_text=_('Unique identifier used for validating incoming '
'webhooks.'))
objects = RepositoryManager()
@property
def password(self):
"""The password for the repository.
If a password is stored and encrypted, it will be decrypted and
returned.
If the stored password is in plain-text, then it will be encrypted,
stored in the database, and returned.
"""
password = self.encrypted_password
# NOTE: Due to a bug in 2.0.9, it was possible to get a string of
# "\tNone", indicating no password. We have to check for this.
if not password or password == '\tNone':
password = None
elif password.startswith(self.ENCRYPTED_PASSWORD_PREFIX):
password = password[len(self.ENCRYPTED_PASSWORD_PREFIX):]
if password:
password = decrypt_password(password)
else:
password = None
else:
# This is a plain-text password. Convert it.
self.password = password
self.save(update_fields=['encrypted_password'])
return password
@password.setter
def password(self, value):
"""Set the password for the repository.
The password will be stored as an encrypted value, prefixed with a
tab character in order to differentiate between legacy plain-text
passwords.
"""
if value:
value = '%s%s' % (self.ENCRYPTED_PASSWORD_PREFIX,
encrypt_password(value))
else:
value = ''
self.encrypted_password = value
@property
def scmtool_class(self):
"""The SCMTool subclass used for this repository.
Type:
type:
A subclass of :py:class:`~reviewboard.scmtools.core.SCMTool`.
Raises:
django.core.exceptions.ImproperlyConfigured:
The SCMTool could not be found, due to missing packages or
extensions. Details are in the message, and the failure is
logged.
"""
# We'll optimistically cache this, mirroring behavior in
# Tool.get_scmtool_class(). Note that we only cache below if we get
# a non-None value, as None can occur while an instance is being set
# up.
if hasattr(self, '_scmtool_class'):
return self._scmtool_class
scmtool_id = self.scmtool_id
if scmtool_id:
tool = scmtools_registry.get_by_id(scmtool_id)
if tool is not None:
self._scmtool_class = tool
return tool
logger.error('Error finding registered SCMTool "%s" in '
'repository ID %s.',
scmtool_id, self.pk)
elif not self.tool_id:
# For backwards-compatibility reasons, we return None when there's
# no Tool object associated.
return None
# We use ImproperlyConfigured here for compatibility with the
# the call to Tool.get_scmtool_class() in Review Board < 5.0.
raise ImproperlyConfigured(
gettext(
'There was an error loading the SCMTool "%s" needed by this '
'repository. The administrator should ensure all necessary '
'packages and extensions are installed.'
)
% (scmtool_id or self.tool.name))
@cached_property
def hosting_service(self):
"""The hosting service providing this repository.
This will be ``None`` if this is a standalone repository.
Type:
reviewboard.hostingsvcs.service.HostingService
Raises:
reviewboard.hostingsvcs.errors.MissingHostingServiceError:
The hosting service for this repository could not be loaded.
"""
if self.hosting_account:
try:
return self.hosting_account.service
except MissingHostingServiceError as e:
raise MissingHostingServiceError(e.hosting_service_id,
self.name)
return None
@cached_property
def bug_tracker_service(self):
"""The selected bug tracker service for the repository.
This will be ``None`` if this repository is not associated with a bug
tracker.
Type:
reviewboard.hostingsvcs.service.HostingService
Raises:
reviewboard.hostingsvcs.errors.MissingHostingServiceError:
The hosting service for this repository could not be loaded.
"""
if self.extra_data.get('bug_tracker_use_hosting'):
return self.hosting_service
bug_tracker_type = self.extra_data.get('bug_tracker_type')
if bug_tracker_type:
bug_tracker_cls = get_hosting_service(bug_tracker_type)
# TODO: we need to figure out some way of storing a second
# hosting service account for bug trackers.
return bug_tracker_cls(HostingServiceAccount())
return None
@property
def supports_post_commit(self):
"""Whether or not this repository supports post-commit creation.
If this is ``True``, the :py:meth:`get_branches` and
:py:meth:`get_commits` methods will be implemented to fetch information
about the committed revisions, and get_change will be implemented to
fetch the actual diff. This is used by
:py:meth:`ReviewRequestDraft.update_from_commit_id
<reviewboard.reviews.models.ReviewRequestDraft.update_from_commit_id>`.
Type:
bool
Raises:
reviewboard.hostingsvcs.errors.MissingHostingServiceError:
The hosting service for this repository could not be loaded.
"""
hosting_service = self.hosting_service
if hosting_service:
return hosting_service.supports_post_commit
else:
return self.scmtool_class.supports_post_commit
@property
def supports_pending_changesets(self):
"""Whether this repository supports server-aware pending changesets.
Type:
bool
"""
return self.scmtool_class.supports_pending_changesets
@cached_property
def diffs_use_absolute_paths(self):
"""Whether or not diffs for this repository contain absolute paths.
Some types of source code management systems generate diffs that
contain paths relative to the directory where the diff was generated.
Most contain absolute paths. This flag indicates which path format
this repository can expect.
Type:
bool
"""
# Ideally, we won't have to instantiate the class, as that can end up
# performing some expensive calls or HTTP requests. If the SCMTool is
# modern (doesn't define a get_diffs_use_absolute_paths), it will have
# all the information we need on the class. If not, we might have to
# instantiate it, but do this as a last resort.
scmtool_cls = self.scmtool_class
if isinstance(scmtool_cls.diffs_use_absolute_paths, bool):
return scmtool_cls.diffs_use_absolute_paths
elif hasattr(scmtool_cls, 'get_diffs_use_absolute_paths'):
# This will trigger a deprecation warning.
return self.get_scmtool().diffs_use_absolute_paths
else:
return False
def get_scmtool(self):
"""Return an instance of the SCMTool for this repository.
Each call will construct a brand new instance. The returned value
should be stored and used for multiple operations in a single session.
Returns:
reviewboard.scmtools.core.SCMTool:
A new instance of the SCMTool for this repository.
"""
return self.scmtool_class(self)
def get_credentials(self):
"""Return the credentials for this repository.
This returns a dictionary with ``username`` and ``password`` keys.
By default, these will be the values stored for the repository,
but if a hosting service is used and the repository doesn't have
values for one or both of these, the hosting service's credentials
(if available) will be used instead.
Returns:
dict:
A dictionary with credentials information.
"""
username = self.username
password = self.password
if self.hosting_account and self.hosting_account.service:
username = username or self.hosting_account.username
password = password or self.hosting_account.service.get_password()
return {
'username': username,
'password': password,
}
def get_or_create_hooks_uuid(self, max_attempts=20):
"""Return a hooks UUID, creating one if necessary.
A hooks UUID is used for creating unique incoming webhook URLs,
allowing services to communicate information to Review Board.
If a hooks UUID isn't already saved, then this will try to generate one
that doesn't conflict with any other registered hooks UUID. It will try
up to ``max_attempts`` times, and if it fails, ``None`` will be
returned.
Args:
max_attempts (int, optional):
The maximum number of UUID generation attempts to try before
giving up.
Returns:
unicode:
The resulting UUID.
Raises:
Exception:
The maximum number of attempts has been reached.
"""
if not self.hooks_uuid:
for attempt in range(max_attempts):
self.hooks_uuid = uuid.uuid4().hex
try:
self.save(update_fields=['hooks_uuid'])
break
except IntegrityError:
# We hit a collision with the token value. Try again.
self.hooks_uuid = None
if not self.hooks_uuid:
s = ('Unable to generate a unique hooks UUID for '
'repository %s after %d attempts'
% (self.pk, max_attempts))
logger.error(s)
raise Exception(s)
return self.hooks_uuid
def get_encoding_list(self):
"""Return a list of candidate text encodings for files.
This will return a list based on a comma-separated list of encodings
in :py:attr:`encoding`. If no encodings are configured, the default
of ``iso-8859-15`` will be used.
Returns:
list of unicode:
The list of text encodings to try for files in the repository.
"""
encodings = []
for e in self.encoding.split(','):
e = e.strip()
if e:
encodings.append(e)
return encodings or [self.FALLBACK_ENCODING]
def get_file(self, path, revision, base_commit_id=None, request=None,
context=None):
"""Return a file from the repository.
This will attempt to retrieve the file from the repository. If the
repository is backed by a hosting service, it will go through that.
Otherwise, it will attempt to directly access the repository.
This will send the
:py:data:`~reviewboard.scmtools.signals.fetching_file` signal before
beginning a file fetch from the repository (if not cached), and the
:py:data:`~reviewboard.scmtools.signals.fetched_file` signal after.
Args:
path (unicode):
The path to the file in the repository.
revision (unicode):
The revision of the file to retrieve.
base_commit_id (unicode, optional):
The ID of the commit containing the revision of the file
to retrieve. This is required for some types of repositories
where the revision of a file and the ID of a commit differ.
Deprecated:
4.0.5:
Callers should provide this in ``context`` instead.
request (django.http.HttpRequest, optional):
The current HTTP request from the client. This is used for
logging purposes.
Deprecated:
4.0.5:
Callers should provide this in ``context`` instead.
context (reviewboard.scmtools.core.FileLookupContext, optional):
Extra context used to help look up this file.
This contains information about the HTTP request, requesting
user, and parsed diff information, which may be useful as
part of the repository lookup process.
Version Added:
4.0.5
Returns:
bytes:
The resulting file contents.
Raises:
TypeError:
One or more of the provided arguments is an invalid type.
Details are contained in the error message.
"""
# We wrap the result of get_file in a list and then return the first
# element after getting the result from the cache. This prevents the
# cache backend from converting to unicode, since we're no longer
# passing in a string and the cache backend doesn't recursively look
# through the list in order to convert the elements inside.
#
# Basically, this fixes the massive regressions introduced by the
# Django unicode changes.
if not isinstance(path, str):
raise TypeError('"path" must be a Unicode string, not %s'
% type(path))
if not isinstance(revision, str):
raise TypeError('"revision" must be a Unicode string, not %s'
% type(revision))
if context is None:
# If an explicit context isn't provided, create one. In a future
# version, this will be required.
context = FileLookupContext(request=request,
base_commit_id=base_commit_id)
return cache_memoize(
self._make_file_cache_key(path=path,
revision=revision,
base_commit_id=context.base_commit_id),
lambda: [
self._get_file_uncached(path=path,
revision=revision,
context=context),
],
large_data=True)[0]
def get_file_exists(self, path, revision, base_commit_id=None,
request=None, context=None):
"""Return whether or not a file exists in the repository.
If the repository is backed by a hosting service, this will go
through that. Otherwise, it will attempt to directly access the
repository.
The result of this call will be cached, making future lookups
of this path and revision on this repository faster.
This will send the
:py:data:`~reviewboard.scmtools.signals.checking_file_exists` signal
before beginning a file fetch from the repository (if not cached), and
the :py:data:`~reviewboard.scmtools.signals.checked_file_exists` signal
after.
Args:
path (unicode):
The path to the file in the repository.
revision (unicode);
The revision of the file to check.
base_commit_id (unicode, optional):
The ID of the commit containing the revision of the file
to check. This is required for some types of repositories
where the revision of a file and the ID of a commit differ.
Deprecated:
4.0.5:
Callers should provide this in ``context`` instead.
request (django.http.HttpRequest, optional):
The current HTTP request from the client. This is used for
logging purposes.
Deprecated:
4.0.5:
Callers should provide this in ``context`` instead.
context (reviewboard.scmtools.core.FileLookupContext, optional):
Extra context used to help look up this file.
This contains information about the HTTP request, requesting
user, and parsed diff information, which may be useful as
part of the repository lookup process.
Version Added:
4.0.5
Returns:
bool:
``True`` if the file exists in the repository. ``False`` if it
does not.
Raises:
TypeError:
One or more of the provided arguments is an invalid type.
Details are contained in the error message.
"""
if not isinstance(path, str):
raise TypeError('"path" must be a Unicode string, not %s'
% type(path))
if not isinstance(revision, str):
raise TypeError('"revision" must be a Unicode string, not %s'
% type(revision))
if context is None:
# If an explicit context isn't provided, create one. In a future
# version, this will be required.
context = FileLookupContext(request=request,
base_commit_id=base_commit_id)
key = self._make_file_exists_cache_key(
path=path,
revision=revision,
base_commit_id=context.base_commit_id)
if cache.get(make_cache_key(key)) == '1':
return True
exists = self._get_file_exists_uncached(path=path,
revision=revision,
context=context)
if exists:
cache_memoize(key, lambda: '1')
return exists
def get_branches(self):
"""Return a list of all branches on the repository.
This will fetch a list of all known branches for use in the API and
New Review Request page.
Returns:
list of reviewboard.scmtools.core.Branch:
The list of branches in the repository. One (and only one) will
be marked as the default branch.
Raises:
reviewboard.hostingsvcs.errors.HostingServiceError:
The hosting service backing the repository encountered an
error.
reviewboard.scmtools.errors.SCMError:
The repository tool encountered an error.
NotImplementedError:
Branch retrieval is not available for this type of repository.
"""
hosting_service = self.hosting_service
cache_key = make_cache_key('repository-branches:%s' % self.pk)
if hosting_service:
branches_callable = lambda: hosting_service.get_branches(self)
else:
branches_callable = self.get_scmtool().get_branches
return cache_memoize(cache_key, branches_callable,
self.BRANCHES_CACHE_PERIOD)
def get_commit_cache_key(self, commit_id):
"""Return the cache key used for a commit ID.
The resulting cache key is used to cache information about a commit
retrieved from the repository that matches the provided ID. This can
be used to delete information already in cache.
Args:
commit_id (unicode):
The ID of the commit to generate a cache key for.
Returns:
unicode:
The resulting cache key.
"""
return 'repository-commit:%s:%s' % (self.pk, commit_id)
def get_commits(self, branch=None, start=None):
"""Return a list of commits.
This will fetch a batch of commits from the repository for use in the
API and New Review Request page.
The resulting commits will be in order from newest to oldest, and
should return upwards of a fixed number of commits (usually 30, but
this depends on the type of repository and its limitations). It may
also be limited to commits that exist on a given branch (if supported
by the repository).
This can be called multiple times in succession using the
:py:attr:`Commit.parent` of the last entry as the ``start`` parameter
in order to paginate through the history of commits in the repository.
Args:
branch (unicode, optional):
The branch to limit commits to. This may not be supported by
all repositories.
start (unicode, optional):
The commit to start at. If not provided, this will fetch the
first commit in the repository.
Returns:
list of reviewboard.scmtools.core.Commit:
The retrieved commits.
Raises:
reviewboard.hostingsvcs.errors.HostingServiceError:
The hosting service backing the repository encountered an
error.
reviewboard.scmtools.errors.SCMError:
The repository tool encountered an error.
NotImplementedError:
Commits retrieval is not available for this type of repository.
"""
hosting_service = self.hosting_service
commits_kwargs = {
'branch': branch,
'start': start,
}
if hosting_service:
commits_callable = \
lambda: hosting_service.get_commits(self, **commits_kwargs)
else:
commits_callable = \
lambda: self.get_scmtool().get_commits(**commits_kwargs)
# We cache both the entire list for 'start', as well as each individual
# commit. This allows us to reduce API load when people are looking at
# the "new review request" page more frequently than they're pushing
# code, and will usually save 1 API request when they go to actually
# create a new review request.
if branch and start:
cache_period = self.COMMITS_CACHE_PERIOD_LONG
else:
cache_period = self.COMMITS_CACHE_PERIOD_SHORT
cache_key = make_cache_key('repository-commits:%s:%s:%s'
% (self.pk, branch, start))
commits = cache_memoize(cache_key, commits_callable,
cache_period)
for commit in commits:
cache.set(self.get_commit_cache_key(commit.id),
commit, self.COMMITS_CACHE_PERIOD_LONG)
return commits
def get_change(self, revision):
"""Return an individual change/commit in the repository.
Args:
revision (unicode):
The commit ID or revision to retrieve.
Returns:
reviewboard.scmtools.core.Commit:
The commit from the repository.
Raises:
reviewboard.hostingsvcs.errors.HostingServiceError:
The hosting service backing the repository encountered an
error.
reviewboard.scmtools.errors.SCMError:
The repository tool encountered an error.
NotImplementedError:
Commits retrieval is not available for this type of repository.
"""
hosting_service = self.hosting_service
if hosting_service:
return hosting_service.get_change(self, revision)
else:
return self.get_scmtool().get_change(revision)
def normalize_patch(self, patch, filename, revision):
"""Normalize a diff/patch file before it's applied.
This can be used to take an uploaded diff file and modify it so that
it can be properly applied. This may, for instance, uncollapse
keywords or remove metadata that would confuse :command:`patch`.
This passes the request on to the hosting service or repository
tool backend.
Args:
patch (bytes):
The diff/patch file to normalize.
filename (unicode):
The name of the file being changed in the diff.
revision (unicode):
The revision of the file being changed in the diff.
Returns:
bytes:
The resulting diff/patch file.
Raises:
reviewboard.hostingsvcs.errors.MissingHostingServiceError:
The hosting service for this repository could not be loaded.
"""
hosting_service = self.hosting_service
if hosting_service:
return hosting_service.normalize_patch(repository=self,
patch=patch,
filename=filename,
revision=revision)
else:
return self.get_scmtool().normalize_patch(patch=patch,
filename=filename,
revision=revision)
def is_accessible_by(self, user):
"""Return whether or not the user has access to the repository.
The repository is accessibly by the user if it is public or
the user has access to it (either by being explicitly on the allowed
users list, or by being a member of a review group on that list).
Args:
user (django.contrib.auth.models.User):
The user to check.
Returns:
bool:
``True`` if the repository is accessible by the user.
``False`` if it is not.
"""
if self.local_site and not self.local_site.is_accessible_by(user):
return False
return (self.public or
user.is_superuser or
(user.is_authenticated and
(self.review_groups.filter(users__pk=user.pk).exists() or
self.users.filter(pk=user.pk).exists())))
    def is_mutable_by(self, user):
        """Return whether or not the user can modify or delete the repository.

        The repository is mutable by the user if the user is an administrator
        with proper permissions or the repository is part of a LocalSite and
        the user has permissions to modify it.

        Args:
            user (django.contrib.auth.models.User):
                The user to check.

        Returns:
            bool:
            ``True`` if the user can modify or delete the repository.
            ``False`` if they cannot.
        """
        return user.has_perm('scmtools.change_repository', self.local_site)
def archive(self, save=True):
"""Archive a repository.
The repository won't appear in any public lists of repositories,
and won't be used when looking up repositories. Review requests
can't be posted against an archived repository.
New repositories can be created with the same information as the
archived repository.
Args:
save (bool, optional):
Whether to save the repository immediately.
"""
# This should be sufficiently unlikely to create duplicates. time()
# will use up a max of 8 characters, so we slice the name down to
# make the result fit in 64 characters
max_name_len = self._meta.get_field('name').max_length
encoded_time = '%x' % int(time())
reserved_len = len('ar::') + len(encoded_time)
self.name = 'ar:%s:%s' % (self.name[:max_name_len - reserved_len],
encoded_time)
self.archived = True
self.public = False
self.archived_timestamp = timezone.now()
if save:
self.save(update_fields=('name', 'archived', 'public',
'archived_timestamp'))
    def save(self, **kwargs):
        """Save the repository.

        This will perform any data normalization needed (currently, turning
        an empty :py:attr:`hooks_uuid` into ``None``), and then save the
        repository to the database.

        Args:
            **kwargs (dict):
                Keyword arguments to pass to the parent method.
        """
        # Prevent empty strings from saving in the admin UI, which could lead
        # to database-level validation errors. NULL is the expected "unset"
        # value for hooks_uuid (see get_or_create_hooks_uuid()).
        if self.hooks_uuid == '':
            self.hooks_uuid = None
        return super(Repository, self).save(**kwargs)
def clean(self):
"""Clean method for checking null unique_together constraints.
Django has a bug where unique_together constraints for foreign keys
aren't checked properly if one of the relations is null. This means
that users who aren't using local sites could create multiple groups
with the same name.
Raises:
django.core.exceptions.ValidationError:
Validation of the model's data failed. Details are in the
exception.
"""
super(Repository, self).clean()
if self.local_site is None:
existing_repos = (
Repository.objects
.exclude(pk=self.pk)
.filter(Q(name=self.name) |
(Q(archived=False) &
Q(path=self.path)))
.values('name', 'path')
)
errors = {}
for repo_info in existing_repos:
if repo_info['name'] == self.name:
errors['name'] = [
ValidationError(self.NAME_CONFLICT_ERROR,
code='repository_name_exists'),
]
if repo_info['path'] == self.path:
errors['path'] = [
ValidationError(self.PATH_CONFLICT_ERROR,
code='repository_path_exists'),
]
if errors:
raise ValidationError(errors)
def _make_file_cache_key(self, path, revision, base_commit_id):
"""Return a cache key for fetched files.
Args:
path (unicode):
The path to the file in the repository.
revision (unicode):
The revision of the file.
base_commit_id (unicode):
The ID of the commit containing the revision of the file.
This is required for some types of repositories where the
revision of a file and the ID of a commit differ.
Returns:
unicode:
A cache key representing this file.
"""
return 'file:%s:%s:%s:%s:%s' % (
self.pk,
quote(path),
quote(revision),
quote(base_commit_id or ''),
quote(self.raw_file_url or ''))
    def _make_file_exists_cache_key(self, path, revision, base_commit_id):
        """Return a cache key for file existence checks.

        Args:
            path (unicode):
                The path to the file in the repository.

            revision (unicode):
                The revision of the file to check.

            base_commit_id (unicode, optional):
                The ID of the commit containing the revision of the file
                to check. This is required for some types of repositories
                where the revision of a file and the ID of a commit differ.

        Returns:
            unicode:
            A cache key representing this file check.
        """
        return 'file-exists:%s:%s:%s:%s:%s' % (
            self.pk,
            quote(path),
            quote(revision),
            quote(base_commit_id or ''),
            quote(self.raw_file_url or ''))
def _get_file_uncached(self, path, revision, context):
"""Return a file from the repository, bypassing cache.
This is called internally by :py:meth:`get_file` if the file isn't
already in the cache.
This will send the
:py:data:`~reviewboard.scmtools.signals.fetching_file` signal before
beginning a file fetch from the repository, and the
:py:data:`~reviewboard.scmtools.signals.fetched_file` signal after.
Args:
path (unicode):
The path to the file in the repository.
revision (unicode):
The revision of the file to retrieve.
context (reviewboard.scmtools.core.FileLookupContext):
Extra context used to help look up this file.
Version Added:
4.0.5
Returns:
bytes:
The resulting file contents.
Raises:
reviewboard.hostingsvcs.errors.MissingHostingServiceError:
The hosting service for this repository could not be loaded.
"""
request = context.request
base_commit_id = context.base_commit_id
fetching_file.send(sender=self,
path=path,
revision=revision,
base_commit_id=base_commit_id,
request=request,
context=context)
if base_commit_id:
timer_msg = "Fetching file '%s' r%s (base commit ID %s) from %s" \
% (path, revision, base_commit_id, self)
else:
timer_msg = "Fetching file '%s' r%s from %s" \
% (path, revision, self)
log_timer = log_timed(timer_msg, request=request)
hosting_service = self.hosting_service
if hosting_service:
data = hosting_service.get_file(
self,
path,
revision,
base_commit_id=base_commit_id,
context=context)
assert isinstance(data, bytes), (
'%s.get_file() must return a byte string, not %s'
% (type(hosting_service).__name__, type(data)))
else:
tool = self.get_scmtool()
data = tool.get_file(path, revision,
base_commit_id=base_commit_id,
context=context)
assert isinstance(data, bytes), (
'%s.get_file() must return a byte string, not %s'
% (type(tool).__name__, type(data)))
log_timer.done()
fetched_file.send(sender=self,
path=path,
revision=revision,
base_commit_id=base_commit_id,
request=request,
context=context,
data=data)
return data
def _get_file_exists_uncached(self, path, revision, context):
"""Check for file existence, bypassing cache.
This is called internally by :py:meth:`get_file_exists` if the file
isn't already in the cache.
This function is smart enough to check if the file exists in cache,
and will use that for the result instead of making a separate call.
This will send the
:py:data:`~reviewboard.scmtools.signals.checking_file_exists` signal
before beginning a file fetch from the repository, and the
:py:data:`~reviewboard.scmtools.signals.checked_file_exists` signal
after.
Args:
path (unicode):
The path to the file in the repository.
revision (unicode):
The revision of the file to check.
context (reviewboard.scmtools.core.FileLookupContext):
Extra context used to help look up this file.
Version Added:
4.0.5
Returns:
bool:
``True`` if the file exists. ``False`` if it does not.
Raises:
reviewboard.hostingsvcs.errors.MissingHostingServiceError:
The hosting service for this repository could not be loaded.
"""
request = context.request
base_commit_id = context.base_commit_id
# First we check to see if we've fetched the file before. If so,
# it's in there and we can just return that we have it.
file_cache_key = make_cache_key(
self._make_file_cache_key(path=path,
revision=revision,
base_commit_id=base_commit_id))
if file_cache_key in cache:
exists = True
else:
# We didn't have that in the cache, so check from the repository.
checking_file_exists.send(sender=self,
path=path,
revision=revision,
base_commit_id=base_commit_id,
request=request,
context=context)
hosting_service = self.hosting_service
if hosting_service:
exists = hosting_service.get_file_exists(
self,
path,
revision,
base_commit_id=base_commit_id,
context=context)
else:
tool = self.get_scmtool()
exists = tool.file_exists(path, revision,
base_commit_id=base_commit_id,
context=context)
checked_file_exists.send(sender=self,
path=path,
revision=revision,
base_commit_id=base_commit_id,
request=request,
exists=exists,
context=context)
return exists
    def __str__(self):
        """Return a string representation of the repository.

        This uses the repository's name as the string representation. However,
        it should not be used if explicitly wanting to retrieve the repository
        name, as future versions may return a different value.

        Returns:
            unicode:
            The repository name.
        """
        return self.name
    class Meta:
        """Model metadata for Repository."""
        db_table = 'scmtools_repository'
        # Names and hook UUIDs must be unique within a Local Site. Paths must
        # be unique per Local Site and archive timestamp, which lets an
        # archived repository's path be reused by a new repository.
        unique_together = (('name', 'local_site'),
                           ('archived_timestamp', 'path', 'local_site'),
                           ('hooks_uuid', 'local_site'))
        verbose_name = _('Repository')
        verbose_name_plural = _('Repositories')
| mit | 51d8aeed5273ea1fafabecd7513248a9 | 36.069314 | 79 | 0.581485 | 4.800019 | false | false | false | false |
reviewboard/reviewboard | reviewboard/hostingsvcs/fedorahosted.py | 1 | 2006 | from django import forms
from django.utils.translation import gettext_lazy as _
from reviewboard.hostingsvcs.forms import HostingServiceForm
from reviewboard.hostingsvcs.service import HostingService
class FedoraHostedForm(HostingServiceForm):
    """Repository configuration form for fedorahosted.org.

    The single field collects the repository name, which is substituted into
    the URL templates in :py:class:`FedoraHosted`.
    """
    fedorahosted_repo_name = forms.CharField(
        label=_('Repository name'),
        max_length=64,
        required=True,
        widget=forms.TextInput(attrs={'size': '60'}))
class FedoraHosted(HostingService):
    """Hosting service support for fedorahosted.org.

    This was a hosting service for Git, Mercurial, and Subversion provided
    by Fedora. This service was retired on March 1st, 2017.

    Deprecated:
        3.0.17:
        This service will no longer appear as an option when configuring a
        repository.
    """
    name = 'Fedora Hosted'
    hosting_service_id = 'fedorahosted'
    # Hidden from new repository configuration, since the service has been
    # retired. Repositories already configured with it keep their settings.
    visible = False
    form = FedoraHostedForm
    supports_repositories = True
    supports_bug_trackers = True
    supported_scmtools = ['Git', 'Mercurial', 'Subversion']
    # URL templates per SCM type. '%(fedorahosted_repo_name)s' is filled in
    # from the form field defined in FedoraHostedForm above.
    repository_fields = {
        'Git': {
            'path': 'git://git.fedorahosted.org/git/'
                    '%(fedorahosted_repo_name)s.git',
            'raw_file_url': 'http://git.fedorahosted.org/cgit/'
                            '%(fedorahosted_repo_name)s.git/blob/'
                            '<filename>?id=<revision>'
        },
        'Mercurial': {
            'path': 'http://hg.fedorahosted.org/hg/'
                    '%(fedorahosted_repo_name)s/',
            'mirror_path': 'https://hg.fedorahosted.org/hg/'
                           '%(fedorahosted_repo_name)s/'
        },
        'Subversion': {
            'path': 'http://svn.fedorahosted.org/svn/'
                    '%(fedorahosted_repo_name)s/',
            'mirror_path': 'https://svn.fedorahosted.org/svn/'
                           '%(fedorahosted_repo_name)s/',
        },
    }
    # '%%s' survives the repository-name substitution as a literal '%s',
    # which is later replaced with the bug number.
    bug_tracker_field = \
        'https://fedorahosted.org/%(fedorahosted_repo_name)s/ticket/%%s'
| mit | dd6dba7a211aada7f35b8dec5230c9cc | 33.586207 | 74 | 0.58674 | 3.910331 | false | false | false | false |
reviewboard/reviewboard | reviewboard/admin/forms/diff_settings.py | 1 | 4955 | """Administration form for diff viewer settings."""
import re
from django import forms
from django.utils.translation import gettext_lazy as _
from djblets.forms.fields import ListEditDictionaryField
from djblets.forms.widgets import ListEditWidget
from djblets.siteconfig.forms import SiteSettingsForm
from reviewboard.admin.form_widgets import LexersMappingWidget
class DiffSettingsForm(SiteSettingsForm):
    """Diff settings for Review Board.

    Each ``diffviewer_*`` field maps directly onto a site configuration key
    of the same name. The one exception is ``include_space_patterns``, which
    is stored as a list and converted to/from a comma-separated string in
    :py:meth:`load` and :py:meth:`save`.
    """
    css_bundle_names = ['djblets-forms']
    js_bundle_names = ['djblets-forms']
    diffviewer_syntax_highlighting = forms.BooleanField(
        label=_('Show syntax highlighting'),
        required=False)
    diffviewer_syntax_highlighting_threshold = forms.IntegerField(
        label=_('Syntax highlighting threshold'),
        help_text=_('Files with lines greater than this number will not have '
                    'syntax highlighting. Enter 0 for no limit.'),
        required=False,
        widget=forms.TextInput(attrs={'size': '5'}))
    diffviewer_custom_pygments_lexers = ListEditDictionaryField(
        label=_('Mapping of file extensions to syntax highlighters'),
        help_text=_('A list of file extensions and their corresponding '
                    'Pygments lexer to use for syntax highlighting.'),
        required=False,
        widget=ListEditWidget(value_widget=LexersMappingWidget))
    diffviewer_show_trailing_whitespace = forms.BooleanField(
        label=_('Show trailing whitespace'),
        help_text=_('Show excess trailing whitespace as red blocks. This '
                    'helps to visualize when a text editor added unwanted '
                    'whitespace to the end of a line.'),
        required=False)
    # Stored in siteconfig as a list of patterns; shown to the admin as
    # comma-separated text (see load()/save() below).
    include_space_patterns = forms.CharField(
        label=_('Show all whitespace for'),
        required=False,
        help_text=_('A comma-separated list of file patterns for which all '
                    'whitespace changes should be shown. '
                    '(e.g., "*.py, *.txt")'),
        widget=forms.TextInput(attrs={'size': '60'}))
    diffviewer_context_num_lines = forms.IntegerField(
        label=_('Lines of Context'),
        help_text=_('The number of unchanged lines shown above and below '
                    'changed lines.'),
        initial=5,
        widget=forms.TextInput(attrs={'size': '5'}))
    diffviewer_paginate_by = forms.IntegerField(
        label=_('Paginate by'),
        help_text=_('The number of files to display per page in the diff '
                    'viewer.'),
        initial=20,
        widget=forms.TextInput(attrs={'size': '5'}))
    diffviewer_paginate_orphans = forms.IntegerField(
        label=_('Paginate orphans'),
        help_text=_('The number of extra files required before adding another '
                    'page to the diff viewer.'),
        initial=10,
        widget=forms.TextInput(attrs={'size': '5'}))
    diffviewer_max_diff_size = forms.IntegerField(
        label=_('Max diff size (bytes)'),
        help_text=_('The maximum size (in bytes) for any given diff. Enter 0 '
                    'to disable size restrictions.'),
        widget=forms.TextInput(attrs={'size': '15'}))
    def load(self):
        """Load settings from the form.

        This will populate initial fields based on the site configuration.
        """
        super(DiffSettingsForm, self).load()
        # Convert the stored list of patterns into the comma-separated form
        # shown in the text field.
        self.fields['include_space_patterns'].initial = \
            ', '.join(self.siteconfig.get('diffviewer_include_space_patterns'))
    def save(self):
        """Save the form.

        This will write the new configuration to the database.
        """
        # Split the comma-separated field back into the list form stored in
        # the site configuration.
        self.siteconfig.set(
            'diffviewer_include_space_patterns',
            re.split(r',\s*', self.cleaned_data['include_space_patterns']))
        super(DiffSettingsForm, self).save()
    class Meta:
        title = _('Diff Viewer Settings')
        # include_space_patterns is converted manually in load()/save(), so
        # keep it out of the automatic siteconfig persistence.
        save_blacklist = ('include_space_patterns',)
        fieldsets = (
            {
                'classes': ('wide',),
                'fields': ('diffviewer_syntax_highlighting',
                           'diffviewer_syntax_highlighting_threshold',
                           'diffviewer_custom_pygments_lexers',
                           'diffviewer_show_trailing_whitespace',
                           'include_space_patterns'),
            },
            {
                'title': _('Advanced'),
                'description': _(
                    'These are advanced settings that control the behavior '
                    'and display of the diff viewer. In general, these '
                    'settings do not need to be changed.'
                ),
                'classes': ('wide',),
                'fields': ('diffviewer_max_diff_size',
                           'diffviewer_context_num_lines',
                           'diffviewer_paginate_by',
                           'diffviewer_paginate_orphans')
            }
        )
| mit | e25df6e4f073bba858de9c30e56f1e45 | 38.325397 | 79 | 0.58668 | 4.701139 | false | false | false | false |
reviewboard/reviewboard | reviewboard/scmtools/clearcase.py | 1 | 24203 | """ClearCase SCM provider."""
import logging
import os
import re
import subprocess
import sys
import tempfile
from django.conf import settings
from django.utils.encoding import force_str
from django.utils.translation import gettext_lazy as _
from reviewboard.diffviewer.parser import DiffParser
from reviewboard.scmtools.core import SCMTool, HEAD, PRE_CREATION
from reviewboard.scmtools.errors import SCMError, FileNotFoundError
# This specific import is necessary to handle the paths for
# cygwin enabled machines.
if sys.platform.startswith(('win', 'cygwin')):
import ntpath as cpath
else:
import posixpath as cpath
logger = logging.getLogger(__name__)
_cleartool = None


def get_cleartool():
    """Return the cleartool binary/path name.

    This allows the user to configure a custom path to cleartool, or use a
    wrapper, by setting CC_CTEXEC in the settings_local.py file.

    The lookup result is memoized in the module-level ``_cleartool``
    variable, so the settings are only consulted once.

    Returns:
        unicode:
        The name or path of cleartool to use.
    """
    global _cleartool

    if _cleartool is None:
        # An unset or empty CC_CTEXEC setting falls back to whatever
        # "cleartool" resolves to in the search path.
        _cleartool = getattr(settings, 'CC_CTEXEC', None) or 'cleartool'

    logger.debug('Using cleartool %s', _cleartool)

    return _cleartool
class ClearCaseTool(SCMTool):
"""ClearCase SCM provider."""
scmtool_id = 'clearcase'
name = 'ClearCase'
field_help_text = {
'path': _('The absolute path to the VOB.'),
}
dependencies = {
'executables': [get_cleartool()],
}
# This regular expression can extract from extended_path pure system path.
# It is construct from two main parts. The first match is everything from
# beginning of line to the first occurrence of /. The second match is
# parts between /main and numbers (file version). This patch assumes each
# branch present in extended_path was derived from /main and there is no
# file or directory called "main" in path.
UNEXTENDED = re.compile(r'^(.+?)/|/?(.+?)/main/?.*?/([0-9]+|CHECKEDOUT)')
# Currently, snapshot and dynamic views are supported. Automatic views and
# webviews will be reported as VIEW_UNKNOWN.
VIEW_SNAPSHOT, VIEW_DYNAMIC, VIEW_UNKNOWN = range(3)
def __init__(self, repository):
"""Initialize the tool.
Args:
repository (reviewboard.scmtools.models.Repository):
The associated repository object.
"""
self.repopath = repository.path
SCMTool.__init__(self, repository)
self.viewtype = self._get_view_type(self.repopath)
if self.viewtype == self.VIEW_SNAPSHOT:
self.client = ClearCaseSnapshotViewClient(self.repopath)
elif self.viewtype == self.VIEW_DYNAMIC:
self.client = ClearCaseDynamicViewClient(self.repopath)
else:
raise SCMError('Unsupported view type.')
    @staticmethod
    def run_cleartool(cmdline, cwd=None, ignore_errors=False,
                      results_unicode=True):
        """Run cleartool with the given command line.

        Args:
            cmdline (list of unicode):
                The cleartool command-line to execute, not including the
                ``cleartool`` binary itself (that's prepended automatically).

            cwd (unicode, optional):
                The working directory to use for the subprocess.

            ignore_errors (bool, optional):
                Whether to ignore error return codes.

            results_unicode (bool, optional):
                Whether to return unicode or bytes.

        Returns:
            bytes or unicode:
            The output from the command.

        Raises:
            reviewboard.scmtools.errors.SCMError:
                The cleartool execution returned an error code.
        """
        popen_kwargs = {}
        if results_unicode:
            # Popen before Python 3.6 doesn't support the ``encoding``
            # parameter, so we have to use ``universal_newlines`` and then
            # decode later.
            if sys.version_info[:2] >= (3, 6):
                popen_kwargs['encoding'] = 'utf-8'
            else:
                popen_kwargs['universal_newlines'] = True
        # On Windows 7+, executing a process that is marked SUBSYSTEM_CONSOLE
        # (such as cleartool) will pop up a console window, even if output is
        # redirected to a pipe. This hot mess prevents that from happening. If
        # Popen gains a better API to do this, we should switch to that when
        # we can. See https://bugs.python.org/issue30082 for details.
        if sys.platform.startswith('win'):
            si = subprocess.STARTUPINFO()
            si.dwFlags |= subprocess.STARTF_USESHOWWINDOW
            si.wShowWindow = subprocess.SW_HIDE
            popen_kwargs['startupinfo'] = si
        cmdline = [get_cleartool()] + cmdline
        logger.debug('Running %s', subprocess.list2cmdline(cmdline))
        p = subprocess.Popen(
            cmdline,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            cwd=cwd,
            **popen_kwargs)
        # Block until the process exits, capturing all of stdout/stderr.
        (results, error) = p.communicate()
        failure = p.returncode
        if failure and not ignore_errors:
            raise SCMError(error)
        # We did not specify ``encoding`` to Popen earlier, so decode now.
        if results_unicode and 'encoding' not in popen_kwargs:
            results = force_str(results)
        return results
def normalize_path_for_display(self, filename, extra_data=None, **kwargs):
"""Normalize a path from a diff for display to the user.
This will strip away information about the branch, version, and
repository path, returning an unextended path relative to the view.
ClearCase paths contain additional information about branch and file
version preceded by @@. This function removes these parts from the
ClearCase path to make it more readable. For example this function
converts the extended path::
/vobs/comm@@/main/122/network@@/main/55/sntp
@@/main/4/src@@/main/1/sntp.c@@/main/8
to the the to regular path::
/vobs/comm/network/sntp/src/sntp.c
Args:
filename (unicode):
The filename/path to normalize.
extra_data (dict, optional):
Extra data stored for the diff this file corresponds to.
This may be empty or ``None``. Subclasses should not assume the
presence of anything here.
**kwargs (dict, unused):
Additional keyword arguments.
Returns:
unicode:
The resulting filename/path.
"""
if '@@' not in filename:
return filename
# The result of regular expression search is a list of tuples. We must
# flatten this to a single list. b is first because it frequently
# occurs in tuples. Before that remove @@ from path.
unextended_chunks = [
b or a
for a, b, foo in self.UNEXTENDED.findall(
filename.replace('@@', ''))
]
if sys.platform.startswith('win'):
# Properly handle full (with drive letter) and UNC paths.
if unextended_chunks[0].endswith(':'):
unextended_chunks[0] = '%s\\' % unextended_chunks[0]
elif unextended_chunks[0] == '/' or unextended_chunks[0] == os.sep:
unextended_chunks[0] = '\\\\'
unextended_path = cpath.join(*unextended_chunks)
if not cpath.isabs(unextended_path):
unextended_path = cpath.join(self.repopath, unextended_path)
# Calling realpath here removes extra garbage like ``/./`` which can
# be generated by ClearCase when a vob branch is newly created.
unextended_path = cpath.realpath(unextended_path)
return cpath.relpath(unextended_path, self.repopath)
def get_repository_info(self):
"""Return repository information.
Returns:
dict:
A dictionary containing information for the repository, including
the VOB tag and UUID.
"""
vobstag = self._get_vobs_tag(self.repopath)
return {
'repopath': self.repopath,
'uuid': self._get_vobs_uuid(vobstag)
}
def _get_view_type(self, repopath):
"""Return the ClearCase view type for the given path.
Args:
repopath (unicode):
The repository path.
Returns:
int:
One of :py:attr:`VIEW_SNAPSHOT`, :py:attr:`VIEW_DYNAMIC`, or
:py:attr:`VIEW_UNKNOWN`.
Raises:
reviewboard.scmtools.errors.SCMError:
An error occurred when finding the view type.
"""
result = self.run_cleartool(
['lsview', '-full', '-properties', '-cview'],
cwd=repopath)
for line in result.splitlines(True):
splitted = line.split(' ')
if splitted[0] == 'Properties:':
if 'snapshot' in splitted:
return self.VIEW_SNAPSHOT
elif 'dynamic' in splitted:
return self.VIEW_DYNAMIC
return self.VIEW_UNKNOWN
def _get_vobs_tag(self, repopath):
"""Return the VOB tag for the given path.
Args:
repopath (unicode):
The repository path.
Returns:
unicode:
The VOB tag for the repository at the given path.
Raises:
reviewboard.scmtools.errors.SCMError:
An error occurred when finding the VOB tag.
"""
result = self.run_cleartool(
['describe', '-short', 'vob:.'],
cwd=repopath)
return result.rstrip()
def _get_vobs_uuid(self, vobstag):
"""Return the UUID for the given VOB tag.
Args:
vobstag (unicode):
The VOB tag.
Returns:
unicode:
The UUID associated with the given VOB tag.
Raises:
reviewboard.scmtools.errors.SCMError:
An error occurred when finding the UUID.
"""
result = self.run_cleartool(
['lsvob', '-long', vobstag],
cwd=self.repopath)
for line in result.splitlines(True):
if line.startswith('Vob family uuid:'):
return line.split(' ')[-1].rstrip()
raise SCMError('Unable to find family uuid for vob: %s' % vobstag)
def _get_element_kind(self, extended_path):
"""Return the element type of a VOB element.
Args:
extended_path (unicode):
The path of the element, including revision information.
Returns:
unicode:
The element type of the given element.
Raises:
reviewboard.scmtools.errors.SCMError:
An error occurred when finding the element type.
"""
result = self.run_cleartool(
['desc', '-fmt', '%m', extended_path],
cwd=self.repopath)
return result.strip()
def get_file(self, extended_path, revision=HEAD, **kwargs):
"""Return content of file or list content of directory.
Args:
extended_path (unicode):
The path of the element, including revision information.
revision (reviewboard.scmtools.core.Revision, optional):
Revision information. This will be either
:py:data:`~reviewboard.scmtools.core.PRE_CREATION` (new file),
or :py:data:`~reviewboard.scmtools.core.HEAD` (signifying to
use the revision information included in ``extended_path``).
**kwargs (dict, optional):
Additional unused keyword arguments.
Returns:
bytes:
The contents of the element.
Raises:
reviewboard.scmtools.errors.FileNotFoundError:
The given ``extended_path`` did not match a valid element.
reviewboard.scmtools.errors.SCMError:
Another error occurred.
"""
if not extended_path:
raise FileNotFoundError(extended_path, revision)
if revision == PRE_CREATION:
return ''
if self.viewtype == self.VIEW_SNAPSHOT:
# Get the path to (presumably) file element (remove version)
# The '@@' at the end of file_path is required.
file_path = extended_path.rsplit('@@', 1)[0] + '@@'
okind = self._get_element_kind(file_path)
if okind == 'directory element':
raise SCMError('Directory elements are unsupported.')
elif okind == 'file element':
output = self.client.cat_file(extended_path)
else:
raise FileNotFoundError(extended_path)
else:
if cpath.isdir(extended_path):
output = self.client.list_dir(extended_path)
elif cpath.exists(extended_path):
output = self.client.cat_file(extended_path)
else:
raise FileNotFoundError(extended_path)
return output
def parse_diff_revision(self, filename, revision, *args, **kwargs):
"""Parse and return a filename and revision from a diff.
In the diffs for ClearCase, the revision is actually part of the file
path. The ``revision_str`` argument contains modification timestamps.
Args:
filename (bytes):
The filename as represented in the diff.
revision (bytes):
The revision as represented in the diff.
*args (tuple, unused):
Unused positional arguments.
**kwargs (dict, unused):
Unused keyword arguments.
Returns:
tuple:
A tuple containing two items:
1. The normalized filename as a byte string.
2. The normalized revision as a byte string or a
:py:class:`~reviewboard.scmtools.core.Revision`.
"""
assert isinstance(filename, bytes), (
'filename must be a byte string, not %r' % type(filename))
assert isinstance(revision, bytes), (
'revision must be a byte string, not %r' % type(revision))
if filename.endswith(os.path.join(os.sep, 'main',
'0').encode('utf-8')):
revision = PRE_CREATION
elif filename.endswith(b'CHECKEDOUT') or b'@@' not in filename:
revision = HEAD
else:
revision = filename.rsplit(b'@@', 1)[1]
return filename, revision
def get_parser(self, data):
"""Return the diff parser for a ClearCase diff.
Args:
data (bytes):
The diff data.
Returns:
ClearCaseDiffParser:
The diff parser.
"""
return ClearCaseDiffParser(data,
self.repopath,
self._get_vobs_tag(self.repopath))
class ClearCaseDiffParser(DiffParser):
    """Special parser for ClearCase diffs created with RBTools."""
    # Matches the "==== <old-oid> <new-oid> ====" marker line that RBTools
    # adds after each file's diff headers.
    SPECIAL_REGEX = re.compile(br'^==== (\S+) (\S+) ====$')
    def __init__(self, data, repopath, vobstag):
        """Initialize the parser.
        Args:
            data (bytes):
                The diff data.
            repopath (unicode):
                The path to the repository.
            vobstag (unicode):
                The VOB tag for the repository.
        """
        self.repopath = repopath
        self.vobstag = vobstag
        super(ClearCaseDiffParser, self).__init__(data)
    def parse_diff_header(self, linenum, info):
        """Parse a diff header.
        Paths for the same file may differ from paths in developer view because
        it depends from configspec and this is custom so we translate oids,
        attached by RBTools, to filenames to get paths working well inside
        clearcase view on reviewboard side.
        Args:
            linenum (int):
                The line number to start parsing at.
            info (dict):
                The diff info data to populate.
        Returns:
            int:
                The line number after the end of the diff header.
        """
        # Because ==== oid oid ==== is present after each header
        # parse standard +++ and --- headers at the first place
        linenum = super(ClearCaseDiffParser, self).parse_diff_header(
            linenum, info)
        # Parse for filename.
        m = self.SPECIAL_REGEX.match(self.lines[linenum])
        if m:
            # When using ClearCase in multi-site mode, data replication takes
            # much time, including oid. As said above, oid is used to retrieve
            # filename path independent of developer view.
            # When an oid is not found on server side an exception is thrown
            # and review request submission fails.
            # However at this time origFile and newFile info have already been
            # filled by super.parse_diff_header and contain client side paths,
            # client side paths are enough to start reviewing.
            # So we can safely catch exception and restore client side paths
            # if not found.
            # Note: origFile and newFile attributes are not defined when
            # managing binaries, so init to '' as fallback.
            current_filename = info.get('origFile', '')
            try:
                info['origFile'] = self._oid2filename(m.group(1))
            except Exception:
                logger.debug('oid (%s) not found, get filename from client',
                             m.group(1))
                info['origFile'] = self.client_relpath(current_filename)
            current_filename = info.get('newFile', '')
            try:
                info['newFile'] = self._oid2filename(m.group(2))
            except Exception:
                logger.debug('oid (%s) not found, get filename from client',
                             m.group(2))
                info['newFile'] = self.client_relpath(current_filename)
            linenum += 1
            if (linenum < len(self.lines) and
                (self.lines[linenum].startswith((b'Binary files ',
                                                 b'Files ')))):
                # To consider filenames translated from oids
                # origInfo and newInfo keys must exists.
                # Other files already contain this values field
                # by timestamp from +++/--- diff header.
                info['origInfo'] = ''
                info['newInfo'] = ''
                # Binary files need add origInfo and newInfo manually
                # because they don't have diff's headers (only oids).
                info['binary'] = True
                linenum += 1
        return linenum
    def _oid2filename(self, oid):
        """Convert an oid to a filename.
        Args:
            oid (unicode):
                The given oid.
        Returns:
            unicode:
                The filename of the element relative to the repopath.
        Raises:
            reviewboard.scmtools.errors.SCMError:
                An error occurred while finding the filename.
        """
        result = ClearCaseTool.run_cleartool(
            ['describe', '-fmt', '%En@@%Vn', 'oid:%s' % oid],
            cwd=self.repopath)
        # On Windows, cleartool output lacks the drive letter, so prepend
        # the one from the configured repository path.
        drive = os.path.splitdrive(self.repopath)[0]
        if drive:
            result = os.path.join(drive, result)
        return cpath.relpath(result, self.repopath)
    def client_relpath(self, filename):
        """Normalize a client view path.
        Args:
            filename (unicode):
                A path in a client view.
        Returns:
            unicode:
                The relative path against the vobstag.
        """
        try:
            path, revision = filename.split(b'@@', 1)
        except ValueError:
            # No version information in the path.
            path = filename
            revision = None
        relpath = b''
        tries = 0
        vobtag_bytes = self.vobstag.encode('utf-8')
        logger.debug('vobstag: %s, path: %s', self.vobstag, path)
        # Walk upwards from the leaf, rebuilding the path relative to the
        # vobstag, with a bounded number of iterations as a safety net.
        while tries < 20:
            # An error should be raised if vobstag cannot be reached.
            if path == b'/':
                logger.debug('vobstag not found in path, use client filename')
                return filename
            # Vobstag reach, relpath can be returned.
            if path.endswith(vobtag_bytes):
                break
            path, basename = os.path.split(path)
            # Init relpath with basename.
            if len(relpath) == 0:
                relpath = basename
            else:
                relpath = os.path.join(basename, relpath)
            tries += 1
        else:
            # A correctly formatted diff from RBTools will have the vobtag in
            # the filenames, but if its not there, we don't want to infinitely
            # loop trying to find it.
            # NOTE(review): despite the "aborting" wording, this branch falls
            # through and returns the partially-built relpath rather than the
            # original filename — confirm whether that is intended.
            logger.debug('Failed to find vobtag prefix in filename %s, '
                         'aborting',
                         filename)
        logger.debug('relpath: %s', relpath)
        if revision:
            relpath = relpath + b'@@' + revision
        return relpath
class ClearCaseDynamicViewClient(object):
    """A client for ClearCase dynamic views.
    In a dynamic view, versioned elements are exposed directly through the
    filesystem, so file contents and directory listings can be read without
    invoking cleartool.
    """
    def __init__(self, path):
        """Initialize the client.
        Args:
            path (unicode):
                The path of the view.
        """
        self.path = path
    def cat_file(self, extended_path):
        """Return the contents of a file at a given revision.
        Args:
            extended_path (unicode):
                The file to fetch. This includes revision information.
        Returns:
            bytes:
                The contents of the file.
        """
        # The extended pathname addresses the file directly in the view.
        with open(extended_path, 'rb') as fp:
            data = fp.read()
        return data
    def list_dir(self, extended_path):
        """Return a directory listing of the given path.
        Args:
            extended_path (unicode):
                The path to the directory. This includes revision information.
        Returns:
            unicode:
                The contents of the given directory, one entry per line.
        """
        # The extended pathname addresses the directory directly in the view.
        entries = sorted(os.listdir(extended_path))
        return ''.join('%s\n' % entry for entry in entries)
class ClearCaseSnapshotViewClient(object):
    """A client for ClearCase snapshot views."""
    def __init__(self, path):
        """Initialize the client.
        Args:
            path (unicode):
                The path of the view.
        """
        self.path = path
    def cat_file(self, extended_path):
        """Return the contents of a file at a given revision.
        Args:
            extended_path (unicode):
                The file to fetch. This includes revision information.
        Returns:
            bytes:
                The contents of the file.
        Raises:
            reviewboard.scmtools.errors.FileNotFoundError:
                The given ``extended_path`` did not match a valid element.
        """
        # In a snapshot view, we cannot directly access the file. Use cleartool
        # to pull the desired revision into a temp file.
        temp = tempfile.NamedTemporaryFile()
        # Close and delete the existing file so we can write to it.
        # (NamedTemporaryFile deletes on close by default; only the unique
        # name is reused here so cleartool can create the file itself.)
        temp.close()
        try:
            ClearCaseTool.run_cleartool(
                ['get', '-to', temp.name, extended_path])
        except SCMError:
            # Any cleartool failure is surfaced as a missing element.
            raise FileNotFoundError(extended_path)
        try:
            with open(temp.name, 'rb') as f:
                return f.read()
        except Exception:
            raise FileNotFoundError(extended_path)
        finally:
            # Best-effort cleanup of the temp file fetched by cleartool.
            try:
                os.unlink(temp.name)
            except Exception:
                pass
| mit | 54ec6dc2268accbda7d40767bb9234dc | 31.795393 | 79 | 0.564682 | 4.568328 | false | false | false | false |
reviewboard/reviewboard | reviewboard/integrations/tests/test_views.py | 1 | 1869 | from django.test import RequestFactory
from reviewboard.integrations.base import Integration, get_integration_manager
from reviewboard.integrations.models import IntegrationConfig
from reviewboard.integrations.views import AdminIntegrationConfigFormView
from reviewboard.site.models import LocalSite
from reviewboard.testing.testcase import TestCase
class MyIntegration(Integration):
    """A no-op integration used as a fixture for these view tests."""
class AdminIntegrationConfigFormViewTests(TestCase):
    """Unit tests for AdminIntegrationConfigFormView."""
    def setUp(self):
        """Set up a view with a stub request, integration, and config."""
        super(AdminIntegrationConfigFormViewTests, self).setUp()
        self.integration = MyIntegration(get_integration_manager())
        self.config = IntegrationConfig()
        self.request = RequestFactory().request()
        # NOTE: integration and config are normally set in dispatch(), but
        # we're not calling into all that, so we're taking advantage of
        # the fact that Django's class-based generic views will set any
        # attribute passed in during construction.
        self.view = AdminIntegrationConfigFormView(
            request=self.request,
            integration=self.integration,
            config=self.config)
    def test_get_form_kwargs(self):
        """Testing AdminIntegrationConfigFormView.get_form_kwargs"""
        form_kwargs = self.view.get_form_kwargs()
        # With no LocalSite on the request, no restriction should apply.
        self.assertIsNone(form_kwargs['limit_to_local_site'])
    def test_get_form_kwargs_with_local_site(self):
        """Testing AdminIntegrationConfigFormView.get_form_kwargs with
        LocalSite
        """
        # This is normally set by LocalSiteMiddleware.
        local_site = LocalSite.objects.create(name='local-site-1')
        self.request.local_site = local_site
        form_kwargs = self.view.get_form_kwargs()
        self.assertEqual(form_kwargs['limit_to_local_site'], local_site)
| mit | 677c8c46da5945143b50659ce5b75169 | 37.9375 | 78 | 0.708935 | 4.408019 | false | true | false | false |
reviewboard/reviewboard | reviewboard/admin/tests/test_lexers_mapping_widget.py | 1 | 4389 | """Unit tests for reviewboard.admin.form_widgets.LexersMappingWidget."""
from django import forms
from django.utils.html import escape, format_html
from pygments.lexers import get_all_lexers
from reviewboard.admin.form_widgets import LexersMappingWidget
from reviewboard.testing.testcase import TestCase
class TestForm(forms.Form):
    """A Test Form with a field that contains a LexersMappingWidget."""
    # A single optional field rendered with the composite widget under test
    # (a text input for the file extension plus a <select> of Pygments
    # lexer names).
    my_mapping_field = forms.CharField(
        label=('Lexer Mapping'),
        required=False,
        widget=LexersMappingWidget())
class LexersMappingWidgetTests(TestCase):
    """Unit tests for LexersMappingWidget."""
    def test_render_empty(self):
        """Testing LexersMappingWidget.render with no initial data"""
        my_form = TestForm()
        html = my_form.fields['my_mapping_field'].widget.render(
            'Lexer Mapping',
            (),
            {'id': 'lexer-mapping'})
        # Build the expected markup: an empty text input followed by a
        # <select> listing every Pygments lexer, none selected.
        correct_html_parts = [
            '<input type="text" name="Lexer Mapping_0" id="lexer-mapping_0">',
            '<select name="Lexer Mapping_1" id="lexer-mapping_1">'
        ]
        for lex in get_all_lexers():
            lex_name = escape(lex[0])
            correct_html_parts.append(format_html(
                '<option value="{}">{}</option>', lex_name, lex_name))
        correct_html_parts.append('</select>')
        correct_html = ''.join(correct_html_parts)
        self.assertHTMLEqual(correct_html, html)
    def test_render_with_data(self):
        """Testing LexersMappingWidget.render with initial data"""
        my_form = TestForm()
        html = my_form.fields['my_mapping_field'].widget.render(
            'Lexer Mapping',
            ('.py', 'Python'),
            {'id': 'lexer-mapping'})
        # Expected markup: the text input pre-filled with '.py' and the
        # 'Python' option marked as selected.
        correct_html_parts = [
            '<input type="text" name="Lexer Mapping_0"',
            'value=".py" id="lexer-mapping_0">',
            '<select name="Lexer Mapping_1" id="lexer-mapping_1">'
        ]
        for lex in get_all_lexers():
            lex_name = escape(lex[0])
            if lex_name == 'Python':
                correct_html_parts.append(format_html(
                    '<option value="{}" selected>{}</option>', lex_name,
                    lex_name))
            else:
                correct_html_parts.append(format_html(
                    '<option value="{}">{}</option>', lex_name, lex_name))
        correct_html_parts.append('</select>')
        correct_html = ''.join(correct_html_parts)
        self.assertHTMLEqual(correct_html, html)
    def test_value_from_datadict(self):
        """Testing LexersMappingWidget.value_from_datadict"""
        my_form = TestForm()
        value = (
            my_form.fields['my_mapping_field']
            .widget
            .value_from_datadict(
                {'mapping_0': '.py',
                 'mapping_1': 'Python'},
                {},
                'mapping'))
        self.assertEqual(value, ('.py', 'Python'))
    def test_value_from_datadict_with_no_data(self):
        """Testing LexersMappingWidget.value_from_datadict with no data"""
        my_form = TestForm()
        value = (
            my_form.fields['my_mapping_field']
            .widget
            .value_from_datadict(
                {'mapping_0': '',
                 'mapping_1': ''},
                {},
                'mapping'))
        self.assertEqual(value, ('', ''))
    def test_value_from_datadict_with_missing_data(self):
        """Testing LexersMappingWidget.value_from_datadict with missing data"""
        my_form = TestForm()
        value = (
            my_form.fields['my_mapping_field']
            .widget
            .value_from_datadict(
                {},
                {},
                'mapping'))
        self.assertEqual(value, (None, None))
    def test_decompress(self):
        """Testing LexersMappingWidget.decompress"""
        my_form = TestForm()
        value = (
            my_form.fields['my_mapping_field']
            .widget
            .decompress(('.py', 'Python')))
        self.assertEqual(value, ['.py', 'Python'])
    def test_decompress_with_no_data(self):
        """Testing LexersMappingWidget.decompress with no data"""
        my_form = TestForm()
        value = (
            my_form.fields['my_mapping_field']
            .widget
            .decompress(()))
        self.assertEqual(value, [None, None])
| mit | 17010beb3ff1945274a1023803375e12 | 32.761538 | 79 | 0.546822 | 4.082791 | false | true | false | false |
reviewboard/reviewboard | reviewboard/diffviewer/models/raw_file_diff_data.py | 1 | 3478 | """RawFileDiffData model definition."""
import bz2
import logging
from django.db import models
from django.utils.translation import gettext_lazy as _
from djblets.db.fields import JSONField
from reviewboard.diffviewer.errors import DiffParserError
from reviewboard.diffviewer.managers import RawFileDiffDataManager
logger = logging.getLogger(__name__)
class RawFileDiffData(models.Model):
    """Stores raw diff data as binary content in the database.
    This is the class used in Review Board 2.5+ to store diff content.
    Unlike in previous versions, the content is not base64-encoded. Instead,
    it is stored either as bzip2-compressed data (if the resulting
    compressed data is smaller than the raw data), or as the raw data itself.
    """
    # Single-character marker stored in ``compression`` when the payload
    # is bzip2-compressed. ``None`` means the data is stored raw.
    COMPRESSION_BZIP2 = 'B'
    COMPRESSION_CHOICES = (
        (COMPRESSION_BZIP2, _('BZip2-compressed')),
    )
    # SHA-1 hash of the (uncompressed) diff content, used for de-duplication.
    binary_hash = models.CharField(_("hash"), max_length=40, unique=True)
    binary = models.BinaryField()
    compression = models.CharField(max_length=1, choices=COMPRESSION_CHOICES,
                                   null=True, blank=True)
    extra_data = JSONField(null=True)
    objects = RawFileDiffDataManager()
    @property
    def content(self):
        """Return the content of the diff.
        The content will be uncompressed (if necessary) and returned as the
        raw set of bytes originally uploaded.
        """
        if self.compression == self.COMPRESSION_BZIP2:
            return bz2.decompress(self.binary)
        elif self.compression is None:
            return bytes(self.binary)
        else:
            raise NotImplementedError(
                'Unsupported compression method %s for RawFileDiffData %s'
                % (self.compression, self.pk))
    @property
    def insert_count(self):
        """Return the number of inserted lines stored in ``extra_data``."""
        return self.extra_data.get('insert_count')
    @insert_count.setter
    def insert_count(self, value):
        self.extra_data['insert_count'] = value
    @property
    def delete_count(self):
        """Return the number of deleted lines stored in ``extra_data``."""
        return self.extra_data.get('delete_count')
    @delete_count.setter
    def delete_count(self, value):
        self.extra_data['delete_count'] = value
    def recalculate_line_counts(self, tool):
        """Recalculates the insert_count and delete_count values.
        This will attempt to re-parse the stored diff and fetch the
        line counts through the parser.
        """
        logger.debug('Recalculating insert/delete line counts on '
                     'RawFileDiffData %s',
                     self.pk)
        try:
            files = tool.get_parser(self.content).parse()
            # Each RawFileDiffData holds exactly one file's diff.
            if len(files) != 1:
                raise DiffParserError(
                    'Got wrong number of files (%d)' % len(files))
        except DiffParserError as e:
            logger.error('Failed to correctly parse stored diff data in '
                         'RawFileDiffData ID %s when trying to get '
                         'insert/delete line counts: %s',
                         self.pk, e)
        else:
            file_info = files[0]
            self.insert_count = file_info.insert_count
            self.delete_count = file_info.delete_count
            if self.pk:
                self.save(update_fields=['extra_data'])
    class Meta:
        app_label = 'diffviewer'
        db_table = 'diffviewer_rawfilediffdata'
        verbose_name = _('Raw File Diff Data')
        verbose_name_plural = _('Raw File Diff Data Blobs')
| mit | e636d73082d490c8c6fa6faa0149e11b | 32.12381 | 77 | 0.620759 | 4.246642 | false | false | false | false |
reviewboard/reviewboard | reviewboard/reviews/management/commands/fill-database.py | 1 | 16212 | import os
import random
import string
import sys
from django import db
from django.contrib.auth.models import User
from django.core.files import File
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from reviewboard.accounts.models import Profile
from reviewboard.reviews.forms import UploadDiffForm
from reviewboard.diffviewer.models import DiffSetHistory
from reviewboard.reviews.models import ReviewRequest, Review, Comment
from reviewboard.scmtools import scmtools_registry
from reviewboard.scmtools.models import Repository, Tool
# Verbosity level; output above this level is considered verbose.
NORMAL = 1
# Number of random words used for generated review request descriptions.
DESCRIPTION_SIZE = 100
# Number of random words used for generated review request summaries.
SUMMARY_SIZE = 6
LOREM_VOCAB = [
'Lorem', 'ipsum', 'dolor', 'sit', 'amet', 'consectetur',
'Nullam', 'quis', 'erat', 'libero.', 'Ut', 'vel', 'velit', 'augue, ',
'risus.', 'Curabitur', 'dignissim', 'luctus', 'dui, ', 'et',
'tristique', 'id.', 'Etiam', 'blandit', 'adipiscing', 'molestie.',
'libero', 'eget', 'lacus', 'adipiscing', 'aliquet', 'ut', 'eget',
'urna', 'dui', 'auctor', 'id', 'varius', 'eget', 'consectetur',
'Sed', 'ornare', 'fermentum', 'erat', 'ut', 'consectetur', 'diam',
'in.', 'Aliquam', 'eleifend', 'egestas', 'erat', 'nec', 'semper.',
'a', 'mi', 'hendrerit', 'vestibulum', 'ut', 'vehicula', 'turpis.',
'habitant', 'morbi', 'tristique', 'senectus', 'et', 'netus', 'et',
'fames', 'ac', 'turpis', 'egestas.', 'Vestibulum', 'purus', 'odio',
'quis', 'consequat', 'non, ', 'vehicula', 'nec', 'ligula.', 'In',
'ipsum', 'in', 'volutpat', 'ipsum.', 'Morbi', 'aliquam', 'velit',
'molestie', 'suscipit.', 'Morbi', 'dapibus', 'nibh', 'vel',
'justo', 'nibh', 'facilisis', 'tortor, ', 'sit', 'amet', 'dictum',
'amet', 'arcu.', 'Quisque', 'ultricies', 'justo', 'non', 'neque',
'nibh', 'tincidunt.', 'Curabitur', 'sit', 'amet', 'sem', 'quis',
'vulputate.', 'Mauris', 'a', 'lorem', 'mi.', 'Donec', 'dolor',
'interdum', 'eu', 'scelerisque', 'vel', 'massa.', 'Vestibulum',
'risus', 'vel', 'ipsum', 'suscipit', 'laoreet.', 'Proin', 'congue',
'blandit.', 'Aenean', 'aliquet', 'auctor', 'nibh', 'sit', 'amet',
'Vestibulum', 'ante', 'ipsum', 'primis', 'in', 'faucibus', 'orci',
'posuere', 'cubilia', 'Curae;', 'Donec', 'lacinia', 'tincidunt',
'facilisis', 'nisl', 'eu', 'fermentum.', 'Ut', 'nec', 'laoreet',
'magna', 'egestas', 'nulla', 'pharetra', 'vel', 'egestas', 'tellus',
'Pellentesque', 'sed', 'pharetra', 'orci.', 'Morbi', 'eleifend, ',
'interdum', 'placerat,', 'mi', 'dolor', 'mollis', 'libero',
'quam', 'posuere', 'nisl.', 'Vivamus', 'facilisis', 'aliquam',
'condimentum', 'pulvinar', 'egestas.', 'Lorem', 'ipsum', 'dolor',
'consectetur', 'adipiscing', 'elit.', 'In', 'hac', 'habitasse',
'Aenean', 'blandit', 'lectus', 'et', 'dui', 'tincidunt', 'cursus',
'Suspendisse', 'ipsum', 'dui, ', 'accumsan', 'eget', 'imperdiet',
'est.', 'Integer', 'porta, ', 'ante', 'ac', 'commodo', 'faucibus',
'molestie', 'risus, ', 'a', 'imperdiet', 'eros', 'neque', 'ac',
'nisi', 'leo', 'pretium', 'congue', 'eget', 'quis', 'arcu.', 'Cras'
]
NAMES = [
'Aaron', 'Abbey', 'Adan', 'Adelle', 'Agustin', 'Alan', 'Aleshia',
'Alexia', 'Anderson', 'Ashely', 'Barbara', 'Belen', 'Bernardo',
'Bernie', 'Bethanie', 'Bev', 'Boyd', 'Brad', 'Bret', 'Caleb',
'Cammy', 'Candace', 'Carrol', 'Charlette', 'Charlie', 'Chelsea',
'Chester', 'Claude', 'Daisy', 'David', 'Delila', 'Devorah',
'Edwin', 'Elbert', 'Elisha', 'Elvis', 'Emmaline', 'Erin',
'Eugene', 'Fausto', 'Felix', 'Foster', 'Garrett', 'Garry',
'Garth', 'Gracie', 'Henry', 'Hertha', 'Holly', 'Homer',
'Ileana', 'Isabella', 'Jacalyn', 'Jaime', 'Jeff', 'Jefferey',
'Jefferson', 'Joie', 'Kanesha', 'Kassandra', 'Kirsten', 'Kymberly',
'Lashanda', 'Lean', 'Lonnie', 'Luis', 'Malena', 'Marci', 'Margarett',
'Marvel', 'Marvin', 'Mel', 'Melissia', 'Morton', 'Nickole', 'Nicky',
'Odette', 'Paige', 'Patricia', 'Porsche', 'Rashida', 'Raul',
'Renaldo', 'Rickie', 'Robbin', 'Russel', 'Sabine', 'Sabrina',
'Sacha', 'Sam', 'Sasha', 'Shandi', 'Sherly', 'Stacey', 'Stephania',
'Stuart', 'Talitha', 'Tanesha', 'Tena', 'Tobi', 'Tula', 'Valene',
'Veda', 'Vikki', 'Wanda', 'Wendie', 'Wendolyn', 'Wilda', 'Wiley',
'Willow', 'Yajaira', 'Yasmin', 'Yoshie', 'Zachariah', 'Zenia',
'Allbert', 'Amisano', 'Ammerman', 'Androsky', 'Arrowsmith',
'Bankowski', 'Bleakley', 'Boehringer', 'Brandstetter',
'Capehart', 'Charlesworth', 'Danforth', 'Debernardi',
'Delasancha', 'Denkins', 'Edmunson', 'Ernsberger', 'Faupel',
'Florence', 'Frisino', 'Gardner', 'Ghormley', 'Harrold',
'Hilty', 'Hopperstad', 'Hydrick', 'Jennelle', 'Massari',
'Solinski', 'Swisher', 'Talladino', 'Tatham', 'Thornhill',
'Ulabarro', 'Welander', 'Xander', 'Xavier', 'Xayas', 'Yagecic',
'Yagerita', 'Yamat', 'Ying', 'Yurek', 'Zaborski', 'Zeccardi',
'Zecchini', 'Zimerman', 'Zitzow', 'Zoroiwchak', 'Zullinger', 'Zyskowski'
]
class Command(BaseCommand):
    """Management command that fills the database with randomized test data.

    This creates users and, optionally, review requests, diffs, reviews,
    and diff comments for each user, with counts picked randomly from the
    ``min:max`` ranges given on the command line.
    """

    help = 'Populates the database with the specified fields'

    def add_arguments(self, parser):
        """Add arguments to the command.
        Args:
            parser (argparse.ArgumentParser):
                The argument parser for the command.
        """
        parser.add_argument(
            '-u',
            '--users',
            type=int,
            default=None,
            dest='users',
            help='The number of users to add')
        parser.add_argument(
            '--review-requests',
            default=None,
            dest='review_requests',
            help='The number of review requests per user [min:max]')
        parser.add_argument(
            '--diffs',
            default=None,
            dest='diffs',
            help='The number of diff per review request [min:max]')
        # The stray trailing commas that previously followed the next two
        # calls (turning the statements into pointless one-element tuples)
        # have been removed.
        parser.add_argument(
            '--reviews',
            default=None,
            dest='reviews',
            help='The number of reviews per diff [min:max]')
        parser.add_argument(
            '--diff-comments',
            default=None,
            dest='diff_comments',
            help='The number of comments per diff [min:max]')
        parser.add_argument(
            '-p',
            '--password',
            default=None,
            dest='password',
            help='The login password for users created')

    @transaction.atomic
    def handle(self, users=None, review_requests=None, diffs=None,
               reviews=None, diff_comments=None, password=None,
               verbosity=NORMAL, **options):
        """Populate the database with randomized users and review data.

        Raises:
            django.core.management.base.CommandError:
                A required resource was missing or an option was invalid.
                Raising aborts the surrounding transaction, reverting any
                partial changes.
        """
        num_of_requests = None
        num_of_diffs = None
        num_of_reviews = None
        num_of_diff_comments = None
        random.seed()
        if review_requests:
            num_of_requests = self.parse_command("review_requests",
                                                 review_requests)
            # Setup repository.
            repo_dir = os.path.abspath(
                os.path.join(sys.argv[0], "..", "scmtools", "testdata",
                             "git_repo"))
            # Throw exception on error so transaction reverts.
            if not os.path.exists(repo_dir):
                raise CommandError("No path to the repository")
            scmtool = scmtools_registry.get_by_name('Git')
            self.repository = Repository.objects.create(
                name='Test Repository',
                path=repo_dir,
                tool=Tool.objects.get(name='Git'),
                scmtool_id=scmtool.scmtool_id)
        if diffs:
            num_of_diffs = self.parse_command("diffs", diffs)
            # Create the diff directory locations.
            diff_dir_tmp = os.path.abspath(
                os.path.join(sys.argv[0], "..", "reviews", "management",
                             "commands", "diffs"))
            # Throw exception on error so transaction reverts.
            if not os.path.exists(diff_dir_tmp):
                raise CommandError("Diff dir does not exist")
            diff_dir = diff_dir_tmp + '/'  # Add trailing slash.
            # Get a list of the appropriate files.
            files = [f for f in os.listdir(diff_dir)
                     if f.endswith('.diff')]
            # Check for any diffs in the files.
            if len(files) == 0:
                raise CommandError("No diff files in this directory")
        if reviews:
            num_of_reviews = self.parse_command("reviews", reviews)
        if diff_comments:
            num_of_diff_comments = self.parse_command("diff-comments",
                                                      diff_comments)
        # Users is required for any other operation.
        if not users:
            raise CommandError("At least one user must be added")
        # Start adding data to the database.
        for i in range(1, users + 1):
            new_user = User.objects.create(
                username=self.rand_username(),  # Avoids having to flush db.
                first_name=random.choice(NAMES),
                last_name=random.choice(NAMES),
                email="test@example.com",
                is_staff=False,
                is_active=True,
                is_superuser=False)
            # Use the requested password, falling back to a default.
            new_user.set_password(password or "test1")
            new_user.save()
            Profile.objects.create(
                user=new_user,
                first_time_setup_done=True,
                collapsed_diffs=True,
                wordwrapped_diffs=True,
                syntax_highlighting=True,
                show_closed=True)
            # Review Requests.
            req_val = self.pick_random_value(num_of_requests)
            if int(verbosity) > NORMAL:
                self.stdout.write("For user %s:%s" % (i, new_user.username))
                self.stdout.write("=============================")
            for j in range(0, req_val):
                if int(verbosity) > NORMAL:
                    self.stdout.write("Request #%s:" % j)
                review_request = ReviewRequest.objects.create(new_user, None)
                review_request.public = True
                review_request.summary = self.lorem_ipsum("summary")
                review_request.description = self.lorem_ipsum("description")
                review_request.shipit_count = 0
                review_request.repository = self.repository
                # Set the targeted reviewer to superuser or 1st defined.
                if j == 0:
                    review_request.target_people.add(User.objects.get(pk=1))
                review_request.save()
                # Add the diffs if any to add.
                diff_val = self.pick_random_value(num_of_diffs)
                # If adding diffs add history.
                if diff_val > 0:
                    diffset_history = DiffSetHistory.objects.create(
                        name='testDiffFile' + str(i))
                    diffset_history.save()
                # Won't execute if diff_val is 0, ie: no diffs requested.
                for k in range(0, diff_val):
                    if int(verbosity) > NORMAL:
                        self.stdout.write("%s:\tDiff #%s" % (i, k))
                    file_to_open = diff_dir + random.choice(files)
                    # Use a context manager so the file handle is closed
                    # even if form validation or diff creation raises.
                    with open(file_to_open, 'r') as f:
                        form = UploadDiffForm(review_request=review_request,
                                              files={"path": File(f)})
                        if form.is_valid():
                            cur_diff = form.create(f, None, diffset_history)
                            review_request.diffset_history = diffset_history
                            review_request.save()
                            review_request.publish(new_user)
                    # Add the reviews if any.
                    review_val = self.pick_random_value(num_of_reviews)
                    for l in range(0, review_val):
                        if int(verbosity) > NORMAL:
                            self.stdout.write("%s:%s:\t\tReview #%s:" %
                                              (i, j, l))
                        # Named ``review`` (not ``reviews``) to avoid
                        # shadowing this method's ``reviews`` parameter.
                        review = Review.objects.create(
                            review_request=review_request,
                            user=new_user)
                        review.publish(new_user)
                        # Add comments if any.
                        comment_val = self.pick_random_value(
                            num_of_diff_comments)
                        for m in range(0, comment_val):
                            if int(verbosity) > NORMAL:
                                self.stdout.write("%s:%s:\t\t\tComments #%s" %
                                                  (i, j, m))
                            if m == 0:
                                file_diff = cur_diff.files.order_by('id')[0]
                            # Choose random lines to comment.
                            # Max lines: should be mod'd in future to read
                            # diff.
                            max_lines = 220
                            first_line = random.randrange(1, max_lines - 1)
                            remain_lines = max_lines - first_line
                            num_lines = random.randrange(1, remain_lines)
                            diff_comment = Comment.objects.create(
                                filediff=file_diff,
                                text="comment number %s" % (m + 1),
                                first_line=first_line,
                                num_lines=num_lines)
                            review_request.publish(new_user)
                            review.comments.add(diff_comment)
                            review.save()
                            review.publish(new_user)
                            db.reset_queries()
                        # No comments, so have previous layer clear queries.
                        if comment_val == 0:
                            db.reset_queries()
                    if review_val == 0:
                        db.reset_queries()
                if diff_val == 0:
                    db.reset_queries()
            if req_val == 0:
                db.reset_queries()
            # Generate output as users & data is created.
            if req_val != 0:
                self.stdout.write("user %s created with %s requests"
                                  % (new_user.username, req_val))
            else:
                self.stdout.write("user %s created successfully"
                                  % new_user.username)

    def parse_command(self, com_arg, com_string):
        """Parse the values given in the command line.

        Returns a tuple of one or two ints parsed from a ``min:max``
        string; raises CommandError on malformed input.
        """
        try:
            return tuple((int(item.strip()) for item in com_string.split(':')))
        except ValueError:
            raise CommandError('You failed to provide "%s" with one or two '
                               'values of type int.\nExample: --%s=2:5'
                               % (com_arg, com_arg))

    def rand_username(self):
        """Used to generate random usernames so no flushing needed."""
        return ''.join(random.choice(string.ascii_lowercase)
                       for x in range(0, random.randrange(5, 9)))

    def pick_random_value(self, value):
        """Pick a random value out of a range.
        If the 'value' tuple is empty, this returns 0. If 'value' contains a
        single number, this returns that number. Otherwise, this returns a
        random number between the two given numbers.
        """
        if not value:
            return 0
        if len(value) == 1:
            return value[0]
        return random.randrange(value[0], value[1])

    def lorem_ipsum(self, ipsum_type):
        """Create some random text for summary/description."""
        if ipsum_type == "description":
            max_size = DESCRIPTION_SIZE
        else:
            max_size = SUMMARY_SIZE
        return ' '.join(random.choice(LOREM_VOCAB)
                        for x in range(0, max_size))
| mit | 44785442dc14b0ac188f18e457071f28 | 41.439791 | 79 | 0.51098 | 3.601066 | false | false | false | false |
reviewboard/reviewboard | reviewboard/admin/templatetags/rbadmintags.py | 1 | 11584 | import re
from django import template
from django.contrib import messages
from django.contrib.admin.templatetags.admin_urls import (
add_preserved_filters,
admin_urlquote)
from django.contrib.auth.models import User
from django.template.context import RequestContext
from django.urls import reverse
from django.utils.safestring import mark_safe
from djblets.util.templatetags.djblets_js import json_dumps_items
from reviewboard import get_version_string
from reviewboard.admin.forms.change_form import ChangeFormFieldset
from reviewboard.hostingsvcs.models import HostingServiceAccount
from reviewboard.notifications.models import WebHookTarget
from reviewboard.oauth.models import Application
from reviewboard.reviews.models import DefaultReviewer, Group
from reviewboard.scmtools.models import Repository
from reviewboard.site.urlresolvers import local_site_reverse
register = template.Library()
@register.inclusion_tag('admin/subnav_item.html', takes_context=True)
def admin_subnav(context, url_name, name, icon=""):
    """Return an <li> containing a link to the desired setting tab."""
    request = context.get('request')
    url = local_site_reverse(url_name, request=request)

    # The tab is highlighted as current only when the resolved URL matches
    # the page being viewed.
    is_current = request is not None and url == request.path

    return RequestContext(request, {
        'url': url,
        'name': name,
        'current': is_current,
        'icon': icon,
    })
@register.inclusion_tag('admin/sidebar.html', takes_context=True)
def admin_sidebar(context):
    """Render the admin sidebar.

    This includes the configuration links and setting indicators.
    """
    request = context.get('request')

    sidebar_context = {
        'count_users': User.objects.count(),
        'count_review_groups': Group.objects.count(),
        'count_default_reviewers': DefaultReviewer.objects.count(),
        'count_oauth_applications': Application.objects.count(),
        'count_repository': Repository.objects.accessible(
            request.user, visible_only=False).count(),
        'count_webhooks': WebHookTarget.objects.count(),
        'count_hosting_accounts': HostingServiceAccount.objects.count(),
        'version': get_version_string(),
    }

    # URLs are computed here, rather than in the template, so that reverse()
    # searches all available URL patterns and not just the ones bound to
    # request.current_app.
    #
    # current_app gets set by AdminSite views, and if we're in an extension's
    # AdminSite view, we'd fail to resolve these URLs from within the
    # template. Calling reverse() ourselves avoids that problem.
    admin_url_names = (
        'auth_user_add',
        'auth_user_changelist',
        'hostingsvcs_hostingserviceaccount_add',
        'hostingsvcs_hostingserviceaccount_changelist',
        'notifications_webhooktarget_add',
        'notifications_webhooktarget_changelist',
        'oauth_application_add',
        'oauth_application_changelist',
        'reviews_defaultreviewer_add',
        'reviews_defaultreviewer_changelist',
        'reviews_group_add',
        'reviews_group_changelist',
        'scmtools_repository_add',
        'scmtools_repository_changelist',
    )

    for url_name in admin_url_names:
        sidebar_context['url_%s' % url_name] = reverse('admin:%s' % url_name)

    return RequestContext(request, sidebar_context)
@register.simple_tag
def alert_css_classes_for_message(message):
    """Render the CSS classes for a rb-c-alert from a Django Message.

    This helps to craft an alert that reflects the status of a
    :py:class:`~django.contrib.messages.storage.base.Message`.

    The result contains a CSS modifier class reflecting the message's
    level, followed by any extra tags defined on the message.

    Args:
        message (django.contrib.messages.storage.base.Message):
            The message to render classes for.

    Returns:
        unicode:
        A space-separated list of classes.
    """
    level_to_class = {
        messages.DEBUG: '-is-info',
        messages.INFO: '-is-info',
        messages.SUCCESS: '-is-success',
        messages.WARNING: '-is-warning',
        messages.ERROR: '-is-error',
    }

    css_classes = [level_to_class[message.level]]

    if message.extra_tags:
        css_classes.append(message.extra_tags)

    return ' '.join(css_classes)
@register.filter
def split_error_title_text(error):
    """Split an exception's text into a title and body text.

    Args:
        error (Exception):
            The error containing text to split.

    Returns:
        list:
        A one- or two-element list containing:

        1. The title text (the first line of the message).
        2. The rest of the error message, present only when the message
           spans multiple lines.
    """
    message = str(error)

    return message.split('\n', 1)
@register.simple_tag()
def process_result_headers(result_headers):
    """Process a Django ChangeList's result headers to aid in rendering.

    This rewrites each header's CSS class attribute and attaches a sort
    icon name, so our templates can render a datagrid more effectively.

    Args:
        result_headers (list of dict):
            The result headers to modify.
    """
    class_attrib_re = re.compile(r'\s*class="([^"]+)"')

    for header in result_headers:
        match = class_attrib_re.match(header['class_attrib'])

        if match:
            class_value = match.group(1)
        else:
            class_value = ''

        # Every column except the action checkbox shows a label.
        if class_value != 'action-checkbox-column':
            class_value = 'has-label %s' % class_value

        header['class_attrib'] = \
            mark_safe(' class="datagrid-header %s"' % class_value)

        if header['sortable'] and header['sort_priority'] > 0:
            direction = 'asc' if header['ascending'] else 'desc'
            priority = ('primary'
                        if header['sort_priority'] == 1
                        else 'secondary')

            header['sort_icon'] = ('datagrid-icon-sort-%s-%s'
                                   % (direction, priority))

    return ''
@register.simple_tag(takes_context=True)
def changelist_js_model_attrs(context):
    """Return serialized JSON attributes for the RB.Admin.ChangeListPage model.

    These will all be passed to the :js:class:`RB.Admin.ChangeListPage`
    constructor.

    Args:
        context (django.template.Context):
            The context for the page.

    Returns:
        django.utils.safestring.SafeText:
        A string containing the JSON attributes for the page model.
    """
    cl = context['cl']
    model_data = {
        'modelName': cl.opts.verbose_name,
        'modelNamePlural': cl.opts.verbose_name_plural,
    }

    action_form = context.get('action_form')

    if action_form is not None:
        actions = []

        # Only keep real actions; the blank placeholder choice has an
        # empty ID.
        for action_id, action_label in action_form.fields['action'].choices:
            if action_id:
                actions.append({
                    'id': action_id,
                    'label': action_label,
                })

        model_data['actions'] = actions

    return json_dumps_items(model_data)
@register.inclusion_tag('admin/submit_line.html', takes_context=True)
def change_form_submit_buttons(context):
    """Return HTML for a change form's submit buttons.

    This will compute the correct set of Save/Delete buttons, based on whether
    this is rendering for a Django admin change form (taking into account
    the object's state and user's permissions) or for any other type of form.

    Args:
        context (django.template.Context):
            The context for the page.

    Returns:
        django.utils.safestring.SafeText:
            A string containing the submit buttons.
    """
    show_save = context.get('show_save', True)
    delete_url = None

    if 'change' in context:
        # We're rendering a Django admin change form. Derive the button
        # state from the admin-provided context variables.
        change = context['change']
        is_popup = context['is_popup']

        # NOTE(review): this value is overwritten by both branches below,
        # so this assignment appears to be redundant.
        show_delete = context.get('show_delete', True)

        if is_popup:
            # Popup windows only ever offer a plain Save button.
            show_delete = False
            show_save_as_new = False
            show_save_and_add_another = False
            show_save_and_continue = False
        else:
            save_as = context['save_as']
            opts = context['opts']
            original = context['original']

            # Deletion requires an existing object, a context that allows
            # it, and the user's delete permission.
            show_delete = (
                change and
                context.get('show_delete', True) and
                context['has_delete_permission'])
            show_save_as_new = (
                save_as and
                change)
            show_save_and_add_another = (
                (not save_as or context['add']) and
                context['has_add_permission'])
            show_save_and_continue = (
                context.get('show_save_and_continue', True) and
                context['has_change_permission'])

            if show_delete:
                assert original is not None

                # Build the admin delete URL for this object, preserving
                # any changelist filters the user had applied.
                request = context['request']
                delete_urlname = '%s:%s_%s_delete' % (
                    request.current_app,
                    opts.app_label,
                    opts.model_name)

                delete_url = add_preserved_filters(
                    context,
                    reverse(delete_urlname,
                            args=[admin_urlquote(original.pk)]))
    else:
        # Not an admin change form. Fall back to explicit context flags,
        # all of which default to hidden.
        delete_url = context.get('delete_url', '#')
        show_delete = context.get('show_delete', False)
        show_save_as_new = context.get('show_save_as_new', False)
        show_save_and_add_another = context.get('show_save_and_add_another',
                                                False)
        show_save_and_continue = context.get('show_save_and_continue', False)

    return {
        'delete_url': delete_url,
        'show_delete_link': show_delete,
        'show_save': show_save,
        'show_save_and_add_another': show_save_and_add_another,
        'show_save_and_continue': show_save_and_continue,
        'show_save_as_new': show_save_as_new,
    }
@register.filter
def change_form_fieldsets(admin_form):
    """Iterate through all fieldsets in an administration change form.

    Each fieldset in the form is provided as a
    :py:class:`~reviewboard.admin.forms.change_form.ChangeFormFieldset`.

    Args:
        admin_form (django.contrib.admin.helpers.AdminForm):
            The administration form.

    Yields:
        reviewboard.admin.forms.change_form.ChangeFormFieldset:
            Each fieldset in the form.
    """
    for name, options in admin_form.fieldsets:
        yield ChangeFormFieldset(form=admin_form.form,
                                 name=name,
                                 readonly_fields=admin_form.readonly_fields,
                                 model_admin=admin_form.model_admin,
                                 **options)
@register.simple_tag(takes_context=True)
def render_change_form_fieldset(context, fieldset):
    """Render a Change Form fieldset to HTML.

    This delegates to
    :py:meth:`ChangeFormFieldset.render()
    <reviewboard.admin.forms.change_form.ChangeFormFieldset.render>`.

    Args:
        context (django.template.Context):
            The current template context.

        fieldset (reviewboard.admin.forms.change_form.ChangeFormFieldset):
            The fieldset to render.

    Returns:
        django.utils.safestring.SafeText:
        The resulting HTML for the fieldset.
    """
    rendered = fieldset.render(context)

    return rendered
| mit | d6b4bc3242cb9d8e82d6d1a8a55356b2 | 32.287356 | 79 | 0.607131 | 4.230825 | false | false | false | false |
reviewboard/reviewboard | reviewboard/webapi/resources/review_draft.py | 1 | 1253 | from django.core.exceptions import ObjectDoesNotExist
from djblets.webapi.decorators import webapi_login_required
from djblets.webapi.errors import DOES_NOT_EXIST
from reviewboard.webapi.base import WebAPIResource
from reviewboard.webapi.decorators import webapi_check_local_site
from reviewboard.webapi.resources import resources
class ReviewDraftResource(WebAPIResource):
    """A redirecting resource that points to the current draft review."""

    name = 'review_draft'
    singleton = True
    uri_name = 'draft'

    @webapi_check_local_site
    @webapi_login_required
    def get(self, request, *args, **kwargs):
        """Returns an HTTP redirect to the current draft review."""
        try:
            review_request = resources.review_request.get_object(
                request, *args, **kwargs)
            review = review_request.get_pending_review(request.user)
        except ObjectDoesNotExist:
            return DOES_NOT_EXIST

        if not review:
            # The user has no pending (draft) review on this review request.
            return DOES_NOT_EXIST

        location = self._build_redirect_with_args(
            request,
            resources.review.get_href(review, request, *args, **kwargs))

        return 302, {}, {
            'Location': location,
        }


review_draft_resource = ReviewDraftResource()
| mit | 5bdd5ada4e449122952d002ad0783095 | 32.864865 | 77 | 0.671987 | 4.135314 | false | false | false | false |
reviewboard/reviewboard | reviewboard/hostingsvcs/assembla.py | 1 | 7297 | from django import forms
from django.utils.translation import gettext_lazy as _
from reviewboard.admin.server import get_hostname
from reviewboard.hostingsvcs.forms import HostingServiceForm
from reviewboard.hostingsvcs.service import HostingService
from reviewboard.scmtools.crypto_utils import (decrypt_password,
encrypt_password)
class AssemblaForm(HostingServiceForm):
    assembla_project_id = forms.CharField(
        label=_('Project ID'),
        max_length=64,
        required=True,
        widget=forms.TextInput(attrs={'size': '60'}),
        help_text=_(
            "The project ID, as shown in the URL "
            "(https://www.assembla.com/spaces/<b><project_id></b>), or "
            "your Perforce repository's Depot Host."))

    def save(self, repository):
        """Save the Assembla repository form.

        Perforce repositories hosted on Assembla require ticket-based
        authentication along with Assembla-specific host and client
        settings, so those values are forced here.

        Args:
            repository (reviewboard.scmtools.models.Repository):
                The repository being saved.
        """
        super(AssemblaForm, self).save(repository)

        if repository.get_scmtool().name != 'Perforce':
            return

        project_id = self.cleaned_data['assembla_project_id']

        repository.extra_data.update({
            'use_ticket_auth': True,
            'p4_host': project_id,
            'p4_client': Assembla.make_p4_client_name(project_id),
        })
class Assembla(HostingService):
    """Hosting service support for Assembla.com.

    Assembla is a hosting service that offers, amongst other features,
    Perforce, Subversion, and Git repository support.

    They do not have much of an API that we can take advantage of, so it's
    impossible for us to support Git. However, Perforce and Subversion work.
    """

    name = 'Assembla'
    hosting_service_id = 'assembla'

    needs_authorization = True
    supports_bug_trackers = True
    supports_repositories = True
    supported_scmtools = ['Perforce', 'Subversion']

    form = AssemblaForm

    # Templates for each SCM's repository path. All Assembla Perforce
    # repositories share one server address; Subversion paths embed the
    # project ID.
    repository_fields = {
        'Perforce': {
            'path': 'perforce.assembla.com:1666',
        },
        'Subversion': {
            'path': 'https://subversion.assembla.com/svn/'
                    '%(assembla_project_id)s/',
        },
    }

    # Template for ticket URLs on Assembla's bundled bug tracker. The
    # trailing %%s escapes to a literal %s for later ticket-ID expansion.
    bug_tracker_field = (
        'https://www.assembla.com/spaces/%(assembla_project_id)s/'
        'tickets/%%s'
    )

    @classmethod
    def make_p4_client_name(cls, project_id):
        """Return a new P4CLIENT value from the hostname and project ID.

        The client name will consist of the Review Board server's hostname
        and a sanitized version of the project ID.

        Args:
            project_id (unicode):
                The project ID provided by Assembla. This is equivalent to the
                P4HOST value for Perforce.

        Returns:
            unicode:
            A new Perforce client name.
        """
        # Slashes aren't valid in client names, so flatten them to dashes.
        return '%s-%s' % (get_hostname(), project_id.replace('/', '-'))

    def check_repository(self, path, username, password, scmtool_class,
                         local_site_name, assembla_project_id=None,
                         *args, **kwargs):
        """Check the validity of a repository hosted on Assembla.

        Perforce repositories are handled specially. The Assembla project ID
        will be used as the Perforce host, which is needed to tell Assembla
        which repository on the server to use.

        Args:
            path (unicode):
                The repository path.

            username (unicode):
                The username used for authenticating.

            password (unicode):
                The password used for authenticating.

            scmtool_class (type):
                The SCMTool for the repository.

            local_site_name (unicode):
                The name of the Local Site, if any.

            assembla_project_id (unicode):
                The project ID for the Assembla team.

            *args (tuple):
                Additional arguments to pass to the superclass.

            **kwargs (dict):
                Additional keyword arguments to pass to the superclass.
        """
        # We want to use the configured username and other information from
        # the account. The caller-provided username/password arguments are
        # deliberately replaced here.
        username = self.account.username
        password = self.get_password()

        if scmtool_class.name == 'Perforce':
            # Talk to the SCMTool directly, injecting the Assembla-specific
            # P4HOST/P4CLIENT values needed to select the repository.
            scmtool_class.check_repository(
                path=path,
                username=username,
                password=password,
                local_site_name=local_site_name,
                p4_host=assembla_project_id,
                p4_client=self.make_p4_client_name(assembla_project_id))
        else:
            # NOTE(review): any extra positional args received here are not
            # forwarded to the superclass -- confirm that is intended.
            super(Assembla, self).check_repository(
                path=path,
                username=username,
                password=password,
                local_site_name=local_site_name,
                scmtool_class=scmtool_class,
                **kwargs)

    def authorize(self, username, password, *args, **kwargs):
        """Authorize the Assembla account.

        For Assembla, we simply use the native SCMTool support, as there's
        no useful API available. We just store the password encrypted, which
        will be used by the SCMTool.

        Args:
            username (unicode):
                The username for authentication.

            password (unicode):
                The password for authentication.

            *args (tuple):
                Additional arguments.

            **kwargs (dict):
                Additional keyword arguments.
        """
        self.account.data['password'] = encrypt_password(password)
        self.account.save()

    def is_authorized(self):
        """Return if the account has a password set.

        Returns:
            bool:
            ``True`` if a password is set, or ``False`` if one has not yet
            been set.
        """
        return self.account.data.get('password') is not None

    def get_password(self):
        """Return the password for this account.

        This is needed for Perforce and Subversion.

        Returns:
            unicode:
            The stored password for the account.
        """
        return decrypt_password(self.account.data['password'])

    @classmethod
    def get_repository_fields(cls, tool_name=None, *args, **kwargs):
        """Return values for the fields in the repository form.

        This forces the encoding value to "utf8" on Perforce, which is needed
        by Assembla.

        Args:
            tool_name (unicode):
                The name of the SCMTool for the repository.

            *args (tuple):
                Additional arguments.

            **kwargs (dict):
                Additional keyword arguments.

        Returns:
            dict:
            The resulting repository field values.
        """
        data = super(Assembla, cls).get_repository_fields(tool_name=tool_name,
                                                          *args, **kwargs)

        if tool_name == 'Perforce':
            # Assembla's Perforce servers require a utf8 client encoding.
            data['encoding'] = 'utf8'

        return data
| mit | 84b17b0f1434ed265fe38ad9d41a1f26 | 31.431111 | 78 | 0.573797 | 4.509889 | false | false | false | false |
reviewboard/reviewboard | reviewboard/webapi/resources/repository_user.py | 1 | 11412 | """API resource for managing a repository's user ACL.
Version Added:
4.0.11
"""
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from djblets.util.decorators import augment_method_from
from djblets.webapi.decorators import (webapi_login_required,
webapi_request_fields,
webapi_response_errors)
from djblets.webapi.errors import (DOES_NOT_EXIST, NOT_LOGGED_IN,
PERMISSION_DENIED)
from djblets.webapi.fields import BooleanFieldType, StringFieldType
from reviewboard.webapi.decorators import webapi_check_local_site
from reviewboard.webapi.errors import INVALID_USER
from reviewboard.webapi.resources import resources
from reviewboard.webapi.resources.user import UserResource
class RepositoryUserResource(UserResource):
    """Provides information on users who are allowed access to a repository.

    Version Added:
        4.0.11
    """

    name = 'repository_user'
    item_result_key = 'user'
    list_result_key = 'users'
    uri_name = 'users'

    # We do not want the watched resource to be available under this resource
    # as it will have the wrong URL and does not make sense as a sub-resource;
    # we will be serializing a link to the user resource and it can be found
    # from there.
    item_child_resources = []

    allowed_methods = ('GET', 'POST', 'DELETE')

    policy_id = 'repository_user'

    def get_queryset(self, request, *args, **kwargs):
        """Return a queryset for the repository users.

        Args:
            request (django.http.HttpRequest):
                The HTTP request from the client.

            *args (tuple):
                Positional arguments passed through to the parent resource.

            **kwargs (dict):
                Keyword arguments passed through to the parent resource.

        Returns:
            django.db.models.query.QuerySet:
            The queryset for the users.
        """
        try:
            repository = resources.repository.get_object(request, *args,
                                                         **kwargs)
        except ObjectDoesNotExist:
            # NOTE(review): this returns a WebAPIError rather than a
            # queryset, matching RepositoryGroupResource.get_queryset() --
            # confirm the callers handle this as intended.
            return DOES_NOT_EXIST

        return repository.users.all()

    def get_href_parent_ids(self, obj, **kwargs):
        """Return the href parent IDs for the object.

        Args:
            obj (django.contrib.auth.models.User):
                The user.

            **kwargs (dict):
                Additional keyword arguments.

        Returns:
            dict:
            The parent IDs to be used to determine the href of the resource.
        """
        # Since we do not have a direct link to the model parent (the
        # Repository.users field is a many-to-many field so we cannot use it
        # because the reverse relation is not unique), we have to manually
        # generate the parent IDs from the parent resource.
        parent_id_key = self._parent_resource.uri_object_key

        return {
            parent_id_key: kwargs[parent_id_key],
        }

    def get_related_links(self, obj=None, request=None, *args, **kwargs):
        """Return the related links for the resource.

        Args:
            obj (django.contrib.auth.models.User, optional):
                The user for which links are being generated.

            request (django.http.HttpRequest, optional):
                The current HTTP request.

            *args (tuple):
                Additional positional arguments.

            **kwargs (dict):
                Additional keyword arguments.

        Returns:
            dict:
            The related links for the resource.
        """
        links = super(RepositoryUserResource, self).get_related_links(
            obj, request=request, *args, **kwargs)

        # We only want the 'user' link when this is an item resource.
        if self.uri_object_key in kwargs:
            username = kwargs[self.uri_object_key]
            links['user'] = {
                'href': resources.user.get_item_url(username=username),
                'method': 'GET',
            }

        return links

    def get_serializer_for_object(self, obj):
        """Return the serializer for an object.

        If the object is a :py:class:`~django.contrib.auth.models.User`
        instance, we will serialize it (instead of the
        :py:class:`~reviewboard.webapi.resources.user.UserResource` resource
        so that the links will be correct. Otherwise, the POST and DELETE links
        will be for the actual user instead of for this resource.

        Args:
            obj (django.db.models.base.Model):
                The model being serialized.

        Returns:
            djblets.webapi.resources.base.WebAPIResource:
            The resource that should be used to serialize the object.
        """
        if isinstance(obj, User):
            return self

        return super(RepositoryUserResource, self).get_serializer_for_object(
            obj)

    def has_access_permissions(self, request, user, *args, **kwargs):
        """Return whether the item resource can be accessed.

        Args:
            request (django.http.HttpRequest):
                The HTTP request from the client.

            user (django.contrib.auth.models.User, unused):
                The user in the resource item URL. This is unused because we
                only care about repository-level access here.

            *args (tuple):
                Positional arguments to pass to the parent resource.

            **kwargs (dict):
                Keyword arguments to pass to the parent resource.

        Returns:
            bool:
            Whether the current user can access the item resource.
        """
        repository = resources.repository.get_object(request, *args, **kwargs)

        return repository.is_mutable_by(request.user)

    def has_list_access_permissions(self, request, *args, **kwargs):
        """Return whether the list resource can be accessed.

        Args:
            request (django.http.HttpRequest):
                The HTTP request from the client.

            *args (tuple):
                Positional arguments to pass to the parent resource.

            **kwargs (dict):
                Keyword arguments to pass to the parent resource.

        Returns:
            bool:
            Whether the current user can access the list resource.
        """
        repository = resources.repository.get_object(request, *args, **kwargs)

        return repository.is_mutable_by(request.user)

    def has_modify_permissions(self, request, *args, **kwargs):
        """Return whether the resource can be modified.

        Args:
            request (django.http.HttpRequest):
                The HTTP request from the client.

            *args (tuple):
                Positional arguments to pass to the parent resource.

            **kwargs (dict):
                Keyword arguments to pass to the parent resource.

        Returns:
            bool:
            Whether the current user can modify the resource.
        """
        repository = resources.repository.get_object(request, *args, **kwargs)

        return repository.is_mutable_by(request.user)

    def has_delete_permissions(self, request, *args, **kwargs):
        """Return whether the resource can be deleted.

        Args:
            request (django.http.HttpRequest):
                The HTTP request from the client.

            *args (tuple):
                Positional arguments to pass to the parent resource.

            **kwargs (dict):
                Keyword arguments to pass to the parent resource.

        Returns:
            bool:
            Whether the current user can delete the resource.
        """
        repository = resources.repository.get_object(request, *args, **kwargs)

        return repository.is_mutable_by(request.user)

    @webapi_check_local_site
    @webapi_login_required
    @webapi_response_errors(DOES_NOT_EXIST, INVALID_USER, NOT_LOGGED_IN,
                            PERMISSION_DENIED)
    @webapi_request_fields(required={
        'username': {
            'type': StringFieldType,
            'description': 'The user to add to the repository ACL.',
        },
    })
    def create(self, request, username, local_site_name=None, *args, **kwargs):
        """Adds a user to the repository ACL."""
        repo_resource = resources.repository

        try:
            repository = repo_resource.get_object(
                request, local_site_name=local_site_name, *args, **kwargs)
        except ObjectDoesNotExist:
            return DOES_NOT_EXIST

        if not repo_resource.has_modify_permissions(request, repository):
            return self.get_no_access_error(request)

        local_site = self._get_local_site(local_site_name)

        try:
            if local_site:
                user = local_site.users.get(username=username)
            else:
                user = User.objects.get(username=username)
        except ObjectDoesNotExist:
            return INVALID_USER

        repository.users.add(user)

        return 201, {
            self.item_result_key: user,
        }

    @webapi_check_local_site
    @webapi_login_required
    @webapi_response_errors(DOES_NOT_EXIST, NOT_LOGGED_IN, PERMISSION_DENIED)
    def delete(self, request, *args, **kwargs):
        """Removes a user from the repository ACL."""
        repo_resource = resources.repository

        try:
            repository = repo_resource.get_object(request, *args, **kwargs)
            user = self.get_object(request, *args, **kwargs)
        except ObjectDoesNotExist:
            return DOES_NOT_EXIST

        if not repo_resource.has_modify_permissions(request, repository):
            return self.get_no_access_error(request)

        repository.users.remove(user)

        return 204, {}

    @webapi_check_local_site
    @webapi_request_fields(optional={
        'fullname': {
            'type': BooleanFieldType,
            # This description was previously empty; it now matches the
            # behavior documented in get_list() below.
            'description': 'Specifies whether ``q`` should also match the '
                           'beginning of the first name or last name. '
                           'Ignored if ``q`` is not set.',
        },
        'q': {
            'type': StringFieldType,
            'description': 'Limit the results to usernames starting with the '
                           'provided value. This is case-insensitive.',
        },
    })
    @augment_method_from(UserResource)
    def get_list(self, *args, **kwargs):
        """Retrieves the list of users allowed access to a repository.

        This includes only the users who have active accounts on the site.
        Any account that has been disabled (for inactivity, spam reasons,
        or anything else) will be excluded from the list.

        The list of users can be filtered down using the ``q`` and
        ``fullname`` parameters.

        Setting ``q`` to a value will by default limit the results to
        usernames starting with that value. This is a case-insensitive
        comparison.

        If ``fullname`` is set to ``1``, the first and last names will also be
        checked along with the username. ``fullname`` is ignored if ``q``
        is not set.

        For example, accessing ``/api/users/?q=bo&fullname=1`` will list
        any users with a username, first name or last name starting with
        ``bo``.
        """
        pass


repository_user_resource = RepositoryUserResource()
| mit | d1c6c1fdee628304c708c2537b8d021c | 33.27027 | 79 | 0.59972 | 4.570284 | false | false | false | false |
reviewboard/reviewboard | reviewboard/scmtools/errors.py | 1 | 7623 | from django.utils.encoding import force_str
from django.utils.translation import gettext as _
from reviewboard.ssh.errors import SSHAuthenticationError
class SCMError(Exception):
    """Base class for all SCM-related errors."""

    def __init__(self, msg):
        super(SCMError, self).__init__(msg)
class ChangeSetError(SCMError):
    """Base class for errors involving server-side changesets."""
    pass
class InvalidChangeNumberError(ChangeSetError):
    """An error raised when a changeset number is unknown or invalid."""

    def __init__(self):
        super(InvalidChangeNumberError, self).__init__(None)
class ChangeNumberInUseError(ChangeSetError):
    """An error raised when a changeset number is already in use.

    Attributes:
        review_request (reviewboard.reviews.models.ReviewRequest):
            The review request already associated with the changeset
            number, if known.
    """

    def __init__(self, review_request=None):
        super(ChangeNumberInUseError, self).__init__(None)

        self.review_request = review_request
class EmptyChangeSetError(ChangeSetError):
    """An error raised when a changeset contains no files."""

    def __init__(self, changenum):
        msg = _('Changeset %s is empty') % changenum

        super(EmptyChangeSetError, self).__init__(msg)
class InvalidRevisionFormatError(SCMError):
    """Indicates that a revision isn't in a recognizable format."""

    def __init__(self, path, revision, detail=None):
        """Initialize the exception.

        Args:
            path (bytes or unicode):
                The path the revision was for.

            revision (bytes or unicode):
                The revision that was invalid.

            detail (unicode, optional):
                Additional detail to display after the standard error message.
        """
        # Normalize both values to text so the message renders cleanly
        # regardless of what the caller passed in.
        path = force_str(path)
        revision = force_str(revision)

        msg = _("The revision '%(revision)s' for '%(path)s' isn't in a valid "
                "format") % {
            'path': path,
            'revision': revision,
        }

        if detail:
            msg = '%s: %s' % (msg, detail)

        super(InvalidRevisionFormatError, self).__init__(msg)

        self.detail = detail
        self.path = path
        self.revision = revision
class FileNotFoundError(SCMError):
    """An error indicating a file was not found in a repository.

    Note that this class shadows the Python builtin
    :py:class:`FileNotFoundError` within this module.

    Attributes:
        base_commit_id (unicode):
            The optional ID of the base commit the file and revision belonged
            to.

        context (reviewboard.scmtools.core.FileLookupContext, optional):
            Extra context used to help look up the file.

            Version Added:
                4.0.5

        detail (unicode):
            Additional details for the error message.

        path (unicode):
            The path in the repository.

        revision (reviewboard.scmtools.core.Revision or unicode):
            The revision in the repository.
    """

    def __init__(self, path, revision=None, detail=None, base_commit_id=None,
                 context=None):
        """Initialize the error.

        Args:
            path (unicode):
                The path in the repository.

            revision (reviewboard.scmtools.core.Revision or unicode, optional):
                The revision in the repository.

            detail (unicode, optional):
                Additional details for the error message.

            base_commit_id (unicode, optional):
                The optional ID of the base commit the file and revision
                belonged to.

            context (reviewboard.scmtools.core.FileLookupContext, optional):
                Extra context used to help look up the file.

                Version Added:
                    4.0.5
        """
        # Imported here to avoid a circular import with scmtools.core.
        from reviewboard.scmtools.core import HEAD

        # Paths may arrive as bytes from SCM backends; normalize to text.
        if isinstance(path, bytes):
            path = path.decode('utf-8', 'ignore')

        if base_commit_id is None and context is not None:
            base_commit_id = context.base_commit_id

        # NOTE(review): Python precedence parses this as
        # "revision is None or (revision == HEAD and base_commit_id is
        # None)", so a None revision always takes this branch even when a
        # base_commit_id is available -- confirm that is intended.
        if revision is None or revision == HEAD and base_commit_id is None:
            msg = (_("The file '%s' could not be found in the repository")
                   % path)
        elif base_commit_id is not None and base_commit_id != revision:
            msg = _('The file "%(path)s" (revision %(revision)s, commit '
                    '%(base_commit_id)s) could not be found in the '
                    'repository') % {
                'path': path,
                'revision': revision,
                'base_commit_id': base_commit_id,
            }
        else:
            msg = _('The file "%(path)s" (revision %(revision)s) could not be '
                    'found in the repository') % {
                'path': path,
                'revision': revision,
            }

        if detail:
            msg += ': ' + detail

        Exception.__init__(self, msg)

        self.revision = revision
        self.base_commit_id = base_commit_id
        self.context = context
        self.path = path
        self.detail = detail
class RepositoryNotFoundError(SCMError):
    """An error indicating that a given path is not a valid repository.

    Version Changed:
        4.0.11:
        Added :py:attr:`form_field_id` and an equivalent argument to the
        constructor.

    Attributes:
        form_field_id (unicode):
            The ID of the form field that this error corresponds to.

            This may be ``None`` if this error isn't about a specific field.

            Version Added:
                4.0.11
    """

    def __init__(self, msg=None, form_field_id=None):
        """Initialize the error.

        Version Changed:
            4.0.11:
            This now takes optional ``msg`` and ``form_field_id`` parameters.

        Args:
            msg (unicode, optional):
                The optional custom message to display.

            form_field_id (unicode, optional):
                The optional ID of the form field that this error corresponds
                to.
        """
        if not msg:
            msg = _('A repository was not found. Please check the '
                    'configuration to make sure the details are correct '
                    'and that it allows access using any credentials you '
                    'may have provided.')

        super(RepositoryNotFoundError, self).__init__(msg)

        self.form_field_id = form_field_id
class AuthenticationError(SSHAuthenticationError, SCMError):
    """An error representing a failed authentication for a repository.

    This takes a list of authentication types that are allowed. These
    are dependent on the backend, but are loosely based on SSH authentication
    mechanisms. Primarily, we respond to "password" and "publickey".

    This may also take the user's SSH key that was tried, if any.
    """

    # All constructor behavior comes from SSHAuthenticationError; SCMError
    # is mixed in so callers can catch SCM-level errors generically.
    pass
class UnverifiedCertificateError(SCMError):
    """An error representing an unverified SSL certificate.

    Attributes:
        certificate (reviewboard.scmtools.certs.Certificate):
            The certificate this error pertains to.
    """

    def __init__(self, certificate):
        """Initialize the error message.

        Args:
            certificate (reviewboard.scmtools.certs.Certificate):
                The certificate this error pertains to.
        """
        # Collect whatever identifying details the certificate provides,
        # for inclusion in the error message.
        info = []

        if certificate.hostname:
            info.append(_('hostname "%s"') % certificate.hostname)

        if certificate.fingerprint:
            info.append(_('fingerprint "%s"') % certificate.fingerprint)

        # NOTE(review): the certificate truthiness check below is redundant
        # (a falsy certificate would already have failed the attribute
        # accesses above), but it's kept to preserve the exact behavior.
        if certificate and certificate.fingerprint:
            msg = _(
                'The SSL certificate for this repository (%s) was not '
                'verified and might not be safe. This certificate needs to '
                'be verified before the repository can be accessed.'
            ) % (', '.join(info))
        else:
            msg = _(
                'The SSL certificate for this repository was not verified '
                'and might not be safe. This certificate needs to be '
                'verified before the repository can be accessed.'
            )

        # The original code called super(SCMError, self).__init__(msg),
        # which skipped SCMError.__init__ in the MRO and only worked because
        # SCMError forwards directly to Exception. Target this class so the
        # full chain runs.
        super(UnverifiedCertificateError, self).__init__(msg)

        self.certificate = certificate
| mit | 73623cf0cfdbce6d6c34432c3c1e6b65 | 30.241803 | 79 | 0.578906 | 4.691077 | false | false | false | false |
reviewboard/reviewboard | reviewboard/webapi/resources/repository_group.py | 1 | 10388 | """API resource for managing a repository's group ACL.
Version Added:
4.0.11
"""
from __future__ import unicode_literals
from django.core.exceptions import ObjectDoesNotExist
from djblets.webapi.decorators import (webapi_login_required,
webapi_request_fields,
webapi_response_errors)
from djblets.webapi.errors import (DOES_NOT_EXIST, NOT_LOGGED_IN,
PERMISSION_DENIED)
from djblets.webapi.fields import StringFieldType
from reviewboard.reviews.models import Group
from reviewboard.webapi.decorators import webapi_check_local_site
from reviewboard.webapi.errors import INVALID_GROUP
from reviewboard.webapi.resources import resources
from reviewboard.webapi.resources.review_group import ReviewGroupResource
class RepositoryGroupResource(ReviewGroupResource):
    """Provides information on groups that are allowed access to a repository.

    Version Added:
        4.0.11
    """

    name = 'repository_group'
    item_result_key = 'group'
    list_result_key = 'groups'
    uri_name = 'groups'
    mimetype_list_resource_name = 'repository-groups'
    mimetype_item_resource_name = 'repository-group'

    # We don't want any group child resources to be available under this
    # resource, as they will have the wrong URLs, and do not make sense as
    # sub-resources. We will be serializing a link to the authoritative group
    # resource and children can be found from there.
    item_child_resources = []

    allowed_methods = ('GET', 'POST', 'DELETE')
    policy_id = 'repository_group'

    def _repository_is_mutable(self, request, *args, **kwargs):
        """Return whether the repository in the URL is mutable by the user.

        Every access/modify/delete permission check on this resource asks
        the same question of the repository named in the URL, so all of
        them delegate to this helper instead of duplicating the lookup.

        Args:
            request (django.http.HttpRequest):
                The HTTP request from the client.

            *args (tuple):
                Positional arguments identifying the repository.

            **kwargs (dict):
                Keyword arguments identifying the repository.

        Returns:
            bool:
            Whether the current user may change the repository's ACL.
        """
        repository = resources.repository.get_object(request, *args, **kwargs)

        return repository.is_mutable_by(request.user)

    def get_queryset(self, request, *args, **kwargs):
        """Return a queryset for the repository groups.

        Args:
            request (django.http.HttpRequest):
                The HTTP request from the client.

            *args (tuple):
                Positional arguments passed through to the parent resource.

            **kwargs (dict):
                Keyword arguments passed through to the parent resource.

        Returns:
            django.db.models.query.QuerySet:
            The queryset for the groups.
        """
        try:
            repository = resources.repository.get_object(request, *args,
                                                         **kwargs)
        except ObjectDoesNotExist:
            # NOTE(review): this returns the DOES_NOT_EXIST error object
            # rather than an (empty) QuerySet, which is what callers of
            # get_queryset() normally expect. Confirm that djblets handles
            # this value; otherwise return an empty queryset instead.
            return DOES_NOT_EXIST

        return repository.review_groups.all()

    def get_href_parent_ids(self, obj, **kwargs):
        """Return the href parent IDs for the object.

        Args:
            obj (reviewboard.reviews.models.Group):
                The review group.

            **kwargs (dict):
                Additional keyword arguments.

        Returns:
            dict:
            The parent IDs to be used to determine the href of the resource.
        """
        # Since we do not have a direct link to the model parent (the
        # Repository.review_groups field is a many-to-many field so we cannot
        # use it because the reverse relation is not unique), we have to
        # manually generate the parent IDs from the parent resource.
        parent_id_key = self._parent_resource.uri_object_key

        return {
            parent_id_key: kwargs[parent_id_key],
        }

    def get_related_links(self, obj=None, request=None, *args, **kwargs):
        """Return the related links for the resource.

        Args:
            obj (reviewboard.reviews.models.Group, optional):
                The group for which links are being generated.

            request (django.http.HttpRequest, optional):
                The current HTTP request.

            *args (tuple):
                Additional positional arguments.

            **kwargs (dict):
                Additional keyword arguments.

        Returns:
            dict:
            The related links for the resource.
        """
        links = super(RepositoryGroupResource, self).get_related_links(
            obj, request=request, *args, **kwargs)

        # We only want the 'group' link when this is an item resource.
        if self.uri_object_key in kwargs:
            group_name = kwargs[self.uri_object_key]
            links['group'] = {
                'href': resources.review_group.get_item_url(
                    group_name=group_name),
                'method': 'GET',
            }

        return links

    def get_serializer_for_object(self, obj):
        """Return the serializer for an object.

        If the object is a :py:class:`~reviewboard.reviews.models.Group`
        instance, we will serialize it (instead of the
        :py:class:`~reviewboard.webapi.resources.review_group.ReviewGroupResource`
        resource so that the links will be correct. Otherwise, the POST and
        DELETE links will be for the actual user instead of for this resource.

        Args:
            obj (django.db.models.base.Model):
                The model being serialized.

        Returns:
            djblets.webapi.resources.base.WebAPIResource:
            The resource that should be used to serialize the object.
        """
        if isinstance(obj, Group):
            return self

        return super(RepositoryGroupResource, self).get_serializer_for_object(
            obj)

    def has_access_permissions(self, request, group, *args, **kwargs):
        """Return whether the item resource can be accessed.

        The ``group`` argument is unused because we only care about
        repository-level access here.

        Returns:
            bool:
            Whether the current user can access the item resource.
        """
        return self._repository_is_mutable(request, *args, **kwargs)

    def has_list_access_permissions(self, request, *args, **kwargs):
        """Return whether the list resource can be accessed.

        Returns:
            bool:
            Whether the current user can access the list resource.
        """
        return self._repository_is_mutable(request, *args, **kwargs)

    def has_modify_permissions(self, request, *args, **kwargs):
        """Return whether the resource can be modified.

        Returns:
            bool:
            Whether the current user can modify the resource.
        """
        return self._repository_is_mutable(request, *args, **kwargs)

    def has_delete_permissions(self, request, *args, **kwargs):
        """Return whether the resource can be deleted.

        Returns:
            bool:
            Whether the current user can delete the resource.
        """
        return self._repository_is_mutable(request, *args, **kwargs)

    @webapi_check_local_site
    @webapi_login_required
    @webapi_response_errors(DOES_NOT_EXIST, INVALID_GROUP, NOT_LOGGED_IN,
                            PERMISSION_DENIED)
    @webapi_request_fields(required={
        'group_name': {
            'type': StringFieldType,
            'description': 'The group to add to the repository ACL.',
        },
    })
    def create(self, request, group_name, local_site_name=None,
               *args, **kwargs):
        """Adds a group to the repository ACL."""
        repo_resource = resources.repository

        try:
            repository = repo_resource.get_object(
                request, local_site_name=local_site_name, *args, **kwargs)
        except ObjectDoesNotExist:
            return DOES_NOT_EXIST

        if not repo_resource.has_modify_permissions(request, repository):
            return self.get_no_access_error(request)

        local_site = self._get_local_site(local_site_name)

        try:
            # Only invite-only groups may be added to a repository's ACL
            # (presumably because non-invite-only groups already have
            # access -- TODO confirm against the repository access rules).
            if local_site:
                group = local_site.groups.get(name=group_name,
                                              invite_only=True)
            else:
                group = Group.objects.get(name=group_name,
                                          invite_only=True)
        except ObjectDoesNotExist:
            return INVALID_GROUP

        repository.review_groups.add(group)

        return 201, {
            self.item_result_key: group,
        }

    @webapi_check_local_site
    @webapi_login_required
    @webapi_response_errors(DOES_NOT_EXIST, NOT_LOGGED_IN, PERMISSION_DENIED)
    def delete(self, request, *args, **kwargs):
        """Removes a group from the repository ACL."""
        repo_resource = resources.repository

        try:
            repository = repo_resource.get_object(request, *args, **kwargs)
            group = self.get_object(request, *args, **kwargs)
        except ObjectDoesNotExist:
            return DOES_NOT_EXIST

        if not repo_resource.has_modify_permissions(request, repository):
            return self.get_no_access_error(request)

        repository.review_groups.remove(group)

        return 204, {}
repository_group_resource = RepositoryGroupResource()
| mit | e015c06878141ed12b158353e9c84921 | 33.511628 | 82 | 0.59819 | 4.610741 | false | false | false | false |
reviewboard/reviewboard | reviewboard/webapi/resources/user.py | 1 | 23642 | import logging
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.core.validators import validate_email
from django.db import IntegrityError, transaction
from django.db.models import Q
from djblets.util.decorators import augment_method_from
from djblets.webapi.decorators import (webapi_login_required,
webapi_request_fields,
webapi_response_errors)
from djblets.webapi.errors import (DOES_NOT_EXIST, INVALID_FORM_DATA,
NOT_LOGGED_IN, PERMISSION_DENIED)
from djblets.webapi.fields import (BooleanFieldType,
DictFieldType,
StringFieldType)
from djblets.webapi.resources.user import UserResource as DjbletsUserResource
from reviewboard.accounts.backends import get_enabled_auth_backends
from reviewboard.accounts.errors import UserQueryError
from reviewboard.avatars import avatar_services
from reviewboard.site.urlresolvers import local_site_reverse
from reviewboard.webapi.base import WebAPIResource
from reviewboard.webapi.decorators import webapi_check_local_site
from reviewboard.webapi.errors import USER_QUERY_ERROR
from reviewboard.webapi.resources import resources
logger = logging.getLogger(__name__)
class UserResource(WebAPIResource, DjbletsUserResource):
    """Creates and provides information on users.

    If a user's profile is private, the fields ``email``, ``first_name``,
    ``last_name``, and ``fullname`` will be omitted for non-staff users.
    """

    item_child_resources = [
        resources.api_token,
        resources.archived_review_request,
        resources.muted_review_request,
        resources.user_file_attachment,
        resources.watched,
    ]

    fields = dict({
        'avatar_html': {
            'type': DictFieldType,
            'description': 'HTML for rendering the avatar at specified '
                           'sizes. This is only populated if using '
                           '``?render-avatars-at=...`` for GET requests or '
                           '``render_avatars_at=...`` for POST requests.',
            'added_in': '3.0.14',
        },
        'avatar_url': {
            'type': StringFieldType,
            'description': 'The URL for an avatar representing the user, '
                           'if available.',
            'added_in': '1.6.14',
            'deprecated_in': '3.0',
        },
        'avatar_urls': {
            'type': StringFieldType,
            'description': 'The URLs for an avatar representing the user, '
                           'if available.',
            'added_in': '3.0',
        },
        'is_active': {
            'type': BooleanFieldType,
            # String fix: the old implicit concatenation was missing a
            # space, rendering as "Inactive usersare not able ...".
            'description': 'Whether or not the user is active. Inactive '
                           'users are not able to log in or make changes '
                           'to Review Board.',
            'added_in': '2.5.9',
        },
    }, **DjbletsUserResource.fields)

    allowed_methods = ('GET', 'PUT', 'POST')

    # Profile fields stripped from payloads when the target user's profile
    # is private and the requester isn't allowed to see it.
    hidden_fields = ('email', 'first_name', 'last_name', 'fullname')

    def get_queryset(self, request, local_site_name=None, *args, **kwargs):
        """Return the queryset of users matching the request.

        Honors the ``q``, ``fullname``, and ``include-inactive`` query
        arguments, lets the enabled auth backends populate and search
        users, and annotates each row with the profile's privacy flag.
        """
        search_q = request.GET.get('q', None)
        include_inactive = \
            request.GET.get('include-inactive', '0').lower() in ('1', 'true')

        # Give each auth backend a chance to create/update local User
        # entries from an external source before querying. Backend errors
        # are logged but never fatal.
        for backend in get_enabled_auth_backends():
            try:
                backend.populate_users(query=search_q,
                                       request=request)
            except Exception as e:
                logger.exception('Error when calling populate_users for auth '
                                 'backend %r: %s',
                                 backend, e,
                                 extra={'request': request})

        local_site = self._get_local_site(local_site_name)
        is_list = kwargs.get('is_list', False)

        # When accessing individual users (not is_list) on public local sites,
        # we allow accessing any username. This is so that the links on reviews
        # and review requests from non-members won't be 404. The list is still
        # restricted to members of the site to avoid leaking information.
        if local_site and (is_list or not local_site.public):
            query = local_site.users.all()
        else:
            query = self.model.objects.all()

        if is_list and not include_inactive:
            query = query.filter(is_active=True)

        if search_q:
            q = None

            # Auth backends may have special naming conventions for users that
            # they'd like to be represented in search. If any auth backends
            # implement build_search_users_query(), prefer that over the
            # built-in searching.
            for backend in get_enabled_auth_backends():
                try:
                    q = backend.build_search_users_query(query=search_q,
                                                         request=request)
                except Exception as e:
                    logger.exception(
                        'Error when calling build_search_users_query for '
                        'auth backend %r: %s',
                        backend, e,
                        extra={'request': request})

                if q:
                    break

            if not q:
                q = Q(username__istartswith=search_q)

                if request.GET.get('fullname', None):
                    q = q | (Q(first_name__istartswith=search_q) |
                             Q(last_name__istartswith=search_q))

            query = query.filter(q)

        # Annotate each user with the profile's privacy flag so
        # serialization can decide whether to hide fields without an
        # extra query per user.
        return query.extra(select={
            'is_private': ('SELECT is_private FROM accounts_profile '
                           'WHERE accounts_profile.user_id = auth_user.id')
        })

    def serialize_object(self, obj, request=None, *args, **kwargs):
        """Serialize a user, hiding private-profile fields as needed."""
        data = super(UserResource, self).serialize_object(
            obj, request=request, *args, **kwargs)

        if request:
            # Hide user info from anonymous users and non-staff users (if
            # his/her profile is private).
            if not obj.is_profile_visible(request.user):
                for field in self.hidden_fields:
                    try:
                        del data[field]
                    except KeyError:
                        # The caller may be using ?only-fields. We can ignore
                        # this.
                        pass

        return data

    def serialize_url_field(self, user, **kwargs):
        """Return the URL to the user's profile page."""
        return local_site_reverse('user', kwargs['request'],
                                  kwargs={'username': user.username})

    def serialize_avatar_html_field(self, user, request=None, **kwargs):
        """Serialize the avatar_html field.

        Args:
            user (django.contrib.auth.models.User):
                The user the avatar is being serialized for.

            request (django.http.HttpRequest, optional):
                The HTTP request from the client.

            **kwargs (dict, unused):
                Additional keyword arguments.

        Returns:
            dict:
            Dictionaries, mapping sizes to renders.
        """
        if not avatar_services.avatars_enabled:
            return None

        renders = {}

        # Look for both the GET and PUT/POST/PATCH versions of this value.
        # If present, we'll render avatars at the sizes specified.
        avatar_sizes = request.GET.get('render-avatars-at',
                                       request.POST.get('render_avatars_at'))

        if avatar_sizes:
            norm_sizes = set()

            for size in avatar_sizes.split(','):
                try:
                    norm_sizes.add(int(size))
                except ValueError:
                    # A non-integer value was passed in. Ignore it.
                    pass

            avatar_sizes = norm_sizes

            if avatar_sizes:
                service = avatar_services.for_user(user)

                if service:
                    for size in avatar_sizes:
                        try:
                            renders[str(size)] = \
                                service.render(request=request,
                                               user=user,
                                               size=size).strip()
                        except Exception as e:
                            logger.exception('Error rendering avatar at size %s '
                                             'for user %s: %s',
                                             size, user, e,
                                             extra={'request': request})

        return renders or None

    def serialize_avatar_url_field(self, user, request=None, **kwargs):
        """Return the legacy single (1x) avatar URL, if available."""
        urls = self.serialize_avatar_urls_field(user, request, **kwargs)

        if urls:
            return urls.get('1x')

        return None

    def serialize_avatar_urls_field(self, user, request=None, **kwargs):
        """Return the avatar URLs dictionary for the user, if available."""
        if avatar_services.avatars_enabled:
            service = avatar_services.for_user(user)

            if service:
                return service.get_avatar_urls(request, user, 48)

        return {}

    def has_access_permissions(self, *args, **kwargs):
        # Anyone may access individual user resources. Private-profile
        # fields are stripped in serialize_object() instead.
        return True

    def has_modify_permissions(self, request, user, **kwargs):
        """Return whether the user has permissions to modify a user.

        Users can only modify a user's information if it's the same user or
        if it's an administrator or user with the ``auth.change_user``
        permission making the modification.

        Args:
            request (django.http.HttpRequest):
                The current HTTP request.

            user (django.contrib.auth.models.User):
                The user being modified.

            **kwargs (dict, unused):
                Additional keyword arguments.

        Returns:
            bool:
            Whether the user making the request has modify access for the
            target user.
        """
        return (request.user == user or
                self.has_global_modify_permissions(request.user))

    def has_global_modify_permissions(self, user):
        """Return whether the user has permissions to modify all other users.

        This checks if the requesting user is an administrator or a special
        user with the ``auth.change_user`` permission.

        Args:
            user (django.contrib.auth.models.User):
                The user to check.

        Returns:
            bool:
            Whether the user has full permissions to modify other users.
        """
        return user.is_superuser or user.has_perm('auth.change_user')

    @webapi_check_local_site
    @webapi_response_errors(NOT_LOGGED_IN, PERMISSION_DENIED, DOES_NOT_EXIST,
                            USER_QUERY_ERROR)
    @webapi_request_fields(
        optional={
            'fullname': {
                'type': BooleanFieldType,
                'description': 'Specifies whether ``q`` should also match '
                               'the beginning of the first name or last name. '
                               'Ignored if ``q`` is not set.',
            },
            'include-inactive': {
                'type': BooleanFieldType,
                'description': 'If set, users who are marked as inactive '
                               '(their accounts have been disabled) will be '
                               'included in the list.',
            },
            'render-avatars-at': {
                'type': StringFieldType,
                # String fix: the field is ``avatar_html`` (not
                # ``avatars_html``), and "will" was missing.
                'description': 'A comma-separated list of avatar pixel sizes '
                               'to render. Renders for each specified size '
                               'will be available in the ``avatar_html`` '
                               'dictionary. If not provided, avatars will '
                               'not be rendered.',
            },
            'q': {
                'type': StringFieldType,
                'description': 'The string that the username (or the first '
                               'name or last name when using ``fullname``) '
                               'must start with in order to be included in '
                               'the list. This is case-insensitive.',
            },
        },
        allow_unknown=True
    )
    @augment_method_from(WebAPIResource)
    def get_list(self, *args, **kwargs):
        """Retrieves the list of users on the site.

        This includes only the users who have active accounts on the site.
        Any account that has been disabled (for inactivity, spam reasons,
        or anything else) will be excluded from the list.

        The list of users can be filtered down using the ``q`` and
        ``fullname`` parameters.

        Setting ``q`` to a value will by default limit the results to
        usernames starting with that value. This is a case-insensitive
        comparison.

        If ``fullname`` is set to ``1``, the first and last names will also be
        checked along with the username. ``fullname`` is ignored if ``q``
        is not set.

        For example, accessing ``/api/users/?q=bo&fullname=1`` will list
        any users with a username, first name or last name starting with
        ``bo``.

        Inactive users will not be returned by default. However by providing
        ``?include-inactive=1`` they will be returned.
        """
        try:
            return super(UserResource, self).get_list(*args, **kwargs)
        except UserQueryError as e:
            return USER_QUERY_ERROR.with_message(e.msg)

    @webapi_check_local_site
    @webapi_request_fields(
        optional={
            'render-avatars-at': {
                'type': StringFieldType,
                'description': 'A comma-separated list of avatar pixel sizes '
                               'to render. Renders for each specified size '
                               'will be available in the ``avatar_html`` '
                               'dictionary. If not provided, avatars will '
                               'not be rendered.',
            },
        },
    )
    @augment_method_from(WebAPIResource)
    def get(self, *args, **kwargs):
        """Retrieve information on a registered user.

        This mainly returns some basic information (username, full name,
        e-mail address) and links to that user's root Watched Items resource,
        which is used for keeping track of the groups and review requests
        that the user has "starred".
        """
        pass

    @webapi_login_required
    @webapi_check_local_site
    @webapi_response_errors(PERMISSION_DENIED, INVALID_FORM_DATA)
    @webapi_request_fields(
        required={
            'username': {
                'type': StringFieldType,
                'description': 'The username of the user to create.',
            },
            'email': {
                'type': StringFieldType,
                'description': 'The e-mail address of the user to create.',
            },
            'password': {
                'type': StringFieldType,
                'description': 'The password of the user to create.',
            }
        },
        optional={
            'first_name': {
                'type': StringFieldType,
                'description': 'The first name of the user to create.',
            },
            'last_name': {
                'type': StringFieldType,
                'description': 'The last name of the user to create.',
            },
            'render_avatars_at': {
                'type': StringFieldType,
                'description': 'A comma-separated list of avatar pixel sizes '
                               'to render. Renders for each specified size '
                               'will be available in the ``avatar_html`` '
                               'dictionary. If not provided, avatars will '
                               'not be rendered.',
            },
        },
    )
    def create(self, request, username, email, password, first_name='',
               last_name='', local_site=None, *args, **kwargs):
        """Create a new user.

        The user will be allowed to authenticate into Review Board with the
        given username and password.

        Only administrators or those with the ``auth.add_user`` permission
        will be able to create users.

        This API cannot be used on :term:`Local Sites`.
        """
        if (not request.user.is_superuser and
            not request.user.has_perm('auth.add_user')):
            return PERMISSION_DENIED.with_message(
                'You do not have permission to create users.')

        if local_site:
            return PERMISSION_DENIED.with_message(
                'This API is not available for local sites.')

        try:
            validate_email(email)
        except ValidationError as e:
            return INVALID_FORM_DATA, {
                'fields': {
                    'email': e.messages,
                },
            }

        try:
            # We wrap this in a transaction.atomic block because attempting to
            # create a user with a username that already exists will generate
            # an IntegrityError and break the current transaction.
            #
            # Unit tests wrap each test case in a transaction.atomic block as
            # well. If this block is not here, the test case's transaction
            # will break and cause errors during test teardown.
            with transaction.atomic():
                user = User.objects.create_user(username, email, password,
                                                first_name=first_name,
                                                last_name=last_name)
        except IntegrityError:
            return INVALID_FORM_DATA, {
                'fields': {
                    'username': [
                        'A user with the requested username already exists.',
                    ]
                }
            }

        return 201, {
            self.item_result_key: user,
        }

    @webapi_login_required
    @webapi_check_local_site
    @webapi_response_errors(DOES_NOT_EXIST, PERMISSION_DENIED,
                            INVALID_FORM_DATA)
    @webapi_request_fields(
        optional={
            # String fixes below: these descriptions were copy-pasted from
            # create() and incorrectly said "of the user to create".
            'email': {
                'type': StringFieldType,
                'description': 'The new e-mail address of the user.',
            },
            'first_name': {
                'type': StringFieldType,
                'description': 'The new first name of the user.',
            },
            'is_active': {
                'type': BooleanFieldType,
                'description': 'Whether the user should be allowed to log in '
                               'to Review Board.',
                'added_in': '3.0.16',
            },
            'last_name': {
                'type': StringFieldType,
                'description': 'The new last name of the user.',
            },
            'render_avatars_at': {
                'type': StringFieldType,
                'description': 'A comma-separated list of avatar pixel sizes '
                               'to render. Renders for each specified size '
                               'will be available in the ``avatar_html`` '
                               'dictionary. If not provided, avatars will '
                               'not be rendered.',
            },
        },
    )
    def update(self, request, local_site=None, *args, **kwargs):
        """Update information on a user.

        Users can update their own ``email``, ``first_name``, and ``last_name``
        information.

        Administrators or those with the ``auth.change_user`` permission can
        update those along with ``is_active``. When setting ``is_active`` to
        ``False``, the user will not be able to log in through standard
        credentials or API tokens. (Note that this does not delete their
        password or API tokens. It simply blocks the ability to log in.)

        .. note::
           This API cannot be used on :term:`Local Sites`.

        Version Added:
            3.0.16
        """
        if local_site:
            return PERMISSION_DENIED.with_message(
                'This API is not available for local sites.')

        try:
            user = self.get_object(request, *args, **kwargs)
        except ObjectDoesNotExist:
            return DOES_NOT_EXIST

        if not self.has_modify_permissions(request, user):
            return self.get_no_access_error(request)

        has_global_modify_permissions = \
            self.has_global_modify_permissions(request.user)

        backend = get_enabled_auth_backends()[0]
        updated_fields = set()
        invalid_fields = {}

        # Validate the fields that the user wants to set.
        if 'email' in kwargs:
            if backend.supports_change_email:
                email = kwargs['email']

                try:
                    validate_email(email)
                except ValidationError as e:
                    invalid_fields['email'] = e.messages
            else:
                invalid_fields['email'] = [
                    'The configured auth backend does not allow e-mail '
                    'addresses to be changed.'
                ]

        if not backend.supports_change_name:
            for field in ('first_name', 'last_name'):
                if field in kwargs:
                    invalid_fields[field] = [
                        'The configured auth backend does not allow names '
                        'to be changed.'
                    ]

        if 'is_active' in kwargs and not has_global_modify_permissions:
            invalid_fields['is_active'] = [
                'This field can only be set by administrators and users '
                'with the auth.change_user permission.'
            ]

        if invalid_fields:
            return INVALID_FORM_DATA, {
                'fields': invalid_fields,
            }

        # Set any affected fields.
        for field in ('first_name', 'last_name', 'email', 'is_active'):
            value = kwargs.get(field)

            if value is not None:
                old_value = getattr(user, field)

                if old_value != value:
                    setattr(user, field, value)
                    updated_fields.add(field)

        # Notify the auth backend, so any server-side state can be changed.
        if updated_fields:
            if ('first_name' in updated_fields or
                'last_name' in updated_fields):
                try:
                    backend.update_name(user)
                except Exception as e:
                    logger.exception(
                        'Error when calling update_name for auth backend '
                        '%r for user ID %s: %s',
                        backend, user.pk, e,
                        extra={'request': request})

            if 'email' in updated_fields:
                try:
                    backend.update_email(user)
                except Exception as e:
                    logger.exception(
                        'Error when calling update_email for auth backend '
                        '%r for user ID %s: %s',
                        backend, user.pk, e,
                        extra={'request': request})

            user.save(update_fields=updated_fields)

        return 200, {
            self.item_result_key: user,
        }
user_resource = UserResource()
| mit | eb4dc84961275db0cf88823640689688 | 37.317666 | 79 | 0.520684 | 4.787768 | false | false | false | false |
reviewboard/reviewboard | reviewboard/hostingsvcs/bugzilla.py | 1 | 2250 | import logging
from django import forms
from django.utils.translation import gettext_lazy as _
from reviewboard.hostingsvcs.bugtracker import BugTracker
from reviewboard.hostingsvcs.forms import HostingServiceForm
from reviewboard.hostingsvcs.service import HostingService
from reviewboard.admin.validation import validate_bug_tracker_base_hosting_url
logger = logging.getLogger(__name__)
class BugzillaForm(HostingServiceForm):
    """Configuration form for the Bugzilla hosting service.

    Asks for the base URL of the Bugzilla install, and normalizes the
    value during cleaning by stripping any trailing slashes.
    """

    bugzilla_url = forms.CharField(
        label=_('Bugzilla URL'),
        max_length=64,
        required=True,
        validators=[validate_bug_tracker_base_hosting_url],
        widget=forms.TextInput(attrs={'size': '60'}))

    def clean_bugzilla_url(self):
        """Return the entered Bugzilla URL without trailing slashes."""
        url = self.cleaned_data['bugzilla_url']

        return url.rstrip('/')
class Bugzilla(HostingService, BugTracker):
    """Bug tracker support for Bugzilla installs.

    This is a bug-tracker-only hosting service. It builds bug URLs from the
    configured base URL and fetches bug details over Bugzilla's REST API.
    """

    name = 'Bugzilla'
    hosting_service_id = 'bugzilla'
    form = BugzillaForm
    # %%s is a literal %s after the first interpolation pass; the bug ID is
    # substituted in a second pass.
    bug_tracker_field = '%(bugzilla_url)s/show_bug.cgi?id=%%s'
    supports_bug_trackers = True

    def get_bug_info_uncached(self, repository, bug_id):
        """Get the bug info from the server.

        This makes two REST requests: one for the summary and status, and
        one for the first comment (used as the description). Each failure
        is logged and non-fatal; the corresponding fields keep their empty
        defaults.
        """
        # This requires making two HTTP requests: one for the summary and
        # status, and one to get the "first comment" (description).
        bug_id = str(bug_id)

        # Defaults returned if either request fails or the payload is
        # missing data.
        result = {
            'summary': '',
            'description': '',
            'status': '',
        }

        url = '%s/rest/bug/%s' % (
            repository.extra_data['bug_tracker-bugzilla_url'],
            bug_id)

        try:
            rsp, headers = self.client.json_get(
                '%s?include_fields=summary,status'
                % url)
            result['summary'] = rsp['bugs'][0]['summary']
            result['status'] = rsp['bugs'][0]['status']
        except Exception as e:
            logger.warning('Unable to fetch bugzilla data from %s: %s',
                           url, e, exc_info=True)

        try:
            # The comments endpoint keys the response by bug ID (as a
            # string), unlike the list payload above.
            url += '/comment'
            rsp, headers = self.client.json_get(url)
            result['description'] = rsp['bugs'][bug_id]['comments'][0]['text']
        except Exception as e:
            logger.warning('Unable to fetch bugzilla data from %s: %s',
                           url, e, exc_info=True)

        return result
| mit | f0d2c9fe3c317236e2354979d63b8b63 | 32.088235 | 78 | 0.597333 | 3.996448 | false | false | false | false |
reviewboard/reviewboard | reviewboard/webapi/resources/review_group_user.py | 1 | 9033 | from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from djblets.util.decorators import augment_method_from
from djblets.webapi.decorators import (webapi_login_required,
webapi_response_errors,
webapi_request_fields)
from djblets.webapi.errors import (DOES_NOT_EXIST, NOT_LOGGED_IN,
PERMISSION_DENIED)
from djblets.webapi.fields import BooleanFieldType, StringFieldType
from reviewboard.reviews.models import Group
from reviewboard.webapi.decorators import webapi_check_local_site
from reviewboard.webapi.errors import INVALID_USER
from reviewboard.webapi.resources import resources
from reviewboard.webapi.resources.user import UserResource
class ReviewGroupUserResource(UserResource):
"""Provides information on users that are members of a review group."""
name = 'review_group_user'
item_result_key = 'user'
list_result_key = 'users'
uri_name = 'users'
# We do not want the watched resource to be available under this resource
# as it will have the wrong URL and does not make sense as a sub-resource;
# we will be serializing a link to the user resource and it can be found
# from there.
item_child_resources = []
allowed_methods = ('GET', 'POST', 'DELETE')
policy_id = 'review_group_user'
def get_queryset(self, request, group_name, local_site_name=None,
*args, **kwargs):
group = Group.objects.get(name=group_name,
local_site__name=local_site_name)
return group.users.all()
def get_href_parent_ids(self, obj, **kwargs):
"""Return the href parent IDs for the object.
Args:
obj (django.contrib.auth.models.User):
The user.
**kwargs (dict):
Additional keyword arguments.
Returns:
dict:
The parent IDs to be used to determine the href of the resource.
"""
# Since we do not have a direct link to the model parent (the
# Group.users field is a many-to-many field so we cannot use it because
# the reverse relation is not unique), we have to manually generate the
# parent IDs from the parent resource.
parent_id_key = self._parent_resource.uri_object_key
return {
parent_id_key: kwargs[parent_id_key],
}
def get_related_links(self, obj=None, request=None, *args, **kwargs):
"""Return the related links for the resource.
Args:
obj (django.contrib.auth.models.User, optional):
The user for which links are being generated.
request (django.http.HttpRequest):
The current HTTP request.
*args (tuple):
Additional positional arguments.
**kwargs (dict):
Additional keyword arguments.
Returns:
dict:
The related links for the resource.
"""
links = super(ReviewGroupUserResource, self).get_related_links(
obj, request, *args, **kwargs)
# We only want the 'user' link when this is an item resource.
if self.uri_object_key in kwargs:
username = kwargs[self.uri_object_key]
links['user'] = {
'href': resources.user.get_item_url(username=username),
'method': 'GET',
}
return links
def get_serializer_for_object(self, obj):
"""Return the serializer for an object.
If the object is a :py:class:`~django.contrib.auth.models.User`
instance, we will serialize it (instead of the
:py:class:`~reviewboard.webapi.resources.user.UserResource` resource
so that the links will be correct. Otherwise, the POST and DELETE links
will be for the actual user instead of for this resource.
Args:
obj (django.db.models.base.Model):
The model being serialized.
Returns:
djblets.webapi.resources.base.WebAPIResource:
The resource that should be used to serialize the object.
"""
if isinstance(obj, User):
return self
return super(ReviewGroupUserResource, self).get_serializer_for_object(
obj)
def has_access_permissions(self, request, user, *args, **kwargs):
group = resources.review_group.get_object(request, *args, **kwargs)
return group.is_accessible_by(request.user)
def has_list_access_permissions(self, request, *args, **kwargs):
group = resources.review_group.get_object(request, *args, **kwargs)
return group.is_accessible_by(request.user)
def has_modify_permissions(self, request, group, username, local_site):
return (
resources.review_group.has_modify_permissions(request, group) or
(request.user.username == username and
group.is_accessible_by(request.user))
)
def has_delete_permissions(self, request, user, *args, **kwargs):
group = resources.review_group.get_object(request, *args, **kwargs)
return group.is_mutable_by(request.user)
@webapi_check_local_site
@webapi_login_required
@webapi_response_errors(DOES_NOT_EXIST, INVALID_USER,
NOT_LOGGED_IN, PERMISSION_DENIED)
@webapi_request_fields(required={
'username': {
'type': StringFieldType,
'description': 'The user to add to the group.',
'added_in': '1.6.14',
},
})
def create(self, request, username, *args, **kwargs):
"""Adds a user to a review group."""
group_resource = resources.review_group
try:
group = group_resource.get_object(request, *args, **kwargs)
except ObjectDoesNotExist:
return DOES_NOT_EXIST
local_site = self._get_local_site(kwargs.get('local_site_name', None))
if (not group_resource.has_access_permissions(request, group) or
not self.has_modify_permissions(request, group, username,
local_site)):
return self.get_no_access_error(request)
try:
if local_site:
user = local_site.users.get(username=username)
else:
user = User.objects.get(username=username)
except ObjectDoesNotExist:
return INVALID_USER
group.users.add(user)
return 201, {
self.item_result_key: user,
}
    @webapi_check_local_site
    @webapi_login_required
    @webapi_response_errors(DOES_NOT_EXIST, INVALID_USER,
                            NOT_LOGGED_IN, PERMISSION_DENIED)
    def delete(self, request, *args, **kwargs):
        """Removes a user from a review group."""
        group_resource = resources.review_group
        try:
            group = group_resource.get_object(request, *args, **kwargs)
            user = self.get_object(request, *args, **kwargs)
        except ObjectDoesNotExist:
            return DOES_NOT_EXIST
        local_site = self._get_local_site(kwargs.get('local_site_name', None))
        # Same permission rule as create(): group admins, or users removing
        # themselves from a group they can access.
        if (not group_resource.has_access_permissions(request, group) or
            not self.has_modify_permissions(request, group, user.username,
                                            local_site)):
            return self.get_no_access_error(request)
        group.users.remove(user)
        # 204 No Content on success.
        return 204, {}
@webapi_check_local_site
@webapi_request_fields(optional={
'fullname': {
'type': BooleanFieldType,
'description': ''
},
'q': {
'type': StringFieldType,
'description': 'Limit the results to usernames starting with the '
'provided value. This is case-insensitive.',
},
})
@augment_method_from(UserResource)
def get_list(self, *args, **kwargs):
"""Retrieves the list of users belonging to a specific review group.
This includes only the users who have active accounts on the site.
Any account that has been disabled (for inactivity, spam reasons,
or anything else) will be excluded from the list.
The list of users can be filtered down using the ``q`` and
``fullname`` parameters.
Setting ``q`` to a value will by default limit the results to
usernames starting with that value. This is a case-insensitive
comparison.
If ``fullname`` is set to ``1``, the first and last names will also be
checked along with the username. ``fullname`` is ignored if ``q``
is not set.
For example, accessing ``/api/users/?q=bo&fullname=1`` will list
any users with a username, first name or last name starting with
``bo``.
"""
pass
review_group_user_resource = ReviewGroupUserResource()
| mit | 5cb48f008fc32f10daaa30bfbc09507c | 36.020492 | 79 | 0.606 | 4.260849 | false | false | false | false |
exercism/python | exercises/practice/all-your-base/all_your_base_test.py | 2 | 3983 | import unittest
from all_your_base import (
rebase,
)
# Tests adapted from `problem-specifications//canonical-data.json`
class AllYourBaseTest(unittest.TestCase):
    """Canonical tests for rebase(input_base, digits, output_base)."""
    def test_single_bit_one_to_decimal(self):
        self.assertEqual(rebase(2, [1], 10), [1])
    def test_binary_to_single_decimal(self):
        self.assertEqual(rebase(2, [1, 0, 1], 10), [5])
    def test_single_decimal_to_binary(self):
        self.assertEqual(rebase(10, [5], 2), [1, 0, 1])
    def test_binary_to_multiple_decimal(self):
        self.assertEqual(rebase(2, [1, 0, 1, 0, 1, 0], 10), [4, 2])
    def test_decimal_to_binary(self):
        self.assertEqual(rebase(10, [4, 2], 2), [1, 0, 1, 0, 1, 0])
    def test_trinary_to_hexadecimal(self):
        self.assertEqual(rebase(3, [1, 1, 2, 0], 16), [2, 10])
    def test_hexadecimal_to_trinary(self):
        self.assertEqual(rebase(16, [2, 10], 3), [1, 1, 2, 0])
    def test_15_bit_integer(self):
        self.assertEqual(rebase(97, [3, 46, 60], 73), [6, 10, 45])
    # Degenerate inputs: empty digit lists, zeros and leading zeros must
    # normalize (the result for zero is [0], never []).
    def test_empty_list(self):
        self.assertEqual(rebase(2, [], 10), [0])
    def test_single_zero(self):
        self.assertEqual(rebase(10, [0], 2), [0])
    def test_multiple_zeros(self):
        self.assertEqual(rebase(10, [0, 0, 0], 2), [0])
    def test_leading_zeros(self):
        self.assertEqual(rebase(7, [0, 6, 0], 10), [4, 2])
    # Error cases: bases below 2 and digits outside [0, input_base) must
    # raise ValueError with these exact messages.
    def test_input_base_is_one(self):
        with self.assertRaises(ValueError) as err:
            rebase(1, [0], 10)
        self.assertEqual(type(err.exception), ValueError)
        self.assertEqual(err.exception.args[0], "input base must be >= 2")
    def test_input_base_is_zero(self):
        with self.assertRaises(ValueError) as err:
            rebase(0, [], 10)
        self.assertEqual(type(err.exception), ValueError)
        self.assertEqual(err.exception.args[0], "input base must be >= 2")
    def test_input_base_is_negative(self):
        with self.assertRaises(ValueError) as err:
            rebase(-2, [1], 10)
        self.assertEqual(type(err.exception), ValueError)
        self.assertEqual(err.exception.args[0], "input base must be >= 2")
    def test_negative_digit(self):
        with self.assertRaises(ValueError) as err:
            rebase(2, [1, -1, 1, 0, 1, 0], 10)
        self.assertEqual(type(err.exception), ValueError)
        self.assertEqual(
            err.exception.args[0], "all digits must satisfy 0 <= d < input base"
        )
    def test_invalid_positive_digit(self):
        with self.assertRaises(ValueError) as err:
            rebase(2, [1, 2, 1, 0, 1, 0], 10)
        self.assertEqual(type(err.exception), ValueError)
        self.assertEqual(
            err.exception.args[0], "all digits must satisfy 0 <= d < input base"
        )
    def test_output_base_is_one(self):
        with self.assertRaises(ValueError) as err:
            rebase(2, [1, 0, 1, 0, 1, 0], 1)
        self.assertEqual(type(err.exception), ValueError)
        self.assertEqual(err.exception.args[0], "output base must be >= 2")
    def test_output_base_is_zero(self):
        with self.assertRaises(ValueError) as err:
            rebase(10, [7], 0)
        self.assertEqual(type(err.exception), ValueError)
        self.assertEqual(err.exception.args[0], "output base must be >= 2")
    def test_output_base_is_negative(self):
        with self.assertRaises(ValueError) as err:
            rebase(2, [1], -7)
        self.assertEqual(type(err.exception), ValueError)
        self.assertEqual(err.exception.args[0], "output base must be >= 2")
    # When both bases are invalid, the input base is reported first.
    def test_both_bases_are_negative(self):
        with self.assertRaises(ValueError) as err:
            rebase(-2, [1], -7)
        self.assertEqual(type(err.exception), ValueError)
        self.assertEqual(err.exception.args[0], "input base must be >= 2")
    # Utility functions
    def assertRaisesWithMessage(self, exception):
        # Helper kept for exercism's test template compatibility; matches
        # any non-empty exception message.
        return self.assertRaisesRegex(exception, r".+")
if __name__ == "__main__":
unittest.main()
| mit | 897e22befe29d9096f4d65ad2c187bbf | 34.882883 | 80 | 0.609089 | 3.347059 | false | true | false | false |
exercism/python | exercises/practice/accumulate/accumulate_test.py | 8 | 1136 | import unittest
from accumulate import accumulate
class AccumulateTest(unittest.TestCase):
    """Tests for accumulate(collection, operation) (a map() work-alike)."""
    def test_empty_sequence(self):
        self.assertEqual(accumulate([], lambda x: x / 2), [])
    def test_pow(self):
        self.assertEqual(
            accumulate([1, 2, 3, 4, 5], lambda x: x * x), [1, 4, 9, 16, 25])
    def test_divmod(self):
        self.assertEqual(
            accumulate([10, 17, 23], lambda x: divmod(x, 7)),
            [(1, 3), (2, 3), (3, 2)])
    def test_composition(self):
        # divmod(x, 7) followed by 7*q + r must round-trip the input.
        inp = [10, 17, 23]
        self.assertEqual(
            accumulate(
                accumulate(inp, lambda x: divmod(x, 7)),
                lambda x: 7 * x[0] + x[1]), inp)
    def test_capitalize(self):
        self.assertEqual(
            accumulate(['hello', 'world'], str.upper), ['HELLO', 'WORLD'])
    def test_recursive(self):
        # Nested accumulate: each letter expands to letter+digit strings.
        inp = ['a', 'b', 'c']
        out = [['a1', 'a2', 'a3'], ['b1', 'b2', 'b3'], ['c1', 'c2', 'c3']]
        self.assertEqual(
            accumulate(
                inp, lambda x: accumulate(list('123'), lambda y: x + y)), out)
if __name__ == '__main__':
unittest.main()
| mit | 403e58320f82487d428b48cb1d3118bb | 28.128205 | 78 | 0.503521 | 3.452888 | false | true | false | false |
exercism/python | bin/data.py | 2 | 9982 | from enum import Enum
from dataclasses import dataclass, asdict, fields
import dataclasses
from itertools import chain
import json
from pathlib import Path
import tomli
from typing import List, Any, Dict, Type
def _custom_dataclass_init(self, *args, **kwargs):
    """Replacement dataclass ``__init__`` that rejects unknown fields.

    Assigned as ``__init__ = _custom_dataclass_init`` on the dataclasses
    below so that a stray key in config.json raises a descriptive
    TypeError instead of being silently accepted or producing a generic
    error.  Mirrors normal positional/keyword semantics, including
    duplicate- and missing-argument errors.
    """
    # print(self.__class__.__name__, "__init__")
    names = [field.name for field in fields(self)]
    used_names = set()
    # Handle positional arguments
    for value in args:
        try:
            name = names.pop(0)
        except IndexError:
            raise TypeError(f"__init__() given too many positional arguments")
        # print(f'setting {k}={v}')
        setattr(self, name, value)
        used_names.add(name)
    # Handle keyword arguments
    for name, value in kwargs.items():
        if name in names:
            # print(f'setting {k}={v}')
            setattr(self, name, value)
            used_names.add(name)
        elif name in used_names:
            raise TypeError(f"__init__() got multiple values for argument '{name}'")
        else:
            raise TypeError(
                f"Unrecognized field '{name}' for dataclass {self.__class__.__name__}."
                "\nIf this field is valid, please add it to the dataclass in data.py."
                "\nIf adding an object-type field, please create a new dataclass for it."
            )
    # Check for missing positional arguments
    # (fields without a default that were never assigned above).
    missing = [
        f"'{field.name}'" for field in fields(self)
        if isinstance(field.default, dataclasses._MISSING_TYPE) and field.name not in used_names
    ]
    if len(missing) == 1:
        raise TypeError(f"__init__() missing 1 required positional argument: {missing[0]}")
    elif len(missing) == 2:
        raise TypeError(f"__init__() missing 2 required positional arguments: {' and '.join(missing)}")
    elif len(missing) != 0:
        missing[-1] = f"and {missing[-1]}"
        raise TypeError(f"__init__() missing {len(missing)} required positional arguments: {', '.join(missing)}")
    # Run post init if available
    if hasattr(self, "__post_init__"):
        self.__post_init__()
@dataclass
class TrackStatus:
    """Feature flags advertising which track tooling is live."""
    __init__ = _custom_dataclass_init
    concept_exercises: bool = False
    test_runner: bool = False
    representer: bool = False
    analyzer: bool = False
class IndentStyle(str, Enum):
    """Online-editor indentation style: ``"space"`` or ``"tab"``."""
    Space = "space"
    Tab = "tab"
@dataclass
class TestRunnerSettings:
    """Test-runner tuning; -1 means the average run time is unknown."""
    average_run_time: float = -1
@dataclass
class EditorSettings:
    """Online-editor configuration for the track."""
    __init__ = _custom_dataclass_init
    indent_style: IndentStyle = IndentStyle.Space
    indent_size: int = 4
    ace_editor_language: str = "python"
    highlightjs_language: str = "python"
    def __post_init__(self):
        # Coerce the plain string read from JSON into the IndentStyle enum.
        if isinstance(self.indent_style, str):
            self.indent_style = IndentStyle(self.indent_style)
class ExerciseStatus(str, Enum):
    """Lifecycle state of an exercise, as spelled in config.json."""
    Active = "active"
    WIP = "wip"
    Beta = "beta"
    Deprecated = "deprecated"
@dataclass
class ExerciseFiles:
    """File lists from an exercise config.

    Concept exercises provide ``exemplar``; practice exercises provide
    ``example``.  After ``__post_init__`` the value is always available as
    ``exemplar``.
    """
    __init__ = _custom_dataclass_init
    solution: List[str]
    test: List[str]
    editor: List[str] = None
    exemplar: List[str] = None
    # practice exercises are different
    example: List[str] = None
    def __post_init__(self):
        # Exactly one of exemplar/example must be set; normalize to exemplar
        # and drop the example attribute once consumed.
        if self.exemplar is None:
            if self.example is None:
                raise ValueError(
                    "exercise config must have either files.exemplar or files.example"
                )
            else:
                self.exemplar = self.example
                delattr(self, "example")
        elif self.example is not None:
            raise ValueError(
                "exercise config must have either files.exemplar or files.example, but not both"
            )
@dataclass
class ExerciseConfig:
    """Parsed ``.meta/config.json`` for a single exercise."""
    __init__ = _custom_dataclass_init
    files: ExerciseFiles
    authors: List[str] = None
    forked_from: str = None
    contributors: List[str] = None
    language_versions: List[str] = None
    test_runner: bool = True
    source: str = None
    source_url: str = None
    blurb: str = None
    icon: str = None
    def __post_init__(self):
        # Promote the raw `files` mapping to ExerciseFiles and default the
        # optional list fields to empty lists.
        if isinstance(self.files, dict):
            self.files = ExerciseFiles(**self.files)
        for attr in ["authors", "contributors", "language_versions"]:
            if getattr(self, attr) is None:
                setattr(self, attr, [])
    @classmethod
    def load(cls, config_file: Path) -> "ExerciseConfig":
        """Read and parse an exercise's config.json file."""
        with config_file.open() as f:
            return cls(**json.load(f))
@dataclass
class ExerciseInfo:
    """One exercise entry from the track config, plus path helpers."""
    __init__ = _custom_dataclass_init
    path: Path
    slug: str
    name: str
    uuid: str
    prerequisites: List[str]
    type: str = "practice"
    status: ExerciseStatus = ExerciseStatus.Active
    # concept only
    concepts: List[str] = None
    # practice only
    difficulty: int = 1
    topics: List[str] = None
    practices: List[str] = None
    def __post_init__(self):
        # Default the optional list fields and coerce `status` to the enum.
        if self.concepts is None:
            self.concepts = []
        if self.topics is None:
            self.topics = []
        if self.practices is None:
            self.practices = []
        if isinstance(self.status, str):
            self.status = ExerciseStatus(self.status)
    @property
    def solution_stub(self):
        # The student-facing stub: any .py that is not a test file and not
        # example.py; None if no such file exists.
        return next(
            (
                p
                for p in self.path.glob("*.py")
                if not p.name.endswith("_test.py") and p.name != "example.py"
            ),
            None,
        )
    @property
    def helper_file(self):
        # Optional companion data module (e.g. *_data.py), or None.
        return next(self.path.glob("*_data.py"), None)
    @property
    def test_file(self):
        return next(self.path.glob("*_test.py"), None)
    @property
    def meta_dir(self):
        return self.path / ".meta"
    @property
    def exemplar_file(self):
        # Concept exercises use exemplar.py; practice exercises example.py.
        if self.type == "concept":
            return self.meta_dir / "exemplar.py"
        return self.meta_dir / "example.py"
    @property
    def template_path(self):
        return self.meta_dir / "template.j2"
    @property
    def config_file(self):
        return self.meta_dir / "config.json"
    def load_config(self) -> ExerciseConfig:
        """Load this exercise's .meta/config.json."""
        return ExerciseConfig.load(self.config_file)
@dataclass
class Exercises:
    """Concept and practice exercise lists from the track config."""
    __init__ = _custom_dataclass_init
    concept: List[ExerciseInfo]
    practice: List[ExerciseInfo]
    foregone: List[str] = None
    def __post_init__(self):
        if self.foregone is None:
            self.foregone = []
        # Promote raw config dicts to ExerciseInfo, rooting each exercise
        # under exercises/<type>/<slug>.
        for attr_name in ["concept", "practice"]:
            base_path = Path("exercises") / attr_name
            setattr(
                self,
                attr_name,
                [
                    (
                        ExerciseInfo(path=(base_path / e["slug"]), type=attr_name, **e)
                        if isinstance(e, dict)
                        else e
                    )
                    for e in getattr(self, attr_name)
                ],
            )
    def all(self, status_filter={ExerciseStatus.Active, ExerciseStatus.Beta}):
        # NOTE(review): mutable default argument; harmless only while the
        # set is never mutated — confirm before changing this method.
        return [
            e for e in chain(self.concept, self.practice) if e.status in status_filter
        ]
@dataclass
class Concept:
    """A concept taught by the track (uuid/slug/name triple)."""
    __init__ = _custom_dataclass_init
    uuid: str
    slug: str
    name: str
@dataclass
class Feature:
    """A marketing "key feature" entry shown on the track page."""
    __init__ = _custom_dataclass_init
    title: str
    content: str
    icon: str
@dataclass
class FilePatterns:
    """Track-wide filename patterns for the various exercise file roles."""
    __init__ = _custom_dataclass_init
    solution: List[str]
    test: List[str]
    example: List[str]
    exemplar: List[str]
    editor: List[str] = None
@dataclass
class Config:
    """Top-level track ``config.json``."""
    __init__ = _custom_dataclass_init
    language: str
    slug: str
    active: bool
    status: TrackStatus
    blurb: str
    version: int
    online_editor: EditorSettings
    exercises: Exercises
    concepts: List[Concept]
    key_features: List[Feature] = None
    tags: List[Any] = None
    test_runner: TestRunnerSettings = None
    files: FilePatterns = None
    def __post_init__(self):
        # Promote every nested raw dict into its dataclass equivalent and
        # default the optional lists to empty.
        if isinstance(self.status, dict):
            self.status = TrackStatus(**self.status)
        if isinstance(self.online_editor, dict):
            self.online_editor = EditorSettings(**self.online_editor)
        if isinstance(self.test_runner, dict):
            self.test_runner = TestRunnerSettings(**self.test_runner)
        if isinstance(self.exercises, dict):
            self.exercises = Exercises(**self.exercises)
        if isinstance(self.files, dict):
            self.files = FilePatterns(**self.files)
        self.concepts = [
            (Concept(**c) if isinstance(c, dict) else c) for c in self.concepts
        ]
        if self.key_features is None:
            self.key_features = []
        if self.tags is None:
            self.tags = []
    @classmethod
    def load(cls, path="config.json"):
        """Load and validate config.json, exiting with status 1 on failure."""
        try:
            with Path(path).open() as f:
                return cls(**json.load(f))
        except IOError:
            print(f"FAIL: {path} file not found")
            raise SystemExit(1)
        except TypeError as ex:
            # Raised by _custom_dataclass_init on unknown/missing fields.
            print(f"FAIL: {ex}")
            raise SystemExit(1)
@dataclass
class TestCaseTOML:
    """One canonical-data test case entry from ``.meta/tests.toml``."""
    __init__ = _custom_dataclass_init
    uuid: str
    description: str
    include: bool = True
    comment: str = ''
@dataclass
class TestsTOML:
    """All included test cases from an exercise's ``.meta/tests.toml``,
    keyed by case UUID."""
    __init__ = _custom_dataclass_init
    cases: Dict[str, TestCaseTOML]
    @classmethod
    def load(cls, toml_path: Path):
        """Parse `toml_path`, skipping cases marked ``include = false``.

        Bug fix: the per-case options must be expanded as keyword
        arguments (``**opts``).  The previous ``*opts`` unpacked the TOML
        table's *keys* (e.g. the string "description") into the
        positional description/include/comment slots.
        """
        with toml_path.open("rb") as f:
            data = tomli.load(f)
        return cls({uuid: TestCaseTOML(uuid, **opts)
                    for uuid, opts in data.items()
                    if opts.get('include', None) is not False})
if __name__ == "__main__":
    class CustomEncoder(json.JSONEncoder):
        """JSON encoder that serializes pathlib.Path objects as strings."""
        def default(self, obj):
            if isinstance(obj, Path):
                return str(obj)
            return json.JSONEncoder.default(self, obj)
    # Smoke test: parse the track config and pretty-print it as JSON.
    config = Config.load()
    print(json.dumps(asdict(config), cls=CustomEncoder, indent=2))
| mit | fc7d70026b9891116e9314e5230db917 | 25.68984 | 113 | 0.577039 | 3.840708 | false | false | false | false |
exercism/python | exercises/concept/cater-waiter/.meta/exemplar.py | 2 | 4882 | """Functions for compiling dishes and ingredients for a catering company."""
from sets_categories_data import (VEGAN,
VEGETARIAN,
KETO,
PALEO,
OMNIVORE,
ALCOHOLS,
SPECIAL_INGREDIENTS)
def clean_ingredients(dish_name, dish_ingredients):
    """Pair a dish name with the de-duplicated set of its ingredients.

    :param dish_name: str - containing the dish name.
    :param dish_ingredients: list - dish ingredients.
    :return: tuple - containing (dish_name, ingredient set).
    """
    unique_ingredients = {ingredient for ingredient in dish_ingredients}
    return dish_name, unique_ingredients
def check_drinks(drink_name, drink_ingredients):
    """Label a drink "Cocktail" or "Mocktail" from its ingredients.

    :param drink_name: str - name of the drink.
    :param drink_ingredients: list - ingredients in the drink.
    :return: str - drink_name appended with "Mocktail" or "Cocktail".
    """
    has_alcohol = any(ingredient in ALCOHOLS for ingredient in drink_ingredients)
    label = ' Cocktail' if has_alcohol else ' Mocktail'
    return drink_name + label
def categorize_dish(dish_name, dish_ingredients):
    """Assign `dish_name` its meal category from `dish_ingredients`.

    :param dish_name: str - dish to be categorized.
    :param dish_ingredients: list - ingredients for the dish.
    :return: str - the dish name appended with ": <CATEGORY>".

    Categories are checked from most to least restrictive, so the first
    category whose member set covers all ingredients wins.
    """
    ingredients = set(dish_ingredients)
    labeled_groups = ((VEGAN, 'VEGAN'),
                      (VEGETARIAN, 'VEGETARIAN'),
                      (KETO, 'KETO'),
                      (PALEO, 'PALEO'),
                      (OMNIVORE, 'OMNIVORE'))
    for members, label in labeled_groups:
        if ingredients.issubset(members):
            return dish_name + ': ' + label
    return None
def tag_special_ingredients(dish):
    """Pick out the ingredients of `dish` that need a special note.

    :param dish: tuple - of (dish name, list of dish ingredients).
    :return: tuple - containing (dish name, dish special ingredients).
    """
    name, ingredients = dish
    return name, SPECIAL_INGREDIENTS.intersection(ingredients)
def compile_ingredients(dishes):
    """Create a master set of ingredients across all dishes.

    :param dishes: list - of dish ingredient sets.
    :return: set - of ingredients compiled from `dishes`.
    """
    # set().union(*dishes) folds every dish into one set, and also covers
    # the empty-input case (an empty set is returned).
    return set().union(*dishes)
def separate_appetizers(dishes, appetizers):
    """Remove any dish that appears on the appetizer list, de-duplicating.

    :param dishes: list - of dish names.
    :param appetizers: list - of appetizer names.
    :return: list - of dish names that do not appear on appetizer list.
    """
    appetizer_names = set(appetizers)
    return [dish for dish in {*dishes} if dish not in appetizer_names]
def singleton_ingredients(dishes, intersection):
    """Find ingredients that appear in only one dish of a category.

    :param dishes: list - of ingredient sets.
    :param intersection: constant - a `<CATEGORY>_INTERSECTION` set.
    :return: set - containing singleton ingredients.

    Successive symmetric differences keep the elements seen an odd number
    of times; removing the category intersection leaves true singletons.
    """
    singletons = set()
    for dish in dishes:
        singletons = singletons.symmetric_difference(dish)
    return singletons.difference(intersection)
| mit | f30a4622857a7b52b23fd323deb9e666 | 35.432836 | 126 | 0.663867 | 3.445307 | false | false | false | false |
spaam/svtplay-dl | lib/svtplay_dl/service/mtvnn.py | 1 | 6186 | import json
import logging
import re
import xml.etree.ElementTree as ET
from urllib.parse import urlparse
from svtplay_dl.error import ServiceError
from svtplay_dl.fetcher.hls import hlsparse
from svtplay_dl.service import OpenGraphThumbMixin
from svtplay_dl.service import Service
# This is _very_ similar to mtvservices..
class Mtvnn(Service, OpenGraphThumbMixin):
    """Nickelodeon / Comedy Central (Nordic) scraper built on MTV feeds."""
    supported_domains = ["nickelodeon.se", "nickelodeon.nl", "nickelodeon.no", "www.comedycentral.se", "nickelodeon.dk"]
    def get(self):
        # Yield HLS streams (or ServiceError) for the video at self.url.
        data = self.get_urldata()
        parse = urlparse(self.url)
        # Swedish sites: resolve the episode id via the intl mrss player
        # feed, then the MediaGenerator service, to reach an HLS manifest.
        if parse.netloc.endswith("se"):
            match = re.search(r'<div class="video-player" (.*)>', data)
            if not match:
                yield ServiceError("Can't find video info")
                return
            match_id = re.search(r'data-id="([0-9a-fA-F|\-]+)" ', match.group(1))
            if not match_id:
                yield ServiceError("Can't find video info")
                return
            wanted_id = match_id.group(1)
            url_service = (
                f"http://feeds.mtvnservices.com/od/feed/intl-mrss-player-feed?mgid=mgid:arc:episode:nick.intl:{wanted_id}"
                "&arcEp=nickelodeon.se&imageEp=nickelodeon.se&stage=staging&accountOverride=intl.mtvi.com&ep=a9cc543c"
            )
            service_asset = self.http.request("get", url_service)
            match_guid = re.search('<guid isPermaLink="false">(.*)</guid>', service_asset.text)
            if not match_guid:
                yield ServiceError("Can't find video info")
                return
            hls_url = (
                f"https://mediautilssvcs-a.akamaihd.net/services/MediaGenerator/{match_guid.group(1)}?arcStage=staging&accountOverride=intl.mtvi.com&"
                "billingSection=intl&ep=a9cc543c&acceptMethods=hls"
            )
            hls_asset = self.http.request("get", hls_url)
            xml = ET.XML(hls_asset.text)
            # Defensive: only descend when the whole video/item/rendition/src
            # chain exists in the MediaGenerator response.
            if (
                xml.find("./video") is not None
                and xml.find("./video").find("item") is not None
                and xml.find("./video").find("item").find("rendition") is not None
                and xml.find("./video").find("item").find("rendition").find("src") is not None
            ):
                hls_url = xml.find("./video").find("item").find("rendition").find("src").text
                stream = hlsparse(self.config, self.http.request("get", hls_url), hls_url, output=self.output)
                for key in list(stream.keys()):
                    yield stream[key]
            return
        # Other sites: locate the mrss feed URL, read the title from it,
        # then fetch playable variants from the mtvnn.com JSON API.
        match = re.search(r'data-mrss=[\'"](http://gakusei-cluster.mtvnn.com/v2/mrss.xml[^\'"]+)[\'"]', data)
        if not match:
            yield ServiceError("Can't find id for the video")
            return
        mrssxmlurl = match.group(1)
        data = self.http.request("get", mrssxmlurl).content
        xml = ET.XML(data)
        title = xml.find("channel").find("item").find("title").text
        self.output["title"] = title
        match = re.search("gon.viacom_config=([^;]+);", self.get_urldata())
        if match:
            countrycode = json.loads(match.group(1))["country_code"].replace("_", "/")
            match = re.search("mtvnn.com:([^&]+)", mrssxmlurl)
            if match:
                # The API path uses "playlists"; the origin of this mapping
                # is unclear (kept from the original implementation).
                urlpart = match.group(1).replace("-", "/").replace("playlist", "playlists")
                hlsapi = f"http://api.mtvnn.com/v2/{countrycode}/{urlpart}.json?video_format=m3u8&callback=&"
                data = self.http.request("get", hlsapi).text
                dataj = json.loads(data)
                for i in dataj["local_playlist_videos"]:
                    yield from hlsparse(self.config, self.http.request("get", i["url"]), i["url"], output=self.output)
    def find_all_episodes(self, config):
        # Scrape the show page for playlist item ids and build episode URLs.
        episodes = []
        match = re.search(r"data-franchise='([^']+)'", self.get_urldata())
        if match is None:
            logging.error("Couldn't program id")
            return episodes
        programid = match.group(1)
        match = re.findall(r"<li class='([a-z]+ )?playlist-item( [a-z]+)*?'( data-[-a-z]+='[^']+')* data-item-id='([^']+)'", self.get_urldata())
        if not match:
            logging.error("Couldn't retrieve episode list")
            return episodes
        episodNr = []
        for i in match:
            episodNr.append(i[3])
        # Respect the --all-last limit by truncating the sorted id list.
        n = 0
        for i in sorted(episodNr):
            if n == config.get("all_last"):
                break
            episodes.append(f"http://www.nickelodeon.se/serier/{programid}-something/videos/{i}-something")
            n += 1
        return episodes
class MtvMusic(Service, OpenGraphThumbMixin):
    """mtv.se music-video scraper."""
    supported_domains = ["mtv.se"]
    def get(self):
        data = self.get_urldata()
        # The page embeds its playlist as a JSON blob on window.pagePlaylist.
        match = re.search("window.pagePlaylist = (.*);", data)
        if not match:
            yield ServiceError("Can't find video info")
            return
        try:
            janson = json.loads(match.group(1))
        except Exception:
            yield ServiceError(f"Can't decode api request: {match.group(1)}")
            return
        # The wanted entry is identified by the numeric id that leads the
        # last path segment of the URL slug.
        parse = urlparse(self.url)
        wanted_id = parse.path.split("/")[-1].split("-")[0]
        for n in janson:
            if wanted_id == str(n["id"]):
                mrssxmlurl = f"http://media-utils.mtvnservices.com/services/MediaGenerator/mgid:arc:video:mtv.se:{n['video_token']}?acceptMethods=hls"
                hls_asset = self.http.request("get", mrssxmlurl)
                xml = ET.XML(hls_asset.text)
                # Only descend when the whole video/item/rendition/src chain
                # exists in the MediaGenerator response.
                if (
                    xml.find("./video") is not None
                    and xml.find("./video").find("item") is not None
                    and xml.find("./video").find("item").find("rendition") is not None
                    and xml.find("./video").find("item").find("rendition").find("src") is not None
                ):
                    hls_url = xml.find("./video").find("item").find("rendition").find("src").text
                    yield from hlsparse(self.config, self.http.request("get", hls_url), hls_url, output=self.output)
| mit | 9ed23300b7af9862c8ac06b484029101 | 40.516779 | 150 | 0.556741 | 3.586087 | false | false | false | false |
exercism/python | exercises/practice/markdown/markdown.py | 2 | 2720 | import re
def parse(markdown):
    """Convert a small subset of Markdown to HTML.

    Supports # .. ###### headers, * list items, __bold__ and _italic_
    spans; anything else is wrapped in <p> tags.  The duplicated
    bold/italic handling and the exact statement order are intentional
    (this is the exercism "refactor me" starting point) — output depends
    on it, so the logic is documented here rather than restructured.
    """
    lines = markdown.split('\n')
    res = ''
    in_list = False
    in_list_append = False
    for i in lines:
        # Headers: longest marker first so '######' is not caught by '#'.
        if re.match('###### (.*)', i) is not None:
            i = '<h6>' + i[7:] + '</h6>'
        elif re.match('##### (.*)', i) is not None:
            i = '<h5>' + i[6:] + '</h5>'
        elif re.match('#### (.*)', i) is not None:
            i = '<h4>' + i[5:] + '</h4>'
        elif re.match('### (.*)', i) is not None:
            i = '<h3>' + i[4:] + '</h3>'
        elif re.match('## (.*)', i) is not None:
            i = '<h2>' + i[3:] + '</h2>'
        elif re.match('# (.*)', i) is not None:
            i = '<h1>' + i[2:] + '</h1>'
        # List items: the first item also opens the <ul>.
        m = re.match(r'\* (.*)', i)
        if m:
            if not in_list:
                in_list = True
                is_bold = False
                is_italic = False
                curr = m.group(1)
                m1 = re.match('(.*)__(.*)__(.*)', curr)
                if m1:
                    curr = m1.group(1) + '<strong>' + \
                        m1.group(2) + '</strong>' + m1.group(3)
                    is_bold = True
                m1 = re.match('(.*)_(.*)_(.*)', curr)
                if m1:
                    curr = m1.group(1) + '<em>' + m1.group(2) + \
                        '</em>' + m1.group(3)
                    is_italic = True
                i = '<ul><li>' + curr + '</li>'
            else:
                # Subsequent items: detect bold/italic first, then apply
                # the substitutions (note m1 is reused across both checks).
                is_bold = False
                is_italic = False
                curr = m.group(1)
                m1 = re.match('(.*)__(.*)__(.*)', curr)
                if m1:
                    is_bold = True
                m1 = re.match('(.*)_(.*)_(.*)', curr)
                if m1:
                    is_italic = True
                if is_bold:
                    curr = m1.group(1) + '<strong>' + \
                        m1.group(2) + '</strong>' + m1.group(3)
                if is_italic:
                    curr = m1.group(1) + '<em>' + m1.group(2) + \
                        '</em>' + m1.group(3)
                i = '<li>' + curr + '</li>'
        else:
            # A non-list line closes any open list (deferred via flag).
            if in_list:
                in_list_append = True
                in_list = False
        # Anything not already wrapped in a block tag becomes a paragraph.
        m = re.match('<h|<ul|<p|<li', i)
        if not m:
            i = '<p>' + i + '</p>'
        # Inline emphasis for non-list lines (runs on every line).
        m = re.match('(.*)__(.*)__(.*)', i)
        if m:
            i = m.group(1) + '<strong>' + m.group(2) + '</strong>' + m.group(3)
        m = re.match('(.*)_(.*)_(.*)', i)
        if m:
            i = m.group(1) + '<em>' + m.group(2) + '</em>' + m.group(3)
        if in_list_append:
            i = '</ul>' + i
            in_list_append = False
        res += i
    # Close a list that runs to the end of the input.
    if in_list:
        res += '</ul>'
    return res
| mit | d5bcf6de602980e764b580fbfd5d6e1f | 34.324675 | 79 | 0.316912 | 3.358025 | false | false | false | false |
spaam/svtplay-dl | lib/svtplay_dl/service/sr.py | 1 | 1048 | # ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
import copy
import json
import re
from svtplay_dl.error import ServiceError
from svtplay_dl.fetcher.http import HTTP
from svtplay_dl.service import OpenGraphThumbMixin
from svtplay_dl.service import Service
class Sr(Service, OpenGraphThumbMixin):
    supported_domains = ["sverigesradio.se"]

    def get(self):
        """Yield the audio stream (or a ServiceError) for a SR page."""
        page = self.get_urldata()
        audio_id = re.search(r'data-audio-id="(\d+)"', page)
        audio_type = re.search(r'data-audio-type="(\w+)"', page)
        if not (audio_id and audio_type):
            yield ServiceError("Can't find audio info")
            return
        # Ask the player AJAX endpoint for the real audio URL.
        dataurl = (
            f"https://sverigesradio.se/sida/playerajax/getaudiourl"
            f"?id={audio_id.group(1)}&type={audio_type.group(1)}&quality=high&format=iis"
        )
        playerinfo = json.loads(self.http.request("get", dataurl).text)
        yield HTTP(copy.copy(self.config), playerinfo["audioUrl"], 128, output=self.output)
| mit | 88cb43031b3d09dfb22fb4dd8c44cb5b | 32.806452 | 118 | 0.648855 | 3.18541 | false | false | false | false |
pinax/django-user-accounts | account/tests/test_models.py | 1 | 1652 | from django.test import TestCase
from account.models import SignupCode
class SignupCodeModelTestCase(TestCase):
    """Tests for SignupCode.exists() lookup combinations.

    The mismatch cases below assert True, showing exists() matches on
    email OR code rather than requiring both to agree.
    """
    def test_exists_no_match(self):
        code = SignupCode(email="foobar@example.com", code="FOOFOO")
        code.save()
        self.assertFalse(SignupCode.exists(code="BARBAR"))
        self.assertFalse(SignupCode.exists(email="bar@example.com"))
        self.assertFalse(SignupCode.exists(email="bar@example.com", code="BARBAR"))
        self.assertFalse(SignupCode.exists())
    def test_exists_email_only_match(self):
        code = SignupCode(email="foobar@example.com", code="FOOFOO")
        code.save()
        self.assertTrue(SignupCode.exists(email="foobar@example.com"))
    def test_exists_code_only_match(self):
        code = SignupCode(email="foobar@example.com", code="FOOFOO")
        code.save()
        self.assertTrue(SignupCode.exists(code="FOOFOO"))
        self.assertTrue(SignupCode.exists(email="bar@example.com", code="FOOFOO"))
    def test_exists_email_match_code_mismatch(self):
        code = SignupCode(email="foobar@example.com", code="FOOFOO")
        code.save()
        self.assertTrue(SignupCode.exists(email="foobar@example.com", code="BARBAR"))
    def test_exists_code_match_email_mismatch(self):
        code = SignupCode(email="foobar@example.com", code="FOOFOO")
        code.save()
        self.assertTrue(SignupCode.exists(email="bar@example.com", code="FOOFOO"))
    def test_exists_both_match(self):
        code = SignupCode(email="foobar@example.com", code="FOOFOO")
        code.save()
        self.assertTrue(SignupCode.exists(email="foobar@example.com", code="FOOFOO"))
| mit | 3d5d94676044ff38db03a3a2d6fd6c53 | 35.711111 | 85 | 0.673729 | 3.545064 | false | true | false | false |
exercism/python | exercises/practice/word-search/.meta/example.py | 2 | 1676 | import copy
class Point:
    """A 2-D grid coordinate supporting vector-style arithmetic."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __repr__(self):
        # Keep the exact 'Point(x:y)' format callers may rely on.
        return f'Point({self.x}:{self.y})'

    def __add__(self, other):
        return Point(self.x + other.x, self.y + other.y)

    def __sub__(self, other):
        return Point(self.x - other.x, self.y - other.y)

    def __eq__(self, other):
        return (self.x, self.y) == (other.x, other.y)

    def __ne__(self, other):
        return not self == other
# All eight scan directions: horizontal, vertical and both diagonals,
# forward and backward.
DIRECTIONS = (Point(1, 0), Point(1, -1), Point(1, 1), Point(-1, -1),
              Point(0, -1), Point(0, 1), Point(-1, 1), Point(-1, 0))
class WordSearch:
    """Word-search grid: locate a word's start/end Points in any direction."""
    def __init__(self, puzzle):
        # `puzzle` is a list of equal-length strings (rows).
        self.rows = puzzle
        self.width = len(self.rows[0])
        self.height = len(self.rows)
    def find_char(self, coordinate):
        # Return the letter at `coordinate`, or None when out of bounds.
        if coordinate.x < 0 or coordinate.x >= self.width:
            return None
        if coordinate.y < 0 or coordinate.y >= self.height:
            return None
        return self.rows[coordinate.y][coordinate.x]
    def find(self, word, position, direction):
        # Try to match `word` starting at `position`, stepping `direction`;
        # return (start, end) Points on success, None otherwise.
        current = copy.copy(position)
        for letter in word:
            if self.find_char(current) != letter:
                return None
            current += direction
        return position, current - direction
    def search(self, word):
        # Scan every cell in every direction.  The generator advances y
        # fastest (column-major), which fixes which match wins on ties.
        positions = (Point(idx, edx)
                     for idx in range(self.width) for edx in range(self.height))
        for position in positions:
            for direction in DIRECTIONS:
                result = self.find(word, position, direction)
                if result:
                    return result
        return None
| mit | c5c7c8845f9029be233c4730680c8c15 | 27.896552 | 80 | 0.539976 | 3.766292 | false | false | false | false |
exercism/python | exercises/practice/connect/.meta/example.py | 2 | 2077 |
class ConnectGame:
    """Determine the winner of a Hex/"Connect" board.

    Black ('X') wins by linking the left and right edges; White ('O')
    by linking top and bottom.  Cells connect along the six hex
    neighbor directions.
    """

    DIRECTIONS = [(0, 1), (0, -1), (1, 0), (-1, 0), (1, -1), (-1, 1)]
    WHITE = 'O'
    BLACK = 'X'

    def __init__(self, lines):
        self.board = ConnectGame.make_board(lines)
        assert self.board
        self.height = len(self.board)
        self.width = len(self.board[0])
        assert self.width > 0 and self.height > 0
        # Every row must be the same length as the first.
        assert all(len(row) == self.width for row in self.board)

    def valid(self, width, height):
        """Return True when (width, height) lies on the board."""
        return width in range(self.width) and height in range(self.height)

    @staticmethod
    def make_board(lines):
        """Strip the hex-layout whitespace, leaving one string per row."""
        rows = []
        for text_row in lines.splitlines():
            rows.append(''.join(text_row.split()))
        return rows

    def player_reach_dest(self, player, width, height):
        """Return True when `player` has reached their target edge."""
        if player == self.BLACK:
            return width == self.width - 1
        if player == self.WHITE:
            return height == self.height - 1
        return None

    def walk_board(self, player, width, height, visited=None):
        """Depth-first walk over `player`'s stones toward their edge."""
        trail = visited or []
        if (width, height) in trail:
            return False
        if not self.valid(width, height) or self.board[height][width] != player:
            return False
        if self.player_reach_dest(player, width, height):
            return True
        extended = trail + [(width, height)]
        for d_width, d_height in self.DIRECTIONS:
            if self.walk_board(player, width + d_width, height + d_height, extended):
                return True
        return None

    def check_player_is_winner(self, player):
        """Start a walk from each cell on the player's starting edge."""
        if player == self.BLACK:
            if any(self.walk_board(player, 0, row) for row in range(self.height)):
                return True
        if player == self.WHITE:
            if any(self.walk_board(player, col, 0) for col in range(self.width)):
                return True
        return None

    def get_winner(self):
        """Return 'X', 'O' (Black checked first) or '' for no winner."""
        for player in (self.BLACK, self.WHITE):
            if self.check_player_is_winner(player):
                return player
        return ''
| mit | 992b3234dceb423602c5498d04275b5b | 30.469697 | 107 | 0.548869 | 3.933712 | false | false | false | false |
exercism/python | exercises/concept/card-games/.meta/exemplar.py | 2 | 2294 | """Functions for tracking poker hands and assorted card tasks.
Python list documentation: https://docs.python.org/3/tutorial/datastructures.html
"""
def get_rounds(number):
    """Create a list containing the current and next two round numbers.

    :param number: int - current round number.
    :return: list - current round and the two that follow.
    """
    return [number + offset for offset in range(3)]
def concatenate_rounds(rounds_1, rounds_2):
    """Concatenate two lists of round numbers into one new list.

    :param rounds_1: list - first rounds played.
    :param rounds_2: list - second set of rounds played.
    :return: list - all rounds played.
    """
    return [*rounds_1, *rounds_2]
def list_contains_round(rounds, number):
    """Check if the given round number appears in the rounds played.

    :param rounds: list - rounds played.
    :param number: int - round number.
    :return: bool - was the round played?
    """
    return any(played == number for played in rounds)
def card_average(hand):
    """Calculate and return the average card value in the hand.

    :param hand: list - cards in hand.
    :return: float - average value of the cards in the hand.
    """
    total = sum(hand)
    count = len(hand)
    return total / count
def approx_average_is_average(hand):
    """Check whether either quick estimate matches the true average.

    The estimates are the mean of the first and last cards, and the value
    of the 'middle' card.

    :param hand: list - cards in hand.
    :return: bool - does one of the approximate averages equal the `true average`?
    """
    true_average = sum(hand) / len(hand)
    edge_average = (hand[0] + hand[-1]) / 2
    median_card = hand[len(hand) // 2]
    return true_average in (edge_average, median_card)
def average_even_is_average_odd(hand):
    """Check whether even-indexed and odd-indexed cards share an average.

    :param hand: list - cards in hand.
    :return: bool - are even and odd averages equal?
    """
    evens = hand[::2]
    odds = hand[1::2]
    return sum(evens) / len(evens) == sum(odds) / len(odds)
def maybe_double_last(hand):
    """Return the hand with a trailing Jack (value 11) doubled to 22.

    :param hand: list - cards in hand.
    :return: list - hand with a last-place Jack's value doubled.

    Fix: the previous version doubled the Jack in place, mutating the
    caller's list as a side effect. A new list is now returned instead;
    the returned value is unchanged.
    """
    if hand[-1] == 11:
        return hand[:-1] + [hand[-1] * 2]
    return hand
| mit | cc486c3b1bacaab601f079da3c39a61f | 25.068182 | 107 | 0.647341 | 3.748366 | false | false | false | false |
pinax/django-user-accounts | account/management/commands/user_password_expiry.py | 2 | 1267 | from django.contrib.auth import get_user_model
from django.core.management.base import LabelCommand
from account.conf import settings
from account.models import PasswordExpiry
class Command(LabelCommand):
    """Management command that sets a per-user password expiration period.

    Usage: ``manage.py user_password_expiry <username> [-e SECONDS]``.
    Creates or updates the user's ``PasswordExpiry`` record.
    """

    help = "Create user-specific password expiration period."
    label = "username"

    def add_arguments(self, parser):
        # Py3-style super() -- the old two-argument form was a Python 2 relic.
        super().add_arguments(parser)
        parser.add_argument(
            "-e", "--expire",
            type=int,
            nargs="?",
            default=settings.ACCOUNT_PASSWORD_EXPIRY,
            help="number of seconds until password expires"
        )

    def handle_label(self, username, **options):
        """Set the expiry for one username; returns a status message string."""
        User = get_user_model()
        try:
            user = User.objects.get(username=username)
        except User.DoesNotExist:
            return 'User "{}" not found'.format(username)
        expire = options["expire"]
        # Modify existing PasswordExpiry or create new if needed.
        if not hasattr(user, "password_expiry"):
            PasswordExpiry.objects.create(user=user, expiry=expire)
        else:
            user.password_expiry.expiry = expire
            user.password_expiry.save()
        return 'User "{}" password expiration set to {} seconds'.format(username, expire)
| mit | 9a57923425c14c5e0daa2a24cd6c39af | 31.487179 | 89 | 0.633781 | 4.477032 | false | false | false | false |
codefordc/housing-insights | back_end/ETL/project.py | 1 | 6472 | '''
make_projects_table.py
----------------------
This file creates the projects table in the database, which is sent to
the front-end via /api/projects/. It depends on the following data sources:
- Projects.csv, From the Preservation Catalog Folder in the s3
- 'Affordable Housing Data': Updated regularly from open data DC
- Master Address Repository
Projects that are not from the preservation catalog have an nlihc_id
beginning with "AH" for affordable housing.
'''
from . import utils
from . import wmata
import requests
import numpy as np
import pandas as pd
import geopandas as gp
# Columns kept (in this order) from the preservation catalog's Project.csv
# after renaming in load_preservation_catalog_projects().
preservation_catalog_columns = [
    'nlihc_id',
    # Geography
    'latitude',
    'longitude',
    'census_tract',
    'neighborhood_cluster',
    'ward',
    'neighborhood_cluster_desc',
    # Basic Project Information
    'proj_name',
    'proj_addre',
    'proj_units_tot',
    'proj_address_id',
    'proj_units_assist_max',
    'proj_owner_type',
    # Most recent REAC inspection, merged in from Reac_score.csv
    'most_recent_reac_score_num',
    'most_recent_reac_score_date',
]
def load_preservation_catalog_projects():
    '''
    Loads the raw data from the preservation catalog.
    It is located in 'preservation_catalog' on the S3.
    Returns a dataframe restricted to `preservation_catalog_columns`.
    '''
    df = pd.read_csv(utils.S3+'preservation_catalog/Project.csv')
    # Normalize column names before the lookups/renames below.
    df.columns = df.columns.str.lower()
    df = utils.get_census_tract_for_data(df, 'proj_lon', 'proj_lat')
    df['neighborhood_cluster'] = utils.just_digits(df.cluster_tr2000)
    df['ward'] = utils.just_digits(df.ward2012)
    # Attach each project's most recent REAC inspection score and date.
    df = df.merge(load_reac_data(), how='left')
    return df.rename(columns={'proj_lat': 'latitude',
                              'proj_lon': 'longitude',
                              'tract': 'census_tract',
                              'date': 'most_recent_reac_score_date',
                              'reac_score_num': 'most_recent_reac_score_num',
                              'cluster_tr2000_name': 'neighborhood_cluster_desc',
                              })[preservation_catalog_columns]
def load_affordable_housing_projects():
    '''Load and transform the "Affordable Housing" raw data from opendata.dc.'''
    # Raw-column -> output-column mapping; values() order also fixes the
    # output column order below.
    column_map = {
        'ADDRESS_ID': 'proj_address_id',
        'FULLADDRESS': 'proj_addre',
        'MAR_WARD': 'ward',
        'PROJECT_NAME': 'proj_name',
        'TOTAL_AFFORDABLE_UNITS': 'proj_units_tot',
        'LATITUDE': 'latitude',
        'LONGITUDE': 'longitude',
        'tract': 'census_tract',
    }
    url = utils.get_paths_for_data('affordable_housing', years=utils.get_years())[0]
    df = pd.read_csv(url)
    df['MAR_WARD'] = utils.just_digits(df['MAR_WARD'])
    df = utils.get_census_tract_for_data(df, 'LONGITUDE', 'LATITUDE')
    df = df.rename(columns=column_map)[list(column_map.values())]
    df = utils.get_cluster_for_data(df, 'longitude', 'latitude')
    # NOTE(review): the new Series aligns on index when assigned, which
    # assumes df still has a default RangeIndex here -- confirm upstream.
    df['nlihc_id'] = pd.Series(df.index).astype(str).apply(lambda s: 'AH' + s.zfill(6))
    return df[['nlihc_id', 'neighborhood_cluster'] + list(column_map.values())]
def load_mar_projects():
    '''Load and transform the "Address Points" (MAR) raw data from opendata.dc.'''
    url = utils.get_paths_for_data('mar', years=utils.get_years())[0]
    # Raw-column -> output-column mapping; also selects the columns we keep.
    keep = {
        'ADDRESS_ID': 'proj_address_id',
        'ACTIVE_RES_UNIT_COUNT': 'active_res_unit_count',
        'SSL': 'ssl',
        'CLUSTER_': 'neighborhood_cluster',
    }
    df = pd.read_csv(url)
    return df[list(keep)].rename(columns=keep)
def load_tax():
    '''Map each SSL to its assessed taxable value ("Project Taxable Value").'''
    # Tax data from the DC property MapServer; it seems to update every year.
    query_url = (
        'https://maps2.dcgis.dc.gov/dcgis/rest/services/DCGIS_DATA/'
        'Property_and_Land_WebMercator/MapServer/53/query'
        '?where=1%3D1&outFields=SSL,ASSESSMENT&returnGeometry=false&outSR=4326&f=json'
    )
    response = requests.get(query_url)
    features = response.json()['features']
    return {feat['attributes']['SSL']: feat['attributes']['ASSESSMENT']
            for feat in features}
def load_topa():
    '''
    This function loads the raw TOPA data, grabs the most recent date for
    each address id, and counts the number of TOPA notices for each address id.
    It returns a dataframe where the observations are an address id, the most
    recent topa notice as a date, and the number of topa notices.
    '''
    df = pd.read_csv(utils.S3+'topa/Rcasd_current.csv')
    df.columns = df.columns.str.lower()
    df['most_recent_topa_date'] = pd.to_datetime(df['notice_date'])
    return pd.concat([
        # The most recent topa date per address id.
        (df.sort_values('most_recent_topa_date', ascending=False)
         .groupby('address_id').first()['most_recent_topa_date']),
        # Number of observations per address id. value_counts() yields a
        # Series whose *name* is 'address_id' and whose *index* is the ids,
        # hence the renames below after reset_index().
        df.address_id.value_counts()
    ], axis=1).reset_index().rename(columns={
        # Fixing column names
        'address_id': 'topa_count', 'index': 'proj_address_id'})
def load_reac_data():
    '''Return each project's most recent REAC score and its date from the s3.'''
    reac = pd.read_csv(utils.S3 + 'preservation_catalog/Reac_score.csv')
    reac.columns = reac.columns.str.lower()
    reac['date'] = pd.to_datetime(reac['reac_date'])
    # Keep only the newest inspection row per project.
    latest = reac.sort_values('date', ascending=False).groupby('nlihc_id').first()
    return latest[['date', 'reac_score_num']].reset_index()
def load_project_data(engine):
    '''Build and write the combined project table to `new_project`.

    With the addition of MAR - this takes a long time (a few minutes).
    '''
    print("Starting load")
    # Combine preservation-catalog and open-data affordable-housing rows,
    # then deduplicate on address id after sorting by nlihc_id (presumably
    # so catalog ids win over generated 'AH...' ids -- TODO confirm).
    df = pd.concat([load_preservation_catalog_projects(),
                    load_affordable_housing_projects()], sort=True)
    df = df.sort_values('nlihc_id').drop_duplicates('proj_address_id')
    df = add_mar_and_tax(df)
    df = add_neighborhoods(df)
    df = df.merge(load_topa(), on='proj_address_id', how='left')
    # Nearest bus-stop info from WMATA, keyed back in by nlihc_id/lon/lat.
    bus = wmata.add_bus_stops(df[['nlihc_id', 'longitude', 'latitude']],
                              'longitude', 'latitude')
    df = df.merge(bus, how='left')
    return utils.write_table(df, 'new_project', engine)
def add_mar_and_tax(df):
    """Merge in MAR address attributes and map tax assessments onto SSL."""
    print("Adding mar and tax")
    merged = df.merge(load_mar_projects(), on='proj_address_id', how='left')
    merged['sum_appraised_value_current_total'] = merged['ssl'].map(load_tax())
    return merged
def add_neighborhoods(df):
    """Reconcile the two neighborhood-cluster columns left by earlier merges."""
    print("Adding neighborhoods")
    # Normalize both cluster columns to their numeric form, then keep the
    # larger of the two per row.
    df['neighborhood_cluster_x'] = utils.just_digits(df.neighborhood_cluster_x)
    df['neighborhood_cluster_y'] = utils.just_digits(df.neighborhood_cluster_y)

    def pick_cluster(row):
        return max(row.neighborhood_cluster_x, row.neighborhood_cluster_y)

    df['neighborhood_cluster'] = df.apply(pick_cluster, axis=1)
    return df.drop(columns=['neighborhood_cluster_x', 'neighborhood_cluster_y'])
| mit | ce361b25d3fd2ac5c74cd6679907529f | 40.487179 | 192 | 0.631335 | 3.313876 | false | false | false | false |
rapptz/discord.py | discord/types/scheduled_event.py | 3 | 3294 | """
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from typing import List, Literal, Optional, TypedDict, Union
from typing_extensions import NotRequired
from .snowflake import Snowflake
from .user import User
from .member import Member
from .channel import PrivacyLevel as PrivacyLevel
# Integer codes as sent by the Discord API.
EventStatus = Literal[1, 2, 3, 4]
# 1 = stage instance, 2 = voice, 3 = external -- matches the entity_type
# Literal tags on the payload classes below.
EntityType = Literal[1, 2, 3]
class _BaseGuildScheduledEvent(TypedDict):
    # Fields common to every guild scheduled-event payload variant.
    id: Snowflake
    guild_id: Snowflake
    entity_id: Optional[Snowflake]
    name: str
    scheduled_start_time: str  # presumably an ISO8601 timestamp string -- TODO confirm
    privacy_level: PrivacyLevel
    status: EventStatus
    # NotRequired: the API may omit these keys entirely.
    creator_id: NotRequired[Optional[Snowflake]]
    description: NotRequired[Optional[str]]
    creator: NotRequired[User]
    user_count: NotRequired[int]
    image: NotRequired[Optional[str]]
class _VoiceChannelScheduledEvent(_BaseGuildScheduledEvent):
    # Channel-hosted events: a channel is set and external metadata is None.
    channel_id: Snowflake
    entity_metadata: Literal[None]
    scheduled_end_time: NotRequired[Optional[str]]
class StageInstanceScheduledEvent(_VoiceChannelScheduledEvent):
    # Discriminator tag: entity_type 1 = stage-instance event.
    entity_type: Literal[1]
class VoiceScheduledEvent(_VoiceChannelScheduledEvent):
    # Discriminator tag: entity_type 2 = voice-channel event.
    entity_type: Literal[2]
class EntityMetadata(TypedDict):
    # Metadata attached to external events.
    location: str
class ExternalScheduledEvent(_BaseGuildScheduledEvent):
    # Discriminator tag entity_type 3 = external event: no channel; instead a
    # required location and a required end time.
    channel_id: Literal[None]
    entity_metadata: EntityMetadata
    scheduled_end_time: str
    entity_type: Literal[3]
GuildScheduledEvent = Union[StageInstanceScheduledEvent, VoiceScheduledEvent, ExternalScheduledEvent]
class _WithUserCount(TypedDict):
    # Mixin that upgrades user_count from NotRequired to required.
    user_count: int
class _StageInstanceScheduledEventWithUserCount(StageInstanceScheduledEvent, _WithUserCount):
    # Stage-instance variant with user_count guaranteed present.
    ...
class _VoiceScheduledEventWithUserCount(VoiceScheduledEvent, _WithUserCount):
    # Voice variant with user_count guaranteed present.
    ...
class _ExternalScheduledEventWithUserCount(ExternalScheduledEvent, _WithUserCount):
    # External variant with user_count guaranteed present.
    ...
# Same union as GuildScheduledEvent, but with user_count guaranteed present.
GuildScheduledEventWithUserCount = Union[
    _StageInstanceScheduledEventWithUserCount, _VoiceScheduledEventWithUserCount, _ExternalScheduledEventWithUserCount
]
class ScheduledEventUser(TypedDict):
    # One subscriber of a scheduled event.
    guild_scheduled_event_id: Snowflake
    user: User
class ScheduledEventUserWithMember(ScheduledEventUser):
    # Same, with the guild member object included.
    member: Member
# List payloads of event subscribers.
ScheduledEventUsers = List[ScheduledEventUser]
ScheduledEventUsersWithMember = List[ScheduledEventUserWithMember]
| mit | 49b4fbc81280667d5959fa78f00e7fc5 | 28.675676 | 118 | 0.789314 | 3.852632 | false | false | false | false |
explosion/thinc | thinc/tests/test_import__all__.py | 1 | 1800 | import ast
from collections import namedtuple
from typing import Tuple, List
import pytest
import importlib
# (module, name, alias) triple describing one imported symbol; module and
# name are dotted paths pre-split into lists.
_Import = namedtuple("_Import", ["module", "name", "alias"])


def get_imports(path: str) -> Tuple[_Import, ...]:
    """Parse Python file at path, retrieve import statements.

    Adapted from https://stackoverflow.com/a/9049549.

    path (str): Path to Python file.
    RETURNS (Tuple[_Import]): All imports found in file at path.
    """
    # Explicit encoding: Python source defaults to UTF-8, but open() would
    # otherwise use the locale's preferred encoding and could fail or
    # mis-decode on non-UTF-8 platforms.
    with open(path, encoding="utf-8") as fh:
        root = ast.parse(fh.read(), path)
    imports: List[_Import] = []
    for node in ast.walk(root):
        if isinstance(node, ast.Import):
            # Plain `import x.y` has no "from" module part.
            module: List[str] = []
        elif isinstance(node, ast.ImportFrom) and node.module:
            module = node.module.split(".")
        else:
            # Skip non-import nodes and relative `from . import x` forms.
            continue
        imports.extend(
            _Import(module, alias.name.split("."), alias.asname)
            for alias in node.names
        )
    return tuple(imports)
@pytest.mark.parametrize("module_name", ["thinc.api", "thinc.shims", "thinc.layers"])
def test_import_reexport_equivalency(module_name: str):
    """Tests whether a module's __all__ is equivalent to its imports. This assumes that this module is supposed to
    re-export all imported values.

    module_name (str): Module to load.
    """
    mod = importlib.import_module(module_name)
    imported_names = {
        name for imp in get_imports(str(mod.__file__)) for name in imp.name
    }

    def is_reexported(name: str) -> bool:
        # Underscore-prefixed names stay private, with the single exception
        # of thinc.__version__, which should be re-exported.
        if not name.startswith("_"):
            return True
        return module_name == "thinc" and name == "__version__"

    assert set(mod.__all__) == {name for name in imported_names if is_reexported(name)}
| mit | 30b44193b3650a7b73a34d87c11016db | 32.962264 | 114 | 0.613333 | 3.781513 | false | true | false | false |
explosion/thinc | thinc/layers/with_debug.py | 1 | 1333 | from typing import Optional, Callable, Any, Tuple, TypeVar
from ..model import Model
_ModelT = TypeVar("_ModelT", bound=Model)
do_nothing = lambda *args, **kwargs: None
def with_debug(
    layer: _ModelT,
    name: Optional[str] = None,
    *,
    on_init: Callable[[Model, Any, Any], None] = do_nothing,
    on_forward: Callable[[Model, Any, bool], None] = do_nothing,
    on_backprop: Callable[[Any], None] = do_nothing,
) -> _ModelT:
    """Wrap a layer so callbacks fire during initialization, the forward
    pass and the backward pass. Each callback receives the same arguments
    as the function it hooks into.
    """
    # NOTE(review): `name` is computed here but never applied to the
    # returned layer -- confirm whether renaming was intended.
    name = layer.name if name is None else name
    wrapped_forward = layer._func
    wrapped_init = layer.init

    def debug_forward(model: Model, X: Any, is_train: bool) -> Tuple[Any, Callable]:
        on_forward(model, X, is_train)
        Y, wrapped_backprop = wrapped_forward(layer, X, is_train=is_train)

        def debug_backprop(dY: Any) -> Any:
            on_backprop(dY)
            return wrapped_backprop(dY)

        return Y, debug_backprop

    def debug_init(model: Model, X: Any, Y: Any) -> None:
        on_init(model, X, Y)
        if wrapped_init is not None:
            wrapped_init(layer, X, Y)

    layer.replace_callbacks(debug_forward, init=debug_init)
    return layer
| mit | edc4164e3518cdc75a543e95fa526435 | 29.295455 | 78 | 0.636909 | 3.535809 | false | false | false | false |
explosion/thinc | thinc/layers/with_array2d.py | 1 | 4080 | from typing import Tuple, Callable, Optional, TypeVar, cast, List, Union
from ..model import Model
from ..config import registry
from ..types import Array2d, Floats2d, List2d, Padded, Ragged
# ValT: the 2d array type the wrapped layer consumes/produces.
ValT = TypeVar("ValT", bound=Array2d)
# SeqT: any sequence container this adapter can convert to/from 2d arrays.
SeqT = TypeVar("SeqT", bound=Union[Padded, Ragged, List2d, Array2d])
@registry.layers("with_array2d.v1")
def with_array2d(layer: Model[ValT, ValT], pad: int = 0) -> Model[SeqT, SeqT]:
    """Transform sequence data into a contiguous 2d array on the way into and
    out of a model. Handles lists, padded and ragged sequences; a plain 2d
    array is passed through unchanged.
    """
    # Mirror the child layer's (possibly unset) dimensions on the wrapper.
    inherited_dims = {name: layer.maybe_get_dim(name) for name in layer.dim_names}
    return Model(
        f"with_array({layer.name})",
        forward,
        init=init,
        layers=[layer],
        attrs={"pad": pad},
        dims=inherited_dims,
    )
def forward(
    model: Model[SeqT, SeqT], Xseq: SeqT, is_train: bool
) -> Tuple[SeqT, Callable]:
    """Dispatch on the container type of Xseq and delegate to the child layer.

    Ragged and Padded inputs are unwrapped/rewrapped; a plain array goes
    straight to the child layer; lists/tuples are flattened and re-split.

    Fix: removed the unreachable bare `return` that trailed the if/else
    chain (dead code; had it ever executed it would have returned None and
    broken the (output, callback) contract).
    """
    if isinstance(Xseq, Ragged):
        return cast(Tuple[SeqT, Callable], _ragged_forward(model, Xseq, is_train))
    elif isinstance(Xseq, Padded):
        return cast(Tuple[SeqT, Callable], _padded_forward(model, Xseq, is_train))
    elif not isinstance(Xseq, (list, tuple)):
        return model.layers[0](Xseq, is_train)
    else:
        return cast(Tuple[SeqT, Callable], _list_forward(model, Xseq, is_train))
def init(
    model: Model[SeqT, SeqT], X: Optional[SeqT] = None, Y: Optional[SeqT] = None
) -> None:
    """Initialize the wrapped layer on flattened samples, then copy its dims up."""
    layer: Model[Array2d, Array2d] = model.layers[0]
    X_array = _get_array(model, X) if X is not None else None
    Y_array = _get_array(model, Y) if Y is not None else None
    layer.initialize(X=X_array, Y=Y_array)
    for dim_name in layer.dim_names:
        dim_value = layer.maybe_get_dim(dim_name)
        if dim_value is not None:
            model.set_dim(dim_name, dim_value)
def _get_array(model, X: SeqT) -> Array2d:
    """Return the underlying 2d array for any supported sequence container."""
    if isinstance(X, Ragged):
        return X.data
    if isinstance(X, Padded):
        n_rows = X.data.shape[0] * X.data.shape[1]
        return model.ops.reshape2f(X.data, n_rows, X.data.shape[2])
    if isinstance(X, (list, tuple)):
        return model.ops.flatten(X)
    return cast(Array2d, X)
def _list_forward(
    model: Model[SeqT, SeqT], Xs: List2d, is_train: bool
) -> Tuple[List2d, Callable]:
    """Flatten a list of arrays, run the child layer, and re-split the output."""
    layer: Model[Array2d, Array2d] = model.layers[0]
    pad = model.attrs["pad"]
    lengths = layer.ops.asarray1i([len(seq) for seq in Xs])
    Yf, get_dXf = layer(layer.ops.flatten(Xs, pad=pad), is_train)

    def backprop(dYs: List2d) -> List2d:
        dXf = get_dXf(layer.ops.flatten(dYs, pad=pad))
        return layer.ops.unflatten(dXf, lengths, pad=pad)

    return layer.ops.unflatten(Yf, lengths, pad=pad), backprop
def _ragged_forward(
    model: Model[SeqT, SeqT], Xr: Ragged, is_train: bool
) -> Tuple[Ragged, Callable]:
    """Run the child layer on the Ragged data, preserving the lengths array."""
    layer: Model[Array2d, Array2d] = model.layers[0]
    Y, get_dX = layer(Xr.data, is_train)
    input_shape = Xr.dataXd.shape

    def backprop(dYr: Ragged) -> Ragged:
        dX = get_dX(dYr.dataXd).reshape(input_shape)
        return Ragged(dX, dYr.lengths)

    return Ragged(Y, Xr.lengths), backprop
def _padded_forward(
    model: Model[SeqT, SeqT], Xp: Padded, is_train: bool
) -> Tuple[Padded, Callable]:
    """Collapse a Padded batch to 2d, run the child layer, and restore 3d.

    The (time, batch, width) data is reshaped to (time * batch, width) for
    the child layer, and the output is reshaped back with the child's
    output width; size_at_t/lengths/indices are carried through unchanged.
    """
    layer: Model[Array2d, Array2d] = model.layers[0]
    # (T, B, W) -> (T*B, W)
    X = model.ops.reshape2(
        Xp.data, Xp.data.shape[0] * Xp.data.shape[1], Xp.data.shape[2]
    )
    Y2d, get_dX = layer(X, is_train)
    # (T*B, W_out) -> (T, B, W_out), using the child layer's output width.
    Y = model.ops.reshape3f(
        cast(Floats2d, Y2d), Xp.data.shape[0], Xp.data.shape[1], Y2d.shape[1]
    )

    def backprop(dYp: Padded) -> Padded:
        assert isinstance(dYp, Padded)
        dY = model.ops.reshape2(
            dYp.data, dYp.data.shape[0] * dYp.data.shape[1], dYp.data.shape[2]
        )
        dX2d = get_dX(dY)
        dX = model.ops.reshape3f(
            dX2d, dYp.data.shape[0], dYp.data.shape[1], dX2d.shape[1]
        )
        return Padded(dX, dYp.size_at_t, dYp.lengths, dYp.indices)

    return Padded(Y, Xp.size_at_t, Xp.lengths, Xp.indices), backprop
explosion/thinc | thinc/model.py | 1 | 34868 | from typing import Dict, List, Callable, Optional, Any, Union, Iterable, Set, cast
from typing import Generic, Sequence, Tuple, TypeVar, Iterator
import contextlib
from contextvars import ContextVar
import srsly
from pathlib import Path
import copy
import functools
import threading
from .backends import ParamServer, Ops, NumpyOps, CupyOps, get_current_ops
from .optimizers import Optimizer # noqa: F401
from .shims import Shim
from .util import convert_recursive, is_xp_array, DATA_VALIDATION
from .util import partial, validate_fwd_input_output
from .types import FloatsXd
# Input/output types for the generic Model[InT, OutT].
InT = TypeVar("InT")
OutT = TypeVar("OutT")
# Bound TypeVar so fluent methods (e.g. copy) preserve the subclass type.
SelfT = TypeVar("SelfT", bound="Model")

# Operator overrides installed by Model.define_operators(); context-local so
# nested `with` blocks and concurrent contexts don't interfere.
context_operators: ContextVar[dict] = ContextVar("context_operators", default={})
def empty_init(model: "Model", *args, **kwargs) -> "Model":
    """Default `init` callback: do nothing and return the model unchanged."""
    return model
class Model(Generic[InT, OutT]):
    """Class for implementing Thinc models and layers."""

    # Monotonically increasing id counter shared by every Model instance,
    # guarded by global_id_lock (incremented in __init__).
    global_id: int = 0
    global_id_lock: threading.Lock = threading.Lock()
    _context_operators = context_operators

    name: str
    ops: Ops
    id: int
    _func: Callable
    init: Callable
    _params: ParamServer
    _dims: Dict[str, Optional[int]]
    _refs: Dict[str, Optional["Model"]]
    _layers: List["Model"]
    _shims: List[Shim]
    _attrs: Dict[str, Any]
    _has_params: Dict[str, Optional[bool]]

    # This "locks" the class, so we get an error if you try to assign to
    # an unexpected variable.
    __slots__ = [
        "name",
        "id",
        "ops",
        "_func",
        "init",
        "_params",
        "_dims",
        "_attrs",
        "_refs",
        "_layers",
        "_shims",
        "_has_params",
    ]
    def __init__(
        self,
        name: str,
        forward: Callable,
        *,
        init: Optional[Callable] = None,
        dims: Dict[str, Optional[int]] = {},
        params: Dict[str, Optional[FloatsXd]] = {},
        layers: Sequence["Model"] = [],
        shims: List[Shim] = [],
        attrs: Dict[str, Any] = {},
        refs: Dict[str, Optional["Model"]] = {},
        ops: Optional[Union[NumpyOps, CupyOps]] = None,
    ):
        """Initialize a new model."""
        self.name = name
        if init is None:
            init = partial(empty_init, self)
        # Assign to callable attrs: https://github.com/python/mypy/issues/2427
        setattr(self, "_func", forward)
        setattr(self, "init", init)
        self.ops = ops if ops is not None else get_current_ops()
        self._params = ParamServer()
        # Defensive copies: the mutable-default arguments above are shared
        # across calls, so never store them directly.
        self._dims = dict(dims)
        self._attrs = dict(attrs)
        self._refs = dict(refs)
        self._layers = list(layers)
        self._shims = list(shims)
        # Take care to increment the base class here! It needs to be unique
        # across all models.
        with Model.global_id_lock:
            Model.global_id += 1
            self.id = Model.global_id
        # Params may be passed as None ("declared but unset"); set_param
        # records concrete values in the ParamServer and flips the flag.
        self._has_params = {}
        for name, value in params.items():
            self._has_params[name] = None
            if value is not None:
                self.set_param(name, value)
    @property
    def layers(self) -> List["Model"]:
        """A list of child layers of the model. You can append to it to add
        layers but not reassign it.
        """
        # Live list: mutations are visible to the model.
        return self._layers
    @property
    def shims(self) -> List[Shim]:
        """The model's attached shims. Like `layers`, the returned list is
        live: you can append to it but not reassign it."""
        return self._shims
    @property
    def attrs(self) -> Dict[str, Any]:
        """A dict of the model's attrs. You can write to it to update attrs but
        not reassign it.
        """
        return self._attrs
    @property
    def param_names(self) -> Tuple[str, ...]:
        """Get the names of registered parameter (including unset)."""
        # Tuple snapshot, so callers can't mutate the registry.
        return tuple(self._has_params.keys())
@property
def grad_names(self) -> Tuple[str, ...]:
"""Get the names of parameters with registered gradients (including unset)."""
return tuple([name for name in self.param_names if self.has_grad(name)])
    @property
    def dim_names(self) -> Tuple[str, ...]:
        """Get the names of registered dimensions (including unset)."""
        return tuple(self._dims.keys())
    @property
    def ref_names(self) -> Tuple[str, ...]:
        """Get the names of registered node references (including unset)."""
        return tuple(self._refs.keys())
@classmethod
@contextlib.contextmanager
def define_operators(cls, operators: Dict[str, Callable]):
"""Bind arbitrary binary functions to Python operators, for use in any
`Model` instance. Can (and should) be used as a contextmanager.
EXAMPLE:
with Model.define_operators({">>": chain}):
model = Relu(512) >> Relu(512) >> Softmax()
"""
token = cls._context_operators.set(dict(operators))
yield
cls._context_operators.reset(token)
def has_dim(self, name: str) -> Optional[bool]:
"""Check whether the model has a dimension of a given name. If the
dimension is registered but the value is unset, returns None.
"""
if name not in self._dims:
return False
elif self._dims[name] is not None:
return True
else:
return None
def get_dim(self, name: str) -> int:
"""Retrieve the value of a dimension of the given name."""
if name not in self._dims:
raise KeyError(f"Cannot get dimension '{name}' for model '{self.name}'")
value = self._dims[name]
if value is None:
err = f"Cannot get dimension '{name}' for model '{self.name}': value unset"
raise ValueError(err)
else:
return value
def set_dim(self, name: str, value: int, *, force: bool = False) -> None:
"""Set a value for a dimension."""
if name not in self._dims:
raise KeyError(
f"Cannot set unknown dimension '{name}' for model '{self.name}'."
)
old_value = self._dims[name]
has_params = any(bool(y) for x, y in self._has_params.items())
invalid_change = (old_value is not None and old_value != value) and (
not force or force and has_params
)
if invalid_change:
err = f"Attempt to change dimension '{name}' for model '{self.name}' from {old_value} to {value}"
raise ValueError(err)
self._dims[name] = value
def maybe_get_dim(self, name: str) -> Optional[int]:
"""Retrieve the value of a dimension of the given name, or None."""
return self.get_dim(name) if self.has_dim(name) else None
def has_param(self, name: str) -> Optional[bool]:
"""Check whether the model has a weights parameter of the given name.
Returns None if the parameter is registered but currently unset.
"""
if name not in self._has_params:
return False
elif self._has_params[name] is not None:
return True
else:
return None
    def get_param(self, name: str) -> FloatsXd:
        """Retrieve a weights parameter by name. Raises KeyError both for
        unknown names and for registered params not yet allocated."""
        if name not in self._has_params:
            raise KeyError(f"Unknown param: '{name}' for model '{self.name}'.")
        if not self._params.has_param(self.id, name):
            raise KeyError(
                f"Parameter '{name}' for model '{self.name}' has not been allocated yet."
            )
        # Storage lives in the shared ParamServer, keyed by (model id, name).
        return self._params.get_param(self.id, name)
def maybe_get_param(self, name: str) -> Optional[FloatsXd]:
"""Retrieve a weights parameter by name, or None."""
return self.get_param(name) if self.has_param(name) else None
def set_param(self, name: str, value: Optional[FloatsXd]) -> None:
"""Set a weights parameter's value."""
if value is None:
self._has_params[name] = None
else:
self._params.set_param(self.id, name, value)
self._has_params[name] = True
    def has_grad(self, name: str) -> bool:
        """Check whether the model has a non-zero gradient for a parameter."""
        # Delegates to the shared ParamServer, keyed by (model id, name).
        return self._params.has_grad(self.id, name)
    def get_grad(self, name: str) -> FloatsXd:
        """Get a gradient from the model (via the shared ParamServer)."""
        return self._params.get_grad(self.id, name)
    def set_grad(self, name: str, value: FloatsXd) -> None:
        """Set a gradient value for the model (via the shared ParamServer)."""
        self._params.set_grad(self.id, name, value)
def maybe_get_grad(self, name: str) -> Optional[FloatsXd]:
"""Retrieve a gradient by name, or None."""
return self.get_grad(name) if self.has_grad(name) else None
    def inc_grad(self, name: str, value: FloatsXd) -> None:
        """Increment the gradient of a parameter by a value."""
        self._params.inc_grad(self.id, name, value)
def has_ref(self, name: str) -> Optional[bool]:
"""Check whether the model has a reference of a given name. If the
reference is registered but the value is unset, returns None.
"""
if name not in self._refs:
return False
elif self._refs[name] is not None:
return True
else:
return None
def get_ref(self, name: str) -> "Model":
"""Retrieve the value of a reference of the given name."""
if name not in self._refs:
raise KeyError(f"Cannot get reference '{name}' for model '{self.name}'.")
value = self._refs[name]
if value is None:
err = f"Cannot get reference '{name}' for model '{self.name}': value unset."
raise ValueError(err)
else:
return value
def maybe_get_ref(self, name: str) -> Optional["Model"]:
"""Retrieve the value of a reference if it exists, or None."""
return self.get_ref(name) if self.has_ref(name) else None
def set_ref(self, name: str, value: Optional["Model"]) -> None:
"""Set a value for a reference."""
if value is None:
self._refs[name] = value
elif value in self.walk():
self._refs[name] = value
else:
raise ValueError("Cannot add reference to node not in tree.")
    def __call__(self, X: InT, is_train: bool) -> Tuple[OutT, Callable]:
        """Call the model's `forward` function, returning the output and a
        callback to compute the gradients via backpropagation."""
        # _func is the forward callback installed in __init__/replace_callbacks.
        return self._func(self, X, is_train=is_train)
    def initialize(self, X: Optional[InT] = None, Y: Optional[OutT] = None) -> "Model":
        """Finish initialization of the model, optionally providing a batch of
        example input and output data to perform shape inference. Returns
        the model itself, so calls can be chained."""
        # Optional validation of X/Y against the forward signature, toggled
        # by the DATA_VALIDATION flag.
        if DATA_VALIDATION.get():
            validate_fwd_input_output(self.name, self._func, X, Y)
        if self.init is not None:
            self.init(self, X=X, Y=Y)
        return self
    def begin_update(self, X: InT) -> Tuple[OutT, Callable[[InT], OutT]]:
        """Run the model over a batch of data, returning the output and a
        callback to complete the backward pass. A tuple (Y, finish_update),
        where Y is a batch of output data, and finish_update is a callback that
        takes the gradient with respect to the output and an optimizer function,
        and returns the gradient with respect to the input.
        """
        # Equivalent to __call__ with is_train=True.
        return self._func(self, X, is_train=True)
def predict(self, X: InT) -> OutT:
"""Call the model's `forward` function with `is_train=False`, and return
only the output, instead of the `(output, callback)` tuple.
"""
return self._func(self, X, is_train=False)[0]
    def finish_update(self, optimizer: Optimizer) -> None:
        """Update parameters with current gradients. The optimizer is called
        with each parameter and gradient of the model.
        """
        # First let every shim update its wrapped external model, then
        # update this tree's own parameters.
        for node in self.walk():
            for shim in node.shims:
                shim.finish_update(optimizer)
        for node in self.walk():
            for name in node.param_names:
                if node.has_grad(name):
                    param, grad = optimizer(
                        (node.id, name), node.get_param(name), node.get_grad(name)
                    )
                    # Only the updated param is stored; the returned grad
                    # is discarded here.
                    node.set_param(name, param)
@contextlib.contextmanager
def use_params(self, params: Dict[Tuple[int, str], FloatsXd]):
"""Context manager to temporarily set the model's parameters to
specified values. The params are a dictionary keyed by model IDs, whose
values are arrays of weight values.
"""
backup = {}
for name in self.param_names:
key = (self.id, name)
if key in params:
backup[name] = self.get_param(name)
self.set_param(name, params[key])
with contextlib.ExitStack() as stack:
for layer in self.layers:
stack.enter_context(layer.use_params(params))
for shim in self.shims:
stack.enter_context(shim.use_params(params))
yield
if backup:
for name, param in backup.items():
self.set_param(name, param)
def walk(self, *, order: str = "bfs") -> Iterable["Model"]:
"""Iterate out layers of the model.
Nodes are returned in breadth-first order by default. Other possible
orders are "dfs_pre" (depth-first search in preorder) and "dfs_post"
(depth-first search in postorder)."""
if order == "bfs":
return self._walk_bfs()
elif order == "dfs_pre":
return self._walk_dfs(post_order=False)
elif order == "dfs_post":
return self._walk_dfs(post_order=True)
else:
raise ValueError("Invalid order, must be one of: bfs, dfs_pre, dfs_post")
def _walk_bfs(self) -> Iterable["Model"]:
"""Iterate out layers of the model, breadth-first."""
queue = [self]
seen: Set[int] = set()
for node in queue:
if id(node) in seen:
continue
seen.add(id(node))
yield node
queue.extend(node.layers)
    def _walk_dfs(self, post_order: bool = False) -> Iterable["Model"]:
        """Iterate out layers of the model, depth-first.

        Explicit-stack traversal: `seen` maps node id -> an iterator over
        that node's children, so each child list is consumed lazily and each
        node is visited exactly once.
        """
        seen: Dict[int, Iterator["Model"]] = dict()
        stack = [self]
        seen[id(self)] = iter(self.layers)
        if not post_order:
            yield self
        while stack:
            try:
                next_child = next(seen[id(stack[-1])])
                if not id(next_child) in seen:
                    if not post_order:
                        yield next_child
                    stack.append(next_child)
                    seen[id(next_child)] = iter(next_child.layers)
            except StopIteration:
                # Current node's children are exhausted: emit it on the way
                # out for post-order, then backtrack.
                if post_order:
                    yield stack[-1]
                stack.pop()
def remove_node(self, node: "Model") -> None:
"""Remove a node from all layers lists, and then update references.
References that no longer point to a node within the tree will be set
to `None`. For instance, let's say a node has its grandchild as a reference.
If the child is removed, the grandchild reference will be left dangling,
so will be set to None.
"""
for child in list(self.walk()):
while node in child.layers:
child.layers.remove(node)
tree = set(self.walk())
for node in tree:
for name in node.ref_names:
ref = node.get_ref(name)
if ref is not None and ref not in tree:
node.set_ref(name, None)
def replace_callbacks(
self, forward: Callable, *, init: Optional[Callable] = None
) -> None:
setattr(self, "_func", forward)
setattr(self, "init", init)
    def replace_node(self, old: "Model", new: "Model") -> bool:
        """Replace a node anywhere it occurs within the model. Returns a boolean
        indicating whether the replacement was made."""
        seen = False
        # We need to replace nodes in topological order of the transposed graph
        # to ensure that a node's dependencies are processed before the node.
        # This is equivalent to a post-order traversal of the original graph.
        for node in list(self.walk(order="dfs_post")):
            if node is old:
                seen = True
            else:
                # Rebuild the child list, swapping identity matches, and
                # repoint any named reference that targets the old node.
                node._layers = [
                    new if layer is old else layer for layer in node._layers
                ]
                for name in node.ref_names:
                    if node.get_ref(name) is old:
                        node.set_ref(name, new)
        return seen
def get_gradients(self) -> Dict[Tuple[int, str], Tuple[FloatsXd, FloatsXd]]:
"""Get non-zero gradients of the model's parameters, as a dictionary
keyed by the parameter ID. The values are (weights, gradients) tuples.
"""
gradients = {}
for node in self.walk():
for name in node.grad_names:
param = node.get_param(name)
grad = node.get_grad(name)
gradients[(node.id, name)] = (param, grad)
return gradients
    def copy(self: SelfT) -> SelfT:
        """
        Create a copy of the model, its attributes, and its parameters. Any child
        layers will also be deep-copied. The copy will receive a distinct `model.id`
        value.
        """
        # Delegate to _copy, which threads a memo of already-copied nodes so
        # that layers/shims shared in the original stay shared in the copy.
        return self._copy()
    def _copy(
        self: SelfT, seen: Optional[Dict[int, Union["Model", Shim]]] = None
    ) -> SelfT:
        # `seen` maps id(original) -> copy, so that layers/shims shared in
        # the original tree remain shared (not duplicated) in the copy.
        if seen is None:
            seen = {}
        params = {}
        for name in self.param_names:
            # Record None for declared-but-unset params so the copy keeps
            # the same parameter schema.
            params[name] = self.get_param(name) if self.has_param(name) else None
        copied_layers: List[Model] = []
        for layer in self.layers:
            if id(layer) in seen:
                copied_layers.append(cast(Model, seen[id(layer)]))
            else:
                copied_layer = layer._copy(seen)
                seen[id(layer)] = copied_layer
                copied_layers.append(copied_layer)
        copied_shims = []
        for shim in self.shims:
            if id(shim) in seen:
                copied_shims.append(cast(Shim, seen[id(shim)]))
            else:
                copied_shim = shim.copy()
                seen[id(shim)] = copied_shim
                copied_shims.append(copied_shim)
        # Building a fresh Model gives the copy its own id (see copy()).
        copied: Model[InT, OutT] = Model(
            self.name,
            self._func,
            init=self.init,
            params=copy.deepcopy(params),
            dims=copy.deepcopy(self._dims),
            attrs=copy.deepcopy(self._attrs),
            layers=copied_layers,
            shims=copied_shims,
        )
        # Gradients are copied per-array rather than deep-copied wholesale.
        for name in self.grad_names:
            copied.set_grad(name, self.get_grad(name).copy())
        return cast(SelfT, copied)
    def to_gpu(self, gpu_id: int) -> None:  # pragma: no cover
        """Transfer the model to a given GPU device."""
        # Imported lazily so CPU-only installations never require cupy.
        import cupy.cuda.device

        with cupy.cuda.device.Device(gpu_id):
            self._to_ops(CupyOps())

    def to_cpu(self) -> None:  # pragma: no cover
        """Transfer the model to CPU."""
        self._to_ops(NumpyOps())

    def _to_ops(self, ops: Ops) -> None:  # pragma: no cover
        """Common method for to_cpu/to_gpu: swap every node's ops backend and
        move its parameters, gradients, and shim weights to the new device."""
        for node in self.walk():
            node.ops = ops
            for name in node.param_names:
                if node.has_param(name):
                    node.set_param(name, ops.asarray_f(node.get_param(name)))
                if node.has_grad(name):
                    node.set_grad(name, ops.asarray_f(node.get_grad(name)))
            for shim in node.shims:
                shim.to_device(ops.device_type, ops.device_id)
    def to_bytes(self) -> bytes:
        """Serialize the model to a bytes representation. Models are usually
        serialized using msgpack, so you should be able to call msgpack.loads()
        on the data and get back a dictionary with the contents.

        Serialization should round-trip identically, i.e. the same bytes should
        result from loading and serializing a model.
        """
        msg = self.to_dict()
        # Arrays are converted to little-endian numpy so the byte output is
        # identical regardless of the current device/platform.
        to_numpy_le = partial(self.ops.to_numpy, byte_order="<")
        msg = convert_recursive(is_xp_array, to_numpy_le, msg)
        return srsly.msgpack_dumps(msg)

    def to_disk(self, path: Union[Path, str]) -> None:
        """Serialize the model to disk. Most models will serialize to a single
        file, which should just be the bytes contents of model.to_bytes().
        """
        path = Path(path) if isinstance(path, str) else path
        with path.open("wb") as file_:
            file_.write(self.to_bytes())
    def to_dict(self) -> Dict:
        """Serialize the model to a dict representation.

        Serialization should round-trip identically, i.e. the same dict should
        result from loading and serializing a model.
        """
        # We separate out like this to make it easier to read the data in chunks.
        # The shims might have large weights, while the nodes data will be
        # small. The attrs are probably not very large, but could be.
        # The lists are aligned, and refer to the order of self.walk().
        msg: Dict[str, List] = {"nodes": [], "attrs": [], "params": [], "shims": []}
        nodes = list(self.walk())
        # Serialize references by their index into the flattened tree.
        # This is the main reason we can't accept out-of-tree references:
        # we'd have no way to serialize/deserialize them.
        node_to_i: Dict[int, Optional[int]]
        node_to_i = {node.id: i for i, node in enumerate(nodes)}
        for i, node in enumerate(nodes):
            refs: Dict[str, Optional[int]] = {}
            invalid_refs: List[str] = []
            for name in node.ref_names:
                if not node.has_ref(name):
                    # Unset refs are stored as None so the schema survives.
                    refs[name] = None
                else:
                    ref = node.get_ref(name)
                    if ref.id in node_to_i:
                        refs[name] = node_to_i[ref.id]
                    else:
                        invalid_refs.append(name)
            if invalid_refs:
                raise ValueError(f"Cannot get references: {invalid_refs}")
            dims = {}
            for dim in node.dim_names:
                # Unset dims are likewise serialized as None.
                dims[dim] = node.get_dim(dim) if node.has_dim(dim) else None
            msg["nodes"].append(
                {"index": i, "name": node.name, "dims": dims, "refs": refs}
            )
        for node in nodes:
            attrs = {}
            for name, value in node.attrs.items():
                try:
                    attrs[name] = serialize_attr(value, value, name, node)
                except TypeError:
                    # Attrs with no registered/serializable form are skipped.
                    continue
            msg["attrs"].append(attrs)
        for node in nodes:
            msg["shims"].append([shim.to_bytes() for shim in node.shims])
        for node in nodes:
            params: Dict[str, Optional[FloatsXd]] = {}
            for name in node.param_names:
                if node.has_param(name):
                    params[name] = cast(Optional[FloatsXd], node.get_param(name))
                else:
                    params[name] = None
            msg["params"].append(params)
        return msg
    def from_bytes(self, bytes_data: bytes) -> "Model":
        """Deserialize the model from a bytes representation. Models are usually
        serialized using msgpack, so you should be able to call msgpack.loads()
        on the data and get back a dictionary with the contents.

        Serialization should round-trip identically, i.e. the same bytes should
        result from loading and serializing a model.
        """
        msg = srsly.msgpack_loads(bytes_data)
        # Move serialized arrays onto this model's current backend.
        msg = convert_recursive(is_xp_array, self.ops.asarray, msg)
        return self.from_dict(msg)

    def from_disk(self, path: Union[Path, str]) -> "Model":
        """Deserialize the model from disk. Most models will serialize to a single
        file, which should just be the bytes contents of model.to_bytes().
        """
        path = Path(path) if isinstance(path, str) else path
        with path.open("rb") as file_:
            bytes_data = file_.read()
        return self.from_bytes(bytes_data)
    def from_dict(self, msg: Dict) -> "Model":
        """Deserialize a dict produced by to_dict() into this model in place,
        matching entries to nodes in self.walk() order."""
        if "nodes" not in msg.keys():  # pragma: no cover
            err = "Trying to read a Model that was created with an incompatible version of Thinc"
            raise ValueError(err)
        nodes = list(self.walk())
        if len(msg["nodes"]) != len(nodes):
            raise ValueError("Cannot deserialize model: mismatched structure")
        for i, node in enumerate(nodes):
            info = msg["nodes"][i]
            node.name = info["name"]
            for dim, value in info["dims"].items():
                if value is not None:
                    node.set_dim(dim, value)
            for ref, ref_index in info["refs"].items():
                # References were serialized as indexes into the node list.
                if ref_index is None:
                    node.set_ref(ref, None)
                else:
                    node.set_ref(ref, nodes[ref_index])
            for attr, value in msg["attrs"][i].items():
                # deserialize_attr dispatches on the attr's current (default)
                # value, so typed defaults select custom deserializers.
                default_value = node.attrs.get(attr)
                loaded_value = deserialize_attr(default_value, value, attr, node)
                node.attrs[attr] = loaded_value
            for param_name, value in msg["params"][i].items():
                if value is not None:
                    value = node.ops.asarray(value).copy()
                node.set_param(param_name, value)
            # NOTE: this inner loop rebinds `i`. The subscript is evaluated
            # with the outer node index before rebinding, and the outer `for`
            # restores `i` on its next iteration — correct, but fragile.
            for i, shim_bytes in enumerate(msg["shims"][i]):
                node.shims[i].from_bytes(shim_bytes)
        return self
    def can_from_disk(self, path: Union[Path, str], *, strict: bool = True) -> bool:
        """Check whether serialized data on disk is compatible with the model.
        If 'strict', the function returns False if the model has an attribute
        already loaded that would be changed.
        """
        path = Path(path) if isinstance(path, str) else path
        # A directory or a missing file can never hold a serialized model.
        if path.is_dir() or not path.exists():
            return False
        with path.open("rb") as file_:
            bytes_data = file_.read()
        return self.can_from_bytes(bytes_data, strict=strict)

    def can_from_bytes(self, bytes_data: bytes, *, strict: bool = True) -> bool:
        """Check whether the bytes data is compatible with the model. If 'strict',
        the function returns False if the model has an attribute already loaded
        that would be changed.
        """
        try:
            msg = srsly.msgpack_loads(bytes_data)
        except ValueError:
            # Not valid msgpack: definitely not a serialized model.
            return False
        return self.can_from_dict(msg, strict=strict)
    def can_from_dict(self, msg: Dict, *, strict: bool = True) -> bool:
        """Check whether a dictionary is compatible with the model.
        If 'strict', the function returns False if the model has an attribute
        already loaded that would be changed.
        """
        # Structure must match node-for-node in self.walk() order.
        if "nodes" not in msg.keys():
            return False
        nodes = list(self.walk())
        if len(msg["nodes"]) != len(nodes):
            return False
        for i, node in enumerate(nodes):
            info = msg["nodes"][i]
            if strict and info["name"] != node.name:
                return False
            if len(msg["shims"][i]) != len(node.shims):
                # TODO: The shims should have a check for this too, but
                # for now we just check if the lengths match.
                return False
            for dim, value in info["dims"].items():
                # NOTE(review): the `is False` test suggests has_dim is
                # tri-state (a non-False falsy value falls through) — confirm
                # against the has_dim implementation.
                has_dim = node.has_dim(dim)
                if has_dim is False:
                    return False
                elif has_dim and node.get_dim(dim) != value:
                    return False
            for param_name, value in msg["params"][i].items():
                # Same tri-state pattern as has_dim above.
                has_param = node.has_param(param_name)
                if has_param is False:
                    return False
                elif has_param and value is not None:
                    param = node.get_param(param_name)
                    if param.shape != value.shape:
                        return False
            if strict:
                for attr, value in msg["attrs"][i].items():
                    if attr in node.attrs:
                        try:
                            serialized = serialize_attr(
                                node.attrs[attr], node.attrs[attr], attr, node
                            )
                        except TypeError:
                            continue
                        # Loading would overwrite this attr with a different
                        # value: reject under strict mode.
                        if serialized != value:
                            return False
        return True
def __add__(self, other: Any) -> "Model":
"""Apply the function bound to the '+' operator."""
if "+" not in self._context_operators.get():
raise TypeError("Undefined operator: +")
return self._context_operators.get()["+"](self, other)
def __sub__(self, other: Any) -> "Model":
"""Apply the function bound to the '-' operator."""
if "-" not in self._context_operators.get():
raise TypeError("Undefined operator: -")
return self._context_operators.get()["-"](self, other)
def __mul__(self, other: Any) -> "Model":
"""Apply the function bound to the '*' operator."""
if "*" not in self._context_operators.get():
raise TypeError("Undefined operator: *")
return self._context_operators.get()["*"](self, other)
def __matmul__(self, other: Any) -> "Model":
"""Apply the function bound to the '@' operator."""
if "@" not in self._context_operators.get():
raise TypeError("Undefined operator: @")
return self._context_operators.get()["@"](self, other)
def __div__(self, other: Any) -> "Model": # pragma: no cover
"""Apply the function bound to the '/' operator."""
if "/" not in self._context_operators.get():
raise TypeError("Undefined operator: /")
return self._context_operators.get()["/"](self, other)
def __truediv__(self, other: Any) -> "Model":
"""Apply the function bound to the '/' operator."""
if "/" not in self._context_operators.get():
raise TypeError("Undefined operator: /")
return self._context_operators.get()["/"](self, other)
def __floordiv__(self, other: Any) -> "Model":
"""Apply the function bound to the '//' operator."""
if "//" not in self._context_operators.get():
raise TypeError("Undefined operator: //")
return self._context_operators.get()["//"](self, other)
def __mod__(self, other: Any) -> "Model":
"""Apply the function bound to the '%' operator."""
if "%" not in self._context_operators.get():
raise TypeError("Undefined operator: %")
return self._context_operators.get()["%"](self, other)
def __pow__(self, other: Any, **kwargs) -> "Model":
"""Apply the function bound to the '**' operator."""
if "**" not in self._context_operators.get():
raise TypeError("Undefined operator: **")
return self._context_operators.get()["**"](self, other)
def __lshift__(self, other: Any) -> "Model":
"""Apply the function bound to the '<<' operator."""
if "<<" not in self._context_operators.get():
raise TypeError("Undefined operator: <<")
return self._context_operators.get()["<<"](self, other)
def __rshift__(self, other: Any) -> "Model":
"""Apply the function bound to the '>>' operator."""
if ">>" not in self._context_operators.get():
raise TypeError("Undefined operator: >>")
return self._context_operators.get()[">>"](self, other)
def __and__(self, other: Any) -> "Model":
"""Apply the function bound to the '&' operator."""
if "&" not in self._context_operators.get():
raise TypeError("Undefined operator: &")
return self._context_operators.get()["&"](self, other)
def __xor__(self, other: Any) -> "Model":
"""Apply the function bound to the '^' operator."""
if "^" not in self._context_operators.get():
raise TypeError("Undefined operator: ^")
return self._context_operators.get()["^"](self, other)
def __or__(self, other: Any) -> "Model":
"""Apply the function bound to the '|' operator."""
if "|" not in self._context_operators.get():
raise TypeError("Undefined operator: |")
return self._context_operators.get()["|"](self, other)
@functools.singledispatch
def serialize_attr(_: Any, value: Any, name: str, model: Model) -> bytes:
    """Serialize an attribute value (defaults to msgpack). You can register
    custom serializers using the @serialize_attr.register decorator with the
    type to serialize, e.g.: @serialize_attr.register(MyCustomObject).
    """
    # The first argument only carries the type for singledispatch; the
    # actual payload being serialized is `value`.
    return srsly.msgpack_dumps(value)


@functools.singledispatch
def deserialize_attr(_: Any, value: Any, name: str, model: Model) -> Any:
    """Deserialize an attribute value (defaults to msgpack). You can register
    custom deserializers using the @deserialize_attr.register decorator with the
    type to deserialize, e.g.: @deserialize_attr.register(MyCustomObject).
    """
    # Dispatch happens on the attr's current/default value (see from_dict),
    # so typed defaults select the matching custom deserializer.
    return srsly.msgpack_loads(value)


# Type variable for helpers that accept and return the same Model subtype.
_ModelT = TypeVar("_ModelT", bound=Model)
def change_attr_values(model: _ModelT, mapping: Dict[str, Dict[str, Any]]) -> _ModelT:
    """Walk over the model's nodes, changing the value of attributes using the
    provided mapping, which maps node names to attr names to attr values.
    Attributes a node does not already define are left untouched. Returns the
    (mutated) model for convenience.
    """
    for node in model.walk():
        if node.name not in mapping:
            continue
        updates = mapping[node.name]
        for attr_name, new_value in updates.items():
            if attr_name in node.attrs:
                node.attrs[attr_name] = new_value
    return model
def set_dropout_rate(model: _ModelT, drop: float, attrs=("dropout_rate",)) -> _ModelT:
    """Walk over the model's nodes, setting the dropout rate. You can specify
    one or more attribute names, by default it looks for ["dropout_rate"].

    model: The model (tree) to update in place.
    drop: The dropout rate to assign.
    attrs: Iterable of attribute names to overwrite wherever a node has them.

    Returns the (mutated) model for convenience.
    """
    # A tuple default replaces the previous mutable-list default argument
    # (shared across calls); any iterable of names is still accepted.
    for node in model.walk():
        for attr in attrs:
            if attr in node.attrs:
                node.attrs[attr] = drop
    return model
def wrap_model_recursive(model: Model, wrapper: Callable[[Model], _ModelT]) -> _ModelT:
    """Apply `wrapper` to the model and every one of its submodules,
    replacing each node in place, and return the wrapped root."""
    nodes = list(model.walk())
    for node in nodes:
        model.replace_node(node, wrapper(node))
    return wrapper(model)
# Public API of this module.
__all__ = [
    "Model",
    "serialize_attr",
    "deserialize_attr",
    "change_attr_values",
    "set_dropout_rate",
    "wrap_model_recursive",
]
| mit | f802804091463a43c652283f17c9dc07 | 38.803653 | 109 | 0.567397 | 4.110338 | false | false | false | false |
rapptz/discord.py | discord/components.py | 1 | 16416 | """
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
from typing import ClassVar, List, Literal, Optional, TYPE_CHECKING, Tuple, Union, overload
from .enums import try_enum, ComponentType, ButtonStyle, TextStyle
from .utils import get_slots, MISSING
from .partial_emoji import PartialEmoji, _EmojiTag
if TYPE_CHECKING:
from typing_extensions import Self
from .types.components import (
Component as ComponentPayload,
ButtonComponent as ButtonComponentPayload,
SelectMenu as SelectMenuPayload,
SelectOption as SelectOptionPayload,
ActionRow as ActionRowPayload,
TextInput as TextInputPayload,
ActionRowChildComponent as ActionRowChildComponentPayload,
)
from .emoji import Emoji
# Components that may appear as direct children of an ActionRow.
ActionRowChildComponentType = Union['Button', 'SelectMenu', 'TextInput']
# Public API of this module.
__all__ = (
    'Component',
    'ActionRow',
    'Button',
    'SelectMenu',
    'SelectOption',
    'TextInput',
)
class Component:
    """The abstract base for Discord Bot UI Kit components.

    Currently, the only components supported by Discord are:

    - :class:`ActionRow`
    - :class:`Button`
    - :class:`SelectMenu`
    - :class:`TextInput`

    This class is abstract and cannot be instantiated.

    .. versionadded:: 2.0
    """

    __slots__: Tuple[str, ...] = ()

    # Attribute names shown by __repr__; subclasses set this.
    __repr_info__: ClassVar[Tuple[str, ...]]

    def __repr__(self) -> str:
        parts = [f'{key}={getattr(self, key)!r}' for key in self.__repr_info__]
        return f'<{self.__class__.__name__} {" ".join(parts)}>'

    @property
    def type(self) -> ComponentType:
        """:class:`ComponentType`: The type of component."""
        raise NotImplementedError

    @classmethod
    def _raw_construct(cls, **kwargs) -> Self:
        # Build an instance without running __init__, filling only the
        # slots the caller supplied.
        self = cls.__new__(cls)
        for slot in get_slots(cls):
            if slot in kwargs:
                setattr(self, slot, kwargs[slot])
        return self

    def to_dict(self) -> ComponentPayload:
        raise NotImplementedError
class ActionRow(Component):
    """Represents a Discord Bot UI Kit Action Row.

    A container component that holds up to 5 children components in a row.
    This inherits from :class:`Component`.

    .. versionadded:: 2.0

    Attributes
    ------------
    children: List[Union[:class:`Button`, :class:`SelectMenu`, :class:`TextInput`]]
        The children components that this holds, if any.
    """

    __slots__: Tuple[str, ...] = ('children',)

    __repr_info__: ClassVar[Tuple[str, ...]] = __slots__

    def __init__(self, data: ActionRowPayload, /) -> None:
        # Unknown component payloads come back from the factory as None
        # and are silently dropped.
        parsed = (_component_factory(raw) for raw in data.get('components', []))
        self.children: List[ActionRowChildComponentType] = [
            component for component in parsed if component is not None
        ]

    @property
    def type(self) -> Literal[ComponentType.action_row]:
        """:class:`ComponentType`: The type of component."""
        return ComponentType.action_row

    def to_dict(self) -> ActionRowPayload:
        return {
            'type': self.type.value,
            'components': [child.to_dict() for child in self.children],
        }
class Button(Component):
    """Represents a button from the Discord Bot UI Kit.

    This inherits from :class:`Component`.

    .. note::

        The user constructible and usable type to create a button is
        :class:`discord.ui.Button` not this one.

    .. versionadded:: 2.0

    Attributes
    -----------
    style: :class:`.ButtonStyle`
        The style of the button.
    custom_id: Optional[:class:`str`]
        The ID of the button that gets received during an interaction.
        If this button is for a URL, it does not have a custom ID.
    url: Optional[:class:`str`]
        The URL this button sends you to.
    disabled: :class:`bool`
        Whether the button is disabled or not.
    label: Optional[:class:`str`]
        The label of the button, if any.
    emoji: Optional[:class:`PartialEmoji`]
        The emoji of the button, if available.
    """

    __slots__: Tuple[str, ...] = (
        'style',
        'custom_id',
        'url',
        'disabled',
        'label',
        'emoji',
    )

    __repr_info__: ClassVar[Tuple[str, ...]] = __slots__

    def __init__(self, data: ButtonComponentPayload, /) -> None:
        self.style: ButtonStyle = try_enum(ButtonStyle, data['style'])
        self.custom_id: Optional[str] = data.get('custom_id')
        self.url: Optional[str] = data.get('url')
        self.disabled: bool = data.get('disabled', False)
        self.label: Optional[str] = data.get('label')
        self.emoji: Optional[PartialEmoji]
        if 'emoji' in data:
            self.emoji = PartialEmoji.from_dict(data['emoji'])
        else:
            self.emoji = None

    @property
    def type(self) -> Literal[ComponentType.button]:
        """:class:`ComponentType`: The type of component."""
        return ComponentType.button

    def to_dict(self) -> ButtonComponentPayload:
        payload: ButtonComponentPayload = {
            'type': 2,
            'style': self.style.value,
            'disabled': self.disabled,
        }
        # Optional fields are left out entirely when falsy.
        if self.label:
            payload['label'] = self.label
        if self.custom_id:
            payload['custom_id'] = self.custom_id
        if self.url:
            payload['url'] = self.url
        if self.emoji:
            payload['emoji'] = self.emoji.to_dict()
        return payload
class SelectMenu(Component):
    """Represents a select menu from the Discord Bot UI Kit.

    A select menu is functionally the same as a dropdown, however
    on mobile it renders a bit differently.

    .. note::

        The user constructible and usable type to create a select menu is
        :class:`discord.ui.Select` not this one.

    .. versionadded:: 2.0

    Attributes
    ------------
    custom_id: Optional[:class:`str`]
        The ID of the select menu that gets received during an interaction.
    placeholder: Optional[:class:`str`]
        The placeholder text that is shown if nothing is selected, if any.
    min_values: :class:`int`
        The minimum number of items that must be chosen for this select menu.
        Defaults to 1 and must be between 0 and 25.
    max_values: :class:`int`
        The maximum number of items that must be chosen for this select menu.
        Defaults to 1 and must be between 1 and 25.
    options: List[:class:`SelectOption`]
        A list of options that can be selected in this menu.
    disabled: :class:`bool`
        Whether the select is disabled or not.
    """

    __slots__: Tuple[str, ...] = (
        'custom_id',
        'placeholder',
        'min_values',
        'max_values',
        'options',
        'disabled',
    )

    __repr_info__: ClassVar[Tuple[str, ...]] = __slots__

    def __init__(self, data: SelectMenuPayload, /) -> None:
        self.custom_id: str = data['custom_id']
        self.placeholder: Optional[str] = data.get('placeholder')
        self.min_values: int = data.get('min_values', 1)
        self.max_values: int = data.get('max_values', 1)
        raw_options = data.get('options', [])
        self.options: List[SelectOption] = [SelectOption.from_dict(raw) for raw in raw_options]
        self.disabled: bool = data.get('disabled', False)

    @property
    def type(self) -> Literal[ComponentType.select]:
        """:class:`ComponentType`: The type of component."""
        return ComponentType.select

    def to_dict(self) -> SelectMenuPayload:
        payload: SelectMenuPayload = {
            'type': self.type.value,
            'custom_id': self.custom_id,
            'min_values': self.min_values,
            'max_values': self.max_values,
            'options': [option.to_dict() for option in self.options],
            'disabled': self.disabled,
        }
        # The placeholder is omitted when empty.
        if self.placeholder:
            payload['placeholder'] = self.placeholder
        return payload
class SelectOption:
    """Represents a select menu's option.

    These can be created by users.

    .. versionadded:: 2.0

    Parameters
    -----------
    label: :class:`str`
        The label of the option. This is displayed to users.
        Can only be up to 100 characters.
    value: :class:`str`
        The value of the option. This is not displayed to users.
        If not provided when constructed then it defaults to the
        label. Can only be up to 100 characters.
    description: Optional[:class:`str`]
        An additional description of the option, if any.
        Can only be up to 100 characters.
    emoji: Optional[Union[:class:`str`, :class:`Emoji`, :class:`PartialEmoji`]]
        The emoji of the option, if available.
    default: :class:`bool`
        Whether this option is selected by default.

    Attributes
    -----------
    label: :class:`str`
        The label of the option. This is displayed to users.
        Can only be up to 100 characters.
    value: :class:`str`
        The value of the option. This is not displayed to users.
        If not provided when constructed then it defaults to the
        label. Can only be up to 100 characters.
    description: Optional[:class:`str`]
        An additional description of the option, if any.
        Can only be up to 100 characters.
    default: :class:`bool`
        Whether this option is selected by default.
    """

    __slots__: Tuple[str, ...] = (
        'label',
        'value',
        'description',
        '_emoji',
        'default',
    )

    def __init__(
        self,
        *,
        label: str,
        value: str = MISSING,
        description: Optional[str] = None,
        emoji: Optional[Union[str, Emoji, PartialEmoji]] = None,
        default: bool = False,
    ) -> None:
        self.label: str = label
        # The value falls back to the label when not explicitly given.
        self.value: str = label if value is MISSING else value
        self.description: Optional[str] = description
        self.emoji = emoji
        self.default: bool = default

    def __repr__(self) -> str:
        return (
            f'<SelectOption label={self.label!r} value={self.value!r} description={self.description!r} '
            f'emoji={self.emoji!r} default={self.default!r}>'
        )

    def __str__(self) -> str:
        prefix = f'{self.emoji} {self.label}' if self.emoji else self.label
        if self.description:
            return f'{prefix}\n{self.description}'
        return prefix

    @property
    def emoji(self) -> Optional[PartialEmoji]:
        """Optional[:class:`.PartialEmoji`]: The emoji of the option, if available."""
        return self._emoji

    @emoji.setter
    def emoji(self, value: Optional[Union[str, Emoji, PartialEmoji]]) -> None:
        if value is None:
            self._emoji = None
            return
        # Normalize whatever the caller gave us to a PartialEmoji.
        if isinstance(value, str):
            self._emoji = PartialEmoji.from_str(value)
        elif isinstance(value, _EmojiTag):
            self._emoji = value._to_partial()
        else:
            raise TypeError(f'expected str, Emoji, or PartialEmoji, received {value.__class__} instead')

    @classmethod
    def from_dict(cls, data: SelectOptionPayload) -> SelectOption:
        if 'emoji' in data:
            emoji = PartialEmoji.from_dict(data['emoji'])
        else:
            emoji = None
        return cls(
            label=data['label'],
            value=data['value'],
            description=data.get('description'),
            emoji=emoji,
            default=data.get('default', False),
        )

    def to_dict(self) -> SelectOptionPayload:
        payload: SelectOptionPayload = {
            'label': self.label,
            'value': self.value,
            'default': self.default,
        }
        # Optional fields are omitted when unset.
        if self.emoji:
            payload['emoji'] = self.emoji.to_dict()
        if self.description:
            payload['description'] = self.description
        return payload
class TextInput(Component):
    """Represents a text input from the Discord Bot UI Kit.

    .. note::

        The user constructible and usable type to create a text input is
        :class:`discord.ui.TextInput` not this one.

    .. versionadded:: 2.0

    Attributes
    ------------
    custom_id: Optional[:class:`str`]
        The ID of the text input that gets received during an interaction.
    label: :class:`str`
        The label to display above the text input.
    style: :class:`TextStyle`
        The style of the text input.
    placeholder: Optional[:class:`str`]
        The placeholder text to display when the text input is empty.
    value: Optional[:class:`str`]
        The default value of the text input.
    required: :class:`bool`
        Whether the text input is required.
    min_length: Optional[:class:`int`]
        The minimum length of the text input.
    max_length: Optional[:class:`int`]
        The maximum length of the text input.
    """

    __slots__: Tuple[str, ...] = (
        'style',
        'label',
        'custom_id',
        'placeholder',
        'value',
        'required',
        'min_length',
        'max_length',
    )

    __repr_info__: ClassVar[Tuple[str, ...]] = __slots__

    def __init__(self, data: TextInputPayload, /) -> None:
        self.style: TextStyle = try_enum(TextStyle, data['style'])
        self.label: str = data['label']
        self.custom_id: str = data['custom_id']
        self.placeholder: Optional[str] = data.get('placeholder')
        self.value: Optional[str] = data.get('value')
        self.required: bool = data.get('required', True)
        self.min_length: Optional[int] = data.get('min_length')
        self.max_length: Optional[int] = data.get('max_length')

    @property
    def type(self) -> Literal[ComponentType.text_input]:
        """:class:`ComponentType`: The type of component."""
        return ComponentType.text_input

    def to_dict(self) -> TextInputPayload:
        payload: TextInputPayload = {
            'type': self.type.value,
            'style': self.style.value,
            'label': self.label,
            'custom_id': self.custom_id,
            'required': self.required,
        }
        # Optional fields are omitted when falsy.
        if self.placeholder:
            payload['placeholder'] = self.placeholder
        if self.value:
            payload['value'] = self.value
        if self.min_length:
            payload['min_length'] = self.min_length
        if self.max_length:
            payload['max_length'] = self.max_length
        return payload

    @property
    def default(self) -> Optional[str]:
        """Optional[:class:`str`]: The default value of the text input.

        This is an alias to :attr:`value`.
        """
        return self.value
@overload
def _component_factory(data: ActionRowChildComponentPayload) -> Optional[ActionRowChildComponentType]:
    ...


@overload
def _component_factory(data: ComponentPayload) -> Optional[Union[ActionRow, ActionRowChildComponentType]]:
    ...


def _component_factory(data: ComponentPayload) -> Optional[Union[ActionRow, ActionRowChildComponentType]]:
    # Map the raw discriminator to a wrapper class; unknown types yield None
    # so callers can skip payloads this library version does not understand.
    component_type = data['type']
    if component_type == 1:
        return ActionRow(data)
    if component_type == 2:
        return Button(data)
    if component_type == 3:
        return SelectMenu(data)
    if component_type == 4:
        return TextInput(data)
    return None
| mit | 516b60c1dcf58a0065d93f574e4219ec | 30.090909 | 113 | 0.605141 | 4.118414 | false | false | false | false |
rapptz/discord.py | discord/types/command.py | 2 | 6266 | """
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
from typing import Dict, List, Literal, Optional, TypedDict, Union
from typing_extensions import NotRequired, Required
from .channel import ChannelType
from .snowflake import Snowflake
# Discriminator values carried in the `type` field of command payloads.
ApplicationCommandType = Literal[1, 2, 3]
# Discriminator values carried in the `type` field of option payloads.
ApplicationCommandOptionType = Literal[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]


class _BaseApplicationCommandOption(TypedDict):
    # Fields common to every command option payload; the localization
    # mappings are optional on the wire.
    name: str
    description: str
    name_localizations: NotRequired[Optional[Dict[str, str]]]
    description_localizations: NotRequired[Optional[Dict[str, str]]]


class _SubCommandCommandOption(_BaseApplicationCommandOption):
    # type == 1; a subcommand's options are concrete value options.
    type: Literal[1]
    options: List[_ValueApplicationCommandOption]


class _SubCommandGroupCommandOption(_BaseApplicationCommandOption):
    # type == 2; a group may only contain subcommands.
    type: Literal[2]
    options: List[_SubCommandCommandOption]


class _BaseValueApplicationCommandOption(_BaseApplicationCommandOption, total=False):
    # total=False makes `required` optional on the wire.
    required: bool


class _StringApplicationCommandOptionChoice(TypedDict):
    name: str
    name_localizations: NotRequired[Optional[Dict[str, str]]]
    value: str


class _StringApplicationCommandOption(_BaseApplicationCommandOption):
    # type == 3; string-valued option with optional choices/length bounds.
    type: Literal[3]
    choices: NotRequired[List[_StringApplicationCommandOptionChoice]]
    min_length: NotRequired[int]
    max_length: NotRequired[int]
    autocomplete: NotRequired[bool]


class _IntegerApplicationCommandOptionChoice(TypedDict):
    name: str
    name_localizations: NotRequired[Optional[Dict[str, str]]]
    value: int


class _IntegerApplicationCommandOption(_BaseApplicationCommandOption, total=False):
    # type == 4; the class-level total=False makes everything optional, so
    # `type` is pinned back to required via Required[].
    type: Required[Literal[4]]
    min_value: int
    max_value: int
    choices: List[_IntegerApplicationCommandOptionChoice]
    autocomplete: bool
class _BooleanApplicationCommandOption(_BaseValueApplicationCommandOption):
type: Literal[5]
class _ChannelApplicationCommandOptionChoice(_BaseApplicationCommandOption):
type: Literal[7]
channel_types: NotRequired[List[ChannelType]]
class _NonChannelSnowflakeApplicationCommandOptionChoice(_BaseValueApplicationCommandOption):
type: Literal[6, 8, 9, 11]
_SnowflakeApplicationCommandOptionChoice = Union[
_ChannelApplicationCommandOptionChoice,
_NonChannelSnowflakeApplicationCommandOptionChoice,
]
class _NumberApplicationCommandOptionChoice(TypedDict):
    # A selectable choice for a number option (type 10).
    name: str
    name_localizations: NotRequired[Optional[Dict[str, str]]]
    value: float
class _NumberApplicationCommandOption(_BaseValueApplicationCommandOption, total=False):
    # type == 10: floating-point option; only ``type`` is mandatory.
    type: Required[Literal[10]]
    min_value: float
    max_value: float
    choices: List[_NumberApplicationCommandOptionChoice]
    autocomplete: bool
# Union of all value-carrying option payloads (i.e. everything except
# sub-commands and sub-command groups).
_ValueApplicationCommandOption = Union[
    _StringApplicationCommandOption,
    _IntegerApplicationCommandOption,
    _BooleanApplicationCommandOption,
    _SnowflakeApplicationCommandOptionChoice,
    _NumberApplicationCommandOption,
]
# Any option payload, including sub-commands and groups.
ApplicationCommandOption = Union[
    _SubCommandGroupCommandOption,
    _SubCommandCommandOption,
    _ValueApplicationCommandOption,
]
# Any option *choice* payload.
ApplicationCommandOptionChoice = Union[
    _StringApplicationCommandOptionChoice,
    _IntegerApplicationCommandOptionChoice,
    _NumberApplicationCommandOptionChoice,
]
class _BaseApplicationCommand(TypedDict):
    # Fields common to every application command payload.
    id: Snowflake
    application_id: Snowflake
    name: str
    dm_permission: NotRequired[Optional[bool]]
    default_member_permissions: NotRequired[Optional[str]]
    nsfw: NotRequired[bool]
    version: Snowflake
    name_localizations: NotRequired[Optional[Dict[str, str]]]
    description_localizations: NotRequired[Optional[Dict[str, str]]]
class _ChatInputApplicationCommand(_BaseApplicationCommand, total=False):
    # type == 1: slash command. Options are either all value options or all
    # sub-commands/groups, never a mix — hence the union of two list types.
    description: Required[str]
    type: Literal[1]
    options: Union[
        List[_ValueApplicationCommandOption],
        List[Union[_SubCommandCommandOption, _SubCommandGroupCommandOption]],
    ]
class _BaseContextMenuApplicationCommand(_BaseApplicationCommand):
    # Context-menu commands always carry an empty description.
    description: Literal[""]
class _UserApplicationCommand(_BaseContextMenuApplicationCommand):
    # type == 2: user context-menu command.
    type: Literal[2]
class _MessageApplicationCommand(_BaseContextMenuApplicationCommand):
    # type == 3: message context-menu command.
    type: Literal[3]
# A command registered globally (no guild_id field).
GlobalApplicationCommand = Union[
    _ChatInputApplicationCommand,
    _UserApplicationCommand,
    _MessageApplicationCommand,
]
class _GuildChatInputApplicationCommand(_ChatInputApplicationCommand):
    # Guild-scoped variant: same payload plus the owning guild's ID.
    guild_id: Snowflake
class _GuildUserApplicationCommand(_UserApplicationCommand):
    guild_id: Snowflake
class _GuildMessageApplicationCommand(_MessageApplicationCommand):
    guild_id: Snowflake
# A command registered in a specific guild (carries guild_id).
GuildApplicationCommand = Union[
    _GuildChatInputApplicationCommand,
    _GuildUserApplicationCommand,
    _GuildMessageApplicationCommand,
]
# Any application command, global or guild-scoped.
ApplicationCommand = Union[
    GlobalApplicationCommand,
    GuildApplicationCommand,
]
# Permission target kinds: 1 = role, 2 = user, 3 = channel — presumably;
# TODO confirm against the API docs, the mapping is not visible here.
ApplicationCommandPermissionType = Literal[1, 2, 3]
class ApplicationCommandPermissions(TypedDict):
    # One permission override: target ``id`` of ``type``, allowed or not.
    id: Snowflake
    type: ApplicationCommandPermissionType
    permission: bool
class GuildApplicationCommandPermissions(TypedDict):
    # All permission overrides for one command in one guild.
    id: Snowflake
    application_id: Snowflake
    guild_id: Snowflake
    permissions: List[ApplicationCommandPermissions]
| mit | d2217f812cdd8a80008151d4096ff027 | 27.743119 | 93 | 0.783434 | 4.19411 | false | false | false | false |
alphagov/notifications-utils | notifications_utils/clients/redis/request_cache.py | 1 | 2989 | import json
from contextlib import suppress
from datetime import timedelta
from functools import wraps
from inspect import signature
class RequestCache:
    """Decorator factory that caches API-client method results in redis.

    ``set`` caches a method's JSON-serialisable return value under a key
    built by formatting ``key_format`` with the method's call arguments;
    ``delete`` and ``delete_by_pattern`` invalidate the corresponding key
    (or key pattern) after the wrapped method runs.
    """

    # Cached entries expire after a week by default.
    DEFAULT_TTL = int(timedelta(days=7).total_seconds())

    def __init__(self, redis_client):
        # Any object exposing get/set/delete/delete_by_pattern with
        # redis-py-style signatures (get returns bytes or None).
        self.redis_client = redis_client

    @staticmethod
    def _get_argument(argument_name, client_method, args, kwargs):
        """Resolve the value passed for ``argument_name`` in a call to ``client_method``.

        Tries, in order: keyword arguments, positional arguments (matched by
        the parameter's position in the signature), then the parameter's
        declared default.

        Raises:
            TypeError: if ``client_method`` has no parameter with that name.
        """
        with suppress(KeyError):
            return kwargs[argument_name]
        with suppress(ValueError, IndexError):
            argument_index = list(signature(client_method).parameters).index(argument_name)
            return args[argument_index]
        with suppress(KeyError):
            return signature(client_method).parameters[argument_name].default
        raise TypeError("{}() takes no argument called '{}'".format(client_method.__name__, argument_name))

    @staticmethod
    def _make_key(key_format, client_method, args, kwargs):
        """Build the redis key by formatting ``key_format`` with every call argument."""
        return key_format.format(
            **{
                argument_name: RequestCache._get_argument(argument_name, client_method, args, kwargs)
                for argument_name in list(signature(client_method).parameters)
            }
        )

    def set(self, key_format, *, ttl_in_seconds=DEFAULT_TTL):
        """Return a decorator that caches the wrapped method's JSON result.

        On a cache hit the stored JSON is returned without calling the
        method; on a miss the method's return value is stored with the
        given TTL and returned.
        """
        def _set(client_method):
            @wraps(client_method)
            def new_client_method(*args, **kwargs):
                redis_key = RequestCache._make_key(key_format, client_method, args, kwargs)
                cached = self.redis_client.get(redis_key)
                if cached:
                    return json.loads(cached.decode("utf-8"))
                api_response = client_method(*args, **kwargs)
                self.redis_client.set(
                    redis_key,
                    json.dumps(api_response),
                    ex=int(ttl_in_seconds),
                )
                return api_response
            return new_client_method
        return _set

    def _invalidation_decorator(self, key_format, redis_delete_method_name):
        """Shared implementation for ``delete`` and ``delete_by_pattern``.

        Returns a decorator that runs the wrapped method and then calls the
        named redis-client method with the formatted key. Invalidation
        happens in a ``finally`` block so the key is cleared even when the
        wrapped method raises; the exception still propagates.
        """
        def _decorator(client_method):
            @wraps(client_method)
            def new_client_method(*args, **kwargs):
                try:
                    api_response = client_method(*args, **kwargs)
                finally:
                    redis_key = self._make_key(key_format, client_method, args, kwargs)
                    # Resolve the redis method lazily so a rebound
                    # redis_client is honoured.
                    getattr(self.redis_client, redis_delete_method_name)(redis_key)
                return api_response
            return new_client_method
        return _decorator

    def delete(self, key_format):
        """Return a decorator that deletes the formatted key after the method runs."""
        return self._invalidation_decorator(key_format, "delete")

    def delete_by_pattern(self, key_format):
        """Return a decorator that deletes all keys matching the formatted pattern."""
        return self._invalidation_decorator(key_format, "delete_by_pattern")
| mit | 8e8d42a95f1c8a7b918554f55aee7535 | 33.356322 | 107 | 0.567748 | 4.487988 | false | false | false | false |
alphagov/notifications-utils | tests/clients/zendesk/test_zendesk_client.py | 1 | 7033 | from base64 import b64decode
import pytest
from notifications_utils.clients.zendesk.zendesk_client import (
NotifySupportTicket,
ZendeskClient,
ZendeskError,
)
@pytest.fixture(scope="function")
def zendesk_client(app):
    """Return a ZendeskClient initialised against the test app with a dummy API key."""
    client = ZendeskClient()
    app.config["ZENDESK_API_KEY"] = "testkey"
    client.init_app(app)
    return client
def test_zendesk_client_send_ticket_to_zendesk(zendesk_client, app, mocker, rmock):
    """A successful create-ticket call authenticates with basic auth, posts the
    ticket payload, and logs the new ticket's ID."""
    rmock.request(
        "POST",
        ZendeskClient.ZENDESK_TICKET_URL,
        status_code=201,
        json={
            "ticket": {
                "id": 12345,
                "subject": "Something is wrong",
            }
        },
    )
    mock_logger = mocker.patch.object(app.logger, "info")
    ticket = NotifySupportTicket("subject", "message", "incident")
    zendesk_client.send_ticket_to_zendesk(ticket)
    # Basic auth credentials are the Zendesk agent address plus the API key.
    assert rmock.last_request.headers["Authorization"][:6] == "Basic "
    b64_auth = rmock.last_request.headers["Authorization"][6:]
    assert b64decode(b64_auth.encode()).decode() == "zd-api-notify@digital.cabinet-office.gov.uk/token:testkey"
    # The POST body is exactly the ticket's request_data payload.
    assert rmock.last_request.json() == ticket.request_data
    mock_logger.assert_called_once_with("Zendesk create ticket 12345 succeeded")
def test_zendesk_client_send_ticket_to_zendesk_error(zendesk_client, app, mocker, rmock):
    """A non-2xx response from Zendesk raises ZendeskError and logs the failure."""
    rmock.request("POST", ZendeskClient.ZENDESK_TICKET_URL, status_code=401, json={"foo": "bar"})
    logger_spy = mocker.patch.object(app.logger, "error")
    support_ticket = NotifySupportTicket("subject", "message", "incident")
    with pytest.raises(ZendeskError):
        zendesk_client.send_ticket_to_zendesk(support_ticket)
    logger_spy.assert_called_with("Zendesk create ticket request failed with 401 '{'foo': 'bar'}'")
@pytest.mark.parametrize(
    "p1_arg, expected_tags, expected_priority",
    (
        (
            {},
            ["govuk_notify_support"],
            "normal",
        ),
        (
            {
                "p1": False,
            },
            ["govuk_notify_support"],
            "normal",
        ),
        (
            {
                "p1": True,
            },
            ["govuk_notify_emergency"],
            "urgent",
        ),
    ),
)
def test_notify_support_ticket_request_data(p1_arg, expected_tags, expected_priority):
    """The full request payload is built correctly; a P1 ticket gets the
    emergency tag and urgent priority, otherwise the support tag and normal
    priority."""
    notify_ticket_form = NotifySupportTicket("subject", "message", "question", **p1_arg)
    assert notify_ticket_form.request_data == {
        "ticket": {
            "subject": "subject",
            "comment": {
                "body": "message",
                "public": True,
            },
            "group_id": NotifySupportTicket.NOTIFY_GROUP_ID,
            "organization_id": NotifySupportTicket.NOTIFY_ORG_ID,
            "ticket_form_id": NotifySupportTicket.NOTIFY_TICKET_FORM_ID,
            "priority": expected_priority,
            "tags": expected_tags,
            "type": "question",
            "custom_fields": [
                {"id": "1900000744994", "value": "notify_ticket_type_non_technical"},
                {"id": "360022836500", "value": []},
                {"id": "360022943959", "value": None},
                {"id": "360022943979", "value": None},
                {"id": "1900000745014", "value": None},
            ],
        }
    }
def test_notify_support_ticket_request_data_with_message_hidden_from_requester():
    """requester_sees_message_content=False marks the Zendesk comment as private."""
    ticket = NotifySupportTicket("subject", "message", "problem", requester_sees_message_content=False)
    comment = ticket.request_data["ticket"]["comment"]
    assert comment["public"] is False
@pytest.mark.parametrize("name, zendesk_name", [("Name", "Name"), (None, "(no name supplied)")])
def test_notify_support_ticket_request_data_with_user_name_and_email(name, zendesk_name):
    """The requester block carries the user's email; a missing name becomes a placeholder."""
    notify_ticket_form = NotifySupportTicket(
        "subject", "message", "question", user_name=name, user_email="user@example.com"
    )
    assert notify_ticket_form.request_data["ticket"]["requester"]["email"] == "user@example.com"
    assert notify_ticket_form.request_data["ticket"]["requester"]["name"] == zendesk_name
@pytest.mark.parametrize(
    "custom_fields, tech_ticket_tag, categories, org_id, org_type, service_id",
    [
        ({"technical_ticket": True}, "notify_ticket_type_technical", [], None, None, None),
        ({"technical_ticket": False}, "notify_ticket_type_non_technical", [], None, None, None),
        (
            {"ticket_categories": ["notify_billing", "notify_bug"]},
            "notify_ticket_type_non_technical",
            ["notify_billing", "notify_bug"],
            None,
            None,
            None,
        ),
        (
            {"org_id": "1234", "org_type": "local"},
            "notify_ticket_type_non_technical",
            [],
            "1234",
            "notify_org_type_local",
            None,
        ),
        (
            {"service_id": "abcd", "org_type": "nhs"},
            "notify_ticket_type_non_technical",
            [],
            None,
            "notify_org_type_nhs",
            "abcd",
        ),
    ],
)
def test_notify_support_ticket_request_data_custom_fields(
    custom_fields,
    tech_ticket_tag,
    categories,
    org_id,
    org_type,
    service_id,
):
    """Each keyword argument maps to the matching Zendesk custom field ID."""
    notify_ticket_form = NotifySupportTicket("subject", "message", "question", **custom_fields)
    assert notify_ticket_form.request_data["ticket"]["custom_fields"] == [
        {"id": "1900000744994", "value": tech_ticket_tag},
        {"id": "360022836500", "value": categories},
        {"id": "360022943959", "value": org_id},
        {"id": "360022943979", "value": org_type},
        {"id": "1900000745014", "value": service_id},
    ]
def test_notify_support_ticket_request_data_email_ccs():
    """email_ccs entries are sent as Zendesk email_ccs records with a ``put`` action."""
    ticket = NotifySupportTicket("subject", "message", "question", email_ccs=["someone@example.com"])
    expected = [{"user_email": "someone@example.com", "action": "put"}]
    assert ticket.request_data["ticket"]["email_ccs"] == expected
def test_notify_support_ticket_with_html_body():
    """message_as_html=True sends the comment as ``html_body`` instead of ``body``."""
    notify_ticket_form = NotifySupportTicket("subject", "message", "task", message_as_html=True)
    assert notify_ticket_form.request_data == {
        "ticket": {
            "subject": "subject",
            "comment": {
                "html_body": "message",
                "public": True,
            },
            "group_id": NotifySupportTicket.NOTIFY_GROUP_ID,
            "organization_id": NotifySupportTicket.NOTIFY_ORG_ID,
            "ticket_form_id": NotifySupportTicket.NOTIFY_TICKET_FORM_ID,
            "priority": "normal",
            "tags": ["govuk_notify_support"],
            "type": "task",
            "custom_fields": [
                {"id": "1900000744994", "value": "notify_ticket_type_non_technical"},
                {"id": "360022836500", "value": []},
                {"id": "360022943959", "value": None},
                {"id": "360022943979", "value": None},
                {"id": "1900000745014", "value": None},
            ],
        }
    }
| mit | ac073a2507e0db0ed76ebbbdbb5ce12d | 32.650718 | 115 | 0.56633 | 3.612224 | false | true | false | false |
alphagov/notifications-utils | notifications_utils/countries/data.py | 1 | 2192 | import json
import os
def _load_data(filename):
with open(os.path.join(os.path.dirname(__file__), "_data", filename)) as contents:
if filename.endswith(".json"):
return json.load(contents)
return [line.strip() for line in contents.readlines()]
def find_canonical(item, graph, key):
    """Follow an item's ``from`` edges through the graph until a canonical
    entry is found, returning ``(key, canonical English name)``.

    ``key`` is carried through unchanged so the caller gets back the synonym
    it started from alongside the canonical name it resolves to.
    """
    current = item
    while not current["meta"]["canonical"]:
        # Each non-canonical entry points at its successor via its first
        # "from" edge.
        current = graph[current["edges"]["from"][0]]
    return key, current["names"]["en-GB"]
# Copied from
# https://github.com/alphagov/govuk-country-and-territory-autocomplete
# /blob/b61091a502983fd2a77b3cdb5f94a604412eb093
# /dist/location-autocomplete-graph.json
_graph = _load_data("location-autocomplete-graph.json")
UK = "United Kingdom"
# Maps an ended country's canonical name to its successor (or a falsy value
# if there is no single successor).
ENDED_COUNTRIES = _load_data("ended-countries.json")
ADDITIONAL_SYNONYMS = list(_load_data("synonyms.json").items())
WELSH_NAMES = list(_load_data("welsh-names.json").items())
_UK_ISLANDS_LIST = _load_data("uk-islands.txt")
_EUROPEAN_ISLANDS_LIST = _load_data("european-islands.txt")
# Every (synonym, canonical name) pair in the graph, before UK islands and
# ended countries are normalised.
CURRENT_AND_ENDED_COUNTRIES_AND_TERRITORIES = [
    find_canonical(item, _graph, item["names"]["en-GB"]) for item in _graph.values()
]
COUNTRIES_AND_TERRITORIES = []
for synonym, canonical in CURRENT_AND_ENDED_COUNTRIES_AND_TERRITORIES:
    if canonical in _UK_ISLANDS_LIST:
        # UK islands resolve to the United Kingdom itself.
        COUNTRIES_AND_TERRITORIES.append((synonym, UK))
    elif canonical in ENDED_COUNTRIES:
        # Ended countries are mapped to their successor; countries with no
        # single successor are dropped entirely.
        succeeding_country = ENDED_COUNTRIES[canonical]
        if succeeding_country:
            COUNTRIES_AND_TERRITORIES.append((synonym, succeeding_country))
            COUNTRIES_AND_TERRITORIES.append((canonical, succeeding_country))
    else:
        COUNTRIES_AND_TERRITORIES.append((synonym, canonical))
UK_ISLANDS = [(synonym, UK) for synonym in _UK_ISLANDS_LIST]
# European islands map to themselves (they are their own postage destination).
EUROPEAN_ISLANDS = [(synonym, synonym) for synonym in _EUROPEAN_ISLANDS_LIST]
# Copied from https://www.royalmail.com/international-zones#europe
# Modified to use the canonical names for countries where incorrect
ROYAL_MAIL_EUROPEAN = _load_data("europe.txt")
class Postage:
    """String constants identifying the postage zone a letter falls into."""
    UK = "united-kingdom"
    FIRST = "first"
    SECOND = "second"
    EUROPE = "europe"
    REST_OF_WORLD = "rest-of-world"
alphagov/notifications-utils | tests/clients/antivirus/test_antivirus_client.py | 1 | 1579 | import io
import pytest
import requests
from notifications_utils.clients.antivirus.antivirus_client import (
AntivirusClient,
AntivirusError,
)
@pytest.fixture(scope="function")
def antivirus(app, mocker):
    """Return an AntivirusClient initialised against the test app with a dummy host and key."""
    client = AntivirusClient()
    app.config["ANTIVIRUS_API_HOST"] = "https://antivirus"
    app.config["ANTIVIRUS_API_KEY"] = "test-antivirus-key"
    client.init_app(app)
    return client
def test_scan_document(antivirus, rmock):
    """A clean scan POSTs the file with a bearer token and leaves the stream rewound."""
    document = io.BytesIO(b"filecontents")
    rmock.request(
        "POST",
        "https://antivirus/scan",
        json={"ok": True},
        request_headers={
            "Authorization": "Bearer test-antivirus-key",
        },
        status_code=200,
    )
    resp = antivirus.scan(document)
    assert resp
    assert "filecontents" in rmock.last_request.text
    # The client must rewind the stream so callers can re-read the file.
    assert document.tell() == 0
def test_should_raise_for_status(antivirus, rmock):
    """A 4xx response from the antivirus API raises AntivirusError with its message and status."""
    rmock.request("POST", "https://antivirus/scan", json={"error": "Antivirus error"}, status_code=400)
    with pytest.raises(AntivirusError) as excinfo:
        antivirus.scan(io.BytesIO(b"document"))
    assert excinfo.value.message == "Antivirus error"
    assert excinfo.value.status_code == 400
def test_should_raise_for_connection_errors(antivirus, rmock):
    """A transport-level failure surfaces as AntivirusError with a 503 status."""
    rmock.request("POST", "https://antivirus/scan", exc=requests.exceptions.ConnectTimeout)
    with pytest.raises(AntivirusError) as excinfo:
        antivirus.scan(io.BytesIO(b"document"))
    assert excinfo.value.message == "connection error"
    assert excinfo.value.status_code == 503
| mit | f94a05d64d9a748bb3e14ae3ecdfe652 | 27.196429 | 107 | 0.678911 | 3.403017 | false | true | false | false |
alphagov/notifications-utils | notifications_utils/celery.py | 1 | 3295 | import time
from contextlib import contextmanager
from celery import Celery, Task
from flask import g, request
from flask.ctx import has_app_context, has_request_context
def make_task(app):
    """Build a Celery Task base class bound to the given Flask ``app``.

    The returned class runs every task inside the app's application context,
    records the task's start time, and reports duration/success/failure to
    the app's logger and statsd client.
    """
    class NotifyTask(Task):
        abstract = True
        # Monotonic start time of the current run; set in __call__ and read
        # in on_success.
        start = None
        @property
        def queue_name(self):
            # Celery exposes the queue via the message's routing key; fall
            # back to "none" when delivery info is absent (e.g. eager mode).
            delivery_info = self.request.delivery_info or {}
            return delivery_info.get("routing_key", "none")
        @property
        def request_id(self):
            # Note that each header is a direct attribute of the
            # task context (aka "request").
            return self.request.get("notify_request_id")
        @contextmanager
        def app_context(self):
            with app.app_context():
                # Add 'request_id' to 'g' so that it gets logged.
                g.request_id = self.request_id
                yield
        def on_success(self, retval, task_id, args, kwargs):
            # enables request id tracing for these logs
            with self.app_context():
                elapsed_time = time.monotonic() - self.start
                app.logger.info(
                    "Celery task {task_name} (queue: {queue_name}) took {time}".format(
                        task_name=self.name, queue_name=self.queue_name, time="{0:.4f}".format(elapsed_time)
                    )
                )
                app.statsd_client.timing(
                    "celery.{queue_name}.{task_name}.success".format(task_name=self.name, queue_name=self.queue_name),
                    elapsed_time,
                )
        def on_failure(self, exc, task_id, args, kwargs, einfo):
            # enables request id tracing for these logs
            with self.app_context():
                app.logger.exception(
                    "Celery task {task_name} (queue: {queue_name}) failed".format(
                        task_name=self.name,
                        queue_name=self.queue_name,
                    )
                )
                app.statsd_client.incr(
                    "celery.{queue_name}.{task_name}.failure".format(task_name=self.name, queue_name=self.queue_name)
                )
        def __call__(self, *args, **kwargs):
            # ensure task has flask context to access config, logger, etc
            with self.app_context():
                self.start = time.monotonic()
                return super().__call__(*args, **kwargs)
    return NotifyTask
class NotifyCelery(Celery):
    """Celery app that uses the Notify task base class and propagates request IDs."""
    def init_app(self, app):
        """Initialise Celery from the Flask app's config, using NotifyTask as the task base."""
        super().__init__(
            task_cls=make_task(app),
        )
        # Make sure this is present upfront to avoid errors later on.
        assert app.statsd_client
        # Configure Celery app with options from the main app config.
        self.conf.update(app.config["CELERY"])
    def send_task(self, name, args=None, kwargs=None, **other_kwargs):
        """Send a task, attaching the current request ID as a message header.

        The ID is taken from the request if one is active, otherwise from
        the app-context ``g`` — so tasks enqueued from other tasks keep the
        originating request's ID (see NotifyTask.request_id).
        """
        other_kwargs["headers"] = other_kwargs.get("headers") or {}
        if has_request_context() and hasattr(request, "request_id"):
            other_kwargs["headers"]["notify_request_id"] = request.request_id
        elif has_app_context() and "request_id" in g:
            other_kwargs["headers"]["notify_request_id"] = g.request_id
        return super().send_task(name, args, kwargs, **other_kwargs)
| mit | 08b2877d50173c021f4e14f0993b0538 | 34.815217 | 118 | 0.553566 | 4.186785 | false | false | false | false |
influxdata/influxdb-python | examples/tutorial_sine_wave.py | 2 | 1991 | # -*- coding: utf-8 -*-
"""Tutorial using all elements to define a sine wave."""
import argparse
import math
import datetime
import time
from influxdb import InfluxDBClient
# Connection defaults for the local InfluxDB instance used by this tutorial.
USER = 'root'
PASSWORD = 'root'
DBNAME = 'tutorial'
def main(host='localhost', port=8086):
    """Generate one period of a sine wave, write it to InfluxDB and read it back.

    The wave is sampled once per degree (360 points), offset so all values
    are positive, and each sample is timestamped one second apart starting
    from "now". The tutorial database is created, queried, then dropped.
    """
    now = datetime.datetime.today()
    # Epoch seconds for "now". int(now.timestamp()) is portable, unlike the
    # previous strftime('%s'), which is a glibc-only extension and fails on
    # Windows.
    start_timestamp = int(now.timestamp())
    points = []
    for angle in range(0, 360):
        y = 10 + math.sin(math.radians(angle)) * 10
        point = {
            "measurement": 'foobar',
            "time": start_timestamp + angle,
            "fields": {
                "value": y
            }
        }
        points.append(point)
    client = InfluxDBClient(host, port, USER, PASSWORD, DBNAME)
    print("Create database: " + DBNAME)
    client.create_database(DBNAME)
    client.switch_database(DBNAME)
    # Write points
    client.write_points(points)
    time.sleep(3)
    query = 'SELECT * FROM foobar'
    print("Querying data: " + query)
    result = client.query(query, database=DBNAME)
    print("Result: {0}".format(result))
    # You might want to comment out the delete below and plot the result in
    # the InfluxDB interface at http://127.0.0.1:8083/ instead: select the
    # database "tutorial" -> Explore Data, then run: SELECT * from foobar
    print("Delete database: " + DBNAME)
    client.drop_database(DBNAME)
def parse_args():
    """Parse the command line options for this example script."""
    option_specs = [
        ('--host', dict(type=str, required=False,
                        default='localhost',
                        help='hostname influxdb http API')),
        ('--port', dict(type=int, required=False, default=8086,
                        help='port influxdb http API')),
    ]
    arg_parser = argparse.ArgumentParser(
        description='example code to play with InfluxDB')
    for flag, options in option_specs:
        arg_parser.add_argument(flag, **options)
    return arg_parser.parse_args()
if __name__ == '__main__':
    # Script entry point: parse CLI options, then run the demo.
    args = parse_args()
    main(host=args.host, port=args.port)
| mit | 1e51b08efed9a7ed63fa9fc310c58448 | 24.525641 | 73 | 0.604721 | 3.851064 | false | false | false | false |
alphagov/notifications-utils | tests/test_countries_iso.py | 1 | 20457 | import pytest
from notifications_utils.countries import Country, CountryNotFoundError
def _country_not_found(*test_case):
    """Wrap a parametrize case that is expected to raise CountryNotFoundError."""
    xfail_marker = pytest.mark.xfail(raises=CountryNotFoundError)
    return pytest.param(*test_case, marks=xfail_marker)
@pytest.mark.parametrize(
"alpha_2, expected_name",
(
("AF", "Afghanistan"),
("AL", "Albania"),
("DZ", "Algeria"),
("AS", "American Samoa"),
("AD", "Andorra"),
("AO", "Angola"),
("AI", "Anguilla"),
("AQ", "Antarctica"),
("AG", "Antigua and Barbuda"),
("AR", "Argentina"),
("AM", "Armenia"),
("AW", "Aruba"),
("AU", "Australia"),
("AT", "Austria"),
("AZ", "Azerbaijan"),
("BS", "The Bahamas"),
("BH", "Bahrain"),
("BD", "Bangladesh"),
("BB", "Barbados"),
("BY", "Belarus"),
("BE", "Belgium"),
("BZ", "Belize"),
("BJ", "Benin"),
("BM", "Bermuda"),
("BT", "Bhutan"),
("BO", "Bolivia"),
_country_not_found("BQ", "Bonaire, Sint Eustatius and Saba"),
("BA", "Bosnia and Herzegovina"),
("BW", "Botswana"),
("BV", "Bouvet Island"),
("BR", "Brazil"),
("IO", "British Indian Ocean Territory"),
("BN", "Brunei"),
("BG", "Bulgaria"),
("BF", "Burkina Faso"),
("BI", "Burundi"),
("CV", "Cape Verde"),
("KH", "Cambodia"),
("CM", "Cameroon"),
("CA", "Canada"),
("KY", "Cayman Islands"),
("CF", "Central African Republic"),
("TD", "Chad"),
("CL", "Chile"),
("CN", "China"),
("CX", "Christmas Island"),
("CC", "Cocos (Keeling) Islands"),
("CO", "Colombia"),
("KM", "Comoros"),
("CD", "Congo (Democratic Republic)"),
("CG", "Congo"),
("CK", "Cook Islands"),
("CR", "Costa Rica"),
("HR", "Croatia"),
("CU", "Cuba"),
("CW", "Curaçao"),
("CY", "Cyprus"),
("CZ", "Czechia"),
("CI", "Ivory Coast"),
("DK", "Denmark"),
("DJ", "Djibouti"),
("DM", "Dominica"),
("DO", "Dominican Republic"),
("EC", "Ecuador"),
("EG", "Egypt"),
("SV", "El Salvador"),
("GQ", "Equatorial Guinea"),
("ER", "Eritrea"),
("EE", "Estonia"),
("SZ", "Eswatini"),
("ET", "Ethiopia"),
("FK", "Falkland Islands"),
("FO", "Faroe Islands"),
("FJ", "Fiji"),
("FI", "Finland"),
("FR", "France"),
("GF", "French Guiana"),
("PF", "French Polynesia"),
("TF", "French Southern Territories"),
("GA", "Gabon"),
("GM", "The Gambia"),
("GE", "Georgia"),
("DE", "Germany"),
("GH", "Ghana"),
("GI", "Gibraltar"),
("GR", "Greece"),
("GL", "Greenland"),
("GD", "Grenada"),
("GP", "Guadeloupe"),
("GU", "Guam"),
("GT", "Guatemala"),
("GG", "United Kingdom"),
("GN", "Guinea"),
("GW", "Guinea-Bissau"),
("GY", "Guyana"),
("HT", "Haiti"),
("HM", "Heard Island and McDonald Islands"),
("VA", "Vatican City"),
("HN", "Honduras"),
("HK", "Hong Kong"),
("HU", "Hungary"),
("IS", "Iceland"),
("IN", "India"),
("ID", "Indonesia"),
("IR", "Iran"),
("IQ", "Iraq"),
("IE", "Ireland"),
("IM", "United Kingdom"),
("IL", "Israel"),
("IT", "Italy"),
("JM", "Jamaica"),
("JP", "Japan"),
("JE", "United Kingdom"),
("JO", "Jordan"),
("KZ", "Kazakhstan"),
("KE", "Kenya"),
("KI", "Kiribati"),
("KP", "North Korea"),
("KR", "South Korea"),
("KW", "Kuwait"),
("KG", "Kyrgyzstan"),
("LA", "Laos"),
("LV", "Latvia"),
("LB", "Lebanon"),
("LS", "Lesotho"),
("LR", "Liberia"),
("LY", "Libya"),
("LI", "Liechtenstein"),
("LT", "Lithuania"),
("LU", "Luxembourg"),
("MO", "Macao"),
("MG", "Madagascar"),
("MW", "Malawi"),
("MY", "Malaysia"),
("MV", "Maldives"),
("ML", "Mali"),
("MT", "Malta"),
("MH", "Marshall Islands"),
("MQ", "Martinique"),
("MR", "Mauritania"),
("MU", "Mauritius"),
("YT", "Mayotte"),
("MX", "Mexico"),
("FM", "Micronesia"),
("MD", "Moldova"),
("MC", "Monaco"),
("MN", "Mongolia"),
("ME", "Montenegro"),
("MS", "Montserrat"),
("MA", "Morocco"),
("MZ", "Mozambique"),
("MM", "Myanmar (Burma)"),
("NA", "Namibia"),
("NR", "Nauru"),
("NP", "Nepal"),
("NL", "Netherlands"),
("NC", "New Caledonia"),
("NZ", "New Zealand"),
("NI", "United Kingdom"), # NI gets interpreted as ‘Northern Ireland’
("NE", "Niger"),
("NG", "Nigeria"),
("NU", "Niue"),
("NF", "Norfolk Island"),
("MK", "North Macedonia"),
("MP", "Northern Mariana Islands"),
("NO", "Norway"),
("OM", "Oman"),
("PK", "Pakistan"),
("PW", "Palau"),
("PS", "Occupied Palestinian Territories"),
("PA", "Panama"),
("PG", "Papua New Guinea"),
("PY", "Paraguay"),
("PE", "Peru"),
("PH", "Philippines"),
("PN", "Pitcairn, Henderson, Ducie and Oeno Islands"),
("PL", "Poland"),
("PT", "Portugal"),
("PR", "Puerto Rico"),
("QA", "Qatar"),
("RO", "Romania"),
("RU", "Russia"),
("RW", "Rwanda"),
("RE", "Réunion"),
("BL", "Saint Barthélemy"),
_country_not_found("SH", "Saint Helena, Ascension and Tristan da Cunha"),
("KN", "St Kitts and Nevis"),
("LC", "St Lucia"),
("MF", "Saint-Martin (French part)"),
("PM", "Saint Pierre and Miquelon"),
("VC", "St Vincent"),
("WS", "Samoa"),
("SM", "San Marino"),
("ST", "Sao Tome and Principe"),
("SA", "Saudi Arabia"),
("SN", "Senegal"),
("RS", "Serbia"),
("SC", "Seychelles"),
("SL", "Sierra Leone"),
("SG", "Singapore"),
("SX", "Sint Maarten (Dutch part)"),
("SK", "Slovakia"),
("SI", "Slovenia"),
("SB", "Solomon Islands"),
("SO", "Somalia"),
("ZA", "South Africa"),
("GS", "South Georgia and South Sandwich Islands"),
("SS", "South Sudan"),
("ES", "Spain"),
("LK", "Sri Lanka"),
("SD", "Sudan"),
("SR", "Suriname"),
("SJ", "Svalbard and Jan Mayen"),
("SE", "Sweden"),
("CH", "Switzerland"),
("SY", "Syria"),
("TW", "Taiwan"),
("TJ", "Tajikistan"),
("TZ", "Tanzania"),
("TH", "Thailand"),
("TL", "East Timor"),
("TG", "Togo"),
("TK", "Tokelau"),
("TO", "Tonga"),
("TT", "Trinidad and Tobago"),
("TN", "Tunisia"),
("TR", "Turkey"),
("TM", "Turkmenistan"),
("TC", "Turks and Caicos Islands"),
("TV", "Tuvalu"),
("UG", "Uganda"),
("UA", "Ukraine"),
("AE", "United Arab Emirates"),
("GB", "United Kingdom"),
_country_not_found("UM", "United States Minor Outlying Islands"),
("US", "United States"),
("UY", "Uruguay"),
("UZ", "Uzbekistan"),
("VU", "Vanuatu"),
("VE", "Venezuela"),
("VN", "Vietnam"),
("VG", "British Virgin Islands"),
("VI", "United States Virgin Islands"),
("WF", "Wallis and Futuna"),
("EH", "Western Sahara"),
("YE", "Yemen"),
("ZM", "Zambia"),
("ZW", "Zimbabwe"),
("AX", "Åland Islands"),
),
)
def test_iso_alpha_2_country_codes(alpha_2, expected_name):
    """Each ISO 3166-1 alpha-2 code should resolve to the expected canonical name."""
    assert Country(alpha_2).canonical_name == expected_name
@pytest.mark.parametrize(
"alpha_3, expected_name",
(
_country_not_found("AFG", "Afghanistan"),
_country_not_found("ALB", "Albania"),
_country_not_found("DZA", "Algeria"),
_country_not_found("ASM", "American Samoa"),
_country_not_found("AND", "Andorra"),
_country_not_found("AGO", "Angola"),
_country_not_found("AIA", "Anguilla"),
_country_not_found("ATA", "Antarctica"),
_country_not_found("ATG", "Antigua and Barbuda"),
_country_not_found("ARG", "Argentina"),
_country_not_found("ARM", "Armenia"),
_country_not_found("ABW", "Aruba"),
_country_not_found("AUS", "Australia"),
_country_not_found("AUT", "Austria"),
_country_not_found("AZE", "Azerbaijan"),
_country_not_found("BHS", "The Bahamas"),
_country_not_found("BHR", "Bahrain"),
_country_not_found("BGD", "Bangladesh"),
_country_not_found("BRB", "Barbados"),
_country_not_found("BLR", "Belarus"),
_country_not_found("BEL", "Belgium"),
_country_not_found("BLZ", "Belize"),
_country_not_found("BEN", "Benin"),
_country_not_found("BMU", "Bermuda"),
_country_not_found("BTN", "Bhutan"),
_country_not_found("BOL", "Bolivia"),
_country_not_found("BES", "Bonaire, Sint Eustatius and Saba"),
("BIH", "Bosnia and Herzegovina"),
_country_not_found("BWA", "Botswana"),
_country_not_found("BVT", "Bouvet Island"),
_country_not_found("BRA", "Brazil"),
("IOT", "British Indian Ocean Territory"),
_country_not_found("BRN", "Brunei"),
_country_not_found("BGR", "Bulgaria"),
_country_not_found("BFA", "Burkina Faso"),
_country_not_found("BDI", "Burundi"),
_country_not_found("CPV", "Cape Verde"),
_country_not_found("KHM", "Cambodia"),
_country_not_found("CMR", "Cameroon"),
_country_not_found("CAN", "Canada"),
_country_not_found("CYM", "Cayman Islands"),
_country_not_found("CAF", "Central African Republic"),
_country_not_found("TCD", "Chad"),
_country_not_found("CHL", "Chile"),
_country_not_found("CHN", "China"),
_country_not_found("CXR", "Christmas Island"),
_country_not_found("CCK", "Cocos (Keeling) Islands"),
_country_not_found("COL", "Colombia"),
_country_not_found("COM", "Comoros"),
_country_not_found("COD", "Congo (Democratic Republic)"),
_country_not_found("COG", "Congo"),
_country_not_found("COK", "Cook Islands"),
_country_not_found("CRI", "Costa Rica"),
_country_not_found("HRV", "Croatia"),
_country_not_found("CUB", "Cuba"),
_country_not_found("CUW", "Curaçao"),
_country_not_found("CYP", "Cyprus"),
_country_not_found("CZE", "Czechia"),
_country_not_found("CIV", "Ivory Coast"),
_country_not_found("DNK", "Denmark"),
_country_not_found("DJI", "Djibouti"),
_country_not_found("DMA", "Dominica"),
_country_not_found("DOM", "Dominican Republic"),
_country_not_found("ECU", "Ecuador"),
_country_not_found("EGY", "Egypt"),
_country_not_found("SLV", "El Salvador"),
_country_not_found("GNQ", "Equatorial Guinea"),
_country_not_found("ERI", "Eritrea"),
_country_not_found("EST", "Estonia"),
_country_not_found("SWZ", "Eswatini"),
_country_not_found("ETH", "Ethiopia"),
_country_not_found("FLK", "Falkland Islands"),
_country_not_found("FRO", "Faroe Islands"),
_country_not_found("FJI", "Fiji"),
_country_not_found("FIN", "Finland"),
_country_not_found("FRA", "France"),
_country_not_found("GUF", "French Guiana"),
_country_not_found("PYF", "French Polynesia"),
_country_not_found("ATF", "French Southern Territories"),
_country_not_found("GAB", "Gabon"),
_country_not_found("GMB", "The Gambia"),
_country_not_found("GEO", "Georgia"),
_country_not_found("DEU", "Germany"),
_country_not_found("GHA", "Ghana"),
_country_not_found("GIB", "Gibraltar"),
_country_not_found("GRC", "Greece"),
_country_not_found("GRL", "Greenland"),
_country_not_found("GRD", "Grenada"),
_country_not_found("GLP", "Guadeloupe"),
_country_not_found("GUM", "Guam"),
_country_not_found("GTM", "Guatemala"),
_country_not_found("GGY", "United Kingdom"),
_country_not_found("GIN", "Guinea"),
_country_not_found("GNB", "Guinea-Bissau"),
_country_not_found("GUY", "Guyana"),
_country_not_found("HTI", "Haiti"),
_country_not_found("HMD", "Heard Island and McDonald Islands"),
_country_not_found("VAT", "Vatican City"),
_country_not_found("HND", "Honduras"),
_country_not_found("HKG", "Hong Kong"),
_country_not_found("HUN", "Hungary"),
_country_not_found("ISL", "Iceland"),
_country_not_found("IND", "India"),
_country_not_found("IDN", "Indonesia"),
_country_not_found("IRN", "Iran"),
_country_not_found("IRQ", "Iraq"),
_country_not_found("IRL", "Ireland"),
_country_not_found("IMN", "United Kingdom"),
_country_not_found("ISR", "Israel"),
_country_not_found("ITA", "Italy"),
_country_not_found("JAM", "Jamaica"),
_country_not_found("JPN", "Japan"),
_country_not_found("JEY", "United Kingdom"),
_country_not_found("JOR", "Jordan"),
_country_not_found("KAZ", "Kazakhstan"),
_country_not_found("KEN", "Kenya"),
_country_not_found("KIR", "Kiribati"),
("PRK", "North Korea"),
_country_not_found("KOR", "Korea"),
_country_not_found("KWT", "Kuwait"),
_country_not_found("KGZ", "Kyrgyzstan"),
("LAO", "Laos"),
_country_not_found("LVA", "Latvia"),
_country_not_found("LBN", "Lebanon"),
_country_not_found("LSO", "Lesotho"),
_country_not_found("LBR", "Liberia"),
_country_not_found("LBY", "Libya"),
_country_not_found("LIE", "Liechtenstein"),
_country_not_found("LTU", "Lithuania"),
_country_not_found("LUX", "Luxembourg"),
_country_not_found("MAC", "Macao"),
_country_not_found("MDG", "Madagascar"),
_country_not_found("MWI", "Malawi"),
_country_not_found("MYS", "Malaysia"),
_country_not_found("MDV", "Maldives"),
_country_not_found("MLI", "Mali"),
_country_not_found("MLT", "Malta"),
_country_not_found("MHL", "Marshall Islands"),
_country_not_found("MTQ", "Martinique"),
_country_not_found("MRT", "Mauritania"),
_country_not_found("MUS", "Mauritius"),
_country_not_found("MYT", "Mayotte"),
_country_not_found("MEX", "Mexico"),
_country_not_found("FSM", "Micronesia"),
_country_not_found("MDA", "Moldova"),
_country_not_found("MCO", "Monaco"),
_country_not_found("MNG", "Mongolia"),
_country_not_found("MNE", "Montenegro"),
_country_not_found("MSR", "Montserrat"),
_country_not_found("MAR", "Morocco"),
_country_not_found("MOZ", "Mozambique"),
_country_not_found("MMR", "Myanmar (Burma)"),
_country_not_found("NAM", "Namibia"),
_country_not_found("NRU", "Nauru"),
_country_not_found("NPL", "Nepal"),
_country_not_found("NLD", "Netherlands"),
_country_not_found("NCL", "New Caledonia"),
_country_not_found("NZL", "New Zealand"),
_country_not_found("NIC", "Nicaragua"),
_country_not_found("NER", "Niger"),
_country_not_found("NGA", "Nigeria"),
_country_not_found("NIU", "Niue"),
_country_not_found("NFK", "Norfolk Island"),
_country_not_found("MKD", "North Macedonia"),
_country_not_found("MNP", "Northern Mariana Islands"),
_country_not_found("NOR", "Norway"),
_country_not_found("OMN", "Oman"),
_country_not_found("PAK", "Pakistan"),
_country_not_found("PLW", "Palau"),
_country_not_found("PSE", "Occupied Palestinian Territories"),
_country_not_found("PAN", "Panama"),
("PNG", "Papua New Guinea"),
_country_not_found("PRY", "Paraguay"),
_country_not_found("PER", "Peru"),
_country_not_found("PHL", "Philippines"),
_country_not_found("PCN", "Pitcairn, Henderson, Ducie and Oeno Islands"),
_country_not_found("POL", "Poland"),
_country_not_found("PRT", "Portugal"),
_country_not_found("PRI", "Puerto Rico"),
_country_not_found("QAT", "Qatar"),
_country_not_found("ROU", "Romania"),
_country_not_found("RUS", "Russian Federation"),
_country_not_found("RWA", "Rwanda"),
_country_not_found("REU", "Réunion"),
_country_not_found("BLM", "Saint Barthélemy"),
_country_not_found("SHN", "Saint Helena, Ascension and Tristan da Cunha"),
_country_not_found("KNA", "St Kitts and Nevis"),
_country_not_found("LCA", "St Lucia"),
_country_not_found("MAF", "Saint-Martin (French part)"),
_country_not_found("SPM", "Saint Pierre and Miquelon"),
_country_not_found("VCT", "Saint Vincent"),
_country_not_found("WSM", "Samoa"),
_country_not_found("SMR", "San Marino"),
_country_not_found("STP", "Sao Tome and Principe"),
_country_not_found("SAU", "Saudi Arabia"),
_country_not_found("SEN", "Senegal"),
_country_not_found("SRB", "Serbia"),
_country_not_found("SYC", "Seychelles"),
_country_not_found("SLE", "Sierra Leone"),
_country_not_found("SGP", "Singapore"),
_country_not_found("SXM", "Sint Maarten (Dutch part)"),
_country_not_found("SVK", "Slovakia"),
_country_not_found("SVN", "Slovenia"),
_country_not_found("SLB", "Solomon Islands"),
_country_not_found("SOM", "Somalia"),
_country_not_found("ZAF", "South Africa"),
_country_not_found("SGS", "South Georgia and South Sandwich Islands"),
_country_not_found("SSD", "South Sudan"),
_country_not_found("ESP", "Spain"),
_country_not_found("LKA", "Sri Lanka"),
_country_not_found("SDN", "Sudan"),
_country_not_found("SUR", "Suriname"),
_country_not_found("SJM", "Svalbard and Jan Mayen"),
_country_not_found("SWE", "Sweden"),
_country_not_found("CHE", "Switzerland"),
_country_not_found("SYR", "Syrian Arab Republic"),
_country_not_found("TWN", "Taiwan"),
_country_not_found("TJK", "Tajikistan"),
_country_not_found("TZA", "Tanzania"),
_country_not_found("THA", "Thailand"),
_country_not_found("TLS", "East Timor"),
_country_not_found("TGO", "Togo"),
_country_not_found("TKL", "Tokelau"),
_country_not_found("TON", "Tonga"),
_country_not_found("TTO", "Trinidad and Tobago"),
_country_not_found("TUN", "Tunisia"),
_country_not_found("TUR", "Turkey"),
_country_not_found("TKM", "Turkmenistan"),
_country_not_found("TCA", "Turks and Caicos Islands"),
_country_not_found("TUV", "Tuvalu"),
_country_not_found("UGA", "Uganda"),
_country_not_found("UKR", "Ukraine"),
_country_not_found("ARE", "United Arab Emirates"),
("GBR", "United Kingdom"),
_country_not_found("UMI", "United States Minor Outlying Islands"),
("USA", "United States"),
_country_not_found("URY", "Uruguay"),
_country_not_found("UZB", "Uzbekistan"),
_country_not_found("VUT", "Vanuatu"),
_country_not_found("VEN", "Venezuela"),
_country_not_found("VNM", "Vietnam"),
_country_not_found("VGB", "British Virgin Islands"),
_country_not_found("VIR", "United States Virgin Islands"),
_country_not_found("WLF", "Wallis and Futuna"),
_country_not_found("ESH", "Western Sahara"),
_country_not_found("YEM", "Yemen"),
_country_not_found("ZMB", "Zambia"),
_country_not_found("ZWE", "Zimbabwe"),
_country_not_found("ALA", "Åland Islands"),
),
)
def test_iso_alpha_3_country_codes(alpha_3, expected_name):
    """Each ISO alpha-3 code in the table resolves to its expected canonical name."""
    country = Country(alpha_3)
    assert country.canonical_name == expected_name
| mit | 5b76471754272f7b11027ceb4bf0c235 | 37.868821 | 82 | 0.501492 | 2.904945 | false | false | false | false |
influxdata/influxdb-python | examples/tutorial.py | 2 | 2334 | # -*- coding: utf-8 -*-
"""Tutorial on using the InfluxDB client."""
import argparse
from influxdb import InfluxDBClient
def main(host='localhost', port=8086):
    """Instantiate a connection to the InfluxDB."""
    # Admin credentials and the demo database/user the tutorial operates on.
    user = 'root'
    password = 'root'
    dbname = 'example'
    dbuser = 'smly'
    dbuser_password = 'my_secret_password'

    # Two read queries: a plain one and one using a bound parameter.
    query = 'select Float_value from cpu_load_short;'
    query_where = 'select Int_value from cpu_load_short where host=$host;'
    bind_params = {'host': 'server01'}

    # A single sample point demonstrating each supported field type.
    point = {
        "measurement": "cpu_load_short",
        "tags": {
            "host": "server01",
            "region": "us-west"
        },
        "time": "2009-11-10T23:00:00Z",
        "fields": {
            "Float_value": 0.64,
            "Int_value": 3,
            "String_value": "Text",
            "Bool_value": True
        }
    }
    json_body = [point]

    client = InfluxDBClient(host, port, user, password, dbname)

    print("Create database: " + dbname)
    client.create_database(dbname)

    print("Create a retention policy")
    client.create_retention_policy('awesome_policy', '3d', 3, default=True)

    print("Switch user: " + dbuser)
    client.switch_user(dbuser, dbuser_password)

    print("Write points: {0}".format(json_body))
    client.write_points(json_body)

    print("Querying data: " + query)
    result = client.query(query)
    print("Result: {0}".format(result))

    print("Querying data: " + query_where)
    result = client.query(query_where, bind_params=bind_params)
    print("Result: {0}".format(result))

    # Switch back to the admin user before tearing the demo database down.
    print("Switch user: " + user)
    client.switch_user(user, password)

    print("Drop database: " + dbname)
    client.drop_database(dbname)
def parse_args():
    """Parse the args."""
    arg_parser = argparse.ArgumentParser(
        description='example code to play with InfluxDB')
    arg_parser.add_argument('--host', type=str, required=False,
                            default='localhost',
                            help='hostname of InfluxDB http API')
    arg_parser.add_argument('--port', type=int, required=False, default=8086,
                            help='port of InfluxDB http API')
    return arg_parser.parse_args()
if __name__ == '__main__':
    # Entry point: read host/port from the command line and run the tutorial.
    args = parse_args()
    main(host=args.host, port=args.port)
| mit | a6dec1121156be7dba8b394be46e2d4a | 27.814815 | 75 | 0.577978 | 3.669811 | false | false | false | false |
alphagov/notifications-utils | tests/test_letter_timings.py | 1 | 12684 | from datetime import datetime
import pytest
import pytz
from freezegun import freeze_time
from notifications_utils.letter_timings import (
get_letter_timings,
letter_can_be_cancelled,
)
@freeze_time("2017-07-14 13:59:59") # Friday, before print deadline (3PM BST)
@pytest.mark.parametrize(
(
"upload_time, "
"expected_print_time, "
"is_printed, "
"first_class, "
"expected_earliest_second_class, "
"expected_latest_second_class, "
"expected_earliest_europe, "
"expected_latest_europe, "
"expected_earliest_rest_of_world, "
"expected_latest_rest_of_world, "
),
[
# BST
# ==================================================================
# First thing Monday
(
"Monday 2017-07-10 00:00:01",
"Tuesday 2017-07-11 15:00",
True,
"Wednesday 2017-07-12 16:00",
"Thursday 2017-07-13 16:00",
"Friday 2017-07-14 16:00",
"Saturday 2017-07-15 16:00",
"Tuesday 2017-07-18 16:00",
"Tuesday 2017-07-18 16:00",
"Thursday 2017-07-20 16:00",
),
# Monday at 17:29 BST (sent on monday)
(
"Monday 2017-07-10 16:29:59",
"Tuesday 2017-07-11 15:00",
True,
"Wednesday 2017-07-12 16:00",
"Thursday 2017-07-13 16:00",
"Friday 2017-07-14 16:00",
"Saturday 2017-07-15 16:00",
"Tuesday 2017-07-18 16:00",
"Tuesday 2017-07-18 16:00",
"Thursday 2017-07-20 16:00",
),
# Monday at 17:30 BST (sent on tuesday)
(
"Monday 2017-07-10 16:30:01",
"Wednesday 2017-07-12 15:00",
True,
"Thursday 2017-07-13 16:00",
"Friday 2017-07-14 16:00",
"Saturday 2017-07-15 16:00",
"Monday 2017-07-17 16:00",
"Wednesday 2017-07-19 16:00",
"Wednesday 2017-07-19 16:00",
"Friday 2017-07-21 16:00",
),
# Tuesday before 17:30 BST
(
"Tuesday 2017-07-11 12:00:00",
"Wednesday 2017-07-12 15:00",
True,
"Thursday 2017-07-13 16:00",
"Friday 2017-07-14 16:00",
"Saturday 2017-07-15 16:00",
"Monday 2017-07-17 16:00",
"Wednesday 2017-07-19 16:00",
"Wednesday 2017-07-19 16:00",
"Friday 2017-07-21 16:00",
),
# Wednesday before 17:30 BST
(
"Wednesday 2017-07-12 12:00:00",
"Thursday 2017-07-13 15:00",
True,
"Friday 2017-07-14 16:00",
"Saturday 2017-07-15 16:00",
"Monday 2017-07-17 16:00",
"Tuesday 2017-07-18 16:00",
"Thursday 2017-07-20 16:00",
"Thursday 2017-07-20 16:00",
"Saturday 2017-07-22 16:00",
),
# Thursday before 17:30 BST
(
"Thursday 2017-07-13 12:00:00",
"Friday 2017-07-14 15:00",
False,
"Saturday 2017-07-15 16:00",
"Monday 2017-07-17 16:00",
"Tuesday 2017-07-18 16:00",
"Wednesday 2017-07-19 16:00",
"Friday 2017-07-21 16:00",
"Friday 2017-07-21 16:00",
"Monday 2017-07-24 16:00",
),
# Friday anytime
(
"Friday 2017-07-14 00:00:00",
"Monday 2017-07-17 15:00",
False,
"Tuesday 2017-07-18 16:00",
"Wednesday 2017-07-19 16:00",
"Thursday 2017-07-20 16:00",
"Friday 2017-07-21 16:00",
"Monday 2017-07-24 16:00",
"Monday 2017-07-24 16:00",
"Wednesday 2017-07-26 16:00",
),
(
"Friday 2017-07-14 12:00:00",
"Monday 2017-07-17 15:00",
False,
"Tuesday 2017-07-18 16:00",
"Wednesday 2017-07-19 16:00",
"Thursday 2017-07-20 16:00",
"Friday 2017-07-21 16:00",
"Monday 2017-07-24 16:00",
"Monday 2017-07-24 16:00",
"Wednesday 2017-07-26 16:00",
),
(
"Friday 2017-07-14 22:00:00",
"Monday 2017-07-17 15:00",
False,
"Tuesday 2017-07-18 16:00",
"Wednesday 2017-07-19 16:00",
"Thursday 2017-07-20 16:00",
"Friday 2017-07-21 16:00",
"Monday 2017-07-24 16:00",
"Monday 2017-07-24 16:00",
"Wednesday 2017-07-26 16:00",
),
# Saturday anytime
(
"Saturday 2017-07-14 12:00:00",
"Monday 2017-07-17 15:00",
False,
"Tuesday 2017-07-18 16:00",
"Wednesday 2017-07-19 16:00",
"Thursday 2017-07-20 16:00",
"Friday 2017-07-21 16:00",
"Monday 2017-07-24 16:00",
"Monday 2017-07-24 16:00",
"Wednesday 2017-07-26 16:00",
),
# Sunday before 1730 BST
(
"Sunday 2017-07-15 15:59:59",
"Monday 2017-07-17 15:00",
False,
"Tuesday 2017-07-18 16:00",
"Wednesday 2017-07-19 16:00",
"Thursday 2017-07-20 16:00",
"Friday 2017-07-21 16:00",
"Monday 2017-07-24 16:00",
"Monday 2017-07-24 16:00",
"Wednesday 2017-07-26 16:00",
),
# Sunday after 17:30 BST
(
"Sunday 2017-07-16 16:30:01",
"Tuesday 2017-07-18 15:00",
False,
"Wednesday 2017-07-19 16:00",
"Thursday 2017-07-20 16:00",
"Friday 2017-07-21 16:00",
"Saturday 2017-07-22 16:00",
"Tuesday 2017-07-25 16:00",
"Tuesday 2017-07-25 16:00",
"Thursday 2017-07-27 16:00",
),
# GMT
# ==================================================================
# Monday at 17:29 GMT
(
"Monday 2017-01-02 17:29:59",
"Tuesday 2017-01-03 15:00",
True,
"Wednesday 2017-01-04 16:00",
"Thursday 2017-01-05 16:00",
"Friday 2017-01-06 16:00",
"Saturday 2017-01-07 16:00",
"Tuesday 2017-01-10 16:00",
"Tuesday 2017-01-10 16:00",
"Thursday 2017-01-12 16:00",
),
# Monday at 17:00 GMT
(
"Monday 2017-01-02 17:30:01",
"Wednesday 2017-01-04 15:00",
True,
"Thursday 2017-01-05 16:00",
"Friday 2017-01-06 16:00",
"Saturday 2017-01-07 16:00",
"Monday 2017-01-09 16:00",
"Wednesday 2017-01-11 16:00",
"Wednesday 2017-01-11 16:00",
"Friday 2017-01-13 16:00",
),
# Over Easter bank holiday weekend
(
"Thursday 2020-04-09 16:29:59",
"Tuesday 2020-04-14 15:00",
False,
"Wednesday 2020-04-15 16:00",
"Thursday 2020-04-16 16:00",
"Friday 2020-04-17 16:00",
"Saturday 2020-04-18 16:00",
"Tuesday 2020-04-21 16:00",
"Tuesday 2020-04-21 16:00",
"Thursday 2020-04-23 16:00",
),
],
)
def test_get_estimated_delivery_date_for_letter(
    upload_time,
    expected_print_time,
    is_printed,
    first_class,
    expected_earliest_second_class,
    expected_latest_second_class,
    expected_earliest_europe,
    expected_latest_europe,
    expected_earliest_rest_of_world,
    expected_latest_rest_of_world,
):
    """Check printed-by time and delivery window for every postage class.

    The parametrize data above supplies expectations as 'Day YYYY-MM-DD HH:MM'
    strings in UK local time; first class has a single-day delivery window.
    """
    def format_dt(dt):
        # Render in UK local time so expectations can be written as BST/GMT.
        return dt.astimezone(pytz.timezone("Europe/London")).strftime("%A %Y-%m-%d %H:%M")

    # remove the day string from the upload_time, which is purely informational
    upload_time = upload_time.split(" ", 1)[1]

    # (earliest, latest) expected delivery window per postage class.
    expected_windows = {
        "second": (expected_earliest_second_class, expected_latest_second_class),
        "first": (first_class, first_class),
        "europe": (expected_earliest_europe, expected_latest_europe),
        "rest-of-world": (expected_earliest_rest_of_world, expected_latest_rest_of_world),
    }

    for postage, (expected_earliest, expected_latest) in expected_windows.items():
        timings = get_letter_timings(upload_time, postage=postage)
        # The print deadline and printed-state do not depend on postage.
        assert format_dt(timings.printed_by) == expected_print_time
        assert timings.is_printed == is_printed
        assert format_dt(timings.earliest_delivery) == expected_earliest
        assert format_dt(timings.latest_delivery) == expected_latest
def test_letter_timings_only_accept_real_postage_values():
    """An unknown postage class is rejected with a KeyError."""
    now = datetime.utcnow().isoformat()
    with pytest.raises(KeyError):
        get_letter_timings(now, postage="foo")
@pytest.mark.parametrize("status", ["sending", "pending"])
def test_letter_cannot_be_cancelled_if_letter_status_is_not_created_or_pending_virus_check(status):
    """Statuses other than 'created'/'pending-virus-check' are never cancellable."""
    assert not letter_can_be_cancelled(status, datetime.utcnow())
@freeze_time("2018-7-7 16:00:00")
@pytest.mark.parametrize(
    "notification_created_at",
    [
        datetime(2018, 7, 6, 18, 0),  # created yesterday after 1730
        datetime(2018, 7, 7, 12, 0),  # created today
    ],
)
def test_letter_can_be_cancelled_if_before_1730_and_letter_created_before_1730(notification_created_at):
    """Before today's 17:30 deadline, letters created since yesterday's deadline can still be cancelled."""
    assert letter_can_be_cancelled("pending-virus-check", notification_created_at)
@freeze_time("2017-12-12 17:30:00")
@pytest.mark.parametrize(
    "notification_created_at",
    [
        datetime(2017, 12, 12, 17, 0),
        datetime(2017, 12, 12, 17, 30),
    ],
)
def test_letter_cannot_be_cancelled_if_1730_exactly_and_letter_created_at_or_before_1730(notification_created_at):
    """At exactly 17:30 the window has already shut for letters created by then."""
    assert not letter_can_be_cancelled("pending-virus-check", notification_created_at)
@freeze_time("2018-7-7 19:00:00")
@pytest.mark.parametrize(
    "notification_created_at",
    [
        datetime(2018, 7, 6, 18, 0),  # created yesterday after 1730
        datetime(2018, 7, 7, 12, 0),  # created today before 1730
    ],
)
def test_letter_cannot_be_cancelled_if_after_1730_and_letter_created_before_1730(notification_created_at):
    """Once today's 17:30 deadline has passed, earlier letters can no longer be cancelled."""
    assert not letter_can_be_cancelled("created", notification_created_at)
@freeze_time("2018-7-7 15:00:00")
def test_letter_cannot_be_cancelled_if_before_1730_and_letter_created_before_1730_yesterday():
    """A letter created before yesterday's deadline is past cancellation."""
    assert not letter_can_be_cancelled("created", datetime(2018, 7, 6, 14, 0))
@freeze_time("2018-7-7 15:00:00")
def test_letter_cannot_be_cancelled_if_before_1730_and_letter_created_after_1730_two_days_ago():
    """A letter created after the deadline two days ago is past cancellation."""
    assert not letter_can_be_cancelled("created", datetime(2018, 7, 5, 19, 0))
@freeze_time("2018-7-7 19:00:00")
@pytest.mark.parametrize(
    "notification_created_at",
    [
        datetime(2018, 7, 7, 17, 30),
        datetime(2018, 7, 7, 18, 0),
    ],
)
def test_letter_can_be_cancelled_if_after_1730_and_letter_created_at_1730_today_or_later(notification_created_at):
    """Letters created at or after today's deadline remain cancellable."""
    assert letter_can_be_cancelled("created", notification_created_at)
@freeze_time("2018-7-7 10:00:00")
@pytest.mark.parametrize(
    "notification_created_at",
    [
        datetime(2018, 7, 6, 20, 30),  # yesterday after deadline
        datetime(2018, 7, 6, 23, 30),  # this morning after deadline but yesterday in UTC
        datetime(2018, 7, 7, 3, 30),  # this morning after deadline, and today in UTC
    ],
)
def test_letter_can_be_cancelled_always_compares_in_bst(notification_created_at):
    """The deadline comparison happens in UK local time, not naive UTC."""
    status = "created"
    assert letter_can_be_cancelled(status, notification_created_at)
| mit | d39dc897698e47bc9d5f9644dae22109 | 33.655738 | 114 | 0.562835 | 3.187736 | false | true | false | false |
wbond/asn1crypto | tests/test_cms.py | 1 | 36612 | # coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
import unittest
import os
import zlib
import sys
from datetime import datetime
from asn1crypto import cms, util
from ._unittest_compat import patch
patch()
# Python 2's byte string type is named ``str``; normalise the name so
# isinstance checks against raw bytes work on both major versions.
if sys.version_info < (3,):
    byte_cls = str
else:
    byte_cls = bytes

# Test fixtures (DER files, keys) live in a ``fixtures`` folder next to the tests.
tests_root = os.path.dirname(__file__)
fixtures_dir = os.path.join(tests_root, 'fixtures')
class ClearanceTests(unittest.TestCase):
    """Decoding of the RFC 3281 vs RFC 5755 Clearance structures."""

    def test_clearance_decode_bad_tagging(self):
        # This test documents the fact that we can't deal with the "wrong"
        # version of Clearance in RFC 3281
        rfc_3281_wrong_tagging = b'\x30\x08\x80\x02\x88\x37\x81\x02\x02\x4c'
        with self.assertRaises(ValueError):
            cms.Clearance.load(rfc_3281_wrong_tagging).native

    def test_clearance_decode_correct_tagging(self):
        correct_tagging = b'\x30\x08\x06\x02\x88\x37\x03\x02\x02\x4c'
        decoded = cms.Clearance.load(correct_tagging).native
        expected = util.OrderedDict([
            ('policy_id', '2.999'),
            ('class_list', set(['secret', 'top_secret', 'unclassified'])),
            ('security_categories', None),
        ])
        self.assertEqual(expected, decoded)
class CMSTests(unittest.TestCase):
def test_create_content_info_data(self):
data = cms.SignedData({
'version': 'v1',
'encap_content_info': {
'content_type': 'data',
'content': b'Hello',
}
})
info = data['encap_content_info']
self.assertEqual('v1', data['version'].native)
self.assertEqual(
'data',
info['content_type'].native
)
self.assertEqual(
b'Hello',
info['content'].native
)
self.assertIsInstance(info, cms.ContentInfo)
def test_create_content_info_data_v2(self):
data = cms.SignedData({
'version': 'v2',
'encap_content_info': {
'content_type': 'data',
'content': b'Hello',
}
})
info = data['encap_content_info']
self.assertEqual('v2', data['version'].native)
self.assertEqual(
'data',
info['content_type'].native
)
self.assertEqual(
b'Hello',
info['content'].native
)
self.assertIsInstance(info, cms.EncapsulatedContentInfo)
def test_parse_content_info_data(self):
with open(os.path.join(fixtures_dir, 'message.der'), 'rb') as f:
info = cms.ContentInfo.load(f.read())
self.assertEqual(
'data',
info['content_type'].native
)
self.assertEqual(
b'This is the message to encapsulate in PKCS#7/CMS\r\n',
info['content'].native
)
    def test_parse_content_info_compressed_data(self):
        """Parse a CMS compressed-data fixture and verify every decoded field,
        including that .decompressed inflates the zlib payload correctly."""
        with open(os.path.join(fixtures_dir, 'cms-compressed.der'), 'rb') as f:
            info = cms.ContentInfo.load(f.read())
        compressed_data = info['content']
        self.assertEqual(
            'compressed_data',
            info['content_type'].native
        )
        self.assertEqual(
            'v0',
            compressed_data['version'].native
        )
        self.assertEqual(
            'zlib',
            compressed_data['compression_algorithm']['algorithm'].native
        )
        self.assertEqual(
            None,
            compressed_data['compression_algorithm']['parameters'].native
        )
        self.assertEqual(
            'data',
            compressed_data['encap_content_info']['content_type'].native
        )
        # The raw (still-compressed) zlib stream inside the structure
        self.assertEqual(
            b'\x78\x9C\x0B\xC9\xC8\x2C\x56\x00\xA2\x92\x8C\x54\x85\xDC\xD4\xE2\xE2\xC4\xF4\x54\x85\x92\x7C\x85\xD4\xBC'
            b'\xE4\xC4\x82\xE2\xD2\x9C\xC4\x92\x54\x85\xCC\x3C\x85\x00\x6F\xE7\x60\x65\x73\x7D\x67\xDF\x60\x2E\x00\xB5'
            b'\xCF\x10\x71',
            compressed_data['encap_content_info']['content'].native
        )
        # The helper property should inflate back to the original message
        self.assertEqual(
            b'This is the message to encapsulate in PKCS#7/CMS\n',
            compressed_data.decompressed
        )
def test_parse_content_info_indefinite(self):
with open(os.path.join(fixtures_dir, 'meca2_compressed.der'), 'rb') as f:
info = cms.ContentInfo.load(f.read())
compressed_data = info['content']
self.assertEqual(
'compressed_data',
info['content_type'].native
)
self.assertEqual(
'v0',
compressed_data['version'].native
)
self.assertEqual(
'zlib',
compressed_data['compression_algorithm']['algorithm'].native
)
self.assertEqual(
None,
compressed_data['compression_algorithm']['parameters'].native
)
self.assertEqual(
'data',
compressed_data['encap_content_info']['content_type'].native
)
data = compressed_data['encap_content_info']['content'].native
self.assertIsInstance(zlib.decompress(data), byte_cls)
    def test_parse_content_info_digested_data(self):
        """Parse a CMS digested-data fixture and verify the decoded fields,
        including the 20-byte SHA-1 digest over the encapsulated content."""
        with open(os.path.join(fixtures_dir, 'cms-digested.der'), 'rb') as f:
            info = cms.ContentInfo.load(f.read())
        digested_data = info['content']
        self.assertEqual(
            'digested_data',
            info['content_type'].native
        )
        self.assertEqual(
            'v0',
            digested_data['version'].native
        )
        self.assertEqual(
            'sha1',
            digested_data['digest_algorithm']['algorithm'].native
        )
        self.assertEqual(
            None,
            digested_data['digest_algorithm']['parameters'].native
        )
        self.assertEqual(
            'data',
            digested_data['encap_content_info']['content_type'].native
        )
        self.assertEqual(
            b'This is the message to encapsulate in PKCS#7/CMS\n',
            digested_data['encap_content_info']['content'].native
        )
        # 20-byte SHA-1 digest of the content above
        self.assertEqual(
            b'\x53\xC9\xDB\xC1\x6D\xDB\x34\x3B\x28\x4E\xEF\xA6\x03\x0E\x02\x64\x79\x31\xAF\xFB',
            digested_data['digest'].native
        )
    def test_parse_content_info_encrypted_data(self):
        """Parse a CMS encrypted-data fixture and verify the decoded fields,
        including the AES-128-CBC algorithm helpers (cipher, mode, key and
        block sizes) and the 16-byte IV carried in the parameters."""
        with open(os.path.join(fixtures_dir, 'cms-encrypted.der'), 'rb') as f:
            info = cms.ContentInfo.load(f.read())
        encrypted_data = info['content']
        encrypted_content_info = encrypted_data['encrypted_content_info']
        self.assertEqual(
            'encrypted_data',
            info['content_type'].native
        )
        self.assertEqual(
            'v0',
            encrypted_data['version'].native
        )
        self.assertEqual(
            'data',
            encrypted_content_info['content_type'].native
        )
        self.assertEqual(
            'aes128_cbc',
            encrypted_content_info['content_encryption_algorithm']['algorithm'].native
        )
        self.assertEqual(
            'aes',
            encrypted_content_info['content_encryption_algorithm'].encryption_cipher
        )
        self.assertEqual(
            'cbc',
            encrypted_content_info['content_encryption_algorithm'].encryption_mode
        )
        self.assertEqual(
            16,
            encrypted_content_info['content_encryption_algorithm'].key_length
        )
        self.assertEqual(
            16,
            encrypted_content_info['content_encryption_algorithm'].encryption_block_size
        )
        # 16-byte initialization vector stored in the algorithm parameters
        self.assertEqual(
            b'\x1F\x34\x54\x9F\x7F\xB7\x06\xBD\x81\x57\x68\x84\x79\xB5\x2F\x6F',
            encrypted_content_info['content_encryption_algorithm']['parameters'].native
        )
        self.assertEqual(
            b'\x80\xEE\x34\x8B\xFC\x04\x69\x4F\xBE\x15\x1C\x0C\x39\x2E\xF3\xEA\x8E\xEE\x17\x0D\x39\xC7\x4B\x6C\x4B'
            b'\x13\xEF\x17\x82\x0D\xED\xBA\x6D\x2F\x3B\xAB\x4E\xEB\xF0\xDB\xD9\x6E\x1C\xC2\x3C\x1C\x4C\xFA\xF3\x98'
            b'\x9B\x89\xBD\x48\x77\x07\xE2\x6B\x71\xCF\xB7\xFF\xCE\xA5',
            encrypted_content_info['encrypted_content'].native
        )
    def test_parse_content_info_enveloped_data(self):
        """Parse a CMS enveloped-data fixture and verify the decoded fields:
        the single recipient (identified by issuer + serial number, key wrapped
        with RSAES-PKCS1-v1.5) and the 3DES-CBC encrypted content info."""
        with open(os.path.join(fixtures_dir, 'cms-enveloped.der'), 'rb') as f:
            info = cms.ContentInfo.load(f.read())
        enveloped_data = info['content']
        encrypted_content_info = enveloped_data['encrypted_content_info']
        recipient = enveloped_data['recipient_infos'][0].chosen
        self.assertEqual(
            'enveloped_data',
            info['content_type'].native
        )
        self.assertEqual(
            'v0',
            enveloped_data['version'].native
        )
        self.assertEqual(
            None,
            enveloped_data['originator_info'].native
        )
        self.assertEqual(
            1,
            len(enveloped_data['recipient_infos'])
        )
        self.assertEqual(
            'v0',
            recipient['version'].native
        )
        # Recipient is identified by the certificate issuer + serial number
        self.assertEqual(
            util.OrderedDict([
                (
                    'issuer',
                    util.OrderedDict([
                        ('country_name', 'US'),
                        ('state_or_province_name', 'Massachusetts'),
                        ('locality_name', 'Newbury'),
                        ('organization_name', 'Codex Non Sufficit LC'),
                        ('organizational_unit_name', 'Testing'),
                        ('common_name', 'Will Bond'),
                        ('email_address', 'will@codexns.io'),
                    ])
                ),
                (
                    'serial_number',
                    13683582341504654466
                )
            ]),
            recipient['rid'].native
        )
        self.assertEqual(
            'rsaes_pkcs1v15',
            recipient['key_encryption_algorithm']['algorithm'].native
        )
        self.assertEqual(
            None,
            recipient['key_encryption_algorithm']['parameters'].native
        )
        # RSA-wrapped content-encryption key
        self.assertEqual(
            b'\x97\x0A\xFD\x3B\x5C\x27\x45\x69\xCC\xDD\x45\x9E\xA7\x3C\x07\x27\x35\x16\x20\x21\xE4\x6E\x1D\xF8'
            b'\x5B\xE8\x7F\xD8\x40\x41\xE9\xF2\x92\xCD\xC8\xC5\x03\x95\xEC\x6C\x0B\x97\x71\x87\x86\x3C\xEB\x68'
            b'\x84\x06\x4E\xE6\xD0\xC4\x7D\x32\xFE\xA6\x06\xC9\xD5\xE1\x8B\xDA\xBF\x96\x5C\x20\x15\x49\x64\x7A'
            b'\xA2\x4C\xFF\x8B\x0D\xEA\x76\x35\x9B\x7C\x43\xF7\x21\x95\x26\xE7\x70\x30\x98\x5F\x0D\x5E\x4A\xCB'
            b'\xAD\x47\xDF\x46\xDA\x1F\x0E\xE2\xFE\x3A\x40\xD9\xF2\xDC\x0C\x97\xD9\x91\xED\x34\x8D\xF3\x73\xB0'
            b'\x90\xF9\xDD\x31\x4D\x37\x93\x81\xD3\x92\xCB\x72\x4A\xD6\x9D\x01\x82\x85\xD5\x1F\xE2\xAA\x32\x12'
            b'\x82\x4E\x17\xF6\xAA\x58\xDE\xBD\x1B\x80\xAF\x61\xF1\x8A\xD1\x7F\x9D\x41\x6A\xC0\xE4\xC7\x7E\x17'
            b'\xDC\x94\x33\xE9\x74\x7E\xE9\xF8\x5C\x30\x87\x9B\xD6\xF0\xE3\x4A\xB7\xE3\xCC\x51\x8A\xD4\x37\xF1'
            b'\xF9\x33\xB5\xD6\x1F\x36\xC1\x6F\x91\xA8\x5F\xE2\x6B\x08\xC7\x9D\xE8\xFD\xDC\xE8\x78\xE0\xC0\xC7'
            b'\xCF\xC5\xEE\x60\xEC\x54\xFF\x1A\x9C\xF7\x4E\x2C\xD0\x88\xDC\xC2\x1F\xDC\x8A\x37\x9B\x71\x20\xFF'
            b'\xFD\x6C\xE5\xBA\x8C\xDF\x0E\x3F\x20\xC6\xCB\x08\xA7\x07\xDB\x83',
            recipient['encrypted_key'].native
        )
        self.assertEqual(
            'data',
            encrypted_content_info['content_type'].native
        )
        self.assertEqual(
            'tripledes_3key',
            encrypted_content_info['content_encryption_algorithm']['algorithm'].native
        )
        self.assertEqual(
            'tripledes',
            encrypted_content_info['content_encryption_algorithm'].encryption_cipher
        )
        self.assertEqual(
            'cbc',
            encrypted_content_info['content_encryption_algorithm'].encryption_mode
        )
        self.assertEqual(
            24,
            encrypted_content_info['content_encryption_algorithm'].key_length
        )
        self.assertEqual(
            8,
            encrypted_content_info['content_encryption_algorithm'].encryption_block_size
        )
        # 8-byte IV for 3DES-CBC
        self.assertEqual(
            b'\x52\x50\x98\xFA\x33\x88\xC7\x3C',
            encrypted_content_info['content_encryption_algorithm']['parameters'].native
        )
        self.assertEqual(
            b'\xDC\x88\x55\x08\xE5\x67\x70\x49\x99\x54\xFD\xF8\x40\x7C\x38\xD5\x78\x1D\x6A\x95\x6D\x1E\xC4\x12'
            b'\x39\xFE\xC0\x76\xDC\xF5\x79\x1A\x69\xA1\xB9\x40\x1E\xCF\xC8\x79\x3E\xF3\x38\xB4\x90\x00\x27\xD1'
            b'\xB5\x64\xAB\x99\x51\x13\xF1\x0A',
            encrypted_content_info['encrypted_content'].native
        )
        self.assertEqual(
            None,
            enveloped_data['unprotected_attrs'].native
        )
    def test_parse_content_info_cms_signed_data(self):
        """Parse a CMS signed-data fixture (\\r\\n line endings) and verify the
        decoded fields: digest algorithms, encapsulated content, embedded
        certificate, and the single signer's attributes and signature."""
        with open(os.path.join(fixtures_dir, 'cms-signed.der'), 'rb') as f:
            info = cms.ContentInfo.load(f.read())
        signed_data = info['content']
        encap_content_info = signed_data['encap_content_info']
        self.assertEqual(
            'signed_data',
            info['content_type'].native
        )
        self.assertEqual(
            'v1',
            signed_data['version'].native
        )
        self.assertEqual(
            [
                util.OrderedDict([
                    ('algorithm', 'sha256'),
                    ('parameters', None),
                ])
            ],
            signed_data['digest_algorithms'].native
        )
        self.assertEqual(
            'data',
            encap_content_info['content_type'].native
        )
        self.assertEqual(
            b'This is the message to encapsulate in PKCS#7/CMS\r\n',
            encap_content_info['content'].native
        )
        # The embedded signing certificate should round-trip byte-for-byte
        self.assertEqual(
            1,
            len(signed_data['certificates'])
        )
        certificate = signed_data['certificates'][0]
        with open(os.path.join(fixtures_dir, 'keys/test-der.crt'), 'rb') as f:
            self.assertEqual(
                f.read(),
                certificate.dump()
            )
        self.assertEqual(
            1,
            len(signed_data['signer_infos'])
        )
        signer = signed_data['signer_infos'][0]
        self.assertEqual(
            'v1',
            signer['version'].native
        )
        # Signer is identified by issuer + serial number
        self.assertEqual(
            util.OrderedDict([
                (
                    'issuer',
                    util.OrderedDict([
                        ('country_name', 'US'),
                        ('state_or_province_name', 'Massachusetts'),
                        ('locality_name', 'Newbury'),
                        ('organization_name', 'Codex Non Sufficit LC'),
                        ('organizational_unit_name', 'Testing'),
                        ('common_name', 'Will Bond'),
                        ('email_address', 'will@codexns.io'),
                    ])
                ),
                (
                    'serial_number',
                    13683582341504654466
                )
            ]),
            signer['sid'].native
        )
        self.assertEqual(
            util.OrderedDict([
                ('algorithm', 'sha256'),
                ('parameters', None),
            ]),
            signer['digest_algorithm'].native
        )
        # Three signed attributes: content-type, signing-time, message-digest
        signed_attrs = signer['signed_attrs']
        self.assertEqual(
            3,
            len(signed_attrs)
        )
        self.assertEqual(
            'content_type',
            signed_attrs[0]['type'].native
        )
        self.assertEqual(
            'data',
            signed_attrs[0]['values'][0].native
        )
        self.assertEqual(
            'signing_time',
            signed_attrs[1]['type'].native
        )
        self.assertEqual(
            datetime(2015, 5, 30, 13, 12, 38, tzinfo=util.timezone.utc),
            signed_attrs[1]['values'][0].native
        )
        self.assertEqual(
            'message_digest',
            signed_attrs[2]['type'].native
        )
        self.assertEqual(
            b'\xA1\x30\xE2\x87\x90\x5A\x58\x15\x7A\x44\x54\x7A\xB9\xBC\xAE\xD3\x00\xF3\xEC\x3E\x97\xFF'
            b'\x03\x20\x79\x34\x9D\x62\xAA\x20\xA5\x1D',
            signed_attrs[2]['values'][0].native
        )
        self.assertEqual(
            util.OrderedDict([
                ('algorithm', 'rsassa_pkcs1v15'),
                ('parameters', None),
            ]),
            signer['signature_algorithm'].native
        )
        # 256-byte RSA signature value
        self.assertEqual(
            b'\xAC\x2F\xE3\x25\x39\x8F\xD3\xDF\x80\x4F\x0D\xBA\xB1\xEE\x99\x09\xA9\x21\xBB\xDF\x3C\x1E'
            b'\x70\xDA\xDF\xC4\x0F\x1D\x10\x29\xBC\x94\xBE\xF8\xA8\xC2\x2D\x2A\x1F\x14\xBC\x4A\x5B\x66'
            b'\x7F\x6F\xE4\xDF\x82\x4D\xD9\x3F\xEB\x89\xAA\x05\x1A\xE5\x58\xCE\xC4\x33\x53\x6E\xE4\x66'
            b'\xF9\x21\xCF\x80\x35\x46\x88\xB5\x6A\xEA\x5C\x54\x49\x40\x31\xD6\xDC\x20\xD8\xA0\x63\x8C'
            b'\xC1\xC3\xA1\x72\x5D\x0D\xCE\x43\xB1\x5C\xD8\x32\x3F\xA9\xE7\xBB\xD9\x56\xAE\xE7\xFB\x7C'
            b'\x37\x32\x8B\x93\xC2\xC4\x47\xDD\x00\xFB\x1C\xEF\xC3\x68\x32\xDC\x06\x26\x17\x45\xF5\xB3'
            b'\xDC\xD8\x5C\x2B\xC1\x8B\x97\x93\xB8\xF1\x85\xE2\x92\x3B\xC4\x6A\x6A\x89\xC5\x14\x51\x4A'
            b'\x06\x11\x54\xB0\x29\x07\x75\xD8\xDF\x6B\xFB\x21\xE4\xA4\x09\x17\xAF\xAC\xA0\xF5\xC0\xFE'
            b'\x7B\x03\x04\x40\x41\x57\xC4\xFD\x58\x1D\x10\x5E\xAC\x23\xAB\xAA\x80\x95\x96\x02\x71\x84'
            b'\x9C\x0A\xBD\x54\xC4\xA2\x47\xAA\xE7\xC3\x09\x13\x6E\x26\x7D\x72\xAA\xA9\x0B\xF3\xCC\xC4'
            b'\x48\xB4\x97\x14\x00\x47\x2A\x6B\xD3\x93\x3F\xD8\xFD\xAA\xB9\xFB\xFB\xD5\x09\x8D\x82\x8B'
            b'\xDE\x0F\xED\x39\x6D\x7B\xDC\x76\x8B\xA6\x4E\x9B\x7A\xBA',
            signer['signature'].native
        )
    def test_parse_content_info_pkcs7_signed_data(self):
        """Parse a PKCS#7 signed-data fixture (\\n line endings) and verify the
        decoded fields. Unlike the CMS fixture, this signer carries four
        signed attributes; only the first three are asserted here."""
        with open(os.path.join(fixtures_dir, 'pkcs7-signed.der'), 'rb') as f:
            info = cms.ContentInfo.load(f.read())
        signed_data = info['content']
        encap_content_info = signed_data['encap_content_info']
        self.assertEqual(
            'signed_data',
            info['content_type'].native
        )
        self.assertEqual(
            'v1',
            signed_data['version'].native
        )
        self.assertEqual(
            [
                util.OrderedDict([
                    ('algorithm', 'sha256'),
                    ('parameters', None),
                ])
            ],
            signed_data['digest_algorithms'].native
        )
        self.assertEqual(
            'data',
            encap_content_info['content_type'].native
        )
        self.assertEqual(
            b'This is the message to encapsulate in PKCS#7/CMS\n',
            encap_content_info['content'].native
        )
        # The embedded signing certificate should round-trip byte-for-byte
        self.assertEqual(
            1,
            len(signed_data['certificates'])
        )
        certificate = signed_data['certificates'][0]
        with open(os.path.join(fixtures_dir, 'keys/test-der.crt'), 'rb') as f:
            self.assertEqual(
                f.read(),
                certificate.dump()
            )
        self.assertEqual(
            1,
            len(signed_data['signer_infos'])
        )
        signer = signed_data['signer_infos'][0]
        self.assertEqual(
            'v1',
            signer['version'].native
        )
        # Signer is identified by issuer + serial number
        self.assertEqual(
            util.OrderedDict([
                (
                    'issuer',
                    util.OrderedDict([
                        ('country_name', 'US'),
                        ('state_or_province_name', 'Massachusetts'),
                        ('locality_name', 'Newbury'),
                        ('organization_name', 'Codex Non Sufficit LC'),
                        ('organizational_unit_name', 'Testing'),
                        ('common_name', 'Will Bond'),
                        ('email_address', 'will@codexns.io'),
                    ])
                ),
                (
                    'serial_number',
                    13683582341504654466
                )
            ]),
            signer['sid'].native
        )
        self.assertEqual(
            util.OrderedDict([
                ('algorithm', 'sha256'),
                ('parameters', None),
            ]),
            signer['digest_algorithm'].native
        )
        # Four signed attributes here; the fourth is not asserted below
        signed_attrs = signer['signed_attrs']
        self.assertEqual(
            4,
            len(signed_attrs)
        )
        self.assertEqual(
            'content_type',
            signed_attrs[0]['type'].native
        )
        self.assertEqual(
            'data',
            signed_attrs[0]['values'][0].native
        )
        self.assertEqual(
            'signing_time',
            signed_attrs[1]['type'].native
        )
        self.assertEqual(
            datetime(2015, 6, 3, 5, 55, 12, tzinfo=util.timezone.utc),
            signed_attrs[1]['values'][0].native
        )
        self.assertEqual(
            'message_digest',
            signed_attrs[2]['type'].native
        )
        self.assertEqual(
            b'\x52\x88\x25\x47\x15\x5B\x2D\x50\x44\x68\x05\x24\xC8\x71\x5A\xCC\x62\x28\x36\x17\xB7\x68'
            b'\xEE\xA1\x12\x90\x96\x4F\x94\xAE\xDB\x79',
            signed_attrs[2]['values'][0].native
        )
        self.assertEqual(
            util.OrderedDict([
                ('algorithm', 'rsassa_pkcs1v15'),
                ('parameters', None),
            ]),
            signer['signature_algorithm'].native
        )
        # 256-byte RSA signature value
        self.assertEqual(
            b'\x43\x66\xEE\xF4\x6A\x02\x6F\xFE\x0D\xAE\xE6\xF3\x7A\x8F\x2C\x8E\x26\xB6\x25\x68\xEF\x5B'
            b'\x4B\x4F\x9C\xE4\xE6\x71\x42\x22\xEC\x97\xFC\x53\xD9\xD6\x36\x1F\xA1\x32\x35\xFF\xA9\x95'
            b'\x45\x50\x36\x36\x0C\x9A\x10\x6F\x06\xB6\x9D\x25\x10\x08\xF5\xF4\xE1\x68\x62\x60\xE5\xBF'
            b'\xBD\xE2\x9F\xBD\x8A\x10\x29\x3B\xAF\xE7\xD6\x55\x7C\xEE\x3B\xFB\x93\x42\xE0\xB4\x4F\x89'
            b'\xD0\x7B\x18\x51\x85\x90\x47\xF0\x5E\xE1\x15\x2C\xC1\x9A\xF1\x49\xE8\x11\x29\x17\x2E\x77'
            b'\xD3\x35\x10\xAA\xCD\x32\x07\x32\x74\xCF\x2D\x89\xBD\xEF\xC7\xC9\xE7\xEC\x90\x44\xCE\x0B'
            b'\xC5\x97\x00\x26\x67\x8A\x89\x5B\xFA\x46\xB2\x92\xD5\xCB\xA3\x52\x16\xDC\xF0\xF0\x79\xCB'
            b'\x90\x93\x8E\x26\xB3\xEB\x8F\xBD\x54\x06\xD6\xB0\xA0\x04\x47\x7C\x63\xFC\x88\x5A\xE3\x81'
            b'\xDF\x1E\x4D\x39\xFD\xF5\xA0\xE2\xD3\xAB\x13\xC1\xCF\x50\xB2\x0B\xC9\x36\xD6\xCB\xEA\x55'
            b'\x39\x97\x8E\x34\x47\xE3\x6B\x44\x4A\x0E\x03\xAF\x41\xB2\x47\x2E\x26\xA3\x6B\x5F\xA1\x5C'
            b'\x86\xA1\x96\x37\x02\xD3\x7C\x5F\xC1\xAF\x81\xE4\x1A\xD9\x87\x44\xB5\xB3\x5C\x45\x6C\xFF'
            b'\x97\x4C\x3A\xB4\x2F\x5C\x2F\x86\x15\x51\x71\xA6\x27\x68',
            signer['signature'].native
        )
def test_parse_cms_signed_date_indefinite_length(self):
    """SignedData encoded with BER indefinite lengths must still parse fully."""

    path = os.path.join(fixtures_dir, 'cms-signed-indefinite-length.der')
    with open(path, 'rb') as f:
        der_bytes = f.read()
    info = cms.ContentInfo.load(der_bytes)
    self.assertIsInstance(info['content'].native, util.OrderedDict)
def test_parse_content_info_cms_signed_digested_data(self):
    """
    Parses a CMS SignedData (version v2) whose encapsulated content is
    itself a DigestedData structure, checking every field against the
    fixture file.
    """

    with open(os.path.join(fixtures_dir, 'cms-signed-digested.der'), 'rb') as f:
        info = cms.ContentInfo.load(f.read())

    signed_data = info['content']
    encap_content_info = signed_data['encap_content_info']

    self.assertEqual(
        'signed_data',
        info['content_type'].native
    )
    self.assertEqual(
        'v2',
        signed_data['version'].native
    )
    self.assertEqual(
        [
            util.OrderedDict([
                ('algorithm', 'sha256'),
                ('parameters', None),
            ])
        ],
        signed_data['digest_algorithms'].native
    )
    self.assertEqual(
        'digested_data',
        encap_content_info['content_type'].native
    )
    # The encapsulated content parses into a complete DigestedData value
    self.assertEqual(
        util.OrderedDict([
            ('version', 'v0'),
            (
                'digest_algorithm',
                util.OrderedDict([
                    ('algorithm', 'sha1'),
                    ('parameters', None),
                ])
            ),
            (
                'encap_content_info',
                util.OrderedDict([
                    ('content_type', 'data'),
                    ('content', b'This is the message to encapsulate in PKCS#7/CMS\n'),
                ])
            ),
            (
                'digest',
                b'\x53\xC9\xDB\xC1\x6D\xDB\x34\x3B\x28\x4E\xEF\xA6\x03\x0E\x02\x64\x79\x31\xAF\xFB'
            )
        ]),
        encap_content_info['content'].native
    )
    self.assertEqual(
        1,
        len(signed_data['certificates'])
    )
    certificate = signed_data['certificates'][0]
    # The embedded certificate must round-trip byte-for-byte
    with open(os.path.join(fixtures_dir, 'keys/test-der.crt'), 'rb') as f:
        self.assertEqual(
            f.read(),
            certificate.dump()
        )
    self.assertEqual(
        1,
        len(signed_data['signer_infos'])
    )
    signer = signed_data['signer_infos'][0]
    self.assertEqual(
        'v1',
        signer['version'].native
    )
    self.assertEqual(
        util.OrderedDict([
            (
                'issuer',
                util.OrderedDict([
                    ('country_name', 'US'),
                    ('state_or_province_name', 'Massachusetts'),
                    ('locality_name', 'Newbury'),
                    ('organization_name', 'Codex Non Sufficit LC'),
                    ('organizational_unit_name', 'Testing'),
                    ('common_name', 'Will Bond'),
                    ('email_address', 'will@codexns.io'),
                ])
            ),
            (
                'serial_number',
                13683582341504654466
            )
        ]),
        signer['sid'].native
    )
    self.assertEqual(
        util.OrderedDict([
            ('algorithm', 'sha256'),
            ('parameters', None),
        ]),
        signer['digest_algorithm'].native
    )
    signed_attrs = signer['signed_attrs']
    # Unlike the plain signed fixture, this one carries no signed attributes
    self.assertEqual(
        0,
        len(signed_attrs)
    )
    self.assertEqual(
        util.OrderedDict([
            ('algorithm', 'rsassa_pkcs1v15'),
            ('parameters', None),
        ]),
        signer['signature_algorithm'].native
    )
    self.assertEqual(
        b'\x70\xBC\x18\x82\x41\xD6\xD8\xE7\x5C\xDC\x42\x27\xA5\xA8\xAA\x8B\x16\x15\x61\x3A\xE5\x47'
        b'\x53\xFD\x8F\x45\xA3\x82\xE2\x72\x44\x07\xD1\xCB\xBF\xB4\x85\x4A\x2A\x16\x19\xDE\xDC\x53'
        b'\x15\xCF\x98\xEE\x5C\x0E\xDF\xDE\xC8\x79\xCE\x2B\x38\x61\x36\xB0\xA1\xCB\x94\xD6\x4F\xCD'
        b'\x83\xEF\x0C\xC9\x23\xA0\x7B\x8B\x65\x40\x5C\x3D\xA8\x3E\xCC\x0D\x1F\x17\x23\xF3\x74\x9F'
        b'\x7E\x88\xF8\xF3\xBE\x4E\x19\x95\x0F\xEB\x95\x55\x69\xB4\xAA\xC3\x2A\x36\x03\x93\x1C\xDC'
        b'\xE5\x65\x3F\x4E\x5E\x03\xC8\x56\xD8\x57\x8F\xE8\x2D\x85\x32\xDA\xFD\x79\xD4\xDD\x88\xCA'
        b'\xA3\x14\x41\xE4\x3B\x03\x88\x0E\x2B\x76\xDC\x44\x3D\x4D\xFF\xB2\xC8\xC3\x83\xB1\x33\x37'
        b'\x53\x51\x33\x4B\xCA\x1A\xAD\x7E\x6A\xBC\x61\x8B\x84\xDB\x7F\xCF\x61\xB2\x1D\x21\x83\xCF'
        b'\xB8\x3F\xC6\x98\xED\xD8\x66\x06\xCF\x03\x30\x96\x9D\xB4\x7A\x16\xDF\x6E\xA7\x30\xEB\x77'
        b'\xF7\x40\x13\xFB\xF2\xAC\x41\x79\x9D\xDC\xC0\xED\x4B\x8B\x19\xEE\x05\x3D\x61\x20\x39\x7E'
        b'\x80\x1D\x3A\x23\x69\x48\x43\x60\x8B\x3E\x63\xAD\x01\x7A\xDE\x6F\x01\xBA\x51\xF3\x4B\x14'
        b'\xBF\x6B\x77\x1A\x32\xC2\x0C\x93\xCC\x35\xBC\x66\xC6\x69',
        signer['signature'].native
    )
def test_parse_content_info_pkcs7_signed_digested_data(self):
    """
    Parses a PKCS#7 SignedData (version v1) whose encapsulated content is
    a DigestedData structure, checking every field against the fixture
    file. Mirrors the CMS variant of this test.
    """

    with open(os.path.join(fixtures_dir, 'pkcs7-signed-digested.der'), 'rb') as f:
        info = cms.ContentInfo.load(f.read())

    signed_data = info['content']
    encap_content_info = signed_data['encap_content_info']

    self.assertEqual(
        'signed_data',
        info['content_type'].native
    )
    self.assertEqual(
        'v1',
        signed_data['version'].native
    )
    self.assertEqual(
        [
            util.OrderedDict([
                ('algorithm', 'sha256'),
                ('parameters', None),
            ])
        ],
        signed_data['digest_algorithms'].native
    )
    self.assertEqual(
        'digested_data',
        encap_content_info['content_type'].native
    )
    # The encapsulated content parses into a complete DigestedData value
    self.assertEqual(
        util.OrderedDict([
            ('version', 'v0'),
            (
                'digest_algorithm',
                util.OrderedDict([
                    ('algorithm', 'sha1'),
                    ('parameters', None),
                ])
            ),
            (
                'encap_content_info',
                util.OrderedDict([
                    ('content_type', 'data'),
                    ('content', b'This is the message to encapsulate in PKCS#7/CMS\n'),
                ])
            ),
            (
                'digest',
                b'\x53\xC9\xDB\xC1\x6D\xDB\x34\x3B\x28\x4E\xEF\xA6\x03\x0E\x02\x64\x79\x31\xAF\xFB'
            )
        ]),
        encap_content_info['content'].native
    )
    self.assertEqual(
        1,
        len(signed_data['certificates'])
    )
    certificate = signed_data['certificates'][0]
    # The embedded certificate must round-trip byte-for-byte
    with open(os.path.join(fixtures_dir, 'keys/test-der.crt'), 'rb') as f:
        self.assertEqual(
            f.read(),
            certificate.dump()
        )
    self.assertEqual(
        1,
        len(signed_data['signer_infos'])
    )
    signer = signed_data['signer_infos'][0]
    self.assertEqual(
        'v1',
        signer['version'].native
    )
    self.assertEqual(
        util.OrderedDict([
            (
                'issuer',
                util.OrderedDict([
                    ('country_name', 'US'),
                    ('state_or_province_name', 'Massachusetts'),
                    ('locality_name', 'Newbury'),
                    ('organization_name', 'Codex Non Sufficit LC'),
                    ('organizational_unit_name', 'Testing'),
                    ('common_name', 'Will Bond'),
                    ('email_address', 'will@codexns.io'),
                ])
            ),
            (
                'serial_number',
                13683582341504654466
            )
        ]),
        signer['sid'].native
    )
    self.assertEqual(
        util.OrderedDict([
            ('algorithm', 'sha256'),
            ('parameters', None),
        ]),
        signer['digest_algorithm'].native
    )
    signed_attrs = signer['signed_attrs']
    # No signed attributes are present in this fixture
    self.assertEqual(
        0,
        len(signed_attrs)
    )
    self.assertEqual(
        util.OrderedDict([
            ('algorithm', 'rsassa_pkcs1v15'),
            ('parameters', None),
        ]),
        signer['signature_algorithm'].native
    )
    self.assertEqual(
        b'\x70\xBC\x18\x82\x41\xD6\xD8\xE7\x5C\xDC\x42\x27\xA5\xA8\xAA\x8B\x16\x15\x61\x3A\xE5\x47'
        b'\x53\xFD\x8F\x45\xA3\x82\xE2\x72\x44\x07\xD1\xCB\xBF\xB4\x85\x4A\x2A\x16\x19\xDE\xDC\x53'
        b'\x15\xCF\x98\xEE\x5C\x0E\xDF\xDE\xC8\x79\xCE\x2B\x38\x61\x36\xB0\xA1\xCB\x94\xD6\x4F\xCD'
        b'\x83\xEF\x0C\xC9\x23\xA0\x7B\x8B\x65\x40\x5C\x3D\xA8\x3E\xCC\x0D\x1F\x17\x23\xF3\x74\x9F'
        b'\x7E\x88\xF8\xF3\xBE\x4E\x19\x95\x0F\xEB\x95\x55\x69\xB4\xAA\xC3\x2A\x36\x03\x93\x1C\xDC'
        b'\xE5\x65\x3F\x4E\x5E\x03\xC8\x56\xD8\x57\x8F\xE8\x2D\x85\x32\xDA\xFD\x79\xD4\xDD\x88\xCA'
        b'\xA3\x14\x41\xE4\x3B\x03\x88\x0E\x2B\x76\xDC\x44\x3D\x4D\xFF\xB2\xC8\xC3\x83\xB1\x33\x37'
        b'\x53\x51\x33\x4B\xCA\x1A\xAD\x7E\x6A\xBC\x61\x8B\x84\xDB\x7F\xCF\x61\xB2\x1D\x21\x83\xCF'
        b'\xB8\x3F\xC6\x98\xED\xD8\x66\x06\xCF\x03\x30\x96\x9D\xB4\x7A\x16\xDF\x6E\xA7\x30\xEB\x77'
        b'\xF7\x40\x13\xFB\xF2\xAC\x41\x79\x9D\xDC\xC0\xED\x4B\x8B\x19\xEE\x05\x3D\x61\x20\x39\x7E'
        b'\x80\x1D\x3A\x23\x69\x48\x43\x60\x8B\x3E\x63\xAD\x01\x7A\xDE\x6F\x01\xBA\x51\xF3\x4B\x14'
        b'\xBF\x6B\x77\x1A\x32\xC2\x0C\x93\xCC\x35\xBC\x66\xC6\x69',
        signer['signature'].native
    )
def test_parse_content_info_smime_capabilities(self):
    """The smime_capabilities signed attribute decodes into the expected
    ordered list of cipher capabilities."""

    path = os.path.join(fixtures_dir, 'smime-signature-generated-by-thunderbird.p7s')
    with open(path, 'rb') as f:
        info = cms.ContentInfo.load(f.read())

    signed_attrs = info['content']['signer_infos'][0]['signed_attrs']
    smime_capabilities = signed_attrs[3]

    self.assertEqual(
        'smime_capabilities',
        smime_capabilities['type'].native
    )
    self.assertEqual(
        1,
        len(smime_capabilities['values'])
    )

    capabilities = smime_capabilities['values'][0]
    self.assertEqual(
        7,
        len(capabilities)
    )

    # (capability_id, parameters) pairs, strongest first
    expected_pairs = [
        ('aes256_cbc', None),
        ('aes128_cbc', None),
        ('tripledes_3key', None),
        ('rc2', 128),
        ('rc2', 64),
        ('des', None),
        ('rc2', 40),
    ]
    expected = [
        util.OrderedDict([
            ('capability_id', cap_id),
            ('parameters', params),
        ])
        for cap_id, params in expected_pairs
    ]
    self.assertEqual(
        [capability.native for capability in capabilities],
        expected
    )
def test_bad_teletex_inside_pkcs7(self):
    """A certificate subject using a non-conformant TeletexString still
    decodes to the expected name values."""

    path = os.path.join(fixtures_dir, 'mozilla-generated-by-openssl.pkcs7.der')
    with open(path, 'rb') as f:
        content = cms.ContentInfo.load(f.read())['content']

    subject = content['certificates'][0].chosen['tbs_certificate']['subject']
    expected_pairs = [
        ('organizational_unit_name', 'Testing'),
        ('country_name', 'US'),
        ('locality_name', 'Mountain View'),
        ('organization_name', 'Addons Testing'),
        ('state_or_province_name', 'CA'),
        ('common_name', '{02b860db-e71f-48d2-a5a0-82072a93d33c}'),
    ]
    self.assertEqual(util.OrderedDict(expected_pairs), subject.native)
def test_parse_attribute_cert(self):
    # regression test for tagging issue in AttCertIssuer
    path = os.path.join(fixtures_dir, 'example-attr-cert.der')
    with open(path, 'rb') as f:
        ac_bytes = f.read()

    parsed = cms.AttributeCertificateV2.load(ac_bytes)
    # A forced re-encode must round-trip byte-for-byte
    self.assertEqual(ac_bytes, parsed.dump(force=True))

    issuer = parsed['ac_info']['issuer']
    self.assertIsInstance(issuer.chosen, cms.V2Form)
    self.assertEqual(1, len(issuer.chosen['issuer_name']))
def test_create_role_syntax(self):
    """Constructing a RoleSyntax from a plain dict fills absent optional
    fields with None in the native view."""

    role = cms.RoleSyntax({'role_name': {'rfc822_name': 'test@example.com'}})
    expected = util.OrderedDict([
        ('role_authority', None),
        ('role_name', 'test@example.com'),
    ])
    self.assertEqual(expected, role.native)
| mit | 8190e5082a4b86451c8fbc088900e1ac | 35.502493 | 119 | 0.507976 | 3.136737 | false | false | false | false |
wbond/asn1crypto | asn1crypto/ocsp.py | 2 | 19024 | # coding: utf-8
"""
ASN.1 type classes for the online certificate status protocol (OCSP). Exports
the following items:
- OCSPRequest()
- OCSPResponse()
Other type classes are defined that help compose the types listed above.
"""
from __future__ import unicode_literals, division, absolute_import, print_function
from ._errors import unwrap
from .algos import DigestAlgorithm, SignedDigestAlgorithm
from .core import (
Boolean,
Choice,
Enumerated,
GeneralizedTime,
IA5String,
Integer,
Null,
ObjectIdentifier,
OctetBitString,
OctetString,
ParsableOctetString,
Sequence,
SequenceOf,
)
from .crl import AuthorityInfoAccessSyntax, CRLReason
from .keys import PublicKeyAlgorithm
from .x509 import Certificate, GeneralName, GeneralNames, Name
# The structures in this file are taken from https://tools.ietf.org/html/rfc6960
class Version(Integer):
    # OCSP protocol version -- RFC 6960 only defines v1
    _map = {
        0: 'v1'
    }


class CertId(Sequence):
    # Identifies which certificate a status request/response refers to, via
    # hashes of the issuer's name and key plus the certificate serial number
    _fields = [
        ('hash_algorithm', DigestAlgorithm),
        ('issuer_name_hash', OctetString),
        ('issuer_key_hash', OctetString),
        ('serial_number', Integer),
    ]


class ServiceLocator(Sequence):
    # Payload of the service_locator request extension -- tells a proxying
    # responder where to forward the request
    _fields = [
        ('issuer', Name),
        ('locator', AuthorityInfoAccessSyntax),
    ]


class RequestExtensionId(ObjectIdentifier):
    # Extensions allowed on an individual Request
    _map = {
        '1.3.6.1.5.5.7.48.1.7': 'service_locator',
    }


class RequestExtension(Sequence):
    _fields = [
        ('extn_id', RequestExtensionId),
        ('critical', Boolean, {'default': False}),
        ('extn_value', ParsableOctetString),
    ]

    # extn_value is lazily parsed using the spec matching extn_id
    _oid_pair = ('extn_id', 'extn_value')
    _oid_specs = {
        'service_locator': ServiceLocator,
    }


class RequestExtensions(SequenceOf):
    _child_spec = RequestExtension
class Request(Sequence):
    """
    A status request for a single certificate, plus any per-request
    extensions.
    """

    _fields = [
        ('req_cert', CertId),
        ('single_request_extensions', RequestExtensions, {'explicit': 0, 'optional': True}),
    ]

    _processed_extensions = False
    _critical_extensions = None
    _service_locator_value = None

    def _set_extensions(self):
        """
        Caches the values of known extensions on private attributes and
        records which extensions were flagged as critical
        """

        self._critical_extensions = set()

        for ext in self['single_request_extensions']:
            ext_name = ext['extn_id'].native
            private_attr = '_%s_value' % ext_name
            if hasattr(self, private_attr):
                setattr(self, private_attr, ext['extn_value'].parsed)
            if ext['critical'].native:
                self._critical_extensions.add(ext_name)

        self._processed_extensions = True

    @property
    def critical_extensions(self):
        """
        Names (or dotted OIDs, for unknown extensions) of all extensions
        marked as critical

        :return:
            A set of unicode strings
        """

        if not self._processed_extensions:
            self._set_extensions()
        return self._critical_extensions

    @property
    def service_locator_value(self):
        """
        Extension used when communicating with an OCSP responder that acts
        as a proxy for OCSP requests

        :return:
            None or a ServiceLocator object
        """

        if not self._processed_extensions:
            self._set_extensions()
        return self._service_locator_value
class Requests(SequenceOf):
    # The per-certificate requests inside a TBSRequest
    _child_spec = Request


class ResponseType(ObjectIdentifier):
    # Identifies the response format -- RFC 6960 only defines one
    _map = {
        '1.3.6.1.5.5.7.48.1.1': 'basic_ocsp_response',
    }


class AcceptableResponses(SequenceOf):
    # Response formats the client is willing to accept
    _child_spec = ResponseType


class PreferredSignatureAlgorithm(Sequence):
    # One signature-algorithm preference, optionally tied to a specific
    # public key algorithm (down to key parameters such as curve name)
    _fields = [
        ('sig_identifier', SignedDigestAlgorithm),
        ('cert_identifier', PublicKeyAlgorithm, {'optional': True}),
    ]


class PreferredSignatureAlgorithms(SequenceOf):
    _child_spec = PreferredSignatureAlgorithm


class TBSRequestExtensionId(ObjectIdentifier):
    # Extensions allowed at the whole-request (TBSRequest) level
    _map = {
        '1.3.6.1.5.5.7.48.1.2': 'nonce',
        '1.3.6.1.5.5.7.48.1.4': 'acceptable_responses',
        '1.3.6.1.5.5.7.48.1.8': 'preferred_signature_algorithms',
    }


class TBSRequestExtension(Sequence):
    _fields = [
        ('extn_id', TBSRequestExtensionId),
        ('critical', Boolean, {'default': False}),
        ('extn_value', ParsableOctetString),
    ]

    # extn_value is lazily parsed using the spec matching extn_id
    _oid_pair = ('extn_id', 'extn_value')
    _oid_specs = {
        'nonce': OctetString,
        'acceptable_responses': AcceptableResponses,
        'preferred_signature_algorithms': PreferredSignatureAlgorithms,
    }


class TBSRequestExtensions(SequenceOf):
    _child_spec = TBSRequestExtension


class TBSRequest(Sequence):
    # The "to be signed" portion of an OCSPRequest
    _fields = [
        ('version', Version, {'explicit': 0, 'default': 'v1'}),
        ('requestor_name', GeneralName, {'explicit': 1, 'optional': True}),
        ('request_list', Requests),
        ('request_extensions', TBSRequestExtensions, {'explicit': 2, 'optional': True}),
    ]


class Certificates(SequenceOf):
    _child_spec = Certificate


class Signature(Sequence):
    # Optional signature over the TBSRequest, with any helper certificates
    _fields = [
        ('signature_algorithm', SignedDigestAlgorithm),
        ('signature', OctetBitString),
        ('certs', Certificates, {'explicit': 0, 'optional': True}),
    ]
class OCSPRequest(Sequence):
    """
    The top-level request structure sent to an OCSP responder.
    """

    _fields = [
        ('tbs_request', TBSRequest),
        ('optional_signature', Signature, {'explicit': 0, 'optional': True}),
    ]

    _processed_extensions = False
    _critical_extensions = None
    _nonce_value = None
    _acceptable_responses_value = None
    _preferred_signature_algorithms_value = None

    def _set_extensions(self):
        """
        Caches the values of known request extensions on private attributes
        and records which extensions were flagged as critical
        """

        self._critical_extensions = set()

        for ext in self['tbs_request']['request_extensions']:
            ext_name = ext['extn_id'].native
            private_attr = '_%s_value' % ext_name
            if hasattr(self, private_attr):
                setattr(self, private_attr, ext['extn_value'].parsed)
            if ext['critical'].native:
                self._critical_extensions.add(ext_name)

        self._processed_extensions = True

    @property
    def critical_extensions(self):
        """
        Names (or dotted OIDs, for unknown extensions) of all extensions
        marked as critical

        :return:
            A set of unicode strings
        """

        if not self._processed_extensions:
            self._set_extensions()
        return self._critical_extensions

    @property
    def nonce_value(self):
        """
        Anti-replay extension: a unique, random value tying each request
        to its response

        :return:
            None or an OctetString object
        """

        if not self._processed_extensions:
            self._set_extensions()
        return self._nonce_value

    @property
    def acceptable_responses_value(self):
        """
        Extension advertising which response formats the client accepts;
        only basic_ocsp_response is defined by the standard

        :return:
            None or an AcceptableResponses object
        """

        if not self._processed_extensions:
            self._set_extensions()
        return self._acceptable_responses_value

    @property
    def preferred_signature_algorithms_value(self):
        """
        Extension listing the client's preferred signature algorithms,
        covering the hash algorithm, public key algorithm and even key
        parameters such as curve name

        :return:
            None or a PreferredSignatureAlgorithms object
        """

        if not self._processed_extensions:
            self._set_extensions()
        return self._preferred_signature_algorithms_value
class OCSPResponseStatus(Enumerated):
    # Overall outcome of the request -- note 4 is intentionally unused
    # in RFC 6960
    _map = {
        0: 'successful',
        1: 'malformed_request',
        2: 'internal_error',
        3: 'try_later',
        5: 'sign_required',
        6: 'unauthorized',
    }


class ResponderId(Choice):
    # The responder identifies itself either by name or by key hash
    _alternatives = [
        ('by_name', Name, {'explicit': 1}),
        ('by_key', OctetString, {'explicit': 2}),
    ]
# Custom class to return a meaningful .native attribute from CertStatus()
class StatusGood(Null):
    def set(self, value):
        """
        Sets the value of the object

        :param value:
            None or 'good'
        """

        acceptable = value is None or value == 'good' or isinstance(value, Null)
        if not acceptable:
            raise ValueError(unwrap(
                '''
                value must be one of None, "good", not %s
                ''',
                repr(value)
            ))

        self.contents = b''

    @property
    def native(self):
        # A parsed "good" status always reads as the string 'good'
        return 'good'
# Custom class to return a meaningful .native attribute from CertStatus()
class StatusUnknown(Null):
    def set(self, value):
        """
        Sets the value of the object

        :param value:
            None or 'unknown'
        """

        if not (value is None or value == 'unknown' or isinstance(value, Null)):
            raise ValueError(unwrap(
                '''
                value must be one of None, "unknown", not %s
                ''',
                repr(value)
            ))

        self.contents = b''

    @property
    def native(self):
        # A parsed "unknown" status always reads as the string 'unknown'
        return 'unknown'
class RevokedInfo(Sequence):
    # Details attached to a "revoked" certificate status
    _fields = [
        ('revocation_time', GeneralizedTime),
        ('revocation_reason', CRLReason, {'explicit': 0, 'optional': True}),
    ]


class CertStatus(Choice):
    # The status of a single certificate: good, revoked or unknown
    _alternatives = [
        ('good', StatusGood, {'implicit': 0}),
        ('revoked', RevokedInfo, {'implicit': 1}),
        ('unknown', StatusUnknown, {'implicit': 2}),
    ]


class CrlId(Sequence):
    # Payload of the crl single-response extension -- locates the CRL
    # the revocation is recorded in
    _fields = [
        ('crl_url', IA5String, {'explicit': 0, 'optional': True}),
        ('crl_num', Integer, {'explicit': 1, 'optional': True}),
        ('crl_time', GeneralizedTime, {'explicit': 2, 'optional': True}),
    ]


class SingleResponseExtensionId(ObjectIdentifier):
    _map = {
        '1.3.6.1.5.5.7.48.1.3': 'crl',
        '1.3.6.1.5.5.7.48.1.6': 'archive_cutoff',
        # These are CRLEntryExtension values from
        # https://tools.ietf.org/html/rfc5280
        '2.5.29.21': 'crl_reason',
        '2.5.29.24': 'invalidity_date',
        '2.5.29.29': 'certificate_issuer',
        # https://tools.ietf.org/html/rfc6962.html#page-13
        '1.3.6.1.4.1.11129.2.4.5': 'signed_certificate_timestamp_list',
    }


class SingleResponseExtension(Sequence):
    _fields = [
        ('extn_id', SingleResponseExtensionId),
        ('critical', Boolean, {'default': False}),
        ('extn_value', ParsableOctetString),
    ]

    # extn_value is lazily parsed using the spec matching extn_id
    _oid_pair = ('extn_id', 'extn_value')
    _oid_specs = {
        'crl': CrlId,
        'archive_cutoff': GeneralizedTime,
        'crl_reason': CRLReason,
        'invalidity_date': GeneralizedTime,
        'certificate_issuer': GeneralNames,
        'signed_certificate_timestamp_list': OctetString,
    }


class SingleResponseExtensions(SequenceOf):
    _child_spec = SingleResponseExtension
class SingleResponse(Sequence):
    """
    The status information for a single certificate within a ResponseData
    structure, plus any per-certificate extensions.
    """

    _fields = [
        ('cert_id', CertId),
        ('cert_status', CertStatus),
        ('this_update', GeneralizedTime),
        ('next_update', GeneralizedTime, {'explicit': 0, 'optional': True}),
        ('single_extensions', SingleResponseExtensions, {'explicit': 1, 'optional': True}),
    ]

    # Extension values are parsed lazily on first property access
    _processed_extensions = False
    _critical_extensions = None
    _crl_value = None
    _archive_cutoff_value = None
    _crl_reason_value = None
    _invalidity_date_value = None
    _certificate_issuer_value = None

    def _set_extensions(self):
        """
        Sets common named extensions to private attributes and creates a list
        of critical extensions
        """

        self._critical_extensions = set()

        for extension in self['single_extensions']:
            name = extension['extn_id'].native
            attribute_name = '_%s_value' % name
            # Only cache extensions this class declares an attribute for
            if hasattr(self, attribute_name):
                setattr(self, attribute_name, extension['extn_value'].parsed)
            if extension['critical'].native:
                self._critical_extensions.add(name)

        self._processed_extensions = True

    @property
    def critical_extensions(self):
        """
        Returns a set of the names (or OID if not a known extension) of the
        extensions marked as critical

        :return:
            A set of unicode strings
        """

        if not self._processed_extensions:
            self._set_extensions()
        return self._critical_extensions

    @property
    def crl_value(self):
        """
        This extension is used to locate the CRL that a certificate's revocation
        is contained within.

        :return:
            None or a CrlId object
        """

        if self._processed_extensions is False:
            self._set_extensions()
        return self._crl_value

    @property
    def archive_cutoff_value(self):
        """
        This extension is used to indicate the date at which an archived
        (historical) certificate status entry will no longer be available.

        :return:
            None or a GeneralizedTime object
        """

        if self._processed_extensions is False:
            self._set_extensions()
        return self._archive_cutoff_value

    @property
    def crl_reason_value(self):
        """
        This extension indicates the reason that a certificate was revoked.

        :return:
            None or a CRLReason object
        """

        if self._processed_extensions is False:
            self._set_extensions()
        return self._crl_reason_value

    @property
    def invalidity_date_value(self):
        """
        This extension indicates the suspected date/time the private key was
        compromised or the certificate became invalid. This would usually be
        before the revocation date, which is when the CA processed the
        revocation.

        :return:
            None or a GeneralizedTime object
        """

        if self._processed_extensions is False:
            self._set_extensions()
        return self._invalidity_date_value

    @property
    def certificate_issuer_value(self):
        """
        This extension indicates the issuer of the certificate in question.

        :return:
            None or an x509.GeneralNames object
        """

        if self._processed_extensions is False:
            self._set_extensions()
        return self._certificate_issuer_value
class Responses(SequenceOf):
    # Per-certificate status entries inside a ResponseData
    _child_spec = SingleResponse


class ResponseDataExtensionId(ObjectIdentifier):
    # Extensions allowed at the whole-response (ResponseData) level
    _map = {
        '1.3.6.1.5.5.7.48.1.2': 'nonce',
        '1.3.6.1.5.5.7.48.1.9': 'extended_revoke',
    }


class ResponseDataExtension(Sequence):
    _fields = [
        ('extn_id', ResponseDataExtensionId),
        ('critical', Boolean, {'default': False}),
        ('extn_value', ParsableOctetString),
    ]

    # extn_value is lazily parsed using the spec matching extn_id
    _oid_pair = ('extn_id', 'extn_value')
    _oid_specs = {
        'nonce': OctetString,
        'extended_revoke': Null,
    }


class ResponseDataExtensions(SequenceOf):
    _child_spec = ResponseDataExtension


class ResponseData(Sequence):
    # The signed portion of a BasicOCSPResponse
    _fields = [
        ('version', Version, {'explicit': 0, 'default': 'v1'}),
        ('responder_id', ResponderId),
        ('produced_at', GeneralizedTime),
        ('responses', Responses),
        ('response_extensions', ResponseDataExtensions, {'explicit': 1, 'optional': True}),
    ]


class BasicOCSPResponse(Sequence):
    # The only response format defined by RFC 6960: signed response data
    # plus any certificates needed to verify the signature
    _fields = [
        ('tbs_response_data', ResponseData),
        ('signature_algorithm', SignedDigestAlgorithm),
        ('signature', OctetBitString),
        ('certs', Certificates, {'explicit': 0, 'optional': True}),
    ]


class ResponseBytes(Sequence):
    _fields = [
        ('response_type', ResponseType),
        ('response', ParsableOctetString),
    ]

    # response is lazily parsed using the spec matching response_type
    _oid_pair = ('response_type', 'response')
    _oid_specs = {
        'basic_ocsp_response': BasicOCSPResponse,
    }
class OCSPResponse(Sequence):
    """
    The top-level structure returned by an OCSP responder.
    """

    _fields = [
        ('response_status', OCSPResponseStatus),
        ('response_bytes', ResponseBytes, {'explicit': 0, 'optional': True}),
    ]

    _processed_extensions = False
    _critical_extensions = None
    _nonce_value = None
    _extended_revoke_value = None

    def _set_extensions(self):
        """
        Caches the values of known response extensions on private attributes
        and records which extensions were flagged as critical
        """

        self._critical_extensions = set()

        response_data = self['response_bytes']['response'].parsed['tbs_response_data']
        for ext in response_data['response_extensions']:
            ext_name = ext['extn_id'].native
            private_attr = '_%s_value' % ext_name
            if hasattr(self, private_attr):
                setattr(self, private_attr, ext['extn_value'].parsed)
            if ext['critical'].native:
                self._critical_extensions.add(ext_name)

        self._processed_extensions = True

    @property
    def critical_extensions(self):
        """
        Names (or dotted OIDs, for unknown extensions) of all extensions
        marked as critical

        :return:
            A set of unicode strings
        """

        if not self._processed_extensions:
            self._set_extensions()
        return self._critical_extensions

    @property
    def nonce_value(self):
        """
        Anti-replay extension echoed back from the request

        :return:
            None or an OctetString object
        """

        if not self._processed_extensions:
            self._set_extensions()
        return self._nonce_value

    @property
    def extended_revoke_value(self):
        """
        Extension signalling that the responder returns "revoked" for
        non-issued certificates

        :return:
            None or a Null object (if present)
        """

        if not self._processed_extensions:
            self._set_extensions()
        return self._extended_revoke_value

    @property
    def basic_ocsp_response(self):
        """
        A shortcut into the BasicOCSPResponse sequence

        :return:
            None or an asn1crypto.ocsp.BasicOCSPResponse object
        """

        return self['response_bytes']['response'].parsed

    @property
    def response_data(self):
        """
        A shortcut into the parsed, ResponseData sequence

        :return:
            None or an asn1crypto.ocsp.ResponseData object
        """

        return self.basic_ocsp_response['tbs_response_data']
| mit | 9d1cfae9ba9c6ea05d153e6b5d5e3677 | 26.061166 | 111 | 0.599611 | 4.163712 | false | false | false | false |
wbond/asn1crypto | asn1crypto/algos.py | 2 | 35867 | # coding: utf-8
"""
ASN.1 type classes for various algorithms using in various aspects of public
key cryptography. Exports the following items:
- AlgorithmIdentifier()
- AnyAlgorithmIdentifier()
- DigestAlgorithm()
- DigestInfo()
- DSASignature()
- EncryptionAlgorithm()
- HmacAlgorithm()
- KdfAlgorithm()
- Pkcs5MacAlgorithm()
- SignedDigestAlgorithm()
Other type classes are defined that help compose the types listed above.
"""
from __future__ import unicode_literals, division, absolute_import, print_function
from ._errors import unwrap
from ._int import fill_width
from .util import int_from_bytes, int_to_bytes
from .core import (
Any,
Choice,
Integer,
Null,
ObjectIdentifier,
OctetString,
Sequence,
Void,
)
# Structures and OIDs in this file are pulled from
# https://tools.ietf.org/html/rfc3279, https://tools.ietf.org/html/rfc4055,
# https://tools.ietf.org/html/rfc5758, https://tools.ietf.org/html/rfc7292,
# http://www.emc.com/collateral/white-papers/h11302-pkcs5v2-1-password-based-cryptography-standard-wp.pdf
class AlgorithmIdentifier(Sequence):
    # Generic OID-plus-parameters pair used throughout X.509/PKCS structures
    _fields = [
        ('algorithm', ObjectIdentifier),
        ('parameters', Any, {'optional': True}),
    ]


class _ForceNullParameters(object):
    """
    Various structures based on AlgorithmIdentifier require that the parameters
    field be core.Null() for certain OIDs. This mixin ensures that happens.
    """

    # The following attribute, plus the parameters spec callback and custom
    # __setitem__ are all to handle a situation where parameters should not be
    # optional and must be Null for certain OIDs. More info at
    # https://tools.ietf.org/html/rfc4055#page-15 and
    # https://tools.ietf.org/html/rfc4055#section-2.1
    _null_algos = set([
        '1.2.840.113549.1.1.1',    # rsassa_pkcs1v15 / rsaes_pkcs1v15 / rsa
        '1.2.840.113549.1.1.11',   # sha256_rsa
        '1.2.840.113549.1.1.12',   # sha384_rsa
        '1.2.840.113549.1.1.13',   # sha512_rsa
        '1.2.840.113549.1.1.14',   # sha224_rsa
        '1.3.14.3.2.26',           # sha1
        '2.16.840.1.101.3.4.2.4',  # sha224
        '2.16.840.1.101.3.4.2.1',  # sha256
        '2.16.840.1.101.3.4.2.2',  # sha384
        '2.16.840.1.101.3.4.2.3',  # sha512
    ])

    def _parameters_spec(self):
        # Resolve the spec for 'parameters': an OID-specific spec if one is
        # registered, Null for OIDs that require it, otherwise no constraint
        if self._oid_pair == ('algorithm', 'parameters'):
            algo = self['algorithm'].native
            if algo in self._oid_specs:
                return self._oid_specs[algo]

        if self['algorithm'].dotted in self._null_algos:
            return Null

        return None

    _spec_callbacks = {
        'parameters': _parameters_spec
    }

    # We have to override this since the spec callback uses the value of
    # algorithm to determine the parameter spec, however default values are
    # assigned before setting a field, so a default value can't be based on
    # another field value (unless it is a default also). Thus we have to
    # manually check to see if the algorithm was set and parameters is unset,
    # and then fix the value as appropriate.
    def __setitem__(self, key, value):
        res = super(_ForceNullParameters, self).__setitem__(key, value)
        if key != 'algorithm':
            return res
        if self['algorithm'].dotted not in self._null_algos:
            return res
        if self['parameters'].__class__ != Void:
            return res
        self['parameters'] = Null()
        return res
class HmacAlgorithmId(ObjectIdentifier):
    # OIDs for HMAC/MAC constructions
    _map = {
        '1.3.14.3.2.10': 'des_mac',
        '1.2.840.113549.2.7': 'sha1',
        '1.2.840.113549.2.8': 'sha224',
        '1.2.840.113549.2.9': 'sha256',
        '1.2.840.113549.2.10': 'sha384',
        '1.2.840.113549.2.11': 'sha512',
        '1.2.840.113549.2.12': 'sha512_224',
        '1.2.840.113549.2.13': 'sha512_256',
        '2.16.840.1.101.3.4.2.13': 'sha3_224',
        '2.16.840.1.101.3.4.2.14': 'sha3_256',
        '2.16.840.1.101.3.4.2.15': 'sha3_384',
        '2.16.840.1.101.3.4.2.16': 'sha3_512',
    }


class HmacAlgorithm(Sequence):
    _fields = [
        ('algorithm', HmacAlgorithmId),
        ('parameters', Any, {'optional': True}),
    ]


class DigestAlgorithmId(ObjectIdentifier):
    # Message digest OIDs, including SHA-2, SHA-3 and SHAKE variants
    _map = {
        '1.2.840.113549.2.2': 'md2',
        '1.2.840.113549.2.5': 'md5',
        '1.3.14.3.2.26': 'sha1',
        '2.16.840.1.101.3.4.2.4': 'sha224',
        '2.16.840.1.101.3.4.2.1': 'sha256',
        '2.16.840.1.101.3.4.2.2': 'sha384',
        '2.16.840.1.101.3.4.2.3': 'sha512',
        '2.16.840.1.101.3.4.2.5': 'sha512_224',
        '2.16.840.1.101.3.4.2.6': 'sha512_256',
        '2.16.840.1.101.3.4.2.7': 'sha3_224',
        '2.16.840.1.101.3.4.2.8': 'sha3_256',
        '2.16.840.1.101.3.4.2.9': 'sha3_384',
        '2.16.840.1.101.3.4.2.10': 'sha3_512',
        '2.16.840.1.101.3.4.2.11': 'shake128',
        '2.16.840.1.101.3.4.2.12': 'shake256',
        '2.16.840.1.101.3.4.2.17': 'shake128_len',
        '2.16.840.1.101.3.4.2.18': 'shake256_len',
    }


class DigestAlgorithm(_ForceNullParameters, Sequence):
    # The mixin forces parameters to Null for OIDs that require it
    _fields = [
        ('algorithm', DigestAlgorithmId),
        ('parameters', Any, {'optional': True}),
    ]
# This structure is what is signed with a SignedDigestAlgorithm
class DigestInfo(Sequence):
    _fields = [
        ('digest_algorithm', DigestAlgorithm),
        ('digest', OctetString),
    ]


class MaskGenAlgorithmId(ObjectIdentifier):
    # Mask generation functions for RSASSA-PSS -- only MGF1 is defined
    _map = {
        '1.2.840.113549.1.1.8': 'mgf1',
    }


class MaskGenAlgorithm(Sequence):
    _fields = [
        ('algorithm', MaskGenAlgorithmId),
        ('parameters', Any, {'optional': True}),
    ]

    # For MGF1 the parameters are the underlying digest algorithm
    _oid_pair = ('algorithm', 'parameters')
    _oid_specs = {
        'mgf1': DigestAlgorithm
    }


class TrailerField(Integer):
    # RFC 4055 only defines the 0xBC trailer
    _map = {
        1: 'trailer_field_bc',
    }
class RSASSAPSSParams(Sequence):
    # Parameters for RSASSA-PSS signatures; all defaults match RFC 4055
    # (SHA-1, MGF1 with SHA-1, 20-byte salt, 0xBC trailer)
    _fields = [
        (
            'hash_algorithm',
            DigestAlgorithm,
            {
                'explicit': 0,
                'default': {'algorithm': 'sha1'},
            }
        ),
        (
            'mask_gen_algorithm',
            MaskGenAlgorithm,
            {
                'explicit': 1,
                'default': {
                    'algorithm': 'mgf1',
                    'parameters': {'algorithm': 'sha1'},
                },
            }
        ),
        (
            'salt_length',
            Integer,
            {
                'explicit': 2,
                'default': 20,
            }
        ),
        (
            'trailer_field',
            TrailerField,
            {
                'explicit': 3,
                'default': 'trailer_field_bc',
            }
        ),
    ]
class SignedDigestAlgorithmId(ObjectIdentifier):
    # Several OIDs decode to the same name (legacy aliases), so _map has
    # duplicate values on purpose
    _map = {
        '1.3.14.3.2.3': 'md5_rsa',
        '1.3.14.3.2.29': 'sha1_rsa',
        '1.3.14.7.2.3.1': 'md2_rsa',
        '1.2.840.113549.1.1.2': 'md2_rsa',
        '1.2.840.113549.1.1.4': 'md5_rsa',
        '1.2.840.113549.1.1.5': 'sha1_rsa',
        '1.2.840.113549.1.1.14': 'sha224_rsa',
        '1.2.840.113549.1.1.11': 'sha256_rsa',
        '1.2.840.113549.1.1.12': 'sha384_rsa',
        '1.2.840.113549.1.1.13': 'sha512_rsa',
        '1.2.840.113549.1.1.10': 'rsassa_pss',
        '1.2.840.10040.4.3': 'sha1_dsa',
        '1.3.14.3.2.13': 'sha1_dsa',
        '1.3.14.3.2.27': 'sha1_dsa',
        '2.16.840.1.101.3.4.3.1': 'sha224_dsa',
        '2.16.840.1.101.3.4.3.2': 'sha256_dsa',
        '1.2.840.10045.4.1': 'sha1_ecdsa',
        '1.2.840.10045.4.3.1': 'sha224_ecdsa',
        '1.2.840.10045.4.3.2': 'sha256_ecdsa',
        '1.2.840.10045.4.3.3': 'sha384_ecdsa',
        '1.2.840.10045.4.3.4': 'sha512_ecdsa',
        '2.16.840.1.101.3.4.3.9': 'sha3_224_ecdsa',
        '2.16.840.1.101.3.4.3.10': 'sha3_256_ecdsa',
        '2.16.840.1.101.3.4.3.11': 'sha3_384_ecdsa',
        '2.16.840.1.101.3.4.3.12': 'sha3_512_ecdsa',
        # For when the digest is specified elsewhere in a Sequence
        '1.2.840.113549.1.1.1': 'rsassa_pkcs1v15',
        '1.2.840.10040.4.1': 'dsa',
        '1.2.840.10045.4': 'ecdsa',
        # RFC 8410 -- https://tools.ietf.org/html/rfc8410
        '1.3.101.112': 'ed25519',
        '1.3.101.113': 'ed448',
    }

    # Because _map is not one-to-one, this picks the canonical OID to emit
    # when encoding from a name
    _reverse_map = {
        'dsa': '1.2.840.10040.4.1',
        'ecdsa': '1.2.840.10045.4',
        'md2_rsa': '1.2.840.113549.1.1.2',
        'md5_rsa': '1.2.840.113549.1.1.4',
        'rsassa_pkcs1v15': '1.2.840.113549.1.1.1',
        'rsassa_pss': '1.2.840.113549.1.1.10',
        'sha1_dsa': '1.2.840.10040.4.3',
        'sha1_ecdsa': '1.2.840.10045.4.1',
        'sha1_rsa': '1.2.840.113549.1.1.5',
        'sha224_dsa': '2.16.840.1.101.3.4.3.1',
        'sha224_ecdsa': '1.2.840.10045.4.3.1',
        'sha224_rsa': '1.2.840.113549.1.1.14',
        'sha256_dsa': '2.16.840.1.101.3.4.3.2',
        'sha256_ecdsa': '1.2.840.10045.4.3.2',
        'sha256_rsa': '1.2.840.113549.1.1.11',
        'sha384_ecdsa': '1.2.840.10045.4.3.3',
        'sha384_rsa': '1.2.840.113549.1.1.12',
        'sha512_ecdsa': '1.2.840.10045.4.3.4',
        'sha512_rsa': '1.2.840.113549.1.1.13',
        'sha3_224_ecdsa': '2.16.840.1.101.3.4.3.9',
        'sha3_256_ecdsa': '2.16.840.1.101.3.4.3.10',
        'sha3_384_ecdsa': '2.16.840.1.101.3.4.3.11',
        'sha3_512_ecdsa': '2.16.840.1.101.3.4.3.12',
        'ed25519': '1.3.101.112',
        'ed448': '1.3.101.113',
    }
class SignedDigestAlgorithm(_ForceNullParameters, Sequence):
_fields = [
('algorithm', SignedDigestAlgorithmId),
('parameters', Any, {'optional': True}),
]
_oid_pair = ('algorithm', 'parameters')
_oid_specs = {
'rsassa_pss': RSASSAPSSParams,
}
@property
def signature_algo(self):
"""
:return:
A unicode string of "rsassa_pkcs1v15", "rsassa_pss", "dsa",
"ecdsa", "ed25519" or "ed448"
"""
algorithm = self['algorithm'].native
algo_map = {
'md2_rsa': 'rsassa_pkcs1v15',
'md5_rsa': 'rsassa_pkcs1v15',
'sha1_rsa': 'rsassa_pkcs1v15',
'sha224_rsa': 'rsassa_pkcs1v15',
'sha256_rsa': 'rsassa_pkcs1v15',
'sha384_rsa': 'rsassa_pkcs1v15',
'sha512_rsa': 'rsassa_pkcs1v15',
'rsassa_pkcs1v15': 'rsassa_pkcs1v15',
'rsassa_pss': 'rsassa_pss',
'sha1_dsa': 'dsa',
'sha224_dsa': 'dsa',
'sha256_dsa': 'dsa',
'dsa': 'dsa',
'sha1_ecdsa': 'ecdsa',
'sha224_ecdsa': 'ecdsa',
'sha256_ecdsa': 'ecdsa',
'sha384_ecdsa': 'ecdsa',
'sha512_ecdsa': 'ecdsa',
'sha3_224_ecdsa': 'ecdsa',
'sha3_256_ecdsa': 'ecdsa',
'sha3_384_ecdsa': 'ecdsa',
'sha3_512_ecdsa': 'ecdsa',
'ecdsa': 'ecdsa',
'ed25519': 'ed25519',
'ed448': 'ed448',
}
if algorithm in algo_map:
return algo_map[algorithm]
raise ValueError(unwrap(
'''
Signature algorithm not known for %s
''',
algorithm
))
@property
def hash_algo(self):
"""
:return:
A unicode string of "md2", "md5", "sha1", "sha224", "sha256",
"sha384", "sha512", "sha512_224", "sha512_256" or "shake256"
"""
algorithm = self['algorithm'].native
algo_map = {
'md2_rsa': 'md2',
'md5_rsa': 'md5',
'sha1_rsa': 'sha1',
'sha224_rsa': 'sha224',
'sha256_rsa': 'sha256',
'sha384_rsa': 'sha384',
'sha512_rsa': 'sha512',
'sha1_dsa': 'sha1',
'sha224_dsa': 'sha224',
'sha256_dsa': 'sha256',
'sha1_ecdsa': 'sha1',
'sha224_ecdsa': 'sha224',
'sha256_ecdsa': 'sha256',
'sha384_ecdsa': 'sha384',
'sha512_ecdsa': 'sha512',
'ed25519': 'sha512',
'ed448': 'shake256',
}
if algorithm in algo_map:
return algo_map[algorithm]
if algorithm == 'rsassa_pss':
return self['parameters']['hash_algorithm']['algorithm'].native
raise ValueError(unwrap(
'''
Hash algorithm not known for %s
''',
algorithm
))
class Pbkdf2Salt(Choice):
_alternatives = [
('specified', OctetString),
('other_source', AlgorithmIdentifier),
]
class Pbkdf2Params(Sequence):
_fields = [
('salt', Pbkdf2Salt),
('iteration_count', Integer),
('key_length', Integer, {'optional': True}),
('prf', HmacAlgorithm, {'default': {'algorithm': 'sha1'}}),
]
class KdfAlgorithmId(ObjectIdentifier):
_map = {
'1.2.840.113549.1.5.12': 'pbkdf2'
}
class KdfAlgorithm(Sequence):
_fields = [
('algorithm', KdfAlgorithmId),
('parameters', Any, {'optional': True}),
]
_oid_pair = ('algorithm', 'parameters')
_oid_specs = {
'pbkdf2': Pbkdf2Params
}
class DHParameters(Sequence):
"""
Original Name: DHParameter
Source: ftp://ftp.rsasecurity.com/pub/pkcs/ascii/pkcs-3.asc section 9
"""
_fields = [
('p', Integer),
('g', Integer),
('private_value_length', Integer, {'optional': True}),
]
class KeyExchangeAlgorithmId(ObjectIdentifier):
_map = {
'1.2.840.113549.1.3.1': 'dh',
}
class KeyExchangeAlgorithm(Sequence):
_fields = [
('algorithm', KeyExchangeAlgorithmId),
('parameters', Any, {'optional': True}),
]
_oid_pair = ('algorithm', 'parameters')
_oid_specs = {
'dh': DHParameters,
}
class Rc2Params(Sequence):
_fields = [
('rc2_parameter_version', Integer, {'optional': True}),
('iv', OctetString),
]
class Rc5ParamVersion(Integer):
_map = {
16: 'v1-0'
}
class Rc5Params(Sequence):
_fields = [
('version', Rc5ParamVersion),
('rounds', Integer),
('block_size_in_bits', Integer),
('iv', OctetString, {'optional': True}),
]
class Pbes1Params(Sequence):
_fields = [
('salt', OctetString),
('iterations', Integer),
]
class CcmParams(Sequence):
# https://tools.ietf.org/html/rfc5084
# aes_ICVlen: 4 | 6 | 8 | 10 | 12 | 14 | 16
_fields = [
('aes_nonce', OctetString),
('aes_icvlen', Integer),
]
class PSourceAlgorithmId(ObjectIdentifier):
_map = {
'1.2.840.113549.1.1.9': 'p_specified',
}
class PSourceAlgorithm(Sequence):
_fields = [
('algorithm', PSourceAlgorithmId),
('parameters', Any, {'optional': True}),
]
_oid_pair = ('algorithm', 'parameters')
_oid_specs = {
'p_specified': OctetString
}
class RSAESOAEPParams(Sequence):
_fields = [
(
'hash_algorithm',
DigestAlgorithm,
{
'explicit': 0,
'default': {'algorithm': 'sha1'}
}
),
(
'mask_gen_algorithm',
MaskGenAlgorithm,
{
'explicit': 1,
'default': {
'algorithm': 'mgf1',
'parameters': {'algorithm': 'sha1'}
}
}
),
(
'p_source_algorithm',
PSourceAlgorithm,
{
'explicit': 2,
'default': {
'algorithm': 'p_specified',
'parameters': b''
}
}
),
]
class DSASignature(Sequence):
"""
An ASN.1 class for translating between the OS crypto library's
representation of an (EC)DSA signature and the ASN.1 structure that is part
of various RFCs.
Original Name: DSS-Sig-Value
Source: https://tools.ietf.org/html/rfc3279#section-2.2.2
"""
_fields = [
('r', Integer),
('s', Integer),
]
@classmethod
def from_p1363(cls, data):
"""
Reads a signature from a byte string encoding accordint to IEEE P1363,
which is used by Microsoft's BCryptSignHash() function.
:param data:
A byte string from BCryptSignHash()
:return:
A DSASignature object
"""
r = int_from_bytes(data[0:len(data) // 2])
s = int_from_bytes(data[len(data) // 2:])
return cls({'r': r, 's': s})
def to_p1363(self):
"""
Dumps a signature to a byte string compatible with Microsoft's
BCryptVerifySignature() function.
:return:
A byte string compatible with BCryptVerifySignature()
"""
r_bytes = int_to_bytes(self['r'].native)
s_bytes = int_to_bytes(self['s'].native)
int_byte_length = max(len(r_bytes), len(s_bytes))
r_bytes = fill_width(r_bytes, int_byte_length)
s_bytes = fill_width(s_bytes, int_byte_length)
return r_bytes + s_bytes
class EncryptionAlgorithmId(ObjectIdentifier):
_map = {
'1.3.14.3.2.7': 'des',
'1.2.840.113549.3.7': 'tripledes_3key',
'1.2.840.113549.3.2': 'rc2',
'1.2.840.113549.3.4': 'rc4',
'1.2.840.113549.3.9': 'rc5',
# From http://csrc.nist.gov/groups/ST/crypto_apps_infra/csor/algorithms.html#AES
'2.16.840.1.101.3.4.1.1': 'aes128_ecb',
'2.16.840.1.101.3.4.1.2': 'aes128_cbc',
'2.16.840.1.101.3.4.1.3': 'aes128_ofb',
'2.16.840.1.101.3.4.1.4': 'aes128_cfb',
'2.16.840.1.101.3.4.1.5': 'aes128_wrap',
'2.16.840.1.101.3.4.1.6': 'aes128_gcm',
'2.16.840.1.101.3.4.1.7': 'aes128_ccm',
'2.16.840.1.101.3.4.1.8': 'aes128_wrap_pad',
'2.16.840.1.101.3.4.1.21': 'aes192_ecb',
'2.16.840.1.101.3.4.1.22': 'aes192_cbc',
'2.16.840.1.101.3.4.1.23': 'aes192_ofb',
'2.16.840.1.101.3.4.1.24': 'aes192_cfb',
'2.16.840.1.101.3.4.1.25': 'aes192_wrap',
'2.16.840.1.101.3.4.1.26': 'aes192_gcm',
'2.16.840.1.101.3.4.1.27': 'aes192_ccm',
'2.16.840.1.101.3.4.1.28': 'aes192_wrap_pad',
'2.16.840.1.101.3.4.1.41': 'aes256_ecb',
'2.16.840.1.101.3.4.1.42': 'aes256_cbc',
'2.16.840.1.101.3.4.1.43': 'aes256_ofb',
'2.16.840.1.101.3.4.1.44': 'aes256_cfb',
'2.16.840.1.101.3.4.1.45': 'aes256_wrap',
'2.16.840.1.101.3.4.1.46': 'aes256_gcm',
'2.16.840.1.101.3.4.1.47': 'aes256_ccm',
'2.16.840.1.101.3.4.1.48': 'aes256_wrap_pad',
# From PKCS#5
'1.2.840.113549.1.5.13': 'pbes2',
'1.2.840.113549.1.5.1': 'pbes1_md2_des',
'1.2.840.113549.1.5.3': 'pbes1_md5_des',
'1.2.840.113549.1.5.4': 'pbes1_md2_rc2',
'1.2.840.113549.1.5.6': 'pbes1_md5_rc2',
'1.2.840.113549.1.5.10': 'pbes1_sha1_des',
'1.2.840.113549.1.5.11': 'pbes1_sha1_rc2',
# From PKCS#12
'1.2.840.113549.1.12.1.1': 'pkcs12_sha1_rc4_128',
'1.2.840.113549.1.12.1.2': 'pkcs12_sha1_rc4_40',
'1.2.840.113549.1.12.1.3': 'pkcs12_sha1_tripledes_3key',
'1.2.840.113549.1.12.1.4': 'pkcs12_sha1_tripledes_2key',
'1.2.840.113549.1.12.1.5': 'pkcs12_sha1_rc2_128',
'1.2.840.113549.1.12.1.6': 'pkcs12_sha1_rc2_40',
# PKCS#1 v2.2
'1.2.840.113549.1.1.1': 'rsaes_pkcs1v15',
'1.2.840.113549.1.1.7': 'rsaes_oaep',
}
class EncryptionAlgorithm(_ForceNullParameters, Sequence):
_fields = [
('algorithm', EncryptionAlgorithmId),
('parameters', Any, {'optional': True}),
]
_oid_pair = ('algorithm', 'parameters')
_oid_specs = {
'des': OctetString,
'tripledes_3key': OctetString,
'rc2': Rc2Params,
'rc5': Rc5Params,
'aes128_cbc': OctetString,
'aes192_cbc': OctetString,
'aes256_cbc': OctetString,
'aes128_ofb': OctetString,
'aes192_ofb': OctetString,
'aes256_ofb': OctetString,
# From RFC5084
'aes128_ccm': CcmParams,
'aes192_ccm': CcmParams,
'aes256_ccm': CcmParams,
# From PKCS#5
'pbes1_md2_des': Pbes1Params,
'pbes1_md5_des': Pbes1Params,
'pbes1_md2_rc2': Pbes1Params,
'pbes1_md5_rc2': Pbes1Params,
'pbes1_sha1_des': Pbes1Params,
'pbes1_sha1_rc2': Pbes1Params,
# From PKCS#12
'pkcs12_sha1_rc4_128': Pbes1Params,
'pkcs12_sha1_rc4_40': Pbes1Params,
'pkcs12_sha1_tripledes_3key': Pbes1Params,
'pkcs12_sha1_tripledes_2key': Pbes1Params,
'pkcs12_sha1_rc2_128': Pbes1Params,
'pkcs12_sha1_rc2_40': Pbes1Params,
# PKCS#1 v2.2
'rsaes_oaep': RSAESOAEPParams,
}
@property
def kdf(self):
"""
Returns the name of the key derivation function to use.
:return:
A unicode from of one of the following: "pbkdf1", "pbkdf2",
"pkcs12_kdf"
"""
encryption_algo = self['algorithm'].native
if encryption_algo == 'pbes2':
return self['parameters']['key_derivation_func']['algorithm'].native
if encryption_algo.find('.') == -1:
if encryption_algo.find('_') != -1:
encryption_algo, _ = encryption_algo.split('_', 1)
if encryption_algo == 'pbes1':
return 'pbkdf1'
if encryption_algo == 'pkcs12':
return 'pkcs12_kdf'
raise ValueError(unwrap(
'''
Encryption algorithm "%s" does not have a registered key
derivation function
''',
encryption_algo
))
raise ValueError(unwrap(
'''
Unrecognized encryption algorithm "%s", can not determine key
derivation function
''',
encryption_algo
))
@property
def kdf_hmac(self):
"""
Returns the HMAC algorithm to use with the KDF.
:return:
A unicode string of one of the following: "md2", "md5", "sha1",
"sha224", "sha256", "sha384", "sha512"
"""
encryption_algo = self['algorithm'].native
if encryption_algo == 'pbes2':
return self['parameters']['key_derivation_func']['parameters']['prf']['algorithm'].native
if encryption_algo.find('.') == -1:
if encryption_algo.find('_') != -1:
_, hmac_algo, _ = encryption_algo.split('_', 2)
return hmac_algo
raise ValueError(unwrap(
'''
Encryption algorithm "%s" does not have a registered key
derivation function
''',
encryption_algo
))
raise ValueError(unwrap(
'''
Unrecognized encryption algorithm "%s", can not determine key
derivation hmac algorithm
''',
encryption_algo
))
@property
def kdf_salt(self):
"""
Returns the byte string to use as the salt for the KDF.
:return:
A byte string
"""
encryption_algo = self['algorithm'].native
if encryption_algo == 'pbes2':
salt = self['parameters']['key_derivation_func']['parameters']['salt']
if salt.name == 'other_source':
raise ValueError(unwrap(
'''
Can not determine key derivation salt - the
reserved-for-future-use other source salt choice was
specified in the PBKDF2 params structure
'''
))
return salt.native
if encryption_algo.find('.') == -1:
if encryption_algo.find('_') != -1:
return self['parameters']['salt'].native
raise ValueError(unwrap(
'''
Encryption algorithm "%s" does not have a registered key
derivation function
''',
encryption_algo
))
raise ValueError(unwrap(
'''
Unrecognized encryption algorithm "%s", can not determine key
derivation salt
''',
encryption_algo
))
@property
def kdf_iterations(self):
"""
Returns the number of iterations that should be run via the KDF.
:return:
An integer
"""
encryption_algo = self['algorithm'].native
if encryption_algo == 'pbes2':
return self['parameters']['key_derivation_func']['parameters']['iteration_count'].native
if encryption_algo.find('.') == -1:
if encryption_algo.find('_') != -1:
return self['parameters']['iterations'].native
raise ValueError(unwrap(
'''
Encryption algorithm "%s" does not have a registered key
derivation function
''',
encryption_algo
))
raise ValueError(unwrap(
'''
Unrecognized encryption algorithm "%s", can not determine key
derivation iterations
''',
encryption_algo
))
@property
def key_length(self):
"""
Returns the key length to pass to the cipher/kdf. The PKCS#5 spec does
not specify a way to store the RC5 key length, however this tends not
to be a problem since OpenSSL does not support RC5 in PKCS#8 and OS X
does not provide an RC5 cipher for use in the Security Transforms
library.
:raises:
ValueError - when the key length can not be determined
:return:
An integer representing the length in bytes
"""
encryption_algo = self['algorithm'].native
if encryption_algo[0:3] == 'aes':
return {
'aes128_': 16,
'aes192_': 24,
'aes256_': 32,
}[encryption_algo[0:7]]
cipher_lengths = {
'des': 8,
'tripledes_3key': 24,
}
if encryption_algo in cipher_lengths:
return cipher_lengths[encryption_algo]
if encryption_algo == 'rc2':
rc2_parameter_version = self['parameters']['rc2_parameter_version'].native
# See page 24 of
# http://www.emc.com/collateral/white-papers/h11302-pkcs5v2-1-password-based-cryptography-standard-wp.pdf
encoded_key_bits_map = {
160: 5, # 40-bit
120: 8, # 64-bit
58: 16, # 128-bit
}
if rc2_parameter_version in encoded_key_bits_map:
return encoded_key_bits_map[rc2_parameter_version]
if rc2_parameter_version >= 256:
return rc2_parameter_version
if rc2_parameter_version is None:
return 4 # 32-bit default
raise ValueError(unwrap(
'''
Invalid RC2 parameter version found in EncryptionAlgorithm
parameters
'''
))
if encryption_algo == 'pbes2':
key_length = self['parameters']['key_derivation_func']['parameters']['key_length'].native
if key_length is not None:
return key_length
# If the KDF params don't specify the key size, we can infer it from
# the encryption scheme for all schemes except for RC5. However, in
# practical terms, neither OpenSSL or OS X support RC5 for PKCS#8
# so it is unlikely to be an issue that is run into.
return self['parameters']['encryption_scheme'].key_length
if encryption_algo.find('.') == -1:
return {
'pbes1_md2_des': 8,
'pbes1_md5_des': 8,
'pbes1_md2_rc2': 8,
'pbes1_md5_rc2': 8,
'pbes1_sha1_des': 8,
'pbes1_sha1_rc2': 8,
'pkcs12_sha1_rc4_128': 16,
'pkcs12_sha1_rc4_40': 5,
'pkcs12_sha1_tripledes_3key': 24,
'pkcs12_sha1_tripledes_2key': 16,
'pkcs12_sha1_rc2_128': 16,
'pkcs12_sha1_rc2_40': 5,
}[encryption_algo]
raise ValueError(unwrap(
'''
Unrecognized encryption algorithm "%s"
''',
encryption_algo
))
@property
def encryption_mode(self):
"""
Returns the name of the encryption mode to use.
:return:
A unicode string from one of the following: "cbc", "ecb", "ofb",
"cfb", "wrap", "gcm", "ccm", "wrap_pad"
"""
encryption_algo = self['algorithm'].native
if encryption_algo[0:7] in set(['aes128_', 'aes192_', 'aes256_']):
return encryption_algo[7:]
if encryption_algo[0:6] == 'pbes1_':
return 'cbc'
if encryption_algo[0:7] == 'pkcs12_':
return 'cbc'
if encryption_algo in set(['des', 'tripledes_3key', 'rc2', 'rc5']):
return 'cbc'
if encryption_algo == 'pbes2':
return self['parameters']['encryption_scheme'].encryption_mode
raise ValueError(unwrap(
'''
Unrecognized encryption algorithm "%s"
''',
encryption_algo
))
@property
def encryption_cipher(self):
"""
Returns the name of the symmetric encryption cipher to use. The key
length can be retrieved via the .key_length property to disabiguate
between different variations of TripleDES, AES, and the RC* ciphers.
:return:
A unicode string from one of the following: "rc2", "rc5", "des",
"tripledes", "aes"
"""
encryption_algo = self['algorithm'].native
if encryption_algo[0:7] in set(['aes128_', 'aes192_', 'aes256_']):
return 'aes'
if encryption_algo in set(['des', 'rc2', 'rc5']):
return encryption_algo
if encryption_algo == 'tripledes_3key':
return 'tripledes'
if encryption_algo == 'pbes2':
return self['parameters']['encryption_scheme'].encryption_cipher
if encryption_algo.find('.') == -1:
return {
'pbes1_md2_des': 'des',
'pbes1_md5_des': 'des',
'pbes1_md2_rc2': 'rc2',
'pbes1_md5_rc2': 'rc2',
'pbes1_sha1_des': 'des',
'pbes1_sha1_rc2': 'rc2',
'pkcs12_sha1_rc4_128': 'rc4',
'pkcs12_sha1_rc4_40': 'rc4',
'pkcs12_sha1_tripledes_3key': 'tripledes',
'pkcs12_sha1_tripledes_2key': 'tripledes',
'pkcs12_sha1_rc2_128': 'rc2',
'pkcs12_sha1_rc2_40': 'rc2',
}[encryption_algo]
raise ValueError(unwrap(
'''
Unrecognized encryption algorithm "%s"
''',
encryption_algo
))
@property
def encryption_block_size(self):
"""
Returns the block size of the encryption cipher, in bytes.
:return:
An integer that is the block size in bytes
"""
encryption_algo = self['algorithm'].native
if encryption_algo[0:7] in set(['aes128_', 'aes192_', 'aes256_']):
return 16
cipher_map = {
'des': 8,
'tripledes_3key': 8,
'rc2': 8,
}
if encryption_algo in cipher_map:
return cipher_map[encryption_algo]
if encryption_algo == 'rc5':
return self['parameters']['block_size_in_bits'].native // 8
if encryption_algo == 'pbes2':
return self['parameters']['encryption_scheme'].encryption_block_size
if encryption_algo.find('.') == -1:
return {
'pbes1_md2_des': 8,
'pbes1_md5_des': 8,
'pbes1_md2_rc2': 8,
'pbes1_md5_rc2': 8,
'pbes1_sha1_des': 8,
'pbes1_sha1_rc2': 8,
'pkcs12_sha1_rc4_128': 0,
'pkcs12_sha1_rc4_40': 0,
'pkcs12_sha1_tripledes_3key': 8,
'pkcs12_sha1_tripledes_2key': 8,
'pkcs12_sha1_rc2_128': 8,
'pkcs12_sha1_rc2_40': 8,
}[encryption_algo]
raise ValueError(unwrap(
'''
Unrecognized encryption algorithm "%s"
''',
encryption_algo
))
@property
def encryption_iv(self):
"""
Returns the byte string of the initialization vector for the encryption
scheme. Only the PBES2 stores the IV in the params. For PBES1, the IV
is derived from the KDF and this property will return None.
:return:
A byte string or None
"""
encryption_algo = self['algorithm'].native
if encryption_algo in set(['rc2', 'rc5']):
return self['parameters']['iv'].native
# For DES/Triple DES and AES the IV is the entirety of the parameters
octet_string_iv_oids = set([
'des',
'tripledes_3key',
'aes128_cbc',
'aes192_cbc',
'aes256_cbc',
'aes128_ofb',
'aes192_ofb',
'aes256_ofb',
])
if encryption_algo in octet_string_iv_oids:
return self['parameters'].native
if encryption_algo == 'pbes2':
return self['parameters']['encryption_scheme'].encryption_iv
# All of the PBES1 algos use their KDF to create the IV. For the pbkdf1,
# the KDF is told to generate a key that is an extra 8 bytes long, and
# that is used for the IV. For the PKCS#12 KDF, it is called with an id
# of 2 to generate the IV. In either case, we can't return the IV
# without knowing the user's password.
if encryption_algo.find('.') == -1:
return None
raise ValueError(unwrap(
'''
Unrecognized encryption algorithm "%s"
''',
encryption_algo
))
class Pbes2Params(Sequence):
_fields = [
('key_derivation_func', KdfAlgorithm),
('encryption_scheme', EncryptionAlgorithm),
]
class Pbmac1Params(Sequence):
_fields = [
('key_derivation_func', KdfAlgorithm),
('message_auth_scheme', HmacAlgorithm),
]
class Pkcs5MacId(ObjectIdentifier):
_map = {
'1.2.840.113549.1.5.14': 'pbmac1',
}
class Pkcs5MacAlgorithm(Sequence):
_fields = [
('algorithm', Pkcs5MacId),
('parameters', Any),
]
_oid_pair = ('algorithm', 'parameters')
_oid_specs = {
'pbmac1': Pbmac1Params,
}
EncryptionAlgorithm._oid_specs['pbes2'] = Pbes2Params
class AnyAlgorithmId(ObjectIdentifier):
_map = {}
def _setup(self):
_map = self.__class__._map
for other_cls in (EncryptionAlgorithmId, SignedDigestAlgorithmId, DigestAlgorithmId):
for oid, name in other_cls._map.items():
_map[oid] = name
class AnyAlgorithmIdentifier(_ForceNullParameters, Sequence):
_fields = [
('algorithm', AnyAlgorithmId),
('parameters', Any, {'optional': True}),
]
_oid_pair = ('algorithm', 'parameters')
_oid_specs = {}
def _setup(self):
Sequence._setup(self)
specs = self.__class__._oid_specs
for other_cls in (EncryptionAlgorithm, SignedDigestAlgorithm):
for oid, spec in other_cls._oid_specs.items():
specs[oid] = spec
| mit | c2f73584b34c85779f473e2358d020f7 | 29.165685 | 117 | 0.518304 | 3.295993 | false | false | false | false |
wbond/asn1crypto | tests/test_crl.py | 2 | 1043 | # coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
import unittest
import sys
import os
from asn1crypto import crl
from ._unittest_compat import patch
patch()
if sys.version_info < (3,):
byte_cls = str
num_cls = long # noqa
else:
byte_cls = bytes
num_cls = int
tests_root = os.path.dirname(__file__)
fixtures_dir = os.path.join(tests_root, 'fixtures')
class CRLTests(unittest.TestCase):
def test_parse_crl(self):
with open(os.path.join(fixtures_dir, 'eid2011.crl'), 'rb') as f:
cert_list = crl.CertificateList.load(f.read())
serial_numbers = []
for revoked_cert in cert_list['tbs_cert_list']['revoked_certificates']:
serial_numbers.append(revoked_cert['user_certificate'].native)
self.assertEqual(
15752,
len(serial_numbers)
)
for serial_number in serial_numbers:
self.assertIsInstance(
serial_number,
num_cls
)
| mit | 02b6ec664c695b77862e977a71e8561b | 23.833333 | 82 | 0.616491 | 3.608997 | false | true | false | false |
nschloe/meshio | src/meshio/_cli/_convert.py | 1 | 2173 | import numpy as np
from .._helpers import _writer_map, read, reader_map, write
def add_args(parser):
parser.add_argument("infile", type=str, help="mesh file to be read from")
parser.add_argument(
"--input-format",
"-i",
type=str,
choices=sorted(list(reader_map.keys())),
help="input file format",
default=None,
)
parser.add_argument(
"--output-format",
"-o",
type=str,
choices=sorted(list(_writer_map.keys())),
help="output file format",
default=None,
)
parser.add_argument(
"--ascii",
"-a",
action="store_true",
help="write in ASCII format variant (where applicable, default: binary)",
)
parser.add_argument("outfile", type=str, help="mesh file to be written to")
parser.add_argument(
"--float-format",
"-f",
type=str,
help="float format used in output ASCII files (default: .16e)",
)
parser.add_argument(
"--sets-to-int-data",
"-s",
action="store_true",
help="if possible, convert sets to integer data (useful if the output type does not support sets)",
)
parser.add_argument(
"--int-data-to-sets",
"-d",
action="store_true",
help="if possible, convert integer data to sets (useful if the output type does not support integer data)",
)
def convert(args):
# read mesh data
mesh = read(args.infile, file_format=args.input_format)
# Some converters (like VTK) require `points` to be contiguous.
mesh.points = np.ascontiguousarray(mesh.points)
if args.sets_to_int_data:
mesh.point_sets_to_data()
mesh.cell_sets_to_data()
if args.int_data_to_sets:
for key in mesh.point_data:
mesh.point_data_to_sets(key)
for key in mesh.cell_data:
mesh.cell_data_to_sets(key)
# write it out
kwargs = {"file_format": args.output_format}
if args.float_format is not None:
kwargs["float_fmt"] = args.float_format
if args.ascii:
kwargs["binary"] = False
write(args.outfile, mesh, **kwargs)
| mit | 99c8fc204bc32011fab42ac3e81c3c71 | 27.973333 | 115 | 0.587207 | 3.708191 | false | false | false | false |
graphql-python/graphene | graphene/types/tests/test_base64.py | 1 | 2798 | import base64
from graphql import GraphQLError
from ..objecttype import ObjectType
from ..scalars import String
from ..schema import Schema
from ..base64 import Base64
class Query(ObjectType):
base64 = Base64(_in=Base64(name="input"), _match=String(name="match"))
bytes_as_base64 = Base64()
string_as_base64 = Base64()
number_as_base64 = Base64()
def resolve_base64(self, info, _in=None, _match=None):
if _match:
assert _in == _match
return _in
def resolve_bytes_as_base64(self, info):
return b"Hello world"
def resolve_string_as_base64(self, info):
return "Spam and eggs"
def resolve_number_as_base64(self, info):
return 42
schema = Schema(query=Query)
def test_base64_query():
base64_value = base64.b64encode(b"Random string").decode("utf-8")
result = schema.execute(
"""{{ base64(input: "{}", match: "Random string") }}""".format(base64_value)
)
assert not result.errors
assert result.data == {"base64": base64_value}
def test_base64_query_with_variable():
base64_value = base64.b64encode(b"Another string").decode("utf-8")
# test datetime variable in string representation
result = schema.execute(
"""
query GetBase64($base64: Base64) {
base64(input: $base64, match: "Another string")
}
""",
variables={"base64": base64_value},
)
assert not result.errors
assert result.data == {"base64": base64_value}
def test_base64_query_none():
result = schema.execute("""{ base64 }""")
assert not result.errors
assert result.data == {"base64": None}
def test_base64_query_invalid():
bad_inputs = [dict(), 123, "This is not valid base64"]
for input_ in bad_inputs:
result = schema.execute(
"""{ base64(input: $input) }""", variables={"input": input_}
)
assert isinstance(result.errors, list)
assert len(result.errors) == 1
assert isinstance(result.errors[0], GraphQLError)
assert result.data is None
def test_base64_from_bytes():
base64_value = base64.b64encode(b"Hello world").decode("utf-8")
result = schema.execute("""{ bytesAsBase64 }""")
assert not result.errors
assert result.data == {"bytesAsBase64": base64_value}
def test_base64_from_string():
base64_value = base64.b64encode(b"Spam and eggs").decode("utf-8")
result = schema.execute("""{ stringAsBase64 }""")
assert not result.errors
assert result.data == {"stringAsBase64": base64_value}
def test_base64_from_number():
base64_value = base64.b64encode(b"42").decode("utf-8")
result = schema.execute("""{ numberAsBase64 }""")
assert not result.errors
assert result.data == {"numberAsBase64": base64_value}
| mit | 0008efb94d2291f78b84a8b5d791cb28 | 27.845361 | 84 | 0.639385 | 3.493134 | false | true | false | false |
graphql-python/graphene | graphene/types/tests/test_objecttype.py | 1 | 8250 | from pytest import raises
from ..field import Field
from ..interface import Interface
from ..objecttype import ObjectType
from ..scalars import String
from ..schema import Schema
from ..structures import NonNull
from ..unmountedtype import UnmountedType
class MyType(Interface):
pass
class Container(ObjectType):
field1 = Field(MyType)
field2 = Field(MyType)
class MyInterface(Interface):
ifield = Field(MyType)
class ContainerWithInterface(ObjectType):
class Meta:
interfaces = (MyInterface,)
field1 = Field(MyType)
field2 = Field(MyType)
class MyScalar(UnmountedType):
def get_type(self):
return MyType
def test_generate_objecttype():
class MyObjectType(ObjectType):
"""Documentation"""
assert MyObjectType._meta.name == "MyObjectType"
assert MyObjectType._meta.description == "Documentation"
assert MyObjectType._meta.interfaces == tuple()
assert MyObjectType._meta.fields == {}
assert (
repr(MyObjectType)
== "<MyObjectType meta=<ObjectTypeOptions name='MyObjectType'>>"
)
def test_generate_objecttype_with_meta():
class MyObjectType(ObjectType):
class Meta:
name = "MyOtherObjectType"
description = "Documentation"
interfaces = (MyType,)
assert MyObjectType._meta.name == "MyOtherObjectType"
assert MyObjectType._meta.description == "Documentation"
assert MyObjectType._meta.interfaces == (MyType,)
def test_generate_lazy_objecttype():
class MyObjectType(ObjectType):
example = Field(lambda: InnerObjectType, required=True)
class InnerObjectType(ObjectType):
field = Field(MyType)
assert MyObjectType._meta.name == "MyObjectType"
example_field = MyObjectType._meta.fields["example"]
assert isinstance(example_field.type, NonNull)
assert example_field.type.of_type == InnerObjectType
def test_generate_objecttype_with_fields():
class MyObjectType(ObjectType):
field = Field(MyType)
assert "field" in MyObjectType._meta.fields
def test_generate_objecttype_with_private_attributes():
class MyObjectType(ObjectType):
def __init__(self, _private_state=None, **kwargs):
self._private_state = _private_state
super().__init__(**kwargs)
_private_state = None
assert "_private_state" not in MyObjectType._meta.fields
assert hasattr(MyObjectType, "_private_state")
m = MyObjectType(_private_state="custom")
assert m._private_state == "custom"
with raises(TypeError):
MyObjectType(_other_private_state="Wrong")
def test_ordered_fields_in_objecttype():
class MyObjectType(ObjectType):
b = Field(MyType)
a = Field(MyType)
field = MyScalar()
asa = Field(MyType)
assert list(MyObjectType._meta.fields) == ["b", "a", "field", "asa"]
def test_generate_objecttype_inherit_abstracttype():
class MyAbstractType:
field1 = MyScalar()
class MyObjectType(ObjectType, MyAbstractType):
field2 = MyScalar()
assert MyObjectType._meta.description is None
assert MyObjectType._meta.interfaces == ()
assert MyObjectType._meta.name == "MyObjectType"
assert list(MyObjectType._meta.fields) == ["field1", "field2"]
assert list(map(type, MyObjectType._meta.fields.values())) == [Field, Field]
def test_generate_objecttype_inherit_abstracttype_reversed():
class MyAbstractType:
field1 = MyScalar()
class MyObjectType(MyAbstractType, ObjectType):
field2 = MyScalar()
assert MyObjectType._meta.description is None
assert MyObjectType._meta.interfaces == ()
assert MyObjectType._meta.name == "MyObjectType"
assert list(MyObjectType._meta.fields) == ["field1", "field2"]
assert list(map(type, MyObjectType._meta.fields.values())) == [Field, Field]
def test_generate_objecttype_unmountedtype():
class MyObjectType(ObjectType):
field = MyScalar()
assert "field" in MyObjectType._meta.fields
assert isinstance(MyObjectType._meta.fields["field"], Field)
def test_parent_container_get_fields():
assert list(Container._meta.fields) == ["field1", "field2"]
def test_parent_container_interface_get_fields():
assert list(ContainerWithInterface._meta.fields) == ["ifield", "field1", "field2"]
def test_objecttype_as_container_only_args():
container = Container("1", "2")
assert container.field1 == "1"
assert container.field2 == "2"
def test_objecttype_repr():
container = Container("1", "2")
assert repr(container) == "Container(field1='1', field2='2')"
def test_objecttype_eq():
container1 = Container("1", "2")
container2 = Container("1", "2")
container3 = Container("2", "3")
assert container1 == container1
assert container1 == container2
assert container2 != container3
def test_objecttype_as_container_args_kwargs():
container = Container("1", field2="2")
assert container.field1 == "1"
assert container.field2 == "2"
def test_objecttype_as_container_few_kwargs():
container = Container(field2="2")
assert container.field2 == "2"
def test_objecttype_as_container_all_kwargs():
container = Container(field1="1", field2="2")
assert container.field1 == "1"
assert container.field2 == "2"
def test_objecttype_as_container_extra_args():
msg = r"__init__\(\) takes from 1 to 3 positional arguments but 4 were given"
with raises(TypeError, match=msg):
Container("1", "2", "3") # type: ignore
def test_objecttype_as_container_invalid_kwargs():
msg = r"__init__\(\) got an unexpected keyword argument 'unexisting_field'"
with raises(TypeError, match=msg):
Container(unexisting_field="3") # type: ignore
def test_objecttype_container_benchmark(benchmark):
@benchmark
def create_objecttype():
Container(field1="field1", field2="field2")
def test_generate_objecttype_description():
class MyObjectType(ObjectType):
"""
Documentation
Documentation line 2
"""
assert MyObjectType._meta.description == "Documentation\n\nDocumentation line 2"
def test_objecttype_with_possible_types():
class MyObjectType(ObjectType):
class Meta:
possible_types = (dict,)
assert MyObjectType._meta.possible_types == (dict,)
def test_objecttype_with_possible_types_and_is_type_of_should_raise():
with raises(AssertionError) as excinfo:
class MyObjectType(ObjectType):
class Meta:
possible_types = (dict,)
@classmethod
def is_type_of(cls, root, context, info):
return False
assert str(excinfo.value) == (
"MyObjectType.Meta.possible_types will cause type collision with "
"MyObjectType.is_type_of. Please use one or other."
)
def test_objecttype_no_fields_output():
class User(ObjectType):
name = String()
class Query(ObjectType):
user = Field(User)
def resolve_user(self, info):
return User()
schema = Schema(query=Query)
result = schema.execute(
""" query basequery {
user {
name
}
}
"""
)
assert not result.errors
assert result.data == {"user": {"name": None}}
def test_abstract_objecttype_can_str():
class MyObjectType(ObjectType):
class Meta:
abstract = True
field = MyScalar()
assert str(MyObjectType) == "MyObjectType"
def test_objecttype_meta_with_annotations():
class Query(ObjectType):
class Meta:
name: str = "oops"
hello = String()
def resolve_hello(self, info):
return "Hello"
schema = Schema(query=Query)
assert schema is not None
def test_objecttype_meta_arguments():
    # Meta options may be passed as class keyword arguments; interface fields
    # must precede the type's own fields in field order.
    class MyInterface(Interface):
        foo = String()
    class MyType(ObjectType, interfaces=[MyInterface]):
        bar = String()
    assert MyType._meta.interfaces == [MyInterface]
    assert list(MyType._meta.fields.keys()) == ["foo", "bar"]
def test_objecttype_type_name():
    # The `name` class keyword overrides the GraphQL type name.
    class MyObjectType(ObjectType, name="FooType"):
        pass
    assert MyObjectType._meta.name == "FooType"
| mit | c1551d1476d7ec870154f1e2fbef1b89 | 25.699029 | 86 | 0.658545 | 3.828306 | false | true | false | false |
nschloe/meshio | src/meshio/xdmf/common.py | 1 | 5436 | import numpy as np
from .._exceptions import ReadError
from .._mesh import CellBlock
# numpy dtype name -> XDMF (NumberType, Precision-in-bytes) attribute pair.
numpy_to_xdmf_dtype = {
    "int8": ("Int", "1"),
    "int16": ("Int", "2"),
    "int32": ("Int", "4"),
    "int64": ("Int", "8"),
    "uint8": ("UInt", "1"),
    "uint16": ("UInt", "2"),
    "uint32": ("UInt", "4"),
    "uint64": ("UInt", "8"),
    "float32": ("Float", "4"),
    "float64": ("Float", "8"),
}
# Inverse lookup: (NumberType, Precision) -> numpy dtype name.
xdmf_to_numpy_type = {v: k for k, v in numpy_to_xdmf_dtype.items()}
# printf-style format used when serializing arrays as ASCII data items.
dtype_to_format_string = {
    "int32": "%d",
    "int64": "%d",
    "uint32": "%d",
    "uint64": "%d",
    "float32": "%.7e",
    "float64": "%.16e",
}
# See
# <https://xdmf.org/index.php/XDMF_Model_and_Format#XML_Element_.28Xdmf_ClassName.29_and_Default_XML_Attributes>
# <https://gitlab.kitware.com/xdmf/xdmf/blob/master/Xdmf.dtd#L34>
# for XDMF types.
# There appears to be no particular consistency, so allow for different
# alternatives as well.
# meshio cell type -> accepted XDMF TopologyType spellings (first entry is
# the canonical one).
meshio_to_xdmf_type = {
    "vertex": ["Polyvertex"],
    "line": ["Polyline"],
    "line3": ["Edge_3"],
    "quad": ["Quadrilateral"],
    "quad8": ["Quadrilateral_8", "Quad_8"],
    "quad9": ["Quadrilateral_9", "Quad_9"],
    "pyramid": ["Pyramid"],
    "pyramid13": ["Pyramid_13"],
    "tetra": ["Tetrahedron"],
    "triangle": ["Triangle"],
    "triangle6": ["Triangle_6", "Tri_6"],
    "tetra10": ["Tetrahedron_10", "Tet_10"],
    "wedge": ["Wedge"],
    "wedge15": ["Wedge_15"],
    "wedge18": ["Wedge_18"],
    "hexahedron": ["Hexahedron"],
    "hexahedron20": ["Hexahedron_20", "Hex_20"],
    "hexahedron24": ["Hexahedron_24", "Hex_24"],
    "hexahedron27": ["Hexahedron_27", "Hex_27"],
    "hexahedron64": ["Hexahedron_64", "Hex_64"],
    "hexahedron125": ["Hexahedron_125", "Hex_125"],
    "hexahedron216": ["Hexahedron_216", "Hex_216"],
    "hexahedron343": ["Hexahedron_343", "Hex_343"],
    "hexahedron512": ["Hexahedron_512", "Hex_512"],
    "hexahedron729": ["Hexahedron_729", "Hex_729"],
    "hexahedron1000": ["Hexahedron_1000", "Hex_100"],
    "hexahedron1331": ["Hexahedron_1331", "Hex_1331"],
}
# Inverse lookup: every accepted XDMF spelling -> meshio cell type.
xdmf_to_meshio_type = {v: k for k, vals in meshio_to_xdmf_type.items() for v in vals}
# Check out
# <https://gitlab.kitware.com/xdmf/xdmf/blob/master/XdmfTopologyType.cpp>
# for the list of indices.
# Numeric XDMF topology-type index -> meshio cell type (used by the HDF path).
xdmf_idx_to_meshio_type = {
    0x1: "vertex",
    0x2: "line",
    0x4: "triangle",
    0x5: "quad",
    0x6: "tetra",
    0x7: "pyramid",
    0x8: "wedge",
    0x9: "hexahedron",
    0x22: "line3",
    0x23: "quad9",
    0x24: "triangle6",
    0x25: "quad8",
    0x26: "tetra10",
    0x27: "pyramid13",
    0x28: "wedge15",
    0x29: "wedge18",
    0x30: "hexahedron20",
    0x31: "hexahedron24",
    0x32: "hexahedron27",
    0x33: "hexahedron64",
    0x34: "hexahedron125",
    0x35: "hexahedron216",
    0x36: "hexahedron343",
    0x37: "hexahedron512",
    0x38: "hexahedron729",
    0x39: "hexahedron1000",
    0x40: "hexahedron1331",
    # 0x41: 'hexahedron_spectral_64',
    # 0x42: 'hexahedron_spectral_125',
    # 0x43: 'hexahedron_spectral_216',
    # 0x44: 'hexahedron_spectral_343',
    # 0x45: 'hexahedron_spectral_512',
    # 0x46: 'hexahedron_spectral_729',
    # 0x47: 'hexahedron_spectral_1000',
    # 0x48: 'hexahedron_spectral_1331',
}
# Inverse lookup for writing.
meshio_type_to_xdmf_index = {v: k for k, v in xdmf_idx_to_meshio_type.items()}
def translate_mixed_cells(data):
    """Convert an XDMF "Mixed" topology array into a list of CellBlocks.

    `data` is the flat connectivity of a mixed-topology grid; runs of
    identical cell types become one CellBlock each.
    """
    # Translate it into the cells dictionary.
    # `data` is a one-dimensional vector with
    # (cell_type1, p0, p1, ... ,pk, cell_type2, p10, p11, ..., p1k, ...
    # https://xdmf.org/index.php/XDMF_Model_and_Format#Arbitrary
    # https://gitlab.kitware.com/xdmf/xdmf/blob/master/XdmfTopologyType.hpp#L394
    # Nodes per cell for the mixed-topology type codes handled here.
    # NOTE(review): key 11 ("triangle6") has no counterpart in
    # xdmf_idx_to_meshio_type (which maps triangle6 to 0x24), so a grid using
    # code 11 would raise KeyError below -- confirm the intended id.
    xdmf_idx_to_num_nodes = {
        1: 1,  # vertex
        2: 2,  # line
        4: 3,  # triangle
        5: 4,  # quad
        6: 4,  # tet
        7: 5,  # pyramid
        8: 6,  # wedge
        9: 8,  # hex
        11: 6,  # triangle6
    }
    # collect types and offsets
    types = []
    offsets = []
    r = 0
    while r < len(data):
        xdmf_type = data[r]
        types.append(xdmf_type)
        offsets.append(r)
        if xdmf_type == 2:  # line
            # Polylines carry an extra node-count entry after the type code;
            # only 2-node polylines (plain lines) are supported.
            if data[r + 1] != 2:  # polyline
                raise ReadError("XDMF reader: Only supports 2-point lines for now")
            r += 1
        r += 1
        r += xdmf_idx_to_num_nodes[xdmf_type]
    types = np.array(types)
    offsets = np.array(offsets)
    # Boundaries between runs of equal type codes (vectorized "groupby").
    b = np.concatenate([[0], np.where(types[:-1] != types[1:])[0] + 1, [len(types)]])
    cells = []
    for start, end in zip(b[:-1], b[1:]):
        meshio_type = xdmf_idx_to_meshio_type[types[start]]
        n = xdmf_idx_to_num_nodes[types[start]]
        # Skip 2 header entries for lines (type + count), 1 otherwise.
        point_offsets = offsets[start:end] + (2 if types[start] == 2 else 1)
        indices = np.array([np.arange(n) + o for o in point_offsets])
        cells.append(CellBlock(meshio_type, data[indices]))
    return cells
def attribute_type(data):
    """Classify an array as an XDMF AttributeType string.

    Returns one of "Scalar", "Vector", "Tensor", "Tensor6" or "Matrix";
    raises ReadError for shapes that fit none of these.
    <https://xdmf.org/index.php/XDMF_Model_and_Format#Attribute>
    """
    ndim = len(data.shape)
    if ndim == 1 or (ndim == 2 and data.shape[1] == 1):
        return "Scalar"
    if ndim == 2 and data.shape[1] in (2, 3):
        return "Vector"
    if (ndim == 2 and data.shape[1] == 9) or (
        ndim == 3 and data.shape[1] == 3 and data.shape[2] == 3
    ):
        return "Tensor"
    if ndim == 2 and data.shape[1] == 6:
        return "Tensor6"
    if ndim != 3:
        raise ReadError()
    return "Matrix"
| mit | 17b3fed9e509b5db9459e4daed1cb0c1 | 30.062857 | 112 | 0.570272 | 2.667321 | false | false | false | false |
nschloe/meshio | src/meshio/h5m/_h5m.py | 1 | 8950 | """
I/O for h5m, cf.
<https://www.mcs.anl.gov/~fathom/moab-docs/html/h5mmain.html>.
"""
from datetime import datetime
import numpy as np
from .. import __about__
from .._common import warn
from .._helpers import register_format
from .._mesh import CellBlock, Mesh
# def _int_to_bool_list(num):
# # From <https://stackoverflow.com/a/33608387/353337>.
# bin_string = format(num, '04b')
# return [x == '1' for x in bin_string[::-1]]
def read(filename):
    """Read an H5M (MOAB) file and return a meshio Mesh.

    NOTE(review): the h5py.File handle is never closed; a context manager
    would be safer (left unchanged here).
    """
    import h5py
    f = h5py.File(filename, "r")
    dset = f["tstt"]
    points = dset["nodes"]["coordinates"][()]
    # read point data
    point_data = {}
    if "tags" in dset["nodes"]:
        for name, dataset in dset["nodes"]["tags"].items():
            point_data[name] = dataset[()]
    # # Assert that the GLOBAL_IDs are contiguous.
    # point_gids = dset['nodes']['tags']['GLOBAL_ID'][()]
    # point_start_gid = dset['nodes']['coordinates'].attrs['start_id']
    # point_end_gid = point_start_gid + len(point_gids) - 1
    # assert all(point_gids == range(point_start_gid, point_end_gid + 1))
    # H5M element-group name -> meshio cell type.
    h5m_to_meshio_type = {
        "Edge2": "line",
        "Hex8": "hexahedron",
        "Prism6": "wedge",
        "Pyramid5": "pyramid",
        "Quad4": "quad",
        "Tri3": "triangle",
        "Tet4": "tetra",
    }
    cells = []
    cell_data = {}
    for h5m_type, data in dset["elements"].items():
        meshio_type = h5m_to_meshio_type[h5m_type]
        conn = data["connectivity"]
        # Note that the indices are off by 1 in h5m.
        cells.append(CellBlock(meshio_type, conn[()] - 1))
        # TODO bring cell data back
        # if 'tags' in data:
        #     for name, dataset in data['tags'].items():
        #         cell_data[name] = dataset[()]
    # The `sets` in H5M are special in that they represent a segration of data
    # in the current file, particularly by a load balancer (Metis, Zoltan,
    # etc.). This segregation has no equivalent in other data types, but is
    # certainly worthwhile visualizing.
    # Hence, we will translate the sets into cell data with the prefix "set::"
    # here.
    field_data = {}
    # TODO deal with sets
    # if 'sets' in dset and 'contents' in dset['sets']:
    #     # read sets
    #     sets_contents = dset['sets']['contents'][()]
    #     sets_list = dset['sets']['list'][()]
    #     sets_tags = dset['sets']['tags']
    #     cell_start_gid = conn.attrs['start_id']
    #     cell_gids = cell_start_gid + elems['tags']['GLOBAL_ID'][()]
    #     cell_end_gid = cell_start_gid + len(cell_gids) - 1
    #     assert all(cell_gids == range(cell_start_gid, cell_end_gid + 1))
    #     # create the sets
    #     for key, value in sets_tags.items():
    #         mod_key = 'set::' + key
    #         cell_data[mod_key] = np.empty(len(cells), dtype=int)
    #         end = 0
    #         for k, row in enumerate(sets_list):
    #             bits = _int_to_bool_list(row[3])
    #             # is_owner = bits[0]
    #             # is_unique = bits[1]
    #             # is_ordered = bits[2]
    #             is_range_compressed = bits[3]
    #             if is_range_compressed:
    #                 start_gids = sets_contents[end:row[0]+1:2]
    #                 lengths = sets_contents[end+1:row[0]+1:2]
    #                 for start_gid, length in zip(start_gids, lengths):
    #                     end_gid = start_gid + length - 1
    #                     if start_gid >= cell_start_gid and \
    #                             end_gid <= cell_end_gid:
    #                         i0 = start_gid - cell_start_gid
    #                         i1 = end_gid - cell_start_gid + 1
    #                         cell_data[mod_key][i0:i1] = value[k]
    #                     else:
    #                         # TODO deal with point data
    #                         raise RuntimeError('')
    #             else:
    #                 gids = sets_contents[end:row[0]+1]
    #                 cell_data[mod_key][gids - cell_start_gid] = value[k]
    #             end = row[0] + 1
    return Mesh(
        points, cells, point_data=point_data, cell_data=cell_data, field_data=field_data
    )
def write(filename, mesh, add_global_ids=True, compression="gzip", compression_opts=4):
    """Write a meshio Mesh to an H5M (MOAB) file.

    Only line, triangle and tetra cells are written; other cell blocks are
    skipped with a warning. GLOBAL_IDs are added to the point data unless
    ``add_global_ids`` is False.
    """
    import h5py
    f = h5py.File(filename, "w")
    tstt = f.create_group("tstt")
    # The base index for h5m is 1.
    global_id = 1
    # add nodes
    nodes = tstt.create_group("nodes")
    coords = nodes.create_dataset(
        "coordinates",
        data=mesh.points,
        compression=compression,
        compression_opts=compression_opts,
    )
    coords.attrs.create("start_id", global_id)
    global_id += len(mesh.points)
    # Global tags
    tstt_tags = tstt.create_group("tags")
    # The GLOBAL_ID associated with a point is used to identify points if
    # distributed across several processes. mbpart automatically adds them,
    # too.
    # Copy to pd to avoid changing point_data. The items are not deep-copied.
    pd = mesh.point_data.copy()
    if "GLOBAL_ID" not in pd and add_global_ids:
        pd["GLOBAL_ID"] = np.arange(1, len(mesh.points) + 1)
    # add point data
    if pd:
        tags = nodes.create_group("tags")
        for key, data in pd.items():
            if len(data.shape) == 1:
                dtype = data.dtype
                tags.create_dataset(
                    key,
                    data=data,
                    compression=compression,
                    compression_opts=compression_opts,
                )
            else:
                # H5M doesn't accept n-x-k arrays as data; it wants an n-x-1
                # array with k-tuples as entries.
                n, k = data.shape
                dtype = np.dtype((data.dtype, (k,)))
                dset = tags.create_dataset(
                    key,
                    (n,),
                    dtype=dtype,
                    compression=compression,
                    compression_opts=compression_opts,
                )
                dset[:] = data
            # Create entry in global tags
            g = tstt_tags.create_group(key)
            g["type"] = dtype
            # Add a class tag:
            # From
            # <https://lists.mcs.anl.gov/pipermail/moab-dev/2015/007104.html>:
            # ```
            # /* Was dense tag data in mesh database */
            #  define mhdf_DENSE_TYPE 2
            # /** \brief Was sparse tag data in mesh database */
            # #define mhdf_SPARSE_TYPE 1
            # /** \brief Was bit-field tag data in mesh database */
            # #define mhdf_BIT_TYPE 0
            # /** \brief Unused */
            # #define mhdf_MESH_TYPE 3
            #
            g.attrs["class"] = 2
    # add elements
    elements = tstt.create_group("elements")
    elem_dt = h5py.special_dtype(
        enum=(
            "i",
            {
                "Edge": 1,
                "Tri": 2,
                "Quad": 3,
                "Polygon": 4,
                "Tet": 5,
                "Pyramid": 6,
                "Prism": 7,
                "Knife": 8,
                "Hex": 9,
                "Polyhedron": 10,
            },
        )
    )
    tstt["elemtypes"] = elem_dt
    tstt.create_dataset(
        "history",
        data=[
            __name__.encode(),
            __about__.__version__.encode(),
            str(datetime.now()).encode(),
        ],
        compression=compression,
        compression_opts=compression_opts,
    )
    # number of nodes to h5m name, element type
    meshio_to_h5m_type = {
        "line": {"name": "Edge2", "type": 1},
        "triangle": {"name": "Tri3", "type": 2},
        "tetra": {"name": "Tet4", "type": 5},
    }
    for cell_block in mesh.cells:
        key = cell_block.type
        data = cell_block.data
        if key not in meshio_to_h5m_type:
            # warn() takes a single message string (no lazy %-style args, cf.
            # the f-string usage elsewhere in meshio), so format eagerly.
            # The previous call passed `key` as a second positional argument,
            # which was never interpolated into the message.
            warn(f"Unsupported H5M element type '{key}'. Skipping.")
            continue
        this_type = meshio_to_h5m_type[key]
        elem_group = elements.create_group(this_type["name"])
        elem_group.attrs.create("element_type", this_type["type"], dtype=elem_dt)
        # h5m node indices are 1-based
        conn = elem_group.create_dataset(
            "connectivity",
            data=(data + 1),
            compression=compression,
            compression_opts=compression_opts,
        )
        conn.attrs.create("start_id", global_id)
        global_id += len(data)
        # add cell data
        # NOTE(review): this iterates mesh.cell_data as {type: {name: array}};
        # confirm against the meshio cell_data layout in use, and note that
        # "tags" would be re-created for every cell block.
        for cell_type, cd in mesh.cell_data.items():
            if cd:
                tags = elem_group.create_group("tags")
                for key, value in cd.items():
                    tags.create_dataset(
                        key,
                        data=value,
                        compression=compression,
                        compression_opts=compression_opts,
                    )
    # add empty set -- MOAB wants this
    sets = tstt.create_group("sets")
    sets.create_group("tags")
    # set max_id
    tstt.attrs.create("max_id", global_id, dtype="u8")
# Register the H5M reader/writer with meshio's format dispatch.
register_format("h5m", [".h5m"], read, {"h5m": write})
| mit | 89ed67456ea688904cf71346a60bd6be | 32.148148 | 88 | 0.513743 | 3.538948 | false | false | false | false |
nschloe/meshio | tests/performance.py | 1 | 11085 | import os
import pathlib
import tempfile
import time
import tracemalloc
import dufte
import matplotlib.pyplot as plt
import meshzoo
import numpy as np
import meshio
def generate_triangular_mesh():
    """Return a spherical triangle mesh, cached on disk as sphere.xdmf."""
    p = pathlib.Path("sphere.xdmf")
    # Idiomatic bound call instead of the unbound `pathlib.Path.is_file(p)`.
    if p.is_file():
        mesh = meshio.read(p)
    else:
        points, cells = meshzoo.icosa_sphere(300)
        mesh = meshio.Mesh(points, {"triangle": cells})
        mesh.write(p)
    return mesh
def generate_tetrahedral_mesh():
    """Generates a fairly large tetrahedral mesh, cached as cache.xdmf."""
    # Bug fix: `pathlib.Path.is_file("cache.xdmf")` called the unbound method
    # with a `str` receiver, which raises at runtime (str has no stat()).
    # Construct a Path and call the bound method instead, mirroring
    # generate_triangular_mesh().
    if pathlib.Path("cache.xdmf").is_file():
        mesh = meshio.read("cache.xdmf")
    else:
        import pygalmesh
        s = pygalmesh.Ball([0, 0, 0], 1.0)
        mesh = pygalmesh.generate_mesh(s, cell_size=2.0e-2, verbose=True)
        # mesh = pygalmesh.generate_mesh(s, cell_size=1.0e-1, verbose=True)
        # NOTE(review): these assignments follow an older meshio API
        # (point_data as a list, nested cell_data dict) -- confirm.
        mesh.cells = {"tetra": mesh.cells["tetra"]}
        mesh.point_data = []
        mesh.cell_data = {"tetra": {}}
        mesh.write("cache.xdmf")
    return mesh
def plot_speed(names, elapsed_write, elapsed_read):
    """Save horizontal bar charts of write/read times to performance.svg."""
    plt.style.use(dufte.style)
    names = np.asarray(names)
    elapsed_write = np.asarray(elapsed_write)
    elapsed_read = np.asarray(elapsed_read)
    fig, ax = plt.subplots(1, 2, figsize=(12, 8))
    # Sort slowest-first so the longest bars appear at the top.
    idx = np.argsort(elapsed_write)[::-1]
    ax[0].barh(range(len(names)), elapsed_write[idx], align="center")
    ax[0].set_yticks(range(len(names)))
    ax[0].set_yticklabels(names[idx])
    ax[0].set_xlabel("time (s)")
    ax[0].set_title("write")
    ax[0].grid()
    idx = np.argsort(elapsed_read)[::-1]
    ax[1].barh(range(len(names)), elapsed_read[idx], align="center")
    ax[1].set_yticks(range(len(names)))
    ax[1].set_yticklabels(names[idx])
    ax[1].set_xlabel("time (s)")
    ax[1].set_title("read")
    ax[1].grid()
    fig.tight_layout()
    # plt.show()
    fig.savefig("performance.svg", transparent=True, bbox_inches="tight")
    plt.close()
def plot_file_sizes(names, file_sizes, mem_size):
    """Save a bar chart of per-format file sizes (MB) to filesizes.svg.

    A red vertical line marks the in-memory size of the mesh for reference.
    """
    idx = np.argsort(file_sizes)
    file_sizes = [file_sizes[i] for i in idx]
    names = [names[i] for i in idx]
    plt.figure(figsize=(8, 8))
    ax = plt.gca()
    y_pos = np.arange(len(file_sizes))
    ax.barh(y_pos, file_sizes, align="center")
    #
    # Reference line: raw in-memory size of the mesh.
    ylim = ax.get_ylim()
    plt.plot(
        [mem_size, mem_size], [-2, len(file_sizes) + 2], "C3", linewidth=2.0, zorder=0
    )
    ax.set_ylim(ylim)
    #
    ax.set_yticks(y_pos)
    ax.set_yticklabels(names)
    ax.invert_yaxis()  # labels read top-to-bottom
    ax.set_xlabel("file size [MB]")
    ax.set_title("file sizes")
    plt.grid()
    # plt.show()
    plt.savefig("filesizes.svg", transparent=True, bbox_inches="tight")
    plt.close()
def plot_memory_usage(names, peak_memory_write, peak_memory_read, mem_size):
    """Save bar charts of peak write/read memory (MB) to memory.svg.

    A red vertical line marks the in-memory size of the mesh in both panels.
    """
    names = np.asarray(names)
    peak_memory_write = np.asarray(peak_memory_write)
    peak_memory_read = np.asarray(peak_memory_read)
    fig, ax = plt.subplots(1, 2, figsize=(12, 8))
    idx = np.argsort(peak_memory_write)[::-1]
    ax[0].barh(range(len(names)), peak_memory_write[idx], align="center")
    ax[0].set_yticks(range(len(names)))
    ax[0].set_yticklabels(names[idx])
    ax[0].set_xlabel("peak memory [MB]")
    ax[0].set_title("write")
    ax[0].grid()
    # plot memsize of mesh
    ylim = ax[0].get_ylim()
    ax[0].plot(
        [mem_size, mem_size], [-2, len(names) + 2], "C3", linewidth=2.0, zorder=0
    )
    ax[0].set_ylim(ylim)
    idx = np.argsort(peak_memory_read)[::-1]
    ax[1].barh(range(len(names)), peak_memory_read[idx], align="center")
    ax[1].set_yticks(range(len(names)))
    ax[1].set_yticklabels(names[idx])
    ax[1].set_xlabel("peak memory [MB]")
    ax[1].set_title("read")
    ax[1].grid()
    # plot memsize of mesh
    ylim = ax[1].get_ylim()
    ax[1].plot(
        [mem_size, mem_size], [-2, len(names) + 2], "C3", linewidth=2.0, zorder=0
    )
    ax[1].set_ylim(ylim)
    fig.tight_layout()
    # plt.show()
    fig.savefig("memory.svg", transparent=True, bbox_inches="tight")
    plt.close()
def read_write(plot=False):
    """Benchmark write/read speed, file size and peak memory per format.

    Each format entry maps a display name to (writer, reader, output files).
    Results are printed as a table and, with plot=True, saved as SVG charts.
    """
    # mesh = generate_tetrahedral_mesh()
    mesh = generate_triangular_mesh()
    print(mesh)
    # Raw in-memory footprint of the mesh (points + connectivity), in MB.
    mem_size = mesh.points.nbytes + mesh.cells[0].data.nbytes
    mem_size /= 1024.0**2
    print(f"mem_size: {mem_size:.2f} MB")
    formats = {
        "Abaqus": (meshio.abaqus.write, meshio.abaqus.read, ["out.inp"]),
        "Ansys (ASCII)": (
            lambda f, m: meshio.ansys.write(f, m, binary=False),
            meshio.ansys.read,
            ["out.ans"],
        ),
        # "Ansys (binary)": (
        #     lambda f, m: meshio.ansys.write(f, m, binary=True),
        #     meshio.ansys.read,
        #     ["out.ans"],
        # ),
        "AVS-UCD": (meshio.avsucd.write, meshio.avsucd.read, ["out.ucd"]),
        # "CGNS": (meshio.cgns.write, meshio.cgns.read, ["out.cgns"]),
        "Dolfin-XML": (meshio.dolfin.write, meshio.dolfin.read, ["out.xml"]),
        "Exodus": (meshio.exodus.write, meshio.exodus.read, ["out.e"]),
        # "FLAC3D": (meshio.flac3d.write, meshio.flac3d.read, ["out.f3grid"]),
        "Gmsh 4.1 (ASCII)": (
            lambda f, m: meshio.gmsh.write(f, m, binary=False),
            meshio.gmsh.read,
            ["out.msh"],
        ),
        "Gmsh 4.1 (binary)": (
            lambda f, m: meshio.gmsh.write(f, m, binary=True),
            meshio.gmsh.read,
            ["out.msh"],
        ),
        "MDPA": (meshio.mdpa.write, meshio.mdpa.read, ["out.mdpa"]),
        "MED": (meshio.med.write, meshio.med.read, ["out.med"]),
        "Medit": (meshio.medit.write, meshio.medit.read, ["out.mesh"]),
        "MOAB": (meshio.h5m.write, meshio.h5m.read, ["out.h5m"]),
        "Nastran": (meshio.nastran.write, meshio.nastran.read, ["out.bdf"]),
        "Netgen": (meshio.netgen.write, meshio.netgen.read, ["out.vol"]),
        "OFF": (meshio.off.write, meshio.off.read, ["out.off"]),
        "Permas": (meshio.permas.write, meshio.permas.read, ["out.dato"]),
        "PLY (binary)": (
            lambda f, m: meshio.ply.write(f, m, binary=True),
            meshio.ply.read,
            ["out.ply"],
        ),
        "PLY (ASCII)": (
            lambda f, m: meshio.ply.write(f, m, binary=False),
            meshio.ply.read,
            ["out.ply"],
        ),
        "STL (binary)": (
            lambda f, m: meshio.stl.write(f, m, binary=True),
            meshio.stl.read,
            ["out.stl"],
        ),
        "STL (ASCII)": (
            lambda f, m: meshio.stl.write(f, m, binary=False),
            meshio.stl.read,
            ["out.stl"],
        ),
        # "TetGen": (meshio.tetgen.write, meshio.tetgen.read, ["out.node", "out.ele"],),
        "VTK (binary)": (
            lambda f, m: meshio.vtk.write(f, m, binary=True),
            meshio.vtk.read,
            ["out.vtk"],
        ),
        "VTK (ASCII)": (
            lambda f, m: meshio.vtk.write(f, m, binary=False),
            meshio.vtk.read,
            ["out.vtk"],
        ),
        "VTU (binary, uncompressed)": (
            lambda f, m: meshio.vtu.write(f, m, binary=True, compression=None),
            meshio.vtu.read,
            ["out.vtu"],
        ),
        "VTU (binary, zlib)": (
            lambda f, m: meshio.vtu.write(f, m, binary=True, compression="zlib"),
            meshio.vtu.read,
            ["out.vtu"],
        ),
        "VTU (binary, LZMA)": (
            lambda f, m: meshio.vtu.write(f, m, binary=True, compression="lzma"),
            meshio.vtu.read,
            ["out.vtu"],
        ),
        "VTU (ASCII)": (
            lambda f, m: meshio.vtu.write(f, m, binary=False),
            meshio.vtu.read,
            ["out.vtu"],
        ),
        "Wavefront .obj": (meshio.obj.write, meshio.obj.read, ["out.obj"]),
        # "wkt": ".wkt",
        "XDMF (binary)": (
            lambda f, m: meshio.xdmf.write(f, m, data_format="Binary"),
            meshio.xdmf.read,
            ["out.xdmf", "out0.bin", "out1.bin"],
        ),
        "XDMF (HDF, GZIP)": (
            lambda f, m: meshio.xdmf.write(f, m, data_format="HDF", compression="gzip"),
            meshio.xdmf.read,
            ["out.xdmf", "out.h5"],
        ),
        "XDMF (HDF, uncompressed)": (
            lambda f, m: meshio.xdmf.write(f, m, data_format="HDF", compression=None),
            meshio.xdmf.read,
            ["out.xdmf", "out.h5"],
        ),
        "XDMF (XML)": (
            lambda f, m: meshio.xdmf.write(f, m, data_format="XML"),
            meshio.xdmf.read,
            ["out.xdmf"],
        ),
    }
    # formats = {
    #     # "VTK (ASCII)": formats["VTK (ASCII)"],
    #     # "VTK (binary)": formats["VTK (binary)"],
    #     # "VTU (ASCII)": formats["VTU (ASCII)"],
    #     # "VTU (binary)": formats["VTU (binary)"],
    #     # "Gmsh 4.1 (binary)": formats["Gmsh 4.1 (binary)"],
    #     # "FLAC3D": formats["FLAC3D"],
    #     "MDPA": formats["MDPA"],
    # }
    # max_key_length = max(len(key) for key in formats)
    elapsed_write = []
    elapsed_read = []
    file_sizes = []
    peak_memory_write = []
    peak_memory_read = []
    print()
    print(
        "format        "
        + "write (s)    "
        + "read(s)      "
        + "file size    "
        + "write mem    "
        + "read mem     "
    )
    print()
    with tempfile.TemporaryDirectory() as directory:
        directory = pathlib.Path(directory)
        for name, (writer, reader, filenames) in formats.items():
            filename = directory / filenames[0]
            # tracemalloc gives (current, peak); we record the peak.
            tracemalloc.start()
            t = time.time()
            writer(filename, mesh)
            # snapshot = tracemalloc.take_snapshot()
            elapsed_write.append(time.time() - t)
            peak_memory_write.append(tracemalloc.get_traced_memory()[1])
            tracemalloc.stop()
            # Some formats write several files; sum all of them.
            file_sizes.append(sum(os.stat(directory / f).st_size for f in filenames))
            tracemalloc.start()
            t = time.time()
            reader(filename)
            elapsed_read.append(time.time() - t)
            peak_memory_read.append(tracemalloc.get_traced_memory()[1])
            tracemalloc.stop()
            print(
                "{:<26}  {:e} {:e} {:e} {:e} {:e}".format(
                    name,
                    elapsed_write[-1],
                    elapsed_read[-1],
                    file_sizes[-1] / 1024.0**2,
                    peak_memory_write[-1] / 1024.0**2,
                    peak_memory_read[-1] / 1024.0**2,
                )
            )
    names = list(formats.keys())
    # convert to MB
    file_sizes = np.array(file_sizes)
    file_sizes = file_sizes / 1024.0**2
    peak_memory_write = np.array(peak_memory_write)
    peak_memory_write = peak_memory_write / 1024.0**2
    peak_memory_read = np.array(peak_memory_read)
    peak_memory_read = peak_memory_read / 1024.0**2
    if plot:
        plot_speed(names, elapsed_write, elapsed_read)
        plot_file_sizes(names, file_sizes, mem_size)
        plot_memory_usage(names, peak_memory_write, peak_memory_read, mem_size)
if __name__ == "__main__":
    # Run the full benchmark and write the SVG plots.
    read_write(plot=True)
| mit | 5ce00863ad3bdc0ae99c9fe751c12119 | 31.991071 | 88 | 0.533875 | 3.04533 | false | false | false | false |
nschloe/meshio | src/meshio/off/_off.py | 1 | 2842 | """
I/O for the OFF surface format, cf.
<https://en.wikipedia.org/wiki/OFF_(file_format)>,
<http://www.geomview.org/docs/html/OFF.html>.
"""
import numpy as np
from .._common import warn
from .._exceptions import ReadError
from .._files import open_file
from .._helpers import register_format
from .._mesh import CellBlock, Mesh
def read(filename):
    """Read an OFF file from *filename* and return a meshio Mesh."""
    with open_file(filename) as f:
        points, cells = read_buffer(f)
    return Mesh(points, cells)
def read_buffer(f):
    """Parse an OFF file from an open text buffer.

    Returns (points, cells) where cells holds a single triangle CellBlock;
    only all-triangle files are supported.
    """
    # assert that the first line reads `OFF`
    line = f.readline()
    if isinstance(line, (bytes, bytearray)):
        raise ReadError("Expected text buffer, not bytes.")
    if line.strip() != "OFF":
        raise ReadError("Expected the first line to be `OFF`.")
    # fast forward to the next significant line (skip blanks and # comments)
    while True:
        line = f.readline().strip()
        if line and line[0] != "#":
            break
    # This next line contains:
    # <number of vertices> <number of faces> <number of edges>
    # Split on runs of arbitrary whitespace: OFF writers may separate the
    # counts with tabs or multiple spaces, which `split(" ")` mishandled
    # (empty tokens would break the 3-way unpack).
    num_verts, num_faces, _ = line.split()
    num_verts = int(num_verts)
    num_faces = int(num_faces)
    verts = np.fromfile(f, dtype=float, count=3 * num_verts, sep=" ").reshape(
        num_verts, 3
    )
    # Each face row is <node count> followed by the node indices.
    data = np.fromfile(f, dtype=int, count=4 * num_faces, sep=" ").reshape(num_faces, 4)
    if not np.all(data[:, 0] == 3):
        raise ReadError("Can only read triangular faces")
    cells = [CellBlock("triangle", data[:, 1:])]
    return verts, cells
def write(filename, mesh):
    """Write the triangle cells of *mesh* to *filename* in OFF format.

    2D points are padded with a zero third component; non-triangle cell
    blocks are skipped with a warning.
    """
    if mesh.points.shape[1] == 2:
        warn(
            "OFF requires 3D points, but 2D points given. "
            "Appending 0 as third component."
        )
        points = np.column_stack([mesh.points, np.zeros_like(mesh.points[:, 0])])
    else:
        points = mesh.points
    skip = [c for c in mesh.cells if c.type != "triangle"]
    if skip:
        string = ", ".join(item.type for item in skip)
        warn(f"OFF only supports triangle cells. Skipping {string}.")
    tri = mesh.get_cells_type("triangle")
    with open(filename, "wb") as fh:
        fh.write(b"OFF\n")
        fh.write(b"# Created by meshio\n\n")
        # counts
        c = f"{mesh.points.shape[0]} {tri.shape[0]} {0}\n\n"
        fh.write(c.encode())
        # vertices
        # np.savetxt(fh, mesh.points, "%r")  # slower
        fmt = " ".join(["{}"] * points.shape[1])
        out = "\n".join([fmt.format(*row) for row in points]) + "\n"
        fh.write(out.encode())
        # triangles
        # Prepend the per-face node count (always 3) column.
        out = np.column_stack([np.full(tri.shape[0], 3, dtype=tri.dtype), tri])
        # savetxt is slower
        # np.savetxt(fh, out, "%d %d %d %d")
        fmt = " ".join(["{}"] * out.shape[1])
        out = "\n".join([fmt.format(*row) for row in out]) + "\n"
        fh.write(out.encode())
# Register the OFF reader/writer with meshio's format dispatch.
register_format("off", [".off"], read, {"off": write})
| mit | 562fa02f9dddb19131a4eb3639bbcb3e | 29.234043 | 88 | 0.578818 | 3.296984 | false | false | false | false |
sdispater/pendulum | pendulum/helpers.py | 1 | 5288 | from __future__ import annotations
import os
import struct
from datetime import date
from datetime import datetime
from datetime import timedelta
from math import copysign
from typing import TYPE_CHECKING
from typing import TypeVar
from typing import overload
import pendulum
from pendulum.constants import DAYS_PER_MONTHS
from pendulum.formatting.difference_formatter import DifferenceFormatter
from pendulum.locales.locale import Locale
if TYPE_CHECKING:
# Prevent import cycles
from pendulum.duration import Duration
with_extensions = os.getenv("PENDULUM_EXTENSIONS", "1") == "1"
_DT = TypeVar("_DT", bound=datetime)
_D = TypeVar("_D", bound=date)
try:
# nopycln: file # noqa: E800
if not with_extensions or struct.calcsize("P") == 4:
raise ImportError()
from pendulum._extensions._helpers import PreciseDiff
from pendulum._extensions._helpers import days_in_year
from pendulum._extensions._helpers import is_leap
from pendulum._extensions._helpers import is_long_year
from pendulum._extensions._helpers import local_time
from pendulum._extensions._helpers import precise_diff
from pendulum._extensions._helpers import timestamp
from pendulum._extensions._helpers import week_day
except ImportError:
from pendulum._extensions.helpers import PreciseDiff # type: ignore[misc]
from pendulum._extensions.helpers import days_in_year
from pendulum._extensions.helpers import is_leap
from pendulum._extensions.helpers import is_long_year
from pendulum._extensions.helpers import local_time
from pendulum._extensions.helpers import precise_diff # type: ignore[misc]
from pendulum._extensions.helpers import timestamp
from pendulum._extensions.helpers import week_day
difference_formatter = DifferenceFormatter()
@overload
def add_duration(
    dt: datetime,
    years: int = 0,
    months: int = 0,
    weeks: int = 0,
    days: int = 0,
    hours: int = 0,
    minutes: int = 0,
    seconds: float = 0,
    microseconds: int = 0,
) -> datetime:
    ...
@overload
def add_duration(
    dt: date,
    years: int = 0,
    months: int = 0,
    weeks: int = 0,
    days: int = 0,
) -> date:
    pass
def add_duration(
    dt: date | datetime,
    years: int = 0,
    months: int = 0,
    weeks: int = 0,
    days: int = 0,
    hours: int = 0,
    minutes: int = 0,
    seconds: float = 0,
    microseconds: int = 0,
) -> date | datetime:
    """
    Adds a duration to a date/datetime instance.

    Years/months are applied calendar-aware (with day-of-month clamping);
    the remaining components are applied as a plain timedelta. Plain dates
    only accept the year/month/week/day components.
    """
    days += weeks * 7
    # Time-of-day components make no sense on a pure date.
    if (
        isinstance(dt, date)
        and not isinstance(dt, datetime)
        and any([hours, minutes, seconds, microseconds])
    ):
        raise RuntimeError("Time elements cannot be added to a date instance.")
    # Normalizing
    # Carry overflow upward component by component, preserving the sign of
    # each component via _sign (divmod is applied to the absolute value).
    if abs(microseconds) > 999999:
        s = _sign(microseconds)
        div, mod = divmod(microseconds * s, 1000000)
        microseconds = mod * s
        seconds += div * s
    if abs(seconds) > 59:
        s = _sign(seconds)
        div, mod = divmod(seconds * s, 60)  # type: ignore[assignment]
        seconds = mod * s
        minutes += div * s
    if abs(minutes) > 59:
        s = _sign(minutes)
        div, mod = divmod(minutes * s, 60)
        minutes = mod * s
        hours += div * s
    if abs(hours) > 23:
        s = _sign(hours)
        div, mod = divmod(hours * s, 24)
        hours = mod * s
        days += div * s
    if abs(months) > 11:
        s = _sign(months)
        div, mod = divmod(months * s, 12)
        months = mod * s
        years += div * s
    # Calendar-aware year/month arithmetic with single-step wrap-around
    # (months is within [-11, 11] after normalization above).
    year = dt.year + years
    month = dt.month
    if months:
        month += months
        if month > 12:
            year += 1
            month -= 12
        elif month < 1:
            year -= 1
            month += 12
    # Clamp the day to the target month's length (e.g. Jan 31 + 1 month).
    day = min(DAYS_PER_MONTHS[int(is_leap(year))][month], dt.day)
    dt = dt.replace(year=year, month=month, day=day)
    return dt + timedelta(
        days=days,
        hours=hours,
        minutes=minutes,
        seconds=seconds,
        microseconds=microseconds,
    )
def format_diff(
    diff: Duration,
    is_now: bool = True,
    absolute: bool = False,
    locale: str | None = None,
) -> str:
    """Humanize *diff* (e.g. "2 days ago") in the given or default locale."""
    if locale is None:
        locale = get_locale()
    return difference_formatter.format(diff, is_now, absolute, locale)
def _sign(x: float) -> int:
return int(copysign(1, x))
# Global helpers
def locale(name: str) -> Locale:
    """Load (and cache) the Locale object for *name*, e.g. "en" or "fr"."""
    return Locale.load(name)
def set_locale(name: str) -> None:
    """Set the default locale used by pendulum's humanized formatting."""
    # Eagerly loads the locale so an unknown name raises here, not later.
    locale(name)
    pendulum._LOCALE = name
def get_locale() -> str:
    """Return the name of the current default locale."""
    return pendulum._LOCALE
def week_starts_at(wday: int) -> None:
    """Set the weekday considered the start of the week.

    *wday* must be within pendulum.SUNDAY..pendulum.SATURDAY.
    """
    if wday < pendulum.SUNDAY or wday > pendulum.SATURDAY:
        raise ValueError("Invalid week day as start of week.")
    pendulum._WEEK_STARTS_AT = wday
def week_ends_at(wday: int) -> None:
    """Set the weekday considered the end of the week.

    *wday* must be within pendulum.SUNDAY..pendulum.SATURDAY.
    """
    if wday < pendulum.SUNDAY or wday > pendulum.SATURDAY:
        # Fixed copy-paste from week_starts_at: message said "start of week".
        raise ValueError("Invalid week day as end of week.")
    pendulum._WEEK_ENDS_AT = wday
# Public API of this module.
__all__ = [
    "PreciseDiff",
    "days_in_year",
    "is_leap",
    "is_long_year",
    "local_time",
    "precise_diff",
    "timestamp",
    "week_day",
    "add_duration",
    "format_diff",
    "locale",
    "set_locale",
    "get_locale",
    "week_starts_at",
    "week_ends_at",
]
| mit | 2d717597ed19dd48c97ad59e99a155f6 | 22.713004 | 79 | 0.620651 | 3.483531 | false | false | false | false |
pyproj4/pyproj | pyproj/__main__.py | 2 | 6351 | """
This is the main entry point for pyproj CLI
e.g. python -m pyproj
"""
import argparse
import os
from pyproj import __proj_version__, __version__, _show_versions
from pyproj.aoi import BBox
from pyproj.datadir import get_data_dir, get_user_data_dir
from pyproj.sync import (
_download_resource_file,
get_proj_endpoint,
get_transform_grid_list,
)
# Top-level CLI parser: `python -m pyproj [-v] [sync ...]`.
parser = argparse.ArgumentParser(
    description=f"pyproj version: {__version__} [PROJ version: {__proj_version__}]"
)
parser.add_argument(
    "-v",
    "--verbose",
    help="Show verbose debugging version information.",
    action="store_true",
)
subparsers = parser.add_subparsers(title="commands")
# `sync` subcommand: download PROJ transformation grids.
sync_parser = subparsers.add_parser(
    name="sync",
    description="Tool for synchronizing PROJ datum and transformation support data.",
)
sync_parser.add_argument(
    "--bbox",
    help=(
        "Specify an area of interest to restrict the resources to download. "
        "The area of interest is specified as a "
        "bounding box with geographic coordinates, expressed in degrees in an "
        "unspecified geographic CRS. "
        "`west_long` and `east_long` should be in the [-180,180] range, and "
        "`south_lat` and `north_lat` in the [-90,90]. `west_long` is generally "
        "lower than `east_long`, except in the case where the area of interest "
        "crosses the antimeridian."
    ),
)
sync_parser.add_argument(
    "--spatial-test",
    help=(
        "Specify how the extent of the resource files "
        "are compared to the area of use specified explicitly with `--bbox`. "
        "By default, any resource files whose extent intersects the value specified "
        "by `--bbox` will be selected. If using the ``contains`` strategy, "
        "only resource files whose extent is contained in the value specified by "
        "`--bbox` will be selected."
    ),
    choices=["intersects", "contains"],
    default="intersects",
)
sync_parser.add_argument(
    "--source-id",
    help=(
        "Restrict resource files to be downloaded to those whose source_id property "
        "contains the ID value. Default is all possible values."
    ),
)
sync_parser.add_argument(
    "--area-of-use",
    help=(
        "Restrict resource files to be downloaded to those whose area_of_use property "
        "contains the AREA_OF_USE value. Default is all possible values."
    ),
)
sync_parser.add_argument(
    "--file",
    help=(
        "Restrict resource files to be downloaded to those whose name property "
        " (file name) contains the FILE value. Default is all possible values."
    ),
)
sync_parser.add_argument(
    "--exclude-world-coverage",
    help="Exclude files which have world coverage.",
    action="store_true",
)
sync_parser.add_argument(
    "--include-already-downloaded",
    help="Include grids that are already downloaded.",
    action="store_true",
)
sync_parser.add_argument(
    "--list-files", help="List the files without downloading.", action="store_true"
)
sync_parser.add_argument(
    "--all", help="Download all missing transform grids.", action="store_true"
)
sync_parser.add_argument(
    "--system-directory",
    help=(
        "If enabled, it will sync grids to the main PROJ data directory "
        "instead of the user writable directory."
    ),
    action="store_true",
)
sync_parser.add_argument(
    "--target-directory",
    help="The directory to sync grids to instead of the user writable directory.",
)
sync_parser.add_argument(
    "-v", "--verbose", help="Print download information.", action="store_true"
)
def _parse_sync_command(args):
    """
    Handle the ``sync`` subcommand.

    Validates the parsed arguments, resolves the target directory,
    queries the PROJ endpoint for matching transform grids, and either
    lists them (``--list-files``) or downloads them.

    :param args: Namespace produced by ``sync_parser``.
    :raises RuntimeError: If mutually exclusive options are combined.
    """
    # Without any selection criteria there is nothing to do: show help.
    if not any(
        (
            args.bbox,
            args.list_files,
            args.all,
            args.source_id,
            args.area_of_use,
            args.file,
        )
    ):
        sync_parser.print_help()
        return
    # '--all' is exclusive with every other selection/listing option.
    if args.all and any(
        (
            args.bbox,
            args.list_files,
            args.source_id,
            args.area_of_use,
            args.file,
        )
    ):
        # Bug fix: trailing space added so the concatenated message does not
        # read "...'--source-id','--area-of-use'...".
        raise RuntimeError(
            "Cannot use '--all' with '--list-files', '--source-id', "
            "'--area-of-use', '--bbox', or '--file'."
        )
    bbox = None
    if args.bbox is not None:
        west, south, east, north = args.bbox.split(",")
        bbox = BBox(
            west=float(west),
            south=float(south),
            east=float(east),
            north=float(north),
        )
    if args.target_directory and args.system_directory:
        raise RuntimeError("Cannot set both --target-directory and --system-directory.")
    target_directory = args.target_directory
    if args.system_directory:
        # The data dir may be a list of paths separated by the OS path-list
        # separator (':' or ';'); the first entry is the main PROJ data
        # directory. Bug fix: os.pathsep, not os.path.sep -- splitting a
        # POSIX path on '/' would return an empty first element.
        target_directory = get_data_dir().split(os.pathsep)[0]
    elif not target_directory:
        target_directory = get_user_data_dir(True)
    grids = get_transform_grid_list(
        source_id=args.source_id,
        area_of_use=args.area_of_use,
        filename=args.file,
        bbox=bbox,
        spatial_test=args.spatial_test,
        include_world_coverage=not args.exclude_world_coverage,
        include_already_downloaded=args.include_already_downloaded,
        target_directory=target_directory,
    )
    if args.list_files:
        print("filename | source_id | area_of_use")
        print("----------------------------------")
    else:
        endpoint = get_proj_endpoint()
    for grid in grids:
        if args.list_files:
            print(
                grid["properties"]["name"],
                grid["properties"]["source_id"],
                grid["properties"].get("area_of_use"),
                sep=" | ",
            )
        else:
            filename = grid["properties"]["name"]
            _download_resource_file(
                # Bug fix: download URL must point at the grid file itself.
                file_url=f"{endpoint}/{filename}",
                short_name=filename,
                directory=target_directory,
                verbose=args.verbose,
                sha256=grid["properties"]["sha256sum"],
            )
def main():
    """
    Main entrypoint into the command line interface.

    Dispatches to the ``sync`` handler when sync arguments were parsed,
    shows version information for ``-v``, and prints help otherwise.
    """
    args = parser.parse_args()
    # Only the sync sub-parser defines a "bbox" attribute.
    if hasattr(args, "bbox"):
        _parse_sync_command(args)
        return
    if args.verbose:
        _show_versions.show_versions()
        return
    parser.print_help()


if __name__ == "__main__":
    main()
| mit | a7c131541d26c494d3a9a1f2d9f95e64 | 29.38756 | 88 | 0.598961 | 3.952085 | false | false | false | false |
sdispater/pendulum | tests/localization/test_id.py | 1 | 2369 | from __future__ import annotations
import pendulum
# Locale under test: Indonesian ("id").
locale = "id"
def test_diff_for_humans():
    # Freeze the clock so every pendulum.now() call inside diff_for_humans()
    # returns the same instant, making the relative phrases deterministic.
    with pendulum.travel_to(pendulum.datetime(2016, 8, 29), freeze=True):
        diff_for_humans()
def diff_for_humans():
    """Assert Indonesian human-readable diffs for past, future and paired times."""
    # (subtract-kwargs, expected phrase) pairs; time is frozen by the caller,
    # so each pendulum.now() yields the same instant.
    past_cases = [
        ({"seconds": 1}, "beberapa detik yang lalu"),
        ({"seconds": 2}, "beberapa detik yang lalu"),
        ({"seconds": 21}, "21 detik yang lalu"),
        ({"minutes": 1}, "1 menit yang lalu"),
        ({"minutes": 2}, "2 menit yang lalu"),
        ({"hours": 1}, "1 jam yang lalu"),
        ({"hours": 2}, "2 jam yang lalu"),
        ({"days": 1}, "1 hari yang lalu"),
        ({"days": 2}, "2 hari yang lalu"),
        ({"weeks": 1}, "1 minggu yang lalu"),
        ({"weeks": 2}, "2 minggu yang lalu"),
        ({"months": 1}, "1 bulan yang lalu"),
        ({"months": 2}, "2 bulan yang lalu"),
        ({"years": 1}, "1 tahun yang lalu"),
        ({"years": 2}, "2 tahun yang lalu"),
    ]
    for kwargs, expected in past_cases:
        moment = pendulum.now().subtract(**kwargs)
        assert moment.diff_for_humans(locale=locale) == expected

    # Future relative to "now".
    future = pendulum.now().add(seconds=1)
    assert future.diff_for_humans(locale=locale) == "dalam beberapa detik"

    # Explicit reference instant instead of "now".
    later = pendulum.now().add(seconds=1)
    reference = pendulum.now()
    assert later.diff_for_humans(reference, locale=locale) == "beberapa detik kemudian"
    assert reference.diff_for_humans(later, locale=locale) == "beberapa detik yang lalu"
    # Absolute (no "ago"/"in") forms.
    assert later.diff_for_humans(reference, True, locale=locale) == "beberapa detik"
    assert (
        reference.diff_for_humans(later.add(seconds=1), True, locale=locale)
        == "beberapa detik"
    )
| mit | 43c47edb291e367225c05c245c8c7479 | 33.838235 | 88 | 0.669059 | 2.566631 | false | false | false | false |
sdispater/pendulum | pendulum/parsing/iso8601.py | 1 | 13836 | from __future__ import annotations
import datetime
import re
from typing import cast
from pendulum.constants import HOURS_PER_DAY
from pendulum.constants import MINUTES_PER_HOUR
from pendulum.constants import MONTHS_OFFSETS
from pendulum.constants import SECONDS_PER_MINUTE
from pendulum.duration import Duration
from pendulum.helpers import days_in_year
from pendulum.helpers import is_leap
from pendulum.helpers import is_long_year
from pendulum.helpers import week_day
from pendulum.parsing.exceptions import ParserError
from pendulum.tz.timezone import UTC
from pendulum.tz.timezone import FixedTimezone
# Verbose regex matching an ISO 8601 date and/or time string. Named groups
# distinguish classic (YYYY-MM-DD), ordinal (YYYY-DDD) and ISO-week
# (YYYY-Www-D) dates, plus optional time, subsecond and offset parts.
ISO8601_DT = re.compile(
    # Date (optional) # noqa: E800
    "^"
    "(?P<date>"
    " (?P<classic>" # Classic date (YYYY-MM-DD) or ordinal (YYYY-DDD)
    r" (?P<year>\d{4})" # Year
    " (?P<monthday>"
    r" (?P<monthsep>-)?(?P<month>\d{2})" # Month (optional)
    r" ((?P<daysep>-)?(?P<day>\d{1,2}))?" # Day (optional)
    " )?"
    " )"
    " |"
    " (?P<isocalendar>" # Calendar date (2016-W05 or 2016-W05-5)
    r" (?P<isoyear>\d{4})" # Year
    " (?P<weeksep>-)?" # Separator (optional)
    " W" # W separator
    r" (?P<isoweek>\d{2})" # Week number
    " (?P<weekdaysep>-)?" # Separator (optional)
    r" (?P<isoweekday>\d)?" # Weekday (optional)
    " )"
    ")?"
    # Time (optional) # noqa: E800
    "(?P<time>"
    r" (?P<timesep>[T\ ])?" # Separator (T or space)
    r" (?P<hour>\d{1,2})(?P<minsep>:)?(?P<minute>\d{1,2})?(?P<secsep>:)?(?P<second>\d{1,2})?" # HH:mm:ss (optional mm and ss)
    # Subsecond part (optional)
    " (?P<subsecondsection>"
    " (?:[.,])" # Subsecond separator (optional)
    r" (?P<subsecond>\d{1,9})" # Subsecond
    " )?"
    # Timezone offset
    " (?P<tz>"
    r" (?:[-+])\d{2}:?(?:\d{2})?|Z" # Offset (+HH:mm or +HHmm or +HH or Z)
    " )?"
    ")?"
    "$",
    re.VERBOSE,
)
# Verbose regex matching an ISO 8601 duration (e.g. "P3Y6M4DT12H30M5S" or
# "P2W"). Weeks are exclusive with the year/month/day and time components;
# that rule is enforced later in _parse_iso8601_duration, not here.
ISO8601_DURATION = re.compile(
    "^P" # Duration P indicator
    # Years, months and days (optional) # noqa: E800
    "(?P<w>"
    r" (?P<weeks>\d+(?:[.,]\d+)?W)"
    ")?"
    "(?P<ymd>"
    r" (?P<years>\d+(?:[.,]\d+)?Y)?"
    r" (?P<months>\d+(?:[.,]\d+)?M)?"
    r" (?P<days>\d+(?:[.,]\d+)?D)?"
    ")?"
    "(?P<hms>"
    " (?P<timesep>T)" # Separator (T)
    r" (?P<hours>\d+(?:[.,]\d+)?H)?"
    r" (?P<minutes>\d+(?:[.,]\d+)?M)?"
    r" (?P<seconds>\d+(?:[.,]\d+)?S)?"
    ")?"
    "$",
    re.VERBOSE,
)
def parse_iso8601(
    text: str,
) -> datetime.datetime | datetime.date | datetime.time | Duration:
    """
    ISO 8601 compliant parser.

    Tries to interpret ``text`` first as an ISO 8601 duration, then as a
    date, time, or datetime.

    :param text: The string to parse

    :return: A ``Duration`` for duration strings; otherwise a ``date``
        (date only), ``time`` (time only), or ``datetime`` (both parts).
    :raises ParserError: If ``text`` is not a valid ISO 8601 string.
    """
    # Durations ("P...") are handled by a dedicated parser.
    parsed = _parse_iso8601_duration(text)
    if parsed is not None:
        return parsed
    m = ISO8601_DT.match(text)
    if not m:
        raise ParserError("Invalid ISO 8601 string")
    # "ambiguous_date" flags inputs like "201207" which could be either an
    # (invalid) date YYYYMM or a time hhmmss; resolved once we know whether
    # a time part follows.
    ambiguous_date = False
    is_date = False
    is_time = False
    year = 0
    month = 1
    day = 1
    minute = 0
    second = 0
    microsecond = 0
    tzinfo: FixedTimezone | None = None
    if m.group("date"):
        # A date has been specified
        is_date = True
        if m.group("isocalendar"):
            # We have a ISO 8601 string defined
            # by week number
            # Separator usage must be consistent: either both week and
            # weekday separators are present, or neither.
            if (
                m.group("weeksep")
                and not m.group("weekdaysep")
                and m.group("isoweekday")
            ):
                raise ParserError(f"Invalid date string: {text}")
            if not m.group("weeksep") and m.group("weekdaysep"):
                raise ParserError(f"Invalid date string: {text}")
            try:
                date = _get_iso_8601_week(
                    m.group("isoyear"), m.group("isoweek"), m.group("isoweekday")
                )
            except ParserError:
                raise
            except ValueError:
                raise ParserError(f"Invalid date string: {text}")
            year = date["year"]
            month = date["month"]
            day = date["day"]
        else:
            # We have a classic date representation
            year = int(m.group("year"))
            if not m.group("monthday"):
                # No month and day
                month = 1
                day = 1
            else:
                if m.group("month") and m.group("day"):
                    # Month and day
                    if not m.group("daysep") and len(m.group("day")) == 1:
                        # Ordinal day (YYYYDDD): "month" + "day" groups
                        # together hold the 3-digit day-of-year.
                        ordinal = int(m.group("month") + m.group("day"))
                        leap = is_leap(year)
                        months_offsets = MONTHS_OFFSETS[leap]
                        if ordinal > months_offsets[13]:
                            raise ParserError("Ordinal day is out of range")
                        # Walk the cumulative month offsets to recover
                        # (month, day) from the ordinal day.
                        for i in range(1, 14):
                            if ordinal <= months_offsets[i]:
                                day = ordinal - months_offsets[i - 1]
                                month = i - 1
                                break
                    else:
                        month = int(m.group("month"))
                        day = int(m.group("day"))
                else:
                    # Only month
                    if not m.group("monthsep"):
                        # The date looks like 201207
                        # which is invalid for a date
                        # But it might be a time in the form hhmmss
                        ambiguous_date = True
                    month = int(m.group("month"))
                    day = 1
    if not m.group("time"):
        # No time has been specified
        if ambiguous_date:
            # We can "safely" assume that the ambiguous date
            # was actually a time in the form hhmmss
            hhmmss = f"{str(year)}{str(month):0>2}"
            return datetime.time(int(hhmmss[:2]), int(hhmmss[2:4]), int(hhmmss[4:]))
        return datetime.date(year, month, day)
    if ambiguous_date:
        raise ParserError(f"Invalid date string: {text}")
    if is_date and not m.group("timesep"):
        # A combined date+time string requires the "T" (or space) separator.
        raise ParserError(f"Invalid date string: {text}")
    if not is_date:
        is_time = True
    # Grabbing hh:mm:ss
    hour = int(m.group("hour"))
    minsep = m.group("minsep")
    if m.group("minute"):
        minute = int(m.group("minute"))
    elif minsep:
        # Trailing ":" with no minutes.
        raise ParserError("Invalid ISO 8601 time part")
    secsep = m.group("secsep")
    if secsep and not minsep and m.group("minute"):
        # minute/second separator but no hour/minute separator
        raise ParserError("Invalid ISO 8601 time part")
    if m.group("second"):
        if not secsep and minsep:
            # No minute/second separator but hour/minute separator
            raise ParserError("Invalid ISO 8601 time part")
        second = int(m.group("second"))
    elif secsep:
        raise ParserError("Invalid ISO 8601 time part")
    # Grabbing subseconds, if any
    if m.group("subsecondsection"):
        # Limiting to 6 chars (microsecond precision); right-pad with zeros.
        subsecond = m.group("subsecond")[:6]
        microsecond = int(f"{subsecond:0<6}")
    # Grabbing timezone, if any
    tz = m.group("tz")
    if tz:
        if tz == "Z":
            tzinfo = UTC
        else:
            negative = bool(tz.startswith("-"))
            tz = tz[1:]
            if ":" not in tz:
                # "+HH" or "+HHmm" form: normalize to 4 digits.
                if len(tz) == 2:
                    tz = f"{tz}00"
                off_hour = tz[0:2]
                off_minute = tz[2:4]
            else:
                off_hour, off_minute = tz.split(":")
            offset = ((int(off_hour) * 60) + int(off_minute)) * 60
            if negative:
                offset = -1 * offset
            tzinfo = FixedTimezone(offset)
    if is_time:
        # NOTE: a time-only string returns a naive time; any parsed offset
        # is discarded here.
        return datetime.time(hour, minute, second, microsecond)
    return datetime.datetime(
        year, month, day, hour, minute, second, microsecond, tzinfo=tzinfo
    )
def _parse_iso8601_duration(text: str, **options: str) -> Duration | None:
    """
    Parse an ISO 8601 duration string (e.g. ``P3Y6M4DT12H30M5S``, ``P2W``).

    Only the smallest present component may carry a fraction; weeks are
    mutually exclusive with every other component.

    :param text: The candidate duration string.
    :param options: Unused; kept for interface compatibility.

    :return: The parsed ``Duration``, or ``None`` if ``text`` does not look
        like a duration at all.
    :raises ParserError: If ``text`` matches the duration shape but is
        semantically invalid.
    """
    m = ISO8601_DURATION.match(text)
    if not m:
        return None
    years = 0
    months = 0
    weeks = 0
    days: int | float = 0
    hours: int | float = 0
    minutes: int | float = 0
    seconds: int | float = 0
    microseconds: int | float = 0
    # Set once a fractional component is seen; any later component is then
    # invalid (ISO 8601 allows a fraction only on the last component).
    fractional = False
    _days: str | float
    _hour: str | int | None
    _minutes: str | int | None
    _seconds: str | int | None
    if m.group("w"):
        # Weeks
        if m.group("ymd") or m.group("hms"):
            # Specifying anything more than weeks is not supported
            raise ParserError("Invalid duration string")
        _weeks = m.group("weeks")
        if not _weeks:
            raise ParserError("Invalid duration string")
        _weeks = _weeks.replace(",", ".").replace("W", "")
        if "." in _weeks:
            # NOTE(review): the fraction handling assumes a single fractional
            # digit (e.g. "P1.5W"); multi-digit fractions like "P1.25W" are
            # scaled by 1/10 regardless of length -- TODO confirm intent.
            _weeks, portion = _weeks.split(".")
            weeks = int(_weeks)
            _days = int(portion) / 10 * 7
            days, hours = int(_days // 1), int(_days % 1 * HOURS_PER_DAY)
        else:
            weeks = int(_weeks)
    if m.group("ymd"):
        # Years, months and/or days
        _years = m.group("years")
        _months = m.group("months")
        _days = m.group("days")
        # Checking order
        years_start = m.start("years") if _years else -3
        months_start = m.start("months") if _months else years_start + 1
        days_start = m.start("days") if _days else months_start + 1
        # Check correct order
        if not (years_start < months_start < days_start):
            raise ParserError("Invalid duration")
        if _years:
            _years = _years.replace(",", ".").replace("Y", "")
            if "." in _years:
                raise ParserError("Float years in duration are not supported")
            else:
                years = int(_years)
        if _months:
            if fractional:
                raise ParserError("Invalid duration")
            _months = _months.replace(",", ".").replace("M", "")
            if "." in _months:
                raise ParserError("Float months in duration are not supported")
            else:
                months = int(_months)
        if _days:
            if fractional:
                raise ParserError("Invalid duration")
            _days = _days.replace(",", ".").replace("D", "")
            if "." in _days:
                # Fractional days spill over into hours (single-digit
                # fraction assumption as above).
                fractional = True
                _days, _hours = _days.split(".")
                days = int(_days)
                hours = int(_hours) / 10 * HOURS_PER_DAY
            else:
                days = int(_days)
    if m.group("hms"):
        # Hours, minutes and/or seconds
        _hours = m.group("hours") or 0
        _minutes = m.group("minutes") or 0
        _seconds = m.group("seconds") or 0
        # Checking order
        hours_start = m.start("hours") if _hours else -3
        minutes_start = m.start("minutes") if _minutes else hours_start + 1
        seconds_start = m.start("seconds") if _seconds else minutes_start + 1
        # Check correct order
        if not (hours_start < minutes_start < seconds_start):
            raise ParserError("Invalid duration")
        if _hours:
            if fractional:
                raise ParserError("Invalid duration")
            _hours = cast(str, _hours).replace(",", ".").replace("H", "")
            if "." in _hours:
                fractional = True
                _hours, _mins = _hours.split(".")
                hours += int(_hours)
                minutes += int(_mins) / 10 * MINUTES_PER_HOUR
            else:
                hours += int(_hours)
        if _minutes:
            if fractional:
                raise ParserError("Invalid duration")
            _minutes = cast(str, _minutes).replace(",", ".").replace("M", "")
            if "." in _minutes:
                fractional = True
                _minutes, _secs = _minutes.split(".")
                minutes += int(_minutes)
                seconds += int(_secs) / 10 * SECONDS_PER_MINUTE
            else:
                minutes += int(_minutes)
        if _seconds:
            if fractional:
                raise ParserError("Invalid duration")
            _seconds = cast(str, _seconds).replace(",", ".").replace("S", "")
            if "." in _seconds:
                # Fractional seconds map directly to microseconds
                # (truncate to 6 digits, right-pad with zeros).
                _seconds, _microseconds = _seconds.split(".")
                seconds += int(_seconds)
                microseconds += int(f"{_microseconds[:6]:0<6}")
            else:
                seconds += int(_seconds)
    return Duration(
        years=years,
        months=months,
        weeks=weeks,
        days=days,
        hours=hours,
        minutes=minutes,
        seconds=seconds,
        microseconds=microseconds,
    )
def _get_iso_8601_week(
    year: int | str, week: int | str, weekday: int | str
) -> dict[str, int]:
    """
    Resolve an ISO week date (year, week, weekday) to a calendar date.

    :param year: ISO year.
    :param week: ISO week number (1-52/53).
    :param weekday: ISO weekday (1=Monday .. 7=Sunday); falsy means Monday.

    :return: Mapping with ``year``, ``month`` and ``day`` keys.
    :raises ParserError: For an out-of-range week or weekday.
    """
    weekday = int(weekday) if weekday else 1
    year = int(year)
    week = int(week)

    if week > 53 or (week > 52 and not is_long_year(year)):
        raise ParserError("Invalid week for week date")
    if weekday > 7:
        raise ParserError("Invalid weekday for week date")

    # strptime has no ISO week-date support, so convert to an ordinal
    # day-of-year first (may fall outside the year and need adjusting).
    ordinal = week * 7 + weekday - (week_day(year, 1, 4) + 3)
    if ordinal < 1:
        # Falls into the previous year.
        year -= 1
        ordinal += days_in_year(year)
    if ordinal > days_in_year(year):
        # Falls into the next year.
        ordinal -= days_in_year(year)
        year += 1

    parsed = datetime.datetime.strptime(f"{year}-{ordinal}", "%Y-%j")
    return {"year": parsed.year, "month": parsed.month, "day": parsed.day}
| mit | f9083bbb4dec6d162f5933a1f8f13f92 | 29.475771 | 129 | 0.499422 | 3.779295 | false | false | false | false |
pyproj4/pyproj | pyproj/crs/datum.py | 2 | 3722 | """
This module is for building datums to be used when
building a CRS.
"""
from typing import Any, Dict, Optional, Union
from pyproj._crs import Datum, Ellipsoid, PrimeMeridian
class CustomDatum(Datum):
    """
    .. versionadded:: 2.5.0

    Class to build a datum based on an ellipsoid and prime meridian.
    """

    def __new__(
        cls,
        name: str = "undefined",
        ellipsoid: Any = "WGS 84",
        prime_meridian: Any = "Greenwich",
    ):
        """
        Parameters
        ----------
        name: str, default="undefined"
            Name of the datum.
        ellipsoid: Any, default="WGS 84"
            Anything accepted by :meth:`pyproj.crs.Ellipsoid.from_user_input`
            or a :class:`pyproj.crs.datum.CustomEllipsoid`.
        prime_meridian: Any, default="Greenwich"
            Anything accepted by :meth:`pyproj.crs.PrimeMeridian.from_user_input`.
        """
        # Normalize both components to PROJ JSON before assembling the datum.
        ellipsoid_json = Ellipsoid.from_user_input(ellipsoid).to_json_dict()
        prime_meridian_json = PrimeMeridian.from_user_input(
            prime_meridian
        ).to_json_dict()
        return cls.from_json_dict(
            {
                "$schema": "https://proj.org/schemas/v0.2/projjson.schema.json",
                "type": "GeodeticReferenceFrame",
                "name": name,
                "ellipsoid": ellipsoid_json,
                "prime_meridian": prime_meridian_json,
            }
        )
class CustomEllipsoid(Ellipsoid):
    """
    .. versionadded:: 2.5.0

    Class to build a custom ellipsoid.
    """

    def __new__(
        cls,
        name: str = "undefined",
        semi_major_axis: Optional[float] = None,
        inverse_flattening: Optional[float] = None,
        semi_minor_axis: Optional[float] = None,
        radius: Optional[float] = None,
    ):
        """
        Parameters
        ----------
        name: str, default="undefined"
            Name of the ellipsoid.
        semi_major_axis: float, optional
            The semi major axis in meters. Required if missing radius.
        inverse_flattening: float, optional
            The inverse flattening in meters.
            Required if missing semi_minor_axis and radius.
        semi_minor_axis: float, optional
            The semi minor axis in meters.
            Required if missing inverse_flattening and radius.
        radius: float, optional
            The radius in meters. Can only be used alone.
            Cannot be mixed with other parameters.
        """
        ellipsoid_json: Dict[str, Union[float, str]] = {
            "$schema": "https://proj.org/schemas/v0.2/projjson.schema.json",
            "type": "Ellipsoid",
            "name": name,
        }
        # Copy only the parameters the caller actually supplied; PROJ itself
        # validates that the resulting combination is meaningful.
        optional_fields = (
            ("semi_major_axis", semi_major_axis),
            ("inverse_flattening", inverse_flattening),
            ("semi_minor_axis", semi_minor_axis),
            ("radius", radius),
        )
        for key, value in optional_fields:
            if value is not None:
                ellipsoid_json[key] = value
        return cls.from_json_dict(ellipsoid_json)
class CustomPrimeMeridian(PrimeMeridian):
    """
    .. versionadded:: 2.5.0

    Class to build a prime meridian based on a longitude.
    """

    def __new__(cls, longitude: float, name: str = "undefined"):
        """
        Parameters
        ----------
        longitude: float
            Longitude of prime meridian.
        name: str, optional
            Name of the prime meridian.
        """
        # Build the PROJ JSON representation inline and hand it to PROJ.
        return cls.from_json_dict(
            {
                "$schema": "https://proj.org/schemas/v0.2/projjson.schema.json",
                "type": "PrimeMeridian",
                "name": name,
                "longitude": longitude,
            }
        )
| mit | c1bc8cc3e86eed3afd55a00d04f22124 | 31.086207 | 82 | 0.566631 | 3.963791 | false | false | false | false |
sdispater/pendulum | pendulum/time.py | 1 | 8939 | from __future__ import annotations
import datetime
from datetime import time
from datetime import timedelta
from typing import TYPE_CHECKING
from typing import Optional
from typing import cast
from typing import overload
import pendulum
from pendulum.constants import SECS_PER_HOUR
from pendulum.constants import SECS_PER_MIN
from pendulum.constants import USECS_PER_SEC
from pendulum.duration import AbsoluteDuration
from pendulum.duration import Duration
from pendulum.mixins.default import FormattableMixin
if TYPE_CHECKING:
from typing import Literal
class Time(FormattableMixin, time):
    """
    Represents a time instance as hour, minute, second, microsecond.

    Subclass of :class:`datetime.time` adding arithmetic, comparison
    helpers, humanized diffs and pendulum formatting support.
    """
    # String formatting
    def __repr__(self) -> str:
        # Only include microsecond/tzinfo fragments when they carry data.
        us = ""
        if self.microsecond:
            us = f", {self.microsecond}"
        tzinfo = ""
        if self.tzinfo:
            tzinfo = f", tzinfo={repr(self.tzinfo)}"
        return (
            f"{self.__class__.__name__}"
            f"({self.hour}, {self.minute}, {self.second}{us}{tzinfo})"
        )
    # Comparisons
    def closest(self, dt1: Time | time, dt2: Time | time) -> Time:
        """
        Get the closest time from the instance.
        """
        # Normalize inputs to (naive) Time instances before comparing.
        dt1 = self.__class__(dt1.hour, dt1.minute, dt1.second, dt1.microsecond)
        dt2 = self.__class__(dt2.hour, dt2.minute, dt2.second, dt2.microsecond)
        if self.diff(dt1).in_seconds() < self.diff(dt2).in_seconds():
            return dt1
        return dt2
    def farthest(self, dt1: Time | time, dt2: Time | time) -> Time:
        """
        Get the farthest time from the instance.
        """
        dt1 = self.__class__(dt1.hour, dt1.minute, dt1.second, dt1.microsecond)
        dt2 = self.__class__(dt2.hour, dt2.minute, dt2.second, dt2.microsecond)
        if self.diff(dt1).in_seconds() > self.diff(dt2).in_seconds():
            return dt1
        return dt2
    # ADDITIONS AND SUBTRACTIONS
    def add(
        self, hours: int = 0, minutes: int = 0, seconds: int = 0, microseconds: int = 0
    ) -> Time:
        """
        Add duration to the instance.

        The arithmetic is delegated to DateTime (anchored at the epoch) so
        that the result wraps around midnight correctly.

        :param hours: The number of hours
        :param minutes: The number of minutes
        :param seconds: The number of seconds
        :param microseconds: The number of microseconds
        """
        from pendulum.datetime import DateTime
        return (
            DateTime.EPOCH.at(self.hour, self.minute, self.second, self.microsecond)
            .add(
                hours=hours, minutes=minutes, seconds=seconds, microseconds=microseconds
            )
            .time()
        )
    def subtract(
        self, hours: int = 0, minutes: int = 0, seconds: int = 0, microseconds: int = 0
    ) -> Time:
        """
        Subtract duration from the instance.

        :param hours: The number of hours
        :type hours: int
        :param minutes: The number of minutes
        :type minutes: int
        :param seconds: The number of seconds
        :type seconds: int
        :param microseconds: The number of microseconds
        :type microseconds: int

        :rtype: Time
        """
        from pendulum.datetime import DateTime
        return (
            DateTime.EPOCH.at(self.hour, self.minute, self.second, self.microsecond)
            .subtract(
                hours=hours, minutes=minutes, seconds=seconds, microseconds=microseconds
            )
            .time()
        )
    def add_timedelta(self, delta: datetime.timedelta) -> Time:
        """
        Add timedelta duration to the instance.

        :param delta: The timedelta instance
        :raises TypeError: If the delta spans one or more days.
        """
        if delta.days:
            raise TypeError("Cannot add timedelta with days to Time.")
        return self.add(seconds=delta.seconds, microseconds=delta.microseconds)
    def subtract_timedelta(self, delta: datetime.timedelta) -> Time:
        """
        Remove timedelta duration from the instance.

        :param delta: The timedelta instance
        :raises TypeError: If the delta spans one or more days.
        """
        if delta.days:
            raise TypeError("Cannot subtract timedelta with days to Time.")
        return self.subtract(seconds=delta.seconds, microseconds=delta.microseconds)
    def __add__(self, other: datetime.timedelta) -> Time:
        # Only timedelta addition is supported.
        if not isinstance(other, timedelta):
            return NotImplemented
        return self.add_timedelta(other)
    @overload
    def __sub__(self, other: time) -> pendulum.Duration:
        ...
    @overload
    def __sub__(self, other: datetime.timedelta) -> Time:
        ...
    def __sub__(self, other: time | datetime.timedelta) -> pendulum.Duration | Time:
        # time - timedelta -> Time; time - time -> (signed) Duration.
        if not isinstance(other, (Time, time, timedelta)):
            return NotImplemented
        if isinstance(other, timedelta):
            return self.subtract_timedelta(other)
        if isinstance(other, time):
            if other.tzinfo is not None:
                raise TypeError("Cannot subtract aware times to or from Time.")
            other = self.__class__(
                other.hour, other.minute, other.second, other.microsecond
            )
        # abs=False so the result keeps its sign.
        return other.diff(self, False)
    @overload
    def __rsub__(self, other: time) -> pendulum.Duration:
        ...
    @overload
    def __rsub__(self, other: datetime.timedelta) -> Time:
        ...
    def __rsub__(self, other: time | datetime.timedelta) -> pendulum.Duration | Time:
        # Reflected subtraction: plain-time - Time.
        if not isinstance(other, (Time, time)):
            return NotImplemented
        if isinstance(other, time):
            if other.tzinfo is not None:
                raise TypeError("Cannot subtract aware times to or from Time.")
            other = self.__class__(
                other.hour, other.minute, other.second, other.microsecond
            )
        return other.__sub__(self)
    # DIFFERENCES
    def diff(self, dt: time | None = None, abs: bool = True) -> Duration:
        """
        Returns the difference between two Time objects as a Duration.

        :param dt: The time to subtract from (defaults to the current time)
        :param abs: Whether to return an absolute duration or not
        """
        if dt is None:
            dt = pendulum.now().time()
        else:
            dt = self.__class__(dt.hour, dt.minute, dt.second, dt.microsecond)
        # Convert both operands to microseconds since midnight
        # (sub-second precision of self/dt is intentionally ignored here).
        us1 = (
            self.hour * SECS_PER_HOUR + self.minute * SECS_PER_MIN + self.second
        ) * USECS_PER_SEC
        us2 = (
            dt.hour * SECS_PER_HOUR + dt.minute * SECS_PER_MIN + dt.second
        ) * USECS_PER_SEC
        klass = Duration
        if abs:
            klass = AbsoluteDuration
        return klass(microseconds=us2 - us1)
    def diff_for_humans(
        self,
        other: time | None = None,
        absolute: bool = False,
        locale: str | None = None,
    ) -> str:
        """
        Get the difference in a human readable format in the current locale.

        :param other: The time to compare against (defaults to now)
        :param absolute: removes time difference modifiers ago, after, etc
        :param locale: The locale to use for localization
        """
        is_now = other is None
        if is_now:
            other = pendulum.now().time()
        diff = self.diff(other)
        return pendulum.format_diff(diff, is_now, absolute, locale)
    # Compatibility methods
    def replace(
        self,
        hour: int | None = None,
        minute: int | None = None,
        second: int | None = None,
        microsecond: int | None = None,
        tzinfo: bool | datetime.tzinfo | Literal[True] | None = True,
        fold: int = 0,
    ) -> Time:
        # tzinfo defaults to the sentinel True so that None can be passed
        # explicitly to strip the timezone.
        if tzinfo is True:
            tzinfo = self.tzinfo
        hour = hour if hour is not None else self.hour
        minute = minute if minute is not None else self.minute
        second = second if second is not None else self.second
        microsecond = microsecond if microsecond is not None else self.microsecond
        t = super().replace(
            hour,
            minute,
            second,
            microsecond,
            tzinfo=cast(Optional[datetime.tzinfo], tzinfo),
            fold=fold,
        )
        return self.__class__(
            t.hour, t.minute, t.second, t.microsecond, tzinfo=t.tzinfo
        )
    def __getnewargs__(self) -> tuple[Time]:
        return (self,)
    def _get_state(
        self, protocol: int = 3
    ) -> tuple[int, int, int, int, datetime.tzinfo | None]:
        # State tuple used by the pickle protocol below.
        tz = self.tzinfo
        return self.hour, self.minute, self.second, self.microsecond, tz
    def __reduce__(
        self,
    ) -> tuple[type[Time], tuple[int, int, int, int, datetime.tzinfo | None]]:
        return self.__reduce_ex__(2)
    def __reduce_ex__(  # type: ignore[override]
        self, protocol: int
    ) -> tuple[type[Time], tuple[int, int, int, int, datetime.tzinfo | None]]:
        return self.__class__, self._get_state(protocol)
# Class-level bounds and resolution, mirroring datetime.time's attributes
# but using pendulum's own types.
Time.min = Time(0, 0, 0)
Time.max = Time(23, 59, 59, 999999)
Time.resolution = Duration(microseconds=1)
| mit | 5835337152a470a4509343255314a1df | 28.50165 | 88 | 0.583287 | 4.104224 | false | false | false | false |
sdispater/pendulum | tests/datetime/test_behavior.py | 1 | 3578 | from __future__ import annotations
import pickle
from copy import deepcopy
from datetime import date
from datetime import datetime
from datetime import time
import pytest
import pendulum
from pendulum import timezone
from pendulum.tz.timezone import Timezone
from pendulum.utils._compat import zoneinfo
# Fixtures: "p" is a pendulum DateTime, "dt" the equivalent stdlib datetime;
# each test below asserts that pendulum mirrors stdlib datetime behavior.
@pytest.fixture
def p():
    return pendulum.datetime(2016, 8, 27, 12, 34, 56, 123456, tz="Europe/Paris")
@pytest.fixture
def p1(p):
    return p.in_tz("America/New_York")
@pytest.fixture
def dt():
    tz = timezone("Europe/Paris")
    return tz.convert(datetime(2016, 8, 27, 12, 34, 56, 123456))
def test_timetuple(p, dt):
    assert dt.timetuple() == p.timetuple()
def test_utctimetuple(p, dt):
    assert dt.utctimetuple() == p.utctimetuple()
def test_date(p, dt):
    assert p.date() == dt.date()
def test_time(p, dt):
    assert p.time() == dt.time()
def test_timetz(p, dt):
    assert p.timetz() == dt.timetz()
def test_astimezone(p, dt, p1):
    assert p.astimezone(p1.tzinfo) == dt.astimezone(p1.tzinfo)
def test_ctime(p, dt):
    assert p.ctime() == dt.ctime()
def test_isoformat(p, dt):
    assert p.isoformat() == dt.isoformat()
def test_utcoffset(p, dt):
    assert p.utcoffset() == dt.utcoffset()
def test_tzname(p, dt):
    assert p.tzname() == dt.tzname()
def test_dst(p, dt):
    assert p.dst() == dt.dst()
def test_toordinal(p, dt):
    assert p.toordinal() == dt.toordinal()
def test_weekday(p, dt):
    assert p.weekday() == dt.weekday()
def test_isoweekday(p, dt):
    assert p.isoweekday() == dt.isoweekday()
def test_isocalendar(p, dt):
    assert p.isocalendar() == dt.isocalendar()
# Constructors should agree with their stdlib counterparts.
def test_fromtimestamp():
    p = pendulum.DateTime.fromtimestamp(0, pendulum.UTC)
    dt = datetime.fromtimestamp(0, pendulum.UTC)
    assert p == dt
def test_utcfromtimestamp():
    p = pendulum.DateTime.utcfromtimestamp(0)
    dt = datetime.utcfromtimestamp(0)
    assert p == dt
def test_fromordinal():
    assert datetime.fromordinal(730120) == pendulum.DateTime.fromordinal(730120)
def test_combine():
    p = pendulum.DateTime.combine(date(2016, 1, 1), time(1, 2, 3, 123456))
    dt = datetime.combine(date(2016, 1, 1), time(1, 2, 3, 123456))
    assert p == dt
def test_hash(p, dt):
    # Equal instants hash equally; a different zone yields a different hash.
    assert hash(p) == hash(dt)
    dt1 = pendulum.datetime(2016, 8, 27, 12, 34, 56, 123456, tz="Europe/Paris")
    dt2 = pendulum.datetime(2016, 8, 27, 12, 34, 56, 123456, tz="Europe/Paris")
    dt3 = pendulum.datetime(2016, 8, 27, 12, 34, 56, 123456, tz="America/Toronto")
    assert hash(dt1) == hash(dt2)
    assert hash(dt1) != hash(dt3)
def test_pickle():
    dt1 = pendulum.datetime(2016, 8, 27, 12, 34, 56, 123456, tz="Europe/Paris")
    s = pickle.dumps(dt1)
    dt2 = pickle.loads(s)
    assert dt1 == dt2
def test_pickle_with_integer_tzinfo():
    dt1 = pendulum.datetime(2016, 8, 27, 12, 34, 56, 123456, tz=0)
    s = pickle.dumps(dt1)
    dt2 = pickle.loads(s)
    assert dt1 == dt2
def test_proper_dst():
    # Historical DST data should match the system zoneinfo database.
    dt = pendulum.datetime(1941, 7, 1, tz="Europe/Amsterdam")
    native_dt = datetime(1941, 7, 1, tzinfo=zoneinfo.ZoneInfo("Europe/Amsterdam"))
    assert dt.dst() == native_dt.dst()
def test_deepcopy():
    dt = pendulum.datetime(1941, 7, 1, tz="Europe/Amsterdam")
    assert dt == deepcopy(dt)
def test_pickle_timezone():
    # Both named zones and UTC must round-trip through pickle as Timezone.
    dt1 = pendulum.timezone("Europe/Amsterdam")
    s = pickle.dumps(dt1)
    dt2 = pickle.loads(s)
    assert isinstance(dt2, Timezone)
    dt1 = pendulum.timezone("UTC")
    s = pickle.dumps(dt1)
    dt2 = pickle.loads(s)
    assert isinstance(dt2, Timezone)
| mit | f6d12fa903b3087a710b4ff257ab6a65 | 19.802326 | 82 | 0.653158 | 2.848726 | false | true | false | false |
sdispater/pendulum | pendulum/locales/fr/locale.py | 1 | 4501 | from .custom import translations as custom_translations
"""
fr locale file.
It has been generated automatically and must not be modified directly.
"""
# French locale data (auto-generated; see module docstring). "plural" and
# "ordinal" implement the CLDR selection rules: French treats 0 and 1 as
# the "one" plural category.
locale = {
    "plural": lambda n: "one" if (n == n and ((n == 0) or (n == 1))) else "other",
    "ordinal": lambda n: "one" if (n == n and (n == 1)) else "other",
    "translations": {
        "days": {
            "abbreviated": {
                0: "dim.",
                1: "lun.",
                2: "mar.",
                3: "mer.",
                4: "jeu.",
                5: "ven.",
                6: "sam.",
            },
            "narrow": {0: "D", 1: "L", 2: "M", 3: "M", 4: "J", 5: "V", 6: "S"},
            "short": {0: "di", 1: "lu", 2: "ma", 3: "me", 4: "je", 5: "ve", 6: "sa"},
            "wide": {
                0: "dimanche",
                1: "lundi",
                2: "mardi",
                3: "mercredi",
                4: "jeudi",
                5: "vendredi",
                6: "samedi",
            },
        },
        "months": {
            "abbreviated": {
                1: "janv.",
                2: "févr.",
                3: "mars",
                4: "avr.",
                5: "mai",
                6: "juin",
                7: "juil.",
                8: "août",
                9: "sept.",
                10: "oct.",
                11: "nov.",
                12: "déc.",
            },
            "narrow": {
                1: "J",
                2: "F",
                3: "M",
                4: "A",
                5: "M",
                6: "J",
                7: "J",
                8: "A",
                9: "S",
                10: "O",
                11: "N",
                12: "D",
            },
            "wide": {
                1: "janvier",
                2: "février",
                3: "mars",
                4: "avril",
                5: "mai",
                6: "juin",
                7: "juillet",
                8: "août",
                9: "septembre",
                10: "octobre",
                11: "novembre",
                12: "décembre",
            },
        },
        # Absolute amounts ("{0} jours"), keyed by plural category.
        "units": {
            "year": {"one": "{0} an", "other": "{0} ans"},
            "month": {"one": "{0} mois", "other": "{0} mois"},
            "week": {"one": "{0} semaine", "other": "{0} semaines"},
            "day": {"one": "{0} jour", "other": "{0} jours"},
            "hour": {"one": "{0} heure", "other": "{0} heures"},
            "minute": {"one": "{0} minute", "other": "{0} minutes"},
            "second": {"one": "{0} seconde", "other": "{0} secondes"},
            "microsecond": {"one": "{0} microseconde", "other": "{0} microsecondes"},
        },
        # Relative phrases ("dans {0} jours" / "il y a {0} jours").
        "relative": {
            "year": {
                "future": {"other": "dans {0} ans", "one": "dans {0} an"},
                "past": {"other": "il y a {0} ans", "one": "il y a {0} an"},
            },
            "month": {
                "future": {"other": "dans {0} mois", "one": "dans {0} mois"},
                "past": {"other": "il y a {0} mois", "one": "il y a {0} mois"},
            },
            "week": {
                "future": {"other": "dans {0} semaines", "one": "dans {0} semaine"},
                "past": {"other": "il y a {0} semaines", "one": "il y a {0} semaine"},
            },
            "day": {
                "future": {"other": "dans {0} jours", "one": "dans {0} jour"},
                "past": {"other": "il y a {0} jours", "one": "il y a {0} jour"},
            },
            "hour": {
                "future": {"other": "dans {0} heures", "one": "dans {0} heure"},
                "past": {"other": "il y a {0} heures", "one": "il y a {0} heure"},
            },
            "minute": {
                "future": {"other": "dans {0} minutes", "one": "dans {0} minute"},
                "past": {"other": "il y a {0} minutes", "one": "il y a {0} minute"},
            },
            "second": {
                "future": {"other": "dans {0} secondes", "one": "dans {0} seconde"},
                "past": {"other": "il y a {0} secondes", "one": "il y a {0} seconde"},
            },
        },
        "day_periods": {
            "midnight": "minuit",
            "am": "AM",
            "noon": "midi",
            "pm": "PM",
            "morning1": "du matin",
            "afternoon1": "de l’après-midi",
            "evening1": "du soir",
            "night1": "de nuit",
        },
    },
    "custom": custom_translations,
}
| mit | 41e604792f8d446d20c4aeac75322554 | 32.774436 | 86 | 0.317231 | 3.364794 | false | false | true | false |
sdispater/pendulum | pendulum/__init__.py | 1 | 8506 | from __future__ import annotations
import datetime as _datetime
from typing import Union
from typing import cast
from pendulum.__version__ import __version__
from pendulum.constants import DAYS_PER_WEEK
from pendulum.constants import FRIDAY
from pendulum.constants import HOURS_PER_DAY
from pendulum.constants import MINUTES_PER_HOUR
from pendulum.constants import MONDAY
from pendulum.constants import MONTHS_PER_YEAR
from pendulum.constants import SATURDAY
from pendulum.constants import SECONDS_PER_DAY
from pendulum.constants import SECONDS_PER_HOUR
from pendulum.constants import SECONDS_PER_MINUTE
from pendulum.constants import SUNDAY
from pendulum.constants import THURSDAY
from pendulum.constants import TUESDAY
from pendulum.constants import WEDNESDAY
from pendulum.constants import WEEKS_PER_YEAR
from pendulum.constants import YEARS_PER_CENTURY
from pendulum.constants import YEARS_PER_DECADE
from pendulum.date import Date
from pendulum.datetime import DateTime
from pendulum.duration import Duration
from pendulum.formatting import Formatter
from pendulum.helpers import format_diff
from pendulum.helpers import get_locale
from pendulum.helpers import locale
from pendulum.helpers import set_locale
from pendulum.helpers import week_ends_at
from pendulum.helpers import week_starts_at
from pendulum.interval import Interval
from pendulum.parser import parse
from pendulum.testing.traveller import Traveller
from pendulum.time import Time
from pendulum.tz import UTC
from pendulum.tz import local_timezone
from pendulum.tz import set_local_timezone
from pendulum.tz import test_local_timezone
from pendulum.tz import timezone
from pendulum.tz import timezones
from pendulum.tz.timezone import FixedTimezone
from pendulum.tz.timezone import Timezone
# Module-level defaults shared by the helper functions below.
_TEST_NOW: DateTime | None = None  # NOTE(review): not referenced in this module — presumably set by test helpers; confirm
_LOCALE = "en"  # default locale identifier
_WEEK_STARTS_AT = MONDAY  # default first day of the week (ISO convention)
_WEEK_ENDS_AT = SUNDAY  # default last day of the week
_formatter = Formatter()  # shared Formatter instance used by from_format()
def _safe_timezone(
    obj: str | float | _datetime.tzinfo | Timezone | FixedTimezone | None,
    dt: _datetime.datetime | None = None,
) -> Timezone | FixedTimezone:
    """
    Creates a timezone instance
    from a string, Timezone, TimezoneInfo or integer offset.

    ``dt`` is only consulted for plain ``tzinfo`` inputs, to resolve their
    UTC offset at that instant.
    """
    # Already a pendulum timezone: pass through untouched.
    if isinstance(obj, (Timezone, FixedTimezone)):
        return obj

    # None and the special string "local" both mean the system timezone.
    if obj is None or obj == "local":
        return local_timezone()

    if isinstance(obj, (int, float)):
        # Numeric input is an offset in hours; convert to seconds.
        obj = int(obj * 60 * 60)
    elif isinstance(obj, _datetime.tzinfo):
        # zoneinfo
        if hasattr(obj, "key"):
            obj = obj.key  # type: ignore
        # pytz
        elif hasattr(obj, "localize"):
            obj = obj.zone  # type: ignore
        elif obj.tzname(None) == "UTC":
            return UTC
        else:
            # Arbitrary tzinfo: fall back to its (fixed) UTC offset at *dt*.
            offset = obj.utcoffset(dt)

            if offset is None:
                offset = _datetime.timedelta(0)

            obj = int(offset.total_seconds())

    obj = cast(Union[str, int], obj)

    # At this point obj is either a timezone name or an offset in seconds.
    return timezone(obj)
# Public API
def datetime(
    year: int,
    month: int,
    day: int,
    hour: int = 0,
    minute: int = 0,
    second: int = 0,
    microsecond: int = 0,
    tz: str | float | Timezone | FixedTimezone | _datetime.tzinfo | None = UTC,
    fold: int = 1,
    raise_on_unknown_times: bool = False,
) -> DateTime:
    """
    Build a DateTime from individual date and time components.

    Delegates to ``DateTime.create``; ``tz`` defaults to UTC.
    """
    clock = {
        "hour": hour,
        "minute": minute,
        "second": second,
        "microsecond": microsecond,
    }
    return DateTime.create(
        year,
        month,
        day,
        tz=tz,
        fold=fold,
        raise_on_unknown_times=raise_on_unknown_times,
        **clock,
    )
def local(
    year: int,
    month: int,
    day: int,
    hour: int = 0,
    minute: int = 0,
    second: int = 0,
    microsecond: int = 0,
) -> DateTime:
    """
    Build a DateTime in the system's local timezone.
    """
    tz = local_timezone()
    return datetime(year, month, day, hour, minute, second, microsecond, tz=tz)
def naive(
    year: int,
    month: int,
    day: int,
    hour: int = 0,
    minute: int = 0,
    second: int = 0,
    microsecond: int = 0,
    fold: int = 1,
) -> DateTime:
    """
    Build a DateTime carrying no timezone information.
    """
    components = (year, month, day, hour, minute, second, microsecond)
    return DateTime(*components, fold=fold)
def date(year: int, month: int, day: int) -> Date:
    """
    Build a pendulum Date for the given calendar day.
    """
    return Date(year, month, day)
def time(hour: int, minute: int = 0, second: int = 0, microsecond: int = 0) -> Time:
    """
    Build a pendulum Time from its components.
    """
    return Time(hour, minute, second, microsecond)
def instance(
    dt: _datetime.datetime,
    tz: str | Timezone | FixedTimezone | _datetime.tzinfo | None = UTC,
) -> DateTime:
    """
    Wrap a stdlib ``datetime`` into a pendulum DateTime.

    A DateTime passes through unchanged; naive inputs are interpreted in
    ``tz`` (UTC by default).
    """
    if not isinstance(dt, _datetime.datetime):
        raise ValueError("instance() only accepts datetime objects.")

    # Already a pendulum DateTime: no conversion needed.
    if isinstance(dt, DateTime):
        return dt

    # Prefer the datetime's own tzinfo over the fallback argument.
    effective_tz = dt.tzinfo or tz
    if effective_tz is not None:
        effective_tz = _safe_timezone(effective_tz, dt=dt)

    return datetime(
        dt.year,
        dt.month,
        dt.day,
        dt.hour,
        dt.minute,
        dt.second,
        dt.microsecond,
        tz=cast(Union[str, int, Timezone, FixedTimezone, None], effective_tz),
    )
def now(tz: str | Timezone | None = None) -> DateTime:
    """
    Return the current date and time, delegating to ``DateTime.now``.
    """
    return DateTime.now(tz)
def today(tz: str | Timezone = "local") -> DateTime:
    """
    Return the start of the current day in ``tz`` (local by default).
    """
    current = now(tz)
    return current.start_of("day")
def tomorrow(tz: str | Timezone = "local") -> DateTime:
    """
    Create a DateTime instance for tomorrow (start of day).
    """
    return today(tz).add(days=1)
def yesterday(tz: str | Timezone = "local") -> DateTime:
    """
    Create a DateTime instance for yesterday (start of day).
    """
    return today(tz).subtract(days=1)
def from_format(
    string: str,
    fmt: str,
    tz: str | Timezone = UTC,
    locale: str | None = None,
) -> DateTime:
    """
    Parse ``string`` according to the given format token string ``fmt``
    and return the resulting DateTime.

    When the parsed text carries no timezone, ``tz`` is used.
    """
    parsed = _formatter.parse(string, fmt, now(tz=tz), locale=locale)
    if parsed["tz"] is None:
        parsed["tz"] = tz

    return datetime(**parsed)
def from_timestamp(timestamp: int | float, tz: str | Timezone = UTC) -> DateTime:
    """
    Create a DateTime instance from a POSIX timestamp (seconds since the
    epoch, interpreted as UTC), then convert it to ``tz`` if a timezone
    other than UTC was requested.
    """
    # fromtimestamp(..., tz=utc) replaces the deprecated utcfromtimestamp()
    # and yields the same wall-clock components.
    dt_utc = _datetime.datetime.fromtimestamp(timestamp, tz=_datetime.timezone.utc)
    result = datetime(
        dt_utc.year,
        dt_utc.month,
        dt_utc.day,
        dt_utc.hour,
        dt_utc.minute,
        dt_utc.second,
        dt_utc.microsecond,
    )
    # Bug fix: the original used `or` here, which made the condition always
    # true and forced a (redundant) conversion even for UTC. Only convert
    # when the caller asked for a non-UTC timezone.
    if tz is not UTC and tz != "UTC":
        result = result.in_timezone(tz)

    return result
def duration(
    days: float = 0,
    seconds: float = 0,
    microseconds: float = 0,
    milliseconds: float = 0,
    minutes: float = 0,
    hours: float = 0,
    weeks: float = 0,
    years: float = 0,
    months: float = 0,
) -> Duration:
    """
    Build a Duration from the given component amounts.
    """
    return Duration(
        years=years,
        months=months,
        weeks=weeks,
        days=days,
        hours=hours,
        minutes=minutes,
        seconds=seconds,
        milliseconds=milliseconds,
        microseconds=microseconds,
    )
def interval(start: DateTime, end: DateTime, absolute: bool = False) -> Interval:
    """
    Build an Interval between ``start`` and ``end``; the ``absolute`` flag
    is forwarded to the Interval constructor.
    """
    return Interval(start, end, absolute=absolute)
# Testing helpers: a single module-level Traveller provides the public
# time-freezing / time-travel entry points.
_traveller = Traveller(DateTime)

freeze = _traveller.freeze
travel = _traveller.travel
travel_to = _traveller.travel_to
travel_back = _traveller.travel_back
# Public API exported by `from pendulum import *`.
__all__ = [
    "__version__",
    "DAYS_PER_WEEK",
    "FRIDAY",
    "HOURS_PER_DAY",
    "MINUTES_PER_HOUR",
    "MONDAY",
    "MONTHS_PER_YEAR",
    "SATURDAY",
    "SECONDS_PER_DAY",
    "SECONDS_PER_HOUR",
    "SECONDS_PER_MINUTE",
    "SUNDAY",
    "THURSDAY",
    "TUESDAY",
    "WEDNESDAY",
    "WEEKS_PER_YEAR",
    "YEARS_PER_CENTURY",
    "YEARS_PER_DECADE",
    "Date",
    "DateTime",
    "Duration",
    "Formatter",
    "date",
    "datetime",
    "duration",
    "format_diff",
    "freeze",
    "from_format",
    "from_timestamp",
    "get_locale",
    "instance",
    "interval",
    "local",
    "locale",
    "naive",
    "now",
    "set_locale",
    "week_ends_at",
    "week_starts_at",
    "parse",
    "Interval",
    "Time",
    "UTC",
    "local_timezone",
    "set_local_timezone",
    "test_local_timezone",
    "time",
    "timezone",
    "timezones",
    "today",
    "tomorrow",
    "travel",
    "travel_back",
    "travel_to",
    "FixedTimezone",
    "Timezone",
    "yesterday",
]
| mit | 8f17b64ebb9d932c920edf79ede1a527 | 22.432507 | 84 | 0.626264 | 3.445119 | false | false | false | false |
sixohsix/twitter | twitter/oauth_dance.py | 3 | 3938 | from __future__ import print_function
import webbrowser
import time
from .api import Twitter, json
from .oauth import OAuth, write_token_file
from .oauth2 import OAuth2, write_bearer_token_file
# Python 2/3 compatibility: raw_input() was renamed to input() in Python 3.
try:
    _input = raw_input  # Python 2
except NameError:
    _input = input  # Python 3
def oauth2_dance(consumer_key, consumer_secret, token_filename=None):
    """
    Perform the OAuth2 dance to transform a consumer key and secret into a
    bearer token.

    If a token_filename is given, the bearer token will be written to
    the file.
    """
    client = Twitter(
        auth=OAuth2(consumer_key=consumer_key, consumer_secret=consumer_secret),
        format="",
        api_version="")
    response = json.loads(client.oauth2.token(grant_type="client_credentials"))
    token = response["access_token"]
    if token_filename:
        write_bearer_token_file(token_filename, token)
    return token
def get_oauth_pin(oauth_url, open_browser=True):
    """
    Prompt the user for the OAuth PIN.

    By default, a browser will open the authorization page. If `open_browser`
    is false, the authorization URL will just be printed instead.

    Returns the PIN entered by the user, stripped of whitespace.
    """
    print("Opening: %s\n" % oauth_url)

    if open_browser:
        print("""
In the web browser window that opens please choose to Allow
access. Copy the PIN number that appears on the next page and paste or
type it here:
""")
        opened = False
        try:
            opened = webbrowser.open(oauth_url)
            time.sleep(2)  # Sometimes the last command can print some
                           # crap. Wait a bit so it doesn't mess up the next
                           # prompt.
        except Exception:
            # Narrowed from a bare `except:` so that SystemExit and
            # KeyboardInterrupt still propagate.
            opened = False
        if not opened:
            print("""
Uh, I couldn't open a browser on your computer. Please go here to get
your PIN:
""" + oauth_url)
    else:  # not using a browser
        print("""
Please go to the following URL, authorize the app, and copy the PIN:
""" + oauth_url)

    return _input("Please enter the PIN: ").strip()
def oauth_dance(app_name, consumer_key, consumer_secret, token_filename=None, open_browser=True):
    """
    Perform the OAuth dance with some command-line prompts. Return the
    oauth_token and oauth_token_secret.

    Provide the name of your app in `app_name`, your consumer_key, and
    consumer_secret. This function will let the user allow your app to access
    their Twitter account using PIN authentication.

    If a `token_filename` is given, the oauth tokens will be written to
    the file.

    By default, this function attempts to open a browser to request access. If
    `open_browser` is false it will just print the URL instead.
    """
    print("Hi there! We're gonna get you all set up to use %s." % app_name)
    # Step 1: use only the consumer credentials to request a temporary
    # request token ("oob" = out-of-band, i.e. the PIN-based flow).
    twitter = Twitter(
        auth=OAuth('', '', consumer_key, consumer_secret),
        format='', api_version=None)
    oauth_token, oauth_token_secret = parse_oauth_tokens(
        twitter.oauth.request_token(oauth_callback="oob"))
    # Step 2: send the user to Twitter's authorization page and collect
    # the PIN they are shown after approving the app.
    oauth_url = ('https://api.twitter.com/oauth/authorize?oauth_token=' +
                 oauth_token)
    oauth_verifier = get_oauth_pin(oauth_url, open_browser)
    # Step 3: exchange the request token + PIN for the long-lived access
    # token pair.
    twitter = Twitter(
        auth=OAuth(
            oauth_token, oauth_token_secret, consumer_key, consumer_secret),
        format='', api_version=None)
    oauth_token, oauth_token_secret = parse_oauth_tokens(
        twitter.oauth.access_token(oauth_verifier=oauth_verifier))
    if token_filename:
        write_token_file(
            token_filename, oauth_token, oauth_token_secret)
        print()
        print("That's it! Your authorization keys have been written to %s." % (
            token_filename))
    return oauth_token, oauth_token_secret
def parse_oauth_tokens(result):
    """
    Extract the oauth_token and oauth_token_secret values from a
    URL-encoded OAuth token response body.

    Returns an (oauth_token, oauth_token_secret) tuple. Raises ValueError
    when either key is missing (the original implementation raised a
    confusing UnboundLocalError in that case, and crashed with ValueError
    on any value containing an '=' character).
    """
    tokens = {}
    for pair in result.split('&'):
        # Split on the first '=' only, so values containing '=' survive.
        key, _, value = pair.partition('=')
        tokens[key] = value
    try:
        return tokens['oauth_token'], tokens['oauth_token_secret']
    except KeyError:
        raise ValueError("Could not find oauth tokens in response: %r" % result)
| mit | 0f9c147d0f4e5132040717965e5b3fc3 | 32.092437 | 97 | 0.643982 | 3.89901 | false | false | false | false |
sdispater/pendulum | tests/parsing/test_parsing.py | 1 | 15572 | from __future__ import annotations
import datetime
import pytest
import pendulum
from pendulum.parsing import ParserError
from pendulum.parsing import parse
def test_y():
    """A bare year parses as January 1st, midnight, naive."""
    parsed = parse("2016")

    assert (parsed.year, parsed.month, parsed.day) == (2016, 1, 1)
    assert (parsed.hour, parsed.minute, parsed.second, parsed.microsecond) == (0, 0, 0, 0)
    assert parsed.tzinfo is None
def test_ym():
    """Year-month parses as the first of that month, midnight, naive."""
    parsed = parse("2016-10")

    assert (parsed.year, parsed.month, parsed.day) == (2016, 10, 1)
    assert (parsed.hour, parsed.minute, parsed.second, parsed.microsecond) == (0, 0, 0, 0)
    assert parsed.tzinfo is None
def test_ymd():
    """A full calendar date parses at midnight, naive."""
    parsed = parse("2016-10-06")

    assert (parsed.year, parsed.month, parsed.day) == (2016, 10, 6)
    assert (parsed.hour, parsed.minute, parsed.second, parsed.microsecond) == (0, 0, 0, 0)
    assert parsed.tzinfo is None
def test_ymd_one_character():
    """Non-strict mode accepts single-digit month and day."""
    parsed = parse("2016-2-6", strict=False)

    assert (parsed.year, parsed.month, parsed.day) == (2016, 2, 6)
    assert (parsed.hour, parsed.minute, parsed.second, parsed.microsecond) == (0, 0, 0, 0)
    assert parsed.tzinfo is None
def test_ymd_hms():
    """Date plus time, with and without fractional seconds."""
    for text, expected_us in [
        ("2016-10-06 12:34:56", 0),
        ("2016-10-06 12:34:56.123456", 123456),
    ]:
        parsed = parse(text)

        assert (parsed.year, parsed.month, parsed.day) == (2016, 10, 6)
        assert (parsed.hour, parsed.minute, parsed.second) == (12, 34, 56)
        assert parsed.microsecond == expected_us
        assert parsed.tzinfo is None
def test_rfc_3339():
    """An RFC 3339 string with a +05:30 offset keeps that offset."""
    parsed = parse("2016-10-06T12:34:56+05:30")

    assert (parsed.year, parsed.month, parsed.day) == (2016, 10, 6)
    assert (parsed.hour, parsed.minute, parsed.second, parsed.microsecond) == (12, 34, 56, 0)
    assert parsed.utcoffset().total_seconds() == 19800
def test_rfc_3339_extended():
    """Fractional seconds are parsed exactly, including leading zeros."""
    for text, expected_us in [
        ("2016-10-06T12:34:56.123456+05:30", 123456),
        ("2016-10-06T12:34:56.000123+05:30", 123),
    ]:
        parsed = parse(text)

        assert (parsed.year, parsed.month, parsed.day) == (2016, 10, 6)
        assert (parsed.hour, parsed.minute, parsed.second) == (12, 34, 56)
        assert parsed.microsecond == expected_us
        assert parsed.utcoffset().total_seconds() == 19800
def test_rfc_3339_extended_nanoseconds():
    """Nanosecond precision is truncated to microseconds."""
    parsed = parse("2016-10-06T12:34:56.123456789+05:30")

    assert (parsed.year, parsed.month, parsed.day) == (2016, 10, 6)
    assert (parsed.hour, parsed.minute, parsed.second) == (12, 34, 56)
    assert parsed.microsecond == 123456
    assert parsed.utcoffset().total_seconds() == 19800
def test_iso_8601_date():
    """ISO 8601 calendar dates in extended and basic notation."""
    for text, expected_ymd in [
        ("2012", (2012, 1, 1)),
        ("2012-05-03", (2012, 5, 3)),
        ("20120503", (2012, 5, 3)),
        ("2012-05", (2012, 5, 1)),
    ]:
        parsed = parse(text)

        assert (parsed.year, parsed.month, parsed.day) == expected_ymd
        assert (parsed.hour, parsed.minute, parsed.second, parsed.microsecond) == (0, 0, 0, 0)
        assert parsed.tzinfo is None
def test_iso8601_datetime():
    """ISO 8601 date-times: basic/extended notation, fractions, offsets.

    The third element of each case is the expected UTC offset in seconds,
    or None for a naive result.
    """
    cases = [
        ("2016-10-01T14", (2016, 10, 1, 14, 0, 0, 0), None),
        ("2016-10-01T14:30", (2016, 10, 1, 14, 30, 0, 0), None),
        ("20161001T14", (2016, 10, 1, 14, 0, 0, 0), None),
        ("20161001T1430", (2016, 10, 1, 14, 30, 0, 0), None),
        ("20161001T1430+0530", (2016, 10, 1, 14, 30, 0, 0), 19800),
        # Comma is a valid ISO 8601 decimal separator.
        ("20161001T1430,4+0530", (2016, 10, 1, 14, 30, 0, 400000), 19800),
        ("2008-09-03T20:56:35.450686+01", (2008, 9, 3, 20, 56, 35, 450686), 3600),
    ]
    for text, fields, offset in cases:
        parsed = parse(text)

        actual = (
            parsed.year, parsed.month, parsed.day,
            parsed.hour, parsed.minute, parsed.second, parsed.microsecond,
        )
        assert actual == fields
        if offset is None:
            assert parsed.tzinfo is None
        else:
            assert parsed.utcoffset().total_seconds() == offset
def test_iso8601_week_number():
    """ISO 8601 week dates, including long years and year boundaries."""
    cases = [
        ("2012-W05", (2012, 1, 30)),
        ("2012W05", (2012, 1, 30)),
        # Long year (53 ISO weeks).
        ("2015W53", (2015, 12, 28)),
        ("2012-W05-5", (2012, 2, 3)),
        ("2012W055", (2012, 2, 3)),
        # Week dates may land in the neighbouring calendar year.
        ("2009-W53-7", (2010, 1, 3)),
        ("2009-W01-1", (2008, 12, 29)),
    ]
    for text, expected_ymd in cases:
        parsed = parse(text)

        assert (parsed.year, parsed.month, parsed.day) == expected_ymd
        assert (parsed.hour, parsed.minute, parsed.second, parsed.microsecond) == (0, 0, 0, 0)
        assert parsed.tzinfo is None
def test_iso8601_week_number_with_time():
    """ISO 8601 week dates combined with a time-of-day component."""
    cases = [
        ("2012-W05T09", (2012, 1, 30)),
        ("2012W05T09", (2012, 1, 30)),
        ("2012-W05-5T09", (2012, 2, 3)),
        ("2012W055T09", (2012, 2, 3)),
    ]
    for text, expected_ymd in cases:
        parsed = parse(text)

        assert (parsed.year, parsed.month, parsed.day) == expected_ymd
        assert (parsed.hour, parsed.minute, parsed.second, parsed.microsecond) == (9, 0, 0, 0)
        assert parsed.tzinfo is None
def test_iso8601_ordinal():
    """ISO 8601 ordinal dates (day-of-year) in both notations."""
    for text in ("2012-007", "2012007"):
        parsed = parse(text)

        assert (parsed.year, parsed.month, parsed.day) == (2012, 1, 7)
        assert (parsed.hour, parsed.minute, parsed.second, parsed.microsecond) == (0, 0, 0, 0)
        assert parsed.tzinfo is None
def test_iso8601_time():
    """Time-only inputs are anchored on the date of the supplied `now`."""
    now = pendulum.datetime(2015, 11, 12)

    for text, expected_us in [
        ("201205", 0),
        ("20:12:05", 0),
        ("20:12:05.123456", 123456),
    ]:
        parsed = parse(text, now=now)

        assert (parsed.year, parsed.month, parsed.day) == (2015, 11, 12)
        assert (parsed.hour, parsed.minute, parsed.second) == (20, 12, 5)
        assert parsed.microsecond == expected_us
        assert parsed.tzinfo is None
def test_iso8601_ordinal_invalid():
    """An ordinal date cannot carry an extra day component."""
    with pytest.raises(ParserError):
        parse("2012-007-05")
def test_exact():
    """`exact=True` yields plain date/time objects instead of datetimes."""
    date_cases = [
        ("2012", (2012, 1, 1)),
        ("2012-03", (2012, 3, 1)),
        ("2012-03-13", (2012, 3, 13)),
        ("2012W055", (2012, 2, 3)),
        ("2012007", (2012, 1, 7)),
    ]
    for text, expected_ymd in date_cases:
        parsed = parse(text, exact=True)

        assert isinstance(parsed, datetime.date)
        assert (parsed.year, parsed.month, parsed.day) == expected_ymd

    parsed = parse("20:12:05", exact=True)

    assert isinstance(parsed, datetime.time)
    assert (parsed.hour, parsed.minute, parsed.second, parsed.microsecond) == (20, 12, 5, 0)
def test_edge_cases():
    """Non-strict mode resolves ambiguous or loosely formatted dates."""
    cases = [
        ("2013-11-1", (2013, 11, 1)),
        # Two-digit years are mapped into the 21st century.
        ("10-01-01", (2010, 1, 1)),
        ("31-01-01", (2031, 1, 1)),
        ("32-01-01", (2032, 1, 1)),
    ]
    for text, expected_ymd in cases:
        parsed = parse(text, strict=False)

        assert (parsed.year, parsed.month, parsed.day) == expected_ymd
        assert (parsed.hour, parsed.minute, parsed.second, parsed.microsecond) == (0, 0, 0, 0)
        assert parsed.tzinfo is None
def test_strict():
    """Free-form text fails in strict mode but parses with strict=False."""
    text = "4 Aug 2015 - 11:20 PM"

    with pytest.raises(ParserError):
        parse(text)

    parsed = parse(text, strict=False)

    assert (parsed.year, parsed.month, parsed.day) == (2015, 8, 4)
    assert (parsed.hour, parsed.minute, parsed.second, parsed.microsecond) == (23, 20, 0, 0)
    assert parsed.tzinfo is None
def test_invalid():
    """Malformed ISO strings raise ParserError."""
    invalid_inputs = (
        "201610T",
        "2012-W54",
        "2012-W13-8",
        # W53 in a normal (not long) ISO year.
        "2017W53",
    )
    for text in invalid_inputs:
        with pytest.raises(ParserError):
            parse(text)
def test_exif_edge_case():
    """EXIF-style dates use ':' as the date separator."""
    parsed = parse("2016:12:26 15:45:28")

    assert (parsed.year, parsed.month, parsed.day) == (2016, 12, 26)
    assert (parsed.hour, parsed.minute, parsed.second) == (15, 45, 28)
intel/intel-iot-refkit | meta-refkit-core/scripts/lib/wic/plugins/source/dm-verity.py | 6 | 4985 | # Copyright (c) 2017, Intel Corporation.
# All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# DESCRIPTION
# This source plugin can be used for a partition following sometime after
# the main rootfs in a wic file to generate a partition containing
# dm-verity hash data for the rootfs.
#
# AUTHORS
# Patrick Ohly
#
import base64
import glob
import logging
import os
import re
import shutil
import tempfile
from wic import WicError
from wic.pluginbase import SourcePlugin
from wic.misc import (exec_cmd, exec_native_cmd, get_bitbake_var)
logger = logging.getLogger('wic')
class DMVerityPlugin(SourcePlugin):
    """
    Creates dm-verity hash data for one rootfs partition, as identified by
    the --label parameter.
    """

    name = 'dm-verity'

    @classmethod
    def do_prepare_partition(cls, part, source_params, creator, cr_workdir,
                             oe_builddir, bootimg_dir, kernel_dir,
                             rootfs_dir, native_sysroot):
        """
        Called to do the actual content population for a partition i.e. it
        'prepares' the partition to be incorporated into the image.
        In this case, locate the temporary root partition and hash it.
        """

        # We rely on the --label parameter and the naming convention
        # in partition.py prepare_rootfs() here to find the already
        # prepared rootfs partition image.
        pattern = '%s/rootfs_%s.*' % (cr_workdir, part.label)
        rootfs = glob.glob(pattern)
        if len(rootfs) != 1:
            raise WicError("%s shell pattern does not match exactly one rootfs image (missing --label parameter?): %s" % (pattern, rootfs))
        else:
            rootfs = rootfs[0]

        logger.debug("Calculating dm-verity hash for rootfs %s (native %s)." % (rootfs, native_sysroot))
        hashimg = '%s/dm-verity_%s.img' % (cr_workdir, part.label)
        # Reserve some fixed amount of space at the start of the hash image
        # for our own data (in particular, the signed root hash).
        # The content of that part is:
        # roothash=<....>
        # <potentially some more assignments in the future>
        # signature=<single line of base64 encoded OpenSSL sha256 digest>
        header_size = 4096
        ret, out = exec_native_cmd("veritysetup format '%s' '%s' --hash-offset=%d" %
                                   (rootfs, hashimg, header_size),
                                   native_sysroot)
        # veritysetup reports the Merkle-tree root hash on a
        # "Root hash: ..." line of its output.
        m = re.search(r'^Root hash:\s*(\S+)$', out, re.MULTILINE)
        if ret or not m:
            raise WicError('veritysetup failed: %s' % out)
        else:
            root_hash = m.group(1)

        # Sign the header data with the configured private key so the boot
        # code can verify the root hash before trusting the partition.
        privkey = get_bitbake_var('REFKIT_DMVERITY_PRIVATE_KEY')
        password = get_bitbake_var('REFKIT_DMVERITY_PASSWORD')
        tmp = tempfile.mkdtemp(prefix='dm-verity-')
        try:
            data_filename = os.path.join(tmp, 'data')
            header = ('roothash=%s\nheadersize=%d\n' % (root_hash, header_size)).encode('ascii')
            with open(data_filename, 'wb') as data:
                data.write(header)
            # Must use a temporary file, exec_native_cmd() only supports UTF-8 output.
            signature = os.path.join(tmp, 'sig')
            ret, out = exec_native_cmd("openssl dgst -sha256 -passin '%s' -sign '%s' -out '%s' '%s'" %
                                       (password, privkey, signature, data_filename),
                                       native_sysroot)
            if ret:
                raise WicError('openssl signing failed')
            with open(signature, 'rb') as f:
                header += b'signature=' + base64.standard_b64encode(f.read()) + b'\n'
            # +1 leaves room for at least a trailing NUL inside the
            # reserved area.
            if len(header) + 1 >= header_size:
                raise WicError('reserved space for dm-verity header too small')
            # Overwrite the reserved area at the start of the hash image
            # in place.
            with open(hashimg, 'rb+') as hash:
                hash.write(header)
        finally:
            shutil.rmtree(tmp)

        data_bytes = os.stat(rootfs).st_size
        hash_bytes = os.stat(hashimg).st_size
        logger.debug("dm-verity data partition %d bytes, hash partition %d bytes, ratio %f." %
                     (data_bytes, hash_bytes, data_bytes / hash_bytes))
        # Publish the result to wic: partition size in KiB plus the file
        # providing the partition content.
        part.size = data_bytes // 1024
        part.source_file = hashimg
| mit | f564a8fbe7829221ab2d1900b578118f | 42.72807 | 139 | 0.601605 | 3.991193 | false | false | false | false |
intel/intel-iot-refkit | meta-refkit-extra/recipes-multimedia/btspeaker/files/btspeaker.py | 6 | 7402 | #!/usr/bin/python3
import os
import sys
import dbus
import dbus.service
import dbus.mainloop.glib
import time
import threading
import wave
import alsaaudio
import evdev
from evdev import ecodes
from select import select
try:
from gi.repository import GObject
except ImportError:
import gobject as GObject
# Class to be used as bluetooth agent for pairing
class my_bt_agent(dbus.service.Object):

    @dbus.service.method('org.bluez.Agent1', in_signature="os", out_signature="")
    def AuthorizeService(self, device, uuid):
        # BlueZ invokes this when a device asks to use a service; returning
        # without raising authorizes it.  Quitting the main loop signals the
        # caller that the connection went through.
        global mainloop
        mainloop.quit()
        return
# Class for playing wav files
class play_sound (threading.Thread):
    def __init__(self, threadID, name, soundfile):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name
        # Path of the wav file to play when the thread is started.
        self.soundfile = soundfile

    def run(self):
        # open audio file and device
        audio_file = wave.open(self.soundfile, 'rb')
        audio_device = alsaaudio.PCM(alsaaudio.PCM_PLAYBACK, alsaaudio.PCM_NORMAL, 'default')
        # we are hard coding the audio format!
        # NOTE(review): assumes the wav files are 44.1 kHz 16-bit stereo;
        # other formats will play garbled — confirm the shipped assets.
        audio_device.setchannels(2)
        audio_device.setrate(44100)
        audio_device.setformat(alsaaudio.PCM_FORMAT_S16_LE)
        audio_device.setperiodsize(980)
        # play the audio
        # Stream in 980-frame chunks (matching the period size) until EOF.
        audio_data = audio_file.readframes(980)
        while audio_data:
            audio_device.write(audio_data)
            audio_data = audio_file.readframes(980)
        audio_file.close()
# Class for blinking the leds
class blink_led (threading.Thread):
    def __init__(self, threadID, name):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name
        # Tracks whether the LED is currently lit, so run() can toggle it.
        self.lede_on = False

    def set_led(self, onoff):
        # Drive the LED through sysfs.
        # NOTE(review): "led-3" is board specific — confirm for target HW.
        value = open("/sys/class/leds/led-3/brightness","w")
        if onoff == True:
            value.write(str(1))
            self.lede_on = True
        else:
            value.write(str(0))
            self.lede_on = False
        value.close()

    def run(self):
        # Blink while the global `pairing` flag is set; give up (clearing
        # the flag and stopping the main loop) after max_pair_time seconds.
        global pairing
        global mainloop
        sleep_time = 0.2
        sleep_counter = 0
        max_pair_time = 60
        while 1:
            if (pairing == True):
                # Toggle the LED once per iteration.
                if self.lede_on == False:
                    self.set_led(True)
                else:
                    self.set_led(False)
                time.sleep(sleep_time)
                sleep_counter = sleep_counter + sleep_time
                if sleep_counter >= max_pair_time:
                    # Pairing window expired: abort and turn the LED off.
                    pairing = False
                    mainloop.quit()
                    self.set_led(False)
                    return
            else:
                # Pairing ended elsewhere: turn the LED off and exit.
                self.set_led(False)
                return
# Class for capturing the button events
class button_cb(threading.Thread):
    def __init__(self, threadID, name):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name

    def run(self):
        global pairing
        # Pick the evdev input device whose name contains 'PRP0001'.
        # NOTE(review): presumably the ACPI id of the board's GPIO button —
        # confirm; if no such device exists, `device` below is unbound.
        devices = [evdev.InputDevice(file_name) for file_name in evdev.list_devices()]
        for dev in devices:
            if 'PRP0001' in dev.name:
                device = evdev.InputDevice(dev.fn)
        while 1:
            # Poll with a 100 ms timeout so the thread stays responsive.
            r,w,x = select([device.fd], [], [], 0.1)
            if r:
                for event in device.read():
                    # A KEY_HOME press (value == 1) starts a pairing round.
                    if event.code == ecodes.KEY_HOME and event.value == 1:
                        if pairing == False:
                            pairing = True
                            buttonwait.set()
# Following function is heavily inspired by BlueZ tests
def remove_paired_bt_device():
    """Disconnect and unpair the device attached to adapter hci0, if any.

    Fixed for Python 3: the original used the Python 2 only `iteritems()`
    and a `print` statement, which made the file unparseable under the
    `#!/usr/bin/python3` shebang.  Also guards RemoveDevice() so it is only
    attempted when a device was actually found (the original could hit an
    unbound `path` otherwise).
    """
    bus = dbus.SystemBus()
    om = dbus.Interface(bus.get_object("org.bluez", "/"), "org.freedesktop.DBus.ObjectManager")
    managed_objects = om.GetManagedObjects()
    bt_adapter_path = "/org/bluez/hci0"
    bt_device = None
    bt_adapter = None
    # Find the first device object belonging to our adapter.
    # NOTE(review): the original compared the device's Address to itself
    # (always true), so it effectively selected the first device on hci0;
    # that behaviour is kept.
    for path, interfaces in managed_objects.items():
        if "org.bluez.Device1" in interfaces:
            properties = interfaces["org.bluez.Device1"]
            if properties["Adapter"] == bt_adapter_path:
                obj2 = bus.get_object("org.bluez", path)
                bt_device = dbus.Interface(obj2, "org.bluez.Device1")
                bt_adapter = dbus.Interface(obj2, "org.bluez.Adapter1")
                print("found device object")
                break
    # Locate the adapter object itself (overrides the proxy created above).
    for path, interfaces in managed_objects.items():
        if interfaces.get("org.bluez.Adapter1") is not None:
            obj2 = bus.get_object("org.bluez", path)
            bt_adapter = dbus.Interface(obj2, "org.bluez.Adapter1")
    if bt_device is not None:
        # Debug dump of the device proxy's attributes.
        for attr in dir(bt_device):
            print("bt_device.%s = %s" % (attr, getattr(bt_device, attr)))
        bt_device.Disconnect()
        path = bt_device.object_path
        if bt_adapter is not None:
            bt_adapter.RemoveDevice(path)
# This is the main pairing functions called from main thread
def do_pair():
    """Run one bluetooth pairing cycle.

    Removes any previously paired device, registers our NoInputNoOutput
    agent (bypassing authorization), makes adapter hci0 powered,
    pairable and discoverable, then blocks in the GLib main loop until
    the agent signals success or the LED timeout thread quits the loop.
    """
    global pairing
    global mainloop
    bus = dbus.SystemBus()
    #remove connected and paired device from bluez
    remove_paired_bt_device()
    # we are using our own agent to bypass authorization and get callback for connected state
    agent_path = "/test/agent"
    agent = my_bt_agent(bus, agent_path)
    bluez_obj = bus.get_object('org.bluez', "/org/bluez")
    manager = dbus.Interface(bluez_obj, "org.bluez.AgentManager1")
    manager.RegisterAgent(agent_path, 'NoInputNoOutput')
    manager.RequestDefaultAgent(agent_path)
    # Flip the adapter properties that make pairing possible.
    adapter_props = dbus.Interface(
        bus.get_object("org.bluez", "/org/bluez/hci0"),
        "org.freedesktop.DBus.Properties")
    for prop_name in ("Powered", "Pairable", "Discoverable"):
        adapter_props.Set("org.bluez.Adapter1", prop_name, dbus.Boolean(1))
    # let's wait for paired callback from bluez or timeout from led blink
    mainloop.run()
    pairing = False
    manager.UnregisterAgent(agent_path)
    agent.remove_from_connection()
# --- module-level state shared with the worker threads ---
mainloop = 0;  # replaced by the GLib MainLoop instance in __main__
buttonwait = threading.Event()  # set by button_cb when the pair button is pressed
pairing = False  # True while a pairing cycle is in progress
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
if __name__ == '__main__':
    bus = dbus.SystemBus()
    GObject.threads_init()
    mainloop = GObject.MainLoop()
    # let's unblock bt radio with connman
    conn = bus.get_object('net.connman', '/net/connman/technology/bluetooth')
    iface = dbus.Interface(conn, dbus_interface='net.connman.Technology')
    props = iface.GetProperties()
    if props["Powered"] == 0:
        iface.SetProperty("Powered", dbus.Boolean(1))
    # play start sound so we get pulseaudio up and running
    sound = play_sound(4, "play_start", "/home/btspeaker/btstartup.wav")
    sound.start()
    # let's start listening for button events
    button = button_cb(4, "button")
    button.daemon = True
    button.start();
    # goto button wait and bt pairing loop: each button press starts one
    # pairing cycle (pairing sound + blinking LED + do_pair()).
    while 1:
        buttonwait.wait()
        sound = play_sound(2, "play_start", "/home/btspeaker/btpairing.wav")
        sound.daemon = True
        sound.start()
        led = blink_led(1, "led")
        led.daemon = True
        led.start()
        do_pair()
        # NOTE(review): the success sound plays even when do_pair()
        # returned because of the 60 s timeout -- confirm this is the
        # intended user feedback.
        sound = play_sound(3, "play_paired", "/home/btspeaker/btsuccess.wav")
        sound.daemon = True
        sound.start()
        buttonwait.clear()
| mit | 6771469c9e00d417f385b3b420c61314 | 31.60793 | 108 | 0.601324 | 3.677099 | false | false | false | false |
intel/intel-iot-refkit | meta-iotqa/lib/oeqa/runtime/connectivity/services/managerdaemon.py | 6 | 1105 | import string
from oeqa.oetest import oeRuntimeTest
class CommDaemonTest(oeRuntimeTest):
    """Runtime check for the connman connection-manager daemon.

    @class CommDaemonTest
    """
    # Diagnostic output accumulated across helper calls; appended to
    # assertion failure messages.
    log = ""

    def target_collect_info(self, cmd):
        """Run *cmd* on the target and append its output to self.log.

        @fn target_collect_info
        @param self
        @param cmd: shell command to execute on the target device
        @return
        """
        (status, output) = self.target.run(cmd)
        self.log = self.log + "\n\n[Debug] Command output --- %s: \n" % cmd
        self.log = self.log + output

    '''Connmand daemon check'''
    def test_comm_daemoncheck(self):
        '''check connman daemon is active via systemctl
        @fn test_comm_daemoncheck
        @param self
        @return
        '''
        (status, output) = self.target.run('systemctl status connman')
        # Original used "if active: pass / else:"; inverted for clarity.
        if 'Active: active' not in output:
            # Daemon not running: force a failure status and gather
            # diagnostics before asserting.
            status = 1
            self.target_collect_info("ps")
            self.target_collect_info("systemctl status connman -l")
        ##
        # TESTPOINT: #1, test_comm_daemoncheck
        #
        self.assertEqual(status, 0, msg="Error messages: %s" % self.log)
| mit | 3b760459576e441f05a61ce6d26074fa | 27.333333 | 75 | 0.551131 | 3.890845 | false | true | false | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.