# (dataset-export artifact: corpus column header "code | path | quality_prob |
# learning_prob | filename | kind" and its table separator row - not part of
# any of the concatenated Python modules below)
import codecs
def search_function(encoding):
    """Codec search hook resolving this module's PDFDocEncoding codec.

    Python's codec lookup lowercases the requested name and replaces
    spaces (but *not* hyphens) with underscores before calling search
    functions.  The CodecInfo below is registered under the name
    'pdf-doc', so both spellings must be accepted here; the original
    only matched 'pdf_doc', which made ``codecs.lookup('pdf-doc')``
    raise LookupError.

    Returns the CodecInfo for a matching name, None otherwise.
    """
    if encoding in ('pdf_doc', 'pdf-doc'):
        return getregentry()
    return None


codecs.register(search_function)
### Codec APIs
class Codec(codecs.Codec):
    """Stateless str<->bytes codec backed by the PDFDocEncoding charmap."""

    def encode(self, input, errors='strict'):
        """Encode a str to PDFDocEncoding bytes; returns (bytes, length)."""
        result = codecs.charmap_encode(input, errors, encoding_table)
        return result

    def decode(self, input, errors='strict'):
        """Decode PDFDocEncoding bytes to str; returns (str, length)."""
        result = codecs.charmap_decode(input, errors, decoding_table)
        return result
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental encoder; PDFDocEncoding needs no cross-call state."""

    def encode(self, input, final=False):
        encoded, _consumed = codecs.charmap_encode(input, self.errors,
                                                   encoding_table)
        return encoded
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental decoder; PDFDocEncoding needs no cross-call state."""

    def decode(self, input, final=False):
        decoded, _consumed = codecs.charmap_decode(input, self.errors,
                                                   decoding_table)
        return decoded
class StreamWriter(Codec, codecs.StreamWriter):
    # Stream writer: encode() is inherited from Codec.
    pass
class StreamReader(Codec, codecs.StreamReader):
    # Stream reader: decode() is inherited from Codec.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo record describing the 'pdf-doc' encoding."""
    codec = Codec()
    return codecs.CodecInfo(
        name='pdf-doc',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table (from the PDF reference)

# PDFDocEncoding deviates from Latin-1 in only a handful of places:
#  * 0x18-0x1F hold spacing accents
#  * 0x80-0x9E hold typographic punctuation and ligatures, 0xA0 the euro
#  * printable ASCII (0x20-0x7E) and Latin-1 (0xA1-0xFF, except the soft
#    hyphen slot 0xAD) map to themselves
#  * every remaining code point is undefined (U+FFFE)
_PDF_DOC_SPECIALS = {
    0x18: '\u02d8',  # BREVE
    0x19: '\u02c7',  # CARON
    0x1A: '\u02c6',  # MODIFIER LETTER CIRCUMFLEX ACCENT
    0x1B: '\u02d9',  # DOT ABOVE
    0x1C: '\u02dd',  # DOUBLE ACUTE ACCENT
    0x1D: '\u02db',  # OGONEK
    0x1E: '\u02da',  # RING ABOVE
    0x1F: '\u02dc',  # SMALL TILDE
    0x80: '\u2022',  # BULLET
    0x81: '\u2020',  # DAGGER
    0x82: '\u2021',  # DOUBLE DAGGER
    0x83: '\u2026',  # HORIZONTAL ELLIPSIS
    0x84: '\u2014',  # EM DASH
    0x85: '\u2013',  # EN DASH
    0x86: '\u0192',  # LATIN SMALL LETTER F WITH HOOK
    0x87: '\u2044',  # FRACTION SLASH (solidus)
    0x88: '\u2039',  # SINGLE LEFT-POINTING ANGLE QUOTATION MARK
    0x89: '\u203a',  # SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
    0x8A: '\u2212',  # MINUS SIGN
    0x8B: '\u2030',  # PER MILLE SIGN
    0x8C: '\u201e',  # DOUBLE LOW-9 QUOTATION MARK (quotedblbase)
    0x8D: '\u201c',  # LEFT DOUBLE QUOTATION MARK
    0x8E: '\u201d',  # RIGHT DOUBLE QUOTATION MARK
    0x8F: '\u2018',  # LEFT SINGLE QUOTATION MARK
    0x90: '\u2019',  # RIGHT SINGLE QUOTATION MARK
    0x91: '\u201a',  # SINGLE LOW-9 QUOTATION MARK
    0x92: '\u2122',  # TRADE MARK SIGN
    0x93: '\ufb01',  # LATIN SMALL LIGATURE FI
    0x94: '\ufb02',  # LATIN SMALL LIGATURE FL
    0x95: '\u0141',  # LATIN CAPITAL LETTER L WITH STROKE
    0x96: '\u0152',  # LATIN CAPITAL LIGATURE OE
    0x97: '\u0160',  # LATIN CAPITAL LETTER S WITH CARON
    0x98: '\u0178',  # LATIN CAPITAL LETTER Y WITH DIAERESIS
    0x99: '\u017d',  # LATIN CAPITAL LETTER Z WITH CARON
    0x9A: '\u0131',  # LATIN SMALL LETTER DOTLESS I
    0x9B: '\u0142',  # LATIN SMALL LETTER L WITH STROKE
    0x9C: '\u0153',  # LATIN SMALL LIGATURE OE
    0x9D: '\u0161',  # LATIN SMALL LETTER S WITH CARON
    0x9E: '\u017e',  # LATIN SMALL LETTER Z WITH CARON
    0xA0: '\u20ac',  # EURO SIGN
}


def _build_decoding_table():
    """Assemble the 256-entry PDFDocEncoding -> Unicode map as a string."""
    chars = []
    for code in range(256):
        if code in _PDF_DOC_SPECIALS:
            chars.append(_PDF_DOC_SPECIALS[code])
        elif 0x20 <= code <= 0x7E or (0xA1 <= code <= 0xFF and code != 0xAD):
            chars.append(chr(code))  # identical to ASCII / Latin-1
        else:
            chars.append('\ufffe')   # undefined in PDFDocEncoding
    return ''.join(chars)


decoding_table = _build_decoding_table()
### Encoding table
# Reverse map (Unicode -> byte) derived from the decoding table; the
# original line had dataset-export metadata fused onto it, which made
# the module unparseable.
encoding_table = codecs.charmap_build(decoding_table)
from fractions import Fraction
from io import SEEK_CUR
from pathlib import Path
from struct import Struct, unpack, calcsize
from warnings import warn
from ..cos import Name, Array, Stream, Integer
from ..filter import DCTDecode, FlateDecode
from . import XObjectImage, DEVICE_GRAY, DEVICE_RGB, DEVICE_CMYK
from .icc import SRGB, UNCALIBRATED, get_icc_stream
__all__ = ['JPEGReader']
def create_reader(data_format, process_struct=lambda data: data[0], endian='>'):
    """Build a function that reads one struct of *data_format* from a reader.

    The returned callable takes an object with a ``_file`` attribute
    (the JPEGReader), reads exactly the struct's size from it, unpacks
    the bytes and post-processes the tuple with *process_struct*
    (default: return the single first field).
    """
    fmt = Struct(endian + data_format)

    def reader(jpeg_reader):
        raw = jpeg_reader._file.read(fmt.size)
        return process_struct(fmt.unpack(raw))

    return reader
# useful resources
# * http://fileformats.archiveteam.org/wiki/JPEG
# * libjpeg.txt from the Independent JPEG Group's reference implementation
# * http://www.ozhiker.com/electronics/pjmt/jpeg_info/app_segments.html
# * http://www.w3.org/Graphics/JPEG/
# * http://www.cipa.jp/std/documents/e/DC-008-2012_E.pdf
class JPEGReader(XObjectImage):
    """PDF image XObject backed by a JPEG file.

    The JPEG bytes are embedded as-is (DCTDecode filter); only the
    segment headers are parsed to discover dimensions, color space,
    an embedded ICC profile and the resolution (DPI).
    """

    # JPEG component count -> PDF device color space
    COLOR_SPACE = {1: DEVICE_GRAY,
                   3: DEVICE_RGB,
                   4: DEVICE_CMYK}

    def __init__(self, file_or_filename):
        """Accept a filesystem path or an already-open binary file object."""
        try:
            self.filename = Path(file_or_filename)
            self._file = self.filename.open('rb')
        except TypeError:
            # Path() rejected the argument, so treat it as an open file
            self.filename = None
            self._file = file_or_filename
        (width, height, bits_per_component, num_components, exif_color_space,
         icc_profile, adobe_color_transform, dpi) = self._get_metadata()
        if bits_per_component != 8:
            raise ValueError('PDF only supports JPEG files with 8 bits '
                             'per component')
        device_color_space = self.COLOR_SPACE[num_components]
        # fall back to the EXIF-declared color space when no ICC profile
        # is embedded in the file itself
        if icc_profile is None and exif_color_space is not UNCALIBRATED:
            icc_profile = get_icc_stream(exif_color_space)
        if icc_profile is not None:
            icc_profile['N'] = Integer(num_components)
            icc_profile['Alternate'] = device_color_space
            colorspace = Array([Name('ICCBased'), icc_profile])
        else:
            colorspace = device_color_space
        super().__init__(width, height, colorspace, bits_per_component, dpi,
                         filter=DCTDecode())
        if adobe_color_transform and num_components == 4:  # invert CMYK colors
            self['Decode'] = Array([Integer(1), Integer(0)] * 4)
        # copy the raw JPEG bytes into the PDF stream in chunks
        self._file.seek(0)
        while True:
            buffer = self._file.read(512 * 1024)  # 512 KB
            if not buffer:
                break
            self._data.write(buffer)

    # single big-endian byte/short readers, bound as methods
    read_uchar = create_reader('B')
    read_ushort = create_reader('H')

    def _density(self, density):
        """Convert an (x, y, unit) density triple to DPI.

        Returns None for missing density, an (x, y) DPI pair for
        DPI/DPCM units, or the bare x/y aspect ratio when no unit was
        recorded.
        """
        if density is None:
            return None
        x_density, y_density, unit = density
        if unit == DPI:
            dpi = x_density, y_density
        elif unit == DPCM:
            dpi = 2.54 * x_density, 2.54 * y_density
        else:  # unit is None; return aspect ratio
            dpi = x_density / y_density
        return dpi

    def _get_metadata(self):
        """Walk the JPEG segments and collect image metadata.

        Returns a tuple (width, height, bits_per_component,
        num_components, exif_color_space, icc_profile stream or None,
        adobe_color_transform or None, dpi or None).

        Raises ValueError when the stream is not a (valid) JPEG.
        """
        dpi = None
        icc_profile = None
        exif_color_space = UNCALIBRATED
        next_icc_part_number = 1
        num_icc_parts = 0
        adobe_color_xform = None
        self._file.seek(0)
        prefix, marker = self.read_uchar(), self.read_uchar()
        if (prefix, marker) != (0xFF, 0xD8):  # SOI marker
            raise ValueError('Not a JPEG file')
        while True:
            prefix, marker = self.read_uchar(), self.read_uchar()
            # skip optional 0xFF fill bytes preceding the marker code
            while marker == 0xFF:
                marker = self.read_uchar()
            if prefix != 0xFF or marker == 0x00:
                raise ValueError('Invalid or corrupt JPEG file')
            header_length = self.read_ushort()
            if marker == 0xE0:  # APP0: JFIF
                density = self._parse_jfif_segment(header_length)
                dpi = self._density(density)
            elif marker == 0xE1:  # APP1: EXIF (None for other APP1 payloads)
                result = self._parse_exif_segment(header_length)
                if result:
                    density, exif_color_space = result
                    dpi = self._density(density)
            elif marker == 0xE2:  # APP2: ICC profile, possibly split in parts
                icc_part_number, num_icc_parts, icc_part_bytes = \
                    self._parse_icc_segment(header_length)
                assert icc_part_number == next_icc_part_number
                next_icc_part_number += 1
                if icc_profile is None:
                    assert icc_part_number == 1
                    icc_profile = Stream(filter=FlateDecode())
                icc_profile.write(icc_part_bytes)
            elif marker == 0xEE:  # APP14: Adobe color transform
                adobe_color_xform = self._parse_adobe_dct_segment(header_length)
            elif (marker & 0xF0) == 0xC0 and marker not in (0xC4, 0xC8, 0xCC):
                # SOFn (start of frame): size/precision found, stop scanning
                v_size, h_size, bits_per_component, num_components = \
                    self._parse_start_of_frame(header_length)
                break
            else:
                # uninteresting segment; skip over its payload
                self._file.seek(header_length - 2, SEEK_CUR)
        # all ICC parts must have been seen (trivially 1 == 0 + 1 when none)
        assert next_icc_part_number == num_icc_parts + 1
        return (h_size, v_size, bits_per_component, num_components,
                exif_color_space, icc_profile, adobe_color_xform, dpi)

    JFIF_HEADER = create_reader('5s 2s B H H B B', lambda tuple: tuple)

    def _parse_jfif_segment(self, header_length):
        """Read the pixel density from a JFIF APP0 segment."""
        (identifier, version, units,
         h_density, v_density, h_thumbnail, v_thumbnail) = self.JFIF_HEADER()
        assert identifier == b'JFIF\0'
        thumbnail_size = 3 * h_thumbnail * v_thumbnail
        assert header_length == 16 + thumbnail_size
        return h_density, v_density, JFIF_UNITS[units]

    EXIF_HEADER = create_reader('5s B', lambda tuple: tuple)
    EXIF_TIFF_HEADER = 'H I'
    EXIF_TAG_FORMAT = 'H H I 4s'

    def _parse_exif_segment(self, header_length):
        """Read density and color space from an EXIF APP1 segment.

        Returns None when the APP1 segment is not EXIF (APP1 is also
        used for other payloads, e.g. XMP).
        """
        resume_position = self._file.tell() + header_length - 2
        identifier, null = self.EXIF_HEADER()
        if identifier != b'Exif\0':
            self._file.seek(resume_position)
            return None
        assert null == 0
        tiff_header_offset = self._file.tell()
        byte_order = self.read_ushort()
        endian = EXIF_ENDIAN[byte_order]
        tiff_header = create_reader(self.EXIF_TIFF_HEADER,
                                    lambda tuple: tuple, endian)
        fortytwo, ifd_offset = tiff_header(self)
        assert fortytwo == 42
        self._file.seek(tiff_header_offset + ifd_offset)
        ifd_0th = self._parse_exif_ifd(endian, tiff_header_offset)
        color_space = UNCALIBRATED
        if EXIF_IFD_POINTER in ifd_0th:
            self._file.seek(tiff_header_offset + ifd_0th[EXIF_IFD_POINTER])
            ifd_exif = self._parse_exif_ifd(endian, tiff_header_offset)
            # KeyError covers both a missing ColorSpace tag and a value
            # absent from EXIF_COLOR_SPACES
            try:
                exif_color_space = ifd_exif[EXIF_COLOR_SPACE]
                color_space = EXIF_COLOR_SPACES[exif_color_space]
            except KeyError:
                warn('The EXIF table in "{}" is missing color space information'
                     .format(self.filename))
        density = (float(ifd_0th.get(EXIF_X_RESOLUTION, 72)),
                   float(ifd_0th.get(EXIF_Y_RESOLUTION, 72)),
                   EXIF_UNITS[ifd_0th.get(EXIF_RESOLUTION_UNIT, 2)])
        self._file.seek(resume_position)
        return density, color_space

    def _parse_exif_ifd(self, endian, tiff_header_offset):
        """Parse one EXIF image file directory into a {tag: value} dict."""
        read_ushort = create_reader('H', endian=endian)
        tag_format = create_reader(self.EXIF_TAG_FORMAT,
                                   lambda tuple: tuple, endian)

        def rational(numerator, denominator):
            # EXIF rationals may use a zero denominator for 'unknown'
            try:
                return Fraction(numerator, denominator)
            except ZeroDivisionError:
                return None

        def get_value(type, count, value_or_offset):
            value_format = EXIF_TAG_TYPE[type]
            num_bytes = count * calcsize(value_format)
            if num_bytes > 4:  # offset
                # value doesn't fit in the 4-byte tag slot; it lives
                # elsewhere in the file, relative to the TIFF header
                saved_offset = self._file.tell()
                offset, = unpack(endian + 'I', value_or_offset)
                self._file.seek(tiff_header_offset + offset)
                data = self._file.read(num_bytes)
                format = '{}{}'.format(endian, count * value_format)
                self._file.seek(saved_offset)
            else:
                format = endian + value_format
                data = value_or_offset[:calcsize(format)]
            raw_value = unpack(format, data)
            if type in (1, 3, 4, 9):  # integer types
                try:
                    value, = raw_value
                except ValueError:
                    value = raw_value  # more than one integer
            elif type == 2:  # ASCII string
                value = raw_value[0].decode('ISO-8859-1')
            elif type in (5, 10):  # (un)signed rational(s)
                try:
                    numerator, denominator = raw_value
                    value = rational(numerator, denominator)
                except ValueError:  # multiple rationals: pair them up
                    pairs = zip(*(iter(raw_value), ) * 2)
                    value = tuple(rational(num, denom) for num, denom in pairs)
            elif type == 7:  # undefined: keep raw bytes
                value = raw_value
            return value

        num_tags = read_ushort(self)
        result = {}
        for i in range(num_tags):
            tag, type, count, value_or_offset = tag_format(self)
            result[tag] = get_value(type, count, value_or_offset)
        return result

    ICC_HEADER = create_reader('12s B B', lambda tuple: tuple)

    def _parse_icc_segment(self, header_length):
        """Read one chunk of a (possibly multi-part) ICC profile (APP2).

        NOTE(review): the caller unpacks three values, so the None
        returned for a non-ICC APP2 segment would raise TypeError
        upstream - confirm whether such files occur in practice.
        """
        resume_position = self._file.tell() + header_length - 2
        identifier, part_number, num_parts = self.ICC_HEADER()
        if identifier != b'ICC_PROFILE\0':
            self._file.seek(resume_position)
            return None
        part_bytes = self._file.read(resume_position - self._file.tell())
        return part_number, num_parts, part_bytes

    ADOBE_DCT_HEADER = create_reader('5s H H H B', lambda tuple: tuple)

    def _parse_adobe_dct_segment(self, header_length):
        """Read the color transform code from an Adobe APP14 segment."""
        assert header_length >= 14
        resume_position = self._file.tell() + header_length - 2
        identifier, version, flags1, flags2, color_transform = \
            self.ADOBE_DCT_HEADER()
        if identifier != b'Adobe':
            self._file.seek(resume_position)
            return None
        self._file.seek(resume_position)
        return ADOBE_COLOR_TRANSFORM[color_transform]

    SOF_HEADER = create_reader('B H H B', lambda tuple: tuple)

    def _parse_start_of_frame(self, header_length):
        """Read size, sample precision and component count from a SOFn."""
        resume_position = self._file.tell() + header_length - 2
        sample_precision, v_size, h_size, num_components = self.SOF_HEADER()
        self._file.seek(resume_position)
        return v_size, h_size, sample_precision, num_components
# Density units used by both the JFIF and EXIF resolution fields.
# (The original final line had dataset-export metadata fused onto it,
# which made the module unparseable.)
DPCM = 'dpcm'
DPI = 'dpi'

# JFIF APP0 'units' byte -> density unit (0 means aspect ratio only)
JFIF_UNITS = {0: None,
              1: DPI,
              2: DPCM}

# TIFF byte-order marker ('II'/'MM') -> struct endianness prefix
EXIF_ENDIAN = {0x4949: '<',
               0x4D4D: '>'}

# EXIF/TIFF tag type -> struct format code (rationals are value pairs)
EXIF_TAG_TYPE = {1: 'B',
                 2: 's',
                 3: 'H',
                 4: 'I',
                 5: 'II',
                 7: 's',
                 9: 'i',
                 10: 'ii'}

# EXIF ResolutionUnit tag value -> density unit
EXIF_UNITS = {1: None,
              2: DPI,
              3: DPCM}

# EXIF ColorSpace tag value -> ICC color space
EXIF_COLOR_SPACES = {1: SRGB,
                     0xFFFF: UNCALIBRATED}

# EXIF/TIFF tag numbers used by the parser
EXIF_X_RESOLUTION = 0x11A
EXIF_Y_RESOLUTION = 0x11B
EXIF_RESOLUTION_UNIT = 0x128
EXIF_IFD_POINTER = 0x8769
EXIF_COLOR_SPACE = 0xA001

# Adobe APP14 color transform codes
UNKNOWN = 'RGB or CMYK'
YCC = 'YCbCr'
YCCK = 'YCCK'

ADOBE_COLOR_TRANSFORM = {0: UNKNOWN,
                         1: YCC,
                         2: YCCK}
import numpy as np
import rasterio
import riomucho
from rio_alpha.alpha_mask import mask_exact
def alpha_worker(open_file, window, ij, g_args):
    """rio mucho worker for alpha. It reads the input
    file and performs alpha calculations on each window.

    Parameters
    ------------
    open_file: list of open rasterio datasets
        (rio mucho passes the list of open sources; only the first
        one is used here)
    window: Window object
        A window is a view onto a rectangular subset of a
        raster dataset.
    ij: tuple
        window index (part of the rio mucho worker API; unused here)
    g_args: dictionary
        global args; 'ndv' holds user-supplied per-band nodata values,
        or a falsy value to let rasterio derive the mask

    Returns
    ---------
    rgba: ndarray
        ndarray with the original RGB bands of shape (3, rows, cols)
        plus an alpha band of shape (rows, cols)
    """
    src = open_file[0]
    arr = src.read(window=window)

    # Determine Alpha Band
    if g_args["ndv"]:
        # User-supplied nodata value
        alpha = mask_exact(arr, g_args["ndv"])
    else:
        # Let rasterio decide
        alpha = src.dataset_mask(window=window)

    # Replace or Add alpha band to input data
    if arr.shape[0] == 4:
        # replace band 4 with new alpha mask
        # (likely the same but let's not make that assumption)
        rgba = arr.copy()
        rgba[3] = alpha
    elif arr.shape[0] == 3:
        # stack the alpha mask to add band 4
        rgba = np.append(arr, alpha[np.newaxis, :, :], axis=0)
    else:
        raise ValueError("Array must have 3 or 4 bands (RGB or RGBA)")

    return rgba


# underscored alias; add_alpha() below passes this name to riomucho
_alpha_worker = alpha_worker
def add_alpha(src_path, dst_path, ndv, creation_options, processes):
    """Write a 4-band (RGBA) copy of *src_path* to *dst_path*.

    (The original final line had dataset-export metadata fused onto it,
    which made the module unparseable.)

    Parameters
    ------------
    src_path: string
        path to the source RGB(A) raster
    dst_path: string
        path of the output raster (always written with 4 bands)
    ndv: list or None
        a list of floats where the length of the list = band count;
        when None/falsy the alpha mask is derived from the dataset mask
    creation_options: dict
        GDAL creation options merged into the output profile
    processes: integer
        number of riomucho worker processes

    Returns
    ---------
    None
        Output is written to dst_path
    """
    with rasterio.open(src_path) as src:
        dst_profile = src.profile
        dst_profile.update(**creation_options)
        # 'photometric' describes the source band layout; drop it so it
        # cannot conflict with the forced 4-band output
        dst_profile.pop("photometric", None)
        dst_profile.update(count=4, nodata=None)

    global_args = {"src_nodata": 0, "dst_dtype": dst_profile["dtype"], "ndv": ndv}

    with riomucho.RioMucho(
        [src_path],
        dst_path,
        _alpha_worker,
        options=dst_profile,
        global_args=global_args,
        mode="manual_read",
    ) as rm:
        rm.run(processes)
from __future__ import division
import json
import math
import re
import numpy as np
from scipy.stats import mode
def _parse_single(n):
"""Returns a single value nodata of type float
Parameters
----------
n: integer or str(integer)
Returns
-------
float(n)
"""
try:
return float(n)
except ValueError:
raise ValueError("{0} is not a valid nodata value".format(n))
def _parse_ndv(ndv, bands):
    """Return per-band nodata values as a list of floats.

    Parameters
    ----------
    ndv: string
        either a single value (e.g. '255'), replicated across all
        bands, or a stringified list (e.g. '[255, 255, 255]')
    bands: integer
        band count; a list input must have exactly this many values

    Returns
    -------
    list of floats, length == band count
    """
    if not re.match(r"\[[0-9\.\,\s]+\]", ndv):
        # single value: replicate it for every band
        return [_parse_single(ndv) for i in range(bands)]

    ndvals = [_parse_single(n) for n in json.loads(ndv)]
    if len(ndvals) != bands:
        raise ValueError(
            "{0} parsed to ndv of {1} does "
            "not match band count of {2}".format(ndv, json.dumps(ndvals), bands)
        )
    return ndvals
def _convert_rgb(rgb_orig):
# Sample to ~200 in smaller dimension if > 200 for performance
if rgb_orig[:, :, 0].shape[0] < rgb_orig[:, :, 0].shape[1]:
min_dimension = 0
else:
min_dimension = 1
if rgb_orig[:, :, 0].shape[min_dimension] < 200:
mod = 1
else:
mod = int(math.ceil(rgb_orig[:, :, 0].shape[min_dimension] / 200))
rgb_mod = rgb_orig[::mod, ::mod]
# Flatten image for full img histogram
rgb_mod_flat = rgb_mod.reshape(
(rgb_mod.shape[0] * rgb_mod.shape[1], rgb_mod.shape[-1])
)
return rgb_mod, rgb_mod_flat
# Squish array to only continuous values, return is in list form
def _find_continuous_rgb(input_array, axis_num):
diff_array = np.diff(input_array, axis=int(axis_num))
diff_array = np.insert(diff_array, 0, [99, 99, 99], axis=int(axis_num))
val_list = (input_array[diff_array == [0, 0, 0]]).tolist()
return val_list
# Find modal RGB value of continuous values array
# (val_list), takes list, returns [R,G,B]
def _group(lst, n, continuous):
    """Group the flat *lst* into *n*-tuples and return their per-channel mode.

    The ``continuous`` argument is rebound before being returned, so the
    value passed in is never read.

    NOTE(review): ``(mode_vals[0])[0, i]`` assumes scipy's ``mode``
    returns a 2-D mode array; the default ``keepdims`` behavior changed
    in recent scipy releases - confirm against the pinned scipy version.
    """
    arr = np.asarray(list(zip(*[lst[i::n] for i in range(n)])))
    mode_vals = mode(arr)
    continuous = [int((mode_vals[0])[0, i]) for i in range(3)]
    return continuous, arr
def _compute_continuous(rgb_mod, loc):
    """Find repeated pixel runs along axis *loc* and return their mode.

    Returns ([R, G, B] modal value, array of the repeated triples).
    """
    values = _find_continuous_rgb(rgb_mod, loc)
    return _group(values, 3, [])
def _search_image_edge(rgb_mod, candidate_original, candidate_continuous):
    """Count how often each ndv candidate occurs around the image edge.

    Returns two 2-element lists (counts in the full edge, counts in the
    continuous-value array), ordered (original, continuous) candidate.
    """
    # Make array of image edge: top row, last column, bottom row, first column
    img_edge = np.concatenate(
        (rgb_mod[0, :, :], rgb_mod[:, -1, :], rgb_mod[-1, :, :], rgb_mod[:, 0, :]),
        axis=0,
    )

    # Squish image edge down to just continuous values
    edge_mode_continuous, arr = _compute_continuous(rgb_mod, 0)

    def _frequency(pixels, candidate):
        # number of pixels exactly matching the candidate triple
        matches = np.where((pixels == candidate).all(axis=1))
        return len(np.transpose(matches))

    candidates = (candidate_original, candidate_continuous)
    count_img_edge_full = [_frequency(img_edge, c) for c in candidates]
    count_img_edge_continuous = [_frequency(arr, c) for c in candidates]

    return count_img_edge_full, count_img_edge_continuous
def _evaluate_count(lst1, lst2, verbose):
# Q: will these always realiably be ordered as listed
# above with original first, continuous second?
if (lst1[0] > lst1[1]) and (lst2[0] > lst2[1]):
return lst1
elif (lst1[0] < lst1[1]) and (lst2[0] < lst2[1]):
return lst2
else:
if verbose:
return "None"
else:
return ""
def _debug_mode(rgb_flat, arr, output):
    """Save overlaid histograms of all pixel values and continuous-only values.

    (The original final line had dataset-export metadata fused onto it,
    which made the module unparseable.)

    Parameters
    ----------
    rgb_flat: ndarray
        flattened (pixels, depth) array of the sub-sampled image
    arr: ndarray
        array of continuous (repeated-run) pixel values
    output: string
        path the figure is written to
    """
    # imported lazily so matplotlib is only required in debug mode
    import matplotlib.pyplot as plt

    # histogram of every sub-sampled pixel value
    plt.hist(rgb_flat, bins=range(256))
    # histogram of continuous values only
    plt.hist(arr, bins=range(256))
    plt.savefig(output, bbox_inches="tight")
    plt.close()
import click
import numpy as np
import rasterio
from scipy.stats import mode
from rio_alpha.utils import (
_convert_rgb,
_compute_continuous,
_debug_mode,
_search_image_edge,
_evaluate_count,
)
def discover_ndv(rgb_orig, debug, verbose):
    """Returns nodata value by calculating mode of RGB array

    Parameters
    ----------
    rgb_orig: ndarray
        array of input pixels of shape (rows, cols, depth)
    debug: Boolean
        Enables matplotlib & printing of figures
    verbose: Boolean
        Prints extra information, like competing candidate values

    Returns
    -------
    list of nodata value candidates or empty string if none found
    """
    rgb_mod, rgb_mod_flat = _convert_rgb(rgb_orig)

    # Full image mode bincount
    mode_vals = mode(rgb_mod_flat)
    candidate_original = [int((mode_vals[0])[0, i]) for i in range(3)]

    # Find continuous values in RGB array
    candidate_continuous, arr = _compute_continuous(rgb_mod, 1)

    # If debug mode, print histograms & be verbose
    if debug:
        click.echo("Original image ndv candidate: %s" % (str(candidate_original)))
        click.echo("Filtered image ndv candidate: %s" % (str(candidate_continuous)))
        outplot = "/tmp/hist_plot.png"
        _debug_mode(rgb_mod_flat, arr, outplot)

    # Compare ndv candidates from full & squished image
    candidate_list = [
        i for i, j in zip(candidate_original, candidate_continuous) if i == j
    ]

    # If candidates from original & filtered images match exactly,
    # print value & exit
    if len(candidate_list) == 3:
        return candidate_list
    # If candidates do not match exactly, continue vetting process
    # by searching image edge for frequency of each candidate
    elif len(candidate_list) < 3:
        if verbose:
            click.echo(
                "Competing ndv candidates...searching "
                "image collar for value frequency. "
                "Candidate list: %s" % str(candidate_list)
            )
        count_img_edge_full, count_img_edge_continuous = _search_image_edge(
            rgb_mod, candidate_original, candidate_continuous
        )
        if verbose:
            for candidate in (candidate_original, candidate_continuous):
                click.echo(
                    "Candidate value: %s "
                    "Candidate count: %s "
                    "Continuous count: %s"
                    % (
                        str(candidate),
                        str(count_img_edge_full),
                        str(count_img_edge_continuous),
                    )
                )
        output = _evaluate_count(
            count_img_edge_full, count_img_edge_continuous, verbose
        )
        return output
    else:
        # candidate_list is a zip of two 3-element lists, so its length
        # can never exceed 3; this branch is effectively unreachable
        raise ValueError("Invalid candidate list {!r}".format(candidate_list))
def determine_nodata(src_path, user_nodata, discovery, debug, verbose):
    """Worker function for determining nodata.

    (The original final line had dataset-export metadata fused onto it,
    which made the module unparseable; the nested if/else has also been
    flattened into guard clauses.)

    Parameters
    ----------
    src_path: string
    user_nodata: string/integer
        User supplies the nodata value,
        input a single value or a string of list
    discovery: Boolean
        determines nodata if alpha channel does not exist
        or internal ndv does not exist
    debug: Boolean
        Enables matplotlib & printing of figures
    verbose: Boolean
        Prints extra information, like competing candidate values

    Returns
    -------
    nodata value: string
        'alpha' for a 4-band image, str(int) for an internal nodata
        value, a stringified [R, G, B] list from discovery, or ''
        when nothing could be determined.
    """
    if user_nodata:
        # user override wins unconditionally
        return user_nodata

    with rasterio.open(src_path, "r") as src:
        count = src.count
        if count == 4:
            # the 4th band already encodes nodata
            return "alpha"

        nodata = src.nodata
        if nodata is not None:
            return str(int(nodata))

        if discovery:
            # no internal ndv: infer one from the pixel data
            data = np.rollaxis(src.read(), 0, 3)
            candidates = discover_ndv(data, debug, verbose)
            if len(candidates) != 3:
                return ""
            return "[{}, {}, {}]".format(*candidates)

        return ""
import logging
import click
import rasterio as rio
from rio_alpha.utils import _parse_ndv
from rio_alpha.islossy import count_ndv_regions
from rio_alpha.findnodata import determine_nodata
from rio_alpha.alpha import add_alpha
from rasterio.rio.options import creation_options
# package-level logger for the CLI module
logger = logging.getLogger("rio_alpha")


@click.command("islossy")
@click.argument("input", nargs=1, type=click.Path(exists=True))
@click.option(
    "--ndv",
    default="[0, 0, 0]",
    help="Expects a string containing a single integer value "
    "(e.g. '255') or "
    "a string representation of a list containing "
    "per-band nodata values (e.g. '[255, 255, 255]').",
)
def islossy(input, ndv):
    """
    Determine if there are >= 10 nodata regions in an image

    Echoes 'True' when the threshold is reached, 'False' otherwise.
    """
    with rio.open(input, "r") as src:
        img = src.read()

    # normalize the user-supplied ndv string into a 3-element float list
    ndv = _parse_ndv(ndv, 3)

    if count_ndv_regions(img, ndv) >= 10:
        click.echo("True")
    else:
        click.echo("False")
@click.command("findnodata")
@click.argument("src_path", type=click.Path(exists=True))
@click.option(
    "--user_nodata",
    "-u",
    default=None,
    help="User supplies the nodata value, "
    "input a string containing a single integer value "
    "(e.g. '255') or "
    "a string representation of a list containing "
    "per-band nodata values (e.g. '[255, 255, 255]').",
)
@click.option(
    "--discovery",
    is_flag=True,
    default=False,
    help="Determines nodata if alpha channel"
    "does not exist or internal ndv does not exist",
)
@click.option(
    "--debug",
    is_flag=True,
    default=False,
    help="Enables matplotlib & printing of figures",
)
@click.option(
    "--verbose",
    "-v",
    is_flag=True,
    default=False,
    help="Prints extra information, " "like competing candidate values",
)
def findnodata(src_path, user_nodata, discovery, debug, verbose):
    """Print a dataset's nodata value.

    Echoes 'alpha' for 4-band images, a single value or per-band list
    for datasets with (discoverable) nodata, or an empty string when
    nothing could be determined.
    """
    ndv = determine_nodata(src_path, user_nodata, discovery, debug, verbose)
    click.echo("%s" % ndv)
@click.command("alpha")
@click.argument("src_path", type=click.Path(exists=True))
@click.argument("dst_path", type=click.Path(exists=False))
@click.option(
    "--ndv",
    default=None,
    help="Expects a string containing a single integer value "
    "(e.g. '255') or "
    "a string representation of a list containing "
    "per-band nodata values (e.g. '[255, 255, 255]').",
)
@click.option("--workers", "-j", type=int, default=1)
@click.pass_context
@creation_options
def alpha(ctx, src_path, dst_path, ndv, creation_options, workers):
    """Add or replace an alpha band in an RGB or RGBA image.

    If you don't supply ndv, the alpha mask will be inferred.
    """
    # the source only needs to be open long enough to read the band count
    with rio.open(src_path) as src:
        band_count = src.count

    if ndv:
        ndv = _parse_ndv(ndv, band_count)

    add_alpha(src_path, dst_path, ndv, creation_options, workers)
from typing import Dict, Optional, Tuple, Union
import morecantile
from rasterio.enums import ColorInterp, MaskFlags
from rasterio.io import DatasetReader, DatasetWriter
from rasterio.rio.overview import get_maximum_overview_level
from rasterio.transform import Affine
from rasterio.vrt import WarpedVRT
from rasterio.warp import calculate_default_transform
def has_alpha_band(src_dst: Union[DatasetReader, DatasetWriter, WarpedVRT]):
    """Check for alpha band or mask in source."""
    mask_has_alpha = any(
        MaskFlags.alpha in flags for flags in src_dst.mask_flag_enums
    )
    if mask_has_alpha or ColorInterp.alpha in src_dst.colorinterp:
        return True
    return False
def has_mask_band(src_dst):
    """Check for a per-dataset mask band (not an alpha band) in source."""
    for flags in src_dst.mask_flag_enums:
        if MaskFlags.per_dataset in flags and MaskFlags.alpha not in flags:
            return True
    return False
def non_alpha_indexes(src_dst: Union[DatasetReader, DatasetWriter, WarpedVRT]) -> Tuple:
    """Return indexes of non-alpha bands."""
    indexes = []
    for ix, band in enumerate(src_dst.indexes):
        is_alpha = (
            src_dst.mask_flag_enums[ix] is MaskFlags.alpha
            or src_dst.colorinterp[ix] is ColorInterp.alpha
        )
        if not is_alpha:
            indexes.append(band)
    return tuple(indexes)
def get_zooms(
    src_dst: Union[DatasetReader, DatasetWriter, WarpedVRT],
    tilesize: int = 256,
    tms: morecantile.TileMatrixSet = morecantile.tms.get("WebMercatorQuad"),
    zoom_level_strategy: str = "auto",
) -> Tuple[int, int]:
    """Calculate raster min/max zoom level.

    Parameters
    ----------
    src_dst:
        opened rasterio dataset (or WarpedVRT)
    tilesize: int
        tile edge length used when deriving the minimum zoom
    tms: morecantile.TileMatrixSet
        tile matrix set the zooms are expressed in
    zoom_level_strategy: str
        passed to ``tms.zoom_for_res`` when picking the max zoom

    Returns
    -------
    (min_zoom, max_zoom) tuple of ints
    """
    # If the raster is not in the TMS CRS we calculate its projected properties (height, width, resolution)
    tms_crs = tms.rasterio_crs
    if src_dst.crs != tms_crs:
        aff, w, h = calculate_default_transform(
            src_dst.crs,
            tms_crs,
            src_dst.width,
            src_dst.height,
            *src_dst.bounds,
        )
    else:
        aff = list(src_dst.transform)
        w = src_dst.width
        h = src_dst.height

    resolution = max(abs(aff[0]), abs(aff[4]))

    # The maxzoom is defined by finding the minimum difference between
    # the raster resolution and the zoom level resolution
    max_zoom = tms.zoom_for_res(
        resolution,
        max_z=30,
        zoom_level_strategy=zoom_level_strategy,
    )

    # The minzoom is defined by the resolution of the maximum theoretical overview level
    max_possible_overview_level = get_maximum_overview_level(w, h, minsize=tilesize)
    ovr_resolution = resolution * (2**max_possible_overview_level)
    min_zoom = tms.zoom_for_res(ovr_resolution, max_z=30)

    return (min_zoom, max_zoom)
def get_web_optimized_params(
    src_dst,
    zoom_level_strategy: str = "auto",
    zoom_level: Optional[int] = None,
    aligned_levels: Optional[int] = None,
    tms: morecantile.TileMatrixSet = morecantile.tms.get("WebMercatorQuad"),
) -> Dict:
    """Return VRT parameters for a WebOptimized COG.

    Computes the CRS, transform and dimensions of a VRT whose pixel grid is
    aligned with the TMS tile grid at ``max_zoom - aligned_levels``.

    Args:
        src_dst: Opened rasterio dataset.
        zoom_level_strategy: rounding strategy ("lower"/"upper"/"auto") for
            the max zoom; ignored when `zoom_level` is given.
        zoom_level: explicit max zoom level (overrides the strategy).
        aligned_levels: number of overview levels whose tiles should match
            the tiling scheme (default: 0).
        tms: target TileMatrixSet (default: WebMercatorQuad).

    Returns:
        Dict: ``crs``, ``transform``, ``width`` and ``height`` suitable for
        constructing a WarpedVRT.

    """
    tms_crs = tms.rasterio_crs
    if src_dst.crs != tms_crs:
        # Reproject bounds/transform into the TMS CRS via a temporary VRT.
        with WarpedVRT(src_dst, crs=tms_crs) as vrt:
            bounds = vrt.bounds
            aff = list(vrt.transform)
    else:
        bounds = src_dst.bounds
        aff = list(src_dst.transform)

    # Ground resolution: larger of |x-res| and |y-res|.
    resolution = max(abs(aff[0]), abs(aff[4]))

    if zoom_level is None:
        # find max zoom (closest to the raster resolution)
        max_zoom = tms.zoom_for_res(
            resolution,
            max_z=30,
            zoom_level_strategy=zoom_level_strategy,
        )
    else:
        max_zoom = zoom_level

    # defined the zoom level we want to align the raster
    aligned_levels = aligned_levels or 0
    base_zoom = max_zoom - aligned_levels

    # find new raster bounds (bounds of UL tile / LR tile)
    # NOTE(review): `_tile` and `_resolution` are morecantile *private*
    # helpers — tied to the morecantile version in use.
    ul_tile = tms._tile(bounds[0], bounds[3], base_zoom)
    w, _, _, n = tms.xy_bounds(ul_tile)

    # The output resolution should match the TMS resolution at MaxZoom
    vrt_res = tms._resolution(tms.matrix(max_zoom))

    # Output transform is built from the origin (UL tile) and output resolution
    vrt_transform = Affine(vrt_res, 0, w, 0, -vrt_res, n)

    lr_tile = tms._tile(bounds[2], bounds[1], base_zoom)
    # Step one tile right/down so the lower-right edge is fully covered.
    e, _, _, s = tms.xy_bounds(
        morecantile.Tile(lr_tile.x + 1, lr_tile.y + 1, lr_tile.z)
    )

    # Whole-pixel grid size, at least 1; note `vrt_transform.e` is negative.
    vrt_width = max(1, round((e - w) / vrt_transform.a))
    vrt_height = max(1, round((s - n) / vrt_transform.e))

    return {
        "crs": tms_crs,
        "transform": vrt_transform,
        "width": vrt_width,
        "height": vrt_height,
    }
import warnings
from rasterio.profiles import Profile
def _tiled_gtiff_defaults(**overrides):
    """Return the creation defaults shared by every COG profile.

    All profiles are pixel-interleaved GTiffs with 512x512 internal tiles;
    *overrides* layers the compression-specific keys on top. Factoring this
    out removes eleven copies of the same base dict.
    """
    defaults = {
        "driver": "GTiff",
        "interleave": "pixel",
        "tiled": True,
        "blockxsize": 512,
        "blockysize": 512,
    }
    defaults.update(overrides)
    return defaults


class JPEGProfile(Profile):
    """Tiled, pixel-interleaved, JPEG-compressed, YCbCr colorspace, 8-bit GTiff."""

    defaults = _tiled_gtiff_defaults(compress="JPEG", photometric="YCbCr")


class WEBPProfile(Profile):
    """Tiled, pixel-interleaved, WEBP-compressed, 8-bit GTiff."""

    defaults = _tiled_gtiff_defaults(compress="WEBP")


class ZSTDProfile(Profile):
    """Tiled, pixel-interleaved, ZSTD-compressed GTiff.

    Note: ZSTD compression is available since gdal 2.3
    """

    defaults = _tiled_gtiff_defaults(compress="ZSTD")


class LZWProfile(Profile):
    """Tiled, pixel-interleaved, LZW-compressed GTiff."""

    defaults = _tiled_gtiff_defaults(compress="LZW")


class DEFLATEProfile(Profile):
    """Tiled, pixel-interleaved, DEFLATE-compressed GTiff."""

    defaults = _tiled_gtiff_defaults(compress="DEFLATE")


class PACKBITSProfile(Profile):
    """Tiled, pixel-interleaved, PACKBITS-compressed GTiff."""

    defaults = _tiled_gtiff_defaults(compress="PACKBITS")


class LZMAProfile(Profile):
    """Tiled, pixel-interleaved, LZMA-compressed GTiff."""

    defaults = _tiled_gtiff_defaults(compress="LZMA")


class LERCProfile(Profile):
    """Tiled, pixel-interleaved, LERC-compressed GTiff."""

    defaults = _tiled_gtiff_defaults(compress="LERC")


class LERCDEFLATEProfile(Profile):
    """Tiled, pixel-interleaved, LERC_DEFLATE-compressed GTiff."""

    defaults = _tiled_gtiff_defaults(compress="LERC_DEFLATE")


class LERCZSTDProfile(Profile):
    """Tiled, pixel-interleaved, LERC_ZSTD-compressed GTiff."""

    defaults = _tiled_gtiff_defaults(compress="LERC_ZSTD")


class RAWProfile(Profile):
    """Tiled, pixel-interleaved, no-compressed GTiff."""

    defaults = _tiled_gtiff_defaults()
class COGProfiles(dict):
    """CloudOptimized GeoTIFF profiles."""

    # Compression schemes that need a recent libtiff; `get` warns about them.
    _NON_STANDARD = {"zstd", "webp", "lerc", "lerc_deflate", "lerc_zstd"}

    def __init__(self):
        """Initialize COGProfiles dict."""
        self.update(
            {
                "jpeg": JPEGProfile(),
                "webp": WEBPProfile(),
                "zstd": ZSTDProfile(),
                "lzw": LZWProfile(),
                "deflate": DEFLATEProfile(),
                "packbits": PACKBITSProfile(),
                "lzma": LZMAProfile(),
                "lerc": LERCProfile(),
                "lerc_deflate": LERCDEFLATEProfile(),
                "lerc_zstd": LERCZSTDProfile(),
            "raw": RAWProfile(),
            }
        )

    def get(self, key):
        """Like normal item access but case-insensitive and raising KeyError
        (with a helpful message) for unknown profile names.

        Returns a *copy* of the stored profile so callers can mutate it.
        """
        key = key.lower()
        # A dict subclass supports `in` directly; no need for `.keys()`.
        if key not in self:
            raise KeyError("{} is not a valid COG profile name".format(key))

        if key in self._NON_STANDARD:
            warnings.warn(
                "Non-standard compression schema: {}. The output COG might not be fully"
                " supported by software not built against latest libtiff.".format(key)
            )

        return self[key].copy()


cog_profiles = COGProfiles()
import json
import os
import typing
import click
import numpy
from morecantile import TileMatrixSet
from rasterio.rio import options
from rio_cogeo import __version__ as cogeo_version
from rio_cogeo.cogeo import (
RIOResampling,
WarpResampling,
cog_info,
cog_translate,
cog_validate,
)
from rio_cogeo.profiles import cog_profiles
IN_MEMORY_THRESHOLD = int(os.environ.get("IN_MEMORY_THRESHOLD", 10980 * 10980))
class BdxParamType(click.ParamType):
    """Band index type."""

    name = "bidx"

    def convert(self, value, param, ctx):
        """Validate and parse band indexes.

        Accepts a comma-separated string of positive integers (e.g. "1,2,3")
        and returns them as a list of ints.
        """
        try:
            bands = [int(x) for x in value.split(",")]
            # Explicit check instead of `assert`: asserts are stripped when
            # Python runs with -O, which would silently skip validation.
            if not all(b > 0 for b in bands):
                raise ValueError("band indexes must be > 0")
            return bands

        except (ValueError, AttributeError) as e:
            raise click.ClickException(
                "bidx must be a string of comma-separated integers (> 0), "
                "representing the band indexes."
            ) from e
class NodataParamType(click.ParamType):
    """Nodata type."""

    name = "nodata"

    def convert(self, value, param, ctx):
        """Validate and parse a nodata value.

        "nan" maps to ``numpy.nan``; "nil"/"none"/"nada" map to None;
        anything else must parse as a float.
        """
        try:
            if value.lower() == "nan":
                return numpy.nan
            elif value.lower() in ["nil", "none", "nada"]:
                return None
            else:
                return float(value)

        # AttributeError covers non-string inputs (no `.lower()` method),
        # mirroring the handling in BdxParamType; previously it escaped
        # uncaught.
        except (TypeError, ValueError, AttributeError) as e:
            raise click.ClickException(
                "{} is not a valid nodata value.".format(value)
            ) from e
class ThreadsParamType(click.ParamType):
    """num_threads index type."""

    name = "threads"

    def convert(self, value, param, ctx):
        """Validate and parse thread number.

        "all_cpus" (any case) maps to the GDAL token "ALL_CPUS"; anything
        else must parse as an int.
        """
        try:
            if value.lower() == "all_cpus":
                return "ALL_CPUS"
            else:
                return int(value)

        # AttributeError covers non-string inputs (no `.lower()` method),
        # which previously escaped uncaught.
        except (TypeError, ValueError, AttributeError) as e:
            raise click.ClickException(
                "{} is not a valid thread value.".format(value)
            ) from e
@click.group(short_help="Create and Validate COGEO")
@click.version_option(version=cogeo_version, message="%(version)s")
def cogeo():
    """Rasterio cogeo subcommands."""
    # Group container only: subcommands attach via @cogeo.command().
@cogeo.command(short_help="Create COGEO")
@options.file_in_arg
@options.file_out_arg
@click.option("--bidx", "-b", type=BdxParamType(), help="Band indexes to copy.")
@click.option(
    "--cog-profile",
    "-p",
    "cogeo_profile",
    type=click.Choice(cog_profiles.keys(), case_sensitive=False),
    default="deflate",
    help="CloudOptimized GeoTIFF profile.",
    show_default=True,
)
@click.option(
    "--nodata",
    type=NodataParamType(),
    metavar="NUMBER|nan",
    help="Set nodata masking values for input dataset.",
)
@click.option(
    "--add-mask",
    is_flag=True,
    help="Force output dataset creation with an internal mask (convert alpha "
    "band or nodata to mask).",
)
@click.option("--blocksize", type=int, help="Overwrite profile's tile size.")
@options.dtype_opt
@click.option(
    "--overview-level",
    type=int,
    help="Overview level (if not provided, appropriate overview level will be "
    "selected until the smallest overview is smaller than the value of the "
    "internal blocksize)",
)
@click.option(
    "--overview-resampling",
    help="Overview creation resampling algorithm.",
    type=click.Choice(list(typing.get_args(RIOResampling))),
    default="nearest",
    show_default=True,
)
@click.option(
    "--overview-blocksize",
    default=lambda: os.environ.get("GDAL_TIFF_OVR_BLOCKSIZE", 128),
    help="Overview's internal tile size (default defined by "
    "GDAL_TIFF_OVR_BLOCKSIZE env or 128)",
    show_default=True,
)
@click.option(
    "--web-optimized", "-w", is_flag=True, help="Create COGEO optimized for Web."
)
@click.option(
    "--zoom-level-strategy",
    type=click.Choice(["lower", "upper", "auto"], case_sensitive=False),
    default="auto",
    help="Strategy to determine zoom level.",
    show_default=True,
)
@click.option(
    "--zoom-level",
    type=int,
    help="Zoom level number for the highest resolution. If this option is specified, `--zoom-level-strategy` is ignored.",
)
@click.option(
    "--aligned-levels",
    type=int,
    help="Number of overview levels for which GeoTIFF tile and tiles defined in the tiling scheme match.",
)
@click.option(
    "--resampling",
    "-r",
    help="Resampling algorithm. Will only be applied with the `--web-optimized` option.",
    type=click.Choice(list(typing.get_args(WarpResampling))),
    default="nearest",
    show_default=True,
)
@click.option(
    "--in-memory/--no-in-memory",
    default=None,
    help="Force processing raster in memory / not in memory (default: process in memory "
    "if smaller than {:.0f} million pixels)".format(IN_MEMORY_THRESHOLD // 1e6),
)
@click.option(
    "--allow-intermediate-compression",
    default=False,
    is_flag=True,
    help="Allow intermediate file compression to reduce memory/disk footprint.",
    show_default=True,
)
@click.option(
    "--forward-band-tags",
    default=False,
    is_flag=True,
    help="Forward band tags to output bands.",
    show_default=True,
)
@click.option(
    "--forward-ns-tags",
    default=False,
    is_flag=True,
    help="Forward namespaced tags to output dataset.",
    show_default=True,
)
@click.option(
    "--threads",
    type=ThreadsParamType(),
    default="ALL_CPUS",
    help="Number of worker threads for multi-threaded compression.",
    show_default=True,
)
@click.option(
    "--use-cog-driver",
    help="Use GDAL COG Driver (require GDAL>=3.1).",
    is_flag=True,
    default=False,
    show_default=True,
)
@click.option(
    "--tms",
    help="Path to TileMatrixSet JSON file.",
    type=click.Path(),
)
@options.creation_options
@click.option(
    "--config",
    "config",
    metavar="NAME=VALUE",
    multiple=True,
    callback=options._cb_key_val,
    help="GDAL configuration options.",
)
@click.option(
    "--quiet", "-q", help="Remove progressbar and other non-error output.", is_flag=True
)
def create(
    input,
    output,
    bidx,
    cogeo_profile,
    nodata,
    dtype,
    add_mask,
    blocksize,
    overview_level,
    overview_resampling,
    overview_blocksize,
    web_optimized,
    zoom_level_strategy,
    zoom_level,
    aligned_levels,
    resampling,
    in_memory,
    allow_intermediate_compression,
    forward_band_tags,
    forward_ns_tags,
    threads,
    use_cog_driver,
    tms,
    creation_options,
    config,
    quiet,
):
    """Create Cloud Optimized Geotiff."""
    # Start from the named creation profile, then layer user overrides on top.
    output_profile = cog_profiles.get(cogeo_profile)
    output_profile.update({"BIGTIFF": os.environ.get("BIGTIFF", "IF_SAFER")})
    if creation_options:
        output_profile.update(creation_options)

    if blocksize:
        output_profile["blockxsize"] = blocksize
        output_profile["blockysize"] = blocksize

    # Web-optimized output keeps overview tiles the same size as data tiles
    # (falling back to 512 when no explicit blocksize was given).
    if web_optimized:
        overview_blocksize = blocksize or 512

    # GDAL environment options applied for the duration of the translation.
    config.update(
        {
            "GDAL_NUM_THREADS": threads,
            "GDAL_TIFF_INTERNAL_MASK": os.environ.get("GDAL_TIFF_INTERNAL_MASK", True),
            "GDAL_TIFF_OVR_BLOCKSIZE": str(overview_blocksize),
        }
    )

    # Optional custom tiling scheme; None lets cog_translate use its default.
    if tms:
        with open(tms, "r") as f:
            tilematrixset = TileMatrixSet(**json.load(f))
    else:
        tilematrixset = None

    cog_translate(
        input,
        output,
        output_profile,
        indexes=bidx,
        nodata=nodata,
        dtype=dtype,
        add_mask=add_mask,
        overview_level=overview_level,
        overview_resampling=overview_resampling,
        web_optimized=web_optimized,
        zoom_level_strategy=zoom_level_strategy,
        zoom_level=zoom_level,
        aligned_levels=aligned_levels,
        resampling=resampling,
        in_memory=in_memory,
        config=config,
        allow_intermediate_compression=allow_intermediate_compression,
        forward_band_tags=forward_band_tags,
        forward_ns_tags=forward_ns_tags,
        tms=tilematrixset,
        use_cog_driver=use_cog_driver,
        quiet=quiet,
    )
@cogeo.command(short_help="Validate COGEO")
@options.file_in_arg
@click.option(
    "--strict",
    default=False,
    is_flag=True,
    help="Treat warnings as errors.",
    show_default=True,
)
@click.option(
    "--config",
    "config",
    metavar="NAME=VALUE",
    multiple=True,
    callback=options._cb_key_val,
    help="GDAL configuration options.",
)
def validate(input, strict, config):
    """Validate Cloud Optimized Geotiff."""
    # Only the validity flag matters here; errors/warnings are discarded.
    is_valid, _, _ = cog_validate(input, strict=strict, config=config)
    template = (
        "{} is a valid cloud optimized GeoTIFF"
        if is_valid
        else "{} is NOT a valid cloud optimized GeoTIFF"
    )
    click.echo(template.format(input))
@cogeo.command(short_help="Lists information about a raster dataset.")
@options.file_in_arg
@click.option(
    "--json",
    "to_json",
    default=False,
    is_flag=True,
    help="Print as JSON.",
    show_default=True,
)
@click.option(
    "--config",
    "config",
    metavar="NAME=VALUE",
    multiple=True,
    callback=options._cb_key_val,
    help="GDAL configuration options.",
)
def info(input, to_json, config):  # noqa: C901
    """Dataset info."""
    metadata = cog_info(input, config=config)

    if to_json:
        click.echo(metadata.model_dump_json(exclude_none=True, by_alias=True))
    else:
        # Column width for the aligned "key: value" lines below.
        sep = 25
        click.echo(
            f"""{click.style('Driver:', bold=True)} {metadata.Driver}
{click.style('File:', bold=True)} {metadata.Path}
{click.style('COG:', bold=True)} {metadata.COG}
{click.style('Compression:', bold=True)} {metadata.Compression}
{click.style('ColorSpace:', bold=True)} {metadata.ColorSpace}
{click.style('Profile', bold=True)}
    {click.style("Width:", bold=True):<{sep}} {metadata.Profile.Width}
    {click.style("Height:", bold=True):<{sep}} {metadata.Profile.Height}
    {click.style("Bands:", bold=True):<{sep}} {metadata.Profile.Bands}
    {click.style("Tiled:", bold=True):<{sep}} {metadata.Profile.Tiled}
    {click.style("Dtype:", bold=True):<{sep}} {metadata.Profile.Dtype}
    {click.style("NoData:", bold=True):<{sep}} {metadata.Profile.Nodata}
    {click.style("Alpha Band:", bold=True):<{sep}} {metadata.Profile.AlphaBand}
    {click.style("Internal Mask:", bold=True):<{sep}} {metadata.Profile.InternalMask}
    {click.style("Interleave:", bold=True):<{sep}} {metadata.Profile.Interleave}
    {click.style("ColorMap:", bold=True):<{sep}} {metadata.Profile.ColorMap}
    {click.style("ColorInterp:", bold=True):<{sep}} {metadata.Profile.ColorInterp}
    {click.style("Scales:", bold=True):<{sep}} {metadata.Profile.Scales}
    {click.style("Offsets:", bold=True):<{sep}} {metadata.Profile.Offsets}"""
        )
        click.echo(
            f"""
{click.style('Geo', bold=True)}
    {click.style("Crs:", bold=True):<{sep}} {metadata.GEO.CRS}
    {click.style("Origin:", bold=True):<{sep}} {metadata.GEO.Origin}
    {click.style("Resolution:", bold=True):<{sep}} {metadata.GEO.Resolution}
    {click.style("BoundingBox:", bold=True):<{sep}} {metadata.GEO.BoundingBox}
    {click.style("MinZoom:", bold=True):<{sep}} {metadata.GEO.MinZoom}
    {click.style("MaxZoom:", bold=True):<{sep}} {metadata.GEO.MaxZoom}"""
        )

        # Namespaced dataset tags, one section per namespace.
        for ns, values in metadata.Tags.items():
            click.echo(
                f"""
{click.style(ns, bold=True)}"""
            )
            for key, val in values.items():
                click.echo(
                    f"""    {click.style(key, underline=True, bold=True)}: {val}"""
                )

        # Per-band metadata sections.
        for ns, meta in metadata.Band_Metadata.items():
            click.echo(
                f"""
{click.style(ns, bold=True)}"""
            )
            if meta.Description:
                click.echo(
                    f"""    {click.style("Description", underline=True, bold=True)}: {meta.Description}"""
                )
            click.echo(
                f"""    {click.style("ColorInterp", underline=True, bold=True)}: {meta.ColorInterp}"""
            )
            # NOTE(review): `and` means Offset/Scale are only printed when
            # BOTH differ from their defaults — confirm this isn't meant to
            # be `or`.
            if meta.Offset != 0.0 and meta.Scale != 1.0:
                click.echo(
                    f"""    {click.style("Offset", underline=True, bold=True)}: {meta.Offset}"""
                )
                click.echo(
                    f"""    {click.style("Scale", underline=True, bold=True)}: {meta.Scale}"""
                )
            if meta.Metadata:
                click.echo(
                    f"""    {click.style("Metadata", underline=True, bold=True)}:"""
                )
                for key, val in meta.Metadata.items():
                    click.echo(
                        f"""        {click.style(key, underline=True, bold=True)}: {val}"""
                    )

        # IFD (overview) table: one row per internal image level.
        click.echo(
            f"""
{click.style('IFD', bold=True)}
    {click.style('Id', underline=True, bold=True):<20}{click.style('Size', underline=True, bold=True):<27}{click.style('BlockSize', underline=True, bold=True):<26}{click.style('Decimation', underline=True, bold=True):<33}"""
        )
        for ifd in metadata.IFD:
            wh = f"{ifd.Width}x{ifd.Height}"
            bl = f"{ifd.Blocksize[1]}x{ifd.Blocksize[0]}"
            click.echo(f"""    {ifd.Level:<8}{wh:<15}{bl:<14}{ifd.Decimation}""")

        if metadata.COG_errors or metadata.COG_warnings:
            click.echo(
                f"""
{click.style('COG Validation info', bold=True)}"""
            )
            for error in metadata.COG_errors or []:
                click.secho(f"""    - {error} (error)""", fg="red")
            for warning in metadata.COG_warnings or []:
                click.secho(f"""    - {warning} (warning)""", fg="yellow")
import numpy as np
from .utils import epsilon
from .colorspace import saturate_rgb
# Color manipulation functions
def sigmoidal(arr, contrast, bias):
    r"""
    Sigmoidal contrast is type of contrast control that
    adjusts the contrast without saturating highlights or shadows.
    It allows control over two factors:
    the contrast range from light to dark, and where the middle value
    of the mid-tones falls. The result is a non-linear and smooth
    contrast change.

    Parameters
    ----------
    arr : ndarray, float, 0 .. 1
        Array of color values to adjust
    contrast : integer
        Enhances the intensity differences between the lighter and darker
        elements of the image. For example, 0 is none, 3 is typical and
        20 is a lot.
    bias : float, between 0 and 1
        Threshold level for the contrast function to center on
        (typically centered at 0.5)

    Raises
    ------
    ValueError
        If `arr` has values outside [0, 1] or `bias` is outside [0, 1].

    Notes
    ----------
    Sigmoidal contrast is based on the sigmoidal transfer function:

    .. math:: g(u) = ( 1/(1 + e^{- \alpha * u + \beta)})

    This sigmoid function is scaled so that the output is bound by
    the interval [0, 1].

    .. math:: ( 1/(1 + e^(\beta * (\alpha - u))) - 1/(1 + e^(\beta * \alpha)))/
        ( 1/(1 + e^(\beta*(\alpha - 1))) - 1/(1 + e^(\beta * \alpha)) )

    Where :math: `\alpha` is the threshold level, and :math: `\beta` the
    contrast factor to be applied.

    References
    ----------
    .. [CT] Hany Farid "Fundamentals of Image Processing"
        http://www.cs.dartmouth.edu/farid/downloads/tutorials/fip.pdf

    """
    # Range-check tolerance; numerically identical to rio_color.utils.epsilon
    # (float64 machine epsilon) but computed locally for self-containment.
    eps = np.finfo(np.float64).eps

    if (arr.max() > 1.0 + eps) or (arr.min() < 0 - eps):
        raise ValueError("Input array must have float values between 0 and 1")

    if (bias > 1.0 + eps) or (bias < 0 - eps):
        raise ValueError("bias must be a scalar float between 0 and 1")

    alpha, beta = bias, contrast
    # We use the names a and b to match documentation.

    if alpha == 0:
        alpha = eps

    if beta == 0:
        return arr

    # Use an errstate *context* instead of np.seterr: the previous call
    # mutated numpy's global error handling for the rest of the process.
    with np.errstate(divide="ignore", invalid="ignore"):
        if beta > 0:
            numerator = 1 / (1 + np.exp(beta * (alpha - arr))) - 1 / (
                1 + np.exp(beta * alpha)
            )
            denominator = 1 / (1 + np.exp(beta * (alpha - 1))) - 1 / (
                1 + np.exp(beta * alpha)
            )
            output = numerator / denominator
        else:
            # Inverse sigmoidal function:
            # todo: account for 0s
            # todo: formatting ;)
            output = (
                (beta * alpha)
                - np.log(
                    (
                        1
                        / (
                            (arr / (1 + np.exp(beta * alpha - beta)))
                            - (arr / (1 + np.exp(beta * alpha)))
                            + (1 / (1 + np.exp(beta * alpha)))
                        )
                    )
                    - 1
                )
            ) / beta

    return output
def gamma(arr, g):
    r"""
    Gamma correction is a nonlinear operation that
    adjusts the image's channel values pixel-by-pixel according
    to a power-law:

    .. math:: pixel_{out} = pixel_{in} ^ {1/\gamma}

    (The exponent is :math:`1/\gamma`, matching the implementation below;
    the docstring previously stated :math:`\gamma` which contradicted the
    code.) Setting gamma (:math:`\gamma`) to be less than 1.0 darkens the
    image and setting gamma to be greater than 1.0 lightens it.

    Parameters
    ----------
    arr : ndarray, float, 0 .. 1
        Array of color values to adjust
    gamma (:math:`\gamma`): float
        Reasonable values range from 0.8 to 2.4.

    Raises
    ------
    ValueError
        If `arr` has values outside [0, 1] or `g` is not a positive number.

    """
    # Same tolerance as rio_color.utils.epsilon (float64 machine epsilon),
    # computed locally for self-containment.
    eps = np.finfo(np.float64).eps

    if (arr.max() > 1.0 + eps) or (arr.min() < 0 - eps):
        raise ValueError("Input array must have float values between 0 and 1")

    # NaN fails the `<= 0` comparison, so it must be rejected explicitly.
    if g <= 0 or np.isnan(g):
        raise ValueError("gamma must be greater than 0")

    return arr ** (1.0 / g)
def saturation(arr, proportion):
    """Adjust the color saturation of an RGB array (in LCH color space).

    Chroma is multiplied by *proportion* in LCH space: values above 1 make
    colors appear more "pure", values below 1 make them appear more
    "washed-out", and 0 yields grayscale.

    Parameters
    ----------
    arr: ndarray with shape (3, ..., ...)
    proportion: number

    Raises
    ------
    ValueError
        If `arr` does not have exactly 3 bands.
    """
    nbands = arr.shape[0]
    if nbands != 3:
        raise ValueError("saturation requires a 3-band array")
    return saturate_rgb(arr, proportion)
def simple_atmo_opstring(haze, contrast, bias):
    """Build the `rio color` operations string for a simple, static
    atmospheric correction: green and blue gamma adjustments derived from the
    haze factor, followed by a sigmoidal contrast on all three bands.
    """
    green_gamma = 1 - (haze / 3.0)
    blue_gamma = 1 - haze
    parts = [
        "gamma g {}".format(green_gamma),
        "gamma b {}".format(blue_gamma),
        "sigmoidal rgb {} {}".format(contrast, bias),
    ]
    return ", ".join(parts)
def simple_atmo(rgb, haze, contrast, bias):
    """
    A simple, static (non-adaptive) atmospheric correction function.

    Dampens blue (and, to a lesser degree, green) with a gamma adjustment
    derived from *haze*, then applies a sigmoidal contrast to all three
    bands. Bands beyond the first three (e.g. alpha) are passed through
    untouched.

    Parameters
    ----------
    haze: float
        Amount of haze to adjust for. For example, 0.03
    contrast : integer
        Enhances the intensity differences between the lighter and darker
        elements of the image. For example, 0 is none, 3 is typical and
        20 is a lot.
    bias : float, between 0 and 1
        Threshold level for the contrast function to center on
        (typically centered at 0.5 or 50%)
    """
    green_gamma = 1 - (haze / 3.0)
    blue_gamma = 1 - haze

    # Gamma-correct green/blue; red is copied through unchanged.
    corrected = np.empty(shape=(3, rgb.shape[1], rgb.shape[2]))
    corrected[0] = rgb[0]
    corrected[1] = gamma(rgb[1], green_gamma)
    corrected[2] = gamma(rgb[2], blue_gamma)

    # Sigmoidal contrast on the first three bands of a copy of the input.
    output = rgb.copy()
    output[0:3] = sigmoidal(corrected, contrast, bias)
    return output
def _op_factory(func, kwargs, opname, bands, rgb_op=False):
"""create an operation function closure
don't call directly, use parse_operations
returns a function which itself takes and returns ndarrays
"""
def f(arr):
# Avoid mutation by copying
newarr = arr.copy()
if rgb_op:
# apply func to array's first 3 bands, assumed r,g,b
# additional band(s) are untouched
newarr[0:3] = func(newarr[0:3], **kwargs)
else:
# apply func to array band at a time
for b in bands:
newarr[b - 1] = func(arr[b - 1], **kwargs)
return newarr
f.__name__ = str(opname)
return f
def parse_operations(ops_string):
    """Takes a string of operations written with a handy DSL

    "OPERATION-NAME BANDS ARG1 ARG2 OPERATION-NAME BANDS ARG"

    And returns a list of functions, each of which take and return ndarrays

    Raises ValueError for unknown operation names or out-of-range bands;
    a band letter other than r/g/b raises KeyError.
    """
    band_lookup = {"r": 1, "g": 2, "b": 3}
    # Highest band index an operation may reference (r, g, b only).
    count = len(band_lookup)

    opfuncs = {"saturation": saturation, "sigmoidal": sigmoidal, "gamma": gamma}

    opkwargs = {
        "saturation": ("proportion",),
        "sigmoidal": ("contrast", "bias"),
        "gamma": ("g",),
    }

    # Operations that assume RGB colorspace
    rgb_ops = ("saturation",)

    # split into tokens, commas are optional whitespace
    tokens = [x.strip() for x in ops_string.replace(",", "").split(" ")]

    # Group tokens into operations: each known operation name starts a new
    # group; subsequent tokens (bands, args) are lowercased and appended.
    operations = []
    current = []
    for token in tokens:
        if token.lower() in opfuncs.keys():
            if len(current) > 0:
                operations.append(current)
                current = []
        current.append(token.lower())
    if len(current) > 0:
        operations.append(current)

    result = []
    for parts in operations:
        opname = parts[0]
        bandstr = parts[1]
        args = parts[2:]

        try:
            func = opfuncs[opname]
        except KeyError:
            raise ValueError("{} is not a valid operation".format(opname))

        if opname in rgb_ops:
            # ignore bands, assumed to be in rgb
            # push 2nd arg into args
            args = [bandstr] + args
            bands = (1, 2, 3)
        else:
            # 2nd arg is bands
            # parse r,g,b ~= 1,2,3
            bands = set()
            for bs in bandstr:
                try:
                    band = int(bs)
                except ValueError:
                    band = band_lookup[bs.lower()]
                if band < 1 or band > count:
                    raise ValueError(
                        "{} BAND must be between 1 and {}".format(opname, count)
                    )
                bands.add(band)

        # assume all args are float
        args = [float(arg) for arg in args]
        kwargs = dict(zip(opkwargs[opname], args))

        # Create operation function
        f = _op_factory(
            func=func,
            kwargs=kwargs,
            opname=opname,
            bands=bands,
            rgb_op=(opname in rgb_ops),
        )
        result.append(f)

    return result
import numpy as np
import re
# All intermediate math runs in this dtype: a float, because pixel values are
# rescaled into the 0..1 range for every operation.
math_type = np.float64

# Machine epsilon of `math_type`; used as a tolerance in range checks.
epsilon = np.finfo(math_type).eps


def to_math_type(arr):
    """Linearly scale an integer-dtype array down to the 0..1 float range."""
    return arr.astype(math_type) / np.iinfo(arr.dtype).max
def scale_dtype(arr, dtype):
    """Linearly scale a 0..1 float array up to the full range of *dtype*."""
    top = np.iinfo(dtype).max
    return (arr * top).astype(dtype)
def magick_to_rio(convert_opts):
    """Translate a limited subset of imagemagick convert commands
    to rio color operations

    Parameters
    ----------
    convert_opts: String, imagemagick convert options

    Returns
    -------
    operations string, ordered rio color operations
    """
    ops = []
    # Currently-targeted band(s); imagemagick's -channel/+channel toggles it.
    bands = "RGB"

    def set_band(x):
        # `nonlocal` (not `global`): the previous version declared these
        # closures `global bands`, silently reading/writing a *module-level*
        # variable and leaving the function-local `bands` dead.
        nonlocal bands
        bands = x.upper()

    def append_sig(arg):
        # "-sigmoidal-contrast CONTRAST[xMID%]" -> "sigmoidal BANDS C MID"
        args = list(filter(None, re.split("[,x]+", arg)))
        if len(args) == 1:
            # Bare contrast: default the midpoint to 0.5.
            args.append(0.5)
        elif len(args) == 2:
            # Percentage midpoint -> 0..1 proportion.
            args[1] = float(args[1].replace("%", "")) / 100.0
        ops.append("sigmoidal {} {} {}".format(bands, *args))

    def append_gamma(arg):
        ops.append("gamma {} {}".format(bands, arg))

    def append_sat(arg):
        # "-modulate BRIGHTNESS,SATURATION": brightness (args[0]) is ignored;
        # saturation percent becomes a 0..N proportion.
        args = list(filter(None, re.split("[,x]+", arg)))
        prop = float(args[1]) / 100
        ops.append("saturation {}".format(prop))

    # One-token lookahead: each recognized flag installs a handler for the
    # next token; any other token is fed to the pending handler (if any).
    nextf = None

    for part in convert_opts.strip().split(" "):
        if part == "-channel":
            nextf = set_band
        elif part == "+channel":
            set_band("RGB")
            nextf = None
        elif part == "-sigmoidal-contrast":
            nextf = append_sig
        elif part == "-gamma":
            nextf = append_gamma
        elif part == "-modulate":
            nextf = append_sat
        else:
            if nextf:
                nextf(part)
            nextf = None

    return " ".join(ops)
import click
import rasterio
from rasterio.rio.options import creation_options
from rasterio.transform import guard_transform
from rio_color.workers import atmos_worker, color_worker
from rio_color.operations import parse_operations, simple_atmo_opstring
import riomucho
# Shared `--jobs/-j` option for the color/atmos commands: number of parallel
# workers; -1 means "use every available core" (resolved by `check_jobs`).
jobs_opt = click.option(
    "--jobs",
    "-j",
    type=int,
    default=1,
    help="Number of jobs to run simultaneously, Use -1 for all cores, default: 1",
)
def check_jobs(jobs):
    """Validate number of jobs.

    Returns the job count unchanged when positive; any negative value is
    resolved to the machine's CPU count. Zero is rejected.
    """
    if jobs == 0:
        raise click.UsageError("Jobs must be >= 1 or == -1")
    if jobs > 0:
        return jobs
    # Negative job count means "use every available core".
    import multiprocessing

    return multiprocessing.cpu_count()
@click.command("color")
@jobs_opt
@click.option(
    "--out-dtype",
    "-d",
    type=click.Choice(["uint8", "uint16"]),
    help="Integer data type for output data, default: same as input",
)
@click.argument("src_path", type=click.Path(exists=True))
@click.argument("dst_path", type=click.Path(exists=False))
@click.argument("operations", nargs=-1, required=True)
@click.pass_context
@creation_options
def color(ctx, jobs, out_dtype, src_path, dst_path, operations, creation_options):
    """Color correction

    Operations will be applied to the src image in the specified order.

    Available OPERATIONS include:

    \b
        "gamma BANDS VALUE"
            Applies a gamma curve, brightening or darkening midtones.
            VALUE > 1 brightens the image.

    \b
        "sigmoidal BANDS CONTRAST BIAS"
            Adjusts the contrast and brightness of midtones.
            BIAS > 0.5 darkens the image.

    \b
        "saturation PROPORTION"
            Controls the saturation in LCH color space.
            PROPORTION = 0 results in a grayscale image
            PROPORTION = 1 results in an identical image
            PROPORTION = 2 is likely way too saturated

    BANDS are specified as a single arg, no delimiters

    \b
        `123` or `RGB` or `rgb` are all equivalent

    Example:

    \b
        rio color -d uint8 -j 4 input.tif output.tif \\
            gamma 3 0.95, sigmoidal rgb 35 0.13

    """
    # Read profile and per-block windows from the source, then close it.
    with rasterio.open(src_path) as src:
        opts = src.profile.copy()
        windows = [(window, ij) for ij, window in src.block_windows()]

    opts.update(**creation_options)
    opts["transform"] = guard_transform(opts["transform"])

    out_dtype = out_dtype if out_dtype else opts["dtype"]
    opts["dtype"] = out_dtype

    args = {"ops_string": " ".join(operations), "out_dtype": out_dtype}

    # Just run this for validation this time
    # parsing will be run again within the worker
    # where its returned value will be used
    try:
        parse_operations(args["ops_string"])
    except ValueError as e:
        raise click.UsageError(str(e))

    jobs = check_jobs(jobs)

    if jobs > 1:
        # Parallel path: riomucho reads/processes/writes window-by-window.
        with riomucho.RioMucho(
            [src_path],
            dst_path,
            color_worker,
            windows=windows,
            options=opts,
            global_args=args,
            mode="manual_read",
        ) as mucho:
            mucho.run(jobs)
    else:
        # Single-process path: apply the worker to each window ourselves.
        with rasterio.open(dst_path, "w", **opts) as dest:
            with rasterio.open(src_path) as src:
                rasters = [src]
                for window, ij in windows:
                    arr = color_worker(rasters, window, ij, args)
                    dest.write(arr, window=window)
                # Preserve the source band color interpretation in the output.
                dest.colorinterp = src.colorinterp
@click.command("atmos")
@click.option(
    "--atmo",
    "-a",
    type=click.FLOAT,
    default=0.03,
    help="How much to dampen cool colors, thus cutting through "
    "haze. 0..1 (0 is none), default: 0.03.",
)
@click.option(
    "--contrast",
    "-c",
    type=click.FLOAT,
    default=10,
    help="Contrast factor to apply to the scene. -infinity..infinity"
    "(0 is none), default: 10.",
)
@click.option(
    "--bias",
    "-b",
    type=click.FLOAT,
    default=0.15,
    help="Skew (brighten/darken) the output. Lower values make it "
    "brighter. 0..1 (0.5 is none), default: 0.15",
)
@click.option(
    "--out-dtype",
    "-d",
    type=click.Choice(["uint8", "uint16"]),
    help="Integer data type for output data, default: same as input",
)
@click.option(
    "--as-color",
    is_flag=True,
    default=False,
    help="Prints the equivalent rio color command to stdout."
    "Does NOT run either command, SRC_PATH will not be created",
)
@click.argument("src_path", required=True)
@click.argument("dst_path", type=click.Path(exists=False))
@jobs_opt
@creation_options
@click.pass_context
def atmos(
    ctx,
    atmo,
    contrast,
    bias,
    jobs,
    out_dtype,
    src_path,
    dst_path,
    creation_options,
    as_color,
):
    """Atmospheric correction
    """
    if as_color:
        # Dry run: print the equivalent `rio color` command and exit(0)
        # without opening or writing any file.
        click.echo(
            "rio color {} {} {}".format(
                src_path, dst_path, simple_atmo_opstring(atmo, contrast, bias)
            )
        )
        exit(0)

    # Read profile and per-block windows from the source, then close it.
    with rasterio.open(src_path) as src:
        opts = src.profile.copy()
        windows = [(window, ij) for ij, window in src.block_windows()]

    opts.update(**creation_options)
    opts["transform"] = guard_transform(opts["transform"])

    out_dtype = out_dtype if out_dtype else opts["dtype"]
    opts["dtype"] = out_dtype

    args = {"atmo": atmo, "contrast": contrast, "bias": bias, "out_dtype": out_dtype}

    jobs = check_jobs(jobs)

    if jobs > 1:
        # Parallel path: riomucho reads/processes/writes window-by-window.
        with riomucho.RioMucho(
            [src_path],
            dst_path,
            atmos_worker,
            windows=windows,
            options=opts,
            global_args=args,
            mode="manual_read",
        ) as mucho:
            mucho.run(jobs)
    else:
        # Single-process path: apply the worker to each window ourselves.
        with rasterio.open(dst_path, "w", **opts) as dest:
            with rasterio.open(src_path) as src:
                rasters = [src]
                for window, ij in windows:
                    arr = atmos_worker(rasters, window, ij, args)
                    dest.write(arr, window=window)
import os
import warnings
import click
import numpy
import rasterio
from rasterio.enums import ColorInterp, MaskFlags
from rasterio.enums import Resampling as ResamplingEnums
from rasterio.io import MemoryFile
from rasterio.rio import options
from rasterio.shutil import copy
def has_mask_band(src_dst):
    """Check for mask band in source.

    A band counts as internally masked when its flags include
    ``MaskFlags.per_dataset`` without ``MaskFlags.alpha`` (alpha bands are
    also surfaced through mask flags but are not internal masks).

    Args:
        src_dst: Opened rasterio dataset.

    Returns:
        bool: True if a per-dataset (internal) mask band is present.

    """
    # Return the boolean expression directly; generator avoids building a list.
    return any(
        MaskFlags.per_dataset in flags and MaskFlags.alpha not in flags
        for flags in src_dst.mask_flag_enums
    )
@click.command()
@options.file_in_arg
@options.file_out_arg
@click.option(
    "--value",
    default=None,
    type=float,
    help="Set a custom value in the data.",
)
@click.option(
    "--forward-band-tags",
    default=False,
    is_flag=True,
    help="Forward band tags to output bands.",
)
@click.option(
    "--forward-dataset-tags",
    default=False,
    is_flag=True,
    help="Forward dataset tags to output image.",
)
@options.creation_options
@click.option(
    "--config",
    "config",
    metavar="NAME=VALUE",
    multiple=True,
    callback=options._cb_key_val,
    help="GDAL configuration options.",
)
def faux(
    input,
    output,
    value,
    forward_band_tags,
    forward_dataset_tags,
    creation_options,
    config,
):
    """Create empty copy.

    The output mirrors the input's profile, color interpretation, colormap,
    mask, alpha band, overviews, scales/offsets and (optionally) tags, but
    pixel data is left empty unless ``--value`` is given.
    """
    # Check if the dataset has overviews.
    with rasterio.open(input) as src_dst:
        ovr = src_dst.overviews(1)

    # Use the first overview level's block size for GDAL_TIFF_OVR_BLOCKSIZE.
    overview_blocksize = 512
    if ovr:
        with rasterio.open(input, OVERVIEW_LEVEL=0) as src_dst:
            overview_blocksize = src_dst.profile.get("blockxsize", overview_blocksize)

    config.update(
        {
            "GDAL_NUM_THREADS": "ALL_CPUS",
            "GDAL_TIFF_INTERNAL_MASK": os.environ.get("GDAL_TIFF_INTERNAL_MASK", True),
            "GDAL_TIFF_OVR_BLOCKSIZE": str(overview_blocksize),
        }
    )

    with rasterio.Env(**config):
        with rasterio.open(input) as src_dst:
            meta = src_dst.meta
            with MemoryFile() as m:
                with m.open(**meta) as tmp_dst:
                    tmp_dst.colorinterp = src_dst.colorinterp
                    if tmp_dst.colorinterp[0] is ColorInterp.palette:
                        try:
                            tmp_dst.write_colormap(1, src_dst.colormap(1))
                        except ValueError:
                            warnings.warn(
                                "Dataset has `Palette` color interpretation"
                                " but is missing colormap information"
                            )

                    if has_mask_band(src_dst):
                        tmp_dst.write_mask(src_dst.dataset_mask())

                    # BUGFIX: compare against None so that `--value 0` is
                    # honored (0.0 is falsy and was previously skipped).
                    if value is not None:
                        tmp_dst.write(
                            numpy.full(
                                (tmp_dst.count, tmp_dst.height, tmp_dst.width),
                                value,
                                dtype=tmp_dst.dtypes[0],
                            ),
                        )

                    # Copy the alpha band verbatim so transparency is kept.
                    if ColorInterp.alpha in tmp_dst.colorinterp:
                        alpha_bidx = src_dst.colorinterp.index(ColorInterp.alpha) + 1
                        tmp_dst.write(
                            src_dst.read(indexes=alpha_bidx),
                            indexes=alpha_bidx,
                        )

                    tags = src_dst.tags()
                    overview_resampling = tags.get(
                        "OVR_RESAMPLING_ALG", "nearest"
                    ).lower()
                    if ovr:
                        tmp_dst.build_overviews(
                            ovr, ResamplingEnums[overview_resampling]
                        )

                    indexes = src_dst.indexes
                    if forward_band_tags:
                        for i, b in enumerate(indexes):
                            tmp_dst.set_band_description(
                                i + 1, src_dst.descriptions[b - 1]
                            )
                            tmp_dst.update_tags(i + 1, **src_dst.tags(b))
                    if forward_dataset_tags:
                        tmp_dst.update_tags(**tags)

                    tmp_dst._set_all_scales([src_dst.scales[b - 1] for b in indexes])
                    tmp_dst._set_all_offsets([src_dst.offsets[b - 1] for b in indexes])

                    output_profile = src_dst.profile
                    output_profile.update(
                        {"BIGTIFF": os.environ.get("BIGTIFF", "IF_SAFER")}
                    )
                    if creation_options:
                        output_profile.update(creation_options)

                    # Structural keys come from the in-memory dataset during
                    # `copy`; drop them from the creation-option profile.
                    keys = [
                        "dtype",
                        "nodata",
                        "width",
                        "height",
                        "count",
                        "crs",
                        "transform",
                    ]
                    for key in keys:
                        output_profile.pop(key, None)

                    copy(tmp_dst, output, copy_src_overviews=True, **output_profile)
import math
import mercantile
import rasterio
from rasterio.warp import transform_bounds, calculate_default_transform
from rio_tiler.utils import tile_read
def _meters_per_pixel(zoom, lat):
return (math.cos(lat * math.pi / 180.0) * 2 * math.pi * 6378137) / (256 * 2 ** zoom)
class RasterTiles(object):
    """
    Raster tiles object.

    Attributes
    ----------
    src_path : str or PathLike object
        A dataset path or URL. Will be opened in "r" mode.
    indexes : tuple, int, optional
        Raster band indexes to read.
    tiles_size: int, optional (default: 512)
        X/Y tile size to return.
    nodata: int, optional
        nodata value for mask creation.

    Methods
    -------
    get_bounds()
        Get raster bounds (WGS84).
    get_center()
        Get raster lon/lat center coordinates.
    tile_exists(z, x, y)
        Check if a mercator tile is within raster bounds.
    get_max_zoom(snap=0.5, max_z=23)
        Calculate raster max zoom level.
    get_min_zoom(snap=0.5, max_z=23)
        Calculate raster min zoom level.
    read_tile( z, x, y)
        Read raster tile data and mask.
    """

    def __init__(self, src_path, indexes=None, tiles_size=512, nodata=None):
        """Initialize RasterTiles object.

        Raises Exception unless the source is a tiled GTiff with internal
        overviews (i.e. looks like a Cloud Optimized GeoTIFF).
        """
        self.path = src_path
        self.tiles_size = tiles_size
        with rasterio.open(src_path) as src:
            try:
                # A valid COG must be a tiled GTiff with internal overviews.
                assert src.driver == "GTiff"
                assert src.is_tiled
                assert src.overviews(1)
            except (AttributeError, AssertionError, KeyError):
                raise Exception(
                    "{} is not a valid CloudOptimized Geotiff".format(src_path)
                )

            # Dataset bounds reprojected to WGS84 lng/lat.
            self.bounds = list(
                transform_bounds(
                    *[src.crs, "epsg:4326"] + list(src.bounds), densify_pts=0
                )
            )
            self.indexes = indexes if indexes is not None else src.indexes
            self.nodata = nodata if nodata is not None else src.nodata
            self.crs = src.crs
            self.crs_bounds = src.bounds
            self.meta = src.meta
            # NOTE: attribute name keeps its original (misspelled) form for
            # backward compatibility; holds the overview decimation levels.
            self.overiew_levels = src.overviews(1)

    def get_bounds(self):
        """Get raster bounds (WGS84) as [west, south, east, north]."""
        return self.bounds

    def get_center(self):
        """Get raster lon/lat center coordinates."""
        lat = (self.bounds[3] - self.bounds[1]) / 2 + self.bounds[1]
        lng = (self.bounds[2] - self.bounds[0]) / 2 + self.bounds[0]
        return [lng, lat]

    def tile_exists(self, z, x, y):
        """Check if a mercator tile (z/x/y) is within raster bounds."""
        # Tiles covering the NW and SE corners bracket the raster footprint.
        mintile = mercantile.tile(self.bounds[0], self.bounds[3], z)
        maxtile = mercantile.tile(self.bounds[2], self.bounds[1], z)
        return (
            (x <= maxtile.x + 1)
            and (x >= mintile.x)
            and (y <= maxtile.y + 1)
            and (y >= mintile.y)
        )

    def get_max_zoom(self, snap=0.5, max_z=23):
        """Calculate raster max zoom level.

        Finds the lowest web-mercator zoom whose resolution (within a
        ``snap`` tolerance) is at least as fine as the native resolution.
        """
        dst_affine, w, h = calculate_default_transform(
            self.crs,
            "epsg:3857",
            self.meta["width"],
            self.meta["height"],
            *self.crs_bounds
        )
        res_max = max(abs(dst_affine[0]), abs(dst_affine[4]))
        tgt_z = max_z
        mpp = 0.0
        # loop through the pyramid to find the closest z level
        for z in range(1, max_z):
            mpp = _meters_per_pixel(z, 0)
            if (mpp - ((mpp / 2) * snap)) < res_max:
                tgt_z = z
                break
        return tgt_z

    def get_min_zoom(self, snap=0.5, max_z=23):
        """Calculate raster min zoom level.

        Uses the coarsest overview level's effective resolution and walks
        the pyramid from the bottom (z=23) upward.
        """
        dst_affine, w, h = calculate_default_transform(
            self.crs,
            "epsg:3857",
            self.meta["width"],
            self.meta["height"],
            *self.crs_bounds
        )
        res_max = max(abs(dst_affine[0]), abs(dst_affine[4]))
        # Effective resolution of the coarsest overview.
        max_decim = self.overiew_levels[-1]
        resolution = max_decim * res_max
        tgt_z = 0
        mpp = 0.0
        # loop through the pyramid to find the closest z level
        for z in list(range(0, 24))[::-1]:
            mpp = _meters_per_pixel(z, 0)
            tgt_z = z
            if (mpp - ((mpp / 2) * snap)) > resolution:
                break
        return tgt_z

    def read_tile(self, z, x, y):
        """Read raster tile data and mask for mercator tile z/x/y."""
        mercator_tile = mercantile.Tile(x=x, y=y, z=z)
        tile_bounds = mercantile.xy_bounds(mercator_tile)
        return tile_read(
            self.path,
            tile_bounds,
            self.tiles_size,
            indexes=self.indexes,
            nodata=self.nodata,
        )
import os
import click
import numpy
from rio_glui.raster import RasterTiles
from rio_glui import server
class MbxTokenType(click.ParamType):
    """Click parameter type that accepts only public Mapbox access tokens."""

    name = "token"

    def convert(self, value, param, ctx):
        """Return ``value`` if empty or a public ('pk') token, else raise."""
        if not value:
            return ""
        try:
            is_public = value.startswith("pk")
        except AttributeError:
            is_public = False
        if is_public:
            return value
        raise click.ClickException(
            "Mapbox access token must be public (pk). "
            "Please sign up at https://www.mapbox.com/signup/ to get a public token. "
            "If you already have an account, you can retreive your "
            "token at https://www.mapbox.com/account/."
        )
class BdxParamType(click.ParamType):
    """Click parameter type for raster band indexes ('R' or 'R,G,B')."""

    name = "bidx"

    def convert(self, value, param, ctx):
        """Parse a comma-separated string into 1 or 3 positive band indexes."""
        try:
            bands = list(map(int, value.split(",")))
            if len(bands) not in (1, 3) or min(bands) < 1:
                raise ValueError()
            return bands
        except (ValueError, AttributeError):
            raise click.ClickException(
                "bidx must be a string with 1 or 3 ints comma-separated, "
                "representing the band indexes for R,G,B"
            )
class NodataParamType(click.ParamType):
    """Click parameter type for nodata values: a number, 'nan', or a none-like word."""

    name = "nodata"

    def convert(self, value, param, ctx):
        """Convert ``value`` to ``numpy.nan``, ``None``, or ``float``."""
        try:
            lowered = value.lower()
            if lowered == "nan":
                return numpy.nan
            if lowered in ("nil", "none", "nada"):
                return None
            return float(value)
        except (TypeError, ValueError):
            raise click.ClickException("{} is not a valid nodata value.".format(value))
@click.command()
@click.argument("path", type=str)
@click.option("--bidx", "-b", type=BdxParamType(), help="Raster band index")
@click.option(
    "--scale",
    type=int,
    multiple=True,
    nargs=2,
    help="Min and Max data bounds to rescale data from. "
    "Form multiband you can either provide use '--scale 0 1000' or "
    "'--scale 0 1000 --scale 0 500 --scale 0 1500'",
)
@click.option(
    "--colormap",
    type=click.Choice(["cfastie", "schwarzwald"]),
    help=" Rio-tiler compatible colormap name",
)
@click.option(
    "--tiles-format",
    type=click.Choice(["png", "jpg", "webp"]),
    default="png",
    help="Tile image format (default: png)",
)
@click.option(
    "--tiles-dimensions",
    type=int,
    default=512,
    help="Dimension of images being served (default: 512)",
)
@click.option(
    "--nodata",
    type=NodataParamType(),
    metavar="NUMBER|nan",
    help="Set nodata masking values for input dataset.",
)
@click.option(
    "--gl-tile-size",
    type=int,
    help="mapbox-gl tileSize (default is the same as `tiles-dimensions`)",
)
@click.option("--port", type=int, default=8080, help="Webserver port (default: 8080)")
@click.option("--playground", is_flag=True, help="Launch playground app")
@click.option(
    "--mapbox-token",
    type=MbxTokenType(),
    metavar="TOKEN",
    default=lambda: os.environ.get("MAPBOX_ACCESS_TOKEN", ""),
    help="Pass Mapbox token",
)
def glui(
    path,
    bidx,
    scale,
    colormap,
    tiles_format,
    tiles_dimensions,
    nodata,
    gl_tile_size,
    port,
    playground,
    mapbox_token,
):
    """Rasterio glui cli.

    Serve a Cloud Optimized GeoTIFF through a local tile server and open a
    mapbox-gl viewer (template or playground app) in the browser.
    """
    # scale must be a single global (min, max) pair or one pair per RGB band.
    if scale and len(scale) not in [1, 3]:
        raise click.ClickException("Invalid number of scale values")

    raster = RasterTiles(path, indexes=bidx, tiles_size=tiles_dimensions, nodata=nodata)
    # Zoom range is derived from the raster's native resolution and overviews.
    app = server.TileServer(
        raster,
        scale=scale,
        colormap=colormap,
        tiles_format=tiles_format,
        gl_tiles_size=gl_tile_size,
        gl_tiles_minzoom=raster.get_min_zoom(),
        gl_tiles_maxzoom=raster.get_max_zoom(),
        port=port,
    )

    if playground:
        url = app.get_playground_url()
    else:
        url = app.get_template_url()

    if mapbox_token:
        url = "{}?access_token={}".format(url, mapbox_token)

    # Open the browser first, then block on the server loop.
    click.launch(url)
    click.echo("Inspecting {} at {}".format(path, url), err=True)
    app.start()
from __future__ import division, absolute_import
import logging
import numpy as np
import matplotlib.pyplot as plt
from pylab import rcParams
from .utils import raster_to_image
logger = logging.getLogger(__name__)
def _compressed_or_same(band):
    """Return the band with masked pixels removed; plain ndarrays pass through."""
    try:
        return band.compressed()
    except AttributeError:
        # Plain ndarrays have no `compressed`; np.unique below accepts any shape.
        return band


def make_plot(source, reference, target,
              src_arr, ref_arr, tar_arr,
              output, bands):
    """Create a diagnostic plot showing source, reference and matched image
    and the cumulative distribution functions for each band.

    Parameters
    ----------
    source, reference, target : str
        Paths to the source, reference and matched rasters.
    src_arr, ref_arr, tar_arr : ndarray
        Band-ordered arrays corresponding to the three rasters above.
    output : str
        Path of the PNG figure to write.
    bands : sequence of (index, name) tuples
        Band index/name pairs that were matched.
    """
    rcParams['figure.figsize'] = 16, 14

    logger.debug("reading images")
    i1 = raster_to_image(source)
    i2 = raster_to_image(reference)
    i3 = raster_to_image(target)

    f, ((ax1, ax2, ax3), (ax4, ax5, ax6), (ax7, ax8, ax9)) = plt.subplots(3, 3)

    # Top row: the three images side by side.
    logger.debug("showing images")
    ax1.imshow(i1)
    ax1.set_title('Source')
    ax1.set_xticklabels([])
    ax1.set_yticklabels([])
    ax2.imshow(i2)
    ax2.set_title('Reference')
    ax2.set_xticklabels([])
    ax2.set_yticklabels([])
    ax3.imshow(i3)
    ax3.set_title('Matched to ' + ', '.join(str(b) for _, b in bands))
    ax3.set_xticklabels([])
    ax3.set_yticklabels([])

    # Middle row: filled RGB histograms for each image.
    logger.debug("RGB histograms")
    axes = (ax4, ax5, ax6)
    imgs = (i1, i2, i3)
    titles = ('Source', 'Reference', 'Output')
    bins = 32
    for i, axis in enumerate(axes):
        im = imgs[i]
        title = titles[i]
        # compressed for masked arrays, ravel for ndarray
        red, _ = np.histogram(im[:, :, 0].compressed(), bins, [0, 1])
        green, _ = np.histogram(im[:, :, 1].compressed(), bins, [0, 1])
        blue, _ = np.histogram(im[:, :, 2].compressed(), bins, [0, 1])
        for color, name in ((red, "red"), (green, "green"), (blue, "blue")):
            # Normalize by total pixel count so the three plots are comparable.
            norm = color / im.size
            axis.fill_between([float(x) / bins for x in range(bins)],
                              norm, facecolor=name, alpha=0.15)
        axis.set_title("{} RGB histogram".format(title))
        axis.grid('on')

    # Bottom row: per-band cumulative distribution functions.
    logger.debug("CDF match plots")
    axes = (ax7, ax8, ax9)
    for b, band in bands:
        ax = axes[b]
        # BUGFIX: previously bare `except:` clauses swallowed every error
        # here; only AttributeError (no `.compressed`) is expected.
        source_band = _compressed_or_same(src_arr[b])
        reference_band = _compressed_or_same(ref_arr[b])
        target_band = _compressed_or_same(tar_arr[b])

        sv, sc = np.unique(source_band, return_counts=True)
        rv, rc = np.unique(reference_band, return_counts=True)
        tv, tc = np.unique(target_band, return_counts=True)

        scdf = np.cumsum(sc).astype(np.float64) / source_band.size
        rcdf = np.cumsum(rc).astype(np.float64) / reference_band.size
        tcdf = np.cumsum(tc).astype(np.float64) / target_band.size

        ax.set_title("{} cumulative distribution".format(band))
        ax.plot(sv, scdf, label="Source")
        ax.plot(rv, rcdf, label="Reference")
        ax.plot(tv, tcdf, '--r', lw=2, label="Match")
        if b == 1:
            ax.legend(loc=9, bbox_to_anchor=(0.5, -0.05))
        ax.grid('on')

    plt.savefig(output, bbox_inches='tight')
from __future__ import division, absolute_import
import warnings
import numpy as np
import rasterio
from rasterio.enums import ColorInterp, MaskFlags
from rio_color.colorspace import convert_arr, ColorSpace
def reshape_as_image(arr):
    """raster order (bands, rows, cols) -> image order (rows, cols, bands)

    TODO Use rasterio.plot.reshape_as_image in rasterio 0.36?
    """
    return np.transpose(arr, (1, 2, 0))
def reshape_as_raster(arr):
    """image order (rows, cols, bands) -> rasterio order (bands, rows, cols)

    TODO Use rasterio.plot.reshape_as_raster in rasterio 0.36?
    """
    return np.transpose(arr, (2, 0, 1))
def cs_forward(arr, cs='rgb'):
    """Normalize an integer RGB(A) raster array to 0..1 and convert color space.

    Parameters
    ----------
    arr : ndarray
        Integer-typed array in raster order; only the first 3 bands are used.
    cs : str
        Target color space: 'rgb', 'lch', 'lab', 'luv' or 'xyz' (case-insensitive).

    Returns
    -------
    ndarray of float64 in the requested color space.

    Raises
    ------
    ValueError
        If ``cs`` is not a supported color space name (previously an unknown
        name silently returned None).
    """
    # Scale to 0..1 based on the integer dtype's full range.
    arrnorm_raw = arr.astype('float64') / np.iinfo(arr.dtype).max
    arrnorm = arrnorm_raw[0:3]
    cs = cs.lower()
    if cs == 'rgb':
        return arrnorm
    if cs in ('lch', 'lab', 'luv', 'xyz'):
        # ColorSpace enum members are named after the lowercase cs strings.
        return convert_arr(arrnorm, src=ColorSpace.rgb, dst=ColorSpace[cs])
    raise ValueError("Unsupported color space: {}".format(cs))
def cs_backward(arr, cs='rgb'):
    """Convert a 0..1 float array from color space ``cs`` back to 8-bit RGB.

    Parameters
    ----------
    arr : ndarray
        Float array in the ``cs`` color space.
    cs : str
        Source color space: 'rgb', 'lch', 'lab', 'luv' or 'xyz' (case-insensitive).

    Returns
    -------
    uint8 ndarray scaled to 0..255.

    Raises
    ------
    ValueError
        If ``cs`` is not a supported color space name (previously an unknown
        name silently returned None).
    """
    cs = cs.lower()
    if cs == 'rgb':
        rgb = arr
    elif cs in ('lch', 'lab', 'luv', 'xyz'):
        # ColorSpace enum members are named after the lowercase cs strings.
        rgb = convert_arr(arr, src=ColorSpace[cs], dst=ColorSpace.rgb)
    else:
        raise ValueError("Unsupported color space: {}".format(cs))
    return (rgb * 255).astype('uint8')
def raster_to_image(raster):
    """Make an image-ordered 8bit 3-band array
    from a rasterio source

    Reads all bands masked, normalizes to 0..1 RGB floats (cs_forward
    lowercases the color-space name, so 'RGB' is accepted), and reorders
    to (rows, cols, bands).
    """
    with rasterio.open(raster) as src:
        arr = src.read(masked=True)
        return reshape_as_image(cs_forward(arr, 'RGB'))
def read_mask(dataset):
    """Get the dataset's mask

    Returns
    -------
    numpy.array
        The GDAL-style dataset mask (255 = valid, 0 = nodata).

    Notes
    -----
    This function is no longer called by module code but we're going to
    continue to test it for a few future versions as insurance on the new
    implementation.
    """
    return dataset.dataset_mask()
from __future__ import division, absolute_import
import logging
import os
import numpy as np
import rasterio
from rasterio.transform import guard_transform
from .utils import cs_forward, cs_backward
logger = logging.getLogger(__name__)
def histogram_match(source, reference, match_proportion=1.0):
    """
    Adjust the values of a source array
    so that its histogram matches that of a reference array

    Parameters:
    -----------
    source: np.ndarray
        Array to adjust; may be a masked array (masked pixels are preserved).
    reference: np.ndarray
        Array whose histogram is the match target; may be masked.
    match_proportion: float, range 0..1
        1.0 is a full histogram match, 0.0 leaves the source unchanged;
        intermediate values interpolate linearly between the two.

    Returns:
    -----------
    target: np.ndarray
        The output array with the same shape as source
        but adjusted so that its histogram matches the reference
    """
    orig_shape = source.shape
    source = source.ravel()

    if np.ma.is_masked(reference):
        logger.debug("ref is masked, compressing")
        reference = reference.compressed()
    else:
        logger.debug("ref is unmasked, raveling")
        reference = reference.ravel()

    # get the set of unique pixel values
    # and their corresponding indices and counts
    logger.debug("Get unique pixel values")
    s_values, s_idx, s_counts = np.unique(
        source, return_inverse=True, return_counts=True)
    r_values, r_counts = np.unique(reference, return_counts=True)
    s_size = source.size

    if np.ma.is_masked(source):
        # np.unique collapses all masked pixels into a single masked slot;
        # drop that slot and count only the unmasked pixels.
        logger.debug("source is masked; get mask_index and remove masked values")
        mask_index = np.ma.where(s_values.mask)
        s_size = np.ma.where(s_idx != mask_index[0])[0].size
        s_values = s_values.compressed()
        s_counts = np.delete(s_counts, mask_index)

    # take the cumsum of the counts; empirical cumulative distribution
    logger.debug("calculate cumulative distribution")
    s_quantiles = np.cumsum(s_counts).astype(np.float64) / s_size
    r_quantiles = np.cumsum(r_counts).astype(np.float64) / reference.size

    # find values in the reference corresponding to the quantiles in the source
    logger.debug("interpolate values from source to reference by cdf")
    interp_r_values = np.interp(s_quantiles, r_quantiles, r_values)

    if np.ma.is_masked(source):
        # re-insert a placeholder at the masked slot so s_idx indexes align
        logger.debug("source is masked, add fill_value back at mask_index")
        interp_r_values = np.insert(interp_r_values, mask_index[0], source.fill_value)

    # using the inverted source indicies, pull out the interpolated pixel values
    logger.debug("create target array from interpolated values by index")
    target = interp_r_values[s_idx]

    # interpolation b/t target and source
    # 1.0 = full histogram match
    # 0.0 = no change
    if match_proportion is not None and match_proportion != 1:
        diff = source - target
        target = source - (diff * match_proportion)

    if np.ma.is_masked(source):
        logger.debug("source is masked, remask those pixels by position index")
        target = np.ma.masked_where(s_idx == mask_index[0], target)
        target.fill_value = source.fill_value

    return target.reshape(orig_shape)
def calculate_mask(src, arr):
    """Derive a boolean nodata mask and fill value from a masked array.

    Returns ``(None, None)`` when ``arr`` has no masked pixels; otherwise a
    boolean array that is True where the dataset mask marks nodata, together
    with the masked array's fill value.
    """
    if arr.mask.sum() == 0:
        return None, None
    gdal_mask = src.dataset_mask()
    nodata_mask = np.invert((gdal_mask / 255).astype('bool'))
    return nodata_mask, arr.fill_value
def hist_match_worker(src_path, ref_path, dst_path, match_proportion,
                      creation_options, bands, color_space, plot):
    """Match histogram of src to ref, outputing to dst

    optionally output a plot to <dst>_plot.png

    Parameters
    ----------
    src_path, ref_path, dst_path : str
        Source, reference, and destination raster paths.
    match_proportion : float
        0..1 strength of the histogram match (see histogram_match).
    creation_options : dict
        Extra GDAL creation options merged into the output profile.
    bands : str
        Comma-separated 1-based band indexes, e.g. "1,2,3".
    color_space : str
        Color space in which matching is performed ('rgb', 'lab', ...).
    plot : bool
        If True, also write a diagnostic figure next to dst_path.
    """
    logger.info("Matching {} to histogram of {} using {} color space".format(
        os.path.basename(src_path), os.path.basename(ref_path), color_space))

    with rasterio.open(src_path) as src:
        profile = src.profile.copy()
        src_arr = src.read(masked=True)
        # Keep the 2D mask/fill separately; bands are processed unmasked.
        src_mask, src_fill = calculate_mask(src, src_arr)
        src_arr = src_arr.filled()

    with rasterio.open(ref_path) as ref:
        ref_arr = ref.read(masked=True)
        ref_mask, ref_fill = calculate_mask(ref, ref_arr)
        ref_arr = ref_arr.filled()

    # Convert both rasters into the working color space (0..1 floats).
    src = cs_forward(src_arr, color_space)
    ref = cs_forward(ref_arr, color_space)

    # 1-based CLI band indexes -> 0-based array indexes.
    bixs = tuple([int(x) - 1 for x in bands.split(',')])
    band_names = [color_space[x] for x in bixs]  # assume 1 letter per band

    target = src.copy()
    for i, b in enumerate(bixs):
        logger.debug("Processing band {}".format(b))
        src_band = src[b]
        ref_band = ref[b]

        # Re-apply 2D mask to each band
        if src_mask is not None:
            logger.debug("apply src_mask to band {}".format(b))
            src_band = np.ma.asarray(src_band)
            src_band.mask = src_mask
            src_band.fill_value = src_fill

        if ref_mask is not None:
            logger.debug("apply ref_mask to band {}".format(b))
            ref_band = np.ma.asarray(ref_band)
            ref_band.mask = ref_mask
            ref_band.fill_value = ref_fill

        target[b] = histogram_match(src_band, ref_band, match_proportion)

    # Back to 8-bit RGB for output.
    target_rgb = cs_backward(target, color_space)

    # re-apply src_mask to target_rgb and write ndv
    if src_mask is not None:
        logger.debug("apply src_mask to target_rgb")
        if not np.ma.is_masked(target_rgb):
            target_rgb = np.ma.asarray(target_rgb)
        target_rgb.mask = np.array((src_mask, src_mask, src_mask))
        target_rgb.fill_value = src_fill
        # 4th band carries the GDAL-style validity mask.
        profile['count'] = 4
    else:
        profile['count'] = 3

    profile['dtype'] = 'uint8'
    profile['nodata'] = None
    profile['transform'] = guard_transform(profile['transform'])
    profile.update(creation_options)

    logger.info("Writing raster {}".format(dst_path))
    with rasterio.open(dst_path, 'w', **profile) as dst:
        dst.write(target_rgb[0], 1)
        dst.write(target_rgb[1], 2)
        dst.write(target_rgb[2], 3)
        if src_mask is not None:
            gdal_mask = (np.invert(src_mask) * 255).astype('uint8')
            dst.write(gdal_mask, 4)

    if plot:
        from .plot import make_plot
        outplot = os.path.splitext(dst_path)[0] + "_plot.png"
        logger.info("Writing figure to {}".format(outplot))
        make_plot(
            src_path, ref_path, dst_path,
            src, ref, target,
            output=outplot,
            bands=tuple(zip(bixs, band_names)))
import rasterio as rio
import numpy as np
import click
import json
from shapely.geometry import Polygon, LineString
__version__ = '1.1.1'
def offset_rad_poly(lngs, lats, valRow, bounds, scaling_factor):
    """Build closed-polygon coordinates for one raster row: the value-offset
    ridge line plus a frame along the right, bottom, and left bounds."""
    offset_lats = lats + valRow * scaling_factor
    ridge = np.dstack([lngs, offset_lats])[0]
    frame = [
        [bounds.right, lats[-1]],
        [bounds.right, bounds.bottom],
        [bounds.left, bounds.bottom],
        [bounds.left, lats[0]],
    ]
    return np.concatenate([ridge, frame])
def offset_rad_line(lngs, lats, valRow, bounds, scaling_factor):
    """Build open-line coordinates for one raster row: the value-offset ridge
    line, terminated by a final point at the right bound."""
    offset_lats = lats + valRow * scaling_factor
    ridge = np.dstack([lngs, offset_lats])[0]
    return np.concatenate([ridge, [[bounds.right, lats[-1]]]])
def make_point_grid(rows, cols, bounds):
    """Return (lngs, lats) coordinate grids of shape (rows, cols) spanning bounds.

    Longitudes increase left-to-right within each row; latitudes decrease
    top-to-bottom (the latitude grid is rotated so row 0 is the top).
    """
    lng_step = (bounds.right - bounds.left) / float(cols)
    lat_step = (bounds.top - bounds.bottom) / float(rows)
    lng_row = np.arange(bounds.left, bounds.right, lng_step)
    lat_col = np.arange(bounds.bottom, bounds.top, lat_step)
    lngs = np.array([lng_row for _ in range(rows)])
    lats = np.rot90(np.array([lat_col for _ in range(cols)]))
    return lngs, lats
def joydivision(inputfile, row_interval, col_interval, scaling_factor, nodata_set, bidx, gtype):
    """Stream "joy division"-style GeoJSON features from a raster.

    Reads band ``bidx`` of ``inputfile`` decimated by ``row_interval`` /
    ``col_interval``, offsets each row's latitudes by the scaled raster
    values, and echoes one GeoJSON Feature per raster row.

    Parameters
    ----------
    inputfile : str
        Raster path readable by rasterio.
    row_interval, col_interval : int
        Decimation factors for rows and columns.
    scaling_factor : float
        Multiplier applied to raster values before offsetting latitudes.
    nodata_set : number or falsy
        If truthy, replaces the source's nodata pixels with this value.
    bidx : int
        1-based band index to read.
    gtype : str
        'LineString' for open ridge lines, anything else for Polygons.
    """
    with rio.open(inputfile) as src:
        bounds = src.bounds
        # Decimated read: rasterio resamples into this smaller output array.
        rasVals = np.zeros((
            int(src.height / float(row_interval)),
            int(src.width / float(col_interval))
        ), dtype=src.meta['dtype'])
        src.read(bidx, out=rasVals)
        # NOTE(review): `src.affine` is deprecated in rasterio 1.x; confirm
        # `src.transform.a` before upgrading.
        cellsize = src.affine.a
        if nodata_set:
            rasVals[np.where(rasVals == src.nodata)] = nodata_set
        rows, cols = rasVals.shape
        lngs, lats = make_point_grid(rows, cols, bounds)
        # BUGFIX: `xrange` is Python 2 only and raises NameError on Python 3.
        for r in range(rows):
            # NOTE(review): both branches use the open line coordinates;
            # `offset_rad_poly` appears intended for the Polygon case but is
            # unused -- preserved as-is to keep output identical.
            xy = offset_rad_line(lngs[r], lats[r], rasVals[r], bounds, scaling_factor)
            if gtype == 'LineString':
                geom = LineString(xy.tolist()).simplify(cellsize)
                geometry = {
                    "type": "LineString",
                    "coordinates": [c for c in geom.coords]
                }
            else:
                geom = Polygon(xy.tolist()).simplify(cellsize)
                geometry = {
                    "type": "Polygon",
                    "coordinates": [[c for c in geom.exterior.coords]]
                }
            click.echo(json.dumps({
                "type": "Feature",
                "properties": {
                    'row': r
                },
                "geometry": geometry
            }))
if __name__ == '__main__':
    # NOTE(review): `joydivision` takes seven required positional arguments,
    # so this bare call raises TypeError -- presumably a click decorator or
    # argument parsing was intended; confirm before relying on direct
    # script execution.
    joydivision()
import os
import numpy as np
import rasterio
from rasterio.transform import guard_transform
def _capture_bits(arr, b1, b2):
width_int = int((b1 - b2 + 1) * "1", 2)
return ((arr >> b2) & width_int).astype('uint8')
def fill_qa(arr):
    """Designated fill flag (bit 0).

    0 = No, this condition does not exist
    1 = Yes, this condition exists
    """
    return _capture_bits(arr, 0, 0)
def terrain_qa(arr):
    """Terrain occlusion flag (bit 1).

    0 = No, this condition does not exist
    1 = Yes, this condition exists
    """
    return _capture_bits(arr, 1, 1)
def radiometric_qa(arr):
    """Radiometric saturation level (bits 3-2).

    For radiometric saturation bits (2-3), read from left to right
    represent how many bands contain saturation:

    00 - No bands contain saturation
    01 - 1-2 bands contain saturation
    10 - 3-4 bands contain saturation
    11 - 5 or more bands contain saturation
    """
    return _capture_bits(arr, 3, 2)
def cloud(arr):
    """Cloud flag (bit 4).

    0 = No, this condition does not exist
    1 = Yes, this condition exists
    """
    return _capture_bits(arr, 4, 4)
def cloud_confidence(arr):
    """Cloud confidence (bits 6-5).

    00 = "Not Determined" = Algorithm did not determine the status of this condition
    01 = "No" = Algorithm has low to no confidence that this condition exists (0-33 percent confidence)
    10 = "Maybe" = Algorithm has medium confidence that this condition exists (34-66 percent confidence)
    11 = "Yes" = Algorithm has high confidence that this condition exists (67-100 percent confidence
    """
    return _capture_bits(arr, 6, 5)
def cloud_shadow_confidence(arr):
    """Cloud shadow confidence (bits 8-7).

    00 = "Not Determined" = Algorithm did not determine the status of this condition
    01 = "No" = Algorithm has low to no confidence that this condition exists (0-33 percent confidence)
    10 = "Maybe" = Algorithm has medium confidence that this condition exists (34-66 percent confidence)
    11 = "Yes" = Algorithm has high confidence that this condition exists (67-100 percent confidence
    """
    return _capture_bits(arr, 8, 7)
def snow_ice_confidence(arr):
    """Snow/ice confidence (bits 10-9).

    00 = "Not Determined" = Algorithm did not determine the status of this condition
    01 = "No" = Algorithm has low to no confidence that this condition exists (0-33 percent confidence)
    10 = "Maybe" = Algorithm has medium confidence that this condition exists (34-66 percent confidence)
    11 = "Yes" = Algorithm has high confidence that this condition exists (67-100 percent confidence
    """
    return _capture_bits(arr, 10, 9)
def cirrus_confidence(arr):
    """Cirrus confidence (bits 12-11).

    00 = "Not Determined" = Algorithm did not determine the status of this condition
    01 = "No" = Algorithm has low to no confidence that this condition exists (0-33 percent confidence)
    10 = "Maybe" = Algorithm has medium confidence that this condition exists (34-66 percent confidence)
    11 = "Yes" = Algorithm has high confidence that this condition exists (67-100 percent confidence
    """
    return _capture_bits(arr, 12, 11)
# Maps QA variable name -> bit-extraction function (defined above).
qa_vars = {
    'fill': fill_qa,
    'terrain': terrain_qa,
    'radiometricSaturation': radiometric_qa,
    'cloud': cloud,
    'cloudConf': cloud_confidence,
    'cirrusConf': cirrus_confidence,
    'cloudShadowConf': cloud_shadow_confidence,
    'snowIceConf': snow_ice_confidence,
}

# Variables whose value is a single yes/no bit rather than a 2-bit confidence.
binary_vars = ('terrain', 'cloud', 'fill')
def lookup(name, val):
    """Translate a numeric QA value for variable ``name`` into its label.

    Binary variables decode 0 -> "no", anything else -> "yes"; 2-bit
    confidence variables decode 0..3 (None for out-of-range values).
    """
    if name in binary_vars:
        return "yes" if val else "no"
    labels = {0: "notDetermined", 1: "no", 2: "maybe", 3: "yes"}
    return labels.get(val)
def write_cloud_mask(arr, profile, cloudmask, threshold=2):
    """
    writes the cloud+alpha mask as single-band uint8 tiff
    suitable for stacking as an alpha band

    NOTE(review): `threshold` is currently unused -- the thresholding logic
    below is commented out and the single cloud bit is simply scaled to
    0/255; confirm the intended behavior before relying on `threshold`.
    """
    func = qa_vars['cloud']
    data = func(arr)
    profile.update(dtype='uint8')
    profile.update(transform=guard_transform(profile['transform']))
    with rasterio.open(cloudmask, 'w', **profile) as dest:
        # clouds = (data >= threshold)
        # nodata = (data == 0)
        # yesdata = ((clouds + nodata) == 0)
        data = (data * 255).astype('uint8')
        dest.write(data, 1)
def summary_stats(arr, basename=None, outdir=None, profile=None, cloudmask=None):
    """Returns summary stats for QA variables

    Input is a 16bit 2D array from a Landasat 8 band.
    Returns a dict of {variable_name: {label: fraction_of_pixels}}.

    Optional side effects:
        write QA variables as uint8 tifs to outdir

    NOTE(review): the `cloudmask` parameter is accepted but unused here;
    see write_cloud_mask for the cloud mask output.
    """
    stats = {}
    size = arr.size
    for name, func in qa_vars.items():
        data = func(arr)
        # Fraction of pixels for each decoded label, rounded to 6 places.
        u, counts = np.unique(data, return_counts=True)
        u = [lookup(name, x) for x in u]
        counts = [round(x / float(size), 6) for x in counts]
        stats[name] = dict(zip(u, counts))

        # Optionally write the band to outdir as a uint8 tif
        if outdir and basename and profile:
            profile.update(dtype='uint8')
            if not os.path.exists(outdir):
                os.makedirs(outdir)
            outpath = os.path.join(outdir, basename.replace('BQA', name))
            with rasterio.open(outpath, 'w', **profile) as dest:
                dest.write_band(1, data)

    return stats
import os
import numpy as np
import rasterio
from rasterio.transform import guard_transform
def _capture_bits(arr, b1, b2):
width_int = int((b1 - b2 + 1) * "1", 2)
return ((arr >> b2) & width_int).astype('uint8')
# 2-bit confidence encoding shared by the functions below:
# 0 = not determined
# 1 = no
# 2 = maybe
# 3 = yes
def cloud_qa(arr):
    """Cloud confidence (bits 15-14); see encoding legend above."""
    return _capture_bits(arr, 15, 14)
def cirrus_qa(arr):
    """Cirrus confidence (bits 13-12): 0=notDetermined, 1=no, 2=maybe, 3=yes."""
    return _capture_bits(arr, 13, 12)
def snow_ice_qa(arr):
    """Snow/ice confidence (bits 11-10): 0=notDetermined, 1=no, 2=maybe, 3=yes."""
    return _capture_bits(arr, 11, 10)
def cloud_shadow_qa(arr):
    """Cloud shadow confidence (bits 7-6): 0=notDetermined, 1=no, 2=maybe, 3=yes."""
    return _capture_bits(arr, 7, 6)
def water_qa(arr):
    """Water confidence (bits 5-4): 0=notDetermined, 1=no, 2=maybe, 3=yes."""
    return _capture_bits(arr, 5, 4)
# 1 bit qa bands: 0 = no, 1=yes
def terrain_qa(arr):
    """Terrain occlusion flag (bit 2): 0=no, 1=yes."""
    return _capture_bits(arr, 2, 2)
def dropped_frame_qa(arr):
    """Dropped frame flag (bit 1): 0=no, 1=yes."""
    return _capture_bits(arr, 1, 1)
def fill_qa(arr):
    """Designated fill flag (bit 0): 0=no, 1=yes."""
    return _capture_bits(arr, 0, 0)
# Maps QA variable name -> bit-extraction function (defined above).
qa_vars = {
    'clouds': cloud_qa,
    'cirrus': cirrus_qa,
    'cloudShadow': cloud_shadow_qa,
    'water': water_qa,
    'snowIce': snow_ice_qa,
    'terrain': terrain_qa,
    'droppedFrame': dropped_frame_qa,
    'fill': fill_qa
}

# Variables whose value is a single yes/no bit rather than a 2-bit confidence.
binary_vars = ('terrain', 'droppedFrame', 'fill')
def lookup(name, val):
    """Translate a numeric QA value for variable ``name`` into its label.

    Binary variables decode 0 -> "no", anything else -> "yes"; 2-bit
    confidence variables decode 0..3 (None for out-of-range values).
    """
    if name in binary_vars:
        return "yes" if val else "no"
    labels = {0: "notDetermined", 1: "no", 2: "maybe", 3: "yes"}
    return labels.get(val)
def write_cloud_mask(arr, profile, cloudmask, threshold=2):
    """
    writes the cloud+alpha mask as single-band uint8 tiff
    suitable for stacking as an alpha band

    threshold defaults to 2; only 2 and above are considered clouds.
    Output pixels are 255 (opaque/valid) only where cloud confidence is
    determined (non-zero) and below the threshold; 0 otherwise.
    """
    func = qa_vars['clouds']
    data = func(arr)
    profile.update(dtype='uint8')
    profile.update(transform=guard_transform(profile['transform']))
    with rasterio.open(cloudmask, 'w', **profile) as dest:
        clouds = (data >= threshold)  # confident cloud pixels
        nodata = (data == 0)          # confidence not determined
        # valid only where neither cloudy nor undetermined
        yesdata = ((clouds + nodata) == 0)
        data = (yesdata * 255).astype('uint8')
        dest.write(data, 1)
def summary_stats(arr, basename=None, outdir=None, profile=None, cloudmask=None):
    """Return summary stats for QA variables.

    Input is a 16-bit 2D array from a Landsat 8 BQA band. The result maps
    each QA variable name to a dict of {label: fraction-of-pixels}.

    Optional side effect: when `outdir`, `basename` and `profile` are all
    given, each QA variable is also written as a uint8 tif to outdir.

    NOTE(review): the `cloudmask` parameter is accepted but never used
    here — presumably callers use write_cloud_mask separately; confirm.
    NOTE(review): mutates the caller's `profile` dict (dtype) in place.
    """
    stats = {}
    size = arr.size
    for name, func in qa_vars.items():
        data = func(arr)
        # Unique QA values and their pixel counts for this variable.
        u, counts = np.unique(data, return_counts=True)
        u = [lookup(name, x) for x in u]
        # Express counts as fractions of the whole array, 6 decimal places.
        counts = [round(x / float(size), 6) for x in counts]
        stats[name] = dict(zip(u, counts))
        # Optionally write the band to outdir as a uint8 tif
        if outdir and basename and profile:
            profile.update(dtype='uint8')
            if not os.path.exists(outdir):
                os.makedirs(outdir)
            # e.g. LC8..._BQA.TIF -> LC8..._clouds.TIF
            outpath = os.path.join(outdir, basename.replace('BQA', name))
            with rasterio.open(outpath, 'w', **profile) as dest:
                dest.write_band(1, data)
return stats | /rio-l8qa-0.1.1.tar.gz/rio-l8qa-0.1.1/l8qa/qa_pre.py | 0.582729 | 0.29549 | qa_pre.py | pypi |
# rio merge-rgba
[](https://travis-ci.org/mapbox/rio-merge-rgba.svg)
A `rio merge` alternative optimized for large RGBA tifs
`rio merge-rgba` is a CLI with nearly identical arguments to `rio merge`. They accomplish the same task, merging many rasters into one.
```
$ rio merge-rgba --help
Usage: rio merge-rgba [OPTIONS] INPUTS... OUTPUT
Options:
-o, --output PATH Path to output file (optional alternative to a
positional arg for some commands).
--bounds FLOAT... Output bounds: left bottom right top.
-r, --res FLOAT Output dataset resolution in units of
coordinate reference system. Pixels assumed to
be square if this option is used once,
otherwise use: --res pixel_width --res
pixel_height
-f, --force-overwrite Do not prompt for confirmation before
overwriting output file
--precision INTEGER Number of decimal places of precision in
alignment of pixels
--co NAME=VALUE Driver specific creation options.See the
documentation for the selected output driver
for more information.
--help Show this message and exit.
```
The differences are in the implementation, `rio merge-rgba`:
1. only accepts 4-band RGBA rasters
2. writes the destination data to disk rather than an in-memory array
3. reads/writes in windows corresponding to the destination block layout
4. once a window is filled with data values, the rest of the source files are skipped for that window
### Why windowed and why write to disk?
Memory efficiency. You'll never load more than `2 * blockxsize * blockysize` pixels into numpy arrays at one time, assuming garbage collection is infallible and there are no memory leaks.
While this does mean reading and writing to disk more frequently, having spatially aligned data with identical block layouts (like scenetifs) can make that as efficient as possible. Also...
### Why only RGBA?
`rio merge` is more flexible with regard to nodata. It relies on reads with `masked=True` to handle that logic across all cases.
By contrast, `rio merge-rgba` requires RGBA images because, by reading with `masked=False` and using the alpha band as the sole source of nodata-ness, we get huge speedups over the rio merge approach. Roughly 40x faster for my test cases. The exact reasons behind the discrepancy are TBD but since we're reading/writing more intensively with the windowed approach, we need to keep IO as efficient as possible.
### Why not improve rio merge
I tried but the speed advantage comes from avoiding masked reads. Once we improve the efficiency of masked reads or invent another mechanism for handling nodata masks that is more efficient, we can pull the windowed approach [back into rasterio](https://github.com/mapbox/rasterio/issues/507)
### Benchmarks
Very promising. Tested on Landsat scenes, 23 rasters in total. I created reduced-resolution versions of each in order to test the performance characteristics as sizes increase.
<table class="dataframe" border="1">
<thead>
<tr>
<th>resolution</th>
<th>raster size</th>
<th>rio merge Memory (MB)</th>
<th>merge_rgba Memory (MB)</th>
<th>rio merge Time (s)</th>
<th>merge_rgba Time (s)</th>
</tr>
</thead>
<tbody>
<tr>
<th>300</th>
<th>1044x1044</th>
<td>135</td>
<td>83</td>
<td>1.10</td>
<td>0.70</td>
</tr>
<tr>
<th>150</th>
<th>2088x2088</th>
<td>220</td>
<td>107</td>
<td>3.20</td>
<td>1.90</td>
</tr>
<tr>
<th>90</th>
<th>3479x3479</th>
<td>412</td>
<td>115</td>
<td>8.85</td>
<td>3.10</td>
</tr>
<tr>
<th>60</th>
<th>5219x5219</th>
<td>750</td>
<td>121</td>
<td>25.00</td>
<td>7.00</td>
</tr>
<tr>
<th>30</th>
<th>10436x10436</th>
<td><i>1GB+ crashed</i></td>
<td>145</td>
<td><i>crashed at 38 minutes</i></td>
<td>19.80</td>
</tr>
</tbody>
</table>
Note that the "merge_aligned" referred to in the charts is the same command, just renamed:


### Note about pixel alignment
Since the inclusion of the [full cover window](https://github.com/mapbox/rasterio/pull/466) in rasterio, there is a possibility of including an additional bottom row or right column if the bounds of the destination are not directly aligned with the source.
In rio merge, which reads the entire raster at once, this can manifest itself as one additional row and column on the bottom right edge of the image. The image within remains consistent.
With `merge_rgba.py`, if we used the default full cover window, errors may appear within the image at block window boundaries where e.g. a 257x257 window is read into a 256x256 destination. To avoid this, we effectively embed a reimplementation of rasterio's `get_window` using the `round` operator which improves our chances that the pixel boundaries are snapped to appropriate bounds.
You may see small differences between rio merge and merge_rgba as a result but they *should* be limited to the single bottom row and right-most column.
### Note about resampling
Neither `rio merge` nor `rio merge-rgba` allow for bilinear resampling. The "resampling" is done effectively with a crude nearest neighbor via `np.copyto`. This means that up to 1/2 cell pixel shifts can occur if inputs are misaligned.
| /rio-merge-rgba-0.4.0.tar.gz/rio-merge-rgba-0.4.0/README.md | 0.527803 | 0.917857 | README.md | pypi |
from __future__ import with_statement
from functools import wraps
from multiprocessing import Pool
import sys
import traceback
import rasterio
from rasterio.transform import guard_transform
from riomucho import utils
from riomucho.single_process_pool import MockTub
global_args = None
srcs = None
class MuchoChildError(Exception):
    """Wraps an exception raised inside a child process.

    Multiprocessing cannot pickle tracebacks across process boundaries
    (see https://bugs.python.org/issue13831), so the formatted traceback
    text is captured here instead.
    """

    def __init__(self):
        """Capture the exception currently being handled, plus its traceback."""
        etype, value, tb = sys.exc_info()
        self.exception = value
        self.formatted = "".join(traceback.format_exception(etype, value, tb))

    def __str__(self):
        base = Exception.__str__(self)
        return "{}\nChild process's traceback:\n{}".format(base, self.formatted)
def tb_capture(func):
    """Decorator that captures worker tracebacks.

    Any exception escaping *func* is re-raised as a MuchoChildError, which
    records the formatted traceback so it survives the trip back to the
    parent process. Inspired by an example in
    https://bugs.python.org/issue13831.

    Parameters
    ----------
    func : function
        A rio-mucho worker task to be decorated.

    Returns
    -------
    func
    """

    @wraps(func)
    def inner(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception:
            raise MuchoChildError()

    return inner
def init_worker(inpaths, g_args):
    """The multiprocessing worker initializer.

    Runs once in each pool process: opens every input dataset and stashes
    the handles plus the global arguments in module-level globals so the
    reader callables can reach them without pickling.

    Parameters
    ----------
    inpaths : list of str
        A list of dataset paths.
    g_args : dict
        Global arguments.

    Returns
    -------
    None

    NOTE(review): the opened datasets are never explicitly closed; they
    live for the lifetime of the worker process.
    """
    global global_args
    global srcs
    global_args = g_args
    srcs = [rasterio.open(i) for i in inpaths]
class ReaderBase(object):
    """Common base for reader callables; holds the user-supplied function."""

    def __init__(self, user_func):
        """Create a new instance.

        Parameters
        ----------
        user_func : function
            The user function with signature (data, window, ij, global_args).

        Returns
        -------
        ReaderBase
        """
        self.user_func = user_func
class manual_reader(ReaderBase):
    """Wraps the user's func in a manual reading pattern."""

    @tb_capture
    def __call__(self, task):
        """Run the user function, handing it the open sources directly."""
        window, ij = task
        result = self.user_func(srcs, window, ij, global_args)
        return result, window
class array_reader(ReaderBase):
    """Wraps the user's func in an array reading pattern."""

    @tb_capture
    def __call__(self, task):
        """Read all sources into one stacked array, then run the user function."""
        window, ij = task
        stacked = utils.array_stack(
            [src.read(window=window) for src in srcs]
        )
        result = self.user_func(stacked, window, ij, global_args)
        return result, window
class simple_reader(ReaderBase):
    """Wraps the user's func in a simple reading pattern."""

    @tb_capture
    def __call__(self, task):
        """Read each source into its own array, then run the user function."""
        window, ij = task
        chunks = [src.read(window=window) for src in srcs]
        result = self.user_func(chunks, window, ij, global_args)
        return result, window
class RioMucho(object):
    """Maps a raster processing function over blocks of data.
    Uses a multiprocessing pool to distribute the work.
    """
    def __init__(
        self,
        inpaths,
        outpath_or_dataset,
        run_function,
        mode="simple_read",
        windows=None,
        options=None,
        global_args=None,
    ):
        """Create a new instance
        Parameters
        ----------
        inpaths : list of str
            A list of input dataset paths or identifiers.
        outpath_or_dataset: str or dataset opened in 'w' mode
            This parameter specifies the dataset to which results will be
            written. If a str, a new dataset object will be created. Otherwise
            the results will be written to the open dataset.
        run_function : function
            The function to be mapped.
        mode : str, optional
            One of ["simple_read", "manual_read", "array_read"].
        windows : list, optional
            A list of windows to work on. If not overridden, this will be the
            block windows of the first source dataset.
        options : dict
            Creation options for the output dataset. If not overridden, this
            will be the profile of the first source dataset.
        global_args : dict
            Extra arguments for the user function.
        Returns
        -------
        RioMucho
        """
        self.inpaths = inpaths
        self.outpath_or_dataset = outpath_or_dataset
        self.run_function = run_function
        if mode not in ["simple_read", "manual_read", "array_read"]:
            raise ValueError(
                'mode must be one of: ["simple_read", "manual_read", "array_read"]'
            )
        else:
            self.mode = mode
        # Defaults derived from the first input when not supplied explicitly.
        self.windows = windows or utils.getWindows(inpaths[0])
        self.options = options or utils.getOptions(inpaths[0])
        self.global_args = global_args or {}
    def __enter__(self):
        return self
    def __exit__(self, ext_t, ext_v, trace):
        # No resources held directly; the pool is cleaned up in run().
        pass
    def run(self, processes=4):
        """Map the run function over the windows and write the results.

        With processes == 1 a single-process mock pool is used (easier to
        debug); otherwise a multiprocessing Pool whose workers are primed
        by init_worker with the open sources and global args.
        """
        if processes == 1:
            self.pool = MockTub(init_worker, (self.inpaths, self.global_args))
        else:
            self.pool = Pool(processes, init_worker, (self.inpaths, self.global_args))
        self.options["transform"] = guard_transform(self.options["transform"])
        # Pick the reader wrapper matching the requested read mode.
        if self.mode == "manual_read":
            reader_worker = manual_reader(self.run_function)
        elif self.mode == "array_read":
            reader_worker = array_reader(self.run_function)
        else:
            reader_worker = simple_reader(self.run_function)
        if isinstance(self.outpath_or_dataset, rasterio.io.DatasetWriter):
            destination = self.outpath_or_dataset
        else:
            destination = rasterio.open(self.outpath_or_dataset, "w", **self.options)
        # Open an output file, work through the function in parallel,
        # and write out the data. Results arrive in arbitrary order, which
        # is fine because each carries its own window.
        with destination as dst:
            for data, window in self.pool.imap_unordered(reader_worker, self.windows):
                dst.write(data, window=window)
        self.pool.close()
        self.pool.join()
self.pool.join() | /rio-mucho-1.0rc1.tar.gz/rio-mucho-1.0rc1/riomucho/__init__.py | 0.728169 | 0.244245 | __init__.py | pypi |
from __future__ import division
import numpy as np
from affine import Affine
from rasterio.enums import Resampling
from rasterio.warp import reproject
def _adjust_block_size(width, height, blocksize):
"""Adjusts blocksize by adding 1 if the remainder
from the division of height/width by blocksize is 1.
"""
if width % blocksize == 1:
blocksize += 1
elif height % blocksize == 1:
blocksize += 1
return blocksize
def _make_windows(width, height, blocksize):
"""Manually makes windows of size equivalent to
pan band image
"""
for x in range(0, width, blocksize):
for y in range(0, height, blocksize):
yield ((y, min((y + blocksize), height)),
(x, min((x + blocksize), width)))
def _make_affine(fr_shape, to_shape):
    """Build origin-anchored affines relating *from* and *to* array shapes.

    The source affine is an identity with a flipped y axis; the target
    affine scales by the shape ratio, also with y pointing down.
    """
    src_affine = Affine(1, 0, 0,
                        0, -1, 0)
    x_scale = fr_shape[1] / float(to_shape[1])
    y_scale = fr_shape[0] / float(to_shape[0])
    dst_affine = Affine(x_scale, 0, 0,
                        0, -y_scale, 0)
    return src_affine, dst_affine
def _half_window(window):
"""Computes half window sizes
"""
return tuple((w[0] / 2, w[1] / 2) for w in window)
def _check_crs(inputs):
"""Checks if crs of inputs are the same
"""
for i in range(1, len(inputs)):
if inputs[i-1]['crs'] != inputs[i]['crs']:
raise RuntimeError(
'CRS of inputs must be the same: '
'received %s and %s' % (inputs[i-1]['crs'],
inputs[i]['crs']))
def _create_apply_mask(rgb):
"""Create a mask of pixels where any channel is 0 (nodata),
then apply the mask to input numpy array.
"""
color_mask = np.all(
np.rollaxis(rgb, 0, 3) != 0,
axis=2
).astype(np.uint16) * np.iinfo(np.uint16).max
masked_rgb = np.array([
np.minimum(band, color_mask) for band in rgb])
return masked_rgb
def _upsample(rgb, panshape, src_aff, src_crs, to_aff, to_crs):
    """Upsample rgb to the shape of the panchromatic band
    using the reproject function from rasterio.warp, with bilinear
    resampling.

    Parameters are the source array/transform/CRS and the destination
    transform/CRS; the destination array is allocated here with the
    pan band's height and width and the source's band count and dtype.
    """
    # Destination buffer: same band count/dtype as rgb, pan's spatial shape.
    up_rgb = np.empty(
        (
            rgb.shape[0], panshape[0],
            panshape[1]), dtype=rgb.dtype
    )
    reproject(
        rgb, up_rgb,
        src_transform=src_aff,
        src_crs=src_crs,
        dst_transform=to_aff,
        dst_crs=to_crs,
        resampling=Resampling.bilinear)
    return up_rgb
def _simple_mask(data, ndv):
'''Exact nodata masking'''
nd = np.iinfo(data.dtype).max
alpha = np.invert(
np.all(np.dstack(data) == ndv, axis=2)
).astype(data.dtype) * nd
return alpha
def _pad_window(wnd, pad):
"""Add padding to windows
"""
return (
(wnd[0][0] - pad, wnd[0][1] + pad),
(wnd[1][0] - pad, wnd[1][1] + pad))
def _calc_windows(pan_src, customwindow):
    """Compute the list of (window, ij) pairs to process.

    With a nonzero integer `customwindow`, square windows of roughly that
    size are generated over the pan band's full extent (all tagged with a
    dummy ij of (0, 0)); otherwise the pan dataset's native block windows
    are used.
    """
    if customwindow != 0 and isinstance(customwindow, int):
        # Avoid degenerate 1-pixel strips at the edges.
        blocksize = _adjust_block_size(pan_src.meta['width'],
                                       pan_src.meta['height'],
                                       int(customwindow))
        windows = [(window, (0, 0))
                   for window in _make_windows(pan_src.meta['width'],
                                               pan_src.meta['height'],
                                               blocksize)]
    else:
        windows = [(window, ij) for ij, window in pan_src.block_windows()]
    return windows
def _rescale(arr, ndv, dst_dtype, out_alpha=True):
"""Convert an array from output dtype, scaling up linearly
"""
if dst_dtype == np.__dict__['uint16']:
scale = 1
else:
# convert to 8bit value range in place
scale = float(np.iinfo(np.uint16).max) / float(np.iinfo(np.uint8).max)
res = (arr / scale).astype(dst_dtype)
if out_alpha:
mask = _simple_mask(
arr.astype(dst_dtype),
(ndv, ndv, ndv)).reshape(
1, arr.shape[1], arr.shape[2])
return np.concatenate([res, mask])
else:
return res | /rio-pansharpen-0.2.0.tar.gz/rio-pansharpen-0.2.0/rio_pansharpen/utils.py | 0.88104 | 0.612744 | utils.py | pypi |
from __future__ import division
import click
import numpy as np
import rasterio
import riomucho
from rio_pansharpen.methods import Brovey
from rasterio.transform import guard_transform
from . utils import (
_pad_window, _upsample, _calc_windows, _check_crs,
_create_apply_mask, _half_window, _rescale)
def pansharpen(vis, vis_transform, pan, pan_transform,
               pan_dtype, r_crs, dst_crs, weight,
               method="Brovey", src_nodata=0):
    """Pansharpen a lower-resolution visual band
    Parameters
    =========
    vis: ndarray, 3D with shape == (3, vh, vw)
        Visual band array with RGB bands
    vis_transform: Affine
        affine transform defining the georeferencing of the vis array
    pan: ndarray, 2D with shape == (ph, pw)
        Panchromatic band array
    pan_transform: Affine
        affine transform defining the georeferencing of the pan array
    method: string
        Algorithm for pansharpening; default Brovey
    Returns:
    ======
    pansharp: ndarray, 3D with shape == (3, ph, pw)
        pansharpened visual band
        affine transform is identical to `pan_transform`
    """
    # Mask zero-valued (nodata) pixels, then resample RGB up to pan's grid.
    rgb = _upsample(_create_apply_mask(vis), pan.shape, vis_transform, r_crs,
                    pan_transform, dst_crs)
    # Main Pansharpening Processing
    if method == "Brovey":
        pansharp, _ = Brovey(rgb, pan, weight, pan_dtype)
    # TODO: add other methods
    # NOTE(review): if method is anything other than "Brovey", `pansharp`
    # is never assigned and the return raises NameError — guard when
    # adding new methods.
    return pansharp
def _pansharpen_worker(open_files, pan_window, _, g_args):
    """rio mucho worker for pansharpening. It reads input
    files and performing pansharpening on each window.
    Parameters
    ------------
    open_files: list of rasterio open files
        open_files[0] is the pan band; open_files[1:] are the RGB sources.
    pan_window: tuples
    g_args: dictionary
    Returns
    ---------
    out: None
        Output is written to dst_path
    """
    pan = open_files[0].read(1, window=pan_window).astype(np.float32)
    pan_dtype = open_files[0].meta['dtype']
    # Get the rgb window that covers the pan window
    if g_args.get("half_window"):
        # Fast path: assumes RGB is exactly half the pan resolution.
        rgb_window = _half_window(pan_window)
    else:
        # General path: derive the RGB window from the pan window's bounds,
        # padded to guard against edge effects in the bilinear upsample.
        padding = 2
        pan_bounds = open_files[0].window_bounds(pan_window)
        rgb_base_window = open_files[1].window(*pan_bounds)
        rgb_window = _pad_window(rgb_base_window, padding)
    # Determine affines for those windows
    pan_affine = open_files[0].window_transform(pan_window)
    rgb_affine = open_files[1].window_transform(rgb_window)
    rgb = riomucho.utils.array_stack(
        [src.read(window=rgb_window, boundless=True).astype(np.float32)
         for src in open_files[1:]])
    if g_args["verb"]:
        click.echo('pan shape: %s, rgb shape %s' % (pan.shape, rgb.shape))
    pansharpened = pansharpen(
        rgb, rgb_affine, pan, pan_affine, pan_dtype,
        g_args["r_crs"], g_args["dst_crs"],
        g_args["weight"], method="Brovey")
    # Scale back to the requested output dtype, optionally adding alpha.
    pan_rescale = _rescale(
        pansharpened, g_args["src_nodata"], g_args["dst_dtype"],
        out_alpha=g_args.get("out_alpha", True))
    return pan_rescale
def calculate_landsat_pansharpen(src_paths, dst_path, dst_dtype,
                                 weight, verbosity, jobs, half_window,
                                 customwindow, out_alpha, creation_opts):
    """Pansharpen a Landsat scene and write the result to dst_path.
    Parameters
    ------------
    src_paths: list of string (pan_path, r_path, g_path, b_path)
    dst_path: string
    dst_dtype: 'uint16', 'uint8'.
    weight: float
    jobs: integer
    half_window: boolean
    customwindow: integer
    out_alpha: boolean
        output an alpha band?
    creation_opts: dict
        creation options to update the write profile
    Returns
    ---------
    out: None
        Output is written to dst_path
    """
    with rasterio.open(src_paths[0]) as pan_src:
        windows = _calc_windows(pan_src, customwindow)
        profile = pan_src.profile
        if profile['count'] > 1:
            raise RuntimeError(
                "Pan band must be 1 band - is {}".format(profile['count']))
        # Resolve the dtype name to the numpy type object
        # (e.g. 'uint8' -> np.uint8).
        dst_dtype = np.__dict__[dst_dtype]
        profile.update(
            transform=guard_transform(pan_src.transform),
            dtype=dst_dtype,
            count=3,
            photometric='rgb')
        if out_alpha:
            profile['count'] = 4
        if creation_opts:
            profile.update(**creation_opts)
    with rasterio.open(src_paths[1]) as r_src:
        r_meta = r_src.meta
    # Sanity checks: pan must be strictly larger than the RGB bands and
    # all inputs must share one CRS.
    if profile['width'] <= r_meta['width'] or \
            profile['height'] <= r_meta['height']:
        raise RuntimeError(
            "Pan band must be larger than RGB bands")
    _check_crs([r_meta, profile])
    g_args = {
        "verb": verbosity,
        "half_window": half_window,
        "dst_dtype": dst_dtype,
        "out_alpha": out_alpha,
        "weight": weight,
        "dst_aff": guard_transform(profile['transform']),
        "dst_crs": profile['crs'],
        "r_aff": guard_transform(r_meta['transform']),
        "r_crs": r_meta['crs'],
        "src_nodata": 0}
    with riomucho.RioMucho(src_paths, dst_path, _pansharpen_worker,
                           windows=windows, global_args=g_args,
                           options=profile, mode='manual_read') as rm:
        rm.run(jobs)
rm.run(jobs) | /rio-pansharpen-0.2.0.tar.gz/rio-pansharpen-0.2.0/rio_pansharpen/worker.py | 0.811863 | 0.337476 | worker.py | pypi |
import click
import rasterio as rio
import numpy as np
from riomucho import RioMucho
import json
from rasterio.rio.options import creation_options
from rio_rgbify.encoders import data_to_rgb
from rio_rgbify.mbtiler import RGBTiler
def _rgb_worker(data, window, ij, g_args):
    """riomucho worker: RGB-encode one window of the selected band.

    `data[0]` is the stacked read of the single input; `bidx` is
    1-based, hence the -1.
    """
    return data_to_rgb(
        data[0][g_args["bidx"] - 1], g_args["base_val"], g_args["interval"]
    )
@click.command("rgbify")
@click.argument("src_path", type=click.Path(exists=True))
@click.argument("dst_path", type=click.Path(exists=False))
@click.option(
    "--base-val",
    "-b",
    type=float,
    default=0,
    help="The base value of which to base the output encoding on [DEFAULT=0]",
)
@click.option(
    "--interval",
    "-i",
    type=float,
    default=1,
    help="Describes the precision of the output, by incrementing interval [DEFAULT=1]",
)
@click.option("--bidx", type=int, default=1, help="Band to encode [DEFAULT=1]")
# The following options only apply to .mbtiles output.
@click.option(
    "--max-z",
    type=int,
    default=None,
    help="Maximum zoom to tile (.mbtiles output only)",
)
@click.option(
    "--bounding-tile",
    type=str,
    default=None,
    help="Bounding tile '[{x}, {y}, {z}]' to limit output tiles (.mbtiles output only)",
)
@click.option(
    "--min-z",
    type=int,
    default=None,
    help="Minimum zoom to tile (.mbtiles output only)",
)
@click.option(
    "--format",
    type=click.Choice(["png", "webp"]),
    default="png",
    help="Output tile format (.mbtiles output only)",
)
@click.option("--workers", "-j", type=int, default=4, help="Workers to run [DEFAULT=4]")
@click.option("--verbose", "-v", is_flag=True, default=False)
@click.pass_context
@creation_options
def rgbify(
    ctx,
    src_path,
    dst_path,
    base_val,
    interval,
    bidx,
    max_z,
    min_z,
    bounding_tile,
    format,
    workers,
    verbose,
    creation_options,
):
    """rio-rgbify cli.

    Encodes a raster band into RGB. The output container is chosen by the
    destination extension: .tif writes a 3-band GeoTIFF via riomucho;
    .mbtiles tiles the input into an MBTiles archive (zoom range required).
    """
    if dst_path.split(".")[-1].lower() == "tif":
        with rio.open(src_path) as src:
            meta = src.profile.copy()
        # Output is 3-band uint8 RGB; pass through any creation options.
        meta.update(count=3, dtype=np.uint8)
        for c in creation_options:
            meta[c] = creation_options[c]
        gargs = {"interval": interval, "base_val": base_val, "bidx": bidx}
        with RioMucho(
            [src_path], dst_path, _rgb_worker, options=meta, global_args=gargs
        ) as rm:
            rm.run(workers)
    elif dst_path.split(".")[-1].lower() == "mbtiles":
        # Tiled output needs an explicit, ordered zoom range.
        if min_z is None or max_z is None:
            raise ValueError("Zoom range must be provided for mbtile output")
        if max_z < min_z:
            raise ValueError(
                "Max zoom {0} must be greater than min zoom {1}".format(max_z, min_z)
            )
        if bounding_tile is not None:
            try:
                bounding_tile = json.loads(bounding_tile)
            except Exception:
                raise TypeError(
                    "Bounding tile of {0} is not valid".format(bounding_tile)
                )
        with RGBTiler(
            src_path,
            dst_path,
            interval=interval,
            base_val=base_val,
            format=format,
            bounding_tile=bounding_tile,
            max_z=max_z,
            min_z=min_z,
        ) as tiler:
            tiler.run(workers)
    else:
        raise ValueError(
            "{} output filetype not supported".format(dst_path.split(".")[-1])
) | /rio_rgbify-0.4.0-py3-none-any.whl/rio_rgbify/scripts/cli.py | 0.679072 | 0.184308 | cli.py | pypi |
# rio-stac
<p align="center">
<img src="https://user-images.githubusercontent.com/10407788/111794250-696da080-889c-11eb-9043-5bdc3aadb8bf.png" alt="rio-stac"></a>
</p>
<p align="center">
<em>Create STAC Items from raster datasets.</em>
</p>
<p align="center">
<a href="https://github.com/developmentseed/rio-stac/actions?query=workflow%3ACI" target="_blank">
<img src="https://github.com/developmentseed/rio-stac/workflows/CI/badge.svg" alt="Test">
</a>
<a href="https://codecov.io/gh/developmentseed/rio-stac" target="_blank">
<img src="https://codecov.io/gh/developmentseed/rio-stac/branch/main/graph/badge.svg" alt="Coverage">
</a>
<a href="https://pypi.org/project/rio-stac" target="_blank">
<img src="https://img.shields.io/pypi/v/rio-stac?color=%2334D058&label=pypi%20package" alt="Package version">
</a>
<a href="https://pypistats.org/packages/rio-stac" target="_blank">
<img src="https://img.shields.io/pypi/dm/rio-stac.svg" alt="Downloads">
</a>
<a href="https://github.com/developmentseed/rio-stac/blob/main/LICENSE" target="_blank">
<img src="https://img.shields.io/github/license/developmentseed/rio-stac.svg" alt="Downloads">
</a>
</p>
---
**Documentation**: <a href="https://developmentseed.github.io/rio-stac/" target="_blank">https://developmentseed.github.io/rio-stac/</a>
**Source Code**: <a href="https://github.com/developmentseed/rio-stac" target="_blank">https://github.com/developmentseed/rio-stac</a>
---
`rio-stac` is a simple [rasterio](https://github.com/mapbox/rasterio) plugin for creating valid STAC items from a raster dataset. The library is built on top of [pystac](https://github.com/stac-utils/pystac) to make sure we follow the STAC specification.
## Installation
```bash
$ pip install pip -U
# From Pypi
$ pip install rio-stac
# Or from source
$ pip install git+http://github.com/developmentseed/rio-stac
```
### Example
```json
// rio stac tests/fixtures/dataset_cog.tif | jq
{
"type": "Feature",
"stac_version": "1.0.0",
"id": "dataset_cog.tif",
"properties": {
"proj:epsg": 32621,
"proj:geometry": {
"type": "Polygon",
"coordinates": [
[
[
373185,
8019284.949381611
],
[
639014.9492102272,
8019284.949381611
],
[
639014.9492102272,
8286015
],
[
373185,
8286015
],
[
373185,
8019284.949381611
]
]
]
},
"proj:bbox": [
373185,
8019284.949381611,
639014.9492102272,
8286015
],
"proj:shape": [
2667,
2658
],
"proj:transform": [
100.01126757344893,
0,
373185,
0,
-100.01126757344893,
8286015,
0,
0,
1
],
"datetime": "2022-09-02T16:17:51.427680Z"
},
"geometry": {
"type": "Polygon",
"coordinates": [
[
[
-60.72634617297825,
72.23689137791739
],
[
-52.91627525610924,
72.22979795551834
],
[
-52.301598718454485,
74.61378388950398
],
[
-61.28762442711404,
74.62204314252978
],
[
-60.72634617297825,
72.23689137791739
]
]
]
},
"links": [],
"assets": {
"asset": {
"href": "/Users/vincentsarago/Dev/DevSeed/rio-stac/tests/fixtures/dataset_cog.tif",
"raster:bands": [
{
"data_type": "uint16",
"scale": 1,
"offset": 0,
"sampling": "point",
"statistics": {
"mean": 2107.524612053134,
"minimum": 1,
"maximum": 7872,
"stddev": 2271.0065537857326,
"valid_percent": 9.564764936336924e-05
},
"histogram": {
"count": 11,
"min": 1,
"max": 7872,
"buckets": [
503460,
0,
0,
161792,
283094,
0,
0,
0,
87727,
9431
]
}
}
],
"eo:bands": [
{
"name": "b1",
"description": "gray"
}
],
"roles": []
}
},
"bbox": [
-61.28762442711404,
72.22979795551834,
-52.301598718454485,
74.62204314252978
],
"stac_extensions": [
"https://stac-extensions.github.io/projection/v1.0.0/schema.json",
"https://stac-extensions.github.io/raster/v1.1.0/schema.json",
"https://stac-extensions.github.io/eo/v1.0.0/schema.json"
]
}
```
See https://developmentseed.org/rio-stac/intro/ for more.
## Contribution & Development
See [CONTRIBUTING.md](https://github.com/developmentseed/rio-stac/blob/main/CONTRIBUTING.md)
## Authors
See [contributors](https://github.com/developmentseed/rio-stac/graphs/contributors)
## Changes
See [CHANGES.md](https://github.com/developmentseed/rio-stac/blob/main/CHANGES.md).
## License
See [LICENSE](https://github.com/developmentseed/rio-stac/blob/main/LICENSE)
| /rio_stac-0.8.0.tar.gz/rio_stac-0.8.0/README.md | 0.52342 | 0.752763 | README.md | pypi |
import json
import click
import rasterio
from pystac import MediaType
from pystac.utils import datetime_to_str, str_to_datetime
from rasterio.rio import options
from rio_stac import create_stac_item
def _cb_key_val(ctx, param, value):
if not value:
return {}
else:
out = {}
for pair in value:
if "=" not in pair:
raise click.BadParameter(
"Invalid syntax for KEY=VAL arg: {}".format(pair)
)
else:
k, v = pair.split("=", 1)
out[k] = v
return out
@click.command()
@options.file_in_arg
@click.option(
    "--datetime",
    "-d",
    "input_datetime",
    type=str,
    help="The date and time of the assets, in UTC (e.g 2020-01-01, 2020-01-01T01:01:01).",
)
@click.option(
    "--extension",
    "-e",
    type=str,
    multiple=True,
    help="STAC extension URL the Item implements.",
)
@click.option(
    "--collection", "-c", type=str, help="The Collection ID that this item belongs to."
)
@click.option("--collection-url", type=str, help="Link to the STAC Collection.")
@click.option(
    "--property",
    "-p",
    metavar="NAME=VALUE",
    multiple=True,
    callback=_cb_key_val,
    help="Additional property to add.",
)
@click.option("--id", type=str, help="Item id.")
@click.option(
    "--asset-name",
    "-n",
    type=str,
    default="asset",
    help="Asset name.",
    show_default=True,
)
@click.option("--asset-href", type=str, help="Overwrite asset href.")
@click.option(
    "--asset-mediatype",
    type=click.Choice([it.name for it in MediaType] + ["auto"]),
    help="Asset media-type.",
)
@click.option(
    "--with-proj/--without-proj",
    default=True,
    help="Add the 'projection' extension and properties.",
    show_default=True,
)
@click.option(
    "--with-raster/--without-raster",
    default=True,
    help="Add the 'raster' extension and properties.",
    show_default=True,
)
@click.option(
    "--with-eo/--without-eo",
    default=True,
    help="Add the 'eo' extension and properties.",
    show_default=True,
)
@click.option(
    "--max-raster-size",
    type=int,
    default=1024,
    help="Limit array size from which to get the raster statistics.",
    show_default=True,
)
@click.option(
    "--densify-geom",
    type=int,
    help="Densifies the number of points on each edges of the polygon geometry to account for non-linear transformation.",
)
@click.option(
    "--geom-precision",
    type=int,
    default=-1,
    help="Round geometry coordinates to this number of decimal. By default, coordinates will not be rounded",
)
@click.option("--output", "-o", type=click.Path(exists=False), help="Output file name")
@click.option(
    "--config",
    "config",
    metavar="NAME=VALUE",
    multiple=True,
    callback=options._cb_key_val,
    help="GDAL configuration options.",
)
def stac(
    input,
    input_datetime,
    extension,
    collection,
    collection_url,
    property,
    id,
    asset_name,
    asset_href,
    asset_mediatype,
    with_proj,
    with_raster,
    with_eo,
    max_raster_size,
    densify_geom,
    geom_precision,
    output,
    config,
):
    """Rasterio STAC plugin: Create a STAC Item for raster dataset."""
    # NOTE: `property`, `id` and `input` shadow Python builtins; the names
    # are dictated by the click option/argument names above.
    property = property or {}
    densify_geom = densify_geom or 0
    if input_datetime:
        # "start/end" ranges become start_datetime/end_datetime properties;
        # a single value is passed through as the item datetime.
        if "/" in input_datetime:
            start_datetime, end_datetime = input_datetime.split("/")
            property["start_datetime"] = datetime_to_str(
                str_to_datetime(start_datetime)
            )
            property["end_datetime"] = datetime_to_str(str_to_datetime(end_datetime))
            input_datetime = None
        else:
            input_datetime = str_to_datetime(input_datetime)
    # "auto" leaves media-type detection to create_stac_item.
    if asset_mediatype and asset_mediatype != "auto":
        asset_mediatype = MediaType[asset_mediatype]
    extensions = [e for e in extension if e]
    with rasterio.Env(**config):
        item = create_stac_item(
            input,
            input_datetime=input_datetime,
            extensions=extensions,
            collection=collection,
            collection_url=collection_url,
            properties=property,
            id=id,
            asset_name=asset_name,
            asset_href=asset_href,
            asset_media_type=asset_mediatype,
            with_proj=with_proj,
            with_raster=with_raster,
            with_eo=with_eo,
            raster_max_size=max_raster_size,
            geom_densify_pts=densify_geom,
            geom_precision=geom_precision,
        )
    # Write compact JSON to the requested file, or echo to stdout.
    if output:
        with open(output, "w") as f:
            f.write(json.dumps(item.to_dict(), separators=(",", ":")))
click.echo(json.dumps(item.to_dict(), separators=(",", ":"))) | /rio_stac-0.8.0.tar.gz/rio_stac-0.8.0/rio_stac/scripts/cli.py | 0.542621 | 0.178777 | cli.py | pypi |
import time
import warnings
import concurrent.futures
import click
import numpy as np
import rasterio
import rio_terrain as rt
import rio_terrain.tools.messages as msg
from rio_terrain import __version__ as plugin_version
@click.command('aspect', short_help="Calculate aspect.")
@click.argument('input', nargs=1, type=click.Path(exists=True))
@click.argument('output', nargs=1, type=click.Path())
@click.option('--neighbors', type=click.Choice(['4', '8']), default='4',
              help='Specifies the number of neighboring cells to use.')
@click.option('--pcs', type=click.Choice(['compass', 'cartesian']), default='cartesian',
              help='Specifies the polar coordinate system.')
@click.option('-j', '--njobs', type=int, default=1, help='Number of concurrent jobs to run.')
@click.option('-v', '--verbose', is_flag=True, help='Enables verbose mode.')
@click.version_option(version=plugin_version, message='rio-terrain v%(version)s')
@click.pass_context
def aspect(ctx, input, output, neighbors, pcs, njobs, verbose):
    """Calculate aspect of a raster.

    INPUT should be a single-band raster.

    \b
    Example:
    rio aspect elevation.tif aspect.tif --pcs compass

    """
    # np.warnings was an undocumented alias removed in NumPy 1.24;
    # use the stdlib warnings module (already imported) directly.
    if verbose:
        warnings.filterwarnings('default')
    else:
        warnings.filterwarnings('ignore')

    t0 = time.time()
    command = click.get_current_context().info_name

    with rasterio.open(input) as src:
        profile = src.profile
        affine = src.transform
        # (xres, yres) taken from the affine transform; yres is negative
        res = (affine[0], affine[4])
        profile.update(dtype=rasterio.float32, count=1, compress='lzw')

        if (njobs >= 1) and src.is_tiled:
            blockshape = (list(src.block_shapes))[0]
            if (blockshape[0] == 1) or (blockshape[1] == 1):
                warnings.warn((msg.STRIPED).format(blockshape))
            # read with a 2-cell overlap so every written cell has a full neighborhood
            read_windows = rt.tile_grid(
                src.width, src.height, blockshape[0], blockshape[1], overlap=2)
            write_windows = rt.tile_grid(
                src.width, src.height, blockshape[0], blockshape[1], overlap=0)
        else:
            # NOTE(review): read_windows/write_windows are not defined on this
            # path, so njobs >= 1 on an untiled raster will raise NameError
            # below — confirm intended behavior for untiled input.
            blockshape = 128
            warnings.warn((msg.NOTILING).format(src.shape))

        with rasterio.open(output, 'w', **profile) as dst:
            if njobs < 1:
                # whole-raster, in-memory path
                click.echo((msg.STARTING).format(command, msg.INMEMORY))
                img = src.read(1)
                img[img <= src.nodata + 1] = np.nan  # treat nodata as NaN
                result = rt.aspect(img, res=res, pcs=pcs, neighbors=int(neighbors))
                dst.write(result.astype(profile['dtype']), 1)
            elif njobs == 1:
                # sequential, tile-by-tile path
                click.echo((msg.STARTING).format(command, msg.SEQUENTIAL))
                with click.progressbar(length=src.width * src.height, label='Blocks done:') as bar:
                    for (read_window, write_window) in zip(read_windows, write_windows):
                        img = src.read(1, window=read_window)
                        img[img <= src.nodata + 1] = np.nan
                        arr = rt.aspect(img, res=res, pcs=pcs, neighbors=int(neighbors))
                        # trim the overlap margins before writing
                        result = rt.trim(arr, rt.margins(read_window, write_window))
                        dst.write(result.astype(profile['dtype']), 1, window=write_window)
                        bar.update(result.size)
            else:
                # concurrent path: reads happen in the generator, compute in the pool
                click.echo((msg.STARTING).format(command, msg.CONCURRENT))

                def jobs():
                    for (read_window, write_window) in zip(read_windows, write_windows):
                        img = src.read(1, window=read_window)
                        img[img <= src.nodata + 1] = np.nan
                        yield img, read_window, write_window

                with concurrent.futures.ThreadPoolExecutor(max_workers=njobs) as executor, \
                        click.progressbar(length=src.width * src.height, label='Blocks done:') as bar:
                    future_to_window = {
                        executor.submit(
                            rt.aspect,
                            img,
                            res=res,
                            pcs=pcs,
                            neighbors=int(neighbors),
                        ): (read_window, write_window)
                        for (img, read_window, write_window) in jobs()
                    }
                    for future in concurrent.futures.as_completed(future_to_window):
                        read_window, write_window = future_to_window[future]
                        arr = future.result()
                        result = rt.trim(arr, rt.margins(read_window, write_window))
                        dst.write(result.astype(profile['dtype']), 1, window=write_window)
                        bar.update(result.size)

    click.echo((msg.WRITEOUT).format(output))
    click.echo((msg.COMPLETION).format(msg.printtime(t0, time.time())))
import time
import warnings
import concurrent.futures
import click
import numpy as np
import rasterio
import rio_terrain as rt
import rio_terrain.tools.messages as msg
from rio_terrain import __version__ as plugin_version
@click.command('slope', short_help="Calculate slope.")
@click.argument('input', nargs=1, type=click.Path(exists=True))
@click.argument('output', nargs=1, type=click.Path())
@click.option('--neighbors', type=click.Choice(['4', '8']), default='4',
              help='Specifies the number of neighboring cells to use.')
@click.option('-u', '--units', type=click.Choice(['grade', 'degrees']), default='grade',
              help='Specifies the units of slope.')
@click.option('-b', '--blocks', 'blocks', nargs=1, type=int, default=40,
              help='Multiple internal blocks to chunk.')
@click.option('-j', '--njobs', type=int, default=1, help='Number of concurrent jobs to run.')
@click.option('-v', '--verbose', is_flag=True, help='Enables verbose mode.')
@click.version_option(version=plugin_version, message='rio-terrain v%(version)s')
@click.pass_context
def slope(ctx, input, output, neighbors, units, blocks, njobs, verbose):
    """Calculate slope of a raster.

    INPUT should be a single-band raster.

    \b
    Example:
    rio slope elevation.tif slope.tif

    """
    # np.warnings was an undocumented alias removed in NumPy 1.24;
    # use the stdlib warnings module (already imported) directly.
    if verbose:
        warnings.filterwarnings('default')
    else:
        warnings.filterwarnings('ignore')

    t0 = time.time()
    command = click.get_current_context().info_name

    with rasterio.open(input) as src:
        profile = src.profile
        affine = src.transform
        # (xres, yres) taken from the affine transform; yres is negative
        res = (affine[0], affine[4])
        profile.update(dtype=rasterio.float32, count=1, compress='lzw')

        if (njobs >= 1) and src.is_tiled:
            blockshape = (list(src.block_shapes))[0]
            if (blockshape[0] == 1) or (blockshape[1] == 1):
                warnings.warn((msg.STRIPED).format(blockshape))
            # chunk `blocks` internal blocks at a time; read with a 2-cell
            # overlap so every written cell has a full neighborhood
            read_windows = rt.tile_grid(
                src.width,
                src.height,
                blockshape[0] * blocks,
                blockshape[1] * blocks,
                overlap=2,
            )
            write_windows = rt.tile_grid(
                src.width,
                src.height,
                blockshape[0] * blocks,
                blockshape[1] * blocks,
                overlap=0,
            )
        else:
            # NOTE(review): read_windows/write_windows are not defined on this
            # path, so njobs >= 1 on an untiled raster will raise NameError
            # below — confirm intended behavior for untiled input.
            blockshape = 128
            warnings.warn((msg.NOTILING).format(src.shape))

        with rasterio.open(output, 'w', **profile) as dst:
            if njobs < 1:
                # whole-raster, in-memory path
                click.echo((msg.STARTING).format(command, msg.INMEMORY))
                img = src.read(1)
                img[img <= src.nodata + 1] = np.nan  # treat nodata as NaN
                result = rt.slope(img, res=res, units=units, neighbors=int(neighbors))
                dst.write(result.astype(profile['dtype']), 1)
            elif njobs == 1:
                # sequential, tile-by-tile path
                click.echo((msg.STARTING).format(command, msg.SEQUENTIAL))
                with click.progressbar(length=src.width * src.height, label='Blocks done:') as bar:
                    for (read_window, write_window) in zip(read_windows, write_windows):
                        img = src.read(1, window=read_window)
                        img[img <= src.nodata + 1] = np.nan
                        arr = rt.slope(img, res=res, units=units, neighbors=int(neighbors))
                        # trim the overlap margins before writing
                        result = rt.trim(arr, rt.margins(read_window, write_window))
                        dst.write(result.astype(profile['dtype']), 1, window=write_window)
                        bar.update(result.size)
            else:
                # concurrent path: reads happen in the generator, compute in the pool
                click.echo((msg.STARTING).format(command, msg.CONCURRENT))

                def jobs():
                    for (read_window, write_window) in zip(read_windows, write_windows):
                        img = src.read(1, window=read_window)
                        img[img <= src.nodata + 1] = np.nan
                        yield img, read_window, write_window

                with concurrent.futures.ThreadPoolExecutor(max_workers=njobs) as executor, \
                        click.progressbar(length=src.width * src.height, label='Blocks done:') as bar:
                    future_to_window = {
                        executor.submit(
                            rt.slope,
                            img,
                            res=res,
                            units=units,
                            neighbors=int(neighbors),
                        ): (read_window, write_window)
                        for (img, read_window, write_window) in jobs()
                    }
                    for future in concurrent.futures.as_completed(future_to_window):
                        read_window, write_window = future_to_window[future]
                        arr = future.result()
                        result = rt.trim(arr, rt.margins(read_window, write_window))
                        dst.write(result.astype(profile['dtype']), 1, window=write_window)
                        bar.update(result.size)

    click.echo((msg.WRITEOUT).format(output))
    click.echo((msg.COMPLETION).format(msg.printtime(t0, time.time())))
import time
import warnings
import concurrent.futures
import click
import numpy as np
import rasterio
import rio_terrain as rt
import rio_terrain.tools.messages as msg
from rio_terrain import __version__ as plugin_version
def propagate(img0, img1, instrumental0, instrumental1):
    """Propagate two uncertainty rasters in quadrature.

    Values below the instrumental minimum are floored to that minimum
    before combining. Note: both input arrays are modified in place.

    Parameters:
        img0 (ndarray): uncertainty at time 0
        img1 (ndarray): uncertainty at time 1
        instrumental0 (float or None): minimum uncertainty for img0
        instrumental1 (float or None): minimum uncertainty for img1

    Returns:
        ndarray: combined uncertainty, sqrt(img0**2 + img1**2)
    """
    # Compare against None explicitly: a bare truthiness test would
    # silently skip a legitimate instrumental minimum of 0.0.
    if instrumental0 is not None:
        img0[img0 < instrumental0] = instrumental0
    if instrumental1 is not None:
        img1[img1 < instrumental1] = instrumental1
    result = np.sqrt(np.square(img1) + np.square(img0))
    return result
@click.command('uncertainty', short_help="Calculate a level-of-detection raster.")
@click.argument('uncertainty0', nargs=1, type=click.Path(exists=True))
@click.argument('uncertainty1', nargs=1, type=click.Path(exists=True))
@click.argument('output', nargs=1, type=click.Path())
@click.option('--instrumental0', nargs=1, default=None, type=float,
              help='Minimum uncertainty for the first raster.')
@click.option('--instrumental1', nargs=1, default=None, type=float,
              help='Minimum uncertainty for the second raster.')
@click.option('-j', '--njobs', type=int, default=1, help='Number of concurrent jobs to run.')
@click.option('-v', '--verbose', is_flag=True, help='Enables verbose mode.')
@click.version_option(version=plugin_version, message='rio-terrain v%(version)s')
@click.pass_context
def uncertainty(
    ctx,
    uncertainty0,
    uncertainty1,
    output,
    instrumental0,
    instrumental1,
    njobs,
    verbose,
):
    """Calculate a level-of-detection raster.

    \b
    UNCERTAINTY0 should be a single-band raster for uncertainty at time 0.
    UNCERTAINTY1 should be a single-band raster for uncertainty at time 1.

    \b
    Example:
    rio uncertainty roughness_t0.tif roughness_t1.tif uncertainty.tif

    """
    # np.warnings was an undocumented alias removed in NumPy 1.24;
    # use the stdlib warnings module (already imported) directly.
    if verbose:
        warnings.filterwarnings('default')
    else:
        warnings.filterwarnings('ignore')

    t0 = time.time()
    command = click.get_current_context().info_name

    with rasterio.open(uncertainty0) as src0, rasterio.open(uncertainty1) as src1:
        # refuse rasters that do not overlap or whose grids are offset
        if not rt.is_raster_intersecting(src0, src1):
            raise ValueError(msg.NONINTERSECTING)
        if not rt.is_raster_aligned(src0, src1):
            raise ValueError(msg.NONALIGNED)

        profile = src0.profile
        affine = src0.transform

        if njobs >= 1:
            block_shape = (src0.block_shapes)[0]
            blockxsize = block_shape[1]
            blockysize = block_shape[0]
        else:
            blockxsize = None
            blockysize = None

        # windows covering the intersection of both rasters, plus the
        # output grid definition (affine, nrows, ncols)
        tiles = rt.tile_grid_intersection(src0, src1, blockxsize=blockxsize, blockysize=blockysize)
        windows0, windows1, write_windows, affine, nrows, ncols = tiles

        profile.update(
            dtype=rasterio.float32,
            count=1,
            height=nrows,
            width=ncols,
            transform=affine,
            compress='lzw',
            bigtiff='yes',
        )

        with rasterio.open(output, 'w', **profile) as dst:
            if njobs < 1:
                # NOTE(review): this path reads both rasters in full and
                # ignores the intersection windows; for inputs with different
                # extents the arrays may be misaligned — confirm intent.
                click.echo((msg.STARTING).format(command, msg.INMEMORY))
                img0 = src0.read(1)
                img1 = src1.read(1)
                result = propagate(img0, img1, instrumental0, instrumental1)
                dst.write(result.astype(np.float32), 1)
            elif njobs == 1:
                # sequential, window-by-window path
                click.echo((msg.STARTING).format(command, msg.SEQUENTIAL))
                with click.progressbar(length=nrows * ncols, label='Blocks done:') as bar:
                    for (window0, window1, write_window) in zip(windows0, windows1, write_windows):
                        img0 = src0.read(1, window=window0)
                        img1 = src1.read(1, window=window1)
                        result = propagate(img0, img1, instrumental0, instrumental1)
                        dst.write(result.astype(np.float32), 1, window=write_window)
                        bar.update(result.size)
            else:
                # concurrent path: reads happen in the generator, compute in the pool
                click.echo((msg.STARTING).format(command, msg.CONCURRENT))

                def jobs():
                    for (window0, window1, write_window) in zip(windows0, windows1, write_windows):
                        img0 = src0.read(1, window=window0)
                        img1 = src1.read(1, window=window1)
                        yield img0, img1, window0, window1, write_window

                with concurrent.futures.ThreadPoolExecutor(max_workers=njobs) as executor, \
                        click.progressbar(length=nrows * ncols, label='Blocks done:') as bar:
                    future_to_window = {
                        executor.submit(
                            propagate, img0, img1, instrumental0, instrumental1
                        ): (write_window)
                        for (img0, img1, window0, window1, write_window) in jobs()
                    }
                    for future in concurrent.futures.as_completed(future_to_window):
                        write_window = future_to_window[future]
                        result = future.result()
                        dst.write(result.astype(np.float32), 1, window=write_window)
                        bar.update(result.size)

    click.echo((msg.WRITEOUT).format(output))
    click.echo((msg.COMPLETION).format(msg.printtime(t0, time.time())))
import time
import warnings
import concurrent.futures
import click
import numpy as np
import rasterio
import rio_terrain as rt
import rio_terrain.tools.messages as msg
from rio_terrain import __version__ as plugin_version
@click.command('curvature', short_help="Calculate curvature.")
@click.argument('input', nargs=1, type=click.Path(exists=True))
@click.argument('output', nargs=1, type=click.Path())
@click.option('--neighbors', type=click.Choice(['4', '8']), default='4',
              help='Specifies the number of neighboring cells to use.')
@click.option('--stats/--no-stats', is_flag=True, default=False,
              help='Print basic curvature statistics.')
@click.option('-j', '--njobs', type=int, default=1, help='Number of concurrent jobs to run.')
@click.option('-v', '--verbose', is_flag=True, help='Enables verbose mode.')
@click.version_option(version=plugin_version, message='rio-terrain v%(version)s')
@click.pass_context
def curvature(ctx, input, output, neighbors, stats, njobs, verbose):
    """Calculate curvature of a raster.

    INPUT should be a single-band raster.

    \b
    Example:
    rio curvature elevation.tif curvature.tif

    """
    # np.warnings was an undocumented alias removed in NumPy 1.24;
    # use the stdlib warnings module (already imported) directly.
    if verbose:
        warnings.filterwarnings('default')
    else:
        warnings.filterwarnings('ignore')

    # NOTE(review): the --stats option is accepted but never used in this
    # body — confirm whether statistics printing was meant to be wired up.

    t0 = time.time()
    command = click.get_current_context().info_name

    with rasterio.open(input) as src:
        profile = src.profile
        affine = src.transform
        # (xres, yres) taken from the affine transform; yres is negative
        res = (affine[0], affine[4])
        profile.update(dtype=rasterio.float32, count=1, compress='lzw')

        if (njobs >= 1) and src.is_tiled:
            blockshape = (list(src.block_shapes))[0]
            if (blockshape[0] == 1) or (blockshape[1] == 1):
                warnings.warn((msg.STRIPED).format(blockshape))
            # read with a 2-cell overlap so every written cell has a full neighborhood
            read_windows = rt.tile_grid(
                src.width, src.height, blockshape[0], blockshape[1], overlap=2)
            write_windows = rt.tile_grid(
                src.width, src.height, blockshape[0], blockshape[1], overlap=0)
        else:
            # NOTE(review): read_windows/write_windows are not defined on this
            # path, so njobs >= 1 on an untiled raster will raise NameError
            # below — confirm intended behavior for untiled input.
            blockshape = 128
            warnings.warn((msg.NOTILING).format(src.shape))

        with rasterio.open(output, 'w', **profile) as dst:
            if njobs < 1:
                # whole-raster, in-memory path
                click.echo((msg.STARTING).format(command, msg.INMEMORY))
                img = src.read(1)
                img[img <= src.nodata + 1] = np.nan  # treat nodata as NaN
                result = rt.curvature(img, res=res, neighbors=int(neighbors))
                dst.write(result.astype(profile['dtype']), 1)
            elif njobs == 1:
                # sequential, tile-by-tile path
                click.echo((msg.STARTING).format(command, msg.SEQUENTIAL))
                with click.progressbar(length=src.width * src.height, label='Blocks done:') as bar:
                    for (read_window, write_window) in zip(read_windows, write_windows):
                        img = src.read(1, window=read_window)
                        img[img <= src.nodata + 1] = np.nan
                        arr = rt.curvature(img, res=res, neighbors=int(neighbors))
                        # trim the overlap margins before writing
                        result = rt.trim(arr, rt.margins(read_window, write_window))
                        dst.write(result.astype(profile['dtype']), 1, window=write_window)
                        bar.update(result.size)
            else:
                # concurrent path: reads happen in the generator, compute in the pool
                click.echo((msg.STARTING).format(command, msg.CONCURRENT))

                def jobs():
                    for (read_window, write_window) in zip(read_windows, write_windows):
                        img = src.read(1, window=read_window)
                        img[img <= src.nodata + 1] = np.nan
                        yield img, read_window, write_window

                with concurrent.futures.ThreadPoolExecutor(max_workers=njobs) as executor, \
                        click.progressbar(length=src.width * src.height, label='Blocks done:') as bar:
                    future_to_window = {
                        executor.submit(
                            rt.curvature, img, res=res, neighbors=int(neighbors)
                        ): (read_window, write_window)
                        for img, read_window, write_window in jobs()
                    }
                    for future in concurrent.futures.as_completed(future_to_window):
                        read_window, write_window = future_to_window[future]
                        arr = future.result()
                        result = rt.trim(arr, rt.margins(read_window, write_window))
                        dst.write(result.astype(profile['dtype']), 1, window=write_window)
                        bar.update(result.size)

    click.echo((msg.WRITEOUT).format(output))
    click.echo((msg.COMPLETION).format(msg.printtime(t0, time.time())))
import time
import warnings
import concurrent.futures
import click
import numpy as np
import rasterio
import rio_terrain as rt
import rio_terrain.tools.messages as msg
from rio_terrain import __version__ as plugin_version
def do_slice(img, minimum=None, maximum=None, keep_data=False, false_val=0):
    """Mask an array to a value range.

    Cells within [minimum, maximum] keep their data when keep_data is
    True, otherwise become 1; all other cells (including NaN) take
    false_val.

    Parameters:
        img (ndarray): input array
        minimum (float): lower bound; defaults to nanmin of img
        maximum (float): upper bound; defaults to nanmax of img
        keep_data (bool): return data values instead of ones
        false_val: value assigned outside the range

    Returns:
        result (ndarray)
    """
    # fall back to the observed data range when a bound is omitted
    lo = np.nanmin(img) if minimum is None else minimum
    hi = np.nanmax(img) if maximum is None else maximum

    in_range = (img >= lo) & (img <= hi)
    true_val = img if keep_data else 1
    return np.where(in_range, true_val, false_val)
@click.command('slice', short_help="Extract regions by data range.")
@click.argument('input', nargs=1, type=click.Path(exists=True))
@click.argument('output', nargs=1, type=click.Path())
@click.option('--minimum', nargs=1, type=float, default=None, help='Minimum value to extract.')
@click.option('--maximum', nargs=1, type=float, default=None, help='Maximum value to extract.')
@click.option('--keep-data/--no-keep-data', is_flag=True,
              help='Return the input data, or return ones.')
@click.option('--zeros/--no-zeros', is_flag=True,
              help='Use the raster nodata value or zeros for False condition.')
@click.option('-j', '--njobs', type=int, default=1, help='Number of concurrent jobs to run.')
@click.option('-v', '--verbose', is_flag=True, help='Enables verbose mode.')
@click.version_option(version=plugin_version, message='rio-terrain v%(version)s')
@click.pass_context
def slice(ctx, input, output, minimum, maximum, keep_data, zeros, njobs, verbose):
    """Extract regions from a raster by a data range.

    INPUT should be a single-band raster.

    \b
    Setting the --keep-data option will return the data values.
    The default is to return a raster of ones and zeros.

    \b
    Example:
    rio slice diff.tif extracted.tif --minimum -2.0 --maximum 2.0

    """
    # np.warnings was an undocumented alias removed in NumPy 1.24;
    # use the stdlib warnings module (already imported) directly.
    if verbose:
        warnings.filterwarnings('default')
    else:
        warnings.filterwarnings('ignore')

    t0 = time.time()
    command = click.get_current_context().info_name

    with rasterio.open(input) as src:
        profile = src.profile

        if keep_data:
            # preserve the source dtype/nodata when passing data through
            dtype = profile['dtype']
            nodata = profile['nodata']
            profile.update(count=1, compress='lzw')
        else:
            # boolean-style output written as int32
            dtype = 'int32'
            nodata = np.iinfo(np.int32).min
            profile.update(
                dtype=rasterio.int32, nodata=nodata, count=1, compress='lzw'
            )

        false_val = 0 if zeros else nodata

        with rasterio.open(output, 'w', **profile) as dst:
            if njobs < 1:
                # whole-raster, in-memory path
                click.echo((msg.STARTING).format(command, msg.INMEMORY))
                img = src.read(1)
                result = do_slice(img, minimum, maximum, keep_data, false_val)
                dst.write(result.astype(dtype), 1)
            elif njobs == 1:
                # sequential, block-by-block path
                click.echo((msg.STARTING).format(command, msg.SEQUENTIAL))
                with click.progressbar(length=src.width * src.height, label='Blocks done:') as bar:
                    for (ij, window) in src.block_windows():
                        img = src.read(1, window=window)
                        result = do_slice(img, minimum, maximum, keep_data, false_val)
                        dst.write(result.astype(dtype), 1, window=window)
                        bar.update(result.size)
            else:
                # concurrent path: reads happen in the generator, compute in the pool
                click.echo((msg.STARTING).format(command, msg.CONCURRENT))

                def jobs():
                    for (ij, window) in src.block_windows():
                        img = src.read(1, window=window)
                        yield img, window

                with concurrent.futures.ThreadPoolExecutor(max_workers=njobs) as executor, \
                        click.progressbar(length=src.width*src.height, label='Blocks done:') as bar:
                    future_to_window = {
                        executor.submit(
                            do_slice, img, minimum, maximum, keep_data, false_val
                        ): (window)
                        for (img, window) in jobs()
                    }
                    for future in concurrent.futures.as_completed(future_to_window):
                        window = future_to_window[future]
                        result = future.result()
                        dst.write(result.astype(dtype), 1, window=window)
                        bar.update(result.size)

    click.echo((msg.WRITEOUT).format(output))
    click.echo((msg.COMPLETION).format(msg.printtime(t0, time.time())))
import time
import warnings
import concurrent.futures
import click
import numpy as np
import rasterio
import rio_terrain as rt
import rio_terrain.tools.messages as msg
from rio_terrain import __version__ as plugin_version
def do_threshold(img0, img1, level, default=0):
    """Classify a difference raster against scaled uncertainty.

    Parameters:
        img0 (ndarray): difference values
        img1 (ndarray): uncertainty values
        level (float): scaling factor applied to the uncertainty
        default: value for cells within the detection threshold

    Returns:
        ndarray: 1 where img0 >= img1*level, -1 where img0 <= -img1*level,
        otherwise default
    """
    threshold_img = img1 * level
    return np.select(
        [img0 >= threshold_img, img0 <= -threshold_img],
        [1, -1],
        default=default,
    )
@click.command('threshold', short_help="Threshold a raster with an uncertainty raster.")
@click.argument('input', nargs=1, type=click.Path(exists=True))
@click.argument('uncertainty', nargs=1, type=click.Path(exists=True))
@click.argument('output', nargs=1, type=click.Path())
@click.argument('level', nargs=1, type=float)
@click.option('-j', '--njobs', type=int, default=1, help='Number of concurrent jobs to run.')
@click.option('-v', '--verbose', is_flag=True, help='Enables verbose mode.')
@click.version_option(version=plugin_version, message='rio-terrain v%(version)s')
@click.pass_context
def threshold(ctx, input, uncertainty, output, level, njobs, verbose):
    """Threshold a raster with an uncertainty raster.

    \b
    INPUT should be a single-band raster.
    UNCERTAINTY should be a single-band raster representing uncertainty.

    \b
    Example:
    rio threshold diff.tif uncertainty.tif detected.tif 1.68

    """
    # np.warnings was an undocumented alias removed in NumPy 1.24;
    # use the stdlib warnings module (already imported) directly.
    if verbose:
        warnings.filterwarnings('default')
    else:
        warnings.filterwarnings('ignore')

    t0 = time.time()
    command = click.get_current_context().info_name

    with rasterio.open(input) as src0, rasterio.open(uncertainty) as src1:
        # refuse rasters that do not overlap or whose grids are offset
        if not rt.is_raster_intersecting(src0, src1):
            raise ValueError(msg.NONINTERSECTING)
        if not rt.is_raster_aligned(src0, src1):
            raise ValueError(msg.NONALIGNED)

        profile = src0.profile
        affine = src0.transform
        nodata = np.iinfo(np.int32).min

        if njobs >= 1:
            block_shape = (src0.block_shapes)[0]
            blockxsize = block_shape[1]
            blockysize = block_shape[0]
        else:
            blockxsize = None
            blockysize = None

        # windows covering the intersection of both rasters, plus the
        # output grid definition (affine, nrows, ncols)
        tiles = rt.tile_grid_intersection(
            src0, src1, blockxsize=blockxsize, blockysize=blockysize
        )
        windows0, windows1, write_windows, affine, nrows, ncols = tiles

        profile.update(
            dtype=rasterio.int32,
            nodata=nodata,
            count=1,
            height=nrows,
            width=ncols,
            transform=affine,
            compress='lzw',
            bigtiff='yes',
        )

        with rasterio.open(output, 'w', **profile) as dst:
            if njobs < 1:
                # NOTE(review): this path reads only the first intersection
                # window yet writes to the full output raster — looks wrong
                # for inputs that span more than one window; confirm intent.
                click.echo((msg.STARTING).format(command, msg.INMEMORY))
                img0 = src0.read(1, window=next(windows0))
                img1 = src1.read(1, window=next(windows1))
                result = do_threshold(img0, img1, level, default=nodata)
                dst.write(result, 1)
            elif njobs == 1:
                # sequential, window-by-window path
                click.echo((msg.STARTING).format(command, msg.SEQUENTIAL))
                with click.progressbar(length=nrows * ncols, label='Blocks done:') as bar:
                    for (window0, window1, write_window) in zip(windows0, windows1, write_windows):
                        img0 = src0.read(1, window=window0)
                        img1 = src1.read(1, window=window1)
                        result = do_threshold(img0, img1, level, default=nodata)
                        dst.write(result, 1, window=write_window)
                        bar.update(result.size)
            else:
                # concurrent path: reads happen in the generator, compute in the pool
                click.echo((msg.STARTING).format(command, msg.CONCURRENT))

                def jobs():
                    for (window0, window1, write_window) in zip(windows0, windows1, write_windows):
                        img0 = src0.read(1, window=window0)
                        img1 = src1.read(1, window=window1)
                        yield img0, img1, window0, window1, write_window

                with concurrent.futures.ThreadPoolExecutor(max_workers=njobs) as executor, \
                        click.progressbar(length=nrows * ncols, label='Blocks done:') as bar:
                    future_to_window = {
                        executor.submit(
                            do_threshold, img0, img1, level, default=nodata
                        ): (window0, window1, write_window)
                        for (img0, img1, window0, window1, write_window) in jobs()
                    }
                    for future in concurrent.futures.as_completed(future_to_window):
                        window0, window1, write_window = future_to_window[future]
                        result = future.result()
                        dst.write(result, 1, window=write_window)
                        bar.update(result.size)

    click.echo((msg.WRITEOUT).format(output))
    click.echo((msg.COMPLETION).format(msg.printtime(t0, time.time())))
import time
import warnings
import concurrent.futures
import click
import numpy as np
import rasterio
import rio_terrain as rt
import rio_terrain.tools.messages as msg
from rio_terrain import __version__ as plugin_version
def do_extract(img, categorical, category):
    """Zero out cells whose category is not among those selected.

    Parameters:
        img (ndarray): data values
        categorical (ndarray): category labels, same shape as img
        category (sequence or None): labels to keep; defaults to [1]

    Returns:
        ndarray: img where the label matches, 0 elsewhere
    """
    selected = [1] if category is None else list(category)
    keep = np.isin(categorical, selected)
    # multiplying by the boolean mask zeroes the non-selected cells
    return img * keep
@click.command('extract', short_help="Extract regions by category.")
@click.argument('input', nargs=1, type=click.Path(exists=True))
@click.argument('categorical', nargs=1, type=click.Path(exists=True))
@click.argument('output', nargs=1, type=click.Path())
@click.option('-c', '--category', multiple=True, type=int, help='Category to extract.')
@click.option('-j', '--njobs', type=int, default=1, help='Number of concurrent jobs to run')
@click.option('-v', '--verbose', is_flag=True, help='Enables verbose mode.')
@click.version_option(version=plugin_version, message='rio-terrain v%(version)s')
@click.pass_context
def extract(ctx, input, categorical, output, category, njobs, verbose):
    """Extract regions from a raster by category.

    \b
    INPUT should be a single-band raster.
    CATEGORICAL should be a single-band raster with categories to extract.
    The categorical data may be the input raster or another raster.

    \b
    Example:
    rio extract diff.tif categorical.tif extract.tif -c 1 -c 3

    """
    # np.warnings was an undocumented alias removed in NumPy 1.24;
    # use the stdlib warnings module (already imported) directly.
    if verbose:
        warnings.filterwarnings('default')
    else:
        warnings.filterwarnings('ignore')

    t0 = time.time()
    command = click.get_current_context().info_name

    with rasterio.open(input) as src, rasterio.open(categorical) as cat:
        # refuse rasters that do not overlap or whose grids are offset
        if not rt.is_raster_intersecting(src, cat):
            raise ValueError(msg.NONINTERSECTING)
        if not rt.is_raster_aligned(src, cat):
            raise ValueError(msg.NONALIGNED)

        profile = src.profile
        affine = src.transform

        if njobs >= 1:
            block_shape = (src.block_shapes)[0]
            blockxsize = block_shape[1]
            blockysize = block_shape[0]
        else:
            blockxsize = None
            blockysize = None

        # windows covering the intersection of both rasters, plus the
        # output grid definition (affine, nrows, ncols)
        tiles = rt.tile_grid_intersection(src, cat, blockxsize=blockxsize, blockysize=blockysize)
        windows0, windows1, write_windows, affine, nrows, ncols = tiles

        profile.update(
            count=1,
            compress='lzw',
            bigtiff='yes',
            height=nrows,
            width=ncols,
            transform=affine,
        )

        with rasterio.open(output, 'w', **profile) as dst:
            if njobs < 1:
                # NOTE(review): this path processes only the first
                # intersection window — looks wrong for inputs that span
                # more than one window; confirm intent.
                click.echo((msg.STARTING).format(command, msg.INMEMORY))
                img = src.read(1, window=next(windows0))
                mask = cat.read(1, window=next(windows1))
                result = do_extract(img, mask, category)
                dst.write(result.astype(profile['dtype']), 1, window=next(write_windows))
            elif njobs == 1:
                # sequential, window-by-window path
                click.echo((msg.STARTING).format(command, msg.SEQUENTIAL))
                with click.progressbar(length=nrows * ncols, label='Blocks done:') as bar:
                    for (window0, window1, write_window) in zip(windows0, windows1, write_windows):
                        img = src.read(1, window=window0)
                        mask = cat.read(1, window=window1)
                        result = do_extract(img, mask, category)
                        dst.write(result.astype(profile['dtype']), 1, window=write_window)
                        bar.update(result.size)
            else:
                # concurrent path: reads happen in the generator, compute in the pool
                click.echo((msg.STARTING).format(command, msg.CONCURRENT))

                def jobs():
                    for (window0, window1, write_window) in zip(windows0, windows1, write_windows):
                        img = src.read(1, window=window0)
                        mask = cat.read(1, window=window1)
                        yield img, mask, window0, window1, write_window

                with concurrent.futures.ThreadPoolExecutor(max_workers=njobs) as executor, \
                        click.progressbar(length=nrows * ncols, label='Blocks done:') as bar:
                    future_to_window = {
                        executor.submit(do_extract, img, mask, category): (
                            window0,
                            window1,
                            write_window,
                        )
                        for (img, mask, window0, window1, write_window) in jobs()
                    }
                    for future in concurrent.futures.as_completed(future_to_window):
                        window0, window1, write_window = future_to_window[future]
                        result = future.result()
                        dst.write(result.astype(profile['dtype']), 1, window=write_window)
                        bar.update(result.size)

    click.echo((msg.WRITEOUT).format(output))
    click.echo((msg.COMPLETION).format(msg.printtime(t0, time.time())))
import time
import warnings
import concurrent.futures
import click
import numpy as np
import rasterio
import rio_terrain as rt
import rio_terrain.tools.messages as msg
from rio_terrain.core import focalstatistics
from rio_terrain import __version__ as plugin_version
@click.command('mad', short_help="Calculate median absolute deviation.")
@click.argument('input', nargs=1, type=click.Path(exists=True))
@click.argument('output', nargs=1, type=click.Path())
@click.option('-n', '--neighborhood', nargs=1, default=3, help='Neighborhood size in cells.')
@click.option('-b', '--blocks', 'blocks', nargs=1, type=int, default=40,
              help='Multiple internal blocks to chunk.')
@click.option('-j', '--njobs', type=int, default=1, help='Number of concurrent jobs to run.')
@click.option('-v', '--verbose', is_flag=True, help='Enables verbose mode.')
@click.version_option(version=plugin_version, message='rio-terrain v%(version)s')
@click.pass_context
def mad(ctx, input, output, neighborhood, blocks, njobs, verbose):
    """Calculate a median absolute deviation raster.

    INPUT should be a single-band raster.

    \b
    Example:
    rio mad elevation.tif mad.tif

    """
    # np.warnings was an undocumented alias removed in NumPy 1.24;
    # use the stdlib warnings module (already imported) directly.
    if verbose:
        warnings.filterwarnings('default')
    else:
        warnings.filterwarnings('ignore')

    t0 = time.time()
    command = click.get_current_context().info_name

    with rasterio.open(input) as src:
        profile = src.profile
        profile.update(dtype=rasterio.float32, count=1, compress='lzw')

        if (njobs >= 1) and src.is_tiled:
            blockshape = (list(src.block_shapes))[0]
            if (blockshape[0] == 1) or (blockshape[1] == 1):
                warnings.warn((msg.STRIPED).format(blockshape))
            # chunk `blocks` internal blocks at a time; the read overlap
            # matches the focal neighborhood so edges are computed correctly
            read_windows = rt.tile_grid(
                src.width,
                src.height,
                blockshape[0] * blocks,
                blockshape[1] * blocks,
                overlap=neighborhood,
            )
            write_windows = rt.tile_grid(
                src.width,
                src.height,
                blockshape[0] * blocks,
                blockshape[1] * blocks,
                overlap=0,
            )
        else:
            # NOTE(review): read_windows/write_windows are not defined on this
            # path, so njobs >= 1 on an untiled raster will raise NameError
            # below — confirm intended behavior for untiled input.
            blockshape = 128
            warnings.warn((msg.NOTILING).format(src.shape))

        with rasterio.open(output, 'w', **profile) as dst:
            if njobs < 1:
                # whole-raster, in-memory path
                click.echo((msg.STARTING).format(command, msg.INMEMORY))
                img = src.read(1)
                img[img <= src.nodata + 1] = np.nan  # treat nodata as NaN
                result = focalstatistics.mad(img, size=(neighborhood, neighborhood))
                dst.write(result.astype(profile['dtype']), 1)
            elif njobs == 1:
                # sequential, tile-by-tile path
                click.echo((msg.STARTING).format(command, msg.SEQUENTIAL))
                with click.progressbar(length=src.width * src.height, label='Blocks done:') as bar:
                    for (read_window, write_window) in zip(read_windows, write_windows):
                        img = src.read(1, window=read_window)
                        img[img <= src.nodata + 1] = np.nan
                        arr = focalstatistics.mad(img, size=(neighborhood, neighborhood))
                        # trim the overlap margins before writing
                        result = rt.trim(arr, rt.margins(read_window, write_window))
                        dst.write(result.astype(profile['dtype']), 1, window=write_window)
                        bar.update(result.size)
            else:
                # concurrent path: reads happen in the generator, compute in the pool
                click.echo((msg.STARTING).format(command, msg.CONCURRENT))

                def jobs():
                    for (read_window, write_window) in zip(read_windows, write_windows):
                        img = src.read(1, window=read_window)
                        img[img <= src.nodata + 1] = np.nan
                        yield img, read_window, write_window

                with concurrent.futures.ThreadPoolExecutor(max_workers=njobs) as executor, \
                        click.progressbar(length=src.width*src.height, label='Blocks done:') as bar:
                    future_to_window = {
                        executor.submit(
                            focalstatistics.mad,
                            img,
                            size=(neighborhood, neighborhood),
                        ): (read_window, write_window)
                        for (img, read_window, write_window) in jobs()
                    }
                    for future in concurrent.futures.as_completed(future_to_window):
                        read_window, write_window = future_to_window[future]
                        arr = future.result()
                        result = rt.trim(arr, rt.margins(read_window, write_window))
                        dst.write(result.astype(profile['dtype']), 1, window=write_window)
                        bar.update(result.size)

    click.echo((msg.WRITEOUT).format(output))
    click.echo((msg.COMPLETION).format(msg.printtime(t0, time.time())))
import time
import warnings
from itertools import repeat
import concurrent.futures
import multiprocessing
from math import ceil
import click
import numpy as np
import rasterio
from crick import TDigest
from scipy import stats
from scipy.stats.mstats import mquantiles
import matplotlib.pyplot as plt
import rio_terrain as rt
import rio_terrain.tools.messages as msg
from rio_terrain import __version__ as plugin_version
def tdigest_mean(digest):
    """Estimate the mean of the data summarized by a t-digest.

    Computes the weighted average of the digest's centroid means.
    """
    centroids = digest.centroids()
    weights = centroids['weight']
    return np.sum(centroids['mean'] * weights) / np.sum(weights)
def tdigest_std(digest):
    """Estimate the standard deviation of the data summarized by a t-digest.

    Uses the weighted sample variance of the centroid means
    (denominator: total weight minus one).
    """
    ctr = digest.centroids()
    weights = ctr['weight']
    # weighted mean of the centroid means (same estimate as tdigest_mean)
    mean = np.sum(ctr['mean'] * weights) / np.sum(weights)
    variance = (np.square(ctr['mean'] - mean) * weights).sum() / (weights.sum() - 1)
    return np.sqrt(variance)
def tdigest_stats(digest):
    """Summarize a t-digest as (min, max, mean, std)."""
    return (
        digest.min(),
        digest.max(),
        tdigest_mean(digest),
        tdigest_std(digest),
    )
def digest_window(file, window, absolute):
    '''Process worker that calculates a t-digest on a raster window.

    Parameters:
        file : path to the raster; each worker re-opens the dataset so
            no handle is shared across processes
        window : ``(ij, Window)`` pair as yielded by rasterio
            ``block_windows()``; only the Window (index 1) is read
        absolute : if True, digest the absolute values

    Returns:
        tuple : the input ``(ij, Window)`` pair, a TDigest of the finite
            values, and the finite-value count
    '''
    with rasterio.open(file) as src:
        # window is an (ij, Window) tuple; read only the Window part.
        img = src.read(1, window=window[1])
        # Mask nodata (and anything at or just above it) as NaN.
        img[img <= src.nodata+1] = np.nan
        arr = img[np.isfinite(img)]
        if absolute:
            arr = np.absolute(arr)
        count_ = np.count_nonzero(~np.isnan(img))
        digest_ = TDigest()
        digest_.update(arr.flatten())
    return window, digest_, count_
@click.command('quantiles', short_help="Calculate quantile values.")
@click.argument('input', nargs=1, type=click.Path(exists=True))
@click.option('-q', '--quantile', multiple=True, type=float,
              help='Print quantile value.')
@click.option('-f', '--fraction', nargs=1, default=1.0,
              help='Randomly sample a fraction of internal blocks.')
@click.option('--absolute/--no-absolute', default=False,
              help='Calculate quantiles for the absolute values.')
@click.option('--describe/--no-describe', default=False,
              help='Print descriptive statistics to the console.')
@click.option('--plot/--no-plot', default=False,
              help='Display statistics plots.')
@click.option('-j', '--jobs', 'njobs', type=int, default=multiprocessing.cpu_count(),
              help='Number of concurrent jobs to run.')
@click.option('-v', '--verbose', is_flag=True, help='Enables verbose mode.')
@click.version_option(version=plugin_version, message='rio-terrain v%(version)s')
@click.pass_context
def quantiles(ctx, input, quantile, fraction, absolute, describe, plot, njobs, verbose):
    """Calculate and print quantile values.

    INPUT should be a single-band raster.

    \b
    Example:
    rio quantiles elevation.tif -q 0.5 -q 0.9

    """
    if verbose:
        # BUGFIX: `np.warnings` was a deprecated alias removed in NumPy >= 1.24;
        # use the stdlib `warnings` module (imported at the top of this file).
        warnings.filterwarnings('default')
    else:
        warnings.filterwarnings('ignore')

    t0 = time.time()
    command = click.get_current_context().info_name

    count = 0
    with rasterio.open(input) as src:
        profile = src.profile
        affine = src.transform

        if njobs < 1:
            # In-memory path: read the whole band and use exact statistics.
            click.echo("Running quantiles in-memory")
            img = src.read(1)
            img[img <= src.nodata+1] = np.nan
            arr = img[np.isfinite(img)]
            if absolute:
                arr = np.absolute(arr)
            count = np.count_nonzero(~np.isnan(img))
            description = (arr.min(), arr.max(), arr.mean(), arr.std())
            results = zip(quantile, mquantiles(arr, np.array(quantile)))
        elif njobs == 1:
            # Sequential path: merge per-block t-digests into one estimate.
            blocks = rt.subsample(src.block_windows(), probability=fraction)
            n_blocks = ceil(rt.block_count(src.shape, src.block_shapes) * fraction)
            digest = TDigest()
            click.echo("Running quantiles with sequential t-digest")
            with click.progressbar(length=n_blocks, label='Blocks done:') as bar:
                for ij, window in blocks:
                    img = src.read(1, window=window)
                    img[img <= src.nodata+1] = np.nan
                    arr = img[np.isfinite(img[:])]
                    if absolute:
                        arr = np.absolute(arr)
                    window_count = np.count_nonzero(~np.isnan(img))
                    if window_count > 0:
                        # Skip all-nodata blocks; merging an empty digest is pointless.
                        window_digest = TDigest()
                        window_digest.update(arr.flatten())
                        digest.merge(window_digest)
                        count += window_count
                    bar.update(1)
            description = tdigest_stats(digest)
            results = zip(quantile, digest.quantile(quantile))
        else:
            # Multiprocess path: digest_window workers each re-open the file.
            blocks = rt.subsample(src.block_windows(), probability=fraction)
            n_blocks = ceil(rt.block_count(src.shape, src.block_shapes)*fraction)
            digest = TDigest()
            click.echo("Running quantiles with multiprocess t-digest")
            with concurrent.futures.ProcessPoolExecutor(max_workers=njobs) as executor, \
                    click.progressbar(length=n_blocks, label='Blocks done:') as bar:
                for (window, window_digest, window_count) in executor.map(digest_window, repeat(input), blocks, repeat(absolute)):
                    if window_count > 0:
                        digest.merge(window_digest)
                        count += window_count
                    bar.update(1)
            description = tdigest_stats(digest)
            results = zip(quantile, digest.quantile(quantile))

    click.echo((msg.COMPLETION).format(msg.printtime(t0, time.time())))
    click.echo(list(results))

    minX, maxX, meanX, stdX = description
    if describe:
        click.echo("min: {}".format(minX))
        click.echo("max: {}".format(maxX))
        click.echo("mean: {}".format(meanX))
        click.echo("std: {}".format(stdX))
        click.echo("count: {}".format(count))

    if njobs > 0 and plot is True:
        # Digest-based plots (only the t-digest paths define `digest`).
        ctr = digest.centroids()
        # scaled to theoretic normal
        # calculate positions relative to standard normal distribution
        qx_predicted_norm = stats.norm.ppf(digest.cdf(ctr['mean']))
        qx_norm = np.linspace(start=stats.norm.ppf(0.001), stop=stats.norm.ppf(0.999), num=250)
        qz_norm = qx_norm*stdX + meanX
        cum_norm = stats.norm.cdf(qx_norm)

        # scaled to theoretic laplace
        # calculate positions relative to laplace distribution
        """
        qx_predicted_laplace = stats.laplace.ppf(digest.cdf(ctr['mean']))
        qx_laplace = np.linspace(start=stats.laplace.ppf(0.001), stop=stats.laplace.ppf(0.999), num=250)
        qz_laplace = qx_laplace*stdX + mean
        cum_laplace = stats.laplace.cdf(qx_laplace)
        """

        # frequency at centroid (irregular width bins)
        plt.plot(ctr['mean'], ctr['weight'], 'r.')
        plt.plot([meanX], [0], 'k+', markersize=12)
        plt.xlabel('Centroid Value')
        plt.ylabel('Counts')
        plt.title('Centroid Counts')
        plt.show()

        # histogram (equal width bins)
        nbins = 1000
        hist, bin_edges = digest.histogram(nbins)
        width = (digest.max() - digest.min())/nbins
        plt.bar(bin_edges[:-1], hist, width=width)
        plt.xlabel('Value')
        plt.ylabel('Counts')
        plt.title('Histogram')
        plt.show()

        # cumulative probability distribution
        spacing = (digest.max() - digest.min())/100
        samples = np.arange(ctr['mean'].min(), ctr['mean'].max(), spacing)
        cdf = digest.cdf(samples)
        plt.plot(samples, cdf, 'r.')
        plt.plot(qz_norm, cum_norm, linestyle='dashed', c='black')
        # plt.plot(qz_laplace, cum_laplace, linestyle='dotted', c='gray')
        plt.xlabel('Value')
        plt.ylabel('Probability')
        plt.title('Cumulative Distribution')
        plt.plot([meanX], [digest.cdf(meanX)], "k+", markersize=12)
        plt.show()

        # theoretic normal
        plt.plot(qx_predicted_norm, ctr['mean'], 'r.')
        plt.plot(qx_norm, qz_norm, linestyle='dashed', c='black')
        plt.xlabel('Standard Normal Variate')
        plt.ylabel('Value')
        plt.title('QQ-plot on theoretic standard normal')
        plt.plot([0], [meanX], "k+", markersize=12)
        plt.show()

        # theoretic laplace
        """
        plt.plot(qx_predicted_laplace, ctr['mean'], 'r.')
        plt.plot(qx_laplace, qz_laplace, linestyle='dashed', c='black')
        plt.plot([0], [mean], 'k+', markersize=12)
        plt.xlabel('laplace variate')
        plt.ylabel('Elevation (m)')
        plt.title('QQ-plot on theoretic laplace')
        plt.show()
        """

    if njobs < 1 and plot is True:
        # In-memory plots built directly from the full value array.
        # histogram (equal width bins)
        nbins = 100
        hist, bin_edges = np.histogram(arr, nbins)
        width = (arr.max() - arr.min())/nbins
        plt.bar(bin_edges[:-1], hist, width=width)
        plt.xlabel('Elevation (m)')
        plt.ylabel('Counts')
        plt.title('Histogram')
        plt.show()

        # cumulative probability distribution
        cdf = np.cumsum(hist)/count
        qx_predicted_norm = stats.norm.ppf(cdf)
        qx_norm = np.linspace(start=stats.norm.ppf(0.001), stop=stats.norm.ppf(0.999), num=250)
        qz_norm = qx_norm * stdX + meanX
        cum_norm = stats.norm.cdf(qx_norm)
        plt.plot(bin_edges[:-1], cdf, 'r.')
        plt.plot(qz_norm, cum_norm, linestyle='dashed', c='black')
        plt.xlabel('Value')
        plt.ylabel('Probability')
        plt.title('Cumulative Distribution')
        plt.plot([meanX], [0], "k+", markersize=12)
        plt.show()

        # theoretic normal
        # full dataset is too large!
        # zscore = (arr - meanX)/stdX
        # plt.plot(zscore, arr, 'r.')
        plt.plot(qx_predicted_norm, bin_edges[:-1], 'r.')
        plt.plot(qx_norm, qz_norm, linestyle='dashed', c='black')
        plt.xlabel('Standard Normal Variate')
        plt.ylabel('Value')
        plt.title('QQ-plot on theoretic standard normal')
        plt.plot([0], [meanX], "k+", markersize=12)
        plt.show()
import time
import warnings
import concurrent.futures
from math import ceil
import click
import numpy as np
import rasterio
import rio_terrain as rt
import rio_terrain.tools.messages as msg
from rio_terrain.core import focalstatistics
from rio_terrain import __version__ as plugin_version
@click.command('std', short_help="Calculate standard-deviation.")
@click.argument('input', nargs=1, type=click.Path(exists=True))
@click.argument('output', nargs=1, type=click.Path())
@click.option('-n', '--neighborhood', nargs=1, default=3, help='Neigborhood size in cells.')
@click.option('-b', '--blocks', 'blocks', nargs=1, type=int, default=40,
              help='Multiple internal blocks to chunk.')
@click.option('-j', '--njobs', type=int, default=1, help='Number of concurrent jobs to run.')
@click.option('-v', '--verbose', is_flag=True, help='Enables verbose mode.')
@click.version_option(version=plugin_version, message='rio-terrain v%(version)s')
@click.pass_context
def std(ctx, input, output, neighborhood, blocks, njobs, verbose):
    """Calculate a standard-deviation raster.

    INPUT should be a single-band raster.

    \b
    Example:
    rio std elevation.tif stddev.tif

    """
    if verbose:
        # BUGFIX: `np.warnings` was a deprecated alias removed in NumPy >= 1.24;
        # use the stdlib `warnings` module (imported at the top of this file).
        warnings.filterwarnings('default')
    else:
        warnings.filterwarnings('ignore')

    t0 = time.time()
    command = click.get_current_context().info_name

    with rasterio.open(input) as src:
        profile = src.profile
        affine = src.transform
        profile.update(dtype=rasterio.float32, count=1, compress='lzw', bigtiff='yes')

        if (njobs >= 1) and src.is_tiled:
            blockshape = (list(src.block_shapes))[0]
            if (blockshape[0] == 1) or (blockshape[1] == 1):
                warnings.warn((msg.STRIPED).format(blockshape))
            # Read windows overlap by the neighborhood so focal statistics
            # have valid margins; write windows do not overlap.
            read_windows = rt.tile_grid(
                src.width,
                src.height,
                blockshape[0] * blocks,
                blockshape[1] * blocks,
                overlap=neighborhood,
            )
            write_windows = rt.tile_grid(
                src.width,
                src.height,
                blockshape[0] * blocks,
                blockshape[1] * blocks,
                overlap=0,
            )
        else:
            # NOTE(review): with an untiled source, read_windows/write_windows
            # are never defined, so the njobs >= 1 paths below will fail with a
            # NameError — confirm whether untiled input should be rejected here.
            blockshape = 128
            warnings.warn((msg.NOTILING).format(src.shape))

        with rasterio.open(output, 'w', **profile) as dst:
            if njobs < 1:
                # Whole-raster, in-memory computation.
                click.echo((msg.STARTING).format(command, msg.INMEMORY))
                img = src.read(1)
                img[img <= src.nodata + 1] = np.nan
                result = focalstatistics.std(img, size=(neighborhood, neighborhood))
                dst.write(result.astype(profile['dtype']), 1)
            elif njobs == 1:
                # Sequential window-by-window computation.
                click.echo((msg.STARTING).format(command, msg.SEQUENTIAL))
                with click.progressbar(length=src.width * src.height, label='Blocks done:') as bar:
                    for (read_window, write_window) in zip(read_windows, write_windows):
                        img = src.read(1, window=read_window)
                        img[img <= src.nodata + 1] = np.nan
                        arr = focalstatistics.std(img, size=(neighborhood, neighborhood))
                        # Drop the overlap margins before writing.
                        result = rt.trim(arr, rt.margins(read_window, write_window))
                        dst.write(result.astype(profile['dtype']), 1, window=write_window)
                        bar.update(result.size)
            else:
                # Concurrent computation with dask over in-window chunks.
                click.echo((msg.STARTING).format(command, msg.CONCURRENT))
                import dask.array as da
                with click.progressbar(length=src.width * src.height, label='Blocks done:') as bar:
                    for (read_window, write_window) in zip(read_windows, write_windows):
                        img = src.read(1, window=read_window)
                        img[img <= src.nodata + 1] = np.nan

                        # Aim for roughly `chunks_wanted` dask chunks per window,
                        # rounded up to whole raster blocks.
                        chunks_wanted = 100
                        bw = ceil(read_window.width / np.sqrt(chunks_wanted) / blockshape[0])
                        bh = ceil(read_window.height / np.sqrt(chunks_wanted) / blockshape[1])
                        hh, ww = rt.chunk_dims(
                            (img.shape[0], img.shape[1]),
                            (blockshape[0] * bw, blockshape[1] * bh),
                            min_size=blockshape[0] * 2)
                        arr = da.from_array(img, chunks=(tuple(hh), tuple(ww)))
                        # Pad each chunk with NaN margins so focal windows are valid
                        # at chunk edges, then trim the margins afterward.
                        tiles_in = da.overlap.overlap(
                            arr,
                            depth={0: neighborhood, 1: neighborhood},
                            boundary={0: np.nan, 1: np.nan})
                        tiles_out = tiles_in.map_blocks(
                            focalstatistics.std,
                            size=(neighborhood, neighborhood),
                            dtype=np.float64)
                        trim_out = da.overlap.trim_internal(
                            tiles_out, {0: neighborhood, 1: neighborhood})
                        full_result = trim_out.compute()
                        result = rt.trim(full_result, rt.margins(read_window, write_window))
                        dst.write(result.astype(profile['dtype']), 1, window=write_window)
                        bar.update(result.size)

    click.echo((msg.WRITEOUT).format(output))
    click.echo((msg.COMPLETION).format(msg.printtime(t0, time.time())))
import warnings
from math import pi
import numpy as np
from scipy import ndimage
def _ring_gradient(arr, res=(1, 1)):
    """Convolve an array using a 3x3 ring-shaped (Sobel-style) kernel.

    Parameters:
        arr (ndarray): 2D numpy array
        res (tuple): tuple of raster cell width and height

    Returns:
        dz_dy, dz_dx (ndarrays): y and x gradient components
    """
    kernel_x = np.array([[1, 0, -1], [2, 0, -2], [1, 0, -1]])
    kernel_y = np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]])
    # Kernel weights sum to 8, hence the normalization by 8 * cell size.
    dz_dx = ndimage.convolve(arr, kernel_x, origin=(0, 0)) / (8 * res[0])
    dz_dy = ndimage.convolve(arr, kernel_y, origin=(0, 0)) / (8 * res[1])
    return dz_dy, dz_dx
def slope(arr, res=(1, 1), units='grade', neighbors=4):
    """Calculates slope.

    Parameters:
        arr (ndarray): 2D numpy array
        res (tuple): tuple of raster cell width and height
        units (str, optional): choice of 'grade' or 'degrees'
        neighbors (int, optional): use four or eight neighbors in calculation

    Returns:
        slope (ndarray): 2D numpy array representing slope

    Raises:
        ValueError: if `units` is not 'grade' or 'degrees'
    """
    if neighbors == 4:
        # NOTE(review): np.gradient receives a single spacing, so res[0] is
        # applied to both axes; only the 8-neighbor path honors res[1].
        dz_dy, dz_dx = np.gradient(arr, res[0])
    else:
        dz_dy, dz_dx = _ring_gradient(arr, res)
    m = np.sqrt(dz_dx ** 2 + dz_dy ** 2)
    if units == 'grade':
        slope = m
    elif units == 'degrees':
        slope = (180 / pi) * np.arctan(m)
    else:
        # BUGFIX: previously an unknown `units` value left `slope` unbound and
        # raised UnboundLocalError; fail explicitly instead.
        raise ValueError("units must be 'grade' or 'degrees'")
    return slope
def aspect(arr, res=(1, 1), pcs='compass', neighbors=4):
    """Calculates aspect.

    Parameters:
        arr (ndarray): 2D numpy array
        res (tuple): tuple of raster cell width and height
        pcs (str, optional): polar coordinate system, 'compass' or 'cartesian';
            any other value returns the raw arctan2 angle in degrees
        neighbors (int, optional): use four or eight neighbors in calculation

    Returns:
        aspect (ndarray): 2D numpy array representing slope aspect
    """
    if neighbors == 4:
        dz_dy, dz_dx = np.gradient(arr, res[0])
    else:
        dz_dy, dz_dx = _ring_gradient(arr, res)

    deg = 180 / pi
    if pcs == 'compass':
        # Rotate so 0/360 is north and angles grow clockwise, then wrap.
        result = deg * np.arctan2(dz_dy, dz_dx) + 270
        result[result > 360] -= 360
    elif pcs == 'cartesian':
        result = -deg * np.arctan2(-dz_dy, -dz_dx)
        result[result < 0] += 360
    else:
        result = deg * np.arctan2(dz_dy, dz_dx)
    return result
def curvature(arr, res=(1, 1), neighbors=4):
    """Calculates curvature.

    Parameters:
        arr (ndarray): 2D numpy array
        res (tuple): tuple of raster cell width and height
        neighbors (int, optional): use four or eight neighbors in calculation

    Returns:
        curvature (ndarray): 2D numpy array representing surface curvature
    """
    if neighbors == 4:
        dz_dy, dz_dx = np.gradient(arr, res[0])
    else:
        dz_dy, dz_dx = _ring_gradient(arr, res)

    magnitude = np.sqrt(dz_dx ** 2 + dz_dy ** 2)
    # Normalize the gradient field to unit vectors; flat cells (magnitude 0)
    # divide by zero and propagate NaN, matching the previous behavior.
    unit_x = np.divide(dz_dx, magnitude)
    unit_y = np.divide(dz_dy, magnitude)
    # Divergence of the unit gradient field.
    _, d2T_dxx = np.gradient(unit_x, res[0])
    d2T_dyy, _ = np.gradient(unit_y, res[0])
    return d2T_dxx + d2T_dyy
import concurrent.futures
import numpy as np
def minmax(src, windows, njobs):
    """Calculates the minimum and maximum values in a rasterio source.

    Parameters:
        src : rasterio source
        windows : iterable of (ij, window) pairs (unused when njobs < 1)
        njobs (integer) : number of processing jobs; < 1 reads in-memory

    Returns:
        src_min (float) : minimum value (None if no finite data)
        src_max (float) : maximum value (None if no finite data)

    ArcGIS min = 77.278923034668
    ArcGIS max = 218.81454467773
    """

    def _minmax(arr):
        # Reduce one chunk; (None, None) signals an all-nodata chunk.
        mask = np.isfinite(arr[:])
        if np.count_nonzero(mask) > 0:
            arr_min = np.nanmin(arr[mask])
            arr_max = np.nanmax(arr[mask])
        else:
            arr_min = None
            arr_max = None
        return arr_min, arr_max

    src_min = None
    src_max = None
    if njobs < 1:
        data = src.read(1)
        data[data <= src.nodata + 1] = np.nan
        src_min, src_max = _minmax(data)
        return src_min, src_max
    else:
        def jobs():
            for ij, window in windows:
                data = src.read(1, window=window)
                data[data <= src.nodata + 1] = np.nan
                yield data, window

        with concurrent.futures.ThreadPoolExecutor(max_workers=njobs) as executor:
            future_to_window = {
                executor.submit(_minmax, data): (window) for data, window in jobs()
            }
            for future in concurrent.futures.as_completed(future_to_window):
                window_min, window_max = future.result()
                # BUGFIX: compare with `is None` — truthiness tests here
                # discarded legitimate values of 0.0 (falsy), so a raster
                # whose minimum is 0 reported the wrong extrema.
                if window_min is not None and window_max is not None:
                    if src_min is None or window_min < src_min:
                        src_min = window_min
                    if src_max is None or window_max > src_max:
                        src_max = window_max

    return src_min, src_max
def mean(src, windows, njobs):
    """Calculates the mean of a rasterio source.

    Parameters:
        src : rasterio source
        windows : iterable of (ij, window) pairs (unused when njobs < 1)
        njobs (integer) : number of processing jobs; < 1 reads in-memory

    Returns:
        mean (float) : mean value

    mean = 140.043719088
    ArcGIS = 140.04371922353
    """

    def _accumulate(arr):
        # Per-chunk partial sum and finite-value count.
        valid = np.isfinite(arr[:])
        return np.sum(arr[valid]), np.count_nonzero(valid)

    if njobs < 1:
        # Whole-raster read: mask nodata as NaN and average directly.
        data = src.read(1)
        data[data <= src.nodata + 1] = np.nan
        finite = data[np.isfinite(data[:])]
        return np.nanmean(finite)

    def jobs():
        for ij, window in windows:
            data = src.read(1, window=window)
            data[data <= src.nodata + 1] = np.nan
            yield data, window

    total = 0.0
    count = 0.0
    with concurrent.futures.ThreadPoolExecutor(max_workers=njobs) as executor:
        future_to_window = {
            executor.submit(_accumulate, data): (window) for data, window in jobs()
        }
        for future in concurrent.futures.as_completed(future_to_window):
            window_sum, window_count = future.result()
            total += window_sum
            count += window_count
    return total / count
def stddev(src, mean, windows, njobs):
    """Calculates the sample standard deviation of a rasterio source.

    Parameters:
        src : rasterio source
        mean : mean value (precomputed, e.g. by `mean()`)
        windows : iterable of (ij, window) pairs (unused when njobs < 1)
        njobs (integer) : number of processing jobs; < 1 reads in-memory

    Returns:
        stddev (float) : sample standard deviation (n - 1 denominator)

    stddev = 23.5554506735
    ArcGIS = 23.555450665488
    """

    def _accumulate(arr):
        # Per-chunk sum of squared deviations from the provided mean.
        mask = np.isfinite(arr[:])
        sum_ = np.sum(np.square(arr[mask] - mean))
        count_ = np.count_nonzero(mask)
        return sum_, count_

    if njobs < 1:
        data = src.read(1)
        data[data <= src.nodata + 1] = np.nan
        vals = data[np.isfinite(data[:])]
        # BUGFIX: use ddof=1 so the in-memory result matches the windowed
        # (n - 1) accumulation below and the ArcGIS reference value; the
        # default ddof=0 gave a different answer than the concurrent path.
        stddev = np.nanstd(vals, ddof=1)
    else:
        def jobs():
            for ij, window in windows:
                data = src.read(1, window=window)
                data[data <= src.nodata + 1] = np.nan
                yield data, window

        src_dev_sum = 0.0
        src_dev_count = 0.0
        with concurrent.futures.ThreadPoolExecutor(max_workers=njobs) as executor:
            future_to_window = {
                executor.submit(_accumulate, data): (window) for data, window in jobs()
            }
            for future in concurrent.futures.as_completed(future_to_window):
                window_dev_sum, window_dev_count = future.result()
                src_dev_sum += window_dev_sum
                src_dev_count += window_dev_count
        stddev = np.sqrt(src_dev_sum / (src_dev_count - 1))

    return stddev
from concurrent import futures
from typing import Any, Dict, Optional, Sequence, Tuple
import attr
import morecantile
import numpy
from rasterio.transform import from_bounds
from rasterio.warp import calculate_default_transform
from rio_tiler import constants, reader
from rio_tiler.errors import TileOutsideBounds
from rio_tiler.expression import apply_expression, parse_expression
from rio_tiler.io import COGReader as RioTilerReader
default_tms = morecantile.tms.get("WebMercatorQuad")


def geotiff_options(
    x: int,
    y: int,
    z: int,
    tilesize: int = 256,
    tms: morecantile.TileMatrixSet = default_tms,
) -> Dict:
    """GeoTIFF creation options (crs + transform) for one map tile."""
    west, south, east, north = tms.xy_bounds(morecantile.Tile(x=x, y=y, z=z))
    return {
        "crs": tms.crs,
        "transform": from_bounds(west, south, east, north, tilesize, tilesize),
    }
@attr.s
class COGReader(RioTilerReader):
    """
    Cloud Optimized GeoTIFF Reader.

    Examples
    --------
    with CogeoReader(src_path) as cog:
        cog.tile(...)

    with rasterio.open(src_path) as src_dst:
        with WarpedVRT(src_dst, ...) as vrt_dst:
            with CogeoReader(None, dataset=vrt_dst) as cog:
                cog.tile(...)

    with rasterio.open(src_path) as src_dst:
        with CogeoReader(None, dataset=src_dst) as cog:
            cog.tile(...)

    Attributes
    ----------
    filepath: str
        Cloud Optimized GeoTIFF path.
    dataset: rasterio.DatasetReader, optional
        Rasterio dataset.
    tms: morecantile.TileMatrixSet, optional
        TileMatrixSet to use, default is WebMercatorQuad.

    Properties
    ----------
    minzoom: int
        COG minimum zoom level in TMS projection.
    maxzoom: int
        COG maximum zoom level in TMS projection.
    bounds: tuple[float]
        COG bounds in WGS84 crs.
    center: tuple[float, float, int]
        COG center + minzoom
    colormap: dict
        COG internal colormap.
    info: dict
        General information about the COG (datatype, indexes, ...)

    Methods
    -------
    tile(0, 0, 0, indexes=(1,2,3), expression="B1/B2", tilesize=512, resampling_methods="nearest")
        Read a map tile from the COG.
    part((0,10,0,10), indexes=(1,2,3,), expression="B1/B20", max_size=1024)
        Read part of the COG.
    preview(max_size=1024)
        Read preview of the COG.
    point((10, 10), indexes=1)
        Read a point value from the COG.
    stats(pmin=5, pmax=95)
        Get Raster statistics.
    meta(pmin=5, pmax=95)
        Get info + raster statistics

    """

    # TileMatrixSet used for zoom calculations and tile reading.
    tms: morecantile.TileMatrixSet = attr.ib(default=default_tms)

    def _get_zooms(self):
        """Calculate raster min/max zoom level."""

        def _zoom_for_pixelsize(pixel_size, max_z=24):
            """Get zoom level corresponding to a pixel resolution."""
            for z in range(max_z):
                matrix = self.tms.matrix(z)
                if pixel_size > self.tms._resolution(matrix):
                    return max(0, z - 1)  # We don't want to scale up
            return max_z - 1

        # Native resolution of the dataset reprojected into the TMS CRS.
        dst_affine, w, h = calculate_default_transform(
            self.dataset.crs,
            self.tms.crs,
            self.dataset.width,
            self.dataset.height,
            *self.dataset.bounds,
        )
        resolution = max(abs(dst_affine[0]), abs(dst_affine[4]))
        max_zoom = _zoom_for_pixelsize(resolution)

        # Resolution at which the whole raster fits in one TMS tile.
        matrix = self.tms.tileMatrix[0]
        ovr_resolution = (
            resolution * max(h, w) / max(matrix.tileWidth, matrix.tileHeight)
        )
        min_zoom = _zoom_for_pixelsize(ovr_resolution)

        # Keep caller-supplied zooms when already set.
        self.minzoom = self.minzoom or min_zoom
        self.maxzoom = self.maxzoom or max_zoom

        return

    def _tile_exists(self, tile: morecantile.Tile):
        """Check if a tile is inside a given bounds."""
        # Standard bounding-box intersection test in the TMS geographic CRS.
        tile_bounds = self.tms.bounds(*tile)
        return (
            (tile_bounds[0] < self.bounds[2])
            and (tile_bounds[2] > self.bounds[0])
            and (tile_bounds[3] > self.bounds[1])
            and (tile_bounds[1] < self.bounds[3])
        )

    def tile(
        self,
        tile_x: int,
        tile_y: int,
        tile_z: int,
        tilesize: int = 256,
        indexes: Optional[Sequence] = None,
        expression: Optional[str] = "",
        **kwargs: Any,
    ) -> Tuple[numpy.ndarray, numpy.ndarray]:
        """Read a TMS map tile from a COG.

        Returns a (data, mask) tuple of numpy arrays.

        Raises:
            TileOutsideBounds: when the tile does not intersect the dataset.
        """
        # Merge per-call options over reader defaults (presumably set by
        # rio_tiler's base reader — TODO confirm `_kwargs` semantics).
        kwargs = {**self._kwargs, **kwargs}

        if isinstance(indexes, int):
            indexes = (indexes,)

        # An expression (e.g. "B1/B2") determines which band indexes to read.
        if expression:
            indexes = parse_expression(expression)

        tile = morecantile.Tile(x=tile_x, y=tile_y, z=tile_z)
        if not self._tile_exists(tile):
            raise TileOutsideBounds(
                "Tile {}/{}/{} is outside image bounds".format(tile_z, tile_x, tile_y)
            )

        tile_bounds = self.tms.xy_bounds(*tile)
        # Note: `tile` is rebound from a morecantile.Tile to the data array here.
        tile, mask = reader.part(
            self.dataset,
            tile_bounds,
            tilesize,
            tilesize,
            dst_crs=self.tms.crs,
            indexes=indexes,
            **kwargs,
        )
        if expression:
            # Evaluate each comma-separated sub-expression over the bands read.
            blocks = expression.lower().split(",")
            bands = [f"b{bidx}" for bidx in indexes]
            tile = apply_expression(blocks, bands, tile)
        return tile, mask
def multi_tile(
    assets: Sequence[str],
    *args: Any,
    tms: morecantile.TileMatrixSet = default_tms,
    **kwargs: Any,
) -> Tuple[numpy.ndarray, numpy.ndarray]:
    """Read the same tile from multiple assets and assemble the results."""

    def _read_one(asset: str):
        # Each worker opens its own reader so threads never share a dataset.
        with COGReader(asset, tms=tms) as cog:  # type: ignore
            return cog.tile(*args, **kwargs)

    with futures.ThreadPoolExecutor(max_workers=constants.MAX_THREADS) as pool:
        tiles = list(pool.map(_read_one, assets))

    # Stack band data; a pixel is valid only where every asset's mask is valid.
    data = numpy.concatenate([t[0] for t in tiles])
    mask = numpy.all([t[1] for t in tiles], axis=0).astype(numpy.uint8) * 255
    return data, mask
from typing import Type
import attr
import morecantile
from rio_tiler.io import BaseReader
from rio_tiler.io import STACReader as RioTilerSTACReader
from .cogeo import COGReader
default_tms = morecantile.tms.get("WebMercatorQuad")
@attr.s
class STACReader(RioTilerSTACReader):
    """
    STAC + Cloud Optimized GeoTIFF Reader.

    Examples
    --------
    with STACReader(stac_path) as stac:
        stac.tile(...)

    my_stac = {
        "type": "Feature",
        "stac_version": "1.0.0",
        ...
    }
    with STACReader(None, item=my_stac) as stac:
        stac.tile(...)

    Attributes
    ----------
    filepath: str
        STAC Item path, URL or S3 URL.
    item: Dict, optional
        STAC Item dict.
    tms: morecantile.TileMatrixSet, optional
        TileMatrixSet to use, default is WebMercatorQuad.
    minzoom: int, optional
        Set minzoom for the tiles.
    maxzoom: int, optional
        Set maxzoom for the tiles.
    include_assets: Set, optional
        Only accept some assets.
    exclude_assets: Set, optional
        Exclude some assets.
    include_asset_types: Set, optional
        Only include some assets based on their type
    exclude_asset_types: Set, optional
        Exclude some assets based on their type

    Properties
    ----------
    bounds: tuple[float]
        STAC bounds in WGS84 crs.
    center: tuple[float, float, int]
        STAC item center + minzoom

    Methods
    -------
    tile(0, 0, 0, assets="B01", expression="B01/B02")
        Read a map tile from the COG.
    part((0,10,0,10), assets="B01", expression="B1/B20", max_size=1024)
        Read part of the COG.
    preview(assets="B01", max_size=1024)
        Read preview of the COG.
    point((10, 10), assets="B01")
        Read a point value from the COG.
    stats(assets="B01", pmin=5, pmax=95)
        Get Raster statistics.
    info(assets="B01")
        Get Assets raster info.
    metadata(assets="B01", pmin=5, pmax=95)
        info + stats

    """

    # Per-asset reader class; the TMS-aware COGReader by default.
    reader: Type[BaseReader] = attr.ib(default=COGReader)
    tms: morecantile.TileMatrixSet = attr.ib(default=default_tms)
    minzoom: int = attr.ib(default=None)
    maxzoom: int = attr.ib(default=None)

    def __attrs_post_init__(self):
        """forward tms to readers options and set min/max zoom."""
        # Every asset reader must use the same TileMatrixSet as this reader.
        self.reader_options.update({"tms": self.tms})
        # Default the zoom range to the TMS limits when not set by the caller.
        if self.minzoom is None:
            self.minzoom = self.tms.minzoom
        if self.maxzoom is None:
            self.maxzoom = self.tms.maxzoom
        super().__attrs_post_init__()
import re
from typing import Any, Dict
from rio_tiler_pds.errors import InvalidCBERSSceneId
def sceneid_parser(sceneid: str) -> Dict:
    """Parse CBERS 4/4A scene id.

    Args:
        sceneid (str): CBERS 4/4A sceneid.

    Returns:
        dict: dictionary with metadata constructed from the sceneid.

    Raises:
        InvalidCBERSSceneId: If `sceneid` doesn't match the regex schema.

    Examples:
        >>> sceneid_parser('CBERS_4_MUX_20171121_057_094_L2')

    """
    # Cheap validation pass before the capturing parse below.
    if not re.match(r"^CBERS_(4|4A)_\w+_[0-9]{8}_[0-9]{3}_[0-9]{3}_L\w+$", sceneid):
        raise InvalidCBERSSceneId("Could not match {}".format(sceneid))

    cbers_pattern = (
        r"(?P<satellite>\w+)_"
        r"(?P<mission>\w+)"
        r"_"
        r"(?P<instrument>\w+)"
        r"_"
        r"(?P<acquisitionYear>[0-9]{4})"
        r"(?P<acquisitionMonth>[0-9]{2})"
        r"(?P<acquisitionDay>[0-9]{2})"
        r"_"
        r"(?P<path>[0-9]{3})"
        r"_"
        r"(?P<row>[0-9]{3})"
        r"_"
        r"(?P<processingCorrectionLevel>L\w+)$"
    )
    meta: Dict[str, Any] = re.match(cbers_pattern, sceneid, re.IGNORECASE).groupdict()  # type: ignore

    meta["scene"] = sceneid
    meta["date"] = "{}-{}-{}".format(
        meta["acquisitionYear"], meta["acquisitionMonth"], meta["acquisitionDay"]
    )

    # Bands ids for CB4 and CB4A MUX and WFI/AWFI cameras are the same,
    # so the lookup is keyed by instrument only, not by mission.
    # Each entry is (reference_band, bands, rgb).
    camera_params = {
        "MUX": ("B6", ("B5", "B6", "B7", "B8"), ("B7", "B6", "B5")),
        "AWFI": ("B14", ("B13", "B14", "B15", "B16"), ("B15", "B14", "B13")),
        "PAN10M": ("B4", ("B2", "B3", "B4"), ("B3", "B4", "B2")),
        "PAN5M": ("B1", ("B1",), ("B1", "B1", "B1")),
        "WFI": ("B14", ("B13", "B14", "B15", "B16"), ("B15", "B14", "B13")),
        "WPM": ("B2", ("B0", "B1", "B2", "B3", "B4"), ("B3", "B2", "B1")),
    }
    reference_band, bands, rgb = camera_params[meta["instrument"]]
    meta["reference_band"] = reference_band
    meta["bands"] = bands
    meta["rgb"] = rgb

    return meta
from typing import Dict, Type
import attr
from morecantile import TileMatrixSet
from rio_tiler.constants import WEB_MERCATOR_TMS
from rio_tiler.errors import InvalidBandName
from rio_tiler.io import MultiBandReader, Reader
from rio_tiler_pds.cbers.utils import sceneid_parser
@attr.s
class CBERSReader(MultiBandReader):
"""AWS Public Dataset CBERS 4 reader.
Args:
sceneid (str): CBERS 4 sceneid.
Attributes:
scene_params (dict): scene id parameters.
bands (tuple): list of available bands (default is defined for each sensor).
Examples:
>>> with CBERSReader('CBERS_4_AWFI_20170420_146_129_L2') as scene:
print(scene.bounds)
"""
input: str = attr.ib()
tms: TileMatrixSet = attr.ib(default=WEB_MERCATOR_TMS)
minzoom: int = attr.ib(default=None)
maxzoom: int = attr.ib(default=None)
reader: Type[Reader] = attr.ib(default=Reader)
reader_options: Dict = attr.ib(factory=dict)
_scheme: str = "s3"
bucket: str = attr.ib(default="cbers-pds")
prefix_pattern: str = attr.ib(
default="CBERS{mission}/{instrument}/{path}/{row}/{scene}"
)
def __attrs_post_init__(self):
"""Fetch Reference band to get the bounds."""
self.scene_params = sceneid_parser(self.input)
self.bands = self.scene_params["bands"]
ref = self._get_band_url(self.scene_params["reference_band"])
with self.reader(ref, tms=self.tms, **self.reader_options) as cog:
self.bounds = cog.bounds
self.crs = cog.crs
self.minzoom = cog.minzoom
self.maxzoom = cog.maxzoom
def _get_band_url(self, band: str) -> str:
"""Validate band's name and return band's url."""
if band not in self.bands:
raise InvalidBandName(f"{band} is not valid")
prefix = self.prefix_pattern.format(**self.scene_params)
band = band.replace("B", "BAND")
return f"{self._scheme}://{self.bucket}/{prefix}/{self.input}_{band}.tif" | /rio_tiler_pds-0.10.1-py3-none-any.whl/rio_tiler_pds/cbers/aws/cbers4.py | 0.913291 | 0.227523 | cbers4.py | pypi |
import re
from typing import Any, Dict, Tuple
import numpy
from rio_tiler_pds.errors import InvalidLandsatSceneId
# Band-name constants for the AWS Landsat public dataset, grouped by sensor
# and processing level. These tuples drive per-scene band list construction.

# Collection-2 Level-2 surface reflectance (SR) bands for OLI (Landsat 8/9).
OLI_SR_BANDS: Tuple[str, ...] = (
    "QA_PIXEL",
    "QA_RADSAT",
    "SR_B1",
    "SR_B2",
    "SR_B3",
    "SR_B4",
    "SR_B5",
    "SR_B6",
    "SR_B7",
    "SR_QA_AEROSOL",
)

# Collection-2 Level-2 surface temperature (ST) bands for TIRS (Landsat 8/9).
TIRS_ST_BANDS: Tuple[str, ...] = (
    "ST_ATRAN",
    "ST_B10",
    "ST_CDIST",
    "ST_DRAD",
    "ST_EMIS",
    "ST_EMSD",
    "ST_QA",
    "ST_TRAD",
    "ST_URAD",
)

# Collection-2 Level-2 surface reflectance bands for TM/ETM (Landsat 4-7).
TM_SR_BANDS: Tuple[str, ...] = (
    "QA_PIXEL",
    "QA_RADSAT",
    "SR_ATMOS_OPACITY",
    "SR_B1",
    "SR_B2",
    "SR_B3",
    "SR_B4",
    "SR_B5",
    "SR_B7",
    "SR_CLOUD_QA",
)

# Collection-2 Level-2 surface temperature bands for TM/ETM (thermal band 6).
TM_ST_BANDS: Tuple[str, ...] = (
    "ST_ATRAN",
    "ST_B6",
    "ST_CDIST",
    "ST_DRAD",
    "ST_EMIS",
    "ST_EMSD",
    "ST_QA",
    "ST_TRAD",
    "ST_URAD",
)

# Level-1 spectral bands.
OLI_L1_BANDS: Tuple[str, ...] = ("B1", "B2", "B3", "B4", "B5", "B6", "B7", "B8", "B9")

TIRS_L1_BANDS: Tuple[str, ...] = ("B10", "B11")

# Level-1 quality / angle bands.
OLI_L1_QA_BANDS: Tuple[str, ...] = (
    "QA_PIXEL",
    "QA_RADSAT",
    "SAA",
    "SZA",
    "VAA",
    "VZA",
)

TIRS_L1_QA_BANDS: Tuple[str, ...] = (
    "QA_PIXEL",
    "QA_RADSAT",
)

# ETM+ (Landsat 7) Level-1 bands; band 6 has two gain settings (VCID 1/2).
ETM_L1_BANDS: Tuple[str, ...] = (
    "B1",
    "B2",
    "B3",
    "B4",
    "B5",
    "B6_VCID_1",
    "B6_VCID_2",
    "B7",
    "B8",
    "QA_PIXEL",
    "QA_RADSAT",
    "SAA",
    "SZA",
    "VAA",
    "VZA",
)

# TM (Landsat 4/5) Level-1 bands.
TM_L1_BANDS: Tuple[str, ...] = (
    "B1",
    "B2",
    "B3",
    "B4",
    "B5",
    "B6",
    "B7",
    "QA_PIXEL",
    "QA_RADSAT",
    "SAA",
    "SZA",
    "VAA",
    "VZA",
)

# MSS (Landsat 1-5) Level-1 bands.
MSS_L1_BANDS: Tuple[str, ...] = ("B4", "B5", "B6", "B7", "QA_PIXEL", "QA_RADSAT")
def sceneid_parser(sceneid: str) -> Dict:
    """Parse a Landsat scene identifier.

    Author @perrygeo - http://www.perrygeo.com

    Args:
        sceneid (str): Landsat sceneid.

    Returns:
        dict: dictionary with metadata constructed from the sceneid.

    Raises:
        InvalidLandsatSceneId: If `sceneid` doesn't match the regex schema.

    Examples:
        >>> sceneid_parser('LC08_L1TP_016037_20170813_20170814_01_RT')

    """
    # Cheap shape check before running the named-group extraction below.
    if not re.match(
        r"^L[COTEM]\d{2}_L\d{1}[A-Z]{2}_\d{6}_\d{8}_\d{8}_\d{2}_\w{2}$", sceneid
    ):
        raise InvalidLandsatSceneId("Could not match {}".format(sceneid))

    collection_pattern = (
        r"^L"
        r"(?P<sensor>\w{1})"
        r"(?P<satellite>\w{2})"
        r"_"
        r"(?P<processingCorrectionLevel>\w{4})"
        r"_"
        r"(?P<path>[0-9]{3})"
        r"(?P<row>[0-9]{3})"
        r"_"
        r"(?P<acquisitionYear>[0-9]{4})"
        r"(?P<acquisitionMonth>[0-9]{2})"
        r"(?P<acquisitionDay>[0-9]{2})"
        r"_"
        r"(?P<processingYear>[0-9]{4})"
        r"(?P<processingMonth>[0-9]{2})"
        r"(?P<processingDay>[0-9]{2})"
        r"_"
        r"(?P<collectionNumber>\w{2})"
        r"_"
        r"(?P<collectionCategory>\w{2})$"
    )
    meta: Dict[str, Any] = re.match(  # type: ignore
        collection_pattern, sceneid, re.IGNORECASE
    ).groupdict()

    meta["scene"] = sceneid
    meta["date"] = "-".join(
        (meta["acquisitionYear"], meta["acquisitionMonth"], meta["acquisitionDay"])
    )
    # Second character of e.g. "L1TP"/"L2SR" -> "1"/"2".
    meta["_processingLevelNum"] = meta["processingCorrectionLevel"][1]

    # "T" is ambiguous: TIRS for satellites >= 8, TM for earlier missions.
    if meta["sensor"] == "T":
        sensor_name = "tirs" if int(meta["satellite"]) >= 8 else "tm"
    else:
        sensor_name = {"C": "oli-tirs", "O": "oli", "E": "etm", "M": "mss"}[
            meta["sensor"]
        ]

    meta["category"] = (
        "standard" if meta["collectionCategory"] not in ["A1", "A2"] else "albers"
    )

    # S3 paths always use oli-tirs
    sensor_s3_prefix = "oli-tirs" if sensor_name in ["oli", "tirs"] else sensor_name
    meta["sensor_name"] = sensor_name
    meta["_sensor_s3_prefix"] = sensor_s3_prefix
    meta["bands"] = get_bands_for_scene_meta(meta)

    return meta
def get_bands_for_scene_meta(meta: Dict) -> Tuple[str, ...]:  # noqa: C901
    """Get available Landsat bands given scene metadata.

    Args:
        meta (dict): output of `sceneid_parser` (reads the `sensor_name` and
            `processingCorrectionLevel` keys).

    Returns:
        tuple: band names available for the scene.

    """
    sensor_name = meta["sensor_name"]
    # NOTE(review): `bands` is only assigned for the sensor/level combinations
    # handled below; any other combination would fail with UnboundLocalError
    # at the return — confirm `sceneid_parser` cannot produce such inputs.
    # Level-2 Surface Reflectance only.
    if meta["processingCorrectionLevel"] == "L2SR":
        if sensor_name in ["oli-tirs", "oli"]:
            bands = OLI_SR_BANDS
        elif sensor_name in ["tm", "etm"]:
            bands = TM_SR_BANDS
    # Level-2 Surface Reflectance + Surface Temperature.
    elif meta["processingCorrectionLevel"] == "L2SP":
        if sensor_name == "oli-tirs":
            bands = OLI_SR_BANDS + TIRS_ST_BANDS
        elif sensor_name in ["tm", "etm"]:
            bands = TM_SR_BANDS + TM_ST_BANDS
    # Level 1
    else:
        if sensor_name == "oli":
            bands = OLI_L1_BANDS + OLI_L1_QA_BANDS
        elif sensor_name == "tirs":
            bands = TIRS_L1_BANDS + TIRS_L1_QA_BANDS
        elif sensor_name == "oli-tirs":
            bands = OLI_L1_BANDS + TIRS_L1_BANDS + OLI_L1_QA_BANDS
        elif sensor_name == "etm":
            bands = ETM_L1_BANDS
        elif sensor_name == "tm":
            bands = TM_L1_BANDS
        elif sensor_name == "mss":
            bands = MSS_L1_BANDS
return bands | /rio_tiler_pds-0.10.1-py3-none-any.whl/rio_tiler_pds/landsat/utils.py | 0.762336 | 0.383757 | utils.py | pypi |
import json
from typing import Dict, Type
import attr
from botocore.exceptions import ClientError
from morecantile import TileMatrixSet
from rio_tiler.constants import WEB_MERCATOR_TMS, WGS84_CRS
from rio_tiler.errors import InvalidBandName
from rio_tiler.io import MultiBandReader, Reader
from rio_tiler_pds.landsat.utils import sceneid_parser
from rio_tiler_pds.utils import fetch
@attr.s
class LandsatC2Reader(MultiBandReader):
    """AWS Public Dataset Landsat Collection 2 COG Reader.

    Args:
        input (str): Landsat 8 sceneid.

    Attributes:
        minzoom (int): Dataset's Min Zoom level (default is 5).
        maxzoom (int): Dataset's Max Zoom level (default is 12).
        scene_params (dict): scene id parameters.
        bands (tuple): list of available bands.

    Examples:
        >>> with LandsatC2Reader('LC08_L2SR_093106_20200207_20201016_02_T2') as scene:
            print(scene.bounds)

        >>> with LandsatC2Reader('LC08_L1TP_116043_20201122_20201122_02_RT') as scene:
            print(scene.bounds)

    """

    input: str = attr.ib()
    tms: TileMatrixSet = attr.ib(default=WEB_MERCATOR_TMS)
    minzoom: int = attr.ib(default=5)
    maxzoom: int = attr.ib(default=12)

    reader: Type[Reader] = attr.ib(default=Reader)
    reader_options: Dict = attr.ib(factory=dict)

    # Keys inside the requester-pays `usgs-landsat` bucket are built from
    # the parsed sceneid via `prefix_pattern`.
    _scheme: str = "s3"
    bucket: str = attr.ib(default="usgs-landsat")
    prefix_pattern: str = attr.ib(
        default="collection02/level-{_processingLevelNum}/{category}/{_sensor_s3_prefix}/{acquisitionYear}/{path}/{row}/{scene}/{scene}"
    )
def __attrs_post_init__(self):
"""Fetch productInfo and get bounds."""
self.scene_params = sceneid_parser(self.input)
self.bands = self.scene_params["bands"]
self.bounds = self.get_geometry()
self.crs = WGS84_CRS
def get_geometry(self):
"""Fetch geometry info for the scene."""
# Allow custom function for users who want to use the WRS2 grid and
# avoid this GET request.
prefix = self.prefix_pattern.format(**self.scene_params)
if self.scene_params["_processingLevelNum"] == "1":
stac_key = f"{prefix}_stac.json"
else:
# This fetches the Surface Reflectance (SR) STAC item.
# There are separate STAC items for Surface Reflectance and Surface
# Temperature (ST), but they have the same geometry. The SR should
# always exist, the ST might not exist based on the scene.
stac_key = f"{prefix}_SR_stac.json"
try:
self.stac_item = fetch(f"s3://{self.bucket}/{stac_key}", request_pays=True)
except ClientError as e:
if e.response["Error"]["Code"] == "NoSuchKey":
raise ValueError(
"stac_item not found. Some RT scenes may not exist in usgs-landsat bucket."
) from e
else:
raise e
return self.stac_item["bbox"]
    def _get_band_url(self, band: str) -> str:
        """Validate band name and return band's url.

        Raises:
            InvalidBandName: If `band` is not in `self.bands`.

        """
        # TODO: allow B1 instead of SR_B1
        if band not in self.bands:
            raise InvalidBandName(f"{band} is not valid.\nValid bands: {self.bands}")

        prefix = self.prefix_pattern.format(**self.scene_params)
return f"{self._scheme}://{self.bucket}/{prefix}_{band}.TIF" | /rio_tiler_pds-0.10.1-py3-none-any.whl/rio_tiler_pds/landsat/aws/landsat_collection2.py | 0.808974 | 0.256299 | landsat_collection2.py | pypi |
import re
from typing import Any, Dict
from rio_tiler_pds.errors import InvalidSentinelSceneId
def s2_sceneid_parser(sceneid: str) -> Dict:
    """Parse a Sentinel-2 scene id.

    Three id flavours are supported: the legacy sceneid, the newer sceneid
    and the full product id.

    Args:
        sceneid (str): Sentinel-2 sceneid.

    Returns:
        dict: dictionary with metadata constructed from the sceneid.

    Raises:
        InvalidSentinelSceneId: If `sceneid` doesn't match the regex schema.

    Examples:
        >>> s2_sceneid_parser('S2A_L1C_20170729_19UDP_0')
        >>> s2_sceneid_parser('S2A_L2A_20170729_19UDP_0')
        >>> s2_sceneid_parser('S2A_29RKH_20200219_0_L2A')
        >>> s2_sceneid_parser('S2B_MSIL2A_20190730T190919_N0212_R056_T10UEU_20201005T200819')

    """
    # Each entry pairs a cheap validation regex with the named-group
    # pattern used to extract metadata for that id flavour.
    legacy_format = (
        "^S2[AB]_L[0-2][A-C]_[0-9]{8}_[0-9]{1,2}[A-Z]{3}_[0-9]{1,2}$",
        (
            r"^S"
            r"(?P<sensor>\w{1})"
            r"(?P<satellite>[AB]{1})"
            r"_"
            r"(?P<processingLevel>L[0-2][ABC])"
            r"_"
            r"(?P<acquisitionYear>[0-9]{4})"
            r"(?P<acquisitionMonth>[0-9]{2})"
            r"(?P<acquisitionDay>[0-9]{2})"
            r"_"
            r"(?P<utm>[0-9]{1,2})"
            r"(?P<lat>\w{1})"
            r"(?P<sq>\w{2})"
            r"_"
            r"(?P<num>[0-9]{1,2})$"
        ),
    )
    new_format = (
        r"^S2[AB]_[0-9]{1,2}[A-Z]{3}_[0-9]{8}_[0-9]{1,2}_L[0-2][A-C]$",
        (
            r"^S"
            r"(?P<sensor>\w{1})"
            r"(?P<satellite>[AB]{1})"
            r"_"
            r"(?P<utm>[0-9]{1,2})"
            r"(?P<lat>\w{1})"
            r"(?P<sq>\w{2})"
            r"_"
            r"(?P<acquisitionYear>[0-9]{4})"
            r"(?P<acquisitionMonth>[0-9]{2})"
            r"(?P<acquisitionDay>[0-9]{2})"
            r"_"
            r"(?P<num>[0-9]{1,2})"
            r"_"
            r"(?P<processingLevel>L[0-2][ABC])$"
        ),
    )
    product_format = (
        r"^S2[AB]_MSIL[0-2][ABC]_[0-9]{8}T[0-9]{6}_N[0-9]{4}_R[0-9]{3}_T[0-9A-Z]{5}_[0-9]{8}T[0-9]{6}$",
        (
            r"^S"
            r"(?P<sensor>\w{1})"
            r"(?P<satellite>[AB]{1})"
            r"_"
            r"MSI(?P<processingLevel>L[0-2][ABC])"
            r"_"
            r"(?P<acquisitionYear>[0-9]{4})"
            r"(?P<acquisitionMonth>[0-9]{2})"
            r"(?P<acquisitionDay>[0-9]{2})"
            r"T(?P<acquisitionHMS>[0-9]{6})"
            r"_"
            r"N(?P<baseline_number>[0-9]{4})"
            r"_"
            r"R(?P<relative_orbit>[0-9]{3})"
            r"_T"
            r"(?P<utm>[0-9]{2})"
            r"(?P<lat>\w{1})"
            r"(?P<sq>\w{2})"
            r"_"
            r"(?P<stopDateTime>[0-9]{8}T[0-9]{6})$"
        ),
    )

    pattern = None
    for validator, extraction in (legacy_format, new_format, product_format):
        if re.match(validator, sceneid):
            pattern = extraction
            break

    if pattern is None:
        raise InvalidSentinelSceneId("Could not match {}".format(sceneid))

    meta: Dict[str, Any] = re.match(pattern, sceneid, re.IGNORECASE).groupdict()  # type: ignore

    # Product ids carry no sequence number; default it.
    meta["num"] = meta.get("num") or "0"

    meta["scene"] = sceneid
    meta["date"] = "-".join(
        (meta["acquisitionYear"], meta["acquisitionMonth"], meta["acquisitionDay"])
    )
    # Zero-stripped / lower-cased variants used when building key prefixes.
    meta["_utm"] = meta["utm"].lstrip("0")
    meta["_month"] = meta["acquisitionMonth"].lstrip("0")
    meta["_day"] = meta["acquisitionDay"].lstrip("0")
    meta["_levelLow"] = meta["processingLevel"].lower()

    return meta
def s1_sceneid_parser(sceneid: str) -> Dict:
    """Parse Sentinel 1 scene id.

    Args:
        sceneid (str): Sentinel-1 sceneid.

    Returns:
        dict: dictionary with metadata constructed from the sceneid.

    Raises:
        InvalidSentinelSceneId: If `sceneid` doesn't match the regex schema.

    Examples:
        >>> s1_sceneid_parser('S1A_IW_GRDH_1SDV_20180716T004042_20180716T004107_022812_02792A_FD5B')

    """
    # Cheap shape check before extracting the named groups below.
    if not re.match(
        "^S1[AB]_(IW|EW|S[1-6])_[A-Z]{3}[FHM]_[0-9][SA][A-Z]{2}_[0-9]{8}T[0-9]{6}_[0-9]{8}T[0-9]{6}_[0-9A-Z]{6}_[0-9A-Z]{6}_[0-9A-Z]{4}$",
        sceneid,
    ):
        raise InvalidSentinelSceneId("Could not match {}".format(sceneid))

    sentinel_pattern = (
        r"^S"
        r"(?P<sensor>\w{1})"
        r"(?P<satellite>[AB]{1})"
        r"_"
        r"(?P<beam>(IW)|(EW)|(S[1-6]))"
        r"_"
        r"(?P<product>[A-Z]{3})"
        r"(?P<resolution>[FHM])"
        r"_"
        r"(?P<processing_level>[0-9])"
        r"(?P<product_class>[SA])"
        r"(?P<polarisation>(SH)|(SV)|(DH)|(DV))"
        r"_"
        r"(?P<startDateTime>[0-9]{8}T[0-9]{6})"
        r"_"
        r"(?P<stopDateTime>[0-9]{8}T[0-9]{6})"
        r"_"
        r"(?P<absolute_orbit>[0-9]{6})"
        r"_"
        r"(?P<mission_task>[0-9A-Z]{6})"
        r"_"
        r"(?P<product_id>[0-9A-Z]{4})$"
    )
    meta: Dict[str, Any] = re.match(  # type: ignore
        sentinel_pattern, sceneid, re.IGNORECASE
    ).groupdict()

    # Acquisition date components come from the start timestamp (YYYYMMDDThhmmss).
    meta["acquisitionYear"] = meta["startDateTime"][0:4]
    meta["acquisitionMonth"] = meta["startDateTime"][4:6]
    meta["acquisitionDay"] = meta["startDateTime"][6:8]

    meta["scene"] = sceneid
    meta["date"] = "{}-{}-{}".format(
        meta["acquisitionYear"], meta["acquisitionMonth"], meta["acquisitionDay"]
    )
    # Zero-stripped variants used when formatting key prefixes.
    meta["_month"] = meta["acquisitionMonth"].lstrip("0")
return meta | /rio_tiler_pds-0.10.1-py3-none-any.whl/rio_tiler_pds/sentinel/utils.py | 0.852721 | 0.332744 | utils.py | pypi |
import json
from typing import Dict, Tuple, Type
import attr
from morecantile import TileMatrixSet
from rasterio.features import bounds as featureBounds
from rio_tiler.constants import WEB_MERCATOR_TMS, WGS84_CRS
from rio_tiler.errors import InvalidBandName
from rio_tiler.io import MultiBandReader, Reader
from rio_tiler_pds.sentinel.utils import s1_sceneid_parser
from rio_tiler_pds.utils import fetch
def get_bounds(geom: Dict) -> Tuple[float, float, float, float]:
    """Get Bounds from GeoJSON geometry and handle multi polygon crossing the antimeridian line.

    ref: https://github.com/cogeotiff/rio-tiler-pds/issues/77
    """
    if geom["type"] != "MultiPolygon":
        return featureBounds(geom)

    # Bounds of each polygon part. Combining with max of the west edges /
    # min of the east edges (instead of the usual min/max) keeps the bbox
    # meaningful for footprints split across the antimeridian.
    per_polygon = [
        featureBounds({"type": "Polygon", "coordinates": coords})
        for coords in geom["coordinates"]
    ]
    west, south, east, north = zip(*per_polygon)
    return (max(west), min(south), min(east), max(north))
@attr.s
class S1L1CReader(MultiBandReader):
    """AWS Public Dataset Sentinel 1 reader.

    Args:
        input (str): Sentinel-1 sceneid.

    Attributes:
        minzoom (int): Dataset's Min Zoom level (default is 8).
        maxzoom (int): Dataset's Max Zoom level (default is 14).
        bands (tuple): list of available bands (default is ('vv', 'vh')).
        productInfo (dict): sentinel 1 productInfo.json content.
        datageom (dict): sentinel 1 data geometry.

    Examples:
        >>> with S1L1CReader('S1A_IW_GRDH_1SDV_20180716T004042_20180716T004107_022812_02792A_FD5B') as scene:
            print(scene.bounds)

    """

    input: str = attr.ib()
    tms: TileMatrixSet = attr.ib(default=WEB_MERCATOR_TMS)
    minzoom: int = attr.ib(default=8)
    maxzoom: int = attr.ib(default=14)

    reader: Type[Reader] = attr.ib(default=Reader)
    reader_options: Dict = attr.ib(factory=dict)

    # Populated in __attrs_post_init__ from the requester-pays bucket.
    productInfo: Dict = attr.ib(init=False)
    datageom: Dict = attr.ib(init=False)

    _scheme: str = "s3"
    bucket: str = attr.ib(default="sentinel-s1-l1c")
    prefix_pattern: str = attr.ib(
        default="{product}/{acquisitionYear}/{_month}/{_day}/{beam}/{polarisation}/{scene}"
    )
def __attrs_post_init__(self):
"""Fetch productInfo and get bounds."""
self.scene_params = s1_sceneid_parser(self.input)
if self.scene_params["polarisation"] == "DH":
self.bands = ("hh", "hv")
elif self.scene_params["polarisation"] == "DV":
self.bands = ("vv", "vh")
elif self.scene_params["polarisation"] == "SH":
self.bands = ("hh",)
elif self.scene_params["polarisation"] == "SV":
self.bands = ("vv",)
prefix = self.prefix_pattern.format(**self.scene_params)
self.productInfo = fetch(
f"s3://{self.bucket}/{prefix}/productInfo.json", request_pays=True
)
self.datageom = self.productInfo["footprint"]
self.bounds = get_bounds(self.datageom)
self.crs = WGS84_CRS
    def _get_band_url(self, band: str) -> str:
        """Validate band name and return band's url.

        Raises:
            InvalidBandName: If `band` is not one of the scene's bands.

        """
        if band not in self.bands:
            raise InvalidBandName(f"{band} is not valid")

        prefix = self.prefix_pattern.format(**self.scene_params)
return f"{self._scheme}://{self.bucket}/{prefix}/measurement/{self.scene_params['beam'].lower()}-{band}.tiff" | /rio_tiler_pds-0.10.1-py3-none-any.whl/rio_tiler_pds/sentinel/aws/sentinel1.py | 0.878581 | 0.365825 | sentinel1.py | pypi |
import json
import os
from collections import OrderedDict
from typing import Any, Dict, Sequence, Type, Union
import attr
from morecantile import TileMatrixSet
from rasterio.crs import CRS
from rasterio.features import bounds as featureBounds
from rio_tiler.constants import WEB_MERCATOR_TMS, WGS84_CRS
from rio_tiler.errors import InvalidBandName
from rio_tiler.io import MultiBandReader, Reader
from rio_tiler_pds.sentinel.utils import s2_sceneid_parser
from rio_tiler_pds.utils import fetch
# Default spectral band names exposed for Sentinel-2 L1C scenes.
default_l1c_bands = (
    "B01",
    "B02",
    "B03",
    "B04",
    "B05",
    "B06",
    "B07",
    "B08",
    "B09",
    "B11",
    "B12",
    "B8A",
)
@attr.s
class S2L1CReader(MultiBandReader):
    """AWS Public Dataset Sentinel 2 L1C reader.

    Args:
        input (str): Sentinel-2 L1C sceneid.

    Attributes:
        minzoom (int): Dataset's Min Zoom level (default is 8).
        maxzoom (int): Dataset's Max Zoom level (default is 14).
        scene_params (dict): scene id parameters.
        bands (tuple): list of available bands (default is ('B01', 'B02', 'B03', 'B04', 'B05', 'B06', 'B07', 'B08', 'B09', 'B11', 'B12', 'B8A')).
        tileInfo (dict): sentinel 2 tileInfo.json content.
        datageom (dict): sentinel 2 data geometry.

    Examples:
        >>> with S2L1CReader('S2A_L1C_20170729_19UDP_0') as scene:
            print(scene.bounds)

    """

    input: str = attr.ib()
    tms: TileMatrixSet = attr.ib(default=WEB_MERCATOR_TMS)
    minzoom: int = attr.ib(default=8)
    maxzoom: int = attr.ib(default=14)

    reader: Type[Reader] = attr.ib(default=Reader)
    # Use a factory so each instance gets its own options dict: a literal
    # `default={...}` would be one shared, mutable dict across all instances.
    reader_options: Dict = attr.ib(factory=lambda: {"options": {"nodata": 0}})

    bands: Sequence[str] = attr.ib(init=False, default=default_l1c_bands)

    # Populated in __attrs_post_init__ from the requester-pays bucket.
    tileInfo: Dict = attr.ib(init=False)
    datageom: Dict = attr.ib(init=False)

    _scheme: str = "s3"
    bucket: str = attr.ib(default="sentinel-s2-l1c")
    prefix_pattern: str = attr.ib(
        default="tiles/{_utm}/{lat}/{sq}/{acquisitionYear}/{_month}/{_day}/{num}"
    )

    def __attrs_post_init__(self):
        """Fetch tileInfo.json and derive bounds/CRS from the data geometry."""
        self.scene_params = s2_sceneid_parser(self.input)

        prefix = self.prefix_pattern.format(**self.scene_params)
        self.tileInfo = fetch(
            f"s3://{self.bucket}/{prefix}/tileInfo.json", request_pays=True
        )
        self.datageom = self.tileInfo["tileDataGeometry"]
        self.bounds = featureBounds(self.datageom)
        self.crs = CRS.from_user_input(self.datageom["crs"]["properties"]["name"])

    def _get_band_url(self, band: str) -> str:
        """Validate band name and return band's url.

        Raises:
            InvalidBandName: If the (normalized) band is not available.

        """
        # Expand short aliases such as "B1" -> "B01" ("B8A" is already 3 chars).
        band = band if len(band) == 3 else f"B0{band[-1]}"

        if band not in self.bands:
            raise InvalidBandName(f"{band} is not valid.\nValid bands: {self.bands}")

        prefix = self.prefix_pattern.format(**self.scene_params)
        return f"{self._scheme}://{self.bucket}/{prefix}/{band}.jp2"
# Sentinel-2 L2A spectral bands grouped by the resolution (meters) whose
# R{res}m/ prefix stores them on S3.
SENTINEL_L2_BANDS = OrderedDict(
    [
        ("10", ["B02", "B03", "B04", "B08"]),
        ("20", ["B02", "B03", "B04", "B05", "B06", "B07", "B08", "B11", "B12", "B8A"]),
        (
            "60",
            [
                "B01",
                "B02",
                "B03",
                "B04",
                "B05",
                "B06",
                "B07",
                "B08",
                "B09",
                "B11",
                "B12",
                "B8A",
            ],
        ),
    ]
)

# Derived L2A products available per resolution.
SENTINEL_L2_PRODUCTS = OrderedDict(
    [
        ("10", ["AOT", "WVP"]),
        ("20", ["AOT", "SCL", "WVP"]),
        ("60", ["AOT", "SCL", "WVP"]),
    ]
)

# STAC < 1.0.0
default_l2a_bands = (
    "B01",
    "B02",
    "B03",
    "B04",
    "B05",
    "B06",
    "B07",
    "B08",
    "B09",
    "B11",
    "B12",
    "B8A",
    # "AOT",
    # "SCL",
    # "WVP",
)

# https://github.com/cogeotiff/rio-tiler-pds/issues/63
# STAC >= 1.0.0
# Maps band ids (used as asset keys by older STAC items) to the common
# names used as asset keys by STAC >= 1.0.0 items.
sentinel_l2a_band_map = {
    "B01": "coastal",
    "B02": "blue",
    "B03": "green",
    "B04": "red",
    "B05": "rededge1",
    "B06": "rededge2",
    "B07": "rededge3",
    "B08": "nir",
    "B8A": "nir08",
    "B09": "nir09",
    "B11": "swir16",
    "B12": "swir22",
}
@attr.s
class S2L2AReader(S2L1CReader):
    """AWS Public Dataset Sentinel 2 L2A reader.

    Args:
        input (str): Sentinel-2 L2A sceneid.

    Attributes:
        bands (tuple): list of available bands (default is ('B01', 'B02', 'B03', 'B04', 'B05', 'B06', 'B07', 'B08', 'B09', 'B11', 'B12', 'B8A')).

    Examples:
        >>> with S2L2AReader('S2A_L2A_20170729_19UDP_0') as scene:
            print(scene.bounds)

    """

    bands: Sequence[str] = attr.ib(init=False, default=default_l2a_bands)

    _scheme: str = "s3"
    bucket: str = attr.ib(default="sentinel-s2-l2a")
    prefix_pattern: str = attr.ib(
        default="tiles/{_utm}/{lat}/{sq}/{acquisitionYear}/{_month}/{_day}/{num}"
    )

    def _get_resolution(self, band: str) -> str:
        """Return the L2A resolution prefix under which `band` is stored.

        Raises:
            ValueError: If no resolution lists the band.

        """
        lookup_tables = [SENTINEL_L2_PRODUCTS]
        if band.startswith("B"):
            # Spectral bands are searched first; the finest resolution
            # listing the band wins (tables are ordered 10 -> 20 -> 60).
            lookup_tables.insert(0, SENTINEL_L2_BANDS)

        for table in lookup_tables:
            for resolution, names in table.items():
                if band in names:
                    return resolution

        raise ValueError(f"Couldn't find resolution for Band {band}")

    def _get_band_url(self, band: str) -> str:
        """Validate band name and return band's url.

        Raises:
            InvalidBandName: If the (normalized) band is not available.

        """
        if len(band) != 3:
            # Expand short aliases such as "B1" -> "B01".
            band = f"B0{band[-1]}"

        if band not in self.bands:
            raise InvalidBandName(f"{band} is not valid.\nValid bands: {self.bands}")

        resolution = self._get_resolution(band)
        prefix = self.prefix_pattern.format(**self.scene_params)
        return f"{self._scheme}://{self.bucket}/{prefix}/R{resolution}m/{band}.jp2"
@attr.s
class S2L2ACOGReader(MultiBandReader):
    """AWS Public Dataset Sentinel 2 L2A COGS reader.

    Args:
        input (str): Sentinel-2 sceneid.

    Attributes:
        minzoom (int): Dataset's Min Zoom level (default is 8).
        maxzoom (int): Dataset's Max Zoom level (default is 14).
        scene_params (dict): scene id parameters.
        bands (tuple): list of available bands (defined by the STAC item.json).
        stac_item (dict): sentinel 2 COG STAC item content.

    Examples:
        >>> with S2L2ACOGReader('S2A_29RKH_20200219_0_L2A') as scene:
            print(scene.bounds)

    """

    input: str = attr.ib()
    tms: TileMatrixSet = attr.ib(default=WEB_MERCATOR_TMS)
    minzoom: int = attr.ib(default=8)
    maxzoom: int = attr.ib(default=14)

    reader: Type[Reader] = attr.ib(default=Reader)
    reader_options: Dict = attr.ib(factory=dict)

    stac_item: Dict = attr.ib(init=False)

    _scheme: str = "s3"
    bucket: str = attr.ib(default="sentinel-cogs")
    prefix_pattern: str = attr.ib(
        default="sentinel-s2-{_levelLow}-cogs/{_utm}/{lat}/{sq}/{acquisitionYear}/{_month}/S{sensor}{satellite}_{_utm}{lat}{sq}_{acquisitionYear}{acquisitionMonth}{acquisitionDay}_{num}_{processingLevel}"
    )

    def __attrs_post_init__(self):
        """Fetch item.json and get bounds and bands."""
        self.scene_params = s2_sceneid_parser(self.input)

        cog_sceneid = "S{sensor}{satellite}_{_utm}{lat}{sq}_{acquisitionYear}{acquisitionMonth}{acquisitionDay}_{num}_{processingLevel}".format(
            **self.scene_params
        )
        prefix = self.prefix_pattern.format(**self.scene_params)
        try:
            self.stac_item = fetch(
                f"https://{self.bucket}.s3.us-west-2.amazonaws.com/{prefix}/{cog_sceneid}.json"
            )
        except Exception:
            # Fall back to direct S3 access when the public HTTP endpoint
            # fails. Narrowed from a bare `except:` so KeyboardInterrupt /
            # SystemExit are no longer swallowed.
            self.stac_item = fetch(f"s3://{self.bucket}/{prefix}/{cog_sceneid}.json")

        self.bounds = self.stac_item["bbox"]
        self.crs = WGS84_CRS

        # Asset keys changed with STAC 1.0: old items key assets by band id
        # (e.g. "B02"), newer items by common name (e.g. "blue").
        if self.stac_item["stac_version"] == "1.0.0-beta.2":
            self.bands = tuple(
                band for band in default_l2a_bands if band in self.stac_item["assets"]
            )
        else:
            self.bands = tuple(
                band
                for band, name in sentinel_l2a_band_map.items()
                if name in self.stac_item["assets"]
            )

    def _get_band_url(self, band: str) -> str:
        """Validate band name and return band's url.

        Raises:
            InvalidBandName: If the (normalized) band is not available.

        """
        # Expand short aliases such as "B1" -> "B01".
        band = band if len(band) == 3 else f"B0{band[-1]}"

        if band not in self.bands:
            raise InvalidBandName(f"{band} is not valid.\nValid bands: {self.bands}")

        prefix = self.prefix_pattern.format(**self.scene_params)
        return f"{self._scheme}://{self.bucket}/{prefix}/{band}.tif"
def S2COGReader(sceneid: str, **kwargs: Any) -> S2L2ACOGReader:
    """Sentinel-2 COG reader factory.

    Args:
        sceneid (str): Sentinel-2 sceneid.
        **kwargs (Any): keyword arguments forwarded to the reader.

    Returns:
        S2L2ACOGReader: reader for the L2A COG archive.

    Raises:
        ValueError: For processing levels other than L2A.

    """
    scene_params = s2_sceneid_parser(sceneid)
    level = scene_params["processingLevel"]
    if level != "L2A":
        # ValueError (a subclass of Exception) instead of the generic
        # Exception previously raised, so callers can catch it precisely;
        # existing `except Exception` handlers still work.
        raise ValueError(f"{level} is not supported")

    return S2L2ACOGReader(sceneid, **kwargs)
def S2JP2Reader(sceneid: str, **kwargs: Any) -> Union[S2L2AReader, S2L1CReader]:
    """Sentinel-2 JPEG2000 readers.

    Args:
        sceneid (str): Sentinel-2 sceneid.
        **kwargs (Any): keyword arguments forwarded to the reader.

    Returns:
        S2L2AReader or S2L1CReader: reader matching the scene's processing level.

    """
    scene_params = s2_sceneid_parser(sceneid)
    level = scene_params["processingLevel"]
    # Dispatch on the processing level encoded in the sceneid.
    if level == "L2A":
        return S2L2AReader(sceneid, **kwargs)
    elif level == "L1C":
        return S2L1CReader(sceneid, **kwargs)
    else:
raise Exception(f"{level} is not supported") | /rio_tiler_pds-0.10.1-py3-none-any.whl/rio_tiler_pds/sentinel/aws/sentinel2.py | 0.844858 | 0.277987 | sentinel2.py | pypi |
from typing import Tuple
from rio_tiler.errors import RioTilerError
# Raised for (horizontal, vertical) grid ids not present in MODLAND_GRID below.
class InvalidModlandGridID(RioTilerError):
    """Invalid MODLAND grid id."""
# Only non-fill tiles (460)
# format:
# horizontal_grid, vertical_grid, bbox(xmin, ymin, xmax, ymax)
MODLAND_GRID = [
("14", "00", (-180.0, 80.0, -172.7151, 80.4083)),
("15", "00", (-180.0, 80.0, -115.1274, 83.625)),
("16", "00", (-180.0, 80.0, -57.5397, 86.8167)),
("17", "00", (-180.0, 80.0, 57.2957, 90.0)),
("18", "00", (-0.004, 80.0, 180.0, 90.0)),
("19", "00", (57.5877, 80.0, 180.0, 86.8167)),
("20", "00", (115.1754, 80.0, 180.0, 83.625)),
("21", "00", (172.7631, 80.0, 180.0, 80.4083)),
("11", "01", (-180.0, 70.0, -175.4039, 70.5333)),
("12", "01", (-180.0, 70.0, -146.1659, 73.875)),
("13", "01", (-180.0, 70.0, -116.9278, 77.1667)),
("14", "01", (-180.0, 70.0, -87.6898, 80.0)),
("15", "01", (-172.7631, 70.0, -58.4517, 80.0)),
("16", "01", (-115.1754, 70.0, -29.2137, 80.0)),
("17", "01", (-57.5877, 70.0, 0.048, 80.0)),
("18", "01", (0.0, 70.0, 57.6357, 80.0)),
("19", "01", (29.238, 70.0, 115.2234, 80.0)),
("20", "01", (58.4761, 70.0, 172.8111, 80.0)),
("21", "01", (87.7141, 70.0, 180.0, 80.0)),
("22", "01", (116.9522, 70.0, 180.0, 77.1583)),
("23", "01", (146.1902, 70.0, 180.0, 73.875)),
("24", "01", (175.4283, 70.0, 180.0, 70.5333)),
("09", "02", (-180.0, 60.0, -159.9833, 63.6167)),
("10", "02", (-180.0, 60.0, -139.9833, 67.1167)),
("11", "02", (-180.0, 60.0, -119.9833, 70.0)),
("12", "02", (-175.4283, 60.0, -99.9833, 70.0)),
("13", "02", (-146.1902, 60.0, -79.9833, 70.0)),
("14", "02", (-116.9522, 60.0, -59.9833, 70.0)),
("15", "02", (-87.7141, 60.0, -39.9833, 70.0)),
("16", "02", (-58.4761, 60.0, -19.9833, 70.0)),
("17", "02", (-29.238, 60.0, 0.0244, 70.0)),
("18", "02", (0.0, 60.0, 29.2624, 70.0)),
("19", "02", (20.0, 60.0, 58.5005, 70.0)),
("20", "02", (40.0, 60.0, 87.7385, 70.0)),
("21", "02", (60.0, 60.0, 116.9765, 70.0)),
("22", "02", (80.0, 60.0, 146.2146, 70.0)),
("23", "02", (100.0, 60.0, 175.4526, 70.0)),
("24", "02", (120.0, 60.0, 180.0, 70.0)),
("25", "02", (140.0, 60.0, 180.0, 67.1167)),
("26", "02", (160.0, 60.0, 180.0, 63.6167)),
("06", "03", (-180.0, 50.0, -171.1167, 52.3333)),
("07", "03", (-180.0, 50.0, -155.5594, 56.2583)),
("08", "03", (-180.0, 50.0, -140.0022, 60.0)),
("09", "03", (-180.0, 50.0, -124.4449, 60.0)),
("10", "03", (-160.0, 50.0, -108.8877, 60.0)),
("11", "03", (-140.0, 50.0, -93.3305, 60.0)),
("12", "03", (-120.0, 50.0, -77.7732, 60.0)),
("13", "03", (-100.0, 50.0, -62.216, 60.0)),
("14", "03", (-80.0, 50.0, -46.6588, 60.0)),
("15", "03", (-60.0, 50.0, -31.1015, 60.0)),
("16", "03", (-40.0, 50.0, -15.5443, 60.0)),
("17", "03", (-20.0, 50.0, 0.0167, 60.0)),
("18", "03", (0.0, 50.0, 20.0167, 60.0)),
("19", "03", (15.5572, 50.0, 40.0167, 60.0)),
("20", "03", (31.1145, 50.0, 60.0167, 60.0)),
("21", "03", (46.6717, 50.0, 80.0167, 60.0)),
("22", "03", (62.229, 50.0, 100.0167, 60.0)),
("23", "03", (77.7862, 50.0, 120.0167, 60.0)),
("24", "03", (93.3434, 50.0, 140.0167, 60.0)),
("25", "03", (108.9007, 50.0, 160.0167, 60.0)),
("26", "03", (124.4579, 50.0, 180.0, 60.0)),
("27", "03", (140.0151, 50.0, 180.0, 60.0)),
("28", "03", (155.5724, 50.0, 180.0, 56.25)),
("29", "03", (171.1296, 50.0, 180.0, 52.3333)),
("04", "04", (-180.0, 40.0, -169.6921, 43.7667)),
("05", "04", (-180.0, 40.0, -156.638, 48.1917)),
("06", "04", (-180.0, 40.0, -143.5839, 50.0)),
("07", "04", (-171.1296, 40.0, -130.5299, 50.0)),
("08", "04", (-155.5724, 40.0, -117.4758, 50.0)),
("09", "04", (-140.0151, 40.0, -104.4217, 50.0)),
("10", "04", (-124.4579, 40.0, -91.3676, 50.0)),
("11", "04", (-108.9007, 40.0, -78.3136, 50.0)),
("12", "04", (-93.3434, 40.0, -65.2595, 50.0)),
("13", "04", (-77.7862, 40.0, -52.2054, 50.0)),
("14", "04", (-62.229, 40.0, -39.1513, 50.0)),
("15", "04", (-46.6717, 40.0, -26.0973, 50.0)),
("16", "04", (-31.1145, 40.0, -13.0432, 50.0)),
("17", "04", (-15.5572, 40.0, 0.013, 50.0)),
("18", "04", (0.0, 40.0, 15.5702, 50.0)),
("19", "04", (13.0541, 40.0, 31.1274, 50.0)),
("20", "04", (26.1081, 40.0, 46.6847, 50.0)),
("21", "04", (39.1622, 40.0, 62.2419, 50.0)),
("22", "04", (52.2163, 40.0, 77.7992, 50.0)),
("23", "04", (65.2704, 40.0, 93.3564, 50.0)),
("24", "04", (78.3244, 40.0, 108.9136, 50.0)),
("25", "04", (91.3785, 40.0, 124.4709, 50.0)),
("26", "04", (104.4326, 40.0, 140.0281, 50.0)),
("27", "04", (117.4867, 40.0, 155.5853, 50.0)),
("28", "04", (130.5407, 40.0, 171.1426, 50.0)),
("29", "04", (143.5948, 40.0, 180.0, 50.0)),
("30", "04", (156.6489, 40.0, 180.0, 48.1917)),
("31", "04", (169.7029, 40.0, 180.0, 43.7583)),
("02", "05", (-180.0, 30.0, -173.1955, 33.5583)),
("03", "05", (-180.0, 30.0, -161.6485, 38.95)),
("04", "05", (-180.0, 30.0, -150.1014, 40.0)),
("05", "05", (-169.7029, 30.0, -138.5544, 40.0)),
("06", "05", (-156.6489, 30.0, -127.0074, 40.0)),
("07", "05", (-143.5948, 30.0, -115.4604, 40.0)),
("08", "05", (-130.5407, 30.0, -103.9134, 40.0)),
("09", "05", (-117.4867, 30.0, -92.3664, 40.0)),
("10", "05", (-104.4326, 30.0, -80.8194, 40.0)),
("11", "05", (-91.3785, 30.0, -69.2724, 40.0)),
("12", "05", (-78.3244, 30.0, -57.7254, 40.0)),
("13", "05", (-65.2704, 30.0, -46.1784, 40.0)),
("14", "05", (-52.2163, 30.0, -34.6314, 40.0)),
("15", "05", (-39.1622, 30.0, -23.0844, 40.0)),
("16", "05", (-26.1081, 30.0, -11.5374, 40.0)),
("17", "05", (-13.0541, 30.0, 0.0109, 40.0)),
("18", "05", (0.0, 30.0, 13.065, 40.0)),
("19", "05", (11.547, 30.0, 26.119, 40.0)),
("20", "05", (23.094, 30.0, 39.1731, 40.0)),
("21", "05", (34.641, 30.0, 52.2272, 40.0)),
("22", "05", (46.188, 30.0, 65.2812, 40.0)),
("23", "05", (57.735, 30.0, 78.3353, 40.0)),
("24", "05", (69.282, 30.0, 91.3894, 40.0)),
("25", "05", (80.829, 30.0, 104.4435, 40.0)),
("26", "05", (92.376, 30.0, 117.4975, 40.0)),
("27", "05", (103.923, 30.0, 130.5516, 40.0)),
("28", "05", (115.4701, 30.0, 143.6057, 40.0)),
("29", "05", (127.0171, 30.0, 156.6598, 40.0)),
("30", "05", (138.5641, 30.0, 169.7138, 40.0)),
("31", "05", (150.1111, 30.0, 180.0, 40.0)),
("32", "05", (161.6581, 30.0, 180.0, 38.9417)),
("33", "05", (173.2051, 30.0, 180.0, 33.5583)),
("01", "06", (-180.0, 20.0, -170.2596, 27.2667)),
("02", "06", (-180.0, 20.0, -159.6178, 30.0)),
("03", "06", (-173.2051, 20.0, -148.976, 30.0)),
("04", "06", (-161.6581, 20.0, -138.3342, 30.0)),
("05", "06", (-150.1111, 20.0, -127.6925, 30.0)),
("06", "06", (-138.5641, 20.0, -117.0507, 30.0)),
("07", "06", (-127.0171, 20.0, -106.4089, 30.0)),
("08", "06", (-115.4701, 20.0, -95.7671, 30.0)),
("09", "06", (-103.923, 20.0, -85.1254, 30.0)),
("10", "06", (-92.376, 20.0, -74.4836, 30.0)),
("11", "06", (-80.829, 20.0, -63.8418, 30.0)),
("12", "06", (-69.282, 20.0, -53.2, 30.0)),
("13", "06", (-57.735, 20.0, -42.5582, 30.0)),
("14", "06", (-46.188, 20.0, -31.9165, 30.0)),
("15", "06", (-34.641, 20.0, -21.2747, 30.0)),
("16", "06", (-23.094, 20.0, -10.6329, 30.0)),
("17", "06", (-11.547, 20.0, 0.0096, 30.0)),
("18", "06", (0.0, 20.0, 11.5566, 30.0)),
("19", "06", (10.6418, 20.0, 23.1036, 30.0)),
("20", "06", (21.2836, 20.0, 34.6506, 30.0)),
("21", "06", (31.9253, 20.0, 46.1976, 30.0)),
("22", "06", (42.5671, 20.0, 57.7446, 30.0)),
("23", "06", (53.2089, 20.0, 69.2917, 30.0)),
("24", "06", (63.8507, 20.0, 80.8387, 30.0)),
("25", "06", (74.4924, 20.0, 92.3857, 30.0)),
("26", "06", (85.1342, 20.0, 103.9327, 30.0)),
("27", "06", (95.776, 20.0, 115.4797, 30.0)),
("28", "06", (106.4178, 20.0, 127.0267, 30.0)),
("29", "06", (117.0596, 20.0, 138.5737, 30.0)),
("30", "06", (127.7013, 20.0, 150.1207, 30.0)),
("31", "06", (138.3431, 20.0, 161.6677, 30.0)),
("32", "06", (148.9849, 20.0, 173.2147, 30.0)),
("33", "06", (159.6267, 20.0, 180.0, 30.0)),
("34", "06", (170.2684, 20.0, 180.0, 27.2667)),
("00", "07", (-180.0, 10.0, -172.6141, 19.1917)),
("01", "07", (-180.0, 10.0, -162.4598, 20.0)),
("02", "07", (-170.2684, 10.0, -152.3055, 20.0)),
("03", "07", (-159.6267, 10.0, -142.1513, 20.0)),
("04", "07", (-148.9849, 10.0, -131.997, 20.0)),
("05", "07", (-138.3431, 10.0, -121.8427, 20.0)),
("06", "07", (-127.7013, 10.0, -111.6885, 20.0)),
("07", "07", (-117.0596, 10.0, -101.5342, 20.0)),
("08", "07", (-106.4178, 10.0, -91.3799, 20.0)),
("09", "07", (-95.776, 10.0, -81.2257, 20.0)),
("10", "07", (-85.1342, 10.0, -71.0714, 20.0)),
("11", "07", (-74.4924, 10.0, -60.9171, 20.0)),
("12", "07", (-63.8507, 10.0, -50.7629, 20.0)),
("13", "07", (-53.2089, 10.0, -40.6086, 20.0)),
("14", "07", (-42.5671, 10.0, -30.4543, 20.0)),
("15", "07", (-31.9253, 10.0, -20.3001, 20.0)),
("16", "07", (-21.2836, 10.0, -10.1458, 20.0)),
("17", "07", (-10.6418, 10.0, 0.0089, 20.0)),
("18", "07", (0.0, 10.0, 10.6506, 20.0)),
("19", "07", (10.1543, 10.0, 21.2924, 20.0)),
("20", "07", (20.3085, 10.0, 31.9342, 20.0)),
("21", "07", (30.4628, 10.0, 42.576, 20.0)),
("22", "07", (40.6171, 10.0, 53.2178, 20.0)),
("23", "07", (50.7713, 10.0, 63.8595, 20.0)),
("24", "07", (60.9256, 10.0, 74.5013, 20.0)),
("25", "07", (71.0799, 10.0, 85.1431, 20.0)),
("26", "07", (81.2341, 10.0, 95.7849, 20.0)),
("27", "07", (91.3884, 10.0, 106.4266, 20.0)),
("28", "07", (101.5427, 10.0, 117.0684, 20.0)),
("29", "07", (111.6969, 10.0, 127.7102, 20.0)),
("30", "07", (121.8512, 10.0, 138.352, 20.0)),
("31", "07", (132.0055, 10.0, 148.9938, 20.0)),
("32", "07", (142.1597, 10.0, 159.6355, 20.0)),
("33", "07", (152.314, 10.0, 170.2773, 20.0)),
("34", "07", (162.4683, 10.0, 180.0, 20.0)),
("35", "07", (172.6225, 10.0, 180.0, 19.1833)),
("00", "08", (-180.0, -0.0, -169.9917, 10.0)),
("01", "08", (-172.6225, -0.0, -159.9917, 10.0)),
("02", "08", (-162.4683, -0.0, -149.9917, 10.0)),
("03", "08", (-152.314, -0.0, -139.9917, 10.0)),
("04", "08", (-142.1597, -0.0, -129.9917, 10.0)),
("05", "08", (-132.0055, -0.0, -119.9917, 10.0)),
("06", "08", (-121.8512, -0.0, -109.9917, 10.0)),
("07", "08", (-111.6969, -0.0, -99.9917, 10.0)),
("08", "08", (-101.5427, -0.0, -89.9917, 10.0)),
("09", "08", (-91.3884, -0.0, -79.9917, 10.0)),
("10", "08", (-81.2341, -0.0, -69.9917, 10.0)),
("11", "08", (-71.0799, -0.0, -59.9917, 10.0)),
("12", "08", (-60.9256, -0.0, -49.9917, 10.0)),
("13", "08", (-50.7713, -0.0, -39.9917, 10.0)),
("14", "08", (-40.6171, -0.0, -29.9917, 10.0)),
("15", "08", (-30.4628, -0.0, -19.9917, 10.0)),
("16", "08", (-20.3085, -0.0, -9.9917, 10.0)),
("17", "08", (-10.1543, -0.0, 0.0085, 10.0)),
("18", "08", (0.0, -0.0, 10.1627, 10.0)),
("19", "08", (10.0, -0.0, 20.317, 10.0)),
("20", "08", (20.0, -0.0, 30.4713, 10.0)),
("21", "08", (30.0, -0.0, 40.6255, 10.0)),
("22", "08", (40.0, -0.0, 50.7798, 10.0)),
("23", "08", (50.0, -0.0, 60.9341, 10.0)),
("24", "08", (60.0, -0.0, 71.0883, 10.0)),
("25", "08", (70.0, -0.0, 81.2426, 10.0)),
("26", "08", (80.0, -0.0, 91.3969, 10.0)),
("27", "08", (90.0, -0.0, 101.5511, 10.0)),
("28", "08", (100.0, -0.0, 111.7054, 10.0)),
("29", "08", (110.0, -0.0, 121.8597, 10.0)),
("30", "08", (120.0, -0.0, 132.0139, 10.0)),
("31", "08", (130.0, -0.0, 142.1682, 10.0)),
("32", "08", (140.0, -0.0, 152.3225, 10.0)),
("33", "08", (150.0, -0.0, 162.4767, 10.0)),
("34", "08", (160.0, -0.0, 172.631, 10.0)),
("35", "08", (170.0, -0.0, 180.0, 10.0)),
("00", "09", (-180.0, -10.0, -169.9917, -0.0)),
("01", "09", (-172.6225, -10.0, -159.9917, -0.0)),
("02", "09", (-162.4683, -10.0, -149.9917, -0.0)),
("03", "09", (-152.314, -10.0, -139.9917, -0.0)),
("04", "09", (-142.1597, -10.0, -129.9917, -0.0)),
("05", "09", (-132.0055, -10.0, -119.9917, -0.0)),
("06", "09", (-121.8512, -10.0, -109.9917, -0.0)),
("07", "09", (-111.6969, -10.0, -99.9917, -0.0)),
("08", "09", (-101.5427, -10.0, -89.9917, -0.0)),
("09", "09", (-91.3884, -10.0, -79.9917, -0.0)),
("10", "09", (-81.2341, -10.0, -69.9917, -0.0)),
("11", "09", (-71.0799, -10.0, -59.9917, -0.0)),
("12", "09", (-60.9256, -10.0, -49.9917, -0.0)),
("13", "09", (-50.7713, -10.0, -39.9917, -0.0)),
("14", "09", (-40.6171, -10.0, -29.9917, -0.0)),
("15", "09", (-30.4628, -10.0, -19.9917, -0.0)),
("16", "09", (-20.3085, -10.0, -9.9917, -0.0)),
("17", "09", (-10.1543, -10.0, 0.0085, -0.0)),
("18", "09", (0.0, -10.0, 10.1627, -0.0)),
("19", "09", (10.0, -10.0, 20.317, -0.0)),
("20", "09", (20.0, -10.0, 30.4713, -0.0)),
("21", "09", (30.0, -10.0, 40.6255, -0.0)),
("22", "09", (40.0, -10.0, 50.7798, -0.0)),
("23", "09", (50.0, -10.0, 60.9341, -0.0)),
("24", "09", (60.0, -10.0, 71.0883, -0.0)),
("25", "09", (70.0, -10.0, 81.2426, -0.0)),
("26", "09", (80.0, -10.0, 91.3969, -0.0)),
("27", "09", (90.0, -10.0, 101.5511, -0.0)),
("28", "09", (100.0, -10.0, 111.7054, -0.0)),
("29", "09", (110.0, -10.0, 121.8597, -0.0)),
("30", "09", (120.0, -10.0, 132.0139, -0.0)),
("31", "09", (130.0, -10.0, 142.1682, -0.0)),
("32", "09", (140.0, -10.0, 152.3225, -0.0)),
("33", "09", (150.0, -10.0, 162.4767, -0.0)),
("34", "09", (160.0, -10.0, 172.631, -0.0)),
("35", "09", (170.0, -10.0, 180.0, -0.0)),
("00", "10", (-180.0, -19.1917, -172.6141, -10.0)),
("01", "10", (-180.0, -20.0, -162.4598, -10.0)),
("02", "10", (-170.2684, -20.0, -152.3055, -10.0)),
("03", "10", (-159.6267, -20.0, -142.1513, -10.0)),
("04", "10", (-148.9849, -20.0, -131.997, -10.0)),
("05", "10", (-138.3431, -20.0, -121.8427, -10.0)),
("06", "10", (-127.7013, -20.0, -111.6885, -10.0)),
("07", "10", (-117.0596, -20.0, -101.5342, -10.0)),
("08", "10", (-106.4178, -20.0, -91.3799, -10.0)),
("09", "10", (-95.776, -20.0, -81.2257, -10.0)),
("10", "10", (-85.1342, -20.0, -71.0714, -10.0)),
("11", "10", (-74.4924, -20.0, -60.9171, -10.0)),
("12", "10", (-63.8507, -20.0, -50.7629, -10.0)),
("13", "10", (-53.2089, -20.0, -40.6086, -10.0)),
("14", "10", (-42.5671, -20.0, -30.4543, -10.0)),
("15", "10", (-31.9253, -20.0, -20.3001, -10.0)),
("16", "10", (-21.2836, -20.0, -10.1458, -10.0)),
("17", "10", (-10.6418, -20.0, 0.0089, -10.0)),
("18", "10", (0.0, -20.0, 10.6506, -10.0)),
("19", "10", (10.1543, -20.0, 21.2924, -10.0)),
("20", "10", (20.3085, -20.0, 31.9342, -10.0)),
("21", "10", (30.4628, -20.0, 42.576, -10.0)),
("22", "10", (40.6171, -20.0, 53.2178, -10.0)),
("23", "10", (50.7713, -20.0, 63.8595, -10.0)),
("24", "10", (60.9256, -20.0, 74.5013, -10.0)),
("25", "10", (71.0799, -20.0, 85.1431, -10.0)),
("26", "10", (81.2341, -20.0, 95.7849, -10.0)),
("27", "10", (91.3884, -20.0, 106.4266, -10.0)),
("28", "10", (101.5427, -20.0, 117.0684, -10.0)),
("29", "10", (111.6969, -20.0, 127.7102, -10.0)),
("30", "10", (121.8512, -20.0, 138.352, -10.0)),
("31", "10", (132.0055, -20.0, 148.9938, -10.0)),
("32", "10", (142.1597, -20.0, 159.6355, -10.0)),
("33", "10", (152.314, -20.0, 170.2773, -10.0)),
("34", "10", (162.4683, -20.0, 180.0, -10.0)),
("35", "10", (172.6225, -19.1833, 180.0, -10.0)),
("01", "11", (-180.0, -27.2667, -170.2596, -20.0)),
("02", "11", (-180.0, -30.0, -159.6178, -20.0)),
("03", "11", (-173.2051, -30.0, -148.976, -20.0)),
("04", "11", (-161.6581, -30.0, -138.3342, -20.0)),
("05", "11", (-150.1111, -30.0, -127.6925, -20.0)),
("06", "11", (-138.5641, -30.0, -117.0507, -20.0)),
("07", "11", (-127.0171, -30.0, -106.4089, -20.0)),
("08", "11", (-115.4701, -30.0, -95.7671, -20.0)),
("09", "11", (-103.923, -30.0, -85.1254, -20.0)),
("10", "11", (-92.376, -30.0, -74.4836, -20.0)),
("11", "11", (-80.829, -30.0, -63.8418, -20.0)),
("12", "11", (-69.282, -30.0, -53.2, -20.0)),
("13", "11", (-57.735, -30.0, -42.5582, -20.0)),
("14", "11", (-46.188, -30.0, -31.9165, -20.0)),
("15", "11", (-34.641, -30.0, -21.2747, -20.0)),
("16", "11", (-23.094, -30.0, -10.6329, -20.0)),
("17", "11", (-11.547, -30.0, 0.0096, -20.0)),
("18", "11", (0.0, -30.0, 11.5566, -20.0)),
("19", "11", (10.6418, -30.0, 23.1036, -20.0)),
("20", "11", (21.2836, -30.0, 34.6506, -20.0)),
("21", "11", (31.9253, -30.0, 46.1976, -20.0)),
("22", "11", (42.5671, -30.0, 57.7446, -20.0)),
("23", "11", (53.2089, -30.0, 69.2917, -20.0)),
("24", "11", (63.8507, -30.0, 80.8387, -20.0)),
("25", "11", (74.4924, -30.0, 92.3857, -20.0)),
("26", "11", (85.1342, -30.0, 103.9327, -20.0)),
("27", "11", (95.776, -30.0, 115.4797, -20.0)),
("28", "11", (106.4178, -30.0, 127.0267, -20.0)),
("29", "11", (117.0596, -30.0, 138.5737, -20.0)),
("30", "11", (127.7013, -30.0, 150.1207, -20.0)),
("31", "11", (138.3431, -30.0, 161.6677, -20.0)),
("32", "11", (148.9849, -30.0, 173.2147, -20.0)),
("33", "11", (159.6267, -30.0, 180.0, -20.0)),
("34", "11", (170.2684, -27.2667, 180.0, -20.0)),
("02", "12", (-180.0, -33.5583, -173.1955, -30.0)),
("03", "12", (-180.0, -38.95, -161.6485, -30.0)),
("04", "12", (-180.0, -40.0, -150.1014, -30.0)),
("05", "12", (-169.7029, -40.0, -138.5544, -30.0)),
("06", "12", (-156.6489, -40.0, -127.0074, -30.0)),
("07", "12", (-143.5948, -40.0, -115.4604, -30.0)),
("08", "12", (-130.5407, -40.0, -103.9134, -30.0)),
("09", "12", (-117.4867, -40.0, -92.3664, -30.0)),
("10", "12", (-104.4326, -40.0, -80.8194, -30.0)),
("11", "12", (-91.3785, -40.0, -69.2724, -30.0)),
("12", "12", (-78.3244, -40.0, -57.7254, -30.0)),
("13", "12", (-65.2704, -40.0, -46.1784, -30.0)),
("14", "12", (-52.2163, -40.0, -34.6314, -30.0)),
("15", "12", (-39.1622, -40.0, -23.0844, -30.0)),
("16", "12", (-26.1081, -40.0, -11.5374, -30.0)),
("17", "12", (-13.0541, -40.0, 0.0109, -30.0)),
("18", "12", (0.0, -40.0, 13.065, -30.0)),
("19", "12", (11.547, -40.0, 26.119, -30.0)),
("20", "12", (23.094, -40.0, 39.1731, -30.0)),
("21", "12", (34.641, -40.0, 52.2272, -30.0)),
("22", "12", (46.188, -40.0, 65.2812, -30.0)),
("23", "12", (57.735, -40.0, 78.3353, -30.0)),
("24", "12", (69.282, -40.0, 91.3894, -30.0)),
("25", "12", (80.829, -40.0, 104.4435, -30.0)),
("26", "12", (92.376, -40.0, 117.4975, -30.0)),
("27", "12", (103.923, -40.0, 130.5516, -30.0)),
("28", "12", (115.4701, -40.0, 143.6057, -30.0)),
("29", "12", (127.0171, -40.0, 156.6598, -30.0)),
("30", "12", (138.5641, -40.0, 169.7138, -30.0)),
("31", "12", (150.1111, -40.0, 180.0, -30.0)),
("32", "12", (161.6581, -38.9417, 180.0, -30.0)),
("33", "12", (173.2051, -33.5583, 180.0, -30.0)),
("04", "13", (-180.0, -43.7667, -169.6921, -40.0)),
("05", "13", (-180.0, -48.1917, -156.638, -40.0)),
("06", "13", (-180.0, -50.0, -143.5839, -40.0)),
("07", "13", (-171.1296, -50.0, -130.5299, -40.0)),
("08", "13", (-155.5724, -50.0, -117.4758, -40.0)),
("09", "13", (-140.0151, -50.0, -104.4217, -40.0)),
("10", "13", (-124.4579, -50.0, -91.3676, -40.0)),
("11", "13", (-108.9007, -50.0, -78.3136, -40.0)),
("12", "13", (-93.3434, -50.0, -65.2595, -40.0)),
("13", "13", (-77.7862, -50.0, -52.2054, -40.0)),
("14", "13", (-62.229, -50.0, -39.1513, -40.0)),
("15", "13", (-46.6717, -50.0, -26.0973, -40.0)),
("16", "13", (-31.1145, -50.0, -13.0432, -40.0)),
("17", "13", (-15.5572, -50.0, 0.013, -40.0)),
("18", "13", (0.0, -50.0, 15.5702, -40.0)),
("19", "13", (13.0541, -50.0, 31.1274, -40.0)),
("20", "13", (26.1081, -50.0, 46.6847, -40.0)),
("21", "13", (39.1622, -50.0, 62.2419, -40.0)),
("22", "13", (52.2163, -50.0, 77.7992, -40.0)),
("23", "13", (65.2704, -50.0, 93.3564, -40.0)),
("24", "13", (78.3244, -50.0, 108.9136, -40.0)),
("25", "13", (91.3785, -50.0, 124.4709, -40.0)),
("26", "13", (104.4326, -50.0, 140.0281, -40.0)),
("27", "13", (117.4867, -50.0, 155.5853, -40.0)),
("28", "13", (130.5407, -50.0, 171.1426, -40.0)),
("29", "13", (143.5948, -50.0, 180.0, -40.0)),
("30", "13", (156.6489, -48.1917, 180.0, -40.0)),
("31", "13", (169.7029, -43.7583, 180.0, -40.0)),
("06", "14", (-180.0, -52.3333, -171.1167, -50.0)),
("07", "14", (-180.0, -56.2583, -155.5594, -50.0)),
("08", "14", (-180.0, -60.0, -140.0022, -50.0)),
("09", "14", (-180.0, -60.0, -124.4449, -50.0)),
("10", "14", (-160.0, -60.0, -108.8877, -50.0)),
("11", "14", (-140.0, -60.0, -93.3305, -50.0)),
("12", "14", (-120.0, -60.0, -77.7732, -50.0)),
("13", "14", (-100.0, -60.0, -62.216, -50.0)),
("14", "14", (-80.0, -60.0, -46.6588, -50.0)),
("15", "14", (-60.0, -60.0, -31.1015, -50.0)),
("16", "14", (-40.0, -60.0, -15.5443, -50.0)),
("17", "14", (-20.0, -60.0, 0.0167, -50.0)),
("18", "14", (0.0, -60.0, 20.0167, -50.0)),
("19", "14", (15.5572, -60.0, 40.0167, -50.0)),
("20", "14", (31.1145, -60.0, 60.0167, -50.0)),
("21", "14", (46.6717, -60.0, 80.0167, -50.0)),
("22", "14", (62.229, -60.0, 100.0167, -50.0)),
("23", "14", (77.7862, -60.0, 120.0167, -50.0)),
("24", "14", (93.3434, -60.0, 140.0167, -50.0)),
("25", "14", (108.9007, -60.0, 160.0167, -50.0)),
("26", "14", (124.4579, -60.0, 180.0, -50.0)),
("27", "14", (140.0151, -60.0, 180.0, -50.0)),
("28", "14", (155.5724, -56.25, 180.0, -50.0)),
("29", "14", (171.1296, -52.3333, 180.0, -50.0)),
("09", "15", (-180.0, -63.6167, -159.9833, -60.0)),
("10", "15", (-180.0, -67.1167, -139.9833, -60.0)),
("11", "15", (-180.0, -70.0, -119.9833, -60.0)),
("12", "15", (-175.4283, -70.0, -99.9833, -60.0)),
("13", "15", (-146.1902, -70.0, -79.9833, -60.0)),
("14", "15", (-116.9522, -70.0, -59.9833, -60.0)),
("15", "15", (-87.7141, -70.0, -39.9833, -60.0)),
("16", "15", (-58.4761, -70.0, -19.9833, -60.0)),
("17", "15", (-29.238, -70.0, 0.0244, -60.0)),
("18", "15", (0.0, -70.0, 29.2624, -60.0)),
("19", "15", (20.0, -70.0, 58.5005, -60.0)),
("20", "15", (40.0, -70.0, 87.7385, -60.0)),
("21", "15", (60.0, -70.0, 116.9765, -60.0)),
("22", "15", (80.0, -70.0, 146.2146, -60.0)),
("23", "15", (100.0, -70.0, 175.4526, -60.0)),
("24", "15", (120.0, -70.0, 180.0, -60.0)),
("25", "15", (140.0, -67.1167, 180.0, -60.0)),
("26", "15", (160.0, -63.6167, 180.0, -60.0)),
("11", "16", (-180.0, -70.5333, -175.4039, -70.0)),
("12", "16", (-180.0, -73.875, -146.1659, -70.0)),
("13", "16", (-180.0, -77.1667, -116.9278, -70.0)),
("14", "16", (-180.0, -80.0, -87.6898, -70.0)),
("15", "16", (-172.7631, -80.0, -58.4517, -70.0)),
("16", "16", (-115.1754, -80.0, -29.2137, -70.0)),
("17", "16", (-57.5877, -80.0, 0.048, -70.0)),
("18", "16", (0.0, -80.0, 57.6357, -70.0)),
("19", "16", (29.238, -80.0, 115.2234, -70.0)),
("20", "16", (58.4761, -80.0, 172.8111, -70.0)),
("21", "16", (87.7141, -80.0, 180.0, -70.0)),
("22", "16", (116.9522, -77.1583, 180.0, -70.0)),
("23", "16", (146.1902, -73.875, 180.0, -70.0)),
("24", "16", (175.4283, -70.5333, 180.0, -70.0)),
("14", "17", (-180.0, -80.4083, -172.7151, -80.0)),
("15", "17", (-180.0, -83.625, -115.1274, -80.0)),
("16", "17", (-180.0, -86.8167, -57.5397, -80.0)),
("17", "17", (-180.0, -90.0, 57.2957, -80.0)),
("18", "17", (-0.004, -90.0, 180.0, -80.0)),
("19", "17", (57.5877, -86.8167, 180.0, -80.0)),
("20", "17", (115.1754, -83.625, 180.0, -80.0)),
("21", "17", (172.7631, -80.4083, 180.0, -80.0)),
]
def tile_bbox(
    horizontal_grid: str, vertical_grid: str
) -> Tuple[float, float, float, float]:
    """Get WGS84 bounding box for any MODLAND grid index.

    Args:
        horizontal_grid (str): zero-padded horizontal grid index (e.g. "21").
        vertical_grid (str): zero-padded vertical grid index (e.g. "11").

    Returns:
        tuple: (west, south, east, north) bounds in WGS84.

    Raises:
        InvalidModlandGridID: if no entry matches the grid indexes.

    """
    # `next` stops at the first match instead of materializing a full
    # filtered list of the (large) MODLAND_GRID table.
    bounds = next(
        (
            bbox
            for h_grid, v_grid, bbox in MODLAND_GRID
            if h_grid == horizontal_grid and v_grid == vertical_grid
        ),
        None,
    )
    if bounds is None:
        raise InvalidModlandGridID(
            f"Could not find bounds for grid h{horizontal_grid}v{vertical_grid}"
        )
    return bounds
from typing import Dict, Type
import attr
from morecantile import TileMatrixSet
from rio_tiler.constants import WEB_MERCATOR_TMS, WGS84_CRS
from rio_tiler.errors import InvalidBandName
from rio_tiler.io import MultiBandReader, Reader
from rio_tiler_pds.errors import InvalidMODISProduct
from rio_tiler_pds.modis.modland_grid import tile_bbox
from rio_tiler_pds.modis.utils import sceneid_parser
# Band names for the MCD43A4 product: seven spectral bands, each paired
# with a quality-assurance band ("<band>qa").
MCD43A4_BANDS = (
    "B01",
    "B01qa",
    "B02",
    "B02qa",
    "B03",
    "B03qa",
    "B04",
    "B04qa",
    "B05",
    "B05qa",
    "B06",
    "B06qa",
    "B07",
    "B07qa",
)
# Generic 12-band layout shared by the MOD/MYD 11A1 and 13A1 products;
# each band number maps to a product-specific filename prefix below.
MODIS_BANDS = (
    "B01",
    "B02",
    "B03",
    "B04",
    "B05",
    "B06",
    "B07",
    "B08",
    "B09",
    "B10",
    "B11",
    "B12",
)
# Product name -> valid band names for that product (used by the reader
# to validate requested bands).
modis_valid_bands = {
    "MCD43A4": MCD43A4_BANDS,
    "MOD11A1": MODIS_BANDS,
    "MYD11A1": MODIS_BANDS,
    "MOD13A1": MODIS_BANDS,
    "MYD13A1": MODIS_BANDS,
}
# Band -> filename prefix for MOD11A1/MYD11A1 files.
# NOTE(review): prefixes presumably encode the product's science dataset
# names (e.g. "LSTD_"/"LSTN_" for day/night LST) — confirm against the
# bucket's file naming.
MOD11A1_MYD11A1_PREFIX = {
    "B01": "LSTD_",
    "B02": "QCD_",
    "B03": "DVT_",
    "B04": "DVA_",
    "B05": "LSTN_",
    "B06": "QCN_",
    "B07": "NVT_",
    "B08": "NVA_",
    "B09": "E31_",
    "B10": "E32_",
    "B11": "CDC_",
    "B12": "CNC_",
}
# Band -> filename prefix for MOD13A1/MYD13A1 files.
# NOTE(review): prefixes presumably match the vegetation-index science
# datasets (e.g. "NDVI_", "EVI_") — confirm against the bucket layout.
MOD13A1_MYD13A1_PREFIX = {
    "B01": "NDVI_",
    "B02": "EVI_",
    "B03": "VIQ_",
    "B04": "RR_",
    "B05": "NIRR_",
    "B06": "BR_",
    "B07": "MIRR_",
    "B08": "VZA_",
    "B09": "SZA_",
    "B10": "RAA_",
    "B11": "CDOY_",
    "B12": "PR_",
}
@attr.s
class MODISReader(MultiBandReader):
    """AWS Public Dataset MODIS reader.
    Args:
        sceneid (str): MODIS sceneid.
    Attributes:
        scene_params (dict): scene id parameters.
        bands (tuple): list of available bands (default is defined for each sensor).
    Examples:
        >>> with MODISReader('MCD43A4.A2017006.h21v11.006.2017018074804') as scene:
            print(scene.bounds)
    """
    # MODIS scene id, e.g. "MCD43A4.A2017006.h21v11.006.2017018074804".
    input: str = attr.ib()
    # TileMatrixSet grid definition (defaults to WebMercatorQuad).
    tms: TileMatrixSet = attr.ib(default=WEB_MERCATOR_TMS)
    minzoom: int = attr.ib(default=4)
    # Most of MODIS product are at 500m resolution (zoom = 8)
    # Some are at 250m (zoom = 10) (MOD09GQ & MYD09GQ) thus we use maxzoom = 9 by default
    maxzoom: int = attr.ib(default=9)
    # Reader class (and its options) used to open each individual band file.
    reader: Type[Reader] = attr.ib(default=Reader)
    reader_options: Dict = attr.ib(factory=dict)
    # Band files are addressed as s3://<bucket>/<prefix>/<sceneid>_<band>.TIF
    _scheme: str = "s3"
    bucket: str = attr.ib(default="astraea-opendata")
    # S3 key prefix template, filled with values parsed from the scene id.
    prefix_pattern: str = attr.ib(
        default="{product}.{version}/{horizontal_grid}/{vertical_grid}/{date}"
    )
    def __attrs_post_init__(self):
        """Parse Sceneid and get grid bounds."""
        self.scene_params = sceneid_parser(self.input)
        product = self.scene_params["product"]
        # Only products listed in `modis_valid_bands` are supported.
        if product not in modis_valid_bands:
            raise InvalidMODISProduct(f"{product} is not supported.")
        self.bands = modis_valid_bands[product]
        # Dataset bounds come from the MODLAND grid tile indexes (WGS84).
        self.bounds = tile_bbox(
            self.scene_params["horizontal_grid"],
            self.scene_params["vertical_grid"],
        )
        self.crs = WGS84_CRS
    def _get_band_url(self, band: str) -> str:
        """Validate band's name and return band's url."""
        # Normalize short band names ("B1" -> "B01") before validation.
        band = f"B0{band[-1]}" if band.startswith("B") and len(band) < 3 else band
        if band not in self.bands:
            raise InvalidBandName(f"{band} is not valid")
        # MOD/MYD 11A1 and 13A1 band files carry a product-specific prefix.
        if self.scene_params["product"] in ["MOD11A1", "MYD11A1"]:
            band_prefix = MOD11A1_MYD11A1_PREFIX[band]
        elif self.scene_params["product"] in ["MOD13A1", "MYD13A1"]:
            band_prefix = MOD13A1_MYD13A1_PREFIX[band]
        else:
            band_prefix = ""
        prefix = self.prefix_pattern.format(**self.scene_params)
        return f"{self._scheme}://{self.bucket}/{prefix}/{self.input}_{band_prefix}{band}.TIF"
from typing import Dict, Type
import attr
from morecantile import TileMatrixSet
from rio_tiler.constants import WEB_MERCATOR_TMS, WGS84_CRS
from rio_tiler.errors import InvalidBandName
from rio_tiler.io import MultiBandReader, Reader
from rio_tiler_pds.errors import InvalidMODISProduct
from rio_tiler_pds.modis.modland_grid import tile_bbox
from rio_tiler_pds.modis.utils import sceneid_parser
# Band names for the MCD43A4 product: seven spectral bands, each paired
# with a quality-assurance band ("<band>qa").
MCD43A4_BANDS = (
    "B01",
    "B01qa",
    "B02",
    "B02qa",
    "B03",
    "B03qa",
    "B04",
    "B04qa",
    "B05",
    "B05qa",
    "B06",
    "B06qa",
    "B07",
    "B07qa",
)
# Band names for the MOD09GQ/MYD09GQ products: two spectral bands plus
# ancillary layers (granule/observation/orbit/QC files).
MOD09GQ_MYD09GQ_BANDS = (
    "B01",
    "B02",
    "granule",
    "numobs",
    "obscov",
    "obsnum",
    "orbit",
    "qc",
)
# Band names for the MOD09GA/MYD09GA products: seven spectral bands plus
# geometry/state/QC ancillary layers.
MOD09GA_MYD09GA_BAND = (
    "B01",
    "B02",
    "B03",
    "B04",
    "B05",
    "B06",
    "B07",
    "geoflags",
    "granule",
    "numobs1km",
    "numobs500m",
    "obscov",
    "obsnum",
    "orbit",
    "qc500m",
    "qscan",
    "range",
    "senaz",
    "senzen",
    "solaz",
    "solzen",
    "state",
)
# Product name -> valid band names for that product (used by the reader
# to validate requested bands).
modis_valid_bands = {
    "MCD43A4": MCD43A4_BANDS,
    "MOD09GQ": MOD09GQ_MYD09GQ_BANDS,
    "MYD09GQ": MOD09GQ_MYD09GQ_BANDS,
    "MOD09GA": MOD09GA_MYD09GA_BAND,
    "MYD09GA": MOD09GA_MYD09GA_BAND,
}
@attr.s
class MODISReader(MultiBandReader):
    """AWS Public Dataset MODIS reader.
    Args:
        sceneid (str): MODIS sceneid.
    Attributes:
        scene_params (dict): scene id parameters.
        bands (tuple): list of available bands (default is defined for each sensor).
    Examples:
        >>> with MODISReader('MCD43A4.A2017006.h21v11.006.2017018074804') as scene:
            print(scene.bounds)
    """
    # MODIS scene id, e.g. "MCD43A4.A2017006.h21v11.006.2017018074804".
    input: str = attr.ib()
    # TileMatrixSet grid definition (defaults to WebMercatorQuad).
    tms: TileMatrixSet = attr.ib(default=WEB_MERCATOR_TMS)
    minzoom: int = attr.ib(default=4)
    # Most of MODIS product are at 500m resolution (zoom = 8)
    # Some are at 250m (zoom = 10) (MOD09GQ & MYD09GQ) thus we use maxzoom = 9 by default
    maxzoom: int = attr.ib(default=9)
    # Reader class (and its options) used to open each individual band file.
    reader: Type[Reader] = attr.ib(default=Reader)
    reader_options: Dict = attr.ib(factory=dict)
    # Band files are addressed as s3://<bucket>/<prefix>/<sceneid>_<band>.TIF
    _scheme: str = "s3"
    bucket: str = attr.ib(default="modis-pds")
    # S3 key prefix template, filled with values parsed from the scene id.
    prefix_pattern: str = attr.ib(
        default="{product}.{version}/{horizontal_grid}/{vertical_grid}/{date}"
    )
    def __attrs_post_init__(self):
        """Parse Sceneid and get grid bounds."""
        self.scene_params = sceneid_parser(self.input)
        product = self.scene_params["product"]
        # Only products listed in `modis_valid_bands` are supported.
        if product not in modis_valid_bands:
            raise InvalidMODISProduct(f"{product} is not supported.")
        self.bands = modis_valid_bands[product]
        # Dataset bounds come from the MODLAND grid tile indexes (WGS84).
        self.bounds = tile_bbox(
            self.scene_params["horizontal_grid"],
            self.scene_params["vertical_grid"],
        )
        self.crs = WGS84_CRS
    def _get_band_url(self, band: str) -> str:
        """Validate band's name and return band's url."""
        # Normalize short band names ("B1" -> "B01") before validation.
        band = f"B0{band[-1]}" if band.startswith("B") and len(band) < 3 else band
        if band not in self.bands:
            raise InvalidBandName(f"{band} is not valid")
        prefix = self.prefix_pattern.format(**self.scene_params)
        return f"{self._scheme}://{self.bucket}/{prefix}/{self.input}_{band}.TIF"
# rio-tiler
<p align="center">
<img src="https://user-images.githubusercontent.com/10407788/88133997-77560f00-cbb1-11ea-874c-a8f1d123a9df.jpg" style="max-width: 800px;" alt="rio-tiler"></a>
</p>
<p align="center">
<em>User friendly Rasterio plugin to read raster datasets.</em>
</p>
<p align="center">
<a href="https://github.com/cogeotiff/rio-tiler/actions?query=workflow%3ACI" target="_blank">
<img src="https://github.com/cogeotiff/rio-tiler/workflows/CI/badge.svg" alt="Test">
</a>
<a href="https://codecov.io/gh/cogeotiff/rio-tiler" target="_blank">
<img src="https://codecov.io/gh/cogeotiff/rio-tiler/branch/main/graph/badge.svg" alt="Coverage">
</a>
<a href="https://pypi.org/project/rio-tiler" target="_blank">
<img src="https://img.shields.io/pypi/v/rio-tiler?color=%2334D058&label=pypi%20package" alt="Package version">
</a>
<a href="https://anaconda.org/conda-forge/rio-tiler" target="_blank">
<img src="https://img.shields.io/conda/v/conda-forge/rio-tiler.svg" alt="Conda Forge">
</a>
<a href="https://pypistats.org/packages/rio-tiler" target="_blank">
<img src="https://img.shields.io/pypi/dm/rio-tiler.svg" alt="Downloads">
</a>
<a href="https://github.com/cogeotiff/rio-tiler/blob/main/LICENSE" target="_blank">
    <img src="https://img.shields.io/github/license/cogeotiff/rio-tiler.svg" alt="License">
</a>
<a href="https://mybinder.org/v2/gh/cogeotiff/rio-tiler/main?filepath=docs%2Fexamples%2F" target="_blank" alt="Binder">
<img src="https://mybinder.org/badge_logo.svg" alt="Binder">
</a>
</p>
---
**Documentation**: <a href="https://cogeotiff.github.io/rio-tiler/" target="_blank">https://cogeotiff.github.io/rio-tiler/</a>
**Source Code**: <a href="https://github.com/cogeotiff/rio-tiler" target="_blank">https://github.com/cogeotiff/rio-tiler</a>
---
## Description
`rio-tiler` was initially designed to create [slippy map
tiles](https://en.wikipedia.org/wiki/Tiled_web_map) from large raster data
sources and render these tiles dynamically on a web map. Since `rio-tiler` v2.0, we added many more helper methods to read
data and metadata from any raster source supported by Rasterio/GDAL.
This includes local and remote files via HTTP, AWS S3, Google Cloud Storage,
etc.
At the low level, `rio-tiler` is *just* a wrapper around the [rasterio](https://github.com/rasterio/rasterio) and [GDAL](https://github.com/osgeo/gdal) libraries.
## Features
- Read any dataset supported by GDAL/Rasterio
```python
from rio_tiler.io import Reader
with Reader("my.tif") as image:
print(image.dataset) # rasterio opened dataset
img = image.read() # similar to rasterio.open("my.tif").read() but returns a rio_tiler.models.ImageData object
```
- User friendly `tile`, `part`, `feature`, `point` reading methods
```python
from rio_tiler.io import Reader
with Reader("my.tif") as image:
img = image.tile(x, y, z) # read mercator tile z-x-y
img = image.part(bbox) # read the data intersecting a bounding box
img = image.feature(geojson_feature) # read the data intersecting a geojson feature
img = image.point(lon,lat) # get pixel values for a lon/lat coordinates
```
- Enable property assignment (e.g nodata) on data reading
```python
from rio_tiler.io import Reader
with Reader("my.tif") as image:
img = image.tile(x, y, z, nodata=-9999) # read mercator tile z-x-y
```
- [STAC](https://github.com/radiantearth/stac-spec) support
```python
from rio_tiler.io import STACReader
with STACReader("item.json") as stac:
print(stac.assets) # available asset
img = stac.tile( # read tile for asset1 and indexes 1,2,3
x,
y,
z,
assets="asset1",
indexes=(1, 2, 3), # same as asset_indexes={"asset1": (1, 2, 3)},
)
# Merging data from different assets
img = stac.tile( # create an image from assets 1,2,3 using their first band
x,
y,
z,
assets=("asset1", "asset2", "asset3",),
asset_indexes={"asset1": 1, "asset2": 1, "asset3": 1},
)
```
- [Xarray](https://xarray.dev) support **(>=4.0)**
```python
import xarray
from rio_tiler.io import XarrayReader
ds = xarray.open_dataset(
"https://pangeo.blob.core.windows.net/pangeo-public/daymet-rio-tiler/na-wgs84.zarr/",
engine="zarr",
decode_coords="all",
consolidated=True,
)
da = ds["tmax"]
with XarrayReader(da) as dst:
print(dst.info())
img = dst.tile(1, 1, 2)
```
*Note: The XarrayReader needs optional dependencies to be installed `pip install rio-tiler["xarray"]`.*
- Non-Geo Image support **(>=4.0)**
```python
from rio_tiler.io import ImageReader
with ImageReader("image.jpeg") as src:
im = src.tile(0, 0, src.maxzoom) # read top-left `tile`
im = src.part((0, 100, 100, 0)) # read top-left 100x100 pixels
pt = src.point(0, 0) # read pixel value
```
*Note: `ImageReader` is also compatible with proper geo-referenced raster datasets.*
- [Mosaic](https://cogeotiff.github.io/rio-tiler/mosaic/) (merging or stacking)
```python
from rio_tiler.io import Reader
from rio_tiler.mosaic import mosaic_reader
def reader(file, x, y, z, **kwargs):
with Reader(file) as image:
return image.tile(x, y, z, **kwargs)
img, assets = mosaic_reader(["image1.tif", "image2.tif"], reader, x, y, z)
```
- Native support for multiple TileMatrixSet via [morecantile](https://developmentseed.org/morecantile/)
```python
import morecantile
from rio_tiler.io import Reader
# Use EPSG:4326 (WGS84) grid
wgs84_grid = morecantile.tms.get("WorldCRS84Quad")
with Reader("my.tif", tms=wgs84_grid) as src:
img = src.tile(1, 1, 1)
```
## Install
You can install `rio-tiler` using pip
```bash
$ pip install -U pip
$ pip install -U rio-tiler
```
or install from source:
```bash
$ git clone https://github.com/cogeotiff/rio-tiler.git
$ cd rio-tiler
$ pip install -U pip
$ pip install -e .
```
## Plugins
#### [**rio-tiler-pds**][rio-tiler-pds]
[rio-tiler-pds]: https://github.com/cogeotiff/rio-tiler-pds
`rio-tiler` v1 included several helpers for reading popular public datasets (e.g. Sentinel 2, Sentinel 1, Landsat 8, CBERS) from cloud providers. This functionality is now in a [separate plugin][rio-tiler-pds], enabling easier access to more public datasets.
#### [**rio-tiler-mvt**][rio-tiler-mvt]
Create Mapbox Vector Tiles from raster sources
[rio-tiler-mvt]: https://github.com/cogeotiff/rio-tiler-mvt
## Implementations
[**titiler**][titiler]: A lightweight Cloud Optimized GeoTIFF dynamic tile server.
[**cogeo-mosaic**][cogeo-mosaic]: Create mosaics of Cloud Optimized GeoTIFF based on the [mosaicJSON][mosaicjson_spec] specification.
[titiler]: https://github.com/developmentseed/titiler
[cogeo-mosaic]: https://github.com/developmentseed/cogeo-mosaic
[mosaicjson_spec]: https://github.com/developmentseed/mosaicjson-spec
## Contribution & Development
See [CONTRIBUTING.md](https://github.com/cogeotiff/rio-tiler/blob/main/CONTRIBUTING.md)
## Authors
The `rio-tiler` project was begun at Mapbox and was transferred to the `cogeotiff` Github organization in January 2019.
See [AUTHORS.txt](https://github.com/cogeotiff/rio-tiler/blob/main/AUTHORS.txt) for a listing of individual contributors.
## Changes
See [CHANGES.md](https://github.com/cogeotiff/rio-tiler/blob/main/CHANGES.md).
## License
See [LICENSE](https://github.com/cogeotiff/rio-tiler/blob/main/LICENSE)
| /rio_tiler-6.0.2.tar.gz/rio_tiler-6.0.2/README.md | 0.873147 | 0.825941 | README.md | pypi |
import re
from typing import List, Sequence, Tuple
import numexpr
import numpy
from rio_tiler.errors import InvalidExpression
def parse_expression(expression: str, cast: bool = True) -> Tuple:
    """Parse rio-tiler band math expression.

    Args:
        expression (str): band math/combination expression.
        cast (bool): cast band names to integers (convert to index values). Defaults to True.

    Returns:
        tuple: unique band names/indexes, in sorted order.

    Raises:
        InvalidExpression: if no band reference is found in the expression.

    Examples:
        >>> parse_expression("b1;b2")
        (1, 2)

        >>> parse_expression("B1/B2", cast=False)
        ("1", "2")

    """
    bands = set(re.findall(r"\bb(?P<bands>[0-9A-Z]+)\b", expression, re.IGNORECASE))
    # `set` iteration order depends on the interpreter's hash seed; sort so
    # the returned band order is deterministic and reproducible across runs.
    output_bands = tuple(sorted(map(int, bands))) if cast else tuple(sorted(bands))
    if not output_bands:
        raise InvalidExpression(
            f"Could not find any valid bands in '{expression}' expression."
        )
    return output_bands
def get_expression_blocks(expression: str) -> List[str]:
    """Split expression in blocks.

    Expressions are separated by ";". Empty blocks (e.g. from a trailing
    separator) are dropped.

    Args:
        expression (str): band math/combination expression.

    Returns:
        list: expression blocks (str).

    Examples:
        >>> get_expression_blocks("b1/b2;b2+b1")
        ['b1/b2', 'b2+b1']

    """
    return [expr for expr in expression.split(";") if expr]
def apply_expression(
    blocks: Sequence[str],
    bands: Sequence[str],
    data: numpy.ndarray,
) -> numpy.ma.MaskedArray:
    """Apply rio-tiler expression.

    Args:
        blocks (sequence): expression for a specific layer.
        bands (sequence): bands names.
        data (numpy.array): array of bands.

    Returns:
        numpy.array: output data.

    """
    # One array per band is required for the name -> array binding below.
    if len(bands) != data.shape[0]:
        raise ValueError(
            f"Incompatible number of bands ({bands}) and data shape {data.shape}"
        )

    # Bind each band name to its array so expressions can reference bands
    # by name (e.g. "b1 + b2").
    namespace = dict(zip(bands, data))
    try:
        layers = []
        for expr in blocks:
            if not expr:
                continue
            evaluated = numexpr.evaluate(expr.strip(), local_dict=namespace)
            layers.append(numpy.nan_to_num(evaluated))
        return numpy.ma.MaskedArray(layers)
    except KeyError as e:
        # numexpr raises KeyError for names missing from `namespace`.
        raise InvalidExpression(f"Invalid band/asset name {str(e)}") from e
from concurrent import futures
from functools import partial
from typing import Any, Callable, Dict, Generator, Optional, Sequence, Tuple, Union
from rio_tiler.constants import MAX_THREADS
from rio_tiler.logger import logger
from rio_tiler.models import ImageData, PointData
# A task is a (future-or-callable, asset) pair: either an already-submitted
# `concurrent.futures.Future` or a no-argument callable to run lazily.
TaskType = Sequence[Tuple[Union[futures.Future, Callable], Any]]


def filter_tasks(
    tasks: TaskType,
    allowed_exceptions: Optional[Tuple] = None,
) -> Generator:
    """Filter Tasks to remove Exceptions.

    Args:
        tasks (sequence): Sequence of 'concurrent.futures._base.Future' or 'Callable'
        allowed_exceptions (tuple, optional): List of exceptions which won't be raised.

    Yields:
        tuple: (task result, asset) pairs for tasks that did not raise.

    """
    if allowed_exceptions is None:
        # `except ()` matches nothing, so any exception propagates.
        allowed_exceptions = ()

    for future, asset in tasks:
        try:
            if isinstance(future, futures.Future):
                yield future.result(), asset
            else:
                yield future(), asset
        except allowed_exceptions as err:
            # An "allowed" exception drops this task's result (logged)
            # instead of aborting the whole iteration.
            logger.info(err)
def create_tasks(
    reader: Callable, asset_list: Sequence, threads: int, *args, **kwargs
) -> TaskType:
    """Create Future Tasks.

    Args:
        reader (callable): function applied to each asset.
        asset_list (sequence): assets passed as first argument to `reader`.
        threads (int): number of worker threads; <= 1 disables the pool.

    Returns:
        TaskType: list of (future-or-callable, asset) pairs.

    """
    if threads and threads > 1:
        logger.debug(f"Running tasks in ThreadPool with max_workers={threads}")
        # NOTE(review): exiting the `with` calls shutdown(wait=True), so all
        # submitted futures are complete before this returns.
        with futures.ThreadPoolExecutor(max_workers=threads) as executor:
            return [
                (executor.submit(reader, asset, *args, **kwargs), asset)
                for asset in asset_list
            ]
    else:
        # Fixed typo in log message: "ThreadsPool" -> "ThreadPool".
        logger.debug(f"Running tasks outside ThreadPool (max_workers={threads})")
        # Defer execution: each task is a no-argument callable evaluated later.
        return [
            (partial(reader, asset, *args, **kwargs), asset) for asset in asset_list
        ]
def multi_arrays(
    asset_list: Sequence,
    reader: Callable[..., ImageData],
    *args: Any,
    threads: int = MAX_THREADS,
    allowed_exceptions: Optional[Tuple] = None,
    **kwargs: Any,
) -> ImageData:
    """Merge arrays returned from tasks."""
    tasks = create_tasks(reader, asset_list, threads, *args, **kwargs)
    results = filter_tasks(tasks, allowed_exceptions=allowed_exceptions)
    # Keep the data, drop the asset identifier.
    images = [img for img, _ in results]
    return ImageData.create_from_list(images)
def multi_points(
    asset_list: Sequence,
    reader: Callable[..., PointData],
    *args: Any,
    threads: int = MAX_THREADS,
    allowed_exceptions: Optional[Tuple] = None,
    **kwargs: Any,
) -> PointData:
    """Merge points returned from tasks."""
    tasks = create_tasks(reader, asset_list, threads, *args, **kwargs)
    results = filter_tasks(tasks, allowed_exceptions=allowed_exceptions)
    # Keep the data, drop the asset identifier.
    points = [pt for pt, _ in results]
    return PointData.create_from_list(points)
def multi_values(
    asset_list: Sequence,
    reader: Callable,
    *args: Any,
    threads: int = MAX_THREADS,
    allowed_exceptions: Optional[Tuple] = None,
    **kwargs: Any,
) -> Dict:
    """Merge values returned from tasks."""
    tasks = create_tasks(reader, asset_list, threads, *args, **kwargs)
    # Map each asset to its task result.
    merged: Dict = {}
    for value, asset in filter_tasks(tasks, allowed_exceptions=allowed_exceptions):
        merged[asset] = value
    return merged
import abc
import contextlib
import re
import warnings
from typing import Any, Dict, List, Optional, Sequence, Tuple, Type, Union
import attr
import numpy
from morecantile import Tile, TileMatrixSet
from rasterio.crs import CRS
from rasterio.warp import transform_bounds
from rio_tiler.constants import WEB_MERCATOR_TMS, WGS84_CRS
from rio_tiler.errors import (
AssetAsBandError,
ExpressionMixingWarning,
InvalidExpression,
MissingAssets,
MissingBands,
TileOutsideBounds,
)
from rio_tiler.models import BandStatistics, ImageData, Info, PointData
from rio_tiler.tasks import multi_arrays, multi_points, multi_values
from rio_tiler.types import AssetInfo, BBox, Indexes
from rio_tiler.utils import normalize_bounds
@attr.s
class SpatialMixin:
    """Spatial Info Mixin.

    Attributes:
        tms (morecantile.TileMatrixSet, optional): TileMatrixSet grid definition. Defaults to `WebMercatorQuad`.

    """

    tms: TileMatrixSet = attr.ib(default=WEB_MERCATOR_TMS)
    # `bounds` and `crs` are expected to be set by the concrete reader.
    bounds: BBox = attr.ib(init=False)
    crs: CRS = attr.ib(init=False)
    geographic_crs: CRS = attr.ib(init=False, default=WGS84_CRS)

    @property
    def geographic_bounds(self) -> BBox:
        """Return dataset bounds in geographic_crs.

        Falls back to the whole-world extent when the transformation fails
        or returns non-finite values.
        """
        if self.crs == self.geographic_crs:
            return self.bounds
        try:
            bounds = transform_bounds(
                self.crs,
                self.geographic_crs,
                *self.bounds,
                densify_pts=21,
            )
        # `except Exception` (not a bare `except`) so KeyboardInterrupt and
        # SystemExit still propagate.
        except Exception:
            warnings.warn(
                "Cannot determine bounds in geographic CRS, will default to (-180.0, -90.0, 180.0, 90.0).",
                UserWarning,
            )
            bounds = (-180.0, -90, 180.0, 90)
        if not all(numpy.isfinite(bounds)):
            warnings.warn(
                "Transformation to geographic CRS returned invalid values, will default to (-180.0, -90.0, 180.0, 90.0).",
                UserWarning,
            )
            bounds = (-180.0, -90, 180.0, 90)
        return bounds

    def tile_exists(self, tile_x: int, tile_y: int, tile_z: int) -> bool:
        """Check if a tile intersects the dataset bounds.

        Args:
            tile_x (int): Tile's horizontal index.
            tile_y (int): Tile's vertical index.
            tile_z (int): Tile's zoom level index.

        Returns:
            bool: True if the tile intersects the dataset bounds.

        """
        # bounds in TileMatrixSet's CRS
        tile_bounds = self.tms.xy_bounds(Tile(x=tile_x, y=tile_y, z=tile_z))
        if not self.tms.rasterio_crs == self.crs:
            # Transform the bounds to the dataset's CRS
            try:
                tile_bounds = transform_bounds(
                    self.tms.rasterio_crs,
                    self.crs,
                    *tile_bounds,
                    densify_pts=21,
                )
            # Narrowed from a bare `except` so KeyboardInterrupt/SystemExit
            # are not swallowed by the retry below.
            except Exception:
                # HACK: gdal will first throw an error for invalid transformation
                # but if retried it will then pass.
                # Note: It might return `+/-inf` values
                tile_bounds = transform_bounds(
                    self.tms.rasterio_crs,
                    self.crs,
                    *tile_bounds,
                    densify_pts=21,
                )
        # If tile_bounds has non-finite value in the dataset CRS we return True
        if not all(numpy.isfinite(tile_bounds)):
            return True
        tile_bounds = normalize_bounds(tile_bounds)
        dst_bounds = normalize_bounds(self.bounds)
        # Standard axis-aligned bounding-box intersection test.
        return (
            (tile_bounds[0] < dst_bounds[2])
            and (tile_bounds[2] > dst_bounds[0])
            and (tile_bounds[3] > dst_bounds[1])
            and (tile_bounds[1] < dst_bounds[3])
        )
@attr.s
class BaseReader(SpatialMixin, metaclass=abc.ABCMeta):
    """Rio-tiler.io BaseReader.

    Abstract interface shared by every single-dataset reader.  Concrete
    subclasses must implement ``info``, ``statistics``, ``tile``, ``part``,
    ``preview``, ``point`` and ``feature``.

    Attributes:
        input (any): Reader's input.
        tms (morecantile.TileMatrixSet, optional): TileMatrixSet grid
            definition. Defaults to `WebMercatorQuad`.

    """

    input: Any = attr.ib()
    tms: TileMatrixSet = attr.ib(default=WEB_MERCATOR_TMS)

    def __enter__(self):
        """Enter the runtime context and return the reader itself."""
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """Exit the runtime context (no cleanup performed by default)."""
        pass

    @abc.abstractmethod
    def info(self) -> Info:
        """Return the dataset's metadata as a `rio_tiler.models.Info`."""
        ...

    @abc.abstractmethod
    def statistics(self) -> Dict[str, BandStatistics]:
        """Return per-band `rio_tiler.models.BandStatistics` for the dataset."""
        ...

    @abc.abstractmethod
    def tile(self, tile_x: int, tile_y: int, tile_z: int) -> ImageData:
        """Read map tile (tile_x, tile_y, tile_z) as a `rio_tiler.models.ImageData`."""
        ...

    @abc.abstractmethod
    def part(self, bbox: BBox) -> ImageData:
        """Read the part of the dataset covered by `bbox` (left, bottom, right, top in target crs)."""
        ...

    @abc.abstractmethod
    def preview(self) -> ImageData:
        """Read a low-resolution preview of the whole dataset."""
        ...

    @abc.abstractmethod
    def point(self, lon: float, lat: float) -> PointData:
        """Read the pixel value at (`lon`, `lat`) as a `rio_tiler.models.PointData`."""
        ...

    @abc.abstractmethod
    def feature(self, shape: Dict) -> ImageData:
        """Read the dataset within a valid GeoJSON feature `shape`."""
        ...
@attr.s
class MultiBaseReader(SpatialMixin, metaclass=abc.ABCMeta):
    """MultiBaseReader Reader.

    This Abstract Base Class Reader is suited for dataset that are composed of multiple assets (e.g. STAC).

    Attributes:
        input (any): input data.
        tms (morecantile.TileMatrixSet, optional): TileMatrixSet grid definition. Defaults to `WebMercatorQuad`.
        minzoom (int, optional): Set dataset's minzoom.
        maxzoom (int, optional): Set dataset's maxzoom.
        reader_options (dict, option): options to forward to the reader. Defaults to `{}`.

    """
    input: Any = attr.ib()
    tms: TileMatrixSet = attr.ib(default=WEB_MERCATOR_TMS)
    minzoom: Optional[int] = attr.ib(default=None)
    maxzoom: Optional[int] = attr.ib(default=None)
    # `reader` and `assets` are not init arguments: concrete subclasses are
    # expected to set them (typically in an attrs post-init hook).
    reader: Type[BaseReader] = attr.ib(init=False)
    reader_options: Dict = attr.ib(factory=dict)
    assets: Sequence[str] = attr.ib(init=False)
    # Context manager entered around every per-asset read (see `_reader`
    # closures below); defaults to a no-op context.
    ctx: Any = attr.ib(init=False, default=contextlib.nullcontext)
    def __enter__(self):
        """Support using with Context Managers."""
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        """Support using with Context Managers."""
        pass
    @abc.abstractmethod
    def _get_asset_info(self, asset: str) -> AssetInfo:
        """Validate asset name and construct url."""
        ...
    def parse_expression(self, expression: str, asset_as_band: bool = False) -> Tuple:
        """Parse rio-tiler band math expression and return the referenced asset names."""
        input_assets = "|".join(self.assets)
        # NOTE(review): asset names are interpolated into the regex unescaped —
        # assumes they contain no regex metacharacters; confirm for exotic asset keys.
        if asset_as_band:
            # Assets referenced directly by name (e.g. "asset1/asset2").
            _re = re.compile(rf"\b({input_assets})\b")
        else:
            # Assets referenced with a band suffix (e.g. "asset1_b1/asset2_b1").
            _re = re.compile(rf"\b({input_assets})_b\d+\b")
        assets = tuple(set(re.findall(_re, expression)))
        if not assets:
            raise InvalidExpression(
                f"Could not find any valid assets in '{expression}' expression."
                if asset_as_band
                else f"Could not find any valid assets in '{expression}' expression, maybe try with `asset_as_band=True`."
            )
        return assets
    def _update_statistics(
        self,
        img: ImageData,
        indexes: Optional[Indexes] = None,
        statistics: Optional[Sequence[Tuple[float, float]]] = None,
    ) -> None:
        """Update ImageData Statistics from AssetInfo (in place)."""
        if isinstance(indexes, int):
            indexes = (indexes,)
        # Default to all bands when no indexes were requested.
        if indexes is None:
            indexes = tuple(range(1, img.count + 1))
        # Only fill in statistics when the image has none of its own.
        if not img.dataset_statistics and statistics:
            # Bail out silently when the requested indexes exceed the
            # statistics provided by the asset info.
            if max(max(indexes), len(indexes)) > len(statistics): # type: ignore
                return
            img.dataset_statistics = [statistics[bidx - 1] for bidx in indexes]
    def info(
        self, assets: Optional[Union[Sequence[str], str]] = None, **kwargs: Any
    ) -> Dict[str, Info]:
        """Return metadata from multiple assets.

        Args:
            assets (sequence of str or str, optional): assets to fetch info from. Defaults to all available assets.

        Returns:
            dict: Multiple assets info in form of {"asset1": rio_tile.models.Info}.

        """
        if not assets:
            warnings.warn(
                "No `assets` option passed, will fetch info for all available assets.",
                UserWarning,
            )
        assets = assets or self.assets
        if isinstance(assets, str):
            assets = (assets,)
        # Fetch each asset's info with its own reader instance.
        def _reader(asset: str, **kwargs: Any) -> Dict:
            asset_info = self._get_asset_info(asset)
            url = asset_info["url"]
            with self.ctx(**asset_info.get("env", {})):
                with self.reader(url, tms=self.tms, **self.reader_options) as src:  # type: ignore
                    return src.info()
        return multi_values(assets, _reader, **kwargs)
    def statistics(
        self,
        assets: Optional[Union[Sequence[str], str]] = None,
        asset_indexes: Optional[Dict[str, Indexes]] = None,  # Indexes for each asset
        asset_expression: Optional[Dict[str, str]] = None,  # Expression for each asset
        **kwargs: Any,
    ) -> Dict[str, Dict[str, BandStatistics]]:
        """Return array statistics for multiple assets.

        Args:
            assets (sequence of str or str): assets to fetch info from.
            asset_indexes (dict, optional): Band indexes for each asset (e.g {"asset1": 1, "asset2": (1, 2,)}).
            asset_expression (dict, optional): rio-tiler expression for each asset (e.g. {"asset1": "b1/b2+b3", "asset2": ...}).
            kwargs (optional): Options to forward to the `self.reader.statistics` method.

        Returns:
            dict: Multiple assets statistics in form of {"asset1": {"1": rio_tiler.models.BandStatistics, ...}}.

        """
        if not assets:
            warnings.warn(
                "No `assets` option passed, will fetch statistics for all available assets.",
                UserWarning,
            )
        assets = assets or self.assets
        if isinstance(assets, str):
            assets = (assets,)
        asset_indexes = asset_indexes or {}
        asset_expression = asset_expression or {}
        def _reader(asset: str, *args, **kwargs) -> Dict:
            asset_info = self._get_asset_info(asset)
            url = asset_info["url"]
            with self.ctx(**asset_info.get("env", {})):
                with self.reader(url, tms=self.tms, **self.reader_options) as src:  # type: ignore
                    return src.statistics(
                        *args,
                        # Per-asset indexes take precedence over a global `indexes` kwarg.
                        indexes=asset_indexes.get(asset, kwargs.pop("indexes", None)),  # type: ignore
                        expression=asset_expression.get(asset),  # type: ignore
                        **kwargs,
                    )
        return multi_values(assets, _reader, **kwargs)
    def merged_statistics(
        self,
        assets: Optional[Union[Sequence[str], str]] = None,
        expression: Optional[str] = None,
        asset_indexes: Optional[Dict[str, Indexes]] = None,  # Indexes for each asset
        categorical: bool = False,
        categories: Optional[List[float]] = None,
        percentiles: Optional[List[int]] = None,
        hist_options: Optional[Dict] = None,
        max_size: int = 1024,
        **kwargs: Any,
    ) -> Dict[str, BandStatistics]:
        """Return array statistics for multiple assets.

        Args:
            assets (sequence of str or str): assets to fetch info from.
            expression (str, optional): rio-tiler expression for the asset list (e.g. asset1/asset2+asset3).
            asset_indexes (dict, optional): Band indexes for each asset (e.g {"asset1": 1, "asset2": (1, 2,)}).
            categorical (bool): treat input data as categorical data. Defaults to False.
            categories (list of numbers, optional): list of categories to return value for.
            percentiles (list of numbers, optional): list of percentile values to calculate. Defaults to `[2, 98]`.
            hist_options (dict, optional): Options to forward to numpy.histogram function.
            max_size (int, optional): Limit the size of the longest dimension of the dataset read, respecting bounds X/Y aspect ratio. Defaults to 1024.
            kwargs (optional): Options to forward to the `self.preview` method.

        Returns:
            Dict[str, rio_tiler.models.BandStatistics]: bands statistics.

        """
        if not expression:
            if not assets:
                warnings.warn(
                    "No `assets` option passed, will fetch statistics for all available assets.",
                    UserWarning,
                )
            assets = assets or self.assets
        # Statistics are computed on a merged (possibly downsampled) preview.
        data = self.preview(
            assets=assets,
            expression=expression,
            asset_indexes=asset_indexes,
            max_size=max_size,
            **kwargs,
        )
        return data.statistics(
            categorical=categorical,
            categories=categories,
            percentiles=percentiles,
            hist_options=hist_options,
        )
    def tile(
        self,
        tile_x: int,
        tile_y: int,
        tile_z: int,
        assets: Optional[Union[Sequence[str], str]] = None,
        expression: Optional[str] = None,
        asset_indexes: Optional[Dict[str, Indexes]] = None,  # Indexes for each asset
        asset_as_band: bool = False,
        **kwargs: Any,
    ) -> ImageData:
        """Read and merge Web Map tiles from multiple assets.

        Args:
            tile_x (int): Tile's horizontal index.
            tile_y (int): Tile's vertical index.
            tile_z (int): Tile's zoom level index.
            assets (sequence of str or str, optional): assets to fetch info from.
            expression (str, optional): rio-tiler expression for the asset list (e.g. asset1/asset2+asset3).
            asset_indexes (dict, optional): Band indexes for each asset (e.g {"asset1": 1, "asset2": (1, 2,)}).
            asset_as_band (bool, optional): use the asset name as the band name (requires single-band assets).
            kwargs (optional): Options to forward to the `self.reader.tile` method.

        Returns:
            rio_tiler.models.ImageData: ImageData instance with data, mask and tile spatial info.

        Raises:
            TileOutsideBounds: if the tile does not intersect the dataset bounds.
            MissingAssets: if neither `assets` nor `expression` is provided.

        """
        if not self.tile_exists(tile_x, tile_y, tile_z):
            raise TileOutsideBounds(
                f"Tile {tile_z}/{tile_x}/{tile_y} is outside image bounds"
            )
        if isinstance(assets, str):
            assets = (assets,)
        if assets and expression:
            warnings.warn(
                "Both expression and assets passed; expression will overwrite assets parameter.",
                ExpressionMixingWarning,
            )
        if expression:
            assets = self.parse_expression(expression, asset_as_band=asset_as_band)
        if not assets:
            raise MissingAssets(
                "assets must be passed either via expression or assets options."
            )
        asset_indexes = asset_indexes or {}
        def _reader(asset: str, *args: Any, **kwargs: Any) -> ImageData:
            # Per-asset indexes win over a global `indexes` kwarg.
            idx = asset_indexes.get(asset) or kwargs.pop("indexes", None)  # type: ignore
            asset_info = self._get_asset_info(asset)
            url = asset_info["url"]
            with self.ctx(**asset_info.get("env", {})):
                with self.reader(url, tms=self.tms, **self.reader_options) as src:  # type: ignore
                    data = src.tile(*args, indexes=idx, **kwargs)
                    self._update_statistics(
                        data,
                        indexes=idx,
                        statistics=asset_info.get("dataset_statistics"),
                    )
                    # Merge asset-info metadata into the image metadata,
                    # keyed by asset name.
                    metadata = data.metadata or {}
                    if m := asset_info.get("metadata"):
                        metadata.update(m)
                    data.metadata = {asset: metadata}
                    if asset_as_band:
                        if len(data.band_names) > 1:
                            raise AssetAsBandError(
                                "Can't use asset_as_band for multibands asset"
                            )
                        data.band_names = [asset]
                    else:
                        # Prefix band names with the asset name (e.g. "asset1_b1").
                        data.band_names = [f"{asset}_{n}" for n in data.band_names]
            return data
        img = multi_arrays(assets, _reader, tile_x, tile_y, tile_z, **kwargs)
        if expression:
            return img.apply_expression(expression)
        return img
    def part(
        self,
        bbox: BBox,
        assets: Optional[Union[Sequence[str], str]] = None,
        expression: Optional[str] = None,
        asset_indexes: Optional[Dict[str, Indexes]] = None,  # Indexes for each asset
        asset_as_band: bool = False,
        **kwargs: Any,
    ) -> ImageData:
        """Read and merge parts from multiple assets.

        Args:
            bbox (tuple): Output bounds (left, bottom, right, top) in target crs.
            assets (sequence of str or str, optional): assets to fetch info from.
            expression (str, optional): rio-tiler expression for the asset list (e.g. asset1/asset2+asset3).
            asset_indexes (dict, optional): Band indexes for each asset (e.g {"asset1": 1, "asset2": (1, 2,)}).
            asset_as_band (bool, optional): use the asset name as the band name (requires single-band assets).
            kwargs (optional): Options to forward to the `self.reader.part` method.

        Returns:
            rio_tiler.models.ImageData: ImageData instance with data, mask and tile spatial info.

        """
        if isinstance(assets, str):
            assets = (assets,)
        if assets and expression:
            warnings.warn(
                "Both expression and assets passed; expression will overwrite assets parameter.",
                ExpressionMixingWarning,
            )
        if expression:
            assets = self.parse_expression(expression, asset_as_band=asset_as_band)
        if not assets:
            raise MissingAssets(
                "assets must be passed either via expression or assets options."
            )
        asset_indexes = asset_indexes or {}
        def _reader(asset: str, *args: Any, **kwargs: Any) -> ImageData:
            # Per-asset indexes win over a global `indexes` kwarg.
            idx = asset_indexes.get(asset) or kwargs.pop("indexes", None)  # type: ignore
            asset_info = self._get_asset_info(asset)
            url = asset_info["url"]
            with self.ctx(**asset_info.get("env", {})):
                with self.reader(url, tms=self.tms, **self.reader_options) as src:  # type: ignore
                    data = src.part(*args, indexes=idx, **kwargs)
                    self._update_statistics(
                        data,
                        indexes=idx,
                        statistics=asset_info.get("dataset_statistics"),
                    )
                    # Merge asset-info metadata, keyed by asset name.
                    metadata = data.metadata or {}
                    if m := asset_info.get("metadata"):
                        metadata.update(m)
                    data.metadata = {asset: metadata}
                    if asset_as_band:
                        if len(data.band_names) > 1:
                            raise AssetAsBandError(
                                "Can't use asset_as_band for multibands asset"
                            )
                        data.band_names = [asset]
                    else:
                        data.band_names = [f"{asset}_{n}" for n in data.band_names]
            return data
        img = multi_arrays(assets, _reader, bbox, **kwargs)
        if expression:
            return img.apply_expression(expression)
        return img
    def preview(
        self,
        assets: Optional[Union[Sequence[str], str]] = None,
        expression: Optional[str] = None,
        asset_indexes: Optional[Dict[str, Indexes]] = None,  # Indexes for each asset
        asset_as_band: bool = False,
        **kwargs: Any,
    ) -> ImageData:
        """Read and merge previews from multiple assets.

        Args:
            assets (sequence of str or str, optional): assets to fetch info from.
            expression (str, optional): rio-tiler expression for the asset list (e.g. asset1/asset2+asset3).
            asset_indexes (dict, optional): Band indexes for each asset (e.g {"asset1": 1, "asset2": (1, 2,)}).
            asset_as_band (bool, optional): use the asset name as the band name (requires single-band assets).
            kwargs (optional): Options to forward to the `self.reader.preview` method.

        Returns:
            rio_tiler.models.ImageData: ImageData instance with data, mask and tile spatial info.

        """
        if isinstance(assets, str):
            assets = (assets,)
        if assets and expression:
            warnings.warn(
                "Both expression and assets passed; expression will overwrite assets parameter.",
                ExpressionMixingWarning,
            )
        if expression:
            assets = self.parse_expression(expression, asset_as_band=asset_as_band)
        if not assets:
            raise MissingAssets(
                "assets must be passed either via expression or assets options."
            )
        asset_indexes = asset_indexes or {}
        def _reader(asset: str, **kwargs: Any) -> ImageData:
            # Per-asset indexes win over a global `indexes` kwarg.
            idx = asset_indexes.get(asset) or kwargs.pop("indexes", None)  # type: ignore
            asset_info = self._get_asset_info(asset)
            url = asset_info["url"]
            with self.ctx(**asset_info.get("env", {})):
                with self.reader(url, tms=self.tms, **self.reader_options) as src:  # type: ignore
                    data = src.preview(indexes=idx, **kwargs)
                    self._update_statistics(
                        data,
                        indexes=idx,
                        statistics=asset_info.get("dataset_statistics"),
                    )
                    # Merge asset-info metadata, keyed by asset name.
                    metadata = data.metadata or {}
                    if m := asset_info.get("metadata"):
                        metadata.update(m)
                    data.metadata = {asset: metadata}
                    if asset_as_band:
                        if len(data.band_names) > 1:
                            raise AssetAsBandError(
                                "Can't use asset_as_band for multibands asset"
                            )
                        data.band_names = [asset]
                    else:
                        data.band_names = [f"{asset}_{n}" for n in data.band_names]
            return data
        img = multi_arrays(assets, _reader, **kwargs)
        if expression:
            return img.apply_expression(expression)
        return img
    def point(
        self,
        lon: float,
        lat: float,
        assets: Optional[Union[Sequence[str], str]] = None,
        expression: Optional[str] = None,
        asset_indexes: Optional[Dict[str, Indexes]] = None,  # Indexes for each asset
        asset_as_band: bool = False,
        **kwargs: Any,
    ) -> PointData:
        """Read pixel value from multiple assets.

        Args:
            lon (float): Longitude.
            lat (float): Latitude.
            assets (sequence of str or str, optional): assets to fetch info from.
            expression (str, optional): rio-tiler expression for the asset list (e.g. asset1/asset2+asset3).
            asset_indexes (dict, optional): Band indexes for each asset (e.g {"asset1": 1, "asset2": (1, 2,)}).
            asset_as_band (bool, optional): use the asset name as the band name (requires single-band assets).
            kwargs (optional): Options to forward to the `self.reader.point` method.

        Returns:
            PointData

        """
        if isinstance(assets, str):
            assets = (assets,)
        if assets and expression:
            warnings.warn(
                "Both expression and assets passed; expression will overwrite assets parameter.",
                ExpressionMixingWarning,
            )
        if expression:
            assets = self.parse_expression(expression, asset_as_band=asset_as_band)
        if not assets:
            raise MissingAssets(
                "assets must be passed either via expression or assets options."
            )
        asset_indexes = asset_indexes or {}
        def _reader(asset: str, *args, **kwargs: Any) -> PointData:
            # Per-asset indexes win over a global `indexes` kwarg.
            idx = asset_indexes.get(asset) or kwargs.pop("indexes", None)  # type: ignore
            asset_info = self._get_asset_info(asset)
            url = asset_info["url"]
            with self.ctx(**asset_info.get("env", {})):
                with self.reader(url, tms=self.tms, **self.reader_options) as src:  # type: ignore
                    data = src.point(*args, indexes=idx, **kwargs)
                    # Merge asset-info metadata, keyed by asset name.
                    metadata = data.metadata or {}
                    if m := asset_info.get("metadata"):
                        metadata.update(m)
                    data.metadata = {asset: metadata}
                    if asset_as_band:
                        if len(data.band_names) > 1:
                            raise AssetAsBandError(
                                "Can't use asset_as_band for multibands asset"
                            )
                        data.band_names = [asset]
                    else:
                        data.band_names = [f"{asset}_{n}" for n in data.band_names]
            return data
        data = multi_points(assets, _reader, lon, lat, **kwargs)
        if expression:
            return data.apply_expression(expression)
        return data
    def feature(
        self,
        shape: Dict,
        assets: Optional[Union[Sequence[str], str]] = None,
        expression: Optional[str] = None,
        asset_indexes: Optional[Dict[str, Indexes]] = None,  # Indexes for each asset
        asset_as_band: bool = False,
        **kwargs: Any,
    ) -> ImageData:
        """Read and merge parts defined by geojson feature from multiple assets.

        Args:
            shape (dict): Valid GeoJSON feature.
            assets (sequence of str or str, optional): assets to fetch info from.
            expression (str, optional): rio-tiler expression for the asset list (e.g. asset1/asset2+asset3).
            asset_indexes (dict, optional): Band indexes for each asset (e.g {"asset1": 1, "asset2": (1, 2,)}).
            asset_as_band (bool, optional): use the asset name as the band name (requires single-band assets).
            kwargs (optional): Options to forward to the `self.reader.feature` method.

        Returns:
            rio_tiler.models.ImageData: ImageData instance with data, mask and tile spatial info.

        """
        if isinstance(assets, str):
            assets = (assets,)
        if assets and expression:
            warnings.warn(
                "Both expression and assets passed; expression will overwrite assets parameter.",
                ExpressionMixingWarning,
            )
        if expression:
            assets = self.parse_expression(expression, asset_as_band=asset_as_band)
        if not assets:
            raise MissingAssets(
                "assets must be passed either via expression or assets options."
            )
        asset_indexes = asset_indexes or {}
        def _reader(asset: str, *args: Any, **kwargs: Any) -> ImageData:
            # Per-asset indexes win over a global `indexes` kwarg.
            idx = asset_indexes.get(asset) or kwargs.pop("indexes", None)  # type: ignore
            asset_info = self._get_asset_info(asset)
            url = asset_info["url"]
            with self.ctx(**asset_info.get("env", {})):
                with self.reader(url, tms=self.tms, **self.reader_options) as src:  # type: ignore
                    data = src.feature(*args, indexes=idx, **kwargs)
                    self._update_statistics(
                        data,
                        indexes=idx,
                        statistics=asset_info.get("dataset_statistics"),
                    )
                    # Merge asset-info metadata, keyed by asset name.
                    metadata = data.metadata or {}
                    if m := asset_info.get("metadata"):
                        metadata.update(m)
                    data.metadata = {asset: metadata}
                    if asset_as_band:
                        if len(data.band_names) > 1:
                            raise AssetAsBandError(
                                "Can't use asset_as_band for multibands asset"
                            )
                        data.band_names = [asset]
                    else:
                        data.band_names = [f"{asset}_{n}" for n in data.band_names]
            return data
        img = multi_arrays(assets, _reader, shape, **kwargs)
        if expression:
            return img.apply_expression(expression)
        return img
@attr.s
class MultiBandReader(SpatialMixin, metaclass=abc.ABCMeta):
    """Multi Band Reader.

    This Abstract Base Class Reader is suited for dataset that stores spectral bands as separate files (e.g. Sentinel 2).

    Attributes:
        input (any): input data.
        tms (morecantile.TileMatrixSet, optional): TileMatrixSet grid definition. Defaults to `WebMercatorQuad`.
        minzoom (int, optional): Set dataset's minzoom.
        maxzoom (int, optional): Set dataset's maxzoom.
        reader_options (dict, option): options to forward to the reader. Defaults to `{}`.

    """
    input: Any = attr.ib()
    tms: TileMatrixSet = attr.ib(default=WEB_MERCATOR_TMS)
    minzoom: Optional[int] = attr.ib(default=None)
    maxzoom: Optional[int] = attr.ib(default=None)
    # `reader` and `bands` are not init arguments: concrete subclasses set them.
    reader: Type[BaseReader] = attr.ib(init=False)
    reader_options: Dict = attr.ib(factory=dict)
    bands: Sequence[str] = attr.ib(init=False)
    def __enter__(self):
        """Support using with Context Managers."""
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        """Support using with Context Managers."""
        pass
    @abc.abstractmethod
    def _get_band_url(self, band: str) -> str:
        """Validate band name and construct url."""
        ...
    def parse_expression(self, expression: str) -> Tuple:
        """Parse rio-tiler band math expression and return the referenced band names."""
        # Match any known band name as a whole word.
        # NOTE(review): band names are interpolated into the regex unescaped —
        # assumes they contain no regex metacharacters.
        input_bands = "|".join([rf"\b{band}\b" for band in self.bands])
        # NOTE(review): this replace() looks like a no-op on the raw f-string
        # built above (it never contains a literal double backslash) — confirm
        # before removing.
        _re = re.compile(input_bands.replace("\\\\", "\\"))
        bands = tuple(set(re.findall(_re, expression)))
        if not bands:
            raise InvalidExpression(
                f"Could not find any valid bands in '{expression}' expression."
            )
        return bands
    def info(
        self, bands: Optional[Union[Sequence[str], str]] = None, *args, **kwargs: Any
    ) -> Info:
        """Return metadata from multiple bands.

        Args:
            bands (sequence of str or str, optional): band names to fetch info from. Defaults to all available bands.

        Returns:
            rio_tiler.models.Info: merged info for the requested bands.

        """
        if not bands:
            warnings.warn(
                "No `bands` option passed, will fetch info for all available bands.",
                UserWarning,
            )
        bands = bands or self.bands
        if isinstance(bands, str):
            bands = (bands,)
        def _reader(band: str, **kwargs: Any) -> Info:
            url = self._get_band_url(band)
            with self.reader(url, tms=self.tms, **self.reader_options) as src:  # type: ignore
                return src.info()
        bands_metadata = multi_values(bands, _reader, *args, **kwargs)
        meta = {
            "bounds": self.geographic_bounds,
            "minzoom": self.minzoom,
            "maxzoom": self.maxzoom,
        }
        # We only keep the value for the first band.
        meta["band_metadata"] = [
            (band, bands_metadata[band].band_metadata[0][1])
            for ix, band in enumerate(bands)
        ]
        meta["band_descriptions"] = [
            (band, bands_metadata[band].band_descriptions[0][1])
            for ix, band in enumerate(bands)
        ]
        # dtype/nodata_type are taken from the first band only; assumes all
        # bands share the same data type and nodata handling.
        meta["dtype"] = bands_metadata[bands[0]].dtype
        meta["colorinterp"] = [
            bands_metadata[band].colorinterp[0] for _, band in enumerate(bands)
        ]
        meta["nodata_type"] = bands_metadata[bands[0]].nodata_type
        return Info(**meta)
    def statistics(
        self,
        bands: Optional[Union[Sequence[str], str]] = None,
        expression: Optional[str] = None,
        categorical: bool = False,
        categories: Optional[List[float]] = None,
        percentiles: Optional[List[int]] = None,
        hist_options: Optional[Dict] = None,
        max_size: int = 1024,
        **kwargs: Any,
    ) -> Dict[str, BandStatistics]:
        """Return array statistics for multiple assets.

        Args:
            bands (sequence of str or str): bands to fetch info from. Defaults to all available bands.
            expression (str, optional): rio-tiler expression for the band list (e.g. b1/b2+b3).
            categorical (bool): treat input data as categorical data. Defaults to False.
            categories (list of numbers, optional): list of categories to return value for.
            percentiles (list of numbers, optional): list of percentile values to calculate. Defaults to `[2, 98]`.
            hist_options (dict, optional): Options to forward to numpy.histogram function.
            max_size (int, optional): Limit the size of the longest dimension of the dataset read, respecting bounds X/Y aspect ratio. Defaults to 1024.
            kwargs (optional): Options to forward to the `self.preview` method.

        Returns:
            dict: Multiple assets statistics in form of {"{band}/{expression}": rio_tiler.models.BandStatistics, ...}.

        """
        if not expression:
            if not bands:
                warnings.warn(
                    "No `bands` option passed, will fetch statistics for all available bands.",
                    UserWarning,
                )
            bands = bands or self.bands
        # Statistics are computed on a merged (possibly downsampled) preview.
        data = self.preview(
            bands=bands,
            expression=expression,
            max_size=max_size,
            **kwargs,
        )
        return data.statistics(
            categorical=categorical,
            categories=categories,
            percentiles=percentiles,
            hist_options=hist_options,
        )
    def tile(
        self,
        tile_x: int,
        tile_y: int,
        tile_z: int,
        bands: Optional[Union[Sequence[str], str]] = None,
        expression: Optional[str] = None,
        **kwargs: Any,
    ) -> ImageData:
        """Read and merge Web Map tiles multiple bands.

        Args:
            tile_x (int): Tile's horizontal index.
            tile_y (int): Tile's vertical index.
            tile_z (int): Tile's zoom level index.
            bands (sequence of str or str, optional): bands to fetch info from.
            expression (str, optional): rio-tiler expression for the band list (e.g. b1/b2+b3).
            kwargs (optional): Options to forward to the `self.reader.tile` method.

        Returns:
            rio_tiler.models.ImageData: ImageData instance with data, mask and tile spatial info.

        Raises:
            TileOutsideBounds: if the tile does not intersect the dataset bounds.
            MissingBands: if neither `bands` nor `expression` is provided.

        """
        if not self.tile_exists(tile_x, tile_y, tile_z):
            raise TileOutsideBounds(
                f"Tile {tile_z}/{tile_x}/{tile_y} is outside image bounds"
            )
        if isinstance(bands, str):
            bands = (bands,)
        if bands and expression:
            warnings.warn(
                "Both expression and bands passed; expression will overwrite bands parameter.",
                ExpressionMixingWarning,
            )
        if expression:
            bands = self.parse_expression(expression)
        if not bands:
            raise MissingBands(
                "bands must be passed either via expression or bands options."
            )
        def _reader(band: str, *args: Any, **kwargs: Any) -> ImageData:
            url = self._get_band_url(band)
            with self.reader(url, tms=self.tms, **self.reader_options) as src:  # type: ignore
                data = src.tile(*args, **kwargs)
                # Key per-band metadata by band name.
                if data.metadata:
                    data.metadata = {band: data.metadata}
                data.band_names = [band]  # use `band` as name instead of band index
            return data
        img = multi_arrays(bands, _reader, tile_x, tile_y, tile_z, **kwargs)
        if expression:
            return img.apply_expression(expression)
        return img
    def part(
        self,
        bbox: BBox,
        bands: Optional[Union[Sequence[str], str]] = None,
        expression: Optional[str] = None,
        **kwargs: Any,
    ) -> ImageData:
        """Read and merge parts from multiple bands.

        Args:
            bbox (tuple): Output bounds (left, bottom, right, top) in target crs.
            bands (sequence of str or str, optional): bands to fetch info from.
            expression (str, optional): rio-tiler expression for the band list (e.g. b1/b2+b3).
            kwargs (optional): Options to forward to the 'self.reader.part' method.

        Returns:
            rio_tiler.models.ImageData: ImageData instance with data, mask and tile spatial info.

        """
        if isinstance(bands, str):
            bands = (bands,)
        if bands and expression:
            warnings.warn(
                "Both expression and bands passed; expression will overwrite bands parameter.",
                ExpressionMixingWarning,
            )
        if expression:
            bands = self.parse_expression(expression)
        if not bands:
            raise MissingBands(
                "bands must be passed either via expression or bands options."
            )
        def _reader(band: str, *args: Any, **kwargs: Any) -> ImageData:
            url = self._get_band_url(band)
            with self.reader(url, tms=self.tms, **self.reader_options) as src:  # type: ignore
                data = src.part(*args, **kwargs)
                # Key per-band metadata by band name.
                if data.metadata:
                    data.metadata = {band: data.metadata}
                data.band_names = [band]  # use `band` as name instead of band index
            return data
        img = multi_arrays(bands, _reader, bbox, **kwargs)
        if expression:
            return img.apply_expression(expression)
        return img
    def preview(
        self,
        bands: Optional[Union[Sequence[str], str]] = None,
        expression: Optional[str] = None,
        **kwargs: Any,
    ) -> ImageData:
        """Read and merge previews from multiple bands.

        Args:
            bands (sequence of str or str, optional): bands to fetch info from.
            expression (str, optional): rio-tiler expression for the band list (e.g. b1/b2+b3).
            kwargs (optional): Options to forward to the `self.reader.preview` method.

        Returns:
            rio_tiler.models.ImageData: ImageData instance with data, mask and tile spatial info.

        """
        if isinstance(bands, str):
            bands = (bands,)
        if bands and expression:
            warnings.warn(
                "Both expression and bands passed; expression will overwrite bands parameter.",
                ExpressionMixingWarning,
            )
        if expression:
            bands = self.parse_expression(expression)
        if not bands:
            raise MissingBands(
                "bands must be passed either via expression or bands options."
            )
        def _reader(band: str, **kwargs: Any) -> ImageData:
            url = self._get_band_url(band)
            with self.reader(url, tms=self.tms, **self.reader_options) as src:  # type: ignore
                data = src.preview(**kwargs)
                # Key per-band metadata by band name.
                if data.metadata:
                    data.metadata = {band: data.metadata}
                data.band_names = [band]  # use `band` as name instead of band index
            return data
        img = multi_arrays(bands, _reader, **kwargs)
        if expression:
            return img.apply_expression(expression)
        return img
    def point(
        self,
        lon: float,
        lat: float,
        bands: Optional[Union[Sequence[str], str]] = None,
        expression: Optional[str] = None,
        **kwargs: Any,
    ) -> PointData:
        """Read a pixel values from multiple bands.

        Args:
            lon (float): Longitude.
            lat (float): Latitude.
            bands (sequence of str or str, optional): bands to fetch info from.
            expression (str, optional): rio-tiler expression for the band list (e.g. b1/b2+b3).
            kwargs (optional): Options to forward to the `self.reader.point` method.

        Returns:
            PointData

        """
        if isinstance(bands, str):
            bands = (bands,)
        if bands and expression:
            warnings.warn(
                "Both expression and bands passed; expression will overwrite bands parameter.",
                ExpressionMixingWarning,
            )
        if expression:
            bands = self.parse_expression(expression)
        if not bands:
            raise MissingBands(
                "bands must be passed either via expression or bands options."
            )
        def _reader(band: str, *args, **kwargs: Any) -> PointData:
            url = self._get_band_url(band)
            with self.reader(url, tms=self.tms, **self.reader_options) as src:  # type: ignore
                data = src.point(*args, **kwargs)
                # Key per-band metadata by band name.
                if data.metadata:
                    data.metadata = {band: data.metadata}
                data.band_names = [band]  # use `band` as name instead of band index
            return data
        data = multi_points(bands, _reader, lon, lat, **kwargs)
        if expression:
            return data.apply_expression(expression)
        return data
    def feature(
        self,
        shape: Dict,
        bands: Optional[Union[Sequence[str], str]] = None,
        expression: Optional[str] = None,
        **kwargs: Any,
    ) -> ImageData:
        """Read and merge parts defined by geojson feature from multiple bands.

        Args:
            shape (dict): Valid GeoJSON feature.
            bands (sequence of str or str, optional): bands to fetch info from.
            expression (str, optional): rio-tiler expression for the band list (e.g. b1/b2+b3).
            kwargs (optional): Options to forward to the `self.reader.feature` method.

        Returns:
            rio_tiler.models.ImageData: ImageData instance with data, mask and tile spatial info.

        """
        if isinstance(bands, str):
            bands = (bands,)
        if bands and expression:
            warnings.warn(
                "Both expression and bands passed; expression will overwrite bands parameter.",
                ExpressionMixingWarning,
            )
        if expression:
            bands = self.parse_expression(expression)
        if not bands:
            raise MissingBands(
                "bands must be passed either via expression or bands options."
            )
        def _reader(band: str, *args: Any, **kwargs: Any) -> ImageData:
            url = self._get_band_url(band)
            with self.reader(url, tms=self.tms, **self.reader_options) as src:  # type: ignore
                data = src.feature(*args, **kwargs)
                # Key per-band metadata by band name.
                if data.metadata:
                    data.metadata = {band: data.metadata}
                data.band_names = [band]  # use `band` as name instead of band index
            return data
        img = multi_arrays(bands, _reader, shape, **kwargs)
        if expression:
            return img.apply_expression(expression)
        return img
from inspect import isclass
from typing import Any, Callable, List, Optional, Sequence, Tuple, Type, Union, cast
from rasterio.crs import CRS
from rio_tiler.constants import MAX_THREADS
from rio_tiler.errors import (
EmptyMosaicError,
InvalidMosaicMethod,
PointOutsideBounds,
TileOutsideBounds,
)
from rio_tiler.models import ImageData, PointData
from rio_tiler.mosaic.methods.base import MosaicMethodBase
from rio_tiler.mosaic.methods.defaults import FirstMethod
from rio_tiler.tasks import create_tasks, filter_tasks
from rio_tiler.types import BBox
from rio_tiler.utils import _chunks
def mosaic_reader(
    mosaic_assets: Sequence,
    reader: Callable[..., ImageData],
    *args: Any,
    pixel_selection: Union[Type[MosaicMethodBase], MosaicMethodBase] = FirstMethod,
    chunk_size: Optional[int] = None,
    threads: int = MAX_THREADS,
    allowed_exceptions: Tuple = (TileOutsideBounds,),
    **kwargs,
) -> Tuple[ImageData, List]:
    """Merge multiple assets.

    Args:
        mosaic_assets (sequence): List of assets.
        reader (callable): Reader function. The function MUST take `(asset, *args, **kwargs)` as arguments, and MUST return an ImageData.
        args (Any): Argument to forward to the reader function.
        pixel_selection (MosaicMethod, optional): Instance of MosaicMethodBase class. Defaults to `rio_tiler.mosaic.methods.defaults.FirstMethod`.
        chunk_size (int, optional): Control the number of asset to process per loop.
        threads (int, optional): Number of threads to use. If <= 1, runs single threaded without an event loop. By default reads from the MAX_THREADS environment variable, and if not found defaults to multiprocessing.cpu_count() * 5.
        allowed_exceptions (tuple, optional): List of exceptions which will be ignored. Note: `TileOutsideBounds` is likely to be raised and should be included in the allowed_exceptions. Defaults to `(TileOutsideBounds, )`.
        kwargs (optional): Reader callable's keywords options.

    Returns:
        tuple: ImageData and assets (list).

    Raises:
        InvalidMosaicMethod: if `pixel_selection` is not a MosaicMethodBase.
        EmptyMosaicError: if no asset contributed any data.

    Examples:
        >>> def reader(asset: str, *args, **kwargs) -> ImageData:
                with Reader(asset) as src:
                    return src.tile(*args, **kwargs)

            x, y, z = 10, 10, 4
            img = mosaic_reader(["cog.tif", "cog2.tif"], reader, x, y, z)

        >>> def reader(asset: str, *args, **kwargs) -> ImageData:
                with Reader(asset) as src:
                    return src.preview(*args, **kwargs)

            img = mosaic_reader(["cog.tif", "cog2.tif"], reader)

    """
    # Accept either a MosaicMethodBase subclass or an already-built instance.
    if isclass(pixel_selection):
        pixel_selection = cast(Type[MosaicMethodBase], pixel_selection)

        if issubclass(pixel_selection, MosaicMethodBase):
            pixel_selection = pixel_selection()

    if not isinstance(pixel_selection, MosaicMethodBase):
        raise InvalidMosaicMethod(
            "Mosaic filling algorithm should be an instance of "
            "'rio_tiler.mosaic.methods.base.MosaicMethodBase'"
        )

    # Read `chunk_size` assets at a time so an early `is_done` can skip
    # reading the remaining chunks entirely.
    if not chunk_size:
        chunk_size = threads if threads > 1 else len(mosaic_assets)

    assets_used: List = []

    # Carry the spatial info of the last successfully read asset; every
    # asset feeding the same mosaic shares it.
    crs: Optional[CRS]
    bounds: Optional[BBox]
    band_names: List[str]

    for chunks in _chunks(mosaic_assets, chunk_size):
        tasks = create_tasks(reader, chunks, threads, *args, **kwargs)
        for img, asset in filter_tasks(
            tasks,
            allowed_exceptions=allowed_exceptions,
        ):
            crs = img.crs
            bounds = img.bounds
            band_names = img.band_names

            pixel_selection.cutline_mask = img.cutline_mask

            assets_used.append(asset)
            pixel_selection.feed(img.array)

            # Short-circuit: the selection method declared the mosaic full.
            if pixel_selection.is_done and pixel_selection.data is not None:
                return (
                    ImageData(
                        pixel_selection.data,
                        assets=assets_used,
                        crs=crs,
                        bounds=bounds,
                        band_names=band_names,
                    ),
                    assets_used,
                )

    if pixel_selection.data is None:
        raise EmptyMosaicError("Method returned an empty array")

    return (
        ImageData(
            pixel_selection.data,
            assets=assets_used,
            crs=crs,
            bounds=bounds,
            band_names=band_names,
        ),
        assets_used,
    )
def mosaic_point_reader(
    mosaic_assets: Sequence,
    reader: Callable[..., PointData],
    *args: Any,
    pixel_selection: Union[Type[MosaicMethodBase], MosaicMethodBase] = FirstMethod,
    chunk_size: Optional[int] = None,
    threads: int = MAX_THREADS,
    allowed_exceptions: Tuple = (PointOutsideBounds,),
    **kwargs,
) -> Tuple[PointData, List]:
    """Merge multiple assets.

    Args:
        mosaic_assets (sequence): List of assets.
        reader (callable): Reader function. The function MUST take `(asset, *args, **kwargs)` as arguments, and MUST return a PointData object.
        args (Any): Argument to forward to the reader function.
        pixel_selection (MosaicMethod, optional): Instance of MosaicMethodBase class. Defaults to `rio_tiler.mosaic.methods.defaults.FirstMethod`.
        chunk_size (int, optional): Control the number of asset to process per loop.
        threads (int, optional): Number of threads to use. If <= 1, runs single threaded without an event loop. By default reads from the MAX_THREADS environment variable, and if not found defaults to multiprocessing.cpu_count() * 5.
        allowed_exceptions (tuple, optional): List of exceptions which will be ignored. Note: `PointOutsideBounds` is likely to be raised and should be included in the allowed_exceptions. Defaults to `(PointOutsideBounds, )`.
        kwargs (optional): Reader callable's keywords options.

    Returns:
        tuple: PointData and assets (list).

    Raises:
        InvalidMosaicMethod: if `pixel_selection` is not a MosaicMethodBase.
        EmptyMosaicError: if no asset contributed any data.

    Examples:
        >>> def reader(asset: str, *args, **kwargs) -> PointData:
                with Reader(asset) as src:
                    return src.point(*args, **kwargs)

            pt = mosaic_point_reader(["cog.tif", "cog2.tif"], reader, 0, 0)

    """
    # Accept either a MosaicMethodBase subclass or an already-built instance.
    if isclass(pixel_selection):
        pixel_selection = cast(Type[MosaicMethodBase], pixel_selection)

        if issubclass(pixel_selection, MosaicMethodBase):
            pixel_selection = pixel_selection()

    if not isinstance(pixel_selection, MosaicMethodBase):
        raise InvalidMosaicMethod(
            "Mosaic filling algorithm should be an instance of "
            "'rio_tiler.mosaic.methods.base.MosaicMethodBase'"
        )

    # Read `chunk_size` assets at a time so an early `is_done` can skip
    # reading the remaining chunks entirely.
    if not chunk_size:
        chunk_size = threads if threads > 1 else len(mosaic_assets)

    assets_used: List = []

    # Carry the spatial info of the last successfully read asset.
    crs: Optional[CRS]
    coordinates: Optional[Tuple[float, float]]
    band_names: List[str]

    for chunks in _chunks(mosaic_assets, chunk_size):
        tasks = create_tasks(reader, chunks, threads, *args, **kwargs)
        for pt, asset in filter_tasks(
            tasks,
            allowed_exceptions=allowed_exceptions,
        ):
            crs = pt.crs
            coordinates = pt.coordinates
            band_names = pt.band_names

            assets_used.append(asset)
            pixel_selection.feed(pt.array)

            # Short-circuit: the selection method declared itself done.
            if pixel_selection.is_done and pixel_selection.data is not None:
                return (
                    PointData(
                        pixel_selection.data,
                        assets=assets_used,
                        crs=crs,
                        coordinates=coordinates,
                        band_names=band_names,
                    ),
                    assets_used,
                )

    if pixel_selection.data is None:
        raise EmptyMosaicError("Method returned an empty array")

    return (
        PointData(
            pixel_selection.data,
            assets=assets_used,
            crs=crs,
            coordinates=coordinates,
            band_names=band_names,
        ),
        assets_used,
    )
from dataclasses import dataclass, field
from typing import List, Optional
import numpy
from rio_tiler.mosaic.methods.base import MosaicMethodBase
@dataclass
class FirstMethod(MosaicMethodBase):
    """Feed the mosaic array with the first pixel available."""

    # Tell the mosaic loop it may stop as soon as every pixel is filled.
    exit_when_filled: bool = field(default=True, init=False)

    def feed(self, array: Optional[numpy.ma.MaskedArray]):
        """Add data to the mosaic array.

        Only pixels that are still masked in the mosaic AND valid in
        `array` are copied; pixels already filled are left untouched.
        """
        if self.mosaic is None:
            self.mosaic = array

        else:
            # True where the mosaic is empty but the new array has data.
            pidex = self.mosaic.mask & ~array.mask

            mask = numpy.where(pidex, array.mask, self.mosaic.mask)
            self.mosaic = numpy.ma.where(pidex, array, self.mosaic)
            # `ma.where` does not combine masks the way we need, so
            # re-apply the merged mask explicitly.
            self.mosaic.mask = mask
@dataclass
class HighestMethod(MosaicMethodBase):
    """Feed the mosaic array with the highest pixel values."""

    def feed(self, array: Optional[numpy.ma.MaskedArray]):
        """Add data to the mosaic array.

        A pixel is replaced when the new value is higher and valid, or
        when the mosaic pixel is still masked.
        """
        if self.mosaic is None:
            self.mosaic = array

        else:
            # True where the new (valid) value beats the current one,
            # or where the mosaic has no value yet.
            pidex = (
                numpy.bitwise_and(array.data > self.mosaic.data, ~array.mask)
                | self.mosaic.mask
            )

            mask = numpy.where(pidex, array.mask, self.mosaic.mask)
            self.mosaic = numpy.ma.where(pidex, array, self.mosaic)
            # Re-apply the merged mask; `ma.where` alone would not keep it.
            self.mosaic.mask = mask
@dataclass
class LowestMethod(MosaicMethodBase):
    """Feed the mosaic array with the lowest pixel values."""

    def feed(self, array: Optional[numpy.ma.MaskedArray]):
        """Add data to the mosaic array.

        A pixel is replaced when the new value is lower and valid, or
        when the mosaic pixel is still masked.
        """
        if self.mosaic is None:
            self.mosaic = array

        else:
            # True where the new (valid) value is below the current one,
            # or where the mosaic has no value yet.
            pidex = (
                numpy.bitwise_and(array.data < self.mosaic.data, ~array.mask)
                | self.mosaic.mask
            )

            mask = numpy.where(pidex, array.mask, self.mosaic.mask)
            self.mosaic = numpy.ma.where(pidex, array, self.mosaic)
            # Re-apply the merged mask; `ma.where` alone would not keep it.
            self.mosaic.mask = mask
@dataclass
class MeanMethod(MosaicMethodBase):
    """Stack the arrays and return the Mean pixel value."""

    # When True, cast the (float) mean back to the dtype of the inputs.
    enforce_data_type: bool = True
    stack: List[numpy.ma.MaskedArray] = field(default_factory=list, init=False)

    @property
    def data(self) -> Optional[numpy.ma.MaskedArray]:
        """Return Mean of the data stack."""
        if not self.stack:
            return None

        mean = numpy.ma.mean(numpy.ma.stack(self.stack, axis=0), axis=0)
        if self.enforce_data_type:
            mean = mean.astype(self.stack[0].dtype)

        return mean

    def feed(self, array: numpy.ma.MaskedArray):
        """Add array to the stack."""
        self.stack.append(array)
@dataclass
class MedianMethod(MosaicMethodBase):
    """Stack the arrays and return the Median pixel value."""

    # When True, cast the (float) median back to the dtype of the inputs.
    enforce_data_type: bool = True
    stack: List[numpy.ma.MaskedArray] = field(default_factory=list, init=False)

    @property
    def data(self) -> Optional[numpy.ma.MaskedArray]:
        """Return Median of the data stack."""
        if not self.stack:
            return None

        median = numpy.ma.median(numpy.ma.stack(self.stack, axis=0), axis=0)
        if self.enforce_data_type:
            median = median.astype(self.stack[0].dtype)

        return median

    def feed(self, array: Optional[numpy.ma.MaskedArray]):
        """Add array to the stack."""
        self.stack.append(array)
@dataclass
class StdevMethod(MosaicMethodBase):
    """Stack the arrays and return the Standard Deviation value."""

    stack: List[numpy.ma.MaskedArray] = field(default_factory=list, init=False)

    @property
    def data(self) -> Optional[numpy.ma.MaskedArray]:
        """Return STDDEV of the data stack."""
        if not self.stack:
            return None

        return numpy.ma.std(numpy.ma.stack(self.stack, axis=0), axis=0)

    def feed(self, array: Optional[numpy.ma.MaskedArray]):
        """Add array to the stack."""
        self.stack.append(array)
@dataclass
class LastBandHighMethod(MosaicMethodBase):
    """Feed the mosaic array using the last band as decision factor (highest value)."""

    @property
    def data(self) -> Optional[numpy.ma.MaskedArray]:
        """Return data.

        The decision band is dropped; only the leading bands are returned.
        """
        if self.mosaic is not None:
            return self.mosaic[:-1].copy()

        return None

    def feed(self, array: Optional[numpy.ma.MaskedArray]):
        """Add data to the mosaic array.

        All bands of a pixel are replaced together when the new array's
        last-band value is higher (and valid), or the mosaic is masked.
        """
        if self.mosaic is None:
            self.mosaic = array

        else:
            # Compare only the last band; broadcast the decision to all bands.
            pidex = (
                numpy.bitwise_and(array.data[-1] > self.mosaic.data[-1], ~array.mask)
                | self.mosaic.mask
            )

            mask = numpy.where(pidex, array.mask, self.mosaic.mask)
            self.mosaic = numpy.ma.where(pidex, array, self.mosaic)
            # Re-apply the merged mask; `ma.where` alone would not keep it.
            self.mosaic.mask = mask
@dataclass
class LastBandLowMethod(MosaicMethodBase):
    """Feed the mosaic array using the last band as decision factor (lowest value)."""

    @property
    def data(self) -> Optional[numpy.ma.MaskedArray]:
        """Return data.

        The decision band is dropped; only the leading bands are returned.
        """
        if self.mosaic is not None:
            return self.mosaic[:-1].copy()

        return None

    def feed(self, array: Optional[numpy.ma.MaskedArray]):
        """Add data to the mosaic array.

        All bands of a pixel are replaced together when the new array's
        last-band value is lower (and valid), or the mosaic is masked.
        """
        if self.mosaic is None:
            self.mosaic = array

        else:
            # Compare only the last band; broadcast the decision to all bands.
            pidex = (
                numpy.bitwise_and(array.data[-1] < self.mosaic.data[-1], ~array.mask)
                | self.mosaic.mask
            )

            mask = numpy.where(pidex, array.mask, self.mosaic.mask)
            self.mosaic = numpy.ma.where(pidex, array, self.mosaic)
            # Re-apply the merged mask; `ma.where` alone would not keep it.
            self.mosaic.mask = mask
import re
import datetime
import numpy as np
def parse_utc_string(collected_date, collected_time_utc):
    """Combine a date and a UTC time string into a naive datetime.

    The combined string must match ``YYYY-MM-DD HH:MM:SS.SSSSSSSSZ``;
    fractional seconds are discarded before parsing.

    Parameters
    -----------
    collected_date: str
        Format: YYYY-MM-DD
    collected_time_utc: str
        Format: HH:MM:SS.SSSSSSSSZ

    Returns
    --------
    datetime object
        parsed scene center time

    Raises
    -------
    ValueError
        if the combined string does not match the expected format
    """
    combined = '{} {}'.format(collected_date, collected_time_utc)

    if not re.match(r'\d{4}\-\d{2}\-\d{2}\ \d{2}\:\d{2}\:\d{2}\.\d+Z', combined):
        raise ValueError("%s is an invalid utc time" % combined)

    # Drop the fractional-seconds suffix; strptime parses the remainder.
    whole_seconds, _, _ = combined.partition(".")
    return datetime.datetime.strptime(whole_seconds, "%Y-%m-%d %H:%M:%S")
def time_to_dec_hour(parsedtime):
    """Convert a datetime's time-of-day into decimal hours.

    Parameters
    -----------
    parsedtime: datetime object

    Returns
    --------
    decimal hour: float
        time in decimal hours
    """
    minute_part = parsedtime.minute / 60.0
    second_part = parsedtime.second / 60.0 ** 2
    return parsedtime.hour + minute_part + second_part
def calculate_declination(d):
    """Calculate the declination of the sun in radians for day-of-year *d*.

    As reference: +23.26 degrees at the northern summer solstice, -23.26
    degrees at the southern summer solstice.
    See: https://en.wikipedia.org/wiki/Position_of_the_Sun#Calculations

    Parameters
    -----------
    d: int
        days from midnight on January 1st

    Returns
    --------
    declination in radians: float
        the declination on day d
    """
    # Earth's axial tilt term, modulated by the orbital phase
    # (zero crossing at d = 81, the March equinox).
    obliquity = np.sin(np.deg2rad(23.45))
    phase = np.deg2rad(360. / 365.) * (d - 81)
    return np.arcsin(obliquity * np.sin(phase))
def solar_angle(day, utc_hour, longitude):
    """Compute the solar hour angle (degrees) for the given longitudes.

    Parameters
    -----------
    day: int
        days of the year with jan 1 as day = 1
    utc_hour: float
        decimal hour of the day in utc time to compute solar angle for
    longitude: ndarray or float
        longitude of the point(s) to compute solar angle for

    Returns
    --------
    solar angle in degrees for these longitudes
    """
    localtime = (longitude / 180.0) * 12 + utc_hour
    # Local standard time meridian for that local-time offset.
    lstm = 15 * (localtime - utc_hour)

    # Equation of time: periodic correction for orbital eccentricity
    # and axial tilt.
    B = np.deg2rad((360. / 365.) * (day - 81))
    eot = (9.87 * np.sin(2 * B)
           - 7.53 * np.cos(B)
           - 1.5 * np.sin(B))

    correction = (4 * (longitude - lstm) + eot) / 60.0
    return 15 * (localtime + correction - 12)
def _calculate_sun_elevation(longitude, latitude, declination, day, utc_hour):
    """Calculate the solar elevation angle in degrees.

    https://en.wikipedia.org/wiki/Solar_zenith_angle

    Parameters
    -----------
    longitude: ndarray or float
        longitudes of the point(s) to compute solar angle for
    latitude: ndarray or float
        latitudes of the point(s) to compute solar angle for
    declination: float
        declination of the sun in radians
    day: int
        days of the year with jan 1 as day = 1
    utc_hour: float
        decimal hour from a datetime object

    Returns
    --------
    the solar elevation angle in degrees
    """
    hour_angle = np.deg2rad(solar_angle(day, utc_hour, longitude))
    lat_rad = np.deg2rad(latitude)

    # sin(elevation) from the standard spherical-trig identity.
    sin_elevation = (
        np.sin(declination) * np.sin(lat_rad)
        + np.cos(declination) * np.cos(lat_rad) * np.cos(hour_angle)
    )

    return np.rad2deg(np.arcsin(sin_elevation))
def _create_lnglats(shape, bbox):
"""
Creates a (lng, lat) array tuple with cells that respectively
represent a longitude and a latitude at that location
Parameters
-----------
shape: tuple
the shape of the arrays to create
bbox: tuple or list
the bounds of the arrays to create in [w, s, e, n]
Returns
--------
(lngs, lats): tuple of (rows, cols) shape ndarrays
"""
rows, cols = shape
w, s, e, n = bbox
xCell = (e - w) / float(cols)
yCell = (n - s) / float(rows)
lat, lng = np.indices(shape, dtype=np.float32)
return ((lng * xCell) + w + (xCell / 2.0),
(np.flipud(lat) * yCell) + s + (yCell / 2.0))
def sun_elevation(bounds, shape, date_collected, time_collected_utc):
    """Per-pixel sun elevation angle (degrees) for a raster extent.

    Given a raster's bounds + dimensions and acquisition metadata from a
    Landsat MTL file, compute the sun elevation for every pixel.

    Parameters
    -----------
    bounds: BoundingBox
        bounding box of the input raster
    shape: tuple
        tuple of (rows, cols) or (depth, rows, cols) for input raster
    date_collected: str
        Format: YYYY-MM-DD
    time_collected_utc: str
        Format: HH:MM:SS.SSSSSSSSZ

    Returns
    --------
    ndarray
        ndarray with shape = (rows, cols) with sun elevation
        in degrees calculated for each pixel
    """
    utc_time = parse_utc_string(date_collected, time_collected_utc)

    # A leading band dimension, if present, is irrelevant here.
    if len(shape) == 3:
        rows, cols = shape[1], shape[2]
    else:
        rows, cols = shape

    lng, lat = _create_lnglats((rows, cols), list(bounds))

    decimal_hour = time_to_dec_hour(utc_time)
    day_of_year = utc_time.timetuple().tm_yday
    declination = calculate_declination(day_of_year)

    return _calculate_sun_elevation(
        lng, lat, declination, day_of_year, decimal_hour)
import numpy as np
import rasterio
from rasterio.coords import BoundingBox
from rasterio import warp
import riomucho
from rio_toa import toa_utils
from rio_toa import sun_utils
def reflectance(img, MR, AR, E, src_nodata=0):
    """Calculate top of atmosphere reflectance of Landsat 8
    as outlined here: http://landsat.usgs.gov/Landsat8_Using_Product.php

    R_raw = MR * Q + AR

    R = R_raw / cos(Z) = R_raw / sin(E)

    Z = 90 - E (in degrees)

    where:
        R_raw = TOA planetary reflectance, without correction for solar angle.
        R = TOA reflectance with a correction for the sun angle.
        MR = Band-specific multiplicative rescaling factor from the metadata
        AR = Band-specific additive rescaling factor from the metadata
        Q = Quantized and calibrated standard product pixel values (DN)
        E = Local sun elevation angle in degrees

    Parameters
    -----------
    img: ndarray
        array of input pixels of shape (rows, cols) or (depth, rows, cols)
    MR: float or list of floats
        multiplicative rescaling factor from scene metadata
    AR: float or list of floats
        additive rescaling factor from scene metadata
    E: float or numpy array of floats
        local sun elevation angle in degrees
    src_nodata: int, optional
        source nodata value; matching pixels are set to 0 in the output.
        Pass None to skip nodata handling.

    Returns
    --------
    ndarray:
        float32 ndarray with shape == input shape
    """
    if np.any(E < 0.0):
        raise ValueError("Sun elevation must be nonnegative "
                         "(sun must be above horizon for entire scene)")

    input_shape = img.shape
    multiband = len(input_shape) > 2

    # Move the band axis last so per-band MR/AR factors broadcast.
    if multiband:
        img = np.rollaxis(img, 0, len(input_shape))

    rf = ((MR * img.astype(np.float32)) + AR) / np.sin(np.deg2rad(E))

    if src_nodata is not None:
        rf[img == src_nodata] = 0.0

    if not multiband:
        return rf

    # Restore the original band-first layout and sanity-check the shape.
    restored = np.rollaxis(rf, len(input_shape) - 1, 0)
    if restored.shape != input_shape:
        raise ValueError(
            "Output shape %s is not equal to input shape %s"
            % (rf.shape, input_shape))

    return restored
def _reflectance_worker(open_files, window, ij, g_args):
    """rio mucho worker for reflectance. It reads input
    files and perform reflectance calculations on each window.

    Parameters
    ------------
    open_files: list of rasterio open files
    window: tuples
    ij: tuple
        window index (unused here)
    g_args: dictionary
        global arguments; see `calculate_landsat_reflectance`

    Returns
    ---------
    out: None
        Output is written to dst_path
    """
    data = riomucho.utils.array_stack([
        src.read(window=window).astype(np.float32)
        for src in open_files
    ])

    depth, rows, cols = data.shape

    if g_args['pixel_sunangle']:
        # Per-pixel sun angle: project the window's bounds to WGS84 and
        # build a sun-elevation grid matching the window shape.
        bbox = BoundingBox(
            *warp.transform_bounds(
                g_args['src_crs'],
                {'init': u'epsg:4326'},
                *open_files[0].window_bounds(window)))

        E = sun_utils.sun_elevation(
            bbox,
            (rows, cols),
            g_args['date_collected'],
            g_args['time_collected_utc']).reshape(rows, cols, 1)

    else:
        # We're doing whole-scene (instead of per-pixel) sunangle:
        # one scalar elevation repeated per band.
        E = np.array([g_args['E'] for i in range(depth)])

    output = toa_utils.rescale(
        reflectance(
            data,
            g_args['M'],
            g_args['A'],
            E,
            g_args['src_nodata']),
        g_args['rescale_factor'],
        g_args['dst_dtype'],
        clip=g_args['clip'])

    return output
def calculate_landsat_reflectance(src_paths, src_mtl, dst_path, rescale_factor,
                                  creation_options, bands, dst_dtype,
                                  processes, pixel_sunangle, clip=True):
    """Calculate TOA reflectance for Landsat 8 bands and write a multi-band tif.

    Parameters
    ------------
    src_paths: list of strings
        paths to the source band tifs (one per entry in `bands`)
    src_mtl: string
        path to the MTL metadata file
    dst_path: string
        destination file path
    rescale_factor: float
        rescaling factor for the output values
    creation_options: dict
        rio.options.creation_options for the output profile
    bands: list
        band numbers to process
    dst_dtype: string
        destination data dtype (numpy dtype name, e.g. 'uint16')
    processes: integer
        number of worker processes for riomucho
    pixel_sunangle: boolean
        if True, compute sun elevation per pixel instead of using the
        scene-center value from the MTL
    clip: boolean
        clip output values to the destination dtype range

    Returns
    ---------
    None
        Output is written to dst_path
    """
    mtl = toa_utils._load_mtl(src_mtl)
    metadata = mtl['L1_METADATA_FILE']

    # Band-specific reflectance rescaling factors from the MTL file.
    M = [metadata['RADIOMETRIC_RESCALING']
         ['REFLECTANCE_MULT_BAND_{}'.format(b)]
         for b in bands]

    A = [metadata['RADIOMETRIC_RESCALING']
         ['REFLECTANCE_ADD_BAND_{}'.format(b)]
         for b in bands]

    E = metadata['IMAGE_ATTRIBUTES']['SUN_ELEVATION']
    date_collected = metadata['PRODUCT_METADATA']['DATE_ACQUIRED']
    time_collected_utc = metadata['PRODUCT_METADATA']['SCENE_CENTER_TIME']

    rescale_factor = toa_utils.normalize_scale(rescale_factor, dst_dtype)

    dst_dtype = np.__dict__[dst_dtype]

    # NOTE(review): each iteration overwrites dst_profile/src_nodata, so
    # the values of the LAST source win — presumably all sources share a
    # profile; verify against callers.
    for src_path in src_paths:
        with rasterio.open(src_path) as src:
            dst_profile = src.profile.copy()
            src_nodata = src.nodata

            for co in creation_options:
                dst_profile[co] = creation_options[co]

            dst_profile['dtype'] = dst_dtype

    global_args = {
        'A': A,
        'M': M,
        'E': E,
        'src_nodata': src_nodata,
        'src_crs': dst_profile['crs'],
        'dst_dtype': dst_dtype,
        'rescale_factor': rescale_factor,
        'clip': clip,
        'pixel_sunangle': pixel_sunangle,
        'date_collected': date_collected,
        'time_collected_utc': time_collected_utc,
        'bands': len(bands)
    }

    dst_profile.update(count=len(bands))

    if len(bands) == 3:
        dst_profile.update(photometric='rgb')
    else:
        dst_profile.update(photometric='minisblack')

    with riomucho.RioMucho(list(src_paths),
                           dst_path,
                           _reflectance_worker,
                           options=dst_profile,
                           global_args=global_args,
                           mode='manual_read') as rm:

        rm.run(processes)
import numpy as np
import rasterio
import riomucho
from rio_toa import toa_utils
def radiance(img, ML, AL, src_nodata=0):
    """Calculate top of atmosphere radiance of Landsat 8
    as outlined here: http://landsat.usgs.gov/Landsat8_Using_Product.php

    L = ML * Q + AL

    where:
        L  = TOA spectral radiance (Watts / (m2 * srad * mm))
        ML = Band-specific multiplicative rescaling factor from the metadata
             (RADIANCE_MULT_BAND_x, where x is the band number)
        AL = Band-specific additive rescaling factor from the metadata
             (RADIANCE_ADD_BAND_x, where x is the band number)
        Q  = Quantized and calibrated standard product pixel values (DN)
             (ndarray img)

    Parameters
    -----------
    img: ndarray
        array of input pixels
    ML: float
        multiplicative rescaling factor from scene metadata
    AL: float
        additive rescaling factor from scene metadata
    src_nodata: int, optional
        source nodata value; matching pixels are set to 0 in the output.
        Pass None to skip nodata handling.

    Returns
    --------
    ndarray:
        float32 ndarray with shape == input shape
    """
    scaled = ML * img.astype(np.float32) + AL

    if src_nodata is not None:
        scaled[img == src_nodata] = 0.0

    return scaled
def _radiance_worker(data, window, ij, g_args):
    """
    rio mucho worker for radiance: converts one window of DNs to
    rescaled TOA radiance.

    TODO: integrate rescaling functionality for
    different output datatypes
    """
    output = toa_utils.rescale(
        radiance(
            data[0],
            g_args['M'],
            g_args['A'],
            g_args['src_nodata']),
        g_args['rescale_factor'],
        g_args['dst_dtype'],
        clip=g_args['clip'])

    return output
def calculate_landsat_radiance(src_path, src_mtl, dst_path, rescale_factor,
                               creation_options, band, dst_dtype, processes,
                               clip=True):
    """Calculate TOA radiance for one Landsat 8 band and write it to a tif.

    Parameters
    ------------
    src_path: string
        path to the source band tif
    src_mtl: string
        path to the MTL metadata file
    dst_path: string
        destination file path
    rescale_factor: float
        rescaling factor for the output values
    creation_options: dict
        rio.options.creation_options for the output profile
    band: integer
        band number to process
    dst_dtype: string
        destination data dtype (numpy dtype name, e.g. 'uint16')
    processes: integer
        number of worker processes for riomucho
    clip: boolean
        clip output values to the destination dtype range

    Returns
    ---------
    None
        Output is written to dst_path
    """
    mtl = toa_utils._load_mtl(src_mtl)

    # Band-specific radiance rescaling factors from the MTL file.
    M = toa_utils._load_mtl_key(mtl,
                                ['L1_METADATA_FILE',
                                 'RADIOMETRIC_RESCALING',
                                 'RADIANCE_MULT_BAND_'],
                                band)
    A = toa_utils._load_mtl_key(mtl,
                                ['L1_METADATA_FILE',
                                 'RADIOMETRIC_RESCALING',
                                 'RADIANCE_ADD_BAND_'],
                                band)

    rescale_factor = toa_utils.normalize_scale(rescale_factor, dst_dtype)

    dst_dtype = np.__dict__[dst_dtype]

    with rasterio.open(src_path) as src:
        dst_profile = src.profile.copy()
        src_nodata = src.nodata

        for co in creation_options:
            dst_profile[co] = creation_options[co]

        dst_profile['dtype'] = dst_dtype

    global_args = {
        'A': A,
        'M': M,
        'src_nodata': src_nodata,
        'rescale_factor': rescale_factor,
        'clip': clip,
        'dst_dtype': dst_dtype
    }

    with riomucho.RioMucho([src_path],
                           dst_path,
                           _radiance_worker,
                           options=dst_profile,
                           global_args=global_args) as rm:

        rm.run(processes)
import json
import numpy as np
import rasterio as rio
import collections
from rasterio.coords import BoundingBox
import riomucho
from rasterio import warp
from rio_toa import radiance
from rio_toa import toa_utils
from rio_toa import sun_utils
def brightness_temp(img, ML, AL, K1, K2, src_nodata=0):
    """Calculate brightness temperature of Landsat 8
    as outlined here: http://landsat.usgs.gov/Landsat8_Using_Product.php

    T = K2 / np.log((K1 / L)  + 1)

    and

    L = ML * Q + AL

    where:
        T  = At-satellite brightness temperature (degrees kelvin)
        L  = TOA spectral radiance (Watts / (m2 * srad * mm))
        ML = Band-specific multiplicative rescaling factor from the metadata
             (RADIANCE_MULT_BAND_x, where x is the band number)
        AL = Band-specific additive rescaling factor from the metadata
             (RADIANCE_ADD_BAND_x, where x is the band number)
        Q  = Quantized and calibrated standard product pixel values (DN)
             (ndarray img)
        K1 = Band-specific thermal conversion constant from the metadata
             (K1_CONSTANT_BAND_x, where x is the thermal band number)
        K2 = Band-specific thermal conversion constant from the metadata
             (K2_CONSTANT_BAND_x, where x is the thermal band number)

    Parameters
    -----------
    img: ndarray
        array of input pixels
    ML: float
        multiplicative rescaling factor from scene metadata
    AL: float
        additive rescaling factor from scene metadata
    K1: float
        thermal conversion constant from scene metadata
    K2: float
        thermal conversion constant from scene metadata
    src_nodata: int, optional
        source nodata value; matching pixels become NaN in the output

    Returns
    --------
    ndarray:
        float32 ndarray with shape == input shape
    """
    # Bug fix: forward the caller's src_nodata instead of a hard-coded 0,
    # so a non-zero nodata value is handled consistently in both steps.
    L = radiance.radiance(img, ML, AL, src_nodata=src_nodata)
    # Nodata pixels carry no valid temperature.
    # (np.nan: the np.NaN alias was removed in NumPy 2.0.)
    L[img == src_nodata] = np.nan

    T = K2 / np.log((K1 / L) + 1)

    return T
def _brightness_temp_worker(data, window, ij, g_args):
    """rio mucho worker for brightness temperature. It reads input
    files and perform brightness temperature calculations on each window.

    Parameters
    ------------
    data: list of ndarrays
        window data for each input file (only data[0] is used)
    window: tuples
    ij: tuple
        window index (unused here)
    g_args: dictionary
        global arguments; see `calculate_landsat_brightness_temperature`

    Returns
    ---------
    out: None
        Output is written to dst_path
    """
    output = toa_utils.temp_rescale(
        brightness_temp(
            data[0],
            g_args['M'],
            g_args['A'],
            g_args['K1'],
            g_args['K2'],
            g_args['src_nodata']),
        g_args['temp_scale'])

    return output.astype(g_args['dst_dtype'])
def calculate_landsat_brightness_temperature(
        src_path, src_mtl, dst_path, temp_scale,
        creation_options, band, dst_dtype, processes):
    """Calculate Landsat 8 brightness temperature for one thermal band.

    Parameters
    ------------
    src_path: string
        path to the source thermal band tif
    src_mtl: string
        mtl file path
    dst_path: string
        destination file path
    temp_scale: string
        output temperature scale, interpreted by `toa_utils.temp_rescale`
    creation_options: dictionary
        rio.options.creation_options
    band: integer
        thermal band number to process
    dst_dtype: string [default] uint16
        destination data dtype (numpy dtype name)
    processes: integer
        number of worker processes for riomucho

    Returns
    ---------
    out: None
        Output is written to dst_path
    """
    mtl = toa_utils._load_mtl(src_mtl)

    # Band-specific rescaling factors and thermal constants from the MTL.
    M = toa_utils._load_mtl_key(mtl,
                                ['L1_METADATA_FILE',
                                 'RADIOMETRIC_RESCALING',
                                 'RADIANCE_MULT_BAND_'],
                                band)
    A = toa_utils._load_mtl_key(mtl,
                                ['L1_METADATA_FILE',
                                 'RADIOMETRIC_RESCALING',
                                 'RADIANCE_ADD_BAND_'],
                                band)
    K1 = toa_utils._load_mtl_key(mtl,
                                 ['L1_METADATA_FILE',
                                  'TIRS_THERMAL_CONSTANTS',
                                  'K1_CONSTANT_BAND_'],
                                 band)
    K2 = toa_utils._load_mtl_key(mtl,
                                 ['L1_METADATA_FILE',
                                  'TIRS_THERMAL_CONSTANTS',
                                  'K2_CONSTANT_BAND_'],
                                 band)

    dst_dtype = np.__dict__[dst_dtype]

    with rio.open(src_path) as src:
        dst_profile = src.profile.copy()
        src_nodata = src.nodata

        for co in creation_options:
            dst_profile[co] = creation_options[co]

        dst_profile['dtype'] = dst_dtype

    global_args = {
        'M': M,
        'A': A,
        'K1': K1,
        'K2': K2,
        # Bug fix: use the dataset's declared nodata instead of discarding
        # it; fall back to 0 (the previous hard-coded value) when the
        # source defines none.
        'src_nodata': src_nodata if src_nodata is not None else 0,
        'temp_scale': temp_scale,
        'dst_dtype': dst_dtype
    }

    with riomucho.RioMucho([src_path],
                           dst_path,
                           _brightness_temp_worker,
                           options=dst_profile,
                           global_args=global_args) as rm:

        rm.run(processes)
import numpy as np
from .utils import epsilon
from .colorspace import saturate_rgb
# Color manipulation functions
def sigmoidal(arr, contrast, bias):
    r"""
    Sigmoidal contrast is type of contrast control that
    adjusts the contrast without saturating highlights or shadows.
    It allows control over two factors:
    the contrast range from light to dark, and where the middle value
    of the mid-tones falls. The result is a non-linear and smooth
    contrast change.

    Parameters
    ----------
    arr : ndarray, float, 0 .. 1
        Array of color values to adjust
    contrast : integer
        Enhances the intensity differences between the lighter and darker
        elements of the image. For example, 0 is none, 3 is typical and
        20 is a lot. Negative values invert the transfer function.
    bias : float, between 0 and 1
        Threshold level for the contrast function to center on
        (typically centered at 0.5)

    Returns
    -------
    ndarray with the same shape as `arr`

    Raises
    ------
    ValueError
        if `arr` or `bias` fall outside [0, 1]

    Notes
    ----------
    Sigmoidal contrast is based on the sigmoidal transfer function:

    .. math:: g(u) = ( 1/(1 + e^{- \alpha * u + \beta)})

    This sigmoid function is scaled so that the output is bound by
    the interval [0, 1].

    .. math:: ( 1/(1 + e^(\beta * (\alpha - u))) - 1/(1 + e^(\beta * \alpha)))/
        ( 1/(1 + e^(\beta*(\alpha - 1))) - 1/(1 + e^(\beta * \alpha)) )

    Where :math: `\alpha` is the threshold level, and :math: `\beta` the
    contrast factor to be applied.

    References
    ----------
    .. [CT] Hany Farid "Fundamentals of Image Processing"
            http://www.cs.dartmouth.edu/farid/downloads/tutorials/fip.pdf

    """
    if (arr.max() > 1.0 + epsilon) or (arr.min() < 0 - epsilon):
        raise ValueError("Input array must have float values between 0 and 1")

    if (bias > 1.0 + epsilon) or (bias < 0 - epsilon):
        raise ValueError("bias must be a scalar float between 0 and 1")

    alpha, beta = bias, contrast
    # We use the names a and b to match documentation.

    if alpha == 0:
        alpha = epsilon

    if beta == 0:
        return arr

    # Bug fix: np.seterr permanently mutated the process-wide NumPy error
    # state; np.errstate scopes the suppression to this computation only.
    with np.errstate(divide="ignore", invalid="ignore"):
        if beta > 0:
            numerator = 1 / (1 + np.exp(beta * (alpha - arr))) - 1 / (
                1 + np.exp(beta * alpha)
            )
            denominator = 1 / (1 + np.exp(beta * (alpha - 1))) - 1 / (
                1 + np.exp(beta * alpha)
            )
            output = numerator / denominator

        else:
            # Inverse sigmoidal function:
            # todo: account for 0s
            # todo: formatting ;)
            output = (
                (beta * alpha)
                - np.log(
                    (
                        1
                        / (
                            (arr / (1 + np.exp(beta * alpha - beta)))
                            - (arr / (1 + np.exp(beta * alpha)))
                            + (1 / (1 + np.exp(beta * alpha)))
                        )
                    )
                    - 1
                )
            ) / beta

    return output
def gamma(arr, g):
    r"""
    Apply power-law (gamma) correction to each channel value,
    pixel-by-pixel:

    .. math:: pixel_{out} = pixel_{in} ^ {1 / \gamma}

    Setting gamma (:math:`\gamma`) to be less than 1.0 darkens the image
    and setting gamma to be greater than 1.0 lightens it.

    Parameters
    ----------
    arr : ndarray, float, 0 .. 1
        Array of color values to adjust
    g (:math:`\gamma`): float
        Reasonable values range from 0.8 to 2.4.

    Raises
    ------
    ValueError
        if `arr` falls outside [0, 1], or `g` is not a positive number
    """
    out_of_range = (arr.max() > 1.0 + epsilon) or (arr.min() < 0 - epsilon)
    if out_of_range:
        raise ValueError("Input array must have float values between 0 and 1")

    if np.isnan(g) or g <= 0:
        raise ValueError("gamma must be greater than 0")

    return arr ** (1.0 / g)
def saturation(arr, proportion):
    """Adjust the saturation of an RGB array (in LCH color space).

    The image is converted to LCH and its chroma is multiplied by
    `proportion`: values above 1 make colors appear more "pure", values
    below 1 make them more "washed-out".

    Parameters
    ----------
    arr: ndarray with shape (3, ..., ...)
    proportion: number

    Raises
    ------
    ValueError
        if `arr` does not have exactly 3 bands
    """
    band_count = arr.shape[0]
    if band_count != 3:
        raise ValueError("saturation requires a 3-band array")

    return saturate_rgb(arr, proportion)
def simple_atmo_opstring(haze, contrast, bias):
    """Return the operations string for a simple atmospheric correction.

    Haze is removed by lifting the green (by a third of `haze`) and blue
    (by `haze`) gammas, then a sigmoidal contrast stretch is applied to
    all three bands.
    """
    template = (
        "gamma g {gamma_g}, "
        "gamma b {gamma_b}, "
        "sigmoidal rgb {contrast} {bias}"
    )
    return template.format(
        gamma_g=1 - (haze / 3.0),
        gamma_b=1 - haze,
        contrast=contrast,
        bias=bias,
    )
def simple_atmo(rgb, haze, contrast, bias):
    """
    A simple, static (non-adaptive) atmospheric correction function.

    Green and blue channels are gamma-lifted to compensate for haze,
    then a sigmoidal contrast stretch is applied to all three bands.
    Any extra bands beyond the first three are passed through untouched.

    Parameters
    ----------
    rgb : ndarray, float, 0 .. 1
        Array with the red, green and blue bands first
    haze: float
        Amount of haze to adjust for. For example, 0.03
    contrast : integer
        Enhances the intensity differences between the lighter and darker
        elements of the image. For example, 0 is none, 3 is typical and
        20 is a lot.
    bias : float, between 0 and 1
        Threshold level for the contrast function to center on
        (typically centered at 0.5 or 50%)
    """
    gamma_b = 1 - haze
    gamma_g = 1 - (haze / 3.0)

    corrected = np.empty(shape=(3, rgb.shape[1], rgb.shape[2]))
    corrected[0] = rgb[0]
    corrected[1] = gamma(rgb[1], gamma_g)
    corrected[2] = gamma(rgb[2], gamma_b)

    output = rgb.copy()
    output[0:3] = sigmoidal(corrected, contrast, bias)

    return output
def _op_factory(func, kwargs, opname, bands, rgb_op=False):
"""create an operation function closure
don't call directly, use parse_operations
returns a function which itself takes and returns ndarrays
"""
def f(arr):
# Avoid mutation by copying
newarr = arr.copy()
if rgb_op:
# apply func to array's first 3 bands, assumed r,g,b
# additional band(s) are untouched
newarr[0:3] = func(newarr[0:3], **kwargs)
else:
# apply func to array band at a time
for b in bands:
newarr[b - 1] = func(arr[b - 1], **kwargs)
return newarr
f.__name__ = str(opname)
return f
def parse_operations(ops_string):
    """Takes a string of operations written with a handy DSL

    "OPERATION-NAME BANDS ARG1 ARG2 OPERATION-NAME BANDS ARG"

    And returns a list of functions, each of which take and return ndarrays.

    Operation names are matched case-insensitively against `sigmoidal`,
    `gamma` and `saturation`. Band specs may be digits (1-3) or the
    letters r/g/b. `saturation` takes no band spec: it always applies to
    bands 1-3 and its second token is a positional argument instead.
    """
    band_lookup = {"r": 1, "g": 2, "b": 3}
    count = len(band_lookup)

    opfuncs = {"saturation": saturation, "sigmoidal": sigmoidal, "gamma": gamma}

    opkwargs = {
        "saturation": ("proportion",),
        "sigmoidal": ("contrast", "bias"),
        "gamma": ("g",),
    }

    # Operations that assume RGB colorspace
    rgb_ops = ("saturation",)

    # split into tokens, commas are optional whitespace
    tokens = [x.strip() for x in ops_string.replace(",", "").split(" ")]

    # Group tokens into per-operation lists: a new group starts at every
    # recognized operation name; all other tokens (lowercased) join the
    # current group as band spec / arguments.
    operations = []
    current = []
    for token in tokens:
        if token.lower() in opfuncs.keys():
            if len(current) > 0:
                operations.append(current)
                current = []
        current.append(token.lower())
    if len(current) > 0:
        operations.append(current)

    result = []
    for parts in operations:
        opname = parts[0]
        bandstr = parts[1]
        args = parts[2:]

        try:
            func = opfuncs[opname]
        except KeyError:
            raise ValueError("{} is not a valid operation".format(opname))

        if opname in rgb_ops:
            # ignore bands, assumed to be in rgb
            # push 2nd arg into args
            args = [bandstr] + args
            bands = (1, 2, 3)
        else:
            # 2nd arg is bands
            # parse r,g,b ~= 1,2,3
            bands = set()
            for bs in bandstr:
                try:
                    band = int(bs)
                except ValueError:
                    band = band_lookup[bs.lower()]
                if band < 1 or band > count:
                    raise ValueError(
                        "{} BAND must be between 1 and {}".format(opname, count)
                    )
                bands.add(band)

        # assume all args are float
        args = [float(arg) for arg in args]
        kwargs = dict(zip(opkwargs[opname], args))

        # Create operation function
        f = _op_factory(
            func=func,
            kwargs=kwargs,
            opname=opname,
            bands=bands,
            rgb_op=(opname in rgb_ops),
        )
        result.append(f)

    return result
import numpy as np
import re
# The type to be used for all intermediate math
# operations. Should be a float because values will
# be scaled to the range 0..1 for all work.
math_type = np.float64

# Smallest representable increment of math_type; used as a tolerance
# when checking 0..1 range bounds.
epsilon = np.finfo(math_type).eps
def to_math_type(arr):
    """Linearly scale a native integer-dtype array down into 0..1.

    The divisor is the maximum value representable by ``arr``'s integer
    dtype, so dtype-max maps to exactly 1.0.
    """
    dtype_max = np.iinfo(arr.dtype).max
    as_float = arr.astype(math_type)
    return as_float / dtype_max
def scale_dtype(arr, dtype):
    """Linearly scale an array of 0..1 floats up to an integer dtype.

    1.0 maps to the target dtype's maximum value; the result is cast with
    ``astype`` (truncating toward zero).
    """
    target_max = np.iinfo(dtype).max
    scaled = arr * target_max
    return scaled.astype(dtype)
def magick_to_rio(convert_opts):
    """Translate a limited subset of imagemagick convert commands
    to rio color operations

    Parameters
    ----------
    convert_opts: String, imagemagick convert options

    Returns
    -------
    operations string, ordered rio color operations
    """
    ops = []

    # Band selection state shared by the helpers below. The previous
    # implementation declared this ``global``, leaking (and depending on)
    # module-level state and leaving the local ``bands = None`` dead;
    # ``nonlocal`` keeps the state scoped to this call and thread-safe.
    bands = "RGB"

    def set_band(x):
        nonlocal bands
        bands = x.upper()

    def append_sig(arg):
        # "-sigmoidal-contrast CONTRASTxMIDPOINT%": the midpoint defaults
        # to 0.5 and percentage midpoints are scaled down to proportions.
        args = list(filter(None, re.split("[,x]+", arg)))
        if len(args) == 1:
            args.append(0.5)
        elif len(args) == 2:
            args[1] = float(args[1].replace("%", "")) / 100.0
        ops.append("sigmoidal {} {} {}".format(bands, *args))

    def append_gamma(arg):
        ops.append("gamma {} {}".format(bands, arg))

    def append_sat(arg):
        # "-modulate BRIGHTNESS,SATURATION": brightness (args[0]) is
        # ignored; saturation is converted from a percentage to a
        # proportion.
        args = list(filter(None, re.split("[,x]+", arg)))
        prop = float(args[1]) / 100
        ops.append("saturation {}".format(prop))

    # One-token lookahead: option flags select the handler for the next
    # non-flag token.
    nextf = None

    for part in convert_opts.strip().split(" "):
        if part == "-channel":
            nextf = set_band
        elif part == "+channel":
            set_band("RGB")
            nextf = None
        elif part == "-sigmoidal-contrast":
            nextf = append_sig
        elif part == "-gamma":
            nextf = append_gamma
        elif part == "-modulate":
            nextf = append_sat
        else:
            if nextf:
                nextf(part)
            nextf = None

    return " ".join(ops)
from typing import Any, Dict, List, Type
import attr
from braceexpand import braceexpand
from morecantile import TileMatrixSet
from rio_tiler.constants import WEB_MERCATOR_TMS
from rio_tiler.errors import InvalidBandName
from rio_tiler.io import BaseReader, MultiBandReader, MultiBaseReader, Reader
from rio_tiler.types import AssetInfo
@attr.s
class MultiFilesBandsReader(MultiBandReader):
    """Multiple Files as Bands.

    ``input`` is a brace-expandable pattern (e.g. ``scene_b{1,2,3}.tif``);
    each expanded file becomes one band, named ``b1``..``bN`` in expansion
    order. Bounds, CRS and zoom range are read from the first file — the
    other files are assumed to share the same footprint (TODO confirm).
    """

    input: Any = attr.ib()  # brace pattern, expanded in __attrs_post_init__
    tms: TileMatrixSet = attr.ib(default=WEB_MERCATOR_TMS)
    reader_options: Dict = attr.ib(factory=dict)

    reader: Type[BaseReader] = attr.ib(default=Reader)

    files: List[str] = attr.ib(init=False)

    # TMS-derived defaults; immediately overwritten from the reference
    # file in __attrs_post_init__.
    minzoom: int = attr.ib()
    maxzoom: int = attr.ib()

    @minzoom.default
    def _minzoom(self):
        return self.tms.minzoom

    @maxzoom.default
    def _maxzoom(self):
        return self.tms.maxzoom

    def __attrs_post_init__(self):
        """Fetch Reference band to get the bounds."""
        self.files = list(braceexpand(self.input))
        self.bands = [f"b{ix + 1}" for ix in range(len(self.files))]

        # Spatial metadata comes from the first expanded file only.
        with self.reader(self.files[0], tms=self.tms, **self.reader_options) as cog:
            self.bounds = cog.bounds
            self.crs = cog.crs
            self.minzoom = cog.minzoom
            self.maxzoom = cog.maxzoom

    def _get_band_url(self, band: str) -> str:
        """Validate band's name and return band's url."""
        if band not in self.bands:
            raise InvalidBandName(f"{band} is not valid")

        index = self.bands.index(band)
        return self.files[index]
@attr.s
class MultiFilesAssetsReader(MultiBaseReader):
    """Multiple Files as Assets.

    ``input`` is a brace-expandable pattern; each expanded file becomes
    one asset, named ``asset1``..``assetN`` in expansion order. Bounds,
    CRS and zoom range are read from the first file — the other files are
    assumed to share the same footprint (TODO confirm).
    """

    input: Any = attr.ib()  # brace pattern, expanded in __attrs_post_init__
    tms: TileMatrixSet = attr.ib(default=WEB_MERCATOR_TMS)
    reader_options: Dict = attr.ib(factory=dict)

    reader: Type[BaseReader] = attr.ib(default=Reader)

    files: List[str] = attr.ib(init=False)

    # TMS-derived defaults; immediately overwritten from the reference
    # file in __attrs_post_init__.
    minzoom: int = attr.ib()
    maxzoom: int = attr.ib()

    @minzoom.default
    def _minzoom(self):
        return self.tms.minzoom

    @maxzoom.default
    def _maxzoom(self):
        return self.tms.maxzoom

    def __attrs_post_init__(self):
        """Fetch Reference band to get the bounds."""
        self.files = list(braceexpand(self.input))
        self.assets = [f"asset{ix + 1}" for ix in range(len(self.files))]

        # Spatial metadata comes from the first expanded file only.
        with self.reader(self.files[0], tms=self.tms, **self.reader_options) as cog:
            self.bounds = cog.bounds
            self.crs = cog.crs
            self.minzoom = cog.minzoom
            self.maxzoom = cog.maxzoom

    def _get_asset_info(self, asset: str) -> AssetInfo:
        """Validate asset's name and return the asset's url."""
        if asset not in self.assets:
            raise InvalidBandName(f"{asset} is not valid")

        index = self.assets.index(asset)
        return AssetInfo(url=self.files[index])
### Use rio-viz in Jupyter Notebook
```
import time
import httpx
from ipyleaflet import Map, ScaleControl, FullScreenControl, SplitMapControl, TileLayer
from rio_tiler.io import STACReader
from rio_viz.app import Client
# Create rio-viz Client (using server-thread to launch a background task)
client = Client(
"https://earth-search.aws.element84.com/v0/collections/sentinel-s2-l2a-cogs/items/S2A_34SGA_20200318_0_L2A",
reader=STACReader,
# By default STACReader min/max zoom is 0->24
# Knowledge of the Sentinel-2 data tell us it's more 8->14
minzoom=8,
maxzoom=14,
)
# Give the server some time to set up
time.sleep(1)
# Check that client is running
print("Client is alive: ", client.server.is_alive())
tilejson = httpx.get(
f"{client.endpoint}/tilejson.json",
params = {
"assets": ["B04", "B03", "B02"],
"rescale": "0,10000"
}
).json()
bounds = ((client.bounds[0], client.bounds[2]), (client.bounds[1], client.bounds[3]))
center = ((client.bounds[1] + client.bounds[3]) / 2, (client.bounds[0] + client.bounds[2]) / 2)
layer = TileLayer(
url=tilejson["tiles"][0],
min_zoom=tilejson["minzoom"],
max_zoom=tilejson["maxzoom"],
bounds=bounds,
)
# Make the ipyleaflet map
m = Map(center=center, zoom=client.minzoom)
m.add_layer(layer)
m
left_tilejson = httpx.get(
f"{client.endpoint}/tilejson.json",
params = {
"assets": ["B04", "B03", "B02"],
"rescale": "0,10000"
}
).json()
right_tilejson = httpx.get(
f"{client.endpoint}/tilejson.json",
params = {
"assets": ["B05", "B04", "B03"],
"rescale": "0,10000"
}
).json()
bounds = ((client.bounds[0], client.bounds[2]), (client.bounds[1], client.bounds[3]))
center = ((client.bounds[1] + client.bounds[3]) / 2, (client.bounds[0] + client.bounds[2]) / 2)
left = TileLayer(
url=left_tilejson["tiles"][0],
min_zoom=left_tilejson["minzoom"],
max_zoom=left_tilejson["maxzoom"],
bounds=bounds,
)
right = TileLayer(
url=right_tilejson["tiles"][0],
min_zoom=right_tilejson["minzoom"],
max_zoom=right_tilejson["maxzoom"],
bounds=bounds,
)
# Make the ipyleaflet map
m = Map(center=center, zoom=client.minzoom)
control = SplitMapControl(left_layer=left, right_layer=right)
m.add_control(control)
m.add_control(ScaleControl(position='bottomleft'))
m.add_control(FullScreenControl())
m
```
| /rio_viz-0.11.0.tar.gz/rio_viz-0.11.0/examples/STAC_in_notebook.ipynb | 0.402744 | 0.588121 | STAC_in_notebook.ipynb | pypi |
from rios.core import (
ValidationError,
validate_instrument,
validate_form,
validate_calculationset,
)
from rios.conversion.redcap import RedcapToRios, RedcapFromRios
from rios.conversion.base import structures
from rios.conversion.qualtrics import QualtricsToRios, QualtricsFromRios
from rios.conversion.exception import (
Error,
ConversionFailureError,
QualtricsFormatError,
ConversionValidationError,
RiosRelationshipError,
)
from rios.conversion.utils import JsonReader
__all__ = (
'redcap_to_rios',
'qualtrics_to_rios',
'rios_to_redcap',
'rios_to_qualtrics',
)
class _JsonReaderMetaDataProcessor(JsonReader):
    """ Process Qualtrics data dictionary/instrument metadata """

    def processor(self, data):  # noqa:F821
        """ Extract instrument metadata from the SurveyEntry section """
        try:
            entry = data['SurveyEntry']
            return {
                'id': entry['SurveyID'],
                'title': entry['SurveyName'],
                'localization': entry['SurveyLanguage'].lower(),
                'description': entry['SurveyDescription'],
            }
        except Exception as exc:
            # Any missing key (or non-mapping data) means the file is not
            # a well-formed Qualtrics export.
            raise QualtricsFormatError(
                'Processor read error:',
                str(exc)
            )
def _check_rios_relationship(instrument, form, calculationset=None):
    """Ensure form (and calculationset, if any) reference the instrument."""
    reference = structures.InstrumentReferenceObject(instrument)
    if form['instrument'] != reference:
        raise RiosRelationshipError(
            'Form and Instrument do not match:',
            '{} is not {}'.format(form['instrument'], reference)
        )
    if calculationset and calculationset['instrument'] != reference:
        raise RiosRelationshipError(
            'Calculationset and Instrument do not match:',
            '{} is not {}'.format(calculationset['instrument'], reference)
        )
def _validate_rios(instrument, form, calculationset=None):
    """Validate RIOS definitions, wrapping any failure in a
    ConversionValidationError that names the offending configuration."""
    exc_type = "instrument"
    try:
        validate_instrument(instrument)
        exc_type = "form"
        validate_form(form, instrument=instrument)
        if calculationset and calculationset.get('calculations', False):
            exc_type = "calculationset"
            validate_calculationset(calculationset, instrument=instrument)
    except ValidationError as exc:
        raise ConversionValidationError(
            'The supplied RIOS ' + exc_type + ' configuration'
            ' is invalid. Error:',
            str(exc)
        )
def redcap_to_rios(id, title, description, stream, localization=None,
                   instrument_version=None, suppress=False):
    """
    Converts a REDCap configuration into a RIOS configuration.

    :param id: The RIOS specification formatted instrument ID.
    :type id: str
    :param title: The RIOS specification formatted instrument title.
    :type title: str
    :param description: The instrument description.
    :type description: str
    :param stream:
        A file stream containing a foreign data dictionary to convert to the
        RIOS specification.
    :type stream: File-like object
    :param localization:
        Localization must be in the form of an RFC5646 Language Tag. Defaults
        to 'en' if not supplied.
    :type localization: str or None
    :param instrument_version:
        Version of the instrument. Defaults to '1.0' if none supplied. Must be
        in a decimal format with precision to one decimal place.
    :type instrument_version: str or None
    :param suppress:
        Suppress exceptions and log return as a dict with a single 'failure'
        key that contains the exception message. Implementations should check
        for this key to make sure a conversion completed successfully, because
        the returned dict will not contain key-value pairs with conversion
        data if exception suppression is set.
    :type suppress: bool

    :returns:
        The RIOS instrument, form, and calculationset configuration. Includes
        logging data if a logger is supplied.
    :rtype: dictionary
    """
    converter = RedcapToRios(
        id=id,
        instrument_version=instrument_version,
        title=title,
        localization=localization,
        description=description,
        stream=stream
    )

    payload = dict()
    try:
        converter()
    except Exception as exc:
        # Converter-raised Error text is kept verbatim; anything
        # unexpected is repr()'d so the class name is preserved.
        error = ConversionFailureError(
            'Unable to convert REDCap data dictionary. Error:',
            (str(exc) if isinstance(exc, Error) else repr(exc))
        )
        if suppress:
            payload['failure'] = str(error)
        else:
            raise error
    else:
        payload.update(converter.package)
    return payload
def qualtrics_to_rios(stream, instrument_version=None, title=None,
                      localization=None, description=None, id=None,
                      filemetadata=False, suppress=False):
    """
    Converts a Qualtrics configuration into a RIOS configuration.

    :param id: The RIOS specification formatted instrument ID.
    :type id: str
    :param title: The RIOS specification formatted instrument title.
    :type title: str
    :param description: The instrument description.
    :type description: str
    :param stream:
        A file stream containing a foreign data dictionary to convert to the
        RIOS specification.
    :type stream: File-like object
    :param localization:
        Localization must be in the form of an RFC5646 Language Tag. Defaults
        to 'en' if not supplied.
    :type localization: str or None
    :param instrument_version:
        Version of the instrument. Defaults to '1.0' if none supplied. Must be
        in a decimal format with precision to one decimal place.
    :type instrument_version: str or None
    :param filemetadata:
        Flag to tell converter API to pull meta data from the stream file.
    :type filemetadata: bool
    :param suppress:
        Suppress exceptions and log return as a dict with a single 'failure'
        key that contains the exception message. Implementations should check
        for this key to make sure a conversion completed successfully, because
        the returned dict will not contain key-value pairs with conversion
        data if exception suppression is set.
    :type suppress: bool

    :returns:
        The RIOS instrument, form, and calculationset configuration. Includes
        logging data if a logger is supplied.
    :rtype: dictionary
    """
    # Make sure function parameters are passed proper values if not getting
    # metadata from the data dictionary file
    if filemetadata is False and (id is None or description is None
                                  or title is None):
        raise ValueError(
            'Missing id, description, and/or title attributes'
        )

    payload = dict()

    if filemetadata:
        # Process properties from the stream
        try:
            reader = _JsonReaderMetaDataProcessor(stream)
            reader.process()
        except Exception as exc:
            error = ConversionFailureError(
                "Unable to parse Qualtrics data dictionary:",
                "Invalid JSON formatted text"
            )
            error.wrap(
                "Parse error:",
                str(exc)
            )
            if suppress:
                payload['failure'] = str(error)
                return payload
            else:
                raise error
        else:
            # Metadata parameters are overridden by the values found in
            # the data dictionary itself.
            id = reader.data['id']
            description = reader.data['description']
            title = reader.data['title']
            localization = reader.data['localization']

    converter = QualtricsToRios(
        id=id,
        instrument_version=instrument_version,
        title=title,
        localization=localization,
        description=description,
        stream=stream
    )

    try:
        converter()
    except Exception as exc:
        error = ConversionFailureError(
            'Unable to convert Qualtrics data dictionary. Error:',
            (str(exc) if isinstance(exc, Error) else repr(exc))
        )
        if suppress:
            payload['failure'] = str(error)
        else:
            raise error
    else:
        payload.update(converter.package)
    return payload
def rios_to_redcap(instrument, form, calculationset=None,
                   localization=None, suppress=False):
    """
    Converts a RIOS configuration into a REDCap configuration.

    :param instrument: The RIOS instrument definition
    :type instrument: dict
    :param form: The RIOS form definition
    :type form: dict
    :param calculationset: The RIOS calculationset instrument definition
    :type calculationset: dict
    :param localization:
        Localization must be in the form of an RFC5646 Language Tag. Defaults
        to 'en' if not supplied.
    :type localization: str or None
    :param suppress:
        Suppress exceptions and log return as a dict with a single 'failure'
        key that contains the exception message. Implementations should check
        for this key to make sure a conversion completed successfully, because
        the returned dict will not contain key-value pairs with conversion
        data if exception suppression is set.
    :type suppress: bool

    :returns:
        A dictionary containing the converted REDCap configuration (the
        converter's package), or a single 'failure' key when suppression
        is enabled and an error occurred.
    :rtype: dictionary
    """
    payload = dict()
    try:
        _validate_rios(instrument, form, calculationset)
        _check_rios_relationship(instrument, form, calculationset)
    except Exception as exc:
        error = ConversionFailureError(
            'The supplied RIOS configurations are invalid:',
            str(exc)
        )
        if suppress:
            payload['failure'] = str(error)
            return payload
        else:
            raise error

    converter = RedcapFromRios(
        instrument=instrument,
        form=form,
        calculationset=calculationset,
        localization=localization,
    )

    try:
        converter()
    except Exception as exc:
        error = ConversionFailureError(
            'Unable to convert RIOS data dictionary. Error:',
            (str(exc) if isinstance(exc, Error) else repr(exc))
        )
        if suppress:
            payload['failure'] = str(error)
        else:
            raise error
    else:
        payload.update(converter.package)
    return payload
def rios_to_qualtrics(instrument, form, calculationset=None,
                      localization=None, suppress=False):
    """
    Converts a RIOS configuration into a Qualtrics configuration.

    :param instrument: The RIOS instrument definition
    :type instrument: dict
    :param form: The RIOS form definition
    :type form: dict
    :param calculationset: The RIOS calculationset instrument definition
    :type calculationset: dict
    :param localization:
        Localization must be in the form of an RFC5646 Language Tag. Defaults
        to 'en' if not supplied.
    :type localization: str or None
    :param suppress:
        Suppress exceptions and log return as a dict with a single 'failure'
        key that contains the exception message. Implementations should check
        for this key to make sure a conversion completed successfully, because
        the returned dict will not contain key-value pairs with conversion
        data if exception suppression is set.
    :type suppress: bool

    :returns: A dictionary containing the converted Qualtrics configuration
        (the converter's package), or a single 'failure' key when
        suppression is enabled and an error occurred.
    :rtype: dictionary
    """
    payload = dict()
    try:
        _validate_rios(instrument, form, calculationset)
        _check_rios_relationship(instrument, form, calculationset)
    except Exception as exc:
        error = ConversionFailureError(
            'The supplied RIOS configurations are invalid:',
            str(exc)
        )
        if suppress:
            payload['failure'] = str(error)
            return payload
        else:
            raise error

    converter = QualtricsFromRios(
        instrument=instrument,
        form=form,
        calculationset=calculationset,
        localization=localization,
    )

    try:
        converter()
    except Exception as exc:
        error = ConversionFailureError(
            'Unable to convert RIOS data dictionary. Error:',
            (str(exc) if isinstance(exc, Error) else repr(exc))
        )
        if suppress:
            payload['failure'] = str(error)
        else:
            raise error
    else:
        payload.update(converter.package)
    return payload
from . import structures
from rios.conversion.utils import InMemoryLogger
__all__ = (
    'ConversionBase',
    'localized_string_object',
    'DEFAULT_VERSION',
    'DEFAULT_LOCALIZATION',
    'SUCCESS_MESSAGE',
)


# Fallbacks used by converters when no explicit localization or
# instrument version is supplied by the caller.
DEFAULT_LOCALIZATION = 'en'
DEFAULT_VERSION = '1.0'

# Logged (via logger.info) after a conversion validates cleanly.
SUCCESS_MESSAGE = 'Conversion process was successful'
def localized_string_object(localization, string):
    """Wrap *string* in a LocalizedStringObject keyed by *localization*."""
    return structures.LocalizedStringObject({localization: string})
class ConversionBase(object):
    """ Base class for building conversion objects """

    def __init__(self, *args, **kwargs):
        """
        Initializes a conversion tool for converting from one instrument
        definition to another.

        Implementations must override this method and specify necessary
        function parameters.
        """
        # BUG FIX: this message previously read "{}.__call__" (copied from
        # the method below), mislabeling which method is unimplemented.
        raise NotImplementedError(
            '{}.__init__'.format(self.__class__.__name__)
        )

    def __call__(self):
        """
        Converts the given instrument definition into a another instrument
        definition.

        Implementations must override this method.
        """
        raise NotImplementedError(
            '{}.__call__'.format(self.__class__.__name__)
        )

    @property
    def logger(self):
        """
        Logger interface. Builds a new logging instance if one doesn't
        already exist. It is up to implementations to use this logging feature
        within a subclass implementation's __call__ method.
        """
        try:
            return self._logger
        except AttributeError:
            # Lazily construct the logger on first access.
            self._logger = InMemoryLogger()
            return self._logger

    @property
    def pplogs(self):
        """
        Pretty print logs by joining into a single, formatted string for use
        in displaying informative error messages to users.
        """
        return self.logger.pplogs

    @property
    def logs(self):
        """Raw log records collected during conversion."""
        return self.logger.logs

    @property
    def instrument(self):
        """
        Returns the instrument definition output as a dictionary or a list.

        Implementations must override this method.
        """
        raise NotImplementedError(
            "{}.instrument".format(self.__class__.__name__)
        )

    @property
    def package(self):
        """
        Returns a dictionary with an ``instrument`` key containing the
        converted definitions. May also add a ``logger`` key if logs exist.

        Implementations must override this method.
        """
        raise NotImplementedError(
            "{}.package".format(self.__class__.__name__)
        )
from rios.core import ValidationError
from rios.conversion.exception import (
ConversionValidationError,
)
from rios.conversion.base import structures
from rios.conversion.base import (
ConversionBase,
localized_string_object,
DEFAULT_VERSION,
DEFAULT_LOCALIZATION,
SUCCESS_MESSAGE,
)
from rios.core.validation import (
validate_instrument,
validate_form,
validate_calculationset,
)
__all__ = (
'ToRios',
)
class ToRios(ConversionBase):
    """ Converts a foreign instrument into a valid RIOS specification """

    def __init__(self, id, title, description, stream, localization=None,
                 instrument_version=None, *args, **kwargs):
        """
        Expects `stream` to be a file-like object. Implementations must process
        the data dictionary first before passing to this class.
        """
        # Set attributes; the instrument ID must carry a "urn:" prefix.
        self.id = (id if 'urn:' in str(id) else ('urn:' + str(id)))
        self.instrument_version = instrument_version or DEFAULT_VERSION
        self.title = title
        self.localization = localization or DEFAULT_LOCALIZATION
        self.description = description
        self.stream = stream

        # Inserted into self._form
        self.page_container = dict()
        # Inserted into self._instrument
        self.field_container = list()
        # Inserted into self._calculationset
        self.calc_container = dict()

        # Generate yet-to-be-configured RIOS definitions
        self._instrument = structures.Instrument(
            id=self.id,
            version=self.instrument_version,
            title=self.title,
            description=self.description
        )
        self._calculationset = structures.CalculationSetObject(
            instrument=structures.InstrumentReferenceObject(self._instrument),
        )
        self._form = structures.WebForm(
            instrument=structures.InstrumentReferenceObject(self._instrument),
            defaultLocalization=self.localization,
            title=localized_string_object(self.localization, self.title),
        )

    @property
    def instrument(self):
        """The converted RIOS instrument definition as a plain dict."""
        self._instrument.clean()
        return self._instrument.as_dict()

    @property
    def form(self):
        """The converted RIOS web form definition as a plain dict."""
        self._form.clean()
        return self._form.as_dict()

    @property
    def calculationset(self):
        """The converted RIOS calculation set, or {} when there are none."""
        if self._calculationset.get('calculations', False):
            self._calculationset.clean()
            return self._calculationset.as_dict()
        else:
            return dict()

    def validate(self):
        """
        Validation interface. Must be called at the end of all subclass
        implementations of the __call__ method.
        """
        try:
            val_type = "Instrument"
            validate_instrument(self.instrument)
            val_type = "Form"
            validate_form(
                self.form,
                instrument=self.instrument,
            )
            if self.calculationset.get('calculations', False):
                val_type = "Calculationset"
                validate_calculationset(
                    self.calculationset,
                    instrument=self.instrument
                )
        except ValidationError as exc:
            error = ConversionValidationError(
                (val_type + ' validation error:'),
                str(exc)
            )
            self.logger.error(str(error))
            raise error
        else:
            if SUCCESS_MESSAGE:
                self.logger.info(SUCCESS_MESSAGE)

    @property
    def package(self):
        """
        Returns a dictionary with ``instrument``, ``form``, and possibly
        ``calculationset`` keys containing their corresponding, converted
        definitions. May also add a ``logs`` key if logs exist.
        """
        payload = {
            'instrument': self.instrument,
            'form': self.form,
        }
        if self._calculationset.get('calculations', False):
            # BUG FIX: this previously read ``self.calculations``, an
            # attribute that does not exist (the property is named
            # ``calculationset``), raising AttributeError for any
            # conversion that produced calculations.
            payload.update(
                {'calculationset': self.calculationset}
            )
        if self.logger.check:
            payload.update(
                {'logs': self.logs}
            )
        return payload
import collections
# Public export list: the DefinitionSpecification base plus one class per
# RIOS configuration structure.
__all__ = (
    'DefinitionSpecification',
    'Instrument',
    'FieldObject',
    'TypeCollectionObject',
    'TypeObject',
    'ColumnObject',
    'RowObject',
    'BoundConstraintObject',
    'EnumerationCollectionObject',
    'EnumerationObject',
    'CalculationSetObject',
    'InstrumentReferenceObject',
    'CalculationObject',
    'WebForm',
    'PageObject',
    'ElementObject',
    'QuestionObject',
    'DescriptorObject',
    'EventObject',
    'WidgetConfigurationObject',
    'AudioSourceObject',
    'ParameterCollectionObject',
    'ParameterObject',
    'LocalizedStringObject',
)
class DefinitionSpecification(collections.OrderedDict):
    """Ordered-dict base class for every RIOS definition structure.

    Subclasses declare ``props``, an ordered mapping of key name to the
    zero-argument factory used to build that key's default value. A
    non-empty ``props`` also acts as a whitelist: constructor input keys
    outside of it are silently dropped.
    """

    props = collections.OrderedDict()
    """
    props == {(key, type), ...}
    """

    def __init__(self, props={}, **kwargs):
        """
        if ``self.props`` has items, filter out any keys
        in ``props`` and ``kwargs`` not in self.props;
        otherwise initialize from props and/or kwargs.
        """
        # NOTE: the mutable default ``props={}`` is safe here because it
        # is only read, never mutated.
        super(DefinitionSpecification, self).__init__()
        self.update({k: v() for k, v in self.props.items()})
        self.update({
            k: v
            for k, v in props.items()
            if not self.props or k in self.props})
        self.update({
            k: v
            for k, v in kwargs.items()
            if not self.props or k in self.props})

    def clean(self):
        """Removes "empty" items from self.

        items whose values are empty arrays, dicts, and strings
        are deleted.

        All arrays are assumed to be arrays of DefinitionSpecification.
        """
        items = list(self.items())
        for k, v in items:
            # False, 0, 0.0 and None are meaningful values and kept as-is;
            # only "empty" containers and strings are removed.
            if v not in [False, 0, 0.0, None]:
                if bool(v):
                    if isinstance(v, DefinitionSpecification):
                        v.clean()
                    elif isinstance(v, list):
                        # NOTE(review): the filtered list is rebound to the
                        # local ``v`` but never written back to self[k], so
                        # elements that clean() down to empty are dropped
                        # only for the emptiness test below — confirm
                        # whether removing them from self[k] was intended.
                        v = [x for x in v if x.clean()]
                    if not bool(v):
                        del self[k]
                else:
                    del self[k]
        return self

    def as_dict(self):
        """Recursively convert self (and nested specifications) to dicts."""
        out = dict()
        for key, value in self.items():
            if isinstance(value, DefinitionSpecification):
                out[key] = value.as_dict()
            elif isinstance(value, (list, tuple)):
                out[key] = []
                for v in value:
                    if isinstance(v, DefinitionSpecification):
                        out[key].append(v.as_dict())
                    else:
                        # BUG FIX: this previously did ``out[key] = v``,
                        # overwriting the list with the last plain element
                        # (e.g. a tags list of strings collapsed to one
                        # string). Append like the branch above.
                        out[key].append(v)
            else:
                out[key] = value
        return out
class AudioSourceObject(DefinitionSpecification):
    """Free-form mapping; with no ``props`` declared, any keys are kept."""
    pass
class EnumerationCollectionObject(DefinitionSpecification):
    """Mapping of enumeration id -> EnumerationObject (or None)."""

    def add(self, name, description=''):
        # An empty description is stored as None rather than as an
        # EnumerationObject with a blank description.
        self[name] = (
            EnumerationObject(description=description)
            if description
            else None)
class LocalizedStringObject(DefinitionSpecification):
    """Free-form mapping of localization tag -> translated string."""
    pass
class ParameterCollectionObject(DefinitionSpecification):
    """Free-form mapping container for parameters; any keys are kept."""
    pass
class TypeCollectionObject(DefinitionSpecification):
    """Mapping of type name -> TypeObject (see Instrument.add_type)."""
    pass
class Instrument(DefinitionSpecification):
    """Top-level RIOS instrument definition: id, version, title,
    description, named types, and the field record."""

    props = collections.OrderedDict([
        ('id', str),
        ('version', str),
        ('title', str),
        ('description', str),
        ('types', TypeCollectionObject),
        ('record', list),
    ])

    def add_field(self, field_object):
        """Append a FieldObject to the instrument's record."""
        assert isinstance(field_object, FieldObject), field_object
        self['record'].append(field_object)

    def add_type(self, type_name, type_object):
        """Register a named TypeObject under ``types``."""
        assert isinstance(type_name, str), type_name
        assert isinstance(type_object, TypeObject), type_object
        self['types'][type_name] = type_object
class FieldObject(DefinitionSpecification):
    """Single field of an instrument record."""

    props = collections.OrderedDict([
        ('id', str),
        ('description', str),
        ('type', str),
        ('required', bool),
        ('annotation', str),
        ('explanation', str),
        ('identifiable', bool),
    ])
class BoundConstraintObject(DefinitionSpecification):
    """Min/max bound constraint.

    Must have at least one of ['max', 'min']
    """
    props = collections.OrderedDict([
        ('min', str),
        ('max', str),
    ])
class TypeObject(DefinitionSpecification):
    """Custom type definition: a base type plus optional constraints
    (range/length/pattern), enumerations, and nested record/columns/rows
    sub-structures."""

    props = collections.OrderedDict([
        ('base', str),
        ('range', BoundConstraintObject),
        ('length', BoundConstraintObject),
        ('pattern', str),
        ('enumerations', EnumerationCollectionObject),
        ('record', list),
        ('columns', list),
        ('rows', list),
    ])

    def add_column(self, column_object):
        """Append a ColumnObject to ``columns``."""
        assert isinstance(column_object, ColumnObject), column_object
        self['columns'].append(column_object)

    def add_enumeration(self, name, description=''):
        """Register an enumeration value (delegates to the collection)."""
        self['enumerations'].add(name, description)

    def add_field(self, field_object):
        """Append a FieldObject to the nested ``record``."""
        assert isinstance(field_object, FieldObject), field_object
        self['record'].append(field_object)

    def add_row(self, row_object):
        """Append a RowObject to ``rows``."""
        assert isinstance(row_object, RowObject), row_object
        self['rows'].append(row_object)
class ColumnObject(DefinitionSpecification):
    """Column definition for a TypeObject's ``columns`` list."""

    props = collections.OrderedDict([
        ('id', str),
        ('description', str),
        ('type', str),
        ('required', bool),
        ('identifiable', bool),
    ])
class RowObject(DefinitionSpecification):
    """Row definition for a TypeObject's ``rows`` list."""

    props = collections.OrderedDict([
        ('id', str),
        ('description', str),
        ('required', bool),
    ])
class EnumerationObject(DefinitionSpecification):
    """Single enumeration entry; carries only a description."""

    props = collections.OrderedDict([
        ('description', str),
    ])
class InstrumentReferenceObject(DefinitionSpecification):
    """Reference to an instrument by id and version.

    Constructing this from a full Instrument keeps only its id/version
    keys (the ``props`` whitelist in DefinitionSpecification.__init__).
    """

    props = collections.OrderedDict([
        ('id', str),
        ('version', str),
    ])
class CalculationSetObject(DefinitionSpecification):
    """Calculation set: an instrument reference plus its calculations."""

    props = collections.OrderedDict([
        ('instrument', InstrumentReferenceObject),
        ('calculations', list),
    ])

    def add(self, calc_object):
        """Append a CalculationObject to ``calculations``."""
        assert isinstance(calc_object, CalculationObject), calc_object
        self['calculations'].append(calc_object)
class CalculationObject(DefinitionSpecification):
    """Single calculation: id, description, result type, method, options."""

    props = collections.OrderedDict([
        ('id', str),
        ('description', str),
        ('type', str),
        ('method', str),
        ('options', DefinitionSpecification),
    ])
class WebForm(DefinitionSpecification):
    """RIOS web form: instrument reference, default localization, title,
    pages, and parameters."""

    props = collections.OrderedDict([
        ('instrument', InstrumentReferenceObject),
        ('defaultLocalization', str),
        ('title', str),
        ('pages', list),
        ('parameters', DefinitionSpecification),
    ])

    def add_page(self, page_object):
        """Append a PageObject to ``pages``."""
        assert isinstance(page_object, PageObject), page_object
        self['pages'].append(page_object)

    def add_parameter(self, parameter_name, parameter_object):
        """Register a named ParameterObject under ``parameters``."""
        assert isinstance(parameter_name, str), parameter_name
        assert isinstance(parameter_object, ParameterObject), parameter_object
        self['parameters'][parameter_name] = parameter_object
class PageObject(DefinitionSpecification):
    """Form page: an id plus an ordered list of elements."""

    props = collections.OrderedDict([
        ('id', str),
        ('elements', list),
    ])

    def add_element(self, element_object):
        """Append one ElementObject — or each member of a list of them."""
        element_list = (
            element_object
            if isinstance(element_object, list)
            else [element_object])
        for element in element_list:
            assert isinstance(element, ElementObject), element
            self['elements'].append(element)
class ElementObject(DefinitionSpecification):
    """Page element: a type, its options, and tags."""

    props = collections.OrderedDict([
        ('type', str),
        ('options', DefinitionSpecification),
        ('tags', list),
    ])
class WidgetConfigurationObject(DefinitionSpecification):
    """Widget choice for a question: widget type plus its options."""

    props = collections.OrderedDict([
        ('type', str),
        ('options', DefinitionSpecification),
    ])
class QuestionObject(DefinitionSpecification):
    """Question configuration for a form field: localized text, optional
    enumerations, sub-questions, rows, widget configuration and events."""

    props = collections.OrderedDict([
        ('fieldId', str),
        ('text', LocalizedStringObject),
        ('audio', AudioSourceObject),
        ('help', LocalizedStringObject),
        ('error', LocalizedStringObject),
        ('enumerations', list),
        ('questions', list),
        ('rows', list),
        ('widget', WidgetConfigurationObject),
        ('events', list),
    ])

    def add_enumeration(self, descriptor_object):
        """Append a DescriptorObject to ``enumerations``."""
        assert isinstance(
            descriptor_object,
            DescriptorObject), descriptor_object
        self['enumerations'].append(descriptor_object)

    def add_question(self, question_object):
        """Append a nested QuestionObject to ``questions``."""
        assert isinstance(question_object, QuestionObject), question_object
        self['questions'].append(question_object)

    def add_row(self, descriptor_object):
        """Append a DescriptorObject to ``rows``."""
        assert isinstance(
            descriptor_object,
            DescriptorObject), descriptor_object
        self['rows'].append(descriptor_object)

    def add_event(self, event_object):
        """Append an EventObject to ``events``."""
        assert isinstance(event_object, EventObject), event_object
        self['events'].append(event_object)

    def set_widget(self, widget):
        """Replace the question's widget configuration."""
        assert isinstance(widget, WidgetConfigurationObject), widget
        self['widget'] = widget
class DescriptorObject(DefinitionSpecification):
    """A labelled item (enumeration choice or matrix row) with localized text."""
    # Property schema: key name -> expected value type.
    props = collections.OrderedDict([
        ('id', str),
        ('text', LocalizedStringObject),
        ('audio', AudioSourceObject),
        ('help', LocalizedStringObject),
    ])
class EventObject(DefinitionSpecification):
    """A form event: what fires it, what it does, and which targets it affects."""
    # Property schema: key name -> expected value type.
    props = collections.OrderedDict([
        ('trigger', str),
        ('action', str),
        ('targets', list),
        ('options', DefinitionSpecification),
    ])
class ParameterObject(DefinitionSpecification):
    """An external form parameter; only its ``type`` is configurable."""
    # Property schema: key name -> expected value type.
    props = collections.OrderedDict([
        ('type', str),
    ])
from rios.core.validation.instrument import get_full_type_definition
from rios.conversion.base import FromRios
from rios.conversion.exception import (
ConversionValueError,
QualtricsFormatError,
Error,
)
__all__ = (
'QualtricsFromRios',
)
class QuestionNumber:
    """Monotonically increasing counter used to number exported questions."""

    def __init__(self):
        # Last number handed out; the first call to next() returns 1.
        self.number = 0

    def next(self):
        """Advance the counter and return its new value."""
        self.number += 1
        return self.number
class QualtricsFromRios(FromRios):
    """
    Converts RIOS instrument and form definitions into a Qualtrics data
    dictionary.
    """
    def __call__(self):
        """Render all form pages into Qualtrics text lines (self._definition)."""
        self.lines = []
        self.question_number = QuestionNumber()
        for page in self._form['pages']:
            try:
                self.page_processor(page)
            except Exception as exc:
                if isinstance(exc, ConversionValueError):
                    # Don't need to specify what's being skipped here, because
                    # deeper level exceptions access that data.
                    self.logger.warning(str(exc))
                elif isinstance(exc, QualtricsFormatError):
                    error = Error(
                        "RIOS data dictionary conversion failure:",
                        "Unable to parse the data dictionary"
                    )
                    self.logger.error(str(error))
                    raise error
                else:
                    error = Error(
                        "An unknown or unexpected error occured:",
                        repr(exc)
                    )
                    error.wrap(
                        "RIOS data dictionary conversion failure:",
                        "Unable to parse the data dictionary"
                    )
                    self.logger.error(str(error))
                    raise error
        # Strip the leading [[PageBreak]] line and all trailing blank lines
        def rmv_extra_strings(lst):
            if len(lst) > 0:
                if lst[0] == '[[PageBreak]]':
                    # BUGFIX: return the recursive result; previously the
                    # trimmed copy was discarded so nothing was ever removed.
                    return rmv_extra_strings(lst[1:])
                elif lst[-1] == "":
                    return rmv_extra_strings(lst[:-1])
            return lst[:]
        for line in rmv_extra_strings(self.lines):
            self._definition.append(line)
    def page_processor(self, page):
        """Emit the Qualtrics lines for a single RIOS form page."""
        # Start the page
        self.lines.append('[[PageBreak]]')
        elements = page['elements']
        # Process question elements
        for question in elements:
            question_options = question['options']
            # Get question/form element ID value for error messages
            try:
                identifier = question['options']['fieldId']
            except Exception:
                # No fieldId (e.g. a display-only element): fall back to the
                # localized text as the identifier.
                identifier = question['options']['text'].get(
                    self.localization,
                    None
                )
            if not identifier:
                raise ConversionValueError(
                    'Form element has no identifier.'
                    ' Invalid element data:',
                    str(question)
                )
            # Handle form element if a question
            if question['type'] == 'question':
                try:
                    self.question_processor(question_options)
                except Exception as exc:
                    error = ConversionValueError(
                        ("Skipping form field with ID: " + str(identifier)
                            + ". Error:"),
                        (str(exc) if isinstance(exc, Error) else repr(exc))
                    )
                    raise error
            else:
                # Qualtrics only handles form questions
                error = ConversionValueError(
                    'Skipping form field with ID:',
                    str(identifier)
                )
                error.wrap(
                    'Form element type is not \"question\". Got:',
                    str(question['type'])
                )
                raise error
    def question_processor(self, question_options):
        """Emit the numbered question line and its enumeration choices.

        Only ``enumeration``/``enumerationSet`` fields are supported;
        anything else raises ``ConversionValueError``.
        """
        field_id = question_options['fieldId']
        field = self.fields[field_id]
        type_object = get_full_type_definition(
            self._instrument,
            field['type']
        )
        base = type_object['base']
        if base not in ('enumeration', 'enumerationSet',):
            error = ConversionValueError(
                "Invalid question type:",
                "Type is not \"enumeration\" or \"enumerationSet\""
            )
            error.wrap("Got invalid value for type:", str(base))
            raise error
        self.lines.append(
            '%d. %s' % (
                self.question_number.next(),
                self.get_local_text(question_options['text']),
            )
        )
        if base == 'enumerationSet':
            self.lines.append('[[MultipleAnswer]]')
        # Blank line separates question from choices.
        self.lines.append('')
        for enumeration in question_options['enumerations']:
            self.lines.append(
                self.get_local_text(enumeration['text'])
            )
        # Two blank lines between questions
        self.lines.append('')
        self.lines.append('')
import collections
import six
from rios.conversion.base import ToRios, localized_string_object, structures
from rios.conversion.utils import JsonReader
from rios.conversion.exception import (
Error,
ConversionValueError,
QualtricsFormatError,
)
__all__ = (
'QualtricsToRios',
)
class PageName(object):
    """ Provides easy naming for pages """

    def __init__(self, start=0):
        # The counter is pre-incremented, so the first name uses start + 1.
        self.page_id = start

    def next(self):
        """Return the next page name, zero-padded to two digits."""
        self.page_id += 1
        return "page_%02d" % self.page_id
class JsonReaderMainProcessor(JsonReader):
    """ Process Qualtrics JSON data """
    def processor(self, data):
        """ Extract instrument data into a dict. """
        try:
            extracted = {
                'block_elements': [],
                'questions': {},  # QuestionID: payload (dict)
            }
            for survey_element in data['SurveyElements']:
                is_mapping = isinstance(survey_element, dict)
                element = (
                    survey_element.get('Element', None)
                    if is_mapping
                    else None
                )
                # Payload may be a "null" value in *.qsf files
                payload = (
                    survey_element.get('Payload', None)
                    if is_mapping
                    else None
                )
                if not element:
                    raise QualtricsFormatError(
                        'Missing \"Element\" field in \"SurveyElements\":',
                        repr(survey_element)
                    )
                if element == 'BL':
                    # Element: BL
                    # Payload is either a list of Block or a dict of Block.
                    # Only the first Block with non-empty BlockElements is
                    # copied into extracted['block_elements'].
                    blocks = (
                        payload.values()
                        if isinstance(payload, dict)
                        else payload
                    )
                    for block in blocks:
                        if block['BlockElements']:
                            extracted['block_elements'].extend(
                                block['BlockElements']
                            )
                            break
                elif element == 'SQ':
                    if not payload:
                        raise QualtricsFormatError(
                            'Missing \"Payload\" field in \"SurveyElements\":',
                            repr(survey_element)
                        )
                    extracted['questions'][payload['QuestionID']] = payload
        except Exception as exc:
            # Our own QualtricsFormatErrors are re-wrapped with their text;
            # anything else is wrapped with its repr.
            detail = (
                str(exc)
                if isinstance(exc, QualtricsFormatError)
                else repr(exc)
            )
            raise QualtricsFormatError('Processor read error:', detail)
        else:
            return extracted
class QualtricsToRios(ToRios):
    """ Converts a Qualtrics *.qsf file to the RIOS specification format """
    def __init__(self, filemetadata=False, *args, **kwargs):
        # ``filemetadata`` is accepted for interface compatibility but is
        # currently unused.
        super(QualtricsToRios, self).__init__(*args, **kwargs)
        self.page_name = PageName()
    def __call__(self):
        """ Process the qsf input, and create output files """
        # Preprocessing
        try:
            self.reader = JsonReaderMainProcessor(self.stream)
            self.reader.process()
        except Exception as exc:
            error = Error(
                "Unable to parse Qualtrics data dictionary:",
                "Invalid JSON formatted text"
            )
            error.wrap(
                "Parse error:",
                str(exc)
            )
            self.logger.error(str(error))
            raise error
        # Initialize processor
        process = Processor(self.reader, self.localization)
        # MAIN PROCESSING
        # Occurs in two steps:
        #   1) Partition questions into pages (page_question_map)
        #   2) Iterate over the containers to construct RIOS definitions
        question_data = self.reader.data['questions']
        page_question_map = collections.OrderedDict()
        # BUGFIX: use a list (not a set) so pages keep their generation
        # order deterministically; names are unique because PageName
        # increments a counter.
        page_names = []
        page_name = self.page_name.next()
        page_names.append(page_name)
        for form_element in self.reader.data['block_elements']:
            element_type = form_element.get('Type', None)
            if element_type == 'Page Break':
                page_name = self.page_name.next()
                page_names.append(page_name)
            elif element_type == 'Question':
                question_id = form_element.get('QuestionID', None)
                if question_id is None:
                    error = QualtricsFormatError(
                        "Block element QuestionID value not found in:",
                        str(form_element)
                    )
                    self.logger.error(str(error))
                    raise error
                elif question_id not in question_data:
                    error = QualtricsFormatError(
                        "QuestionID value not found in question data. Got ID:",
                        str(question_id)
                    )
                    self.logger.error(str(error))
                    raise error
                elif page_name not in page_question_map:
                    page_question_map[page_name] = {
                        question_id: question_data[question_id],
                    }
                else:
                    page_question_map[page_name].update(
                        {question_id: question_data[question_id]}
                    )
            else:
                error = QualtricsFormatError(
                    "Invalid type for block element. Expected types:",
                    "\"Page Break\" or \"Question\""
                )
                if element_type:
                    error.wrap("But got invalid type value:",
                                str(element_type))
                else:
                    error.wrap("Missing type value")
                self.logger.error(str(error))
                raise error
        for page_name in page_names:
            self.page_container.update(
                {page_name: structures.PageObject(id=page_name), }
            )
        for page_name, page in six.iteritems(self.page_container):
            # BUGFIX: a page may contain no questions (e.g. consecutive page
            # breaks or a trailing page break); default to an empty mapping
            # instead of raising KeyError.
            mapping_data = page_question_map.get(page_name, {})
            # NOTE: loop variable renamed so it no longer shadows the
            # module-wide ``question_data`` dict above.
            for question_id, question_payload in six.iteritems(mapping_data):
                try:
                    # WHERE THE MAGIC HAPPENS
                    fields = process(page, question_payload)
                    # Clear processor's internal storage for next question
                    process.clear_storage()
                    for field in fields:
                        self.field_container.append(field)
                except Exception as exc:
                    if isinstance(exc, ConversionValueError):
                        error = Error(
                            "Skipping question: " + str(question_id)
                            + ". Error:",
                            str(exc)
                        )
                        self.logger.warning(str(error))
                    else:
                        error = Error(
                            "An unknown error occured:",
                            repr(exc)
                        )
                        # BUGFIX: this is the Qualtrics converter; the
                        # message previously referred to REDCap CSV data.
                        error.wrap(
                            "Qualtrics data dictionary conversion failure:",
                            "Unable to parse the Qualtrics data dictionary"
                        )
                        self.logger.error(str(error))
                        raise exc
        # Construct instrument objects
        for field in self.field_container:
            self._instrument.add_field(field)
        # Page container is a dict instead of a list, so iterate over vals
        for page in six.itervalues(self.page_container):
            self._form.add_page(page)
        # Post-processing/validation
        self.validate()
class Processor(object):
    """ Processor class for Qualtrics data dictionaries """
    def __init__(self, reader, localization):
        # Reader holding the already-parsed .qsf data.
        self.reader = reader
        # Locale key used when building localized-string objects.
        self.localization = localization
        # For storing fields
        self._field_storage = []
        # Object to store pointer to instrument field object
        self._field = None
        self._field_type = None
        # Object to store pointers to question choices
        self._choices = None
    def __call__(self, page, question_data):
        """ Processes a Qualtrics data dictionary question per form page.

        Adds the converted element to *page* and returns the list of
        instrument fields accumulated for this question.  The caller is
        expected to invoke ``clear_storage()`` between questions.
        """
        # Generate question element object
        question = structures.ElementObject()
        try:
            self.question_field_processor(question_data, question)
        except Exception as exc:
            # Reset storage if conversion of current page/question fails
            self.clear_storage()
            error = ConversionValueError(
                "Invalid questions data. Got error:",
                repr(exc)
            )
            raise error
        # Add the configured question to the page
        page.add_element(question)
        fields = self._field_storage
        return fields
    @staticmethod
    def clean_question(text):
        # Strip literal <br> markup embedded in Qualtrics question text.
        return text.replace('<br>', '')
    def clear_storage(self):
        # Called after each question so fields don't leak across questions.
        self._field_storage = []
    def question_field_processor(self, question_data, question):
        """ Process one question payload and build its instrument field """
        question_type = question_data['QuestionType']
        question_text = localized_string_object(
            self.localization,
            self.clean_question(question_data['QuestionText'])
        )
        if question_type == 'DB':
            # Question is only display text
            question['type'] = 'text'
            question['options'] = {'text': question_text}
        else:
            # Question is an interactive form element
            question['type'] = 'question'
            question['options'] = structures.QuestionObject(
                fieldId=question_data['DataExportTag'].lower(),
                text=localized_string_object(
                    self.localization,
                    self.clean_question(
                        question_data['QuestionText']
                    )
                ),
            )
            # Choices are generated, where "choices" is an array of
            # tuples: (id, choice)
            self._choices = question_data.get('Choices', [])
            order = question_data.get('ChoiceOrder', [])
            if self._choices:
                if isinstance(self._choices, dict):
                    if not order:
                        # No explicit ChoiceOrder: fall back to the choice
                        # keys, sorted numerically when they are all digits.
                        keys = self._choices.keys()
                        if all([k.isdigit() for k in keys]):
                            keys = [int(k) for k in keys]
                        order = sorted(keys)
                    self._choices = [(x, self._choices[str(x)]) for x in order]
                elif isinstance(self._choices, list):
                    self._choices = [i for i in enumerate(self._choices)]
                else:
                    error = ConversionValueError(
                        "Choices are not formatted correctly. Got choices:",
                        str(self._choices)
                    )
                    error.wrap("With question data:", str(question))
                    raise error
                # Normalize ids to lowercase strings, keep the display text.
                self._choices = [
                    (str(i).lower(), c['Display'])
                    for i, c in self._choices
                ]
                # Process question object and field type object
                question_obj = question['options']
                field_type = structures.TypeObject(base='enumeration', )
                for _id, choice in self._choices:
                    question_obj.add_enumeration(
                        structures.DescriptorObject(
                            id=_id,
                            text=localized_string_object(
                                self.localization,
                                choice
                            ),
                        )
                    )
                    field_type.add_enumeration(str(_id))
            else:
                field_type = 'text'
            # Construct field for instrument definition
            field = structures.FieldObject(
                id=question_data['DataExportTag'].lower(),
                description=question_data['QuestionDescription'],
                type=field_type,
                required=False,
                identifiable=False,
            )
            self._field_storage.append(field)
import re
import collections
from rios.core.validation.instrument import get_full_type_definition
from rios.conversion.base import FromRios
from rios.conversion.exception import (
ConversionValueError,
RiosFormatError,
Error,
)
from rios.conversion.redcap.to_rios import (
FUNCTION_TO_PYTHON,
OPERATOR_TO_REXL,
)
__all__ = (
'RedcapFromRios',
)
# REDCap data dictionary header row; the order here defines the column
# order of every row emitted by the converter.
COLUMNS = [
    "Variable / Field Name",
    "Form Name",
    "Section Header",
    "Field Type",
    "Field Label",
    "Choices, Calculations, OR Slider Labels",
    "Field Note",
    "Text Validation Type OR Show Slider Number",
    "Text Validation Min",
    "Text Validation Max",
    "Identifier?",
    "Branching Logic (Show field only if...)",
    "Required Field?",
    "Custom Alignment",
    "Question Number (surveys only)",
    "Matrix Group Name",
    "Matrix Ranking?",
    "Field Annotation",
]
# dict: each item => rios.conversion name: REDCap name
FUNCTION_TO_REDCAP = {rios: red for red, rios in FUNCTION_TO_PYTHON.items()}
# dict of function name: pattern which finds "name("
RE_funcs = {
    k: re.compile(r'\b%s\(' % k)
    for k in FUNCTION_TO_REDCAP.keys()}
# array of (regex pattern, replacement)
RE_ops = [(re.compile(rexl), redcap) for redcap, rexl in OPERATOR_TO_REXL]
# Find math.pow function: math.pow(base, exponent)
# \1 => base, \2 => exponent
RE_pow_function = re.compile(r'\bmath.pow\(\s*(.+)\s*,\s*(.+)\s*\)')
# Find variable reference: table["field"] or table['field']
# \1 => table, \2 => quote \3 => field
# NOTE(review): inside a character class \2 is NOT a backreference (it is
# the octal escape for chr(2)), so [^\2\]] does not exclude the opening
# quote character -- confirm whether that stricter exclusion was intended.
RE_variable_reference = re.compile(
    r'''\b([a-zA-Z][\w_]*)'''
    r'''\[\s*(["'])'''
    r'''([^\2\]]*)'''
    r'''\2\s*\]''')
class RedcapFromRios(FromRios):
    """ Converts a RIOS configuration into a REDCap configuration """
    def __call__(self):
        """Convert the loaded RIOS definitions into REDCap CSV rows."""
        self._rows = collections.deque()
        self._rows.append(COLUMNS)
        self.section_header = ''
        if 'pages' not in self._form or not self._form['pages']:
            # BUGFIX: this was a single implicitly-concatenated string;
            # every other RiosFormatError in this module uses the
            # two-argument (message, detail) form.
            raise RiosFormatError(
                "RIOS data dictionary conversion failure. Error:",
                "RIOS form configuration does not contain page data"
            )
        # Process form and instrument configurations
        for page in self._form['pages']:
            try:
                self.page_processor(page)
            except Exception as exc:
                if isinstance(exc, ConversionValueError):
                    # Don't need to create a new error instance, b/c
                    # ConversionValueErrors caught here already contain
                    # identifying information.
                    self.logger.warning(str(exc))
                elif isinstance(exc, RiosFormatError):
                    error = Error(
                        "Error parsing the data dictionary:",
                        str(exc)
                    )
                    error.wrap(
                        "RIOS data dictionary conversion failure:",
                        "Unable to parse the data dictionary"
                    )
                    self.logger.error(str(error))
                    raise error
                else:
                    error = Error(
                        "An unknown or unexpected error occured:",
                        repr(exc)
                    )
                    error.wrap(
                        "RIOS data dictionary conversion failure:",
                        "Unable to parse the data dictionary"
                    )
                    # Log str(error) for consistency with the other
                    # handlers (was repr(error)).
                    self.logger.error(str(error))
                    raise exc
        # Process calculations
        if self._calculationset:
            for calculation in self._calculationset['calculations']:
                try:
                    calc_id = calculation.get('id', None)
                    # BUGFIX: read the 'description' field; a copy/paste of
                    # the 'id' lookup meant missing descriptions were never
                    # detected.
                    calc_description = calculation.get('description', None)
                    if not calc_id or not calc_description:
                        raise RiosFormatError(
                            "Missing ID or description for a calculation:",
                            str(
                                calc_id
                                or calc_description
                                or "Calculation is not identifiable"
                            )
                        )
                    self.process_calculation(calculation)
                except Exception as exc:
                    if isinstance(exc, ConversionValueError):
                        error = Error(
                            "Skipping calculation element with ID:",
                            str(calc_id)
                        )
                        error.wrap("Error:", str(exc))
                        # BUGFIX: log the wrapped error (which carries the
                        # calculation ID); previously `error` was built and
                        # then discarded.
                        self.logger.warning(str(error))
                    else:
                        raise exc
        self._definition.append(self._rows)
    def page_processor(self, page):
        """Convert one RIOS form page into REDCap rows."""
        self.form_name = page.get('id', None)
        self.elements = page.get('elements', None)
        if not self.form_name or not self.elements:
            raise RiosFormatError(
                "Error:",
                "RIOS form does not contain valid page data"
            )
        # Iterate over form elements and process them accordingly
        for element in self.elements:
            # Get question/form element ID value for error messages
            try:
                identifier = element['options']['fieldId']
            except Exception:
                # No fieldId (e.g. header/text element): fall back to the
                # localized text as the identifier.
                identifier = element['options']['text'].get(
                    self.localization,
                    None
                )
            if not identifier:
                raise ConversionValueError(
                    'Form element has no identifier.'
                    ' Invalid element data:',
                    str(element)
                )
            try:
                self.process_element(element)
            except Exception as exc:
                if isinstance(exc, ConversionValueError):
                    error = Error(
                        "Skipping form element with ID:",
                        str(identifier)
                    )
                    error.wrap('Error:', str(exc))
                    # BUGFIX: actually log the skip; previously the wrapped
                    # error was constructed and silently discarded.
                    self.logger.warning(str(error))
                else:
                    raise exc
    def convert_rexl_expression(self, rexl):
        """
        Convert REXL expression into REDCap expressions
        - convert operators
        - convert caret to pow
        - convert redcap function names to python
        - convert database reference: a["b"] => [a][b]
        - convert assessment variable reference: assessment["a"] => [a]
        - convert calculation variable reference: calculations["c"] => [c]
        """
        s = rexl
        for pattern, replacement in RE_ops:
            s = pattern.sub(replacement, s)
        s = RE_pow_function.sub(r'(\1)^(\2)', s)
        for name, pattern in RE_funcs.items():
            # the matched pattern includes the '('
            s = pattern.sub('%s(' % FUNCTION_TO_REDCAP[name], s)
        s = self.convert_variables(s)
        return s
    @staticmethod
    def convert_variables(s):
        """Rewrite ``table["field"]`` references into REDCap bracket form."""
        start = 0
        answer = ''
        while 1:
            match = RE_variable_reference.search(s[start:])
            if match:
                table, quote, field = match.groups()
                if table in ['assessment', 'calculations']:
                    replacement = '[%s]' % field
                else:
                    replacement = '[%s][%s]' % (table, field)
                answer += s[start: start + match.start()] + replacement
                start += match.end()
            else:
                break
        answer += s[start:]
        return answer
    def get_choices(self, array):
        """Render descriptors as a REDCap choices string: ``id, label | ...``."""
        return ' | '.join(['%s, %s' % (
            str(d['id']),
            self.get_local_text(self.localization, d['text']))
            for d in array])
    def get_type_tuple(self, base, question):
        """Map a RIOS base type (plus widget hints) to
        (REDCap field type, text validation type)."""
        widget_type = question.get('widget', {}).get('type', '')
        if base == 'float':
            return 'text', 'number'
        elif base == 'integer':
            return 'text', 'integer'
        elif base == 'text':
            return {'textArea': 'notes'}.get(widget_type, 'text'), ''
        elif base == 'enumeration':
            enums = {'radioGroup': 'radio', 'dropDown': 'dropdown'}
            return enums.get(widget_type, 'dropdown'), ''
        elif base == 'enumerationSet':
            return 'checkbox', ''
        elif base == 'matrix':
            return 'radio', ''
        else:
            return 'text', ''
    def process_calculation(self, calculation):
        """Append one 'calc' row to the output for a RIOS calculation."""
        def get_expression():
            expression = calculation['options']['expression']
            if calculation['method'] == 'python':
                expression = self.convert_rexl_expression(expression)
            return expression
        self._rows.append(
            [
                calculation['id'],
                'calculations',
                '',
                'calc',
                calculation['description'],
                get_expression(),
                '', '', '', '', '', '', '', '', '', '', '', '',
            ]
        )
    def process_element(self, element):
        """Dispatch a form element to the header/text or question handler."""
        _type = element['type']
        options = element['options']
        if _type in ['header', 'text']:
            self.process_header(options)
        elif _type == 'question':
            self.process_question(options)
        else:
            error = ConversionValueError(
                "Invalid form element type. Got:",
                str(_type)
            )
            error.wrap("Expected values:", "header, text, question")
            raise error
    def process_header(self, header):
        """Remember header text; it becomes the next row's Section Header."""
        self.section_header = self.get_local_text(
            self.localization,
            header['text']
        )
    def process_matrix(self, question):
        """Emit one REDCap matrix row per RIOS matrix row.

        REDCap matrices support exactly one column, and that column must be
        an enumeration.
        """
        questions = question['questions']
        if isinstance(questions, list):
            if len(questions) > 1:
                error = ConversionValueError(
                    'REDCap matrices support only one question. Got:',
                    ", ".join([str(q) for q in questions])
                )
                raise error
            column = questions[0]
        else:
            column = questions
        if 'enumerations' not in column:
            error = ConversionValueError(
                'Form element skipped with ID:',
                str(question.get('fieldId', 'Unknown field ID'))
            )
            error.wrap(
                'REDCap matrix column must be an enumeration. Got column:',
                str(column)
            )
            raise error
        choices = self.get_choices(column['enumerations'])
        section_header = self.section_header
        matrix_group_name = question['fieldId']
        field = self.fields[matrix_group_name]
        type_object = get_full_type_definition(
            self._instrument,
            field['type']
        )
        base = type_object['base']
        field_type, valid_type = self.get_type_tuple(base, question)
        for row in question['rows']:
            self._rows.append(
                [
                    row['id'],
                    self.form_name,
                    section_header,
                    field_type,
                    self.get_local_text(self.localization, row['text']),
                    choices,
                    self.get_local_text(self.localization,
                                        row.get('help', {})),
                    valid_type,
                    '',
                    '',
                    'y' if field.get('identifiable', False) else '',
                    '',
                    'y' if field.get('required', False) else '',
                    '',
                    '',
                    matrix_group_name,
                    'y',
                    '',
                ]
            )
            # Only the first matrix row carries the section header.
            section_header = ''
    def process_question(self, question):
        """Emit the REDCap row(s) for a single question element."""
        def get_choices():
            return (
                self.get_choices(question['enumerations'])
                if 'enumerations' in question
                else ''
            )
        def get_range(type_object):
            r = type_object.get('range', {})
            min_value = str(r.get('min', ''))
            max_value = str(r.get('max', ''))
            return min_value, max_value
        def get_trigger():
            return (
                question['events'][0]['trigger']
                if 'events' in question and question['events']
                else ''
            )
        branching = self.convert_rexl_expression(get_trigger())
        if 'rows' in question and 'questions' in question:
            self.process_matrix(question)
        else:
            field_id = question['fieldId']
            field = self.fields[field_id]
            type_object = get_full_type_definition(
                self._instrument,
                field['type'])
            base = type_object['base']
            field_type, valid_type = self.get_type_tuple(base, question)
            min_value, max_value = get_range(type_object)
            self._rows.append(
                [
                    field_id,
                    self.form_name,
                    self.section_header,
                    field_type,
                    self.get_local_text(self.localization, question['text']),
                    get_choices(),
                    self.get_local_text(self.localization,
                                        question.get('help', {})),
                    valid_type,
                    min_value,
                    max_value,
                    'y' if field.get('identifiable', False) else '',
                    branching,
                    'y' if field.get('required', False) else '',
                    '',
                    '',
                    '',
                    '',
                    '',
                ]
            )
        # The pending section header applies to one question only.
        self.section_header = ''
from __future__ import division
import datetime
__all__ = (
'datediff',
'mean',
'median',
'round_',
'rounddown',
'roundup',
'stdev',
'sum_',
)
def datediff(date1, date2, units, date_fmt="ymd"):
    """
    Return ``date1 - date2`` expressed in *units*.

    Dates are hyphen-separated strings whose component order is given by
    *date_fmt* (characters ``y``/``m``/``d``); the literal string
    ``"today"`` stands for the current moment.  *units* is one of ``y``
    (years, approximated as 365 days), ``M`` (months, approximated as 30
    days), ``d`` (days), ``h`` (hours), ``m`` (minutes), or ``s``
    (seconds); any other value raises ``ValueError``.
    """
    part_names = {'y': 'year', 'm': 'month', 'd': 'day'}

    def _parse(date):
        values = map(int, date.split('-'))
        return datetime.datetime(
            **{part_names[code]: value
               for code, value in zip(date_fmt, values)})

    def _in_units(delta):
        days = delta.days
        if units == 'y':
            return days / 365
        if units == 'M':
            return days / 30
        if units == 'd':
            return days
        # Sub-day units include the seconds component of the delta.
        seconds = days * 24 * 3600 + delta.seconds
        if units == 'h':
            return seconds / 3600
        if units == 'm':
            return seconds / 60
        if units == 's':
            return seconds
        raise ValueError(units)

    now = datetime.datetime.today() if "today" in [date1, date2] else None
    minuend = now if date1 == "today" else _parse(date1)
    subtrahend = now if date2 == "today" else _parse(date2)
    return _in_units(minuend - subtrahend)
def mean(*data):
    """Arithmetic mean of the arguments; 0.0 when called with none."""
    if not data:
        return 0.0
    return sum(data) / float(len(data))
def median(*data):
    """Median of the arguments, or ``None`` when no values are given."""
    if not data:
        return None
    ordered = sorted(data)
    count = len(ordered)
    middle = count // 2
    if count % 2:
        return float(ordered[middle])
    # Even count: average the two central values.
    return (ordered[middle - 1] + ordered[middle]) / 2.0
def round_(number, decimal_places):
    """Round *number* to the given number of decimal places."""
    scale = 10.0 ** decimal_places
    return round(number * scale) / scale
def rounddown(number, decimal_places):
    """
    Round *number* at *decimal_places*, correcting downward so the result
    does not exceed *number*.
    """
    scale = 10.0 ** decimal_places
    nearest = round(number * scale) / scale
    if nearest <= number:
        return nearest
    # Overshot: shift down by half a step and round again.
    half_step = 0.5 * 10 ** -decimal_places
    return round((number - half_step) * scale) / scale
def roundup(number, decimal_places):
    """
    Round *number* at *decimal_places*, correcting upward so the result is
    not less than *number*.
    """
    scale = 10.0 ** decimal_places
    nearest = round(number * scale) / scale
    if nearest >= number:
        return nearest
    # Undershot: shift up by half a step and round again.
    half_step = 0.5 * 10 ** -decimal_places
    return round((number + half_step) * scale) / scale
def stdev(*data):
    """Calculates the population standard deviation."""
    n = len(data)
    if n < 2:
        # Fewer than two samples carry no spread information.
        return 0.0
    center = sum(data) / float(n)
    variance = sum((value - center) ** 2 for value in data) / n
    return variance ** 0.5
def sum_(*data):
    """Return the sum of all positional arguments (0 when none are given)."""
    return sum(data)
import collections
import json
import yaml
from six import text_type, add_metaclass
__all__ = (
'get_json',
'get_yaml',
'OrderedDict',
'SortedDict',
'TypedSortedDict',
'DefinedOrderDict',
'TypedDefinedOrderDict',
'InstrumentReference',
'Descriptor',
)
def get_json(data, pretty=False, **kwargs):
    """
    Serialize *data* to JSON via ``json.dumps``, preserving the key order
    produced by classes like ``OrderedDict``, ``SortedDict``, and
    ``DefinedOrderDict`` (sorting is always disabled).

    :param data: the object to encode in JSON
    :param pretty:
        whether or not the output should be indented in human-friendly ways
    :type pretty: boolean
    :returns: a JSON-encoded string
    """
    kwargs.update(ensure_ascii=False, sort_keys=False)
    if pretty:
        kwargs.update(indent=2, separators=(',', ': '))
    return json.dumps(data, **kwargs)
def get_yaml(data, pretty=False, **kwargs):
    """
    Serialize *data* to YAML via ``yaml.dump`` using ``OrderedDumper`` so
    the key order of classes like ``OrderedDict``, ``SortedDict``, and
    ``DefinedOrderDict`` is preserved.

    :param data: the object to encode in YAML
    :param pretty:
        whether or not the output should be indented in human-friendly ways
    :type pretty: boolean
    :returns: a YAML-encoded string
    """
    kwargs.update(
        Dumper=OrderedDumper,
        allow_unicode=True,
        default_flow_style=False if pretty else None,
    )
    return yaml.dump(data, **kwargs).rstrip()  # noqa: DUO109
class OrderedDumper(yaml.Dumper):  # noqa: too-many-ancestors
    """Project-wide YAML dumper; representers are registered on it below."""
    pass
def unicode_representer(dumper, ustr):
    """Represent a text value as a plain (untagged) YAML string scalar."""
    return dumper.represent_scalar('tag:yaml.org,2002:str', ustr)
# Ensure text/unicode values serialize as ordinary YAML strings.
OrderedDumper.add_representer(text_type, unicode_representer)
def dict_representer(dumper, data):
    """Represent a mapping while preserving the order of ``data.items()``."""
    items = list(data.items())
    return dumper.represent_mapping(
        yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
        items,
    )
class OrderedDumperMetaclass(type):
    """Metaclass that registers every new class with ``OrderedDumper``.

    Classes built with this metaclass get ``dict_representer`` attached,
    so their instances dump to YAML with their own key ordering intact.
    """
    def __init__(cls, name, bases, dct):  # noqa
        super(OrderedDumperMetaclass, cls).__init__(name, bases, dct)
        OrderedDumper.add_representer(cls, dict_representer)
@add_metaclass(OrderedDumperMetaclass)
class OrderedDict(collections.OrderedDict):
    """
    A functional equivalent to ``collections.OrderedDict``, declared with
    ``OrderedDumperMetaclass`` so instances dump to YAML in insertion order.
    """
@add_metaclass(OrderedDumperMetaclass)
class SortedDict(dict):
    """
    A dictionary class that sorts its keys alphabetically.
    """
    def get_keys(self):
        # All key views below funnel through this single sorted list.
        return sorted(super(SortedDict, self).keys())
    def keys(self):
        return self.get_keys()
    def __iter__(self):
        return iter(self.get_keys())
    def items(self):
        return [(key, self[key]) for key in self.get_keys()]
    def iteritems(self):
        for key in self.get_keys():
            yield (key, self[key])
class TypedSortedDict(SortedDict):
    """
    A variety of the ``SortedDict`` class that automatically casts the value
    of all keys to the type specified on the ``subtype`` property.
    """
    #: The type to cast all values in the dictionary to.
    subtype = None
    def __init__(self, obj=None):
        super(TypedSortedDict, self).__init__(obj or {})
        # Re-assign each key so __setitem__ applies the cast to values that
        # arrived through the constructor.
        for key in self:
            self[key] = self[key]
    def __setitem__(self, key, value):
        if self.subtype:
            value = self.subtype(value)  # pylint: disable=not-callable
        super(TypedSortedDict, self).__setitem__(key, value)
class DefinedOrderDict(SortedDict):
    """
    A dictionary class that orders its keys according to its ``order``
    property (any unnamed keys are then sorted alphabetically).
    """
    #: A list of key names in the order you want them to be output.
    order = []
    def get_keys(self):
        present = super(DefinedOrderDict, self).get_keys()
        # Named keys first, in their declared order...
        keys = [key for key in self.order if key in present]
        # ...then everything else, already alphabetized by the parent.
        keys.extend(key for key in present if key not in self.order)
        return keys
class TypedDefinedOrderDict(DefinedOrderDict):
    """
    A variety of the ``DefinedOrderDict`` class that provides for the
    automatic casting of values in the dictionary based on their key. This
    conversion is driven by the ``key_types`` property. E.g.::

        key_types = {
            'foo': SortedOrderDict,
            'bar': [SortedOrderDict],
        }
    """
    #: The mapping of key names to types. To indicate that the key should
    #: contain a list of casted values, place the type in a list with one
    #: element.
    key_types = {}
    def __init__(self, obj=None):
        super(TypedDefinedOrderDict, self).__init__(obj or {})
        for key in self.key_types:
            if key in self:
                # Re-assign so our __setitem__ applies the cast.
                self[key] = self[key]
    def __setitem__(self, key, value):
        if key in self.key_types:
            caster = self.key_types[key]
            if isinstance(caster, list):
                # A one-element list means "cast every member of the value".
                value = [caster[0](item) for item in value]
            else:
                value = caster(value)
        super(TypedDefinedOrderDict, self).__setitem__(key, value)
class InstrumentReference(TypedDefinedOrderDict):
    """A reference to an Instrument: its ``id`` and (stringified) ``version``."""
    order = [
        'id',
        'version',
    ]
    key_types = {
        'version': str,
    }
class Descriptor(TypedDefinedOrderDict):
    """A descriptor (e.g. an enumeration choice) with localized text fields."""
    order = [
        'id',
        'text',
        'help',
        'audio',
    ]
    key_types = {
        'id': str,
        'text': SortedDict,
        'help': SortedDict,
        'audio': SortedDict,
    }
import json
import six
from .common import get_json, get_yaml
from .assessment import Assessment
from .calculationset import CalculationSet
from .form import Form
from .instrument import Instrument
from .interaction import Interaction
__all__ = (
'get_instrument_json',
'get_instrument_yaml',
'get_assessment_json',
'get_assessment_yaml',
'get_form_json',
'get_form_yaml',
'get_calculationset_json',
'get_calculationset_yaml',
'get_interaction_json',
'get_interaction_yaml',
)
def _get_struct(src):
    """Coerce *src* (JSON string, file-like object, or mapping) to a dict."""
    if hasattr(src, 'read'):
        return json.load(src)
    if isinstance(src, six.string_types):
        return json.loads(src)
    # Already a parsed structure.
    return src
def get_instrument_json(instrument, pretty=True, **kwargs):
    """
    Render an Instrument Definition as a JSON string.

    :param instrument:
        the Instrument Definition (JSON string, dict, or file-like object)
    :param pretty:
        indent the output for human readability; defaults to ``True``
    :type pretty: bool
    :param kwargs: passed through to the underlying ``json.dumps``
    :returns: the JSON-encoded Instrument
    :rtype: string
    """
    kwargs['pretty'] = pretty
    return get_json(Instrument(_get_struct(instrument)), **kwargs)
def get_instrument_yaml(instrument, pretty=True, **kwargs):
    """
    Render an Instrument Definition as a YAML string.

    :param instrument:
        the Instrument Definition (JSON string, dict, or file-like object)
    :param pretty:
        indent the output for human readability; defaults to ``True``
    :type pretty: bool
    :param kwargs: passed through to the underlying ``yaml.dump``
    :returns: the YAML-encoded Instrument
    :rtype: string
    """
    kwargs['pretty'] = pretty
    return get_yaml(Instrument(_get_struct(instrument)), **kwargs)
def get_assessment_json(assessment, pretty=True, **kwargs):
    """
    Render an Assessment Document as a JSON string.

    :param assessment:
        the Assessment Document (JSON string, dict, or file-like object)
    :param pretty:
        indent the output for human readability; defaults to ``True``
    :type pretty: bool
    :param kwargs: passed through to the underlying ``json.dumps``
    :returns: the JSON-encoded Assessment
    :rtype: string
    """
    kwargs['pretty'] = pretty
    return get_json(Assessment(_get_struct(assessment)), **kwargs)
def get_assessment_yaml(assessment, pretty=True, **kwargs):
    """
    Generates a YAML-formatted string containing the specified Assessment
    Document.
    :param assessment: The Assessment Document to generate the YAML for
    :type assessment: JSON string, dict, or file-like object
    :param pretty:
        Whether or not to format the YAML in a human-friendly way. If not
        specified, defaults to ``True``.
    :type pretty: bool
    :param kwargs:
        Any extra keyword arguments are passed to the underlying ``yaml.dump``
        function.
    :returns: The YAML-formatted string representing the Assessment
    :rtype: string
    """
    assessment = _get_struct(assessment)
    kwargs['pretty'] = pretty
    return get_yaml(Assessment(assessment), **kwargs)
def get_form_json(form, pretty=True, **kwargs):
    """
    Generates a JSON-formatted string containing the specified Web Form
    Configuration.
    :param form: The Web Form Configuration to generate the JSON for
    :type form: JSON string, dict, or file-like object
    :param pretty:
        Whether or not to format the JSON in a human-friendly way. If not
        specified, defaults to ``True``.
    :type pretty: bool
    :param kwargs:
        Any extra keyword arguments are passed to the underlying ``json.dumps``
        function.
    :returns: The JSON-formatted string representing the Form
    :rtype: string
    """
    form = _get_struct(form)
    kwargs['pretty'] = pretty
    return get_json(Form(form), **kwargs)
def get_form_yaml(form, pretty=True, **kwargs):
    """
    Generates a YAML-formatted string containing the specified Web Form
    Configuration.
    :param form: The Web Form Configuration to generate the YAML for
    :type form: JSON string, dict, or file-like object
    :param pretty:
        Whether or not to format the YAML in a human-friendly way. If not
        specified, defaults to ``True``.
    :type pretty: bool
    :param kwargs:
        Any extra keyword arguments are passed to the underlying ``yaml.dump``
        function.
    :returns: The YAML-formatted string representing the Form
    :rtype: string
    """
    form = _get_struct(form)
    kwargs['pretty'] = pretty
    return get_yaml(Form(form), **kwargs)
def get_calculationset_json(calculationset, pretty=True, **kwargs):
    """
    Generates a JSON-formatted string containing the specified Calculation Set
    Definition.
    :param calculationset: The Calculation Set Definition to generate the
        JSON for
    :type calculationset: JSON string, dict, or file-like object
    :param pretty:
        Whether or not to format the JSON in a human-friendly way. If not
        specified, defaults to ``True``.
    :type pretty: bool
    :param kwargs:
        Any extra keyword arguments are passed to the underlying ``json.dumps``
        function.
    :returns: The JSON-formatted string representing the Calculation Set
    :rtype: string
    """
    calculationset = _get_struct(calculationset)
    kwargs['pretty'] = pretty
    return get_json(CalculationSet(calculationset), **kwargs)
def get_calculationset_yaml(calculationset, pretty=True, **kwargs):
    """
    Generates a YAML-formatted string containing the specified Calculation Set
    Definition.
    :param calculationset: The Calculation Set Definition to generate the
        YAML for
    :type calculationset: JSON string, dict, or file-like object
    :param pretty:
        Whether or not to format the YAML in a human-friendly way. If not
        specified, defaults to ``True``.
    :type pretty: bool
    :param kwargs:
        Any extra keyword arguments are passed to the underlying ``yaml.dump``
        function.
    :returns: The YAML-formatted string representing the Calculation Set
    :rtype: string
    """
    calculationset = _get_struct(calculationset)
    kwargs['pretty'] = pretty
    return get_yaml(CalculationSet(calculationset), **kwargs)
def get_interaction_json(interaction, pretty=True, **kwargs):
    """
    Generates a JSON-formatted string containing the specified SMS Interaction
    Configuration.
    :param interaction: The SMS Interaction Configuration to generate the
        JSON for
    :type interaction: JSON string, dict, or file-like object
    :param pretty:
        Whether or not to format the JSON in a human-friendly way. If not
        specified, defaults to ``True``.
    :type pretty: bool
    :param kwargs:
        Any extra keyword arguments are passed to the underlying ``json.dumps``
        function.
    :returns: The JSON-formatted string representing the Interaction
    :rtype: string
    """
    interaction = _get_struct(interaction)
    kwargs['pretty'] = pretty
    return get_json(Interaction(interaction), **kwargs)
def get_interaction_yaml(interaction, pretty=True, **kwargs):
    """
    Generates a YAML-formatted string containing the specified SMS Interaction
    Configuration.
    :param interaction: The SMS Interaction Configuration to generate the
        YAML for
    :type interaction: JSON string, dict, or file-like object
    :param pretty:
        Whether or not to format the YAML in a human-friendly way. If not
        specified, defaults to ``True``.
    :type pretty: bool
    :param kwargs:
        Any extra keyword arguments are passed to the underlying ``yaml.dump``
        function.
    :returns: The YAML-formatted string representing the Interaction
    :rtype: string
    """
    interaction = _get_struct(interaction)
    kwargs['pretty'] = pretty
    return get_yaml(Interaction(interaction), **kwargs)
import string
import colander
from six import iteritems
from .common import ValidationError, sub_schema, LanguageTag, \
LocalizedMapping, IdentifierString, Options, LocalizedString, \
Descriptor as BaseDescriptor, LocalizationChecker, \
validate_instrument_version, CompoundIdentifierString, StrictBooleanType, \
guard_sequence, MetadataCollection, RE_PRODUCT_TOKENS
from .instrument import InstrumentReference, get_full_type_definition
# Explicit public API of this module.
__all__ = (
    'ELEMENT_TYPES_ALL',
    'EVENT_ACTIONS_ALL',
    'PARAMETER_TYPES_ALL',
    'WIDGET_SIZES_ALL',
    'WIDGET_ORIENTATIONS_ALL',
    'METADATA_PROPS',
    'STANDARD_WIDGET_DATATYPES',
    'Descriptor',
    'DescriptorList',
    'UrlList',
    'AudioSource',
    'TagList',
    'ElementType',
    'TextElementOptions',
    'AudioElementOptions',
    'Options',
    'Widget',
    'WidgetSize',
    'WidgetOrientation',
    'TextWidgetOptions',
    'TextAreaWidgetOptions',
    'RecordListWidgetOptions',
    'Hotkey',
    'HotkeyCollection',
    'EnumerationWidgetOptions',
    'Expression',
    'EventAction',
    'EventTargetList',
    'FailEventOptions',
    'EnumerationList',
    'HideEnumerationEventOptions',
    'EventList',
    'Event',
    'QuestionList',
    'QuestionElementOptions',
    'Element',
    'ElementList',
    'Page',
    'PageList',
    'ParameterType',
    'ParameterCollection',
    'Form',
)
# Element types that may appear in a page's element list.
ELEMENT_TYPES_ALL = (
    'question',
    'header',
    'text',
    'divider',
    'audio',
)
# Actions that an event may perform when its trigger expression fires.
EVENT_ACTIONS_ALL = (
    'hide',
    'disable',
    'hideEnumeration',
    'fail',
)
# Data types allowed for externally-supplied form parameters.
PARAMETER_TYPES_ALL = (
    'text',
    'numeric',
    'boolean',
)
# Sizes a widget may request for its rendering.
WIDGET_SIZES_ALL = (
    'small',
    'medium',
    'large',
)
# Layout orientations for enumeration-style widgets.
WIDGET_ORIENTATIONS_ALL = (
    'vertical',
    'horizontal',
)
# Maps each standard widget type to the Instrument base data types it can
# legally be assigned to (enforced in Form._check_widget_assignment).
STANDARD_WIDGET_DATATYPES = {
    'inputText': [
        'text',
    ],
    'inputNumber': [
        'integer',
        'float',
    ],
    'textArea': [
        'text',
    ],
    'radioGroup': [
        'enumeration',
        'boolean',
    ],
    'checkGroup': [
        'enumerationSet',
    ],
    'dropDown': [
        'enumeration',
        'boolean',
    ],
    'datePicker': [
        'date',
    ],
    'timePicker': [
        'time',
    ],
    'dateTimePicker': [
        'dateTime',
    ],
    'recordList': [
        'recordList',
    ],
    'matrix': [
        'matrix',
    ],
}
# Validators for the standard properties allowed in a Form's "meta" mapping.
METADATA_PROPS = {
    'author': colander.SchemaNode(
        colander.String(),
    ),
    'copyright': colander.SchemaNode(
        colander.String(),
    ),
    'homepage': colander.SchemaNode(
        colander.String(),
        validator=colander.url,
    ),
    'generator': colander.SchemaNode(
        colander.String(),
        validator=colander.Regex(RE_PRODUCT_TOKENS),
    ),
}
# pylint: disable=abstract-method
class UrlList(colander.SequenceSchema):
    """A non-empty sequence of URL strings."""
    url = colander.SchemaNode(colander.String())
    validator = colander.Length(min=1)
class AudioSource(LocalizedMapping):
    """Localized mapping of language tags to lists of audio file URLs."""
    def __init__(self, *args, **kwargs):
        super(AudioSource, self).__init__(
            UrlList(),
            *args,
            **kwargs
        )
class TagList(colander.SequenceSchema):
    """A non-empty sequence of identifier-formatted tags."""
    tag = IdentifierString()
    validator = colander.Length(min=1)
class ElementType(colander.SchemaNode):
    """One of the allowed page element types (see ELEMENT_TYPES_ALL)."""
    schema_type = colander.String
    validator = colander.OneOf(ELEMENT_TYPES_ALL)
class TextElementOptions(colander.SchemaNode):
    """Options for "text"/"header" elements: a localized text body."""
    text = LocalizedString()
    def __init__(self, *args, **kwargs):
        # Reject any options keys not declared on this schema.
        kwargs['typ'] = colander.Mapping(unknown='raise')
        super(TextElementOptions, self).__init__(*args, **kwargs)
class AudioElementOptions(colander.SchemaNode):
    """Options for "audio" elements: a localized audio source."""
    source = AudioSource()
    def __init__(self, *args, **kwargs):
        # Reject any options keys not declared on this schema.
        kwargs['typ'] = colander.Mapping(unknown='raise')
        super(AudioElementOptions, self).__init__(*args, **kwargs)
class WidgetSize(colander.SchemaNode):
    """One of the allowed widget sizes (see WIDGET_SIZES_ALL)."""
    schema_type = colander.String
    validator = colander.OneOf(WIDGET_SIZES_ALL)
class WidgetOrientation(colander.SchemaNode):
    """One of the allowed widget orientations (see WIDGET_ORIENTATIONS_ALL)."""
    schema_type = colander.String
    validator = colander.OneOf(WIDGET_ORIENTATIONS_ALL)
class TextWidgetOptions(colander.SchemaNode):
    """Options for text-entry widgets; extra keys are silently ignored."""
    width = WidgetSize(missing=colander.drop)
    def __init__(self, *args, **kwargs):
        # 'ignore' (not 'raise'): widgets may carry extra custom options.
        kwargs['typ'] = colander.Mapping(unknown='ignore')
        super(TextWidgetOptions, self).__init__(*args, **kwargs)
class TextAreaWidgetOptions(TextWidgetOptions):
    """Options for textArea widgets: adds a height to TextWidgetOptions."""
    height = WidgetSize(missing=colander.drop)
class RecordListWidgetOptions(colander.SchemaNode):
    """Options for recordList widgets: localized add/remove button labels."""
    addLabel = LocalizedString(missing=colander.drop)  # noqa: N815
    removeLabel = LocalizedString(missing=colander.drop)  # noqa: N815
    def __init__(self, *args, **kwargs):
        # 'ignore' (not 'raise'): widgets may carry extra custom options.
        kwargs['typ'] = colander.Mapping(unknown='ignore')
        super(RecordListWidgetOptions, self).__init__(*args, **kwargs)
class Hotkey(colander.SchemaNode):
    """A single-digit string ('0'-'9') usable as an enumeration hotkey."""
    schema_type = colander.String
    validator = colander.OneOf(string.digits)
class HotkeyCollection(colander.SchemaNode):
    """Mapping whose values must each be a valid Hotkey; must be non-empty."""
    def __init__(self, *args, **kwargs):
        # Keys are arbitrary (enumeration IDs), so preserve unknown keys.
        kwargs['typ'] = colander.Mapping(unknown='preserve')
        super(HotkeyCollection, self).__init__(*args, **kwargs)
    def validator(self, node, cstruct):
        cstruct = cstruct or {}
        if not cstruct:
            raise ValidationError(
                node,
                'At least one Hotkey must be defined',
            )
        # Validate each mapping value against the Hotkey schema.
        for _, hotkey in iteritems(cstruct):
            sub_schema(Hotkey, node, hotkey)
class EnumerationWidgetOptions(colander.SchemaNode):
    """Options for enumeration widgets (radioGroup/checkGroup)."""
    hotkeys = HotkeyCollection(missing=colander.drop)
    autoHotkeys = colander.SchemaNode(  # noqa: N815
        StrictBooleanType(),
        missing=colander.drop,
    )
    orientation = WidgetOrientation(missing=colander.drop)
    def __init__(self, *args, **kwargs):
        # 'ignore' (not 'raise'): widgets may carry extra custom options.
        kwargs['typ'] = colander.Mapping(unknown='ignore')
        super(EnumerationWidgetOptions, self).__init__(*args, **kwargs)
# Option sub-schemas for the standard widget types that define options.
# Widget types not listed here (e.g. custom widgets) are not option-checked.
WIDGET_TYPE_OPTION_VALIDATORS = {
    'inputText': TextWidgetOptions(),
    'inputNumber': TextWidgetOptions(),
    'textArea': TextAreaWidgetOptions(),
    'recordList': RecordListWidgetOptions(),
    'radioGroup': EnumerationWidgetOptions(),
    'checkGroup': EnumerationWidgetOptions(),
}
class Widget(colander.SchemaNode):
    """A widget assignment: a type name plus optional widget options."""
    type = colander.SchemaNode(colander.String())
    options = Options(missing=colander.drop)
    def __init__(self, *args, **kwargs):
        kwargs['typ'] = colander.Mapping(unknown='raise')
        super(Widget, self).__init__(*args, **kwargs)
    def validator(self, node, cstruct):
        # Only validate options for known standard widget types; unknown
        # (custom) widget types may carry arbitrary options.
        widget_type = cstruct.get('type', None)
        validator = WIDGET_TYPE_OPTION_VALIDATORS.get(widget_type, None)
        options = cstruct.get('options', None)
        if validator and options:
            sub_schema(
                validator,
                node.get('options'),
                options,
            )
class Expression(colander.SchemaNode):
    """An event trigger expression, held as an opaque string."""
    schema_type = colander.String
class EventAction(colander.SchemaNode):
    """One of the allowed event actions (see EVENT_ACTIONS_ALL)."""
    schema_type = colander.String
    validator = colander.OneOf(EVENT_ACTIONS_ALL)
class EventTargetList(colander.SequenceSchema):
    """A non-empty sequence of compound identifiers targeted by an event."""
    target = CompoundIdentifierString()
    validator = colander.Length(min=1)
class FailEventOptions(colander.SchemaNode):
    """Options for "fail" events: the localized failure message."""
    text = LocalizedString()
    def __init__(self, *args, **kwargs):
        kwargs['typ'] = colander.Mapping(unknown='raise')
        super(FailEventOptions, self).__init__(*args, **kwargs)
class EnumerationList(colander.SequenceSchema):
    """A non-empty sequence of enumeration ID strings."""
    enumeration = colander.SchemaNode(colander.String())
    validator = colander.Length(min=1)
class HideEnumerationEventOptions(colander.SchemaNode):
    """Options for "hideEnumeration" events: the enumerations to hide."""
    enumerations = EnumerationList()
    def __init__(self, *args, **kwargs):
        kwargs['typ'] = colander.Mapping(unknown='raise')
        super(HideEnumerationEventOptions, self).__init__(*args, **kwargs)
# Option sub-schemas for the event actions that accept options; actions not
# listed here ("hide", "disable") must not carry options at all.
EVENT_ACTION_OPTION_VALIDATORS = {
    'fail': FailEventOptions(),
    'hideEnumeration': HideEnumerationEventOptions(),
}
class Event(colander.SchemaNode):
    """A trigger expression plus the action to take when it fires."""
    trigger = Expression()
    action = EventAction()
    targets = EventTargetList(missing=colander.drop)
    options = Options(missing=colander.drop)
    def __init__(self, *args, **kwargs):
        kwargs['typ'] = colander.Mapping(unknown='raise')
        super(Event, self).__init__(*args, **kwargs)
    def validator(self, node, cstruct):
        # Actions with a registered option schema require matching options;
        # any other action must not specify options at all.
        action = cstruct.get('action', None)
        validator = EVENT_ACTION_OPTION_VALIDATORS.get(action, None)
        options = cstruct.get('options', None)
        if validator:
            sub_schema(
                validator,
                node.get('options'),
                options,
            )
        elif options is not None:
            raise ValidationError(
                node.get('options'),
                '"%s" events do not accept options' % action,
            )
class EventList(colander.SequenceSchema):
    """A non-empty sequence of Events."""
    event = Event()
    validator = colander.Length(min=1)
class QuestionList(colander.SchemaNode):
    """A non-empty sequence of sub-questions (matrix/recordList children)."""
    validator = colander.Length(min=1)
    def __init__(self, *args, **kwargs):
        kwargs['typ'] = colander.Sequence()
        super(QuestionList, self).__init__(*args, **kwargs)
        # Sub-questions may not themselves contain questions/rows, so the
        # nested schema is built with allow_complex=False.
        self.add(QuestionElementOptions(
            allow_complex=False,
            name='question',
        ))
class Descriptor(BaseDescriptor):
    """A base Descriptor extended with an optional audio source."""
    audio = AudioSource(missing=colander.drop)
class DescriptorList(colander.SequenceSchema):
    """A non-empty sequence of Descriptors."""
    descriptor = Descriptor()
    validator = colander.Length(min=1)
class QuestionElementOptions(colander.SchemaNode):
    """
    Options for "question" elements: the field addressed, its display
    strings, enumeration descriptors, widget, and events.
    """
    fieldId = IdentifierString()  # noqa: N815
    text = LocalizedString()
    audio = AudioSource(missing=colander.drop)
    help = LocalizedString(missing=colander.drop)
    error = LocalizedString(missing=colander.drop)
    enumerations = DescriptorList(missing=colander.drop)
    widget = Widget(missing=colander.drop)
    events = EventList(missing=colander.drop)
    def __init__(self, *args, **kwargs):
        # allow_complex controls whether nested 'questions' (recordList/
        # matrix sub-questions) and 'rows' (matrix rows) are permitted;
        # it is disabled for sub-questions to prevent deeper nesting.
        self.allow_complex = kwargs.pop('allow_complex', True)
        kwargs['typ'] = colander.Mapping(unknown='raise')
        super(QuestionElementOptions, self).__init__(*args, **kwargs)
        if self.allow_complex:
            self.add(QuestionList(
                name='questions',
                missing=colander.drop,
            ))
            self.add(DescriptorList(
                name='rows',
                missing=colander.drop,
            ))
# Option sub-schemas for the element types that accept options; "divider"
# is absent because it must not carry options.
ELEMENT_TYPE_OPTION_VALIDATORS = {
    'question': QuestionElementOptions(),
    'text': TextElementOptions(),
    'header': TextElementOptions(),
    'audio': AudioElementOptions(),
}
class Element(colander.SchemaNode):
    """A single page element: its type, type-specific options, and tags."""
    type = ElementType()
    options = Options(missing=colander.drop)
    tags = TagList(missing=colander.drop)
    def __init__(self, *args, **kwargs):
        kwargs['typ'] = colander.Mapping(unknown='raise')
        super(Element, self).__init__(*args, **kwargs)
    def validator(self, node, cstruct):
        # Element types with a registered option schema are checked against
        # it; other types (e.g. "divider") must not specify options.
        element_type = cstruct.get('type', None)
        validator = ELEMENT_TYPE_OPTION_VALIDATORS.get(element_type, None)
        options = cstruct.get('options', None)
        if validator:
            sub_schema(
                validator,
                node.get('options'),
                options,
            )
        elif options is not None:
            raise ValidationError(
                node.get('options'),
                '"%s" elements do not accept options' % element_type,
            )
        # Tags may not repeat within a single element.
        tags = cstruct.get('tags', [])
        if tags:
            duplicates = list(set([x for x in tags if tags.count(x) > 1]))
            if duplicates:
                raise ValidationError(
                    node.get('tags'),
                    'Tags can only be assigned to an element once:'
                    ' %s' % (
                        ', '.join(duplicates)
                    ),
                )
class ElementList(colander.SequenceSchema):
    """A non-empty sequence of Elements."""
    element = Element()
    validator = colander.Length(min=1)
class Page(colander.SchemaNode):
    """A Form page: a unique identifier and its list of elements."""
    id = IdentifierString()
    elements = ElementList()
    def __init__(self, *args, **kwargs):
        kwargs['typ'] = colander.Mapping(unknown='raise')
        super(Page, self).__init__(*args, **kwargs)
class PageList(colander.SequenceSchema):
    """A non-empty sequence of Pages with unique page IDs."""
    page = Page()
    def validator(self, node, cstruct):
        if len(cstruct) < 1:
            raise ValidationError(
                node,
                'Shorter than minimum length 1',
            )
        # Page IDs must be unique across the whole Form.
        ids = [page['id'] for page in cstruct]
        duplicates = list(set([x for x in ids if ids.count(x) > 1]))
        if duplicates:
            raise ValidationError(
                node,
                'Page IDs must be unique: %s' % ', '.join(duplicates),
            )
class ParameterType(colander.SchemaNode):
    """One of the allowed parameter types (see PARAMETER_TYPES_ALL)."""
    schema_type = colander.String
    validator = colander.OneOf(PARAMETER_TYPES_ALL)
class ParameterOptions(colander.SchemaNode):
    """Configuration of a single form parameter: currently just its type."""
    type = ParameterType()
    def __init__(self, *args, **kwargs):
        kwargs['typ'] = colander.Mapping(unknown='raise')
        super(ParameterOptions, self).__init__(*args, **kwargs)
class ParameterCollection(colander.SchemaNode):
    """Non-empty mapping of parameter identifiers to ParameterOptions."""
    def __init__(self, *args, **kwargs):
        # Keys are arbitrary parameter names, so preserve unknown keys.
        kwargs['typ'] = colander.Mapping(unknown='preserve')
        super(ParameterCollection, self).__init__(*args, **kwargs)
    def validator(self, node, cstruct):
        cstruct = cstruct or {}
        if not cstruct:
            raise ValidationError(
                node,
                'At least one key/value pair must be defined',
            )
        # Both the key (an identifier) and the value (options) are checked.
        for name, options in iteritems(cstruct):
            sub_schema(IdentifierString, node, name)
            sub_schema(ParameterOptions, node, options)
class Form(colander.SchemaNode):
    """
    Validates a RIOS Web Form Configuration.

    When constructed with an ``instrument`` keyword argument (a parsed
    Instrument Definition), additional cross-checks are performed to ensure
    the Form references, and fully covers, the Instrument's fields.
    """

    instrument = InstrumentReference()
    defaultLocalization = LanguageTag()  # noqa: N815
    title = LocalizedString(missing=colander.drop)
    pages = PageList()
    parameters = ParameterCollection(missing=colander.drop)
    meta = MetadataCollection(
        METADATA_PROPS,
        missing=colander.drop,
    )

    def __init__(self, *args, **kwargs):
        # An optional Instrument Definition to cross-validate against.
        self.instrument = kwargs.pop('instrument', None)
        kwargs['typ'] = colander.Mapping(unknown='raise')
        super(Form, self).__init__(*args, **kwargs)

    def validator(self, node, cstruct):
        """Localization checks always run; deeper checks need an instrument."""
        self._check_localizations(node, cstruct)
        if not self.instrument:
            self._standalone_checks(node, cstruct)
        else:
            self._instrument_checks(node, cstruct)

    def _check_tags(self, node, element, invalid_tags):
        # Tags must not collide with page IDs or instrument field IDs.
        if element.get('tags', []):
            tags = set(element['tags'])
            duped = invalid_tags & tags
            if duped:
                raise ValidationError(
                    node.get('tags'),
                    'Tag(s) are duplicates of existing'
                    ' identifiers: %s' % (
                        ', '.join(sorted(duped)),
                    )
                )

    def _standalone_checks(self, node, cstruct):
        # Without an instrument, only page IDs are reserved identifiers.
        invalid_tags = set([page['id'] for page in cstruct['pages']])
        for pidx, page in enumerate(cstruct['pages']):
            with guard_sequence(node, 'page', pidx) as enode:
                for eidx, element in enumerate(page['elements']):
                    with guard_sequence(enode, 'element', eidx) as onode:
                        self._check_tags(onode, element, invalid_tags)

    def _instrument_checks(self, node, cstruct):
        validate_instrument_version(
            self.instrument,
            cstruct,
            node.get('instrument'),
        )
        self._check_fields_covered(node, cstruct)
        # Both page IDs and instrument field IDs are reserved identifiers.
        invalid_tags = [page['id'] for page in cstruct['pages']]
        invalid_tags.extend([
            field['id']
            for field in self.instrument['record']
        ])
        invalid_tags = set(invalid_tags)
        for pidx, page in enumerate(cstruct['pages']):
            with guard_sequence(node, 'page', pidx) as enode:
                for eidx, element in enumerate(page['elements']):
                    with guard_sequence(enode, 'element', eidx) as onode:
                        self._check_tags(onode, element, invalid_tags)
                        if element['type'] != 'question':
                            continue
                        self._check_type_specifics(
                            onode.get('options'),
                            element.get('options', {}),
                        )

    def _check_localizations(self, node, cstruct):
        """Ensure all localized strings cover the default localization."""
        checker = LocalizationChecker(node, cstruct['defaultLocalization'])
        checker.ensure(cstruct, 'title', node=node.get('title'))

        def _ensure_element(element, subnode):
            # Recursively checks an element's (and sub-questions') strings.
            if 'options' not in element:
                return
            options = element['options']
            checker = LocalizationChecker(
                subnode.get('options'),
                cstruct['defaultLocalization'],
            )
            checker.ensure(options, 'text', scope='Element Text')
            checker.ensure(options, 'help', scope='Element Help')
            checker.ensure(options, 'error', scope='Element Error')
            checker.ensure(options, 'audio', scope='Element Audio')
            checker.ensure(options, 'source', scope='Audio Source')
            for question in options.get('questions', []):
                _ensure_element(question, subnode)
            for enumeration in options.get('enumerations', []):
                checker.ensure_descriptor(enumeration, scope='Enumeration')
            for row in options.get('rows', []):
                checker.ensure_descriptor(row, scope='Matrix Row')
            for event in options.get('events', []):
                checker.ensure(
                    event.get('options', {}),
                    'text',
                    scope='Event Text',
                    node=subnode.get('options').get('events'),
                )

        for pidx, page in enumerate(cstruct['pages']):
            with guard_sequence(node, 'page', pidx) as enode:
                for eidx, element in enumerate(page['elements']):
                    with guard_sequence(enode, 'element', eidx) as onode:
                        _ensure_element(element, onode)

    def _check_fields_covered(self, node, cstruct):
        """Ensure questions address all, and only, the instrument's fields."""
        instrument_fields = set([
            field['id']
            for field in self.instrument['record']
        ])
        form_fields = set()
        for pidx, page in enumerate(cstruct['pages']):
            with guard_sequence(node, 'page', pidx) as enode:
                for eidx, element in enumerate(page['elements']):
                    if element['type'] != 'question':
                        continue
                    with guard_sequence(enode, 'element', eidx):
                        field_id = element['options']['fieldId']
                        if field_id in form_fields:
                            raise ValidationError(
                                node,
                                'Field "%s" is addressed by more than one'
                                ' question' % (
                                    field_id,
                                )
                            )
                        form_fields.add(field_id)
        missing = instrument_fields - form_fields
        if missing:
            raise ValidationError(
                node,
                'There are Instrument fields which are missing: %s' % (
                    ', '.join(missing),
                )
            )
        extra = form_fields - instrument_fields
        if extra:
            raise ValidationError(
                node,
                'There are extra fields referenced by questions: %s' % (
                    ', '.join(extra),
                )
            )

    def _get_instrument_field_type(self, name, record=None):
        """Return the full type definition of the named field, or None."""
        record = record or self.instrument['record']
        for field in record:
            if field['id'] == name:
                return get_full_type_definition(
                    self.instrument,
                    field['type'],
                )
        return None

    def _check_type_specifics(self, node, options, record=None):
        """Validate a question's config against its field's type definition."""
        type_def = self._get_instrument_field_type(
            options['fieldId'],
            record=record,
        )
        if 'enumerations' in options:
            # Enumeration descriptors are only legal on enumeration types,
            # and each described choice must exist on the field.
            if type_def['base'] in ('enumeration', 'enumerationSet'):
                described_choices = [
                    desc['id']
                    for desc in options['enumerations']
                ]
                actual_choices = list(type_def['enumerations'].keys())
                for described_choice in described_choices:
                    if described_choice not in actual_choices:
                        raise ValidationError(
                            node,
                            'Field "%s" describes an invalid'
                            ' enumeration "%s"' % (
                                options['fieldId'],
                                described_choice,
                            ),
                        )
            else:
                raise ValidationError(
                    node,
                    'Field "%s" cannot have an enumerations'
                    ' configuration' % (
                        options['fieldId'],
                    ),
                )
        self._check_matrix(node, type_def, options)
        self._check_subquestions(node, type_def, options)
        self._check_widget_assignment(node, type_def, options)

    def _check_widget_assignment(self, node, type_def, options):
        """Ensure a standard widget is only used with a compatible type."""
        widget = options.get('widget', {}).get('type')
        if not widget:
            return
        if widget in STANDARD_WIDGET_DATATYPES \
                and type_def['base'] not in STANDARD_WIDGET_DATATYPES[widget]:
            raise ValidationError(
                node,
                'Standard widget "%s" cannot be used with fields of type'
                ' "%s"' % (
                    widget,
                    type_def['base'],
                ),
            )

    def _check_matrix(self, node, type_def, options):
        """Ensure matrix rows cover all, and only, the field's rows."""
        if type_def['base'] == 'matrix':
            instrument_rows = set([
                row['id']
                for row in type_def['rows']
            ])
            form_rows = set()
            for row in options.get('rows', []):
                if row['id'] in form_rows:
                    raise ValidationError(
                        node,
                        'Row %s is addressed by more than one descriptor in'
                        ' %s' % (
                            row['id'],
                            options['fieldId'],
                        ),
                    )
                form_rows.add(row['id'])
            missing = instrument_rows - form_rows
            if missing:
                raise ValidationError(
                    node,
                    'There are missing rows in %s: %s' % (
                        options['fieldId'],
                        ', '.join(missing),
                    )
                )
            extra = form_rows - instrument_rows
            if extra:
                raise ValidationError(
                    node,
                    'There are extra rows referenced by %s: %s' % (
                        options['fieldId'],
                        ', '.join(extra),
                    )
                )
        elif 'rows' in options:
            raise ValidationError(
                node,
                'Field "%s" cannot have a rows configuration' % (
                    options['fieldId'],
                ),
            )

    def _check_subquestions(self, node, type_def, options):
        """Ensure sub-questions cover all, and only, the sub-record fields."""
        if type_def['base'] in ('matrix', 'recordList'):
            record = type_def[
                'columns' if type_def['base'] == 'matrix' else 'record'
            ]
            instrument_fields = set([field['id'] for field in record])
            form_fields = set()
            for subfield in options.get('questions', []):
                if subfield['fieldId'] in form_fields:
                    raise ValidationError(
                        node,
                        'Subfield %s is addressed by more than one question in'
                        ' %s' % (
                            subfield['fieldId'],
                            options['fieldId'],
                        ),
                    )
                form_fields.add(subfield['fieldId'])
            missing = instrument_fields - form_fields
            if missing:
                raise ValidationError(
                    node,
                    'There are missing subfields in %s: %s' % (
                        options['fieldId'],
                        ', '.join(missing),
                    )
                )
            extra = form_fields - instrument_fields
            if extra:
                raise ValidationError(
                    node,
                    'There are extra subfields referenced by %s: %s' % (
                        options['fieldId'],
                        ', '.join(extra),
                    )
                )
            # .get() here for consistency with the iteration above; avoids a
            # KeyError if 'questions' is absent but the coverage checks pass.
            for question in options.get('questions', []):
                self._check_type_specifics(node, question, record=record)
        elif 'questions' in options:
            raise ValidationError(
                node,
                'Field "%s" cannot have a questions configuration' % (
                    options['fieldId'],
                ),
            )
import colander
from six import PY3
from .common import ValidationError, sub_schema, Options, \
validate_instrument_version, StrictBooleanType, MetadataCollection, \
RE_PRODUCT_TOKENS
from .instrument import InstrumentReference, IdentifierString, Description
# HTSQL is only importable under Python 2; when available, "htsql" method
# expressions can be syntax-checked, otherwise they are accepted as-is.
CAN_CHECK_HTSQL = False
if not PY3:
    try:
        from htsql import HTSQL
        from htsql.core.error import Error as HtsqlError
        from htsql.core.syn.parse import parse as parse_htsql
    except ImportError: # pragma: no cover
        pass
    else:
        CAN_CHECK_HTSQL = True
# Explicit public API of this module.
__all__ = (
    'RESULT_TYPES',
    'METHODS_ALL',
    'METADATA_PROPS',
    'CalculationResultType',
    'CalculationMethod',
    'Expression',
    'PythonOptions',
    'HtsqlOptions',
    'Calculation',
    'CalculationList',
    'CalculationSet',
)
# Data types a calculation result is allowed to produce.
RESULT_TYPES = (
    'text',
    'integer',
    'float',
    'boolean',
    'date',
    'time',
    'dateTime',
)
# Supported calculation implementation methods.
METHODS_ALL = (
    'python',
    'htsql',
)
# Validators for the standard properties allowed in the "meta" mapping.
METADATA_PROPS = {
    'author': colander.SchemaNode(
        colander.String(),
    ),
    'copyright': colander.SchemaNode(
        colander.String(),
    ),
    'homepage': colander.SchemaNode(
        colander.String(),
        validator=colander.url,
    ),
    'generator': colander.SchemaNode(
        colander.String(),
        validator=colander.Regex(RE_PRODUCT_TOKENS),
    ),
}
# Lazily-created HTSQL instance shared by get_htsql().
_HTSQL = None
def get_htsql():
    """Return a cached in-memory HTSQL instance, or None if unavailable."""
    global _HTSQL  # pylint: disable=global-statement
    if not _HTSQL and CAN_CHECK_HTSQL:
        # An in-memory SQLite backend is sufficient for syntax parsing.
        _HTSQL = HTSQL({'engine': 'sqlite', 'database': ':memory:'})
    return _HTSQL
# pylint: disable=abstract-method
class CalculationResultType(colander.SchemaNode):
    """One of the allowed calculation result types (see RESULT_TYPES)."""
    schema_type = colander.String
    validator = colander.OneOf(RESULT_TYPES)
class CalculationMethod(colander.SchemaNode):
    """One of the allowed calculation methods (see METHODS_ALL)."""
    schema_type = colander.String
    validator = colander.OneOf(METHODS_ALL)
class Expression(colander.SchemaNode):
    """A calculation expression, held as an opaque string."""
    schema_type = colander.String
class PythonOptions(colander.SchemaNode):
    """
    Options for "python" calculations: exactly one of an inline
    ``expression`` or the dotted name of a ``callable``.
    """
    expression = Expression(missing=colander.drop)
    callable = Expression(missing=colander.drop)
    def __init__(self, *args, **kwargs):
        kwargs['typ'] = colander.Mapping(unknown='raise')
        super(PythonOptions, self).__init__(*args, **kwargs)
    def validator(self, node, cstruct):
        # Exactly one of the two options must be present (XOR).
        if ('expression' in cstruct) == ('callable' in cstruct):
            raise ValidationError(
                node,
                'Exactly one option of "expression" or "callable" must be'
                ' specified',
            )
        # Syntax-check inline expressions. Only done under Python 2; the
        # expression is never executed here, only compiled.
        expr = cstruct.get('expression', None)
        if expr and not PY3:
            try:
                compile(expr, '<string>', 'eval') # noqa: DUO110
            except SyntaxError as exc:
                raise ValidationError(
                    node.get('expression'),
                    'The Python expression "%s" is invalid: %s' % (
                        expr,
                        exc,
                    ),
                )
class HtsqlOptions(colander.SchemaNode):
    """Options for "htsql" calculations: a required HTSQL expression."""
    expression = Expression()
    def __init__(self, *args, **kwargs):
        kwargs['typ'] = colander.Mapping(unknown='raise')
        super(HtsqlOptions, self).__init__(*args, **kwargs)
    def validator(self, node, cstruct):
        # Syntax-check the expression when an HTSQL instance is available
        # (Python 2 with htsql installed); otherwise accept it as-is.
        expr = cstruct.get('expression', None)
        if expr:
            htsql = get_htsql()
            if htsql:
                try:
                    with htsql:
                        parse_htsql(expr)
                except HtsqlError as exc:
                    raise ValidationError(
                        node.get('expression'),
                        'The HTSQL expression "%s" is invalid: %s' % (
                            expr,
                            exc,
                        ),
                    )
# Option sub-schemas for each calculation method.
METHOD_OPTION_VALIDATORS = {
    'python': PythonOptions(),
    'htsql': HtsqlOptions(),
}
class Calculation(colander.SchemaNode):
    """A single calculation: ID, result type, method, and method options."""
    id = IdentifierString()
    description = Description()
    identifiable = colander.SchemaNode(
        StrictBooleanType(),
        missing=colander.drop,
    )
    type = CalculationResultType()
    method = CalculationMethod()
    options = Options(missing=colander.drop)
    def __init__(self, *args, **kwargs):
        kwargs['typ'] = colander.Mapping(unknown='raise')
        super(Calculation, self).__init__(*args, **kwargs)
    def validator(self, node, cstruct):
        # Methods with a registered option schema are checked against it;
        # any other method must not specify options at all.
        method = cstruct.get('method', None)
        validator = METHOD_OPTION_VALIDATORS.get(method, None)
        options = cstruct.get('options', None)
        if validator:
            sub_schema(
                validator,
                node.get('options'),
                options,
            )
        elif options is not None:
            raise ValidationError(
                node.get('options'),
                'The "%s" method does not accept options' % method,
            )
class CalculationList(colander.SequenceSchema):
    """A non-empty sequence of Calculations with unique IDs."""
    calculation = Calculation()
    def validator(self, node, cstruct):
        if len(cstruct) < 1:
            raise ValidationError(
                node,
                'Shorter than minimum length 1',
            )
        # Calculation IDs must be unique across the whole set.
        ids = [calculation['id'] for calculation in cstruct]
        duplicates = list(set([x for x in ids if ids.count(x) > 1]))
        if duplicates:
            raise ValidationError(
                node,
                'Calculation IDs must be unique: ' + ', '.join(duplicates),
            )
class CalculationSet(colander.SchemaNode):
    """
    Validates a RIOS Calculation Set Definition.

    When constructed with an ``instrument`` keyword argument (a parsed
    Instrument Definition), additional cross-checks are performed against
    the Instrument's fields.
    """

    instrument = InstrumentReference()
    calculations = CalculationList()
    meta = MetadataCollection(
        METADATA_PROPS,
        missing=colander.drop,
    )

    def __init__(self, *args, **kwargs):
        # An optional Instrument Definition to cross-validate against.
        self.instrument = kwargs.pop('instrument', None)
        kwargs['typ'] = colander.Mapping(unknown='raise')
        super(CalculationSet, self).__init__(*args, **kwargs)

    def validator(self, node, cstruct):
        if not self.instrument:
            return
        validate_instrument_version(
            self.instrument,
            cstruct,
            node.get('instrument'),
        )
        # Calculation IDs share a namespace with the Instrument's fields,
        # so they must not collide with any field ID.
        calculation_ids = set([
            calc['id']
            for calc in cstruct['calculations']
        ])
        instrument_ids = set([
            field['id']
            for field in self.instrument['record']
        ])
        duped = calculation_ids & instrument_ids
        if duped:
            raise ValidationError(
                node.get('calculations'),
                'Calculation IDs cannot be the same as Instrument Field IDs:'
                ' %s' % (
                    ', '.join(duped),
                ),
            )
import re
from copy import deepcopy
from datetime import datetime
import colander
from six import iteritems, string_types, integer_types
from .common import ValidationError, sub_schema, AnyType, LanguageTag, \
validate_instrument_version, MetadataCollection, RE_PRODUCT_TOKENS
from .instrument import InstrumentReference, IdentifierString, \
get_full_type_definition
# Explicit public API of this module.
__all__ = (
    'METADATA_PROPS_ASSESSMENT',
    'METADATA_PROPS_VALUE',
    'ValueCollection',
    'Assessment',
)
# Patterns for the RIOS date/time value formats: YYYY-MM-DD, HH:MM:SS,
# and the combined YYYY-MM-DDTHH:MM:SS form.
RE_DATE = re.compile(r'^\d{4}-\d{2}-\d{2}$')
RE_TIME = re.compile(r'^\d{2}:\d{2}:\d{2}$')
RE_DATETIME = re.compile(r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}$')
# Validators for the standard "meta" properties on an Assessment document.
METADATA_PROPS_ASSESSMENT = {
    'language': LanguageTag(),
    'application': colander.SchemaNode(
        colander.String(),
        validator=colander.Regex(RE_PRODUCT_TOKENS),
    ),
    'dateCompleted': colander.SchemaNode(
        colander.DateTime(),
    ),
    'timeTaken': colander.SchemaNode(
        colander.Integer(),
    ),
}
# Validators for the standard "meta" properties on an individual value.
METADATA_PROPS_VALUE = {
    'timeTaken': colander.SchemaNode(
        colander.Integer(),
    ),
}
# pylint: disable=abstract-method
class Value(colander.SchemaNode):
    """A single Assessment field value with optional annotations/metadata."""
    value = colander.SchemaNode(
        AnyType(),
    )
    explanation = colander.SchemaNode(
        colander.String(),
        missing=colander.drop,
    )
    annotation = colander.SchemaNode(
        colander.String(),
        missing=colander.drop,
    )
    meta = MetadataCollection(
        METADATA_PROPS_VALUE,
        missing=colander.drop,
    )
    def __init__(self, *args, **kwargs):
        kwargs['typ'] = colander.Mapping(unknown='raise')
        super(Value, self).__init__(*args, **kwargs)
    def validator(self, node, cstruct):
        if isinstance(cstruct['value'], list):
            # A list value must be homogeneous: all strings (enumerationSet)
            # or all ValueCollections (recordList). Try each sub-schema in
            # turn; the for/else's `return` fires only when every element
            # validated against the current sub-schema.
            for subtype in (
                    colander.SchemaNode(colander.String()),
                    ValueCollection):
                for value in cstruct['value']:
                    try:
                        sub_schema(subtype, node, value)
                    except ValidationError:
                        break
                else:
                    return
            raise ValidationError(
                node,
                'Lists must be consist only of Strings or ValueCollections',
            )
        if isinstance(cstruct['value'], dict):
            # A mapping value is a matrix: row ID -> ValueCollection.
            sub_schema(ValueCollectionMapping, node, cstruct['value'])
class ValueCollection(colander.SchemaNode):
    """Non-empty mapping of field identifiers to their Values."""
    def __init__(self, *args, **kwargs):
        # Keys are arbitrary field IDs, so preserve unknown keys.
        kwargs['typ'] = colander.Mapping(unknown='preserve')
        super(ValueCollection, self).__init__(*args, **kwargs)
    def validator(self, node, cstruct):
        cstruct = cstruct or {}
        if not cstruct:
            raise ValidationError(
                node,
                'At least one Value must be defined',
            )
        # Both the key (an identifier) and the value are checked.
        for field_id, value in iteritems(cstruct):
            sub_schema(IdentifierString, node, field_id)
            sub_schema(Value, node, value)
class ValueCollectionMapping(colander.SchemaNode):
    """Non-empty mapping of matrix row identifiers to ValueCollections."""
    def __init__(self, *args, **kwargs):
        # Keys are arbitrary row IDs, so preserve unknown keys.
        kwargs['typ'] = colander.Mapping(unknown='preserve')
        super(ValueCollectionMapping, self).__init__(*args, **kwargs)
    def validator(self, node, cstruct):
        cstruct = cstruct or {}
        if not cstruct:
            raise ValidationError(
                node,
                'At least one Row must be defined',
            )
        # Both the key (an identifier) and the row's values are checked.
        for field_id, values in iteritems(cstruct):
            sub_schema(IdentifierString, node, field_id)
            sub_schema(ValueCollection, node, values)
# Maps each base RIOS data type to a predicate that checks whether a raw
# Assessment value has the appropriate Python type/format.
VALUE_TYPE_CHECKS = {
    'integer': lambda val: isinstance(val, integer_types),
    # Integers are acceptable wherever floats are expected.
    'float': lambda val: isinstance(val, (float,) + integer_types),
    'text': lambda val: isinstance(val, string_types),
    'enumeration': lambda val: isinstance(val, string_types),
    'boolean': lambda val: isinstance(val, bool),
    # Date/time values are transmitted as ISO-8601-formatted strings.
    'date': lambda val: isinstance(val, string_types) and RE_DATE.match(val),
    'time': lambda val: isinstance(val, string_types) and RE_TIME.match(val),
    'dateTime':
        lambda val: isinstance(val, string_types) and RE_DATETIME.match(val),
    'enumerationSet': lambda val: isinstance(val, list),
    'recordList': lambda val: isinstance(val, list),
    'matrix': lambda val: isinstance(val, dict),
}
class Assessment(colander.SchemaNode):
    """
    Validates a RIOS Assessment Document, optionally cross-checking it
    against the Instrument Definition it claims to implement.
    """
    instrument = InstrumentReference()
    meta = MetadataCollection(
        METADATA_PROPS_ASSESSMENT,
        missing=colander.drop,
    )
    values = ValueCollection()
    def __init__(self, *args, **kwargs):
        # The Instrument (dict) to validate against; when None, only the
        # basic structure of the Assessment is checked.
        self.instrument = kwargs.pop('instrument', None)
        kwargs['typ'] = colander.Mapping(unknown='raise')
        super(Assessment, self).__init__(*args, **kwargs)
    def validator(self, node, cstruct):
        if not self.instrument:
            return
        validate_instrument_version(
            self.instrument,
            cstruct,
            node.get('instrument'),
        )
        self.check_has_all_fields(
            node.get('values'),
            cstruct['values'],
            self.instrument['record'],
        )
    def check_has_all_fields(self, node, values, fields):
        """
        Ensure ``values`` contains exactly one entry per Instrument field,
        and recursively validate each value against its field definition.
        """
        # Work on a copy so entries can be popped off as they are matched.
        values = deepcopy(values)
        if not isinstance(values, dict):
            raise ValidationError(
                node,
                'Value expected to contain a mapping: %s' % values
            )
        for field in fields:
            value = values.pop(field['id'], None)
            fid = field['id']
            if value is None:
                raise ValidationError(
                    node,
                    'No value exists for field ID "%s"' % fid,
                )
            if value['value'] is None and field.get('required', False):
                raise ValidationError(
                    node,
                    'No value present for required field ID "%s"' % fid,
                )
            full_type_def = get_full_type_definition(
                self.instrument,
                field['type'],
            )
            self._check_value_type(node, value['value'], field, full_type_def)
            self._check_value_constraints(
                node,
                value['value'],
                field,
                full_type_def,
            )
            self._check_metafields(node, value, field)
            self._check_complex_subfields(node, full_type_def, value)
        # Anything left over references a field the Instrument doesn't have.
        if values:
            raise ValidationError(
                node,
                'Unknown field IDs found: %s' % ', '.join(list(values.keys())),
            )
    def _check_value_type(self, node, value, field, type_def):
        """Check the raw value is the right Python type for its base type."""
        if value is None:
            return
        wrong_type_error = ValidationError(
            node,
            'Value for "%s" is not of the correct type' % (
                field['id'],
            ),
        )
        bad_choice_error = ValidationError(
            node,
            'Value for "%s" is not an accepted enumeration' % (
                field['id'],
            ),
        )
        # Basic checks
        if not VALUE_TYPE_CHECKS[type_def['base']](value):
            raise wrong_type_error
        # Deeper checks
        if type_def['base'] == 'enumerationSet':
            choices = list(type_def['enumerations'].keys())
            for subval in value:
                if not isinstance(subval, string_types):
                    raise wrong_type_error
                if subval not in choices:
                    raise bad_choice_error
        elif type_def['base'] == 'enumeration':
            choices = list(type_def['enumerations'].keys())
            if value not in choices:
                raise bad_choice_error
    def _check_value_constraints(self, node, value, field, type_def):
        """Enforce the pattern/length/range constraints on a value."""
        if value is None:
            return
        if type_def.get('pattern'):
            regex = re.compile(type_def['pattern'])
            if not regex.match(value):
                raise ValidationError(
                    node,
                    'Value for "%s" does not match the specified pattern' % (
                        field['id'],
                    ),
                )
        if type_def.get('length'):
            if type_def['length'].get('min') is not None \
                    and len(value) < type_def['length']['min']:
                raise ValidationError(
                    node,
                    'Value for "%s" is less than acceptible minimum'
                    ' length' % (
                        field['id'],
                    ),
                )
            if type_def['length'].get('max') is not None \
                    and len(value) > type_def['length']['max']:
                raise ValidationError(
                    node,
                    'Value for "%s" is greater than acceptible maximum'
                    ' length' % (
                        field['id'],
                    ),
                )
        if type_def.get('range'):
            casted_value = self._cast_range(value, type_def['base'])
            # Only cast a bound when it is actually present; the previous
            # code indexed type_def['range']['min']/['max'] unconditionally
            # and raised a KeyError when only one bound was specified
            # (BoundConstraint permits either bound to be omitted).
            if type_def['range'].get('min') is not None:
                casted_min = self._cast_range(
                    type_def['range']['min'],
                    type_def['base'],
                )
                if casted_value < casted_min:
                    raise ValidationError(
                        node,
                        'Value for "%s" is less than acceptible minimum' % (
                            field['id'],
                        ),
                    )
            if type_def['range'].get('max') is not None:
                casted_max = self._cast_range(
                    type_def['range']['max'],
                    type_def['base'],
                )
                if casted_value > casted_max:
                    raise ValidationError(
                        node,
                        'Value for "%s" is greater than acceptible maximum' % (
                            field['id'],
                        ),
                    )
    def _cast_range(self, value, type_base):
        """Convert a raw value into a comparable object for range checks."""
        if type_base in ('integer', 'float'):
            return value
        if type_base == 'date':
            return datetime.strptime(value, '%Y-%m-%d').date()
        if type_base == 'dateTime':
            return datetime.strptime(value, '%Y-%m-%dT%H:%M:%S')
        if type_base == 'time':
            return datetime.strptime(value, '%H:%M:%S').time()
        return None
    def _check_metafields(self, node, value, field):
        """Enforce the field's explanation/annotation requirements."""
        explanation = field.get('explanation', 'none')
        fid = field['id']
        if 'explanation' in value \
                and value['explanation'] is not None \
                and explanation == 'none':
            raise ValidationError(
                node,
                'Explanation present where not allowed in field ID "%s"' % fid,
            )
        if 'explanation' not in value and explanation == 'required':
            raise ValidationError(
                node,
                'Explanation missing for field ID "%s"' % fid,
            )
        annotation = field.get('annotation', 'none')
        if 'annotation' in value and value['annotation'] is not None:
            if annotation == 'none':
                raise ValidationError(
                    node,
                    'Annotation present where not allowed: %s' % fid,
                )
            # Annotations may only explain why a value is missing.
            if value['value'] is not None:
                raise ValidationError(
                    node,
                    'Annotation provided for non-empty value: %s' % fid,
                )
        elif 'annotation' not in value \
                and annotation == 'required' \
                and value['value'] is None:
            raise ValidationError(
                node,
                'Annotation missing for field ID "%s"' % fid,
            )
    def _check_complex_subfields(self, node, full_type_def, value):
        """Recurse into recordList and matrix values."""
        if value['value'] is None:
            return
        if 'record' in full_type_def:
            for rec in value['value']:
                self.check_has_all_fields(
                    node,
                    rec,
                    full_type_def['record'],
                )
        elif 'rows' in full_type_def:
            self._check_matrix_subfields(node, full_type_def, value)
    def _check_matrix_subfields(self, node, full_type_def, value):
        """Validate a matrix value row-by-row and column-by-column."""
        for row in full_type_def['rows']:
            row_value = value['value'].pop(row['id'], None)
            if row_value is None:
                raise ValidationError(
                    node,
                    'Missing values for row ID "%s"' % row['id'],
                )
            # Make sure all the columns exist.
            columns = set([
                column['id']
                for column in full_type_def['columns']
            ])
            existing_columns = set(row_value.keys())
            missing_columns = columns - existing_columns
            if missing_columns:
                raise ValidationError(
                    node,
                    'Row ID "%s" is missing values for columns: %s' % (
                        row['id'],
                        ', '.join(list(missing_columns)),
                    ),
                )
            extra_columns = existing_columns - columns
            if extra_columns:
                raise ValidationError(
                    node,
                    'Row ID "%s" contains unknown column IDs: %s' % (
                        row['id'],
                        ', '.join(list(extra_columns)),
                    ),
                )
            # Enforce row requirements.
            columns_with_values = [
                column
                for column in existing_columns
                if row_value[column]['value'] is not None
            ]
            if row.get('required', False) and not columns_with_values:
                raise ValidationError(
                    node,
                    'Row ID "%s" requires at least one column with a'
                    ' value' % (
                        row['id'],
                    ),
                )
            # Enforce column requirements.
            required_columns = [
                column['id']
                for column in full_type_def['columns']
                if column.get('required', False)
            ]
            if required_columns and columns_with_values:
                missing = set(required_columns) - set(columns_with_values)
                if missing:
                    raise ValidationError(
                        node,
                        'Row ID "%s" is missing values for columns: %s' % (
                            row['id'],
                            ', '.join(list(missing)),
                        ),
                    )
            for column in full_type_def['columns']:
                type_def = get_full_type_definition(
                    self.instrument,
                    column['type'],
                )
                self._check_value_type(
                    node,
                    row_value[column['id']]['value'],
                    column,
                    type_def,
                )
                self._check_value_constraints(
                    node,
                    row_value[column['id']]['value'],
                    column,
                    type_def,
                )
        if value['value']:
            raise ValidationError(
                node,
                'Unknown row IDs found: %s' % (
                    ', '.join(list(value['value'].keys())),
                ),
            )
import re
from copy import deepcopy
import colander
from six import iteritems, iterkeys, string_types
from .common import ValidationError, RE_IDENTIFIER, IdentifierString, \
sub_schema, AnyType, OneOfType, StrictBooleanType, OptionalStringType, \
MetadataCollection, RE_PRODUCT_TOKENS
__all__ = (
    'TYPES_SIMPLE',
    'TYPES_COMPLEX',
    'TYPES_ALL',
    'CONSTRAINTS_ALL',
    'TYPES_CONSTRAINED',
    'TYPES_CONSTRAINED_REQUIRED',
    'RE_ENUMERATION_ID',
    'METADATA_PROPS',
    'get_full_type_definition',
    'InstrumentIdentifier',
    'InstrumentReference',
    'Version',
    'Description',
    'EnumerationIdentifier',
    'Enumeration',
    'EnumerationCollection',
    'BoundConstraint',
    'IntegerBoundConstraint',
    'Column',
    'ColumnCollection',
    'Row',
    'RowCollection',
    'TypeDefinition',
    'RequiredOptionalField',
    'InstrumentTypes',
    'FieldType',
    'Field',
    'Record',
    'Instrument',
)
# The scalar base data types built into RIOS.
TYPES_SIMPLE = (
    'text',
    'integer',
    'float',
    'boolean',
    'enumeration',
    'enumerationSet',
    'date',
    'time',
    'dateTime',
)
# Base types whose values contain nested sub-values.
TYPES_COMPLEX = (
    'recordList',
    'matrix',
)
TYPES_ALL = TYPES_SIMPLE + TYPES_COMPLEX
# Every constraint property a type definition may carry.
CONSTRAINTS_ALL = (
    'range',
    'length',
    'pattern',
    'enumerations',
    'record',
    'columns',
    'rows',
)
# Maps each base type to the constraints it is allowed to use.
TYPES_CONSTRAINED = {
    'integer': [
        'range',
    ],
    'float': [
        'range',
    ],
    'date': [
        'range',
    ],
    'time': [
        'range',
    ],
    'dateTime': [
        'range',
    ],
    'text': [
        'length',
        'pattern',
    ],
    'enumeration': [
        'enumerations',
    ],
    'enumerationSet': [
        'length',
        'enumerations',
    ],
    'recordList': [
        'length',
        'record',
    ],
    'matrix': [
        'rows',
        'columns',
    ],
}
# Maps each base type to the constraints it MUST define.
TYPES_CONSTRAINED_REQUIRED = {
    'enumeration': [
        'enumerations',
    ],
    'enumerationSet': [
        'enumerations',
    ],
    'recordList': [
        'record',
    ],
    'matrix': [
        'rows',
        'columns',
    ],
}
# Schema types used to re-validate range bounds once the concrete base
# type of a field is known (see Instrument._check_range_constraints).
RANGE_CONSTRAINT_TYPES = {
    'integer': colander.Integer(),
    'float': colander.Float(),
    'date': colander.Date(),
    'time': colander.Time(),
    'dateTime': colander.DateTime(),
}
# Metadata properties accepted at the top level of an Instrument.
METADATA_PROPS = {
    'author': colander.SchemaNode(
        colander.String(),
    ),
    'copyright': colander.SchemaNode(
        colander.String(),
    ),
    'homepage': colander.SchemaNode(
        colander.String(),
        validator=colander.url,
    ),
    'generator': colander.SchemaNode(
        colander.String(),
        validator=colander.Regex(RE_PRODUCT_TOKENS),
    ),
}
# Versions look like "major.minor" with no leading zeros.
RE_VERSION = re.compile(r'(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)')
# Enumeration IDs: lowercase alphanumerics; single inner "-"/"_"
# separators are allowed but may not repeat or appear at the edges.
RE_ENUMERATION_ID = re.compile(
    r'^(?:[a-z0-9]{1,2}|[a-z0-9](?:[a-z0-9]|[_-](?![_-]))+[a-z0-9])$'
)
# pylint: disable=abstract-method
class Uri(object):
    """Validator callable that checks a value resembles a URI with a scheme."""

    # From https://tools.ietf.org/html/rfc3986#appendix-B
    RE_ID = re.compile(
        r'^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?'
    )

    def __call__(self, node, value):
        parsed = self.RE_ID.match(value)
        if parsed is None:
            raise colander.Invalid(node, 'Value does not resemble a URI')
        scheme = parsed.group(2)
        if not scheme:
            raise colander.Invalid(node, 'No scheme specified in URI')
class InstrumentIdentifier(colander.SchemaNode):
    # Instrument IDs must be URIs that include a scheme.
    schema_type = colander.String
    validator = Uri()
class Version(colander.SchemaNode):
    # Instrument versions follow a "major.minor" numbering scheme.
    schema_type = colander.String
    validator = colander.Regex(RE_VERSION)
class InstrumentReference(colander.SchemaNode):
    """A reference to a specific Instrument ID and Version."""
    id = InstrumentIdentifier()
    version = Version()
    def __init__(self, *args, **kwargs):
        kwargs['typ'] = colander.Mapping(unknown='raise')
        super(InstrumentReference, self).__init__(*args, **kwargs)
class Description(colander.SchemaNode):
    # An optional, non-empty, human-readable description string.
    schema_type = OptionalStringType
    validator = colander.Length(min=1)
    missing = colander.drop
class Enumeration(colander.SchemaNode):
    """The definition of a single enumeration choice."""
    description = Description()
    def __init__(self, *args, **kwargs):
        kwargs['typ'] = colander.Mapping(unknown='raise')
        super(Enumeration, self).__init__(*args, **kwargs)
class EnumerationIdentifier(colander.SchemaNode):
    # Enumeration IDs must match RE_ENUMERATION_ID (lowercase, inner -/_).
    schema_type = colander.String
    validator = colander.Regex(RE_ENUMERATION_ID)
class EnumerationCollection(colander.SchemaNode):
    """A non-empty mapping of enumeration IDs to their definitions."""

    def __init__(self, *args, **kwargs):
        kwargs['typ'] = colander.Mapping(unknown='preserve')
        super(EnumerationCollection, self).__init__(*args, **kwargs)

    def validator(self, node, cstruct):
        """Require at least one enumeration and validate each entry."""
        mapping = cstruct or {}
        if not mapping:
            raise ValidationError(
                node,
                'At least one Enumeration must be defined',
            )
        for identifier, definition in iteritems(mapping):
            sub_schema(EnumerationIdentifier, node, identifier)
            # A null definition is allowed (ID only, no description).
            if definition is not None:
                sub_schema(Enumeration, node, definition)
class BoundConstraint(colander.SchemaNode):
    """
    A mapping carrying optional "min" and/or "max" bounds of a given type.
    """
    def __init__(self, schema_type=None, **kwargs):
        # Remember whether a concrete schema_type was supplied; only then
        # is the min <= max ordering check performed in validator().
        self.schema_type = schema_type
        schema_type = schema_type or AnyType()
        super(BoundConstraint, self).__init__(
            colander.Mapping(unknown='raise'),
            colander.SchemaNode(
                schema_type,
                name='min',
                missing=colander.drop,
            ),
            colander.SchemaNode(
                schema_type,
                name='max',
                missing=colander.drop,
            ),
            **kwargs
        )
    def validator(self, node, cstruct):
        # Either bound may be omitted, but not both.
        if len(cstruct) < 1:
            raise ValidationError(
                node,
                'At least one bound must be specified',
            )
        if self.schema_type:
            min_value = cstruct.get('min', None)
            max_value = cstruct.get('max', None)
            if min_value is not None \
                    and max_value is not None \
                    and min_value > max_value:
                raise ValidationError(
                    node,
                    'The minimum bound must be lower than'
                    ' the maximum: %s < %s' % (min_value, max_value),
                )
class IntegerBoundConstraint(BoundConstraint):
    """A BoundConstraint whose bounds must be integers (e.g. lengths)."""
    def __init__(self, *args, **kwargs):
        super(IntegerBoundConstraint, self).__init__(
            *args,
            schema_type=colander.Integer(),
            **kwargs
        )
class FieldType(colander.SchemaNode):
    """
    A Field's type: either the name of a base/custom type, or an inline
    type definition object.
    """
    def __init__(self, *args, **kwargs):
        kwargs['typ'] = OneOfType(
            colander.String,
            colander.Mapping(unknown='preserve'),
        )
        super(FieldType, self).__init__(*args, **kwargs)
    def validator(self, node, cstruct):
        if isinstance(cstruct, string_types):
            # A string must name a base type or look like a custom type ID.
            if cstruct not in TYPES_ALL \
                    and not RE_IDENTIFIER.match(cstruct):
                raise ValidationError(
                    node,
                    '"%r" is not a valid type identifier' % (cstruct,),
                )
        else:
            sub_schema(TypeDefinition, node, cstruct)
class Column(colander.SchemaNode):
    """The definition of a single matrix column."""
    id = IdentifierString()
    description = Description()
    required = colander.SchemaNode(
        StrictBooleanType(),
        missing=colander.drop,
    )
    identifiable = colander.SchemaNode(
        StrictBooleanType(),
        missing=colander.drop,
    )
    def __init__(self, *args, **kwargs):
        kwargs['typ'] = colander.Mapping(unknown='raise')
        super(Column, self).__init__(*args, **kwargs)
        # "type" would shadow the builtin as a class attribute, so the
        # node is added dynamically instead.
        self.add(FieldType(name='type'))
class ColumnCollection(colander.SequenceSchema):
    """A non-empty sequence of Columns with unique identifiers."""

    column = Column()

    def validator(self, node, cstruct):
        """Reject empty collections and repeated column IDs."""
        if not cstruct:
            raise ValidationError(
                node,
                'Shorter than minimum length 1',
            )
        identifiers = [entry['id'] for entry in cstruct]
        repeated = list(set(
            identifier
            for identifier in identifiers
            if identifiers.count(identifier) > 1
        ))
        if repeated:
            raise ValidationError(
                node,
                'Column IDs must be unique within a collection:'
                ' %s' % ', '.join(repeated),
            )
class Row(colander.SchemaNode):
    """The definition of a single matrix row."""
    id = IdentifierString()
    description = Description()
    required = colander.SchemaNode(
        StrictBooleanType(),
        missing=colander.drop,
    )
    def __init__(self, *args, **kwargs):
        kwargs['typ'] = colander.Mapping(unknown='raise')
        super(Row, self).__init__(*args, **kwargs)
class RowCollection(colander.SequenceSchema):
    """A non-empty sequence of Rows with unique identifiers."""

    row = Row()

    def validator(self, node, cstruct):
        """Reject empty collections and repeated row IDs."""
        if not cstruct:
            raise ValidationError(
                node,
                'Shorter than minimum length 1',
            )
        identifiers = [entry['id'] for entry in cstruct]
        repeated = list(set(
            identifier
            for identifier in identifiers
            if identifiers.count(identifier) > 1
        ))
        if repeated:
            raise ValidationError(
                node,
                'Row IDs must be unique within a collection:'
                ' %s' % ', '.join(repeated),
            )
class RangeConstraint(BoundConstraint):
    """
    A BoundConstraint whose bound type is left unchecked here; the bounds
    are re-validated once the field's concrete base type is known (see
    Instrument._check_range_constraints).
    """
    def __init__(self, *args, **kwargs):
        super(RangeConstraint, self).__init__(
            *args,
            **kwargs
        )
class TypeDefinition(colander.SchemaNode):
    """An inline type definition extending a base or custom type."""
    base = colander.SchemaNode(colander.String())
    range = RangeConstraint(missing=colander.drop)
    length = IntegerBoundConstraint(missing=colander.drop)
    pattern = colander.SchemaNode(
        colander.String(),
        missing=colander.drop,
    )
    enumerations = EnumerationCollection(missing=colander.drop)
    columns = ColumnCollection(missing=colander.drop)
    rows = RowCollection(missing=colander.drop)
    def __init__(self, *args, **kwargs):
        kwargs['typ'] = colander.Mapping(unknown='raise')
        super(TypeDefinition, self).__init__(*args, **kwargs)
        # "record" is added dynamically because Record is defined later in
        # this module (Record -> Field -> FieldType -> TypeDefinition).
        self.add(Record(name='record', missing=colander.drop))
class RequiredOptionalField(colander.SchemaNode):
    # Whether an annotation/explanation is required, optional, or
    # disallowed for a field; omitted entirely when not given.
    schema_type = colander.String
    validator = colander.OneOf([
        'required',
        'optional',
        'none',
    ])
    missing = colander.drop
class Field(colander.SchemaNode):
    """The definition of a single Instrument field."""
    id = IdentifierString()
    description = Description()
    type = FieldType()
    required = colander.SchemaNode(
        StrictBooleanType(),
        missing=colander.drop,
    )
    identifiable = colander.SchemaNode(
        StrictBooleanType(),
        missing=colander.drop,
    )
    annotation = RequiredOptionalField()
    explanation = RequiredOptionalField()
    def __init__(self, *args, **kwargs):
        kwargs['typ'] = colander.Mapping(unknown='raise')
        super(Field, self).__init__(*args, **kwargs)
    def validator(self, node, cstruct):
        # Annotations explain a missing value, so they cannot be enabled
        # on a field whose value is required.
        if 'annotation' in cstruct and cstruct['annotation'] != 'none':
            if 'required' in cstruct and cstruct['required']:
                raise ValidationError(
                    node,
                    'A Field cannot have an annotation'
                    ' if it is required: %s' % cstruct['id'],
                )
class Record(colander.SequenceSchema):
    """A non-empty sequence of Fields with unique identifiers."""

    field = Field()

    def validator(self, node, cstruct):
        """Reject empty records and repeated field IDs."""
        if not cstruct:
            raise ValidationError(
                node,
                'Shorter than minimum length 1',
            )
        identifiers = [entry['id'] for entry in cstruct]
        repeated = list(set(
            identifier
            for identifier in identifiers
            if identifiers.count(identifier) > 1
        ))
        if repeated:
            raise ValidationError(
                node,
                'Field IDs must be unique within a record:'
                ' %s' % ', '.join(repeated),
            )
class InstrumentTypes(colander.SchemaNode):
    """A mapping of custom type IDs to their type definitions."""
    def __init__(self, *args, **kwargs):
        kwargs['typ'] = colander.Mapping(unknown='preserve')
        super(InstrumentTypes, self).__init__(*args, **kwargs)
    def validator(self, node, cstruct):
        cstruct = cstruct or {}
        for type_id, type_def in iteritems(cstruct):
            # Custom type IDs may not shadow the built-in base types.
            if type_id in TYPES_ALL or not RE_IDENTIFIER.match(type_id):
                raise ValidationError(
                    node,
                    '"%r" is not a valid custom type ID' % type_id,
                )
            sub_schema(TypeDefinition, node, type_def)
class Instrument(colander.SchemaNode):
    """Validates a RIOS Instrument Definition."""
    id = InstrumentIdentifier()
    version = Version()
    title = colander.SchemaNode(colander.String())
    description = Description()
    types = InstrumentTypes(missing=colander.drop)
    record = Record()
    meta = MetadataCollection(
        METADATA_PROPS,
        missing=colander.drop,
    )
    def __init__(self, *args, **kwargs):
        kwargs['typ'] = colander.Mapping(unknown='raise')
        super(Instrument, self).__init__(*args, **kwargs)
    def validator(self, node, cstruct):
        # Check every custom type and every field type against the
        # constraint rules implemented below.
        for _, type_def in iteritems(cstruct.get('types', {})):
            self.check_type(type_def, node.get('types'), cstruct)
        for field in cstruct['record']:
            self.check_type(field['type'], node.get('record'), cstruct)
    def check_type(self, type_def, node, cstruct):
        """Fully resolve a type definition and check all its constraints."""
        try:
            full_type_def = get_full_type_definition(cstruct, type_def)
        except Exception as exc:
            # Resolution failures (unknown base types, etc.) are surfaced
            # as validation errors.
            raise ValidationError(
                node,
                str(exc),
            )
        self._check_required_constraints(full_type_def, node, type_def)
        self._check_appropriate_constraints(full_type_def, node)
        self._check_range_constraints(full_type_def, node)
        self._check_complex_subfields(full_type_def, node, cstruct)
        return full_type_def
    def _check_required_constraints(self, full_type_def, node, cstruct):
        # NOTE: "cstruct" here is the original (unresolved) type
        # definition; it is only used in the error message.
        if full_type_def['base'] in iterkeys(TYPES_CONSTRAINED_REQUIRED):
            for con in TYPES_CONSTRAINED_REQUIRED[full_type_def['base']]:
                if con not in full_type_def:
                    raise ValidationError(
                        node,
                        'Type definition "%r" missing required constraint'
                        ' "%s"' % (
                            cstruct,
                            con,
                        ),
                    )
    def _check_appropriate_constraints(self, full_type_def, node):
        # A constraint may only appear on base types that support it.
        for con in CONSTRAINTS_ALL:
            if con in full_type_def:
                if con not in TYPES_CONSTRAINED.get(full_type_def['base'], []):
                    raise ValidationError(
                        node,
                        'Constraint "%s" cannot be used on types based on'
                        ' "%s"' % (
                            con,
                            full_type_def['base'],
                        ),
                    )
    def _check_range_constraints(self, full_type_def, node):
        # Re-validate range bounds now that the concrete base type is known.
        if 'range' in full_type_def \
                and full_type_def['base'] in RANGE_CONSTRAINT_TYPES:
            sub_schema(
                BoundConstraint(RANGE_CONSTRAINT_TYPES[full_type_def['base']]),
                node,
                full_type_def['range'],
            )
    def _check_complex_subfields(self, full_type_def, node, instrument):
        # Complex types (recordList/matrix) may only contain simple types.
        for sub_field_constraint in ('record', 'columns'):
            if sub_field_constraint in full_type_def:
                for field in full_type_def[sub_field_constraint]:
                    sub_type = self.check_type(field['type'], node, instrument)
                    if sub_type['base'] in TYPES_COMPLEX:
                        raise ValidationError(
                            node,
                            'Complex types cannot contain other complex'
                            ' types.',
                        )
def get_full_type_definition(instrument, type_def):
    """
    Returns a fully merged version of an Instrument Type Object that
    includes all constraints inherited from parent data types.
    The ``base`` property of this object will always reflect the base RIOS
    data type that the specified type definition is an implementation of.
    :param instrument:
        the full Instrument definition that the Field in question is a part of
    :type instrument: dict
    :param type_def:
        the contents of the ``type`` property from an Instrument Field
        definition
    :type type_def: dict or str
    :raises ValueError: if the definition references an unknown type
    :raises TypeError: if ``type_def`` is neither a string nor a dict
    :rtype: dict
    """
    if isinstance(type_def, string_types):
        # A base type name resolves to itself.
        if type_def in TYPES_ALL:
            return {
                'base': type_def
            }
        # A custom type name resolves recursively through its definition.
        if type_def in iterkeys(instrument.get('types', {})):
            return get_full_type_definition(
                instrument,
                instrument['types'][type_def],
            )
        raise ValueError(
            'no type is defined for identifier "%s"' % (
                type_def,
            )
        )
    if isinstance(type_def, dict):
        # Merge the parent's resolved definition with this one's
        # overrides; copy first so the caller's dict is not mutated.
        type_def = deepcopy(type_def)
        base_type = type_def.pop('base')
        try:
            parent_type_def = get_full_type_definition(instrument, base_type)
        except ValueError:
            raise ValueError(
                'invalid definition, references undefined base type "%s"' % (
                    base_type,
                )
            )
        parent_type_def.update(type_def)
        return parent_type_def
    raise TypeError(
        'type_def must be a string or dict, got "%r"' % (
            type_def,
        )
    )
import colander
from .common import ValidationError, sub_schema, LanguageTag, \
IdentifierString, Options, LocalizedString, DescriptorList, \
LocalizationChecker, validate_instrument_version, guard, guard_sequence, \
MetadataCollection, RE_PRODUCT_TOKENS
from .instrument import InstrumentReference, TYPES_COMPLEX, \
get_full_type_definition
__all__ = (
    'Interaction',
)
# The kinds of steps an Interaction may contain.
STEP_TYPES_ALL = (
    'question',
    'text',
)
# Metadata properties accepted at the top level of an Interaction.
METADATA_PROPS = {
    'author': colander.SchemaNode(
        colander.String(),
    ),
    'copyright': colander.SchemaNode(
        colander.String(),
    ),
    'homepage': colander.SchemaNode(
        colander.String(),
        validator=colander.url,
    ),
    'generator': colander.SchemaNode(
        colander.String(),
        validator=colander.Regex(RE_PRODUCT_TOKENS),
    ),
}
# pylint: disable=abstract-method
class StepType(colander.SchemaNode):
    # One of STEP_TYPES_ALL ("question" or "text").
    schema_type = colander.String
    validator = colander.OneOf(STEP_TYPES_ALL)
class TextStepOptions(colander.SchemaNode):
    """Options accepted by a "text" step."""
    text = LocalizedString()
    def __init__(self, *args, **kwargs):
        kwargs['typ'] = colander.Mapping(unknown='raise')
        super(TextStepOptions, self).__init__(*args, **kwargs)
class QuestionStepOptions(colander.SchemaNode):
    """Options accepted by a "question" step."""
    fieldId = IdentifierString() # noqa: N815
    text = LocalizedString()
    error = LocalizedString(missing=colander.drop)
    enumerations = DescriptorList(missing=colander.drop)
    def __init__(self, *args, **kwargs):
        kwargs['typ'] = colander.Mapping(unknown='raise')
        super(QuestionStepOptions, self).__init__(*args, **kwargs)
# Maps each step type to the schema used to validate its options.
STEP_TYPE_OPTION_VALIDATORS = {
    'question': QuestionStepOptions(),
    'text': TextStepOptions(),
}
class Step(colander.SchemaNode):
    """A single Interaction step: either a question or a block of text."""
    type = StepType()
    options = Options(missing=colander.drop)
    def __init__(self, *args, **kwargs):
        kwargs['typ'] = colander.Mapping(unknown='raise')
        super(Step, self).__init__(*args, **kwargs)
    def validator(self, node, cstruct):
        """Validate the options against the schema for the step's type."""
        step_type = cstruct.get('type', None)
        validator = STEP_TYPE_OPTION_VALIDATORS.get(step_type, None)
        options = cstruct.get('options', None)
        if validator:
            sub_schema(
                validator,
                node.get('options'),
                options,
            )
        elif options is not None:
            # NOTE: fixed the grammar of this message (was '"%s" step do
            # not accept options').
            raise ValidationError(
                node.get('options'),
                '"%s" steps do not accept options' % step_type,
            )
class StepList(colander.SequenceSchema):
    # An Interaction must contain at least one Step.
    step = Step()
    validator = colander.Length(min=1)
class Threshold(colander.SchemaNode):
    # A positive integer; the unit is not specified at this level.
    schema_type = colander.Integer
    validator = colander.Range(min=1)
class TimeoutDetails(colander.SchemaNode):
    """The threshold and localized message for a timeout event."""
    threshold = Threshold()
    text = LocalizedString()
    def __init__(self, *args, **kwargs):
        kwargs['typ'] = colander.Mapping(unknown='raise')
        super(TimeoutDetails, self).__init__(*args, **kwargs)
class Timeout(colander.SchemaNode):
    """Warning/abort timeout configuration for an Interaction."""

    warn = TimeoutDetails(missing=colander.drop)
    abort = TimeoutDetails(missing=colander.drop)

    def __init__(self, *args, **kwargs):
        kwargs['typ'] = colander.Mapping(unknown='raise')
        super(Timeout, self).__init__(*args, **kwargs)

    def validator(self, node, cstruct):
        """Require at least one of the two timeout levels to be present."""
        has_any = cstruct.get('warn') or cstruct.get('abort')
        if not has_any:
            raise ValidationError(
                node,
                'At least one of "warn" or "abort" must be defined',
            )
class Interaction(colander.SchemaNode):
    """
    Validates a RIOS SMS Interaction Configuration, optionally
    cross-checking it against the Instrument Definition it references.
    """
    instrument = InstrumentReference()
    defaultLocalization = LanguageTag() # noqa: N815
    defaultTimeout = Timeout(missing=colander.drop) # noqa: N815
    steps = StepList()
    meta = MetadataCollection(
        METADATA_PROPS,
        missing=colander.drop,
    )
    def __init__(self, *args, **kwargs):
        # The Instrument (dict) to validate against; when None, only the
        # basic structure of the Interaction is checked.
        self.instrument = kwargs.pop('instrument', None)
        kwargs['typ'] = colander.Mapping(unknown='raise')
        super(Interaction, self).__init__(*args, **kwargs)
    def validator(self, node, cstruct):
        self._check_localizations(node, cstruct)
        if not self.instrument:
            return
        validate_instrument_version(
            self.instrument,
            cstruct,
            node.get('instrument'),
        )
        self._check_fields_covered(node, cstruct)
        self._check_type_specifics(node, cstruct)
    def _check_localizations(self, node, cstruct):
        """Ensure localizable strings cover the default localization."""
        with guard(node.get('defaultTimeout')) as dtnode:
            timeouts = cstruct.get('defaultTimeout', {})
            for level in ('warn', 'abort'):
                if level in timeouts:
                    checker = LocalizationChecker(
                        dtnode.get(level),
                        cstruct['defaultLocalization'],
                    )
                    checker.ensure(
                        timeouts[level],
                        'text',
                        scope='Timeout %s Text' % level,
                    )
        for sidx, step in enumerate(cstruct['steps']):
            with guard_sequence(node, 'step', sidx) as snode:
                if 'options' not in step:  # pragma: no cover
                    # Skip only this step; this was previously a
                    # ``return``, which aborted the localization checks
                    # for all subsequent steps.
                    continue
                checker = LocalizationChecker(
                    snode.get('options'),
                    cstruct['defaultLocalization'],
                )
                options = step['options']
                checker.ensure(options, 'text', scope='Step Text')
                checker.ensure(options, 'error', scope='Step Error')
                for enumeration in options.get('enumerations', []):
                    checker.ensure_descriptor(enumeration, scope='Enumeration')
    def _check_fields_covered(self, node, cstruct):
        """Ensure questions cover each Instrument field exactly once."""
        instrument_fields = set([
            field['id']
            for field in self.instrument['record']
        ])
        intr_fields = set()
        for step in cstruct['steps']:
            if step['type'] != 'question':
                continue
            field_id = step['options']['fieldId']
            if field_id in intr_fields:
                raise ValidationError(
                    node.get('steps'),
                    'Field "%s" is addressed by more than one question' % (
                        field_id,
                    )
                )
            intr_fields.add(field_id)
        missing = instrument_fields - intr_fields
        if missing:
            raise ValidationError(
                node.get('steps'),
                'There are Instrument fields which are missing: %s' % (
                    ', '.join(missing),
                )
            )
        extra = intr_fields - instrument_fields
        if extra:
            raise ValidationError(
                node.get('steps'),
                'There are extra fields referenced by questions: %s' % (
                    ', '.join(extra),
                )
            )
    def _get_instrument_field(self, name):
        """Return the Instrument field definition with the given ID."""
        for field in self.instrument['record']:
            if field['id'] == name:
                return field
        return None
    def _check_type_specifics(self, node, cstruct):
        """Check question configurations against their fields' types."""
        for sidx, step in enumerate(cstruct['steps']):
            with guard_sequence(node, 'step', sidx) as snode:
                if step['type'] != 'question':
                    continue
                type_def = get_full_type_definition(
                    self.instrument,
                    self._get_instrument_field(
                        step['options']['fieldId'],
                    )['type'],
                )
                if type_def['base'] in TYPES_COMPLEX:
                    raise ValidationError(
                        snode.get('options'),
                        'Complex Instrument Types are not allowed in'
                        ' Interactions',
                    )
                if 'enumerations' in step['options']:
                    if type_def['base'] in ('enumeration', 'enumerationSet'):
                        # Every described choice must exist in the type.
                        described_choices = [
                            desc['id']
                            for desc in step['options']['enumerations']
                        ]
                        actual_choices = list(type_def['enumerations'].keys())
                        for described_choice in described_choices:
                            if described_choice not in actual_choices:
                                raise ValidationError(
                                    snode.get('options'),
                                    'Field "%s" describes an invalid'
                                    ' enumeration "%s"' % (
                                        step['options']['fieldId'],
                                        described_choice,
                                    ),
                                )
                    else:
                        raise ValidationError(
                            snode.get('options'),
                            'Field "%s" cannot have an enumerations'
                            ' configuration' % (
                                step['options']['fieldId'],
                            ),
                        )
import json
import six
from .common import ValidationError
from .assessment import Assessment
from .calculationset import CalculationSet
from .form import Form
from .instrument import Instrument, get_full_type_definition
from .interaction import Interaction
__all__ = (
    'ValidationError',
    'validate_instrument',
    'validate_assessment',
    'validate_calculationset',
    'validate_form',
    'validate_interaction',
    'get_full_type_definition',
)
# Keyword arguments passed to json.load()/json.loads().
JSON_LOAD_KW = {}
if six.PY2:
    # Python 2's json module must be told the input encoding explicitly.
    JSON_LOAD_KW['encoding'] = 'utf-8'
def _get_struct(src):
    """Coerce a JSON string or a file-like object into a structure."""
    if isinstance(src, six.string_types):
        return json.loads(src, **JSON_LOAD_KW)
    if hasattr(src, 'read'):
        return json.load(src, **JSON_LOAD_KW)
    return src
def validate_instrument(instrument):
    """
    Validates the input against the RIOS Instrument Definition
    specification.

    :param instrument: The Instrument Definition to validate
    :type instrument: JSON string, dict, or file-like object
    :raises ValidationError: If the input fails any part of the specification
    """
    struct = _get_struct(instrument)
    Instrument().deserialize(struct)
def validate_assessment(assessment, instrument=None):
    """
    Validates the input against the RIOS Assessment Document specification.

    :param assessment: The Assessment Document to validate
    :type assessment: JSON string, dict, or file-like object
    :param instrument:
        The Instrument Definition to validate the Assessment against; when
        not provided, only the basic structure of the Assessment is
        validated -- not its conformance to the Instrument.
    :type instrument: JSON string, dict, or file-like object
    :raises ValidationError: If the input fails any part of the specification
    """
    assessment = _get_struct(assessment)
    if instrument:
        instrument = _get_struct(instrument)
        validate_instrument(instrument)
    Assessment(instrument=instrument).deserialize(assessment)
def validate_calculationset(calculationset, instrument=None):
    """
    Validates the input against the RIOS Calculation Set Definition
    specification.

    :param calculationset: The Calculation Set Definition to validate
    :type calculationset: JSON string, dict, or file-like object
    :param instrument:
        The Instrument Definition to validate the Calculation Set against;
        when not provided, only the basic structure of the Calculation Set
        is validated -- not its conformance to the Instrument.
    :type instrument: JSON string, dict, or file-like object
    :raises ValidationError: If the input fails any part of the specification
    """
    calculationset = _get_struct(calculationset)
    if instrument:
        instrument = _get_struct(instrument)
        validate_instrument(instrument)
    CalculationSet(instrument=instrument).deserialize(calculationset)
def validate_form(form, instrument=None):
    """
    Validates the input against the RIOS Web Form Configuration
    specification.

    :param form: The Web Form Configuration to validate
    :type form: JSON string, dict, or file-like object
    :param instrument:
        The Instrument Definition to validate the Form against; when not
        provided, only the basic structure of the Form is validated -- not
        its conformance to the Instrument.
    :type instrument: JSON string, dict, or file-like object
    :raises ValidationError: If the input fails any part of the specification
    """
    form = _get_struct(form)
    if instrument:
        instrument = _get_struct(instrument)
        validate_instrument(instrument)
    Form(instrument=instrument).deserialize(form)
def validate_interaction(interaction, instrument=None):
    """
    Checks a RIOS SMS Interaction Configuration for compliance with the
    specification.
    :param interaction: The SMS Interaction Configuration to validate
    :type interaction: JSON string, dict, or file-like object
    :param instrument:
        The Instrument Definition to check the Interaction against. When left
        as ``None``, only the basic structure of the Interaction is validated
        -- not its conformance to the Instrument.
    :type instrument: JSON string, dict, or file-like object
    :raises ValidationError: If the input fails any part of the specification
    """
    parsed_interaction = _get_struct(interaction)
    if instrument:
        # A conformance check only makes sense against a valid Instrument.
        instrument = _get_struct(instrument)
        validate_instrument(instrument)
    Interaction(instrument=instrument).deserialize(parsed_interaction)
import requests
from ..classes import LeagueList, LeagueEntry
class LeagueAPI:
    """Client for the Riot Games ``league-v4`` endpoints.

    :param str api_key: Riot API key appended to every request
    """

    def __init__(self, api_key):
        self.api_key = api_key

    def _get_json(self, region: str, endpoint: str):
        """Perform a GET against a league-v4 endpoint and return parsed JSON.

        All league-v4 calls share the same host/path layout, so the URL
        construction is centralised here instead of being repeated in every
        public method.
        """
        url = (
            f'https://{region}.api.riotgames.com/lol/league/v4/'
            f'{endpoint}?api_key={self.api_key}'
        )
        return requests.get(url).json()

    def get_challenger_queue(self, queue: str, region: str):
        """
        Get information about the Challenger queue in the given region and queue type.
        :param str queue: Queue type (RANKED_SOLO_5x5 or RANKED_FLEX_SR)
        :param str region: League region
        :rtype: LeagueList
        """
        return LeagueList(self._get_json(region, f'challengerleagues/by-queue/{queue}'))

    def get_grandmaster_queue(self, queue: str, region: str):
        """
        Get information about the Grandmaster queue in the given region and queue type.
        :param str queue: Queue type (RANKED_SOLO_5x5 or RANKED_FLEX_SR)
        :param str region: League region
        :rtype: LeagueList
        """
        return LeagueList(self._get_json(region, f'grandmasterleagues/by-queue/{queue}'))

    def get_master_queue(self, queue: str, region: str):
        """
        Get information about the Master queue in the given region and queue type.
        :param str queue: Queue type (RANKED_SOLO_5x5 or RANKED_FLEX_SR)
        :param str region: League region
        :rtype: LeagueList
        """
        return LeagueList(self._get_json(region, f'masterleagues/by-queue/{queue}'))

    def get_league_entries(self, id: str, region: str):
        """
        Get information about the ranked positions from the given summoner ID.
        :param str id: Summoner ID
        :param str region: League region
        :rtype: List[LeagueEntry]
        """
        raw = self._get_json(region, f'entries/by-summoner/{id}')
        return [LeagueEntry(entry) for entry in raw]

    def get_league(self, id: str, region: str):
        """
        Get the :class:`~riot_apy.classes.LeagueList` given its ID.
        :param str id: League ID
        :param str region: League region
        :rtype: LeagueList
        """
        # Local previously named ``list`` -- renamed to avoid shadowing the builtin.
        league_list = LeagueList(self._get_json(region, f'leagues/{id}'))
        return league_list
import requests
from ..classes import Champion
class DataDragonAPI:
    """Client for Riot's static Data Dragon CDN (versions, languages, champions)."""

    def __init__(self):
        # Cache the most recent game version so callers can omit ``version``.
        self.latest = self.get_versions()[0]

    def get_versions(self):
        """
        Get a list of all versions.
        :rtype: List[str]
        """
        # Previously bound to a local named ``list`` -- renamed to avoid
        # shadowing the builtin.
        versions = requests.get('https://ddragon.leagueoflegends.com/api/versions.json').json()
        return versions

    def get_languages(self):
        """
        Get a list of all languages.
        :rtype: List[str]
        """
        languages = requests.get('https://ddragon.leagueoflegends.com/cdn/languages.json').json()
        return languages

    def get_champions_list(self, version: str = None, language: str = 'en_US'):
        """
        Get a dictionary containing each champion's ID, key and name.
        :param str version: League version (defaults to the latest version)
        :param str language: League language
        The syntax for this dictionary is as follows:
        .. code-block:: python
            {champion_id (int): {'key': champion_key (str), 'name':champion_name (str)}, ...}
        """
        if not version:
            version = self.latest
        # Endpoint switched from http:// to https:// for consistency with the
        # rest of the class and to avoid plaintext traffic.
        champions_dict_raw = requests.get(f'https://ddragon.leagueoflegends.com/cdn/{version}/data/{language}/champion.json').json()['data']
        # Data Dragon's 'key' is the numeric id and 'id' is the text key, so
        # the two are deliberately swapped when building the mapping.
        champions_dict = {int(champ['key']): {"key": champ['id'], "name": champ['name']} for champ in champions_dict_raw.values()}
        return champions_dict

    def get_champion_from_id(self, id: int, version: str = None, language: str = 'en_US'):
        """
        Get the :class:`~riot_apy.classes.Champion` given its ID.
        :param int id: Champion ID
        :param str version: League version (defaults to the latest version)
        :param str language: League language
        :rtype: Champion
        """
        if not version:
            version = self.latest
        key = self.get_champions_list(version=version, language=language)[id]['key']
        raw = requests.get(f'https://ddragon.leagueoflegends.com/cdn/{version}/data/{language}/champion/{key}.json').json()['data'][key]
        return Champion(raw)
import requests
from ..classes import Match, Matchlist, MatchTimeline
class MatchAPI:
def __init__(self, api_key):
self.api_key = api_key
def get_match(self, id: int, region: str):
"""
Get the :class:`~riot_apy.classes.Match` given its ID.
:param int id: Match ID
:param str region: League region
:rtype: Match
"""
raw = requests.get(f'https://{region}.api.riotgames.com/lol/match/v4/matches/{id}?api_key={self.api_key}').json()
match = Match(raw)
return match
def get_matchlist(self, accountId: str, region: str, champion: list = None,
queue: list = None, season: list = None,
end_time: int = None, begin_time: int = None,
end_index: int = None, begin_index: int = None):
"""
Get the :class:`~riot_apy.classes.Matchlist` for a certain player.
Additional filters can be set.
:param str accountId: Account ID
:param str region: League region
:param List[int] champion: List of champion IDs
:param List[int] queue: List of queue IDs
:param List[int] season: List of season IDs
:param int end_time: End time in epoch milliseconds
:param int begin_time: Begin time in epoch milliseconds
:param int end_index: End index
:param int begin_index: Begin index
:rtype: Matchlist
"""
raw = requests.get(f'https://{region}.api.riotgames.com/lol/match/v4/matchlists/by-account/{accountId}?api_key={self.api_key}',
params={'champion': champion, 'queue': queue, 'season': season, 'endTime': end_time,
'beginTime': begin_time, 'endIndex': end_index, 'beginIndex': begin_index}).json()
matchlist = Matchlist(raw)
return matchlist
def get_timeline(self, id: int, region: str):
"""
Get the :class:`~riot_apy.classes.MatchTimeline` of a :class:`~riot_apy.classes.Match` given its ID.
:param int id: Match ID
:param str region: League region
:rtype: MatchTimeline
"""
raw = requests.get(f'https://{region}.api.riotgames.com/lol/match/v4/timelines/by-match/{id}?api_key={self.api_key}').json()
timeline = MatchTimeline(raw)
return timeline | /riot_apy-0.1.0.tar.gz/riot_apy-0.1.0/riot_apy/apis/MatchAPI.py | 0.762424 | 0.248591 | MatchAPI.py | pypi |
from typing import Optional, List
from .base import RiotGamesApiBase
from .models.match import MatchDto, MatchlistDto, MatchTimelineDto
class MatchApiV4(RiotGamesApiBase):
    """Typed wrapper around the Riot Games ``match-v4`` HTTP endpoints."""

    def get_match(
        self,
        match_id: int,
        platform: Optional[str] = None
    ) -> MatchDto:
        """Fetch a single match by its numeric id."""
        payload = self._request(f"/lol/match/v4/matches/{match_id}", platform)
        return MatchDto.parse_obj(payload)

    def get_matchlists(
        self,
        account_id: str,
        platform: Optional[str] = None,
        *,
        champion: Optional[List[int]] = None,
        queue: Optional[List[int]] = None,
        season: Optional[List[int]] = None,
        end_time: Optional[int] = None,
        begin_time: Optional[int] = None,
        end_index: Optional[int] = None,
        begin_index: Optional[int] = None
    ) -> MatchlistDto:
        """Fetch a (filterable) list of match references for an account."""
        # Unset filters stay None; the transport layer decides how to
        # serialise them, exactly as before.
        query = {
            "champion": champion,
            "queue": queue,
            "season": season,
            "endTime": end_time,
            "beginTime": begin_time,
            "endIndex": end_index,
            "beginIndex": begin_index
        }
        payload = self._request(
            f"/lol/match/v4/matchlists/by-account/{account_id}", platform, query
        )
        return MatchlistDto.parse_obj(payload)

    def get_match_timelines(
        self,
        match_id: int,
        platform: Optional[str] = None
    ) -> MatchTimelineDto:
        """Fetch the frame-by-frame timeline for a match."""
        payload = self._request(f"/lol/match/v4/timelines/by-match/{match_id}", platform)
        return MatchTimelineDto.parse_obj(payload)

    def get_match_ids_by_tournament_code(
        self,
        tournament_code: str,
        platform: Optional[str] = None
    ) -> List[int]:
        """Fetch the match ids played under a tournament code."""
        return self._request(
            f"/lol/match/v4/matches/by-tournament-code/{tournament_code}/ids", platform
        )

    def get_match_by_tournament_code(
        self,
        match_id: int,
        tournament_code: str,
        platform: Optional[str] = None
    ) -> MatchDto:
        """Fetch a tournament match by id and tournament code."""
        payload = self._request(
            f"/lol/match/v4/matches/{match_id}/by-tournament-code/{tournament_code}",
            platform,
        )
        return MatchDto.parse_obj(payload)
from typing import Optional, Dict, List
from .base import BaseModel
class PlayerDto(BaseModel):
    """Identity of a player inside a match's participantIdentities entry."""
    platform_id: str
    account_id: str
    summoner_name: str
    summoner_id: str
    current_platform_id: str
    current_account_id: str
    match_history_uri: str
    profile_icon: int
class ParticipantIdentityDto(BaseModel):
    """Links a participant id to the player identity behind it."""
    participant_id: int
    player: PlayerDto
class ParticipantTimelineDto(BaseModel):
    """Per-participant timeline aggregates (per-minute deltas, role, lane)."""
    participant_id: int
    # NOTE(review): Riot's API is known to report fractional per-minute deltas;
    # Dict[str, int] may be too narrow here -- confirm against live payloads.
    creeps_per_min_deltas: Dict[str, int]
    xp_per_min_deltas: Dict[str, int]
    gold_per_min_deltas: Dict[str, int]
    # The *_diff_* deltas are Optional: presumably absent when there is no
    # lane opponent to diff against -- confirm.
    cs_diff_per_min_deltas: Optional[Dict[str, int]]
    xp_diff_per_min_deltas: Optional[Dict[str, int]]
    damage_taken_per_min_deltas: Dict[str, int]
    damage_taken_diff_per_min_deltas: Optional[Dict[str, int]]
    role: str
    lane: str
class ParticipantStatsDto(BaseModel):
    """End-of-game statistics for a single participant."""
    participant_id: int
    win: bool
    # Final inventory, one field per item slot.
    item0: int
    item1: int
    item2: int
    item3: int
    item4: int
    item5: int
    item6: int
    # Combat summary.
    kills: int
    deaths: int
    assists: int
    largest_killing_spree: int
    largest_multi_kill: int
    killing_sprees: int
    longest_time_spent_living: int
    double_kills: int
    triple_kills: int
    quadra_kills: int
    penta_kills: int
    unreal_kills: int
    # Damage dealt.
    total_damage_dealt: int
    magic_damage_dealt: int
    physical_damage_dealt: int
    true_damage_dealt: int
    largest_critical_strike: int
    total_damage_dealt_to_champions: int
    magic_damage_dealt_to_champions: int
    physical_damage_dealt_to_champions: int
    true_damage_dealt_to_champions: int
    total_heal: int
    total_units_healed: int
    damage_self_mitigated: int
    damage_dealt_to_objectives: int
    damage_dealt_to_turrets: int
    vision_score: int
    time_c_cing_others: int
    # Damage taken.
    total_damage_taken: int
    magical_damage_taken: int
    physical_damage_taken: int
    true_damage_taken: int
    # Economy and objectives.
    gold_earned: int
    gold_spent: int
    turret_kills: int
    inhibitor_kills: int
    total_minions_killed: int
    neutral_minions_killed: int
    neutral_minions_killed_team_jungle: int
    neutral_minions_killed_enemy_jungle: int
    total_time_crowd_control_dealt: int
    champ_level: int
    # Vision.
    vision_wards_bought_in_game: int
    sight_wards_bought_in_game: int
    wards_placed: int
    wards_killed: int
    first_blood_kill: bool
    first_blood_assist: bool
    first_tower_kill: bool
    first_tower_assist: bool
    # Score fields (used by some game modes).
    combat_player_score: int
    objective_player_score: int
    total_player_score: int
    total_score_rank: int
    player_score0: int
    player_score1: int
    player_score2: int
    player_score3: int
    player_score4: int
    player_score5: int
    player_score6: int
    player_score7: int
    player_score8: int
    player_score9: int
    # Runes: perk0-perk5 each carry three variable slots.
    perk0: int
    perk0_var1: int
    perk0_var2: int
    perk0_var3: int
    perk1: int
    perk1_var1: int
    perk1_var2: int
    perk1_var3: int
    perk2: int
    perk2_var1: int
    perk2_var2: int
    perk2_var3: int
    perk3: int
    perk3_var1: int
    perk3_var2: int
    perk3_var3: int
    perk4: int
    perk4_var1: int
    perk4_var2: int
    perk4_var3: int
    perk5: int
    perk5_var1: int
    perk5_var2: int
    perk5_var3: int
    perk_primary_style: int
    perk_sub_style: int
    # Stat shard perks.
    stat_perk0: int
    stat_perk1: int
    stat_perk2: int
class MasteryDto(BaseModel):
    """A legacy mastery entry (rank plus mastery id)."""
    rank: int
    mastery_id: int
class ParticipantDto(BaseModel):
    """One participant in a match: champion, spells, stats and timeline."""
    participant_id: int
    team_id: int
    champion_id: int
    spell1_id: int
    spell2_id: int
    stats: ParticipantStatsDto
    timeline: ParticipantTimelineDto
    # Optional: presumably only present in games where Riot still returned
    # the legacy tier / mastery data -- confirm.
    highest_achieved_season_tier: Optional[str]
    masteries: Optional[List[MasteryDto]]
class TeamBansDto(BaseModel):
    """A single champion ban and the pick-phase turn it happened on."""
    champion_id: int
    pick_turn: int
class TeamStatsDto(BaseModel):
    """Team-level end-of-game statistics and objective firsts."""
    team_id: int
    # A string ("Win"/"Fail"-style), not a boolean, in match-v4 payloads.
    win: str
    first_blood: bool
    first_tower: bool
    first_inhibitor: bool
    first_baron: bool
    first_dragon: bool
    first_rift_herald: bool
    tower_kills: int
    inhibitor_kills: int
    baron_kills: int
    dragon_kills: int
    vilemaw_kills: int
    rift_herald_kills: int
    dominion_victory_score: int
    bans: List[TeamBansDto]
class MatchDto(BaseModel):
    """A full match: metadata plus teams, participants and identities."""
    game_id: int
    platform_id: str
    game_creation: int
    game_duration: int
    queue_id: int
    map_id: int
    season_id: int
    game_version: str
    game_mode: str
    game_type: str
    teams: List[TeamStatsDto]
    participants: List[ParticipantDto]
    participant_identities: List[ParticipantIdentityDto]
class MatchPositionDto(BaseModel):
    """A map position in game coordinates."""
    x: int
    y: int
class MatchEventDto(BaseModel):
    """A single timeline event.

    Almost every field is Optional; which ones are populated presumably
    depends on the event ``type`` -- confirm against the match-v4 timeline
    documentation.
    """
    lane_type: Optional[str]
    skill_slot: Optional[int]
    ascended_type: Optional[str]
    creator_id: Optional[int]
    after_id: Optional[int]
    event_type: Optional[str]
    type: str
    level_up_type: Optional[str]
    ward_type: Optional[str]
    participant_id: Optional[int]
    tower_type: Optional[str]
    item_id: Optional[int]
    before_id: Optional[int]
    point_captured: Optional[str]
    monster_type: Optional[str]
    monster_sub_type: Optional[str]
    team_id: Optional[int]
    position: Optional[MatchPositionDto]
    killer_id: Optional[int]
    timestamp: int
    assisting_participant_ids: Optional[List[int]]
    building_type: Optional[str]
    victim_id: Optional[int]
class MatchParticipantFrameDto(BaseModel):
    """Snapshot of one participant's state at a timeline frame."""
    participant_id: int
    minions_killed: int
    team_score: int
    dominion_score: int
    total_gold: int
    level: int
    xp: int
    current_gold: int
    position: MatchPositionDto
    jungle_minions_killed: int
class MatchFrameDto(BaseModel):
    """One timeline frame: per-participant snapshots plus the frame's events."""
    # Keyed by participant id as a string, per the raw JSON shape.
    participant_frames: Dict[str, MatchParticipantFrameDto]
    events: List[MatchEventDto]
    timestamp: int
class MatchTimelineDto(BaseModel):
    """The full frame-by-frame timeline of a match."""
    frames: List[MatchFrameDto]
    frame_interval: int
class MatchReferenceDto(BaseModel):
    """A lightweight reference to a match, as returned in matchlists."""
    platform_id: str
    game_id: int
    champion: int
    queue: int
    season: int
    timestamp: int
    role: str
    lane: str
class MatchlistDto(BaseModel):
    """A paginated list of match references for an account."""
    matches: List[MatchReferenceDto]
    start_index: int
    end_index: int
    total_games: int
from datetime import datetime, timezone
from lol_dto.classes import game as game_dto
from lol_dto.classes.game import LolGame
from lol_dto.classes.sources.riot_lol_api import RiotGameSource, RiotPlayerSource
from riot_transmute.common.constants import clean_roles
from riot_transmute.common.iso_date_from_ms import get_iso_date_from_ms_timestamp
def match_to_game(match_dto: dict) -> LolGame:
    """
    Returns a LolGame from a MatchDto
    Currently works for both MatchV3 and MatchV4 from season 9 and later
    Args:
        match_dto: A MatchDto from Riot’s API
    Returns:
        The LolGame representation of the game
    """
    log_prefix = (
        f"gameId {match_dto['gameId']}|" f"platformId {match_dto['platformId']}:\t"
    )
    # NOTE(review): messages are accumulated in this set but never logged or
    # returned anywhere in this function -- confirm whether a logging call
    # was dropped at some point.
    info_log = set()
    iso_date = get_iso_date_from_ms_timestamp(match_dto["gameCreation"])
    # The patch is only the first two components of the full version string.
    patch = ".".join(match_dto["gameVersion"].split(".")[:2])
    # teams[0] is BLUE iff its teamId is 100; combining that with its win
    # flag yields the winning side regardless of team ordering in the DTO.
    winner = (
        "BLUE"
        if (match_dto["teams"][0]["teamId"] == 100)
        == (match_dto["teams"][0]["win"] == "Win")
        else "RED"
    )
    game = game_dto.LolGame(
        duration=match_dto["gameDuration"],
        start=iso_date,
        patch=patch,
        gameVersion=match_dto["gameVersion"],
        winner=winner,
    )
    setattr(
        game.sources,
        "riotLolApi",
        RiotGameSource(gameId=match_dto["gameId"], platformId=match_dto["platformId"]),
    )
    for team in match_dto["teams"]:
        side = "BLUE" if team["teamId"] == 100 else "RED"
        # .get() is used throughout because many of these fields are missing
        # in some payloads (older seasons, custom games).
        team_dto = game_dto.LolGameTeam(
            endOfGameStats=game_dto.LolGameTeamEndOfGameStats(
                riftHeraldKills=team.get("riftHeraldKills"),
                dragonKills=team.get("dragonKills"),
                baronKills=team.get("baronKills"),
                turretKills=team.get("towerKills"),
                inhibitorKills=team.get("inhibitorKills"),
                firstTurret=team.get("firstTower"),
                firstInhibitor=team.get("firstInhibitor"),
                firstRiftHerald=team.get("firstRiftHerald"),
                firstDragon=team.get("firstDragon"),
                firstBaron=team.get("firstBaron"),
            )
        )
        team_dto.bans = [b["championId"] for b in team["bans"]]
        for participant in match_dto["participants"]:
            if participant["teamId"] != team["teamId"]:
                continue
            # NOTE(review): the except below only catches a missing
            # 'participantIdentities' key; an exhausted next() raises
            # StopIteration, which is not handled here -- confirm intended.
            try:
                participant_identity = next(
                    identity["player"]
                    for identity in match_dto["participantIdentities"]
                    if identity["participantId"] == participant["participantId"]
                )
            # Custom games also don’t have identity info
            except KeyError:
                participant_identity = None
            # Rune slots 0-5 are the six selected perks, each with up to
            # three stat variables.
            runes = [
                game_dto.LolGamePlayerRune(
                    id=participant["stats"].get(f"perk{i}"),
                    slot=i,
                    stats=[
                        participant["stats"].get(f"perk{i}Var{j}") for j in range(1, 4)
                    ],
                )
                for i in range(0, 6)
            ]
            # Adding stats perks
            runes.extend(
                [
                    game_dto.LolGamePlayerRune(
                        id=participant["stats"].get(f"statPerk{i}"),
                        slot=i + 6,
                    )
                    for i in range(0, 3)
                ]
            )
            items = [
                game_dto.LolGamePlayerItem(
                    id=participant["stats"].get(f"item{i}"), slot=i
                )
                for i in range(0, 7)
            ]
            # spell1Id/spell2Id are mapped to zero-based slots.
            summoner_spells = [
                game_dto.LolGamePlayerSummonerSpell(
                    id=participant.get(f"spell{i}Id"), slot=i - 1
                )
                for i in range(1, 3)
            ]
            end_of_game_stats = game_dto.LolGamePlayerEndOfGameStats(
                items=items,
                firstBlood=participant["stats"].get("firstBloodKill"),
                firstBloodAssist=participant["stats"].get(
                    "firstBloodAssist"
                ),  # This field is wrong by default
                kills=participant["stats"].get("kills"),
                deaths=participant["stats"].get("deaths"),
                assists=participant["stats"].get("assists"),
                gold=participant["stats"].get("goldEarned"),
                cs=int(participant["stats"].get("totalMinionsKilled") or 0)
                + int(participant["stats"].get("neutralMinionsKilled") or 0),
                level=participant["stats"].get("champLevel"),
                wardsPlaced=participant["stats"].get("wardsPlaced"),
                wardsKilled=participant["stats"].get("wardsKilled"),
                visionWardsBought=participant["stats"].get("visionWardsBoughtInGame"),
                visionScore=participant["stats"].get("visionScore"),
                killingSprees=participant["stats"].get("killingSprees"),
                largestKillingSpree=participant["stats"].get("largestKillingSpree"),
                doubleKills=participant["stats"].get("doubleKills"),
                tripleKills=participant["stats"].get("tripleKills"),
                quadraKills=participant["stats"].get("quadraKills"),
                pentaKills=participant["stats"].get("pentaKills"),
                monsterKills=participant["stats"].get("neutralMinionsKilled"),
                monsterKillsInAlliedJungle=participant["stats"].get(
                    "neutralMinionsKilledTeamJungle"
                ),
                monsterKillsInEnemyJungle=participant["stats"].get(
                    "neutralMinionsKilledEnemyJungle"
                ),
                totalDamageDealt=participant["stats"].get("totalDamageDealt"),
                physicalDamageDealt=participant["stats"].get("physicalDamageDealt"),
                magicDamageDealt=participant["stats"].get("magicDamageDealt"),
                totalDamageDealtToChampions=participant["stats"].get(
                    "totalDamageDealtToChampions"
                ),
                physicalDamageDealtToChampions=participant["stats"].get(
                    "physicalDamageDealtToChampions"
                ),
                magicDamageDealtToChampions=participant["stats"].get(
                    "magicDamageDealtToChampions"
                ),
                damageDealtToObjectives=participant["stats"].get(
                    "damageDealtToObjectives"
                ),
                damageDealtToTurrets=participant["stats"].get("damageDealtToTurrets"),
                totalDamageTaken=participant["stats"].get("totalDamageTaken"),
                physicalDamageTaken=participant["stats"].get("physicalDamageTaken"),
                magicDamageTaken=participant["stats"].get("magicalDamageTaken"),
                longestTimeSpentLiving=participant["stats"].get(
                    "longestTimeSpentLiving"
                ),
                totalDamageShieldedOnTeammates=participant["stats"].get(
                    "totalDamageShieldedOnTeammates"
                ),
                largestCriticalStrike=participant["stats"].get("largestCriticalStrike"),
                goldSpent=participant["stats"].get("goldSpent"),
                totalHeal=participant["stats"].get("totalHeal"),
                totalUnitsHealed=participant["stats"].get("totalUnitsHealed"),
                damageSelfMitigated=participant["stats"].get("damageSelfMitigated"),
                totalTimeCCDealt=participant["stats"].get("totalTimeCrowdControlDealt"),
                timeCCingOthers=participant["stats"].get("timeCCingOthers"),
            )
            # The following fields have proved to be missing or buggy in multiple games
            if "firstTowerKill" in participant["stats"]:
                end_of_game_stats.firstTurret = participant["stats"]["firstTowerKill"]
                end_of_game_stats.firstTurretAssist = participant["stats"].get(
                    "firstTowerAssist"
                )
            else:
                info_log.add(f"{log_prefix}Missing ['player']['firstTower']")
            if "firstInhibitorKill" in participant["stats"]:
                end_of_game_stats.firstInhibitor = participant["stats"][
                    "firstInhibitorKill"
                ]
                end_of_game_stats.firstInhibitorAssist = participant["stats"].get(
                    "firstInhibitorAssist"
                )
            else:
                info_log.add(f"{log_prefix}Missing ['player']['firstInhibitor']")
            player = game_dto.LolGamePlayer(
                id=participant["participantId"],
                championId=participant["championId"],
                primaryRuneTreeId=participant["stats"].get("perkPrimaryStyle"),
                secondaryRuneTreeId=participant["stats"].get("perkSubStyle"),
                runes=runes,
                summonerSpells=summoner_spells,
                endOfGameStats=end_of_game_stats,
            )
            # Esports matches do not have an accountId field, so we need to test here
            if participant_identity and "accountId" in participant_identity:
                setattr(
                    player.sources,
                    "riotLolApi",
                    RiotPlayerSource(
                        accountId=participant_identity["accountId"],
                        platformId=participant_identity["platformId"],
                    ),
                )
            if participant_identity:
                player.inGameName = participant_identity["summonerName"]
                player.profileIconId = participant_identity["profileIcon"]
            # roleml compatibility
            if "role" in participant:
                if participant["role"] not in {"TOP", "JGL", "MID", "BOT", "SUP"}:
                    participant["role"] = clean_roles[participant["role"]]
                player.role = participant["role"]
            team_dto.players.append(player)
        # We want to make extra sure players are always ordered by Riot’s given id
        team_dto.players = sorted(team_dto.players, key=lambda x: x.id)
        setattr(game.teams, side, team_dto)
    return game
import lol_dto.classes.game as dto
from lol_dto.classes.sources.riot_lol_api import RiotGameSource, RiotPlayerSource
from riot_transmute.common.iso_date_from_ms import get_iso_date_from_ms_timestamp
# Maps Riot's match-v5 "individualPosition" strings to the three-letter role
# trigrams used by the LolGamePlayer.role field.
role_trigrams = {
    "TOP": "TOP",
    "JUNGLE": "JGL",
    "MIDDLE": "MID",
    "BOTTOM": "BOT",
    "UTILITY": "SUP",
}
from dataclasses import dataclass
@dataclass
class RiotGameRankedSource(RiotGameSource):
    """Riot source metadata extended with the tournament code, if any."""
    # NOTE(review): the None default suggests this should be annotated
    # Optional[str] -- confirm and tighten once typing is imported here.
    tournamentCode: str = None
def match_to_game(match_dto: dict) -> dto.LolGame:
    """
    Returns a LolGame from a MatchDto from match-v5 endpoints
    Args:
        match_dto: A MatchDto from Riot's API,
    Returns:
        The LolGame representation of the game
    """
    # Creating some data fields in a friendlier format
    # ms timestamp -> ISO format
    iso_creation_date = get_iso_date_from_ms_timestamp(match_dto["gameCreation"])
    # v5 has game start as well
    iso_start_date = get_iso_date_from_ms_timestamp(match_dto["gameStartTimestamp"])
    # only 2 values for the patch key (gameVersion is also saved)
    patch = ".".join(match_dto["gameVersion"].split(".")[:2])
    # Saving winner as BLUE or RED
    # Sanity check: at least one team must be flagged as the winner
    # (the flag is "Win" in some payloads and boolean True in others).
    if not any(match_dto["teams"][i]["win"] in ["Win", True] for i in range(0, 2)):
        raise ValueError
    winner = (
        "BLUE"
        if (match_dto["teams"][0]["teamId"] == 100)
        == (
            (
                # I saw both between esports games and live games
                match_dto["teams"][0]["win"] == "Win"
                or match_dto["teams"][0]["win"] == True
            )
        )
        else "RED"
    )
    # Riot made changes to duration on 11.20
    # Prior to patch 11.20, this field returns the game length in milliseconds calculated from gameEndTimestamp - gameStartTimestamp.
    # Post patch 11.20, this field returns the max timePlayed of any participant in the game in seconds, which makes the behavior of this field consistent with that of match-v4.
    # The best way to handling the change in this field is to treat the value as milliseconds if the gameEndTimestamp field isn't in the response and
    # to treat the value as seconds if gameEndTimestamp is in the response.
    if not match_dto.get("gameEndTimestamp"):
        duration = int(match_dto["gameDuration"] / 1000)
    else:
        duration = int(match_dto["gameDuration"])
    # Creating our object's structure
    game = dto.LolGame(
        duration=duration,
        creation=iso_creation_date,
        start=iso_start_date,
        patch=patch,
        gameVersion=match_dto["gameVersion"],
        winner=winner,
        lobbyName=match_dto["gameName"],
        type=match_dto["gameType"],
        queue_id=match_dto["queueId"],
    )
    setattr(
        game.sources,
        "riotLolApi",
        RiotGameRankedSource(
            gameId=match_dto["gameId"],
            platformId=match_dto["platformId"],
            # .get(): only tournament games carry a tournamentCode.
            tournamentCode=match_dto.get("tournamentCode"),
        ),
    )
    # Team-level pass: bans and objective stats.
    for dto_team in match_dto["teams"]:
        if dto_team["teamId"] == 100:
            game_team = game.teams.BLUE
        elif dto_team["teamId"] == 200:
            game_team = game.teams.RED
        else:
            raise ValueError(f"{dto_team['teamId']=} value not supported")
        game_team.bans = [b["championId"] for b in dto_team["bans"]]
        game_team.endOfGameStats = dto.LolGameTeamEndOfGameStats(
            firstTurret=dto_team["objectives"]["tower"]["first"],
            turretKills=dto_team["objectives"]["tower"]["kills"],
            firstRiftHerald=dto_team["objectives"]["riftHerald"]["first"],
            riftHeraldKills=dto_team["objectives"]["riftHerald"]["kills"],
            firstDragon=dto_team["objectives"]["dragon"]["first"],
            dragonKills=dto_team["objectives"]["dragon"]["kills"],
            firstBaron=dto_team["objectives"]["baron"]["first"],
            baronKills=dto_team["objectives"]["baron"]["kills"],
            firstInhibitor=dto_team["objectives"]["inhibitor"]["first"],
            inhibitorKills=dto_team["objectives"]["inhibitor"]["kills"],
        )
    # Player-level pass.
    for dto_player in match_dto["participants"]:
        if dto_player["teamId"] == 100:
            game_team = game.teams.BLUE
        elif dto_player["teamId"] == 200:
            game_team = game.teams.RED
        else:
            raise ValueError(f"{dto_player['teamId']=} value not supported")
        game_player = dto.LolGamePlayer(
            id=dto_player["participantId"],
            inGameName=dto_player["summonerName"],
            role=role_trigrams.get(dto_player["individualPosition"]),
            championId=dto_player["championId"],
            primaryRuneTreeId=dto_player["perks"]["styles"][0]["style"],
            secondaryRuneTreeId=dto_player["perks"]["styles"][1]["style"],
        )
        setattr(
            game_player.sources,
            "riotLolApi",
            RiotPlayerSource(
                # We have to use get to also be compatible with esports games
                puuid=dto_player.get("puuid"),
                summonerId=dto_player.get("summonerId"),
            ),
        )
        # We extend the runes with the primary and secondary trees
        game_player.runes.extend(
            dto.LolGamePlayerRune(
                slot=len(game_player.runes),
                id=r["perk"],
                stats=[r["var1"], r["var2"], r["var3"]],
            )
            for style in dto_player["perks"]["styles"]
            for r in style["selections"]
        )
        # We then add stats perks
        game_player.runes.extend(
            [
                dto.LolGamePlayerRune(
                    slot=len(game_player.runes),
                    id=dto_player["perks"]["statPerks"]["offense"],
                ),
                dto.LolGamePlayerRune(
                    slot=len(game_player.runes) + 1,
                    id=dto_player["perks"]["statPerks"]["flex"],
                ),
                dto.LolGamePlayerRune(
                    slot=len(game_player.runes) + 2,
                    id=dto_player["perks"]["statPerks"]["defense"],
                ),
            ]
        )
        game_player.summonerSpells.extend(
            dto.LolGamePlayerSummonerSpell(
                # Bayes' GAMH data uses spell1Id instead of summoner1Id
                id=dto_player.get(f"summoner{spell_id}Id")
                or dto_player.get(f"spell{spell_id}Id"),
                slot=spell_id - 1,
                casts=dto_player[f"summoner{spell_id}Casts"],
            )
            for spell_id in (1, 2)
        )
        # NOTE(review): this team-level flag is re-assigned once per player,
        # so the last player on each team wins -- confirm intended.
        game_team.earlySurrendered = dto_player["teamEarlySurrendered"]
        items = [
            dto.LolGamePlayerItem(id=dto_player.get(f"item{i}"), slot=i)
            for i in range(0, 7)
        ]
        end_of_game_stats = dto.LolGamePlayerEndOfGameStats(
            items=items,
            firstBlood=dto_player["firstBloodKill"],
            firstBloodAssist=dto_player["firstBloodAssist"],
            kills=dto_player["kills"],
            deaths=dto_player["deaths"],
            assists=dto_player["assists"],
            gold=dto_player["goldEarned"],
            cs=int(dto_player["totalMinionsKilled"] or 0)
            + int(dto_player["neutralMinionsKilled"] or 0),
            level=dto_player["champLevel"],
            wardsPlaced=dto_player["wardsPlaced"],
            wardsKilled=dto_player["wardsKilled"],
            visionWardsBought=dto_player["visionWardsBoughtInGame"],
            visionScore=dto_player["visionScore"],
            killingSprees=dto_player["killingSprees"],
            largestKillingSpree=dto_player["largestKillingSpree"],
            doubleKills=dto_player["doubleKills"],
            tripleKills=dto_player["tripleKills"],
            quadraKills=dto_player["quadraKills"],
            pentaKills=dto_player["pentaKills"],
            monsterKills=dto_player["neutralMinionsKilled"],
            totalDamageDealt=dto_player["totalDamageDealt"],
            physicalDamageDealt=dto_player["physicalDamageDealt"],
            magicDamageDealt=dto_player["magicDamageDealt"],
            totalDamageDealtToChampions=dto_player["totalDamageDealtToChampions"],
            physicalDamageDealtToChampions=dto_player["physicalDamageDealtToChampions"],
            magicDamageDealtToChampions=dto_player["magicDamageDealtToChampions"],
            damageDealtToObjectives=dto_player["damageDealtToObjectives"],
            damageDealtToTurrets=dto_player["damageDealtToTurrets"],
            damageDealtToBuildings=dto_player["damageDealtToBuildings"],
            totalDamageTaken=dto_player["totalDamageTaken"],
            physicalDamageTaken=dto_player["physicalDamageTaken"],
            magicDamageTaken=dto_player["magicDamageTaken"],
            longestTimeSpentLiving=dto_player["longestTimeSpentLiving"],
            largestCriticalStrike=dto_player["largestCriticalStrike"],
            goldSpent=dto_player["goldSpent"],
            totalHeal=dto_player["totalHeal"],
            totalUnitsHealed=dto_player["totalUnitsHealed"],
            damageSelfMitigated=dto_player["damageSelfMitigated"],
            totalTimeCCDealt=dto_player["totalTimeCCDealt"],
            # New match-v5 fields
            xp=dto_player["champExperience"],
            bountyLevel=dto_player["bountyLevel"],
            baronKills=dto_player["baronKills"],
            dragonKills=dto_player["dragonKills"],
            inhibitorKills=dto_player["inhibitorKills"],
            inhibitorTakedowns=dto_player["inhibitorTakedowns"],
            championTransform=dto_player["championTransform"],
            consumablesPurchased=dto_player["consumablesPurchased"],
            detectorWardsPlaced=dto_player["detectorWardsPlaced"],
            itemsPurchased=dto_player["itemsPurchased"],
            nexusKills=dto_player["nexusKills"],
            nexusTakedowns=dto_player["nexusTakedowns"],
            objectivesStolen=dto_player["objectivesStolen"],
            objectivesStolenAssists=dto_player["objectivesStolenAssists"],
            sightWardsBoughtInGame=dto_player["sightWardsBoughtInGame"],
            totalDamageShieldedOnTeammates=dto_player["totalDamageShieldedOnTeammates"],
            totalHealsOnTeammates=dto_player["totalHealsOnTeammates"],
            totalTimeSpentDead=dto_player["totalTimeSpentDead"],
            turretTakedowns=dto_player["turretTakedowns"],
            turretKills=dto_player["turretKills"],
        )
        game_player.endOfGameStats = end_of_game_stats
        game_team.players.append(game_player)
    return game
from pydantic import BaseModel
from pydantic import validator
from pydantic import Field
from datetime import datetime
import numpy as np
import base64
class PacketBase(BaseModel):
    """Shared payload fields and validation for all packet models.

    Validators are declared with ``check_fields=False`` so they also apply
    to fields (``dev_id``, ``ack_id``) that only exist on subclasses.
    """
    # urlsafe-base64 encoded payload
    data: bytes
    pkt_id: int | None
    @validator("data", check_fields=False)
    def is_data_base64(cls, val):
        # Decoded payload is capped at 247 bytes -- presumably the maximum
        # radio frame size; confirm against the transceiver firmware.
        val_bytes = base64.urlsafe_b64decode(val)
        if len(val_bytes) > 247:
            raise ValueError("data too long")
        return val
    @validator("dev_id", check_fields=False)
    def is_device_id(cls, val):
        # Device ids must decode to exactly 4 bytes.
        val_bytes = base64.urlsafe_b64decode(val)
        if len(val_bytes) != 4:
            raise ValueError("device id has wrong size")
        return val
    @validator("pkt_id", "ack_id", check_fields=False)
    def is_uint16(cls, val):
        # Packet/ack ids are 16-bit sequence numbers; None is allowed
        # (e.g. an unassigned pkt_id on an outgoing API packet).
        if val is None:
            return val
        if val < 0 or val >= 2**16:
            raise ValueError("outside range for uint16")
        else:
            return val
class PacketApiSend(PacketBase):
    """Packet sent to the Gateway server via API to be forwarded to a device."""
    @classmethod
    def from_binary(cls, data: bytes, pkt_id: np.int16 = None):
        """Build a packet from raw bytes, base64-encoding the payload.

        When ``pkt_id`` is None, a random id is assigned later (see
        PacketTransceiverSend.from_PacketApiSend).
        """
        data_enc = base64.urlsafe_b64encode(data)
        return cls(data=data_enc, pkt_id=pkt_id)
class PacketTransceiverSend(PacketBase):
    """Packet sent to the transceiver via USB CDC ACM."""
    # urlsafe-base64 encoded 4-byte device id (validated in PacketBase)
    dev_id: bytes
    pkt_id: int
    @classmethod
    def from_PacketApiSend(cls, pkt: PacketApiSend, dev_id: bytes):
        """Bind an API packet to a target device, assigning a packet id."""
        # assign a random packet ID if none is specified
        if pkt.pkt_id is None:
            pkt_id = np.random.randint(0, 2**16)
        else:
            pkt_id = pkt.pkt_id
        return cls(pkt_id=pkt_id, data=pkt.data, dev_id=dev_id)
    def to_uart(self):
        """Returns a string ready to be sent to the gateway transceiver."""
        # Frame layout: '[' dev_id NUL pkt_id NUL data NUL ']', all fields
        # urlsafe-base64. The pkt_id is encoded from a np.uint16, i.e. in
        # native byte order -- presumably matched by the firmware; confirm.
        dev_id_enc = str(self.dev_id, "utf-8")
        data_enc = str(self.data, "utf-8")
        pkt_id_enc = str(base64.urlsafe_b64encode(np.uint16(self.pkt_id)), "utf-8")
        return bytes(f"[{dev_id_enc}\0{pkt_id_enc}\0{data_enc}\0]", encoding="utf-8")
class PacketApiReceive(PacketBase):
    """Packet received by the Gateway server from a device to be retrieved via the API."""
    # Device ID (base64url, 4 decoded bytes) plus uint16 packet/ack counters
    # and the reception timestamp.
    dev_id: bytes
    pkt_id: int
    ack_id: int
    timestamp: datetime
    @staticmethod
    def str_extract(pkt_str: bytes):
        """Returns the bytes of pkt_str up to the first NUL and the NUL's index."""
        term_idx = pkt_str.find(b"\0")
        if term_idx < 0:
            raise Exception("Could not find terminating character")
        return pkt_str[:term_idx], term_idx
    @staticmethod
    def base64_to_bytes(pkt_str: bytes):
        """Extracts a null-terminated base64 string from pkt_str and decodes it to raw bytes."""
        pkt_str_cut, term_idx = PacketApiReceive.str_extract(pkt_str)
        return base64.urlsafe_b64decode(pkt_str_cut), term_idx
    @staticmethod
    def base64_to_bin(pkt_str: bytes, dtype):
        """Extracts a null-terminated base64 string from pkt_str and converts it to specified type."""
        binbytes, term_idx = PacketApiReceive.base64_to_bytes(pkt_str)
        return np.frombuffer(binbytes, dtype)[0], term_idx
    @classmethod
    def from_uart(cls, pkt_str: bytes, timestamp: datetime):
        """Populates class from a pkt_str received from the gateway transceiver."""
        # Field order on the wire: dev_id, pkt_id, ack_id, data (NUL-separated).
        dev_id, term_idx = cls.str_extract(pkt_str)
        pkt_str = pkt_str[term_idx + 1 :]
        pkt_id, term_idx = cls.base64_to_bin(pkt_str, np.uint16)
        pkt_str = pkt_str[term_idx + 1 :]
        ack_id, term_idx = cls.base64_to_bin(pkt_str, np.uint16)
        data, _ = cls.str_extract(pkt_str[term_idx + 1 :])
        return cls(dev_id=dev_id, pkt_id=pkt_id, ack_id=ack_id, data=data, timestamp=timestamp)
    @classmethod
    def from_json(cls, json_dict: dict):
        """Restores a packet from its JSON dictionary representation."""
        return cls(
            dev_id=json_dict["dev_id"],
            pkt_id=json_dict["pkt_id"],
            ack_id=json_dict["ack_id"],
            data=json_dict["data"],
            timestamp=json_dict["timestamp"],
        )
    def to_json(self):
        """Converts the packet to a JSON-serializable dictionary."""
        json_dict = {"ack_id": self.ack_id, "pkt_id": self.pkt_id}
        json_dict["dev_id"] = str(self.dev_id, encoding="utf-8")
        json_dict["data"] = str(self.data, encoding="utf-8")
        json_dict["timestamp"] = str(self.timestamp)
        return json_dict
from pyocd.flash.file_programmer import FileProgrammer
from pathlib import Path
import numpy as np
import struct
from typing import Sequence
from typing import Union
from typing import Callable
from riotee_probe.protocol import *
from riotee_probe.session import RioteeProbeSession
from riotee_probe.intelhex import IntelHex16bitReader
class Target(object):
    """Abstract debug/programming target reachable through a probe session.

    Concrete subclasses implement the halt/reset/resume and memory access
    primitives for a specific MCU.
    """

    def __init__(self, session: RioteeProbeSession):
        # Session shared with the probe; all commands go through it.
        self._session = session

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        pass

    def halt(self):
        raise NotImplementedError

    def reset(self):
        raise NotImplementedError

    def resume(self):
        raise NotImplementedError

    def write(self, addr, data):
        raise NotImplementedError

    def read(self, addr, n: int):
        raise NotImplementedError

    def program(self, fw_path: Path, progress: Callable = None):
        raise NotImplementedError
class TargetMSP430(Target):
    """MSP430 target programmed over Spy-Bi-Wire via vendor-specific DAP commands."""

    def __enter__(self):
        # Open the Spy-Bi-Wire connection for the duration of the context.
        self._session.vendor_cmd(ReqType.ID_DAP_VENDOR_SBW_CONNECT)
        return self

    def __exit__(self, *exc):
        self._session.vendor_cmd(ReqType.ID_DAP_VENDOR_SBW_DISCONNECT)

    def reset(self):
        self._session.vendor_cmd(ReqType.ID_DAP_VENDOR_SBW_RESET)

    def resume(self):
        self._session.vendor_cmd(ReqType.ID_DAP_VENDOR_SBW_RESUME)

    def halt(self):
        self._session.vendor_cmd(ReqType.ID_DAP_VENDOR_SBW_HALT)

    def write(self, addr: int, data: Union[Sequence[np.uint16], np.uint16]):
        """Write one or more 16-bit words to target memory at `addr`."""
        if hasattr(data, "__len__"):
            pkt = struct.pack(f"=IB{len(data)}H", addr, len(data), *data)
        else:
            # Single word: fixed format string, no interpolation needed.
            pkt = struct.pack("=IBH", addr, 1, data)
        # One Byte is required for request type
        if len(pkt) >= DAP_VENDOR_MAX_PKT_SIZE:
            raise ValueError("Data length exceeds maximum packet size")
        self._session.vendor_cmd(ReqType.ID_DAP_VENDOR_SBW_WRITE, pkt)

    def read(self, addr, n_words: int = 1):
        """Read `n_words` 16-bit words from target memory at `addr`.

        Returns a numpy scalar for a single word, an array otherwise.
        """
        pkt = struct.pack("=IB", addr, n_words)
        rsp = self._session.vendor_cmd(ReqType.ID_DAP_VENDOR_SBW_READ, pkt)
        rsp_arr = np.frombuffer(rsp, dtype=np.uint16)
        if n_words == 1:
            return rsp_arr[0]
        return rsp_arr

    def program(self, fw_path: Path, progress: Callable = None, verify: bool = True):
        """Flash an Intel-HEX firmware image, optionally verifying each chunk.

        Args:
            fw_path: path to the Intel-HEX firmware file.
            progress: optional callable receiving the completed fraction (0..1].
            verify: read back each written chunk and compare against the image.
        """
        ih = IntelHex16bitReader()
        ih.loadhex(fw_path)
        self.halt()
        # Overhead: 1B request, 4B address, 1B len -> 6B; the remainder of the
        # packet carries 16-bit words, hence the division by 2.
        pkts = list(ih.iter_packets((DAP_VENDOR_MAX_PKT_SIZE - 6) // 2))
        for i, pkt in enumerate(pkts):
            self.write(pkt.address, pkt.values)
            if verify:
                rb = self.read(pkt.address, len(pkt))
                if (rb != pkt.values).any():
                    raise Exception(f"Verification failed at 0x{pkt.address:08X}!")
            if progress:
                progress((i + 1) / len(pkts))
        self.resume()
class TargetNRF52(Target):
    """nRF52 target driven through pyOCD using the probe's debug session."""
    def __enter__(self):
        # NOTE(review): init/uninit go through the private `_board`/`_inited`
        # attributes while the methods below use `self._session.board` —
        # confirm both spellings refer to the same underlying board object.
        self._session._board.init()
        self._session._inited = True
        return self
    def __exit__(self, *exc):
        try:
            self._session._board.uninit()
        except Exception as e:
            # Best effort: uninit failures are reported but not re-raised.
            print("Error during board uninit:", e)
        self._session._inited = False
    def program(self, fw_path: Path, progress: Callable = None):
        """Flash a firmware file via pyOCD's FileProgrammer."""
        if progress is None:
            # FileProgrammer expects a callable; substitute a no-op.
            def progress(arg):
                pass
        FileProgrammer(self._session, progress=progress).program(str(fw_path))
    def halt(self):
        self._session.board.target.halt()
    def reset(self):
        self._session.board.target.reset()
    def resume(self):
        self._session.board.target.resume()
    def write(self, addr, data: Union[Sequence[np.uint32], np.uint32]):
        """Write a single 32-bit word or a block of words at `addr`."""
        if hasattr(data, "__len__"):
            self._session.board.target.write_memory_block32(addr, data)
        else:
            self._session.board.target.write_memory(addr, data)
    def read(self, addr, n_words: int = 1):
        """Read one 32-bit word (scalar) or a block of `n_words` words."""
        if n_words == 1:
            return self._session.board.target.read_memory(addr)
        else:
            return self._session.board.target.read_memory_block32(addr, n_words)
from enum import Enum
import struct
from contextlib import contextmanager
from riotee_probe.protocol import ReqType
from riotee_probe.protocol import IOSetState
from riotee_probe.session import RioteeProbeSession
from riotee_probe.target import TargetNRF52
from riotee_probe.target import TargetMSP430
class GpioDir(Enum):
    """Direction of a probe GPIO pin."""

    GPIO_DIR_IN = 0   # pin configured as input
    GPIO_DIR_OUT = 1  # pin configured as output
@contextmanager
def get_connected_probe():
    """Context manager yielding a probe object matching the connected hardware."""
    probe_cls_by_product = {
        "Riotee Board": RioteeProbeBoard,
        "Riotee Probe": RioteeProbeProbe,
    }
    with RioteeProbeSession() as session:
        probe_cls = probe_cls_by_product.get(session.product_name)
        if probe_cls is None:
            raise Exception(f"Unsupported probe {session.product_name} selected")
        yield probe_cls(session)
class RioteeProbe(object):
    """Common functionality shared by the Riotee Probe and Riotee Board."""
    def __init__(self, session):
        # Underlying RioteeProbeSession used for all vendor commands.
        self._session = session
    @contextmanager
    def msp430(self):
        """Yield an MSP430 target connected through this probe."""
        with TargetMSP430(self._session) as msp430:
            yield msp430
    @contextmanager
    def nrf52(self):
        """Yield an nRF52 target connected through this probe."""
        with TargetNRF52(self._session) as nrf52:
            yield nrf52
    def target_power(self, state: bool):
        """Switch the target power supply on or off."""
        pkt = struct.pack("=B", state)
        return self._session.vendor_cmd(ReqType.ID_DAP_VENDOR_POWER, pkt)
    def gpio_dir(self, pin: int, dir: GpioDir):
        # GPIO control is only available on hardware that implements it.
        raise NotImplementedError
    def gpio_set(self, pin: int, state: bool):
        raise NotImplementedError
    def gpio_get(self, pin: int):
        raise NotImplementedError
    def bypass(self, state):
        raise NotImplementedError
    def fw_version(self) -> str:
        """Return the probe firmware version string."""
        ret = self._session.vendor_cmd(ReqType.ID_DAP_VENDOR_VERSION)
        # Firmware versions before 1.1.0 send a trailing nul over the wire
        return str(ret.strip(b"\0"), encoding="utf-8")
class RioteeProbeProbe(RioteeProbe):
    """Riotee Probe hardware: exposes target GPIO control via vendor commands."""

    def gpio_set(self, pin: int, state: bool):
        """Drive the given pin high or low."""
        level = IOSetState.IOSET_OUT_HIGH if state else IOSetState.IOSET_OUT_LOW
        self._session.vendor_cmd(
            ReqType.ID_DAP_VENDOR_GPIO_SET, struct.pack("=BB", pin, level)
        )

    def gpio_get(self, pin: int) -> bool:
        """Read back the current level of the given pin."""
        rsp = self._session.vendor_cmd(
            ReqType.ID_DAP_VENDOR_GPIO_GET, struct.pack("=B", pin)
        )
        return bool(rsp[0])

    def gpio_dir(self, pin: int, dir: GpioDir):
        """Configure the given pin as input or output (outputs start low)."""
        if dir == GpioDir.GPIO_DIR_IN:
            mode = IOSetState.IOSET_IN
        else:
            mode = IOSetState.IOSET_OUT_LOW
        self._session.vendor_cmd(
            ReqType.ID_DAP_VENDOR_GPIO_SET, struct.pack("=BB", pin, mode)
        )
class RioteeProbeBoard(RioteeProbe):
    """Riotee Board hardware: supports bypassing the power supply circuitry."""

    def bypass(self, state: bool):
        """Enable or disable the power bypass and return the command response."""
        return self._session.vendor_cmd(
            ReqType.ID_DAP_VENDOR_BYPASS, struct.pack("=B", state)
        )
# RIP Counter
Count vote on [https://www.referendum.interieur.gouv.fr/consultation_publique/8](https://www.referendum.interieur.gouv.fr/consultation_publique/8).
RIP Counter can use multiple captcha solvers:
- A manual solver where an operator needs to enter the captcha.
- An automatic solver using captchanet: [https://github.com/hadim/captchanet](https://github.com/hadim/captchanet).
- *The current version of CaptchaNet (version 4) has a success rate of about 33% per captcha image.*
## Usage
### Docker
Using Docker is easier because you don't have to setup a Python environment:
```bash
export RIP_COUNTER_COOKIE_NAME="incap_ses_531_2043128"
export RIP_COUNTER_COOKIE_VALUE="3V4yAmW8fmLvJA7G7n5eBxRbHV0AAAAAmB4th13GWlgFvUSW5IyFbw=="
export RIP_COUNTER_DATA_DIR="<PATH_TO DATA_DIRECTORY>"
docker run -ti --rm \
-v $RIP_COUNTER_DATA_DIR:/data \
-e RIP_COUNTER_COOKIE_NAME \
-e RIP_COUNTER_COOKIE_VALUE \
hadim/rip-counter
```
- The process usually takes a few hours.
- Once it's done, `$RIP_COUNTER_DATA_DIR` should contain your data as CSV files.
- The last output of the script (`scrap-my-rip`) will also give you the number of votes.
- `scrap-my-rip` will generate a bunch of files:
- `scraper.log`: Log of the scraper.
- `all_urls.json`: Contains URLs of all the scraped pages ordered by the first two letters. It is used to restore scraping (this file is removed after each scraping session).
- `2019-07-04_data.csv`: A table generated at each run. It contains one vote per row.
- `master_data.csv`: A summary containing the sum of votes for each date.
- `data_by_communes.csv`: Vote count for each day of scrap and also each communes. Non French communes (without INSEE code) are also present in the file.
You can also build the Docker image locally and use docker-compose:
```bash
git clone https://github.com/hadim/rip-counter.git
cd rip-counter/
docker-compose build
export RIP_COUNTER_COOKIE_NAME="incap_ses_531_2043128"
export RIP_COUNTER_COOKIE_VALUE="3V4yAmW8fmLvJA7G7n5eBxRbHV0AAAAAmB4th13GWlgFvUSW5IyFbw=="
export RIP_COUNTER_DATA_DIR="<PATH_TO DATA_DIRECTORY>"
docker-compose run rip
docker-compose rm -f
```
### Local
You first need to create a Python environment. We encourage you to use the [Anaconda distribution](https://www.anaconda.com/distribution/):
```bash
conda create -n rip_env
conda activate rip_env
conda env update -f environment.yml
# Then install libraries not available in conda-forge.
pip install --no-deps -U tensorflow-datasets tensorflow_metadata tensorboard tensorflow-estimator
pip install --no-deps -U tensorflow==2.0.0-beta1
```
Now install the `rip-counter` library:
```bash
conda activate rip_env
pip install https://github.com/hadim/rip-counter/archive/master.zip
```
Then you can start counting:
```bash
export RIP_COUNTER_COOKIE_NAME="incap_ses_531_2043128"
export RIP_COUNTER_COOKIE_VALUE="3V4yAmW8fmLvJA7G7n5eBxRbHV0AAAAAmB4th13GWlgFvUSW5IyFbw=="
export RIP_COUNTER_DATA_DIR="<PATH_TO DATA_DIRECTORY>"
scrap-my-rip
```
## Telegram Bot
`rip-counter` also has a Telegram bot that you can run on a server. Then you can control daily scraping using a few simple commands you send to the bot.
- You first need to create a bot and get the token associated to it at [https://core.telegram.org/bots#6-botfather](https://core.telegram.org/bots#6-botfather).
- Then get your user id associated to your personal account so the bot will talk to you and only to you. Talk to [@myidbot](https://telegram.me/myidbot) to get your user id.
- Start the bot using Docker:
```bash
export RIP_COUNTER_BOT_TOKEN="<TELEGRAM_BOT_TOKEN>"
# Use comma to allow multiple users.
export RIP_COUNTER_BOT_ALLOWED_USERS="99999,2827364"
export RIP_COUNTER_DATA_DIR="<PATH_TO DATA_DIRECTORY>"
docker run -ti --rm \
-v $RIP_COUNTER_DATA_DIR:/data \
-e RIP_COUNTER_BOT_TOKEN \
-e RIP_COUNTER_BOT_ALLOWED_USERS \
hadim/rip-counter
```
The available commands are:
- `/start`: Start a scraping session.
- `/stop`: Stop a scraping session.
- `/status`: Check if a scraping session is currently running.
- `/log`: Display the last 10 lines of the log.
- `/set_cookie`: Set Incapsula cookie name:
```bash
/set_cookie incap_ses_1226_2043128 WtIoIix+tSfAQxu1tqADEfhmI10AAAAArK81JEbV3YaB02Y7AUcxaw==
```
## API
You can also use `rip-counter` in a Python script:
```python
import os
import rip_counter
rip_data_dir = '<PATH TO DATA DIR>'
os.environ['RIP_COUNTER_COOKIE_NAME'] = 'incap_ses_303_2043128'
os.environ['RIP_COUNTER_COOKIE_VALUE'] = "M+otKL5POk133ss9Jnk0BExMH10AAAAAyM8+JLZw1q3nfwIZMFZehA=="
captcha_solver = rip_counter.CaptchaNetSolver(preload_model=True)
scraper = rip_counter.RIPScraper(captcha_solver=captcha_solver, save_dir=rip_data_dir,
max_captcha_try=20, shuffle_urls=False)
# Scrap !
await scraper.scrap(batch_size=64, show_progress=True, _test_mode=False)
# Post process data
data = scraper.process_data()
print(data.loc[:, data.columns.str.startswith('vote_count')].sum())
```
## License
Under BSD license. See [LICENSE](LICENSE).
## Authors
- Hadrien Mary <hadrien.mary@gmail.com>
| /rip-counter-0.2.0.tar.gz/rip-counter-0.2.0/README.md | 0.454472 | 0.80525 | README.md | pypi |
import logging
import os
import tempfile
import urllib
import urllib.request
import zipfile
from pathlib import Path

from tqdm.auto import tqdm
class _TqdmUpTo(tqdm):
    """tqdm subclass whose `update_to` matches the `reporthook` signature of
    `urllib.request.urlretrieve`, so a download can drive the progress bar.
    Inspired by [twine#242](https://github.com/pypa/twine/pull/242),
    [here](https://github.com/pypa/twine/commit/42e55e06).
    """
    def update_to(self, b=1, bsize=1, tsize=None):
        """
        Args:
            b: int, optional
                Number of blocks transferred so far [default: 1].
            bsize: int, optional
                Size of each block (in tqdm units) [default: 1].
            tsize: int, optional
                Total size (in tqdm units). If [default: None] remains unchanged.
        """
        if tsize is not None:
            self.total = tsize
        self.update(b * bsize - self.n)  # will also set self.n = b * bsize
def download_zip(zip_url, extract_folder_path, progressbar=True):
    """Download a ZIP file from an URL and extract it to a given local folder.

    Args:
        zip_url: The URL to the ZIP file as a str.
        extract_folder_path: The path to the local folder for the extraction.
        progressbar: bool, show a progress bar while downloading.
    """
    # mkstemp instead of the deprecated, race-prone tempfile.mktemp: the file
    # is created atomically so its name cannot be hijacked before use.
    fd, temp_path = tempfile.mkstemp(suffix=".zip")
    os.close(fd)
    try:
        with _TqdmUpTo(unit='B', unit_scale=True, unit_divisor=1024, miniters=1, disable=not progressbar) as t:
            urllib.request.urlretrieve(
                zip_url, filename=temp_path, reporthook=t.update_to, data=None)
        with zipfile.ZipFile(temp_path) as zf:
            zf.extractall(extract_folder_path)
    finally:
        # Always clean up the temporary archive, even when download or
        # extraction fails.
        os.remove(temp_path)
def download_file(file_url, local_file_path, force=False, progressbar=True):
    """Download a file.

    Args:
        file_url: string
        local_file_path: string or Path
        force: bool, if False, don't download if the file already exist.
        progressbar: bool, show a progress bar.

    Returns:
        True if the file is available locally (already present or freshly
        downloaded), False if the download failed.
    """
    if Path(local_file_path).is_file() and not force:
        return True
    try:
        with _TqdmUpTo(unit='B', unit_scale=True, unit_divisor=1024, miniters=1, disable=not progressbar) as t:
            urllib.request.urlretrieve(
                file_url, filename=local_file_path, reporthook=t.update_to, data=None)
    except Exception as e:  # pylint: disable=broad-except
        # The f-prefix was missing here before, so the URL was never
        # interpolated into the log message.
        logging.error(f"'{file_url}' cannot be downloaded.")
        logging.error(e)
        # Signal the failure to the caller instead of pretending success.
        return False
    return True
import appier
class UtilAPI(object):
    """Helpers that convert RIPE query strings into spec dictionaries.

    A query string like ``?brand=x&model=y&p=part:material:color`` is
    unpacked into an options dictionary and then assembled into the
    canonical spec structure used by the rest of the API.
    """

    @classmethod
    def _query_to_spec(cls, query):
        """Convert a URL query string into a customization spec dictionary."""
        options = cls._unpack_query(query)
        brand = options.get("brand", None)
        model = options.get("model", None)
        variant = options.get("variant", None)
        version = options.get("version", None)
        description = options.get("description", None)
        initials = options.get("initials", None)
        engraving = options.get("engraving", None)
        gender = options.get("gender", None)
        size = options.get("size", None)
        meta = options.get("meta", [])
        # Repeated query keys may come back as a scalar or a list; normalize
        # both `p` (parts) and `initials_extra` to lists.
        tuples = options.get("p", [])
        tuples = tuples if isinstance(tuples, list) else [tuples]
        initials_extra = options.get("initials_extra", [])
        initials_extra = (
            initials_extra if isinstance(initials_extra, list) else [initials_extra]
        )
        initials_extra = cls._parse_extra_s(initials_extra)
        parts = cls._tuples_to_parts(tuples)
        parts_m = cls._parts_to_parts_m(parts)
        spec = dict(
            brand=brand,
            model=model,
            parts=parts_m,
            initials=initials,
            engraving=engraving,
            initials_extra=initials_extra,
        )
        # Optional attributes are only present in the spec when provided.
        if variant:
            spec["variant"] = variant
        if version:
            spec["version"] = version
        if description:
            spec["description"] = description
        if gender:
            spec["gender"] = gender
        if size:
            spec["size"] = size
        if meta:
            spec["meta"] = cls._normalize_meta(meta)
        return spec

    @classmethod
    def _unpack_query(cls, query):
        """Parse a query string into a dict, collecting repeated keys in lists."""
        query = query.strip("?")
        parts = appier.split_unescape(query, "&")
        options = dict()
        for part in parts:
            # Split on the first "=" only: values (e.g. base64 payloads)
            # may legitimately contain "=" characters themselves.
            key, value = part.split("=", 1)
            if key not in options:
                options[key] = value
            elif isinstance(options[key], list):
                options[key].append(value)
            else:
                options[key] = [options[key], value]
        return options

    @classmethod
    def _parse_extra_s(cls, extra_s):
        """Parse ``name:initials:engraving`` triplets into the extra dict."""
        extra = dict()
        for extra_i in extra_s:
            name, initials, engraving = appier.split_unescape(extra_i, ":", 2)
            extra[name] = dict(initials=initials, engraving=engraving or None)
        return extra

    @classmethod
    def _tuples_to_parts(cls, tuples):
        """Parse ``name:material:color`` triplets into a list of part dicts."""
        parts = []
        for triplet in tuples:
            name, material, color = appier.split_unescape(triplet, ":", 2)
            part = dict(name=name, material=material, color=color)
            parts.append(part)
        return parts

    @classmethod
    def _parts_to_parts_m(cls, parts):
        """Convert a list of part dicts into a map keyed by part name."""
        parts_m = dict()
        for part in parts:
            parts_m[part["name"]] = dict(
                material=part["material"], color=part["color"]
            )
        return parts_m

    @classmethod
    def _normalize_meta(cls, meta):
        """Normalize ``key:value`` (optionally ``$type:key:value``) meta entries.

        Repeated keys are collected into lists; the ``$list``, ``$int``,
        ``$float`` and ``$bool`` type prefixes coerce the value accordingly.
        """
        meta_d = {}
        meta_l = (
            [
                appier.split_unescape(element, ":", 2)
                if element.startswith("$")
                else appier.split_unescape(element, ":", 1)
                for element in meta
            ]
            if meta
            else []
        )
        for parts in meta_l:
            if len(parts) == 2:
                # No type prefix given: treat it as an untyped key/value pair.
                parts = None, parts[0], parts[1]
            type_s, key, value = parts
            if key in meta_d:
                old = meta_d[key]
                is_sequence = isinstance(old, (list, tuple))
                if not is_sequence:
                    old = [old]
                old.append(value)
                value = old
            if type_s == "$list" and not isinstance(value, list):
                value = [value]
            if type_s == "$int":
                value = int(value)
            if type_s == "$float":
                value = float(value)
            if type_s == "$bool":
                value = value in ("1", "true", "True")
            meta_d[key] = value
        return meta_d
class SizeAPI(object):
    """API mixin for the size conversion endpoints.

    All conversion calls share the same shape: an unauthenticated GET against
    ``sizes/<endpoint>`` carrying either a single (scale, value, gender)
    triplet or, for the ``*_b`` bulk variants, parallel sequences of them.
    The previously duplicated request code is factored into two helpers.
    """

    def _convert(self, endpoint, scale, value, gender):
        # Single-value conversion helper shared by all non-bulk endpoints.
        url = self.base_url + "sizes/" + endpoint
        return self.get(url, auth=False, scale=scale, value=value, gender=gender)

    def _convert_b(self, endpoint, scales, values, genders):
        # Bulk conversion helper shared by all *_b endpoints.
        url = self.base_url + "sizes/" + endpoint
        return self.get(url, auth=False, scales=scales, values=values, genders=genders)

    def get_sizes(self):
        """Return the size scales known to the server."""
        url = self.base_url + "sizes"
        contents = self.get(url, auth=False)
        return contents

    def size_to_native(self, scale, value, gender):
        return self._convert("size_to_native", scale, value, gender)

    def size_to_native_b(self, scales, values, genders):
        return self._convert_b("size_to_native_b", scales, values, genders)

    def native_to_size(self, scale, value, gender):
        return self._convert("native_to_size", scale, value, gender)

    def native_to_size_b(self, scales, values, genders):
        return self._convert_b("native_to_size_b", scales, values, genders)

    def size_to_locale(self, scale, value, gender):
        return self._convert("size_to_locale", scale, value, gender)

    def size_to_locale_b(self, scales, values, genders):
        return self._convert_b("size_to_locale_b", scales, values, genders)

    def native_to_locale(self, scale, value, gender):
        return self._convert("native_to_locale", scale, value, gender)

    def native_to_locale_b(self, scales, values, genders):
        return self._convert_b("native_to_locale_b", scales, values, genders)

    def locale_to_native(self, scale, value, gender):
        return self._convert("locale_to_native", scale, value, gender)

    def locale_to_native_b(self, scales, values, genders):
        return self._convert_b("locale_to_native_b", scales, values, genders)
class BulkOrderAPI(object):
    """API mixin for creating, retrieving and driving the bulk order life cycle."""

    def get_bulk_orders(self, **kwargs):
        """Retrieve the list of bulk orders."""
        endpoint = self.base_url + "bulk_orders"
        return self.get(endpoint, **kwargs)

    def get_bulk_order(self, number):
        """Retrieve a single bulk order by its number."""
        endpoint = self.base_url + "bulk_orders/%d" % number
        return self.get(endpoint)

    def set_bulk_order_status(self, number, status, **kwargs):
        """Transition the bulk order into the given status via a PUT request."""
        endpoint = self.base_url + "bulk_orders/%d/%s" % (number, status)
        return self.put(endpoint, **kwargs)

    def create_bulk_order(self, number, **kwargs):
        return self.set_bulk_order_status(number, "create", **kwargs)

    def produce_bulk_order(self, number, **kwargs):
        return self.set_bulk_order_status(number, "produce", **kwargs)

    def quality_assure_bulk_order(self, number, **kwargs):
        return self.set_bulk_order_status(number, "quality_assure", **kwargs)

    def reject_bulk_order(self, number, **kwargs):
        return self.set_bulk_order_status(number, "reject", **kwargs)

    def ready_bulk_order(self, number, **kwargs):
        return self.set_bulk_order_status(number, "ready", **kwargs)

    def send_bulk_order(self, number, **kwargs):
        return self.set_bulk_order_status(number, "send", **kwargs)

    def block_bulk_order(self, number, **kwargs):
        return self.set_bulk_order_status(number, "block", **kwargs)

    def receive_bulk_order(self, number, **kwargs):
        return self.set_bulk_order_status(number, "receive", **kwargs)

    def return_bulk_order(self, number, **kwargs):
        return self.set_bulk_order_status(number, "return", **kwargs)

    def cancel_bulk_order(self, number, **kwargs):
        return self.set_bulk_order_status(number, "cancel", **kwargs)

    def import_bulk_order(self, name, orders, **kwargs):
        """Import a named bulk order composed of the given orders."""
        endpoint = self.base_url + "bulk_orders"
        payload = dict(name=name, orders=orders)
        # Brand and description are optional and only sent when provided.
        brand = kwargs.get("brand", None)
        if brand:
            payload["brand"] = brand
        description = kwargs.get("description", None)
        if description:
            payload["description"] = description
        return self.post(endpoint, data_j=payload, **kwargs)

    def attachments_bulk_order(self, number):
        """Retrieve the attachments associated with a bulk order."""
        endpoint = self.base_url + "bulk_orders/%d/attachments" % number
        return self.get(endpoint)

    def create_attachment_bulk_order(self, number, files, **kwargs):
        """Upload attachment files to a bulk order."""
        endpoint = self.base_url + "bulk_orders/%d/attachments" % number
        return self.post(endpoint, data_m=dict(files=files), **kwargs)
from email.mime.text import MIMEText
import logging
from os import environ
from smtplib import SMTP, SMTP_SSL, SMTPException
import socket
from subprocess import call
from .Config import Config
from .emailsettings import read_email_settings
from .Errors import ConfigError, ProgramError
from .Helpers import BasicConfigElement
from .Logging import logger, CustomSysLogLogger
class Action(BasicConfigElement):
    """Action
    Action performed on the basis of expected results processing for probes
    which match the `matching_rules` rules.
    `kind`: type of action.
    `descr` (optional): brief description of the action.
    `when` (optional): when the action must be performed (with regards of
    expected results processing output); one of "on_match", "on_mismatch",
    "always". Default: "on_mismatch".
    When a probe matches a rule, it's expected results are processed; on the
    basis of the output, actions given in the rule's `actions` list are
    performed.
    For each expected result, if the probe's collected result matches the
    expectation actions whose `when` = "on_match" or "always" are performed.
    If the collected result does not match the expected result, actions
    whose `when` = "on_mismatch" or "always" are performed.
    """
    # Overridden by concrete subclasses with the `kind` string they implement.
    CFG_ACTION_KIND = None
    MANDATORY_CFG_FIELDS = ["kind"]
    OPTIONAL_CFG_FIELDS = ["descr", "when"]
    @classmethod
    def get_cfg_fields(cls):
        """Return (mandatory, optional) config field sets, merging this base
        class' fields with those declared by the concrete subclass."""
        m = set(Action.MANDATORY_CFG_FIELDS)
        o = set(Action.OPTIONAL_CFG_FIELDS)
        if cls != Action:
            m.update(set(cls.MANDATORY_CFG_FIELDS))
            o.update(set(cls.OPTIONAL_CFG_FIELDS))
        return m, o
    def __init__(self, monitor, name, cfg):
        BasicConfigElement.__init__(self, cfg)
        self.monitor = monitor
        self.normalize_fields()
        self.name = name
        # `descr` is optional; normalize_fields presumably fills missing
        # optional fields beforehand — TODO confirm in BasicConfigElement.
        self.descr = cfg["descr"]
        self.kind = self._enforce_param("kind", str)
        if not self.kind:
            raise ConfigError("Missing action kind.")
        # Each subclass only accepts configs declaring its own kind.
        if self.kind != self.CFG_ACTION_KIND:
            raise ConfigError(
                "Wrong action kind: {} expected, {} found.".format(
                    self.CFG_ACTION_KIND, self.kind
                )
            )
        self.when = self._enforce_param("when", str) or "on_mismatch"
        WHEN = ("on_match", "on_mismatch", "always")
        if self.when not in WHEN:
            raise ConfigError(
                "Unexpected when ({}): must be one of {}".format(
                    self.when, ", ".join(WHEN)
                )
            )
    def __str__(self):
        raise NotImplementedError()
    def perform(self, result, expres, result_matches):
        """Execute the action for a collected result; subclass responsibility."""
        raise NotImplementedError()
    def ping_output(self, result):
        """Render a one-line summary of a ping result."""
        # Stolen from https://github.com/RIPE-NCC/ripe-atlas-tools
        # (/blob/master/ripe/atlas/tools/renderers/ping.py)
        packets = result.packets
        if not packets:
            return "No packets found"
        # Because the origin value is more reliable as "from" in v4 and as
        # "packet.source_address" in v6.
        origin = result.origin
        if ":" in origin:
            origin = packets[0].source_address
        line = "{} bytes from probe #{:<5} {:15} to {} ({}): ttl={} times:{}\n"
        return line.format(
            result.packet_size,
            result.probe_id,
            origin,
            result.destination_name,
            result.destination_address,
            packets[0].ttl,
            " ".join(["{:8}".format(str(_.rtt) + ",") for _ in packets])
        )
    def traceroute_output(self, result):
        """Render a traceroute result, one line per hop, annotating each
        responding hop with its origin AS or IXP name when known."""
        # Based on https://github.com/RIPE-NCC/ripe-atlas-tools
        r = ""
        for hop in result.hops:
            if hop.is_error:
                r += "{}\n".format(hop.error_message)
                continue
            name = ""
            rtts = []
            ip_info = None
            asn = ""
            details = ""
            for packet in hop.packets:
                if packet.origin and packet.origin != "*":
                    # Resolve AS/IXP info only once per hop; get_ip_info is
                    # expected to return "ASN", "IsIXP" and "IXPName" keys.
                    if not ip_info:
                        ip_info = self.monitor.ip_cache.get_ip_info(
                            packet.origin
                        )
                        asn = ip_info["ASN"]
                        ixp = ip_info["IsIXP"]
                        ixp_name = ip_info["IXPName"]
                        if asn and asn.isdigit():
                            details = "AS{}".format(asn)
                        elif ixp:
                            details = "IX {}".format(ixp_name)
                        else:
                            details = ""
                name = name or packet.origin or "*"
                if packet.rtt:
                    rtts.append("{:8} ms".format(packet.rtt))
                else:
                    rtts.append("          *")
            r += "{:>3} {:39} {}\n".format(
                hop.index,
                "{} {}".format(name, details),
                " ".join(rtts)
            )
        return r
    def sslcert_output(self, result):
        """Render the certificate chain of a sslcert result."""
        r = ""
        TPL = ("SHA256 Fingerprint={sha256fp}\n"
               " Issuer: C={issuer_c}, O={issuer_o}, CN={issuer_cn}\n"
               " Subject: C={subject_c}, O={subject_o}, CN={subject_cn}\n")
        for certificate in result.certificates:
            r += TPL.format(
                issuer_c=certificate.issuer_c,
                issuer_o=certificate.issuer_o,
                issuer_cn=certificate.issuer_cn,
                subject_c=certificate.subject_c,
                subject_o=certificate.subject_o,
                subject_cn=certificate.subject_cn,
                sha256fp=certificate.checksum_sha256
            )
        return r
    def dns_output(self, result):
        """Render each DNS response: header, flags, EDNS info and the
        answers/authorities/additionals sections."""
        r = ""
        if not result.responses:
            return "No response found"
        response_idx = 0
        indent = ""
        for response in result.responses:
            response_idx += 1
            if response_idx > 1:
                r += "\n"
            # With multiple responses, number them and indent their details.
            if len(result.responses) > 1:
                r += "Response n. {}\n\n".format(response_idx)
                indent = " "
            if not response.abuf:
                r += indent + "Can't parse response's abuf\n"
                continue
            r += indent + "Header: {}, {}, id: {}\n".format(
                response.abuf.header.opcode,
                response.abuf.header.return_code,
                response.abuf.header.id
            )
            header_flags = []
            for flag in ("aa", "ad", "cd", "qr", "ra", "rd",):
                if getattr(response.abuf.header, flag):
                    header_flags.append(flag)
            r += indent + "Header flags: {}\n".format(", ".join(header_flags))
            if response.abuf.edns0:
                r += indent + "EDNS: version {}, size {}{}\n".format(
                    response.abuf.edns0.version,
                    response.abuf.edns0.udp_size,
                    ", DO flag" if response.abuf.edns0.do else ""
                )
            for section in ("answers", "authorities", "additionals"):
                r += indent + "Section: {}\n".format(section)
                section = getattr(response.abuf, section)
                if len(section) > 0:
                    for record in section:
                        r += indent + " " + str(record) + "\n"
                else:
                    r += indent + " " + "no records\n"
        return r
    def get_result_text(self, result, expres):
        """Dispatch to the renderer matching the monitored measurement type."""
        if self.monitor.msm_type == "traceroute":
            return self.traceroute_output(result)
        elif self.monitor.msm_type == "ping":
            return self.ping_output(result)
        elif self.monitor.msm_type == "sslcert":
            return self.sslcert_output(result)
        elif self.monitor.msm_type == "dns":
            return self.dns_output(result)
        else:
            raise NotImplementedError(
                "Action non implemented for {} "
                "measurements.".format(self.monitor.msm_type)
            )
    def get_notification_text(self, result, expres):
        """Build the full notification message for a collected result."""
        probe = self.monitor.get_probe(result)
        r = ("{monitor}\n\n"
             "Received result from {probe} at {time}\n\n"
             "Expected result: {expres}\n\n"
             "{result}").format(
                monitor=self._capitalize_first(str(self.monitor)),
                expres=str(expres) if expres else "none",
                probe=str(probe),
                time=str(result.created),
                result=self.get_result_text(result, expres)
        )
        return r
class ActionLog(Action):
    """Action log
    Log the match/mismatch along with the collected result.
    No parameters required.
    """
    CFG_ACTION_KIND = "log"
    MANDATORY_CFG_FIELDS = []
    OPTIONAL_CFG_FIELDS = []
    def __init__(self, monitor, name, cfg):
        Action.__init__(self, monitor, name, cfg)
    def __str__(self):
        # Fall back to a generic description when none was configured.
        return self.descr or "Log the received result"
    def perform(self, result, expres, result_matches):
        # Render the notification text and hand it to the logging subsystem.
        logger.action_log(self.get_notification_text(result, expres))
class ActionSysLog(Action):
    """Action syslog

    Log the match/mismatch along with the collected result using syslog.

    `socket` (optional): where the syslog message has to be logged. One of
    "file", "udp", "tcp".

    `host` (optional): meaningful only when `socket` is "udp" or "tcp". Host
    where send the syslog message to.

    `port` (optional): meaningful only when `socket` is "udp" or "tcp".
    UDP/TCP port where send the syslog message to.

    `file` (optional): meaningful only when `socket` is "file". File where the
    syslog message has to be written to.

    `facility` (optional): syslog facility that must be used to log the
    message.

    `priority` (optional): syslog priority that must be used to log the
    message.

    Parameters which are not given are read from the global configuration
    file `default_syslog` section.
    """

    CFG_ACTION_KIND = "syslog"
    MANDATORY_CFG_FIELDS = []
    OPTIONAL_CFG_FIELDS = ["socket", "host", "port", "file", "facility",
                           "priority"]

    @staticmethod
    def _get_level(name):
        """Map a syslog priority name to a `logging` module level.

        Return None for unknown names so that the caller can raise a
        meaningful configuration error.
        """
        levels = {
            "alert": logging.CRITICAL,
            "crit": logging.CRITICAL,
            "critical": logging.CRITICAL,
            "debug": logging.DEBUG,
            "emerg": logging.CRITICAL,
            "panic": logging.CRITICAL,
            "err": logging.ERROR,
            "error": logging.ERROR,
            "info": logging.INFO,
            "notice": logging.INFO,
            "warn": logging.WARNING,
            "warning": logging.WARNING,
        }
        return levels.get(name)

    def __init__(self, monitor, name, cfg):
        Action.__init__(self, monitor, name, cfg)

        # Fall back to the global configuration for any parameter not
        # given in the action's own configuration.
        self.socket = self._enforce_param("socket", str) or \
            Config.get("default_syslog.socket")
        if not self.socket:
            raise ConfigError("Missing socket.")
        elif self.socket not in ["udp", "tcp", "file"]:
            raise ConfigError(
                "Invalid socket type: {}. It must be one "
                "of 'udp', 'tcp' or 'file'.".format(self.socket)
            )

        self.host = None
        self.port = None
        self.file = None

        if self.socket in ["udp", "tcp"]:
            # Network sockets need a destination host and port.
            self.host = self._enforce_param("host", str) or \
                Config.get("default_syslog.host")
            if not self.host:
                raise ConfigError(
                    "Missing host. It's mandatory when socket "
                    "is 'tcp' or 'udp'."
                )

            self.port = self._enforce_param("port", int) or \
                Config.get("default_syslog.port")
            if not self.port:
                raise ConfigError(
                    "Missing port. It's mandatory when socket "
                    "is 'tcp' or 'udp'."
                )
        else:
            # "file" sockets need a destination path.
            self.file = self._enforce_param("file", str) or \
                Config.get("default_syslog.file")
            if not self.file:
                raise ConfigError(
                    "Missing file. It's mandatory when socket "
                    "is 'file'."
                )

        self.facility = self._enforce_param("facility", str) or \
            Config.get("default_syslog.facility")
        if not self.facility:
            raise ConfigError("Missing facility.")
        if self.facility not in ["auth", "authpriv", "cron", "daemon", "ftp",
                                 "kern", "lpr", "mail", "news", "syslog",
                                 "user", "uucp", "local0", "local1", "local2",
                                 "local3", "local4", "local5", "local6",
                                 "local7"]:
            raise ConfigError("Invalid facility: {}".format(self.facility))

        self.priority = self._enforce_param("priority", str) or \
            Config.get("default_syslog.priority")
        if not self.priority:
            raise ConfigError("Missing priority.")

        self.log_level = self._get_level(self.priority)
        if not self.log_level:
            raise ConfigError(
                "Invalid priority: {}. Must be one of "
                "'alert', 'crit', 'critical', 'debug', 'emerg', 'panic', "
                "'err', 'error', 'info', 'notice', 'warn', "
                "'warning'.".format(self.priority)
            )

        # Unique name used to create/reuse the underlying logger instance.
        self.logger_name = "{socket}-{address}-{facility}-{priority}".format(
            socket=self.socket,
            address=self.file if self.socket == "file" else "{}:{}".format(
                self.host, self.port
            ),
            facility=self.facility,
            priority=self.priority
        )

        # Lazily initialized by _setup_logger() on the first perform().
        self.logger = None
        self.logger_ready = False

    def _setup_logger(self):
        """Lazily build the syslog logger.

        Return True only when the logger is ready to be used.
        """
        if self.logger:
            return self.logger_ready

        self.logger = CustomSysLogLogger(self.logger_name)
        try:
            self.logger.setup(
                self.socket,
                self.file if self.socket == "file" else (self.host, self.port),
                self.facility
            )
        except Exception:
            logger.error(
                "Error while setting up the syslog logger "
                "for action {}".format(str(self)),
                exc_info=True
            )
            # Bug fix: on failure the logger must NOT be flagged as ready,
            # otherwise perform() would keep using a broken logger. The
            # original code fell through and returned True here.
            return False

        self.logger_ready = True
        return True

    def __str__(self):
        if self.descr:
            return self.descr
        else:
            return "Send a syslog message to {}".format(
                self.file if self.file else "{}:{}:{}".format(
                    self.socket, self.host, self.port
                )
            )

    def perform(self, result, expres, result_matches):
        # Skip silently when the syslog logger could not be set up; the
        # setup failure has already been logged.
        if not self._setup_logger():
            return

        probe = self.monitor.get_probe(result)

        # result_matches is None when no expected result was processed.
        if result_matches is None:
            status = "Received"
        else:
            status = "Expected" if result_matches else "Unexpected"

        msg = "{monitor} - {status} result from {probe} at {time}".format(
            status=status,
            monitor=self._capitalize_first(str(self.monitor)),
            probe=str(probe),
            time=str(result.created),
        )
        self.logger.log(self.log_level, msg)
class ActionSendEMail(Action):
    """Action email

    Send an email with the expected result processing output.

    `from_addr` (optional): email address used in the From field.

    `to_addr` (optional): email address used in the To field.

    `subject` (optional): subject of the email message.

    `smtp_host` (optional): SMTP server's host.

    `smtp_port` (optional): SMTP server's port.

    `use_ssl` (optional): boolean indicating whether the connection
    toward SMTP server must use encryption.

    `username` (optional): username for SMTP authentication.

    `password` (optional): password for SMTP authentication.

    `timeout` (optional): timeout, in seconds.

    Parameters which are not given are read from the global configuration
    file `default_smtp` section.
    """

    CFG_ACTION_KIND = "email"
    MANDATORY_CFG_FIELDS = []
    OPTIONAL_CFG_FIELDS = ["from_addr", "to_addr", "subject", "smtp_host",
                           "smtp_port", "use_ssl", "username", "password",
                           "timeout"]

    def __init__(self, monitor, name, cfg):
        Action.__init__(self, monitor, name, cfg)

        # Merge action-level settings with the global default_smtp ones.
        email_settings = read_email_settings(
            from_addr=self._enforce_param("from_addr", str),
            to_addr=self._enforce_list("to_addr", str),
            subject=self._enforce_param("subject", str),
            smtp_host=self._enforce_param("smtp_host", str),
            smtp_port=self._enforce_param("smtp_port", int),
            timeout=self._enforce_param("timeout", int),
            use_ssl=self._enforce_param("use_ssl", bool),
            username=self._enforce_param("username", str),
            password=self._enforce_param("password", str)
        )
        # Copy every resolved setting onto the action instance
        # (self.from_addr, self.to_addr, self.subject, ...).
        for attr in email_settings:
            setattr(self, attr, email_settings[attr])

    def __str__(self):
        if self.descr:
            return self.descr
        else:
            return "Send an email to {}".format(", ".join(self.to_addr))

    def perform(self, result, expres, result_matches):
        r = self.get_notification_text(result, expres)
        probe = self.monitor.get_probe(result)

        # result_matches is None when no expected result was processed.
        if result_matches is None:
            status = "has been received"
        elif result_matches:
            status = "matched expected values"
        else:
            status = "did not match expected values"

        body = ("A result from {monitor} {status}.\n\n"
                "{probe} - expected result {expres}\n\n"
                "-------------------------------------\n\n{res}").format(
                    monitor=str(self.monitor), status=status,
                    probe=str(probe), expres=str(expres) if expres else "none",
                    res=r)

        if self.use_ssl:
            smtp_class = SMTP_SSL
        else:
            smtp_class = SMTP

        msg = MIMEText(body)
        msg["Subject"] = self.subject
        msg["From"] = self.from_addr
        msg["To"] = ",".join(self.to_addr)

        smtp = None
        try:
            smtp = smtp_class(host=self.smtp_host, port=self.smtp_port,
                              timeout=self.timeout)
            if self.username:
                smtp.login(self.username, self.password)
            if hasattr(smtp, "send_message"):
                # Python 3: send_message handles header encoding.
                smtp.send_message(msg, self.from_addr, self.to_addr)
            else:
                # Python 2 fallback.
                smtp.sendmail(self.from_addr, self.to_addr, msg.as_string())
        except (SMTPException, socket.error, socket.herror, socket.gaierror,
                socket.timeout) as e:
            # Bug fix: include the underlying error in the message; the
            # original discarded the cause entirely.
            raise ProgramError(
                "Error while sending email to {} via {}:{} - {}".format(
                    ", ".join(self.to_addr), self.smtp_host, self.smtp_port,
                    str(e)
                )
            )
        finally:
            # Bug fix: always close the SMTP connection, even when login
            # or message submission fails (the original only called
            # quit() on the success path, leaking the connection).
            if smtp is not None:
                try:
                    smtp.quit()
                except Exception:
                    pass
class ActionRunProgram(Action):
    """Action run

    Run an external program.

    `path`: path of the program to run.

    `env_prefix` (optional): prefix used to build environment variables.

    `args` (optional): list of arguments which have to be passed to the
    program. If the argument starts with "$" it is replaced with the
    value of the variable with the same name.

    If `env_prefix` is not given, its value is taken from the global
    configuration file `misc.env_prefix` parameter.

    Variables are:

    - `ResultMatches`: True, False or None

    - `MsmID`: measurement's ID

    - `MsmType`: measurement's type (ping, traceroute, sslcert, dns)

    - `MsmAF`: measurement's address family (4, 6)

    - `MsmStatus`: measurement's status (Running, Stopped)
      [https://atlas.ripe.net/docs/rest/]

    - `MsmStatusID`: measurement's status ID
      [https://atlas.ripe.net/docs/rest/]

    - `Stream`: True or False

    - `ProbeID`: probe's ID

    - `ProbeCC`: probe's ISO Country Code

    - `ProbeASNv4`: probe's ASN (IPv4)

    - `ProbeASNv6`: probe's ASN (IPv6)

    - `ProbeASN`: probe's ASN related to measurement's address family

    - `ResultCreated`: timestamp of result's creation date/time

    Example:

    actions:
      RunMyProgram:
        kind: run
        path: /path/to/my-program
        args:
        - command
        - -o
        - --msm
        - $MsmID
        - --probe
        - $ProbeID
    """

    CFG_ACTION_KIND = "run"
    MANDATORY_CFG_FIELDS = ["path"]
    OPTIONAL_CFG_FIELDS = ["env_prefix", "args"]

    # Names that can be referenced as "$Var" in `args` and that are also
    # exported as environment variables (prefixed with env_prefix).
    VARIABLES = (
        "ResultMatches",
        "MsmID",
        "MsmType",
        "MsmAF",
        "MsmStatus",
        "MsmStatusID",
        "Stream",
        "ProbeID",
        "ProbeCC",
        "ProbeASNv4",
        "ProbeASNv6",
        "ProbeASN",
        "ResultCreated"
    )

    def __init__(self, monitor, name, cfg):
        Action.__init__(self, monitor, name, cfg)

        self.path = self._enforce_param("path", str)

        self.env_prefix = self._enforce_param("env_prefix", str) or \
            Config.get("misc.env_prefix")

        self.args = self._enforce_list("args", str) or []

        # Validate "$Var" references at configuration time.
        self.get_args()

    def get_args(self, env_base=None):
        """Build the argument list, expanding "$Var" references.

        When `env_base` is given, each "$Var" is replaced with its value;
        otherwise references are only validated and kept as "$Var".

        Raise ConfigError for unknown variable names.
        """
        args = []
        for arg in self.args:
            if arg.startswith("$"):
                arg = arg[1:]
                if arg not in self.VARIABLES:
                    raise ConfigError(
                        "Invalid variable: ${}. It must be one "
                        "of ${}.".format(arg,
                                         ", $".join(self.VARIABLES))
                    )
                else:
                    if env_base:
                        arg = str(env_base[arg])
                    else:
                        arg = "$" + arg
            args.append(arg)
        return args

    def _get_full_path(self):
        # Human readable command line (program + unexpanded args).
        args = [self.path]
        args += self.get_args()
        return " ".join(args)

    def __str__(self):
        if self.descr:
            return self.descr
        else:
            return "Run external program {}".format(
                self._get_full_path()
            )

    def perform(self, result, expres, result_matches):
        probe = self.monitor.get_probe(result)

        env_base = {
            "ResultMatches": result_matches,
            "MsmID": self.monitor.msm_id,
            "MsmType": self.monitor.msm_type,
            "MsmAF": self.monitor.msm_af,
            "MsmStatus": self.monitor.msm_status,
            "MsmStatusID": self.monitor.msm_status_id,
            "Stream": self.monitor.stream,
            "ProbeID": probe.id,
            "ProbeCC": probe.country_code,
            "ProbeASNv4": probe.asn_v4,
            "ProbeASNv6": probe.asn_v6,
            "ProbeASN": probe.asn,
            "ResultCreated": result.created
        }

        # verify all the env_base variables are known
        if set(env_base.keys()) != set(self.VARIABLES):
            raise ProgramError(
                "Error in ActionRunProgram class: variables mismatch"
            )

        # Bug fix: work on a COPY of the process environment; the original
        # code assigned `env = environ` and then mutated it, leaking the
        # variables into os.environ for the whole process across calls.
        env = dict(environ)
        for k in env_base:
            env["{}{}".format(self.env_prefix, k)] = str(env_base[k])

        args = [self.path] + self.get_args(env_base)

        try:
            call(args, env=env)
        except Exception:
            # Bug fix: the original bare "except:" also swallowed
            # SystemExit and KeyboardInterrupt.
            raise ProgramError(
                "Error while running external program {}".format(
                    self._get_full_path()
                )
            )
class ActionLabel(Action):
    """Action label

    Add or remove custom labels to/from probes.

    `op`: operation; one of "add" or "del".

    `label_name`: label to be added/removed.

    `scope` (optional): scope of the label; one of "result" or "probe".
    Default: "result".

    Labels can be added to probes and subsequently used to match those probes
    in other rules (`internal_labels` criterion).

    If scope is "result", the operation is meaningful only within the
    current result processing (that is, within the current `matching_rules`
    processing for the current result). Labels added to probe are
    removed when the current result processing is completed.

    If scope is "probe", the operation is persistent across results
    processing.
    """

    CFG_ACTION_KIND = "label"
    MANDATORY_CFG_FIELDS = ["op", "label_name"]
    OPTIONAL_CFG_FIELDS = ["scope"]

    def __init__(self, monitor, name, cfg):
        Action.__init__(self, monitor, name, cfg)

        valid_ops = ["add", "del"]
        self.op = self._enforce_param("op", str)
        if self.op not in valid_ops:
            raise ConfigError(
                "Invalid label operation: {}. Must be one of {}".format(
                    self.op, ", ".join(valid_ops)
                )
            )

        self.label_name = self._enforce_param("label_name", str)

        valid_scopes = ["probe", "result"]
        self.scope = self._enforce_param("scope", str) or "result"
        if self.scope and self.scope not in valid_scopes:
            raise ConfigError(
                "Invalid label scope: {}. Must be one of {}".format(
                    self.scope, ", ".join(valid_scopes)
                )
            )

    def __str__(self):
        if self.descr:
            return self.descr
        if self.op == "add":
            return "Add label {} to {}".format(self.label_name, self.scope)
        if self.op == "del":
            return "Remove label {} from {}".format(self.label_name,
                                                    self.scope)
        raise NotImplementedError()

    def perform(self, result, expres, result_matches):
        probe = self.monitor.get_probe(result)
        key = str(probe.id)

        # Select the label store matching the configured scope.
        if self.scope == "probe":
            labels = self.monitor.internal_labels["probes"]
        elif self.scope == "result":
            labels = self.monitor.internal_labels["results"]
        else:
            raise NotImplementedError()

        if self.op == "add":
            labels.setdefault(key, set())
            tpl = "adding label {name} to {scope} {key}"
            labels[key].add(self.label_name)
        elif self.op == "del":
            if key in labels and self.label_name in labels[key]:
                tpl = "removing label {name} from {scope} {key}"
                labels[key].remove(self.label_name)
            else:
                tpl = "label {name} already missing from {scope} {key}"
        else:
            raise NotImplementedError()

        logger.debug(
            tpl.format(name=self.label_name, scope=self.scope, key=key)
        )
# Registry of the concrete Action classes available to monitors; used to
# map a configured action "kind" (CFG_ACTION_KIND) to its implementation.
ACTION_CLASSES = [
    ActionLog,
    ActionSendEMail,
    ActionRunProgram,
    ActionSysLog,
    ActionLabel
] | /ripe-atlas-monitor-0.1.10.tar.gz/ripe-atlas-monitor-0.1.10/pierky/ripeatlasmonitor/Action.py | 0.644673 | 0.239372 | Action.py | pypi |
import re
from .Errors import ConfigError, RIPEAtlasMonitorError
from .Helpers import BasicConfigElement
from .Logging import logger
class Rule(BasicConfigElement):
    """Rule

    Probes which produced the results fetched from the measurement are matched
    against these rules to determine whether those results must be processed
    or not.

    `descr` (optional): a brief description of the rule.

    `process_next` (optional): determine whether the rule following the
    current one has to be elaborated or not. More details on the description
    below.

    `src_country` (optional): list of two letters country ISO codes.

    `src_as` (optional): list of Autonomous System numbers.

    `probe_id` (optional): list of probes' IDs.

    `internal_labels` (optional): list of internal labels. More details on the
    description below.

    `reverse` (optional): boolean, indicating if the aforementioned criteria
    identify probes which have to be excluded from the matching.

    `expected_results` (optional): list of expected results' names which
    have to be processed on match. Must be one or more of the expected results
    defined in Monitor.`expected_results`. If empty or missing, the rule will
    be treated as if a match occurred and its actions are performed.

    `actions` (optional): list of actions' names which have to be performed
    for matching probes. Must be one or more of the actions defined in
    Monitor.`actions`.

    The `src_country` criterion matches when probe's source country is one of
    the country ISO codes given in the list.

    The `src_as` criterion matches when probe's source AS is one of the ASN
    given in the list. Since RIPE Atlas defines two ASs for each probe (ASN_v4
    and ASN_v6) the one corresponding to the measurement's address family is
    taken into account.

    The `probe_id` criterion matches when probe's ID is one of the IDs given
    in the list.

    The `internal_labels` criterion matches when a probe has been previously
    tagged with a label falling in the given list. See the `label` Action for
    more details.

    A probe matches the rule when all the given criteria are satisfied or when
    no criteria are defined at all. If `reverse` is True, a probe matches when
    none of the criteria is satisfied.

    When a probe matches the rule, the expected results given
    in `expected_results` are processed; actions given in the `actions` list
    are performed on the basis of expected results processing output. If
    no `expected_results` are given, actions will be performed too.

    When a probe matches the current rule's criteria:

    - if `process_next` is True, the rule which follows the current one is
      forcedly elaborated;

    - if `process_next` is False or missing, the rules processing is stopped.

    If a probe does not match the current rule's criteria:

    - if `process_next` is False, the rule processing is forcedly stopped;

    - if `process_next` is True or missing, the rule which follows the
      current one is regularly processed.

    Examples:

    matching_rules:
    - descr: Do not process results for probe ID 123 and 456
      probe_id:
      - 123
      - 456
    - descr: Check dst AS for any probe, errors to NOC; process next rule
      expected_results: DstAS
      actions: SendEMailToNOC
      process_next: True
    - descr: Italian probes must reach target via AS64496
      src_country: IT
      expected_results: ViaAS64496
      actions: LogErrors
    - descr: German and French probes must reach target with low RTT
      src_country:
      - DE
      - FR
      expected_results: LowRTT
      actions: LogErrors

    matching_rules:
    - descr: Set 'VIP' (Very Important Probe) label to ID 123 and 456
      probe_id:
      - 123
      - 456
      process_next: True
      actions: SetVIPLabel
    - descr: Set 'VIP' label to Italian probes too
      src_country: IT
      process_next: True
      actions: SetVIPLabel
    - descr: VIPs must have low RTT
      internal_labels: VIP
      expected_results: LowRTT
    """

    MANDATORY_CFG_FIELDS = []
    OPTIONAL_CFG_FIELDS = ["expected_results", "descr", "process_next",
                           "src_country", "src_as", "probe_id", "reverse",
                           "internal_labels", "actions"]

    def __init__(self, monitor, cfg):
        BasicConfigElement.__init__(self, cfg)

        self.monitor = monitor

        self.normalize_fields()

        self.descr = cfg.get("descr")

        self.process_next = self._enforce_param("process_next", bool)

        # Robustness: normalize every list criterion to an empty list, so
        # that the len()-based tests in __str__, display and probe_matches
        # cannot fail when a parameter is missing.
        self.src_country = self._enforce_list("src_country", str) or []

        self.src_as = self._enforce_list("src_as", int) or []

        self.probe_id = self._enforce_list("probe_id", int) or []

        self.internal_labels = self._enforce_list("internal_labels", str) or []

        self.reverse = self._enforce_param("reverse", bool) or False

        self.expected_results = \
            self._enforce_list("expected_results", str) or []

        self.actions = self._enforce_list("actions", str) or []

        for cc in self.src_country:
            if not re.match(r"^[a-zA-Z][a-zA-Z]$", cc):
                raise ConfigError(
                    "Invalid country code: {}. "
                    "Countries must be defined with a two-letter "
                    "ISO code.".format(cc)
                )

    # NOTE: historical (misspelled) name kept for compatibility with any
    # external caller.
    def _str_src_county(self):
        return ", ".join(self.src_country)

    def _str_src_as(self):
        return ", ".join(map(str, self.src_as))

    def _str_probe_id(self):
        return ", ".join(map(str, self.probe_id))

    def _str_internal_labels(self):
        return ", ".join(self.internal_labels)

    def __str__(self):
        if self.descr:
            return self.descr
        else:
            ret = []
            if self.reverse:
                ret.append("Reverse")
            if len(self.src_country) > 0:
                ret.append("Country: {}".format(self._str_src_county()))
            if len(self.src_as) > 0:
                ret.append("Source AS: {}".format(self._str_src_as()))
            if len(self.probe_id) > 0:
                ret.append("Probe ID: {}".format(self._str_probe_id()))
            if len(self.internal_labels) > 0:
                ret.append("Internal labels: {}".format(
                    self._str_internal_labels())
                )
            if len(ret) > 0:
                return "; ".join(ret)
            else:
                return "Match any probe"

    def display(self):
        """Print a human readable description of the rule to stdout."""
        criteria_found = 0

        if self.descr:
            print(" Description : {}".format(self.descr))
            print("")

        if self.reverse:
            print(" Reverse : {}".format(self.reverse))

        if len(self.src_country) > 0:
            criteria_found += 1
            print(" Country : {}".format(self._str_src_county()))

        if len(self.src_as) > 0:
            criteria_found += 1
            print(" Source AS : {}".format(self._str_src_as()))

        if len(self.probe_id) > 0:
            criteria_found += 1
            print(" Probe ID : {}".format(self._str_probe_id()))

        if len(self.internal_labels) > 0:
            criteria_found += 1
            print(" Internal labels: {}".format(self._str_internal_labels()))

        if criteria_found > 1:
            print("")
            print(
                " The rule matches for source probes that {}satisfy all the "
                "above criteria.".format("do not " if self.reverse else "")
            )
        elif criteria_found == 1:
            print("")
            # Typo fix: "satisty" -> "satisfy".
            print(
                " The rule matches for source probes that {}satisfy the "
                "above criterion.".format("do not " if self.reverse else "")
            )
        else:
            # Spacing fix: the original "never " produced a double space.
            print(
                " No criteria defined for the rule: it {} matches for "
                "any source probe.".format(
                    "never" if self.reverse else "always"
                )
            )

        print("")

        if self.process_next:
            if self.process_next is True:
                print(
                    " The rules following this one are processed even if a "
                    "matching condition is found."
                )
            else:
                print(
                    " If a matching condition is not found, the rule that "
                    "follows this one is not elaborated and the rules "
                    "processing is forcedly stopped."
                )
        else:
            print(
                " If a matching condition is not found, the rule that "
                "follows this one is elaborated."
            )
            print(
                " Once a matching condition is found, the rules following "
                "this one are not processed and the execution is stopped."
            )
        print("")

    def probe_matches(self, probe):
        """Return True if `probe` satisfies this rule.

        With `reverse` set, probes matching all the criteria - and rules
        with no criteria at all - do NOT pass.
        """
        criteria_cnt = 0
        match_cnt = 0

        if self.reverse:
            logger.debug(" excluding rule!")

        if len(self.src_country) > 0:
            criteria_cnt += 1
            logger.debug(
                " testing probe ID {}: "
                "country [{}] in {}".format(probe.id, probe.country_code,
                                            self._str_src_county())
            )
            if probe.country_code in self.src_country:
                match_cnt += 1

        if len(self.src_as) > 0:
            criteria_cnt += 1
            logger.debug(
                " testing probe ID {}: "
                "src AS [{}] in {}".format(
                    probe.id,
                    probe.asn,
                    self._str_src_as())
            )
            # probe.asn is the ASN matching the measurement's address
            # family (ASN_v4 or ASN_v6).
            if probe.asn in self.src_as:
                match_cnt += 1

        if len(self.probe_id) > 0:
            criteria_cnt += 1
            logger.debug(" testing probe ID {}: ID in {}".format(
                probe.id, self._str_probe_id())
            )
            if probe.id in self.probe_id:
                match_cnt += 1

        if len(self.internal_labels) > 0:
            criteria_cnt += 1

            # Gather the labels attached to this probe in both scopes.
            probe_labels = set()
            for scope in ["probes", "results"]:
                if str(probe.id) in self.monitor.internal_labels[scope]:
                    probe_labels.update(
                        self.monitor.internal_labels[scope][str(probe.id)]
                    )

            logger.debug(
                " testing probe ID {}: "
                "internal labels: {}, expected labels: {}".format(
                    probe.id,
                    ", ".join(probe_labels) if probe_labels else "none",
                    self._str_internal_labels()
                )
            )

            # One shared label is enough for this criterion to match.
            for label in self.internal_labels:
                if label in probe_labels:
                    match_cnt += 1
                    break

        if self.reverse:
            if criteria_cnt == 0:
                logger.debug(
                    " excluding rule: probe did not pass because "
                    "no criteria are defined for this rule"
                )
                return False
            elif criteria_cnt == match_cnt:
                logger.debug(
                    " excluding rule: probe did not pass because "
                    "it matched all the criteria for this rule"
                )
                return False
            else:
                return True
        else:
            # All the defined criteria must match (0 == 0 when no
            # criteria are given: the rule matches any probe).
            return criteria_cnt == match_cnt

    def perform_actions(self, result=None, expres=None, result_matches=None):
        """Run the rule's actions, honoring each action's `when` setting.

        `result_matches` None means no expected result was processed: in
        that case "on_match" actions are performed too.
        """
        for action_name in self.actions:
            action = self.monitor.actions[action_name]
            if action.when == "always" or \
               (action.when == "on_match" and
                    (result_matches is None or result_matches is True)) or \
               (action.when == "on_mismatch" and result_matches is False):
                try:
                    action.perform(result, expres, result_matches)
                except RIPEAtlasMonitorError as e:
                    logger.error(
                        "Error while performing action '{}': {}".format(
                            str(action), str(e)
                        )
                    )
import re
from .Errors import ConfigError
from .ExpResCriteriaBase import ExpResCriterion
from .Logging import logger
from .ParsedResults import ParsedResult_CertFps
class ExpResCriterion_CertFP(ExpResCriterion):
    """Criterion: cert_fp

    Verify SSL certificates' fingerprints.

    Available for: sslcert

    `cert_fp`: list of certificates' SHA256 fingerprints or SHA256
    fingerprints of the chain.

    A fingerprint must be in the format 12:34:AB:CD:EF:... 32 blocks of 2
    characters hex values separated by colon (":").

    The `cert_fp` parameter can contain stand-alone fingerprints or bundle of
    fingerprints in the format "fingerprint1,fingerprint2,fingerprintN".

    A result matches if any of its certificates' fingerprint is in the list
    of stand-alone expected fingerprints or if the full chain fingerprints is
    in the list of bundle fingerprints.

    Examples:

    expected_results:
      MatchLeafCertificate:
        cert_fp: 01:02:[...]:31:32
      MatchLeafCertificates:
        cert_fp:
        - 01:02:[...]:31:32
        - 12:34:[...]:CD:EF
      MatchLeafOrChain:
        cert_fp:
        - 01:02:[...]:31:32
        - 12:34:[...]:CD:EF,56:78:[...]:AB:CD
    """

    CRITERION_NAME = "cert_fp"
    AVAILABLE_FOR_MSM_TYPE = ["sslcert"]
    MANDATORY_CFG_FIELDS = []
    OPTIONAL_CFG_FIELDS = []

    # SHA256 fingerprint: 32 colon-separated pairs of hex digits.
    FP_PATTERN = re.compile("^[0-9A-F]{2}(:[0-9A-F]{2}){31}$",
                            flags=re.IGNORECASE)

    def _validate_fp(self, v):
        """Validate a fingerprint (str) or chain of fingerprints (list).

        Return the upper-cased fingerprint (or list of fingerprints);
        raise ConfigError when the format or the type is invalid.
        """
        if isinstance(v, str):
            fp = v
            if self.FP_PATTERN.match(fp):
                return fp.upper()
            else:
                raise ConfigError(
                    "Invalid SHA256 fingerprint for cert_fp: {}. "
                    "It must be in the format "
                    "12:34:AB:CD:EF:...[,56:78:9A...]: ".format(
                        fp
                    )
                )
        elif isinstance(v, list):
            ret = []
            for fp in v:
                ret.append(self._validate_fp(fp))
            return ret
        else:
            # Bug fix: the original referenced the undefined name "fp"
            # here, raising NameError instead of the intended ConfigError.
            raise ConfigError(
                "Invalid type for fp: {}".format(type(v))
            )

    def __init__(self, cfg, expres):
        ExpResCriterion.__init__(self, cfg, expres)

        # list of stand-alone fingerprints or chain of fps
        # [ fp1, fp2, [fp3, fp4] ]
        # will be converted in self.standalone_fps and self.chain_fps
        # a match occurs when
        # - at least one certificate's fp == a stand-alone fp
        # - all the certificates' fps == a chain of fps
        self.cert_fp = self._enforce_list("cert_fp", str)

        self.standalone_fps = []
        self.chain_fps = []
        for fp in self.cert_fp:
            if "," in fp:
                # A comma-separated bundle: the whole chain must match.
                self.chain_fps.append(self._validate_fp(fp.split(",")))
            else:
                self.standalone_fps.append(self._validate_fp(fp))

    def __str__(self):
        # Typo fix: "fingerpring" -> "fingerprint".
        return "Certificate SHA256 fingerprint: {}".format(
            self._str_list()
        )

    @staticmethod
    def _str_fp(s):
        # Abbreviate a fingerprint: first 2 and last 2 blocks only.
        return "{}:[...]:{}".format(
            ":".join(s.split(":")[0:2]),
            ":".join(s.split(":")[-2:])
        )

    def _str_list(self):
        res = []
        for fp in self.standalone_fps:
            res.append(self._str_fp(fp))
        for chain in self.chain_fps:
            res.append(
                "({})".format(
                    ", ".join(map(self._str_fp, chain))
                )
            )
        return ", ".join(res)

    def display_string(self):
        more_than_one = len(self.cert_fp) > 1
        return(
            "  - certificate SHA256 fingerprint must be {}the following: "
            "{}".format(
                "one of " if more_than_one else "",
                self._str_list()
            )
        )

    def prepare(self, result):
        # Cache the parsed fingerprints of the result's certificates.
        res = ParsedResult_CertFps(self.expres.monitor, result)
        self.res_cer_fps = res.cer_fps

    def result_matches(self, result):
        cer_fps = self.res_cer_fps

        logger.debug(
            " verifying if certificates fingerprints {} in {}...".format(
                ", ".join(cer_fps), self._str_list()
            )
        )

        # Any single certificate matching a stand-alone fingerprint is
        # enough.
        for cer in result.certificates:
            if cer.checksum_sha256.upper() in self.standalone_fps:
                return True

        # Otherwise, the whole chain must match one of the bundles
        # (order-insensitive comparison).
        for chain in self.chain_fps:
            if sorted(cer_fps) == sorted(chain):
                return True

        return False
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.