repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
idlesign/torrentool
|
torrentool/utils.py
|
get_open_trackers_from_remote
|
python
|
def get_open_trackers_from_remote():
url_base = 'https://raw.githubusercontent.com/idlesign/torrentool/master/torrentool/repo'
url = '%s/%s' % (url_base, OPEN_TRACKERS_FILENAME)
try:
import requests
response = requests.get(url, timeout=REMOTE_TIMEOUT)
response.raise_for_status()
open_trackers = response.text.splitlines()
except (ImportError, requests.RequestException) as e:
# Now trace is lost. `raise from` to consider.
raise RemoteDownloadError('Unable to download from %s: %s' % (url, e))
return open_trackers
|
Returns open trackers announce URLs list from remote repo.
|
train
|
https://github.com/idlesign/torrentool/blob/78c474c2ecddbad2e3287b390ac8a043957f3563/torrentool/utils.py#L64-L83
| null |
import math
from os import path
from .exceptions import RemoteUploadError, RemoteDownloadError
OPEN_TRACKERS_FILENAME = 'open_trackers.ini'
REMOTE_TIMEOUT = 4
def get_app_version():
"""Returns full version string including application name
suitable for putting into Torrent.created_by.
"""
from torrentool import VERSION
return 'torrentool/%s' % '.'.join(map(str, VERSION))
def humanize_filesize(bytes_size):
"""Returns human readable filesize.
:param int bytes_size:
:rtype: str
"""
if not bytes_size:
return '0 B'
names = ('B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB')
name_idx = int(math.floor(math.log(bytes_size, 1024)))
size = round(bytes_size / math.pow(1024, name_idx), 2)
return '%s %s' % (size, names[name_idx])
def upload_to_cache_server(fpath):
"""Uploads .torrent file to a cache server.
Returns upload file URL.
:rtype: str
"""
url_base = 'http://torrage.info'
url_upload = '%s/autoupload.php' % url_base
url_download = '%s/torrent.php?h=' % url_base
file_field = 'torrent'
try:
import requests
response = requests.post(url_upload, files={file_field: open(fpath, 'rb')}, timeout=REMOTE_TIMEOUT)
response.raise_for_status()
info_cache = response.text
return url_download + info_cache
except (ImportError, requests.RequestException) as e:
# Now trace is lost. `raise from` to consider.
raise RemoteUploadError('Unable to upload to %s: %s' % (url_upload, e))
def get_open_trackers_from_local():
"""Returns open trackers announce URLs list from local backup."""
with open(path.join(path.dirname(__file__), 'repo', OPEN_TRACKERS_FILENAME)) as f:
open_trackers = map(str.strip, f.readlines())
return list(open_trackers)
|
idlesign/torrentool
|
torrentool/utils.py
|
get_open_trackers_from_local
|
python
|
def get_open_trackers_from_local():
with open(path.join(path.dirname(__file__), 'repo', OPEN_TRACKERS_FILENAME)) as f:
open_trackers = map(str.strip, f.readlines())
return list(open_trackers)
|
Returns open trackers announce URLs list from local backup.
|
train
|
https://github.com/idlesign/torrentool/blob/78c474c2ecddbad2e3287b390ac8a043957f3563/torrentool/utils.py#L86-L91
| null |
import math
from os import path
from .exceptions import RemoteUploadError, RemoteDownloadError
OPEN_TRACKERS_FILENAME = 'open_trackers.ini'
REMOTE_TIMEOUT = 4
def get_app_version():
"""Returns full version string including application name
suitable for putting into Torrent.created_by.
"""
from torrentool import VERSION
return 'torrentool/%s' % '.'.join(map(str, VERSION))
def humanize_filesize(bytes_size):
"""Returns human readable filesize.
:param int bytes_size:
:rtype: str
"""
if not bytes_size:
return '0 B'
names = ('B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB')
name_idx = int(math.floor(math.log(bytes_size, 1024)))
size = round(bytes_size / math.pow(1024, name_idx), 2)
return '%s %s' % (size, names[name_idx])
def upload_to_cache_server(fpath):
"""Uploads .torrent file to a cache server.
Returns upload file URL.
:rtype: str
"""
url_base = 'http://torrage.info'
url_upload = '%s/autoupload.php' % url_base
url_download = '%s/torrent.php?h=' % url_base
file_field = 'torrent'
try:
import requests
response = requests.post(url_upload, files={file_field: open(fpath, 'rb')}, timeout=REMOTE_TIMEOUT)
response.raise_for_status()
info_cache = response.text
return url_download + info_cache
except (ImportError, requests.RequestException) as e:
# Now trace is lost. `raise from` to consider.
raise RemoteUploadError('Unable to upload to %s: %s' % (url_upload, e))
def get_open_trackers_from_remote():
"""Returns open trackers announce URLs list from remote repo."""
url_base = 'https://raw.githubusercontent.com/idlesign/torrentool/master/torrentool/repo'
url = '%s/%s' % (url_base, OPEN_TRACKERS_FILENAME)
try:
import requests
response = requests.get(url, timeout=REMOTE_TIMEOUT)
response.raise_for_status()
open_trackers = response.text.splitlines()
except (ImportError, requests.RequestException) as e:
# Now trace is lost. `raise from` to consider.
raise RemoteDownloadError('Unable to download from %s: %s' % (url, e))
return open_trackers
|
idlesign/torrentool
|
torrentool/bencode.py
|
Bencode.encode
|
python
|
def encode(cls, value):
val_encoding = 'utf-8'
def encode_str(v):
try:
v_enc = encode(v, val_encoding)
except UnicodeDecodeError:
if PY3:
raise
else:
# Suppose bytestring
v_enc = v
prefix = encode('%s:' % len(v_enc), val_encoding)
return prefix + v_enc
def encode_(val):
if isinstance(val, str_type):
result = encode_str(val)
elif isinstance(val, int_types):
result = encode(('i%se' % val), val_encoding)
elif isinstance(val, (list, set, tuple)):
result = encode('l', val_encoding)
for item in val:
result += encode_(item)
result += encode('e', val_encoding)
elif isinstance(val, dict):
result = encode('d', val_encoding)
# Dictionaries are expected to be sorted by key.
for k, v in OrderedDict(sorted(val.items(), key=itemgetter(0))).items():
result += (encode_str(k) + encode_(v))
result += encode('e', val_encoding)
elif isinstance(val, byte_types):
result = encode('%s:' % len(val), val_encoding)
result += val
else:
raise BencodeEncodingError('Unable to encode `%s` %s' % (type(val), val))
return result
return encode_(value)
|
Encodes a value into bencoded bytes.
:param value: Python object to be encoded (str, int, list, dict).
:param str val_encoding: Encoding used by strings in a given object.
:rtype: bytes
|
train
|
https://github.com/idlesign/torrentool/blob/78c474c2ecddbad2e3287b390ac8a043957f3563/torrentool/bencode.py#L27-L81
|
[
"def encode_(val):\n if isinstance(val, str_type):\n result = encode_str(val)\n\n elif isinstance(val, int_types):\n result = encode(('i%se' % val), val_encoding)\n\n elif isinstance(val, (list, set, tuple)):\n result = encode('l', val_encoding)\n for item in val:\n result += encode_(item)\n result += encode('e', val_encoding)\n\n elif isinstance(val, dict):\n result = encode('d', val_encoding)\n\n # Dictionaries are expected to be sorted by key.\n for k, v in OrderedDict(sorted(val.items(), key=itemgetter(0))).items():\n result += (encode_str(k) + encode_(v))\n\n result += encode('e', val_encoding)\n\n elif isinstance(val, byte_types):\n result = encode('%s:' % len(val), val_encoding)\n result += val\n\n else:\n raise BencodeEncodingError('Unable to encode `%s` %s' % (type(val), val))\n\n return result\n"
] |
class Bencode(object):
"""Exposes utilities for bencoding."""
@classmethod
@classmethod
def decode(cls, encoded):
"""Decodes bencoded data introduced as bytes.
Returns decoded structure(s).
:param bytes encoded:
"""
def create_dict(items):
# Let's guarantee that dictionaries are sorted.
k_v_pair = zip(*[iter(items)] * 2)
return OrderedDict(sorted(k_v_pair, key=itemgetter(0)))
def create_list(items):
return list(items)
stack_items = []
stack_containers = []
def compress_stack():
target_container = stack_containers.pop()
subitems = []
while True:
subitem = stack_items.pop()
subitems.append(subitem)
if subitem is target_container:
break
container_creator = subitems.pop()
container = container_creator(reversed(subitems))
stack_items.append(container)
def parse_forward(till_char, sequence):
number = ''
char_sub_idx = 0
for char_sub_idx, char_sub in enumerate(sequence):
char_sub = chr_(char_sub)
if char_sub == till_char:
break
number += char_sub
number = int(number or 0)
char_sub_idx += 1
return number, char_sub_idx
while encoded:
char = encoded[0]
char = chr_(char)
if char == 'd': # Dictionary
stack_items.append(create_dict)
stack_containers.append(create_dict)
encoded = encoded[1:]
elif char == 'l': # List
stack_items.append(create_list)
stack_containers.append(create_list)
encoded = encoded[1:]
elif char == 'i': # Integer
number, char_sub_idx = parse_forward('e', encoded[1:])
char_sub_idx += 1
stack_items.append(number)
encoded = encoded[char_sub_idx:]
elif char.isdigit(): # String
str_len, char_sub_idx = parse_forward(':', encoded)
last_char_idx = char_sub_idx + str_len
string = encoded[char_sub_idx:last_char_idx]
try:
string = string.decode('utf-8')
except UnicodeDecodeError:
# Considered bytestring (e.g. `pieces` hashes concatenation).
pass
stack_items.append(string)
encoded = encoded[last_char_idx:]
elif char == 'e': # End of a dictionary or a list.
compress_stack()
encoded = encoded[1:]
else:
raise BencodeDecodingError('Unable to interpret `%s` char.' % char)
if len(stack_items) == 1:
stack_items = stack_items.pop()
return stack_items
@classmethod
def read_string(cls, string):
"""Decodes a given bencoded string or bytestring.
Returns decoded structure(s).
:param str string:
:rtype: list
"""
if PY3 and not isinstance(string, byte_types):
string = string.encode()
return cls.decode(string)
@classmethod
def read_file(cls, filepath):
"""Decodes bencoded data of a given file.
Returns decoded structure(s).
:param str filepath:
:rtype: list
"""
with open(filepath, mode='rb') as f:
contents = f.read()
return cls.decode(contents)
|
idlesign/torrentool
|
torrentool/bencode.py
|
Bencode.decode
|
python
|
def decode(cls, encoded):
def create_dict(items):
# Let's guarantee that dictionaries are sorted.
k_v_pair = zip(*[iter(items)] * 2)
return OrderedDict(sorted(k_v_pair, key=itemgetter(0)))
def create_list(items):
return list(items)
stack_items = []
stack_containers = []
def compress_stack():
target_container = stack_containers.pop()
subitems = []
while True:
subitem = stack_items.pop()
subitems.append(subitem)
if subitem is target_container:
break
container_creator = subitems.pop()
container = container_creator(reversed(subitems))
stack_items.append(container)
def parse_forward(till_char, sequence):
number = ''
char_sub_idx = 0
for char_sub_idx, char_sub in enumerate(sequence):
char_sub = chr_(char_sub)
if char_sub == till_char:
break
number += char_sub
number = int(number or 0)
char_sub_idx += 1
return number, char_sub_idx
while encoded:
char = encoded[0]
char = chr_(char)
if char == 'd': # Dictionary
stack_items.append(create_dict)
stack_containers.append(create_dict)
encoded = encoded[1:]
elif char == 'l': # List
stack_items.append(create_list)
stack_containers.append(create_list)
encoded = encoded[1:]
elif char == 'i': # Integer
number, char_sub_idx = parse_forward('e', encoded[1:])
char_sub_idx += 1
stack_items.append(number)
encoded = encoded[char_sub_idx:]
elif char.isdigit(): # String
str_len, char_sub_idx = parse_forward(':', encoded)
last_char_idx = char_sub_idx + str_len
string = encoded[char_sub_idx:last_char_idx]
try:
string = string.decode('utf-8')
except UnicodeDecodeError:
# Considered bytestring (e.g. `pieces` hashes concatenation).
pass
stack_items.append(string)
encoded = encoded[last_char_idx:]
elif char == 'e': # End of a dictionary or a list.
compress_stack()
encoded = encoded[1:]
else:
raise BencodeDecodingError('Unable to interpret `%s` char.' % char)
if len(stack_items) == 1:
stack_items = stack_items.pop()
return stack_items
|
Decodes bencoded data introduced as bytes.
Returns decoded structure(s).
:param bytes encoded:
|
train
|
https://github.com/idlesign/torrentool/blob/78c474c2ecddbad2e3287b390ac8a043957f3563/torrentool/bencode.py#L84-L177
|
[
"def parse_forward(till_char, sequence):\n number = ''\n char_sub_idx = 0\n\n for char_sub_idx, char_sub in enumerate(sequence):\n char_sub = chr_(char_sub)\n if char_sub == till_char:\n break\n\n number += char_sub\n\n number = int(number or 0)\n char_sub_idx += 1\n\n return number, char_sub_idx\n"
] |
class Bencode(object):
"""Exposes utilities for bencoding."""
@classmethod
def encode(cls, value):
"""Encodes a value into bencoded bytes.
:param value: Python object to be encoded (str, int, list, dict).
:param str val_encoding: Encoding used by strings in a given object.
:rtype: bytes
"""
val_encoding = 'utf-8'
def encode_str(v):
try:
v_enc = encode(v, val_encoding)
except UnicodeDecodeError:
if PY3:
raise
else:
# Suppose bytestring
v_enc = v
prefix = encode('%s:' % len(v_enc), val_encoding)
return prefix + v_enc
def encode_(val):
if isinstance(val, str_type):
result = encode_str(val)
elif isinstance(val, int_types):
result = encode(('i%se' % val), val_encoding)
elif isinstance(val, (list, set, tuple)):
result = encode('l', val_encoding)
for item in val:
result += encode_(item)
result += encode('e', val_encoding)
elif isinstance(val, dict):
result = encode('d', val_encoding)
# Dictionaries are expected to be sorted by key.
for k, v in OrderedDict(sorted(val.items(), key=itemgetter(0))).items():
result += (encode_str(k) + encode_(v))
result += encode('e', val_encoding)
elif isinstance(val, byte_types):
result = encode('%s:' % len(val), val_encoding)
result += val
else:
raise BencodeEncodingError('Unable to encode `%s` %s' % (type(val), val))
return result
return encode_(value)
@classmethod
@classmethod
def read_string(cls, string):
"""Decodes a given bencoded string or bytestring.
Returns decoded structure(s).
:param str string:
:rtype: list
"""
if PY3 and not isinstance(string, byte_types):
string = string.encode()
return cls.decode(string)
@classmethod
def read_file(cls, filepath):
"""Decodes bencoded data of a given file.
Returns decoded structure(s).
:param str filepath:
:rtype: list
"""
with open(filepath, mode='rb') as f:
contents = f.read()
return cls.decode(contents)
|
idlesign/torrentool
|
torrentool/bencode.py
|
Bencode.read_string
|
python
|
def read_string(cls, string):
if PY3 and not isinstance(string, byte_types):
string = string.encode()
return cls.decode(string)
|
Decodes a given bencoded string or bytestring.
Returns decoded structure(s).
:param str string:
:rtype: list
|
train
|
https://github.com/idlesign/torrentool/blob/78c474c2ecddbad2e3287b390ac8a043957f3563/torrentool/bencode.py#L180-L191
|
[
"def decode(cls, encoded):\n \"\"\"Decodes bencoded data introduced as bytes.\n\n Returns decoded structure(s).\n\n :param bytes encoded:\n \"\"\"\n def create_dict(items):\n # Let's guarantee that dictionaries are sorted.\n k_v_pair = zip(*[iter(items)] * 2)\n return OrderedDict(sorted(k_v_pair, key=itemgetter(0)))\n\n def create_list(items):\n return list(items)\n\n stack_items = []\n stack_containers = []\n\n def compress_stack():\n target_container = stack_containers.pop()\n subitems = []\n\n while True:\n subitem = stack_items.pop()\n subitems.append(subitem)\n if subitem is target_container:\n break\n\n container_creator = subitems.pop()\n container = container_creator(reversed(subitems))\n stack_items.append(container)\n\n def parse_forward(till_char, sequence):\n number = ''\n char_sub_idx = 0\n\n for char_sub_idx, char_sub in enumerate(sequence):\n char_sub = chr_(char_sub)\n if char_sub == till_char:\n break\n\n number += char_sub\n\n number = int(number or 0)\n char_sub_idx += 1\n\n return number, char_sub_idx\n\n while encoded:\n char = encoded[0]\n char = chr_(char)\n\n if char == 'd': # Dictionary\n stack_items.append(create_dict)\n stack_containers.append(create_dict)\n encoded = encoded[1:]\n\n elif char == 'l': # List\n stack_items.append(create_list)\n stack_containers.append(create_list)\n encoded = encoded[1:]\n\n elif char == 'i': # Integer\n number, char_sub_idx = parse_forward('e', encoded[1:])\n char_sub_idx += 1\n\n stack_items.append(number)\n encoded = encoded[char_sub_idx:]\n\n elif char.isdigit(): # String\n str_len, char_sub_idx = parse_forward(':', encoded)\n last_char_idx = char_sub_idx + str_len\n\n string = encoded[char_sub_idx:last_char_idx]\n try:\n string = string.decode('utf-8')\n except UnicodeDecodeError:\n # Considered bytestring (e.g. 
`pieces` hashes concatenation).\n pass\n\n stack_items.append(string)\n encoded = encoded[last_char_idx:]\n\n elif char == 'e': # End of a dictionary or a list.\n compress_stack()\n encoded = encoded[1:]\n\n else:\n raise BencodeDecodingError('Unable to interpret `%s` char.' % char)\n\n if len(stack_items) == 1:\n stack_items = stack_items.pop()\n\n return stack_items\n"
] |
class Bencode(object):
"""Exposes utilities for bencoding."""
@classmethod
def encode(cls, value):
"""Encodes a value into bencoded bytes.
:param value: Python object to be encoded (str, int, list, dict).
:param str val_encoding: Encoding used by strings in a given object.
:rtype: bytes
"""
val_encoding = 'utf-8'
def encode_str(v):
try:
v_enc = encode(v, val_encoding)
except UnicodeDecodeError:
if PY3:
raise
else:
# Suppose bytestring
v_enc = v
prefix = encode('%s:' % len(v_enc), val_encoding)
return prefix + v_enc
def encode_(val):
if isinstance(val, str_type):
result = encode_str(val)
elif isinstance(val, int_types):
result = encode(('i%se' % val), val_encoding)
elif isinstance(val, (list, set, tuple)):
result = encode('l', val_encoding)
for item in val:
result += encode_(item)
result += encode('e', val_encoding)
elif isinstance(val, dict):
result = encode('d', val_encoding)
# Dictionaries are expected to be sorted by key.
for k, v in OrderedDict(sorted(val.items(), key=itemgetter(0))).items():
result += (encode_str(k) + encode_(v))
result += encode('e', val_encoding)
elif isinstance(val, byte_types):
result = encode('%s:' % len(val), val_encoding)
result += val
else:
raise BencodeEncodingError('Unable to encode `%s` %s' % (type(val), val))
return result
return encode_(value)
@classmethod
def decode(cls, encoded):
"""Decodes bencoded data introduced as bytes.
Returns decoded structure(s).
:param bytes encoded:
"""
def create_dict(items):
# Let's guarantee that dictionaries are sorted.
k_v_pair = zip(*[iter(items)] * 2)
return OrderedDict(sorted(k_v_pair, key=itemgetter(0)))
def create_list(items):
return list(items)
stack_items = []
stack_containers = []
def compress_stack():
target_container = stack_containers.pop()
subitems = []
while True:
subitem = stack_items.pop()
subitems.append(subitem)
if subitem is target_container:
break
container_creator = subitems.pop()
container = container_creator(reversed(subitems))
stack_items.append(container)
def parse_forward(till_char, sequence):
number = ''
char_sub_idx = 0
for char_sub_idx, char_sub in enumerate(sequence):
char_sub = chr_(char_sub)
if char_sub == till_char:
break
number += char_sub
number = int(number or 0)
char_sub_idx += 1
return number, char_sub_idx
while encoded:
char = encoded[0]
char = chr_(char)
if char == 'd': # Dictionary
stack_items.append(create_dict)
stack_containers.append(create_dict)
encoded = encoded[1:]
elif char == 'l': # List
stack_items.append(create_list)
stack_containers.append(create_list)
encoded = encoded[1:]
elif char == 'i': # Integer
number, char_sub_idx = parse_forward('e', encoded[1:])
char_sub_idx += 1
stack_items.append(number)
encoded = encoded[char_sub_idx:]
elif char.isdigit(): # String
str_len, char_sub_idx = parse_forward(':', encoded)
last_char_idx = char_sub_idx + str_len
string = encoded[char_sub_idx:last_char_idx]
try:
string = string.decode('utf-8')
except UnicodeDecodeError:
# Considered bytestring (e.g. `pieces` hashes concatenation).
pass
stack_items.append(string)
encoded = encoded[last_char_idx:]
elif char == 'e': # End of a dictionary or a list.
compress_stack()
encoded = encoded[1:]
else:
raise BencodeDecodingError('Unable to interpret `%s` char.' % char)
if len(stack_items) == 1:
stack_items = stack_items.pop()
return stack_items
@classmethod
@classmethod
def read_file(cls, filepath):
"""Decodes bencoded data of a given file.
Returns decoded structure(s).
:param str filepath:
:rtype: list
"""
with open(filepath, mode='rb') as f:
contents = f.read()
return cls.decode(contents)
|
idlesign/torrentool
|
torrentool/bencode.py
|
Bencode.read_file
|
python
|
def read_file(cls, filepath):
with open(filepath, mode='rb') as f:
contents = f.read()
return cls.decode(contents)
|
Decodes bencoded data of a given file.
Returns decoded structure(s).
:param str filepath:
:rtype: list
|
train
|
https://github.com/idlesign/torrentool/blob/78c474c2ecddbad2e3287b390ac8a043957f3563/torrentool/bencode.py#L194-L204
|
[
"def decode(cls, encoded):\n \"\"\"Decodes bencoded data introduced as bytes.\n\n Returns decoded structure(s).\n\n :param bytes encoded:\n \"\"\"\n def create_dict(items):\n # Let's guarantee that dictionaries are sorted.\n k_v_pair = zip(*[iter(items)] * 2)\n return OrderedDict(sorted(k_v_pair, key=itemgetter(0)))\n\n def create_list(items):\n return list(items)\n\n stack_items = []\n stack_containers = []\n\n def compress_stack():\n target_container = stack_containers.pop()\n subitems = []\n\n while True:\n subitem = stack_items.pop()\n subitems.append(subitem)\n if subitem is target_container:\n break\n\n container_creator = subitems.pop()\n container = container_creator(reversed(subitems))\n stack_items.append(container)\n\n def parse_forward(till_char, sequence):\n number = ''\n char_sub_idx = 0\n\n for char_sub_idx, char_sub in enumerate(sequence):\n char_sub = chr_(char_sub)\n if char_sub == till_char:\n break\n\n number += char_sub\n\n number = int(number or 0)\n char_sub_idx += 1\n\n return number, char_sub_idx\n\n while encoded:\n char = encoded[0]\n char = chr_(char)\n\n if char == 'd': # Dictionary\n stack_items.append(create_dict)\n stack_containers.append(create_dict)\n encoded = encoded[1:]\n\n elif char == 'l': # List\n stack_items.append(create_list)\n stack_containers.append(create_list)\n encoded = encoded[1:]\n\n elif char == 'i': # Integer\n number, char_sub_idx = parse_forward('e', encoded[1:])\n char_sub_idx += 1\n\n stack_items.append(number)\n encoded = encoded[char_sub_idx:]\n\n elif char.isdigit(): # String\n str_len, char_sub_idx = parse_forward(':', encoded)\n last_char_idx = char_sub_idx + str_len\n\n string = encoded[char_sub_idx:last_char_idx]\n try:\n string = string.decode('utf-8')\n except UnicodeDecodeError:\n # Considered bytestring (e.g. 
`pieces` hashes concatenation).\n pass\n\n stack_items.append(string)\n encoded = encoded[last_char_idx:]\n\n elif char == 'e': # End of a dictionary or a list.\n compress_stack()\n encoded = encoded[1:]\n\n else:\n raise BencodeDecodingError('Unable to interpret `%s` char.' % char)\n\n if len(stack_items) == 1:\n stack_items = stack_items.pop()\n\n return stack_items\n"
] |
class Bencode(object):
"""Exposes utilities for bencoding."""
@classmethod
def encode(cls, value):
"""Encodes a value into bencoded bytes.
:param value: Python object to be encoded (str, int, list, dict).
:param str val_encoding: Encoding used by strings in a given object.
:rtype: bytes
"""
val_encoding = 'utf-8'
def encode_str(v):
try:
v_enc = encode(v, val_encoding)
except UnicodeDecodeError:
if PY3:
raise
else:
# Suppose bytestring
v_enc = v
prefix = encode('%s:' % len(v_enc), val_encoding)
return prefix + v_enc
def encode_(val):
if isinstance(val, str_type):
result = encode_str(val)
elif isinstance(val, int_types):
result = encode(('i%se' % val), val_encoding)
elif isinstance(val, (list, set, tuple)):
result = encode('l', val_encoding)
for item in val:
result += encode_(item)
result += encode('e', val_encoding)
elif isinstance(val, dict):
result = encode('d', val_encoding)
# Dictionaries are expected to be sorted by key.
for k, v in OrderedDict(sorted(val.items(), key=itemgetter(0))).items():
result += (encode_str(k) + encode_(v))
result += encode('e', val_encoding)
elif isinstance(val, byte_types):
result = encode('%s:' % len(val), val_encoding)
result += val
else:
raise BencodeEncodingError('Unable to encode `%s` %s' % (type(val), val))
return result
return encode_(value)
@classmethod
def decode(cls, encoded):
"""Decodes bencoded data introduced as bytes.
Returns decoded structure(s).
:param bytes encoded:
"""
def create_dict(items):
# Let's guarantee that dictionaries are sorted.
k_v_pair = zip(*[iter(items)] * 2)
return OrderedDict(sorted(k_v_pair, key=itemgetter(0)))
def create_list(items):
return list(items)
stack_items = []
stack_containers = []
def compress_stack():
target_container = stack_containers.pop()
subitems = []
while True:
subitem = stack_items.pop()
subitems.append(subitem)
if subitem is target_container:
break
container_creator = subitems.pop()
container = container_creator(reversed(subitems))
stack_items.append(container)
def parse_forward(till_char, sequence):
number = ''
char_sub_idx = 0
for char_sub_idx, char_sub in enumerate(sequence):
char_sub = chr_(char_sub)
if char_sub == till_char:
break
number += char_sub
number = int(number or 0)
char_sub_idx += 1
return number, char_sub_idx
while encoded:
char = encoded[0]
char = chr_(char)
if char == 'd': # Dictionary
stack_items.append(create_dict)
stack_containers.append(create_dict)
encoded = encoded[1:]
elif char == 'l': # List
stack_items.append(create_list)
stack_containers.append(create_list)
encoded = encoded[1:]
elif char == 'i': # Integer
number, char_sub_idx = parse_forward('e', encoded[1:])
char_sub_idx += 1
stack_items.append(number)
encoded = encoded[char_sub_idx:]
elif char.isdigit(): # String
str_len, char_sub_idx = parse_forward(':', encoded)
last_char_idx = char_sub_idx + str_len
string = encoded[char_sub_idx:last_char_idx]
try:
string = string.decode('utf-8')
except UnicodeDecodeError:
# Considered bytestring (e.g. `pieces` hashes concatenation).
pass
stack_items.append(string)
encoded = encoded[last_char_idx:]
elif char == 'e': # End of a dictionary or a list.
compress_stack()
encoded = encoded[1:]
else:
raise BencodeDecodingError('Unable to interpret `%s` char.' % char)
if len(stack_items) == 1:
stack_items = stack_items.pop()
return stack_items
@classmethod
def read_string(cls, string):
"""Decodes a given bencoded string or bytestring.
Returns decoded structure(s).
:param str string:
:rtype: list
"""
if PY3 and not isinstance(string, byte_types):
string = string.encode()
return cls.decode(string)
@classmethod
|
perrygeo/python-rasterstats
|
src/rasterstats/cli.py
|
zonalstats
|
python
|
def zonalstats(features, raster, all_touched, band, categorical,
indent, info, nodata, prefix, stats, sequence, use_rs):
'''zonalstats generates summary statistics of geospatial raster datasets
based on vector features.
The input arguments to zonalstats should be valid GeoJSON Features. (see cligj)
The output GeoJSON will be mostly unchanged but have additional properties per feature
describing the summary statistics (min, max, mean, etc.) of the underlying raster dataset.
The raster is specified by the required -r/--raster argument.
Example, calculate rainfall stats for each state and output to file:
\b
rio zonalstats states.geojson -r rainfall.tif > mean_rainfall_by_state.geojson
'''
if info:
logging.basicConfig(level=logging.INFO)
if stats is not None:
stats = stats.split(" ")
if 'all' in [x.lower() for x in stats]:
stats = "ALL"
zonal_results = gen_zonal_stats(
features,
raster,
all_touched=all_touched,
band=band,
categorical=categorical,
nodata=nodata,
stats=stats,
prefix=prefix,
geojson_out=True)
if sequence:
for feature in zonal_results:
if use_rs:
click.echo(b'\x1e', nl=False)
click.echo(json.dumps(feature))
else:
click.echo(json.dumps(
{'type': 'FeatureCollection',
'features': list(zonal_results)}))
|
zonalstats generates summary statistics of geospatial raster datasets
based on vector features.
The input arguments to zonalstats should be valid GeoJSON Features. (see cligj)
The output GeoJSON will be mostly unchanged but have additional properties per feature
describing the summary statistics (min, max, mean, etc.) of the underlying raster dataset.
The raster is specified by the required -r/--raster argument.
Example, calculate rainfall stats for each state and output to file:
\b
rio zonalstats states.geojson -r rainfall.tif > mean_rainfall_by_state.geojson
|
train
|
https://github.com/perrygeo/python-rasterstats/blob/910455cd7c9c21eadf464927db72b38ef62b7dfb/src/rasterstats/cli.py#L29-L74
|
[
"def gen_zonal_stats(\n vectors, raster,\n layer=0,\n band=1,\n nodata=None,\n affine=None,\n stats=None,\n all_touched=False,\n categorical=False,\n category_map=None,\n add_stats=None,\n zone_func=None,\n raster_out=False,\n prefix=None,\n geojson_out=False, **kwargs):\n \"\"\"Zonal statistics of raster values aggregated to vector geometries.\n\n Parameters\n ----------\n vectors: path to an vector source or geo-like python objects\n\n raster: ndarray or path to a GDAL raster source\n If ndarray is passed, the ``affine`` kwarg is required.\n\n layer: int or string, optional\n If `vectors` is a path to an fiona source,\n specify the vector layer to use either by name or number.\n defaults to 0\n\n band: int, optional\n If `raster` is a GDAL source, the band number to use (counting from 1).\n defaults to 1.\n\n nodata: float, optional\n If `raster` is a GDAL source, this value overrides any NODATA value\n specified in the file's metadata.\n If `None`, the file's metadata's NODATA value (if any) will be used.\n defaults to `None`.\n\n affine: Affine instance\n required only for ndarrays, otherwise it is read from src\n\n stats: list of str, or space-delimited str, optional\n Which statistics to calculate for each zone.\n All possible choices are listed in ``utils.VALID_STATS``.\n defaults to ``DEFAULT_STATS``, a subset of these.\n\n all_touched: bool, optional\n Whether to include every raster cell touched by a geometry, or only\n those having a center point within the polygon.\n defaults to `False`\n\n categorical: bool, optional\n\n category_map: dict\n A dictionary mapping raster values to human-readable categorical names.\n Only applies when categorical is True\n\n add_stats: dict\n with names and functions of additional stats to compute, optional\n\n zone_func: callable\n function to apply to zone ndarray prior to computing stats\n\n raster_out: boolean\n Include the masked numpy array for each feature?, optional\n\n Each feature dictionary will have the 
following additional keys:\n mini_raster_array: The clipped and masked numpy array\n mini_raster_affine: Affine transformation\n mini_raster_nodata: NoData Value\n\n prefix: string\n add a prefix to the keys (default: None)\n\n geojson_out: boolean\n Return list of GeoJSON-like features (default: False)\n Original feature geometry and properties will be retained\n with zonal stats appended as additional properties.\n Use with `prefix` to ensure unique and meaningful property names.\n\n Returns\n -------\n generator of dicts (if geojson_out is False)\n Each item corresponds to a single vector feature and\n contains keys for each of the specified stats.\n\n generator of geojson features (if geojson_out is True)\n GeoJSON-like Feature as python dict\n \"\"\"\n stats, run_count = check_stats(stats, categorical)\n\n # Handle 1.0 deprecations\n transform = kwargs.get('transform')\n if transform:\n warnings.warn(\"GDAL-style transforms will disappear in 1.0. \"\n \"Use affine=Affine.from_gdal(*transform) instead\",\n DeprecationWarning)\n if not affine:\n affine = Affine.from_gdal(*transform)\n\n cp = kwargs.get('copy_properties')\n if cp:\n warnings.warn(\"Use `geojson_out` to preserve feature properties\",\n DeprecationWarning)\n\n band_num = kwargs.get('band_num')\n if band_num:\n warnings.warn(\"Use `band` to specify band number\", DeprecationWarning)\n band = band_num\n\n with Raster(raster, affine, nodata, band) as rast:\n features_iter = read_features(vectors, layer)\n for _, feat in enumerate(features_iter):\n geom = shape(feat['geometry'])\n\n if 'Point' in geom.type:\n geom = boxify_points(geom, rast)\n\n geom_bounds = tuple(geom.bounds)\n\n fsrc = rast.read(bounds=geom_bounds)\n\n # rasterized geometry\n rv_array = rasterize_geom(geom, like=fsrc, all_touched=all_touched)\n\n # nodata mask\n isnodata = (fsrc.array == fsrc.nodata)\n\n # add nan mask (if necessary)\n has_nan = (\n np.issubdtype(fsrc.array.dtype, np.floating)\n and np.isnan(fsrc.array.min()))\n if 
has_nan:\n isnodata = (isnodata | np.isnan(fsrc.array))\n\n # Mask the source data array\n # mask everything that is not a valid value or not within our geom\n masked = np.ma.MaskedArray(\n fsrc.array,\n mask=(isnodata | ~rv_array))\n\n # If we're on 64 bit platform and the array is an integer type\n # make sure we cast to 64 bit to avoid overflow.\n # workaround for https://github.com/numpy/numpy/issues/8433\n if sysinfo.platform_bits == 64 and \\\n masked.dtype != np.int64 and \\\n issubclass(masked.dtype.type, np.integer):\n masked = masked.astype(np.int64)\n\n # execute zone_func on masked zone ndarray\n if zone_func is not None:\n if not callable(zone_func):\n raise TypeError(('zone_func must be a callable '\n 'which accepts function a '\n 'single `zone_array` arg.'))\n zone_func(masked)\n\n if masked.compressed().size == 0:\n # nothing here, fill with None and move on\n feature_stats = dict([(stat, None) for stat in stats])\n if 'count' in stats: # special case, zero makes sense here\n feature_stats['count'] = 0\n else:\n if run_count:\n keys, counts = np.unique(masked.compressed(), return_counts=True)\n pixel_count = dict(zip([np.asscalar(k) for k in keys],\n [np.asscalar(c) for c in counts]))\n\n if categorical:\n feature_stats = dict(pixel_count)\n if category_map:\n feature_stats = remap_categories(category_map, feature_stats)\n else:\n feature_stats = {}\n\n if 'min' in stats:\n feature_stats['min'] = float(masked.min())\n if 'max' in stats:\n feature_stats['max'] = float(masked.max())\n if 'mean' in stats:\n feature_stats['mean'] = float(masked.mean())\n if 'count' in stats:\n feature_stats['count'] = int(masked.count())\n # optional\n if 'sum' in stats:\n feature_stats['sum'] = float(masked.sum())\n if 'std' in stats:\n feature_stats['std'] = float(masked.std())\n if 'median' in stats:\n feature_stats['median'] = float(np.median(masked.compressed()))\n if 'majority' in stats:\n feature_stats['majority'] = float(key_assoc_val(pixel_count, max))\n if 
'minority' in stats:\n feature_stats['minority'] = float(key_assoc_val(pixel_count, min))\n if 'unique' in stats:\n feature_stats['unique'] = len(list(pixel_count.keys()))\n if 'range' in stats:\n try:\n rmin = feature_stats['min']\n except KeyError:\n rmin = float(masked.min())\n try:\n rmax = feature_stats['max']\n except KeyError:\n rmax = float(masked.max())\n feature_stats['range'] = rmax - rmin\n\n for pctile in [s for s in stats if s.startswith('percentile_')]:\n q = get_percentile(pctile)\n pctarr = masked.compressed()\n feature_stats[pctile] = np.percentile(pctarr, q)\n\n if 'nodata' in stats or 'nan' in stats:\n featmasked = np.ma.MaskedArray(fsrc.array, mask=(~rv_array))\n\n if 'nodata' in stats:\n feature_stats['nodata'] = float((featmasked == fsrc.nodata).sum())\n if 'nan' in stats:\n feature_stats['nan'] = float(np.isnan(featmasked).sum()) if has_nan else 0\n\n if add_stats is not None:\n for stat_name, stat_func in add_stats.items():\n feature_stats[stat_name] = stat_func(masked)\n\n if raster_out:\n feature_stats['mini_raster_array'] = masked\n feature_stats['mini_raster_affine'] = fsrc.affine\n feature_stats['mini_raster_nodata'] = fsrc.nodata\n\n if prefix is not None:\n prefixed_feature_stats = {}\n for key, val in feature_stats.items():\n newkey = \"{}{}\".format(prefix, key)\n prefixed_feature_stats[newkey] = val\n feature_stats = prefixed_feature_stats\n\n if geojson_out:\n for key, val in feature_stats.items():\n if 'properties' not in feat:\n feat['properties'] = {}\n feat['properties'][key] = val\n yield feat\n else:\n yield feature_stats\n"
] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
import logging
import click
import cligj
import simplejson as json
from rasterstats import gen_zonal_stats, gen_point_query
from rasterstats._version import __version__ as version
SETTINGS = dict(help_option_names=['-h', '--help'])
@click.command(context_settings=SETTINGS)
@cligj.features_in_arg
@click.version_option(version=version, message='%(version)s')
@click.option('--raster', '-r', required=True)
@click.option('--all-touched/--no-all-touched', default=False)
@click.option('--band', type=int, default=1)
@click.option('--categorical/--no-categorical', default=False)
@click.option('--indent', type=int, default=None)
@click.option('--info/--no-info', default=False)
@click.option('--nodata', type=int, default=None)
@click.option('--prefix', type=str, default='_')
@click.option('--stats', type=str, default=None)
@cligj.sequence_opt
@cligj.use_rs_opt
@click.command(context_settings=SETTINGS)
@cligj.features_in_arg
@click.version_option(version=version, message='%(version)s')
@click.option('--raster', '-r', required=True)
@click.option('--band', type=int, default=1)
@click.option('--nodata', type=int, default=None)
@click.option('--indent', type=int, default=None)
@click.option('--interpolate', type=str, default='bilinear')
@click.option('--property-name', type=str, default='value')
@cligj.sequence_opt
@cligj.use_rs_opt
def pointquery(features, raster, band, indent, nodata,
interpolate, property_name, sequence, use_rs):
"""
Queries the raster values at the points of the input GeoJSON Features.
The raster values are added to the features properties and output as GeoJSON
Feature Collection.
If the Features are Points, the point geometery is used.
For other Feauture types, all of the verticies of the geometry will be queried.
For example, you can provide a linestring and get the profile along the line
if the verticies are spaced properly.
You can use either bilinear (default) or nearest neighbor interpolation.
"""
results = gen_point_query(
features,
raster,
band=band,
nodata=nodata,
interpolate=interpolate,
property_name=property_name,
geojson_out=True)
if sequence:
for feature in results:
if use_rs:
click.echo(b'\x1e', nl=False)
click.echo(json.dumps(feature))
else:
click.echo(json.dumps(
{'type': 'FeatureCollection',
'features': list(results)}))
|
perrygeo/python-rasterstats
|
src/rasterstats/cli.py
|
pointquery
|
python
|
def pointquery(features, raster, band, indent, nodata,
interpolate, property_name, sequence, use_rs):
results = gen_point_query(
features,
raster,
band=band,
nodata=nodata,
interpolate=interpolate,
property_name=property_name,
geojson_out=True)
if sequence:
for feature in results:
if use_rs:
click.echo(b'\x1e', nl=False)
click.echo(json.dumps(feature))
else:
click.echo(json.dumps(
{'type': 'FeatureCollection',
'features': list(results)}))
|
Queries the raster values at the points of the input GeoJSON Features.
The raster values are added to the features properties and output as GeoJSON
Feature Collection.
If the Features are Points, the point geometery is used.
For other Feauture types, all of the verticies of the geometry will be queried.
For example, you can provide a linestring and get the profile along the line
if the verticies are spaced properly.
You can use either bilinear (default) or nearest neighbor interpolation.
|
train
|
https://github.com/perrygeo/python-rasterstats/blob/910455cd7c9c21eadf464927db72b38ef62b7dfb/src/rasterstats/cli.py#L88-L120
|
[
"def gen_point_query(\n vectors,\n raster,\n band=1,\n layer=0,\n nodata=None,\n affine=None,\n interpolate='bilinear',\n property_name='value',\n geojson_out=False):\n \"\"\"\n Given a set of vector features and a raster,\n generate raster values at each vertex of the geometry\n\n For features with point geometry,\n the values will be a 1D with the index refering to the feature\n\n For features with other geometry types,\n it effectively creates a 2D list, such that\n the first index is the feature, the second is the vertex within the geometry\n\n Parameters\n ----------\n vectors: path to an vector source or geo-like python objects\n\n raster: ndarray or path to a GDAL raster source\n If ndarray is passed, the `transform` kwarg is required.\n\n layer: int or string, optional\n If `vectors` is a path to an fiona source,\n specify the vector layer to use either by name or number.\n defaults to 0\n\n band: int, optional\n If `raster` is a GDAL source, the band number to use (counting from 1).\n defaults to 1.\n\n nodata: float, optional\n If `raster` is a GDAL source, this value overrides any NODATA value\n specified in the file's metadata.\n If `None`, the file's metadata's NODATA value (if any) will be used.\n defaults to `None`.\n\n affine: Affine instance\n required only for ndarrays, otherwise it is read from src\n\n interpolate: string\n 'bilinear' or 'nearest' interpolation\n\n property_name: string\n name of property key if geojson_out\n\n geojson_out: boolean\n generate GeoJSON-like features (default: False)\n original feature geometry and properties will be retained\n point query values appended as additional properties.\n\n Returns\n -------\n generator of arrays (if ``geojson_out`` is False)\n generator of geojson features (if ``geojson_out`` is True)\n \"\"\"\n if interpolate not in ['nearest', 'bilinear']:\n raise ValueError(\"interpolate must be nearest or bilinear\")\n\n features_iter = read_features(vectors, layer)\n\n with Raster(raster, 
nodata=nodata, affine=affine, band=band) as rast:\n\n for feat in features_iter:\n geom = shape(feat['geometry'])\n vals = []\n for x, y in geom_xys(geom):\n if interpolate == 'nearest':\n r, c = rast.index(x, y)\n window = ((int(r), int(r+1)), (int(c), int(c+1)))\n src_array = rast.read(window=window, masked=True).array\n val = src_array[0, 0]\n if val is masked:\n vals.append(None)\n else:\n vals.append(asscalar(val))\n\n elif interpolate == 'bilinear':\n window, unitxy = point_window_unitxy(x, y, rast.affine)\n src_array = rast.read(window=window, masked=True).array\n vals.append(bilinear(src_array, *unitxy))\n\n if len(vals) == 1:\n vals = vals[0] # flatten single-element lists\n\n if geojson_out:\n if 'properties' not in feat:\n feat['properties'] = {}\n feat['properties'][property_name] = vals\n yield feat\n else:\n yield vals\n"
] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
import logging
import click
import cligj
import simplejson as json
from rasterstats import gen_zonal_stats, gen_point_query
from rasterstats._version import __version__ as version
SETTINGS = dict(help_option_names=['-h', '--help'])
@click.command(context_settings=SETTINGS)
@cligj.features_in_arg
@click.version_option(version=version, message='%(version)s')
@click.option('--raster', '-r', required=True)
@click.option('--all-touched/--no-all-touched', default=False)
@click.option('--band', type=int, default=1)
@click.option('--categorical/--no-categorical', default=False)
@click.option('--indent', type=int, default=None)
@click.option('--info/--no-info', default=False)
@click.option('--nodata', type=int, default=None)
@click.option('--prefix', type=str, default='_')
@click.option('--stats', type=str, default=None)
@cligj.sequence_opt
@cligj.use_rs_opt
def zonalstats(features, raster, all_touched, band, categorical,
indent, info, nodata, prefix, stats, sequence, use_rs):
'''zonalstats generates summary statistics of geospatial raster datasets
based on vector features.
The input arguments to zonalstats should be valid GeoJSON Features. (see cligj)
The output GeoJSON will be mostly unchanged but have additional properties per feature
describing the summary statistics (min, max, mean, etc.) of the underlying raster dataset.
The raster is specified by the required -r/--raster argument.
Example, calculate rainfall stats for each state and output to file:
\b
rio zonalstats states.geojson -r rainfall.tif > mean_rainfall_by_state.geojson
'''
if info:
logging.basicConfig(level=logging.INFO)
if stats is not None:
stats = stats.split(" ")
if 'all' in [x.lower() for x in stats]:
stats = "ALL"
zonal_results = gen_zonal_stats(
features,
raster,
all_touched=all_touched,
band=band,
categorical=categorical,
nodata=nodata,
stats=stats,
prefix=prefix,
geojson_out=True)
if sequence:
for feature in zonal_results:
if use_rs:
click.echo(b'\x1e', nl=False)
click.echo(json.dumps(feature))
else:
click.echo(json.dumps(
{'type': 'FeatureCollection',
'features': list(zonal_results)}))
@click.command(context_settings=SETTINGS)
@cligj.features_in_arg
@click.version_option(version=version, message='%(version)s')
@click.option('--raster', '-r', required=True)
@click.option('--band', type=int, default=1)
@click.option('--nodata', type=int, default=None)
@click.option('--indent', type=int, default=None)
@click.option('--interpolate', type=str, default='bilinear')
@click.option('--property-name', type=str, default='value')
@cligj.sequence_opt
@cligj.use_rs_opt
|
perrygeo/python-rasterstats
|
src/rasterstats/point.py
|
point_window_unitxy
|
python
|
def point_window_unitxy(x, y, affine):
fcol, frow = ~affine * (x, y)
r, c = int(round(frow)), int(round(fcol))
# The new source window for our 2x2 array
new_win = ((r - 1, r + 1), (c - 1, c + 1))
# the new x, y coords on the unit square
unitxy = (0.5 - (c - fcol),
0.5 + (r - frow))
return new_win, unitxy
|
Given an x, y and a geotransform
Returns
- rasterio window representing 2x2 window whose center points encompass point
- the cartesian x, y coordinates of the point on the unit square
defined by the array center points.
((row1, row2), (col1, col2)), (unitx, unity)
|
train
|
https://github.com/perrygeo/python-rasterstats/blob/910455cd7c9c21eadf464927db72b38ef62b7dfb/src/rasterstats/point.py#L10-L29
| null |
from __future__ import absolute_import
from __future__ import division
from shapely.geometry import shape
from shapely import wkt
from numpy.ma import masked
from numpy import asscalar
from .io import read_features, Raster
def bilinear(arr, x, y):
""" Given a 2x2 array, an x, and y, treat center points as a unit square
return the value for the fractional row/col
using bilinear interpolation between the cells
+---+---+
| A | B | +----+
+---+---+ => | |
| C | D | +----+
+---+---+
e.g.: Center of A is at (0, 1) on unit square, D is at (1, 0), etc
"""
# for now, only 2x2 arrays
assert arr.shape == (2, 2)
ulv, urv, llv, lrv = arr[0:2, 0:2].flatten().tolist()
# not valid if not on unit square
assert 0.0 <= x <= 1.0
assert 0.0 <= y <= 1.0
if hasattr(arr, 'count') and arr.count() != 4:
# a masked array with at least one nodata
# fall back to nearest neighbor
val = arr[int(round(1 - y)), int(round(x))]
if val is masked:
return None
else:
return asscalar(val)
# bilinear interp on unit square
return ((llv * (1 - x) * (1 - y)) +
(lrv * x * (1 - y)) +
(ulv * (1 - x) * y) +
(urv * x * y))
def geom_xys(geom):
"""Given a shapely geometry,
generate a flattened series of 2D points as x,y tuples
"""
if geom.has_z:
# hack to convert to 2D, https://gist.github.com/ThomasG77/cad711667942826edc70
geom = wkt.loads(geom.to_wkt())
assert not geom.has_z
if hasattr(geom, "geoms"):
geoms = geom.geoms
else:
geoms = [geom]
for g in geoms:
arr = g.array_interface_base['data']
for pair in zip(arr[::2], arr[1::2]):
yield pair
def point_query(*args, **kwargs):
"""The primary point query entry point.
All arguments are passed directly to ``gen_point_query``.
See its docstring for details.
The only difference is that ``point_query`` will
return a list rather than a generator."""
return list(gen_point_query(*args, **kwargs))
def gen_point_query(
vectors,
raster,
band=1,
layer=0,
nodata=None,
affine=None,
interpolate='bilinear',
property_name='value',
geojson_out=False):
"""
Given a set of vector features and a raster,
generate raster values at each vertex of the geometry
For features with point geometry,
the values will be a 1D with the index refering to the feature
For features with other geometry types,
it effectively creates a 2D list, such that
the first index is the feature, the second is the vertex within the geometry
Parameters
----------
vectors: path to an vector source or geo-like python objects
raster: ndarray or path to a GDAL raster source
If ndarray is passed, the `transform` kwarg is required.
layer: int or string, optional
If `vectors` is a path to an fiona source,
specify the vector layer to use either by name or number.
defaults to 0
band: int, optional
If `raster` is a GDAL source, the band number to use (counting from 1).
defaults to 1.
nodata: float, optional
If `raster` is a GDAL source, this value overrides any NODATA value
specified in the file's metadata.
If `None`, the file's metadata's NODATA value (if any) will be used.
defaults to `None`.
affine: Affine instance
required only for ndarrays, otherwise it is read from src
interpolate: string
'bilinear' or 'nearest' interpolation
property_name: string
name of property key if geojson_out
geojson_out: boolean
generate GeoJSON-like features (default: False)
original feature geometry and properties will be retained
point query values appended as additional properties.
Returns
-------
generator of arrays (if ``geojson_out`` is False)
generator of geojson features (if ``geojson_out`` is True)
"""
if interpolate not in ['nearest', 'bilinear']:
raise ValueError("interpolate must be nearest or bilinear")
features_iter = read_features(vectors, layer)
with Raster(raster, nodata=nodata, affine=affine, band=band) as rast:
for feat in features_iter:
geom = shape(feat['geometry'])
vals = []
for x, y in geom_xys(geom):
if interpolate == 'nearest':
r, c = rast.index(x, y)
window = ((int(r), int(r+1)), (int(c), int(c+1)))
src_array = rast.read(window=window, masked=True).array
val = src_array[0, 0]
if val is masked:
vals.append(None)
else:
vals.append(asscalar(val))
elif interpolate == 'bilinear':
window, unitxy = point_window_unitxy(x, y, rast.affine)
src_array = rast.read(window=window, masked=True).array
vals.append(bilinear(src_array, *unitxy))
if len(vals) == 1:
vals = vals[0] # flatten single-element lists
if geojson_out:
if 'properties' not in feat:
feat['properties'] = {}
feat['properties'][property_name] = vals
yield feat
else:
yield vals
|
perrygeo/python-rasterstats
|
src/rasterstats/point.py
|
bilinear
|
python
|
def bilinear(arr, x, y):
# for now, only 2x2 arrays
assert arr.shape == (2, 2)
ulv, urv, llv, lrv = arr[0:2, 0:2].flatten().tolist()
# not valid if not on unit square
assert 0.0 <= x <= 1.0
assert 0.0 <= y <= 1.0
if hasattr(arr, 'count') and arr.count() != 4:
# a masked array with at least one nodata
# fall back to nearest neighbor
val = arr[int(round(1 - y)), int(round(x))]
if val is masked:
return None
else:
return asscalar(val)
# bilinear interp on unit square
return ((llv * (1 - x) * (1 - y)) +
(lrv * x * (1 - y)) +
(ulv * (1 - x) * y) +
(urv * x * y))
|
Given a 2x2 array, an x, and y, treat center points as a unit square
return the value for the fractional row/col
using bilinear interpolation between the cells
+---+---+
| A | B | +----+
+---+---+ => | |
| C | D | +----+
+---+---+
e.g.: Center of A is at (0, 1) on unit square, D is at (1, 0), etc
|
train
|
https://github.com/perrygeo/python-rasterstats/blob/910455cd7c9c21eadf464927db72b38ef62b7dfb/src/rasterstats/point.py#L32-L66
| null |
from __future__ import absolute_import
from __future__ import division
from shapely.geometry import shape
from shapely import wkt
from numpy.ma import masked
from numpy import asscalar
from .io import read_features, Raster
def point_window_unitxy(x, y, affine):
""" Given an x, y and a geotransform
Returns
- rasterio window representing 2x2 window whose center points encompass point
- the cartesian x, y coordinates of the point on the unit square
defined by the array center points.
((row1, row2), (col1, col2)), (unitx, unity)
"""
fcol, frow = ~affine * (x, y)
r, c = int(round(frow)), int(round(fcol))
# The new source window for our 2x2 array
new_win = ((r - 1, r + 1), (c - 1, c + 1))
# the new x, y coords on the unit square
unitxy = (0.5 - (c - fcol),
0.5 + (r - frow))
return new_win, unitxy
def geom_xys(geom):
"""Given a shapely geometry,
generate a flattened series of 2D points as x,y tuples
"""
if geom.has_z:
# hack to convert to 2D, https://gist.github.com/ThomasG77/cad711667942826edc70
geom = wkt.loads(geom.to_wkt())
assert not geom.has_z
if hasattr(geom, "geoms"):
geoms = geom.geoms
else:
geoms = [geom]
for g in geoms:
arr = g.array_interface_base['data']
for pair in zip(arr[::2], arr[1::2]):
yield pair
def point_query(*args, **kwargs):
"""The primary point query entry point.
All arguments are passed directly to ``gen_point_query``.
See its docstring for details.
The only difference is that ``point_query`` will
return a list rather than a generator."""
return list(gen_point_query(*args, **kwargs))
def gen_point_query(
vectors,
raster,
band=1,
layer=0,
nodata=None,
affine=None,
interpolate='bilinear',
property_name='value',
geojson_out=False):
"""
Given a set of vector features and a raster,
generate raster values at each vertex of the geometry
For features with point geometry,
the values will be a 1D with the index refering to the feature
For features with other geometry types,
it effectively creates a 2D list, such that
the first index is the feature, the second is the vertex within the geometry
Parameters
----------
vectors: path to an vector source or geo-like python objects
raster: ndarray or path to a GDAL raster source
If ndarray is passed, the `transform` kwarg is required.
layer: int or string, optional
If `vectors` is a path to an fiona source,
specify the vector layer to use either by name or number.
defaults to 0
band: int, optional
If `raster` is a GDAL source, the band number to use (counting from 1).
defaults to 1.
nodata: float, optional
If `raster` is a GDAL source, this value overrides any NODATA value
specified in the file's metadata.
If `None`, the file's metadata's NODATA value (if any) will be used.
defaults to `None`.
affine: Affine instance
required only for ndarrays, otherwise it is read from src
interpolate: string
'bilinear' or 'nearest' interpolation
property_name: string
name of property key if geojson_out
geojson_out: boolean
generate GeoJSON-like features (default: False)
original feature geometry and properties will be retained
point query values appended as additional properties.
Returns
-------
generator of arrays (if ``geojson_out`` is False)
generator of geojson features (if ``geojson_out`` is True)
"""
if interpolate not in ['nearest', 'bilinear']:
raise ValueError("interpolate must be nearest or bilinear")
features_iter = read_features(vectors, layer)
with Raster(raster, nodata=nodata, affine=affine, band=band) as rast:
for feat in features_iter:
geom = shape(feat['geometry'])
vals = []
for x, y in geom_xys(geom):
if interpolate == 'nearest':
r, c = rast.index(x, y)
window = ((int(r), int(r+1)), (int(c), int(c+1)))
src_array = rast.read(window=window, masked=True).array
val = src_array[0, 0]
if val is masked:
vals.append(None)
else:
vals.append(asscalar(val))
elif interpolate == 'bilinear':
window, unitxy = point_window_unitxy(x, y, rast.affine)
src_array = rast.read(window=window, masked=True).array
vals.append(bilinear(src_array, *unitxy))
if len(vals) == 1:
vals = vals[0] # flatten single-element lists
if geojson_out:
if 'properties' not in feat:
feat['properties'] = {}
feat['properties'][property_name] = vals
yield feat
else:
yield vals
|
perrygeo/python-rasterstats
|
src/rasterstats/point.py
|
geom_xys
|
python
|
def geom_xys(geom):
if geom.has_z:
# hack to convert to 2D, https://gist.github.com/ThomasG77/cad711667942826edc70
geom = wkt.loads(geom.to_wkt())
assert not geom.has_z
if hasattr(geom, "geoms"):
geoms = geom.geoms
else:
geoms = [geom]
for g in geoms:
arr = g.array_interface_base['data']
for pair in zip(arr[::2], arr[1::2]):
yield pair
|
Given a shapely geometry,
generate a flattened series of 2D points as x,y tuples
|
train
|
https://github.com/perrygeo/python-rasterstats/blob/910455cd7c9c21eadf464927db72b38ef62b7dfb/src/rasterstats/point.py#L69-L86
| null |
from __future__ import absolute_import
from __future__ import division
from shapely.geometry import shape
from shapely import wkt
from numpy.ma import masked
from numpy import asscalar
from .io import read_features, Raster
def point_window_unitxy(x, y, affine):
""" Given an x, y and a geotransform
Returns
- rasterio window representing 2x2 window whose center points encompass point
- the cartesian x, y coordinates of the point on the unit square
defined by the array center points.
((row1, row2), (col1, col2)), (unitx, unity)
"""
fcol, frow = ~affine * (x, y)
r, c = int(round(frow)), int(round(fcol))
# The new source window for our 2x2 array
new_win = ((r - 1, r + 1), (c - 1, c + 1))
# the new x, y coords on the unit square
unitxy = (0.5 - (c - fcol),
0.5 + (r - frow))
return new_win, unitxy
def bilinear(arr, x, y):
""" Given a 2x2 array, an x, and y, treat center points as a unit square
return the value for the fractional row/col
using bilinear interpolation between the cells
+---+---+
| A | B | +----+
+---+---+ => | |
| C | D | +----+
+---+---+
e.g.: Center of A is at (0, 1) on unit square, D is at (1, 0), etc
"""
# for now, only 2x2 arrays
assert arr.shape == (2, 2)
ulv, urv, llv, lrv = arr[0:2, 0:2].flatten().tolist()
# not valid if not on unit square
assert 0.0 <= x <= 1.0
assert 0.0 <= y <= 1.0
if hasattr(arr, 'count') and arr.count() != 4:
# a masked array with at least one nodata
# fall back to nearest neighbor
val = arr[int(round(1 - y)), int(round(x))]
if val is masked:
return None
else:
return asscalar(val)
# bilinear interp on unit square
return ((llv * (1 - x) * (1 - y)) +
(lrv * x * (1 - y)) +
(ulv * (1 - x) * y) +
(urv * x * y))
def point_query(*args, **kwargs):
"""The primary point query entry point.
All arguments are passed directly to ``gen_point_query``.
See its docstring for details.
The only difference is that ``point_query`` will
return a list rather than a generator."""
return list(gen_point_query(*args, **kwargs))
def gen_point_query(
vectors,
raster,
band=1,
layer=0,
nodata=None,
affine=None,
interpolate='bilinear',
property_name='value',
geojson_out=False):
"""
Given a set of vector features and a raster,
generate raster values at each vertex of the geometry
For features with point geometry,
the values will be a 1D with the index refering to the feature
For features with other geometry types,
it effectively creates a 2D list, such that
the first index is the feature, the second is the vertex within the geometry
Parameters
----------
vectors: path to an vector source or geo-like python objects
raster: ndarray or path to a GDAL raster source
If ndarray is passed, the `transform` kwarg is required.
layer: int or string, optional
If `vectors` is a path to an fiona source,
specify the vector layer to use either by name or number.
defaults to 0
band: int, optional
If `raster` is a GDAL source, the band number to use (counting from 1).
defaults to 1.
nodata: float, optional
If `raster` is a GDAL source, this value overrides any NODATA value
specified in the file's metadata.
If `None`, the file's metadata's NODATA value (if any) will be used.
defaults to `None`.
affine: Affine instance
required only for ndarrays, otherwise it is read from src
interpolate: string
'bilinear' or 'nearest' interpolation
property_name: string
name of property key if geojson_out
geojson_out: boolean
generate GeoJSON-like features (default: False)
original feature geometry and properties will be retained
point query values appended as additional properties.
Returns
-------
generator of arrays (if ``geojson_out`` is False)
generator of geojson features (if ``geojson_out`` is True)
"""
if interpolate not in ['nearest', 'bilinear']:
raise ValueError("interpolate must be nearest or bilinear")
features_iter = read_features(vectors, layer)
with Raster(raster, nodata=nodata, affine=affine, band=band) as rast:
for feat in features_iter:
geom = shape(feat['geometry'])
vals = []
for x, y in geom_xys(geom):
if interpolate == 'nearest':
r, c = rast.index(x, y)
window = ((int(r), int(r+1)), (int(c), int(c+1)))
src_array = rast.read(window=window, masked=True).array
val = src_array[0, 0]
if val is masked:
vals.append(None)
else:
vals.append(asscalar(val))
elif interpolate == 'bilinear':
window, unitxy = point_window_unitxy(x, y, rast.affine)
src_array = rast.read(window=window, masked=True).array
vals.append(bilinear(src_array, *unitxy))
if len(vals) == 1:
vals = vals[0] # flatten single-element lists
if geojson_out:
if 'properties' not in feat:
feat['properties'] = {}
feat['properties'][property_name] = vals
yield feat
else:
yield vals
|
perrygeo/python-rasterstats
|
src/rasterstats/point.py
|
gen_point_query
|
python
|
def gen_point_query(
vectors,
raster,
band=1,
layer=0,
nodata=None,
affine=None,
interpolate='bilinear',
property_name='value',
geojson_out=False):
if interpolate not in ['nearest', 'bilinear']:
raise ValueError("interpolate must be nearest or bilinear")
features_iter = read_features(vectors, layer)
with Raster(raster, nodata=nodata, affine=affine, band=band) as rast:
for feat in features_iter:
geom = shape(feat['geometry'])
vals = []
for x, y in geom_xys(geom):
if interpolate == 'nearest':
r, c = rast.index(x, y)
window = ((int(r), int(r+1)), (int(c), int(c+1)))
src_array = rast.read(window=window, masked=True).array
val = src_array[0, 0]
if val is masked:
vals.append(None)
else:
vals.append(asscalar(val))
elif interpolate == 'bilinear':
window, unitxy = point_window_unitxy(x, y, rast.affine)
src_array = rast.read(window=window, masked=True).array
vals.append(bilinear(src_array, *unitxy))
if len(vals) == 1:
vals = vals[0] # flatten single-element lists
if geojson_out:
if 'properties' not in feat:
feat['properties'] = {}
feat['properties'][property_name] = vals
yield feat
else:
yield vals
|
Given a set of vector features and a raster,
generate raster values at each vertex of the geometry
For features with point geometry,
the values will be a 1D with the index refering to the feature
For features with other geometry types,
it effectively creates a 2D list, such that
the first index is the feature, the second is the vertex within the geometry
Parameters
----------
vectors: path to an vector source or geo-like python objects
raster: ndarray or path to a GDAL raster source
If ndarray is passed, the `transform` kwarg is required.
layer: int or string, optional
If `vectors` is a path to an fiona source,
specify the vector layer to use either by name or number.
defaults to 0
band: int, optional
If `raster` is a GDAL source, the band number to use (counting from 1).
defaults to 1.
nodata: float, optional
If `raster` is a GDAL source, this value overrides any NODATA value
specified in the file's metadata.
If `None`, the file's metadata's NODATA value (if any) will be used.
defaults to `None`.
affine: Affine instance
required only for ndarrays, otherwise it is read from src
interpolate: string
'bilinear' or 'nearest' interpolation
property_name: string
name of property key if geojson_out
geojson_out: boolean
generate GeoJSON-like features (default: False)
original feature geometry and properties will be retained
point query values appended as additional properties.
Returns
-------
generator of arrays (if ``geojson_out`` is False)
generator of geojson features (if ``geojson_out`` is True)
|
train
|
https://github.com/perrygeo/python-rasterstats/blob/910455cd7c9c21eadf464927db72b38ef62b7dfb/src/rasterstats/point.py#L100-L197
|
[
"def bilinear(arr, x, y):\n \"\"\" Given a 2x2 array, an x, and y, treat center points as a unit square\n return the value for the fractional row/col\n using bilinear interpolation between the cells\n\n +---+---+\n | A | B | +----+\n +---+---+ => | |\n | C | D | +----+\n +---+---+\n\n e.g.: Center of A is at (0, 1) on unit square, D is at (1, 0), etc\n \"\"\"\n # for now, only 2x2 arrays\n assert arr.shape == (2, 2)\n ulv, urv, llv, lrv = arr[0:2, 0:2].flatten().tolist()\n\n # not valid if not on unit square\n assert 0.0 <= x <= 1.0\n assert 0.0 <= y <= 1.0\n\n if hasattr(arr, 'count') and arr.count() != 4:\n # a masked array with at least one nodata\n # fall back to nearest neighbor\n val = arr[int(round(1 - y)), int(round(x))]\n if val is masked:\n return None\n else:\n return asscalar(val)\n\n # bilinear interp on unit square\n return ((llv * (1 - x) * (1 - y)) +\n (lrv * x * (1 - y)) +\n (ulv * (1 - x) * y) +\n (urv * x * y))\n",
"def read_features(obj, layer=0):\n features_iter = None\n\n if isinstance(obj, string_types):\n try:\n # test it as fiona data source\n with fiona.open(obj, 'r', layer=layer) as src:\n assert len(src) > 0\n\n def fiona_generator(obj):\n with fiona.open(obj, 'r', layer=layer) as src:\n for feature in src:\n yield feature\n\n features_iter = fiona_generator(obj)\n except (AssertionError, TypeError, IOError, OSError, DriverError, UnicodeDecodeError):\n try:\n mapping = json.loads(obj)\n if 'type' in mapping and mapping['type'] == 'FeatureCollection':\n features_iter = mapping['features']\n elif mapping['type'] in geom_types + ['Feature']:\n features_iter = [parse_feature(mapping)]\n except (ValueError, JSONDecodeError):\n # Single feature-like string\n features_iter = [parse_feature(obj)]\n elif isinstance(obj, Mapping):\n if 'type' in obj and obj['type'] == 'FeatureCollection':\n features_iter = obj['features']\n else:\n features_iter = [parse_feature(obj)]\n elif isinstance(obj, bytes):\n # Single binary object, probably a wkb\n features_iter = [parse_feature(obj)]\n elif hasattr(obj, '__geo_interface__'):\n mapping = obj.__geo_interface__\n if mapping['type'] == 'FeatureCollection':\n features_iter = mapping['features']\n else:\n features_iter = [parse_feature(mapping)]\n elif isinstance(obj, Iterable):\n # Iterable of feature-like objects\n features_iter = (parse_feature(x) for x in obj)\n\n if not features_iter:\n raise ValueError(\"Object is not a recognized source of Features\")\n return features_iter\n",
"def point_window_unitxy(x, y, affine):\n \"\"\" Given an x, y and a geotransform\n Returns\n - rasterio window representing 2x2 window whose center points encompass point\n - the cartesian x, y coordinates of the point on the unit square\n defined by the array center points.\n\n ((row1, row2), (col1, col2)), (unitx, unity)\n \"\"\"\n fcol, frow = ~affine * (x, y)\n r, c = int(round(frow)), int(round(fcol))\n\n # The new source window for our 2x2 array\n new_win = ((r - 1, r + 1), (c - 1, c + 1))\n\n # the new x, y coords on the unit square\n unitxy = (0.5 - (c - fcol),\n 0.5 + (r - frow))\n\n return new_win, unitxy\n",
"def geom_xys(geom):\n \"\"\"Given a shapely geometry,\n generate a flattened series of 2D points as x,y tuples\n \"\"\"\n if geom.has_z:\n # hack to convert to 2D, https://gist.github.com/ThomasG77/cad711667942826edc70\n geom = wkt.loads(geom.to_wkt())\n assert not geom.has_z\n\n if hasattr(geom, \"geoms\"):\n geoms = geom.geoms\n else:\n geoms = [geom]\n\n for g in geoms:\n arr = g.array_interface_base['data']\n for pair in zip(arr[::2], arr[1::2]):\n yield pair\n"
] |
from __future__ import absolute_import
from __future__ import division
from shapely.geometry import shape
from shapely import wkt
from numpy.ma import masked
from numpy import asscalar
from .io import read_features, Raster
def point_window_unitxy(x, y, affine):
""" Given an x, y and a geotransform
Returns
- rasterio window representing 2x2 window whose center points encompass point
- the cartesian x, y coordinates of the point on the unit square
defined by the array center points.
((row1, row2), (col1, col2)), (unitx, unity)
"""
fcol, frow = ~affine * (x, y)
r, c = int(round(frow)), int(round(fcol))
# The new source window for our 2x2 array
new_win = ((r - 1, r + 1), (c - 1, c + 1))
# the new x, y coords on the unit square
unitxy = (0.5 - (c - fcol),
0.5 + (r - frow))
return new_win, unitxy
def bilinear(arr, x, y):
""" Given a 2x2 array, an x, and y, treat center points as a unit square
return the value for the fractional row/col
using bilinear interpolation between the cells
+---+---+
| A | B | +----+
+---+---+ => | |
| C | D | +----+
+---+---+
e.g.: Center of A is at (0, 1) on unit square, D is at (1, 0), etc
"""
# for now, only 2x2 arrays
assert arr.shape == (2, 2)
ulv, urv, llv, lrv = arr[0:2, 0:2].flatten().tolist()
# not valid if not on unit square
assert 0.0 <= x <= 1.0
assert 0.0 <= y <= 1.0
if hasattr(arr, 'count') and arr.count() != 4:
# a masked array with at least one nodata
# fall back to nearest neighbor
val = arr[int(round(1 - y)), int(round(x))]
if val is masked:
return None
else:
return asscalar(val)
# bilinear interp on unit square
return ((llv * (1 - x) * (1 - y)) +
(lrv * x * (1 - y)) +
(ulv * (1 - x) * y) +
(urv * x * y))
def geom_xys(geom):
"""Given a shapely geometry,
generate a flattened series of 2D points as x,y tuples
"""
if geom.has_z:
# hack to convert to 2D, https://gist.github.com/ThomasG77/cad711667942826edc70
geom = wkt.loads(geom.to_wkt())
assert not geom.has_z
if hasattr(geom, "geoms"):
geoms = geom.geoms
else:
geoms = [geom]
for g in geoms:
arr = g.array_interface_base['data']
for pair in zip(arr[::2], arr[1::2]):
yield pair
def point_query(*args, **kwargs):
"""The primary point query entry point.
All arguments are passed directly to ``gen_point_query``.
See its docstring for details.
The only difference is that ``point_query`` will
return a list rather than a generator."""
return list(gen_point_query(*args, **kwargs))
|
perrygeo/python-rasterstats
|
src/rasterstats/utils.py
|
rasterize_geom
|
python
|
def rasterize_geom(geom, like, all_touched=False):
geoms = [(geom, 1)]
rv_array = features.rasterize(
geoms,
out_shape=like.shape,
transform=like.affine,
fill=0,
dtype='uint8',
all_touched=all_touched)
return rv_array.astype(bool)
|
Parameters
----------
geom: GeoJSON geometry
like: raster object with desired shape and transform
all_touched: rasterization strategy
Returns
-------
ndarray: boolean
|
train
|
https://github.com/perrygeo/python-rasterstats/blob/910455cd7c9c21eadf464927db72b38ef62b7dfb/src/rasterstats/utils.py#L28-L49
| null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
import sys
from rasterio import features
from shapely.geometry import box, MultiPolygon
from .io import window_bounds
DEFAULT_STATS = ['count', 'min', 'max', 'mean']
VALID_STATS = DEFAULT_STATS + \
['sum', 'std', 'median', 'majority', 'minority', 'unique', 'range', 'nodata', 'nan']
# also percentile_{q} but that is handled as special case
def get_percentile(stat):
if not stat.startswith('percentile_'):
raise ValueError("must start with 'percentile_'")
qstr = stat.replace("percentile_", '')
q = float(qstr)
if q > 100.0:
raise ValueError('percentiles must be <= 100')
if q < 0.0:
raise ValueError('percentiles must be >= 0')
return q
def stats_to_csv(stats):
if sys.version_info[0] >= 3:
from io import StringIO as IO # pragma: no cover
else:
from cStringIO import StringIO as IO # pragma: no cover
import csv
csv_fh = IO()
keys = set()
for stat in stats:
for key in list(stat.keys()):
keys.add(key)
fieldnames = sorted(list(keys), key=str)
csvwriter = csv.DictWriter(csv_fh, delimiter=str(","), fieldnames=fieldnames)
csvwriter.writerow(dict((fn, fn) for fn in fieldnames))
for row in stats:
csvwriter.writerow(row)
contents = csv_fh.getvalue()
csv_fh.close()
return contents
def check_stats(stats, categorical):
if not stats:
if not categorical:
stats = DEFAULT_STATS
else:
stats = []
else:
if isinstance(stats, str):
if stats in ['*', 'ALL']:
stats = VALID_STATS
else:
stats = stats.split()
for x in stats:
if x.startswith("percentile_"):
get_percentile(x)
elif x not in VALID_STATS:
raise ValueError(
"Stat `%s` not valid; "
"must be one of \n %r" % (x, VALID_STATS))
run_count = False
if categorical or 'majority' in stats or 'minority' in stats or 'unique' in stats:
# run the counter once, only if needed
run_count = True
return stats, run_count
def remap_categories(category_map, stats):
def lookup(m, k):
""" Dict lookup but returns original key if not found
"""
try:
return m[k]
except KeyError:
return k
return {lookup(category_map, k): v
for k, v in stats.items()}
def key_assoc_val(d, func, exclude=None):
"""return the key associated with the value returned by func
"""
vs = list(d.values())
ks = list(d.keys())
key = ks[vs.index(func(vs))]
return key
def boxify_points(geom, rast):
"""
Point and MultiPoint don't play well with GDALRasterize
convert them into box polygons 99% cellsize, centered on the raster cell
"""
if 'Point' not in geom.type:
raise ValueError("Points or multipoints only")
buff = -0.01 * abs(min(rast.affine.a, rast.affine.e))
if geom.type == 'Point':
pts = [geom]
elif geom.type == "MultiPoint":
pts = geom.geoms
geoms = []
for pt in pts:
row, col = rast.index(pt.x, pt.y)
win = ((row, row + 1), (col, col + 1))
geoms.append(box(*window_bounds(win, rast.affine)).buffer(buff))
return MultiPolygon(geoms)
|
perrygeo/python-rasterstats
|
src/rasterstats/utils.py
|
key_assoc_val
|
python
|
def key_assoc_val(d, func, exclude=None):
vs = list(d.values())
ks = list(d.keys())
key = ks[vs.index(func(vs))]
return key
|
return the key associated with the value returned by func
|
train
|
https://github.com/perrygeo/python-rasterstats/blob/910455cd7c9c21eadf464927db72b38ef62b7dfb/src/rasterstats/utils.py#L119-L125
| null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
import sys
from rasterio import features
from shapely.geometry import box, MultiPolygon
from .io import window_bounds
DEFAULT_STATS = ['count', 'min', 'max', 'mean']
VALID_STATS = DEFAULT_STATS + \
['sum', 'std', 'median', 'majority', 'minority', 'unique', 'range', 'nodata', 'nan']
# also percentile_{q} but that is handled as special case
def get_percentile(stat):
if not stat.startswith('percentile_'):
raise ValueError("must start with 'percentile_'")
qstr = stat.replace("percentile_", '')
q = float(qstr)
if q > 100.0:
raise ValueError('percentiles must be <= 100')
if q < 0.0:
raise ValueError('percentiles must be >= 0')
return q
def rasterize_geom(geom, like, all_touched=False):
"""
Parameters
----------
geom: GeoJSON geometry
like: raster object with desired shape and transform
all_touched: rasterization strategy
Returns
-------
ndarray: boolean
"""
geoms = [(geom, 1)]
rv_array = features.rasterize(
geoms,
out_shape=like.shape,
transform=like.affine,
fill=0,
dtype='uint8',
all_touched=all_touched)
return rv_array.astype(bool)
def stats_to_csv(stats):
if sys.version_info[0] >= 3:
from io import StringIO as IO # pragma: no cover
else:
from cStringIO import StringIO as IO # pragma: no cover
import csv
csv_fh = IO()
keys = set()
for stat in stats:
for key in list(stat.keys()):
keys.add(key)
fieldnames = sorted(list(keys), key=str)
csvwriter = csv.DictWriter(csv_fh, delimiter=str(","), fieldnames=fieldnames)
csvwriter.writerow(dict((fn, fn) for fn in fieldnames))
for row in stats:
csvwriter.writerow(row)
contents = csv_fh.getvalue()
csv_fh.close()
return contents
def check_stats(stats, categorical):
if not stats:
if not categorical:
stats = DEFAULT_STATS
else:
stats = []
else:
if isinstance(stats, str):
if stats in ['*', 'ALL']:
stats = VALID_STATS
else:
stats = stats.split()
for x in stats:
if x.startswith("percentile_"):
get_percentile(x)
elif x not in VALID_STATS:
raise ValueError(
"Stat `%s` not valid; "
"must be one of \n %r" % (x, VALID_STATS))
run_count = False
if categorical or 'majority' in stats or 'minority' in stats or 'unique' in stats:
# run the counter once, only if needed
run_count = True
return stats, run_count
def remap_categories(category_map, stats):
def lookup(m, k):
""" Dict lookup but returns original key if not found
"""
try:
return m[k]
except KeyError:
return k
return {lookup(category_map, k): v
for k, v in stats.items()}
def boxify_points(geom, rast):
"""
Point and MultiPoint don't play well with GDALRasterize
convert them into box polygons 99% cellsize, centered on the raster cell
"""
if 'Point' not in geom.type:
raise ValueError("Points or multipoints only")
buff = -0.01 * abs(min(rast.affine.a, rast.affine.e))
if geom.type == 'Point':
pts = [geom]
elif geom.type == "MultiPoint":
pts = geom.geoms
geoms = []
for pt in pts:
row, col = rast.index(pt.x, pt.y)
win = ((row, row + 1), (col, col + 1))
geoms.append(box(*window_bounds(win, rast.affine)).buffer(buff))
return MultiPolygon(geoms)
|
perrygeo/python-rasterstats
|
src/rasterstats/utils.py
|
boxify_points
|
python
|
def boxify_points(geom, rast):
if 'Point' not in geom.type:
raise ValueError("Points or multipoints only")
buff = -0.01 * abs(min(rast.affine.a, rast.affine.e))
if geom.type == 'Point':
pts = [geom]
elif geom.type == "MultiPoint":
pts = geom.geoms
geoms = []
for pt in pts:
row, col = rast.index(pt.x, pt.y)
win = ((row, row + 1), (col, col + 1))
geoms.append(box(*window_bounds(win, rast.affine)).buffer(buff))
return MultiPolygon(geoms)
|
Point and MultiPoint don't play well with GDALRasterize
convert them into box polygons 99% cellsize, centered on the raster cell
|
train
|
https://github.com/perrygeo/python-rasterstats/blob/910455cd7c9c21eadf464927db72b38ef62b7dfb/src/rasterstats/utils.py#L128-L148
|
[
"def window_bounds(window, affine):\n (row_start, row_stop), (col_start, col_stop) = window\n w, s = (col_start, row_stop) * affine\n e, n = (col_stop, row_start) * affine\n return w, s, e, n\n"
] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
import sys
from rasterio import features
from shapely.geometry import box, MultiPolygon
from .io import window_bounds
DEFAULT_STATS = ['count', 'min', 'max', 'mean']
VALID_STATS = DEFAULT_STATS + \
['sum', 'std', 'median', 'majority', 'minority', 'unique', 'range', 'nodata', 'nan']
# also percentile_{q} but that is handled as special case
def get_percentile(stat):
if not stat.startswith('percentile_'):
raise ValueError("must start with 'percentile_'")
qstr = stat.replace("percentile_", '')
q = float(qstr)
if q > 100.0:
raise ValueError('percentiles must be <= 100')
if q < 0.0:
raise ValueError('percentiles must be >= 0')
return q
def rasterize_geom(geom, like, all_touched=False):
"""
Parameters
----------
geom: GeoJSON geometry
like: raster object with desired shape and transform
all_touched: rasterization strategy
Returns
-------
ndarray: boolean
"""
geoms = [(geom, 1)]
rv_array = features.rasterize(
geoms,
out_shape=like.shape,
transform=like.affine,
fill=0,
dtype='uint8',
all_touched=all_touched)
return rv_array.astype(bool)
def stats_to_csv(stats):
if sys.version_info[0] >= 3:
from io import StringIO as IO # pragma: no cover
else:
from cStringIO import StringIO as IO # pragma: no cover
import csv
csv_fh = IO()
keys = set()
for stat in stats:
for key in list(stat.keys()):
keys.add(key)
fieldnames = sorted(list(keys), key=str)
csvwriter = csv.DictWriter(csv_fh, delimiter=str(","), fieldnames=fieldnames)
csvwriter.writerow(dict((fn, fn) for fn in fieldnames))
for row in stats:
csvwriter.writerow(row)
contents = csv_fh.getvalue()
csv_fh.close()
return contents
def check_stats(stats, categorical):
if not stats:
if not categorical:
stats = DEFAULT_STATS
else:
stats = []
else:
if isinstance(stats, str):
if stats in ['*', 'ALL']:
stats = VALID_STATS
else:
stats = stats.split()
for x in stats:
if x.startswith("percentile_"):
get_percentile(x)
elif x not in VALID_STATS:
raise ValueError(
"Stat `%s` not valid; "
"must be one of \n %r" % (x, VALID_STATS))
run_count = False
if categorical or 'majority' in stats or 'minority' in stats or 'unique' in stats:
# run the counter once, only if needed
run_count = True
return stats, run_count
def remap_categories(category_map, stats):
def lookup(m, k):
""" Dict lookup but returns original key if not found
"""
try:
return m[k]
except KeyError:
return k
return {lookup(category_map, k): v
for k, v in stats.items()}
def key_assoc_val(d, func, exclude=None):
"""return the key associated with the value returned by func
"""
vs = list(d.values())
ks = list(d.keys())
key = ks[vs.index(func(vs))]
return key
|
perrygeo/python-rasterstats
|
src/rasterstats/io.py
|
parse_feature
|
python
|
def parse_feature(obj):
# object implementing geo_interface
if hasattr(obj, '__geo_interface__'):
gi = obj.__geo_interface__
if gi['type'] in geom_types:
return wrap_geom(gi)
elif gi['type'] == 'Feature':
return gi
# wkt
try:
shape = wkt.loads(obj)
return wrap_geom(shape.__geo_interface__)
except (ReadingError, TypeError, AttributeError):
pass
# wkb
try:
shape = wkb.loads(obj)
return wrap_geom(shape.__geo_interface__)
except (ReadingError, TypeError):
pass
# geojson-like python mapping
try:
if obj['type'] in geom_types:
return wrap_geom(obj)
elif obj['type'] == 'Feature':
return obj
except (AssertionError, TypeError):
pass
raise ValueError("Can't parse %s as a geojson Feature object" % obj)
|
Given a python object
attemp to a GeoJSON-like Feature from it
|
train
|
https://github.com/perrygeo/python-rasterstats/blob/910455cd7c9c21eadf464927db72b38ef62b7dfb/src/rasterstats/io.py#L43-L79
|
[
"def wrap_geom(geom):\n \"\"\" Wraps a geometry dict in an GeoJSON Feature\n \"\"\"\n return {'type': 'Feature',\n 'properties': {},\n 'geometry': geom}\n"
] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
import sys
import json
import math
import fiona
from fiona.errors import DriverError
import rasterio
import warnings
from rasterio.transform import guard_transform
from affine import Affine
import numpy as np
try:
from shapely.errors import ReadingError
except:
from shapely.geos import ReadingError
try:
from json.decoder import JSONDecodeError
except ImportError:
JSONDecodeError = ValueError
from shapely import wkt, wkb
from collections import Iterable, Mapping
geom_types = ["Point", "LineString", "Polygon",
"MultiPoint", "MultiLineString", "MultiPolygon"]
PY3 = sys.version_info[0] >= 3
if PY3:
string_types = str, # pragma: no cover
else:
string_types = basestring, # pragma: no cover
def wrap_geom(geom):
""" Wraps a geometry dict in an GeoJSON Feature
"""
return {'type': 'Feature',
'properties': {},
'geometry': geom}
def read_features(obj, layer=0):
features_iter = None
if isinstance(obj, string_types):
try:
# test it as fiona data source
with fiona.open(obj, 'r', layer=layer) as src:
assert len(src) > 0
def fiona_generator(obj):
with fiona.open(obj, 'r', layer=layer) as src:
for feature in src:
yield feature
features_iter = fiona_generator(obj)
except (AssertionError, TypeError, IOError, OSError, DriverError, UnicodeDecodeError):
try:
mapping = json.loads(obj)
if 'type' in mapping and mapping['type'] == 'FeatureCollection':
features_iter = mapping['features']
elif mapping['type'] in geom_types + ['Feature']:
features_iter = [parse_feature(mapping)]
except (ValueError, JSONDecodeError):
# Single feature-like string
features_iter = [parse_feature(obj)]
elif isinstance(obj, Mapping):
if 'type' in obj and obj['type'] == 'FeatureCollection':
features_iter = obj['features']
else:
features_iter = [parse_feature(obj)]
elif isinstance(obj, bytes):
# Single binary object, probably a wkb
features_iter = [parse_feature(obj)]
elif hasattr(obj, '__geo_interface__'):
mapping = obj.__geo_interface__
if mapping['type'] == 'FeatureCollection':
features_iter = mapping['features']
else:
features_iter = [parse_feature(mapping)]
elif isinstance(obj, Iterable):
# Iterable of feature-like objects
features_iter = (parse_feature(x) for x in obj)
if not features_iter:
raise ValueError("Object is not a recognized source of Features")
return features_iter
def read_featurecollection(obj, layer=0):
features = read_features(obj, layer=layer)
fc = {'type': 'FeatureCollection', 'features': []}
fc['features'] = [f for f in features]
return fc
def rowcol(x, y, affine, op=math.floor):
""" Get row/col for a x/y
"""
r = int(op((y - affine.f) / affine.e))
c = int(op((x - affine.c) / affine.a))
return r, c
def bounds_window(bounds, affine):
"""Create a full cover rasterio-style window
"""
w, s, e, n = bounds
row_start, col_start = rowcol(w, n, affine)
row_stop, col_stop = rowcol(e, s, affine, op=math.ceil)
return (row_start, row_stop), (col_start, col_stop)
def window_bounds(window, affine):
(row_start, row_stop), (col_start, col_stop) = window
w, s = (col_start, row_stop) * affine
e, n = (col_stop, row_start) * affine
return w, s, e, n
def boundless_array(arr, window, nodata, masked=False):
dim3 = False
if len(arr.shape) == 3:
dim3 = True
elif len(arr.shape) != 2:
raise ValueError("Must be a 2D or 3D array")
# unpack for readability
(wr_start, wr_stop), (wc_start, wc_stop) = window
# Calculate overlap
olr_start = max(min(window[0][0], arr.shape[-2:][0]), 0)
olr_stop = max(min(window[0][1], arr.shape[-2:][0]), 0)
olc_start = max(min(window[1][0], arr.shape[-2:][1]), 0)
olc_stop = max(min(window[1][1], arr.shape[-2:][1]), 0)
# Calc dimensions
overlap_shape = (olr_stop - olr_start, olc_stop - olc_start)
if dim3:
window_shape = (arr.shape[0], wr_stop - wr_start, wc_stop - wc_start)
else:
window_shape = (wr_stop - wr_start, wc_stop - wc_start)
# create an array of nodata values
out = np.ones(shape=window_shape) * nodata
# Fill with data where overlapping
nr_start = olr_start - wr_start
nr_stop = nr_start + overlap_shape[0]
nc_start = olc_start - wc_start
nc_stop = nc_start + overlap_shape[1]
if dim3:
out[:, nr_start:nr_stop, nc_start:nc_stop] = \
arr[:, olr_start:olr_stop, olc_start:olc_stop]
else:
out[nr_start:nr_stop, nc_start:nc_stop] = \
arr[olr_start:olr_stop, olc_start:olc_stop]
if masked:
out = np.ma.MaskedArray(out, mask=(out == nodata))
return out
class Raster(object):
""" Raster abstraction for data access to 2/3D array-like things
Use as a context manager to ensure dataset gets closed properly::
>>> with Raster(path) as rast:
...
Parameters
----------
raster: 2/3D array-like data source, required
Currently supports paths to rasterio-supported rasters and
numpy arrays with Affine transforms.
affine: Affine object
Maps row/col to coordinate reference system
required if raster is ndarray
nodata: nodata value, optional
Overrides the datasource's internal nodata if specified
band: integer
raster band number, optional (default: 1)
Methods
-------
index
read
"""
def __init__(self, raster, affine=None, nodata=None, band=1):
self.array = None
self.src = None
if isinstance(raster, np.ndarray):
if affine is None:
raise ValueError("Specify affine transform for numpy arrays")
self.array = raster
self.affine = affine
self.shape = raster.shape
self.nodata = nodata
else:
self.src = rasterio.open(raster, 'r')
self.affine = guard_transform(self.src.transform)
self.shape = (self.src.height, self.src.width)
self.band = band
if nodata is not None:
# override with specified nodata
self.nodata = float(nodata)
else:
self.nodata = self.src.nodata
def index(self, x, y):
""" Given (x, y) in crs, return the (row, column) on the raster
"""
col, row = [math.floor(a) for a in (~self.affine * (x, y))]
return row, col
def read(self, bounds=None, window=None, masked=False):
""" Performs a boundless read against the underlying array source
Parameters
----------
bounds: bounding box
in w, s, e, n order, iterable, optional
window: rasterio-style window, optional
bounds OR window are required,
specifying both or neither will raise exception
masked: boolean
return a masked numpy array, default: False
bounds OR window are required, specifying both or neither will raise exception
Returns
-------
Raster object with update affine and array info
"""
# Calculate the window
if bounds and window:
raise ValueError("Specify either bounds or window")
if bounds:
win = bounds_window(bounds, self.affine)
elif window:
win = window
else:
raise ValueError("Specify either bounds or window")
c, _, _, f = window_bounds(win, self.affine) # c ~ west, f ~ north
a, b, _, d, e, _, _, _, _ = tuple(self.affine)
new_affine = Affine(a, b, c, d, e, f)
nodata = self.nodata
if nodata is None:
nodata = -999
warnings.warn("Setting nodata to -999; specify nodata explicitly")
if self.array is not None:
# It's an ndarray already
new_array = boundless_array(
self.array, window=win, nodata=nodata, masked=masked)
elif self.src:
# It's an open rasterio dataset
new_array = self.src.read(
self.band, window=win, boundless=True, masked=masked)
return Raster(new_array, new_affine, nodata)
def __enter__(self):
return self
def __exit__(self, *args):
if self.src is not None:
# close the rasterio reader
self.src.close()
|
perrygeo/python-rasterstats
|
src/rasterstats/io.py
|
rowcol
|
python
|
def rowcol(x, y, affine, op=math.floor):
r = int(op((y - affine.f) / affine.e))
c = int(op((x - affine.c) / affine.a))
return r, c
|
Get row/col for a x/y
|
train
|
https://github.com/perrygeo/python-rasterstats/blob/910455cd7c9c21eadf464927db72b38ef62b7dfb/src/rasterstats/io.py#L137-L142
| null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
import sys
import json
import math
import fiona
from fiona.errors import DriverError
import rasterio
import warnings
from rasterio.transform import guard_transform
from affine import Affine
import numpy as np
try:
from shapely.errors import ReadingError
except:
from shapely.geos import ReadingError
try:
from json.decoder import JSONDecodeError
except ImportError:
JSONDecodeError = ValueError
from shapely import wkt, wkb
from collections import Iterable, Mapping
geom_types = ["Point", "LineString", "Polygon",
"MultiPoint", "MultiLineString", "MultiPolygon"]
PY3 = sys.version_info[0] >= 3
if PY3:
string_types = str, # pragma: no cover
else:
string_types = basestring, # pragma: no cover
def wrap_geom(geom):
""" Wraps a geometry dict in an GeoJSON Feature
"""
return {'type': 'Feature',
'properties': {},
'geometry': geom}
def parse_feature(obj):
""" Given a python object
attemp to a GeoJSON-like Feature from it
"""
# object implementing geo_interface
if hasattr(obj, '__geo_interface__'):
gi = obj.__geo_interface__
if gi['type'] in geom_types:
return wrap_geom(gi)
elif gi['type'] == 'Feature':
return gi
# wkt
try:
shape = wkt.loads(obj)
return wrap_geom(shape.__geo_interface__)
except (ReadingError, TypeError, AttributeError):
pass
# wkb
try:
shape = wkb.loads(obj)
return wrap_geom(shape.__geo_interface__)
except (ReadingError, TypeError):
pass
# geojson-like python mapping
try:
if obj['type'] in geom_types:
return wrap_geom(obj)
elif obj['type'] == 'Feature':
return obj
except (AssertionError, TypeError):
pass
raise ValueError("Can't parse %s as a geojson Feature object" % obj)
def read_features(obj, layer=0):
features_iter = None
if isinstance(obj, string_types):
try:
# test it as fiona data source
with fiona.open(obj, 'r', layer=layer) as src:
assert len(src) > 0
def fiona_generator(obj):
with fiona.open(obj, 'r', layer=layer) as src:
for feature in src:
yield feature
features_iter = fiona_generator(obj)
except (AssertionError, TypeError, IOError, OSError, DriverError, UnicodeDecodeError):
try:
mapping = json.loads(obj)
if 'type' in mapping and mapping['type'] == 'FeatureCollection':
features_iter = mapping['features']
elif mapping['type'] in geom_types + ['Feature']:
features_iter = [parse_feature(mapping)]
except (ValueError, JSONDecodeError):
# Single feature-like string
features_iter = [parse_feature(obj)]
elif isinstance(obj, Mapping):
if 'type' in obj and obj['type'] == 'FeatureCollection':
features_iter = obj['features']
else:
features_iter = [parse_feature(obj)]
elif isinstance(obj, bytes):
# Single binary object, probably a wkb
features_iter = [parse_feature(obj)]
elif hasattr(obj, '__geo_interface__'):
mapping = obj.__geo_interface__
if mapping['type'] == 'FeatureCollection':
features_iter = mapping['features']
else:
features_iter = [parse_feature(mapping)]
elif isinstance(obj, Iterable):
# Iterable of feature-like objects
features_iter = (parse_feature(x) for x in obj)
if not features_iter:
raise ValueError("Object is not a recognized source of Features")
return features_iter
def read_featurecollection(obj, layer=0):
features = read_features(obj, layer=layer)
fc = {'type': 'FeatureCollection', 'features': []}
fc['features'] = [f for f in features]
return fc
def bounds_window(bounds, affine):
"""Create a full cover rasterio-style window
"""
w, s, e, n = bounds
row_start, col_start = rowcol(w, n, affine)
row_stop, col_stop = rowcol(e, s, affine, op=math.ceil)
return (row_start, row_stop), (col_start, col_stop)
def window_bounds(window, affine):
(row_start, row_stop), (col_start, col_stop) = window
w, s = (col_start, row_stop) * affine
e, n = (col_stop, row_start) * affine
return w, s, e, n
def boundless_array(arr, window, nodata, masked=False):
dim3 = False
if len(arr.shape) == 3:
dim3 = True
elif len(arr.shape) != 2:
raise ValueError("Must be a 2D or 3D array")
# unpack for readability
(wr_start, wr_stop), (wc_start, wc_stop) = window
# Calculate overlap
olr_start = max(min(window[0][0], arr.shape[-2:][0]), 0)
olr_stop = max(min(window[0][1], arr.shape[-2:][0]), 0)
olc_start = max(min(window[1][0], arr.shape[-2:][1]), 0)
olc_stop = max(min(window[1][1], arr.shape[-2:][1]), 0)
# Calc dimensions
overlap_shape = (olr_stop - olr_start, olc_stop - olc_start)
if dim3:
window_shape = (arr.shape[0], wr_stop - wr_start, wc_stop - wc_start)
else:
window_shape = (wr_stop - wr_start, wc_stop - wc_start)
# create an array of nodata values
out = np.ones(shape=window_shape) * nodata
# Fill with data where overlapping
nr_start = olr_start - wr_start
nr_stop = nr_start + overlap_shape[0]
nc_start = olc_start - wc_start
nc_stop = nc_start + overlap_shape[1]
if dim3:
out[:, nr_start:nr_stop, nc_start:nc_stop] = \
arr[:, olr_start:olr_stop, olc_start:olc_stop]
else:
out[nr_start:nr_stop, nc_start:nc_stop] = \
arr[olr_start:olr_stop, olc_start:olc_stop]
if masked:
out = np.ma.MaskedArray(out, mask=(out == nodata))
return out
class Raster(object):
""" Raster abstraction for data access to 2/3D array-like things
Use as a context manager to ensure dataset gets closed properly::
>>> with Raster(path) as rast:
...
Parameters
----------
raster: 2/3D array-like data source, required
Currently supports paths to rasterio-supported rasters and
numpy arrays with Affine transforms.
affine: Affine object
Maps row/col to coordinate reference system
required if raster is ndarray
nodata: nodata value, optional
Overrides the datasource's internal nodata if specified
band: integer
raster band number, optional (default: 1)
Methods
-------
index
read
"""
def __init__(self, raster, affine=None, nodata=None, band=1):
self.array = None
self.src = None
if isinstance(raster, np.ndarray):
if affine is None:
raise ValueError("Specify affine transform for numpy arrays")
self.array = raster
self.affine = affine
self.shape = raster.shape
self.nodata = nodata
else:
self.src = rasterio.open(raster, 'r')
self.affine = guard_transform(self.src.transform)
self.shape = (self.src.height, self.src.width)
self.band = band
if nodata is not None:
# override with specified nodata
self.nodata = float(nodata)
else:
self.nodata = self.src.nodata
def index(self, x, y):
""" Given (x, y) in crs, return the (row, column) on the raster
"""
col, row = [math.floor(a) for a in (~self.affine * (x, y))]
return row, col
def read(self, bounds=None, window=None, masked=False):
""" Performs a boundless read against the underlying array source
Parameters
----------
bounds: bounding box
in w, s, e, n order, iterable, optional
window: rasterio-style window, optional
bounds OR window are required,
specifying both or neither will raise exception
masked: boolean
return a masked numpy array, default: False
bounds OR window are required, specifying both or neither will raise exception
Returns
-------
Raster object with update affine and array info
"""
# Calculate the window
if bounds and window:
raise ValueError("Specify either bounds or window")
if bounds:
win = bounds_window(bounds, self.affine)
elif window:
win = window
else:
raise ValueError("Specify either bounds or window")
c, _, _, f = window_bounds(win, self.affine) # c ~ west, f ~ north
a, b, _, d, e, _, _, _, _ = tuple(self.affine)
new_affine = Affine(a, b, c, d, e, f)
nodata = self.nodata
if nodata is None:
nodata = -999
warnings.warn("Setting nodata to -999; specify nodata explicitly")
if self.array is not None:
# It's an ndarray already
new_array = boundless_array(
self.array, window=win, nodata=nodata, masked=masked)
elif self.src:
# It's an open rasterio dataset
new_array = self.src.read(
self.band, window=win, boundless=True, masked=masked)
return Raster(new_array, new_affine, nodata)
def __enter__(self):
return self
def __exit__(self, *args):
if self.src is not None:
# close the rasterio reader
self.src.close()
|
perrygeo/python-rasterstats
|
src/rasterstats/io.py
|
bounds_window
|
python
|
def bounds_window(bounds, affine):
w, s, e, n = bounds
row_start, col_start = rowcol(w, n, affine)
row_stop, col_stop = rowcol(e, s, affine, op=math.ceil)
return (row_start, row_stop), (col_start, col_stop)
|
Create a full cover rasterio-style window
|
train
|
https://github.com/perrygeo/python-rasterstats/blob/910455cd7c9c21eadf464927db72b38ef62b7dfb/src/rasterstats/io.py#L145-L151
|
[
"def rowcol(x, y, affine, op=math.floor):\n \"\"\" Get row/col for a x/y\n \"\"\"\n r = int(op((y - affine.f) / affine.e))\n c = int(op((x - affine.c) / affine.a))\n return r, c\n"
] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
import sys
import json
import math
import fiona
from fiona.errors import DriverError
import rasterio
import warnings
from rasterio.transform import guard_transform
from affine import Affine
import numpy as np
try:
from shapely.errors import ReadingError
except:
from shapely.geos import ReadingError
try:
from json.decoder import JSONDecodeError
except ImportError:
JSONDecodeError = ValueError
from shapely import wkt, wkb
from collections import Iterable, Mapping
geom_types = ["Point", "LineString", "Polygon",
"MultiPoint", "MultiLineString", "MultiPolygon"]
PY3 = sys.version_info[0] >= 3
if PY3:
string_types = str, # pragma: no cover
else:
string_types = basestring, # pragma: no cover
def wrap_geom(geom):
""" Wraps a geometry dict in an GeoJSON Feature
"""
return {'type': 'Feature',
'properties': {},
'geometry': geom}
def parse_feature(obj):
""" Given a python object
attemp to a GeoJSON-like Feature from it
"""
# object implementing geo_interface
if hasattr(obj, '__geo_interface__'):
gi = obj.__geo_interface__
if gi['type'] in geom_types:
return wrap_geom(gi)
elif gi['type'] == 'Feature':
return gi
# wkt
try:
shape = wkt.loads(obj)
return wrap_geom(shape.__geo_interface__)
except (ReadingError, TypeError, AttributeError):
pass
# wkb
try:
shape = wkb.loads(obj)
return wrap_geom(shape.__geo_interface__)
except (ReadingError, TypeError):
pass
# geojson-like python mapping
try:
if obj['type'] in geom_types:
return wrap_geom(obj)
elif obj['type'] == 'Feature':
return obj
except (AssertionError, TypeError):
pass
raise ValueError("Can't parse %s as a geojson Feature object" % obj)
def read_features(obj, layer=0):
    """Return an iterable of GeoJSON-like Features extracted from ``obj``.

    ``obj`` may be a fiona-readable path, a GeoJSON string, a single
    feature-like string (WKT), a mapping, a wkb bytes payload, an object
    with ``__geo_interface__``, or an iterable of feature-like objects.
    """
    feats = None

    if isinstance(obj, string_types):
        try:
            # First choice: a fiona-readable vector data source.
            with fiona.open(obj, 'r', layer=layer) as src:
                assert len(src) > 0

            def fiona_generator(obj):
                with fiona.open(obj, 'r', layer=layer) as src:
                    for feature in src:
                        yield feature

            feats = fiona_generator(obj)
        except (AssertionError, TypeError, IOError, OSError, DriverError, UnicodeDecodeError):
            try:
                # Second choice: GeoJSON text.
                mapping = json.loads(obj)
                if 'type' in mapping and mapping['type'] == 'FeatureCollection':
                    feats = mapping['features']
                elif mapping['type'] in geom_types + ['Feature']:
                    feats = [parse_feature(mapping)]
            except (ValueError, JSONDecodeError):
                # Last resort: a single feature-like string (e.g. WKT).
                feats = [parse_feature(obj)]
    elif isinstance(obj, Mapping):
        if 'type' in obj and obj['type'] == 'FeatureCollection':
            feats = obj['features']
        else:
            feats = [parse_feature(obj)]
    elif isinstance(obj, bytes):
        # Single binary object, probably a wkb.
        feats = [parse_feature(obj)]
    elif hasattr(obj, '__geo_interface__'):
        mapping = obj.__geo_interface__
        if mapping['type'] == 'FeatureCollection':
            feats = mapping['features']
        else:
            feats = [parse_feature(mapping)]
    elif isinstance(obj, Iterable):
        # Iterable of feature-like objects.
        feats = (parse_feature(x) for x in obj)

    if not feats:
        raise ValueError("Object is not a recognized source of Features")
    return feats
def read_featurecollection(obj, layer=0):
    """Return a GeoJSON FeatureCollection dict for the features in ``obj``."""
    return {
        'type': 'FeatureCollection',
        'features': list(read_features(obj, layer=layer)),
    }
def rowcol(x, y, affine, op=math.floor):
    """Return the (row, col) cell index for the coordinate pair (x, y).

    ``op`` controls rounding: ``math.floor`` (default) gives the cell
    containing the point; ``math.ceil`` is used for full-cover windows.
    """
    row = int(op((y - affine.f) / affine.e))
    col = int(op((x - affine.c) / affine.a))
    return row, col
def window_bounds(window, affine):
    """Return (w, s, e, n) coordinate bounds of a rasterio-style window."""
    (row_start, row_stop), (col_start, col_stop) = window
    west, south = (col_start, row_stop) * affine
    east, north = (col_stop, row_start) * affine
    return west, south, east, north
def boundless_array(arr, window, nodata, masked=False):
    """Extract ``window`` from ``arr``, padding out-of-bounds cells with nodata.

    Parameters
    ----------
    arr : 2D or 3D ndarray
    window : ((row_start, row_stop), (col_start, col_stop))
        May extend beyond the bounds of ``arr``.
    nodata : value used to fill cells that fall outside ``arr``
    masked : bool
        If True, return a MaskedArray with nodata cells masked.
    """
    if arr.ndim == 3:
        dim3 = True
    elif arr.ndim == 2:
        dim3 = False
    else:
        raise ValueError("Must be a 2D or 3D array")

    # unpack for readability
    (wr_start, wr_stop), (wc_start, wc_stop) = window
    nrows, ncols = arr.shape[-2:]

    # Clip the window to the part that actually overlaps the array.
    olr_start = min(max(wr_start, 0), nrows)
    olr_stop = min(max(wr_stop, 0), nrows)
    olc_start = min(max(wc_start, 0), ncols)
    olc_stop = min(max(wc_stop, 0), ncols)
    overlap_shape = (olr_stop - olr_start, olc_stop - olc_start)

    window_shape = (wr_stop - wr_start, wc_stop - wc_start)
    if dim3:
        window_shape = (arr.shape[0],) + window_shape

    # Start from an all-nodata canvas, then paste the overlapping data in.
    out = np.ones(shape=window_shape) * nodata

    nr_start = olr_start - wr_start
    nr_stop = nr_start + overlap_shape[0]
    nc_start = olc_start - wc_start
    nc_stop = nc_start + overlap_shape[1]
    if dim3:
        out[:, nr_start:nr_stop, nc_start:nc_stop] = \
            arr[:, olr_start:olr_stop, olc_start:olc_stop]
    else:
        out[nr_start:nr_stop, nc_start:nc_stop] = \
            arr[olr_start:olr_stop, olc_start:olc_stop]

    if masked:
        out = np.ma.MaskedArray(out, mask=(out == nodata))

    return out
class Raster(object):
    """Abstraction over 2/3D array-like raster data sources.

    Use as a context manager to ensure the dataset gets closed properly::

        >>> with Raster(path) as rast:
        ...

    Parameters
    ----------
    raster : 2/3D array-like data source, required
        Currently supports paths to rasterio-supported rasters and
        numpy arrays with Affine transforms.
    affine : Affine object
        Maps row/col to coordinate reference system.
        Required if ``raster`` is an ndarray.
    nodata : nodata value, optional
        Overrides the datasource's internal nodata if specified.
    band : integer
        Raster band number, optional (default: 1).

    Methods
    -------
    index
    read
    """

    def __init__(self, raster, affine=None, nodata=None, band=1):
        self.array = None
        self.src = None

        if isinstance(raster, np.ndarray):
            # In-memory array source: an explicit transform is mandatory.
            if affine is None:
                raise ValueError("Specify affine transform for numpy arrays")
            self.array = raster
            self.affine = affine
            self.shape = raster.shape
            self.nodata = nodata
        else:
            # Anything else is treated as a rasterio-readable source.
            self.src = rasterio.open(raster, 'r')
            self.affine = guard_transform(self.src.transform)
            self.shape = (self.src.height, self.src.width)
            self.band = band
            # An explicitly supplied nodata wins over the file metadata.
            self.nodata = float(nodata) if nodata is not None else self.src.nodata

    def index(self, x, y):
        """Given (x, y) in crs, return the (row, column) on the raster."""
        fractional = (~self.affine) * (x, y)
        col, row = [int(math.floor(v)) for v in fractional]
        return row, col

    def read(self, bounds=None, window=None, masked=False):
        """Perform a boundless read against the underlying array source.

        Exactly one of ``bounds`` (w, s, e, n iterable) or ``window``
        (rasterio-style) must be given; ``masked=True`` returns a masked
        numpy array. Returns a new Raster with updated affine and array.
        """
        if bounds and window:
            raise ValueError("Specify either bounds or window")

        if bounds:
            win = bounds_window(bounds, self.affine)
        elif window:
            win = window
        else:
            raise ValueError("Specify either bounds or window")

        west, _, _, north = window_bounds(win, self.affine)
        a, b, _, d, e, _, _, _, _ = tuple(self.affine)
        new_affine = Affine(a, b, west, d, e, north)

        nodata = self.nodata
        if nodata is None:
            nodata = -999
            warnings.warn("Setting nodata to -999; specify nodata explicitly")

        if self.array is not None:
            # It's an ndarray already
            new_array = boundless_array(
                self.array, window=win, nodata=nodata, masked=masked)
        elif self.src:
            # It's an open rasterio dataset
            new_array = self.src.read(
                self.band, window=win, boundless=True, masked=masked)

        return Raster(new_array, new_affine, nodata)

    def __enter__(self):
        return self

    def __exit__(self, *args):
        if self.src is not None:
            # close the rasterio reader
            self.src.close()
|
perrygeo/python-rasterstats
|
src/rasterstats/io.py
|
Raster.index
|
python
|
def index(self, x, y):
col, row = [math.floor(a) for a in (~self.affine * (x, y))]
return row, col
|
Given (x, y) in crs, return the (row, column) on the raster
|
train
|
https://github.com/perrygeo/python-rasterstats/blob/910455cd7c9c21eadf464927db72b38ef62b7dfb/src/rasterstats/io.py#L258-L262
| null |
class Raster(object):
    """Abstraction over 2/3D array-like raster data sources.

    Use as a context manager to ensure the dataset gets closed properly::

        >>> with Raster(path) as rast:
        ...

    Parameters
    ----------
    raster : 2/3D array-like data source, required
        Currently supports paths to rasterio-supported rasters and
        numpy arrays with Affine transforms.
    affine : Affine object
        Maps row/col to coordinate reference system.
        Required if ``raster`` is an ndarray.
    nodata : nodata value, optional
        Overrides the datasource's internal nodata if specified.
    band : integer
        Raster band number, optional (default: 1).
    """

    def __init__(self, raster, affine=None, nodata=None, band=1):
        self.array = None
        self.src = None

        if isinstance(raster, np.ndarray):
            # In-memory array source: an explicit transform is mandatory.
            if affine is None:
                raise ValueError("Specify affine transform for numpy arrays")
            self.array = raster
            self.affine = affine
            self.shape = raster.shape
            self.nodata = nodata
        else:
            # Anything else is treated as a rasterio-readable source.
            self.src = rasterio.open(raster, 'r')
            self.affine = guard_transform(self.src.transform)
            self.shape = (self.src.height, self.src.width)
            self.band = band
            # An explicitly supplied nodata wins over the file metadata.
            self.nodata = float(nodata) if nodata is not None else self.src.nodata

    def read(self, bounds=None, window=None, masked=False):
        """Perform a boundless read against the underlying array source.

        Exactly one of ``bounds`` (w, s, e, n iterable) or ``window``
        (rasterio-style) must be given; ``masked=True`` returns a masked
        numpy array. Returns a new Raster with updated affine and array.
        """
        if bounds and window:
            raise ValueError("Specify either bounds or window")

        if bounds:
            win = bounds_window(bounds, self.affine)
        elif window:
            win = window
        else:
            raise ValueError("Specify either bounds or window")

        west, _, _, north = window_bounds(win, self.affine)
        a, b, _, d, e, _, _, _, _ = tuple(self.affine)
        new_affine = Affine(a, b, west, d, e, north)

        nodata = self.nodata
        if nodata is None:
            nodata = -999
            warnings.warn("Setting nodata to -999; specify nodata explicitly")

        if self.array is not None:
            # It's an ndarray already
            new_array = boundless_array(
                self.array, window=win, nodata=nodata, masked=masked)
        elif self.src:
            # It's an open rasterio dataset
            new_array = self.src.read(
                self.band, window=win, boundless=True, masked=masked)

        return Raster(new_array, new_affine, nodata)

    def __enter__(self):
        return self

    def __exit__(self, *args):
        if self.src is not None:
            # close the rasterio reader
            self.src.close()
|
perrygeo/python-rasterstats
|
src/rasterstats/io.py
|
Raster.read
|
python
|
def read(self, bounds=None, window=None, masked=False):
# Calculate the window
if bounds and window:
raise ValueError("Specify either bounds or window")
if bounds:
win = bounds_window(bounds, self.affine)
elif window:
win = window
else:
raise ValueError("Specify either bounds or window")
c, _, _, f = window_bounds(win, self.affine) # c ~ west, f ~ north
a, b, _, d, e, _, _, _, _ = tuple(self.affine)
new_affine = Affine(a, b, c, d, e, f)
nodata = self.nodata
if nodata is None:
nodata = -999
warnings.warn("Setting nodata to -999; specify nodata explicitly")
if self.array is not None:
# It's an ndarray already
new_array = boundless_array(
self.array, window=win, nodata=nodata, masked=masked)
elif self.src:
# It's an open rasterio dataset
new_array = self.src.read(
self.band, window=win, boundless=True, masked=masked)
return Raster(new_array, new_affine, nodata)
|
Performs a boundless read against the underlying array source
Parameters
----------
bounds: bounding box
in w, s, e, n order, iterable, optional
window: rasterio-style window, optional
bounds OR window are required,
specifying both or neither will raise exception
masked: boolean
return a masked numpy array, default: False
bounds OR window are required, specifying both or neither will raise exception
Returns
-------
Raster object with update affine and array info
|
train
|
https://github.com/perrygeo/python-rasterstats/blob/910455cd7c9c21eadf464927db72b38ef62b7dfb/src/rasterstats/io.py#L264-L311
|
[
"def bounds_window(bounds, affine):\n \"\"\"Create a full cover rasterio-style window\n \"\"\"\n w, s, e, n = bounds\n row_start, col_start = rowcol(w, n, affine)\n row_stop, col_stop = rowcol(e, s, affine, op=math.ceil)\n return (row_start, row_stop), (col_start, col_stop)\n",
"def window_bounds(window, affine):\n (row_start, row_stop), (col_start, col_stop) = window\n w, s = (col_start, row_stop) * affine\n e, n = (col_stop, row_start) * affine\n return w, s, e, n\n",
"def boundless_array(arr, window, nodata, masked=False):\n dim3 = False\n if len(arr.shape) == 3:\n dim3 = True\n elif len(arr.shape) != 2:\n raise ValueError(\"Must be a 2D or 3D array\")\n\n # unpack for readability\n (wr_start, wr_stop), (wc_start, wc_stop) = window\n\n # Calculate overlap\n olr_start = max(min(window[0][0], arr.shape[-2:][0]), 0)\n olr_stop = max(min(window[0][1], arr.shape[-2:][0]), 0)\n olc_start = max(min(window[1][0], arr.shape[-2:][1]), 0)\n olc_stop = max(min(window[1][1], arr.shape[-2:][1]), 0)\n\n # Calc dimensions\n overlap_shape = (olr_stop - olr_start, olc_stop - olc_start)\n if dim3:\n window_shape = (arr.shape[0], wr_stop - wr_start, wc_stop - wc_start)\n else:\n window_shape = (wr_stop - wr_start, wc_stop - wc_start)\n\n # create an array of nodata values\n out = np.ones(shape=window_shape) * nodata\n\n # Fill with data where overlapping\n nr_start = olr_start - wr_start\n nr_stop = nr_start + overlap_shape[0]\n nc_start = olc_start - wc_start\n nc_stop = nc_start + overlap_shape[1]\n if dim3:\n out[:, nr_start:nr_stop, nc_start:nc_stop] = \\\n arr[:, olr_start:olr_stop, olc_start:olc_stop]\n else:\n out[nr_start:nr_stop, nc_start:nc_stop] = \\\n arr[olr_start:olr_stop, olc_start:olc_stop]\n\n if masked:\n out = np.ma.MaskedArray(out, mask=(out == nodata))\n\n return out\n"
] |
class Raster(object):
    """Abstraction over 2/3D array-like raster data sources.

    Use as a context manager to ensure the dataset gets closed properly::

        >>> with Raster(path) as rast:
        ...

    Parameters
    ----------
    raster : 2/3D array-like data source, required
        Currently supports paths to rasterio-supported rasters and
        numpy arrays with Affine transforms.
    affine : Affine object
        Maps row/col to coordinate reference system.
        Required if ``raster`` is an ndarray.
    nodata : nodata value, optional
        Overrides the datasource's internal nodata if specified.
    band : integer
        Raster band number, optional (default: 1).
    """

    def __init__(self, raster, affine=None, nodata=None, band=1):
        self.array = None
        self.src = None

        if isinstance(raster, np.ndarray):
            # In-memory array source: an explicit transform is mandatory.
            if affine is None:
                raise ValueError("Specify affine transform for numpy arrays")
            self.array = raster
            self.affine = affine
            self.shape = raster.shape
            self.nodata = nodata
        else:
            # Anything else is treated as a rasterio-readable source.
            self.src = rasterio.open(raster, 'r')
            self.affine = guard_transform(self.src.transform)
            self.shape = (self.src.height, self.src.width)
            self.band = band
            # An explicitly supplied nodata wins over the file metadata.
            self.nodata = float(nodata) if nodata is not None else self.src.nodata

    def index(self, x, y):
        """Given (x, y) in crs, return the (row, column) on the raster."""
        fractional = (~self.affine) * (x, y)
        col, row = [int(math.floor(v)) for v in fractional]
        return row, col

    def __enter__(self):
        return self

    def __exit__(self, *args):
        if self.src is not None:
            # close the rasterio reader
            self.src.close()
|
perrygeo/python-rasterstats
|
src/rasterstats/main.py
|
gen_zonal_stats
|
python
|
def gen_zonal_stats(
        vectors, raster,
        layer=0,
        band=1,
        nodata=None,
        affine=None,
        stats=None,
        all_touched=False,
        categorical=False,
        category_map=None,
        add_stats=None,
        zone_func=None,
        raster_out=False,
        prefix=None,
        geojson_out=False, **kwargs):
    """Zonal statistics of raster values aggregated to vector geometries.

    Yields one dict of statistics per feature in ``vectors``, or the
    GeoJSON feature itself with stats appended as properties when
    ``geojson_out`` is True. See ``zonal_stats`` for the list version.
    """
    stats, run_count = check_stats(stats, categorical)

    # Handle 1.0 deprecations
    transform = kwargs.get('transform')
    if transform:
        warnings.warn("GDAL-style transforms will disappear in 1.0. "
                      "Use affine=Affine.from_gdal(*transform) instead",
                      DeprecationWarning)
        if not affine:
            affine = Affine.from_gdal(*transform)

    cp = kwargs.get('copy_properties')
    if cp:
        warnings.warn("Use `geojson_out` to preserve feature properties",
                      DeprecationWarning)

    band_num = kwargs.get('band_num')
    if band_num:
        warnings.warn("Use `band` to specify band number", DeprecationWarning)
        band = band_num

    with Raster(raster, affine, nodata, band) as rast:
        features_iter = read_features(vectors, layer)
        for _, feat in enumerate(features_iter):
            geom = shape(feat['geometry'])

            # GDAL rasterization doesn't handle points; box them first.
            if 'Point' in geom.type:
                geom = boxify_points(geom, rast)

            geom_bounds = tuple(geom.bounds)
            fsrc = rast.read(bounds=geom_bounds)

            # rasterized geometry
            rv_array = rasterize_geom(geom, like=fsrc, all_touched=all_touched)

            # nodata mask
            isnodata = (fsrc.array == fsrc.nodata)

            # add nan mask (if necessary)
            has_nan = (
                np.issubdtype(fsrc.array.dtype, np.floating)
                and np.isnan(fsrc.array.min()))
            if has_nan:
                isnodata = (isnodata | np.isnan(fsrc.array))

            # Mask the source data array:
            # mask everything that is not a valid value or not within our geom
            masked = np.ma.MaskedArray(
                fsrc.array,
                mask=(isnodata | ~rv_array))

            # If we're on 64 bit platform and the array is an integer type
            # make sure we cast to 64 bit to avoid overflow.
            # workaround for https://github.com/numpy/numpy/issues/8433
            if sysinfo.platform_bits == 64 and \
                    masked.dtype != np.int64 and \
                    issubclass(masked.dtype.type, np.integer):
                masked = masked.astype(np.int64)

            # execute zone_func on masked zone ndarray
            if zone_func is not None:
                if not callable(zone_func):
                    raise TypeError(('zone_func must be a callable '
                                     'which accepts a '
                                     'single `zone_array` arg.'))
                zone_func(masked)

            if masked.compressed().size == 0:
                # nothing here, fill with None and move on
                feature_stats = dict([(stat, None) for stat in stats])
                if 'count' in stats:  # special case, zero makes sense here
                    feature_stats['count'] = 0
            else:
                if run_count:
                    keys, counts = np.unique(masked.compressed(), return_counts=True)
                    # np.asscalar was removed from numpy; ndarray.item() is
                    # the supported equivalent.
                    pixel_count = dict(zip([k.item() for k in keys],
                                           [c.item() for c in counts]))

                if categorical:
                    feature_stats = dict(pixel_count)
                    if category_map:
                        feature_stats = remap_categories(category_map, feature_stats)
                else:
                    feature_stats = {}

                if 'min' in stats:
                    feature_stats['min'] = float(masked.min())
                if 'max' in stats:
                    feature_stats['max'] = float(masked.max())
                if 'mean' in stats:
                    feature_stats['mean'] = float(masked.mean())
                if 'count' in stats:
                    feature_stats['count'] = int(masked.count())
                # optional
                if 'sum' in stats:
                    feature_stats['sum'] = float(masked.sum())
                if 'std' in stats:
                    feature_stats['std'] = float(masked.std())
                if 'median' in stats:
                    feature_stats['median'] = float(np.median(masked.compressed()))
                if 'majority' in stats:
                    feature_stats['majority'] = float(key_assoc_val(pixel_count, max))
                if 'minority' in stats:
                    feature_stats['minority'] = float(key_assoc_val(pixel_count, min))
                if 'unique' in stats:
                    feature_stats['unique'] = len(list(pixel_count.keys()))
                if 'range' in stats:
                    # reuse min/max if already computed
                    try:
                        rmin = feature_stats['min']
                    except KeyError:
                        rmin = float(masked.min())
                    try:
                        rmax = feature_stats['max']
                    except KeyError:
                        rmax = float(masked.max())
                    feature_stats['range'] = rmax - rmin

                for pctile in [s for s in stats if s.startswith('percentile_')]:
                    q = get_percentile(pctile)
                    pctarr = masked.compressed()
                    feature_stats[pctile] = np.percentile(pctarr, q)

            if 'nodata' in stats or 'nan' in stats:
                featmasked = np.ma.MaskedArray(fsrc.array, mask=(~rv_array))
                if 'nodata' in stats:
                    feature_stats['nodata'] = float((featmasked == fsrc.nodata).sum())
                if 'nan' in stats:
                    feature_stats['nan'] = float(np.isnan(featmasked).sum()) if has_nan else 0

            if add_stats is not None:
                for stat_name, stat_func in add_stats.items():
                    feature_stats[stat_name] = stat_func(masked)

            if raster_out:
                feature_stats['mini_raster_array'] = masked
                feature_stats['mini_raster_affine'] = fsrc.affine
                feature_stats['mini_raster_nodata'] = fsrc.nodata

            if prefix is not None:
                prefixed_feature_stats = {}
                for key, val in feature_stats.items():
                    newkey = "{}{}".format(prefix, key)
                    prefixed_feature_stats[newkey] = val
                feature_stats = prefixed_feature_stats

            if geojson_out:
                for key, val in feature_stats.items():
                    if 'properties' not in feat:
                        feat['properties'] = {}
                    feat['properties'][key] = val
                yield feat
            else:
                yield feature_stats
|
Zonal statistics of raster values aggregated to vector geometries.
Parameters
----------
vectors: path to an vector source or geo-like python objects
raster: ndarray or path to a GDAL raster source
If ndarray is passed, the ``affine`` kwarg is required.
layer: int or string, optional
If `vectors` is a path to an fiona source,
specify the vector layer to use either by name or number.
defaults to 0
band: int, optional
If `raster` is a GDAL source, the band number to use (counting from 1).
defaults to 1.
nodata: float, optional
If `raster` is a GDAL source, this value overrides any NODATA value
specified in the file's metadata.
If `None`, the file's metadata's NODATA value (if any) will be used.
defaults to `None`.
affine: Affine instance
required only for ndarrays, otherwise it is read from src
stats: list of str, or space-delimited str, optional
Which statistics to calculate for each zone.
All possible choices are listed in ``utils.VALID_STATS``.
defaults to ``DEFAULT_STATS``, a subset of these.
all_touched: bool, optional
Whether to include every raster cell touched by a geometry, or only
those having a center point within the polygon.
defaults to `False`
categorical: bool, optional
category_map: dict
A dictionary mapping raster values to human-readable categorical names.
Only applies when categorical is True
add_stats: dict
with names and functions of additional stats to compute, optional
zone_func: callable
function to apply to zone ndarray prior to computing stats
raster_out: boolean
Include the masked numpy array for each feature?, optional
Each feature dictionary will have the following additional keys:
mini_raster_array: The clipped and masked numpy array
mini_raster_affine: Affine transformation
mini_raster_nodata: NoData Value
prefix: string
add a prefix to the keys (default: None)
geojson_out: boolean
Return list of GeoJSON-like features (default: False)
Original feature geometry and properties will be retained
with zonal stats appended as additional properties.
Use with `prefix` to ensure unique and meaningful property names.
Returns
-------
generator of dicts (if geojson_out is False)
Each item corresponds to a single vector feature and
contains keys for each of the specified stats.
generator of geojson features (if geojson_out is True)
GeoJSON-like Feature as python dict
|
train
|
https://github.com/perrygeo/python-rasterstats/blob/910455cd7c9c21eadf464927db72b38ef62b7dfb/src/rasterstats/main.py#L34-L278
|
[
"def read_features(obj, layer=0):\n features_iter = None\n\n if isinstance(obj, string_types):\n try:\n # test it as fiona data source\n with fiona.open(obj, 'r', layer=layer) as src:\n assert len(src) > 0\n\n def fiona_generator(obj):\n with fiona.open(obj, 'r', layer=layer) as src:\n for feature in src:\n yield feature\n\n features_iter = fiona_generator(obj)\n except (AssertionError, TypeError, IOError, OSError, DriverError, UnicodeDecodeError):\n try:\n mapping = json.loads(obj)\n if 'type' in mapping and mapping['type'] == 'FeatureCollection':\n features_iter = mapping['features']\n elif mapping['type'] in geom_types + ['Feature']:\n features_iter = [parse_feature(mapping)]\n except (ValueError, JSONDecodeError):\n # Single feature-like string\n features_iter = [parse_feature(obj)]\n elif isinstance(obj, Mapping):\n if 'type' in obj and obj['type'] == 'FeatureCollection':\n features_iter = obj['features']\n else:\n features_iter = [parse_feature(obj)]\n elif isinstance(obj, bytes):\n # Single binary object, probably a wkb\n features_iter = [parse_feature(obj)]\n elif hasattr(obj, '__geo_interface__'):\n mapping = obj.__geo_interface__\n if mapping['type'] == 'FeatureCollection':\n features_iter = mapping['features']\n else:\n features_iter = [parse_feature(mapping)]\n elif isinstance(obj, Iterable):\n # Iterable of feature-like objects\n features_iter = (parse_feature(x) for x in obj)\n\n if not features_iter:\n raise ValueError(\"Object is not a recognized source of Features\")\n return features_iter\n",
"def rasterize_geom(geom, like, all_touched=False):\n \"\"\"\n Parameters\n ----------\n geom: GeoJSON geometry\n like: raster object with desired shape and transform\n all_touched: rasterization strategy\n\n Returns\n -------\n ndarray: boolean\n \"\"\"\n geoms = [(geom, 1)]\n rv_array = features.rasterize(\n geoms,\n out_shape=like.shape,\n transform=like.affine,\n fill=0,\n dtype='uint8',\n all_touched=all_touched)\n\n return rv_array.astype(bool)\n",
"def check_stats(stats, categorical):\n if not stats:\n if not categorical:\n stats = DEFAULT_STATS\n else:\n stats = []\n else:\n if isinstance(stats, str):\n if stats in ['*', 'ALL']:\n stats = VALID_STATS\n else:\n stats = stats.split()\n for x in stats:\n if x.startswith(\"percentile_\"):\n get_percentile(x)\n elif x not in VALID_STATS:\n raise ValueError(\n \"Stat `%s` not valid; \"\n \"must be one of \\n %r\" % (x, VALID_STATS))\n\n run_count = False\n if categorical or 'majority' in stats or 'minority' in stats or 'unique' in stats:\n # run the counter once, only if needed\n run_count = True\n\n return stats, run_count\n",
"def boxify_points(geom, rast):\n \"\"\"\n Point and MultiPoint don't play well with GDALRasterize\n convert them into box polygons 99% cellsize, centered on the raster cell\n \"\"\"\n if 'Point' not in geom.type:\n raise ValueError(\"Points or multipoints only\")\n\n buff = -0.01 * abs(min(rast.affine.a, rast.affine.e))\n\n if geom.type == 'Point':\n pts = [geom]\n elif geom.type == \"MultiPoint\":\n pts = geom.geoms\n geoms = []\n for pt in pts:\n row, col = rast.index(pt.x, pt.y)\n win = ((row, row + 1), (col, col + 1))\n geoms.append(box(*window_bounds(win, rast.affine)).buffer(buff))\n\n return MultiPolygon(geoms)\n"
] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from affine import Affine
from shapely.geometry import shape
import numpy as np
import numpy.distutils.system_info as sysinfo
import warnings
from .io import read_features, Raster
from .utils import (rasterize_geom, get_percentile, check_stats,
remap_categories, key_assoc_val, boxify_points)
def raster_stats(*args, **kwargs):
    """Deprecated. Use zonal_stats instead."""
    warnings.warn(
        "'raster_stats' is an alias to 'zonal_stats' and will disappear in 1.0",
        DeprecationWarning,
    )
    return zonal_stats(*args, **kwargs)
def zonal_stats(*args, **kwargs):
    """The primary zonal statistics entry point.

    All arguments are passed directly to ``gen_zonal_stats``; see its
    docstring for details. The only difference is that ``zonal_stats``
    returns a list rather than a generator.
    """
    return list(gen_zonal_stats(*args, **kwargs))
|
perrygeo/python-rasterstats
|
examples/multiproc.py
|
chunks
|
python
|
def chunks(data, n):
    """Yield successive n-sized chunks from a slice-able iterable."""
    start = 0
    while start < len(data):
        yield data[start:start + n]
        start += n
|
Yield successive n-sized chunks from a slice-able iterable.
|
train
|
https://github.com/perrygeo/python-rasterstats/blob/910455cd7c9c21eadf464927db72b38ef62b7dfb/examples/multiproc.py#L13-L16
| null |
#!/usr/bin/env python
import itertools
import multiprocessing
from rasterstats import zonal_stats
import fiona
# Benchmark fixtures: country polygons and an SRTM elevation raster.
shp = "benchmark_data/ne_50m_admin_0_countries.shp"
tif = "benchmark_data/srtm.tif"
def zonal_stats_partial(feats):
    """Wrapper around zonal_stats; takes a list of features.

    Needed so the work can be pickled out to pool workers.
    """
    return zonal_stats(feats, tif, all_touched=True)
if __name__ == "__main__":
    with fiona.open(shp) as src:
        features = list(src)

    # Fan the features out across one chunk per core.
    cores = multiprocessing.cpu_count()
    pool = multiprocessing.Pool(cores)
    stats_lists = pool.map(zonal_stats_partial, chunks(features, cores))

    # Flatten the per-chunk results back into a single list.
    stats = list(itertools.chain(*stats_lists))
    assert len(stats) == len(features)
|
ecmwf/cfgrib
|
cfgrib/cfmessage.py
|
from_grib_date_time
|
python
|
def from_grib_date_time(message, date_key='dataDate', time_key='dataTime', epoch=DEFAULT_EPOCH):
    # type: (T.Mapping, str, str, datetime.datetime) -> int
    """Return seconds since ``epoch`` encoded by the message date/time keys."""
    packed_date = message[date_key]
    packed_time = message[time_key]
    when = datetime.datetime(
        packed_date // 10000,      # year
        packed_date // 100 % 100,  # month
        packed_date % 100,         # day
        packed_time // 100,        # hour
        packed_time % 100,         # minute
    )
    # Python 2 compatible timestamp implementation without timezone hurdle
    # see: https://docs.python.org/3/library/datetime.html#datetime.datetime.timestamp
    return int((when - epoch).total_seconds())
|
Return the number of seconds since the ``epoch`` from the values of the ``message`` keys,
using datetime.total_seconds().
:param message: the target GRIB message
:param date_key: the date key, defaults to "dataDate"
:param time_key: the time key, defaults to "dataTime"
:param epoch: the reference datetime
|
train
|
https://github.com/ecmwf/cfgrib/blob/d6d533f49c1eebf78f2f16ed0671c666de08c666/cfgrib/cfmessage.py#L40-L61
| null |
#
# Copyright 2017-2019 European Centre for Medium-Range Weather Forecasts (ECMWF).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# Baudouin Raoult - ECMWF - https://ecmwf.int
# Alessandro Amici - B-Open - https://bopen.eu
#
import datetime
import logging
import typing as T # noqa
import attr
import numpy as np # noqa
from . import messages
LOG = logging.getLogger(__name__)
# taken from eccodes stepUnits.table
# Conversion factors from GRIB stepUnits codes to seconds
# (taken from eccodes stepUnits.table); None marks unsupported codes.
GRIB_STEP_UNITS_TO_SECONDS = [
60, 3600, 86400, None, None, None, None, None, None, None,
10800, 21600, 43200, 1, 900, 1800,
]
# Reference epoch used when encoding/decoding GRIB date/time values.
DEFAULT_EPOCH = datetime.datetime(1970, 1, 1)
def to_grib_date_time(
    message, time_ns, date_key='dataDate', time_key='dataTime', epoch=DEFAULT_EPOCH,
):
    # type: (T.MutableMapping, np.datetime64, str, str, datetime.datetime) -> None
    """Write ``time_ns`` (nanoseconds since ``epoch``) into the message keys."""
    seconds = int(time_ns) * 1e-9
    stamp = str(epoch + datetime.timedelta(seconds=seconds))
    # ISO layout 'YYYY-MM-DD HH:MM:SS' -> dataDate YYYYMMDD, dataTime HHMM
    message[date_key] = int(stamp[:10].replace('-', ''))
    message[time_key] = int(stamp[11:16].replace(':', ''))
def from_grib_step(message, step_key='endStep', step_unit_key='stepUnits'):
    # type: (T.Mapping, str, str) -> float
    """Return the message step converted to hours."""
    unit_seconds = GRIB_STEP_UNITS_TO_SECONDS[message[step_unit_key]]
    return message[step_key] * unit_seconds / 3600.
def to_grib_step(message, step_ns, step_unit=1, step_key='endStep', step_unit_key='stepUnits'):
    # type: (T.MutableMapping, int, int, str, str) -> None
    """Write a step given in nanoseconds into the message, in ``step_unit`` units."""
    seconds = int(step_ns) * 1e-9
    unit_seconds = GRIB_STEP_UNITS_TO_SECONDS[step_unit]
    if unit_seconds is None:
        raise ValueError("unsupported stepUnit %r" % step_unit)
    message[step_key] = seconds / unit_seconds
    message[step_unit_key] = step_unit
def build_valid_time(time, step):
    # type: (np.ndarray, np.ndarray) -> T.Tuple[T.Tuple[str, ...], np.ndarray]
    """Return dims and data of the valid_time for the given ``time`` and ``step``.

    The data is seconds from the same epoch as ``time`` and may have one or
    two dimensions.

    :param time: given in seconds from an epoch, as returned by ``from_grib_date_time``
    :param step: given in hours, as returned by ``from_grib_step``
    """
    step_seconds = step * 3600
    time_is_scalar = len(time.shape) == 0
    step_is_scalar = len(step.shape) == 0

    if time_is_scalar and step_is_scalar:
        return (), time + step_seconds
    if not time_is_scalar and step_is_scalar:
        return ('time',), time + step_seconds
    if time_is_scalar and not step_is_scalar:
        return ('step',), time + step_seconds
    # both are arrays: broadcast to a (time, step) grid
    return ('time', 'step'), time[:, None] + step_seconds[None, :]
# Mapping of computed key -> (reader, writer) pair used by ComputedKeysMessage.
COMPUTED_KEYS = {
    'time': (from_grib_date_time, to_grib_date_time),
    'step': (from_grib_step, to_grib_step),
}


@attr.attrs()
class CfMessage(messages.ComputedKeysMessage):
    # GRIB message exposing CF-style computed 'time' and 'step' keys.
    computed_keys = attr.attrib(default=COMPUTED_KEYS)
|
ecmwf/cfgrib
|
cfgrib/cfmessage.py
|
build_valid_time
|
python
|
def build_valid_time(time, step):
# type: (np.ndarray, np.ndarray) -> T.Tuple[T.Tuple[str, ...], np.ndarray]
step_s = step * 3600
if len(time.shape) == 0 and len(step.shape) == 0:
data = time + step_s
dims = () # type: T.Tuple[str, ...]
elif len(time.shape) > 0 and len(step.shape) == 0:
data = time + step_s
dims = ('time',)
elif len(time.shape) == 0 and len(step.shape) > 0:
data = time + step_s
dims = ('step',)
else:
data = time[:, None] + step_s[None, :]
dims = ('time', 'step')
return dims, data
|
Return dimensions and data of the valid_time corresponding to the given ``time`` and ``step``.
The data is seconds from the same epoch as ``time`` and may have one or two dimensions.
:param time: given in seconds from an epoch, as returned by ``from_grib_date_time``
:param step: given in hours, as returned by ``from_grib_step``
|
train
|
https://github.com/ecmwf/cfgrib/blob/d6d533f49c1eebf78f2f16ed0671c666de08c666/cfgrib/cfmessage.py#L92-L114
| null |
#
# Copyright 2017-2019 European Centre for Medium-Range Weather Forecasts (ECMWF).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# Baudouin Raoult - ECMWF - https://ecmwf.int
# Alessandro Amici - B-Open - https://bopen.eu
#
import datetime
import logging
import typing as T # noqa
import attr
import numpy as np # noqa
from . import messages
LOG = logging.getLogger(__name__)
# taken from eccodes stepUnits.table
GRIB_STEP_UNITS_TO_SECONDS = [
60, 3600, 86400, None, None, None, None, None, None, None,
10800, 21600, 43200, 1, 900, 1800,
]
DEFAULT_EPOCH = datetime.datetime(1970, 1, 1)
def from_grib_date_time(message, date_key='dataDate', time_key='dataTime', epoch=DEFAULT_EPOCH):
    # type: (T.Mapping, str, str, datetime.datetime) -> int
    """
    Return the number of seconds since ``epoch`` encoded by the date/time keys
    of ``message`` (dates as YYYYMMDD integers, times as HHMM integers).

    :param message: the target GRIB message
    :param date_key: the date key, defaults to "dataDate"
    :param time_key: the time key, defaults to "dataTime"
    :param epoch: the reference datetime
    """
    date = message[date_key]
    time = message[time_key]
    data_datetime = datetime.datetime(
        year=date // 10000,
        month=date // 100 % 100,
        day=date % 100,
        hour=time // 100,
        minute=time % 100,
    )
    # Python 2 compatible timestamp implementation without timezone hurdle
    # see: https://docs.python.org/3/library/datetime.html#datetime.datetime.timestamp
    return int((data_datetime - epoch).total_seconds())
def to_grib_date_time(
    message, time_ns, date_key='dataDate', time_key='dataTime', epoch=DEFAULT_EPOCH,
):
    # type: (T.MutableMapping, np.datetime64, str, str, datetime.datetime) -> None
    """Set the GRIB date/time keys of ``message`` from ``time_ns`` nanoseconds since ``epoch``."""
    when = epoch + datetime.timedelta(seconds=int(time_ns) * 1e-9)
    # rely on the ISO text form "YYYY-MM-DD HH:MM:SS" to slice out date and time
    iso = str(when)
    message[date_key] = int(iso[:10].replace('-', ''))
    message[time_key] = int(iso[11:16].replace(':', ''))
def from_grib_step(message, step_key='endStep', step_unit_key='stepUnits'):
    # type: (T.Mapping, str, str) -> float
    """
    Return the forecast step of ``message`` converted to hours.

    :param message: the target GRIB message
    :param step_key: the step key, defaults to "endStep"
    :param step_unit_key: the step units key, defaults to "stepUnits"
    :raises ValueError: when the stepUnits value has no defined conversion,
        consistent with ``to_grib_step``.  Previously this surfaced as an
        opaque ``TypeError`` from multiplying by ``None``.
    """
    to_seconds = GRIB_STEP_UNITS_TO_SECONDS[message[step_unit_key]]
    if to_seconds is None:
        raise ValueError("unsupported stepUnits %r" % message[step_unit_key])
    return message[step_key] * to_seconds / 3600.
def to_grib_step(message, step_ns, step_unit=1, step_key='endStep', step_unit_key='stepUnits'):
    # type: (T.MutableMapping, int, int, str, str) -> None
    """Set the GRIB step keys of ``message`` from ``step_ns`` nanoseconds, in ``step_unit``."""
    to_seconds = GRIB_STEP_UNITS_TO_SECONDS[step_unit]
    if to_seconds is None:
        raise ValueError("unsupported stepUnit %r" % step_unit)
    # nanoseconds -> seconds -> requested GRIB step unit
    message[step_key] = int(step_ns) * 1e-9 / to_seconds
    message[step_unit_key] = step_unit
# High-level keys synthesised from low-level GRIB keys:
# name -> (decode-from-GRIB callable, encode-to-GRIB callable)
COMPUTED_KEYS = {
    'time': (from_grib_date_time, to_grib_date_time),
    'step': (from_grib_step, to_grib_step),
}
@attr.attrs()
class CfMessage(messages.ComputedKeysMessage):
    # GRIB message that exposes the CF-style computed keys defined above
    computed_keys = attr.attrib(default=COMPUTED_KEYS)
|
ecmwf/cfgrib
|
cfgrib/dataset.py
|
open_file
|
python
|
def open_file(path, grib_errors='warn', **kwargs):
    """Open a GRIB file as a ``cfgrib.Dataset``.

    :param path: path of the GRIB file to open
    :param grib_errors: error-handling policy passed to ``messages.FileStream``
    :param kwargs: forwarded to ``build_dataset_components``
        (e.g. ``filter_by_keys``, ``encode_cf``, ``indexpath``)
    """
    # 'mode' used to be accepted; it is now ignored with a deprecation warning.
    if 'mode' in kwargs:
        warnings.warn("the `mode` keyword argument is ignored and deprecated", FutureWarning)
        kwargs.pop('mode')
    stream = messages.FileStream(path, message_class=cfmessage.CfMessage, errors=grib_errors)
    return Dataset(*build_dataset_components(stream, **kwargs))
|
Open a GRIB file as a ``cfgrib.Dataset``.
|
train
|
https://github.com/ecmwf/cfgrib/blob/d6d533f49c1eebf78f2f16ed0671c666de08c666/cfgrib/dataset.py#L502-L508
|
[
"def build_dataset_components(\n stream, indexpath='{path}.{short_hash}.idx', filter_by_keys={}, errors='warn',\n encode_cf=('parameter', 'time', 'geography', 'vertical'), timestamp=None, log=LOG,\n):\n filter_by_keys = dict(filter_by_keys)\n index = stream.index(ALL_KEYS, indexpath=indexpath).subindex(filter_by_keys)\n dimensions = collections.OrderedDict()\n variables = collections.OrderedDict()\n for param_id in index['paramId']:\n var_index = index.subindex(paramId=param_id)\n first = var_index.first()\n short_name = first['shortName']\n var_name = first['cfVarName']\n try:\n dims, data_var, coord_vars = build_variable_components(\n var_index, encode_cf, filter_by_keys, errors=errors,\n )\n except DatasetBuildError as ex:\n # NOTE: When a variable has more than one value for an attribute we need to raise all\n # the values in the file, not just the ones associated with that variable. See #54.\n key = ex.args[1]\n error_message = \"multiple values for unique key, try re-open the file with one of:\"\n fbks = []\n for value in index[key]:\n fbk = {key: value}\n fbk.update(filter_by_keys)\n fbks.append(fbk)\n error_message += \"\\n filter_by_keys=%r\" % fbk\n raise DatasetBuildError(error_message, key, fbks)\n if 'parameter' in encode_cf and var_name not in ('undef', 'unknown'):\n short_name = var_name\n try:\n dict_merge(variables, coord_vars)\n dict_merge(variables, {short_name: data_var})\n dict_merge(dimensions, dims)\n except ValueError:\n if errors == 'ignore':\n pass\n elif errors == 'raise':\n raise\n else:\n log.exception(\"skipping variable: paramId==%r shortName=%r\", param_id, short_name)\n attributes = enforce_unique_attributes(index, GLOBAL_ATTRIBUTES_KEYS, filter_by_keys)\n encoding = {\n 'source': stream.path,\n 'filter_by_keys': filter_by_keys,\n 'encode_cf': encode_cf,\n }\n attributes['Conventions'] = 'CF-1.7'\n attributes['institution'] = attributes['GRIB_centreDescription']\n attributes_namespace = {\n 'cfgrib_version': __version__,\n 
'cfgrib_open_kwargs': json.dumps(encoding),\n 'eccodes_version': messages.eccodes_version,\n 'timestamp': timestamp or datetime.datetime.now().isoformat().partition('.')[0]\n }\n history_in = '{timestamp} GRIB to CDM+CF via ' \\\n 'cfgrib-{cfgrib_version}/ecCodes-{eccodes_version} with {cfgrib_open_kwargs}'\n attributes['history'] = history_in.format(**attributes_namespace)\n return dimensions, variables, attributes, encoding\n"
] |
#
# Copyright 2017-2019 European Centre for Medium-Range Weather Forecasts (ECMWF).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# Alessandro Amici - B-Open - https://bopen.eu
#
import collections
import datetime
import json
import logging
import typing as T
import warnings
import attr
import numpy as np
from . import __version__
from . import cfmessage
from . import messages
LOG = logging.getLogger(__name__)
#
# Edition-independent keys in ecCodes namespaces. Documented in:
# https://software.ecmwf.int/wiki/display/ECC/GRIB%3A+Namespaces
#
GLOBAL_ATTRIBUTES_KEYS = ['edition', 'centre', 'centreDescription', 'subCentre']
DATA_ATTRIBUTES_KEYS = [
'paramId', 'shortName', 'units', 'name', 'cfName', 'cfVarName',
'dataType', 'missingValue', 'numberOfPoints',
'totalNumber',
'typeOfLevel', 'NV',
'stepUnits', 'stepType',
'gridType', 'gridDefinitionDescription',
]
GRID_TYPE_MAP = {
'regular_ll': [
'Nx', 'iDirectionIncrementInDegrees', 'iScansNegatively',
'longitudeOfFirstGridPointInDegrees', 'longitudeOfLastGridPointInDegrees',
'Ny', 'jDirectionIncrementInDegrees', 'jPointsAreConsecutive', 'jScansPositively',
'latitudeOfFirstGridPointInDegrees', 'latitudeOfLastGridPointInDegrees',
],
'rotated_ll': [
'Nx', 'Ny', 'angleOfRotationInDegrees',
'iDirectionIncrementInDegrees', 'iScansNegatively',
'jDirectionIncrementInDegrees', 'jPointsAreConsecutive', 'jScansPositively',
'latitudeOfFirstGridPointInDegrees', 'latitudeOfLastGridPointInDegrees',
'latitudeOfSouthernPoleInDegrees',
'longitudeOfFirstGridPointInDegrees', 'longitudeOfLastGridPointInDegrees',
'longitudeOfSouthernPoleInDegrees',
],
'reduced_ll': [
'Ny', 'jDirectionIncrementInDegrees', 'jPointsAreConsecutive', 'jScansPositively',
'latitudeOfFirstGridPointInDegrees', 'latitudeOfLastGridPointInDegrees',
],
'regular_gg': [
'Nx', 'iDirectionIncrementInDegrees', 'iScansNegatively',
'longitudeOfFirstGridPointInDegrees', 'longitudeOfLastGridPointInDegrees',
'N', 'Ny',
],
'rotated_gg': [
'Nx', 'Ny', 'angleOfRotationInDegrees',
'iDirectionIncrementInDegrees', 'iScansNegatively',
'jPointsAreConsecutive', 'jScansPositively',
'latitudeOfFirstGridPointInDegrees', 'latitudeOfLastGridPointInDegrees',
'latitudeOfSouthernPoleInDegrees',
'longitudeOfFirstGridPointInDegrees', 'longitudeOfLastGridPointInDegrees',
'longitudeOfSouthernPoleInDegrees', 'N',
],
'lambert': [
'LaDInDegrees', 'LoVInDegrees', 'iScansNegatively',
'jPointsAreConsecutive', 'jScansPositively',
'latitudeOfFirstGridPointInDegrees', 'latitudeOfSouthernPoleInDegrees',
'longitudeOfFirstGridPointInDegrees', 'longitudeOfSouthernPoleInDegrees',
'DyInMetres', 'DxInMetres', 'Latin2InDegrees', 'Latin1InDegrees', 'Ny', 'Nx',
],
'reduced_gg': ['N', 'pl'],
'sh': ['M', 'K', 'J'],
}
GRID_TYPE_KEYS = sorted(set(k for _, ks in GRID_TYPE_MAP.items() for k in ks))
ENSEMBLE_KEYS = ['number']
VERTICAL_KEYS = ['level']
DATA_TIME_KEYS = ['dataDate', 'dataTime', 'endStep']
REF_TIME_KEYS = ['time', 'step']
ALL_HEADER_DIMS = ENSEMBLE_KEYS + VERTICAL_KEYS + DATA_TIME_KEYS + REF_TIME_KEYS
ALL_KEYS = sorted(GLOBAL_ATTRIBUTES_KEYS + DATA_ATTRIBUTES_KEYS + GRID_TYPE_KEYS + ALL_HEADER_DIMS)
COORD_ATTRS = {
# geography
'latitude': {
'units': 'degrees_north',
'standard_name': 'latitude', 'long_name': 'latitude',
},
'longitude': {
'units': 'degrees_east',
'standard_name': 'longitude', 'long_name': 'longitude',
},
# vertical
'depthBelowLand': {
'units': 'm', 'positive': 'down', 'long_name': 'soil depth',
'standard_name': 'depth',
},
'depthBelowLandLayer': {
'units': 'm', 'positive': 'down', 'long_name': 'soil depth',
'standard_name': 'depth',
},
'hybrid': {
'units': '1', 'positive': 'down', 'long_name': 'hybrid level',
'standard_name': 'atmosphere_hybrid_sigma_pressure_coordinate',
},
'heightAboveGround': {
'units': 'm', 'positive': 'up', 'long_name': 'height above the surface',
'standard_name': 'height',
},
'isobaricInhPa': {
'units': 'hPa', 'positive': 'down', 'stored_direction': 'decreasing',
'standard_name': 'air_pressure', 'long_name': 'pressure',
},
'isobaricInPa': {
'units': 'Pa', 'positive': 'down', 'stored_direction': 'decreasing',
'standard_name': 'air_pressure', 'long_name': 'pressure',
},
'isobaricLayer': {
'units': 'Pa', 'positive': 'down',
'standard_name': 'air_pressure', 'long_name': 'pressure',
},
# ensemble
'number': {
'units': '1',
'standard_name': 'realization', 'long_name': 'ensemble member numerical id',
},
# time
'step': {
'units': 'hours',
'standard_name': 'forecast_period', 'long_name': 'time since forecast_reference_time',
},
'time': {
'units': 'seconds since 1970-01-01T00:00:00', 'calendar': 'proleptic_gregorian',
'standard_name': 'forecast_reference_time', 'long_name': 'initial time of forecast',
},
'valid_time': {
'units': 'seconds since 1970-01-01T00:00:00', 'calendar': 'proleptic_gregorian',
'standard_name': 'time', 'long_name': 'time',
},
}
class DatasetBuildError(ValueError):
    """Raised when a GRIB stream cannot be mapped to a single hypercube.

    Callers may pass extra structured args (the offending key and candidate
    ``filter_by_keys`` dicts) after the human-readable message.
    """
    def __str__(self):
        # show only the message, not the structured extra args
        return str(self.args[0])
def enforce_unique_attributes(index, attributes_keys, filter_by_keys={}):
    # type: (messages.FileIndex, T.Sequence[str], dict) -> T.Dict[str, T.Any]
    """Collect the single-valued keys of ``index`` as 'GRIB_'-prefixed attributes.

    Keys with several values raise ``DatasetBuildError`` carrying the candidate
    ``filter_by_keys`` dicts; empty or 'undef'/'unknown' values are skipped.
    """
    attributes = collections.OrderedDict()  # type: T.Dict[str, T.Any]
    for key in attributes_keys:
        values = index[key]
        if len(values) > 1:
            # build one candidate filter per conflicting value to help the caller
            fbks = [dict({key: value}, **filter_by_keys) for value in values]
            raise DatasetBuildError("multiple values for key %r" % key, key, fbks)
        if values and values[0] not in ('undef', 'unknown'):
            attributes['GRIB_' + key] = values[0]
    return attributes
@attr.attrs(cmp=False)
class Variable(object):
    # NetCDF-like variable: dimension names, the backing array and CF attributes.
    dimensions = attr.attrib(type=T.Tuple[str, ...])
    data = attr.attrib(type=np.ndarray)
    attributes = attr.attrib(default={}, type=T.Dict[str, T.Any], repr=False)
    def __eq__(self, other):
        """Structural equality; compares array contents via ``np.array_equal``."""
        if other.__class__ is not self.__class__:
            return NotImplemented
        equal = (self.dimensions, self.attributes) == (other.dimensions, other.attributes)
        return equal and np.array_equal(self.data, other.data)
def expand_item(item, shape):
    """Normalize every indexer in ``item`` to an explicit list of indices.

    Slices are expanded against the matching ``shape`` entry, ints become
    single-element lists, lists/arrays pass through as lists.  Raises
    TypeError on any other indexer type.
    """
    expanded = []
    for indexer, size in zip(item, shape):
        if isinstance(indexer, list):
            expanded.append(indexer)
        elif isinstance(indexer, np.ndarray):
            expanded.append(indexer.tolist())
        elif isinstance(indexer, slice):
            start = indexer.start or 0
            stop = indexer.stop or size
            step = indexer.step or 1
            expanded.append(list(range(start, stop, step)))
        elif isinstance(indexer, int):
            expanded.append([indexer])
        else:
            raise TypeError("Unsupported index type %r" % type(indexer))
    return tuple(expanded)
@attr.attrs()
class OnDiskArray(object):
    # Lazy numpy-like view over the fields of a GRIB file: values are read from
    # disk message-by-message only when the array is actually indexed.
    stream = attr.attrib()  # FileStream the field values are read from
    shape = attr.attrib(type=T.Tuple[int, ...])
    # header indexes (e.g. (time_idx, level_idx)) -> byte offsets of the message
    offsets = attr.attrib(repr=False, type=T.Dict[T.Tuple[T.Any, ...], T.List[int]])
    missing_value = attr.attrib()
    # number of trailing geography dimensions (1 for 'values', 2 for y/x grids)
    geo_ndim = attr.attrib(default=1, repr=False)
    dtype = np.dtype('float32')
    def build_array(self):
        """Helper method used to test __getitem__"""
        # type: () -> np.ndarray
        array = np.full(self.shape, fill_value=np.nan, dtype='float32')
        # NOTE(review): file is opened in text mode; presumably
        # stream.message_from_file only seeks on it — confirm 'rb' is not needed.
        with open(self.stream.path) as file:
            for header_indexes, offset in self.offsets.items():
                # NOTE: fill a single field as found in the message
                message = self.stream.message_from_file(file, offset=offset[0])
                values = message.message_get('values', float)
                array.__getitem__(header_indexes).flat[:] = values
        # decode the GRIB missing value marker into NaN
        array[array == self.missing_value] = np.nan
        return array
    def __getitem__(self, item):
        # a full tuple of indexers, one per dimension, is required
        assert isinstance(item, tuple), "Item type must be tuple not %r" % type(item)
        assert len(item) == len(self.shape), "Item len must be %r not %r" % (self.shape, len(item))
        # normalize the header (non-geography) indexers to explicit index lists
        header_item = expand_item(item[:-self.geo_ndim], self.shape)
        array_field_shape = tuple(len(l) for l in header_item) + self.shape[-self.geo_ndim:]
        array_field = np.full(array_field_shape, fill_value=np.nan, dtype='float32')
        with open(self.stream.path) as file:
            for header_indexes, offset in self.offsets.items():
                try:
                    # position of this message inside the selection, if selected
                    array_field_indexes = []
                    for it, ix in zip(header_item, header_indexes):
                        array_field_indexes.append(it.index(ix))
                except ValueError:
                    # this message is not part of the requested selection
                    continue
                # NOTE: fill a single field as found in the message
                message = self.stream.message_from_file(file, offset=offset[0])
                values = message.message_get('values', float)
                array_field.__getitem__(tuple(array_field_indexes)).flat[:] = values
        # apply the geography indexers and decode missing values
        array = array_field[(Ellipsis,) + item[-self.geo_ndim:]]
        array[array == self.missing_value] = np.nan
        # drop the dimensions that were indexed with a plain integer
        for i, it in reversed(list(enumerate(item[:-self.geo_ndim]))):
            if isinstance(it, int):
                array = array[(slice(None, None, None),) * i + (0,)]
        return array
GRID_TYPES_DIMENSION_COORDS = ['regular_ll', 'regular_gg']
GRID_TYPES_2D_NON_DIMENSION_COORDS = [
'rotated_ll', 'rotated_gg', 'lambert', 'albers', 'polar_stereographic',
]
def build_geography_coordinates(
    index,  # type: messages.FileIndex
    encode_cf,  # type: T.Sequence[str]
    errors,  # type: str
    log=LOG,  # type: logging.Logger
):
    # type: (...) -> T.Tuple[T.Tuple[str, ...], T.Tuple[int, ...], T.Dict[str, Variable]]
    """Return the dimension names, field shape and coordinate variables that
    describe the horizontal grid of the messages in ``index``."""
    first = index.first()
    geo_coord_vars = collections.OrderedDict()  # type: T.Dict[str, Variable]
    grid_type = index.getone('gridType')
    if 'geography' in encode_cf and grid_type in GRID_TYPES_DIMENSION_COORDS:
        # regular grids: latitude / longitude are 1-D dimension coordinates
        geo_dims = ('latitude', 'longitude')  # type: T.Tuple[str, ...]
        geo_shape = (index.getone('Ny'), index.getone('Nx'))  # type: T.Tuple[int, ...]
        latitudes = np.array(first['distinctLatitudes'])
        geo_coord_vars['latitude'] = Variable(
            dimensions=('latitude',), data=latitudes, attributes=COORD_ATTRS['latitude'].copy(),
        )
        if latitudes[0] > latitudes[-1]:
            # grid is stored North to South
            geo_coord_vars['latitude'].attributes['stored_direction'] = 'decreasing'
        geo_coord_vars['longitude'] = Variable(
            dimensions=('longitude',), data=np.array(first['distinctLongitudes']),
            attributes=COORD_ATTRS['longitude'],
        )
    elif 'geography' in encode_cf and grid_type in GRID_TYPES_2D_NON_DIMENSION_COORDS:
        # projected grids: latitude / longitude are 2-D auxiliary coords over (y, x)
        geo_dims = ('y', 'x')
        geo_shape = (index.getone('Ny'), index.getone('Nx'))
        try:
            geo_coord_vars['latitude'] = Variable(
                dimensions=('y', 'x'), data=np.array(first['latitudes']).reshape(geo_shape),
                attributes=COORD_ATTRS['latitude'],
            )
            geo_coord_vars['longitude'] = Variable(
                dimensions=('y', 'x'), data=np.array(first['longitudes']).reshape(geo_shape),
                attributes=COORD_ATTRS['longitude'],
            )
        except KeyError:  # pragma: no cover
            if errors != 'ignore':
                log.warning('ecCodes provides no latitudes/longitudes for gridType=%r', grid_type)
    else:
        # unstructured / unknown grids: a single flat 'values' dimension
        geo_dims = ('values',)
        geo_shape = (index.getone('numberOfPoints'),)
        # add secondary coordinates if ecCodes provides them
        try:
            latitude = first['latitudes']
            geo_coord_vars['latitude'] = Variable(
                dimensions=('values',), data=np.array(latitude),
                attributes=COORD_ATTRS['latitude'],
            )
            longitude = first['longitudes']
            geo_coord_vars['longitude'] = Variable(
                dimensions=('values',), data=np.array(longitude),
                attributes=COORD_ATTRS['longitude'],
            )
        except KeyError:  # pragma: no cover
            if errors != 'ignore':
                log.warning('ecCodes provides no latitudes/longitudes for gridType=%r', grid_type)
    return geo_dims, geo_shape, geo_coord_vars
def encode_cf_first(data_var_attrs, encode_cf=('parameter', 'time')):
    """Apply CF 'parameter' encoding to ``data_var_attrs`` in place and return
    the list of header coordinate keys to scan, honouring 'time' encoding."""
    coords_map = ENSEMBLE_KEYS[:]
    param_id = data_var_attrs.get('GRIB_paramId', 'undef')
    data_var_attrs['long_name'] = 'original GRIB paramId: %s' % param_id
    data_var_attrs['units'] = '1'
    if 'parameter' in encode_cf:
        # promote the GRIB metadata to CF attributes when available
        for cf_key, grib_key in [
            ('standard_name', 'GRIB_cfName'),
            ('long_name', 'GRIB_name'),
            ('units', 'GRIB_units'),
        ]:
            if grib_key in data_var_attrs:
                data_var_attrs[cf_key] = data_var_attrs[grib_key]
    # reference-time coordinates when 'time' is CF-encoded, raw data time keys otherwise
    coords_map.extend(REF_TIME_KEYS if 'time' in encode_cf else DATA_TIME_KEYS)
    coords_map.extend(VERTICAL_KEYS)
    return coords_map
def build_variable_components(index, encode_cf=(), filter_by_keys={}, log=LOG, errors='warn'):
    """Build the data variable and coordinate variables for a single paramId.

    Returns ``(dimensions, data_var, coord_vars)``; ``data_var.data`` is a lazy
    ``OnDiskArray``.  Raises DatasetBuildError (via
    ``enforce_unique_attributes``) when an attribute key is not unique.
    """
    data_var_attrs_keys = DATA_ATTRIBUTES_KEYS[:]
    data_var_attrs_keys.extend(GRID_TYPE_MAP.get(index.getone('gridType'), []))
    data_var_attrs = enforce_unique_attributes(index, data_var_attrs_keys, filter_by_keys)
    coords_map = encode_cf_first(data_var_attrs, encode_cf)
    coord_name_key_map = {}
    coord_vars = collections.OrderedDict()
    for coord_key in coords_map:
        values = index[coord_key]
        if len(values) == 1 and values[0] == 'undef':
            log.info("missing from GRIB stream: %r" % coord_key)
            continue
        coord_name = coord_key
        # CF vertical encoding renames the generic 'level' after the GRIB typeOfLevel
        if 'vertical' in encode_cf and coord_key == 'level' and \
                'GRIB_typeOfLevel' in data_var_attrs:
            coord_name = data_var_attrs['GRIB_typeOfLevel']
        coord_name_key_map[coord_name] = coord_key
        attributes = {
            'long_name': 'original GRIB coordinate for key: %s(%s)' % (coord_key, coord_name),
            'units': '1',
        }
        attributes.update(COORD_ATTRS.get(coord_name, {}).copy())
        # sort direction follows the coordinate convention (e.g. pressure decreasing)
        data = np.array(sorted(values, reverse=attributes.get('stored_direction') == 'decreasing'))
        dimensions = (coord_name,)
        if len(values) == 1:
            # single-valued coordinates become scalar (dimensionless) coordinates
            data = data[0]
            dimensions = ()
        coord_vars[coord_name] = Variable(dimensions=dimensions, data=data, attributes=attributes)
    header_dimensions = tuple(d for d, c in coord_vars.items() if c.data.size > 1)
    header_shape = tuple(coord_vars[d].data.size for d in header_dimensions)
    geo_dims, geo_shape, geo_coord_vars = build_geography_coordinates(index, encode_cf, errors)
    dimensions = header_dimensions + geo_dims
    shape = header_shape + geo_shape
    coord_vars.update(geo_coord_vars)
    # map every message offset to its indexes along the header dimensions
    offsets = collections.OrderedDict()
    for header_values, offset in index.offsets:
        header_indexes = []  # type: T.List[int]
        for dim in header_dimensions:
            header_value = header_values[index.index_keys.index(coord_name_key_map.get(dim, dim))]
            header_indexes.append(coord_vars[dim].data.tolist().index(header_value))
        offsets[tuple(header_indexes)] = offset
    missing_value = data_var_attrs.get('missingValue', 9999)
    data = OnDiskArray(
        stream=index.filestream, shape=shape, offsets=offsets, missing_value=missing_value,
        geo_ndim=len(geo_dims),
    )
    if 'time' in coord_vars and 'time' in encode_cf:
        # add the 'valid_time' secondary coordinate
        step_data = coord_vars['step'].data if 'step' in coord_vars else np.array(0.)
        dims, time_data = cfmessage.build_valid_time(
            coord_vars['time'].data, step_data,
        )
        attrs = COORD_ATTRS['valid_time']
        coord_vars['valid_time'] = Variable(dimensions=dims, data=time_data, attributes=attrs)
    data_var_attrs['coordinates'] = ' '.join(coord_vars.keys())
    data_var = Variable(dimensions=dimensions, data=data, attributes=data_var_attrs)
    dims = collections.OrderedDict((d, s) for d, s in zip(dimensions, data_var.data.shape))
    return dims, data_var, coord_vars
def dict_merge(master, update):
    """Merge ``update`` into ``master`` in place.

    Existing keys must carry an equal value; a conflicting value raises
    ``DatasetBuildError``.
    """
    for key, new_value in update.items():
        if key not in master:
            master[key] = new_value
        elif master[key] != new_value:
            raise DatasetBuildError("key present and new value is different: "
                                    "key=%r value=%r new_value=%r" % (key, master[key], new_value))
def build_dataset_components(
    stream, indexpath='{path}.{short_hash}.idx', filter_by_keys={}, errors='warn',
    encode_cf=('parameter', 'time', 'geography', 'vertical'), timestamp=None, log=LOG,
):
    """Map a GRIB stream to the ``(dimensions, variables, attributes, encoding)``
    tuple used to construct a ``Dataset``."""
    filter_by_keys = dict(filter_by_keys)
    index = stream.index(ALL_KEYS, indexpath=indexpath).subindex(filter_by_keys)
    dimensions = collections.OrderedDict()
    variables = collections.OrderedDict()
    # one data variable per paramId found in the (filtered) index
    for param_id in index['paramId']:
        var_index = index.subindex(paramId=param_id)
        first = var_index.first()
        short_name = first['shortName']
        var_name = first['cfVarName']
        try:
            dims, data_var, coord_vars = build_variable_components(
                var_index, encode_cf, filter_by_keys, errors=errors,
            )
        except DatasetBuildError as ex:
            # NOTE: When a variable has more than one value for an attribute we need to raise all
            # the values in the file, not just the ones associated with that variable. See #54.
            key = ex.args[1]
            error_message = "multiple values for unique key, try re-open the file with one of:"
            fbks = []
            for value in index[key]:
                fbk = {key: value}
                fbk.update(filter_by_keys)
                fbks.append(fbk)
                error_message += "\n filter_by_keys=%r" % fbk
            raise DatasetBuildError(error_message, key, fbks)
        if 'parameter' in encode_cf and var_name not in ('undef', 'unknown'):
            # prefer the CF variable name when one is defined
            short_name = var_name
        try:
            dict_merge(variables, coord_vars)
            dict_merge(variables, {short_name: data_var})
            dict_merge(dimensions, dims)
        except ValueError:
            # a variable that is inconsistent with what was merged so far
            if errors == 'ignore':
                pass
            elif errors == 'raise':
                raise
            else:
                log.exception("skipping variable: paramId==%r shortName=%r", param_id, short_name)
    attributes = enforce_unique_attributes(index, GLOBAL_ATTRIBUTES_KEYS, filter_by_keys)
    encoding = {
        'source': stream.path,
        'filter_by_keys': filter_by_keys,
        'encode_cf': encode_cf,
    }
    attributes['Conventions'] = 'CF-1.7'
    attributes['institution'] = attributes['GRIB_centreDescription']
    # record provenance in the CF 'history' attribute
    attributes_namespace = {
        'cfgrib_version': __version__,
        'cfgrib_open_kwargs': json.dumps(encoding),
        'eccodes_version': messages.eccodes_version,
        'timestamp': timestamp or datetime.datetime.now().isoformat().partition('.')[0]
    }
    history_in = '{timestamp} GRIB to CDM+CF via ' \
        'cfgrib-{cfgrib_version}/ecCodes-{eccodes_version} with {cfgrib_open_kwargs}'
    attributes['history'] = history_in.format(**attributes_namespace)
    return dimensions, variables, attributes, encoding
@attr.attrs()
class Dataset(object):
    """
    Map a GRIB file to the NetCDF Common Data Model with CF Conventions.
    """
    dimensions = attr.attrib(type=T.Dict[str, int])  # dimension name -> size
    variables = attr.attrib(type=T.Dict[str, Variable])  # variable name -> Variable
    attributes = attr.attrib(type=T.Dict[str, T.Any])  # global attributes
    encoding = attr.attrib(type=T.Dict[str, T.Any])  # source path and open kwargs
|
ecmwf/cfgrib
|
cfgrib/xarray_to_grib.py
|
canonical_dataarray_to_grib
|
python
|
def canonical_dataarray_to_grib(
    data_var, file, grib_keys={}, default_grib_keys=DEFAULT_GRIB_KEYS, **kwargs
):
    # type: (xr.DataArray, T.IO[bytes], T.Dict[str, T.Any], T.Dict[str, T.Any], T.Any) -> None
    """
    Write a ``xr.DataArray`` in *canonical* form to a GRIB file.

    :param data_var: the DataArray to encode (coords/attrs drive key detection)
    :param file: a writable binary file object the messages are appended to
    :param grib_keys: user-supplied GRIB keys; take precedence over detected ones
    :param kwargs: forwarded to ``make_template_message``
    :raises ValueError: when 'gridType' is neither passed nor auto-detected
    """
    # validate Dataset keys, DataArray names, and attr keys/values
    detected_keys, suggested_keys = detect_grib_keys(data_var, default_grib_keys, grib_keys)
    merged_grib_keys = merge_grib_keys(grib_keys, detected_keys, suggested_keys)
    if 'gridType' not in merged_grib_keys:
        raise ValueError("required grib_key 'gridType' not passed nor auto-detected")
    template_message = make_template_message(merged_grib_keys, **kwargs)
    # promote all known header coordinates to dimensions, then emit one
    # message per combination of header coordinate values
    coords_names, data_var = expand_dims(data_var)
    header_coords_values = [data_var.coords[name].values.tolist() for name in coords_names]
    for items in itertools.product(*header_coords_values):
        select = {n: v for n, v in zip(coords_names, items)}
        field_values = data_var.sel(**select).values.flat[:]
        # Missing values handling
        invalid_field_values = np.logical_not(np.isfinite(field_values))
        # There's no need to save a message full of missing values
        if invalid_field_values.all():
            continue
        missing_value = merged_grib_keys.get('missingValue', 9999)
        field_values[invalid_field_values] = missing_value
        message = cfgrib.CfMessage.from_message(template_message)
        for coord_name, coord_value in zip(coords_names, items):
            # vertical coordinates are all written through the generic 'level' key
            if coord_name in ALL_TYPE_OF_LEVELS:
                coord_name = 'level'
            message[coord_name] = coord_value
        # OPTIMIZE: convert to list because Message.message_set doesn't support np.ndarray
        message['values'] = field_values.tolist()
        message.write(file)
|
Write a ``xr.DataArray`` in *canonical* form to a GRIB file.
|
train
|
https://github.com/ecmwf/cfgrib/blob/d6d533f49c1eebf78f2f16ed0671c666de08c666/cfgrib/xarray_to_grib.py#L197-L239
|
[
"def detect_grib_keys(data_var, default_grib_keys, grib_keys={}):\n # type: (xr.DataArray, T.Dict[str, T.Any], T.Dict[str, T.Any]) -> T.Tuple[dict, dict]\n detected_grib_keys = {}\n suggested_grib_keys = default_grib_keys.copy()\n\n for key, value in data_var.attrs.items():\n if key[:5] == 'GRIB_':\n suggested_grib_keys[key[5:]] = value\n\n if 'latitude' in data_var.dims and 'longitude' in data_var.dims:\n try:\n regular_ll_keys = detect_regular_ll_grib_keys(data_var.longitude, data_var.latitude)\n detected_grib_keys.update(regular_ll_keys)\n except:\n pass\n\n for tol in ALL_TYPE_OF_LEVELS:\n if tol in data_var.dims or tol in data_var.coords:\n detected_grib_keys['typeOfLevel'] = tol\n\n if 'number' in data_var.dims or 'number' in data_var.coords and grib_keys.get('edition') != 1:\n # cannot set 'number' key without setting a productDefinitionTemplateNumber in GRIB2\n detected_grib_keys['productDefinitionTemplateNumber'] = 1\n\n if 'values' in data_var.dims:\n detected_grib_keys['numberOfPoints'] = data_var.shape[data_var.dims.index('values')]\n\n return detected_grib_keys, suggested_grib_keys\n",
"def merge_grib_keys(grib_keys, detected_grib_keys, default_grib_keys):\n merged_grib_keys = {k: v for k, v in grib_keys.items()}\n dataset.dict_merge(merged_grib_keys, detected_grib_keys)\n for key, value in default_grib_keys.items():\n if key not in merged_grib_keys:\n merged_grib_keys[key] = value\n return merged_grib_keys\n",
"def expand_dims(data_var):\n coords_names = []\n for coord_name in dataset.ALL_HEADER_DIMS + ALL_TYPE_OF_LEVELS:\n if coord_name in set(data_var.coords):\n coords_names.append(coord_name)\n if coord_name not in data_var.dims:\n data_var = data_var.expand_dims(coord_name)\n return coords_names, data_var\n",
"def make_template_message(merged_grib_keys, template_path=None, sample_name=None):\n # type: (T.Dict[str, T.Any], str, str) -> cfgrib.CfMessage\n if template_path and sample_name:\n raise ValueError(\"template_path and sample_name should not be both set\")\n\n if template_path:\n with open(template_path) as file:\n template_message = cfgrib.CfMessage.from_file(file)\n else:\n if sample_name is None:\n sample_name = detect_sample_name(merged_grib_keys)\n template_message = cfgrib.CfMessage.from_sample_name(sample_name)\n\n for key in MESSAGE_DEFINITION_KEYS:\n if key in list(merged_grib_keys):\n template_message[key] = merged_grib_keys[key]\n merged_grib_keys.pop(key)\n\n for key, value in merged_grib_keys.items():\n try:\n template_message[key] = value\n except KeyError:\n LOGGER.exception(\"skipping key due to errors: %r\" % key)\n\n return template_message\n"
] |
#
# Copyright 2017-2019 European Centre for Medium-Range Weather Forecasts (ECMWF).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# Alessandro Amici - B-Open - https://bopen.eu
# Aureliana Barghini - B-Open - https://bopen.eu
# Leonardo Barcaroli - B-Open - https://bopen.eu
#
import itertools
import logging
import typing as T # noqa
import warnings
import numpy as np
import xarray as xr
import cfgrib
from cfgrib import dataset # FIXME: write support needs internal functions
LOGGER = logging.getLogger(__name__)
DEFAULT_GRIB_KEYS = {
'centre': 255, # missing value, see: http://apps.ecmwf.int/codes/grib/format/grib1/centre/0/
'typeOfLevel': 'surface',
}
TYPE_OF_LEVELS_SFC = ['surface', 'meanSea', 'cloudBase', 'cloudTop']
TYPE_OF_LEVELS_PL = ['isobaricInhPa', 'isobaricInPa']
TYPE_OF_LEVELS_ML = ['hybrid']
ALL_TYPE_OF_LEVELS = TYPE_OF_LEVELS_SFC + TYPE_OF_LEVELS_PL + TYPE_OF_LEVELS_ML
GRID_TYPES = [
'polar_stereographic', 'reduced_gg', 'reduced_ll', 'regular_gg', 'regular_ll', 'rotated_gg',
'rotated_ll', 'sh',
]
MESSAGE_DEFINITION_KEYS = [
# for the GRIB 2 sample we must set this before setting 'totalNumber'
'productDefinitionTemplateNumber',
# NO IDEA WHAT IS GOING ON HERE: saving regular_ll_msl.grib results in the wrong `paramId`
# unless `units` is set before some other unknown key, this happens at random and only in
# Python 3.5, so it must be linked to dict key stability.
'units',
]
def regular_ll_params(values, min_value=-180., max_value=360.):
    # type: (T.Sequence, float, float) -> T.Tuple[float, float, int]
    """Validate that ``values`` is a regular 1-D grid and return (start, stop, count).

    :raises ValueError: when the endpoints fall outside [min_value, max_value]
        or the spacing is not uniform.
    """
    start = float(values[0])
    stop = float(values[-1])
    num = len(values)
    if min(start, stop) < min_value or max(start, stop) > max_value:
        raise ValueError("Unsupported spatial grid: out of bounds (%r, %r)" % (start, stop))
    # a regular grid must match the equally-spaced reference exactly (up to fp tolerance)
    expected = np.linspace(start, stop, num)
    if not np.allclose(expected, values):
        raise ValueError("Unsupported spatial grid: not regular %r" % (expected,))
    return (start, stop, num)
def detect_regular_ll_grib_keys(lon, lat):
    # type: (np.ndarray, np.ndarray) -> T.Dict[str, T.Any]
    """Build the GRIB keys describing a regular lat/lon grid from its coordinates."""
    grib_keys = {}  # type: T.Dict[str, T.Any]
    lon_first, lon_last, lon_count = regular_ll_params(lon)
    scan_negatively = lon_last < lon_first  # record direction before wrapping
    lon_increment = abs(lon_last - lon_first) / (lon_count - 1.)
    # GRIB stores longitudes in [0, 360)
    if lon_first < 0.:
        lon_first += 360.
    if lon_last < 0.:
        lon_last += 360.
    grib_keys['longitudeOfFirstGridPointInDegrees'] = lon_first
    grib_keys['longitudeOfLastGridPointInDegrees'] = lon_last
    grib_keys['Ni'] = lon_count
    grib_keys['iDirectionIncrementInDegrees'] = lon_increment
    grib_keys['iScansNegatively'] = scan_negatively
    lat_first, lat_last, lat_count = regular_ll_params(lat, min_value=-90., max_value=90.)
    grib_keys['latitudeOfFirstGridPointInDegrees'] = lat_first
    grib_keys['latitudeOfLastGridPointInDegrees'] = lat_last
    grib_keys['Nj'] = lat_count
    grib_keys['jDirectionIncrementInDegrees'] = abs(lat_last - lat_first) / (lat_count - 1.)
    grib_keys['jScansPositively'] = lat_last > lat_first
    grib_keys['gridType'] = 'regular_ll'
    return grib_keys
def detect_grib_keys(data_var, default_grib_keys, grib_keys={}):
    # type: (xr.DataArray, T.Dict[str, T.Any], T.Dict[str, T.Any]) -> T.Tuple[dict, dict]
    """Detect GRIB keys from ``data_var`` dims, coords and GRIB_ attributes.

    Returns ``(detected_grib_keys, suggested_grib_keys)``; detected keys are
    authoritative, suggested ones are defaults plus GRIB_-prefixed attributes.
    """
    detected_grib_keys = {}
    suggested_grib_keys = default_grib_keys.copy()
    # GRIB_-prefixed DataArray attributes carry through as suggested keys
    for key, value in data_var.attrs.items():
        if key[:5] == 'GRIB_':
            suggested_grib_keys[key[5:]] = value
    if 'latitude' in data_var.dims and 'longitude' in data_var.dims:
        # best-effort: non-regular grids simply don't get regular_ll keys
        try:
            regular_ll_keys = detect_regular_ll_grib_keys(data_var.longitude, data_var.latitude)
            detected_grib_keys.update(regular_ll_keys)
        except Exception:
            # FIX: was a bare `except:` that also swallowed KeyboardInterrupt/SystemExit
            pass
    for tol in ALL_TYPE_OF_LEVELS:
        if tol in data_var.dims or tol in data_var.coords:
            detected_grib_keys['typeOfLevel'] = tol
    # cannot set 'number' key without setting a productDefinitionTemplateNumber in GRIB2
    # FIX: parenthesized — the original `A or B and C` applied the edition guard to the
    # coords test only, wrongly tagging GRIB1 data that has a 'number' dimension.
    if ('number' in data_var.dims or 'number' in data_var.coords) \
            and grib_keys.get('edition') != 1:
        detected_grib_keys['productDefinitionTemplateNumber'] = 1
    if 'values' in data_var.dims:
        detected_grib_keys['numberOfPoints'] = data_var.shape[data_var.dims.index('values')]
    return detected_grib_keys, suggested_grib_keys
def detect_sample_name(grib_keys, sample_name_template='{geography}_{vertical}_grib{edition}'):
# type: (T.Mapping, str) -> str
edition = grib_keys.get('edition', 2)
if grib_keys['gridType'] in GRID_TYPES:
geography = grib_keys['gridType']
else:
LOGGER.info("unknown 'gridType': %r. Using GRIB2 template", grib_keys['gridType'])
return 'GRIB2'
if grib_keys['typeOfLevel'] in TYPE_OF_LEVELS_PL:
vertical = 'pl'
elif grib_keys['typeOfLevel'] in TYPE_OF_LEVELS_SFC:
vertical = 'sfc'
elif grib_keys['typeOfLevel'] in TYPE_OF_LEVELS_ML:
vertical = 'ml'
else:
LOGGER.info("unknown 'typeOfLevel': %r. Using GRIB2 template", grib_keys['typeOfLevel'])
return 'GRIB2'
sample_name = sample_name_template.format(**locals())
return sample_name
def merge_grib_keys(grib_keys, detected_grib_keys, default_grib_keys):
merged_grib_keys = {k: v for k, v in grib_keys.items()}
dataset.dict_merge(merged_grib_keys, detected_grib_keys)
for key, value in default_grib_keys.items():
if key not in merged_grib_keys:
merged_grib_keys[key] = value
return merged_grib_keys
def expand_dims(data_var):
coords_names = []
for coord_name in dataset.ALL_HEADER_DIMS + ALL_TYPE_OF_LEVELS:
if coord_name in set(data_var.coords):
coords_names.append(coord_name)
if coord_name not in data_var.dims:
data_var = data_var.expand_dims(coord_name)
return coords_names, data_var
def make_template_message(merged_grib_keys, template_path=None, sample_name=None):
# type: (T.Dict[str, T.Any], str, str) -> cfgrib.CfMessage
if template_path and sample_name:
raise ValueError("template_path and sample_name should not be both set")
if template_path:
with open(template_path) as file:
template_message = cfgrib.CfMessage.from_file(file)
else:
if sample_name is None:
sample_name = detect_sample_name(merged_grib_keys)
template_message = cfgrib.CfMessage.from_sample_name(sample_name)
for key in MESSAGE_DEFINITION_KEYS:
if key in list(merged_grib_keys):
template_message[key] = merged_grib_keys[key]
merged_grib_keys.pop(key)
for key, value in merged_grib_keys.items():
try:
template_message[key] = value
except KeyError:
LOGGER.exception("skipping key due to errors: %r" % key)
return template_message
def canonical_dataset_to_grib(dataset, path, mode='wb', no_warn=False, grib_keys={}, **kwargs):
# type: (xr.Dataset, str, str, bool, T.Dict[str, T.Any] T.Any) -> None
"""
Write a ``xr.Dataset`` in *canonical* form to a GRIB file.
"""
if not no_warn:
warnings.warn("GRIB write support is experimental, DO NOT RELY ON IT!", FutureWarning)
# validate Dataset keys, DataArray names, and attr keys/values
xr.backends.api._validate_dataset_names(dataset)
xr.backends.api._validate_attrs(dataset)
real_grib_keys = {k[5:]: v for k, v in dataset.attrs.items() if k[:5] == 'GRIB_'}
real_grib_keys.update(grib_keys)
with open(path, mode=mode) as file:
for data_var in dataset.data_vars.values():
canonical_dataarray_to_grib(data_var, file, grib_keys=real_grib_keys, **kwargs)
def to_grib(*args, **kwargs):
return canonical_dataset_to_grib(*args, **kwargs)
|
ecmwf/cfgrib
|
cfgrib/xarray_to_grib.py
|
canonical_dataset_to_grib
|
python
|
def canonical_dataset_to_grib(dataset, path, mode='wb', no_warn=False, grib_keys={}, **kwargs):
# type: (xr.Dataset, str, str, bool, T.Dict[str, T.Any] T.Any) -> None
if not no_warn:
warnings.warn("GRIB write support is experimental, DO NOT RELY ON IT!", FutureWarning)
# validate Dataset keys, DataArray names, and attr keys/values
xr.backends.api._validate_dataset_names(dataset)
xr.backends.api._validate_attrs(dataset)
real_grib_keys = {k[5:]: v for k, v in dataset.attrs.items() if k[:5] == 'GRIB_'}
real_grib_keys.update(grib_keys)
with open(path, mode=mode) as file:
for data_var in dataset.data_vars.values():
canonical_dataarray_to_grib(data_var, file, grib_keys=real_grib_keys, **kwargs)
|
Write a ``xr.Dataset`` in *canonical* form to a GRIB file.
|
train
|
https://github.com/ecmwf/cfgrib/blob/d6d533f49c1eebf78f2f16ed0671c666de08c666/cfgrib/xarray_to_grib.py#L242-L259
|
[
"def canonical_dataarray_to_grib(\n data_var, file, grib_keys={}, default_grib_keys=DEFAULT_GRIB_KEYS, **kwargs\n):\n # type: (T.IO[bytes], xr.DataArray, T.Dict[str, T.Any], T.Dict[str, T.Any], T.Any) -> None\n \"\"\"\n Write a ``xr.DataArray`` in *canonical* form to a GRIB file.\n \"\"\"\n # validate Dataset keys, DataArray names, and attr keys/values\n detected_keys, suggested_keys = detect_grib_keys(data_var, default_grib_keys, grib_keys)\n merged_grib_keys = merge_grib_keys(grib_keys, detected_keys, suggested_keys)\n\n if 'gridType' not in merged_grib_keys:\n raise ValueError(\"required grib_key 'gridType' not passed nor auto-detected\")\n\n template_message = make_template_message(merged_grib_keys, **kwargs)\n\n coords_names, data_var = expand_dims(data_var)\n\n header_coords_values = [data_var.coords[name].values.tolist() for name in coords_names]\n for items in itertools.product(*header_coords_values):\n select = {n: v for n, v in zip(coords_names, items)}\n field_values = data_var.sel(**select).values.flat[:]\n\n # Missing values handling\n invalid_field_values = np.logical_not(np.isfinite(field_values))\n\n # There's no need to save a message full of missing values\n if invalid_field_values.all():\n continue\n\n missing_value = merged_grib_keys.get('missingValue', 9999)\n field_values[invalid_field_values] = missing_value\n\n message = cfgrib.CfMessage.from_message(template_message)\n for coord_name, coord_value in zip(coords_names, items):\n if coord_name in ALL_TYPE_OF_LEVELS:\n coord_name = 'level'\n message[coord_name] = coord_value\n\n # OPTIMIZE: convert to list because Message.message_set doesn't support np.ndarray\n message['values'] = field_values.tolist()\n\n message.write(file)\n"
] |
#
# Copyright 2017-2019 European Centre for Medium-Range Weather Forecasts (ECMWF).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# Alessandro Amici - B-Open - https://bopen.eu
# Aureliana Barghini - B-Open - https://bopen.eu
# Leonardo Barcaroli - B-Open - https://bopen.eu
#
import itertools
import logging
import typing as T # noqa
import warnings
import numpy as np
import xarray as xr
import cfgrib
from cfgrib import dataset # FIXME: write support needs internal functions
LOGGER = logging.getLogger(__name__)
DEFAULT_GRIB_KEYS = {
'centre': 255, # missing value, see: http://apps.ecmwf.int/codes/grib/format/grib1/centre/0/
'typeOfLevel': 'surface',
}
TYPE_OF_LEVELS_SFC = ['surface', 'meanSea', 'cloudBase', 'cloudTop']
TYPE_OF_LEVELS_PL = ['isobaricInhPa', 'isobaricInPa']
TYPE_OF_LEVELS_ML = ['hybrid']
ALL_TYPE_OF_LEVELS = TYPE_OF_LEVELS_SFC + TYPE_OF_LEVELS_PL + TYPE_OF_LEVELS_ML
GRID_TYPES = [
'polar_stereographic', 'reduced_gg', 'reduced_ll', 'regular_gg', 'regular_ll', 'rotated_gg',
'rotated_ll', 'sh',
]
MESSAGE_DEFINITION_KEYS = [
# for the GRIB 2 sample we must set this before setting 'totalNumber'
'productDefinitionTemplateNumber',
# NO IDEA WHAT IS GOING ON HERE: saving regular_ll_msl.grib results in the wrong `paramId`
# unless `units` is set before some other unknown key, this happens at random and only in
# Python 3.5, so it must be linked to dict key stability.
'units',
]
def regular_ll_params(values, min_value=-180., max_value=360.):
# type: (T.Sequence, float, float) -> T.Tuple[float, float, int]
start, stop, num = float(values[0]), float(values[-1]), len(values)
if min(start, stop) < min_value or max(start, stop) > max_value:
raise ValueError("Unsupported spatial grid: out of bounds (%r, %r)" % (start, stop))
check_values = np.linspace(start, stop, num)
if not np.allclose(check_values, values):
raise ValueError("Unsupported spatial grid: not regular %r" % (check_values,))
return (start, stop, num)
def detect_regular_ll_grib_keys(lon, lat):
# type: (np.ndarray, np.ndarray) -> T.Dict[str, T.Any]
grib_keys = {} # type: T.Dict[str, T.Any]
lon_start, lon_stop, lon_num = regular_ll_params(lon)
lon_scan_negatively = lon_stop < lon_start
lon_step = abs(lon_stop - lon_start) / (lon_num - 1.)
if lon_start < 0.:
lon_start += 360.
if lon_stop < 0.:
lon_stop += 360.
grib_keys['longitudeOfFirstGridPointInDegrees'] = lon_start
grib_keys['longitudeOfLastGridPointInDegrees'] = lon_stop
grib_keys['Ni'] = lon_num
grib_keys['iDirectionIncrementInDegrees'] = lon_step
grib_keys['iScansNegatively'] = lon_scan_negatively
lat_start, lat_stop, lat_num = regular_ll_params(lat, min_value=-90., max_value=90.)
grib_keys['latitudeOfFirstGridPointInDegrees'] = lat_start
grib_keys['latitudeOfLastGridPointInDegrees'] = lat_stop
grib_keys['Nj'] = lat_num
grib_keys['jDirectionIncrementInDegrees'] = abs(lat_stop - lat_start) / (lat_num - 1.)
grib_keys['jScansPositively'] = lat_stop > lat_start
grib_keys['gridType'] = 'regular_ll'
return grib_keys
def detect_grib_keys(data_var, default_grib_keys, grib_keys={}):
# type: (xr.DataArray, T.Dict[str, T.Any], T.Dict[str, T.Any]) -> T.Tuple[dict, dict]
detected_grib_keys = {}
suggested_grib_keys = default_grib_keys.copy()
for key, value in data_var.attrs.items():
if key[:5] == 'GRIB_':
suggested_grib_keys[key[5:]] = value
if 'latitude' in data_var.dims and 'longitude' in data_var.dims:
try:
regular_ll_keys = detect_regular_ll_grib_keys(data_var.longitude, data_var.latitude)
detected_grib_keys.update(regular_ll_keys)
except:
pass
for tol in ALL_TYPE_OF_LEVELS:
if tol in data_var.dims or tol in data_var.coords:
detected_grib_keys['typeOfLevel'] = tol
if 'number' in data_var.dims or 'number' in data_var.coords and grib_keys.get('edition') != 1:
# cannot set 'number' key without setting a productDefinitionTemplateNumber in GRIB2
detected_grib_keys['productDefinitionTemplateNumber'] = 1
if 'values' in data_var.dims:
detected_grib_keys['numberOfPoints'] = data_var.shape[data_var.dims.index('values')]
return detected_grib_keys, suggested_grib_keys
def detect_sample_name(grib_keys, sample_name_template='{geography}_{vertical}_grib{edition}'):
# type: (T.Mapping, str) -> str
edition = grib_keys.get('edition', 2)
if grib_keys['gridType'] in GRID_TYPES:
geography = grib_keys['gridType']
else:
LOGGER.info("unknown 'gridType': %r. Using GRIB2 template", grib_keys['gridType'])
return 'GRIB2'
if grib_keys['typeOfLevel'] in TYPE_OF_LEVELS_PL:
vertical = 'pl'
elif grib_keys['typeOfLevel'] in TYPE_OF_LEVELS_SFC:
vertical = 'sfc'
elif grib_keys['typeOfLevel'] in TYPE_OF_LEVELS_ML:
vertical = 'ml'
else:
LOGGER.info("unknown 'typeOfLevel': %r. Using GRIB2 template", grib_keys['typeOfLevel'])
return 'GRIB2'
sample_name = sample_name_template.format(**locals())
return sample_name
def merge_grib_keys(grib_keys, detected_grib_keys, default_grib_keys):
merged_grib_keys = {k: v for k, v in grib_keys.items()}
dataset.dict_merge(merged_grib_keys, detected_grib_keys)
for key, value in default_grib_keys.items():
if key not in merged_grib_keys:
merged_grib_keys[key] = value
return merged_grib_keys
def expand_dims(data_var):
coords_names = []
for coord_name in dataset.ALL_HEADER_DIMS + ALL_TYPE_OF_LEVELS:
if coord_name in set(data_var.coords):
coords_names.append(coord_name)
if coord_name not in data_var.dims:
data_var = data_var.expand_dims(coord_name)
return coords_names, data_var
def make_template_message(merged_grib_keys, template_path=None, sample_name=None):
# type: (T.Dict[str, T.Any], str, str) -> cfgrib.CfMessage
if template_path and sample_name:
raise ValueError("template_path and sample_name should not be both set")
if template_path:
with open(template_path) as file:
template_message = cfgrib.CfMessage.from_file(file)
else:
if sample_name is None:
sample_name = detect_sample_name(merged_grib_keys)
template_message = cfgrib.CfMessage.from_sample_name(sample_name)
for key in MESSAGE_DEFINITION_KEYS:
if key in list(merged_grib_keys):
template_message[key] = merged_grib_keys[key]
merged_grib_keys.pop(key)
for key, value in merged_grib_keys.items():
try:
template_message[key] = value
except KeyError:
LOGGER.exception("skipping key due to errors: %r" % key)
return template_message
def canonical_dataarray_to_grib(
data_var, file, grib_keys={}, default_grib_keys=DEFAULT_GRIB_KEYS, **kwargs
):
# type: (T.IO[bytes], xr.DataArray, T.Dict[str, T.Any], T.Dict[str, T.Any], T.Any) -> None
"""
Write a ``xr.DataArray`` in *canonical* form to a GRIB file.
"""
# validate Dataset keys, DataArray names, and attr keys/values
detected_keys, suggested_keys = detect_grib_keys(data_var, default_grib_keys, grib_keys)
merged_grib_keys = merge_grib_keys(grib_keys, detected_keys, suggested_keys)
if 'gridType' not in merged_grib_keys:
raise ValueError("required grib_key 'gridType' not passed nor auto-detected")
template_message = make_template_message(merged_grib_keys, **kwargs)
coords_names, data_var = expand_dims(data_var)
header_coords_values = [data_var.coords[name].values.tolist() for name in coords_names]
for items in itertools.product(*header_coords_values):
select = {n: v for n, v in zip(coords_names, items)}
field_values = data_var.sel(**select).values.flat[:]
# Missing values handling
invalid_field_values = np.logical_not(np.isfinite(field_values))
# There's no need to save a message full of missing values
if invalid_field_values.all():
continue
missing_value = merged_grib_keys.get('missingValue', 9999)
field_values[invalid_field_values] = missing_value
message = cfgrib.CfMessage.from_message(template_message)
for coord_name, coord_value in zip(coords_names, items):
if coord_name in ALL_TYPE_OF_LEVELS:
coord_name = 'level'
message[coord_name] = coord_value
# OPTIMIZE: convert to list because Message.message_set doesn't support np.ndarray
message['values'] = field_values.tolist()
message.write(file)
def to_grib(*args, **kwargs):
return canonical_dataset_to_grib(*args, **kwargs)
|
ecmwf/cfgrib
|
cfgrib/xarray_store.py
|
open_dataset
|
python
|
def open_dataset(path, **kwargs):
# type: (str, T.Any) -> xr.Dataset
if 'engine' in kwargs and kwargs['engine'] != 'cfgrib':
raise ValueError("only engine=='cfgrib' is supported")
kwargs['engine'] = 'cfgrib'
return xr.backends.api.open_dataset(path, **kwargs)
|
Return a ``xr.Dataset`` with the requested ``backend_kwargs`` from a GRIB file.
|
train
|
https://github.com/ecmwf/cfgrib/blob/d6d533f49c1eebf78f2f16ed0671c666de08c666/cfgrib/xarray_store.py#L31-L39
| null |
#
# Copyright 2017-2019 European Centre for Medium-Range Weather Forecasts (ECMWF).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# Alessandro Amici - B-Open - https://bopen.eu
#
import logging
import typing as T # noqa
import warnings
import xarray as xr
from . import DatasetBuildError
LOGGER = logging.getLogger(__name__)
def open_datasets(path, backend_kwargs={}, no_warn=False, **kwargs):
# type: (str, T.Dict[str, T.Any], bool, T.Any) -> T.List[xr.Dataset]
"""
Open a GRIB file groupping incompatible hypercubes to different datasets via simple heuristics.
"""
if not no_warn:
warnings.warn("open_datasets is an experimental API, DO NOT RELY ON IT!", FutureWarning)
fbks = []
datasets = []
try:
datasets.append(open_dataset(path, backend_kwargs=backend_kwargs, **kwargs))
except DatasetBuildError as ex:
fbks.extend(ex.args[2])
# NOTE: the recursive call needs to stay out of the exception handler to avoid showing
# to the user a confusing error message due to exception chaining
for fbk in fbks:
bks = backend_kwargs.copy()
bks['filter_by_keys'] = fbk
datasets.extend(open_datasets(path, backend_kwargs=bks, no_warn=True, **kwargs))
return datasets
|
ecmwf/cfgrib
|
cfgrib/xarray_store.py
|
open_datasets
|
python
|
def open_datasets(path, backend_kwargs={}, no_warn=False, **kwargs):
# type: (str, T.Dict[str, T.Any], bool, T.Any) -> T.List[xr.Dataset]
if not no_warn:
warnings.warn("open_datasets is an experimental API, DO NOT RELY ON IT!", FutureWarning)
fbks = []
datasets = []
try:
datasets.append(open_dataset(path, backend_kwargs=backend_kwargs, **kwargs))
except DatasetBuildError as ex:
fbks.extend(ex.args[2])
# NOTE: the recursive call needs to stay out of the exception handler to avoid showing
# to the user a confusing error message due to exception chaining
for fbk in fbks:
bks = backend_kwargs.copy()
bks['filter_by_keys'] = fbk
datasets.extend(open_datasets(path, backend_kwargs=bks, no_warn=True, **kwargs))
return datasets
|
Open a GRIB file groupping incompatible hypercubes to different datasets via simple heuristics.
|
train
|
https://github.com/ecmwf/cfgrib/blob/d6d533f49c1eebf78f2f16ed0671c666de08c666/cfgrib/xarray_store.py#L42-L62
|
[
"def open_dataset(path, **kwargs):\n # type: (str, T.Any) -> xr.Dataset\n \"\"\"\n Return a ``xr.Dataset`` with the requested ``backend_kwargs`` from a GRIB file.\n \"\"\"\n if 'engine' in kwargs and kwargs['engine'] != 'cfgrib':\n raise ValueError(\"only engine=='cfgrib' is supported\")\n kwargs['engine'] = 'cfgrib'\n return xr.backends.api.open_dataset(path, **kwargs)\n",
"def open_datasets(path, backend_kwargs={}, no_warn=False, **kwargs):\n # type: (str, T.Dict[str, T.Any], bool, T.Any) -> T.List[xr.Dataset]\n \"\"\"\n Open a GRIB file groupping incompatible hypercubes to different datasets via simple heuristics.\n \"\"\"\n if not no_warn:\n warnings.warn(\"open_datasets is an experimental API, DO NOT RELY ON IT!\", FutureWarning)\n\n fbks = []\n datasets = []\n try:\n datasets.append(open_dataset(path, backend_kwargs=backend_kwargs, **kwargs))\n except DatasetBuildError as ex:\n fbks.extend(ex.args[2])\n # NOTE: the recursive call needs to stay out of the exception handler to avoid showing\n # to the user a confusing error message due to exception chaining\n for fbk in fbks:\n bks = backend_kwargs.copy()\n bks['filter_by_keys'] = fbk\n datasets.extend(open_datasets(path, backend_kwargs=bks, no_warn=True, **kwargs))\n return datasets\n"
] |
#
# Copyright 2017-2019 European Centre for Medium-Range Weather Forecasts (ECMWF).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# Alessandro Amici - B-Open - https://bopen.eu
#
import logging
import typing as T # noqa
import warnings
import xarray as xr
from . import DatasetBuildError
LOGGER = logging.getLogger(__name__)
def open_dataset(path, **kwargs):
# type: (str, T.Any) -> xr.Dataset
"""
Return a ``xr.Dataset`` with the requested ``backend_kwargs`` from a GRIB file.
"""
if 'engine' in kwargs and kwargs['engine'] != 'cfgrib':
raise ValueError("only engine=='cfgrib' is supported")
kwargs['engine'] = 'cfgrib'
return xr.backends.api.open_dataset(path, **kwargs)
|
ecmwf/cfgrib
|
cfgrib/bindings.py
|
codes_get_size
|
python
|
def codes_get_size(handle, key):
# type: (cffi.FFI.CData, str) -> int
size = ffi.new('size_t *')
_codes_get_size(handle, key.encode(ENC), size)
return size[0]
|
Get the number of coded value from a key.
If several keys of the same name are present, the total sum is returned.
:param bytes key: the keyword to get the size of
:rtype: int
|
train
|
https://github.com/ecmwf/cfgrib/blob/d6d533f49c1eebf78f2f16ed0671c666de08c666/cfgrib/bindings.py#L212-L224
| null |
#
# Copyright 2017-2019 European Centre for Medium-Range Weather Forecasts (ECMWF).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# Alessandro Amici - B-Open - https://bopen.eu
#
import functools
import logging
import pkgutil
import typing as T # noqa
import cffi
LOG = logging.getLogger(__name__)
ffi = cffi.FFI()
ffi.cdef(
pkgutil.get_data(__name__, 'grib_api.h').decode('utf-8') +
pkgutil.get_data(__name__, 'eccodes.h').decode('utf-8')
)
class RaiseOnAttributeAccess(object):
def __init__(self, exc, message):
self.message = message
self.exc = exc
def __getattr__(self, attr):
raise RuntimeError(self.message) from self.exc
for libname in ['eccodes', 'libeccodes.so', 'libeccodes']:
try:
lib = ffi.dlopen(libname)
LOG.info("ecCodes library found using name '%s'.", libname)
break
except OSError as exc:
# lazy exception
lib = RaiseOnAttributeAccess(exc, 'ecCodes library not found on the system.')
LOG.info("ecCodes library not found using name '%s'.", libname)
# default encoding for ecCodes strings
ENC = 'ascii'
#
# from gribapi.py
#
CODES_PRODUCT_ANY = 0
""" Generic product kind """
CODES_PRODUCT_GRIB = 1
""" GRIB product kind """
CODES_PRODUCT_BUFR = 2
""" BUFR product kind """
CODES_PRODUCT_METAR = 3
""" METAR product kind """
CODES_PRODUCT_GTS = 4
""" GTS product kind """
CODES_PRODUCT_TAF = 5
""" TAF product kind """
# Constants for 'missing'
GRIB_MISSING_DOUBLE = -1e+100
GRIB_MISSING_LONG = 2147483647
CODES_MISSING_DOUBLE = GRIB_MISSING_DOUBLE
CODES_MISSING_LONG = GRIB_MISSING_LONG
#
# Helper values to discriminate key types
#
CODES_TYPE_UNDEFINED = lib.GRIB_TYPE_UNDEFINED
CODES_TYPE_LONG = lib.GRIB_TYPE_LONG
CODES_TYPE_DOUBLE = lib.GRIB_TYPE_DOUBLE
CODES_TYPE_STRING = lib.GRIB_TYPE_STRING
CODES_TYPE_BYTES = lib.GRIB_TYPE_BYTES
CODES_TYPE_SECTION = lib.GRIB_TYPE_SECTION
CODES_TYPE_LABEL = lib.GRIB_TYPE_LABEL
CODES_TYPE_MISSING = lib.GRIB_TYPE_MISSING
KEYTYPES = {
1: int,
2: float,
3: str,
}
CODES_KEYS_ITERATOR_ALL_KEYS = 0
CODES_KEYS_ITERATOR_SKIP_READ_ONLY = (1 << 0)
CODES_KEYS_ITERATOR_SKIP_OPTIONAL = (1 << 1)
CODES_KEYS_ITERATOR_SKIP_EDITION_SPECIFIC = (1 << 2)
CODES_KEYS_ITERATOR_SKIP_CODED = (1 << 3)
CODES_KEYS_ITERATOR_SKIP_COMPUTED = (1 << 4)
CODES_KEYS_ITERATOR_SKIP_DUPLICATES = (1 << 5)
CODES_KEYS_ITERATOR_SKIP_FUNCTION = (1 << 6)
CODES_KEYS_ITERATOR_DUMP_ONLY = (1 << 7)
#
# Helper functions for error reporting
#
def grib_get_error_message(code):
# type: (int) -> str
message = lib.grib_get_error_message(code)
return ffi.string(message).decode(ENC)
class GribInternalError(Exception):
def __init__(self, code, message=None, *args):
self.code = code
self.eccode_message = grib_get_error_message(code)
if message is None:
message = '%s (%s).' % (self.eccode_message, code)
super(GribInternalError, self).__init__(message, code, *args)
class KeyValueNotFoundError(GribInternalError):
"""Key/value not found."""
class ReadOnlyError(GribInternalError):
"""Value is read only."""
class FileNotFoundError(GribInternalError):
"""File not found."""
ERROR_MAP = {
-18: ReadOnlyError,
-10: KeyValueNotFoundError,
-7: FileNotFoundError,
}
def check_last(func):
@functools.wraps(func)
def wrapper(*args):
code = ffi.new('int *')
args += (code,)
retval = func(*args)
if code[0] != lib.GRIB_SUCCESS:
if code[0] in ERROR_MAP:
raise ERROR_MAP[code[0]](code[0])
else:
raise GribInternalError(code[0])
return retval
return wrapper
def check_return(func):
@functools.wraps(func)
def wrapper(*args):
code = func(*args)
if code != lib.GRIB_SUCCESS:
if code in ERROR_MAP:
raise ERROR_MAP[code](code)
else:
raise GribInternalError(code)
return wrapper
#
# CFFI reimplementation of gribapi.py functions with codes names
#
def codes_grib_new_from_file(fileobj, product_kind=CODES_PRODUCT_GRIB, context=None):
if context is None:
context = ffi.NULL
try:
retval = check_last(lib.codes_handle_new_from_file)(context, fileobj, product_kind)
if retval == ffi.NULL:
raise EOFError("End of file: %r" % fileobj)
else:
return retval
except GribInternalError as ex:
if ex.code == lib.GRIB_END_OF_FILE:
raise EOFError("End of file: %r" % fileobj)
raise
def codes_clone(handle):
# type: (cffi.FFI.CData) -> cffi.FFI.CData
cloned_handle = lib.codes_handle_clone(handle)
if cloned_handle is ffi.NULL:
raise GribInternalError(lib.GRIB_NULL_POINTER)
return cloned_handle
codes_release = lib.codes_handle_delete
_codes_get_size = check_return(lib.codes_get_size)
_codes_get_length = check_return(lib.codes_get_length)
def codes_get_string_length(handle, key):
# type: (cffi.FFI.CData, str) -> int
"""
Get the length of the string representation of the key.
If several keys of the same name are present, the maximum length is returned.
:param bytes key: the keyword to get the string representation size of.
:rtype: int
"""
size = ffi.new('size_t *')
_codes_get_length(handle, key.encode(ENC), size)
return size[0]
_codes_get_bytes = check_return(lib.codes_get_bytes)
def codes_get_bytes_array(handle, key, size):
# type: (cffi.FFI.CData, str, int) -> T.List[int]
"""
Get unsigned chars array values from a key.
:param bytes key: the keyword whose value(s) are to be extracted
:rtype: List(int)
"""
values = ffi.new('unsigned char[]', size)
size_p = ffi.new('size_t *', size)
_codes_get_bytes(handle, key.encode(ENC), values, size_p)
return list(values)
_codes_get_long_array = check_return(lib.codes_get_long_array)
def codes_get_long_array(handle, key, size):
# type: (cffi.FFI.CData, str, int) -> T.List[int]
"""
Get long array values from a key.
:param bytes key: the keyword whose value(s) are to be extracted
:rtype: List(int)
"""
values = ffi.new('long[]', size)
size_p = ffi.new('size_t *', size)
_codes_get_long_array(handle, key.encode(ENC), values, size_p)
return list(values)
_codes_get_double_array = check_return(lib.codes_get_double_array)
def codes_get_double_array(handle, key, size):
# type: (cffi.FFI.CData, str, int) -> T.List[float]
"""
Get double array values from a key.
:param bytes key: the keyword whose value(s) are to be extracted
:rtype: T.List(float)
"""
values = ffi.new('double[]', size)
size_p = ffi.new('size_t *', size)
_codes_get_double_array(handle, key.encode(ENC), values, size_p)
return list(values)
_codes_get_string_array = check_return(lib.codes_get_string_array)
def codes_get_string_array(handle, key, size, length=None):
# type: (cffi.FFI.CData, bytes, int, int) -> T.List[bytes]
"""
Get string array values from a key.
:param bytes key: the keyword whose value(s) are to be extracted
:rtype: T.List[bytes]
"""
if length is None:
length = codes_get_string_length(handle, key)
values_keepalive = [ffi.new('char[]', length) for _ in range(size)]
values = ffi.new('char*[]', values_keepalive)
size_p = ffi.new('size_t *', size)
_codes_get_string_array(handle, key.encode(ENC), values, size_p)
return [ffi.string(values[i]).decode(ENC) for i in range(size_p[0])]
def codes_get_long(handle, key):
# type: (cffi.FFI.CData, str) -> int
value = ffi.new('long *')
_codes_get_long = check_return(lib.codes_get_long)
_codes_get_long(handle, key.encode(ENC), value)
return value[0]
def codes_get_double(handle, key):
# type: (cffi.FFI.CData, str) -> int
value = ffi.new('double *')
_codes_get_long = check_return(lib.codes_get_double)
_codes_get_long(handle, key.encode(ENC), value)
return value[0]
def codes_get_string(handle, key, length=None):
# type: (cffi.FFI.CData, str, int) -> str
"""
Get string element from a key.
It may or may not fail in case there are more than one key in a message.
Outputs the last element.
:param bytes key: the keyword to select the value of
:param bool strict: flag to select if the method should fail in case of
more than one key in single message
:rtype: bytes
"""
if length is None:
length = codes_get_string_length(handle, key)
values = ffi.new('char[]', length)
length_p = ffi.new('size_t *', length)
_codes_get_string = check_return(lib.codes_get_string)
_codes_get_string(handle, key.encode(ENC), values, length_p)
return ffi.string(values, length_p[0]).decode(ENC)
_codes_get_native_type = check_return(lib.codes_get_native_type)
def codes_get_native_type(handle, key):
# type: (cffi.FFI.CData, str) -> int
grib_type = ffi.new('int *')
_codes_get_native_type(handle, key.encode(ENC), grib_type)
return KEYTYPES.get(grib_type[0], grib_type[0])
def codes_get_array(handle, key, key_type=None, size=None, length=None, log=LOG):
# type: (cffi.FFI.CData, str, int, int, int, logging.Logger) -> T.Any
if key_type is None:
key_type = codes_get_native_type(handle, key)
if size is None:
size = codes_get_size(handle, key)
if key_type == int:
return codes_get_long_array(handle, key, size)
elif key_type == float:
return codes_get_double_array(handle, key, size)
elif key_type == str:
return codes_get_string_array(handle, key, size, length=length)
elif key_type == CODES_TYPE_BYTES:
return codes_get_bytes_array(handle, key, size)
else:
log.warning("Unknown GRIB key type: %r", key_type)
def codes_get(handle, key, key_type=None, length=None, log=LOG):
# type: (cffi.FFI.CData, str, int, int, logging.Logger) -> T.Any
if key_type is None:
key_type = codes_get_native_type(handle, key)
if key_type == int:
return codes_get_long(handle, key)
elif key_type == float:
return codes_get_double(handle, key)
elif key_type == str:
return codes_get_string(handle, key, length=length)
else:
log.warning("Unknown GRIB key type: %r", key_type)
def codes_keys_iterator_new(handle, flags=CODES_KEYS_ITERATOR_ALL_KEYS, namespace=None):
# type: (cffi.FFI.CData, int, str) -> cffi.FFI.CData
if namespace is None:
bnamespace = ffi.NULL
else:
bnamespace = namespace.encode(ENC)
codes_keys_iterator_new = lib.codes_keys_iterator_new
return codes_keys_iterator_new(handle, flags, bnamespace)
def codes_keys_iterator_next(iterator_id):
return lib.codes_keys_iterator_next(iterator_id)
def codes_keys_iterator_get_name(iterator):
ret = lib.codes_keys_iterator_get_name(iterator)
return ffi.string(ret).decode(ENC)
def codes_keys_iterator_delete(iterator_id):
codes_keys_iterator_delete = check_return(lib.codes_keys_iterator_delete)
codes_keys_iterator_delete(iterator_id)
def codes_get_api_version():
"""
Get the API version.
Returns the version of the API as a string in the format "major.minor.revision".
"""
ver = lib.codes_get_api_version()
patch = ver % 100
ver = ver // 100
minor = ver % 100
major = ver // 100
return "%d.%d.%d" % (major, minor, patch)
def portable_handle_new_from_samples(samplename, product_kind):
#
# re-implement codes_grib_handle_new_from_samples in a portable way.
# imports are here not to pollute the head of the file with (hopfully!) temporary stuff
#
import os.path
import platform
handle = ffi.NULL
if platform.platform().startswith('Windows'):
samples_folder = ffi.string(lib.codes_samples_path(ffi.NULL))
sample_path = os.path.join(samples_folder, samplename + b'.tmpl')
try:
with open(sample_path) as file:
handle = codes_grib_new_from_file(file, product_kind)
except Exception:
pass
return handle
def codes_new_from_samples(samplename, product_kind=CODES_PRODUCT_GRIB):
    # type: (str, int) -> cffi.FFI.CData
    """Create a new message handle from a named ecCodes sample.

    Raises NotImplementedError for unsupported product kinds and ValueError
    when the sample cannot be found.
    """
    # work around an ecCodes bug on Windows, hopefully this will go away soon
    handle = portable_handle_new_from_samples(samplename, product_kind)
    if handle != ffi.NULL:
        return handle
    # end of work-around
    bname = samplename.encode(ENC)
    if product_kind == CODES_PRODUCT_GRIB:
        handle = lib.codes_grib_handle_new_from_samples(ffi.NULL, bname)
    elif product_kind == CODES_PRODUCT_BUFR:
        handle = lib.codes_bufr_handle_new_from_samples(ffi.NULL, bname)
    else:
        raise NotImplementedError("product kind not supported: %r" % product_kind)
    if handle == ffi.NULL:
        raise ValueError("sample not found: %r" % samplename)
    return handle
def codes_set_long(handle, key, value):
    # type: (cffi.FFI.CData, str, int) -> None
    """Set an integer (C long) value for *key* on *handle*."""
    check_return(lib.codes_set_long)(handle, key.encode(ENC), value)
def codes_set_double(handle, key, value):
    # type: (cffi.FFI.CData, str, float) -> None
    """Set a floating point (C double) value for *key* on *handle*."""
    check_return(lib.codes_set_double)(handle, key.encode(ENC), value)
def codes_set_string(handle, key, value):
    # type: (cffi.FFI.CData, str, str) -> None
    """Set a string value for *key* on *handle* (ASCII-encoded)."""
    size_p = ffi.new('size_t *', len(value))
    check_return(lib.codes_set_string)(handle, key.encode(ENC), value.encode(ENC), size_p)
def codes_set(handle, key, value):
    """Set *key* on *handle*, dispatching on the Python type of *value*.

    int -> codes_set_long, float -> codes_set_double, str -> codes_set_string;
    any other type raises TypeError.  NOTE: bool is a subclass of int, so
    True/False are written as longs.
    """
    if isinstance(value, int):
        codes_set_long(handle, key, value)
    elif isinstance(value, float):
        codes_set_double(handle, key, value)
    elif isinstance(value, str):
        codes_set_string(handle, key, value)
    else:
        raise TypeError("Unsupported type %r" % type(value))
def codes_set_double_array(handle, key, values):
    # type: (cffi.FFI.CData, str, T.List[float]) -> None
    """Set *key* to an array of C doubles."""
    c_values = ffi.new("double []", values)
    check_return(lib.codes_set_double_array)(handle, key.encode(ENC), c_values, len(values))
def codes_set_long_array(handle, key, values):
    # type: (cffi.FFI.CData, str, T.List[int]) -> None
    """Set *key* to an array of C longs."""
    c_values = ffi.new("long []", values)
    check_return(lib.codes_set_long_array)(handle, key.encode(ENC), c_values, len(values))
def codes_set_array(handle, key, values):
    # type: (cffi.FFI.CData, str, T.List[T.Any]) -> None
    """Set *key* to an array, dispatching on the type of the first element.

    Raises ValueError for an empty list and TypeError for unsupported
    element types.
    """
    if not values:
        raise ValueError("Cannot set an empty list.")
    first = values[0]
    if isinstance(first, float):
        codes_set_double_array(handle, key, values)
    elif isinstance(first, int):
        codes_set_long_array(handle, key, values)
    else:
        raise TypeError("Unsupported value type: %r" % type(first))
def codes_grib_multi_support_on(context=None):
    """Turn on multi-field GRIB support (default ecCodes context when None)."""
    lib.codes_grib_multi_support_on(ffi.NULL if context is None else context)
def codes_grib_multi_support_off(context=None):
    """Turn off multi-field GRIB support (default ecCodes context when None)."""
    lib.codes_grib_multi_support_off(ffi.NULL if context is None else context)
def codes_write(handle, outfile):
    # type: (cffi.FFI.CData, T.BinaryIO) -> None
    """
    Write the coded message associated with *handle* to *outfile*.

    :param handle: handle of the message to serialize
    :param outfile: binary file-like object open for writing
    """
    mess = ffi.new('const void **')
    mess_len = ffi.new('size_t*')
    codes_get_message = check_return(lib.codes_get_message)
    codes_get_message(handle, mess, mess_len)
    message = ffi.buffer(mess[0], size=mess_len[0])
    outfile.write(message)
|
ecmwf/cfgrib
|
cfgrib/bindings.py
|
codes_get_string_length
|
python
|
def codes_get_string_length(handle, key):
# type: (cffi.FFI.CData, str) -> int
size = ffi.new('size_t *')
_codes_get_length(handle, key.encode(ENC), size)
return size[0]
|
Get the length of the string representation of the key.
If several keys of the same name are present, the maximum length is returned.
:param bytes key: the keyword to get the string representation size of.
:rtype: int
|
train
|
https://github.com/ecmwf/cfgrib/blob/d6d533f49c1eebf78f2f16ed0671c666de08c666/cfgrib/bindings.py#L230-L242
| null |
#
# Copyright 2017-2019 European Centre for Medium-Range Weather Forecasts (ECMWF).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# Alessandro Amici - B-Open - https://bopen.eu
#
import functools
import logging
import pkgutil
import typing as T # noqa
import cffi
LOG = logging.getLogger(__name__)
ffi = cffi.FFI()
ffi.cdef(
pkgutil.get_data(__name__, 'grib_api.h').decode('utf-8') +
pkgutil.get_data(__name__, 'eccodes.h').decode('utf-8')
)
class RaiseOnAttributeAccess(object):
    """Placeholder object that defers a load-time failure until first use.

    Any attribute access raises RuntimeError chained to the original exception.
    """
    def __init__(self, exc, message):
        self.exc = exc
        self.message = message
    def __getattr__(self, attr):
        raise RuntimeError(self.message) from self.exc
for libname in ['eccodes', 'libeccodes.so', 'libeccodes']:
try:
lib = ffi.dlopen(libname)
LOG.info("ecCodes library found using name '%s'.", libname)
break
except OSError as exc:
# lazy exception
lib = RaiseOnAttributeAccess(exc, 'ecCodes library not found on the system.')
LOG.info("ecCodes library not found using name '%s'.", libname)
# default encoding for ecCodes strings
ENC = 'ascii'
#
# from gribapi.py
#
CODES_PRODUCT_ANY = 0
""" Generic product kind """
CODES_PRODUCT_GRIB = 1
""" GRIB product kind """
CODES_PRODUCT_BUFR = 2
""" BUFR product kind """
CODES_PRODUCT_METAR = 3
""" METAR product kind """
CODES_PRODUCT_GTS = 4
""" GTS product kind """
CODES_PRODUCT_TAF = 5
""" TAF product kind """
# Constants for 'missing'
GRIB_MISSING_DOUBLE = -1e+100
GRIB_MISSING_LONG = 2147483647
CODES_MISSING_DOUBLE = GRIB_MISSING_DOUBLE
CODES_MISSING_LONG = GRIB_MISSING_LONG
#
# Helper values to discriminate key types
#
CODES_TYPE_UNDEFINED = lib.GRIB_TYPE_UNDEFINED
CODES_TYPE_LONG = lib.GRIB_TYPE_LONG
CODES_TYPE_DOUBLE = lib.GRIB_TYPE_DOUBLE
CODES_TYPE_STRING = lib.GRIB_TYPE_STRING
CODES_TYPE_BYTES = lib.GRIB_TYPE_BYTES
CODES_TYPE_SECTION = lib.GRIB_TYPE_SECTION
CODES_TYPE_LABEL = lib.GRIB_TYPE_LABEL
CODES_TYPE_MISSING = lib.GRIB_TYPE_MISSING
KEYTYPES = {
1: int,
2: float,
3: str,
}
CODES_KEYS_ITERATOR_ALL_KEYS = 0
CODES_KEYS_ITERATOR_SKIP_READ_ONLY = (1 << 0)
CODES_KEYS_ITERATOR_SKIP_OPTIONAL = (1 << 1)
CODES_KEYS_ITERATOR_SKIP_EDITION_SPECIFIC = (1 << 2)
CODES_KEYS_ITERATOR_SKIP_CODED = (1 << 3)
CODES_KEYS_ITERATOR_SKIP_COMPUTED = (1 << 4)
CODES_KEYS_ITERATOR_SKIP_DUPLICATES = (1 << 5)
CODES_KEYS_ITERATOR_SKIP_FUNCTION = (1 << 6)
CODES_KEYS_ITERATOR_DUMP_ONLY = (1 << 7)
#
# Helper functions for error reporting
#
def grib_get_error_message(code):
    # type: (int) -> str
    """Return the ecCodes textual message for error *code*, decoded as ASCII."""
    message = lib.grib_get_error_message(code)
    return ffi.string(message).decode(ENC)
class GribInternalError(Exception):
    """Base exception carrying an ecCodes error code and its library message."""
    def __init__(self, code, message=None, *args):
        # keep the raw ecCodes error code for programmatic handling
        self.code = code
        self.eccode_message = grib_get_error_message(code)
        if message is None:
            message = '%s (%s).' % (self.eccode_message, code)
        super(GribInternalError, self).__init__(message, code, *args)
class KeyValueNotFoundError(GribInternalError):
"""Key/value not found."""
class ReadOnlyError(GribInternalError):
"""Value is read only."""
class FileNotFoundError(GribInternalError):
"""File not found."""
ERROR_MAP = {
-18: ReadOnlyError,
-10: KeyValueNotFoundError,
-7: FileNotFoundError,
}
def check_last(func):
    """Wrap a C *func* whose last argument is an ``int *`` error-code out-param.

    The wrapper allocates the out-parameter, calls *func*, raises the mapped
    exception class (or GribInternalError) when the code is not GRIB_SUCCESS,
    and otherwise returns the C function's return value.
    """
    @functools.wraps(func)
    def wrapper(*args):
        err = ffi.new('int *')
        retval = func(*(args + (err,)))
        if err[0] != lib.GRIB_SUCCESS:
            raise ERROR_MAP.get(err[0], GribInternalError)(err[0])
        return retval
    return wrapper
def check_return(func):
    """Wrap a C *func* that returns an ecCodes error code; raise on failure."""
    @functools.wraps(func)
    def wrapper(*args):
        err = func(*args)
        if err != lib.GRIB_SUCCESS:
            raise ERROR_MAP.get(err, GribInternalError)(err)
    return wrapper
#
# CFFI reimplementation of gribapi.py functions with codes names
#
def codes_grib_new_from_file(fileobj, product_kind=CODES_PRODUCT_GRIB, context=None):
    """Read the next message from *fileobj* and return its handle.

    Raises EOFError at end of file: the C call reports this either as a NULL
    handle or as a GRIB_END_OF_FILE error code, so both paths are translated.
    """
    if context is None:
        context = ffi.NULL
    try:
        retval = check_last(lib.codes_handle_new_from_file)(context, fileobj, product_kind)
        if retval == ffi.NULL:
            raise EOFError("End of file: %r" % fileobj)
        else:
            return retval
    except GribInternalError as ex:
        if ex.code == lib.GRIB_END_OF_FILE:
            raise EOFError("End of file: %r" % fileobj)
        raise
def codes_clone(handle):
    # type: (cffi.FFI.CData) -> cffi.FFI.CData
    """Return an independent copy of the message *handle*.

    Raises GribInternalError when the library fails to clone (returns NULL).
    """
    cloned_handle = lib.codes_handle_clone(handle)
    # cdata NULL must be compared with ``==``: a returned NULL pointer is a
    # distinct Python object from ``ffi.NULL``, so ``is`` never matches and the
    # failure would go undetected.  Every other NULL check in this module uses
    # ``==`` for the same reason.
    if cloned_handle == ffi.NULL:
        raise GribInternalError(lib.GRIB_NULL_POINTER)
    return cloned_handle
codes_release = lib.codes_handle_delete
_codes_get_size = check_return(lib.codes_get_size)
def codes_get_size(handle, key):
    # type: (cffi.FFI.CData, str) -> int
    """Return the number of coded values for *key*.

    If several keys of the same name are present, the total sum is returned.
    """
    size_p = ffi.new('size_t *')
    _codes_get_size(handle, key.encode(ENC), size_p)
    return size_p[0]
_codes_get_length = check_return(lib.codes_get_length)
_codes_get_bytes = check_return(lib.codes_get_bytes)
def codes_get_bytes_array(handle, key, size):
    # type: (cffi.FFI.CData, str, int) -> T.List[int]
    """Return *size* unsigned-char values of *key* as a list of ints."""
    buf = ffi.new('unsigned char[]', size)
    buf_len = ffi.new('size_t *', size)
    _codes_get_bytes(handle, key.encode(ENC), buf, buf_len)
    return list(buf)
_codes_get_long_array = check_return(lib.codes_get_long_array)
def codes_get_long_array(handle, key, size):
    # type: (cffi.FFI.CData, str, int) -> T.List[int]
    """Return *size* long values of *key* as a list of ints."""
    buf = ffi.new('long[]', size)
    buf_len = ffi.new('size_t *', size)
    _codes_get_long_array(handle, key.encode(ENC), buf, buf_len)
    return list(buf)
_codes_get_double_array = check_return(lib.codes_get_double_array)
def codes_get_double_array(handle, key, size):
    # type: (cffi.FFI.CData, str, int) -> T.List[float]
    """Return *size* double values of *key* as a list of floats."""
    buf = ffi.new('double[]', size)
    buf_len = ffi.new('size_t *', size)
    _codes_get_double_array(handle, key.encode(ENC), buf, buf_len)
    return list(buf)
_codes_get_string_array = check_return(lib.codes_get_string_array)
def codes_get_string_array(handle, key, size, length=None):
    # type: (cffi.FFI.CData, str, int, int) -> T.List[str]
    """
    Get string array values from a key, decoded as ASCII.

    :param str key: the keyword whose value(s) are to be extracted
    :param int length: optional per-string buffer length; queried when None
    :rtype: T.List[str]
    """
    if length is None:
        length = codes_get_string_length(handle, key)
    values_keepalive = [ffi.new('char[]', length) for _ in range(size)]
    values = ffi.new('char*[]', values_keepalive)
    size_p = ffi.new('size_t *', size)
    _codes_get_string_array(handle, key.encode(ENC), values, size_p)
    return [ffi.string(values[i]).decode(ENC) for i in range(size_p[0])]
def codes_get_long(handle, key):
    # type: (cffi.FFI.CData, str) -> int
    """Return the value of *key* as an int."""
    out = ffi.new('long *')
    check_return(lib.codes_get_long)(handle, key.encode(ENC), out)
    return out[0]
def codes_get_double(handle, key):
    # type: (cffi.FFI.CData, str) -> float
    """Return the value of *key* as a float (C double)."""
    value = ffi.new('double *')
    # the local was previously (misleadingly) named _codes_get_long
    _codes_get_double = check_return(lib.codes_get_double)
    _codes_get_double(handle, key.encode(ENC), value)
    return value[0]
def codes_get_string(handle, key, length=None):
    # type: (cffi.FFI.CData, str, int) -> str
    """
    Get the string representation of a key, decoded as ASCII.

    It may or may not fail in case there are more than one key in a message.
    Outputs the last element.

    :param str key: the keyword to select the value of
    :param int length: optional buffer length; queried via
        codes_get_string_length when None
    :rtype: str
    """
    if length is None:
        length = codes_get_string_length(handle, key)
    values = ffi.new('char[]', length)
    length_p = ffi.new('size_t *', length)
    _codes_get_string = check_return(lib.codes_get_string)
    _codes_get_string(handle, key.encode(ENC), values, length_p)
    return ffi.string(values, length_p[0]).decode(ENC)
_codes_get_native_type = check_return(lib.codes_get_native_type)
def codes_get_native_type(handle, key):
    # type: (cffi.FFI.CData, str) -> T.Union[type, int]
    """Return the Python type (int/float/str) mapped from the key's native GRIB
    type, or the raw GRIB type code when it has no entry in KEYTYPES."""
    grib_type = ffi.new('int *')
    _codes_get_native_type(handle, key.encode(ENC), grib_type)
    return KEYTYPES.get(grib_type[0], grib_type[0])
def codes_get_array(handle, key, key_type=None, size=None, length=None, log=LOG):
    # type: (cffi.FFI.CData, str, int, int, int, logging.Logger) -> T.Any
    """Read all values of *key* as a list, dispatching on its (native) type.

    *key_type* and *size* are queried from the handle when not given.
    Unknown types are logged and result in an implicit None return.
    """
    if key_type is None:
        key_type = codes_get_native_type(handle, key)
    if size is None:
        size = codes_get_size(handle, key)
    if key_type is int:
        return codes_get_long_array(handle, key, size)
    if key_type is float:
        return codes_get_double_array(handle, key, size)
    if key_type is str:
        return codes_get_string_array(handle, key, size, length=length)
    if key_type == CODES_TYPE_BYTES:
        return codes_get_bytes_array(handle, key, size)
    log.warning("Unknown GRIB key type: %r", key_type)
def codes_get(handle, key, key_type=None, length=None, log=LOG):
# type: (cffi.FFI.CData, str, int, int, logging.Logger) -> T.Any
if key_type is None:
key_type = codes_get_native_type(handle, key)
if key_type == int:
return codes_get_long(handle, key)
elif key_type == float:
return codes_get_double(handle, key)
elif key_type == str:
return codes_get_string(handle, key, length=length)
else:
log.warning("Unknown GRIB key type: %r", key_type)
def codes_keys_iterator_new(handle, flags=CODES_KEYS_ITERATOR_ALL_KEYS, namespace=None):
# type: (cffi.FFI.CData, int, str) -> cffi.FFI.CData
if namespace is None:
bnamespace = ffi.NULL
else:
bnamespace = namespace.encode(ENC)
codes_keys_iterator_new = lib.codes_keys_iterator_new
return codes_keys_iterator_new(handle, flags, bnamespace)
def codes_keys_iterator_next(iterator_id):
return lib.codes_keys_iterator_next(iterator_id)
def codes_keys_iterator_get_name(iterator):
ret = lib.codes_keys_iterator_get_name(iterator)
return ffi.string(ret).decode(ENC)
def codes_keys_iterator_delete(iterator_id):
codes_keys_iterator_delete = check_return(lib.codes_keys_iterator_delete)
codes_keys_iterator_delete(iterator_id)
def codes_get_api_version():
"""
Get the API version.
Returns the version of the API as a string in the format "major.minor.revision".
"""
ver = lib.codes_get_api_version()
patch = ver % 100
ver = ver // 100
minor = ver % 100
major = ver // 100
return "%d.%d.%d" % (major, minor, patch)
def portable_handle_new_from_samples(samplename, product_kind):
#
# re-implement codes_grib_handle_new_from_samples in a portable way.
# imports are here not to pollute the head of the file with (hopfully!) temporary stuff
#
import os.path
import platform
handle = ffi.NULL
if platform.platform().startswith('Windows'):
samples_folder = ffi.string(lib.codes_samples_path(ffi.NULL))
sample_path = os.path.join(samples_folder, samplename + b'.tmpl')
try:
with open(sample_path) as file:
handle = codes_grib_new_from_file(file, product_kind)
except Exception:
pass
return handle
def codes_new_from_samples(samplename, product_kind=CODES_PRODUCT_GRIB):
# type: (str, int) -> cffi.FFI.CData
# work around an ecCodes bug on Windows, hopefully this will go away soon
handle = portable_handle_new_from_samples(samplename, product_kind)
if handle != ffi.NULL:
return handle
# end of work-around
if product_kind == CODES_PRODUCT_GRIB:
handle = lib.codes_grib_handle_new_from_samples(ffi.NULL, samplename.encode(ENC))
elif product_kind == CODES_PRODUCT_BUFR:
handle = lib.codes_bufr_handle_new_from_samples(ffi.NULL, samplename.encode(ENC))
else:
raise NotImplementedError("product kind not supported: %r" % product_kind)
if handle == ffi.NULL:
raise ValueError("sample not found: %r" % samplename)
return handle
def codes_set_long(handle, key, value):
# type: (cffi.FFI.CData, str, int) -> None
codes_set_long = check_return(lib.codes_set_long)
codes_set_long(handle, key.encode(ENC), value)
def codes_set_double(handle, key, value):
# type: (cffi.FFI.CData, str, float) -> None
codes_set_double = check_return(lib.codes_set_double)
codes_set_double(handle, key.encode(ENC), value)
def codes_set_string(handle, key, value):
# type: (cffi.FFI.CData, str, str) -> None
size = ffi.new('size_t *', len(value))
codes_set_string = check_return(lib.codes_set_string)
codes_set_string(handle, key.encode(ENC), value.encode(ENC), size)
def codes_set(handle, key, value):
""""""
if isinstance(value, int):
codes_set_long(handle, key, value)
elif isinstance(value, float):
codes_set_double(handle, key, value)
elif isinstance(value, str):
codes_set_string(handle, key, value)
else:
raise TypeError("Unsupported type %r" % type(value))
def codes_set_double_array(handle, key, values):
# type: (cffi.FFI.CData, str, T.List[float]) -> None
size = len(values)
c_values = ffi.new("double []", values)
codes_set_double_array = check_return(lib.codes_set_double_array)
codes_set_double_array(handle, key.encode(ENC), c_values, size)
def codes_set_long_array(handle, key, values):
# type: (cffi.FFI.CData, str, T.List[int]) -> None
size = len(values)
c_values = ffi.new("long []", values)
codes_set_long_array = check_return(lib.codes_set_long_array)
codes_set_long_array(handle, key.encode(ENC), c_values, size)
def codes_set_array(handle, key, values):
# type: (cffi.FFI.CData, str, T.List[T.Any]) -> None
if len(values) > 0:
if isinstance(values[0], float):
codes_set_double_array(handle, key, values)
elif isinstance(values[0], int):
codes_set_long_array(handle, key, values)
else:
raise TypeError("Unsupported value type: %r" % type(values[0]))
else:
raise ValueError("Cannot set an empty list.")
def codes_grib_multi_support_on(context=None):
if context is None:
context = ffi.NULL
lib.codes_grib_multi_support_on(context)
def codes_grib_multi_support_off(context=None):
if context is None:
context = ffi.NULL
lib.codes_grib_multi_support_off(context)
def codes_write(handle, outfile):
# type: (cffi.FFI.CData, T.BinaryIO) -> None
"""
Write a coded message to a file. If the file does not exist, it is created.
:param str path: (optional) the path to the GRIB file;
defaults to the one of the open index.
"""
mess = ffi.new('const void **')
mess_len = ffi.new('size_t*')
codes_get_message = check_return(lib.codes_get_message)
codes_get_message(handle, mess, mess_len)
message = ffi.buffer(mess[0], size=mess_len[0])
outfile.write(message)
|
ecmwf/cfgrib
|
cfgrib/bindings.py
|
codes_get_bytes_array
|
python
|
def codes_get_bytes_array(handle, key, size):
# type: (cffi.FFI.CData, str, int) -> T.List[int]
values = ffi.new('unsigned char[]', size)
size_p = ffi.new('size_t *', size)
_codes_get_bytes(handle, key.encode(ENC), values, size_p)
return list(values)
|
Get unsigned chars array values from a key.
:param bytes key: the keyword whose value(s) are to be extracted
:rtype: List(int)
|
train
|
https://github.com/ecmwf/cfgrib/blob/d6d533f49c1eebf78f2f16ed0671c666de08c666/cfgrib/bindings.py#L248-L260
| null |
#
# Copyright 2017-2019 European Centre for Medium-Range Weather Forecasts (ECMWF).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# Alessandro Amici - B-Open - https://bopen.eu
#
import functools
import logging
import pkgutil
import typing as T # noqa
import cffi
LOG = logging.getLogger(__name__)
ffi = cffi.FFI()
ffi.cdef(
pkgutil.get_data(__name__, 'grib_api.h').decode('utf-8') +
pkgutil.get_data(__name__, 'eccodes.h').decode('utf-8')
)
class RaiseOnAttributeAccess(object):
def __init__(self, exc, message):
self.message = message
self.exc = exc
def __getattr__(self, attr):
raise RuntimeError(self.message) from self.exc
for libname in ['eccodes', 'libeccodes.so', 'libeccodes']:
try:
lib = ffi.dlopen(libname)
LOG.info("ecCodes library found using name '%s'.", libname)
break
except OSError as exc:
# lazy exception
lib = RaiseOnAttributeAccess(exc, 'ecCodes library not found on the system.')
LOG.info("ecCodes library not found using name '%s'.", libname)
# default encoding for ecCodes strings
ENC = 'ascii'
#
# from gribapi.py
#
CODES_PRODUCT_ANY = 0
""" Generic product kind """
CODES_PRODUCT_GRIB = 1
""" GRIB product kind """
CODES_PRODUCT_BUFR = 2
""" BUFR product kind """
CODES_PRODUCT_METAR = 3
""" METAR product kind """
CODES_PRODUCT_GTS = 4
""" GTS product kind """
CODES_PRODUCT_TAF = 5
""" TAF product kind """
# Constants for 'missing'
GRIB_MISSING_DOUBLE = -1e+100
GRIB_MISSING_LONG = 2147483647
CODES_MISSING_DOUBLE = GRIB_MISSING_DOUBLE
CODES_MISSING_LONG = GRIB_MISSING_LONG
#
# Helper values to discriminate key types
#
CODES_TYPE_UNDEFINED = lib.GRIB_TYPE_UNDEFINED
CODES_TYPE_LONG = lib.GRIB_TYPE_LONG
CODES_TYPE_DOUBLE = lib.GRIB_TYPE_DOUBLE
CODES_TYPE_STRING = lib.GRIB_TYPE_STRING
CODES_TYPE_BYTES = lib.GRIB_TYPE_BYTES
CODES_TYPE_SECTION = lib.GRIB_TYPE_SECTION
CODES_TYPE_LABEL = lib.GRIB_TYPE_LABEL
CODES_TYPE_MISSING = lib.GRIB_TYPE_MISSING
KEYTYPES = {
1: int,
2: float,
3: str,
}
CODES_KEYS_ITERATOR_ALL_KEYS = 0
CODES_KEYS_ITERATOR_SKIP_READ_ONLY = (1 << 0)
CODES_KEYS_ITERATOR_SKIP_OPTIONAL = (1 << 1)
CODES_KEYS_ITERATOR_SKIP_EDITION_SPECIFIC = (1 << 2)
CODES_KEYS_ITERATOR_SKIP_CODED = (1 << 3)
CODES_KEYS_ITERATOR_SKIP_COMPUTED = (1 << 4)
CODES_KEYS_ITERATOR_SKIP_DUPLICATES = (1 << 5)
CODES_KEYS_ITERATOR_SKIP_FUNCTION = (1 << 6)
CODES_KEYS_ITERATOR_DUMP_ONLY = (1 << 7)
#
# Helper functions for error reporting
#
def grib_get_error_message(code):
# type: (int) -> str
message = lib.grib_get_error_message(code)
return ffi.string(message).decode(ENC)
class GribInternalError(Exception):
def __init__(self, code, message=None, *args):
self.code = code
self.eccode_message = grib_get_error_message(code)
if message is None:
message = '%s (%s).' % (self.eccode_message, code)
super(GribInternalError, self).__init__(message, code, *args)
class KeyValueNotFoundError(GribInternalError):
"""Key/value not found."""
class ReadOnlyError(GribInternalError):
"""Value is read only."""
class FileNotFoundError(GribInternalError):
"""File not found."""
ERROR_MAP = {
-18: ReadOnlyError,
-10: KeyValueNotFoundError,
-7: FileNotFoundError,
}
def check_last(func):
@functools.wraps(func)
def wrapper(*args):
code = ffi.new('int *')
args += (code,)
retval = func(*args)
if code[0] != lib.GRIB_SUCCESS:
if code[0] in ERROR_MAP:
raise ERROR_MAP[code[0]](code[0])
else:
raise GribInternalError(code[0])
return retval
return wrapper
def check_return(func):
@functools.wraps(func)
def wrapper(*args):
code = func(*args)
if code != lib.GRIB_SUCCESS:
if code in ERROR_MAP:
raise ERROR_MAP[code](code)
else:
raise GribInternalError(code)
return wrapper
#
# CFFI reimplementation of gribapi.py functions with codes names
#
def codes_grib_new_from_file(fileobj, product_kind=CODES_PRODUCT_GRIB, context=None):
if context is None:
context = ffi.NULL
try:
retval = check_last(lib.codes_handle_new_from_file)(context, fileobj, product_kind)
if retval == ffi.NULL:
raise EOFError("End of file: %r" % fileobj)
else:
return retval
except GribInternalError as ex:
if ex.code == lib.GRIB_END_OF_FILE:
raise EOFError("End of file: %r" % fileobj)
raise
def codes_clone(handle):
# type: (cffi.FFI.CData) -> cffi.FFI.CData
cloned_handle = lib.codes_handle_clone(handle)
if cloned_handle is ffi.NULL:
raise GribInternalError(lib.GRIB_NULL_POINTER)
return cloned_handle
codes_release = lib.codes_handle_delete
_codes_get_size = check_return(lib.codes_get_size)
def codes_get_size(handle, key):
# type: (cffi.FFI.CData, str) -> int
"""
Get the number of coded value from a key.
If several keys of the same name are present, the total sum is returned.
:param bytes key: the keyword to get the size of
:rtype: int
"""
size = ffi.new('size_t *')
_codes_get_size(handle, key.encode(ENC), size)
return size[0]
_codes_get_length = check_return(lib.codes_get_length)
def codes_get_string_length(handle, key):
    # type: (cffi.FFI.CData, str) -> int
    """
    Get the length of the string representation of the key.
    If several keys of the same name are present, the maximum length is returned.
    :param str key: the keyword to get the string representation size of.
    :rtype: int
    """
    size = ffi.new('size_t *')
    _codes_get_length(handle, key.encode(ENC), size)
    return size[0]
_codes_get_bytes = check_return(lib.codes_get_bytes)
_codes_get_long_array = check_return(lib.codes_get_long_array)
def codes_get_long_array(handle, key, size):
# type: (cffi.FFI.CData, str, int) -> T.List[int]
"""
Get long array values from a key.
:param bytes key: the keyword whose value(s) are to be extracted
:rtype: List(int)
"""
values = ffi.new('long[]', size)
size_p = ffi.new('size_t *', size)
_codes_get_long_array(handle, key.encode(ENC), values, size_p)
return list(values)
_codes_get_double_array = check_return(lib.codes_get_double_array)
def codes_get_double_array(handle, key, size):
# type: (cffi.FFI.CData, str, int) -> T.List[float]
"""
Get double array values from a key.
:param bytes key: the keyword whose value(s) are to be extracted
:rtype: T.List(float)
"""
values = ffi.new('double[]', size)
size_p = ffi.new('size_t *', size)
_codes_get_double_array(handle, key.encode(ENC), values, size_p)
return list(values)
_codes_get_string_array = check_return(lib.codes_get_string_array)
def codes_get_string_array(handle, key, size, length=None):
# type: (cffi.FFI.CData, bytes, int, int) -> T.List[bytes]
"""
Get string array values from a key.
:param bytes key: the keyword whose value(s) are to be extracted
:rtype: T.List[bytes]
"""
if length is None:
length = codes_get_string_length(handle, key)
values_keepalive = [ffi.new('char[]', length) for _ in range(size)]
values = ffi.new('char*[]', values_keepalive)
size_p = ffi.new('size_t *', size)
_codes_get_string_array(handle, key.encode(ENC), values, size_p)
return [ffi.string(values[i]).decode(ENC) for i in range(size_p[0])]
def codes_get_long(handle, key):
# type: (cffi.FFI.CData, str) -> int
value = ffi.new('long *')
_codes_get_long = check_return(lib.codes_get_long)
_codes_get_long(handle, key.encode(ENC), value)
return value[0]
def codes_get_double(handle, key):
# type: (cffi.FFI.CData, str) -> int
value = ffi.new('double *')
_codes_get_long = check_return(lib.codes_get_double)
_codes_get_long(handle, key.encode(ENC), value)
return value[0]
def codes_get_string(handle, key, length=None):
# type: (cffi.FFI.CData, str, int) -> str
"""
Get string element from a key.
It may or may not fail in case there are more than one key in a message.
Outputs the last element.
:param bytes key: the keyword to select the value of
:param bool strict: flag to select if the method should fail in case of
more than one key in single message
:rtype: bytes
"""
if length is None:
length = codes_get_string_length(handle, key)
values = ffi.new('char[]', length)
length_p = ffi.new('size_t *', length)
_codes_get_string = check_return(lib.codes_get_string)
_codes_get_string(handle, key.encode(ENC), values, length_p)
return ffi.string(values, length_p[0]).decode(ENC)
_codes_get_native_type = check_return(lib.codes_get_native_type)
def codes_get_native_type(handle, key):
# type: (cffi.FFI.CData, str) -> int
grib_type = ffi.new('int *')
_codes_get_native_type(handle, key.encode(ENC), grib_type)
return KEYTYPES.get(grib_type[0], grib_type[0])
def codes_get_array(handle, key, key_type=None, size=None, length=None, log=LOG):
# type: (cffi.FFI.CData, str, int, int, int, logging.Logger) -> T.Any
if key_type is None:
key_type = codes_get_native_type(handle, key)
if size is None:
size = codes_get_size(handle, key)
if key_type == int:
return codes_get_long_array(handle, key, size)
elif key_type == float:
return codes_get_double_array(handle, key, size)
elif key_type == str:
return codes_get_string_array(handle, key, size, length=length)
elif key_type == CODES_TYPE_BYTES:
return codes_get_bytes_array(handle, key, size)
else:
log.warning("Unknown GRIB key type: %r", key_type)
def codes_get(handle, key, key_type=None, length=None, log=LOG):
# type: (cffi.FFI.CData, str, int, int, logging.Logger) -> T.Any
if key_type is None:
key_type = codes_get_native_type(handle, key)
if key_type == int:
return codes_get_long(handle, key)
elif key_type == float:
return codes_get_double(handle, key)
elif key_type == str:
return codes_get_string(handle, key, length=length)
else:
log.warning("Unknown GRIB key type: %r", key_type)
def codes_keys_iterator_new(handle, flags=CODES_KEYS_ITERATOR_ALL_KEYS, namespace=None):
# type: (cffi.FFI.CData, int, str) -> cffi.FFI.CData
if namespace is None:
bnamespace = ffi.NULL
else:
bnamespace = namespace.encode(ENC)
codes_keys_iterator_new = lib.codes_keys_iterator_new
return codes_keys_iterator_new(handle, flags, bnamespace)
def codes_keys_iterator_next(iterator_id):
return lib.codes_keys_iterator_next(iterator_id)
def codes_keys_iterator_get_name(iterator):
ret = lib.codes_keys_iterator_get_name(iterator)
return ffi.string(ret).decode(ENC)
def codes_keys_iterator_delete(iterator_id):
codes_keys_iterator_delete = check_return(lib.codes_keys_iterator_delete)
codes_keys_iterator_delete(iterator_id)
def codes_get_api_version():
"""
Get the API version.
Returns the version of the API as a string in the format "major.minor.revision".
"""
ver = lib.codes_get_api_version()
patch = ver % 100
ver = ver // 100
minor = ver % 100
major = ver // 100
return "%d.%d.%d" % (major, minor, patch)
def portable_handle_new_from_samples(samplename, product_kind):
#
# re-implement codes_grib_handle_new_from_samples in a portable way.
# imports are here not to pollute the head of the file with (hopfully!) temporary stuff
#
import os.path
import platform
handle = ffi.NULL
if platform.platform().startswith('Windows'):
samples_folder = ffi.string(lib.codes_samples_path(ffi.NULL))
sample_path = os.path.join(samples_folder, samplename + b'.tmpl')
try:
with open(sample_path) as file:
handle = codes_grib_new_from_file(file, product_kind)
except Exception:
pass
return handle
def codes_new_from_samples(samplename, product_kind=CODES_PRODUCT_GRIB):
# type: (str, int) -> cffi.FFI.CData
# work around an ecCodes bug on Windows, hopefully this will go away soon
handle = portable_handle_new_from_samples(samplename, product_kind)
if handle != ffi.NULL:
return handle
# end of work-around
if product_kind == CODES_PRODUCT_GRIB:
handle = lib.codes_grib_handle_new_from_samples(ffi.NULL, samplename.encode(ENC))
elif product_kind == CODES_PRODUCT_BUFR:
handle = lib.codes_bufr_handle_new_from_samples(ffi.NULL, samplename.encode(ENC))
else:
raise NotImplementedError("product kind not supported: %r" % product_kind)
if handle == ffi.NULL:
raise ValueError("sample not found: %r" % samplename)
return handle
def codes_set_long(handle, key, value):
# type: (cffi.FFI.CData, str, int) -> None
codes_set_long = check_return(lib.codes_set_long)
codes_set_long(handle, key.encode(ENC), value)
def codes_set_double(handle, key, value):
# type: (cffi.FFI.CData, str, float) -> None
codes_set_double = check_return(lib.codes_set_double)
codes_set_double(handle, key.encode(ENC), value)
def codes_set_string(handle, key, value):
# type: (cffi.FFI.CData, str, str) -> None
size = ffi.new('size_t *', len(value))
codes_set_string = check_return(lib.codes_set_string)
codes_set_string(handle, key.encode(ENC), value.encode(ENC), size)
def codes_set(handle, key, value):
""""""
if isinstance(value, int):
codes_set_long(handle, key, value)
elif isinstance(value, float):
codes_set_double(handle, key, value)
elif isinstance(value, str):
codes_set_string(handle, key, value)
else:
raise TypeError("Unsupported type %r" % type(value))
def codes_set_double_array(handle, key, values):
# type: (cffi.FFI.CData, str, T.List[float]) -> None
size = len(values)
c_values = ffi.new("double []", values)
codes_set_double_array = check_return(lib.codes_set_double_array)
codes_set_double_array(handle, key.encode(ENC), c_values, size)
def codes_set_long_array(handle, key, values):
# type: (cffi.FFI.CData, str, T.List[int]) -> None
size = len(values)
c_values = ffi.new("long []", values)
codes_set_long_array = check_return(lib.codes_set_long_array)
codes_set_long_array(handle, key.encode(ENC), c_values, size)
def codes_set_array(handle, key, values):
# type: (cffi.FFI.CData, str, T.List[T.Any]) -> None
if len(values) > 0:
if isinstance(values[0], float):
codes_set_double_array(handle, key, values)
elif isinstance(values[0], int):
codes_set_long_array(handle, key, values)
else:
raise TypeError("Unsupported value type: %r" % type(values[0]))
else:
raise ValueError("Cannot set an empty list.")
def codes_grib_multi_support_on(context=None):
if context is None:
context = ffi.NULL
lib.codes_grib_multi_support_on(context)
def codes_grib_multi_support_off(context=None):
if context is None:
context = ffi.NULL
lib.codes_grib_multi_support_off(context)
def codes_write(handle, outfile):
# type: (cffi.FFI.CData, T.BinaryIO) -> None
"""
Write a coded message to a file. If the file does not exist, it is created.
:param str path: (optional) the path to the GRIB file;
defaults to the one of the open index.
"""
mess = ffi.new('const void **')
mess_len = ffi.new('size_t*')
codes_get_message = check_return(lib.codes_get_message)
codes_get_message(handle, mess, mess_len)
message = ffi.buffer(mess[0], size=mess_len[0])
outfile.write(message)
|
ecmwf/cfgrib
|
cfgrib/bindings.py
|
codes_get_long_array
|
python
|
def codes_get_long_array(handle, key, size):
# type: (cffi.FFI.CData, str, int) -> T.List[int]
values = ffi.new('long[]', size)
size_p = ffi.new('size_t *', size)
_codes_get_long_array(handle, key.encode(ENC), values, size_p)
return list(values)
|
Get long array values from a key.
:param bytes key: the keyword whose value(s) are to be extracted
:rtype: List(int)
|
train
|
https://github.com/ecmwf/cfgrib/blob/d6d533f49c1eebf78f2f16ed0671c666de08c666/cfgrib/bindings.py#L266-L278
| null |
#
# Copyright 2017-2019 European Centre for Medium-Range Weather Forecasts (ECMWF).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# Alessandro Amici - B-Open - https://bopen.eu
#
import functools
import logging
import pkgutil
import typing as T # noqa
import cffi
LOG = logging.getLogger(__name__)
ffi = cffi.FFI()
ffi.cdef(
pkgutil.get_data(__name__, 'grib_api.h').decode('utf-8') +
pkgutil.get_data(__name__, 'eccodes.h').decode('utf-8')
)
class RaiseOnAttributeAccess(object):
def __init__(self, exc, message):
self.message = message
self.exc = exc
def __getattr__(self, attr):
raise RuntimeError(self.message) from self.exc
for libname in ['eccodes', 'libeccodes.so', 'libeccodes']:
try:
lib = ffi.dlopen(libname)
LOG.info("ecCodes library found using name '%s'.", libname)
break
except OSError as exc:
# lazy exception
lib = RaiseOnAttributeAccess(exc, 'ecCodes library not found on the system.')
LOG.info("ecCodes library not found using name '%s'.", libname)
# default encoding for ecCodes strings
ENC = 'ascii'
#
# from gribapi.py
#
CODES_PRODUCT_ANY = 0
""" Generic product kind """
CODES_PRODUCT_GRIB = 1
""" GRIB product kind """
CODES_PRODUCT_BUFR = 2
""" BUFR product kind """
CODES_PRODUCT_METAR = 3
""" METAR product kind """
CODES_PRODUCT_GTS = 4
""" GTS product kind """
CODES_PRODUCT_TAF = 5
""" TAF product kind """
# Constants for 'missing'
GRIB_MISSING_DOUBLE = -1e+100
GRIB_MISSING_LONG = 2147483647
CODES_MISSING_DOUBLE = GRIB_MISSING_DOUBLE
CODES_MISSING_LONG = GRIB_MISSING_LONG
#
# Helper values to discriminate key types
#
CODES_TYPE_UNDEFINED = lib.GRIB_TYPE_UNDEFINED
CODES_TYPE_LONG = lib.GRIB_TYPE_LONG
CODES_TYPE_DOUBLE = lib.GRIB_TYPE_DOUBLE
CODES_TYPE_STRING = lib.GRIB_TYPE_STRING
CODES_TYPE_BYTES = lib.GRIB_TYPE_BYTES
CODES_TYPE_SECTION = lib.GRIB_TYPE_SECTION
CODES_TYPE_LABEL = lib.GRIB_TYPE_LABEL
CODES_TYPE_MISSING = lib.GRIB_TYPE_MISSING
KEYTYPES = {
1: int,
2: float,
3: str,
}
CODES_KEYS_ITERATOR_ALL_KEYS = 0
CODES_KEYS_ITERATOR_SKIP_READ_ONLY = (1 << 0)
CODES_KEYS_ITERATOR_SKIP_OPTIONAL = (1 << 1)
CODES_KEYS_ITERATOR_SKIP_EDITION_SPECIFIC = (1 << 2)
CODES_KEYS_ITERATOR_SKIP_CODED = (1 << 3)
CODES_KEYS_ITERATOR_SKIP_COMPUTED = (1 << 4)
CODES_KEYS_ITERATOR_SKIP_DUPLICATES = (1 << 5)
CODES_KEYS_ITERATOR_SKIP_FUNCTION = (1 << 6)
CODES_KEYS_ITERATOR_DUMP_ONLY = (1 << 7)
#
# Helper functions for error reporting
#
def grib_get_error_message(code):
# type: (int) -> str
message = lib.grib_get_error_message(code)
return ffi.string(message).decode(ENC)
class GribInternalError(Exception):
def __init__(self, code, message=None, *args):
self.code = code
self.eccode_message = grib_get_error_message(code)
if message is None:
message = '%s (%s).' % (self.eccode_message, code)
super(GribInternalError, self).__init__(message, code, *args)
class KeyValueNotFoundError(GribInternalError):
"""Key/value not found."""
class ReadOnlyError(GribInternalError):
"""Value is read only."""
class FileNotFoundError(GribInternalError):
"""File not found."""
ERROR_MAP = {
-18: ReadOnlyError,
-10: KeyValueNotFoundError,
-7: FileNotFoundError,
}
def check_last(func):
@functools.wraps(func)
def wrapper(*args):
code = ffi.new('int *')
args += (code,)
retval = func(*args)
if code[0] != lib.GRIB_SUCCESS:
if code[0] in ERROR_MAP:
raise ERROR_MAP[code[0]](code[0])
else:
raise GribInternalError(code[0])
return retval
return wrapper
def check_return(func):
@functools.wraps(func)
def wrapper(*args):
code = func(*args)
if code != lib.GRIB_SUCCESS:
if code in ERROR_MAP:
raise ERROR_MAP[code](code)
else:
raise GribInternalError(code)
return wrapper
#
# CFFI reimplementation of gribapi.py functions with codes names
#
def codes_grib_new_from_file(fileobj, product_kind=CODES_PRODUCT_GRIB, context=None):
if context is None:
context = ffi.NULL
try:
retval = check_last(lib.codes_handle_new_from_file)(context, fileobj, product_kind)
if retval == ffi.NULL:
raise EOFError("End of file: %r" % fileobj)
else:
return retval
except GribInternalError as ex:
if ex.code == lib.GRIB_END_OF_FILE:
raise EOFError("End of file: %r" % fileobj)
raise
def codes_clone(handle):
# type: (cffi.FFI.CData) -> cffi.FFI.CData
cloned_handle = lib.codes_handle_clone(handle)
if cloned_handle is ffi.NULL:
raise GribInternalError(lib.GRIB_NULL_POINTER)
return cloned_handle
codes_release = lib.codes_handle_delete
_codes_get_size = check_return(lib.codes_get_size)
def codes_get_size(handle, key):
# type: (cffi.FFI.CData, str) -> int
"""
Get the number of coded value from a key.
If several keys of the same name are present, the total sum is returned.
:param bytes key: the keyword to get the size of
:rtype: int
"""
size = ffi.new('size_t *')
_codes_get_size(handle, key.encode(ENC), size)
return size[0]
_codes_get_length = check_return(lib.codes_get_length)
def codes_get_string_length(handle, key):
# type: (cffi.FFI.CData, str) -> int
"""
Get the length of the string representation of the key.
If several keys of the same name are present, the maximum length is returned.
:param bytes key: the keyword to get the string representation size of.
:rtype: int
"""
size = ffi.new('size_t *')
_codes_get_length(handle, key.encode(ENC), size)
return size[0]
_codes_get_bytes = check_return(lib.codes_get_bytes)
def codes_get_bytes_array(handle, key, size):
# type: (cffi.FFI.CData, str, int) -> T.List[int]
"""
Get unsigned chars array values from a key.
:param bytes key: the keyword whose value(s) are to be extracted
:rtype: List(int)
"""
values = ffi.new('unsigned char[]', size)
size_p = ffi.new('size_t *', size)
_codes_get_bytes(handle, key.encode(ENC), values, size_p)
return list(values)
_codes_get_long_array = check_return(lib.codes_get_long_array)
_codes_get_double_array = check_return(lib.codes_get_double_array)
def codes_get_double_array(handle, key, size):
# type: (cffi.FFI.CData, str, int) -> T.List[float]
"""
Get double array values from a key.
:param bytes key: the keyword whose value(s) are to be extracted
:rtype: T.List(float)
"""
values = ffi.new('double[]', size)
size_p = ffi.new('size_t *', size)
_codes_get_double_array(handle, key.encode(ENC), values, size_p)
return list(values)
_codes_get_string_array = check_return(lib.codes_get_string_array)
def codes_get_string_array(handle, key, size, length=None):
# type: (cffi.FFI.CData, bytes, int, int) -> T.List[bytes]
"""
Get string array values from a key.
:param bytes key: the keyword whose value(s) are to be extracted
:rtype: T.List[bytes]
"""
if length is None:
length = codes_get_string_length(handle, key)
values_keepalive = [ffi.new('char[]', length) for _ in range(size)]
values = ffi.new('char*[]', values_keepalive)
size_p = ffi.new('size_t *', size)
_codes_get_string_array(handle, key.encode(ENC), values, size_p)
return [ffi.string(values[i]).decode(ENC) for i in range(size_p[0])]
def codes_get_long(handle, key):
# type: (cffi.FFI.CData, str) -> int
value = ffi.new('long *')
_codes_get_long = check_return(lib.codes_get_long)
_codes_get_long(handle, key.encode(ENC), value)
return value[0]
def codes_get_double(handle, key):
# type: (cffi.FFI.CData, str) -> int
value = ffi.new('double *')
_codes_get_long = check_return(lib.codes_get_double)
_codes_get_long(handle, key.encode(ENC), value)
return value[0]
def codes_get_string(handle, key, length=None):
# type: (cffi.FFI.CData, str, int) -> str
"""
Get string element from a key.
It may or may not fail in case there are more than one key in a message.
Outputs the last element.
:param bytes key: the keyword to select the value of
:param bool strict: flag to select if the method should fail in case of
more than one key in single message
:rtype: bytes
"""
if length is None:
length = codes_get_string_length(handle, key)
values = ffi.new('char[]', length)
length_p = ffi.new('size_t *', length)
_codes_get_string = check_return(lib.codes_get_string)
_codes_get_string(handle, key.encode(ENC), values, length_p)
return ffi.string(values, length_p[0]).decode(ENC)
_codes_get_native_type = check_return(lib.codes_get_native_type)
def codes_get_native_type(handle, key):
# type: (cffi.FFI.CData, str) -> int
grib_type = ffi.new('int *')
_codes_get_native_type(handle, key.encode(ENC), grib_type)
return KEYTYPES.get(grib_type[0], grib_type[0])
def codes_get_array(handle, key, key_type=None, size=None, length=None, log=LOG):
# type: (cffi.FFI.CData, str, int, int, int, logging.Logger) -> T.Any
if key_type is None:
key_type = codes_get_native_type(handle, key)
if size is None:
size = codes_get_size(handle, key)
if key_type == int:
return codes_get_long_array(handle, key, size)
elif key_type == float:
return codes_get_double_array(handle, key, size)
elif key_type == str:
return codes_get_string_array(handle, key, size, length=length)
elif key_type == CODES_TYPE_BYTES:
return codes_get_bytes_array(handle, key, size)
else:
log.warning("Unknown GRIB key type: %r", key_type)
def codes_get(handle, key, key_type=None, length=None, log=LOG):
# type: (cffi.FFI.CData, str, int, int, logging.Logger) -> T.Any
if key_type is None:
key_type = codes_get_native_type(handle, key)
if key_type == int:
return codes_get_long(handle, key)
elif key_type == float:
return codes_get_double(handle, key)
elif key_type == str:
return codes_get_string(handle, key, length=length)
else:
log.warning("Unknown GRIB key type: %r", key_type)
def codes_keys_iterator_new(handle, flags=CODES_KEYS_ITERATOR_ALL_KEYS, namespace=None):
# type: (cffi.FFI.CData, int, str) -> cffi.FFI.CData
if namespace is None:
bnamespace = ffi.NULL
else:
bnamespace = namespace.encode(ENC)
codes_keys_iterator_new = lib.codes_keys_iterator_new
return codes_keys_iterator_new(handle, flags, bnamespace)
def codes_keys_iterator_next(iterator_id):
return lib.codes_keys_iterator_next(iterator_id)
def codes_keys_iterator_get_name(iterator):
ret = lib.codes_keys_iterator_get_name(iterator)
return ffi.string(ret).decode(ENC)
def codes_keys_iterator_delete(iterator_id):
codes_keys_iterator_delete = check_return(lib.codes_keys_iterator_delete)
codes_keys_iterator_delete(iterator_id)
def codes_get_api_version():
"""
Get the API version.
Returns the version of the API as a string in the format "major.minor.revision".
"""
ver = lib.codes_get_api_version()
patch = ver % 100
ver = ver // 100
minor = ver % 100
major = ver // 100
return "%d.%d.%d" % (major, minor, patch)
def portable_handle_new_from_samples(samplename, product_kind):
#
# re-implement codes_grib_handle_new_from_samples in a portable way.
# imports are here not to pollute the head of the file with (hopfully!) temporary stuff
#
import os.path
import platform
handle = ffi.NULL
if platform.platform().startswith('Windows'):
samples_folder = ffi.string(lib.codes_samples_path(ffi.NULL))
sample_path = os.path.join(samples_folder, samplename + b'.tmpl')
try:
with open(sample_path) as file:
handle = codes_grib_new_from_file(file, product_kind)
except Exception:
pass
return handle
def codes_new_from_samples(samplename, product_kind=CODES_PRODUCT_GRIB):
# type: (str, int) -> cffi.FFI.CData
# work around an ecCodes bug on Windows, hopefully this will go away soon
handle = portable_handle_new_from_samples(samplename, product_kind)
if handle != ffi.NULL:
return handle
# end of work-around
if product_kind == CODES_PRODUCT_GRIB:
handle = lib.codes_grib_handle_new_from_samples(ffi.NULL, samplename.encode(ENC))
elif product_kind == CODES_PRODUCT_BUFR:
handle = lib.codes_bufr_handle_new_from_samples(ffi.NULL, samplename.encode(ENC))
else:
raise NotImplementedError("product kind not supported: %r" % product_kind)
if handle == ffi.NULL:
raise ValueError("sample not found: %r" % samplename)
return handle
def codes_set_long(handle, key, value):
# type: (cffi.FFI.CData, str, int) -> None
codes_set_long = check_return(lib.codes_set_long)
codes_set_long(handle, key.encode(ENC), value)
def codes_set_double(handle, key, value):
# type: (cffi.FFI.CData, str, float) -> None
codes_set_double = check_return(lib.codes_set_double)
codes_set_double(handle, key.encode(ENC), value)
def codes_set_string(handle, key, value):
# type: (cffi.FFI.CData, str, str) -> None
size = ffi.new('size_t *', len(value))
codes_set_string = check_return(lib.codes_set_string)
codes_set_string(handle, key.encode(ENC), value.encode(ENC), size)
def codes_set(handle, key, value):
""""""
if isinstance(value, int):
codes_set_long(handle, key, value)
elif isinstance(value, float):
codes_set_double(handle, key, value)
elif isinstance(value, str):
codes_set_string(handle, key, value)
else:
raise TypeError("Unsupported type %r" % type(value))
def codes_set_double_array(handle, key, values):
# type: (cffi.FFI.CData, str, T.List[float]) -> None
size = len(values)
c_values = ffi.new("double []", values)
codes_set_double_array = check_return(lib.codes_set_double_array)
codes_set_double_array(handle, key.encode(ENC), c_values, size)
def codes_set_long_array(handle, key, values):
# type: (cffi.FFI.CData, str, T.List[int]) -> None
size = len(values)
c_values = ffi.new("long []", values)
codes_set_long_array = check_return(lib.codes_set_long_array)
codes_set_long_array(handle, key.encode(ENC), c_values, size)
def codes_set_array(handle, key, values):
# type: (cffi.FFI.CData, str, T.List[T.Any]) -> None
if len(values) > 0:
if isinstance(values[0], float):
codes_set_double_array(handle, key, values)
elif isinstance(values[0], int):
codes_set_long_array(handle, key, values)
else:
raise TypeError("Unsupported value type: %r" % type(values[0]))
else:
raise ValueError("Cannot set an empty list.")
def codes_grib_multi_support_on(context=None):
if context is None:
context = ffi.NULL
lib.codes_grib_multi_support_on(context)
def codes_grib_multi_support_off(context=None):
if context is None:
context = ffi.NULL
lib.codes_grib_multi_support_off(context)
def codes_write(handle, outfile):
# type: (cffi.FFI.CData, T.BinaryIO) -> None
"""
Write a coded message to a file. If the file does not exist, it is created.
:param str path: (optional) the path to the GRIB file;
defaults to the one of the open index.
"""
mess = ffi.new('const void **')
mess_len = ffi.new('size_t*')
codes_get_message = check_return(lib.codes_get_message)
codes_get_message(handle, mess, mess_len)
message = ffi.buffer(mess[0], size=mess_len[0])
outfile.write(message)
|
ecmwf/cfgrib
|
cfgrib/bindings.py
|
codes_get_double_array
|
python
|
def codes_get_double_array(handle, key, size):
# type: (cffi.FFI.CData, str, int) -> T.List[float]
values = ffi.new('double[]', size)
size_p = ffi.new('size_t *', size)
_codes_get_double_array(handle, key.encode(ENC), values, size_p)
return list(values)
|
Get double array values from a key.
:param bytes key: the keyword whose value(s) are to be extracted
:rtype: T.List(float)
|
train
|
https://github.com/ecmwf/cfgrib/blob/d6d533f49c1eebf78f2f16ed0671c666de08c666/cfgrib/bindings.py#L284-L296
| null |
#
# Copyright 2017-2019 European Centre for Medium-Range Weather Forecasts (ECMWF).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# Alessandro Amici - B-Open - https://bopen.eu
#
import functools
import logging
import pkgutil
import typing as T # noqa
import cffi
LOG = logging.getLogger(__name__)
ffi = cffi.FFI()
ffi.cdef(
pkgutil.get_data(__name__, 'grib_api.h').decode('utf-8') +
pkgutil.get_data(__name__, 'eccodes.h').decode('utf-8')
)
class RaiseOnAttributeAccess(object):
def __init__(self, exc, message):
self.message = message
self.exc = exc
def __getattr__(self, attr):
raise RuntimeError(self.message) from self.exc
for libname in ['eccodes', 'libeccodes.so', 'libeccodes']:
try:
lib = ffi.dlopen(libname)
LOG.info("ecCodes library found using name '%s'.", libname)
break
except OSError as exc:
# lazy exception
lib = RaiseOnAttributeAccess(exc, 'ecCodes library not found on the system.')
LOG.info("ecCodes library not found using name '%s'.", libname)
# default encoding for ecCodes strings
ENC = 'ascii'
#
# from gribapi.py
#
CODES_PRODUCT_ANY = 0
""" Generic product kind """
CODES_PRODUCT_GRIB = 1
""" GRIB product kind """
CODES_PRODUCT_BUFR = 2
""" BUFR product kind """
CODES_PRODUCT_METAR = 3
""" METAR product kind """
CODES_PRODUCT_GTS = 4
""" GTS product kind """
CODES_PRODUCT_TAF = 5
""" TAF product kind """
# Constants for 'missing'
GRIB_MISSING_DOUBLE = -1e+100
GRIB_MISSING_LONG = 2147483647
CODES_MISSING_DOUBLE = GRIB_MISSING_DOUBLE
CODES_MISSING_LONG = GRIB_MISSING_LONG
#
# Helper values to discriminate key types
#
CODES_TYPE_UNDEFINED = lib.GRIB_TYPE_UNDEFINED
CODES_TYPE_LONG = lib.GRIB_TYPE_LONG
CODES_TYPE_DOUBLE = lib.GRIB_TYPE_DOUBLE
CODES_TYPE_STRING = lib.GRIB_TYPE_STRING
CODES_TYPE_BYTES = lib.GRIB_TYPE_BYTES
CODES_TYPE_SECTION = lib.GRIB_TYPE_SECTION
CODES_TYPE_LABEL = lib.GRIB_TYPE_LABEL
CODES_TYPE_MISSING = lib.GRIB_TYPE_MISSING
KEYTYPES = {
1: int,
2: float,
3: str,
}
CODES_KEYS_ITERATOR_ALL_KEYS = 0
CODES_KEYS_ITERATOR_SKIP_READ_ONLY = (1 << 0)
CODES_KEYS_ITERATOR_SKIP_OPTIONAL = (1 << 1)
CODES_KEYS_ITERATOR_SKIP_EDITION_SPECIFIC = (1 << 2)
CODES_KEYS_ITERATOR_SKIP_CODED = (1 << 3)
CODES_KEYS_ITERATOR_SKIP_COMPUTED = (1 << 4)
CODES_KEYS_ITERATOR_SKIP_DUPLICATES = (1 << 5)
CODES_KEYS_ITERATOR_SKIP_FUNCTION = (1 << 6)
CODES_KEYS_ITERATOR_DUMP_ONLY = (1 << 7)
#
# Helper functions for error reporting
#
def grib_get_error_message(code):
# type: (int) -> str
message = lib.grib_get_error_message(code)
return ffi.string(message).decode(ENC)
class GribInternalError(Exception):
def __init__(self, code, message=None, *args):
self.code = code
self.eccode_message = grib_get_error_message(code)
if message is None:
message = '%s (%s).' % (self.eccode_message, code)
super(GribInternalError, self).__init__(message, code, *args)
class KeyValueNotFoundError(GribInternalError):
"""Key/value not found."""
class ReadOnlyError(GribInternalError):
"""Value is read only."""
class FileNotFoundError(GribInternalError):
"""File not found."""
ERROR_MAP = {
-18: ReadOnlyError,
-10: KeyValueNotFoundError,
-7: FileNotFoundError,
}
def check_last(func):
@functools.wraps(func)
def wrapper(*args):
code = ffi.new('int *')
args += (code,)
retval = func(*args)
if code[0] != lib.GRIB_SUCCESS:
if code[0] in ERROR_MAP:
raise ERROR_MAP[code[0]](code[0])
else:
raise GribInternalError(code[0])
return retval
return wrapper
def check_return(func):
@functools.wraps(func)
def wrapper(*args):
code = func(*args)
if code != lib.GRIB_SUCCESS:
if code in ERROR_MAP:
raise ERROR_MAP[code](code)
else:
raise GribInternalError(code)
return wrapper
#
# CFFI reimplementation of gribapi.py functions with codes names
#
def codes_grib_new_from_file(fileobj, product_kind=CODES_PRODUCT_GRIB, context=None):
if context is None:
context = ffi.NULL
try:
retval = check_last(lib.codes_handle_new_from_file)(context, fileobj, product_kind)
if retval == ffi.NULL:
raise EOFError("End of file: %r" % fileobj)
else:
return retval
except GribInternalError as ex:
if ex.code == lib.GRIB_END_OF_FILE:
raise EOFError("End of file: %r" % fileobj)
raise
def codes_clone(handle):
# type: (cffi.FFI.CData) -> cffi.FFI.CData
cloned_handle = lib.codes_handle_clone(handle)
if cloned_handle is ffi.NULL:
raise GribInternalError(lib.GRIB_NULL_POINTER)
return cloned_handle
codes_release = lib.codes_handle_delete
_codes_get_size = check_return(lib.codes_get_size)
def codes_get_size(handle, key):
# type: (cffi.FFI.CData, str) -> int
"""
Get the number of coded value from a key.
If several keys of the same name are present, the total sum is returned.
:param bytes key: the keyword to get the size of
:rtype: int
"""
size = ffi.new('size_t *')
_codes_get_size(handle, key.encode(ENC), size)
return size[0]
_codes_get_length = check_return(lib.codes_get_length)
def codes_get_string_length(handle, key):
# type: (cffi.FFI.CData, str) -> int
"""
Get the length of the string representation of the key.
If several keys of the same name are present, the maximum length is returned.
:param bytes key: the keyword to get the string representation size of.
:rtype: int
"""
size = ffi.new('size_t *')
_codes_get_length(handle, key.encode(ENC), size)
return size[0]
_codes_get_bytes = check_return(lib.codes_get_bytes)
def codes_get_bytes_array(handle, key, size):
# type: (cffi.FFI.CData, str, int) -> T.List[int]
"""
Get unsigned chars array values from a key.
:param bytes key: the keyword whose value(s) are to be extracted
:rtype: List(int)
"""
values = ffi.new('unsigned char[]', size)
size_p = ffi.new('size_t *', size)
_codes_get_bytes(handle, key.encode(ENC), values, size_p)
return list(values)
_codes_get_long_array = check_return(lib.codes_get_long_array)
def codes_get_long_array(handle, key, size):
# type: (cffi.FFI.CData, str, int) -> T.List[int]
"""
Get long array values from a key.
:param bytes key: the keyword whose value(s) are to be extracted
:rtype: List(int)
"""
values = ffi.new('long[]', size)
size_p = ffi.new('size_t *', size)
_codes_get_long_array(handle, key.encode(ENC), values, size_p)
return list(values)
_codes_get_double_array = check_return(lib.codes_get_double_array)
_codes_get_string_array = check_return(lib.codes_get_string_array)
def codes_get_string_array(handle, key, size, length=None):
# type: (cffi.FFI.CData, bytes, int, int) -> T.List[bytes]
"""
Get string array values from a key.
:param bytes key: the keyword whose value(s) are to be extracted
:rtype: T.List[bytes]
"""
if length is None:
length = codes_get_string_length(handle, key)
values_keepalive = [ffi.new('char[]', length) for _ in range(size)]
values = ffi.new('char*[]', values_keepalive)
size_p = ffi.new('size_t *', size)
_codes_get_string_array(handle, key.encode(ENC), values, size_p)
return [ffi.string(values[i]).decode(ENC) for i in range(size_p[0])]
def codes_get_long(handle, key):
# type: (cffi.FFI.CData, str) -> int
value = ffi.new('long *')
_codes_get_long = check_return(lib.codes_get_long)
_codes_get_long(handle, key.encode(ENC), value)
return value[0]
def codes_get_double(handle, key):
# type: (cffi.FFI.CData, str) -> int
value = ffi.new('double *')
_codes_get_long = check_return(lib.codes_get_double)
_codes_get_long(handle, key.encode(ENC), value)
return value[0]
def codes_get_string(handle, key, length=None):
# type: (cffi.FFI.CData, str, int) -> str
"""
Get string element from a key.
It may or may not fail in case there are more than one key in a message.
Outputs the last element.
:param bytes key: the keyword to select the value of
:param bool strict: flag to select if the method should fail in case of
more than one key in single message
:rtype: bytes
"""
if length is None:
length = codes_get_string_length(handle, key)
values = ffi.new('char[]', length)
length_p = ffi.new('size_t *', length)
_codes_get_string = check_return(lib.codes_get_string)
_codes_get_string(handle, key.encode(ENC), values, length_p)
return ffi.string(values, length_p[0]).decode(ENC)
_codes_get_native_type = check_return(lib.codes_get_native_type)
def codes_get_native_type(handle, key):
# type: (cffi.FFI.CData, str) -> int
grib_type = ffi.new('int *')
_codes_get_native_type(handle, key.encode(ENC), grib_type)
return KEYTYPES.get(grib_type[0], grib_type[0])
def codes_get_array(handle, key, key_type=None, size=None, length=None, log=LOG):
# type: (cffi.FFI.CData, str, int, int, int, logging.Logger) -> T.Any
if key_type is None:
key_type = codes_get_native_type(handle, key)
if size is None:
size = codes_get_size(handle, key)
if key_type == int:
return codes_get_long_array(handle, key, size)
elif key_type == float:
return codes_get_double_array(handle, key, size)
elif key_type == str:
return codes_get_string_array(handle, key, size, length=length)
elif key_type == CODES_TYPE_BYTES:
return codes_get_bytes_array(handle, key, size)
else:
log.warning("Unknown GRIB key type: %r", key_type)
def codes_get(handle, key, key_type=None, length=None, log=LOG):
# type: (cffi.FFI.CData, str, int, int, logging.Logger) -> T.Any
if key_type is None:
key_type = codes_get_native_type(handle, key)
if key_type == int:
return codes_get_long(handle, key)
elif key_type == float:
return codes_get_double(handle, key)
elif key_type == str:
return codes_get_string(handle, key, length=length)
else:
log.warning("Unknown GRIB key type: %r", key_type)
def codes_keys_iterator_new(handle, flags=CODES_KEYS_ITERATOR_ALL_KEYS, namespace=None):
# type: (cffi.FFI.CData, int, str) -> cffi.FFI.CData
if namespace is None:
bnamespace = ffi.NULL
else:
bnamespace = namespace.encode(ENC)
codes_keys_iterator_new = lib.codes_keys_iterator_new
return codes_keys_iterator_new(handle, flags, bnamespace)
def codes_keys_iterator_next(iterator_id):
return lib.codes_keys_iterator_next(iterator_id)
def codes_keys_iterator_get_name(iterator):
ret = lib.codes_keys_iterator_get_name(iterator)
return ffi.string(ret).decode(ENC)
def codes_keys_iterator_delete(iterator_id):
codes_keys_iterator_delete = check_return(lib.codes_keys_iterator_delete)
codes_keys_iterator_delete(iterator_id)
def codes_get_api_version():
"""
Get the API version.
Returns the version of the API as a string in the format "major.minor.revision".
"""
ver = lib.codes_get_api_version()
patch = ver % 100
ver = ver // 100
minor = ver % 100
major = ver // 100
return "%d.%d.%d" % (major, minor, patch)
def portable_handle_new_from_samples(samplename, product_kind):
#
# re-implement codes_grib_handle_new_from_samples in a portable way.
# imports are here not to pollute the head of the file with (hopfully!) temporary stuff
#
import os.path
import platform
handle = ffi.NULL
if platform.platform().startswith('Windows'):
samples_folder = ffi.string(lib.codes_samples_path(ffi.NULL))
sample_path = os.path.join(samples_folder, samplename + b'.tmpl')
try:
with open(sample_path) as file:
handle = codes_grib_new_from_file(file, product_kind)
except Exception:
pass
return handle
def codes_new_from_samples(samplename, product_kind=CODES_PRODUCT_GRIB):
# type: (str, int) -> cffi.FFI.CData
# work around an ecCodes bug on Windows, hopefully this will go away soon
handle = portable_handle_new_from_samples(samplename, product_kind)
if handle != ffi.NULL:
return handle
# end of work-around
if product_kind == CODES_PRODUCT_GRIB:
handle = lib.codes_grib_handle_new_from_samples(ffi.NULL, samplename.encode(ENC))
elif product_kind == CODES_PRODUCT_BUFR:
handle = lib.codes_bufr_handle_new_from_samples(ffi.NULL, samplename.encode(ENC))
else:
raise NotImplementedError("product kind not supported: %r" % product_kind)
if handle == ffi.NULL:
raise ValueError("sample not found: %r" % samplename)
return handle
def codes_set_long(handle, key, value):
# type: (cffi.FFI.CData, str, int) -> None
codes_set_long = check_return(lib.codes_set_long)
codes_set_long(handle, key.encode(ENC), value)
def codes_set_double(handle, key, value):
# type: (cffi.FFI.CData, str, float) -> None
codes_set_double = check_return(lib.codes_set_double)
codes_set_double(handle, key.encode(ENC), value)
def codes_set_string(handle, key, value):
# type: (cffi.FFI.CData, str, str) -> None
size = ffi.new('size_t *', len(value))
codes_set_string = check_return(lib.codes_set_string)
codes_set_string(handle, key.encode(ENC), value.encode(ENC), size)
def codes_set(handle, key, value):
""""""
if isinstance(value, int):
codes_set_long(handle, key, value)
elif isinstance(value, float):
codes_set_double(handle, key, value)
elif isinstance(value, str):
codes_set_string(handle, key, value)
else:
raise TypeError("Unsupported type %r" % type(value))
def codes_set_double_array(handle, key, values):
# type: (cffi.FFI.CData, str, T.List[float]) -> None
size = len(values)
c_values = ffi.new("double []", values)
codes_set_double_array = check_return(lib.codes_set_double_array)
codes_set_double_array(handle, key.encode(ENC), c_values, size)
def codes_set_long_array(handle, key, values):
# type: (cffi.FFI.CData, str, T.List[int]) -> None
size = len(values)
c_values = ffi.new("long []", values)
codes_set_long_array = check_return(lib.codes_set_long_array)
codes_set_long_array(handle, key.encode(ENC), c_values, size)
def codes_set_array(handle, key, values):
# type: (cffi.FFI.CData, str, T.List[T.Any]) -> None
if len(values) > 0:
if isinstance(values[0], float):
codes_set_double_array(handle, key, values)
elif isinstance(values[0], int):
codes_set_long_array(handle, key, values)
else:
raise TypeError("Unsupported value type: %r" % type(values[0]))
else:
raise ValueError("Cannot set an empty list.")
def codes_grib_multi_support_on(context=None):
if context is None:
context = ffi.NULL
lib.codes_grib_multi_support_on(context)
def codes_grib_multi_support_off(context=None):
if context is None:
context = ffi.NULL
lib.codes_grib_multi_support_off(context)
def codes_write(handle, outfile):
# type: (cffi.FFI.CData, T.BinaryIO) -> None
"""
Write a coded message to a file. If the file does not exist, it is created.
:param str path: (optional) the path to the GRIB file;
defaults to the one of the open index.
"""
mess = ffi.new('const void **')
mess_len = ffi.new('size_t*')
codes_get_message = check_return(lib.codes_get_message)
codes_get_message(handle, mess, mess_len)
message = ffi.buffer(mess[0], size=mess_len[0])
outfile.write(message)
|
ecmwf/cfgrib
|
cfgrib/bindings.py
|
codes_get_string_array
|
python
|
def codes_get_string_array(handle, key, size, length=None):
# type: (cffi.FFI.CData, bytes, int, int) -> T.List[bytes]
if length is None:
length = codes_get_string_length(handle, key)
values_keepalive = [ffi.new('char[]', length) for _ in range(size)]
values = ffi.new('char*[]', values_keepalive)
size_p = ffi.new('size_t *', size)
_codes_get_string_array(handle, key.encode(ENC), values, size_p)
return [ffi.string(values[i]).decode(ENC) for i in range(size_p[0])]
|
Get string array values from a key.
:param bytes key: the keyword whose value(s) are to be extracted
:rtype: T.List[bytes]
|
train
|
https://github.com/ecmwf/cfgrib/blob/d6d533f49c1eebf78f2f16ed0671c666de08c666/cfgrib/bindings.py#L302-L317
|
[
"def codes_get_string_length(handle, key):\n # type: (cffi.FFI.CData, str) -> int\n \"\"\"\n Get the length of the string representation of the key.\n If several keys of the same name are present, the maximum length is returned.\n\n :param bytes key: the keyword to get the string representation size of.\n\n :rtype: int\n \"\"\"\n size = ffi.new('size_t *')\n _codes_get_length(handle, key.encode(ENC), size)\n return size[0]\n"
] |
#
# Copyright 2017-2019 European Centre for Medium-Range Weather Forecasts (ECMWF).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# Alessandro Amici - B-Open - https://bopen.eu
#
import functools
import logging
import pkgutil
import typing as T # noqa
import cffi
LOG = logging.getLogger(__name__)
ffi = cffi.FFI()
ffi.cdef(
pkgutil.get_data(__name__, 'grib_api.h').decode('utf-8') +
pkgutil.get_data(__name__, 'eccodes.h').decode('utf-8')
)
class RaiseOnAttributeAccess(object):
def __init__(self, exc, message):
self.message = message
self.exc = exc
def __getattr__(self, attr):
raise RuntimeError(self.message) from self.exc
for libname in ['eccodes', 'libeccodes.so', 'libeccodes']:
try:
lib = ffi.dlopen(libname)
LOG.info("ecCodes library found using name '%s'.", libname)
break
except OSError as exc:
# lazy exception
lib = RaiseOnAttributeAccess(exc, 'ecCodes library not found on the system.')
LOG.info("ecCodes library not found using name '%s'.", libname)
# default encoding for ecCodes strings
ENC = 'ascii'
#
# from gribapi.py
#
CODES_PRODUCT_ANY = 0
""" Generic product kind """
CODES_PRODUCT_GRIB = 1
""" GRIB product kind """
CODES_PRODUCT_BUFR = 2
""" BUFR product kind """
CODES_PRODUCT_METAR = 3
""" METAR product kind """
CODES_PRODUCT_GTS = 4
""" GTS product kind """
CODES_PRODUCT_TAF = 5
""" TAF product kind """
# Constants for 'missing'
GRIB_MISSING_DOUBLE = -1e+100
GRIB_MISSING_LONG = 2147483647
CODES_MISSING_DOUBLE = GRIB_MISSING_DOUBLE
CODES_MISSING_LONG = GRIB_MISSING_LONG
#
# Helper values to discriminate key types
#
CODES_TYPE_UNDEFINED = lib.GRIB_TYPE_UNDEFINED
CODES_TYPE_LONG = lib.GRIB_TYPE_LONG
CODES_TYPE_DOUBLE = lib.GRIB_TYPE_DOUBLE
CODES_TYPE_STRING = lib.GRIB_TYPE_STRING
CODES_TYPE_BYTES = lib.GRIB_TYPE_BYTES
CODES_TYPE_SECTION = lib.GRIB_TYPE_SECTION
CODES_TYPE_LABEL = lib.GRIB_TYPE_LABEL
CODES_TYPE_MISSING = lib.GRIB_TYPE_MISSING
KEYTYPES = {
1: int,
2: float,
3: str,
}
CODES_KEYS_ITERATOR_ALL_KEYS = 0
CODES_KEYS_ITERATOR_SKIP_READ_ONLY = (1 << 0)
CODES_KEYS_ITERATOR_SKIP_OPTIONAL = (1 << 1)
CODES_KEYS_ITERATOR_SKIP_EDITION_SPECIFIC = (1 << 2)
CODES_KEYS_ITERATOR_SKIP_CODED = (1 << 3)
CODES_KEYS_ITERATOR_SKIP_COMPUTED = (1 << 4)
CODES_KEYS_ITERATOR_SKIP_DUPLICATES = (1 << 5)
CODES_KEYS_ITERATOR_SKIP_FUNCTION = (1 << 6)
CODES_KEYS_ITERATOR_DUMP_ONLY = (1 << 7)
#
# Helper functions for error reporting
#
def grib_get_error_message(code):
# type: (int) -> str
message = lib.grib_get_error_message(code)
return ffi.string(message).decode(ENC)
class GribInternalError(Exception):
def __init__(self, code, message=None, *args):
self.code = code
self.eccode_message = grib_get_error_message(code)
if message is None:
message = '%s (%s).' % (self.eccode_message, code)
super(GribInternalError, self).__init__(message, code, *args)
class KeyValueNotFoundError(GribInternalError):
"""Key/value not found."""
class ReadOnlyError(GribInternalError):
"""Value is read only."""
class FileNotFoundError(GribInternalError):
"""File not found."""
ERROR_MAP = {
-18: ReadOnlyError,
-10: KeyValueNotFoundError,
-7: FileNotFoundError,
}
def check_last(func):
@functools.wraps(func)
def wrapper(*args):
code = ffi.new('int *')
args += (code,)
retval = func(*args)
if code[0] != lib.GRIB_SUCCESS:
if code[0] in ERROR_MAP:
raise ERROR_MAP[code[0]](code[0])
else:
raise GribInternalError(code[0])
return retval
return wrapper
def check_return(func):
@functools.wraps(func)
def wrapper(*args):
code = func(*args)
if code != lib.GRIB_SUCCESS:
if code in ERROR_MAP:
raise ERROR_MAP[code](code)
else:
raise GribInternalError(code)
return wrapper
#
# CFFI reimplementation of gribapi.py functions with codes names
#
def codes_grib_new_from_file(fileobj, product_kind=CODES_PRODUCT_GRIB, context=None):
if context is None:
context = ffi.NULL
try:
retval = check_last(lib.codes_handle_new_from_file)(context, fileobj, product_kind)
if retval == ffi.NULL:
raise EOFError("End of file: %r" % fileobj)
else:
return retval
except GribInternalError as ex:
if ex.code == lib.GRIB_END_OF_FILE:
raise EOFError("End of file: %r" % fileobj)
raise
def codes_clone(handle):
# type: (cffi.FFI.CData) -> cffi.FFI.CData
cloned_handle = lib.codes_handle_clone(handle)
if cloned_handle is ffi.NULL:
raise GribInternalError(lib.GRIB_NULL_POINTER)
return cloned_handle
codes_release = lib.codes_handle_delete
_codes_get_size = check_return(lib.codes_get_size)
def codes_get_size(handle, key):
# type: (cffi.FFI.CData, str) -> int
"""
Get the number of coded value from a key.
If several keys of the same name are present, the total sum is returned.
:param bytes key: the keyword to get the size of
:rtype: int
"""
size = ffi.new('size_t *')
_codes_get_size(handle, key.encode(ENC), size)
return size[0]
_codes_get_length = check_return(lib.codes_get_length)
def codes_get_string_length(handle, key):
# type: (cffi.FFI.CData, str) -> int
"""
Get the length of the string representation of the key.
If several keys of the same name are present, the maximum length is returned.
:param bytes key: the keyword to get the string representation size of.
:rtype: int
"""
size = ffi.new('size_t *')
_codes_get_length(handle, key.encode(ENC), size)
return size[0]
_codes_get_bytes = check_return(lib.codes_get_bytes)
def codes_get_bytes_array(handle, key, size):
# type: (cffi.FFI.CData, str, int) -> T.List[int]
"""
Get unsigned chars array values from a key.
:param bytes key: the keyword whose value(s) are to be extracted
:rtype: List(int)
"""
values = ffi.new('unsigned char[]', size)
size_p = ffi.new('size_t *', size)
_codes_get_bytes(handle, key.encode(ENC), values, size_p)
return list(values)
_codes_get_long_array = check_return(lib.codes_get_long_array)
def codes_get_long_array(handle, key, size):
# type: (cffi.FFI.CData, str, int) -> T.List[int]
"""
Get long array values from a key.
:param bytes key: the keyword whose value(s) are to be extracted
:rtype: List(int)
"""
values = ffi.new('long[]', size)
size_p = ffi.new('size_t *', size)
_codes_get_long_array(handle, key.encode(ENC), values, size_p)
return list(values)
_codes_get_double_array = check_return(lib.codes_get_double_array)
def codes_get_double_array(handle, key, size):
# type: (cffi.FFI.CData, str, int) -> T.List[float]
"""
Get double array values from a key.
:param bytes key: the keyword whose value(s) are to be extracted
:rtype: T.List(float)
"""
values = ffi.new('double[]', size)
size_p = ffi.new('size_t *', size)
_codes_get_double_array(handle, key.encode(ENC), values, size_p)
return list(values)
_codes_get_string_array = check_return(lib.codes_get_string_array)
def codes_get_long(handle, key):
# type: (cffi.FFI.CData, str) -> int
value = ffi.new('long *')
_codes_get_long = check_return(lib.codes_get_long)
_codes_get_long(handle, key.encode(ENC), value)
return value[0]
def codes_get_double(handle, key):
# type: (cffi.FFI.CData, str) -> int
value = ffi.new('double *')
_codes_get_long = check_return(lib.codes_get_double)
_codes_get_long(handle, key.encode(ENC), value)
return value[0]
def codes_get_string(handle, key, length=None):
# type: (cffi.FFI.CData, str, int) -> str
"""
Get string element from a key.
It may or may not fail in case there are more than one key in a message.
Outputs the last element.
:param bytes key: the keyword to select the value of
:param bool strict: flag to select if the method should fail in case of
more than one key in single message
:rtype: bytes
"""
if length is None:
length = codes_get_string_length(handle, key)
values = ffi.new('char[]', length)
length_p = ffi.new('size_t *', length)
_codes_get_string = check_return(lib.codes_get_string)
_codes_get_string(handle, key.encode(ENC), values, length_p)
return ffi.string(values, length_p[0]).decode(ENC)
_codes_get_native_type = check_return(lib.codes_get_native_type)
def codes_get_native_type(handle, key):
# type: (cffi.FFI.CData, str) -> int
grib_type = ffi.new('int *')
_codes_get_native_type(handle, key.encode(ENC), grib_type)
return KEYTYPES.get(grib_type[0], grib_type[0])
def codes_get_array(handle, key, key_type=None, size=None, length=None, log=LOG):
# type: (cffi.FFI.CData, str, int, int, int, logging.Logger) -> T.Any
if key_type is None:
key_type = codes_get_native_type(handle, key)
if size is None:
size = codes_get_size(handle, key)
if key_type == int:
return codes_get_long_array(handle, key, size)
elif key_type == float:
return codes_get_double_array(handle, key, size)
elif key_type == str:
return codes_get_string_array(handle, key, size, length=length)
elif key_type == CODES_TYPE_BYTES:
return codes_get_bytes_array(handle, key, size)
else:
log.warning("Unknown GRIB key type: %r", key_type)
def codes_get(handle, key, key_type=None, length=None, log=LOG):
# type: (cffi.FFI.CData, str, int, int, logging.Logger) -> T.Any
if key_type is None:
key_type = codes_get_native_type(handle, key)
if key_type == int:
return codes_get_long(handle, key)
elif key_type == float:
return codes_get_double(handle, key)
elif key_type == str:
return codes_get_string(handle, key, length=length)
else:
log.warning("Unknown GRIB key type: %r", key_type)
def codes_keys_iterator_new(handle, flags=CODES_KEYS_ITERATOR_ALL_KEYS, namespace=None):
# type: (cffi.FFI.CData, int, str) -> cffi.FFI.CData
if namespace is None:
bnamespace = ffi.NULL
else:
bnamespace = namespace.encode(ENC)
codes_keys_iterator_new = lib.codes_keys_iterator_new
return codes_keys_iterator_new(handle, flags, bnamespace)
def codes_keys_iterator_next(iterator_id):
return lib.codes_keys_iterator_next(iterator_id)
def codes_keys_iterator_get_name(iterator):
ret = lib.codes_keys_iterator_get_name(iterator)
return ffi.string(ret).decode(ENC)
def codes_keys_iterator_delete(iterator_id):
codes_keys_iterator_delete = check_return(lib.codes_keys_iterator_delete)
codes_keys_iterator_delete(iterator_id)
def codes_get_api_version():
"""
Get the API version.
Returns the version of the API as a string in the format "major.minor.revision".
"""
ver = lib.codes_get_api_version()
patch = ver % 100
ver = ver // 100
minor = ver % 100
major = ver // 100
return "%d.%d.%d" % (major, minor, patch)
def portable_handle_new_from_samples(samplename, product_kind):
#
# re-implement codes_grib_handle_new_from_samples in a portable way.
# imports are here not to pollute the head of the file with (hopfully!) temporary stuff
#
import os.path
import platform
handle = ffi.NULL
if platform.platform().startswith('Windows'):
samples_folder = ffi.string(lib.codes_samples_path(ffi.NULL))
sample_path = os.path.join(samples_folder, samplename + b'.tmpl')
try:
with open(sample_path) as file:
handle = codes_grib_new_from_file(file, product_kind)
except Exception:
pass
return handle
def codes_new_from_samples(samplename, product_kind=CODES_PRODUCT_GRIB):
# type: (str, int) -> cffi.FFI.CData
# work around an ecCodes bug on Windows, hopefully this will go away soon
handle = portable_handle_new_from_samples(samplename, product_kind)
if handle != ffi.NULL:
return handle
# end of work-around
if product_kind == CODES_PRODUCT_GRIB:
handle = lib.codes_grib_handle_new_from_samples(ffi.NULL, samplename.encode(ENC))
elif product_kind == CODES_PRODUCT_BUFR:
handle = lib.codes_bufr_handle_new_from_samples(ffi.NULL, samplename.encode(ENC))
else:
raise NotImplementedError("product kind not supported: %r" % product_kind)
if handle == ffi.NULL:
raise ValueError("sample not found: %r" % samplename)
return handle
def codes_set_long(handle, key, value):
# type: (cffi.FFI.CData, str, int) -> None
codes_set_long = check_return(lib.codes_set_long)
codes_set_long(handle, key.encode(ENC), value)
def codes_set_double(handle, key, value):
# type: (cffi.FFI.CData, str, float) -> None
codes_set_double = check_return(lib.codes_set_double)
codes_set_double(handle, key.encode(ENC), value)
def codes_set_string(handle, key, value):
# type: (cffi.FFI.CData, str, str) -> None
size = ffi.new('size_t *', len(value))
codes_set_string = check_return(lib.codes_set_string)
codes_set_string(handle, key.encode(ENC), value.encode(ENC), size)
def codes_set(handle, key, value):
""""""
if isinstance(value, int):
codes_set_long(handle, key, value)
elif isinstance(value, float):
codes_set_double(handle, key, value)
elif isinstance(value, str):
codes_set_string(handle, key, value)
else:
raise TypeError("Unsupported type %r" % type(value))
def codes_set_double_array(handle, key, values):
# type: (cffi.FFI.CData, str, T.List[float]) -> None
size = len(values)
c_values = ffi.new("double []", values)
codes_set_double_array = check_return(lib.codes_set_double_array)
codes_set_double_array(handle, key.encode(ENC), c_values, size)
def codes_set_long_array(handle, key, values):
# type: (cffi.FFI.CData, str, T.List[int]) -> None
size = len(values)
c_values = ffi.new("long []", values)
codes_set_long_array = check_return(lib.codes_set_long_array)
codes_set_long_array(handle, key.encode(ENC), c_values, size)
def codes_set_array(handle, key, values):
# type: (cffi.FFI.CData, str, T.List[T.Any]) -> None
if len(values) > 0:
if isinstance(values[0], float):
codes_set_double_array(handle, key, values)
elif isinstance(values[0], int):
codes_set_long_array(handle, key, values)
else:
raise TypeError("Unsupported value type: %r" % type(values[0]))
else:
raise ValueError("Cannot set an empty list.")
def codes_grib_multi_support_on(context=None):
if context is None:
context = ffi.NULL
lib.codes_grib_multi_support_on(context)
def codes_grib_multi_support_off(context=None):
if context is None:
context = ffi.NULL
lib.codes_grib_multi_support_off(context)
def codes_write(handle, outfile):
# type: (cffi.FFI.CData, T.BinaryIO) -> None
"""
Write a coded message to a file. If the file does not exist, it is created.
:param str path: (optional) the path to the GRIB file;
defaults to the one of the open index.
"""
mess = ffi.new('const void **')
mess_len = ffi.new('size_t*')
codes_get_message = check_return(lib.codes_get_message)
codes_get_message(handle, mess, mess_len)
message = ffi.buffer(mess[0], size=mess_len[0])
outfile.write(message)
|
ecmwf/cfgrib
|
cfgrib/bindings.py
|
codes_get_string
|
python
|
def codes_get_string(handle, key, length=None):
# type: (cffi.FFI.CData, str, int) -> str
if length is None:
length = codes_get_string_length(handle, key)
values = ffi.new('char[]', length)
length_p = ffi.new('size_t *', length)
_codes_get_string = check_return(lib.codes_get_string)
_codes_get_string(handle, key.encode(ENC), values, length_p)
return ffi.string(values, length_p[0]).decode(ENC)
|
Get string element from a key.
It may or may not fail in case there are more than one key in a message.
Outputs the last element.
:param bytes key: the keyword to select the value of
:param bool strict: flag to select if the method should fail in case of
more than one key in single message
:rtype: bytes
|
train
|
https://github.com/ecmwf/cfgrib/blob/d6d533f49c1eebf78f2f16ed0671c666de08c666/cfgrib/bindings.py#L336-L355
|
[
"def check_return(func):\n\n @functools.wraps(func)\n def wrapper(*args):\n code = func(*args)\n if code != lib.GRIB_SUCCESS:\n if code in ERROR_MAP:\n raise ERROR_MAP[code](code)\n else:\n raise GribInternalError(code)\n\n return wrapper\n",
"def codes_get_string_length(handle, key):\n # type: (cffi.FFI.CData, str) -> int\n \"\"\"\n Get the length of the string representation of the key.\n If several keys of the same name are present, the maximum length is returned.\n\n :param bytes key: the keyword to get the string representation size of.\n\n :rtype: int\n \"\"\"\n size = ffi.new('size_t *')\n _codes_get_length(handle, key.encode(ENC), size)\n return size[0]\n"
] |
#
# Copyright 2017-2019 European Centre for Medium-Range Weather Forecasts (ECMWF).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# Alessandro Amici - B-Open - https://bopen.eu
#
import functools
import logging
import pkgutil
import typing as T # noqa
import cffi
LOG = logging.getLogger(__name__)
ffi = cffi.FFI()
ffi.cdef(
pkgutil.get_data(__name__, 'grib_api.h').decode('utf-8') +
pkgutil.get_data(__name__, 'eccodes.h').decode('utf-8')
)
class RaiseOnAttributeAccess(object):
def __init__(self, exc, message):
self.message = message
self.exc = exc
def __getattr__(self, attr):
raise RuntimeError(self.message) from self.exc
for libname in ['eccodes', 'libeccodes.so', 'libeccodes']:
try:
lib = ffi.dlopen(libname)
LOG.info("ecCodes library found using name '%s'.", libname)
break
except OSError as exc:
# lazy exception
lib = RaiseOnAttributeAccess(exc, 'ecCodes library not found on the system.')
LOG.info("ecCodes library not found using name '%s'.", libname)
# default encoding for ecCodes strings
ENC = 'ascii'
#
# from gribapi.py
#
CODES_PRODUCT_ANY = 0
""" Generic product kind """
CODES_PRODUCT_GRIB = 1
""" GRIB product kind """
CODES_PRODUCT_BUFR = 2
""" BUFR product kind """
CODES_PRODUCT_METAR = 3
""" METAR product kind """
CODES_PRODUCT_GTS = 4
""" GTS product kind """
CODES_PRODUCT_TAF = 5
""" TAF product kind """
# Constants for 'missing'
GRIB_MISSING_DOUBLE = -1e+100
GRIB_MISSING_LONG = 2147483647
CODES_MISSING_DOUBLE = GRIB_MISSING_DOUBLE
CODES_MISSING_LONG = GRIB_MISSING_LONG
#
# Helper values to discriminate key types
#
CODES_TYPE_UNDEFINED = lib.GRIB_TYPE_UNDEFINED
CODES_TYPE_LONG = lib.GRIB_TYPE_LONG
CODES_TYPE_DOUBLE = lib.GRIB_TYPE_DOUBLE
CODES_TYPE_STRING = lib.GRIB_TYPE_STRING
CODES_TYPE_BYTES = lib.GRIB_TYPE_BYTES
CODES_TYPE_SECTION = lib.GRIB_TYPE_SECTION
CODES_TYPE_LABEL = lib.GRIB_TYPE_LABEL
CODES_TYPE_MISSING = lib.GRIB_TYPE_MISSING
KEYTYPES = {
1: int,
2: float,
3: str,
}
CODES_KEYS_ITERATOR_ALL_KEYS = 0
CODES_KEYS_ITERATOR_SKIP_READ_ONLY = (1 << 0)
CODES_KEYS_ITERATOR_SKIP_OPTIONAL = (1 << 1)
CODES_KEYS_ITERATOR_SKIP_EDITION_SPECIFIC = (1 << 2)
CODES_KEYS_ITERATOR_SKIP_CODED = (1 << 3)
CODES_KEYS_ITERATOR_SKIP_COMPUTED = (1 << 4)
CODES_KEYS_ITERATOR_SKIP_DUPLICATES = (1 << 5)
CODES_KEYS_ITERATOR_SKIP_FUNCTION = (1 << 6)
CODES_KEYS_ITERATOR_DUMP_ONLY = (1 << 7)
#
# Helper functions for error reporting
#
def grib_get_error_message(code):
    # type: (int) -> str
    """Return the human-readable ecCodes message for an error *code*."""
    raw = lib.grib_get_error_message(code)
    return ffi.string(raw).decode(ENC)
class GribInternalError(Exception):
    """Base exception for errors reported by the ecCodes C library.

    Wraps a numeric ecCodes error ``code``; the library's message text for
    that code is stored on ``eccode_message``.
    """
    def __init__(self, code, message=None, *args):
        self.code = code
        self.eccode_message = grib_get_error_message(code)
        if message is None:
            # Default message combines the library text with the raw code.
            message = '%s (%s).' % (self.eccode_message, code)
        super(GribInternalError, self).__init__(message, code, *args)
class KeyValueNotFoundError(GribInternalError):
"""Key/value not found."""
class ReadOnlyError(GribInternalError):
"""Value is read only."""
class FileNotFoundError(GribInternalError):
"""File not found."""
ERROR_MAP = {
-18: ReadOnlyError,
-10: KeyValueNotFoundError,
-7: FileNotFoundError,
}
def check_last(func):
    """Wrap a C function whose LAST argument is an ``int *`` status output.

    The wrapper appends a freshly allocated status pointer to the call,
    raises the mapped exception class (falling back to GribInternalError)
    on a non-success status, and otherwise returns the wrapped call's
    return value.
    """
    @functools.wraps(func)
    def wrapper(*args):
        code = ffi.new('int *')
        args += (code,)
        retval = func(*args)
        if code[0] != lib.GRIB_SUCCESS:
            if code[0] in ERROR_MAP:
                raise ERROR_MAP[code[0]](code[0])
            else:
                raise GribInternalError(code[0])
        return retval
    return wrapper
def check_return(func):
    """Wrap a C function that returns an ``int`` status code.

    A non-success status raises the mapped exception class, falling back
    to GribInternalError for unmapped codes; on success nothing is
    returned.
    """
    @functools.wraps(func)
    def wrapper(*args):
        status = func(*args)
        if status == lib.GRIB_SUCCESS:
            return
        exc_class = ERROR_MAP.get(status, GribInternalError)
        raise exc_class(status)
    return wrapper
#
# CFFI reimplementation of gribapi.py functions with codes names
#
def codes_grib_new_from_file(fileobj, product_kind=CODES_PRODUCT_GRIB, context=None):
    # type: (T.IO[bytes], int, cffi.FFI.CData) -> cffi.FFI.CData
    """Read the next message from an open file object and return its handle.

    :param fileobj: open file object positioned at the next message
    :param int product_kind: one of the CODES_PRODUCT_* constants
    :param context: optional ecCodes context; defaults to the library's NULL context
    :raises EOFError: when the end of the file is reached
    """
    if context is None:
        context = ffi.NULL
    try:
        retval = check_last(lib.codes_handle_new_from_file)(context, fileobj, product_kind)
        if retval == ffi.NULL:
            # a NULL handle with a success status is also treated as end-of-file
            raise EOFError("End of file: %r" % fileobj)
        else:
            return retval
    except GribInternalError as ex:
        # translate the library's end-of-file status into the Python idiom
        if ex.code == lib.GRIB_END_OF_FILE:
            raise EOFError("End of file: %r" % fileobj)
        raise
def codes_clone(handle):
    # type: (cffi.FFI.CData) -> cffi.FFI.CData
    """Return an independent copy of the message behind *handle*."""
    copy = lib.codes_handle_clone(handle)
    if copy is ffi.NULL:
        raise GribInternalError(lib.GRIB_NULL_POINTER)
    return copy
codes_release = lib.codes_handle_delete
_codes_get_size = check_return(lib.codes_get_size)
def codes_get_size(handle, key):
    # type: (cffi.FFI.CData, str) -> int
    """
    Get the number of coded values stored under a key.

    If several keys of the same name are present, the total sum is returned.

    :param str key: the keyword to get the size of

    :rtype: int
    """
    size_p = ffi.new('size_t *')
    _codes_get_size(handle, key.encode(ENC), size_p)
    return size_p[0]
_codes_get_length = check_return(lib.codes_get_length)
def codes_get_string_length(handle, key):
    # type: (cffi.FFI.CData, str) -> int
    """
    Get the length of the string representation of a key.

    If several keys of the same name are present, the maximum length is
    returned.

    :param str key: the keyword to get the string representation size of

    :rtype: int
    """
    length_p = ffi.new('size_t *')
    _codes_get_length(handle, key.encode(ENC), length_p)
    return length_p[0]
_codes_get_bytes = check_return(lib.codes_get_bytes)
def codes_get_bytes_array(handle, key, size):
# type: (cffi.FFI.CData, str, int) -> T.List[int]
"""
Get unsigned chars array values from a key.
:param bytes key: the keyword whose value(s) are to be extracted
:rtype: List(int)
"""
values = ffi.new('unsigned char[]', size)
size_p = ffi.new('size_t *', size)
_codes_get_bytes(handle, key.encode(ENC), values, size_p)
return list(values)
_codes_get_long_array = check_return(lib.codes_get_long_array)
def codes_get_long_array(handle, key, size):
# type: (cffi.FFI.CData, str, int) -> T.List[int]
"""
Get long array values from a key.
:param bytes key: the keyword whose value(s) are to be extracted
:rtype: List(int)
"""
values = ffi.new('long[]', size)
size_p = ffi.new('size_t *', size)
_codes_get_long_array(handle, key.encode(ENC), values, size_p)
return list(values)
_codes_get_double_array = check_return(lib.codes_get_double_array)
def codes_get_double_array(handle, key, size):
    # type: (cffi.FFI.CData, str, int) -> T.List[float]
    """
    Get double array values from a key.

    :param str key: the keyword whose value(s) are to be extracted

    :rtype: T.List[float]
    """
    buf = ffi.new('double[]', size)
    size_p = ffi.new('size_t *', size)
    _codes_get_double_array(handle, key.encode(ENC), buf, size_p)
    return list(buf)
_codes_get_string_array = check_return(lib.codes_get_string_array)
def codes_get_string_array(handle, key, size, length=None):
    # type: (cffi.FFI.CData, str, int, int) -> T.List[str]
    """
    Get string array values from a key.

    Each element is decoded to ``str``; when *length* is not given, the
    per-element buffer size is taken from the key's string length.

    :param str key: the keyword whose value(s) are to be extracted

    :rtype: T.List[str]
    """
    if length is None:
        length = codes_get_string_length(handle, key)
    # the list keeps the per-element char buffers alive for the C call
    values_keepalive = [ffi.new('char[]', length) for _ in range(size)]
    values = ffi.new('char*[]', values_keepalive)
    size_p = ffi.new('size_t *', size)
    _codes_get_string_array(handle, key.encode(ENC), values, size_p)
    return [ffi.string(values[i]).decode(ENC) for i in range(size_p[0])]
def codes_get_long(handle, key):
# type: (cffi.FFI.CData, str) -> int
value = ffi.new('long *')
_codes_get_long = check_return(lib.codes_get_long)
_codes_get_long(handle, key.encode(ENC), value)
return value[0]
def codes_get_double(handle, key):
    # type: (cffi.FFI.CData, str) -> float
    """Return the scalar double value stored under *key* as a Python float."""
    value = ffi.new('double *')
    # FIX: the local was misleadingly named ``_codes_get_long`` although it
    # wraps ``lib.codes_get_double``; the return-type comment said ``int``.
    _codes_get_double = check_return(lib.codes_get_double)
    _codes_get_double(handle, key.encode(ENC), value)
    return value[0]
_codes_get_native_type = check_return(lib.codes_get_native_type)
def codes_get_native_type(handle, key):
# type: (cffi.FFI.CData, str) -> int
grib_type = ffi.new('int *')
_codes_get_native_type(handle, key.encode(ENC), grib_type)
return KEYTYPES.get(grib_type[0], grib_type[0])
def codes_get_array(handle, key, key_type=None, size=None, length=None, log=LOG):
# type: (cffi.FFI.CData, str, int, int, int, logging.Logger) -> T.Any
if key_type is None:
key_type = codes_get_native_type(handle, key)
if size is None:
size = codes_get_size(handle, key)
if key_type == int:
return codes_get_long_array(handle, key, size)
elif key_type == float:
return codes_get_double_array(handle, key, size)
elif key_type == str:
return codes_get_string_array(handle, key, size, length=length)
elif key_type == CODES_TYPE_BYTES:
return codes_get_bytes_array(handle, key, size)
else:
log.warning("Unknown GRIB key type: %r", key_type)
def codes_get(handle, key, key_type=None, length=None, log=LOG):
# type: (cffi.FFI.CData, str, int, int, logging.Logger) -> T.Any
if key_type is None:
key_type = codes_get_native_type(handle, key)
if key_type == int:
return codes_get_long(handle, key)
elif key_type == float:
return codes_get_double(handle, key)
elif key_type == str:
return codes_get_string(handle, key, length=length)
else:
log.warning("Unknown GRIB key type: %r", key_type)
def codes_keys_iterator_new(handle, flags=CODES_KEYS_ITERATOR_ALL_KEYS, namespace=None):
# type: (cffi.FFI.CData, int, str) -> cffi.FFI.CData
if namespace is None:
bnamespace = ffi.NULL
else:
bnamespace = namespace.encode(ENC)
codes_keys_iterator_new = lib.codes_keys_iterator_new
return codes_keys_iterator_new(handle, flags, bnamespace)
def codes_keys_iterator_next(iterator_id):
return lib.codes_keys_iterator_next(iterator_id)
def codes_keys_iterator_get_name(iterator):
ret = lib.codes_keys_iterator_get_name(iterator)
return ffi.string(ret).decode(ENC)
def codes_keys_iterator_delete(iterator_id):
codes_keys_iterator_delete = check_return(lib.codes_keys_iterator_delete)
codes_keys_iterator_delete(iterator_id)
def codes_get_api_version():
    """
    Get the API version.

    Returns the version of the API as a string in the format "major.minor.revision".
    """
    packed = lib.codes_get_api_version()
    # the library encodes the version as major * 10000 + minor * 100 + patch
    major, remainder = divmod(packed, 10000)
    minor, patch = divmod(remainder, 100)
    return "%d.%d.%d" % (major, minor, patch)
def portable_handle_new_from_samples(samplename, product_kind):
    # type: (str, int) -> cffi.FFI.CData
    """Windows work-around for ``codes_grib_handle_new_from_samples``.

    Locates the sample file in the ecCodes samples folder and loads it via
    ``codes_grib_new_from_file``.  Returns ``ffi.NULL`` when not on Windows
    or when the sample cannot be opened, so callers fall back to the
    native loader.
    """
    #
    # re-implement codes_grib_handle_new_from_samples in a portable way.
    # imports are here not to pollute the head of the file with (hopfully!) temporary stuff
    #
    import os.path
    import platform
    handle = ffi.NULL
    if platform.platform().startswith('Windows'):
        samples_folder = ffi.string(lib.codes_samples_path(ffi.NULL))
        # FIX: ``ffi.string`` returns bytes, so the str sample name must be
        # encoded before concatenation (str + bytes raises TypeError).
        sample_path = os.path.join(samples_folder, samplename.encode(ENC) + b'.tmpl')
        try:
            # FIX: GRIB samples are binary; open in 'rb' so newline
            # translation cannot corrupt the data handed to the C reader.
            with open(sample_path, 'rb') as file:
                handle = codes_grib_new_from_file(file, product_kind)
        except Exception:
            # deliberate best-effort: any failure falls through to ffi.NULL
            pass
    return handle
def codes_new_from_samples(samplename, product_kind=CODES_PRODUCT_GRIB):
    # type: (str, int) -> cffi.FFI.CData
    """Create a new message handle from a named ecCodes sample.

    :param str samplename: sample name without the ``.tmpl`` extension
    :param int product_kind: CODES_PRODUCT_GRIB or CODES_PRODUCT_BUFR
    :raises NotImplementedError: for any other product kind
    :raises ValueError: if the sample cannot be found
    """
    # work around an ecCodes bug on Windows, hopefully this will go away soon
    handle = portable_handle_new_from_samples(samplename, product_kind)
    if handle != ffi.NULL:
        return handle
    # end of work-around
    if product_kind == CODES_PRODUCT_GRIB:
        handle = lib.codes_grib_handle_new_from_samples(ffi.NULL, samplename.encode(ENC))
    elif product_kind == CODES_PRODUCT_BUFR:
        handle = lib.codes_bufr_handle_new_from_samples(ffi.NULL, samplename.encode(ENC))
    else:
        raise NotImplementedError("product kind not supported: %r" % product_kind)
    if handle == ffi.NULL:
        raise ValueError("sample not found: %r" % samplename)
    return handle
def codes_set_long(handle, key, value):
# type: (cffi.FFI.CData, str, int) -> None
codes_set_long = check_return(lib.codes_set_long)
codes_set_long(handle, key.encode(ENC), value)
def codes_set_double(handle, key, value):
# type: (cffi.FFI.CData, str, float) -> None
codes_set_double = check_return(lib.codes_set_double)
codes_set_double(handle, key.encode(ENC), value)
def codes_set_string(handle, key, value):
# type: (cffi.FFI.CData, str, str) -> None
size = ffi.new('size_t *', len(value))
codes_set_string = check_return(lib.codes_set_string)
codes_set_string(handle, key.encode(ENC), value.encode(ENC), size)
def codes_set(handle, key, value):
    # type: (cffi.FFI.CData, str, T.Any) -> None
    """Set a scalar key, dispatching on the Python type of *value*.

    ``int`` -> codes_set_long, ``float`` -> codes_set_double,
    ``str`` -> codes_set_string.

    :raises TypeError: for any other value type
    """
    if isinstance(value, int):
        codes_set_long(handle, key, value)
    elif isinstance(value, float):
        codes_set_double(handle, key, value)
    elif isinstance(value, str):
        codes_set_string(handle, key, value)
    else:
        raise TypeError("Unsupported type %r" % type(value))
def codes_set_double_array(handle, key, values):
# type: (cffi.FFI.CData, str, T.List[float]) -> None
size = len(values)
c_values = ffi.new("double []", values)
codes_set_double_array = check_return(lib.codes_set_double_array)
codes_set_double_array(handle, key.encode(ENC), c_values, size)
def codes_set_long_array(handle, key, values):
# type: (cffi.FFI.CData, str, T.List[int]) -> None
size = len(values)
c_values = ffi.new("long []", values)
codes_set_long_array = check_return(lib.codes_set_long_array)
codes_set_long_array(handle, key.encode(ENC), c_values, size)
def codes_set_array(handle, key, values):
    # type: (cffi.FFI.CData, str, T.List[T.Any]) -> None
    """Set an array key, dispatching on the type of the first element.

    :raises ValueError: when *values* is empty
    :raises TypeError: when the first element is neither float nor int
    """
    if not values:
        raise ValueError("Cannot set an empty list.")
    first = values[0]
    if isinstance(first, float):
        codes_set_double_array(handle, key, values)
    elif isinstance(first, int):
        codes_set_long_array(handle, key, values)
    else:
        raise TypeError("Unsupported value type: %r" % type(values[0]))
def codes_grib_multi_support_on(context=None):
if context is None:
context = ffi.NULL
lib.codes_grib_multi_support_on(context)
def codes_grib_multi_support_off(context=None):
if context is None:
context = ffi.NULL
lib.codes_grib_multi_support_off(context)
def codes_write(handle, outfile):
# type: (cffi.FFI.CData, T.BinaryIO) -> None
"""
Write a coded message to a file. If the file does not exist, it is created.
:param str path: (optional) the path to the GRIB file;
defaults to the one of the open index.
"""
mess = ffi.new('const void **')
mess_len = ffi.new('size_t*')
codes_get_message = check_return(lib.codes_get_message)
codes_get_message(handle, mess, mess_len)
message = ffi.buffer(mess[0], size=mess_len[0])
outfile.write(message)
|
ecmwf/cfgrib
|
cfgrib/bindings.py
|
codes_get_api_version
|
python
|
def codes_get_api_version():
ver = lib.codes_get_api_version()
patch = ver % 100
ver = ver // 100
minor = ver % 100
major = ver // 100
return "%d.%d.%d" % (major, minor, patch)
|
Get the API version.
Returns the version of the API as a string in the format "major.minor.revision".
|
train
|
https://github.com/ecmwf/cfgrib/blob/d6d533f49c1eebf78f2f16ed0671c666de08c666/cfgrib/bindings.py#L427-L439
| null |
#
# Copyright 2017-2019 European Centre for Medium-Range Weather Forecasts (ECMWF).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# Alessandro Amici - B-Open - https://bopen.eu
#
import functools
import logging
import pkgutil
import typing as T # noqa
import cffi
LOG = logging.getLogger(__name__)
ffi = cffi.FFI()
ffi.cdef(
pkgutil.get_data(__name__, 'grib_api.h').decode('utf-8') +
pkgutil.get_data(__name__, 'eccodes.h').decode('utf-8')
)
class RaiseOnAttributeAccess(object):
def __init__(self, exc, message):
self.message = message
self.exc = exc
def __getattr__(self, attr):
raise RuntimeError(self.message) from self.exc
for libname in ['eccodes', 'libeccodes.so', 'libeccodes']:
try:
lib = ffi.dlopen(libname)
LOG.info("ecCodes library found using name '%s'.", libname)
break
except OSError as exc:
# lazy exception
lib = RaiseOnAttributeAccess(exc, 'ecCodes library not found on the system.')
LOG.info("ecCodes library not found using name '%s'.", libname)
# default encoding for ecCodes strings
ENC = 'ascii'
#
# from gribapi.py
#
CODES_PRODUCT_ANY = 0
""" Generic product kind """
CODES_PRODUCT_GRIB = 1
""" GRIB product kind """
CODES_PRODUCT_BUFR = 2
""" BUFR product kind """
CODES_PRODUCT_METAR = 3
""" METAR product kind """
CODES_PRODUCT_GTS = 4
""" GTS product kind """
CODES_PRODUCT_TAF = 5
""" TAF product kind """
# Constants for 'missing'
GRIB_MISSING_DOUBLE = -1e+100
GRIB_MISSING_LONG = 2147483647
CODES_MISSING_DOUBLE = GRIB_MISSING_DOUBLE
CODES_MISSING_LONG = GRIB_MISSING_LONG
#
# Helper values to discriminate key types
#
CODES_TYPE_UNDEFINED = lib.GRIB_TYPE_UNDEFINED
CODES_TYPE_LONG = lib.GRIB_TYPE_LONG
CODES_TYPE_DOUBLE = lib.GRIB_TYPE_DOUBLE
CODES_TYPE_STRING = lib.GRIB_TYPE_STRING
CODES_TYPE_BYTES = lib.GRIB_TYPE_BYTES
CODES_TYPE_SECTION = lib.GRIB_TYPE_SECTION
CODES_TYPE_LABEL = lib.GRIB_TYPE_LABEL
CODES_TYPE_MISSING = lib.GRIB_TYPE_MISSING
KEYTYPES = {
1: int,
2: float,
3: str,
}
CODES_KEYS_ITERATOR_ALL_KEYS = 0
CODES_KEYS_ITERATOR_SKIP_READ_ONLY = (1 << 0)
CODES_KEYS_ITERATOR_SKIP_OPTIONAL = (1 << 1)
CODES_KEYS_ITERATOR_SKIP_EDITION_SPECIFIC = (1 << 2)
CODES_KEYS_ITERATOR_SKIP_CODED = (1 << 3)
CODES_KEYS_ITERATOR_SKIP_COMPUTED = (1 << 4)
CODES_KEYS_ITERATOR_SKIP_DUPLICATES = (1 << 5)
CODES_KEYS_ITERATOR_SKIP_FUNCTION = (1 << 6)
CODES_KEYS_ITERATOR_DUMP_ONLY = (1 << 7)
#
# Helper functions for error reporting
#
def grib_get_error_message(code):
# type: (int) -> str
message = lib.grib_get_error_message(code)
return ffi.string(message).decode(ENC)
class GribInternalError(Exception):
def __init__(self, code, message=None, *args):
self.code = code
self.eccode_message = grib_get_error_message(code)
if message is None:
message = '%s (%s).' % (self.eccode_message, code)
super(GribInternalError, self).__init__(message, code, *args)
class KeyValueNotFoundError(GribInternalError):
"""Key/value not found."""
class ReadOnlyError(GribInternalError):
"""Value is read only."""
class FileNotFoundError(GribInternalError):
"""File not found."""
ERROR_MAP = {
-18: ReadOnlyError,
-10: KeyValueNotFoundError,
-7: FileNotFoundError,
}
def check_last(func):
@functools.wraps(func)
def wrapper(*args):
code = ffi.new('int *')
args += (code,)
retval = func(*args)
if code[0] != lib.GRIB_SUCCESS:
if code[0] in ERROR_MAP:
raise ERROR_MAP[code[0]](code[0])
else:
raise GribInternalError(code[0])
return retval
return wrapper
def check_return(func):
@functools.wraps(func)
def wrapper(*args):
code = func(*args)
if code != lib.GRIB_SUCCESS:
if code in ERROR_MAP:
raise ERROR_MAP[code](code)
else:
raise GribInternalError(code)
return wrapper
#
# CFFI reimplementation of gribapi.py functions with codes names
#
def codes_grib_new_from_file(fileobj, product_kind=CODES_PRODUCT_GRIB, context=None):
if context is None:
context = ffi.NULL
try:
retval = check_last(lib.codes_handle_new_from_file)(context, fileobj, product_kind)
if retval == ffi.NULL:
raise EOFError("End of file: %r" % fileobj)
else:
return retval
except GribInternalError as ex:
if ex.code == lib.GRIB_END_OF_FILE:
raise EOFError("End of file: %r" % fileobj)
raise
def codes_clone(handle):
# type: (cffi.FFI.CData) -> cffi.FFI.CData
cloned_handle = lib.codes_handle_clone(handle)
if cloned_handle is ffi.NULL:
raise GribInternalError(lib.GRIB_NULL_POINTER)
return cloned_handle
codes_release = lib.codes_handle_delete
_codes_get_size = check_return(lib.codes_get_size)
def codes_get_size(handle, key):
# type: (cffi.FFI.CData, str) -> int
"""
Get the number of coded value from a key.
If several keys of the same name are present, the total sum is returned.
:param bytes key: the keyword to get the size of
:rtype: int
"""
size = ffi.new('size_t *')
_codes_get_size(handle, key.encode(ENC), size)
return size[0]
_codes_get_length = check_return(lib.codes_get_length)
def codes_get_string_length(handle, key):
# type: (cffi.FFI.CData, str) -> int
"""
Get the length of the string representation of the key.
If several keys of the same name are present, the maximum length is returned.
:param bytes key: the keyword to get the string representation size of.
:rtype: int
"""
size = ffi.new('size_t *')
_codes_get_length(handle, key.encode(ENC), size)
return size[0]
_codes_get_bytes = check_return(lib.codes_get_bytes)
def codes_get_bytes_array(handle, key, size):
# type: (cffi.FFI.CData, str, int) -> T.List[int]
"""
Get unsigned chars array values from a key.
:param bytes key: the keyword whose value(s) are to be extracted
:rtype: List(int)
"""
values = ffi.new('unsigned char[]', size)
size_p = ffi.new('size_t *', size)
_codes_get_bytes(handle, key.encode(ENC), values, size_p)
return list(values)
_codes_get_long_array = check_return(lib.codes_get_long_array)
def codes_get_long_array(handle, key, size):
# type: (cffi.FFI.CData, str, int) -> T.List[int]
"""
Get long array values from a key.
:param bytes key: the keyword whose value(s) are to be extracted
:rtype: List(int)
"""
values = ffi.new('long[]', size)
size_p = ffi.new('size_t *', size)
_codes_get_long_array(handle, key.encode(ENC), values, size_p)
return list(values)
_codes_get_double_array = check_return(lib.codes_get_double_array)
def codes_get_double_array(handle, key, size):
# type: (cffi.FFI.CData, str, int) -> T.List[float]
"""
Get double array values from a key.
:param bytes key: the keyword whose value(s) are to be extracted
:rtype: T.List(float)
"""
values = ffi.new('double[]', size)
size_p = ffi.new('size_t *', size)
_codes_get_double_array(handle, key.encode(ENC), values, size_p)
return list(values)
_codes_get_string_array = check_return(lib.codes_get_string_array)
def codes_get_string_array(handle, key, size, length=None):
# type: (cffi.FFI.CData, bytes, int, int) -> T.List[bytes]
"""
Get string array values from a key.
:param bytes key: the keyword whose value(s) are to be extracted
:rtype: T.List[bytes]
"""
if length is None:
length = codes_get_string_length(handle, key)
values_keepalive = [ffi.new('char[]', length) for _ in range(size)]
values = ffi.new('char*[]', values_keepalive)
size_p = ffi.new('size_t *', size)
_codes_get_string_array(handle, key.encode(ENC), values, size_p)
return [ffi.string(values[i]).decode(ENC) for i in range(size_p[0])]
def codes_get_long(handle, key):
# type: (cffi.FFI.CData, str) -> int
value = ffi.new('long *')
_codes_get_long = check_return(lib.codes_get_long)
_codes_get_long(handle, key.encode(ENC), value)
return value[0]
def codes_get_double(handle, key):
# type: (cffi.FFI.CData, str) -> int
value = ffi.new('double *')
_codes_get_long = check_return(lib.codes_get_double)
_codes_get_long(handle, key.encode(ENC), value)
return value[0]
def codes_get_string(handle, key, length=None):
# type: (cffi.FFI.CData, str, int) -> str
"""
Get string element from a key.
It may or may not fail in case there are more than one key in a message.
Outputs the last element.
:param bytes key: the keyword to select the value of
:param bool strict: flag to select if the method should fail in case of
more than one key in single message
:rtype: bytes
"""
if length is None:
length = codes_get_string_length(handle, key)
values = ffi.new('char[]', length)
length_p = ffi.new('size_t *', length)
_codes_get_string = check_return(lib.codes_get_string)
_codes_get_string(handle, key.encode(ENC), values, length_p)
return ffi.string(values, length_p[0]).decode(ENC)
_codes_get_native_type = check_return(lib.codes_get_native_type)
def codes_get_native_type(handle, key):
# type: (cffi.FFI.CData, str) -> int
grib_type = ffi.new('int *')
_codes_get_native_type(handle, key.encode(ENC), grib_type)
return KEYTYPES.get(grib_type[0], grib_type[0])
def codes_get_array(handle, key, key_type=None, size=None, length=None, log=LOG):
# type: (cffi.FFI.CData, str, int, int, int, logging.Logger) -> T.Any
if key_type is None:
key_type = codes_get_native_type(handle, key)
if size is None:
size = codes_get_size(handle, key)
if key_type == int:
return codes_get_long_array(handle, key, size)
elif key_type == float:
return codes_get_double_array(handle, key, size)
elif key_type == str:
return codes_get_string_array(handle, key, size, length=length)
elif key_type == CODES_TYPE_BYTES:
return codes_get_bytes_array(handle, key, size)
else:
log.warning("Unknown GRIB key type: %r", key_type)
def codes_get(handle, key, key_type=None, length=None, log=LOG):
# type: (cffi.FFI.CData, str, int, int, logging.Logger) -> T.Any
if key_type is None:
key_type = codes_get_native_type(handle, key)
if key_type == int:
return codes_get_long(handle, key)
elif key_type == float:
return codes_get_double(handle, key)
elif key_type == str:
return codes_get_string(handle, key, length=length)
else:
log.warning("Unknown GRIB key type: %r", key_type)
def codes_keys_iterator_new(handle, flags=CODES_KEYS_ITERATOR_ALL_KEYS, namespace=None):
# type: (cffi.FFI.CData, int, str) -> cffi.FFI.CData
if namespace is None:
bnamespace = ffi.NULL
else:
bnamespace = namespace.encode(ENC)
codes_keys_iterator_new = lib.codes_keys_iterator_new
return codes_keys_iterator_new(handle, flags, bnamespace)
def codes_keys_iterator_next(iterator_id):
return lib.codes_keys_iterator_next(iterator_id)
def codes_keys_iterator_get_name(iterator):
ret = lib.codes_keys_iterator_get_name(iterator)
return ffi.string(ret).decode(ENC)
def codes_keys_iterator_delete(iterator_id):
codes_keys_iterator_delete = check_return(lib.codes_keys_iterator_delete)
codes_keys_iterator_delete(iterator_id)
def portable_handle_new_from_samples(samplename, product_kind):
#
# re-implement codes_grib_handle_new_from_samples in a portable way.
# imports are here not to pollute the head of the file with (hopfully!) temporary stuff
#
import os.path
import platform
handle = ffi.NULL
if platform.platform().startswith('Windows'):
samples_folder = ffi.string(lib.codes_samples_path(ffi.NULL))
sample_path = os.path.join(samples_folder, samplename + b'.tmpl')
try:
with open(sample_path) as file:
handle = codes_grib_new_from_file(file, product_kind)
except Exception:
pass
return handle
def codes_new_from_samples(samplename, product_kind=CODES_PRODUCT_GRIB):
# type: (str, int) -> cffi.FFI.CData
# work around an ecCodes bug on Windows, hopefully this will go away soon
handle = portable_handle_new_from_samples(samplename, product_kind)
if handle != ffi.NULL:
return handle
# end of work-around
if product_kind == CODES_PRODUCT_GRIB:
handle = lib.codes_grib_handle_new_from_samples(ffi.NULL, samplename.encode(ENC))
elif product_kind == CODES_PRODUCT_BUFR:
handle = lib.codes_bufr_handle_new_from_samples(ffi.NULL, samplename.encode(ENC))
else:
raise NotImplementedError("product kind not supported: %r" % product_kind)
if handle == ffi.NULL:
raise ValueError("sample not found: %r" % samplename)
return handle
def codes_set_long(handle, key, value):
# type: (cffi.FFI.CData, str, int) -> None
codes_set_long = check_return(lib.codes_set_long)
codes_set_long(handle, key.encode(ENC), value)
def codes_set_double(handle, key, value):
# type: (cffi.FFI.CData, str, float) -> None
codes_set_double = check_return(lib.codes_set_double)
codes_set_double(handle, key.encode(ENC), value)
def codes_set_string(handle, key, value):
# type: (cffi.FFI.CData, str, str) -> None
size = ffi.new('size_t *', len(value))
codes_set_string = check_return(lib.codes_set_string)
codes_set_string(handle, key.encode(ENC), value.encode(ENC), size)
def codes_set(handle, key, value):
""""""
if isinstance(value, int):
codes_set_long(handle, key, value)
elif isinstance(value, float):
codes_set_double(handle, key, value)
elif isinstance(value, str):
codes_set_string(handle, key, value)
else:
raise TypeError("Unsupported type %r" % type(value))
def codes_set_double_array(handle, key, values):
# type: (cffi.FFI.CData, str, T.List[float]) -> None
size = len(values)
c_values = ffi.new("double []", values)
codes_set_double_array = check_return(lib.codes_set_double_array)
codes_set_double_array(handle, key.encode(ENC), c_values, size)
def codes_set_long_array(handle, key, values):
# type: (cffi.FFI.CData, str, T.List[int]) -> None
size = len(values)
c_values = ffi.new("long []", values)
codes_set_long_array = check_return(lib.codes_set_long_array)
codes_set_long_array(handle, key.encode(ENC), c_values, size)
def codes_set_array(handle, key, values):
# type: (cffi.FFI.CData, str, T.List[T.Any]) -> None
if len(values) > 0:
if isinstance(values[0], float):
codes_set_double_array(handle, key, values)
elif isinstance(values[0], int):
codes_set_long_array(handle, key, values)
else:
raise TypeError("Unsupported value type: %r" % type(values[0]))
else:
raise ValueError("Cannot set an empty list.")
def codes_grib_multi_support_on(context=None):
if context is None:
context = ffi.NULL
lib.codes_grib_multi_support_on(context)
def codes_grib_multi_support_off(context=None):
if context is None:
context = ffi.NULL
lib.codes_grib_multi_support_off(context)
def codes_write(handle, outfile):
# type: (cffi.FFI.CData, T.BinaryIO) -> None
"""
Write a coded message to a file. If the file does not exist, it is created.
:param str path: (optional) the path to the GRIB file;
defaults to the one of the open index.
"""
mess = ffi.new('const void **')
mess_len = ffi.new('size_t*')
codes_get_message = check_return(lib.codes_get_message)
codes_get_message(handle, mess, mess_len)
message = ffi.buffer(mess[0], size=mess_len[0])
outfile.write(message)
|
ecmwf/cfgrib
|
cfgrib/bindings.py
|
codes_write
|
python
|
def codes_write(handle, outfile):
# type: (cffi.FFI.CData, T.BinaryIO) -> None
mess = ffi.new('const void **')
mess_len = ffi.new('size_t*')
codes_get_message = check_return(lib.codes_get_message)
codes_get_message(handle, mess, mess_len)
message = ffi.buffer(mess[0], size=mess_len[0])
outfile.write(message)
|
Write a coded message to a file. If the file does not exist, it is created.
:param str path: (optional) the path to the GRIB file;
defaults to the one of the open index.
|
train
|
https://github.com/ecmwf/cfgrib/blob/d6d533f49c1eebf78f2f16ed0671c666de08c666/cfgrib/bindings.py#L553-L566
|
[
"def check_return(func):\n\n @functools.wraps(func)\n def wrapper(*args):\n code = func(*args)\n if code != lib.GRIB_SUCCESS:\n if code in ERROR_MAP:\n raise ERROR_MAP[code](code)\n else:\n raise GribInternalError(code)\n\n return wrapper\n"
] |
#
# Copyright 2017-2019 European Centre for Medium-Range Weather Forecasts (ECMWF).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# Alessandro Amici - B-Open - https://bopen.eu
#
import functools
import logging
import pkgutil
import typing as T # noqa
import cffi
LOG = logging.getLogger(__name__)
ffi = cffi.FFI()
ffi.cdef(
pkgutil.get_data(__name__, 'grib_api.h').decode('utf-8') +
pkgutil.get_data(__name__, 'eccodes.h').decode('utf-8')
)
class RaiseOnAttributeAccess(object):
    """Lazy failure placeholder: any attribute access raises RuntimeError.

    Used as a stand-in for the ecCodes library handle when ``ffi.dlopen``
    fails, so the import itself succeeds and the error surfaces only when
    the library is actually used.
    """

    def __init__(self, exc, message):
        # stored in the instance dict, so reading them does NOT trigger __getattr__
        self.message = message
        self.exc = exc

    def __getattr__(self, attr):
        raise RuntimeError(self.message) from self.exc
for libname in ['eccodes', 'libeccodes.so', 'libeccodes']:
try:
lib = ffi.dlopen(libname)
LOG.info("ecCodes library found using name '%s'.", libname)
break
except OSError as exc:
# lazy exception
lib = RaiseOnAttributeAccess(exc, 'ecCodes library not found on the system.')
LOG.info("ecCodes library not found using name '%s'.", libname)
# default encoding for ecCodes strings
ENC = 'ascii'
#
# from gribapi.py
#
CODES_PRODUCT_ANY = 0
""" Generic product kind """
CODES_PRODUCT_GRIB = 1
""" GRIB product kind """
CODES_PRODUCT_BUFR = 2
""" BUFR product kind """
CODES_PRODUCT_METAR = 3
""" METAR product kind """
CODES_PRODUCT_GTS = 4
""" GTS product kind """
CODES_PRODUCT_TAF = 5
""" TAF product kind """
# Constants for 'missing'
GRIB_MISSING_DOUBLE = -1e+100
GRIB_MISSING_LONG = 2147483647
CODES_MISSING_DOUBLE = GRIB_MISSING_DOUBLE
CODES_MISSING_LONG = GRIB_MISSING_LONG
#
# Helper values to discriminate key types
#
CODES_TYPE_UNDEFINED = lib.GRIB_TYPE_UNDEFINED
CODES_TYPE_LONG = lib.GRIB_TYPE_LONG
CODES_TYPE_DOUBLE = lib.GRIB_TYPE_DOUBLE
CODES_TYPE_STRING = lib.GRIB_TYPE_STRING
CODES_TYPE_BYTES = lib.GRIB_TYPE_BYTES
CODES_TYPE_SECTION = lib.GRIB_TYPE_SECTION
CODES_TYPE_LABEL = lib.GRIB_TYPE_LABEL
CODES_TYPE_MISSING = lib.GRIB_TYPE_MISSING
KEYTYPES = {
1: int,
2: float,
3: str,
}
CODES_KEYS_ITERATOR_ALL_KEYS = 0
CODES_KEYS_ITERATOR_SKIP_READ_ONLY = (1 << 0)
CODES_KEYS_ITERATOR_SKIP_OPTIONAL = (1 << 1)
CODES_KEYS_ITERATOR_SKIP_EDITION_SPECIFIC = (1 << 2)
CODES_KEYS_ITERATOR_SKIP_CODED = (1 << 3)
CODES_KEYS_ITERATOR_SKIP_COMPUTED = (1 << 4)
CODES_KEYS_ITERATOR_SKIP_DUPLICATES = (1 << 5)
CODES_KEYS_ITERATOR_SKIP_FUNCTION = (1 << 6)
CODES_KEYS_ITERATOR_DUMP_ONLY = (1 << 7)
#
# Helper functions for error reporting
#
def grib_get_error_message(code):
    # type: (int) -> str
    """Return the human-readable ecCodes message for an error *code*."""
    raw = lib.grib_get_error_message(code)
    return ffi.string(raw).decode(ENC)
class GribInternalError(Exception):
    """Base exception wrapping an ecCodes status code and its library message."""

    def __init__(self, code, message=None, *args):
        self.code = code
        # resolve the library's own text for the code (kept for introspection)
        self.eccode_message = grib_get_error_message(code)
        if message is None:
            message = '%s (%s).' % (self.eccode_message, code)
        super(GribInternalError, self).__init__(message, code, *args)
class KeyValueNotFoundError(GribInternalError):
    """Key/value not found."""


class ReadOnlyError(GribInternalError):
    """Value is read only."""


class FileNotFoundError(GribInternalError):
    """File not found."""


# Map selected ecCodes status codes to specific exception classes;
# any code not listed here falls back to plain GribInternalError.
ERROR_MAP = {
    -18: ReadOnlyError,
    -10: KeyValueNotFoundError,
    -7: FileNotFoundError,
}
def check_last(func):
    """Wrap a C function whose LAST argument is an ``int *`` error out-param.

    The wrapper appends the out-param automatically and raises the mapped
    exception (or GribInternalError) when the reported status is not success.
    """
    @functools.wraps(func)
    def wrapper(*args):
        err = ffi.new('int *')
        result = func(*(args + (err,)))
        status = err[0]
        if status != lib.GRIB_SUCCESS:
            raise ERROR_MAP.get(status, GribInternalError)(status)
        return result

    return wrapper
def check_return(func):
    """Wrap a C function that RETURNS an ecCodes status code.

    The wrapper raises the mapped exception (or GribInternalError) on any
    non-success status; it intentionally returns None on success.
    """
    @functools.wraps(func)
    def wrapper(*args):
        status = func(*args)
        if status != lib.GRIB_SUCCESS:
            raise ERROR_MAP.get(status, GribInternalError)(status)

    return wrapper
#
# CFFI reimplementation of gribapi.py functions with codes names
#
def codes_grib_new_from_file(fileobj, product_kind=CODES_PRODUCT_GRIB, context=None):
    """Read the next message handle from an open *fileobj*.

    :raises EOFError: when the end of the file is reached (NULL handle or
        the library's GRIB_END_OF_FILE status).
    """
    ctx = ffi.NULL if context is None else context
    try:
        handle = check_last(lib.codes_handle_new_from_file)(ctx, fileobj, product_kind)
    except GribInternalError as ex:
        if ex.code == lib.GRIB_END_OF_FILE:
            raise EOFError("End of file: %r" % fileobj)
        raise
    if handle == ffi.NULL:
        raise EOFError("End of file: %r" % fileobj)
    return handle
def codes_clone(handle):
    # type: (cffi.FFI.CData) -> cffi.FFI.CData
    """Return an independent copy of *handle*; raise on a NULL clone."""
    copy = lib.codes_handle_clone(handle)
    if copy is ffi.NULL:
        raise GribInternalError(lib.GRIB_NULL_POINTER)
    return copy
codes_release = lib.codes_handle_delete
_codes_get_size = check_return(lib.codes_get_size)


def codes_get_size(handle, key):
    # type: (cffi.FFI.CData, str) -> int
    """Return the number of coded values behind *key*.

    If several keys of the same name are present, the total sum is returned.
    """
    out = ffi.new('size_t *')
    _codes_get_size(handle, key.encode(ENC), out)
    return out[0]
_codes_get_length = check_return(lib.codes_get_length)


def codes_get_string_length(handle, key):
    # type: (cffi.FFI.CData, str) -> int
    """Return the buffer length needed for the string form of *key*.

    If several keys of the same name are present, the maximum is returned.
    """
    out = ffi.new('size_t *')
    _codes_get_length(handle, key.encode(ENC), out)
    return out[0]
_codes_get_bytes = check_return(lib.codes_get_bytes)


def codes_get_bytes_array(handle, key, size):
    # type: (cffi.FFI.CData, str, int) -> T.List[int]
    """Return *size* unsigned-char values stored under *key* as a list of ints."""
    buf = ffi.new('unsigned char[]', size)
    buf_len = ffi.new('size_t *', size)
    _codes_get_bytes(handle, key.encode(ENC), buf, buf_len)
    return list(buf)
_codes_get_long_array = check_return(lib.codes_get_long_array)


def codes_get_long_array(handle, key, size):
    # type: (cffi.FFI.CData, str, int) -> T.List[int]
    """Return *size* long values stored under *key* as a list of ints."""
    buf = ffi.new('long[]', size)
    buf_len = ffi.new('size_t *', size)
    _codes_get_long_array(handle, key.encode(ENC), buf, buf_len)
    return list(buf)
_codes_get_double_array = check_return(lib.codes_get_double_array)


def codes_get_double_array(handle, key, size):
    # type: (cffi.FFI.CData, str, int) -> T.List[float]
    """Return *size* double values stored under *key* as a list of floats."""
    buf = ffi.new('double[]', size)
    buf_len = ffi.new('size_t *', size)
    _codes_get_double_array(handle, key.encode(ENC), buf, buf_len)
    return list(buf)
_codes_get_string_array = check_return(lib.codes_get_string_array)


def codes_get_string_array(handle, key, size, length=None):
    # type: (cffi.FFI.CData, str, int, int) -> T.List[str]
    """
    Get string array values from a key.

    :param str key: the keyword whose value(s) are to be extracted
    :param int size: number of strings expected under the key
    :param int length: per-string buffer length; queried from the key if None
    :rtype: T.List[str]
    """
    if length is None:
        length = codes_get_string_length(handle, key)
    # keep each char buffer referenced while the char*[] array points into it,
    # otherwise cffi may garbage-collect the underlying storage
    values_keepalive = [ffi.new('char[]', length) for _ in range(size)]
    values = ffi.new('char*[]', values_keepalive)
    size_p = ffi.new('size_t *', size)
    _codes_get_string_array(handle, key.encode(ENC), values, size_p)
    # the library may return fewer strings than requested: honour size_p
    return [ffi.string(values[i]).decode(ENC) for i in range(size_p[0])]
def codes_get_long(handle, key):
    # type: (cffi.FFI.CData, str) -> int
    """Return the value of *key* as a single long."""
    out = ffi.new('long *')
    getter = check_return(lib.codes_get_long)
    getter(handle, key.encode(ENC), out)
    return out[0]
def codes_get_double(handle, key):
    # type: (cffi.FFI.CData, str) -> float
    """Return the value of *key* as a single double."""
    out = ffi.new('double *')
    # fixed: the local wrapper was misleadingly named _codes_get_long and the
    # type comment claimed an int return, although it wraps codes_get_double
    _codes_get_double = check_return(lib.codes_get_double)
    _codes_get_double(handle, key.encode(ENC), out)
    return out[0]
def codes_get_string(handle, key, length=None):
    # type: (cffi.FFI.CData, str, int) -> str
    """
    Get the string representation of a key.

    It may or may not fail in case there are more than one key in a message.
    Outputs the last element.

    :param str key: the keyword to select the value of
    :param int length: buffer length; queried from the key when None
    :rtype: str
    """
    # fixed docs: the old docstring documented a nonexistent `strict` parameter
    # and claimed a bytes return, although the value is decoded to str below
    if length is None:
        length = codes_get_string_length(handle, key)
    values = ffi.new('char[]', length)
    length_p = ffi.new('size_t *', length)
    _codes_get_string = check_return(lib.codes_get_string)
    _codes_get_string(handle, key.encode(ENC), values, length_p)
    return ffi.string(values, length_p[0]).decode(ENC)
_codes_get_native_type = check_return(lib.codes_get_native_type)


def codes_get_native_type(handle, key):
    """Map *key* to a Python type (int/float/str) or the raw GRIB type id.

    Unknown GRIB type ids fall through KEYTYPES and are returned unchanged.
    """
    native = ffi.new('int *')
    _codes_get_native_type(handle, key.encode(ENC), native)
    return KEYTYPES.get(native[0], native[0])
def codes_get_array(handle, key, key_type=None, size=None, length=None, log=LOG):
    # type: (cffi.FFI.CData, str, int, int, int, logging.Logger) -> T.Any
    """Fetch all values of *key*, dispatching on its (possibly supplied) type.

    Returns None (after logging a warning) for unrecognised key types.
    """
    if key_type is None:
        key_type = codes_get_native_type(handle, key)
    if size is None:
        size = codes_get_size(handle, key)
    if key_type == int:
        return codes_get_long_array(handle, key, size)
    if key_type == float:
        return codes_get_double_array(handle, key, size)
    if key_type == str:
        return codes_get_string_array(handle, key, size, length=length)
    if key_type == CODES_TYPE_BYTES:
        return codes_get_bytes_array(handle, key, size)
    log.warning("Unknown GRIB key type: %r", key_type)
def codes_get(handle, key, key_type=None, length=None, log=LOG):
    # type: (cffi.FFI.CData, str, int, int, logging.Logger) -> T.Any
    """Fetch the scalar value of *key*, dispatching on its (possibly supplied) type.

    Returns None (after logging a warning) for unrecognised key types.
    """
    if key_type is None:
        key_type = codes_get_native_type(handle, key)
    if key_type == int:
        return codes_get_long(handle, key)
    if key_type == float:
        return codes_get_double(handle, key)
    if key_type == str:
        return codes_get_string(handle, key, length=length)
    log.warning("Unknown GRIB key type: %r", key_type)
def codes_keys_iterator_new(handle, flags=CODES_KEYS_ITERATOR_ALL_KEYS, namespace=None):
    # type: (cffi.FFI.CData, int, str) -> cffi.FFI.CData
    """Create a keys iterator over *handle*, optionally limited to *namespace*."""
    bnamespace = ffi.NULL if namespace is None else namespace.encode(ENC)
    return lib.codes_keys_iterator_new(handle, flags, bnamespace)
def codes_keys_iterator_next(iterator_id):
    """Advance the iterator; truthy while more keys remain."""
    return lib.codes_keys_iterator_next(iterator_id)


def codes_keys_iterator_get_name(iterator):
    """Return the current key name as a str."""
    return ffi.string(lib.codes_keys_iterator_get_name(iterator)).decode(ENC)


def codes_keys_iterator_delete(iterator_id):
    """Release the iterator, raising on an ecCodes error status."""
    check_return(lib.codes_keys_iterator_delete)(iterator_id)
def codes_get_api_version():
    """
    Get the API version.

    Returns the version of the API as a string in the format "major.minor.revision".
    """
    # the library packs the version as major*10000 + minor*100 + patch
    packed = lib.codes_get_api_version()
    major, rest = divmod(packed, 10000)
    minor, patch = divmod(rest, 100)
    return "%d.%d.%d" % (major, minor, patch)
def portable_handle_new_from_samples(samplename, product_kind):
    """Re-implement codes_grib_handle_new_from_samples in a portable way.

    On Windows the sample .tmpl file is read directly as a work-around for
    ecCodes sample loading; on other platforms (or any failure) ffi.NULL is
    returned so the caller can fall back to the native loader.
    """
    # imports are here not to pollute the head of the file with (hopefully!) temporary stuff
    import os.path
    import platform

    handle = ffi.NULL
    if platform.platform().startswith('Windows'):
        samples_folder = ffi.string(lib.codes_samples_path(ffi.NULL))
        # fixed: samples_folder is bytes while samplename is str — the old
        # `samplename + b'.tmpl'` raised TypeError outside the try block
        sample_path = os.path.join(samples_folder, samplename.encode(ENC) + b'.tmpl')
        try:
            # fixed: GRIB templates are binary data, so open in 'rb' to avoid
            # newline translation and text decoding errors
            with open(sample_path, 'rb') as file:
                handle = codes_grib_new_from_file(file, product_kind)
        except Exception:
            # best-effort: any failure falls back to the native sample loader
            pass
    return handle
def codes_new_from_samples(samplename, product_kind=CODES_PRODUCT_GRIB):
    # type: (str, int) -> cffi.FFI.CData
    """Create a new message handle from the named sample template.

    :raises NotImplementedError: for product kinds other than GRIB/BUFR
    :raises ValueError: when the sample cannot be found
    """
    # work around an ecCodes bug on Windows, hopefully this will go away soon
    handle = portable_handle_new_from_samples(samplename, product_kind)
    if handle != ffi.NULL:
        return handle
    # end of work-around
    if product_kind == CODES_PRODUCT_GRIB:
        handle = lib.codes_grib_handle_new_from_samples(ffi.NULL, samplename.encode(ENC))
    elif product_kind == CODES_PRODUCT_BUFR:
        handle = lib.codes_bufr_handle_new_from_samples(ffi.NULL, samplename.encode(ENC))
    else:
        raise NotImplementedError("product kind not supported: %r" % product_kind)
    if handle == ffi.NULL:
        raise ValueError("sample not found: %r" % samplename)
    return handle
def codes_set_long(handle, key, value):
    # type: (cffi.FFI.CData, str, int) -> None
    """Set *key* to a long *value*."""
    check_return(lib.codes_set_long)(handle, key.encode(ENC), value)


def codes_set_double(handle, key, value):
    # type: (cffi.FFI.CData, str, float) -> None
    """Set *key* to a double *value*."""
    check_return(lib.codes_set_double)(handle, key.encode(ENC), value)


def codes_set_string(handle, key, value):
    # type: (cffi.FFI.CData, str, str) -> None
    """Set *key* to a string *value*; its length is passed via a size_t out-param."""
    size = ffi.new('size_t *', len(value))
    check_return(lib.codes_set_string)(handle, key.encode(ENC), value.encode(ENC), size)
def codes_set(handle, key, value):
    """Set the value of *key*, dispatching on the Python type of *value*.

    int values go through codes_set_long, float through codes_set_double,
    str through codes_set_string. Note that bool is a subclass of int and
    is therefore stored as a long.

    :raises TypeError: if *value* is of an unsupported type
    """
    # fixed: the docstring was empty ("" "")
    if isinstance(value, int):
        codes_set_long(handle, key, value)
    elif isinstance(value, float):
        codes_set_double(handle, key, value)
    elif isinstance(value, str):
        codes_set_string(handle, key, value)
    else:
        raise TypeError("Unsupported type %r" % type(value))
def codes_set_double_array(handle, key, values):
    # type: (cffi.FFI.CData, str, T.List[float]) -> None
    """Set *key* from a list of floats via a C double array."""
    c_values = ffi.new("double []", values)
    check_return(lib.codes_set_double_array)(handle, key.encode(ENC), c_values, len(values))


def codes_set_long_array(handle, key, values):
    # type: (cffi.FFI.CData, str, T.List[int]) -> None
    """Set *key* from a list of ints via a C long array."""
    c_values = ffi.new("long []", values)
    check_return(lib.codes_set_long_array)(handle, key.encode(ENC), c_values, len(values))
def codes_set_array(handle, key, values):
    # type: (cffi.FFI.CData, str, T.List[T.Any]) -> None
    """Set *key* from a non-empty list, dispatching on the first element's type.

    :raises ValueError: if *values* is empty
    :raises TypeError: if the first element is neither float nor int
    """
    if not values:
        raise ValueError("Cannot set an empty list.")
    first = values[0]
    if isinstance(first, float):
        codes_set_double_array(handle, key, values)
    elif isinstance(first, int):
        codes_set_long_array(handle, key, values)
    else:
        raise TypeError("Unsupported value type: %r" % type(first))
def codes_grib_multi_support_on(context=None):
    """Enable multi-field GRIB message support (default/NULL context if None)."""
    lib.codes_grib_multi_support_on(ffi.NULL if context is None else context)


def codes_grib_multi_support_off(context=None):
    """Disable multi-field GRIB message support (default/NULL context if None)."""
    lib.codes_grib_multi_support_off(ffi.NULL if context is None else context)
|
hyperledger-archives/indy-anoncreds
|
anoncreds/protocol/utils.py
|
get_hash_as_int
|
python
|
def get_hash_as_int(*args, group: cmod.PairingGroup = None):
group = group if group else cmod.PairingGroup(PAIRING_GROUP)
h_challenge = sha256()
serialedArgs = [group.serialize(arg) if isGroupElement(arg)
else cmod.Conversion.IP2OS(arg)
for arg in args]
for arg in sorted(serialedArgs):
h_challenge.update(arg)
return bytes_to_int(h_challenge.digest())
|
Enumerate over the input tuple and generate a hash using the tuple values
:param args: sequence of either group or integer elements
:param group: pairing group if an element is a group element
:return:
|
train
|
https://github.com/hyperledger-archives/indy-anoncreds/blob/9d9cda3d505c312257d99a13d74d8f05dac3091a/anoncreds/protocol/utils.py#L29-L47
|
[
"def bytes_to_int(bytesHash):\n return int.from_bytes(bytesHash, byteorder=byteorder)\n"
] |
import logging
import string
import time
from collections import OrderedDict
from enum import Enum
from hashlib import sha256
from math import sqrt, floor
from random import randint, sample
from sys import byteorder
from typing import Dict, List, Set
import base58
from anoncreds.protocol.globals import KEYS, PK_R
from anoncreds.protocol.globals import LARGE_PRIME, LARGE_MASTER_SECRET, \
LARGE_VPRIME, PAIRING_GROUP
from config.config import cmod
import sys
def encodeAttr(attrValue):
return cmod.Conversion.bytes2integer(sha256(str(attrValue).encode()).digest())
def randomQR(n):
return cmod.random(n) ** 2
CRYPTO_INT_PREFIX = 'CryptoInt_'
INT_PREFIX = 'Int_'
GROUP_PREFIX = 'Group_'
BYTES_PREFIX = 'Bytes_'
def serializeToStr(n):
if isCryptoInteger(n):
return CRYPTO_INT_PREFIX + cmod.serialize(n).decode()
if isInteger(n):
return INT_PREFIX + str(n)
if isGroupElement(n):
return GROUP_PREFIX + cmod.PairingGroup(PAIRING_GROUP).serialize(
n).decode()
return n
def deserializeFromStr(n: str):
if isStr(n) and n.startswith(CRYPTO_INT_PREFIX):
n = n[len(CRYPTO_INT_PREFIX):].encode()
return cmod.deserialize(n)
if isStr(n) and n.startswith(INT_PREFIX):
n = n[len(INT_PREFIX):]
return int(n)
if isStr(n) and n.startswith(GROUP_PREFIX):
n = n[len(GROUP_PREFIX):].encode()
res = cmod.PairingGroup(PAIRING_GROUP).deserialize(n)
# A fix for Identity element as serialized/deserialized not correctly
if str(res) == '[0, 0]':
return groupIdentityG1()
return res
return n
def isCryptoInteger(n):
return isinstance(n, cmod.integer)
def isGroupElement(n):
return isinstance(n, cmod.pc_element)
def isInteger(n):
return isinstance(n, int)
def isStr(n):
return isinstance(n, str)
def isNamedTuple(n):
return isinstance(n, tuple) # TODO: assume it's a named tuple
def toDictWithStrValues(d):
if isNamedTuple(d):
return toDictWithStrValues(d._asdict())
if not isinstance(d, Dict):
return serializeToStr(d)
result = OrderedDict()
for key, value in d.items():
if isinstance(value, Dict):
result[serializeToStr(key)] = toDictWithStrValues(value)
elif isinstance(value, str):
result[serializeToStr(key)] = serializeToStr(value)
elif isNamedTuple(value):
result[serializeToStr(key)] = toDictWithStrValues(value._asdict())
elif isinstance(value, Set):
result[serializeToStr(key)] = {toDictWithStrValues(v) for v in
value}
elif isinstance(value, List):
result[serializeToStr(key)] = [toDictWithStrValues(v) for v in
value]
elif value:
result[serializeToStr(key)] = serializeToStr(value)
return result
def fromDictWithStrValues(d):
if not isinstance(d, Dict) and not isinstance(d, tuple):
return deserializeFromStr(d)
result = OrderedDict()
for key, value in d.items():
if isinstance(value, Dict):
result[deserializeFromStr(key)] = fromDictWithStrValues(value)
elif isinstance(value, str):
result[deserializeFromStr(key)] = deserializeFromStr(value)
elif isinstance(value, Set):
result[deserializeFromStr(key)] = {fromDictWithStrValues(v) for v in
value}
elif isinstance(value, List):
result[deserializeFromStr(key)] = [fromDictWithStrValues(v) for v in
value]
elif value:
result[deserializeFromStr(key)] = deserializeFromStr(value)
return result
def bytes_to_int(bytesHash):
return int.from_bytes(bytesHash, byteorder=byteorder)
def int_to_ZR(intHash, group):
return group.init(cmod.ZR, intHash)
def groupIdentityG1():
return cmod.PairingGroup(PAIRING_GROUP).init(cmod.G1, 0)
def groupIdentityG2():
return cmod.PairingGroup(PAIRING_GROUP).init(cmod.G2, 0)
def get_values_of_dicts(*args):
_l = list()
for d in args:
_l.extend(list(d.values()))
return _l
def get_prime_in_range(start, end):
n = 0
maxIter = 100000
while n < maxIter:
r = randint(start, end)
if cmod.isPrime(r):
logging.debug("Found prime in {} iterations".format(n))
return r
n += 1
raise Exception("Cannot find prime in {} iterations".format(maxIter))
def splitRevealedAttrs(encodedAttrs, revealedAttrs):
# Revealed attributes
Ar = {}
# Unrevealed attributes
Aur = {}
for k, value in encodedAttrs.items():
if k in revealedAttrs:
Ar[k] = value.encoded
else:
Aur[k] = value.encoded
return Ar, Aur
def randomString(size: int = 20,
chars: str = string.ascii_letters + string.digits) -> str:
"""
Generate a random string of the specified size.
Ensure that the size is less than the length of chars as this function uses random.choice
which uses random sampling without replacement.
:param size: size of the random string to generate
:param chars: the set of characters to use to generate the random string. Uses alphanumerics by default.
:return: the random string generated
"""
return ''.join(sample(chars, size))
def getUnrevealedAttrs(encodedAttrs, revealedAttrsList):
revealedAttrs, unrevealedAttrs = splitRevealedAttrs(encodedAttrs,
revealedAttrsList)
return unrevealedAttrs
def flattenDict(attrs):
return {x: y for z in attrs.values()
for x, y in z.items()}
def largestSquareLessThan(x: int):
sqrtx = int(floor(sqrt(x)))
return sqrtx
def fourSquares(delta: int):
u1 = largestSquareLessThan(delta)
u2 = largestSquareLessThan(delta - (u1 ** 2))
u3 = largestSquareLessThan(delta - (u1 ** 2) - (u2 ** 2))
u4 = largestSquareLessThan(delta - (u1 ** 2) - (u2 ** 2) - (u3 ** 2))
if (u1 ** 2) + (u2 ** 2) + (u3 ** 2) + (u4 ** 2) == delta:
return {'0': u1, '1': u2, '2': u3, '3': u4}
else:
raise Exception(
"Cannot get the four squares for delta {0}".format(delta))
def strToCryptoInteger(n):
if "mod" in n:
a, b = n.split("mod")
return cmod.integer(int(a.strip())) % cmod.integer(int(b.strip()))
else:
return cmod.integer(int(n))
def to_crypto_int(a, b=None):
return strToCryptoInteger(a + 'mod' + b) if b else strToCryptoInteger(a)
def crypto_int_to_str(n):
return cmod.toInt(n)
def strToInt(s):
return bytes_to_int(sha256(s.encode()).digest())
def genPrime():
"""
Generate 2 large primes `p_prime` and `q_prime` and use them
to generate another 2 primes `p` and `q` of 1024 bits
"""
prime = cmod.randomPrime(LARGE_PRIME)
i = 0
while not cmod.isPrime(2 * prime + 1):
prime = cmod.randomPrime(LARGE_PRIME)
i += 1
return prime
def base58encode(i):
return base58.b58encode(str(i).encode())
def base58decode(i):
return base58.b58decode(str(i)).decode()
def base58decodedInt(i):
try:
return int(base58.b58decode(str(i)).decode())
except Exception as ex:
raise AttributeError from ex
class SerFmt(Enum):
default = 1
py3Int = 2
base58 = 3
SerFuncs = {
SerFmt.py3Int: int,
SerFmt.default: cmod.integer,
SerFmt.base58: base58encode,
}
def serialize(data, serFmt):
serfunc = SerFuncs[serFmt]
if KEYS in data:
for k, v in data[KEYS].items():
if isinstance(v, cmod.integer):
# int casting works with Python 3 only.
# for Python 2, charm's serialization api must be used.
data[KEYS][k] = serfunc(v)
if k == PK_R:
data[KEYS][k] = {key: serfunc(val) for key, val in v.items()}
return data
def generateMasterSecret():
# Generate the master secret
return cmod.integer(
cmod.randomBits(LARGE_MASTER_SECRET))
def generateVPrime():
return cmod.randomBits(LARGE_VPRIME)
def shorten(s, size=None):
size = size or 10
if isinstance(s, str):
if len(s) <= size:
return s
else:
head = int((size - 2) * 5 / 8)
tail = int(size) - 2 - head
return s[:head] + '..' + s[-tail:]
else: # assume it's an iterable
return [shorten(x, size) for x in iter(s)]
def shortenMod(s, size=None):
return ' mod '.join(shorten(str(s).split(' mod '), size))
def shortenDictVals(d, size=None):
r = {}
for k, v in d.items():
if isinstance(v, dict):
r[k] = shortenDictVals(v, size)
else:
r[k] = shortenMod(v, size)
return r
def currentTimestampMillisec():
return int(time.time() * 1000) # millisec
def intToArrayBytes(value):
value = int(value)
result = []
for i in range(0, sys.getsizeof(value)):
b = value >> (i * 8) & 0xff
result.append(b)
result.reverse()
first_non_zero = next((i for i, x in enumerate(result) if x), None)
result = result[first_non_zero::]
return result
def bytesToInt(bytes):
result = 0
for b in bytes:
result = result * 256 + int(b)
return result
|
hyperledger-archives/indy-anoncreds
|
anoncreds/protocol/utils.py
|
randomString
|
python
|
def randomString(size: int = 20,
chars: str = string.ascii_letters + string.digits) -> str:
return ''.join(sample(chars, size))
|
Generate a random string of the specified size.
Ensure that the size is less than the length of chars as this function uses random.choice
which uses random sampling without replacement.
:param size: size of the random string to generate
:param chars: the set of characters to use to generate the random string. Uses alphanumerics by default.
:return: the random string generated
|
train
|
https://github.com/hyperledger-archives/indy-anoncreds/blob/9d9cda3d505c312257d99a13d74d8f05dac3091a/anoncreds/protocol/utils.py#L200-L213
| null |
import logging
import string
import time
from collections import OrderedDict
from enum import Enum
from hashlib import sha256
from math import sqrt, floor
from random import randint, sample
from sys import byteorder
from typing import Dict, List, Set
import base58
from anoncreds.protocol.globals import KEYS, PK_R
from anoncreds.protocol.globals import LARGE_PRIME, LARGE_MASTER_SECRET, \
LARGE_VPRIME, PAIRING_GROUP
from config.config import cmod
import sys
def encodeAttr(attrValue):
return cmod.Conversion.bytes2integer(sha256(str(attrValue).encode()).digest())
def randomQR(n):
return cmod.random(n) ** 2
def get_hash_as_int(*args, group: cmod.PairingGroup = None):
"""
Enumerate over the input tuple and generate a hash using the tuple values
:param args: sequence of either group or integer elements
:param group: pairing group if an element is a group element
:return:
"""
group = group if group else cmod.PairingGroup(PAIRING_GROUP)
h_challenge = sha256()
serialedArgs = [group.serialize(arg) if isGroupElement(arg)
else cmod.Conversion.IP2OS(arg)
for arg in args]
for arg in sorted(serialedArgs):
h_challenge.update(arg)
return bytes_to_int(h_challenge.digest())
CRYPTO_INT_PREFIX = 'CryptoInt_'
INT_PREFIX = 'Int_'
GROUP_PREFIX = 'Group_'
BYTES_PREFIX = 'Bytes_'
def serializeToStr(n):
if isCryptoInteger(n):
return CRYPTO_INT_PREFIX + cmod.serialize(n).decode()
if isInteger(n):
return INT_PREFIX + str(n)
if isGroupElement(n):
return GROUP_PREFIX + cmod.PairingGroup(PAIRING_GROUP).serialize(
n).decode()
return n
def deserializeFromStr(n: str):
if isStr(n) and n.startswith(CRYPTO_INT_PREFIX):
n = n[len(CRYPTO_INT_PREFIX):].encode()
return cmod.deserialize(n)
if isStr(n) and n.startswith(INT_PREFIX):
n = n[len(INT_PREFIX):]
return int(n)
if isStr(n) and n.startswith(GROUP_PREFIX):
n = n[len(GROUP_PREFIX):].encode()
res = cmod.PairingGroup(PAIRING_GROUP).deserialize(n)
# A fix for Identity element as serialized/deserialized not correctly
if str(res) == '[0, 0]':
return groupIdentityG1()
return res
return n
def isCryptoInteger(n):
return isinstance(n, cmod.integer)
def isGroupElement(n):
return isinstance(n, cmod.pc_element)
def isInteger(n):
return isinstance(n, int)
def isStr(n):
return isinstance(n, str)
def isNamedTuple(n):
return isinstance(n, tuple) # TODO: assume it's a named tuple
def toDictWithStrValues(d):
if isNamedTuple(d):
return toDictWithStrValues(d._asdict())
if not isinstance(d, Dict):
return serializeToStr(d)
result = OrderedDict()
for key, value in d.items():
if isinstance(value, Dict):
result[serializeToStr(key)] = toDictWithStrValues(value)
elif isinstance(value, str):
result[serializeToStr(key)] = serializeToStr(value)
elif isNamedTuple(value):
result[serializeToStr(key)] = toDictWithStrValues(value._asdict())
elif isinstance(value, Set):
result[serializeToStr(key)] = {toDictWithStrValues(v) for v in
value}
elif isinstance(value, List):
result[serializeToStr(key)] = [toDictWithStrValues(v) for v in
value]
elif value:
result[serializeToStr(key)] = serializeToStr(value)
return result
def fromDictWithStrValues(d):
if not isinstance(d, Dict) and not isinstance(d, tuple):
return deserializeFromStr(d)
result = OrderedDict()
for key, value in d.items():
if isinstance(value, Dict):
result[deserializeFromStr(key)] = fromDictWithStrValues(value)
elif isinstance(value, str):
result[deserializeFromStr(key)] = deserializeFromStr(value)
elif isinstance(value, Set):
result[deserializeFromStr(key)] = {fromDictWithStrValues(v) for v in
value}
elif isinstance(value, List):
result[deserializeFromStr(key)] = [fromDictWithStrValues(v) for v in
value]
elif value:
result[deserializeFromStr(key)] = deserializeFromStr(value)
return result
def bytes_to_int(bytesHash):
return int.from_bytes(bytesHash, byteorder=byteorder)
def int_to_ZR(intHash, group):
return group.init(cmod.ZR, intHash)
def groupIdentityG1():
return cmod.PairingGroup(PAIRING_GROUP).init(cmod.G1, 0)
def groupIdentityG2():
return cmod.PairingGroup(PAIRING_GROUP).init(cmod.G2, 0)
def get_values_of_dicts(*args):
_l = list()
for d in args:
_l.extend(list(d.values()))
return _l
def get_prime_in_range(start, end):
n = 0
maxIter = 100000
while n < maxIter:
r = randint(start, end)
if cmod.isPrime(r):
logging.debug("Found prime in {} iterations".format(n))
return r
n += 1
raise Exception("Cannot find prime in {} iterations".format(maxIter))
def splitRevealedAttrs(encodedAttrs, revealedAttrs):
# Revealed attributes
Ar = {}
# Unrevealed attributes
Aur = {}
for k, value in encodedAttrs.items():
if k in revealedAttrs:
Ar[k] = value.encoded
else:
Aur[k] = value.encoded
return Ar, Aur
def getUnrevealedAttrs(encodedAttrs, revealedAttrsList):
revealedAttrs, unrevealedAttrs = splitRevealedAttrs(encodedAttrs,
revealedAttrsList)
return unrevealedAttrs
def flattenDict(attrs):
return {x: y for z in attrs.values()
for x, y in z.items()}
def largestSquareLessThan(x: int):
sqrtx = int(floor(sqrt(x)))
return sqrtx
def fourSquares(delta: int):
u1 = largestSquareLessThan(delta)
u2 = largestSquareLessThan(delta - (u1 ** 2))
u3 = largestSquareLessThan(delta - (u1 ** 2) - (u2 ** 2))
u4 = largestSquareLessThan(delta - (u1 ** 2) - (u2 ** 2) - (u3 ** 2))
if (u1 ** 2) + (u2 ** 2) + (u3 ** 2) + (u4 ** 2) == delta:
return {'0': u1, '1': u2, '2': u3, '3': u4}
else:
raise Exception(
"Cannot get the four squares for delta {0}".format(delta))
def strToCryptoInteger(n):
if "mod" in n:
a, b = n.split("mod")
return cmod.integer(int(a.strip())) % cmod.integer(int(b.strip()))
else:
return cmod.integer(int(n))
def to_crypto_int(a, b=None):
return strToCryptoInteger(a + 'mod' + b) if b else strToCryptoInteger(a)
def crypto_int_to_str(n):
return cmod.toInt(n)
def strToInt(s):
return bytes_to_int(sha256(s.encode()).digest())
def genPrime():
"""
Generate 2 large primes `p_prime` and `q_prime` and use them
to generate another 2 primes `p` and `q` of 1024 bits
"""
prime = cmod.randomPrime(LARGE_PRIME)
i = 0
while not cmod.isPrime(2 * prime + 1):
prime = cmod.randomPrime(LARGE_PRIME)
i += 1
return prime
def base58encode(i):
return base58.b58encode(str(i).encode())
def base58decode(i):
return base58.b58decode(str(i)).decode()
def base58decodedInt(i):
try:
return int(base58.b58decode(str(i)).decode())
except Exception as ex:
raise AttributeError from ex
class SerFmt(Enum):
default = 1
py3Int = 2
base58 = 3
SerFuncs = {
SerFmt.py3Int: int,
SerFmt.default: cmod.integer,
SerFmt.base58: base58encode,
}
def serialize(data, serFmt):
serfunc = SerFuncs[serFmt]
if KEYS in data:
for k, v in data[KEYS].items():
if isinstance(v, cmod.integer):
# int casting works with Python 3 only.
# for Python 2, charm's serialization api must be used.
data[KEYS][k] = serfunc(v)
if k == PK_R:
data[KEYS][k] = {key: serfunc(val) for key, val in v.items()}
return data
def generateMasterSecret():
# Generate the master secret
return cmod.integer(
cmod.randomBits(LARGE_MASTER_SECRET))
def generateVPrime():
return cmod.randomBits(LARGE_VPRIME)
def shorten(s, size=None):
size = size or 10
if isinstance(s, str):
if len(s) <= size:
return s
else:
head = int((size - 2) * 5 / 8)
tail = int(size) - 2 - head
return s[:head] + '..' + s[-tail:]
else: # assume it's an iterable
return [shorten(x, size) for x in iter(s)]
def shortenMod(s, size=None):
return ' mod '.join(shorten(str(s).split(' mod '), size))
def shortenDictVals(d, size=None):
r = {}
for k, v in d.items():
if isinstance(v, dict):
r[k] = shortenDictVals(v, size)
else:
r[k] = shortenMod(v, size)
return r
def currentTimestampMillisec():
return int(time.time() * 1000) # millisec
def intToArrayBytes(value):
value = int(value)
result = []
for i in range(0, sys.getsizeof(value)):
b = value >> (i * 8) & 0xff
result.append(b)
result.reverse()
first_non_zero = next((i for i, x in enumerate(result) if x), None)
result = result[first_non_zero::]
return result
def bytesToInt(bytes):
result = 0
for b in bytes:
result = result * 256 + int(b)
return result
|
hyperledger-archives/indy-anoncreds
|
anoncreds/protocol/utils.py
|
genPrime
|
python
|
def genPrime():
prime = cmod.randomPrime(LARGE_PRIME)
i = 0
while not cmod.isPrime(2 * prime + 1):
prime = cmod.randomPrime(LARGE_PRIME)
i += 1
return prime
|
Generate 2 large primes `p_prime` and `q_prime` and use them
to generate another 2 primes `p` and `q` of 1024 bits
|
train
|
https://github.com/hyperledger-archives/indy-anoncreds/blob/9d9cda3d505c312257d99a13d74d8f05dac3091a/anoncreds/protocol/utils.py#L264-L274
| null |
import logging
import string
import time
from collections import OrderedDict
from enum import Enum
from hashlib import sha256
from math import sqrt, floor
from random import randint, sample
from sys import byteorder
from typing import Dict, List, Set
import base58
from anoncreds.protocol.globals import KEYS, PK_R
from anoncreds.protocol.globals import LARGE_PRIME, LARGE_MASTER_SECRET, \
LARGE_VPRIME, PAIRING_GROUP
from config.config import cmod
import sys
def encodeAttr(attrValue):
return cmod.Conversion.bytes2integer(sha256(str(attrValue).encode()).digest())
def randomQR(n):
return cmod.random(n) ** 2
def get_hash_as_int(*args, group: cmod.PairingGroup = None):
"""
Enumerate over the input tuple and generate a hash using the tuple values
:param args: sequence of either group or integer elements
:param group: pairing group if an element is a group element
:return:
"""
group = group if group else cmod.PairingGroup(PAIRING_GROUP)
h_challenge = sha256()
serialedArgs = [group.serialize(arg) if isGroupElement(arg)
else cmod.Conversion.IP2OS(arg)
for arg in args]
for arg in sorted(serialedArgs):
h_challenge.update(arg)
return bytes_to_int(h_challenge.digest())
CRYPTO_INT_PREFIX = 'CryptoInt_'
INT_PREFIX = 'Int_'
GROUP_PREFIX = 'Group_'
BYTES_PREFIX = 'Bytes_'
def serializeToStr(n):
if isCryptoInteger(n):
return CRYPTO_INT_PREFIX + cmod.serialize(n).decode()
if isInteger(n):
return INT_PREFIX + str(n)
if isGroupElement(n):
return GROUP_PREFIX + cmod.PairingGroup(PAIRING_GROUP).serialize(
n).decode()
return n
def deserializeFromStr(n: str):
if isStr(n) and n.startswith(CRYPTO_INT_PREFIX):
n = n[len(CRYPTO_INT_PREFIX):].encode()
return cmod.deserialize(n)
if isStr(n) and n.startswith(INT_PREFIX):
n = n[len(INT_PREFIX):]
return int(n)
if isStr(n) and n.startswith(GROUP_PREFIX):
n = n[len(GROUP_PREFIX):].encode()
res = cmod.PairingGroup(PAIRING_GROUP).deserialize(n)
# A fix for Identity element as serialized/deserialized not correctly
if str(res) == '[0, 0]':
return groupIdentityG1()
return res
return n
def isCryptoInteger(n):
return isinstance(n, cmod.integer)
def isGroupElement(n):
return isinstance(n, cmod.pc_element)
def isInteger(n):
return isinstance(n, int)
def isStr(n):
return isinstance(n, str)
def isNamedTuple(n):
return isinstance(n, tuple) # TODO: assume it's a named tuple
def toDictWithStrValues(d):
if isNamedTuple(d):
return toDictWithStrValues(d._asdict())
if not isinstance(d, Dict):
return serializeToStr(d)
result = OrderedDict()
for key, value in d.items():
if isinstance(value, Dict):
result[serializeToStr(key)] = toDictWithStrValues(value)
elif isinstance(value, str):
result[serializeToStr(key)] = serializeToStr(value)
elif isNamedTuple(value):
result[serializeToStr(key)] = toDictWithStrValues(value._asdict())
elif isinstance(value, Set):
result[serializeToStr(key)] = {toDictWithStrValues(v) for v in
value}
elif isinstance(value, List):
result[serializeToStr(key)] = [toDictWithStrValues(v) for v in
value]
elif value:
result[serializeToStr(key)] = serializeToStr(value)
return result
def fromDictWithStrValues(d):
if not isinstance(d, Dict) and not isinstance(d, tuple):
return deserializeFromStr(d)
result = OrderedDict()
for key, value in d.items():
if isinstance(value, Dict):
result[deserializeFromStr(key)] = fromDictWithStrValues(value)
elif isinstance(value, str):
result[deserializeFromStr(key)] = deserializeFromStr(value)
elif isinstance(value, Set):
result[deserializeFromStr(key)] = {fromDictWithStrValues(v) for v in
value}
elif isinstance(value, List):
result[deserializeFromStr(key)] = [fromDictWithStrValues(v) for v in
value]
elif value:
result[deserializeFromStr(key)] = deserializeFromStr(value)
return result
def bytes_to_int(bytesHash):
return int.from_bytes(bytesHash, byteorder=byteorder)
def int_to_ZR(intHash, group):
return group.init(cmod.ZR, intHash)
def groupIdentityG1():
return cmod.PairingGroup(PAIRING_GROUP).init(cmod.G1, 0)
def groupIdentityG2():
return cmod.PairingGroup(PAIRING_GROUP).init(cmod.G2, 0)
def get_values_of_dicts(*args):
_l = list()
for d in args:
_l.extend(list(d.values()))
return _l
def get_prime_in_range(start, end):
n = 0
maxIter = 100000
while n < maxIter:
r = randint(start, end)
if cmod.isPrime(r):
logging.debug("Found prime in {} iterations".format(n))
return r
n += 1
raise Exception("Cannot find prime in {} iterations".format(maxIter))
def splitRevealedAttrs(encodedAttrs, revealedAttrs):
# Revealed attributes
Ar = {}
# Unrevealed attributes
Aur = {}
for k, value in encodedAttrs.items():
if k in revealedAttrs:
Ar[k] = value.encoded
else:
Aur[k] = value.encoded
return Ar, Aur
def randomString(size: int = 20,
chars: str = string.ascii_letters + string.digits) -> str:
"""
Generate a random string of the specified size.
Ensure that the size is less than the length of chars as this function uses random.choice
which uses random sampling without replacement.
:param size: size of the random string to generate
:param chars: the set of characters to use to generate the random string. Uses alphanumerics by default.
:return: the random string generated
"""
return ''.join(sample(chars, size))
def getUnrevealedAttrs(encodedAttrs, revealedAttrsList):
revealedAttrs, unrevealedAttrs = splitRevealedAttrs(encodedAttrs,
revealedAttrsList)
return unrevealedAttrs
def flattenDict(attrs):
return {x: y for z in attrs.values()
for x, y in z.items()}
def largestSquareLessThan(x: int):
sqrtx = int(floor(sqrt(x)))
return sqrtx
def fourSquares(delta: int):
u1 = largestSquareLessThan(delta)
u2 = largestSquareLessThan(delta - (u1 ** 2))
u3 = largestSquareLessThan(delta - (u1 ** 2) - (u2 ** 2))
u4 = largestSquareLessThan(delta - (u1 ** 2) - (u2 ** 2) - (u3 ** 2))
if (u1 ** 2) + (u2 ** 2) + (u3 ** 2) + (u4 ** 2) == delta:
return {'0': u1, '1': u2, '2': u3, '3': u4}
else:
raise Exception(
"Cannot get the four squares for delta {0}".format(delta))
def strToCryptoInteger(n):
if "mod" in n:
a, b = n.split("mod")
return cmod.integer(int(a.strip())) % cmod.integer(int(b.strip()))
else:
return cmod.integer(int(n))
def to_crypto_int(a, b=None):
return strToCryptoInteger(a + 'mod' + b) if b else strToCryptoInteger(a)
def crypto_int_to_str(n):
return cmod.toInt(n)
def strToInt(s):
return bytes_to_int(sha256(s.encode()).digest())
def base58encode(i):
return base58.b58encode(str(i).encode())
def base58decode(i):
return base58.b58decode(str(i)).decode()
def base58decodedInt(i):
try:
return int(base58.b58decode(str(i)).decode())
except Exception as ex:
raise AttributeError from ex
class SerFmt(Enum):
default = 1
py3Int = 2
base58 = 3
SerFuncs = {
SerFmt.py3Int: int,
SerFmt.default: cmod.integer,
SerFmt.base58: base58encode,
}
def serialize(data, serFmt):
serfunc = SerFuncs[serFmt]
if KEYS in data:
for k, v in data[KEYS].items():
if isinstance(v, cmod.integer):
# int casting works with Python 3 only.
# for Python 2, charm's serialization api must be used.
data[KEYS][k] = serfunc(v)
if k == PK_R:
data[KEYS][k] = {key: serfunc(val) for key, val in v.items()}
return data
def generateMasterSecret():
# Generate the master secret
return cmod.integer(
cmod.randomBits(LARGE_MASTER_SECRET))
def generateVPrime():
return cmod.randomBits(LARGE_VPRIME)
def shorten(s, size=None):
size = size or 10
if isinstance(s, str):
if len(s) <= size:
return s
else:
head = int((size - 2) * 5 / 8)
tail = int(size) - 2 - head
return s[:head] + '..' + s[-tail:]
else: # assume it's an iterable
return [shorten(x, size) for x in iter(s)]
def shortenMod(s, size=None):
return ' mod '.join(shorten(str(s).split(' mod '), size))
def shortenDictVals(d, size=None):
r = {}
for k, v in d.items():
if isinstance(v, dict):
r[k] = shortenDictVals(v, size)
else:
r[k] = shortenMod(v, size)
return r
def currentTimestampMillisec():
return int(time.time() * 1000) # millisec
def intToArrayBytes(value):
value = int(value)
result = []
for i in range(0, sys.getsizeof(value)):
b = value >> (i * 8) & 0xff
result.append(b)
result.reverse()
first_non_zero = next((i for i, x in enumerate(result) if x), None)
result = result[first_non_zero::]
return result
def bytesToInt(bytes):
result = 0
for b in bytes:
result = result * 256 + int(b)
return result
|
hyperledger-archives/indy-anoncreds
|
anoncreds/protocol/types.py
|
Attribs.encoded
|
python
|
def encoded(self):
encoded = {}
for i in range(len(self.credType.names)):
self.credType.names[i]
attr_types = self.credType.attrTypes[i]
for at in attr_types:
attrName = at.name
if attrName in self._vals:
if at.encode:
encoded[attrName] = encodeAttr(self._vals[attrName])
else:
encoded[attrName] = self._vals[at.name]
return encoded
|
This function will encode all the attributes to 256 bit integers
:return:
|
train
|
https://github.com/hyperledger-archives/indy-anoncreds/blob/9d9cda3d505c312257d99a13d74d8f05dac3091a/anoncreds/protocol/types.py#L77-L96
|
[
"def encodeAttr(attrValue):\n return cmod.Conversion.bytes2integer(sha256(str(attrValue).encode()).digest())\n"
] |
class Attribs:
def __init__(self, credType: AttribDef = None, **vals):
self.credType = credType if credType else AttribDef([], [])
self._vals = vals
def __add__(self, other):
vals = self._vals.copy()
vals.update(other._vals)
return Attribs(self.credType + other.credType, **vals)
def __iter__(self):
return self._vals.__iter__()
def __getitem__(self, key):
return self._vals[key]
def keys(self):
return self._vals.keys()
def values(self):
return self._vals.values()
def items(self):
return self._vals.items()
def __repr__(self):
return str(self.__dict__)
def __eq__(self, y):
return self.credType == y.credType \
and self._vals == y._vals
|
hyperledger-archives/indy-anoncreds
|
anoncreds/protocol/issuer.py
|
Issuer.genSchema
|
python
|
async def genSchema(self, name, version, attrNames) -> Schema:
schema = Schema(name, version, attrNames, self.issuerId)
return await self.wallet.submitSchema(schema)
|
Generates and submits Schema.
:param name: schema name
:param version: schema version
:param attrNames: a list of attributes the schema contains
:return: submitted Schema
|
train
|
https://github.com/hyperledger-archives/indy-anoncreds/blob/9d9cda3d505c312257d99a13d74d8f05dac3091a/anoncreds/protocol/issuer.py#L34-L44
|
[
"async def submitSchema(self,\n schema: Schema) -> Schema:\n schema = await self._repo.submitSchema(schema)\n if schema:\n self._cacheSchema(schema)\n return schema\n"
] |
class Issuer:
def __init__(self, wallet: IssuerWallet, attrRepo: AttributeRepo):
self.wallet = wallet
self._attrRepo = attrRepo
self._primaryIssuer = PrimaryClaimIssuer(wallet)
self._nonRevocationIssuer = NonRevocationClaimIssuer(wallet)
#
# PUBLIC
#
@property
def issuerId(self):
return self.wallet.walletId
def isSchemaExists(self, schemaKey):
return self.wallet._schemasByKey.get(schemaKey)
async def genKeys(self, schemaId: ID, p_prime=None, q_prime=None) -> (
PublicKey, RevocationPublicKey):
"""
Generates and submits keys (both public and secret, primary and
non-revocation).
:param schemaId: The schema ID (reference to claim
definition schema)
:param p_prime: optional p_prime parameter
:param q_prime: optional q_prime parameter
:return: Submitted Public keys (both primary and non-revocation)
"""
pk, sk = await self._primaryIssuer.genKeys(schemaId, p_prime, q_prime)
pkR, skR = await self._nonRevocationIssuer.genRevocationKeys()
pk = await self.wallet.submitPublicKeys(schemaId=schemaId, pk=pk,
pkR=pkR)
pkR = await self.wallet.submitSecretKeys(schemaId=schemaId, sk=sk,
skR=skR)
return pk, pkR
async def issueAccumulator(self, schemaId: ID, iA,
L) -> AccumulatorPublicKey:
"""
Issues and submits an accumulator used for non-revocation proof.
:param schemaId: The schema ID (reference to claim
definition schema)
:param iA: accumulator ID
:param L: maximum number of claims within accumulator.
:return: Submitted accumulator public key
"""
accum, tails, accPK, accSK = await self._nonRevocationIssuer.issueAccumulator(
schemaId, iA, L)
accPK = await self.wallet.submitAccumPublic(schemaId=schemaId,
accumPK=accPK,
accum=accum, tails=tails)
await self.wallet.submitAccumSecret(schemaId=schemaId,
accumSK=accSK)
return accPK
async def revoke(self, schemaId: ID, i):
"""
Performs revocation of a Claim.
:param schemaId: The schema ID (reference to claim
definition schema)
:param i: claim's sequence number within accumulator
"""
acc, ts = await self._nonRevocationIssuer.revoke(schemaId, i)
await self.wallet.submitAccumUpdate(schemaId=schemaId, accum=acc,
timestampMs=ts)
async def issueClaim(self, schemaId: ID, claimRequest: ClaimRequest,
iA=None,
i=None) -> (Claims, Dict[str, ClaimAttributeValues]):
"""
Issue a claim for the given user and schema.
:param schemaId: The schema ID (reference to claim
definition schema)
:param claimRequest: A claim request containing prover ID and
prover-generated values
:param iA: accumulator ID
:param i: claim's sequence number within accumulator
:return: The claim (both primary and non-revocation)
"""
schemaKey = (await self.wallet.getSchema(schemaId)).getKey()
attributes = self._attrRepo.getAttributes(schemaKey,
claimRequest.userId)
# TODO re-enable when revocation registry is implemented
# iA = iA if iA else (await self.wallet.getAccumulator(schemaId)).iA
# TODO this has un-obvious side-effects
await self._genContxt(schemaId, iA, claimRequest.userId)
(c1, claim) = await self._issuePrimaryClaim(schemaId, attributes,
claimRequest.U)
# TODO re-enable when revocation registry is fully implemented
c2 = await self._issueNonRevocationClaim(schemaId, claimRequest.Ur,
iA,
i) if claimRequest.Ur else None
signature = Claims(primaryClaim=c1, nonRevocClaim=c2)
return (signature, claim)
async def issueClaims(self, allClaimRequest: Dict[ID, ClaimRequest]) -> \
Dict[ID, Claims]:
"""
Issue claims for the given users and schemas.
:param allClaimRequest: a map of schema ID to a claim
request containing prover ID and prover-generated values
:return: The claims (both primary and non-revocation)
"""
res = {}
for schemaId, claimReq in allClaimRequest.items():
res[schemaId] = await self.issueClaim(schemaId, claimReq)
return res
#
# PRIVATE
#
async def _genContxt(self, schemaId: ID, iA, userId):
iA = strToInt(str(iA))
userId = strToInt(str(userId))
S = iA | userId
H = get_hash_as_int(S)
m2 = cmod.integer(H % (2 ** LARGE_MASTER_SECRET))
await self.wallet.submitContextAttr(schemaId, m2)
return m2
async def _issuePrimaryClaim(self, schemaId: ID, attributes: Attribs,
U) -> (PrimaryClaim, Dict[str, ClaimAttributeValues]):
return await self._primaryIssuer.issuePrimaryClaim(schemaId,
attributes, U)
async def _issueNonRevocationClaim(self, schemaId: ID, Ur, iA=None,
i=None) -> NonRevocationClaim:
claim, accum, ts = await self._nonRevocationIssuer.issueNonRevocationClaim(
schemaId, Ur, iA, i)
await self.wallet.submitAccumUpdate(schemaId=schemaId, accum=accum,
timestampMs=ts)
return claim
def __repr__(self):
return str(self.__dict__)
|
hyperledger-archives/indy-anoncreds
|
anoncreds/protocol/issuer.py
|
Issuer.genKeys
|
python
|
async def genKeys(self, schemaId: ID, p_prime=None, q_prime=None) -> (
PublicKey, RevocationPublicKey):
pk, sk = await self._primaryIssuer.genKeys(schemaId, p_prime, q_prime)
pkR, skR = await self._nonRevocationIssuer.genRevocationKeys()
pk = await self.wallet.submitPublicKeys(schemaId=schemaId, pk=pk,
pkR=pkR)
pkR = await self.wallet.submitSecretKeys(schemaId=schemaId, sk=sk,
skR=skR)
return pk, pkR
|
Generates and submits keys (both public and secret, primary and
non-revocation).
:param schemaId: The schema ID (reference to claim
definition schema)
:param p_prime: optional p_prime parameter
:param q_prime: optional q_prime parameter
:return: Submitted Public keys (both primary and non-revocation)
|
train
|
https://github.com/hyperledger-archives/indy-anoncreds/blob/9d9cda3d505c312257d99a13d74d8f05dac3091a/anoncreds/protocol/issuer.py#L46-L64
|
[
"async def submitPublicKeys(self, schemaId: ID, pk: PublicKey,\n pkR: RevocationPublicKey = None) -> (\n PublicKey, RevocationPublicKey):\n pk, pkR = await self._repo.submitPublicKeys(schemaId, pk, pkR)\n await self._cacheValueForId(self._pks, schemaId, pk)\n if pkR:\n await self._cacheValueForId(self._pkRs, schemaId, pkR)\n return pk, pkR\n",
"async def submitSecretKeys(self, schemaId: ID, sk: SecretKey,\n skR: RevocationSecretKey = None):\n await self._cacheValueForId(self._sks, schemaId, sk)\n if skR:\n await self._cacheValueForId(self._skRs, schemaId, skR)\n",
"async def genRevocationKeys(self) -> (\n RevocationPublicKey, RevocationSecretKey):\n group = cmod.PairingGroup(\n PAIRING_GROUP) # super singular curve, 1024 bits\n\n g = group.random(cmod.G1)\n gprime = group.random(cmod.G2)\n\n h = group.random(cmod.G1) # random element of the group G\n h0 = group.random(cmod.G1)\n h1 = group.random(cmod.G1)\n h2 = group.random(cmod.G1)\n htilde = group.random(cmod.G1)\n\n u = group.random(cmod.G2)\n hhat = group.random(cmod.G2)\n\n qr = group.order() # order q_R of the group\n\n x = group.random(cmod.ZR) # random(qr)\n sk = group.random(cmod.ZR) # random(qr)\n\n pk = g ** sk\n y = hhat ** x\n\n return (RevocationPublicKey(qr, g, gprime, h, h0, h1, h2, htilde, hhat, u, pk, y),\n RevocationSecretKey(x, sk))\n",
"async def genKeys(self, schemaId: ID, p_prime=None, q_prime=None) -> (\n PublicKey, SecretKey):\n schema = await self._wallet.getSchema(schemaId)\n if not schema.attrNames and isinstance(schema.attrNames, list):\n raise ValueError(\"List of attribute names is required to \"\n \"setup credential definition\")\n\n p_prime = p_prime if p_prime else PrimaryClaimIssuer._genPrime()\n p = 2 * p_prime + 1\n\n q_prime = q_prime if q_prime else PrimaryClaimIssuer._genPrime()\n q = 2 * q_prime + 1\n\n n = p * q\n\n # Generate a random quadratic number\n S = randomQR(n)\n\n # Generate random numbers corresponding to every attributes\n Xz = PrimaryClaimIssuer._genX(p_prime, q_prime)\n Xr = {}\n\n for name in schema.attrNames:\n Xr[str(name)] = PrimaryClaimIssuer._genX(p_prime, q_prime)\n\n # Generate `Z` as the exponentiation of the quadratic random 'S' .\n # over the random `Xz` in the group defined by modulus `n`\n Z = (S ** Xz) % n\n\n # Generate random numbers corresponding to every attributes\n R = {}\n for name in schema.attrNames:\n R[str(name)] = (S ** Xr[str(name)]) % n\n\n # Rms is a random number needed corresponding to master secret m1\n Rms = (S ** PrimaryClaimIssuer._genX(p_prime, q_prime)) % n\n\n # Rctxt is a random number needed corresponding to context attribute m2\n Rctxt = (S ** PrimaryClaimIssuer._genX(p_prime, q_prime)) % n\n\n return PublicKey(n, Rms, Rctxt, R, S, Z), SecretKey(p_prime, q_prime)\n"
] |
class Issuer:
def __init__(self, wallet: IssuerWallet, attrRepo: AttributeRepo):
self.wallet = wallet
self._attrRepo = attrRepo
self._primaryIssuer = PrimaryClaimIssuer(wallet)
self._nonRevocationIssuer = NonRevocationClaimIssuer(wallet)
#
# PUBLIC
#
@property
def issuerId(self):
return self.wallet.walletId
def isSchemaExists(self, schemaKey):
return self.wallet._schemasByKey.get(schemaKey)
async def genSchema(self, name, version, attrNames) -> Schema:
"""
Generates and submits Schema.
:param name: schema name
:param version: schema version
:param attrNames: a list of attributes the schema contains
:return: submitted Schema
"""
schema = Schema(name, version, attrNames, self.issuerId)
return await self.wallet.submitSchema(schema)
async def issueAccumulator(self, schemaId: ID, iA,
L) -> AccumulatorPublicKey:
"""
Issues and submits an accumulator used for non-revocation proof.
:param schemaId: The schema ID (reference to claim
definition schema)
:param iA: accumulator ID
:param L: maximum number of claims within accumulator.
:return: Submitted accumulator public key
"""
accum, tails, accPK, accSK = await self._nonRevocationIssuer.issueAccumulator(
schemaId, iA, L)
accPK = await self.wallet.submitAccumPublic(schemaId=schemaId,
accumPK=accPK,
accum=accum, tails=tails)
await self.wallet.submitAccumSecret(schemaId=schemaId,
accumSK=accSK)
return accPK
async def revoke(self, schemaId: ID, i):
"""
Performs revocation of a Claim.
:param schemaId: The schema ID (reference to claim
definition schema)
:param i: claim's sequence number within accumulator
"""
acc, ts = await self._nonRevocationIssuer.revoke(schemaId, i)
await self.wallet.submitAccumUpdate(schemaId=schemaId, accum=acc,
timestampMs=ts)
async def issueClaim(self, schemaId: ID, claimRequest: ClaimRequest,
iA=None,
i=None) -> (Claims, Dict[str, ClaimAttributeValues]):
"""
Issue a claim for the given user and schema.
:param schemaId: The schema ID (reference to claim
definition schema)
:param claimRequest: A claim request containing prover ID and
prover-generated values
:param iA: accumulator ID
:param i: claim's sequence number within accumulator
:return: The claim (both primary and non-revocation)
"""
schemaKey = (await self.wallet.getSchema(schemaId)).getKey()
attributes = self._attrRepo.getAttributes(schemaKey,
claimRequest.userId)
# TODO re-enable when revocation registry is implemented
# iA = iA if iA else (await self.wallet.getAccumulator(schemaId)).iA
# TODO this has un-obvious side-effects
await self._genContxt(schemaId, iA, claimRequest.userId)
(c1, claim) = await self._issuePrimaryClaim(schemaId, attributes,
claimRequest.U)
# TODO re-enable when revocation registry is fully implemented
c2 = await self._issueNonRevocationClaim(schemaId, claimRequest.Ur,
iA,
i) if claimRequest.Ur else None
signature = Claims(primaryClaim=c1, nonRevocClaim=c2)
return (signature, claim)
async def issueClaims(self, allClaimRequest: Dict[ID, ClaimRequest]) -> \
Dict[ID, Claims]:
"""
Issue claims for the given users and schemas.
:param allClaimRequest: a map of schema ID to a claim
request containing prover ID and prover-generated values
:return: The claims (both primary and non-revocation)
"""
res = {}
for schemaId, claimReq in allClaimRequest.items():
res[schemaId] = await self.issueClaim(schemaId, claimReq)
return res
#
# PRIVATE
#
async def _genContxt(self, schemaId: ID, iA, userId):
iA = strToInt(str(iA))
userId = strToInt(str(userId))
S = iA | userId
H = get_hash_as_int(S)
m2 = cmod.integer(H % (2 ** LARGE_MASTER_SECRET))
await self.wallet.submitContextAttr(schemaId, m2)
return m2
async def _issuePrimaryClaim(self, schemaId: ID, attributes: Attribs,
U) -> (PrimaryClaim, Dict[str, ClaimAttributeValues]):
return await self._primaryIssuer.issuePrimaryClaim(schemaId,
attributes, U)
async def _issueNonRevocationClaim(self, schemaId: ID, Ur, iA=None,
i=None) -> NonRevocationClaim:
claim, accum, ts = await self._nonRevocationIssuer.issueNonRevocationClaim(
schemaId, Ur, iA, i)
await self.wallet.submitAccumUpdate(schemaId=schemaId, accum=accum,
timestampMs=ts)
return claim
def __repr__(self):
return str(self.__dict__)
|
hyperledger-archives/indy-anoncreds
|
anoncreds/protocol/issuer.py
|
Issuer.issueAccumulator
|
python
|
async def issueAccumulator(self, schemaId: ID, iA,
L) -> AccumulatorPublicKey:
accum, tails, accPK, accSK = await self._nonRevocationIssuer.issueAccumulator(
schemaId, iA, L)
accPK = await self.wallet.submitAccumPublic(schemaId=schemaId,
accumPK=accPK,
accum=accum, tails=tails)
await self.wallet.submitAccumSecret(schemaId=schemaId,
accumSK=accSK)
return accPK
|
Issues and submits an accumulator used for non-revocation proof.
:param schemaId: The schema ID (reference to claim
definition schema)
:param iA: accumulator ID
:param L: maximum number of claims within accumulator.
:return: Submitted accumulator public key
|
train
|
https://github.com/hyperledger-archives/indy-anoncreds/blob/9d9cda3d505c312257d99a13d74d8f05dac3091a/anoncreds/protocol/issuer.py#L66-L84
|
[
"async def submitAccumPublic(self, schemaId: ID,\n accumPK: AccumulatorPublicKey,\n accum: Accumulator,\n tails: Tails) -> AccumulatorPublicKey:\n accumPK = await self._repo.submitAccumulator(schemaId, accumPK, accum,\n tails)\n await self._cacheValueForId(self._accums, schemaId, accum)\n await self._cacheValueForId(self._accumPks, schemaId, accumPK)\n await self._cacheValueForId(self._tails, schemaId, tails)\n return accumPK\n",
"async def submitAccumSecret(self, schemaId: ID,\n accumSK: AccumulatorSecretKey):\n await self._cacheValueForId(self._accumSks, schemaId, accumSK)\n",
"async def issueAccumulator(self, schemaId, iA, L) \\\n -> (Accumulator, Tails, AccumulatorPublicKey,\n AccumulatorSecretKey):\n pkR = await self._wallet.getPublicKeyRevocation(schemaId)\n group = cmod.PairingGroup(PAIRING_GROUP)\n gamma = group.random(cmod.ZR)\n\n tails = Tails()\n gCount = 2 * L\n for i in range(gCount):\n if i != L + 1:\n gVal = pkR.g ** (gamma ** i)\n gPrimeVal = pkR.gprime ** (gamma ** i)\n tails.addValue(i, gVal, gPrimeVal)\n z = cmod.pair(pkR.g, pkR.gprime) ** (gamma ** (L + 1))\n\n acc = 1\n V = set()\n\n accPK = AccumulatorPublicKey(z)\n accSK = AccumulatorSecretKey(gamma)\n accum = Accumulator(iA, acc, V, L)\n return accum, tails, accPK, accSK\n"
] |
class Issuer:
def __init__(self, wallet: IssuerWallet, attrRepo: AttributeRepo):
self.wallet = wallet
self._attrRepo = attrRepo
self._primaryIssuer = PrimaryClaimIssuer(wallet)
self._nonRevocationIssuer = NonRevocationClaimIssuer(wallet)
#
# PUBLIC
#
@property
def issuerId(self):
return self.wallet.walletId
def isSchemaExists(self, schemaKey):
return self.wallet._schemasByKey.get(schemaKey)
async def genSchema(self, name, version, attrNames) -> Schema:
"""
Generates and submits Schema.
:param name: schema name
:param version: schema version
:param attrNames: a list of attributes the schema contains
:return: submitted Schema
"""
schema = Schema(name, version, attrNames, self.issuerId)
return await self.wallet.submitSchema(schema)
async def genKeys(self, schemaId: ID, p_prime=None, q_prime=None) -> (
PublicKey, RevocationPublicKey):
"""
Generates and submits keys (both public and secret, primary and
non-revocation).
:param schemaId: The schema ID (reference to claim
definition schema)
:param p_prime: optional p_prime parameter
:param q_prime: optional q_prime parameter
:return: Submitted Public keys (both primary and non-revocation)
"""
pk, sk = await self._primaryIssuer.genKeys(schemaId, p_prime, q_prime)
pkR, skR = await self._nonRevocationIssuer.genRevocationKeys()
pk = await self.wallet.submitPublicKeys(schemaId=schemaId, pk=pk,
pkR=pkR)
pkR = await self.wallet.submitSecretKeys(schemaId=schemaId, sk=sk,
skR=skR)
return pk, pkR
async def revoke(self, schemaId: ID, i):
"""
Performs revocation of a Claim.
:param schemaId: The schema ID (reference to claim
definition schema)
:param i: claim's sequence number within accumulator
"""
acc, ts = await self._nonRevocationIssuer.revoke(schemaId, i)
await self.wallet.submitAccumUpdate(schemaId=schemaId, accum=acc,
timestampMs=ts)
async def issueClaim(self, schemaId: ID, claimRequest: ClaimRequest,
iA=None,
i=None) -> (Claims, Dict[str, ClaimAttributeValues]):
"""
Issue a claim for the given user and schema.
:param schemaId: The schema ID (reference to claim
definition schema)
:param claimRequest: A claim request containing prover ID and
prover-generated values
:param iA: accumulator ID
:param i: claim's sequence number within accumulator
:return: The claim (both primary and non-revocation)
"""
schemaKey = (await self.wallet.getSchema(schemaId)).getKey()
attributes = self._attrRepo.getAttributes(schemaKey,
claimRequest.userId)
# TODO re-enable when revocation registry is implemented
# iA = iA if iA else (await self.wallet.getAccumulator(schemaId)).iA
# TODO this has un-obvious side-effects
await self._genContxt(schemaId, iA, claimRequest.userId)
(c1, claim) = await self._issuePrimaryClaim(schemaId, attributes,
claimRequest.U)
# TODO re-enable when revocation registry is fully implemented
c2 = await self._issueNonRevocationClaim(schemaId, claimRequest.Ur,
iA,
i) if claimRequest.Ur else None
signature = Claims(primaryClaim=c1, nonRevocClaim=c2)
return (signature, claim)
async def issueClaims(self, allClaimRequest: Dict[ID, ClaimRequest]) -> \
Dict[ID, Claims]:
"""
Issue claims for the given users and schemas.
:param allClaimRequest: a map of schema ID to a claim
request containing prover ID and prover-generated values
:return: The claims (both primary and non-revocation)
"""
res = {}
for schemaId, claimReq in allClaimRequest.items():
res[schemaId] = await self.issueClaim(schemaId, claimReq)
return res
#
# PRIVATE
#
async def _genContxt(self, schemaId: ID, iA, userId):
iA = strToInt(str(iA))
userId = strToInt(str(userId))
S = iA | userId
H = get_hash_as_int(S)
m2 = cmod.integer(H % (2 ** LARGE_MASTER_SECRET))
await self.wallet.submitContextAttr(schemaId, m2)
return m2
async def _issuePrimaryClaim(self, schemaId: ID, attributes: Attribs,
U) -> (PrimaryClaim, Dict[str, ClaimAttributeValues]):
return await self._primaryIssuer.issuePrimaryClaim(schemaId,
attributes, U)
async def _issueNonRevocationClaim(self, schemaId: ID, Ur, iA=None,
i=None) -> NonRevocationClaim:
claim, accum, ts = await self._nonRevocationIssuer.issueNonRevocationClaim(
schemaId, Ur, iA, i)
await self.wallet.submitAccumUpdate(schemaId=schemaId, accum=accum,
timestampMs=ts)
return claim
def __repr__(self):
return str(self.__dict__)
|
hyperledger-archives/indy-anoncreds
|
anoncreds/protocol/issuer.py
|
Issuer.revoke
|
python
|
async def revoke(self, schemaId: ID, i):
acc, ts = await self._nonRevocationIssuer.revoke(schemaId, i)
await self.wallet.submitAccumUpdate(schemaId=schemaId, accum=acc,
timestampMs=ts)
|
Performs revocation of a Claim.
:param schemaId: The schema ID (reference to claim
definition schema)
:param i: claim's sequence number within accumulator
|
train
|
https://github.com/hyperledger-archives/indy-anoncreds/blob/9d9cda3d505c312257d99a13d74d8f05dac3091a/anoncreds/protocol/issuer.py#L86-L96
| null |
class Issuer:
def __init__(self, wallet: IssuerWallet, attrRepo: AttributeRepo):
self.wallet = wallet
self._attrRepo = attrRepo
self._primaryIssuer = PrimaryClaimIssuer(wallet)
self._nonRevocationIssuer = NonRevocationClaimIssuer(wallet)
#
# PUBLIC
#
@property
def issuerId(self):
return self.wallet.walletId
def isSchemaExists(self, schemaKey):
return self.wallet._schemasByKey.get(schemaKey)
async def genSchema(self, name, version, attrNames) -> Schema:
"""
Generates and submits Schema.
:param name: schema name
:param version: schema version
:param attrNames: a list of attributes the schema contains
:return: submitted Schema
"""
schema = Schema(name, version, attrNames, self.issuerId)
return await self.wallet.submitSchema(schema)
async def genKeys(self, schemaId: ID, p_prime=None, q_prime=None) -> (
PublicKey, RevocationPublicKey):
"""
Generates and submits keys (both public and secret, primary and
non-revocation).
:param schemaId: The schema ID (reference to claim
definition schema)
:param p_prime: optional p_prime parameter
:param q_prime: optional q_prime parameter
:return: Submitted Public keys (both primary and non-revocation)
"""
pk, sk = await self._primaryIssuer.genKeys(schemaId, p_prime, q_prime)
pkR, skR = await self._nonRevocationIssuer.genRevocationKeys()
pk = await self.wallet.submitPublicKeys(schemaId=schemaId, pk=pk,
pkR=pkR)
pkR = await self.wallet.submitSecretKeys(schemaId=schemaId, sk=sk,
skR=skR)
return pk, pkR
async def issueAccumulator(self, schemaId: ID, iA,
L) -> AccumulatorPublicKey:
"""
Issues and submits an accumulator used for non-revocation proof.
:param schemaId: The schema ID (reference to claim
definition schema)
:param iA: accumulator ID
:param L: maximum number of claims within accumulator.
:return: Submitted accumulator public key
"""
accum, tails, accPK, accSK = await self._nonRevocationIssuer.issueAccumulator(
schemaId, iA, L)
accPK = await self.wallet.submitAccumPublic(schemaId=schemaId,
accumPK=accPK,
accum=accum, tails=tails)
await self.wallet.submitAccumSecret(schemaId=schemaId,
accumSK=accSK)
return accPK
async def issueClaim(self, schemaId: ID, claimRequest: ClaimRequest,
iA=None,
i=None) -> (Claims, Dict[str, ClaimAttributeValues]):
"""
Issue a claim for the given user and schema.
:param schemaId: The schema ID (reference to claim
definition schema)
:param claimRequest: A claim request containing prover ID and
prover-generated values
:param iA: accumulator ID
:param i: claim's sequence number within accumulator
:return: The claim (both primary and non-revocation)
"""
schemaKey = (await self.wallet.getSchema(schemaId)).getKey()
attributes = self._attrRepo.getAttributes(schemaKey,
claimRequest.userId)
# TODO re-enable when revocation registry is implemented
# iA = iA if iA else (await self.wallet.getAccumulator(schemaId)).iA
# TODO this has un-obvious side-effects
await self._genContxt(schemaId, iA, claimRequest.userId)
(c1, claim) = await self._issuePrimaryClaim(schemaId, attributes,
claimRequest.U)
# TODO re-enable when revocation registry is fully implemented
c2 = await self._issueNonRevocationClaim(schemaId, claimRequest.Ur,
iA,
i) if claimRequest.Ur else None
signature = Claims(primaryClaim=c1, nonRevocClaim=c2)
return (signature, claim)
async def issueClaims(self, allClaimRequest: Dict[ID, ClaimRequest]) -> \
Dict[ID, Claims]:
"""
Issue claims for the given users and schemas.
:param allClaimRequest: a map of schema ID to a claim
request containing prover ID and prover-generated values
:return: The claims (both primary and non-revocation)
"""
res = {}
for schemaId, claimReq in allClaimRequest.items():
res[schemaId] = await self.issueClaim(schemaId, claimReq)
return res
#
# PRIVATE
#
async def _genContxt(self, schemaId: ID, iA, userId):
iA = strToInt(str(iA))
userId = strToInt(str(userId))
S = iA | userId
H = get_hash_as_int(S)
m2 = cmod.integer(H % (2 ** LARGE_MASTER_SECRET))
await self.wallet.submitContextAttr(schemaId, m2)
return m2
async def _issuePrimaryClaim(self, schemaId: ID, attributes: Attribs,
U) -> (PrimaryClaim, Dict[str, ClaimAttributeValues]):
return await self._primaryIssuer.issuePrimaryClaim(schemaId,
attributes, U)
async def _issueNonRevocationClaim(self, schemaId: ID, Ur, iA=None,
i=None) -> NonRevocationClaim:
claim, accum, ts = await self._nonRevocationIssuer.issueNonRevocationClaim(
schemaId, Ur, iA, i)
await self.wallet.submitAccumUpdate(schemaId=schemaId, accum=accum,
timestampMs=ts)
return claim
def __repr__(self):
return str(self.__dict__)
|
hyperledger-archives/indy-anoncreds
|
anoncreds/protocol/issuer.py
|
Issuer.issueClaim
|
python
|
async def issueClaim(self, schemaId: ID, claimRequest: ClaimRequest,
iA=None,
i=None) -> (Claims, Dict[str, ClaimAttributeValues]):
schemaKey = (await self.wallet.getSchema(schemaId)).getKey()
attributes = self._attrRepo.getAttributes(schemaKey,
claimRequest.userId)
# TODO re-enable when revocation registry is implemented
# iA = iA if iA else (await self.wallet.getAccumulator(schemaId)).iA
# TODO this has un-obvious side-effects
await self._genContxt(schemaId, iA, claimRequest.userId)
(c1, claim) = await self._issuePrimaryClaim(schemaId, attributes,
claimRequest.U)
# TODO re-enable when revocation registry is fully implemented
c2 = await self._issueNonRevocationClaim(schemaId, claimRequest.Ur,
iA,
i) if claimRequest.Ur else None
signature = Claims(primaryClaim=c1, nonRevocClaim=c2)
return (signature, claim)
|
Issue a claim for the given user and schema.
:param schemaId: The schema ID (reference to claim
definition schema)
:param claimRequest: A claim request containing prover ID and
prover-generated values
:param iA: accumulator ID
:param i: claim's sequence number within accumulator
:return: The claim (both primary and non-revocation)
|
train
|
https://github.com/hyperledger-archives/indy-anoncreds/blob/9d9cda3d505c312257d99a13d74d8f05dac3091a/anoncreds/protocol/issuer.py#L98-L132
|
[
"def getAttributes(self, schemaKey: SchemaKey, userId) -> Attribs:\n return self.attributes.get((schemaKey, userId))\n",
"async def _genContxt(self, schemaId: ID, iA, userId):\n iA = strToInt(str(iA))\n userId = strToInt(str(userId))\n S = iA | userId\n H = get_hash_as_int(S)\n m2 = cmod.integer(H % (2 ** LARGE_MASTER_SECRET))\n await self.wallet.submitContextAttr(schemaId, m2)\n return m2\n",
"async def _issuePrimaryClaim(self, schemaId: ID, attributes: Attribs,\n U) -> (PrimaryClaim, Dict[str, ClaimAttributeValues]):\n return await self._primaryIssuer.issuePrimaryClaim(schemaId,\n attributes, U)\n",
"async def _issueNonRevocationClaim(self, schemaId: ID, Ur, iA=None,\n i=None) -> NonRevocationClaim:\n claim, accum, ts = await self._nonRevocationIssuer.issueNonRevocationClaim(\n schemaId, Ur, iA, i)\n await self.wallet.submitAccumUpdate(schemaId=schemaId, accum=accum,\n timestampMs=ts)\n return claim\n",
"async def getSchema(self, schemaId: ID) -> Schema:\n if schemaId.schemaKey and schemaId.schemaKey in self._schemasByKey:\n return self._schemasByKey[schemaId.schemaKey]\n if schemaId.schemaId and schemaId.schemaId in self._schemasById:\n return self._schemasById[schemaId.schemaId]\n\n schema = await self._repo.getSchema(schemaId)\n\n self._cacheSchema(schema)\n\n return schema\n"
] |
class Issuer:
def __init__(self, wallet: IssuerWallet, attrRepo: AttributeRepo):
self.wallet = wallet
self._attrRepo = attrRepo
self._primaryIssuer = PrimaryClaimIssuer(wallet)
self._nonRevocationIssuer = NonRevocationClaimIssuer(wallet)
#
# PUBLIC
#
@property
def issuerId(self):
return self.wallet.walletId
def isSchemaExists(self, schemaKey):
return self.wallet._schemasByKey.get(schemaKey)
async def genSchema(self, name, version, attrNames) -> Schema:
"""
Generates and submits Schema.
:param name: schema name
:param version: schema version
:param attrNames: a list of attributes the schema contains
:return: submitted Schema
"""
schema = Schema(name, version, attrNames, self.issuerId)
return await self.wallet.submitSchema(schema)
async def genKeys(self, schemaId: ID, p_prime=None, q_prime=None) -> (
PublicKey, RevocationPublicKey):
"""
Generates and submits keys (both public and secret, primary and
non-revocation).
:param schemaId: The schema ID (reference to claim
definition schema)
:param p_prime: optional p_prime parameter
:param q_prime: optional q_prime parameter
:return: Submitted Public keys (both primary and non-revocation)
"""
pk, sk = await self._primaryIssuer.genKeys(schemaId, p_prime, q_prime)
pkR, skR = await self._nonRevocationIssuer.genRevocationKeys()
pk = await self.wallet.submitPublicKeys(schemaId=schemaId, pk=pk,
pkR=pkR)
pkR = await self.wallet.submitSecretKeys(schemaId=schemaId, sk=sk,
skR=skR)
return pk, pkR
async def issueAccumulator(self, schemaId: ID, iA,
L) -> AccumulatorPublicKey:
"""
Issues and submits an accumulator used for non-revocation proof.
:param schemaId: The schema ID (reference to claim
definition schema)
:param iA: accumulator ID
:param L: maximum number of claims within accumulator.
:return: Submitted accumulator public key
"""
accum, tails, accPK, accSK = await self._nonRevocationIssuer.issueAccumulator(
schemaId, iA, L)
accPK = await self.wallet.submitAccumPublic(schemaId=schemaId,
accumPK=accPK,
accum=accum, tails=tails)
await self.wallet.submitAccumSecret(schemaId=schemaId,
accumSK=accSK)
return accPK
async def revoke(self, schemaId: ID, i):
"""
Performs revocation of a Claim.
:param schemaId: The schema ID (reference to claim
definition schema)
:param i: claim's sequence number within accumulator
"""
acc, ts = await self._nonRevocationIssuer.revoke(schemaId, i)
await self.wallet.submitAccumUpdate(schemaId=schemaId, accum=acc,
timestampMs=ts)
async def issueClaims(self, allClaimRequest: Dict[ID, ClaimRequest]) -> \
Dict[ID, Claims]:
"""
Issue claims for the given users and schemas.
:param allClaimRequest: a map of schema ID to a claim
request containing prover ID and prover-generated values
:return: The claims (both primary and non-revocation)
"""
res = {}
for schemaId, claimReq in allClaimRequest.items():
res[schemaId] = await self.issueClaim(schemaId, claimReq)
return res
#
# PRIVATE
#
async def _genContxt(self, schemaId: ID, iA, userId):
iA = strToInt(str(iA))
userId = strToInt(str(userId))
S = iA | userId
H = get_hash_as_int(S)
m2 = cmod.integer(H % (2 ** LARGE_MASTER_SECRET))
await self.wallet.submitContextAttr(schemaId, m2)
return m2
async def _issuePrimaryClaim(self, schemaId: ID, attributes: Attribs,
U) -> (PrimaryClaim, Dict[str, ClaimAttributeValues]):
return await self._primaryIssuer.issuePrimaryClaim(schemaId,
attributes, U)
async def _issueNonRevocationClaim(self, schemaId: ID, Ur, iA=None,
i=None) -> NonRevocationClaim:
claim, accum, ts = await self._nonRevocationIssuer.issueNonRevocationClaim(
schemaId, Ur, iA, i)
await self.wallet.submitAccumUpdate(schemaId=schemaId, accum=accum,
timestampMs=ts)
return claim
def __repr__(self):
return str(self.__dict__)
|
hyperledger-archives/indy-anoncreds
|
anoncreds/protocol/issuer.py
|
Issuer.issueClaims
|
python
|
async def issueClaims(self, allClaimRequest: Dict[ID, ClaimRequest]) -> \
Dict[ID, Claims]:
res = {}
for schemaId, claimReq in allClaimRequest.items():
res[schemaId] = await self.issueClaim(schemaId, claimReq)
return res
|
Issue claims for the given users and schemas.
:param allClaimRequest: a map of schema ID to a claim
request containing prover ID and prover-generated values
:return: The claims (both primary and non-revocation)
|
train
|
https://github.com/hyperledger-archives/indy-anoncreds/blob/9d9cda3d505c312257d99a13d74d8f05dac3091a/anoncreds/protocol/issuer.py#L134-L146
|
[
"async def issueClaim(self, schemaId: ID, claimRequest: ClaimRequest,\n iA=None,\n i=None) -> (Claims, Dict[str, ClaimAttributeValues]):\n \"\"\"\n Issue a claim for the given user and schema.\n\n :param schemaId: The schema ID (reference to claim\n definition schema)\n :param claimRequest: A claim request containing prover ID and\n prover-generated values\n :param iA: accumulator ID\n :param i: claim's sequence number within accumulator\n :return: The claim (both primary and non-revocation)\n \"\"\"\n\n schemaKey = (await self.wallet.getSchema(schemaId)).getKey()\n attributes = self._attrRepo.getAttributes(schemaKey,\n claimRequest.userId)\n\n # TODO re-enable when revocation registry is implemented\n # iA = iA if iA else (await self.wallet.getAccumulator(schemaId)).iA\n\n # TODO this has un-obvious side-effects\n await self._genContxt(schemaId, iA, claimRequest.userId)\n\n (c1, claim) = await self._issuePrimaryClaim(schemaId, attributes,\n claimRequest.U)\n # TODO re-enable when revocation registry is fully implemented\n c2 = await self._issueNonRevocationClaim(schemaId, claimRequest.Ur,\n iA,\n i) if claimRequest.Ur else None\n\n signature = Claims(primaryClaim=c1, nonRevocClaim=c2)\n\n return (signature, claim)\n"
] |
class Issuer:
def __init__(self, wallet: IssuerWallet, attrRepo: AttributeRepo):
self.wallet = wallet
self._attrRepo = attrRepo
self._primaryIssuer = PrimaryClaimIssuer(wallet)
self._nonRevocationIssuer = NonRevocationClaimIssuer(wallet)
#
# PUBLIC
#
@property
def issuerId(self):
return self.wallet.walletId
def isSchemaExists(self, schemaKey):
return self.wallet._schemasByKey.get(schemaKey)
async def genSchema(self, name, version, attrNames) -> Schema:
"""
Generates and submits Schema.
:param name: schema name
:param version: schema version
:param attrNames: a list of attributes the schema contains
:return: submitted Schema
"""
schema = Schema(name, version, attrNames, self.issuerId)
return await self.wallet.submitSchema(schema)
async def genKeys(self, schemaId: ID, p_prime=None, q_prime=None) -> (
PublicKey, RevocationPublicKey):
"""
Generates and submits keys (both public and secret, primary and
non-revocation).
:param schemaId: The schema ID (reference to claim
definition schema)
:param p_prime: optional p_prime parameter
:param q_prime: optional q_prime parameter
:return: Submitted Public keys (both primary and non-revocation)
"""
pk, sk = await self._primaryIssuer.genKeys(schemaId, p_prime, q_prime)
pkR, skR = await self._nonRevocationIssuer.genRevocationKeys()
pk = await self.wallet.submitPublicKeys(schemaId=schemaId, pk=pk,
pkR=pkR)
pkR = await self.wallet.submitSecretKeys(schemaId=schemaId, sk=sk,
skR=skR)
return pk, pkR
async def issueAccumulator(self, schemaId: ID, iA,
L) -> AccumulatorPublicKey:
"""
Issues and submits an accumulator used for non-revocation proof.
:param schemaId: The schema ID (reference to claim
definition schema)
:param iA: accumulator ID
:param L: maximum number of claims within accumulator.
:return: Submitted accumulator public key
"""
accum, tails, accPK, accSK = await self._nonRevocationIssuer.issueAccumulator(
schemaId, iA, L)
accPK = await self.wallet.submitAccumPublic(schemaId=schemaId,
accumPK=accPK,
accum=accum, tails=tails)
await self.wallet.submitAccumSecret(schemaId=schemaId,
accumSK=accSK)
return accPK
async def revoke(self, schemaId: ID, i):
"""
Performs revocation of a Claim.
:param schemaId: The schema ID (reference to claim
definition schema)
:param i: claim's sequence number within accumulator
"""
acc, ts = await self._nonRevocationIssuer.revoke(schemaId, i)
await self.wallet.submitAccumUpdate(schemaId=schemaId, accum=acc,
timestampMs=ts)
async def issueClaim(self, schemaId: ID, claimRequest: ClaimRequest,
iA=None,
i=None) -> (Claims, Dict[str, ClaimAttributeValues]):
"""
Issue a claim for the given user and schema.
:param schemaId: The schema ID (reference to claim
definition schema)
:param claimRequest: A claim request containing prover ID and
prover-generated values
:param iA: accumulator ID
:param i: claim's sequence number within accumulator
:return: The claim (both primary and non-revocation)
"""
schemaKey = (await self.wallet.getSchema(schemaId)).getKey()
attributes = self._attrRepo.getAttributes(schemaKey,
claimRequest.userId)
# TODO re-enable when revocation registry is implemented
# iA = iA if iA else (await self.wallet.getAccumulator(schemaId)).iA
# TODO this has un-obvious side-effects
await self._genContxt(schemaId, iA, claimRequest.userId)
(c1, claim) = await self._issuePrimaryClaim(schemaId, attributes,
claimRequest.U)
# TODO re-enable when revocation registry is fully implemented
c2 = await self._issueNonRevocationClaim(schemaId, claimRequest.Ur,
iA,
i) if claimRequest.Ur else None
signature = Claims(primaryClaim=c1, nonRevocClaim=c2)
return (signature, claim)
#
# PRIVATE
#
async def _genContxt(self, schemaId: ID, iA, userId):
iA = strToInt(str(iA))
userId = strToInt(str(userId))
S = iA | userId
H = get_hash_as_int(S)
m2 = cmod.integer(H % (2 ** LARGE_MASTER_SECRET))
await self.wallet.submitContextAttr(schemaId, m2)
return m2
async def _issuePrimaryClaim(self, schemaId: ID, attributes: Attribs,
U) -> (PrimaryClaim, Dict[str, ClaimAttributeValues]):
return await self._primaryIssuer.issuePrimaryClaim(schemaId,
attributes, U)
async def _issueNonRevocationClaim(self, schemaId: ID, Ur, iA=None,
i=None) -> NonRevocationClaim:
claim, accum, ts = await self._nonRevocationIssuer.issueNonRevocationClaim(
schemaId, Ur, iA, i)
await self.wallet.submitAccumUpdate(schemaId=schemaId, accum=accum,
timestampMs=ts)
return claim
def __repr__(self):
return str(self.__dict__)
|
hyperledger-archives/indy-anoncreds
|
anoncreds/protocol/verifier.py
|
Verifier.verify
|
python
|
async def verify(self, proofRequest: ProofRequest, proof: FullProof):
if proofRequest.verifiableAttributes.keys() != proof.requestedProof.revealed_attrs.keys():
raise ValueError('Received attributes ={} do not correspond to requested={}'.format(
proof.requestedProof.revealed_attrs.keys(), proofRequest.verifiableAttributes.keys()))
if proofRequest.predicates.keys() != proof.requestedProof.predicates.keys():
raise ValueError('Received predicates ={} do not correspond to requested={}'.format(
proof.requestedProof.predicates.keys(), proofRequest.predicates.keys()))
TauList = []
for (uuid, proofItem) in proof.proofs.items():
if proofItem.proof.nonRevocProof:
TauList += await self._nonRevocVerifier.verifyNonRevocation(
proofRequest, proofItem.schema_seq_no, proof.aggregatedProof.cHash,
proofItem.proof.nonRevocProof)
if proofItem.proof.primaryProof:
TauList += await self._primaryVerifier.verify(proofItem.schema_seq_no,
proof.aggregatedProof.cHash,
proofItem.proof.primaryProof)
CHver = self._get_hash(proof.aggregatedProof.CList, self._prepare_collection(TauList),
cmod.integer(proofRequest.nonce))
return CHver == proof.aggregatedProof.cHash
|
Verifies a proof from the prover.
:param proofRequest: description of a proof to be presented (revealed
attributes, predicates, timestamps for non-revocation)
:param proof: a proof
:return: True if verified successfully and false otherwise.
|
train
|
https://github.com/hyperledger-archives/indy-anoncreds/blob/9d9cda3d505c312257d99a13d74d8f05dac3091a/anoncreds/protocol/verifier.py#L27-L59
| null |
class Verifier:
def __init__(self, wallet: Wallet):
self.wallet = wallet
self._primaryVerifier = PrimaryProofVerifier(wallet)
self._nonRevocVerifier = NonRevocationProofVerifier(wallet)
@property
def verifierId(self):
return self.wallet.walletId
def generateNonce(self):
return cmod.integer(cmod.randomBits(LARGE_NONCE))
def _prepare_collection(self, values):
return [cmod.toInt(el) if isCryptoInteger(el) else el for el in values]
def _get_hash(self, CList, TauList, nonce):
return get_hash_as_int(nonce,
*reduce(lambda x, y: x + y, [TauList, CList]))
|
hyperledger-archives/indy-anoncreds
|
anoncreds/protocol/prover.py
|
Prover.createClaimRequest
|
python
|
async def createClaimRequest(self, schemaId: ID, proverId=None,
reqNonRevoc=True) -> ClaimRequest:
await self._genMasterSecret(schemaId)
U = await self._genU(schemaId)
Ur = None if not reqNonRevoc else await self._genUr(schemaId)
proverId = proverId if proverId else self.proverId
return ClaimRequest(userId=proverId, U=U, Ur=Ur)
|
Creates a claim request to the issuer.
:param schemaId: The schema ID (reference to claim
definition schema)
:param proverId: a prover ID request a claim for (if None then
the current prover default ID is used)
:param reqNonRevoc: whether to request non-revocation claim
:return: Claim Request
|
train
|
https://github.com/hyperledger-archives/indy-anoncreds/blob/9d9cda3d505c312257d99a13d74d8f05dac3091a/anoncreds/protocol/prover.py#L37-L53
|
[
"async def _genMasterSecret(self, schemaId: ID):\n ms = cmod.integer(cmod.randomBits(LARGE_MASTER_SECRET))\n await self.wallet.submitMasterSecret(schemaId=schemaId, ms=ms)\n",
"async def _genU(self, schemaId: ID):\n claimInitData = await self._primaryClaimInitializer.genClaimInitData(\n schemaId)\n await self.wallet.submitPrimaryClaimInitData(schemaId=schemaId,\n claimInitData=claimInitData)\n return claimInitData.U\n",
"async def _genUr(self, schemaId: ID):\n claimInitData = await self._nonRevocClaimInitializer.genClaimInitData(\n schemaId)\n await self.wallet.submitNonRevocClaimInitData(schemaId=schemaId,\n claimInitData=claimInitData)\n return claimInitData.U\n"
] |
class Prover:
def __init__(self, wallet: ProverWallet):
self.wallet = wallet
self._primaryClaimInitializer = PrimaryClaimInitializer(wallet)
self._nonRevocClaimInitializer = NonRevocationClaimInitializer(wallet)
self._primaryProofBuilder = PrimaryProofBuilder(wallet)
self._nonRevocProofBuilder = NonRevocationProofBuilder(wallet)
#
# PUBLIC
#
@property
def proverId(self):
return self.wallet.walletId
async def createClaimRequests(self, schemaIds: Sequence[ID],
proverId=None,
reqNonRevoc=True) -> Dict[ID, ClaimRequest]:
"""
Creates a claim request to the issuer.
:param schemaIds: The schema IDs (references to claim
definition schema)
:param proverId: a prover ID request a claim for (if None then
the current prover default ID is used)
:param reqNonRevoc: whether to request non-revocation claim
:return: a dictionary of Claim Requests for each Schema.
"""
res = {}
for schemaId in schemaIds:
res[schemaId] = await self.createClaimRequest(schemaId,
proverId,
reqNonRevoc)
return res
async def processClaim(self, schemaId: ID, claimAttributes: Dict[str, ClaimAttributeValues], signature: Claims):
"""
Processes and saves a received Claim for the given Schema.
:param schemaId: The schema ID (reference to claim
definition schema)
:param claims: claims to be processed and saved
"""
await self.wallet.submitContextAttr(schemaId, signature.primaryClaim.m2)
await self.wallet.submitClaimAttributes(schemaId, claimAttributes)
await self._initPrimaryClaim(schemaId, signature.primaryClaim)
if signature.nonRevocClaim:
await self._initNonRevocationClaim(schemaId, signature.nonRevocClaim)
async def processClaims(self, allClaims: Dict[ID, Claims]):
"""
Processes and saves received Claims.
:param claims: claims to be processed and saved for each claim
definition.
"""
res = []
for schemaId, (claim_signature, claim_attributes) in allClaims.items():
res.append(await self.processClaim(schemaId, claim_attributes, claim_signature))
return res
async def presentProof(self, proofRequest: ProofRequest) -> FullProof:
"""
Presents a proof to the verifier.
:param proofRequest: description of a proof to be presented (revealed
attributes, predicates, timestamps for non-revocation)
:return: a proof (both primary and non-revocation) and revealed attributes (initial non-encoded values)
"""
claims, requestedProof = await self._findClaims(proofRequest)
proof = await self._prepareProof(claims, proofRequest.nonce, requestedProof)
return proof
#
# REQUEST CLAIMS
#
async def _genMasterSecret(self, schemaId: ID):
ms = cmod.integer(cmod.randomBits(LARGE_MASTER_SECRET))
await self.wallet.submitMasterSecret(schemaId=schemaId, ms=ms)
async def _genU(self, schemaId: ID):
claimInitData = await self._primaryClaimInitializer.genClaimInitData(
schemaId)
await self.wallet.submitPrimaryClaimInitData(schemaId=schemaId,
claimInitData=claimInitData)
return claimInitData.U
async def _genUr(self, schemaId: ID):
claimInitData = await self._nonRevocClaimInitializer.genClaimInitData(
schemaId)
await self.wallet.submitNonRevocClaimInitData(schemaId=schemaId,
claimInitData=claimInitData)
return claimInitData.U
async def _initPrimaryClaim(self, schemaId: ID, claim: PrimaryClaim):
claim = await self._primaryClaimInitializer.preparePrimaryClaim(
schemaId,
claim)
await self.wallet.submitPrimaryClaim(schemaId=schemaId, claim=claim)
async def _initNonRevocationClaim(self, schemaId: ID,
claim: NonRevocationClaim):
claim = await self._nonRevocClaimInitializer.initNonRevocationClaim(
schemaId,
claim)
await self.wallet.submitNonRevocClaim(schemaId=schemaId,
claim=claim)
#
# PRESENT PROOF
#
async def _findClaims(self, proofRequest: ProofRequest) -> (
Dict[SchemaKey, ProofClaims], Dict[str, Any]):
revealedAttrs, predicates = proofRequest.verifiableAttributes, proofRequest.predicates
foundRevealedAttrs = {}
foundPredicates = {}
proofClaims = {}
schemas = {}
allClaimsAttributes = await self.wallet.getAllClaimsAttributes()
async def addProof():
revealedAttrsForClaim = [
a for a in revealedAttrs.values() if a.name in claim.keys()]
revealedPredicatesForClaim = [
p for p in predicates.values() if p.attrName in claim.keys()]
claims = await self.wallet.getClaimSignature(ID(schemaId=schemaId))
proofClaim = ProofClaims(claims=claims, revealedAttrs=revealedAttrsForClaim,
predicates=revealedPredicatesForClaim)
proofClaims[schemaId] = proofClaim
for schemaKey, c in allClaimsAttributes.items():
schemas[schemaKey] = (await self.wallet.getSchema(ID(schemaKey)))
for uuid, revealedAttr in revealedAttrs.items():
matches = [(schemas[key].seqId, c) for key, c in allClaimsAttributes.items() if revealedAttr.name in c and
(schemas[key].seqId == revealedAttr.schema_seq_no if revealedAttr.schema_seq_no else True) and
(schemas[key].issuerId == revealedAttr.issuer_did if revealedAttr.issuer_did else True)]
if len(matches) == 0:
raise ValueError(
"A claim isn't found for the following attributes: {}", revealedAttr.name)
schemaId, claim = matches[0]
foundRevealedAttrs[uuid] = [str(schemaId), str(claim[revealedAttr.name].raw),
str(claim[revealedAttr.name].encoded)]
if schemaId not in proofClaims:
await addProof()
for uuid, predicate in predicates.items():
matches = [(schemas[key].seqId, c) for key, c in allClaimsAttributes.items() if predicate.attrName in c and
(schemas[key].seqId == predicate.schema_seq_no if predicate.schema_seq_no else True) and
(schemas[key].issuerId == predicate.issuer_did if predicate.issuer_did else True)]
if len(matches) == 0:
raise ValueError(
"A claim isn't found for the following predicate: {}", predicate)
schemaId, claim = matches[0]
foundPredicates[uuid] = str(schemaId)
if schemaId not in proofClaims:
await addProof()
requestedProof = RequestedProof(
revealed_attrs=foundRevealedAttrs, predicates=foundPredicates)
return proofClaims, requestedProof
async def _prepareProof(self, claims: Dict[SchemaKey, ProofClaims],
nonce, requestedProof) -> FullProof:
m1Tilde = cmod.integer(cmod.randomBits(LARGE_M2_TILDE))
initProofs = {}
CList = []
TauList = []
# 1. init proofs
for schemaId, val in claims.items():
c1, c2, revealedAttrs, predicates = val.claims.primaryClaim, val.claims.nonRevocClaim, val.revealedAttrs, val.predicates
claim = await self.wallet.getClaimAttributes(ID(schemaId=schemaId))
nonRevocInitProof = None
if c2:
nonRevocInitProof = await self._nonRevocProofBuilder.initProof(
schemaId, c2)
CList += nonRevocInitProof.asCList()
TauList += nonRevocInitProof.asTauList()
primaryInitProof = None
if c1:
m2Tilde = cmod.integer(int(
nonRevocInitProof.TauListParams.m2)) if nonRevocInitProof else None
primaryInitProof = await self._primaryProofBuilder.initProof(
schemaId, c1, revealedAttrs, predicates,
m1Tilde, m2Tilde, claim)
CList += primaryInitProof.asCList()
TauList += primaryInitProof.asTauList()
initProof = InitProof(nonRevocInitProof, primaryInitProof)
initProofs[schemaId] = initProof
# 2. hash
cH = self._get_hash(self._prepare_collection(
CList), self._prepare_collection(TauList), nonce)
# 3. finalize proofs
proofs = {}
for schemaId, initProof in initProofs.items():
nonRevocProof = None
if initProof.nonRevocInitProof:
nonRevocProof = await self._nonRevocProofBuilder.finalizeProof(
schemaId, cH, initProof.nonRevocInitProof)
primaryProof = await self._primaryProofBuilder.finalizeProof(
schemaId, cH, initProof.primaryInitProof)
schema = await self.wallet.getSchema(ID(schemaId=schemaId))
proof = Proof(primaryProof, nonRevocProof)
proofInfo = ProofInfo(
proof=proof, schema_seq_no=schemaId, issuer_did=schema.issuerId)
proofs[str(schemaId)] = proofInfo
aggregatedProof = AggregatedProof(cH, self._prepare_collection(CList))
return FullProof(proofs, aggregatedProof, requestedProof)
async def _getCList(self, initProofs: Dict[Schema, InitProof]):
CList = []
for initProof in initProofs.values():
CList += await initProof.nonRevocInitProof.asCList()
CList += await initProof.primaryInitProof.asCList()
return CList
async def _getTauList(self, initProofs: Dict[Schema, InitProof]):
TauList = []
for initProof in initProofs.values():
TauList += await initProof.nonRevocInitProof.asTauList()
TauList += await initProof.primaryInitProof.asTauList()
return TauList
def _prepare_collection(self, values):
return [cmod.toInt(el) if isCryptoInteger(el) else el for el in values]
def _get_hash(self, CList, TauList, nonce):
return get_hash_as_int(nonce,
*reduce(lambda x, y: x + y, [TauList, CList]))
|
hyperledger-archives/indy-anoncreds
|
anoncreds/protocol/prover.py
|
Prover.createClaimRequests
|
python
|
async def createClaimRequests(self, schemaIds: Sequence[ID],
proverId=None,
reqNonRevoc=True) -> Dict[ID, ClaimRequest]:
res = {}
for schemaId in schemaIds:
res[schemaId] = await self.createClaimRequest(schemaId,
proverId,
reqNonRevoc)
return res
|
Creates a claim request to the issuer.
:param schemaIds: The schema IDs (references to claim
definition schema)
:param proverId: a prover ID request a claim for (if None then
the current prover default ID is used)
:param reqNonRevoc: whether to request non-revocation claim
:return: a dictionary of Claim Requests for each Schema.
|
train
|
https://github.com/hyperledger-archives/indy-anoncreds/blob/9d9cda3d505c312257d99a13d74d8f05dac3091a/anoncreds/protocol/prover.py#L55-L73
|
[
"async def createClaimRequest(self, schemaId: ID, proverId=None,\n reqNonRevoc=True) -> ClaimRequest:\n \"\"\"\n Creates a claim request to the issuer.\n\n :param schemaId: The schema ID (reference to claim\n definition schema)\n :param proverId: a prover ID request a claim for (if None then\n the current prover default ID is used)\n :param reqNonRevoc: whether to request non-revocation claim\n :return: Claim Request\n \"\"\"\n await self._genMasterSecret(schemaId)\n U = await self._genU(schemaId)\n Ur = None if not reqNonRevoc else await self._genUr(schemaId)\n proverId = proverId if proverId else self.proverId\n return ClaimRequest(userId=proverId, U=U, Ur=Ur)\n"
] |
class Prover:
def __init__(self, wallet: ProverWallet):
self.wallet = wallet
self._primaryClaimInitializer = PrimaryClaimInitializer(wallet)
self._nonRevocClaimInitializer = NonRevocationClaimInitializer(wallet)
self._primaryProofBuilder = PrimaryProofBuilder(wallet)
self._nonRevocProofBuilder = NonRevocationProofBuilder(wallet)
#
# PUBLIC
#
@property
def proverId(self):
return self.wallet.walletId
async def createClaimRequest(self, schemaId: ID, proverId=None,
reqNonRevoc=True) -> ClaimRequest:
"""
Creates a claim request to the issuer.
:param schemaId: The schema ID (reference to claim
definition schema)
:param proverId: a prover ID request a claim for (if None then
the current prover default ID is used)
:param reqNonRevoc: whether to request non-revocation claim
:return: Claim Request
"""
await self._genMasterSecret(schemaId)
U = await self._genU(schemaId)
Ur = None if not reqNonRevoc else await self._genUr(schemaId)
proverId = proverId if proverId else self.proverId
return ClaimRequest(userId=proverId, U=U, Ur=Ur)
async def processClaim(self, schemaId: ID, claimAttributes: Dict[str, ClaimAttributeValues], signature: Claims):
"""
Processes and saves a received Claim for the given Schema.
:param schemaId: The schema ID (reference to claim
definition schema)
:param claims: claims to be processed and saved
"""
await self.wallet.submitContextAttr(schemaId, signature.primaryClaim.m2)
await self.wallet.submitClaimAttributes(schemaId, claimAttributes)
await self._initPrimaryClaim(schemaId, signature.primaryClaim)
if signature.nonRevocClaim:
await self._initNonRevocationClaim(schemaId, signature.nonRevocClaim)
async def processClaims(self, allClaims: Dict[ID, Claims]):
"""
Processes and saves received Claims.
:param claims: claims to be processed and saved for each claim
definition.
"""
res = []
for schemaId, (claim_signature, claim_attributes) in allClaims.items():
res.append(await self.processClaim(schemaId, claim_attributes, claim_signature))
return res
async def presentProof(self, proofRequest: ProofRequest) -> FullProof:
"""
Presents a proof to the verifier.
:param proofRequest: description of a proof to be presented (revealed
attributes, predicates, timestamps for non-revocation)
:return: a proof (both primary and non-revocation) and revealed attributes (initial non-encoded values)
"""
claims, requestedProof = await self._findClaims(proofRequest)
proof = await self._prepareProof(claims, proofRequest.nonce, requestedProof)
return proof
#
# REQUEST CLAIMS
#
async def _genMasterSecret(self, schemaId: ID):
ms = cmod.integer(cmod.randomBits(LARGE_MASTER_SECRET))
await self.wallet.submitMasterSecret(schemaId=schemaId, ms=ms)
async def _genU(self, schemaId: ID):
claimInitData = await self._primaryClaimInitializer.genClaimInitData(
schemaId)
await self.wallet.submitPrimaryClaimInitData(schemaId=schemaId,
claimInitData=claimInitData)
return claimInitData.U
async def _genUr(self, schemaId: ID):
claimInitData = await self._nonRevocClaimInitializer.genClaimInitData(
schemaId)
await self.wallet.submitNonRevocClaimInitData(schemaId=schemaId,
claimInitData=claimInitData)
return claimInitData.U
async def _initPrimaryClaim(self, schemaId: ID, claim: PrimaryClaim):
claim = await self._primaryClaimInitializer.preparePrimaryClaim(
schemaId,
claim)
await self.wallet.submitPrimaryClaim(schemaId=schemaId, claim=claim)
async def _initNonRevocationClaim(self, schemaId: ID,
claim: NonRevocationClaim):
claim = await self._nonRevocClaimInitializer.initNonRevocationClaim(
schemaId,
claim)
await self.wallet.submitNonRevocClaim(schemaId=schemaId,
claim=claim)
#
# PRESENT PROOF
#
async def _findClaims(self, proofRequest: ProofRequest) -> (
Dict[SchemaKey, ProofClaims], Dict[str, Any]):
revealedAttrs, predicates = proofRequest.verifiableAttributes, proofRequest.predicates
foundRevealedAttrs = {}
foundPredicates = {}
proofClaims = {}
schemas = {}
allClaimsAttributes = await self.wallet.getAllClaimsAttributes()
async def addProof():
revealedAttrsForClaim = [
a for a in revealedAttrs.values() if a.name in claim.keys()]
revealedPredicatesForClaim = [
p for p in predicates.values() if p.attrName in claim.keys()]
claims = await self.wallet.getClaimSignature(ID(schemaId=schemaId))
proofClaim = ProofClaims(claims=claims, revealedAttrs=revealedAttrsForClaim,
predicates=revealedPredicatesForClaim)
proofClaims[schemaId] = proofClaim
for schemaKey, c in allClaimsAttributes.items():
schemas[schemaKey] = (await self.wallet.getSchema(ID(schemaKey)))
for uuid, revealedAttr in revealedAttrs.items():
matches = [(schemas[key].seqId, c) for key, c in allClaimsAttributes.items() if revealedAttr.name in c and
(schemas[key].seqId == revealedAttr.schema_seq_no if revealedAttr.schema_seq_no else True) and
(schemas[key].issuerId == revealedAttr.issuer_did if revealedAttr.issuer_did else True)]
if len(matches) == 0:
raise ValueError(
"A claim isn't found for the following attributes: {}", revealedAttr.name)
schemaId, claim = matches[0]
foundRevealedAttrs[uuid] = [str(schemaId), str(claim[revealedAttr.name].raw),
str(claim[revealedAttr.name].encoded)]
if schemaId not in proofClaims:
await addProof()
for uuid, predicate in predicates.items():
matches = [(schemas[key].seqId, c) for key, c in allClaimsAttributes.items() if predicate.attrName in c and
(schemas[key].seqId == predicate.schema_seq_no if predicate.schema_seq_no else True) and
(schemas[key].issuerId == predicate.issuer_did if predicate.issuer_did else True)]
if len(matches) == 0:
raise ValueError(
"A claim isn't found for the following predicate: {}", predicate)
schemaId, claim = matches[0]
foundPredicates[uuid] = str(schemaId)
if schemaId not in proofClaims:
await addProof()
requestedProof = RequestedProof(
revealed_attrs=foundRevealedAttrs, predicates=foundPredicates)
return proofClaims, requestedProof
async def _prepareProof(self, claims: Dict[SchemaKey, ProofClaims],
nonce, requestedProof) -> FullProof:
m1Tilde = cmod.integer(cmod.randomBits(LARGE_M2_TILDE))
initProofs = {}
CList = []
TauList = []
# 1. init proofs
for schemaId, val in claims.items():
c1, c2, revealedAttrs, predicates = val.claims.primaryClaim, val.claims.nonRevocClaim, val.revealedAttrs, val.predicates
claim = await self.wallet.getClaimAttributes(ID(schemaId=schemaId))
nonRevocInitProof = None
if c2:
nonRevocInitProof = await self._nonRevocProofBuilder.initProof(
schemaId, c2)
CList += nonRevocInitProof.asCList()
TauList += nonRevocInitProof.asTauList()
primaryInitProof = None
if c1:
m2Tilde = cmod.integer(int(
nonRevocInitProof.TauListParams.m2)) if nonRevocInitProof else None
primaryInitProof = await self._primaryProofBuilder.initProof(
schemaId, c1, revealedAttrs, predicates,
m1Tilde, m2Tilde, claim)
CList += primaryInitProof.asCList()
TauList += primaryInitProof.asTauList()
initProof = InitProof(nonRevocInitProof, primaryInitProof)
initProofs[schemaId] = initProof
# 2. hash
cH = self._get_hash(self._prepare_collection(
CList), self._prepare_collection(TauList), nonce)
# 3. finalize proofs
proofs = {}
for schemaId, initProof in initProofs.items():
nonRevocProof = None
if initProof.nonRevocInitProof:
nonRevocProof = await self._nonRevocProofBuilder.finalizeProof(
schemaId, cH, initProof.nonRevocInitProof)
primaryProof = await self._primaryProofBuilder.finalizeProof(
schemaId, cH, initProof.primaryInitProof)
schema = await self.wallet.getSchema(ID(schemaId=schemaId))
proof = Proof(primaryProof, nonRevocProof)
proofInfo = ProofInfo(
proof=proof, schema_seq_no=schemaId, issuer_did=schema.issuerId)
proofs[str(schemaId)] = proofInfo
aggregatedProof = AggregatedProof(cH, self._prepare_collection(CList))
return FullProof(proofs, aggregatedProof, requestedProof)
async def _getCList(self, initProofs: Dict[Schema, InitProof]):
CList = []
for initProof in initProofs.values():
CList += await initProof.nonRevocInitProof.asCList()
CList += await initProof.primaryInitProof.asCList()
return CList
async def _getTauList(self, initProofs: Dict[Schema, InitProof]):
TauList = []
for initProof in initProofs.values():
TauList += await initProof.nonRevocInitProof.asTauList()
TauList += await initProof.primaryInitProof.asTauList()
return TauList
def _prepare_collection(self, values):
return [cmod.toInt(el) if isCryptoInteger(el) else el for el in values]
def _get_hash(self, CList, TauList, nonce):
return get_hash_as_int(nonce,
*reduce(lambda x, y: x + y, [TauList, CList]))
|
hyperledger-archives/indy-anoncreds
|
anoncreds/protocol/prover.py
|
Prover.processClaim
|
python
|
async def processClaim(self, schemaId: ID, claimAttributes: Dict[str, ClaimAttributeValues], signature: Claims):
await self.wallet.submitContextAttr(schemaId, signature.primaryClaim.m2)
await self.wallet.submitClaimAttributes(schemaId, claimAttributes)
await self._initPrimaryClaim(schemaId, signature.primaryClaim)
if signature.nonRevocClaim:
await self._initNonRevocationClaim(schemaId, signature.nonRevocClaim)
|
Processes and saves a received Claim for the given Schema.
:param schemaId: The schema ID (reference to claim
definition schema)
:param claims: claims to be processed and saved
|
train
|
https://github.com/hyperledger-archives/indy-anoncreds/blob/9d9cda3d505c312257d99a13d74d8f05dac3091a/anoncreds/protocol/prover.py#L75-L88
|
[
"async def _initPrimaryClaim(self, schemaId: ID, claim: PrimaryClaim):\n claim = await self._primaryClaimInitializer.preparePrimaryClaim(\n schemaId,\n claim)\n await self.wallet.submitPrimaryClaim(schemaId=schemaId, claim=claim)\n",
"async def _initNonRevocationClaim(self, schemaId: ID,\n claim: NonRevocationClaim):\n claim = await self._nonRevocClaimInitializer.initNonRevocationClaim(\n schemaId,\n claim)\n await self.wallet.submitNonRevocClaim(schemaId=schemaId,\n claim=claim)\n",
"async def submitClaimAttributes(\n self, schemaId: ID,\n claimAttributes: Dict[str, ClaimAttributeValues]):\n await self._cacheValueForId(self._claims, schemaId, claimAttributes)\n",
"async def submitContextAttr(self, schemaId: ID, m2):\n await self._cacheValueForId(self._m2s, schemaId, m2)\n"
] |
class Prover:
def __init__(self, wallet: ProverWallet):
self.wallet = wallet
self._primaryClaimInitializer = PrimaryClaimInitializer(wallet)
self._nonRevocClaimInitializer = NonRevocationClaimInitializer(wallet)
self._primaryProofBuilder = PrimaryProofBuilder(wallet)
self._nonRevocProofBuilder = NonRevocationProofBuilder(wallet)
#
# PUBLIC
#
@property
def proverId(self):
return self.wallet.walletId
async def createClaimRequest(self, schemaId: ID, proverId=None,
reqNonRevoc=True) -> ClaimRequest:
"""
Creates a claim request to the issuer.
:param schemaId: The schema ID (reference to claim
definition schema)
:param proverId: a prover ID request a claim for (if None then
the current prover default ID is used)
:param reqNonRevoc: whether to request non-revocation claim
:return: Claim Request
"""
await self._genMasterSecret(schemaId)
U = await self._genU(schemaId)
Ur = None if not reqNonRevoc else await self._genUr(schemaId)
proverId = proverId if proverId else self.proverId
return ClaimRequest(userId=proverId, U=U, Ur=Ur)
async def createClaimRequests(self, schemaIds: Sequence[ID],
proverId=None,
reqNonRevoc=True) -> Dict[ID, ClaimRequest]:
"""
Creates a claim request to the issuer.
:param schemaIds: The schema IDs (references to claim
definition schema)
:param proverId: a prover ID request a claim for (if None then
the current prover default ID is used)
:param reqNonRevoc: whether to request non-revocation claim
:return: a dictionary of Claim Requests for each Schema.
"""
res = {}
for schemaId in schemaIds:
res[schemaId] = await self.createClaimRequest(schemaId,
proverId,
reqNonRevoc)
return res
async def processClaims(self, allClaims: Dict[ID, Claims]):
"""
Processes and saves received Claims.
:param claims: claims to be processed and saved for each claim
definition.
"""
res = []
for schemaId, (claim_signature, claim_attributes) in allClaims.items():
res.append(await self.processClaim(schemaId, claim_attributes, claim_signature))
return res
async def presentProof(self, proofRequest: ProofRequest) -> FullProof:
"""
Presents a proof to the verifier.
:param proofRequest: description of a proof to be presented (revealed
attributes, predicates, timestamps for non-revocation)
:return: a proof (both primary and non-revocation) and revealed attributes (initial non-encoded values)
"""
claims, requestedProof = await self._findClaims(proofRequest)
proof = await self._prepareProof(claims, proofRequest.nonce, requestedProof)
return proof
#
# REQUEST CLAIMS
#
async def _genMasterSecret(self, schemaId: ID):
ms = cmod.integer(cmod.randomBits(LARGE_MASTER_SECRET))
await self.wallet.submitMasterSecret(schemaId=schemaId, ms=ms)
async def _genU(self, schemaId: ID):
claimInitData = await self._primaryClaimInitializer.genClaimInitData(
schemaId)
await self.wallet.submitPrimaryClaimInitData(schemaId=schemaId,
claimInitData=claimInitData)
return claimInitData.U
async def _genUr(self, schemaId: ID):
claimInitData = await self._nonRevocClaimInitializer.genClaimInitData(
schemaId)
await self.wallet.submitNonRevocClaimInitData(schemaId=schemaId,
claimInitData=claimInitData)
return claimInitData.U
async def _initPrimaryClaim(self, schemaId: ID, claim: PrimaryClaim):
claim = await self._primaryClaimInitializer.preparePrimaryClaim(
schemaId,
claim)
await self.wallet.submitPrimaryClaim(schemaId=schemaId, claim=claim)
async def _initNonRevocationClaim(self, schemaId: ID,
claim: NonRevocationClaim):
claim = await self._nonRevocClaimInitializer.initNonRevocationClaim(
schemaId,
claim)
await self.wallet.submitNonRevocClaim(schemaId=schemaId,
claim=claim)
#
# PRESENT PROOF
#
async def _findClaims(self, proofRequest: ProofRequest) -> (
Dict[SchemaKey, ProofClaims], Dict[str, Any]):
revealedAttrs, predicates = proofRequest.verifiableAttributes, proofRequest.predicates
foundRevealedAttrs = {}
foundPredicates = {}
proofClaims = {}
schemas = {}
allClaimsAttributes = await self.wallet.getAllClaimsAttributes()
async def addProof():
revealedAttrsForClaim = [
a for a in revealedAttrs.values() if a.name in claim.keys()]
revealedPredicatesForClaim = [
p for p in predicates.values() if p.attrName in claim.keys()]
claims = await self.wallet.getClaimSignature(ID(schemaId=schemaId))
proofClaim = ProofClaims(claims=claims, revealedAttrs=revealedAttrsForClaim,
predicates=revealedPredicatesForClaim)
proofClaims[schemaId] = proofClaim
for schemaKey, c in allClaimsAttributes.items():
schemas[schemaKey] = (await self.wallet.getSchema(ID(schemaKey)))
for uuid, revealedAttr in revealedAttrs.items():
matches = [(schemas[key].seqId, c) for key, c in allClaimsAttributes.items() if revealedAttr.name in c and
(schemas[key].seqId == revealedAttr.schema_seq_no if revealedAttr.schema_seq_no else True) and
(schemas[key].issuerId == revealedAttr.issuer_did if revealedAttr.issuer_did else True)]
if len(matches) == 0:
raise ValueError(
"A claim isn't found for the following attributes: {}", revealedAttr.name)
schemaId, claim = matches[0]
foundRevealedAttrs[uuid] = [str(schemaId), str(claim[revealedAttr.name].raw),
str(claim[revealedAttr.name].encoded)]
if schemaId not in proofClaims:
await addProof()
for uuid, predicate in predicates.items():
matches = [(schemas[key].seqId, c) for key, c in allClaimsAttributes.items() if predicate.attrName in c and
(schemas[key].seqId == predicate.schema_seq_no if predicate.schema_seq_no else True) and
(schemas[key].issuerId == predicate.issuer_did if predicate.issuer_did else True)]
if len(matches) == 0:
raise ValueError(
"A claim isn't found for the following predicate: {}", predicate)
schemaId, claim = matches[0]
foundPredicates[uuid] = str(schemaId)
if schemaId not in proofClaims:
await addProof()
requestedProof = RequestedProof(
revealed_attrs=foundRevealedAttrs, predicates=foundPredicates)
return proofClaims, requestedProof
async def _prepareProof(self, claims: Dict[SchemaKey, ProofClaims],
nonce, requestedProof) -> FullProof:
m1Tilde = cmod.integer(cmod.randomBits(LARGE_M2_TILDE))
initProofs = {}
CList = []
TauList = []
# 1. init proofs
for schemaId, val in claims.items():
c1, c2, revealedAttrs, predicates = val.claims.primaryClaim, val.claims.nonRevocClaim, val.revealedAttrs, val.predicates
claim = await self.wallet.getClaimAttributes(ID(schemaId=schemaId))
nonRevocInitProof = None
if c2:
nonRevocInitProof = await self._nonRevocProofBuilder.initProof(
schemaId, c2)
CList += nonRevocInitProof.asCList()
TauList += nonRevocInitProof.asTauList()
primaryInitProof = None
if c1:
m2Tilde = cmod.integer(int(
nonRevocInitProof.TauListParams.m2)) if nonRevocInitProof else None
primaryInitProof = await self._primaryProofBuilder.initProof(
schemaId, c1, revealedAttrs, predicates,
m1Tilde, m2Tilde, claim)
CList += primaryInitProof.asCList()
TauList += primaryInitProof.asTauList()
initProof = InitProof(nonRevocInitProof, primaryInitProof)
initProofs[schemaId] = initProof
# 2. hash
cH = self._get_hash(self._prepare_collection(
CList), self._prepare_collection(TauList), nonce)
# 3. finalize proofs
proofs = {}
for schemaId, initProof in initProofs.items():
nonRevocProof = None
if initProof.nonRevocInitProof:
nonRevocProof = await self._nonRevocProofBuilder.finalizeProof(
schemaId, cH, initProof.nonRevocInitProof)
primaryProof = await self._primaryProofBuilder.finalizeProof(
schemaId, cH, initProof.primaryInitProof)
schema = await self.wallet.getSchema(ID(schemaId=schemaId))
proof = Proof(primaryProof, nonRevocProof)
proofInfo = ProofInfo(
proof=proof, schema_seq_no=schemaId, issuer_did=schema.issuerId)
proofs[str(schemaId)] = proofInfo
aggregatedProof = AggregatedProof(cH, self._prepare_collection(CList))
return FullProof(proofs, aggregatedProof, requestedProof)
async def _getCList(self, initProofs: Dict[Schema, InitProof]):
CList = []
for initProof in initProofs.values():
CList += await initProof.nonRevocInitProof.asCList()
CList += await initProof.primaryInitProof.asCList()
return CList
async def _getTauList(self, initProofs: Dict[Schema, InitProof]):
TauList = []
for initProof in initProofs.values():
TauList += await initProof.nonRevocInitProof.asTauList()
TauList += await initProof.primaryInitProof.asTauList()
return TauList
def _prepare_collection(self, values):
return [cmod.toInt(el) if isCryptoInteger(el) else el for el in values]
def _get_hash(self, CList, TauList, nonce):
return get_hash_as_int(nonce,
*reduce(lambda x, y: x + y, [TauList, CList]))
|
hyperledger-archives/indy-anoncreds
|
anoncreds/protocol/prover.py
|
Prover.processClaims
|
python
|
async def processClaims(self, allClaims: Dict[ID, Claims]):
res = []
for schemaId, (claim_signature, claim_attributes) in allClaims.items():
res.append(await self.processClaim(schemaId, claim_attributes, claim_signature))
return res
|
Processes and saves received Claims.
:param claims: claims to be processed and saved for each claim
definition.
|
train
|
https://github.com/hyperledger-archives/indy-anoncreds/blob/9d9cda3d505c312257d99a13d74d8f05dac3091a/anoncreds/protocol/prover.py#L90-L100
|
[
"async def processClaim(self, schemaId: ID, claimAttributes: Dict[str, ClaimAttributeValues], signature: Claims):\n \"\"\"\n Processes and saves a received Claim for the given Schema.\n\n :param schemaId: The schema ID (reference to claim\n definition schema)\n :param claims: claims to be processed and saved\n \"\"\"\n await self.wallet.submitContextAttr(schemaId, signature.primaryClaim.m2)\n await self.wallet.submitClaimAttributes(schemaId, claimAttributes)\n\n await self._initPrimaryClaim(schemaId, signature.primaryClaim)\n if signature.nonRevocClaim:\n await self._initNonRevocationClaim(schemaId, signature.nonRevocClaim)\n"
] |
class Prover:
def __init__(self, wallet: ProverWallet):
self.wallet = wallet
self._primaryClaimInitializer = PrimaryClaimInitializer(wallet)
self._nonRevocClaimInitializer = NonRevocationClaimInitializer(wallet)
self._primaryProofBuilder = PrimaryProofBuilder(wallet)
self._nonRevocProofBuilder = NonRevocationProofBuilder(wallet)
#
# PUBLIC
#
@property
def proverId(self):
return self.wallet.walletId
async def createClaimRequest(self, schemaId: ID, proverId=None,
reqNonRevoc=True) -> ClaimRequest:
"""
Creates a claim request to the issuer.
:param schemaId: The schema ID (reference to claim
definition schema)
:param proverId: a prover ID request a claim for (if None then
the current prover default ID is used)
:param reqNonRevoc: whether to request non-revocation claim
:return: Claim Request
"""
await self._genMasterSecret(schemaId)
U = await self._genU(schemaId)
Ur = None if not reqNonRevoc else await self._genUr(schemaId)
proverId = proverId if proverId else self.proverId
return ClaimRequest(userId=proverId, U=U, Ur=Ur)
async def createClaimRequests(self, schemaIds: Sequence[ID],
proverId=None,
reqNonRevoc=True) -> Dict[ID, ClaimRequest]:
"""
Creates a claim request to the issuer.
:param schemaIds: The schema IDs (references to claim
definition schema)
:param proverId: a prover ID request a claim for (if None then
the current prover default ID is used)
:param reqNonRevoc: whether to request non-revocation claim
:return: a dictionary of Claim Requests for each Schema.
"""
res = {}
for schemaId in schemaIds:
res[schemaId] = await self.createClaimRequest(schemaId,
proverId,
reqNonRevoc)
return res
async def processClaim(self, schemaId: ID, claimAttributes: Dict[str, ClaimAttributeValues], signature: Claims):
"""
Processes and saves a received Claim for the given Schema.
:param schemaId: The schema ID (reference to claim
definition schema)
:param claims: claims to be processed and saved
"""
await self.wallet.submitContextAttr(schemaId, signature.primaryClaim.m2)
await self.wallet.submitClaimAttributes(schemaId, claimAttributes)
await self._initPrimaryClaim(schemaId, signature.primaryClaim)
if signature.nonRevocClaim:
await self._initNonRevocationClaim(schemaId, signature.nonRevocClaim)
async def presentProof(self, proofRequest: ProofRequest) -> FullProof:
"""
Presents a proof to the verifier.
:param proofRequest: description of a proof to be presented (revealed
attributes, predicates, timestamps for non-revocation)
:return: a proof (both primary and non-revocation) and revealed attributes (initial non-encoded values)
"""
claims, requestedProof = await self._findClaims(proofRequest)
proof = await self._prepareProof(claims, proofRequest.nonce, requestedProof)
return proof
#
# REQUEST CLAIMS
#
async def _genMasterSecret(self, schemaId: ID):
ms = cmod.integer(cmod.randomBits(LARGE_MASTER_SECRET))
await self.wallet.submitMasterSecret(schemaId=schemaId, ms=ms)
async def _genU(self, schemaId: ID):
claimInitData = await self._primaryClaimInitializer.genClaimInitData(
schemaId)
await self.wallet.submitPrimaryClaimInitData(schemaId=schemaId,
claimInitData=claimInitData)
return claimInitData.U
async def _genUr(self, schemaId: ID):
claimInitData = await self._nonRevocClaimInitializer.genClaimInitData(
schemaId)
await self.wallet.submitNonRevocClaimInitData(schemaId=schemaId,
claimInitData=claimInitData)
return claimInitData.U
async def _initPrimaryClaim(self, schemaId: ID, claim: PrimaryClaim):
claim = await self._primaryClaimInitializer.preparePrimaryClaim(
schemaId,
claim)
await self.wallet.submitPrimaryClaim(schemaId=schemaId, claim=claim)
async def _initNonRevocationClaim(self, schemaId: ID,
claim: NonRevocationClaim):
claim = await self._nonRevocClaimInitializer.initNonRevocationClaim(
schemaId,
claim)
await self.wallet.submitNonRevocClaim(schemaId=schemaId,
claim=claim)
#
# PRESENT PROOF
#
async def _findClaims(self, proofRequest: ProofRequest) -> (
Dict[SchemaKey, ProofClaims], Dict[str, Any]):
revealedAttrs, predicates = proofRequest.verifiableAttributes, proofRequest.predicates
foundRevealedAttrs = {}
foundPredicates = {}
proofClaims = {}
schemas = {}
allClaimsAttributes = await self.wallet.getAllClaimsAttributes()
async def addProof():
revealedAttrsForClaim = [
a for a in revealedAttrs.values() if a.name in claim.keys()]
revealedPredicatesForClaim = [
p for p in predicates.values() if p.attrName in claim.keys()]
claims = await self.wallet.getClaimSignature(ID(schemaId=schemaId))
proofClaim = ProofClaims(claims=claims, revealedAttrs=revealedAttrsForClaim,
predicates=revealedPredicatesForClaim)
proofClaims[schemaId] = proofClaim
for schemaKey, c in allClaimsAttributes.items():
schemas[schemaKey] = (await self.wallet.getSchema(ID(schemaKey)))
for uuid, revealedAttr in revealedAttrs.items():
matches = [(schemas[key].seqId, c) for key, c in allClaimsAttributes.items() if revealedAttr.name in c and
(schemas[key].seqId == revealedAttr.schema_seq_no if revealedAttr.schema_seq_no else True) and
(schemas[key].issuerId == revealedAttr.issuer_did if revealedAttr.issuer_did else True)]
if len(matches) == 0:
raise ValueError(
"A claim isn't found for the following attributes: {}", revealedAttr.name)
schemaId, claim = matches[0]
foundRevealedAttrs[uuid] = [str(schemaId), str(claim[revealedAttr.name].raw),
str(claim[revealedAttr.name].encoded)]
if schemaId not in proofClaims:
await addProof()
for uuid, predicate in predicates.items():
matches = [(schemas[key].seqId, c) for key, c in allClaimsAttributes.items() if predicate.attrName in c and
(schemas[key].seqId == predicate.schema_seq_no if predicate.schema_seq_no else True) and
(schemas[key].issuerId == predicate.issuer_did if predicate.issuer_did else True)]
if len(matches) == 0:
raise ValueError(
"A claim isn't found for the following predicate: {}", predicate)
schemaId, claim = matches[0]
foundPredicates[uuid] = str(schemaId)
if schemaId not in proofClaims:
await addProof()
requestedProof = RequestedProof(
revealed_attrs=foundRevealedAttrs, predicates=foundPredicates)
return proofClaims, requestedProof
async def _prepareProof(self, claims: Dict[SchemaKey, ProofClaims],
nonce, requestedProof) -> FullProof:
m1Tilde = cmod.integer(cmod.randomBits(LARGE_M2_TILDE))
initProofs = {}
CList = []
TauList = []
# 1. init proofs
for schemaId, val in claims.items():
c1, c2, revealedAttrs, predicates = val.claims.primaryClaim, val.claims.nonRevocClaim, val.revealedAttrs, val.predicates
claim = await self.wallet.getClaimAttributes(ID(schemaId=schemaId))
nonRevocInitProof = None
if c2:
nonRevocInitProof = await self._nonRevocProofBuilder.initProof(
schemaId, c2)
CList += nonRevocInitProof.asCList()
TauList += nonRevocInitProof.asTauList()
primaryInitProof = None
if c1:
m2Tilde = cmod.integer(int(
nonRevocInitProof.TauListParams.m2)) if nonRevocInitProof else None
primaryInitProof = await self._primaryProofBuilder.initProof(
schemaId, c1, revealedAttrs, predicates,
m1Tilde, m2Tilde, claim)
CList += primaryInitProof.asCList()
TauList += primaryInitProof.asTauList()
initProof = InitProof(nonRevocInitProof, primaryInitProof)
initProofs[schemaId] = initProof
# 2. hash
cH = self._get_hash(self._prepare_collection(
CList), self._prepare_collection(TauList), nonce)
# 3. finalize proofs
proofs = {}
for schemaId, initProof in initProofs.items():
nonRevocProof = None
if initProof.nonRevocInitProof:
nonRevocProof = await self._nonRevocProofBuilder.finalizeProof(
schemaId, cH, initProof.nonRevocInitProof)
primaryProof = await self._primaryProofBuilder.finalizeProof(
schemaId, cH, initProof.primaryInitProof)
schema = await self.wallet.getSchema(ID(schemaId=schemaId))
proof = Proof(primaryProof, nonRevocProof)
proofInfo = ProofInfo(
proof=proof, schema_seq_no=schemaId, issuer_did=schema.issuerId)
proofs[str(schemaId)] = proofInfo
aggregatedProof = AggregatedProof(cH, self._prepare_collection(CList))
return FullProof(proofs, aggregatedProof, requestedProof)
async def _getCList(self, initProofs: Dict[Schema, InitProof]):
CList = []
for initProof in initProofs.values():
CList += await initProof.nonRevocInitProof.asCList()
CList += await initProof.primaryInitProof.asCList()
return CList
async def _getTauList(self, initProofs: Dict[Schema, InitProof]):
TauList = []
for initProof in initProofs.values():
TauList += await initProof.nonRevocInitProof.asTauList()
TauList += await initProof.primaryInitProof.asTauList()
return TauList
def _prepare_collection(self, values):
return [cmod.toInt(el) if isCryptoInteger(el) else el for el in values]
def _get_hash(self, CList, TauList, nonce):
return get_hash_as_int(nonce,
*reduce(lambda x, y: x + y, [TauList, CList]))
|
hyperledger-archives/indy-anoncreds
|
anoncreds/protocol/prover.py
|
Prover.presentProof
|
python
|
async def presentProof(self, proofRequest: ProofRequest) -> FullProof:
claims, requestedProof = await self._findClaims(proofRequest)
proof = await self._prepareProof(claims, proofRequest.nonce, requestedProof)
return proof
|
Presents a proof to the verifier.
:param proofRequest: description of a proof to be presented (revealed
attributes, predicates, timestamps for non-revocation)
:return: a proof (both primary and non-revocation) and revealed attributes (initial non-encoded values)
|
train
|
https://github.com/hyperledger-archives/indy-anoncreds/blob/9d9cda3d505c312257d99a13d74d8f05dac3091a/anoncreds/protocol/prover.py#L102-L112
|
[
"async def _findClaims(self, proofRequest: ProofRequest) -> (\n Dict[SchemaKey, ProofClaims], Dict[str, Any]):\n revealedAttrs, predicates = proofRequest.verifiableAttributes, proofRequest.predicates\n\n foundRevealedAttrs = {}\n foundPredicates = {}\n proofClaims = {}\n schemas = {}\n allClaimsAttributes = await self.wallet.getAllClaimsAttributes()\n\n async def addProof():\n revealedAttrsForClaim = [\n a for a in revealedAttrs.values() if a.name in claim.keys()]\n revealedPredicatesForClaim = [\n p for p in predicates.values() if p.attrName in claim.keys()]\n\n claims = await self.wallet.getClaimSignature(ID(schemaId=schemaId))\n proofClaim = ProofClaims(claims=claims, revealedAttrs=revealedAttrsForClaim,\n predicates=revealedPredicatesForClaim)\n\n proofClaims[schemaId] = proofClaim\n\n for schemaKey, c in allClaimsAttributes.items():\n schemas[schemaKey] = (await self.wallet.getSchema(ID(schemaKey)))\n\n for uuid, revealedAttr in revealedAttrs.items():\n matches = [(schemas[key].seqId, c) for key, c in allClaimsAttributes.items() if revealedAttr.name in c and\n (schemas[key].seqId == revealedAttr.schema_seq_no if revealedAttr.schema_seq_no else True) and\n (schemas[key].issuerId == revealedAttr.issuer_did if revealedAttr.issuer_did else True)]\n\n if len(matches) == 0:\n raise ValueError(\n \"A claim isn't found for the following attributes: {}\", revealedAttr.name)\n\n schemaId, claim = matches[0]\n foundRevealedAttrs[uuid] = [str(schemaId), str(claim[revealedAttr.name].raw),\n str(claim[revealedAttr.name].encoded)]\n\n if schemaId not in proofClaims:\n await addProof()\n\n for uuid, predicate in predicates.items():\n matches = [(schemas[key].seqId, c) for key, c in allClaimsAttributes.items() if predicate.attrName in c and\n (schemas[key].seqId == predicate.schema_seq_no if predicate.schema_seq_no else True) and\n (schemas[key].issuerId == predicate.issuer_did if predicate.issuer_did else True)]\n\n if len(matches) == 0:\n raise ValueError(\n \"A claim isn't 
found for the following predicate: {}\", predicate)\n\n schemaId, claim = matches[0]\n foundPredicates[uuid] = str(schemaId)\n\n if schemaId not in proofClaims:\n await addProof()\n\n requestedProof = RequestedProof(\n revealed_attrs=foundRevealedAttrs, predicates=foundPredicates)\n\n return proofClaims, requestedProof\n",
"async def _prepareProof(self, claims: Dict[SchemaKey, ProofClaims],\n nonce, requestedProof) -> FullProof:\n m1Tilde = cmod.integer(cmod.randomBits(LARGE_M2_TILDE))\n initProofs = {}\n CList = []\n TauList = []\n\n # 1. init proofs\n for schemaId, val in claims.items():\n c1, c2, revealedAttrs, predicates = val.claims.primaryClaim, val.claims.nonRevocClaim, val.revealedAttrs, val.predicates\n\n claim = await self.wallet.getClaimAttributes(ID(schemaId=schemaId))\n\n nonRevocInitProof = None\n if c2:\n nonRevocInitProof = await self._nonRevocProofBuilder.initProof(\n schemaId, c2)\n CList += nonRevocInitProof.asCList()\n TauList += nonRevocInitProof.asTauList()\n\n primaryInitProof = None\n if c1:\n m2Tilde = cmod.integer(int(\n nonRevocInitProof.TauListParams.m2)) if nonRevocInitProof else None\n primaryInitProof = await self._primaryProofBuilder.initProof(\n schemaId, c1, revealedAttrs, predicates,\n m1Tilde, m2Tilde, claim)\n CList += primaryInitProof.asCList()\n TauList += primaryInitProof.asTauList()\n\n initProof = InitProof(nonRevocInitProof, primaryInitProof)\n initProofs[schemaId] = initProof\n\n # 2. hash\n cH = self._get_hash(self._prepare_collection(\n CList), self._prepare_collection(TauList), nonce)\n\n # 3. finalize proofs\n proofs = {}\n for schemaId, initProof in initProofs.items():\n nonRevocProof = None\n if initProof.nonRevocInitProof:\n nonRevocProof = await self._nonRevocProofBuilder.finalizeProof(\n schemaId, cH, initProof.nonRevocInitProof)\n primaryProof = await self._primaryProofBuilder.finalizeProof(\n schemaId, cH, initProof.primaryInitProof)\n\n schema = await self.wallet.getSchema(ID(schemaId=schemaId))\n\n proof = Proof(primaryProof, nonRevocProof)\n proofInfo = ProofInfo(\n proof=proof, schema_seq_no=schemaId, issuer_did=schema.issuerId)\n\n proofs[str(schemaId)] = proofInfo\n\n aggregatedProof = AggregatedProof(cH, self._prepare_collection(CList))\n\n return FullProof(proofs, aggregatedProof, requestedProof)\n"
] |
class Prover:
def __init__(self, wallet: ProverWallet):
self.wallet = wallet
self._primaryClaimInitializer = PrimaryClaimInitializer(wallet)
self._nonRevocClaimInitializer = NonRevocationClaimInitializer(wallet)
self._primaryProofBuilder = PrimaryProofBuilder(wallet)
self._nonRevocProofBuilder = NonRevocationProofBuilder(wallet)
#
# PUBLIC
#
@property
def proverId(self):
return self.wallet.walletId
async def createClaimRequest(self, schemaId: ID, proverId=None,
reqNonRevoc=True) -> ClaimRequest:
"""
Creates a claim request to the issuer.
:param schemaId: The schema ID (reference to claim
definition schema)
:param proverId: a prover ID request a claim for (if None then
the current prover default ID is used)
:param reqNonRevoc: whether to request non-revocation claim
:return: Claim Request
"""
await self._genMasterSecret(schemaId)
U = await self._genU(schemaId)
Ur = None if not reqNonRevoc else await self._genUr(schemaId)
proverId = proverId if proverId else self.proverId
return ClaimRequest(userId=proverId, U=U, Ur=Ur)
async def createClaimRequests(self, schemaIds: Sequence[ID],
proverId=None,
reqNonRevoc=True) -> Dict[ID, ClaimRequest]:
"""
Creates a claim request to the issuer.
:param schemaIds: The schema IDs (references to claim
definition schema)
:param proverId: a prover ID request a claim for (if None then
the current prover default ID is used)
:param reqNonRevoc: whether to request non-revocation claim
:return: a dictionary of Claim Requests for each Schema.
"""
res = {}
for schemaId in schemaIds:
res[schemaId] = await self.createClaimRequest(schemaId,
proverId,
reqNonRevoc)
return res
async def processClaim(self, schemaId: ID, claimAttributes: Dict[str, ClaimAttributeValues], signature: Claims):
"""
Processes and saves a received Claim for the given Schema.
:param schemaId: The schema ID (reference to claim
definition schema)
:param claims: claims to be processed and saved
"""
await self.wallet.submitContextAttr(schemaId, signature.primaryClaim.m2)
await self.wallet.submitClaimAttributes(schemaId, claimAttributes)
await self._initPrimaryClaim(schemaId, signature.primaryClaim)
if signature.nonRevocClaim:
await self._initNonRevocationClaim(schemaId, signature.nonRevocClaim)
async def processClaims(self, allClaims: Dict[ID, Claims]):
"""
Processes and saves received Claims.
:param claims: claims to be processed and saved for each claim
definition.
"""
res = []
for schemaId, (claim_signature, claim_attributes) in allClaims.items():
res.append(await self.processClaim(schemaId, claim_attributes, claim_signature))
return res
#
# REQUEST CLAIMS
#
async def _genMasterSecret(self, schemaId: ID):
ms = cmod.integer(cmod.randomBits(LARGE_MASTER_SECRET))
await self.wallet.submitMasterSecret(schemaId=schemaId, ms=ms)
async def _genU(self, schemaId: ID):
claimInitData = await self._primaryClaimInitializer.genClaimInitData(
schemaId)
await self.wallet.submitPrimaryClaimInitData(schemaId=schemaId,
claimInitData=claimInitData)
return claimInitData.U
async def _genUr(self, schemaId: ID):
claimInitData = await self._nonRevocClaimInitializer.genClaimInitData(
schemaId)
await self.wallet.submitNonRevocClaimInitData(schemaId=schemaId,
claimInitData=claimInitData)
return claimInitData.U
async def _initPrimaryClaim(self, schemaId: ID, claim: PrimaryClaim):
claim = await self._primaryClaimInitializer.preparePrimaryClaim(
schemaId,
claim)
await self.wallet.submitPrimaryClaim(schemaId=schemaId, claim=claim)
async def _initNonRevocationClaim(self, schemaId: ID,
claim: NonRevocationClaim):
claim = await self._nonRevocClaimInitializer.initNonRevocationClaim(
schemaId,
claim)
await self.wallet.submitNonRevocClaim(schemaId=schemaId,
claim=claim)
#
# PRESENT PROOF
#
async def _findClaims(self, proofRequest: ProofRequest) -> (
Dict[SchemaKey, ProofClaims], Dict[str, Any]):
revealedAttrs, predicates = proofRequest.verifiableAttributes, proofRequest.predicates
foundRevealedAttrs = {}
foundPredicates = {}
proofClaims = {}
schemas = {}
allClaimsAttributes = await self.wallet.getAllClaimsAttributes()
async def addProof():
revealedAttrsForClaim = [
a for a in revealedAttrs.values() if a.name in claim.keys()]
revealedPredicatesForClaim = [
p for p in predicates.values() if p.attrName in claim.keys()]
claims = await self.wallet.getClaimSignature(ID(schemaId=schemaId))
proofClaim = ProofClaims(claims=claims, revealedAttrs=revealedAttrsForClaim,
predicates=revealedPredicatesForClaim)
proofClaims[schemaId] = proofClaim
for schemaKey, c in allClaimsAttributes.items():
schemas[schemaKey] = (await self.wallet.getSchema(ID(schemaKey)))
for uuid, revealedAttr in revealedAttrs.items():
matches = [(schemas[key].seqId, c) for key, c in allClaimsAttributes.items() if revealedAttr.name in c and
(schemas[key].seqId == revealedAttr.schema_seq_no if revealedAttr.schema_seq_no else True) and
(schemas[key].issuerId == revealedAttr.issuer_did if revealedAttr.issuer_did else True)]
if len(matches) == 0:
raise ValueError(
"A claim isn't found for the following attributes: {}", revealedAttr.name)
schemaId, claim = matches[0]
foundRevealedAttrs[uuid] = [str(schemaId), str(claim[revealedAttr.name].raw),
str(claim[revealedAttr.name].encoded)]
if schemaId not in proofClaims:
await addProof()
for uuid, predicate in predicates.items():
matches = [(schemas[key].seqId, c) for key, c in allClaimsAttributes.items() if predicate.attrName in c and
(schemas[key].seqId == predicate.schema_seq_no if predicate.schema_seq_no else True) and
(schemas[key].issuerId == predicate.issuer_did if predicate.issuer_did else True)]
if len(matches) == 0:
raise ValueError(
"A claim isn't found for the following predicate: {}", predicate)
schemaId, claim = matches[0]
foundPredicates[uuid] = str(schemaId)
if schemaId not in proofClaims:
await addProof()
requestedProof = RequestedProof(
revealed_attrs=foundRevealedAttrs, predicates=foundPredicates)
return proofClaims, requestedProof
async def _prepareProof(self, claims: Dict[SchemaKey, ProofClaims],
nonce, requestedProof) -> FullProof:
m1Tilde = cmod.integer(cmod.randomBits(LARGE_M2_TILDE))
initProofs = {}
CList = []
TauList = []
# 1. init proofs
for schemaId, val in claims.items():
c1, c2, revealedAttrs, predicates = val.claims.primaryClaim, val.claims.nonRevocClaim, val.revealedAttrs, val.predicates
claim = await self.wallet.getClaimAttributes(ID(schemaId=schemaId))
nonRevocInitProof = None
if c2:
nonRevocInitProof = await self._nonRevocProofBuilder.initProof(
schemaId, c2)
CList += nonRevocInitProof.asCList()
TauList += nonRevocInitProof.asTauList()
primaryInitProof = None
if c1:
m2Tilde = cmod.integer(int(
nonRevocInitProof.TauListParams.m2)) if nonRevocInitProof else None
primaryInitProof = await self._primaryProofBuilder.initProof(
schemaId, c1, revealedAttrs, predicates,
m1Tilde, m2Tilde, claim)
CList += primaryInitProof.asCList()
TauList += primaryInitProof.asTauList()
initProof = InitProof(nonRevocInitProof, primaryInitProof)
initProofs[schemaId] = initProof
# 2. hash
cH = self._get_hash(self._prepare_collection(
CList), self._prepare_collection(TauList), nonce)
# 3. finalize proofs
proofs = {}
for schemaId, initProof in initProofs.items():
nonRevocProof = None
if initProof.nonRevocInitProof:
nonRevocProof = await self._nonRevocProofBuilder.finalizeProof(
schemaId, cH, initProof.nonRevocInitProof)
primaryProof = await self._primaryProofBuilder.finalizeProof(
schemaId, cH, initProof.primaryInitProof)
schema = await self.wallet.getSchema(ID(schemaId=schemaId))
proof = Proof(primaryProof, nonRevocProof)
proofInfo = ProofInfo(
proof=proof, schema_seq_no=schemaId, issuer_did=schema.issuerId)
proofs[str(schemaId)] = proofInfo
aggregatedProof = AggregatedProof(cH, self._prepare_collection(CList))
return FullProof(proofs, aggregatedProof, requestedProof)
async def _getCList(self, initProofs: Dict[Schema, InitProof]):
CList = []
for initProof in initProofs.values():
CList += await initProof.nonRevocInitProof.asCList()
CList += await initProof.primaryInitProof.asCList()
return CList
async def _getTauList(self, initProofs: Dict[Schema, InitProof]):
TauList = []
for initProof in initProofs.values():
TauList += await initProof.nonRevocInitProof.asTauList()
TauList += await initProof.primaryInitProof.asTauList()
return TauList
def _prepare_collection(self, values):
return [cmod.toInt(el) if isCryptoInteger(el) else el for el in values]
def _get_hash(self, CList, TauList, nonce):
return get_hash_as_int(nonce,
*reduce(lambda x, y: x + y, [TauList, CList]))
|
rmax/scrapy-redis
|
src/scrapy_redis/dupefilter.py
|
RFPDupeFilter.from_settings
|
python
|
def from_settings(cls, settings):
server = get_redis_from_settings(settings)
# XXX: This creates one-time key. needed to support to use this
# class as standalone dupefilter with scrapy's default scheduler
# if scrapy passes spider on open() method this wouldn't be needed
# TODO: Use SCRAPY_JOB env as default and fallback to timestamp.
key = defaults.DUPEFILTER_KEY % {'timestamp': int(time.time())}
debug = settings.getbool('DUPEFILTER_DEBUG')
return cls(server, key=key, debug=debug)
|
Returns an instance from given settings.
This uses by default the key ``dupefilter:<timestamp>``. When using the
``scrapy_redis.scheduler.Scheduler`` class, this method is not used as
it needs to pass the spider name in the key.
Parameters
----------
settings : scrapy.settings.Settings
Returns
-------
RFPDupeFilter
A RFPDupeFilter instance.
|
train
|
https://github.com/rmax/scrapy-redis/blob/31c022dd145654cb4ea1429f09852a82afa0a01c/src/scrapy_redis/dupefilter.py#L43-L68
|
[
"def get_redis_from_settings(settings):\n \"\"\"Returns a redis client instance from given Scrapy settings object.\n\n This function uses ``get_client`` to instantiate the client and uses\n ``defaults.REDIS_PARAMS`` global as defaults values for the parameters. You\n can override them using the ``REDIS_PARAMS`` setting.\n\n Parameters\n ----------\n settings : Settings\n A scrapy settings object. See the supported settings below.\n\n Returns\n -------\n server\n Redis client instance.\n\n Other Parameters\n ----------------\n REDIS_URL : str, optional\n Server connection URL.\n REDIS_HOST : str, optional\n Server host.\n REDIS_PORT : str, optional\n Server port.\n REDIS_ENCODING : str, optional\n Data encoding.\n REDIS_PARAMS : dict, optional\n Additional client parameters.\n\n \"\"\"\n params = defaults.REDIS_PARAMS.copy()\n params.update(settings.getdict('REDIS_PARAMS'))\n # XXX: Deprecate REDIS_* settings.\n for source, dest in SETTINGS_PARAMS_MAP.items():\n val = settings.get(source)\n if val:\n params[dest] = val\n\n # Allow ``redis_cls`` to be a path to a class.\n if isinstance(params.get('redis_cls'), six.string_types):\n params['redis_cls'] = load_object(params['redis_cls'])\n\n return get_redis(**params)\n"
] |
class RFPDupeFilter(BaseDupeFilter):
"""Redis-based request duplicates filter.
This class can also be used with default Scrapy's scheduler.
"""
logger = logger
def __init__(self, server, key, debug=False):
"""Initialize the duplicates filter.
Parameters
----------
server : redis.StrictRedis
The redis server instance.
key : str
Redis key Where to store fingerprints.
debug : bool, optional
Whether to log filtered requests.
"""
self.server = server
self.key = key
self.debug = debug
self.logdupes = True
@classmethod
@classmethod
def from_crawler(cls, crawler):
"""Returns instance from crawler.
Parameters
----------
crawler : scrapy.crawler.Crawler
Returns
-------
RFPDupeFilter
Instance of RFPDupeFilter.
"""
return cls.from_settings(crawler.settings)
def request_seen(self, request):
"""Returns True if request was already seen.
Parameters
----------
request : scrapy.http.Request
Returns
-------
bool
"""
fp = self.request_fingerprint(request)
# This returns the number of values added, zero if already exists.
added = self.server.sadd(self.key, fp)
return added == 0
def request_fingerprint(self, request):
"""Returns a fingerprint for a given request.
Parameters
----------
request : scrapy.http.Request
Returns
-------
str
"""
return request_fingerprint(request)
@classmethod
def from_spider(cls, spider):
settings = spider.settings
server = get_redis_from_settings(settings)
dupefilter_key = settings.get("SCHEDULER_DUPEFILTER_KEY", defaults.SCHEDULER_DUPEFILTER_KEY)
key = dupefilter_key % {'spider': spider.name}
debug = settings.getbool('DUPEFILTER_DEBUG')
return cls(server, key=key, debug=debug)
def close(self, reason=''):
"""Delete data on close. Called by Scrapy's scheduler.
Parameters
----------
reason : str, optional
"""
self.clear()
def clear(self):
"""Clears fingerprints data."""
self.server.delete(self.key)
def log(self, request, spider):
"""Logs given request.
Parameters
----------
request : scrapy.http.Request
spider : scrapy.spiders.Spider
"""
if self.debug:
msg = "Filtered duplicate request: %(request)s"
self.logger.debug(msg, {'request': request}, extra={'spider': spider})
elif self.logdupes:
msg = ("Filtered duplicate request %(request)s"
" - no more duplicates will be shown"
" (see DUPEFILTER_DEBUG to show all duplicates)")
self.logger.debug(msg, {'request': request}, extra={'spider': spider})
self.logdupes = False
|
rmax/scrapy-redis
|
src/scrapy_redis/dupefilter.py
|
RFPDupeFilter.request_seen
|
python
|
def request_seen(self, request):
fp = self.request_fingerprint(request)
# This returns the number of values added, zero if already exists.
added = self.server.sadd(self.key, fp)
return added == 0
|
Returns True if request was already seen.
Parameters
----------
request : scrapy.http.Request
Returns
-------
bool
|
train
|
https://github.com/rmax/scrapy-redis/blob/31c022dd145654cb4ea1429f09852a82afa0a01c/src/scrapy_redis/dupefilter.py#L86-L101
| null |
class RFPDupeFilter(BaseDupeFilter):
"""Redis-based request duplicates filter.
This class can also be used with default Scrapy's scheduler.
"""
logger = logger
def __init__(self, server, key, debug=False):
"""Initialize the duplicates filter.
Parameters
----------
server : redis.StrictRedis
The redis server instance.
key : str
Redis key Where to store fingerprints.
debug : bool, optional
Whether to log filtered requests.
"""
self.server = server
self.key = key
self.debug = debug
self.logdupes = True
@classmethod
def from_settings(cls, settings):
"""Returns an instance from given settings.
This uses by default the key ``dupefilter:<timestamp>``. When using the
``scrapy_redis.scheduler.Scheduler`` class, this method is not used as
it needs to pass the spider name in the key.
Parameters
----------
settings : scrapy.settings.Settings
Returns
-------
RFPDupeFilter
A RFPDupeFilter instance.
"""
server = get_redis_from_settings(settings)
# XXX: This creates one-time key. needed to support to use this
# class as standalone dupefilter with scrapy's default scheduler
# if scrapy passes spider on open() method this wouldn't be needed
# TODO: Use SCRAPY_JOB env as default and fallback to timestamp.
key = defaults.DUPEFILTER_KEY % {'timestamp': int(time.time())}
debug = settings.getbool('DUPEFILTER_DEBUG')
return cls(server, key=key, debug=debug)
@classmethod
def from_crawler(cls, crawler):
"""Returns instance from crawler.
Parameters
----------
crawler : scrapy.crawler.Crawler
Returns
-------
RFPDupeFilter
Instance of RFPDupeFilter.
"""
return cls.from_settings(crawler.settings)
def request_fingerprint(self, request):
"""Returns a fingerprint for a given request.
Parameters
----------
request : scrapy.http.Request
Returns
-------
str
"""
return request_fingerprint(request)
@classmethod
def from_spider(cls, spider):
settings = spider.settings
server = get_redis_from_settings(settings)
dupefilter_key = settings.get("SCHEDULER_DUPEFILTER_KEY", defaults.SCHEDULER_DUPEFILTER_KEY)
key = dupefilter_key % {'spider': spider.name}
debug = settings.getbool('DUPEFILTER_DEBUG')
return cls(server, key=key, debug=debug)
def close(self, reason=''):
"""Delete data on close. Called by Scrapy's scheduler.
Parameters
----------
reason : str, optional
"""
self.clear()
def clear(self):
"""Clears fingerprints data."""
self.server.delete(self.key)
def log(self, request, spider):
"""Logs given request.
Parameters
----------
request : scrapy.http.Request
spider : scrapy.spiders.Spider
"""
if self.debug:
msg = "Filtered duplicate request: %(request)s"
self.logger.debug(msg, {'request': request}, extra={'spider': spider})
elif self.logdupes:
msg = ("Filtered duplicate request %(request)s"
" - no more duplicates will be shown"
" (see DUPEFILTER_DEBUG to show all duplicates)")
self.logger.debug(msg, {'request': request}, extra={'spider': spider})
self.logdupes = False
|
rmax/scrapy-redis
|
src/scrapy_redis/dupefilter.py
|
RFPDupeFilter.log
|
python
|
def log(self, request, spider):
if self.debug:
msg = "Filtered duplicate request: %(request)s"
self.logger.debug(msg, {'request': request}, extra={'spider': spider})
elif self.logdupes:
msg = ("Filtered duplicate request %(request)s"
" - no more duplicates will be shown"
" (see DUPEFILTER_DEBUG to show all duplicates)")
self.logger.debug(msg, {'request': request}, extra={'spider': spider})
self.logdupes = False
|
Logs given request.
Parameters
----------
request : scrapy.http.Request
spider : scrapy.spiders.Spider
|
train
|
https://github.com/rmax/scrapy-redis/blob/31c022dd145654cb4ea1429f09852a82afa0a01c/src/scrapy_redis/dupefilter.py#L140-L157
| null |
class RFPDupeFilter(BaseDupeFilter):
"""Redis-based request duplicates filter.
This class can also be used with default Scrapy's scheduler.
"""
logger = logger
def __init__(self, server, key, debug=False):
"""Initialize the duplicates filter.
Parameters
----------
server : redis.StrictRedis
The redis server instance.
key : str
Redis key Where to store fingerprints.
debug : bool, optional
Whether to log filtered requests.
"""
self.server = server
self.key = key
self.debug = debug
self.logdupes = True
@classmethod
def from_settings(cls, settings):
"""Returns an instance from given settings.
This uses by default the key ``dupefilter:<timestamp>``. When using the
``scrapy_redis.scheduler.Scheduler`` class, this method is not used as
it needs to pass the spider name in the key.
Parameters
----------
settings : scrapy.settings.Settings
Returns
-------
RFPDupeFilter
A RFPDupeFilter instance.
"""
server = get_redis_from_settings(settings)
# XXX: This creates one-time key. needed to support to use this
# class as standalone dupefilter with scrapy's default scheduler
# if scrapy passes spider on open() method this wouldn't be needed
# TODO: Use SCRAPY_JOB env as default and fallback to timestamp.
key = defaults.DUPEFILTER_KEY % {'timestamp': int(time.time())}
debug = settings.getbool('DUPEFILTER_DEBUG')
return cls(server, key=key, debug=debug)
@classmethod
def from_crawler(cls, crawler):
"""Returns instance from crawler.
Parameters
----------
crawler : scrapy.crawler.Crawler
Returns
-------
RFPDupeFilter
Instance of RFPDupeFilter.
"""
return cls.from_settings(crawler.settings)
def request_seen(self, request):
"""Returns True if request was already seen.
Parameters
----------
request : scrapy.http.Request
Returns
-------
bool
"""
fp = self.request_fingerprint(request)
# This returns the number of values added, zero if already exists.
added = self.server.sadd(self.key, fp)
return added == 0
def request_fingerprint(self, request):
"""Returns a fingerprint for a given request.
Parameters
----------
request : scrapy.http.Request
Returns
-------
str
"""
return request_fingerprint(request)
@classmethod
def from_spider(cls, spider):
settings = spider.settings
server = get_redis_from_settings(settings)
dupefilter_key = settings.get("SCHEDULER_DUPEFILTER_KEY", defaults.SCHEDULER_DUPEFILTER_KEY)
key = dupefilter_key % {'spider': spider.name}
debug = settings.getbool('DUPEFILTER_DEBUG')
return cls(server, key=key, debug=debug)
def close(self, reason=''):
"""Delete data on close. Called by Scrapy's scheduler.
Parameters
----------
reason : str, optional
"""
self.clear()
def clear(self):
"""Clears fingerprints data."""
self.server.delete(self.key)
|
rmax/scrapy-redis
|
example-project/process_items.py
|
process_items
|
python
|
def process_items(r, keys, timeout, limit=0, log_every=1000, wait=.1):
limit = limit or float('inf')
processed = 0
while processed < limit:
# Change ``blpop`` to ``brpop`` to process as LIFO.
ret = r.blpop(keys, timeout)
# If data is found before the timeout then we consider we are done.
if ret is None:
time.sleep(wait)
continue
source, data = ret
try:
item = json.loads(data)
except Exception:
logger.exception("Failed to load item:\n%r", pprint.pformat(data))
continue
try:
name = item.get('name') or item.get('title')
url = item.get('url') or item.get('link')
logger.debug("[%s] Processing item: %s <%s>", source, name, url)
except KeyError:
logger.exception("[%s] Failed to process item:\n%r",
source, pprint.pformat(item))
continue
processed += 1
if processed % log_every == 0:
logger.info("Processed %s items", processed)
|
Process items from a redis queue.
Parameters
----------
r : Redis
Redis connection instance.
keys : list
List of keys to read the items from.
timeout: int
Read timeout.
|
train
|
https://github.com/rmax/scrapy-redis/blob/31c022dd145654cb4ea1429f09852a82afa0a01c/example-project/process_items.py#L20-L61
| null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""A script to process items from a redis queue."""
from __future__ import print_function, unicode_literals
import argparse
import json
import logging
import pprint
import sys
import time
from scrapy_redis import get_redis
logger = logging.getLogger('process_items')
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('key', help="Redis key where items are stored")
parser.add_argument('--host')
parser.add_argument('--port')
parser.add_argument('--timeout', type=int, default=5)
parser.add_argument('--limit', type=int, default=0)
parser.add_argument('--progress-every', type=int, default=100)
parser.add_argument('-v', '--verbose', action='store_true')
args = parser.parse_args()
params = {}
if args.host:
params['host'] = args.host
if args.port:
params['port'] = args.port
logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)
r = get_redis(**params)
host = r.connection_pool.get_connection('info').host
logger.info("Waiting for items in '%s' (server: %s)", args.key, host)
kwargs = {
'keys': [args.key],
'timeout': args.timeout,
'limit': args.limit,
'log_every': args.progress_every,
}
try:
process_items(r, **kwargs)
retcode = 0 # ok
except KeyboardInterrupt:
retcode = 0 # ok
except Exception:
logger.exception("Unhandled exception")
retcode = 2
return retcode
if __name__ == '__main__':
sys.exit(main())
|
rmax/scrapy-redis
|
src/scrapy_redis/connection.py
|
get_redis_from_settings
|
python
|
def get_redis_from_settings(settings):
params = defaults.REDIS_PARAMS.copy()
params.update(settings.getdict('REDIS_PARAMS'))
# XXX: Deprecate REDIS_* settings.
for source, dest in SETTINGS_PARAMS_MAP.items():
val = settings.get(source)
if val:
params[dest] = val
# Allow ``redis_cls`` to be a path to a class.
if isinstance(params.get('redis_cls'), six.string_types):
params['redis_cls'] = load_object(params['redis_cls'])
return get_redis(**params)
|
Returns a redis client instance from given Scrapy settings object.
This function uses ``get_client`` to instantiate the client and uses
``defaults.REDIS_PARAMS`` global as defaults values for the parameters. You
can override them using the ``REDIS_PARAMS`` setting.
Parameters
----------
settings : Settings
A scrapy settings object. See the supported settings below.
Returns
-------
server
Redis client instance.
Other Parameters
----------------
REDIS_URL : str, optional
Server connection URL.
REDIS_HOST : str, optional
Server host.
REDIS_PORT : str, optional
Server port.
REDIS_ENCODING : str, optional
Data encoding.
REDIS_PARAMS : dict, optional
Additional client parameters.
|
train
|
https://github.com/rmax/scrapy-redis/blob/31c022dd145654cb4ea1429f09852a82afa0a01c/src/scrapy_redis/connection.py#L17-L60
|
[
"def get_redis(**kwargs):\n \"\"\"Returns a redis client instance.\n\n Parameters\n ----------\n redis_cls : class, optional\n Defaults to ``redis.StrictRedis``.\n url : str, optional\n If given, ``redis_cls.from_url`` is used to instantiate the class.\n **kwargs\n Extra parameters to be passed to the ``redis_cls`` class.\n\n Returns\n -------\n server\n Redis client instance.\n\n \"\"\"\n redis_cls = kwargs.pop('redis_cls', defaults.REDIS_CLS)\n url = kwargs.pop('url', None)\n if url:\n return redis_cls.from_url(url, **kwargs)\n else:\n return redis_cls(**kwargs)\n"
] |
import six
from scrapy.utils.misc import load_object
from . import defaults
# Shortcut maps 'setting name' -> 'parmater name'.
SETTINGS_PARAMS_MAP = {
'REDIS_URL': 'url',
'REDIS_HOST': 'host',
'REDIS_PORT': 'port',
'REDIS_ENCODING': 'encoding',
}
# Backwards compatible alias.
from_settings = get_redis_from_settings
def get_redis(**kwargs):
"""Returns a redis client instance.
Parameters
----------
redis_cls : class, optional
Defaults to ``redis.StrictRedis``.
url : str, optional
If given, ``redis_cls.from_url`` is used to instantiate the class.
**kwargs
Extra parameters to be passed to the ``redis_cls`` class.
Returns
-------
server
Redis client instance.
"""
redis_cls = kwargs.pop('redis_cls', defaults.REDIS_CLS)
url = kwargs.pop('url', None)
if url:
return redis_cls.from_url(url, **kwargs)
else:
return redis_cls(**kwargs)
|
rmax/scrapy-redis
|
src/scrapy_redis/connection.py
|
get_redis
|
python
|
def get_redis(**kwargs):
redis_cls = kwargs.pop('redis_cls', defaults.REDIS_CLS)
url = kwargs.pop('url', None)
if url:
return redis_cls.from_url(url, **kwargs)
else:
return redis_cls(**kwargs)
|
Returns a redis client instance.
Parameters
----------
redis_cls : class, optional
Defaults to ``redis.StrictRedis``.
url : str, optional
If given, ``redis_cls.from_url`` is used to instantiate the class.
**kwargs
Extra parameters to be passed to the ``redis_cls`` class.
Returns
-------
server
Redis client instance.
|
train
|
https://github.com/rmax/scrapy-redis/blob/31c022dd145654cb4ea1429f09852a82afa0a01c/src/scrapy_redis/connection.py#L67-L90
| null |
import six
from scrapy.utils.misc import load_object
from . import defaults
# Shortcut maps 'setting name' -> 'parmater name'.
SETTINGS_PARAMS_MAP = {
'REDIS_URL': 'url',
'REDIS_HOST': 'host',
'REDIS_PORT': 'port',
'REDIS_ENCODING': 'encoding',
}
def get_redis_from_settings(settings):
"""Returns a redis client instance from given Scrapy settings object.
This function uses ``get_client`` to instantiate the client and uses
``defaults.REDIS_PARAMS`` global as defaults values for the parameters. You
can override them using the ``REDIS_PARAMS`` setting.
Parameters
----------
settings : Settings
A scrapy settings object. See the supported settings below.
Returns
-------
server
Redis client instance.
Other Parameters
----------------
REDIS_URL : str, optional
Server connection URL.
REDIS_HOST : str, optional
Server host.
REDIS_PORT : str, optional
Server port.
REDIS_ENCODING : str, optional
Data encoding.
REDIS_PARAMS : dict, optional
Additional client parameters.
"""
params = defaults.REDIS_PARAMS.copy()
params.update(settings.getdict('REDIS_PARAMS'))
# XXX: Deprecate REDIS_* settings.
for source, dest in SETTINGS_PARAMS_MAP.items():
val = settings.get(source)
if val:
params[dest] = val
# Allow ``redis_cls`` to be a path to a class.
if isinstance(params.get('redis_cls'), six.string_types):
params['redis_cls'] = load_object(params['redis_cls'])
return get_redis(**params)
# Backwards compatible alias.
from_settings = get_redis_from_settings
|
rmax/scrapy-redis
|
src/scrapy_redis/utils.py
|
bytes_to_str
|
python
|
def bytes_to_str(s, encoding='utf-8'):
if six.PY3 and isinstance(s, bytes):
return s.decode(encoding)
return s
|
Returns a str if a bytes object is given.
|
train
|
https://github.com/rmax/scrapy-redis/blob/31c022dd145654cb4ea1429f09852a82afa0a01c/src/scrapy_redis/utils.py#L4-L8
| null |
import six
|
rmax/scrapy-redis
|
src/scrapy_redis/spiders.py
|
RedisMixin.setup_redis
|
python
|
def setup_redis(self, crawler=None):
if self.server is not None:
return
if crawler is None:
# We allow optional crawler argument to keep backwards
# compatibility.
# XXX: Raise a deprecation warning.
crawler = getattr(self, 'crawler', None)
if crawler is None:
raise ValueError("crawler is required")
settings = crawler.settings
if self.redis_key is None:
self.redis_key = settings.get(
'REDIS_START_URLS_KEY', defaults.START_URLS_KEY,
)
self.redis_key = self.redis_key % {'name': self.name}
if not self.redis_key.strip():
raise ValueError("redis_key must not be empty")
if self.redis_batch_size is None:
# TODO: Deprecate this setting (REDIS_START_URLS_BATCH_SIZE).
self.redis_batch_size = settings.getint(
'REDIS_START_URLS_BATCH_SIZE',
settings.getint('CONCURRENT_REQUESTS'),
)
try:
self.redis_batch_size = int(self.redis_batch_size)
except (TypeError, ValueError):
raise ValueError("redis_batch_size must be an integer")
if self.redis_encoding is None:
self.redis_encoding = settings.get('REDIS_ENCODING', defaults.REDIS_ENCODING)
self.logger.info("Reading start URLs from redis key '%(redis_key)s' "
"(batch size: %(redis_batch_size)s, encoding: %(redis_encoding)s",
self.__dict__)
self.server = connection.from_settings(crawler.settings)
# The idle signal is called when the spider has no requests left,
# that's when we will schedule new requests from redis queue
crawler.signals.connect(self.spider_idle, signal=signals.spider_idle)
|
Setup redis connection and idle signal.
This should be called after the spider has set its crawler object.
|
train
|
https://github.com/rmax/scrapy-redis/blob/31c022dd145654cb4ea1429f09852a82afa0a01c/src/scrapy_redis/spiders.py#L22-L73
|
[
"def get_redis_from_settings(settings):\n \"\"\"Returns a redis client instance from given Scrapy settings object.\n\n This function uses ``get_client`` to instantiate the client and uses\n ``defaults.REDIS_PARAMS`` global as defaults values for the parameters. You\n can override them using the ``REDIS_PARAMS`` setting.\n\n Parameters\n ----------\n settings : Settings\n A scrapy settings object. See the supported settings below.\n\n Returns\n -------\n server\n Redis client instance.\n\n Other Parameters\n ----------------\n REDIS_URL : str, optional\n Server connection URL.\n REDIS_HOST : str, optional\n Server host.\n REDIS_PORT : str, optional\n Server port.\n REDIS_ENCODING : str, optional\n Data encoding.\n REDIS_PARAMS : dict, optional\n Additional client parameters.\n\n \"\"\"\n params = defaults.REDIS_PARAMS.copy()\n params.update(settings.getdict('REDIS_PARAMS'))\n # XXX: Deprecate REDIS_* settings.\n for source, dest in SETTINGS_PARAMS_MAP.items():\n val = settings.get(source)\n if val:\n params[dest] = val\n\n # Allow ``redis_cls`` to be a path to a class.\n if isinstance(params.get('redis_cls'), six.string_types):\n params['redis_cls'] = load_object(params['redis_cls'])\n\n return get_redis(**params)\n"
] |
class RedisMixin(object):
"""Mixin class to implement reading urls from a redis queue."""
redis_key = None
redis_batch_size = None
redis_encoding = None
# Redis client placeholder.
server = None
def start_requests(self):
"""Returns a batch of start requests from redis."""
return self.next_requests()
def next_requests(self):
"""Returns a request to be scheduled or none."""
use_set = self.settings.getbool('REDIS_START_URLS_AS_SET', defaults.START_URLS_AS_SET)
fetch_one = self.server.spop if use_set else self.server.lpop
# XXX: Do we need to use a timeout here?
found = 0
# TODO: Use redis pipeline execution.
while found < self.redis_batch_size:
data = fetch_one(self.redis_key)
if not data:
# Queue empty.
break
req = self.make_request_from_data(data)
if req:
yield req
found += 1
else:
self.logger.debug("Request not made from data: %r", data)
if found:
self.logger.debug("Read %s requests from '%s'", found, self.redis_key)
def make_request_from_data(self, data):
"""Returns a Request instance from data coming from Redis.
By default, ``data`` is an encoded URL. You can override this method to
provide your own message decoding.
Parameters
----------
data : bytes
Message from redis.
"""
url = bytes_to_str(data, self.redis_encoding)
return self.make_requests_from_url(url)
def schedule_next_requests(self):
"""Schedules a request if available"""
# TODO: While there is capacity, schedule a batch of redis requests.
for req in self.next_requests():
self.crawler.engine.crawl(req, spider=self)
def spider_idle(self):
"""Schedules a request if available, otherwise waits."""
# XXX: Handle a sentinel to close the spider.
self.schedule_next_requests()
raise DontCloseSpider
|
rmax/scrapy-redis
|
src/scrapy_redis/spiders.py
|
RedisMixin.next_requests
|
python
|
def next_requests(self):
use_set = self.settings.getbool('REDIS_START_URLS_AS_SET', defaults.START_URLS_AS_SET)
fetch_one = self.server.spop if use_set else self.server.lpop
# XXX: Do we need to use a timeout here?
found = 0
# TODO: Use redis pipeline execution.
while found < self.redis_batch_size:
data = fetch_one(self.redis_key)
if not data:
# Queue empty.
break
req = self.make_request_from_data(data)
if req:
yield req
found += 1
else:
self.logger.debug("Request not made from data: %r", data)
if found:
self.logger.debug("Read %s requests from '%s'", found, self.redis_key)
|
Returns a request to be scheduled or none.
|
train
|
https://github.com/rmax/scrapy-redis/blob/31c022dd145654cb4ea1429f09852a82afa0a01c/src/scrapy_redis/spiders.py#L75-L95
|
[
"def make_request_from_data(self, data):\n \"\"\"Returns a Request instance from data coming from Redis.\n\n By default, ``data`` is an encoded URL. You can override this method to\n provide your own message decoding.\n\n Parameters\n ----------\n data : bytes\n Message from redis.\n\n \"\"\"\n url = bytes_to_str(data, self.redis_encoding)\n return self.make_requests_from_url(url)\n"
] |
class RedisMixin(object):
"""Mixin class to implement reading urls from a redis queue."""
redis_key = None
redis_batch_size = None
redis_encoding = None
# Redis client placeholder.
server = None
def start_requests(self):
"""Returns a batch of start requests from redis."""
return self.next_requests()
def setup_redis(self, crawler=None):
"""Setup redis connection and idle signal.
This should be called after the spider has set its crawler object.
"""
if self.server is not None:
return
if crawler is None:
# We allow optional crawler argument to keep backwards
# compatibility.
# XXX: Raise a deprecation warning.
crawler = getattr(self, 'crawler', None)
if crawler is None:
raise ValueError("crawler is required")
settings = crawler.settings
if self.redis_key is None:
self.redis_key = settings.get(
'REDIS_START_URLS_KEY', defaults.START_URLS_KEY,
)
self.redis_key = self.redis_key % {'name': self.name}
if not self.redis_key.strip():
raise ValueError("redis_key must not be empty")
if self.redis_batch_size is None:
# TODO: Deprecate this setting (REDIS_START_URLS_BATCH_SIZE).
self.redis_batch_size = settings.getint(
'REDIS_START_URLS_BATCH_SIZE',
settings.getint('CONCURRENT_REQUESTS'),
)
try:
self.redis_batch_size = int(self.redis_batch_size)
except (TypeError, ValueError):
raise ValueError("redis_batch_size must be an integer")
if self.redis_encoding is None:
self.redis_encoding = settings.get('REDIS_ENCODING', defaults.REDIS_ENCODING)
self.logger.info("Reading start URLs from redis key '%(redis_key)s' "
"(batch size: %(redis_batch_size)s, encoding: %(redis_encoding)s",
self.__dict__)
self.server = connection.from_settings(crawler.settings)
# The idle signal is called when the spider has no requests left,
# that's when we will schedule new requests from redis queue
crawler.signals.connect(self.spider_idle, signal=signals.spider_idle)
def make_request_from_data(self, data):
"""Returns a Request instance from data coming from Redis.
By default, ``data`` is an encoded URL. You can override this method to
provide your own message decoding.
Parameters
----------
data : bytes
Message from redis.
"""
url = bytes_to_str(data, self.redis_encoding)
return self.make_requests_from_url(url)
def schedule_next_requests(self):
"""Schedules a request if available"""
# TODO: While there is capacity, schedule a batch of redis requests.
for req in self.next_requests():
self.crawler.engine.crawl(req, spider=self)
def spider_idle(self):
"""Schedules a request if available, otherwise waits."""
# XXX: Handle a sentinel to close the spider.
self.schedule_next_requests()
raise DontCloseSpider
|
rmax/scrapy-redis
|
src/scrapy_redis/spiders.py
|
RedisMixin.make_request_from_data
|
python
|
def make_request_from_data(self, data):
url = bytes_to_str(data, self.redis_encoding)
return self.make_requests_from_url(url)
|
Returns a Request instance from data coming from Redis.
By default, ``data`` is an encoded URL. You can override this method to
provide your own message decoding.
Parameters
----------
data : bytes
Message from redis.
|
train
|
https://github.com/rmax/scrapy-redis/blob/31c022dd145654cb4ea1429f09852a82afa0a01c/src/scrapy_redis/spiders.py#L97-L110
|
[
"def bytes_to_str(s, encoding='utf-8'):\n \"\"\"Returns a str if a bytes object is given.\"\"\"\n if six.PY3 and isinstance(s, bytes):\n return s.decode(encoding)\n return s\n"
] |
class RedisMixin(object):
"""Mixin class to implement reading urls from a redis queue."""
redis_key = None
redis_batch_size = None
redis_encoding = None
# Redis client placeholder.
server = None
def start_requests(self):
"""Returns a batch of start requests from redis."""
return self.next_requests()
def setup_redis(self, crawler=None):
"""Setup redis connection and idle signal.
This should be called after the spider has set its crawler object.
"""
if self.server is not None:
return
if crawler is None:
# We allow optional crawler argument to keep backwards
# compatibility.
# XXX: Raise a deprecation warning.
crawler = getattr(self, 'crawler', None)
if crawler is None:
raise ValueError("crawler is required")
settings = crawler.settings
if self.redis_key is None:
self.redis_key = settings.get(
'REDIS_START_URLS_KEY', defaults.START_URLS_KEY,
)
self.redis_key = self.redis_key % {'name': self.name}
if not self.redis_key.strip():
raise ValueError("redis_key must not be empty")
if self.redis_batch_size is None:
# TODO: Deprecate this setting (REDIS_START_URLS_BATCH_SIZE).
self.redis_batch_size = settings.getint(
'REDIS_START_URLS_BATCH_SIZE',
settings.getint('CONCURRENT_REQUESTS'),
)
try:
self.redis_batch_size = int(self.redis_batch_size)
except (TypeError, ValueError):
raise ValueError("redis_batch_size must be an integer")
if self.redis_encoding is None:
self.redis_encoding = settings.get('REDIS_ENCODING', defaults.REDIS_ENCODING)
self.logger.info("Reading start URLs from redis key '%(redis_key)s' "
"(batch size: %(redis_batch_size)s, encoding: %(redis_encoding)s",
self.__dict__)
self.server = connection.from_settings(crawler.settings)
# The idle signal is called when the spider has no requests left,
# that's when we will schedule new requests from redis queue
crawler.signals.connect(self.spider_idle, signal=signals.spider_idle)
def next_requests(self):
"""Returns a request to be scheduled or none."""
use_set = self.settings.getbool('REDIS_START_URLS_AS_SET', defaults.START_URLS_AS_SET)
fetch_one = self.server.spop if use_set else self.server.lpop
# XXX: Do we need to use a timeout here?
found = 0
# TODO: Use redis pipeline execution.
while found < self.redis_batch_size:
data = fetch_one(self.redis_key)
if not data:
# Queue empty.
break
req = self.make_request_from_data(data)
if req:
yield req
found += 1
else:
self.logger.debug("Request not made from data: %r", data)
if found:
self.logger.debug("Read %s requests from '%s'", found, self.redis_key)
def schedule_next_requests(self):
"""Schedules a request if available"""
# TODO: While there is capacity, schedule a batch of redis requests.
for req in self.next_requests():
self.crawler.engine.crawl(req, spider=self)
def spider_idle(self):
"""Schedules a request if available, otherwise waits."""
# XXX: Handle a sentinel to close the spider.
self.schedule_next_requests()
raise DontCloseSpider
|
rmax/scrapy-redis
|
src/scrapy_redis/spiders.py
|
RedisMixin.schedule_next_requests
|
python
|
def schedule_next_requests(self):
# TODO: While there is capacity, schedule a batch of redis requests.
for req in self.next_requests():
self.crawler.engine.crawl(req, spider=self)
|
Schedules a request if available
|
train
|
https://github.com/rmax/scrapy-redis/blob/31c022dd145654cb4ea1429f09852a82afa0a01c/src/scrapy_redis/spiders.py#L112-L116
|
[
"def next_requests(self):\n \"\"\"Returns a request to be scheduled or none.\"\"\"\n use_set = self.settings.getbool('REDIS_START_URLS_AS_SET', defaults.START_URLS_AS_SET)\n fetch_one = self.server.spop if use_set else self.server.lpop\n # XXX: Do we need to use a timeout here?\n found = 0\n # TODO: Use redis pipeline execution.\n while found < self.redis_batch_size:\n data = fetch_one(self.redis_key)\n if not data:\n # Queue empty.\n break\n req = self.make_request_from_data(data)\n if req:\n yield req\n found += 1\n else:\n self.logger.debug(\"Request not made from data: %r\", data)\n\n if found:\n self.logger.debug(\"Read %s requests from '%s'\", found, self.redis_key)\n"
] |
class RedisMixin(object):
"""Mixin class to implement reading urls from a redis queue."""
redis_key = None
redis_batch_size = None
redis_encoding = None
# Redis client placeholder.
server = None
def start_requests(self):
"""Returns a batch of start requests from redis."""
return self.next_requests()
def setup_redis(self, crawler=None):
"""Setup redis connection and idle signal.
This should be called after the spider has set its crawler object.
"""
if self.server is not None:
return
if crawler is None:
# We allow optional crawler argument to keep backwards
# compatibility.
# XXX: Raise a deprecation warning.
crawler = getattr(self, 'crawler', None)
if crawler is None:
raise ValueError("crawler is required")
settings = crawler.settings
if self.redis_key is None:
self.redis_key = settings.get(
'REDIS_START_URLS_KEY', defaults.START_URLS_KEY,
)
self.redis_key = self.redis_key % {'name': self.name}
if not self.redis_key.strip():
raise ValueError("redis_key must not be empty")
if self.redis_batch_size is None:
# TODO: Deprecate this setting (REDIS_START_URLS_BATCH_SIZE).
self.redis_batch_size = settings.getint(
'REDIS_START_URLS_BATCH_SIZE',
settings.getint('CONCURRENT_REQUESTS'),
)
try:
self.redis_batch_size = int(self.redis_batch_size)
except (TypeError, ValueError):
raise ValueError("redis_batch_size must be an integer")
if self.redis_encoding is None:
self.redis_encoding = settings.get('REDIS_ENCODING', defaults.REDIS_ENCODING)
self.logger.info("Reading start URLs from redis key '%(redis_key)s' "
"(batch size: %(redis_batch_size)s, encoding: %(redis_encoding)s",
self.__dict__)
self.server = connection.from_settings(crawler.settings)
# The idle signal is called when the spider has no requests left,
# that's when we will schedule new requests from redis queue
crawler.signals.connect(self.spider_idle, signal=signals.spider_idle)
def next_requests(self):
"""Returns a request to be scheduled or none."""
use_set = self.settings.getbool('REDIS_START_URLS_AS_SET', defaults.START_URLS_AS_SET)
fetch_one = self.server.spop if use_set else self.server.lpop
# XXX: Do we need to use a timeout here?
found = 0
# TODO: Use redis pipeline execution.
while found < self.redis_batch_size:
data = fetch_one(self.redis_key)
if not data:
# Queue empty.
break
req = self.make_request_from_data(data)
if req:
yield req
found += 1
else:
self.logger.debug("Request not made from data: %r", data)
if found:
self.logger.debug("Read %s requests from '%s'", found, self.redis_key)
def make_request_from_data(self, data):
"""Returns a Request instance from data coming from Redis.
By default, ``data`` is an encoded URL. You can override this method to
provide your own message decoding.
Parameters
----------
data : bytes
Message from redis.
"""
url = bytes_to_str(data, self.redis_encoding)
return self.make_requests_from_url(url)
def spider_idle(self):
"""Schedules a request if available, otherwise waits."""
# XXX: Handle a sentinel to close the spider.
self.schedule_next_requests()
raise DontCloseSpider
|
rmax/scrapy-redis
|
src/scrapy_redis/queue.py
|
Base._encode_request
|
python
|
def _encode_request(self, request):
obj = request_to_dict(request, self.spider)
return self.serializer.dumps(obj)
|
Encode a request object
|
train
|
https://github.com/rmax/scrapy-redis/blob/31c022dd145654cb4ea1429f09852a82afa0a01c/src/scrapy_redis/queue.py#L40-L43
| null |
class Base(object):
"""Per-spider base queue class"""
def __init__(self, server, spider, key, serializer=None):
"""Initialize per-spider redis queue.
Parameters
----------
server : StrictRedis
Redis client instance.
spider : Spider
Scrapy spider instance.
key: str
Redis key where to put and get messages.
serializer : object
Serializer object with ``loads`` and ``dumps`` methods.
"""
if serializer is None:
# Backward compatibility.
# TODO: deprecate pickle.
serializer = picklecompat
if not hasattr(serializer, 'loads'):
raise TypeError("serializer does not implement 'loads' function: %r"
% serializer)
if not hasattr(serializer, 'dumps'):
raise TypeError("serializer '%s' does not implement 'dumps' function: %r"
% serializer)
self.server = server
self.spider = spider
self.key = key % {'spider': spider.name}
self.serializer = serializer
def _decode_request(self, encoded_request):
"""Decode an request previously encoded"""
obj = self.serializer.loads(encoded_request)
return request_from_dict(obj, self.spider)
def __len__(self):
"""Return the length of the queue"""
raise NotImplementedError
def push(self, request):
"""Push a request"""
raise NotImplementedError
def pop(self, timeout=0):
"""Pop a request"""
raise NotImplementedError
def clear(self):
"""Clear queue/stack"""
self.server.delete(self.key)
|
rmax/scrapy-redis
|
src/scrapy_redis/queue.py
|
Base._decode_request
|
python
|
def _decode_request(self, encoded_request):
obj = self.serializer.loads(encoded_request)
return request_from_dict(obj, self.spider)
|
Decode an request previously encoded
|
train
|
https://github.com/rmax/scrapy-redis/blob/31c022dd145654cb4ea1429f09852a82afa0a01c/src/scrapy_redis/queue.py#L45-L48
| null |
class Base(object):
"""Per-spider base queue class"""
def __init__(self, server, spider, key, serializer=None):
"""Initialize per-spider redis queue.
Parameters
----------
server : StrictRedis
Redis client instance.
spider : Spider
Scrapy spider instance.
key: str
Redis key where to put and get messages.
serializer : object
Serializer object with ``loads`` and ``dumps`` methods.
"""
if serializer is None:
# Backward compatibility.
# TODO: deprecate pickle.
serializer = picklecompat
if not hasattr(serializer, 'loads'):
raise TypeError("serializer does not implement 'loads' function: %r"
% serializer)
if not hasattr(serializer, 'dumps'):
raise TypeError("serializer '%s' does not implement 'dumps' function: %r"
% serializer)
self.server = server
self.spider = spider
self.key = key % {'spider': spider.name}
self.serializer = serializer
def _encode_request(self, request):
"""Encode a request object"""
obj = request_to_dict(request, self.spider)
return self.serializer.dumps(obj)
def __len__(self):
"""Return the length of the queue"""
raise NotImplementedError
def push(self, request):
"""Push a request"""
raise NotImplementedError
def pop(self, timeout=0):
"""Pop a request"""
raise NotImplementedError
def clear(self):
"""Clear queue/stack"""
self.server.delete(self.key)
|
rmax/scrapy-redis
|
src/scrapy_redis/queue.py
|
FifoQueue.push
|
python
|
def push(self, request):
self.server.lpush(self.key, self._encode_request(request))
|
Push a request
|
train
|
https://github.com/rmax/scrapy-redis/blob/31c022dd145654cb4ea1429f09852a82afa0a01c/src/scrapy_redis/queue.py#L74-L76
|
[
"def _encode_request(self, request):\n \"\"\"Encode a request object\"\"\"\n obj = request_to_dict(request, self.spider)\n return self.serializer.dumps(obj)\n"
] |
class FifoQueue(Base):
"""Per-spider FIFO queue"""
def __len__(self):
"""Return the length of the queue"""
return self.server.llen(self.key)
def pop(self, timeout=0):
"""Pop a request"""
if timeout > 0:
data = self.server.brpop(self.key, timeout)
if isinstance(data, tuple):
data = data[1]
else:
data = self.server.rpop(self.key)
if data:
return self._decode_request(data)
|
rmax/scrapy-redis
|
src/scrapy_redis/queue.py
|
PriorityQueue.push
|
python
|
def push(self, request):
data = self._encode_request(request)
score = -request.priority
# We don't use zadd method as the order of arguments change depending on
# whether the class is Redis or StrictRedis, and the option of using
# kwargs only accepts strings, not bytes.
self.server.execute_command('ZADD', self.key, score, data)
|
Push a request
|
train
|
https://github.com/rmax/scrapy-redis/blob/31c022dd145654cb4ea1429f09852a82afa0a01c/src/scrapy_redis/queue.py#L97-L104
|
[
"def _encode_request(self, request):\n \"\"\"Encode a request object\"\"\"\n obj = request_to_dict(request, self.spider)\n return self.serializer.dumps(obj)\n"
] |
class PriorityQueue(Base):
"""Per-spider priority queue abstraction using redis' sorted set"""
def __len__(self):
"""Return the length of the queue"""
return self.server.zcard(self.key)
def pop(self, timeout=0):
"""
Pop a request
timeout not support in this queue class
"""
# use atomic range/remove using multi/exec
pipe = self.server.pipeline()
pipe.multi()
pipe.zrange(self.key, 0, 0).zremrangebyrank(self.key, 0, 0)
results, count = pipe.execute()
if results:
return self._decode_request(results[0])
|
rmax/scrapy-redis
|
src/scrapy_redis/queue.py
|
PriorityQueue.pop
|
python
|
def pop(self, timeout=0):
# use atomic range/remove using multi/exec
pipe = self.server.pipeline()
pipe.multi()
pipe.zrange(self.key, 0, 0).zremrangebyrank(self.key, 0, 0)
results, count = pipe.execute()
if results:
return self._decode_request(results[0])
|
Pop a request
timeout not support in this queue class
|
train
|
https://github.com/rmax/scrapy-redis/blob/31c022dd145654cb4ea1429f09852a82afa0a01c/src/scrapy_redis/queue.py#L106-L117
|
[
"def _decode_request(self, encoded_request):\n \"\"\"Decode an request previously encoded\"\"\"\n obj = self.serializer.loads(encoded_request)\n return request_from_dict(obj, self.spider)\n"
] |
class PriorityQueue(Base):
"""Per-spider priority queue abstraction using redis' sorted set"""
def __len__(self):
"""Return the length of the queue"""
return self.server.zcard(self.key)
def push(self, request):
"""Push a request"""
data = self._encode_request(request)
score = -request.priority
# We don't use zadd method as the order of arguments change depending on
# whether the class is Redis or StrictRedis, and the option of using
# kwargs only accepts strings, not bytes.
self.server.execute_command('ZADD', self.key, score, data)
|
gusutabopb/aioinflux
|
aioinflux/serialization/common.py
|
escape
|
python
|
def escape(string, escape_pattern):
try:
return string.translate(escape_pattern)
except AttributeError:
warnings.warn("Non-string-like data passed. "
"Attempting to convert to 'str'.")
return str(string).translate(tag_escape)
|
Assistant function for string escaping
|
train
|
https://github.com/gusutabopb/aioinflux/blob/2e4b7b3e13604e7618c686d89a0673f0bc70b24e/aioinflux/serialization/common.py#L13-L20
| null |
import warnings
# Special characters documentation:
# https://docs.influxdata.com/influxdb/v1.4/write_protocols/line_protocol_reference/#special-characters
# Although not in the official docs, new line characters are removed in order to avoid issues.
# Go implementation: https://github.com/influxdata/influxdb/blob/master/pkg/escape/strings.go
key_escape = str.maketrans({'\\': '\\\\', ',': r'\,', ' ': r'\ ', '=': r'\=', '\n': ''})
tag_escape = str.maketrans({'\\': '\\\\', ',': r'\,', ' ': r'\ ', '=': r'\=', '\n': ''})
str_escape = str.maketrans({'\\': '\\\\', '"': r'\"', '\n': ''})
measurement_escape = str.maketrans({'\\': '\\\\', ',': r'\,', ' ': r'\ ', '\n': ''})
|
gusutabopb/aioinflux
|
aioinflux/serialization/usertype.py
|
_make_serializer
|
python
|
def _make_serializer(meas, schema, rm_none, extra_tags, placeholder): # noqa: C901
_validate_schema(schema, placeholder)
tags = []
fields = []
ts = None
meas = meas
for k, t in schema.items():
if t is MEASUREMENT:
meas = f"{{i.{k}}}"
elif t is TIMEINT:
ts = f"{{i.{k}}}"
elif t is TIMESTR:
if pd:
ts = f"{{pd.Timestamp(i.{k} or 0).value}}"
else:
ts = f"{{dt_to_int(str_to_dt(i.{k}))}}"
elif t is TIMEDT:
if pd:
ts = f"{{pd.Timestamp(i.{k} or 0).value}}"
else:
ts = f"{{dt_to_int(i.{k})}}"
elif t is TAG:
tags.append(f"{k}={{str(i.{k}).translate(tag_escape)}}")
elif t is TAGENUM:
tags.append(f"{k}={{getattr(i.{k}, 'name', i.{k} or None)}}")
elif t in (FLOAT, BOOL):
fields.append(f"{k}={{i.{k}}}")
elif t is INT:
fields.append(f"{k}={{i.{k}}}i")
elif t is STR:
fields.append(f"{k}=\\\"{{str(i.{k}).translate(str_escape)}}\\\"")
elif t is ENUM:
fields.append(f"{k}=\\\"{{getattr(i.{k}, 'name', i.{k} or None)}}\\\"")
else:
raise SchemaError(f"Invalid attribute type {k!r}: {t!r}")
extra_tags = extra_tags or {}
for k, v in extra_tags.items():
tags.append(f"{k}={v}")
if placeholder:
fields.insert(0, f"_=true")
sep = ',' if tags else ''
ts = f' {ts}' if ts else ''
fmt = f"{meas}{sep}{','.join(tags)} {','.join(fields)}{ts}"
if rm_none:
# Has substantial runtime impact. Best avoided if performance is critical.
# First field can't be removed.
pat = r',\w+="?None"?i?'
f = eval('lambda i: re.sub(r\'{}\', "", f"{}").encode()'.format(pat, fmt))
else:
f = eval('lambda i: f"{}".encode()'.format(fmt))
f.__doc__ = "Returns InfluxDB line protocol representation of user-defined class"
f._args = dict(meas=meas, schema=schema, rm_none=rm_none,
extra_tags=extra_tags, placeholder=placeholder)
return f
|
Factory of line protocol parsers
|
train
|
https://github.com/gusutabopb/aioinflux/blob/2e4b7b3e13604e7618c686d89a0673f0bc70b24e/aioinflux/serialization/usertype.py#L67-L122
|
[
"def _validate_schema(schema, placeholder):\n c = Counter(schema.values())\n if not c:\n raise SchemaError(\"Schema/type annotations missing\")\n if c[MEASUREMENT] > 1:\n raise SchemaError(\"Class can't have more than one 'MEASUREMENT' attribute\")\n if sum(c[e] for e in time_types) > 1:\n raise SchemaError(f\"Can't have more than one timestamp-type attribute {time_types}\")\n if sum(c[e] for e in field_types) < 1 and not placeholder:\n raise SchemaError(f\"Must have one or more field-type attributes {field_types}\")\n"
] |
import enum
import ciso8601
import time
# noinspection PyUnresolvedReferences
import re # noqa
from collections import Counter
from typing import TypeVar, Optional, Mapping
from datetime import datetime
# noinspection PyUnresolvedReferences
from .common import * # noqa
from ..compat import pd
__all__ = [
'lineprotocol', 'SchemaError',
'MEASUREMENT', 'TIMEINT', 'TIMESTR', 'TIMEDT',
'TAG', 'TAGENUM',
'BOOL', 'INT', 'FLOAT', 'STR', 'ENUM',
]
MEASUREMENT = TypeVar('MEASUREMENT', bound=str)
TIMEINT = TypeVar('TIMEINT', bound=int)
TIMESTR = TypeVar('TIMESTR', bound=str)
TIMEDT = TypeVar('TIMEDT', bound=datetime)
TAG = TypeVar('TAG', bound=str)
TAGENUM = TypeVar('TAGENUM', enum.Enum, str)
BOOL = TypeVar('BOOL', bound=bool)
INT = TypeVar('INT', bound=int)
FLOAT = TypeVar('FLOAT', bound=float)
STR = TypeVar('STR', bound=str)
ENUM = TypeVar('ENUM', enum.Enum, str)
time_types = [TIMEINT, TIMEDT, TIMESTR]
field_types = [BOOL, INT, FLOAT, STR, ENUM]
class SchemaError(TypeError):
"""Raised when invalid schema is passed to :func:`lineprotocol`"""
def str_to_dt(s):
dt = ciso8601.parse_datetime(s)
if dt:
return dt
raise ValueError(f'Invalid datetime string: {dt!r}')
def dt_to_int(dt):
if not dt.tzinfo:
# Assume tz-naive input to be in UTC, not local time
return int(dt.timestamp() - time.timezone) * 10 ** 9 + dt.microsecond * 1000
return int(dt.timestamp()) * 10 ** 9 + dt.microsecond * 1000
def _validate_schema(schema, placeholder):
c = Counter(schema.values())
if not c:
raise SchemaError("Schema/type annotations missing")
if c[MEASUREMENT] > 1:
raise SchemaError("Class can't have more than one 'MEASUREMENT' attribute")
if sum(c[e] for e in time_types) > 1:
raise SchemaError(f"Can't have more than one timestamp-type attribute {time_types}")
if sum(c[e] for e in field_types) < 1 and not placeholder:
raise SchemaError(f"Must have one or more field-type attributes {field_types}")
def lineprotocol(
cls=None,
*,
schema: Optional[Mapping[str, type]] = None,
rm_none: bool = False,
extra_tags: Optional[Mapping[str, str]] = None,
placeholder: bool = False
):
"""Adds ``to_lineprotocol`` method to arbitrary user-defined classes
:param cls: Class to monkey-patch
:param schema: Schema dictionary (attr/type pairs).
:param rm_none: Whether apply a regex to remove ``None`` values.
If ``False``, passing ``None`` values to boolean, integer or float or time fields
will result in write errors. Setting to ``True`` is "safer" but impacts performance.
:param extra_tags: Hard coded tags to be added to every point generated.
:param placeholder: If no field attributes are present, add a placeholder attribute (``_``)
which is always equal to ``True``. This is a workaround for creating field-less points
(which is not supported natively by InfluxDB)
"""
def _lineprotocol(cls):
_schema = schema or getattr(cls, '__annotations__', {})
f = _make_serializer(cls.__name__, _schema, rm_none, extra_tags, placeholder)
cls.to_lineprotocol = f
return cls
return _lineprotocol(cls) if cls else _lineprotocol
|
gusutabopb/aioinflux
|
aioinflux/serialization/usertype.py
|
lineprotocol
|
python
|
def lineprotocol(
cls=None,
*,
schema: Optional[Mapping[str, type]] = None,
rm_none: bool = False,
extra_tags: Optional[Mapping[str, str]] = None,
placeholder: bool = False
):
def _lineprotocol(cls):
_schema = schema or getattr(cls, '__annotations__', {})
f = _make_serializer(cls.__name__, _schema, rm_none, extra_tags, placeholder)
cls.to_lineprotocol = f
return cls
return _lineprotocol(cls) if cls else _lineprotocol
|
Adds ``to_lineprotocol`` method to arbitrary user-defined classes
:param cls: Class to monkey-patch
:param schema: Schema dictionary (attr/type pairs).
:param rm_none: Whether apply a regex to remove ``None`` values.
If ``False``, passing ``None`` values to boolean, integer or float or time fields
will result in write errors. Setting to ``True`` is "safer" but impacts performance.
:param extra_tags: Hard coded tags to be added to every point generated.
:param placeholder: If no field attributes are present, add a placeholder attribute (``_``)
which is always equal to ``True``. This is a workaround for creating field-less points
(which is not supported natively by InfluxDB)
|
train
|
https://github.com/gusutabopb/aioinflux/blob/2e4b7b3e13604e7618c686d89a0673f0bc70b24e/aioinflux/serialization/usertype.py#L125-L152
|
[
"def _lineprotocol(cls):\n _schema = schema or getattr(cls, '__annotations__', {})\n f = _make_serializer(cls.__name__, _schema, rm_none, extra_tags, placeholder)\n cls.to_lineprotocol = f\n return cls\n"
] |
import enum
import ciso8601
import time
# noinspection PyUnresolvedReferences
import re # noqa
from collections import Counter
from typing import TypeVar, Optional, Mapping
from datetime import datetime
# noinspection PyUnresolvedReferences
from .common import * # noqa
from ..compat import pd
__all__ = [
'lineprotocol', 'SchemaError',
'MEASUREMENT', 'TIMEINT', 'TIMESTR', 'TIMEDT',
'TAG', 'TAGENUM',
'BOOL', 'INT', 'FLOAT', 'STR', 'ENUM',
]
MEASUREMENT = TypeVar('MEASUREMENT', bound=str)
TIMEINT = TypeVar('TIMEINT', bound=int)
TIMESTR = TypeVar('TIMESTR', bound=str)
TIMEDT = TypeVar('TIMEDT', bound=datetime)
TAG = TypeVar('TAG', bound=str)
TAGENUM = TypeVar('TAGENUM', enum.Enum, str)
BOOL = TypeVar('BOOL', bound=bool)
INT = TypeVar('INT', bound=int)
FLOAT = TypeVar('FLOAT', bound=float)
STR = TypeVar('STR', bound=str)
ENUM = TypeVar('ENUM', enum.Enum, str)
time_types = [TIMEINT, TIMEDT, TIMESTR]
field_types = [BOOL, INT, FLOAT, STR, ENUM]
class SchemaError(TypeError):
"""Raised when invalid schema is passed to :func:`lineprotocol`"""
def str_to_dt(s):
dt = ciso8601.parse_datetime(s)
if dt:
return dt
raise ValueError(f'Invalid datetime string: {dt!r}')
def dt_to_int(dt):
if not dt.tzinfo:
# Assume tz-naive input to be in UTC, not local time
return int(dt.timestamp() - time.timezone) * 10 ** 9 + dt.microsecond * 1000
return int(dt.timestamp()) * 10 ** 9 + dt.microsecond * 1000
def _validate_schema(schema, placeholder):
c = Counter(schema.values())
if not c:
raise SchemaError("Schema/type annotations missing")
if c[MEASUREMENT] > 1:
raise SchemaError("Class can't have more than one 'MEASUREMENT' attribute")
if sum(c[e] for e in time_types) > 1:
raise SchemaError(f"Can't have more than one timestamp-type attribute {time_types}")
if sum(c[e] for e in field_types) < 1 and not placeholder:
raise SchemaError(f"Must have one or more field-type attributes {field_types}")
def _make_serializer(meas, schema, rm_none, extra_tags, placeholder): # noqa: C901
"""Factory of line protocol parsers"""
_validate_schema(schema, placeholder)
tags = []
fields = []
ts = None
meas = meas
for k, t in schema.items():
if t is MEASUREMENT:
meas = f"{{i.{k}}}"
elif t is TIMEINT:
ts = f"{{i.{k}}}"
elif t is TIMESTR:
if pd:
ts = f"{{pd.Timestamp(i.{k} or 0).value}}"
else:
ts = f"{{dt_to_int(str_to_dt(i.{k}))}}"
elif t is TIMEDT:
if pd:
ts = f"{{pd.Timestamp(i.{k} or 0).value}}"
else:
ts = f"{{dt_to_int(i.{k})}}"
elif t is TAG:
tags.append(f"{k}={{str(i.{k}).translate(tag_escape)}}")
elif t is TAGENUM:
tags.append(f"{k}={{getattr(i.{k}, 'name', i.{k} or None)}}")
elif t in (FLOAT, BOOL):
fields.append(f"{k}={{i.{k}}}")
elif t is INT:
fields.append(f"{k}={{i.{k}}}i")
elif t is STR:
fields.append(f"{k}=\\\"{{str(i.{k}).translate(str_escape)}}\\\"")
elif t is ENUM:
fields.append(f"{k}=\\\"{{getattr(i.{k}, 'name', i.{k} or None)}}\\\"")
else:
raise SchemaError(f"Invalid attribute type {k!r}: {t!r}")
extra_tags = extra_tags or {}
for k, v in extra_tags.items():
tags.append(f"{k}={v}")
if placeholder:
fields.insert(0, f"_=true")
sep = ',' if tags else ''
ts = f' {ts}' if ts else ''
fmt = f"{meas}{sep}{','.join(tags)} {','.join(fields)}{ts}"
if rm_none:
# Has substantial runtime impact. Best avoided if performance is critical.
# First field can't be removed.
pat = r',\w+="?None"?i?'
f = eval('lambda i: re.sub(r\'{}\', "", f"{}").encode()'.format(pat, fmt))
else:
f = eval('lambda i: f"{}".encode()'.format(fmt))
f.__doc__ = "Returns InfluxDB line protocol representation of user-defined class"
f._args = dict(meas=meas, schema=schema, rm_none=rm_none,
extra_tags=extra_tags, placeholder=placeholder)
return f
|
gusutabopb/aioinflux
|
aioinflux/serialization/mapping.py
|
serialize
|
python
|
def serialize(point: Mapping, measurement=None, **extra_tags) -> bytes:
tags = _serialize_tags(point, extra_tags)
return (
f'{_serialize_measurement(point, measurement)}'
f'{"," if tags else ""}{tags} '
f'{_serialize_fields(point)} '
f'{_serialize_timestamp(point)}'
).encode()
|
Converts dictionary-like data into a single line protocol line (point)
|
train
|
https://github.com/gusutabopb/aioinflux/blob/2e4b7b3e13604e7618c686d89a0673f0bc70b24e/aioinflux/serialization/mapping.py#L9-L17
|
[
"def _serialize_tags(point, extra_tags):\n output = []\n for k, v in {**point.get('tags', {}), **extra_tags}.items():\n k = escape(k, key_escape)\n v = escape(v, tag_escape)\n if not v:\n continue # ignore blank/null string tags\n output.append(f'{k}={v}')\n return ','.join(output)\n",
"def _serialize_measurement(point, measurement):\n try:\n return escape(point['measurement'], measurement_escape)\n except KeyError:\n if measurement is None:\n raise ValueError(\"'measurement' missing\")\n return escape(measurement, measurement_escape)\n",
"def _serialize_fields(point):\n \"\"\"Field values can be floats, integers, strings, or Booleans.\"\"\"\n output = []\n for k, v in point['fields'].items():\n k = escape(k, key_escape)\n if isinstance(v, bool):\n output.append(f'{k}={v}')\n elif isinstance(v, int):\n output.append(f'{k}={v}i')\n elif isinstance(v, str):\n output.append(f'{k}=\"{v.translate(str_escape)}\"')\n elif v is None:\n # Empty values\n continue\n else:\n # Floats\n output.append(f'{k}={v}')\n return ','.join(output)\n",
"def _serialize_timestamp(point):\n dt = point.get('time')\n if not dt:\n return ''\n elif isinstance(dt, int):\n return dt\n elif isinstance(dt, (str, bytes)):\n dt = ciso8601.parse_datetime(dt)\n if not dt:\n raise ValueError(f'Invalid datetime string: {dt!r}')\n\n if not dt.tzinfo:\n # Assume tz-naive input to be in UTC, not local time\n return int(dt.timestamp() - time.timezone) * 10 ** 9 + dt.microsecond * 1000\n return int(dt.timestamp()) * 10 ** 9 + dt.microsecond * 1000\n"
] |
import time
from typing import Mapping
import ciso8601
from .common import *
def _serialize_measurement(point, measurement):
try:
return escape(point['measurement'], measurement_escape)
except KeyError:
if measurement is None:
raise ValueError("'measurement' missing")
return escape(measurement, measurement_escape)
def _serialize_tags(point, extra_tags):
output = []
for k, v in {**point.get('tags', {}), **extra_tags}.items():
k = escape(k, key_escape)
v = escape(v, tag_escape)
if not v:
continue # ignore blank/null string tags
output.append(f'{k}={v}')
return ','.join(output)
def _serialize_timestamp(point):
dt = point.get('time')
if not dt:
return ''
elif isinstance(dt, int):
return dt
elif isinstance(dt, (str, bytes)):
dt = ciso8601.parse_datetime(dt)
if not dt:
raise ValueError(f'Invalid datetime string: {dt!r}')
if not dt.tzinfo:
# Assume tz-naive input to be in UTC, not local time
return int(dt.timestamp() - time.timezone) * 10 ** 9 + dt.microsecond * 1000
return int(dt.timestamp()) * 10 ** 9 + dt.microsecond * 1000
def _serialize_fields(point):
"""Field values can be floats, integers, strings, or Booleans."""
output = []
for k, v in point['fields'].items():
k = escape(k, key_escape)
if isinstance(v, bool):
output.append(f'{k}={v}')
elif isinstance(v, int):
output.append(f'{k}={v}i')
elif isinstance(v, str):
output.append(f'{k}="{v.translate(str_escape)}"')
elif v is None:
# Empty values
continue
else:
# Floats
output.append(f'{k}={v}')
return ','.join(output)
|
gusutabopb/aioinflux
|
aioinflux/serialization/mapping.py
|
_serialize_fields
|
python
|
def _serialize_fields(point):
output = []
for k, v in point['fields'].items():
k = escape(k, key_escape)
if isinstance(v, bool):
output.append(f'{k}={v}')
elif isinstance(v, int):
output.append(f'{k}={v}i')
elif isinstance(v, str):
output.append(f'{k}="{v.translate(str_escape)}"')
elif v is None:
# Empty values
continue
else:
# Floats
output.append(f'{k}={v}')
return ','.join(output)
|
Field values can be floats, integers, strings, or Booleans.
|
train
|
https://github.com/gusutabopb/aioinflux/blob/2e4b7b3e13604e7618c686d89a0673f0bc70b24e/aioinflux/serialization/mapping.py#L57-L74
|
[
"def escape(string, escape_pattern):\n \"\"\"Assistant function for string escaping\"\"\"\n try:\n return string.translate(escape_pattern)\n except AttributeError:\n warnings.warn(\"Non-string-like data passed. \"\n \"Attempting to convert to 'str'.\")\n return str(string).translate(tag_escape)\n"
] |
import time
from typing import Mapping
import ciso8601
from .common import *
def serialize(point: Mapping, measurement=None, **extra_tags) -> bytes:
"""Converts dictionary-like data into a single line protocol line (point)"""
tags = _serialize_tags(point, extra_tags)
return (
f'{_serialize_measurement(point, measurement)}'
f'{"," if tags else ""}{tags} '
f'{_serialize_fields(point)} '
f'{_serialize_timestamp(point)}'
).encode()
def _serialize_measurement(point, measurement):
try:
return escape(point['measurement'], measurement_escape)
except KeyError:
if measurement is None:
raise ValueError("'measurement' missing")
return escape(measurement, measurement_escape)
def _serialize_tags(point, extra_tags):
output = []
for k, v in {**point.get('tags', {}), **extra_tags}.items():
k = escape(k, key_escape)
v = escape(v, tag_escape)
if not v:
continue # ignore blank/null string tags
output.append(f'{k}={v}')
return ','.join(output)
def _serialize_timestamp(point):
dt = point.get('time')
if not dt:
return ''
elif isinstance(dt, int):
return dt
elif isinstance(dt, (str, bytes)):
dt = ciso8601.parse_datetime(dt)
if not dt:
raise ValueError(f'Invalid datetime string: {dt!r}')
if not dt.tzinfo:
# Assume tz-naive input to be in UTC, not local time
return int(dt.timestamp() - time.timezone) * 10 ** 9 + dt.microsecond * 1000
return int(dt.timestamp()) * 10 ** 9 + dt.microsecond * 1000
|
gusutabopb/aioinflux
|
aioinflux/serialization/__init__.py
|
serialize
|
python
|
def serialize(data, measurement=None, tag_columns=None, **extra_tags):
if isinstance(data, bytes):
return data
elif isinstance(data, str):
return data.encode('utf-8')
elif hasattr(data, 'to_lineprotocol'):
return data.to_lineprotocol()
elif pd is not None and isinstance(data, pd.DataFrame):
return dataframe.serialize(data, measurement, tag_columns, **extra_tags)
elif isinstance(data, dict):
return mapping.serialize(data, measurement, **extra_tags)
elif hasattr(data, '__iter__'):
return b'\n'.join([serialize(i, measurement, tag_columns, **extra_tags) for i in data])
else:
raise ValueError('Invalid input', data)
|
Converts input data into line protocol format
|
train
|
https://github.com/gusutabopb/aioinflux/blob/2e4b7b3e13604e7618c686d89a0673f0bc70b24e/aioinflux/serialization/__init__.py#L9-L24
|
[
"def serialize(df, measurement, tag_columns=None, **extra_tags) -> bytes:\n \"\"\"Converts a Pandas DataFrame into line protocol format\"\"\"\n # Pre-processing\n if measurement is None:\n raise ValueError(\"Missing 'measurement'\")\n if not isinstance(df.index, pd.DatetimeIndex):\n raise ValueError('DataFrame index is not DatetimeIndex')\n tag_columns = set(tag_columns or [])\n isnull = df.isnull().any(axis=1)\n\n # Make parser function\n tags = []\n fields = []\n for k, v in extra_tags.items():\n tags.append(f\"{k}={escape(v, key_escape)}\")\n for i, (k, v) in enumerate(df.dtypes.items()):\n k = k.translate(key_escape)\n if k in tag_columns:\n tags.append(f\"{k}={{p[{i+1}]}}\")\n elif issubclass(v.type, np.integer):\n fields.append(f\"{k}={{p[{i+1}]}}i\")\n elif issubclass(v.type, (np.float, np.bool_)):\n fields.append(f\"{k}={{p[{i+1}]}}\")\n else:\n # String escaping is skipped for performance reasons\n # Strings containing double-quotes can cause strange write errors\n # and should be sanitized by the user.\n # e.g., df[k] = df[k].astype('str').str.translate(str_escape)\n fields.append(f\"{k}=\\\"{{p[{i+1}]}}\\\"\")\n fmt = (f'{measurement}', f'{\",\" if tags else \"\"}', ','.join(tags),\n ' ', ','.join(fields), ' {p[0].value}')\n f = eval(\"lambda p: f'{}'\".format(''.join(fmt)))\n\n # Map/concat\n if isnull.any():\n lp = map(f, _itertuples(df[~isnull]))\n rep = _replace(df)\n lp_nan = (reduce(lambda a, b: re.sub(*b, a), rep, f(p))\n for p in _itertuples(df[isnull]))\n return '\\n'.join(chain(lp, lp_nan)).encode('utf-8')\n else:\n return '\\n'.join(map(f, _itertuples(df))).encode('utf-8')\n",
"def serialize(point: Mapping, measurement=None, **extra_tags) -> bytes:\n \"\"\"Converts dictionary-like data into a single line protocol line (point)\"\"\"\n tags = _serialize_tags(point, extra_tags)\n return (\n f'{_serialize_measurement(point, measurement)}'\n f'{\",\" if tags else \"\"}{tags} '\n f'{_serialize_fields(point)} '\n f'{_serialize_timestamp(point)}'\n ).encode()\n"
] |
# flake8: noqa 402
from ..compat import pd
if pd:
from . import dataframe
from . import mapping
|
gusutabopb/aioinflux
|
aioinflux/iterutils.py
|
iterpoints
|
python
|
def iterpoints(resp: dict, parser: Optional[Callable] = None) -> Iterator[Any]:
for statement in resp['results']:
if 'series' not in statement:
continue
for series in statement['series']:
if parser is None:
return (x for x in series['values'])
elif 'meta' in inspect.signature(parser).parameters:
meta = {k: series[k] for k in series if k != 'values'}
meta['statement_id'] = statement['statement_id']
return (parser(*x, meta=meta) for x in series['values'])
else:
return (parser(*x) for x in series['values'])
return iter([])
|
Iterates a response JSON yielding data point by point.
Can be used with both regular and chunked responses.
By default, returns just a plain list of values representing each point,
without column names, or other metadata.
In case a specific format is needed, an optional ``parser`` argument can be passed.
``parser`` is a function/callable that takes data point values
and, optionally, a ``meta`` parameter containing which takes a
dictionary containing all or a subset of the following:
``{'columns', 'name', 'tags', 'statement_id'}``.
Sample parser functions:
.. code:: python
# Function optional meta argument
def parser(*x, meta):
return dict(zip(meta['columns'], x))
# Namedtuple (callable)
from collections import namedtuple
parser = namedtuple('MyPoint', ['col1', 'col2', 'col3'])
:param resp: Dictionary containing parsed JSON (output from InfluxDBClient.query)
:param parser: Optional parser function/callable
:return: Generator object
|
train
|
https://github.com/gusutabopb/aioinflux/blob/2e4b7b3e13604e7618c686d89a0673f0bc70b24e/aioinflux/iterutils.py#L6-L48
| null |
import inspect
from typing import Optional, Iterator, Callable, Any
|
gusutabopb/aioinflux
|
aioinflux/serialization/dataframe.py
|
parse
|
python
|
def parse(resp) -> DataFrameType:
statements = []
for statement in resp['results']:
series = {}
for s in statement.get('series', []):
series[_get_name(s)] = _drop_zero_index(_serializer(s))
statements.append(series)
if len(statements) == 1:
series: dict = statements[0]
if len(series) == 1:
return list(series.values())[0] # DataFrame
else:
return series # dict
return statements
|
Makes a dictionary of DataFrames from a response object
|
train
|
https://github.com/gusutabopb/aioinflux/blob/2e4b7b3e13604e7618c686d89a0673f0bc70b24e/aioinflux/serialization/dataframe.py#L44-L59
|
[
"def _serializer(series) -> pd.DataFrame:\n df = pd.DataFrame(series.get('values', []), columns=series['columns'])\n if 'time' not in df.columns:\n return df\n df: pd.DataFrame = df.set_index(pd.to_datetime(df['time'])).drop('time', axis=1)\n df.index = df.index.tz_localize('UTC')\n df.index.name = None\n if 'tags' in series:\n for k, v in series['tags'].items():\n df[k] = v\n if 'name' in series:\n df.name = series['name']\n return df\n",
"def _get_name(series):\n tags = [f'{k}={v}' for k, v in series.get('tags', {}).items()]\n return ','.join(filter(None, [series.get('name'), *tags])) or None\n",
"def _drop_zero_index(df):\n if isinstance(df.index, pd.DatetimeIndex):\n if all(i.value == 0 for i in df.index):\n return df.reset_index(drop=True)\n return df\n"
] |
import re
from functools import reduce
from itertools import chain
from typing import Union, Dict, List
import pandas as pd
import numpy as np
from .common import *
DataFrameType = Union[pd.DataFrame, Dict[str, pd.DataFrame], List[Dict[str, pd.DataFrame]]]
# Serialization helper functions
# -------------------------------
def _serializer(series) -> pd.DataFrame:
df = pd.DataFrame(series.get('values', []), columns=series['columns'])
if 'time' not in df.columns:
return df
df: pd.DataFrame = df.set_index(pd.to_datetime(df['time'])).drop('time', axis=1)
df.index = df.index.tz_localize('UTC')
df.index.name = None
if 'tags' in series:
for k, v in series['tags'].items():
df[k] = v
if 'name' in series:
df.name = series['name']
return df
def _get_name(series):
tags = [f'{k}={v}' for k, v in series.get('tags', {}).items()]
return ','.join(filter(None, [series.get('name'), *tags])) or None
def _drop_zero_index(df):
if isinstance(df.index, pd.DatetimeIndex):
if all(i.value == 0 for i in df.index):
return df.reset_index(drop=True)
return df
# list
# Parsing helper functions
# -------------------------
def _itertuples(df):
"""Custom implementation of ``DataFrame.itertuples`` that
returns plain tuples instead of namedtuples. About 50% faster.
"""
cols = [df.iloc[:, k] for k in range(len(df.columns))]
return zip(df.index, *cols)
def _replace(df):
obj_cols = {k for k, v in dict(df.dtypes).items() if v is np.dtype('O')}
other_cols = set(df.columns) - obj_cols
obj_nans = (f'{k}="nan"' for k in obj_cols)
other_nans = (f'{k}=nani?' for k in other_cols)
replacements = [
('|'.join(chain(obj_nans, other_nans)), ''),
(',{2,}', ','),
('|'.join([', ,', ', ', ' ,']), ' '),
]
return replacements
def serialize(df, measurement, tag_columns=None, **extra_tags) -> bytes:
"""Converts a Pandas DataFrame into line protocol format"""
# Pre-processing
if measurement is None:
raise ValueError("Missing 'measurement'")
if not isinstance(df.index, pd.DatetimeIndex):
raise ValueError('DataFrame index is not DatetimeIndex')
tag_columns = set(tag_columns or [])
isnull = df.isnull().any(axis=1)
# Make parser function
tags = []
fields = []
for k, v in extra_tags.items():
tags.append(f"{k}={escape(v, key_escape)}")
for i, (k, v) in enumerate(df.dtypes.items()):
k = k.translate(key_escape)
if k in tag_columns:
tags.append(f"{k}={{p[{i+1}]}}")
elif issubclass(v.type, np.integer):
fields.append(f"{k}={{p[{i+1}]}}i")
elif issubclass(v.type, (np.float, np.bool_)):
fields.append(f"{k}={{p[{i+1}]}}")
else:
# String escaping is skipped for performance reasons
# Strings containing double-quotes can cause strange write errors
# and should be sanitized by the user.
# e.g., df[k] = df[k].astype('str').str.translate(str_escape)
fields.append(f"{k}=\"{{p[{i+1}]}}\"")
fmt = (f'{measurement}', f'{"," if tags else ""}', ','.join(tags),
' ', ','.join(fields), ' {p[0].value}')
f = eval("lambda p: f'{}'".format(''.join(fmt)))
# Map/concat
if isnull.any():
lp = map(f, _itertuples(df[~isnull]))
rep = _replace(df)
lp_nan = (reduce(lambda a, b: re.sub(*b, a), rep, f(p))
for p in _itertuples(df[isnull]))
return '\n'.join(chain(lp, lp_nan)).encode('utf-8')
else:
return '\n'.join(map(f, _itertuples(df))).encode('utf-8')
|
gusutabopb/aioinflux
|
aioinflux/serialization/dataframe.py
|
_itertuples
|
python
|
def _itertuples(df):
cols = [df.iloc[:, k] for k in range(len(df.columns))]
return zip(df.index, *cols)
|
Custom implementation of ``DataFrame.itertuples`` that
returns plain tuples instead of namedtuples. About 50% faster.
|
train
|
https://github.com/gusutabopb/aioinflux/blob/2e4b7b3e13604e7618c686d89a0673f0bc70b24e/aioinflux/serialization/dataframe.py#L65-L70
| null |
import re
from functools import reduce
from itertools import chain
from typing import Union, Dict, List
import pandas as pd
import numpy as np
from .common import *
DataFrameType = Union[pd.DataFrame, Dict[str, pd.DataFrame], List[Dict[str, pd.DataFrame]]]
# Serialization helper functions
# -------------------------------
def _serializer(series) -> pd.DataFrame:
df = pd.DataFrame(series.get('values', []), columns=series['columns'])
if 'time' not in df.columns:
return df
df: pd.DataFrame = df.set_index(pd.to_datetime(df['time'])).drop('time', axis=1)
df.index = df.index.tz_localize('UTC')
df.index.name = None
if 'tags' in series:
for k, v in series['tags'].items():
df[k] = v
if 'name' in series:
df.name = series['name']
return df
def _get_name(series):
tags = [f'{k}={v}' for k, v in series.get('tags', {}).items()]
return ','.join(filter(None, [series.get('name'), *tags])) or None
def _drop_zero_index(df):
if isinstance(df.index, pd.DatetimeIndex):
if all(i.value == 0 for i in df.index):
return df.reset_index(drop=True)
return df
def parse(resp) -> DataFrameType:
"""Makes a dictionary of DataFrames from a response object"""
statements = []
for statement in resp['results']:
series = {}
for s in statement.get('series', []):
series[_get_name(s)] = _drop_zero_index(_serializer(s))
statements.append(series)
if len(statements) == 1:
series: dict = statements[0]
if len(series) == 1:
return list(series.values())[0] # DataFrame
else:
return series # dict
return statements # list
# Parsing helper functions
# -------------------------
def _replace(df):
obj_cols = {k for k, v in dict(df.dtypes).items() if v is np.dtype('O')}
other_cols = set(df.columns) - obj_cols
obj_nans = (f'{k}="nan"' for k in obj_cols)
other_nans = (f'{k}=nani?' for k in other_cols)
replacements = [
('|'.join(chain(obj_nans, other_nans)), ''),
(',{2,}', ','),
('|'.join([', ,', ', ', ' ,']), ' '),
]
return replacements
def serialize(df, measurement, tag_columns=None, **extra_tags) -> bytes:
    """Converts a Pandas DataFrame into line protocol format

    :param df: Input DataFrame (must be indexed by a :class:`pandas.DatetimeIndex`).
    :param measurement: Measurement name added to every generated point.
    :param tag_columns: Iterable of column names serialized as tags instead of fields.
    :param extra_tags: Constant key/value tags appended to every point.
    :return: UTF-8 encoded line-protocol payload.
    :raises ValueError: If ``measurement`` is ``None`` or the index is not a
        DatetimeIndex.
    """
    # Pre-processing
    if measurement is None:
        raise ValueError("Missing 'measurement'")
    if not isinstance(df.index, pd.DatetimeIndex):
        raise ValueError('DataFrame index is not DatetimeIndex')
    tag_columns = set(tag_columns or [])
    isnull = df.isnull().any(axis=1)

    # Make parser function
    tags = []
    fields = []
    for k, v in extra_tags.items():
        tags.append(f"{k}={escape(v, key_escape)}")
    for i, (k, v) in enumerate(df.dtypes.items()):
        k = k.translate(key_escape)
        if k in tag_columns:
            tags.append(f"{k}={{p[{i+1}]}}")
        elif issubclass(v.type, np.integer):
            fields.append(f"{k}={{p[{i+1}]}}i")
        elif issubclass(v.type, (np.floating, np.bool_)):
            # np.floating replaces the `np.float` builtin alias, which was
            # deprecated in NumPy 1.20 and removed in 1.24; it also covers
            # float32 columns that the plain-float check used to miss.
            fields.append(f"{k}={{p[{i+1}]}}")
        else:
            # String escaping is skipped for performance reasons
            # Strings containing double-quotes can cause strange write errors
            # and should be sanitized by the user.
            # e.g., df[k] = df[k].astype('str').str.translate(str_escape)
            fields.append(f"{k}=\"{{p[{i+1}]}}\"")
    fmt = (f'{measurement}', f'{"," if tags else ""}', ','.join(tags),
           ' ', ','.join(fields), ' {p[0].value}')
    # Compile the assembled template into a fast per-row formatter.
    f = eval("lambda p: f'{}'".format(''.join(fmt)))

    # Map/concat
    if isnull.any():
        # Rows containing NaNs are serialized separately and then scrubbed
        # with the regex replacements produced by _replace().
        lp = map(f, _itertuples(df[~isnull]))
        rep = _replace(df)
        lp_nan = (reduce(lambda a, b: re.sub(*b, a), rep, f(p))
                  for p in _itertuples(df[isnull]))
        return '\n'.join(chain(lp, lp_nan)).encode('utf-8')
    else:
        return '\n'.join(map(f, _itertuples(df))).encode('utf-8')
|
gusutabopb/aioinflux
|
aioinflux/serialization/dataframe.py
|
serialize
|
python
|
def serialize(df, measurement, tag_columns=None, **extra_tags) -> bytes:
# Pre-processing
if measurement is None:
raise ValueError("Missing 'measurement'")
if not isinstance(df.index, pd.DatetimeIndex):
raise ValueError('DataFrame index is not DatetimeIndex')
tag_columns = set(tag_columns or [])
isnull = df.isnull().any(axis=1)
# Make parser function
tags = []
fields = []
for k, v in extra_tags.items():
tags.append(f"{k}={escape(v, key_escape)}")
for i, (k, v) in enumerate(df.dtypes.items()):
k = k.translate(key_escape)
if k in tag_columns:
tags.append(f"{k}={{p[{i+1}]}}")
elif issubclass(v.type, np.integer):
fields.append(f"{k}={{p[{i+1}]}}i")
elif issubclass(v.type, (np.float, np.bool_)):
fields.append(f"{k}={{p[{i+1}]}}")
else:
# String escaping is skipped for performance reasons
# Strings containing double-quotes can cause strange write errors
# and should be sanitized by the user.
# e.g., df[k] = df[k].astype('str').str.translate(str_escape)
fields.append(f"{k}=\"{{p[{i+1}]}}\"")
fmt = (f'{measurement}', f'{"," if tags else ""}', ','.join(tags),
' ', ','.join(fields), ' {p[0].value}')
f = eval("lambda p: f'{}'".format(''.join(fmt)))
# Map/concat
if isnull.any():
lp = map(f, _itertuples(df[~isnull]))
rep = _replace(df)
lp_nan = (reduce(lambda a, b: re.sub(*b, a), rep, f(p))
for p in _itertuples(df[isnull]))
return '\n'.join(chain(lp, lp_nan)).encode('utf-8')
else:
return '\n'.join(map(f, _itertuples(df))).encode('utf-8')
|
Converts a Pandas DataFrame into line protocol format
|
train
|
https://github.com/gusutabopb/aioinflux/blob/2e4b7b3e13604e7618c686d89a0673f0bc70b24e/aioinflux/serialization/dataframe.py#L86-L127
|
[
"def escape(string, escape_pattern):\n \"\"\"Assistant function for string escaping\"\"\"\n try:\n return string.translate(escape_pattern)\n except AttributeError:\n warnings.warn(\"Non-string-like data passed. \"\n \"Attempting to convert to 'str'.\")\n return str(string).translate(tag_escape)\n",
"def _replace(df):\n obj_cols = {k for k, v in dict(df.dtypes).items() if v is np.dtype('O')}\n other_cols = set(df.columns) - obj_cols\n obj_nans = (f'{k}=\"nan\"' for k in obj_cols)\n other_nans = (f'{k}=nani?' for k in other_cols)\n replacements = [\n ('|'.join(chain(obj_nans, other_nans)), ''),\n (',{2,}', ','),\n ('|'.join([', ,', ', ', ' ,']), ' '),\n ]\n return replacements\n",
"def _itertuples(df):\n \"\"\"Custom implementation of ``DataFrame.itertuples`` that\n returns plain tuples instead of namedtuples. About 50% faster.\n \"\"\"\n cols = [df.iloc[:, k] for k in range(len(df.columns))]\n return zip(df.index, *cols)\n"
] |
import re
from functools import reduce
from itertools import chain
from typing import Union, Dict, List
import pandas as pd
import numpy as np
from .common import *
DataFrameType = Union[pd.DataFrame, Dict[str, pd.DataFrame], List[Dict[str, pd.DataFrame]]]
# Serialization helper functions
# -------------------------------
def _serializer(series) -> pd.DataFrame:
df = pd.DataFrame(series.get('values', []), columns=series['columns'])
if 'time' not in df.columns:
return df
df: pd.DataFrame = df.set_index(pd.to_datetime(df['time'])).drop('time', axis=1)
df.index = df.index.tz_localize('UTC')
df.index.name = None
if 'tags' in series:
for k, v in series['tags'].items():
df[k] = v
if 'name' in series:
df.name = series['name']
return df
def _get_name(series):
tags = [f'{k}={v}' for k, v in series.get('tags', {}).items()]
return ','.join(filter(None, [series.get('name'), *tags])) or None
def _drop_zero_index(df):
if isinstance(df.index, pd.DatetimeIndex):
if all(i.value == 0 for i in df.index):
return df.reset_index(drop=True)
return df
def parse(resp) -> DataFrameType:
"""Makes a dictionary of DataFrames from a response object"""
statements = []
for statement in resp['results']:
series = {}
for s in statement.get('series', []):
series[_get_name(s)] = _drop_zero_index(_serializer(s))
statements.append(series)
if len(statements) == 1:
series: dict = statements[0]
if len(series) == 1:
return list(series.values())[0] # DataFrame
else:
return series # dict
return statements # list
# Parsing helper functions
# -------------------------
def _itertuples(df):
"""Custom implementation of ``DataFrame.itertuples`` that
returns plain tuples instead of namedtuples. About 50% faster.
"""
cols = [df.iloc[:, k] for k in range(len(df.columns))]
return zip(df.index, *cols)
def _replace(df):
obj_cols = {k for k, v in dict(df.dtypes).items() if v is np.dtype('O')}
other_cols = set(df.columns) - obj_cols
obj_nans = (f'{k}="nan"' for k in obj_cols)
other_nans = (f'{k}=nani?' for k in other_cols)
replacements = [
('|'.join(chain(obj_nans, other_nans)), ''),
(',{2,}', ','),
('|'.join([', ,', ', ', ' ,']), ' '),
]
return replacements
|
gusutabopb/aioinflux
|
aioinflux/client.py
|
runner
|
python
|
def runner(coro):
    """Function execution decorator.

    In ``async`` mode the wrapped coroutine is returned for the caller to
    await; otherwise it is driven to completion on the client's event loop.
    """
    @wraps(coro)
    def wrapper(self, *args, **kwargs):
        if self.mode != 'async':
            return self._loop.run_until_complete(coro(self, *args, **kwargs))
        return coro(self, *args, **kwargs)
    return wrapper
|
Function execution decorator.
|
train
|
https://github.com/gusutabopb/aioinflux/blob/2e4b7b3e13604e7618c686d89a0673f0bc70b24e/aioinflux/client.py#L25-L34
| null |
import asyncio
import json
import logging
import warnings
from functools import wraps
from typing import TypeVar, Union, AnyStr, Mapping, Iterable, Optional, AsyncGenerator
import aiohttp
from . import serialization
from .compat import *
if pd:
PointType = TypeVar('PointType', Mapping, dict, bytes, pd.DataFrame)
ResultType = TypeVar('ResultType', dict, bytes, pd.DataFrame)
else:
PointType = TypeVar('PointType', Mapping, dict, bytes)
ResultType = TypeVar('ResultType', dict, bytes)
# Aioinflux uses logging mainly for debugging purposes.
# Please attach your own handlers if you need logging.
logger = logging.getLogger('aioinflux')
class InfluxDBError(Exception):
    """Raised when a server-side error occurs."""
class InfluxDBWriteError(InfluxDBError):
    """Raised when a server-side writing error occurs"""

    def __init__(self, resp):
        # Keep the raw response details around for callers to inspect.
        self.status = resp.status
        self.headers = resp.headers
        self.reason = resp.reason
        detail = self.headers.get("X-Influxdb-Error", "")
        super().__init__(
            f'Error writing data ({self.status} - {self.reason}): {detail}'
        )
class InfluxDBClient:
def __init__(
self,
host: str = 'localhost',
port: int = 8086,
mode: str = 'async',
output: str = 'json',
db: Optional[str] = None,
database: Optional[str] = None,
ssl: bool = False,
*,
unix_socket: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
timeout: Optional[Union[aiohttp.ClientTimeout, float]] = None,
loop: Optional[asyncio.AbstractEventLoop] = None,
redis_opts: Optional[dict] = None,
cache_expiry: int = 86400,
**kwargs
):
"""
:class:`~aioinflux.client.InfluxDBClient` holds information necessary
to interact with InfluxDB.
It is async by default, but can also be used as a sync/blocking client.
When querying, responses are returned as parsed JSON by default,
but can also be wrapped in easily iterable
wrapper object or be parsed to Pandas DataFrames.
The three main public methods are the three endpoints of the InfluxDB API, namely:
1. :meth:`~.InfluxDBClient.ping`
2. :meth:`~.InfluxDBClient.write`
3. :meth:`~.InfluxDBClient.query`
See each of the above methods documentation for further usage details.
See also: https://docs.influxdata.com/influxdb/latest/tools/api/
:param host: Hostname to connect to InfluxDB.
:param port: Port to connect to InfluxDB.
:param mode: Mode in which client should run. Available options:
- ``async``: Default mode. Each query/request to the backend will
- ``blocking``: Behaves in sync/blocking fashion,
similar to the official InfluxDB-Python client.
:param output: Output format of the response received from InfluxDB.
- ``json``: Default format.
Returns parsed JSON as received from InfluxDB.
- ``dataframe``: Parses results into :py:class`pandas.DataFrame`.
Not compatible with chunked responses.
:param db: Default database to be used by the client.
:param ssl: If https should be used.
:param unix_socket: Path to the InfluxDB Unix domain socket.
:param username: Username to use to connect to InfluxDB.
:param password: User password.
:param timeout: Timeout in seconds or :class:`aiohttp.ClientTimeout` object
:param database: Default database to be used by the client.
This field is for argument consistency with the official InfluxDB Python client.
:param loop: Asyncio event loop.
:param redis_opts: Dict fo keyword arguments for :func:`aioredis.create_redis`
:param cache_expiry: Expiry time (in seconds) for cached data
:param kwargs: Additional kwargs for :class:`aiohttp.ClientSession`
"""
self._loop = loop or asyncio.get_event_loop()
self._session: aiohttp.ClientSession = None
self._redis: aioredis.Redis = None
self._mode = None
self._output = None
self._db = None
self.ssl = ssl
self.host = host
self.port = port
self.mode = mode
self.output = output
self.db = database or db
# ClientSession configuration
if username:
kwargs.update(auth=aiohttp.BasicAuth(username, password))
if unix_socket:
kwargs.update(connector=aiohttp.UnixConnector(unix_socket, loop=self._loop))
if timeout:
if isinstance(timeout, aiohttp.ClientTimeout):
kwargs.update(timeout=timeout)
else:
kwargs.update(timeout=aiohttp.ClientTimeout(total=timeout))
self.opts = kwargs
# Cache configuration
self.redis_opts = redis_opts
self.cache_expiry = cache_expiry
async def create_session(self, **kwargs):
"""Creates an :class:`aiohttp.ClientSession`
Override this or call it with ``kwargs`` to use other :mod:`aiohttp`
functionality not covered by :class:`~.InfluxDBClient.__init__`
"""
self.opts.update(kwargs)
self._session = aiohttp.ClientSession(**self.opts, loop=self._loop)
if self.redis_opts:
if aioredis:
self._redis = await aioredis.create_redis(**self.redis_opts,
loop=self._loop)
else:
warnings.warn(no_redis_warning)
@property
def url(self):
return f'{"https" if self.ssl else "http"}://{self.host}:{self.port}/{{endpoint}}'
@property
def mode(self):
return self._mode
@property
def output(self):
return self._output
@property
def db(self):
return self._db
@mode.setter
def mode(self, mode):
if mode not in ('async', 'blocking'):
raise ValueError('Invalid running mode')
self._mode = mode
@output.setter
def output(self, output):
if pd is None and output == 'dataframe':
raise ValueError(no_pandas_warning)
if output not in ('json', 'dataframe'):
raise ValueError('Invalid output format')
self._output = output
@db.setter
def db(self, db):
self._db = db
if not db:
warnings.warn(f'No default databases is set. '
f'Database must be specified when querying/writing.')
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.close()
def __del__(self):
if not self._loop.is_closed() and self._session:
asyncio.ensure_future(self._session.close(), loop=self._loop)
def __repr__(self):
items = [f'{k}={v}' for k, v in vars(self).items() if not k.startswith('_')]
items.append(f'mode={self.mode}')
return f'{type(self).__name__}({", ".join(items)})'
@runner
async def close(self):
if self._session:
await self._session.close()
self._session = None
if self._redis:
self._redis.close()
@runner
async def ping(self) -> dict:
"""Pings InfluxDB
Returns a dictionary containing the headers of the response from ``influxd``.
"""
if not self._session:
await self.create_session()
async with self._session.get(self.url.format(endpoint='ping')) as resp:
logger.debug(f'{resp.status}: {resp.reason}')
return dict(resp.headers.items())
@runner
async def write(
self,
data: Union[PointType, Iterable[PointType]],
measurement: Optional[str] = None,
db: Optional[str] = None,
precision: Optional[str] = None,
rp: Optional[str] = None,
tag_columns: Optional[Iterable] = None,
**extra_tags,
) -> bool:
"""Writes data to InfluxDB.
Input can be:
1. A mapping (e.g. ``dict``) containing the keys:
``measurement``, ``time``, ``tags``, ``fields``
2. A Pandas :class:`~pandas.DataFrame` with a :class:`~pandas.DatetimeIndex`
3. A user defined class decorated w/
:func:`~aioinflux.serialization.usertype.lineprotocol`
4. A string (``str`` or ``bytes``) properly formatted in InfluxDB's line protocol
5. An iterable of one of the above
Input data in formats 1-3 are parsed to the line protocol before being
written to InfluxDB.
See the `InfluxDB docs <https://docs.influxdata.com/influxdb/latest/
write_protocols/line_protocol_reference/>`_ for more details.
:param data: Input data (see description above).
:param measurement: Measurement name. Mandatory when when writing DataFrames only.
When writing dictionary-like data, this field is treated as the default value
for points that do not contain a `measurement` field.
:param db: Database to be written to. Defaults to `self.db`.
:param precision: Sets the precision for the supplied Unix time values.
Ignored if input timestamp data is of non-integer type.
Valid values: ``{'ns', 'u', 'µ', 'ms', 's', 'm', 'h'}``
:param rp: Sets the target retention policy for the write.
If unspecified, data is written to the default retention policy.
:param tag_columns: Columns to be treated as tags
(used when writing DataFrames only)
:param extra_tags: Additional tags to be added to all points passed.
:return: Returns ``True`` if insert is successful.
Raises :py:class:`ValueError` otherwise.
"""
if not self._session:
await self.create_session()
if precision is not None:
# FIXME: Implement. Related issue: aioinflux/pull/13
raise NotImplementedError("'precision' parameter is not supported yet")
data = serialization.serialize(data, measurement, tag_columns, **extra_tags)
params = {'db': db or self.db}
if rp:
params['rp'] = rp
url = self.url.format(endpoint='write')
async with self._session.post(url, params=params, data=data) as resp:
if resp.status == 204:
return True
raise InfluxDBWriteError(resp)
@runner
async def query(
self,
q: AnyStr,
*,
epoch: str = 'ns',
chunked: bool = False,
chunk_size: Optional[int] = None,
db: Optional[str] = None,
use_cache: bool = False,
) -> Union[AsyncGenerator[ResultType, None], ResultType]:
"""Sends a query to InfluxDB.
Please refer to the InfluxDB documentation for all the possible queries:
https://docs.influxdata.com/influxdb/latest/query_language/
:param q: Raw query string
:param db: Database to be queried. Defaults to `self.db`.
:param epoch: Precision level of response timestamps.
Valid values: ``{'ns', 'u', 'µ', 'ms', 's', 'm', 'h'}``.
:param chunked: If ``True``, makes InfluxDB return results in streamed batches
rather than as a single response.
Returns an AsyncGenerator which yields responses
in the same format as non-chunked queries.
:param chunk_size: Max number of points for each chunk. By default, InfluxDB chunks
responses by series or by every 10,000 points, whichever occurs first.
:param use_cache:
:return: Response in the format specified by the combination of
:attr:`.InfluxDBClient.output` and ``chunked``
"""
async def _chunked_generator(url, data):
async with self._session.post(url, data=data) as resp:
logger.debug(f'{resp.status} (CHUNKED): {q}')
# Hack to avoid aiohttp raising ValueError('Line is too long')
# The number 16 is arbitrary (may be too large/small).
resp.content._high_water *= 16
async for chunk in resp.content:
chunk = json.loads(chunk)
self._check_error(chunk)
yield chunk
if not self._session:
await self.create_session()
# InfluxDB documentation is wrong regarding `/query` parameters
# See https://github.com/influxdata/docs.influxdata.com/issues/1807
if not isinstance(chunked, bool):
raise ValueError("'chunked' must be a boolean")
data = dict(q=q, db=db or self.db, chunked=str(chunked).lower(), epoch=epoch)
if chunked and chunk_size:
data['chunk_size'] = chunk_size
url = self.url.format(endpoint='query')
if chunked:
if use_cache:
raise ValueError("Can't use cache w/ chunked queries")
if self.mode != 'async':
raise ValueError("Can't use 'chunked' with non-async mode")
if self.output == 'json':
return _chunked_generator(url, data)
raise ValueError(f"Chunked queries are not support with {self.output!r} output")
key = f'aioinflux:{q}'
if use_cache and self._redis and await self._redis.exists(key):
logger.debug(f'Cache HIT: {q}')
data = lz4.decompress(await self._redis.get(key))
else:
async with self._session.post(url, data=data) as resp:
data = await resp.read()
if use_cache and self._redis:
logger.debug(f'Cache MISS ({resp.status}): {q}')
if resp.status == 200:
await self._redis.set(key, lz4.compress(data))
await self._redis.expire(key, self.cache_expiry)
else:
logger.debug(f'{resp.status}: {q}')
data = json.loads(data)
self._check_error(data)
if self.output == 'json':
return data
elif self.output == 'dataframe':
return serialization.dataframe.parse(data)
else:
raise ValueError('Invalid output format')
@staticmethod
def _check_error(response):
"""Checks for JSON error messages and raises Python exception"""
if 'error' in response:
raise InfluxDBError(response['error'])
elif 'results' in response:
for statement in response['results']:
if 'error' in statement:
msg = '{d[error]} (statement {d[statement_id]})'
raise InfluxDBError(msg.format(d=statement))
# InfluxQL - Data management
# --------------------------
def create_database(self, db=None):
db = db or self.db
return self.query(f'CREATE DATABASE "{db}"')
def drop_database(self, db=None):
db = db or self.db
return self.query(f'DROP DATABASE "{db}"')
def drop_measurement(self, measurement):
return self.query(f'DROP MEASUREMENT "{measurement}"')
# InfluxQL - Schema exploration
# -----------------------------
def show_databases(self):
return self.query("SHOW DATABASES")
def show_measurements(self):
return self.query("SHOW MEASUREMENTS")
def show_users(self):
return self.query("SHOW USERS")
def show_series(self, measurement=None):
if measurement:
return self.query(f"SHOW SERIES FROM {measurement}")
return self.query("SHOW SERIES")
def show_tag_keys(self, measurement=None):
if measurement:
return self.query(f"SHOW TAG KEYS FROM {measurement}")
return self.query("SHOW TAG KEYS")
def show_field_keys(self, measurement=None):
if measurement:
return self.query(f"SHOW FIELD KEYS FROM {measurement}")
return self.query("SHOW FIELD KEYS")
def show_tag_values(self, key, measurement=None):
if measurement:
return self.query(f'SHOW TAG VALUES FROM "{measurement}" WITH key = "{key}"')
return self.query(f'SHOW TAG VALUES WITH key = "{key}"')
def show_retention_policies(self):
return self.query("SHOW RETENTION POLICIES")
# InfluxQL - Other
# ----------------
def show_continuous_queries(self):
return self.query("SHOW CONTINUOUS QUERIES")
|
gusutabopb/aioinflux
|
aioinflux/client.py
|
InfluxDBClient.create_session
|
python
|
async def create_session(self, **kwargs):
self.opts.update(kwargs)
self._session = aiohttp.ClientSession(**self.opts, loop=self._loop)
if self.redis_opts:
if aioredis:
self._redis = await aioredis.create_redis(**self.redis_opts,
loop=self._loop)
else:
warnings.warn(no_redis_warning)
|
Creates an :class:`aiohttp.ClientSession`
Override this or call it with ``kwargs`` to use other :mod:`aiohttp`
functionality not covered by :class:`~.InfluxDBClient.__init__`
|
train
|
https://github.com/gusutabopb/aioinflux/blob/2e4b7b3e13604e7618c686d89a0673f0bc70b24e/aioinflux/client.py#L146-L159
| null |
class InfluxDBClient:
def __init__(
self,
host: str = 'localhost',
port: int = 8086,
mode: str = 'async',
output: str = 'json',
db: Optional[str] = None,
database: Optional[str] = None,
ssl: bool = False,
*,
unix_socket: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
timeout: Optional[Union[aiohttp.ClientTimeout, float]] = None,
loop: Optional[asyncio.AbstractEventLoop] = None,
redis_opts: Optional[dict] = None,
cache_expiry: int = 86400,
**kwargs
):
"""
:class:`~aioinflux.client.InfluxDBClient` holds information necessary
to interact with InfluxDB.
It is async by default, but can also be used as a sync/blocking client.
When querying, responses are returned as parsed JSON by default,
but can also be wrapped in easily iterable
wrapper object or be parsed to Pandas DataFrames.
The three main public methods are the three endpoints of the InfluxDB API, namely:
1. :meth:`~.InfluxDBClient.ping`
2. :meth:`~.InfluxDBClient.write`
3. :meth:`~.InfluxDBClient.query`
See each of the above methods documentation for further usage details.
See also: https://docs.influxdata.com/influxdb/latest/tools/api/
:param host: Hostname to connect to InfluxDB.
:param port: Port to connect to InfluxDB.
:param mode: Mode in which client should run. Available options:
- ``async``: Default mode. Each query/request to the backend will
- ``blocking``: Behaves in sync/blocking fashion,
similar to the official InfluxDB-Python client.
:param output: Output format of the response received from InfluxDB.
- ``json``: Default format.
Returns parsed JSON as received from InfluxDB.
- ``dataframe``: Parses results into :py:class`pandas.DataFrame`.
Not compatible with chunked responses.
:param db: Default database to be used by the client.
:param ssl: If https should be used.
:param unix_socket: Path to the InfluxDB Unix domain socket.
:param username: Username to use to connect to InfluxDB.
:param password: User password.
:param timeout: Timeout in seconds or :class:`aiohttp.ClientTimeout` object
:param database: Default database to be used by the client.
This field is for argument consistency with the official InfluxDB Python client.
:param loop: Asyncio event loop.
:param redis_opts: Dict fo keyword arguments for :func:`aioredis.create_redis`
:param cache_expiry: Expiry time (in seconds) for cached data
:param kwargs: Additional kwargs for :class:`aiohttp.ClientSession`
"""
self._loop = loop or asyncio.get_event_loop()
self._session: aiohttp.ClientSession = None
self._redis: aioredis.Redis = None
self._mode = None
self._output = None
self._db = None
self.ssl = ssl
self.host = host
self.port = port
self.mode = mode
self.output = output
self.db = database or db
# ClientSession configuration
if username:
kwargs.update(auth=aiohttp.BasicAuth(username, password))
if unix_socket:
kwargs.update(connector=aiohttp.UnixConnector(unix_socket, loop=self._loop))
if timeout:
if isinstance(timeout, aiohttp.ClientTimeout):
kwargs.update(timeout=timeout)
else:
kwargs.update(timeout=aiohttp.ClientTimeout(total=timeout))
self.opts = kwargs
# Cache configuration
self.redis_opts = redis_opts
self.cache_expiry = cache_expiry
@property
def url(self):
return f'{"https" if self.ssl else "http"}://{self.host}:{self.port}/{{endpoint}}'
@property
def mode(self):
return self._mode
@property
def output(self):
return self._output
@property
def db(self):
return self._db
@mode.setter
def mode(self, mode):
if mode not in ('async', 'blocking'):
raise ValueError('Invalid running mode')
self._mode = mode
@output.setter
def output(self, output):
if pd is None and output == 'dataframe':
raise ValueError(no_pandas_warning)
if output not in ('json', 'dataframe'):
raise ValueError('Invalid output format')
self._output = output
@db.setter
def db(self, db):
self._db = db
if not db:
warnings.warn(f'No default databases is set. '
f'Database must be specified when querying/writing.')
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.close()
def __del__(self):
if not self._loop.is_closed() and self._session:
asyncio.ensure_future(self._session.close(), loop=self._loop)
def __repr__(self):
items = [f'{k}={v}' for k, v in vars(self).items() if not k.startswith('_')]
items.append(f'mode={self.mode}')
return f'{type(self).__name__}({", ".join(items)})'
@runner
async def close(self):
if self._session:
await self._session.close()
self._session = None
if self._redis:
self._redis.close()
@runner
async def ping(self) -> dict:
"""Pings InfluxDB
Returns a dictionary containing the headers of the response from ``influxd``.
"""
if not self._session:
await self.create_session()
async with self._session.get(self.url.format(endpoint='ping')) as resp:
logger.debug(f'{resp.status}: {resp.reason}')
return dict(resp.headers.items())
@runner
async def write(
self,
data: Union[PointType, Iterable[PointType]],
measurement: Optional[str] = None,
db: Optional[str] = None,
precision: Optional[str] = None,
rp: Optional[str] = None,
tag_columns: Optional[Iterable] = None,
**extra_tags,
) -> bool:
"""Writes data to InfluxDB.
Input can be:
1. A mapping (e.g. ``dict``) containing the keys:
``measurement``, ``time``, ``tags``, ``fields``
2. A Pandas :class:`~pandas.DataFrame` with a :class:`~pandas.DatetimeIndex`
3. A user defined class decorated w/
:func:`~aioinflux.serialization.usertype.lineprotocol`
4. A string (``str`` or ``bytes``) properly formatted in InfluxDB's line protocol
5. An iterable of one of the above
Input data in formats 1-3 are parsed to the line protocol before being
written to InfluxDB.
See the `InfluxDB docs <https://docs.influxdata.com/influxdb/latest/
write_protocols/line_protocol_reference/>`_ for more details.
:param data: Input data (see description above).
:param measurement: Measurement name. Mandatory when when writing DataFrames only.
When writing dictionary-like data, this field is treated as the default value
for points that do not contain a `measurement` field.
:param db: Database to be written to. Defaults to `self.db`.
:param precision: Sets the precision for the supplied Unix time values.
Ignored if input timestamp data is of non-integer type.
Valid values: ``{'ns', 'u', 'µ', 'ms', 's', 'm', 'h'}``
:param rp: Sets the target retention policy for the write.
If unspecified, data is written to the default retention policy.
:param tag_columns: Columns to be treated as tags
(used when writing DataFrames only)
:param extra_tags: Additional tags to be added to all points passed.
:return: Returns ``True`` if insert is successful.
Raises :py:class:`ValueError` otherwise.
"""
if not self._session:
await self.create_session()
if precision is not None:
# FIXME: Implement. Related issue: aioinflux/pull/13
raise NotImplementedError("'precision' parameter is not supported yet")
data = serialization.serialize(data, measurement, tag_columns, **extra_tags)
params = {'db': db or self.db}
if rp:
params['rp'] = rp
url = self.url.format(endpoint='write')
async with self._session.post(url, params=params, data=data) as resp:
if resp.status == 204:
return True
raise InfluxDBWriteError(resp)
@runner
async def query(
self,
q: AnyStr,
*,
epoch: str = 'ns',
chunked: bool = False,
chunk_size: Optional[int] = None,
db: Optional[str] = None,
use_cache: bool = False,
) -> Union[AsyncGenerator[ResultType, None], ResultType]:
"""Sends a query to InfluxDB.
Please refer to the InfluxDB documentation for all the possible queries:
https://docs.influxdata.com/influxdb/latest/query_language/
:param q: Raw query string
:param db: Database to be queried. Defaults to `self.db`.
:param epoch: Precision level of response timestamps.
Valid values: ``{'ns', 'u', 'µ', 'ms', 's', 'm', 'h'}``.
:param chunked: If ``True``, makes InfluxDB return results in streamed batches
rather than as a single response.
Returns an AsyncGenerator which yields responses
in the same format as non-chunked queries.
:param chunk_size: Max number of points for each chunk. By default, InfluxDB chunks
responses by series or by every 10,000 points, whichever occurs first.
:param use_cache:
:return: Response in the format specified by the combination of
:attr:`.InfluxDBClient.output` and ``chunked``
"""
async def _chunked_generator(url, data):
async with self._session.post(url, data=data) as resp:
logger.debug(f'{resp.status} (CHUNKED): {q}')
# Hack to avoid aiohttp raising ValueError('Line is too long')
# The number 16 is arbitrary (may be too large/small).
resp.content._high_water *= 16
async for chunk in resp.content:
chunk = json.loads(chunk)
self._check_error(chunk)
yield chunk
if not self._session:
await self.create_session()
# InfluxDB documentation is wrong regarding `/query` parameters
# See https://github.com/influxdata/docs.influxdata.com/issues/1807
if not isinstance(chunked, bool):
raise ValueError("'chunked' must be a boolean")
data = dict(q=q, db=db or self.db, chunked=str(chunked).lower(), epoch=epoch)
if chunked and chunk_size:
data['chunk_size'] = chunk_size
url = self.url.format(endpoint='query')
if chunked:
if use_cache:
raise ValueError("Can't use cache w/ chunked queries")
if self.mode != 'async':
raise ValueError("Can't use 'chunked' with non-async mode")
if self.output == 'json':
return _chunked_generator(url, data)
raise ValueError(f"Chunked queries are not support with {self.output!r} output")
key = f'aioinflux:{q}'
if use_cache and self._redis and await self._redis.exists(key):
logger.debug(f'Cache HIT: {q}')
data = lz4.decompress(await self._redis.get(key))
else:
async with self._session.post(url, data=data) as resp:
data = await resp.read()
if use_cache and self._redis:
logger.debug(f'Cache MISS ({resp.status}): {q}')
if resp.status == 200:
await self._redis.set(key, lz4.compress(data))
await self._redis.expire(key, self.cache_expiry)
else:
logger.debug(f'{resp.status}: {q}')
data = json.loads(data)
self._check_error(data)
if self.output == 'json':
return data
elif self.output == 'dataframe':
return serialization.dataframe.parse(data)
else:
raise ValueError('Invalid output format')
@staticmethod
def _check_error(response):
"""Checks for JSON error messages and raises Python exception"""
if 'error' in response:
raise InfluxDBError(response['error'])
elif 'results' in response:
for statement in response['results']:
if 'error' in statement:
msg = '{d[error]} (statement {d[statement_id]})'
raise InfluxDBError(msg.format(d=statement))
# InfluxQL - Data management
# --------------------------
def create_database(self, db=None):
db = db or self.db
return self.query(f'CREATE DATABASE "{db}"')
def drop_database(self, db=None):
db = db or self.db
return self.query(f'DROP DATABASE "{db}"')
def drop_measurement(self, measurement):
return self.query(f'DROP MEASUREMENT "{measurement}"')
# InfluxQL - Schema exploration
# -----------------------------
def show_databases(self):
return self.query("SHOW DATABASES")
def show_measurements(self):
return self.query("SHOW MEASUREMENTS")
def show_users(self):
return self.query("SHOW USERS")
def show_series(self, measurement=None):
if measurement:
return self.query(f"SHOW SERIES FROM {measurement}")
return self.query("SHOW SERIES")
def show_tag_keys(self, measurement=None):
if measurement:
return self.query(f"SHOW TAG KEYS FROM {measurement}")
return self.query("SHOW TAG KEYS")
def show_field_keys(self, measurement=None):
if measurement:
return self.query(f"SHOW FIELD KEYS FROM {measurement}")
return self.query("SHOW FIELD KEYS")
def show_tag_values(self, key, measurement=None):
if measurement:
return self.query(f'SHOW TAG VALUES FROM "{measurement}" WITH key = "{key}"')
return self.query(f'SHOW TAG VALUES WITH key = "{key}"')
def show_retention_policies(self):
return self.query("SHOW RETENTION POLICIES")
# InfluxQL - Other
# ----------------
def show_continuous_queries(self):
return self.query("SHOW CONTINUOUS QUERIES")
|
gusutabopb/aioinflux
|
aioinflux/client.py
|
InfluxDBClient.ping
|
python
|
async def ping(self) -> dict:
if not self._session:
await self.create_session()
async with self._session.get(self.url.format(endpoint='ping')) as resp:
logger.debug(f'{resp.status}: {resp.reason}')
return dict(resp.headers.items())
|
Pings InfluxDB
Returns a dictionary containing the headers of the response from ``influxd``.
|
train
|
https://github.com/gusutabopb/aioinflux/blob/2e4b7b3e13604e7618c686d89a0673f0bc70b24e/aioinflux/client.py#L228-L237
|
[
"async def create_session(self, **kwargs):\n \"\"\"Creates an :class:`aiohttp.ClientSession`\n\n Override this or call it with ``kwargs`` to use other :mod:`aiohttp`\n functionality not covered by :class:`~.InfluxDBClient.__init__`\n \"\"\"\n self.opts.update(kwargs)\n self._session = aiohttp.ClientSession(**self.opts, loop=self._loop)\n if self.redis_opts:\n if aioredis:\n self._redis = await aioredis.create_redis(**self.redis_opts,\n loop=self._loop)\n else:\n warnings.warn(no_redis_warning)\n"
] |
class InfluxDBClient:
def __init__(
self,
host: str = 'localhost',
port: int = 8086,
mode: str = 'async',
output: str = 'json',
db: Optional[str] = None,
database: Optional[str] = None,
ssl: bool = False,
*,
unix_socket: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
timeout: Optional[Union[aiohttp.ClientTimeout, float]] = None,
loop: Optional[asyncio.AbstractEventLoop] = None,
redis_opts: Optional[dict] = None,
cache_expiry: int = 86400,
**kwargs
):
"""
:class:`~aioinflux.client.InfluxDBClient` holds information necessary
to interact with InfluxDB.
It is async by default, but can also be used as a sync/blocking client.
When querying, responses are returned as parsed JSON by default,
but can also be wrapped in easily iterable
wrapper object or be parsed to Pandas DataFrames.
The three main public methods are the three endpoints of the InfluxDB API, namely:
1. :meth:`~.InfluxDBClient.ping`
2. :meth:`~.InfluxDBClient.write`
3. :meth:`~.InfluxDBClient.query`
See each of the above methods documentation for further usage details.
See also: https://docs.influxdata.com/influxdb/latest/tools/api/
:param host: Hostname to connect to InfluxDB.
:param port: Port to connect to InfluxDB.
:param mode: Mode in which client should run. Available options:
- ``async``: Default mode. Each query/request to the backend will
- ``blocking``: Behaves in sync/blocking fashion,
similar to the official InfluxDB-Python client.
:param output: Output format of the response received from InfluxDB.
- ``json``: Default format.
Returns parsed JSON as received from InfluxDB.
- ``dataframe``: Parses results into :py:class`pandas.DataFrame`.
Not compatible with chunked responses.
:param db: Default database to be used by the client.
:param ssl: If https should be used.
:param unix_socket: Path to the InfluxDB Unix domain socket.
:param username: Username to use to connect to InfluxDB.
:param password: User password.
:param timeout: Timeout in seconds or :class:`aiohttp.ClientTimeout` object
:param database: Default database to be used by the client.
This field is for argument consistency with the official InfluxDB Python client.
:param loop: Asyncio event loop.
:param redis_opts: Dict fo keyword arguments for :func:`aioredis.create_redis`
:param cache_expiry: Expiry time (in seconds) for cached data
:param kwargs: Additional kwargs for :class:`aiohttp.ClientSession`
"""
self._loop = loop or asyncio.get_event_loop()
self._session: aiohttp.ClientSession = None
self._redis: aioredis.Redis = None
self._mode = None
self._output = None
self._db = None
self.ssl = ssl
self.host = host
self.port = port
self.mode = mode
self.output = output
self.db = database or db
# ClientSession configuration
if username:
kwargs.update(auth=aiohttp.BasicAuth(username, password))
if unix_socket:
kwargs.update(connector=aiohttp.UnixConnector(unix_socket, loop=self._loop))
if timeout:
if isinstance(timeout, aiohttp.ClientTimeout):
kwargs.update(timeout=timeout)
else:
kwargs.update(timeout=aiohttp.ClientTimeout(total=timeout))
self.opts = kwargs
# Cache configuration
self.redis_opts = redis_opts
self.cache_expiry = cache_expiry
async def create_session(self, **kwargs):
"""Creates an :class:`aiohttp.ClientSession`
Override this or call it with ``kwargs`` to use other :mod:`aiohttp`
functionality not covered by :class:`~.InfluxDBClient.__init__`
"""
self.opts.update(kwargs)
self._session = aiohttp.ClientSession(**self.opts, loop=self._loop)
if self.redis_opts:
if aioredis:
self._redis = await aioredis.create_redis(**self.redis_opts,
loop=self._loop)
else:
warnings.warn(no_redis_warning)
@property
def url(self):
return f'{"https" if self.ssl else "http"}://{self.host}:{self.port}/{{endpoint}}'
@property
def mode(self):
return self._mode
@property
def output(self):
return self._output
@property
def db(self):
return self._db
@mode.setter
def mode(self, mode):
if mode not in ('async', 'blocking'):
raise ValueError('Invalid running mode')
self._mode = mode
@output.setter
def output(self, output):
if pd is None and output == 'dataframe':
raise ValueError(no_pandas_warning)
if output not in ('json', 'dataframe'):
raise ValueError('Invalid output format')
self._output = output
@db.setter
def db(self, db):
self._db = db
if not db:
warnings.warn(f'No default databases is set. '
f'Database must be specified when querying/writing.')
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.close()
def __del__(self):
if not self._loop.is_closed() and self._session:
asyncio.ensure_future(self._session.close(), loop=self._loop)
def __repr__(self):
items = [f'{k}={v}' for k, v in vars(self).items() if not k.startswith('_')]
items.append(f'mode={self.mode}')
return f'{type(self).__name__}({", ".join(items)})'
@runner
async def close(self):
if self._session:
await self._session.close()
self._session = None
if self._redis:
self._redis.close()
@runner
@runner
async def write(
self,
data: Union[PointType, Iterable[PointType]],
measurement: Optional[str] = None,
db: Optional[str] = None,
precision: Optional[str] = None,
rp: Optional[str] = None,
tag_columns: Optional[Iterable] = None,
**extra_tags,
) -> bool:
"""Writes data to InfluxDB.
Input can be:
1. A mapping (e.g. ``dict``) containing the keys:
``measurement``, ``time``, ``tags``, ``fields``
2. A Pandas :class:`~pandas.DataFrame` with a :class:`~pandas.DatetimeIndex`
3. A user defined class decorated w/
:func:`~aioinflux.serialization.usertype.lineprotocol`
4. A string (``str`` or ``bytes``) properly formatted in InfluxDB's line protocol
5. An iterable of one of the above
Input data in formats 1-3 are parsed to the line protocol before being
written to InfluxDB.
See the `InfluxDB docs <https://docs.influxdata.com/influxdb/latest/
write_protocols/line_protocol_reference/>`_ for more details.
:param data: Input data (see description above).
:param measurement: Measurement name. Mandatory when when writing DataFrames only.
When writing dictionary-like data, this field is treated as the default value
for points that do not contain a `measurement` field.
:param db: Database to be written to. Defaults to `self.db`.
:param precision: Sets the precision for the supplied Unix time values.
Ignored if input timestamp data is of non-integer type.
Valid values: ``{'ns', 'u', 'µ', 'ms', 's', 'm', 'h'}``
:param rp: Sets the target retention policy for the write.
If unspecified, data is written to the default retention policy.
:param tag_columns: Columns to be treated as tags
(used when writing DataFrames only)
:param extra_tags: Additional tags to be added to all points passed.
:return: Returns ``True`` if insert is successful.
Raises :py:class:`ValueError` otherwise.
"""
if not self._session:
await self.create_session()
if precision is not None:
# FIXME: Implement. Related issue: aioinflux/pull/13
raise NotImplementedError("'precision' parameter is not supported yet")
data = serialization.serialize(data, measurement, tag_columns, **extra_tags)
params = {'db': db or self.db}
if rp:
params['rp'] = rp
url = self.url.format(endpoint='write')
async with self._session.post(url, params=params, data=data) as resp:
if resp.status == 204:
return True
raise InfluxDBWriteError(resp)
@runner
async def query(
self,
q: AnyStr,
*,
epoch: str = 'ns',
chunked: bool = False,
chunk_size: Optional[int] = None,
db: Optional[str] = None,
use_cache: bool = False,
) -> Union[AsyncGenerator[ResultType, None], ResultType]:
"""Sends a query to InfluxDB.
Please refer to the InfluxDB documentation for all the possible queries:
https://docs.influxdata.com/influxdb/latest/query_language/
:param q: Raw query string
:param db: Database to be queried. Defaults to `self.db`.
:param epoch: Precision level of response timestamps.
Valid values: ``{'ns', 'u', 'µ', 'ms', 's', 'm', 'h'}``.
:param chunked: If ``True``, makes InfluxDB return results in streamed batches
rather than as a single response.
Returns an AsyncGenerator which yields responses
in the same format as non-chunked queries.
:param chunk_size: Max number of points for each chunk. By default, InfluxDB chunks
responses by series or by every 10,000 points, whichever occurs first.
:param use_cache:
:return: Response in the format specified by the combination of
:attr:`.InfluxDBClient.output` and ``chunked``
"""
async def _chunked_generator(url, data):
async with self._session.post(url, data=data) as resp:
logger.debug(f'{resp.status} (CHUNKED): {q}')
# Hack to avoid aiohttp raising ValueError('Line is too long')
# The number 16 is arbitrary (may be too large/small).
resp.content._high_water *= 16
async for chunk in resp.content:
chunk = json.loads(chunk)
self._check_error(chunk)
yield chunk
if not self._session:
await self.create_session()
# InfluxDB documentation is wrong regarding `/query` parameters
# See https://github.com/influxdata/docs.influxdata.com/issues/1807
if not isinstance(chunked, bool):
raise ValueError("'chunked' must be a boolean")
data = dict(q=q, db=db or self.db, chunked=str(chunked).lower(), epoch=epoch)
if chunked and chunk_size:
data['chunk_size'] = chunk_size
url = self.url.format(endpoint='query')
if chunked:
if use_cache:
raise ValueError("Can't use cache w/ chunked queries")
if self.mode != 'async':
raise ValueError("Can't use 'chunked' with non-async mode")
if self.output == 'json':
return _chunked_generator(url, data)
raise ValueError(f"Chunked queries are not support with {self.output!r} output")
key = f'aioinflux:{q}'
if use_cache and self._redis and await self._redis.exists(key):
logger.debug(f'Cache HIT: {q}')
data = lz4.decompress(await self._redis.get(key))
else:
async with self._session.post(url, data=data) as resp:
data = await resp.read()
if use_cache and self._redis:
logger.debug(f'Cache MISS ({resp.status}): {q}')
if resp.status == 200:
await self._redis.set(key, lz4.compress(data))
await self._redis.expire(key, self.cache_expiry)
else:
logger.debug(f'{resp.status}: {q}')
data = json.loads(data)
self._check_error(data)
if self.output == 'json':
return data
elif self.output == 'dataframe':
return serialization.dataframe.parse(data)
else:
raise ValueError('Invalid output format')
@staticmethod
def _check_error(response):
"""Checks for JSON error messages and raises Python exception"""
if 'error' in response:
raise InfluxDBError(response['error'])
elif 'results' in response:
for statement in response['results']:
if 'error' in statement:
msg = '{d[error]} (statement {d[statement_id]})'
raise InfluxDBError(msg.format(d=statement))
# InfluxQL - Data management
# --------------------------
def create_database(self, db=None):
db = db or self.db
return self.query(f'CREATE DATABASE "{db}"')
def drop_database(self, db=None):
db = db or self.db
return self.query(f'DROP DATABASE "{db}"')
def drop_measurement(self, measurement):
return self.query(f'DROP MEASUREMENT "{measurement}"')
# InfluxQL - Schema exploration
# -----------------------------
def show_databases(self):
return self.query("SHOW DATABASES")
def show_measurements(self):
return self.query("SHOW MEASUREMENTS")
def show_users(self):
return self.query("SHOW USERS")
def show_series(self, measurement=None):
if measurement:
return self.query(f"SHOW SERIES FROM {measurement}")
return self.query("SHOW SERIES")
def show_tag_keys(self, measurement=None):
if measurement:
return self.query(f"SHOW TAG KEYS FROM {measurement}")
return self.query("SHOW TAG KEYS")
def show_field_keys(self, measurement=None):
if measurement:
return self.query(f"SHOW FIELD KEYS FROM {measurement}")
return self.query("SHOW FIELD KEYS")
def show_tag_values(self, key, measurement=None):
if measurement:
return self.query(f'SHOW TAG VALUES FROM "{measurement}" WITH key = "{key}"')
return self.query(f'SHOW TAG VALUES WITH key = "{key}"')
def show_retention_policies(self):
return self.query("SHOW RETENTION POLICIES")
# InfluxQL - Other
# ----------------
def show_continuous_queries(self):
return self.query("SHOW CONTINUOUS QUERIES")
|
gusutabopb/aioinflux
|
aioinflux/client.py
|
InfluxDBClient.write
|
python
|
async def write(
self,
data: Union[PointType, Iterable[PointType]],
measurement: Optional[str] = None,
db: Optional[str] = None,
precision: Optional[str] = None,
rp: Optional[str] = None,
tag_columns: Optional[Iterable] = None,
**extra_tags,
) -> bool:
if not self._session:
await self.create_session()
if precision is not None:
# FIXME: Implement. Related issue: aioinflux/pull/13
raise NotImplementedError("'precision' parameter is not supported yet")
data = serialization.serialize(data, measurement, tag_columns, **extra_tags)
params = {'db': db or self.db}
if rp:
params['rp'] = rp
url = self.url.format(endpoint='write')
async with self._session.post(url, params=params, data=data) as resp:
if resp.status == 204:
return True
raise InfluxDBWriteError(resp)
|
Writes data to InfluxDB.
Input can be:
1. A mapping (e.g. ``dict``) containing the keys:
``measurement``, ``time``, ``tags``, ``fields``
2. A Pandas :class:`~pandas.DataFrame` with a :class:`~pandas.DatetimeIndex`
3. A user defined class decorated w/
:func:`~aioinflux.serialization.usertype.lineprotocol`
4. A string (``str`` or ``bytes``) properly formatted in InfluxDB's line protocol
5. An iterable of one of the above
Input data in formats 1-3 are parsed to the line protocol before being
written to InfluxDB.
See the `InfluxDB docs <https://docs.influxdata.com/influxdb/latest/
write_protocols/line_protocol_reference/>`_ for more details.
:param data: Input data (see description above).
:param measurement: Measurement name. Mandatory when when writing DataFrames only.
When writing dictionary-like data, this field is treated as the default value
for points that do not contain a `measurement` field.
:param db: Database to be written to. Defaults to `self.db`.
:param precision: Sets the precision for the supplied Unix time values.
Ignored if input timestamp data is of non-integer type.
Valid values: ``{'ns', 'u', 'µ', 'ms', 's', 'm', 'h'}``
:param rp: Sets the target retention policy for the write.
If unspecified, data is written to the default retention policy.
:param tag_columns: Columns to be treated as tags
(used when writing DataFrames only)
:param extra_tags: Additional tags to be added to all points passed.
:return: Returns ``True`` if insert is successful.
Raises :py:class:`ValueError` otherwise.
|
train
|
https://github.com/gusutabopb/aioinflux/blob/2e4b7b3e13604e7618c686d89a0673f0bc70b24e/aioinflux/client.py#L240-L295
|
[
"def serialize(data, measurement=None, tag_columns=None, **extra_tags):\n \"\"\"Converts input data into line protocol format\"\"\"\n if isinstance(data, bytes):\n return data\n elif isinstance(data, str):\n return data.encode('utf-8')\n elif hasattr(data, 'to_lineprotocol'):\n return data.to_lineprotocol()\n elif pd is not None and isinstance(data, pd.DataFrame):\n return dataframe.serialize(data, measurement, tag_columns, **extra_tags)\n elif isinstance(data, dict):\n return mapping.serialize(data, measurement, **extra_tags)\n elif hasattr(data, '__iter__'):\n return b'\\n'.join([serialize(i, measurement, tag_columns, **extra_tags) for i in data])\n else:\n raise ValueError('Invalid input', data)\n",
"async def create_session(self, **kwargs):\n \"\"\"Creates an :class:`aiohttp.ClientSession`\n\n Override this or call it with ``kwargs`` to use other :mod:`aiohttp`\n functionality not covered by :class:`~.InfluxDBClient.__init__`\n \"\"\"\n self.opts.update(kwargs)\n self._session = aiohttp.ClientSession(**self.opts, loop=self._loop)\n if self.redis_opts:\n if aioredis:\n self._redis = await aioredis.create_redis(**self.redis_opts,\n loop=self._loop)\n else:\n warnings.warn(no_redis_warning)\n"
] |
class InfluxDBClient:
def __init__(
self,
host: str = 'localhost',
port: int = 8086,
mode: str = 'async',
output: str = 'json',
db: Optional[str] = None,
database: Optional[str] = None,
ssl: bool = False,
*,
unix_socket: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
timeout: Optional[Union[aiohttp.ClientTimeout, float]] = None,
loop: Optional[asyncio.AbstractEventLoop] = None,
redis_opts: Optional[dict] = None,
cache_expiry: int = 86400,
**kwargs
):
"""
:class:`~aioinflux.client.InfluxDBClient` holds information necessary
to interact with InfluxDB.
It is async by default, but can also be used as a sync/blocking client.
When querying, responses are returned as parsed JSON by default,
but can also be wrapped in easily iterable
wrapper object or be parsed to Pandas DataFrames.
The three main public methods are the three endpoints of the InfluxDB API, namely:
1. :meth:`~.InfluxDBClient.ping`
2. :meth:`~.InfluxDBClient.write`
3. :meth:`~.InfluxDBClient.query`
See each of the above methods documentation for further usage details.
See also: https://docs.influxdata.com/influxdb/latest/tools/api/
:param host: Hostname to connect to InfluxDB.
:param port: Port to connect to InfluxDB.
:param mode: Mode in which client should run. Available options:
- ``async``: Default mode. Each query/request to the backend will
- ``blocking``: Behaves in sync/blocking fashion,
similar to the official InfluxDB-Python client.
:param output: Output format of the response received from InfluxDB.
- ``json``: Default format.
Returns parsed JSON as received from InfluxDB.
- ``dataframe``: Parses results into :py:class`pandas.DataFrame`.
Not compatible with chunked responses.
:param db: Default database to be used by the client.
:param ssl: If https should be used.
:param unix_socket: Path to the InfluxDB Unix domain socket.
:param username: Username to use to connect to InfluxDB.
:param password: User password.
:param timeout: Timeout in seconds or :class:`aiohttp.ClientTimeout` object
:param database: Default database to be used by the client.
This field is for argument consistency with the official InfluxDB Python client.
:param loop: Asyncio event loop.
:param redis_opts: Dict fo keyword arguments for :func:`aioredis.create_redis`
:param cache_expiry: Expiry time (in seconds) for cached data
:param kwargs: Additional kwargs for :class:`aiohttp.ClientSession`
"""
self._loop = loop or asyncio.get_event_loop()
self._session: aiohttp.ClientSession = None
self._redis: aioredis.Redis = None
self._mode = None
self._output = None
self._db = None
self.ssl = ssl
self.host = host
self.port = port
self.mode = mode
self.output = output
self.db = database or db
# ClientSession configuration
if username:
kwargs.update(auth=aiohttp.BasicAuth(username, password))
if unix_socket:
kwargs.update(connector=aiohttp.UnixConnector(unix_socket, loop=self._loop))
if timeout:
if isinstance(timeout, aiohttp.ClientTimeout):
kwargs.update(timeout=timeout)
else:
kwargs.update(timeout=aiohttp.ClientTimeout(total=timeout))
self.opts = kwargs
# Cache configuration
self.redis_opts = redis_opts
self.cache_expiry = cache_expiry
async def create_session(self, **kwargs):
"""Creates an :class:`aiohttp.ClientSession`
Override this or call it with ``kwargs`` to use other :mod:`aiohttp`
functionality not covered by :class:`~.InfluxDBClient.__init__`
"""
self.opts.update(kwargs)
self._session = aiohttp.ClientSession(**self.opts, loop=self._loop)
if self.redis_opts:
if aioredis:
self._redis = await aioredis.create_redis(**self.redis_opts,
loop=self._loop)
else:
warnings.warn(no_redis_warning)
@property
def url(self):
return f'{"https" if self.ssl else "http"}://{self.host}:{self.port}/{{endpoint}}'
@property
def mode(self):
return self._mode
@property
def output(self):
return self._output
@property
def db(self):
return self._db
@mode.setter
def mode(self, mode):
if mode not in ('async', 'blocking'):
raise ValueError('Invalid running mode')
self._mode = mode
@output.setter
def output(self, output):
if pd is None and output == 'dataframe':
raise ValueError(no_pandas_warning)
if output not in ('json', 'dataframe'):
raise ValueError('Invalid output format')
self._output = output
@db.setter
def db(self, db):
self._db = db
if not db:
warnings.warn(f'No default databases is set. '
f'Database must be specified when querying/writing.')
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.close()
def __del__(self):
if not self._loop.is_closed() and self._session:
asyncio.ensure_future(self._session.close(), loop=self._loop)
def __repr__(self):
items = [f'{k}={v}' for k, v in vars(self).items() if not k.startswith('_')]
items.append(f'mode={self.mode}')
return f'{type(self).__name__}({", ".join(items)})'
@runner
async def close(self):
if self._session:
await self._session.close()
self._session = None
if self._redis:
self._redis.close()
@runner
async def ping(self) -> dict:
"""Pings InfluxDB
Returns a dictionary containing the headers of the response from ``influxd``.
"""
if not self._session:
await self.create_session()
async with self._session.get(self.url.format(endpoint='ping')) as resp:
logger.debug(f'{resp.status}: {resp.reason}')
return dict(resp.headers.items())
@runner
@runner
async def query(
self,
q: AnyStr,
*,
epoch: str = 'ns',
chunked: bool = False,
chunk_size: Optional[int] = None,
db: Optional[str] = None,
use_cache: bool = False,
) -> Union[AsyncGenerator[ResultType, None], ResultType]:
"""Sends a query to InfluxDB.
Please refer to the InfluxDB documentation for all the possible queries:
https://docs.influxdata.com/influxdb/latest/query_language/
:param q: Raw query string
:param db: Database to be queried. Defaults to `self.db`.
:param epoch: Precision level of response timestamps.
Valid values: ``{'ns', 'u', 'µ', 'ms', 's', 'm', 'h'}``.
:param chunked: If ``True``, makes InfluxDB return results in streamed batches
rather than as a single response.
Returns an AsyncGenerator which yields responses
in the same format as non-chunked queries.
:param chunk_size: Max number of points for each chunk. By default, InfluxDB chunks
responses by series or by every 10,000 points, whichever occurs first.
:param use_cache:
:return: Response in the format specified by the combination of
:attr:`.InfluxDBClient.output` and ``chunked``
"""
async def _chunked_generator(url, data):
async with self._session.post(url, data=data) as resp:
logger.debug(f'{resp.status} (CHUNKED): {q}')
# Hack to avoid aiohttp raising ValueError('Line is too long')
# The number 16 is arbitrary (may be too large/small).
resp.content._high_water *= 16
async for chunk in resp.content:
chunk = json.loads(chunk)
self._check_error(chunk)
yield chunk
if not self._session:
await self.create_session()
# InfluxDB documentation is wrong regarding `/query` parameters
# See https://github.com/influxdata/docs.influxdata.com/issues/1807
if not isinstance(chunked, bool):
raise ValueError("'chunked' must be a boolean")
data = dict(q=q, db=db or self.db, chunked=str(chunked).lower(), epoch=epoch)
if chunked and chunk_size:
data['chunk_size'] = chunk_size
url = self.url.format(endpoint='query')
if chunked:
if use_cache:
raise ValueError("Can't use cache w/ chunked queries")
if self.mode != 'async':
raise ValueError("Can't use 'chunked' with non-async mode")
if self.output == 'json':
return _chunked_generator(url, data)
raise ValueError(f"Chunked queries are not support with {self.output!r} output")
key = f'aioinflux:{q}'
if use_cache and self._redis and await self._redis.exists(key):
logger.debug(f'Cache HIT: {q}')
data = lz4.decompress(await self._redis.get(key))
else:
async with self._session.post(url, data=data) as resp:
data = await resp.read()
if use_cache and self._redis:
logger.debug(f'Cache MISS ({resp.status}): {q}')
if resp.status == 200:
await self._redis.set(key, lz4.compress(data))
await self._redis.expire(key, self.cache_expiry)
else:
logger.debug(f'{resp.status}: {q}')
data = json.loads(data)
self._check_error(data)
if self.output == 'json':
return data
elif self.output == 'dataframe':
return serialization.dataframe.parse(data)
else:
raise ValueError('Invalid output format')
@staticmethod
def _check_error(response):
"""Checks for JSON error messages and raises Python exception"""
if 'error' in response:
raise InfluxDBError(response['error'])
elif 'results' in response:
for statement in response['results']:
if 'error' in statement:
msg = '{d[error]} (statement {d[statement_id]})'
raise InfluxDBError(msg.format(d=statement))
# InfluxQL - Data management
# --------------------------
def create_database(self, db=None):
db = db or self.db
return self.query(f'CREATE DATABASE "{db}"')
def drop_database(self, db=None):
db = db or self.db
return self.query(f'DROP DATABASE "{db}"')
def drop_measurement(self, measurement):
return self.query(f'DROP MEASUREMENT "{measurement}"')
# InfluxQL - Schema exploration
# -----------------------------
def show_databases(self):
return self.query("SHOW DATABASES")
def show_measurements(self):
return self.query("SHOW MEASUREMENTS")
def show_users(self):
return self.query("SHOW USERS")
def show_series(self, measurement=None):
if measurement:
return self.query(f"SHOW SERIES FROM {measurement}")
return self.query("SHOW SERIES")
def show_tag_keys(self, measurement=None):
if measurement:
return self.query(f"SHOW TAG KEYS FROM {measurement}")
return self.query("SHOW TAG KEYS")
def show_field_keys(self, measurement=None):
if measurement:
return self.query(f"SHOW FIELD KEYS FROM {measurement}")
return self.query("SHOW FIELD KEYS")
def show_tag_values(self, key, measurement=None):
if measurement:
return self.query(f'SHOW TAG VALUES FROM "{measurement}" WITH key = "{key}"')
return self.query(f'SHOW TAG VALUES WITH key = "{key}"')
def show_retention_policies(self):
return self.query("SHOW RETENTION POLICIES")
# InfluxQL - Other
# ----------------
def show_continuous_queries(self):
return self.query("SHOW CONTINUOUS QUERIES")
|
gusutabopb/aioinflux
|
aioinflux/client.py
|
InfluxDBClient.query
|
python
|
async def query(
self,
q: AnyStr,
*,
epoch: str = 'ns',
chunked: bool = False,
chunk_size: Optional[int] = None,
db: Optional[str] = None,
use_cache: bool = False,
) -> Union[AsyncGenerator[ResultType, None], ResultType]:
async def _chunked_generator(url, data):
async with self._session.post(url, data=data) as resp:
logger.debug(f'{resp.status} (CHUNKED): {q}')
# Hack to avoid aiohttp raising ValueError('Line is too long')
# The number 16 is arbitrary (may be too large/small).
resp.content._high_water *= 16
async for chunk in resp.content:
chunk = json.loads(chunk)
self._check_error(chunk)
yield chunk
if not self._session:
await self.create_session()
# InfluxDB documentation is wrong regarding `/query` parameters
# See https://github.com/influxdata/docs.influxdata.com/issues/1807
if not isinstance(chunked, bool):
raise ValueError("'chunked' must be a boolean")
data = dict(q=q, db=db or self.db, chunked=str(chunked).lower(), epoch=epoch)
if chunked and chunk_size:
data['chunk_size'] = chunk_size
url = self.url.format(endpoint='query')
if chunked:
if use_cache:
raise ValueError("Can't use cache w/ chunked queries")
if self.mode != 'async':
raise ValueError("Can't use 'chunked' with non-async mode")
if self.output == 'json':
return _chunked_generator(url, data)
raise ValueError(f"Chunked queries are not support with {self.output!r} output")
key = f'aioinflux:{q}'
if use_cache and self._redis and await self._redis.exists(key):
logger.debug(f'Cache HIT: {q}')
data = lz4.decompress(await self._redis.get(key))
else:
async with self._session.post(url, data=data) as resp:
data = await resp.read()
if use_cache and self._redis:
logger.debug(f'Cache MISS ({resp.status}): {q}')
if resp.status == 200:
await self._redis.set(key, lz4.compress(data))
await self._redis.expire(key, self.cache_expiry)
else:
logger.debug(f'{resp.status}: {q}')
data = json.loads(data)
self._check_error(data)
if self.output == 'json':
return data
elif self.output == 'dataframe':
return serialization.dataframe.parse(data)
else:
raise ValueError('Invalid output format')
|
Sends a query to InfluxDB.
Please refer to the InfluxDB documentation for all the possible queries:
https://docs.influxdata.com/influxdb/latest/query_language/
:param q: Raw query string
:param db: Database to be queried. Defaults to `self.db`.
:param epoch: Precision level of response timestamps.
Valid values: ``{'ns', 'u', 'µ', 'ms', 's', 'm', 'h'}``.
:param chunked: If ``True``, makes InfluxDB return results in streamed batches
rather than as a single response.
Returns an AsyncGenerator which yields responses
in the same format as non-chunked queries.
:param chunk_size: Max number of points for each chunk. By default, InfluxDB chunks
responses by series or by every 10,000 points, whichever occurs first.
:param use_cache:
:return: Response in the format specified by the combination of
:attr:`.InfluxDBClient.output` and ``chunked``
|
train
|
https://github.com/gusutabopb/aioinflux/blob/2e4b7b3e13604e7618c686d89a0673f0bc70b24e/aioinflux/client.py#L298-L381
|
[
"def parse(resp) -> DataFrameType:\n \"\"\"Makes a dictionary of DataFrames from a response object\"\"\"\n statements = []\n for statement in resp['results']:\n series = {}\n for s in statement.get('series', []):\n series[_get_name(s)] = _drop_zero_index(_serializer(s))\n statements.append(series)\n\n if len(statements) == 1:\n series: dict = statements[0]\n if len(series) == 1:\n return list(series.values())[0] # DataFrame\n else:\n return series # dict\n return statements # list\n",
"async def create_session(self, **kwargs):\n \"\"\"Creates an :class:`aiohttp.ClientSession`\n\n Override this or call it with ``kwargs`` to use other :mod:`aiohttp`\n functionality not covered by :class:`~.InfluxDBClient.__init__`\n \"\"\"\n self.opts.update(kwargs)\n self._session = aiohttp.ClientSession(**self.opts, loop=self._loop)\n if self.redis_opts:\n if aioredis:\n self._redis = await aioredis.create_redis(**self.redis_opts,\n loop=self._loop)\n else:\n warnings.warn(no_redis_warning)\n",
"def _check_error(response):\n \"\"\"Checks for JSON error messages and raises Python exception\"\"\"\n if 'error' in response:\n raise InfluxDBError(response['error'])\n elif 'results' in response:\n for statement in response['results']:\n if 'error' in statement:\n msg = '{d[error]} (statement {d[statement_id]})'\n raise InfluxDBError(msg.format(d=statement))\n",
"async def _chunked_generator(url, data):\n async with self._session.post(url, data=data) as resp:\n logger.debug(f'{resp.status} (CHUNKED): {q}')\n # Hack to avoid aiohttp raising ValueError('Line is too long')\n # The number 16 is arbitrary (may be too large/small).\n resp.content._high_water *= 16\n async for chunk in resp.content:\n chunk = json.loads(chunk)\n self._check_error(chunk)\n yield chunk\n"
] |
class InfluxDBClient:
def __init__(
self,
host: str = 'localhost',
port: int = 8086,
mode: str = 'async',
output: str = 'json',
db: Optional[str] = None,
database: Optional[str] = None,
ssl: bool = False,
*,
unix_socket: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
timeout: Optional[Union[aiohttp.ClientTimeout, float]] = None,
loop: Optional[asyncio.AbstractEventLoop] = None,
redis_opts: Optional[dict] = None,
cache_expiry: int = 86400,
**kwargs
):
"""
:class:`~aioinflux.client.InfluxDBClient` holds information necessary
to interact with InfluxDB.
It is async by default, but can also be used as a sync/blocking client.
When querying, responses are returned as parsed JSON by default,
but can also be wrapped in easily iterable
wrapper object or be parsed to Pandas DataFrames.
The three main public methods are the three endpoints of the InfluxDB API, namely:
1. :meth:`~.InfluxDBClient.ping`
2. :meth:`~.InfluxDBClient.write`
3. :meth:`~.InfluxDBClient.query`
See each of the above methods documentation for further usage details.
See also: https://docs.influxdata.com/influxdb/latest/tools/api/
:param host: Hostname to connect to InfluxDB.
:param port: Port to connect to InfluxDB.
:param mode: Mode in which client should run. Available options:
- ``async``: Default mode. Each query/request to the backend will
- ``blocking``: Behaves in sync/blocking fashion,
similar to the official InfluxDB-Python client.
:param output: Output format of the response received from InfluxDB.
- ``json``: Default format.
Returns parsed JSON as received from InfluxDB.
- ``dataframe``: Parses results into :py:class`pandas.DataFrame`.
Not compatible with chunked responses.
:param db: Default database to be used by the client.
:param ssl: If https should be used.
:param unix_socket: Path to the InfluxDB Unix domain socket.
:param username: Username to use to connect to InfluxDB.
:param password: User password.
:param timeout: Timeout in seconds or :class:`aiohttp.ClientTimeout` object
:param database: Default database to be used by the client.
This field is for argument consistency with the official InfluxDB Python client.
:param loop: Asyncio event loop.
:param redis_opts: Dict fo keyword arguments for :func:`aioredis.create_redis`
:param cache_expiry: Expiry time (in seconds) for cached data
:param kwargs: Additional kwargs for :class:`aiohttp.ClientSession`
"""
self._loop = loop or asyncio.get_event_loop()
self._session: aiohttp.ClientSession = None
self._redis: aioredis.Redis = None
self._mode = None
self._output = None
self._db = None
self.ssl = ssl
self.host = host
self.port = port
self.mode = mode
self.output = output
self.db = database or db
# ClientSession configuration
if username:
kwargs.update(auth=aiohttp.BasicAuth(username, password))
if unix_socket:
kwargs.update(connector=aiohttp.UnixConnector(unix_socket, loop=self._loop))
if timeout:
if isinstance(timeout, aiohttp.ClientTimeout):
kwargs.update(timeout=timeout)
else:
kwargs.update(timeout=aiohttp.ClientTimeout(total=timeout))
self.opts = kwargs
# Cache configuration
self.redis_opts = redis_opts
self.cache_expiry = cache_expiry
async def create_session(self, **kwargs):
"""Creates an :class:`aiohttp.ClientSession`
Override this or call it with ``kwargs`` to use other :mod:`aiohttp`
functionality not covered by :class:`~.InfluxDBClient.__init__`
"""
self.opts.update(kwargs)
self._session = aiohttp.ClientSession(**self.opts, loop=self._loop)
if self.redis_opts:
if aioredis:
self._redis = await aioredis.create_redis(**self.redis_opts,
loop=self._loop)
else:
warnings.warn(no_redis_warning)
@property
def url(self):
return f'{"https" if self.ssl else "http"}://{self.host}:{self.port}/{{endpoint}}'
@property
def mode(self):
return self._mode
@property
def output(self):
return self._output
@property
def db(self):
return self._db
@mode.setter
def mode(self, mode):
if mode not in ('async', 'blocking'):
raise ValueError('Invalid running mode')
self._mode = mode
@output.setter
def output(self, output):
if pd is None and output == 'dataframe':
raise ValueError(no_pandas_warning)
if output not in ('json', 'dataframe'):
raise ValueError('Invalid output format')
self._output = output
@db.setter
def db(self, db):
self._db = db
if not db:
warnings.warn(f'No default databases is set. '
f'Database must be specified when querying/writing.')
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.close()
def __del__(self):
if not self._loop.is_closed() and self._session:
asyncio.ensure_future(self._session.close(), loop=self._loop)
def __repr__(self):
items = [f'{k}={v}' for k, v in vars(self).items() if not k.startswith('_')]
items.append(f'mode={self.mode}')
return f'{type(self).__name__}({", ".join(items)})'
@runner
async def close(self):
if self._session:
await self._session.close()
self._session = None
if self._redis:
self._redis.close()
@runner
async def ping(self) -> dict:
"""Pings InfluxDB
Returns a dictionary containing the headers of the response from ``influxd``.
"""
if not self._session:
await self.create_session()
async with self._session.get(self.url.format(endpoint='ping')) as resp:
logger.debug(f'{resp.status}: {resp.reason}')
return dict(resp.headers.items())
@runner
async def write(
self,
data: Union[PointType, Iterable[PointType]],
measurement: Optional[str] = None,
db: Optional[str] = None,
precision: Optional[str] = None,
rp: Optional[str] = None,
tag_columns: Optional[Iterable] = None,
**extra_tags,
) -> bool:
"""Writes data to InfluxDB.
Input can be:
1. A mapping (e.g. ``dict``) containing the keys:
``measurement``, ``time``, ``tags``, ``fields``
2. A Pandas :class:`~pandas.DataFrame` with a :class:`~pandas.DatetimeIndex`
3. A user defined class decorated w/
:func:`~aioinflux.serialization.usertype.lineprotocol`
4. A string (``str`` or ``bytes``) properly formatted in InfluxDB's line protocol
5. An iterable of one of the above
Input data in formats 1-3 are parsed to the line protocol before being
written to InfluxDB.
See the `InfluxDB docs <https://docs.influxdata.com/influxdb/latest/
write_protocols/line_protocol_reference/>`_ for more details.
:param data: Input data (see description above).
:param measurement: Measurement name. Mandatory when when writing DataFrames only.
When writing dictionary-like data, this field is treated as the default value
for points that do not contain a `measurement` field.
:param db: Database to be written to. Defaults to `self.db`.
:param precision: Sets the precision for the supplied Unix time values.
Ignored if input timestamp data is of non-integer type.
Valid values: ``{'ns', 'u', 'µ', 'ms', 's', 'm', 'h'}``
:param rp: Sets the target retention policy for the write.
If unspecified, data is written to the default retention policy.
:param tag_columns: Columns to be treated as tags
(used when writing DataFrames only)
:param extra_tags: Additional tags to be added to all points passed.
:return: Returns ``True`` if insert is successful.
Raises :py:class:`ValueError` otherwise.
"""
if not self._session:
await self.create_session()
if precision is not None:
# FIXME: Implement. Related issue: aioinflux/pull/13
raise NotImplementedError("'precision' parameter is not supported yet")
data = serialization.serialize(data, measurement, tag_columns, **extra_tags)
params = {'db': db or self.db}
if rp:
params['rp'] = rp
url = self.url.format(endpoint='write')
async with self._session.post(url, params=params, data=data) as resp:
if resp.status == 204:
return True
raise InfluxDBWriteError(resp)
@runner
@staticmethod
def _check_error(response):
"""Checks for JSON error messages and raises Python exception"""
if 'error' in response:
raise InfluxDBError(response['error'])
elif 'results' in response:
for statement in response['results']:
if 'error' in statement:
msg = '{d[error]} (statement {d[statement_id]})'
raise InfluxDBError(msg.format(d=statement))
# InfluxQL - Data management
# --------------------------
def create_database(self, db=None):
db = db or self.db
return self.query(f'CREATE DATABASE "{db}"')
def drop_database(self, db=None):
db = db or self.db
return self.query(f'DROP DATABASE "{db}"')
def drop_measurement(self, measurement):
return self.query(f'DROP MEASUREMENT "{measurement}"')
# InfluxQL - Schema exploration
# -----------------------------
def show_databases(self):
return self.query("SHOW DATABASES")
def show_measurements(self):
return self.query("SHOW MEASUREMENTS")
def show_users(self):
return self.query("SHOW USERS")
def show_series(self, measurement=None):
if measurement:
return self.query(f"SHOW SERIES FROM {measurement}")
return self.query("SHOW SERIES")
def show_tag_keys(self, measurement=None):
if measurement:
return self.query(f"SHOW TAG KEYS FROM {measurement}")
return self.query("SHOW TAG KEYS")
def show_field_keys(self, measurement=None):
if measurement:
return self.query(f"SHOW FIELD KEYS FROM {measurement}")
return self.query("SHOW FIELD KEYS")
def show_tag_values(self, key, measurement=None):
if measurement:
return self.query(f'SHOW TAG VALUES FROM "{measurement}" WITH key = "{key}"')
return self.query(f'SHOW TAG VALUES WITH key = "{key}"')
def show_retention_policies(self):
return self.query("SHOW RETENTION POLICIES")
# InfluxQL - Other
# ----------------
def show_continuous_queries(self):
return self.query("SHOW CONTINUOUS QUERIES")
|
gusutabopb/aioinflux
|
aioinflux/client.py
|
InfluxDBClient._check_error
|
python
|
def _check_error(response):
if 'error' in response:
raise InfluxDBError(response['error'])
elif 'results' in response:
for statement in response['results']:
if 'error' in statement:
msg = '{d[error]} (statement {d[statement_id]})'
raise InfluxDBError(msg.format(d=statement))
|
Checks for JSON error messages and raises Python exception
|
train
|
https://github.com/gusutabopb/aioinflux/blob/2e4b7b3e13604e7618c686d89a0673f0bc70b24e/aioinflux/client.py#L384-L392
| null |
class InfluxDBClient:
def __init__(
self,
host: str = 'localhost',
port: int = 8086,
mode: str = 'async',
output: str = 'json',
db: Optional[str] = None,
database: Optional[str] = None,
ssl: bool = False,
*,
unix_socket: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
timeout: Optional[Union[aiohttp.ClientTimeout, float]] = None,
loop: Optional[asyncio.AbstractEventLoop] = None,
redis_opts: Optional[dict] = None,
cache_expiry: int = 86400,
**kwargs
):
"""
:class:`~aioinflux.client.InfluxDBClient` holds information necessary
to interact with InfluxDB.
It is async by default, but can also be used as a sync/blocking client.
When querying, responses are returned as parsed JSON by default,
but can also be wrapped in easily iterable
wrapper object or be parsed to Pandas DataFrames.
The three main public methods are the three endpoints of the InfluxDB API, namely:
1. :meth:`~.InfluxDBClient.ping`
2. :meth:`~.InfluxDBClient.write`
3. :meth:`~.InfluxDBClient.query`
See each of the above methods documentation for further usage details.
See also: https://docs.influxdata.com/influxdb/latest/tools/api/
:param host: Hostname to connect to InfluxDB.
:param port: Port to connect to InfluxDB.
:param mode: Mode in which client should run. Available options:
- ``async``: Default mode. Each query/request to the backend will
- ``blocking``: Behaves in sync/blocking fashion,
similar to the official InfluxDB-Python client.
:param output: Output format of the response received from InfluxDB.
- ``json``: Default format.
Returns parsed JSON as received from InfluxDB.
- ``dataframe``: Parses results into :py:class`pandas.DataFrame`.
Not compatible with chunked responses.
:param db: Default database to be used by the client.
:param ssl: If https should be used.
:param unix_socket: Path to the InfluxDB Unix domain socket.
:param username: Username to use to connect to InfluxDB.
:param password: User password.
:param timeout: Timeout in seconds or :class:`aiohttp.ClientTimeout` object
:param database: Default database to be used by the client.
This field is for argument consistency with the official InfluxDB Python client.
:param loop: Asyncio event loop.
:param redis_opts: Dict fo keyword arguments for :func:`aioredis.create_redis`
:param cache_expiry: Expiry time (in seconds) for cached data
:param kwargs: Additional kwargs for :class:`aiohttp.ClientSession`
"""
self._loop = loop or asyncio.get_event_loop()
self._session: aiohttp.ClientSession = None
self._redis: aioredis.Redis = None
self._mode = None
self._output = None
self._db = None
self.ssl = ssl
self.host = host
self.port = port
self.mode = mode
self.output = output
self.db = database or db
# ClientSession configuration
if username:
kwargs.update(auth=aiohttp.BasicAuth(username, password))
if unix_socket:
kwargs.update(connector=aiohttp.UnixConnector(unix_socket, loop=self._loop))
if timeout:
if isinstance(timeout, aiohttp.ClientTimeout):
kwargs.update(timeout=timeout)
else:
kwargs.update(timeout=aiohttp.ClientTimeout(total=timeout))
self.opts = kwargs
# Cache configuration
self.redis_opts = redis_opts
self.cache_expiry = cache_expiry
async def create_session(self, **kwargs):
"""Creates an :class:`aiohttp.ClientSession`
Override this or call it with ``kwargs`` to use other :mod:`aiohttp`
functionality not covered by :class:`~.InfluxDBClient.__init__`
"""
self.opts.update(kwargs)
self._session = aiohttp.ClientSession(**self.opts, loop=self._loop)
if self.redis_opts:
if aioredis:
self._redis = await aioredis.create_redis(**self.redis_opts,
loop=self._loop)
else:
warnings.warn(no_redis_warning)
@property
def url(self):
return f'{"https" if self.ssl else "http"}://{self.host}:{self.port}/{{endpoint}}'
@property
def mode(self):
return self._mode
@property
def output(self):
return self._output
@property
def db(self):
return self._db
@mode.setter
def mode(self, mode):
if mode not in ('async', 'blocking'):
raise ValueError('Invalid running mode')
self._mode = mode
@output.setter
def output(self, output):
if pd is None and output == 'dataframe':
raise ValueError(no_pandas_warning)
if output not in ('json', 'dataframe'):
raise ValueError('Invalid output format')
self._output = output
@db.setter
def db(self, db):
self._db = db
if not db:
warnings.warn(f'No default databases is set. '
f'Database must be specified when querying/writing.')
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.close()
def __del__(self):
if not self._loop.is_closed() and self._session:
asyncio.ensure_future(self._session.close(), loop=self._loop)
def __repr__(self):
items = [f'{k}={v}' for k, v in vars(self).items() if not k.startswith('_')]
items.append(f'mode={self.mode}')
return f'{type(self).__name__}({", ".join(items)})'
@runner
async def close(self):
if self._session:
await self._session.close()
self._session = None
if self._redis:
self._redis.close()
@runner
async def ping(self) -> dict:
"""Pings InfluxDB
Returns a dictionary containing the headers of the response from ``influxd``.
"""
if not self._session:
await self.create_session()
async with self._session.get(self.url.format(endpoint='ping')) as resp:
logger.debug(f'{resp.status}: {resp.reason}')
return dict(resp.headers.items())
@runner
async def write(
self,
data: Union[PointType, Iterable[PointType]],
measurement: Optional[str] = None,
db: Optional[str] = None,
precision: Optional[str] = None,
rp: Optional[str] = None,
tag_columns: Optional[Iterable] = None,
**extra_tags,
) -> bool:
"""Writes data to InfluxDB.
Input can be:
1. A mapping (e.g. ``dict``) containing the keys:
``measurement``, ``time``, ``tags``, ``fields``
2. A Pandas :class:`~pandas.DataFrame` with a :class:`~pandas.DatetimeIndex`
3. A user defined class decorated w/
:func:`~aioinflux.serialization.usertype.lineprotocol`
4. A string (``str`` or ``bytes``) properly formatted in InfluxDB's line protocol
5. An iterable of one of the above
Input data in formats 1-3 are parsed to the line protocol before being
written to InfluxDB.
See the `InfluxDB docs <https://docs.influxdata.com/influxdb/latest/
write_protocols/line_protocol_reference/>`_ for more details.
:param data: Input data (see description above).
:param measurement: Measurement name. Mandatory when when writing DataFrames only.
When writing dictionary-like data, this field is treated as the default value
for points that do not contain a `measurement` field.
:param db: Database to be written to. Defaults to `self.db`.
:param precision: Sets the precision for the supplied Unix time values.
Ignored if input timestamp data is of non-integer type.
Valid values: ``{'ns', 'u', 'µ', 'ms', 's', 'm', 'h'}``
:param rp: Sets the target retention policy for the write.
If unspecified, data is written to the default retention policy.
:param tag_columns: Columns to be treated as tags
(used when writing DataFrames only)
:param extra_tags: Additional tags to be added to all points passed.
:return: Returns ``True`` if insert is successful.
Raises :py:class:`ValueError` otherwise.
"""
if not self._session:
await self.create_session()
if precision is not None:
# FIXME: Implement. Related issue: aioinflux/pull/13
raise NotImplementedError("'precision' parameter is not supported yet")
data = serialization.serialize(data, measurement, tag_columns, **extra_tags)
params = {'db': db or self.db}
if rp:
params['rp'] = rp
url = self.url.format(endpoint='write')
async with self._session.post(url, params=params, data=data) as resp:
if resp.status == 204:
return True
raise InfluxDBWriteError(resp)
@runner
async def query(
self,
q: AnyStr,
*,
epoch: str = 'ns',
chunked: bool = False,
chunk_size: Optional[int] = None,
db: Optional[str] = None,
use_cache: bool = False,
) -> Union[AsyncGenerator[ResultType, None], ResultType]:
"""Sends a query to InfluxDB.
Please refer to the InfluxDB documentation for all the possible queries:
https://docs.influxdata.com/influxdb/latest/query_language/
:param q: Raw query string
:param db: Database to be queried. Defaults to `self.db`.
:param epoch: Precision level of response timestamps.
Valid values: ``{'ns', 'u', 'µ', 'ms', 's', 'm', 'h'}``.
:param chunked: If ``True``, makes InfluxDB return results in streamed batches
rather than as a single response.
Returns an AsyncGenerator which yields responses
in the same format as non-chunked queries.
:param chunk_size: Max number of points for each chunk. By default, InfluxDB chunks
responses by series or by every 10,000 points, whichever occurs first.
:param use_cache:
:return: Response in the format specified by the combination of
:attr:`.InfluxDBClient.output` and ``chunked``
"""
async def _chunked_generator(url, data):
async with self._session.post(url, data=data) as resp:
logger.debug(f'{resp.status} (CHUNKED): {q}')
# Hack to avoid aiohttp raising ValueError('Line is too long')
# The number 16 is arbitrary (may be too large/small).
resp.content._high_water *= 16
async for chunk in resp.content:
chunk = json.loads(chunk)
self._check_error(chunk)
yield chunk
if not self._session:
await self.create_session()
# InfluxDB documentation is wrong regarding `/query` parameters
# See https://github.com/influxdata/docs.influxdata.com/issues/1807
if not isinstance(chunked, bool):
raise ValueError("'chunked' must be a boolean")
data = dict(q=q, db=db or self.db, chunked=str(chunked).lower(), epoch=epoch)
if chunked and chunk_size:
data['chunk_size'] = chunk_size
url = self.url.format(endpoint='query')
if chunked:
if use_cache:
raise ValueError("Can't use cache w/ chunked queries")
if self.mode != 'async':
raise ValueError("Can't use 'chunked' with non-async mode")
if self.output == 'json':
return _chunked_generator(url, data)
raise ValueError(f"Chunked queries are not support with {self.output!r} output")
key = f'aioinflux:{q}'
if use_cache and self._redis and await self._redis.exists(key):
logger.debug(f'Cache HIT: {q}')
data = lz4.decompress(await self._redis.get(key))
else:
async with self._session.post(url, data=data) as resp:
data = await resp.read()
if use_cache and self._redis:
logger.debug(f'Cache MISS ({resp.status}): {q}')
if resp.status == 200:
await self._redis.set(key, lz4.compress(data))
await self._redis.expire(key, self.cache_expiry)
else:
logger.debug(f'{resp.status}: {q}')
data = json.loads(data)
self._check_error(data)
if self.output == 'json':
return data
elif self.output == 'dataframe':
return serialization.dataframe.parse(data)
else:
raise ValueError('Invalid output format')
@staticmethod
# InfluxQL - Data management
# --------------------------
def create_database(self, db=None):
db = db or self.db
return self.query(f'CREATE DATABASE "{db}"')
def drop_database(self, db=None):
db = db or self.db
return self.query(f'DROP DATABASE "{db}"')
def drop_measurement(self, measurement):
return self.query(f'DROP MEASUREMENT "{measurement}"')
# InfluxQL - Schema exploration
# -----------------------------
def show_databases(self):
return self.query("SHOW DATABASES")
def show_measurements(self):
return self.query("SHOW MEASUREMENTS")
def show_users(self):
return self.query("SHOW USERS")
def show_series(self, measurement=None):
if measurement:
return self.query(f"SHOW SERIES FROM {measurement}")
return self.query("SHOW SERIES")
def show_tag_keys(self, measurement=None):
if measurement:
return self.query(f"SHOW TAG KEYS FROM {measurement}")
return self.query("SHOW TAG KEYS")
def show_field_keys(self, measurement=None):
if measurement:
return self.query(f"SHOW FIELD KEYS FROM {measurement}")
return self.query("SHOW FIELD KEYS")
def show_tag_values(self, key, measurement=None):
if measurement:
return self.query(f'SHOW TAG VALUES FROM "{measurement}" WITH key = "{key}"')
return self.query(f'SHOW TAG VALUES WITH key = "{key}"')
def show_retention_policies(self):
return self.query("SHOW RETENTION POLICIES")
# InfluxQL - Other
# ----------------
def show_continuous_queries(self):
return self.query("SHOW CONTINUOUS QUERIES")
|
elmotec/massedit
|
massedit.py
|
get_function
|
python
|
def get_function(fn_name):
module_name, callable_name = fn_name.split(':')
current = globals()
if not callable_name:
callable_name = module_name
else:
import importlib
try:
module = importlib.import_module(module_name)
except ImportError:
log.error("failed to import %s", module_name)
raise
current = module
for level in callable_name.split('.'):
current = getattr(current, level)
code = current.__code__
if code.co_argcount != 2:
raise ValueError('function should take 2 arguments: lines, file_name')
return current
|
Retrieve the function defined by the function_name.
Arguments:
fn_name: specification of the type module:function_name.
|
train
|
https://github.com/elmotec/massedit/blob/57e22787354896d63a8850312314b19aa0308906/massedit.py#L66-L90
| null |
#!/usr/bin/env python
# encoding: utf-8
"""A python bulk editor class to apply the same code to many files."""
# Copyright (c) 2012-19 Jérôme Lecomte
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import unicode_literals
import os
import shutil
import sys
import logging
import argparse
import difflib
import re # For convenience, pylint: disable=W0611
import fnmatch
import io
import subprocess
import textwrap
__version__ = '0.68.5' # UPDATE setup.py when changing version.
__author__ = 'Jérôme Lecomte'
__license__ = 'MIT'
log = logging.getLogger(__name__)
try:
unicode
except NameError:
unicode = str # pylint: disable=invalid-name, redefined-builtin
def is_list(arg):
"""Factor determination if arg is a list.
Small utility for a better diagnostic because str/unicode are also
iterable.
"""
return iter(arg) and not isinstance(arg, unicode)
class MassEdit(object):
"""Mass edit lines of files."""
def __init__(self, **kwds):
"""Initialize MassEdit object.
Args:
- code (byte code object): code to execute on input file.
- function (str or callable): function to call on input file.
- module (str): module name where to find the function.
- executable (str): executable file name to execute on input file.
- dry_run (bool): skip actual modification of input file if True.
"""
self.code_objs = dict()
self._codes = []
self._functions = []
self._executables = []
self.dry_run = None
self.encoding = 'utf-8'
self.newline = None
if 'module' in kwds:
self.import_module(kwds['module'])
if 'code' in kwds:
self.append_code_expr(kwds['code'])
if 'function' in kwds:
self.append_function(kwds['function'])
if 'executable' in kwds:
self.append_executable(kwds['executable'])
if 'dry_run' in kwds:
self.dry_run = kwds['dry_run']
if 'encoding' in kwds:
self.encoding = kwds['encoding']
if 'newline' in kwds:
self.newline = kwds['newline']
@staticmethod
def import_module(module): # pylint: disable=R0201
"""Import module that are needed for the code expr to compile.
Argument:
module (str or list): module(s) to import.
"""
if isinstance(module, list):
all_modules = module
else:
all_modules = [module]
for mod in all_modules:
globals()[mod] = __import__(mod.strip())
@staticmethod
def __edit_line(line, code, code_obj): # pylint: disable=R0201
"""Edit a line with one code object built in the ctor."""
try:
# pylint: disable=eval-used
result = eval(code_obj, globals(), locals())
except TypeError as ex:
log.error("failed to execute %s: %s", code, ex)
raise
if result is None:
log.error("cannot process line '%s' with %s", line, code)
raise RuntimeError('failed to process line')
elif isinstance(result, list) or isinstance(result, tuple):
line = unicode(' '.join([unicode(res_element)
for res_element in result]))
else:
line = unicode(result)
return line
def edit_line(self, line):
"""Edit a single line using the code expression."""
for code, code_obj in self.code_objs.items():
line = self.__edit_line(line, code, code_obj)
return line
def edit_content(self, original_lines, file_name):
"""Processes a file contents.
First processes the contents line by line applying the registered
expressions, then process the resulting contents using the
registered functions.
Arguments:
original_lines (list of str): file content.
file_name (str): name of the file.
"""
lines = [self.edit_line(line) for line in original_lines]
for function in self._functions:
try:
lines = list(function(lines, file_name))
except UnicodeDecodeError as err:
log.error('failed to process %s: %s', file_name, err)
return lines
except Exception as err:
log.error("failed to process %s with code %s: %s",
file_name, function, err)
raise # Let the exception be handled at a higher level.
return lines
def edit_file(self, file_name):
"""Edit file in place, returns a list of modifications (unified diff).
Arguments:
file_name (str, unicode): The name of the file.
"""
with io.open(file_name, "r", encoding=self.encoding) as from_file:
try:
from_lines = from_file.readlines()
except UnicodeDecodeError as err:
log.error("encoding error (see --encoding): %s", err)
raise
if self._executables:
nb_execs = len(self._executables)
if nb_execs > 1:
log.warn("found %d executables. Will use first one", nb_execs)
exec_list = self._executables[0].split()
exec_list.append(file_name)
try:
log.info("running %s...", " ".join(exec_list))
output = subprocess.check_output(exec_list,
universal_newlines=True)
except Exception as err:
log.error("failed to execute %s: %s", " ".join(exec_list), err)
raise # Let the exception be handled at a higher level.
to_lines = output.split(unicode("\n"))
else:
to_lines = from_lines
# unified_diff wants structure of known length. Convert to a list.
to_lines = list(self.edit_content(to_lines, file_name))
diffs = difflib.unified_diff(from_lines, to_lines,
fromfile=file_name, tofile='<new>')
if not self.dry_run:
bak_file_name = file_name + ".bak"
if os.path.exists(bak_file_name):
msg = "{} already exists".format(bak_file_name)
if sys.version_info < (3, 3):
raise OSError(msg)
else:
# noinspection PyCompatibility
# pylint: disable=undefined-variable
raise FileExistsError(msg)
try:
os.rename(file_name, bak_file_name)
with io.open(file_name, 'w', encoding=self.encoding, newline=self.newline) as new:
new.writelines(to_lines)
# Keeps mode of original file.
shutil.copymode(bak_file_name, file_name)
except Exception as err:
log.error("failed to write output to %s: %s", file_name, err)
# Try to recover...
try:
os.rename(bak_file_name, file_name)
except OSError as err:
log.error("failed to restore %s from %s: %s",
file_name, bak_file_name, err)
raise
try:
os.unlink(bak_file_name)
except OSError as err:
log.warning("failed to remove backup %s: %s",
bak_file_name, err)
return list(diffs)
def append_code_expr(self, code):
"""Compile argument and adds it to the list of code objects."""
# expects a string.
if isinstance(code, str) and not isinstance(code, unicode):
code = unicode(code)
if not isinstance(code, unicode):
raise TypeError("string expected")
log.debug("compiling code %s...", code)
try:
code_obj = compile(code, '<string>', 'eval')
self.code_objs[code] = code_obj
except SyntaxError as syntax_err:
log.error("cannot compile %s: %s", code, syntax_err)
raise
log.debug("compiled code %s", code)
def append_function(self, function):
"""Append the function to the list of functions to be called.
If the function is already a callable, use it. If it's a type str
try to interpret it as [module]:?<callable>, load the module
if there is one and retrieve the callable.
Argument:
function (str or callable): function to call on input.
"""
if not hasattr(function, '__call__'):
function = get_function(function)
if not hasattr(function, '__call__'):
raise ValueError("function is expected to be callable")
self._functions.append(function)
log.debug("registered %s", function.__name__)
def append_executable(self, executable):
"""Append san executable os command to the list to be called.
Argument:
executable (str): os callable executable.
"""
if isinstance(executable, str) and not isinstance(executable, unicode):
executable = unicode(executable)
if not isinstance(executable, unicode):
raise TypeError("expected executable name as str, not {}".
format(executable.__class__.__name__))
self._executables.append(executable)
def set_code_exprs(self, codes):
"""Convenience: sets all the code expressions at once."""
self.code_objs = dict()
self._codes = []
for code in codes:
self.append_code_expr(code)
def set_functions(self, functions):
"""Check functions passed as argument and set them to be used."""
for func in functions:
try:
self.append_function(func)
except (ValueError, AttributeError) as ex:
log.error("'%s' is not a callable function: %s", func, ex)
raise
def set_executables(self, executables):
"""Check and set the executables to be used."""
for exc in executables:
self.append_executable(exc)
def parse_command_line(argv):
"""Parse command line argument. See -h option.
Arguments:
argv: arguments on the command line must include caller file name.
"""
import textwrap
example = textwrap.dedent("""
Examples:
# Simple string substitution (-e). Will show a diff. No changes applied.
{0} -e "re.sub('failIf', 'assertFalse', line)" *.py
# File level modifications (-f). Overwrites the files in place (-w).
{0} -w -f fixer:fixit *.py
# Will change all test*.py in subdirectories of tests.
{0} -e "re.sub('failIf', 'assertFalse', line)" -s tests test*.py
""").format(os.path.basename(argv[0]))
formatter_class = argparse.RawDescriptionHelpFormatter
parser = argparse.ArgumentParser(description="Python mass editor",
epilog=example,
formatter_class=formatter_class)
parser.add_argument("-V", "--version", action="version",
version="%(prog)s {}".format(__version__))
parser.add_argument("-w", "--write", dest="dry_run",
action="store_false", default=True,
help="modify target file(s) in place. "
"Shows diff otherwise.")
parser.add_argument("-v", "--verbose", dest="verbose_count",
action="count", default=0,
help="increases log verbosity (can be specified "
"multiple times)")
parser.add_argument("-e", "--expression", dest="expressions", nargs=1,
help="Python expressions applied to target files. "
"Use the line variable to reference the current line.")
parser.add_argument("-f", "--function", dest="functions", nargs=1,
help="Python function to apply to target file. "
"Takes file content as input and yield lines. "
"Specify function as [module]:?<function name>.")
parser.add_argument("-x", "--executable", dest="executables", nargs=1,
help="Python executable to apply to target file.")
parser.add_argument("-s", "--start", dest="start_dirs",
help="Directory(ies) from which to look for targets.")
parser.add_argument("-m", "--max-depth-level", type=int, dest="max_depth",
help="Maximum depth when walking subdirectories.")
parser.add_argument("-o", "--output", metavar="FILE",
type=argparse.FileType("w"), default=sys.stdout,
help="redirect output to a file")
parser.add_argument("-g", "--generate", metavar="FILE", type=str,
help="generate input file suitable for -f option")
parser.add_argument("--encoding", dest="encoding",
help="Encoding of input and output files")
parser.add_argument("--newline", dest="newline",
help="Newline character for output files")
parser.add_argument("patterns", metavar="pattern",
nargs="*", # argparse.REMAINDER,
help="shell-like file name patterns to process.")
arguments = parser.parse_args(argv[1:])
if not (arguments.expressions or
arguments.functions or
arguments.generate or
arguments.executables):
parser.error(
'--expression, --function, --generate or --executable missing')
# Sets log level to WARN going more verbose for each new -V.
log.setLevel(max(3 - arguments.verbose_count, 0) * 10)
return arguments
def get_paths(patterns, start_dirs=None, max_depth=1):
"""Retrieve files that match any of the patterns."""
# Shortcut: if there is only one pattern, make sure we process just that.
if len(patterns) == 1 and not start_dirs:
pattern = patterns[0]
directory = os.path.dirname(pattern)
if directory:
patterns = [os.path.basename(pattern)]
start_dirs = directory
max_depth = 1
if not start_dirs or start_dirs == '.':
start_dirs = os.getcwd()
for start_dir in start_dirs.split(','):
for root, dirs, files in os.walk(start_dir): # pylint: disable=W0612
if max_depth is not None:
relpath = os.path.relpath(root, start=start_dir)
depth = len(relpath.split(os.sep))
if depth > max_depth:
continue
names = []
for pattern in patterns:
names += fnmatch.filter(files, pattern)
for name in names:
path = os.path.join(root, name)
yield path
fixer_template = """\
#!/usr/bin/env python
def fixit(lines, file_name):
'''Edit files passed to massedit
:param list(str) lines: list of lines contained in the input file
:param str file_name: name of the file the lines were read from
:return: modified lines
:rtype: list(str)
Please modify the logic below (it does not change anything right now)
and apply your logic to the in your directory like this:
massedit -f <file name>:fixit files_to_modify\*
See massedit -h for help and other options.
'''
changed_lines = []
for lineno, line in enumerate(lines):
changed_lines.append(line)
return changed_lines
"""
def generate_fixer_file(output):
"""Generate a template fixer file to be used with --function option."""
with open(output, "w+") as fh:
fh.write(fixer_template)
return
# pylint: disable=too-many-arguments, too-many-locals
def edit_files(patterns, expressions=None,
functions=None, executables=None,
start_dirs=None, max_depth=1, dry_run=True,
output=sys.stdout, encoding=None, newline=None):
"""Process patterns with MassEdit.
Arguments:
patterns: file pattern to identify the files to be processed.
expressions: single python expression to be applied line by line.
functions: functions to process files contents.
executables: os executables to execute on the argument files.
Keyword arguments:
max_depth: maximum recursion level when looking for file matches.
start_dirs: workspace(ies) where to start the file search.
dry_run: only display differences if True. Save modified file otherwise.
output: handle where the output should be redirected.
Return:
list of files processed.
"""
if not is_list(patterns):
raise TypeError("patterns should be a list")
if expressions and not is_list(expressions):
raise TypeError("expressions should be a list of exec expressions")
if functions and not is_list(functions):
raise TypeError("functions should be a list of functions")
if executables and not is_list(executables):
raise TypeError("executables should be a list of program names")
editor = MassEdit(dry_run=dry_run, encoding=encoding, newline=newline)
if expressions:
editor.set_code_exprs(expressions)
if functions:
editor.set_functions(functions)
if executables:
editor.set_executables(executables)
processed_paths = []
for path in get_paths(patterns, start_dirs=start_dirs,
max_depth=max_depth):
try:
diffs = list(editor.edit_file(path))
if dry_run:
# At this point, encoding is the input encoding.
diff = "".join(diffs)
if not diff:
continue
# The encoding of the target output may not match the input
# encoding. If it's defined, we round trip the diff text
# to bytes and back to silence any conversion errors.
encoding = output.encoding
if encoding:
bytes_diff = diff.encode(encoding=encoding, errors='ignore')
diff = bytes_diff.decode(encoding=output.encoding)
output.write(diff)
except UnicodeDecodeError as err:
log.error("failed to process %s: %s", path, err)
continue
processed_paths.append(os.path.abspath(path))
return processed_paths
def command_line(argv):
"""Instantiate an editor and process arguments.
Optional argument:
- processed_paths: paths processed are appended to the list.
"""
arguments = parse_command_line(argv)
if arguments.generate:
generate_fixer_file(arguments.generate)
paths = edit_files(arguments.patterns,
expressions=arguments.expressions,
functions=arguments.functions,
executables=arguments.executables,
start_dirs=arguments.start_dirs,
max_depth=arguments.max_depth,
dry_run=arguments.dry_run,
output=arguments.output,
encoding=arguments.encoding,
newline=arguments.newline)
# If the output is not sys.stdout, we need to close it because
# argparse.FileType does not do it for us.
is_sys = arguments.output in [sys.stdout, sys.stderr]
if not is_sys and isinstance(arguments.output, io.IOBase):
arguments.output.close()
return paths
def main():
"""Main function."""
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
try:
command_line(sys.argv)
finally:
logging.shutdown()
if __name__ == "__main__":
sys.exit(main())
|
elmotec/massedit
|
massedit.py
|
parse_command_line
|
python
|
def parse_command_line(argv):
import textwrap
example = textwrap.dedent("""
Examples:
# Simple string substitution (-e). Will show a diff. No changes applied.
{0} -e "re.sub('failIf', 'assertFalse', line)" *.py
# File level modifications (-f). Overwrites the files in place (-w).
{0} -w -f fixer:fixit *.py
# Will change all test*.py in subdirectories of tests.
{0} -e "re.sub('failIf', 'assertFalse', line)" -s tests test*.py
""").format(os.path.basename(argv[0]))
formatter_class = argparse.RawDescriptionHelpFormatter
parser = argparse.ArgumentParser(description="Python mass editor",
epilog=example,
formatter_class=formatter_class)
parser.add_argument("-V", "--version", action="version",
version="%(prog)s {}".format(__version__))
parser.add_argument("-w", "--write", dest="dry_run",
action="store_false", default=True,
help="modify target file(s) in place. "
"Shows diff otherwise.")
parser.add_argument("-v", "--verbose", dest="verbose_count",
action="count", default=0,
help="increases log verbosity (can be specified "
"multiple times)")
parser.add_argument("-e", "--expression", dest="expressions", nargs=1,
help="Python expressions applied to target files. "
"Use the line variable to reference the current line.")
parser.add_argument("-f", "--function", dest="functions", nargs=1,
help="Python function to apply to target file. "
"Takes file content as input and yield lines. "
"Specify function as [module]:?<function name>.")
parser.add_argument("-x", "--executable", dest="executables", nargs=1,
help="Python executable to apply to target file.")
parser.add_argument("-s", "--start", dest="start_dirs",
help="Directory(ies) from which to look for targets.")
parser.add_argument("-m", "--max-depth-level", type=int, dest="max_depth",
help="Maximum depth when walking subdirectories.")
parser.add_argument("-o", "--output", metavar="FILE",
type=argparse.FileType("w"), default=sys.stdout,
help="redirect output to a file")
parser.add_argument("-g", "--generate", metavar="FILE", type=str,
help="generate input file suitable for -f option")
parser.add_argument("--encoding", dest="encoding",
help="Encoding of input and output files")
parser.add_argument("--newline", dest="newline",
help="Newline character for output files")
parser.add_argument("patterns", metavar="pattern",
nargs="*", # argparse.REMAINDER,
help="shell-like file name patterns to process.")
arguments = parser.parse_args(argv[1:])
if not (arguments.expressions or
arguments.functions or
arguments.generate or
arguments.executables):
parser.error(
'--expression, --function, --generate or --executable missing')
# Sets log level to WARN going more verbose for each new -V.
log.setLevel(max(3 - arguments.verbose_count, 0) * 10)
return arguments
|
Parse command line argument. See -h option.
Arguments:
argv: arguments on the command line must include caller file name.
|
train
|
https://github.com/elmotec/massedit/blob/57e22787354896d63a8850312314b19aa0308906/massedit.py#L332-L402
| null |
#!/usr/bin/env python
# encoding: utf-8
"""A python bulk editor class to apply the same code to many files."""
# Copyright (c) 2012-19 Jérôme Lecomte
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import unicode_literals
import os
import shutil
import sys
import logging
import argparse
import difflib
import re # For convenience, pylint: disable=W0611
import fnmatch
import io
import subprocess
import textwrap
__version__ = '0.68.5' # UPDATE setup.py when changing version.
__author__ = 'Jérôme Lecomte'
__license__ = 'MIT'
log = logging.getLogger(__name__)
try:
unicode
except NameError:
unicode = str # pylint: disable=invalid-name, redefined-builtin
def is_list(arg):
"""Factor determination if arg is a list.
Small utility for a better diagnostic because str/unicode are also
iterable.
"""
return iter(arg) and not isinstance(arg, unicode)
def get_function(fn_name):
"""Retrieve the function defined by the function_name.
Arguments:
fn_name: specification of the type module:function_name.
"""
module_name, callable_name = fn_name.split(':')
current = globals()
if not callable_name:
callable_name = module_name
else:
import importlib
try:
module = importlib.import_module(module_name)
except ImportError:
log.error("failed to import %s", module_name)
raise
current = module
for level in callable_name.split('.'):
current = getattr(current, level)
code = current.__code__
if code.co_argcount != 2:
raise ValueError('function should take 2 arguments: lines, file_name')
return current
class MassEdit(object):
"""Mass edit lines of files."""
def __init__(self, **kwds):
"""Initialize MassEdit object.
Args:
- code (byte code object): code to execute on input file.
- function (str or callable): function to call on input file.
- module (str): module name where to find the function.
- executable (str): executable file name to execute on input file.
- dry_run (bool): skip actual modification of input file if True.
"""
self.code_objs = dict()
self._codes = []
self._functions = []
self._executables = []
self.dry_run = None
self.encoding = 'utf-8'
self.newline = None
if 'module' in kwds:
self.import_module(kwds['module'])
if 'code' in kwds:
self.append_code_expr(kwds['code'])
if 'function' in kwds:
self.append_function(kwds['function'])
if 'executable' in kwds:
self.append_executable(kwds['executable'])
if 'dry_run' in kwds:
self.dry_run = kwds['dry_run']
if 'encoding' in kwds:
self.encoding = kwds['encoding']
if 'newline' in kwds:
self.newline = kwds['newline']
@staticmethod
def import_module(module): # pylint: disable=R0201
"""Import module that are needed for the code expr to compile.
Argument:
module (str or list): module(s) to import.
"""
if isinstance(module, list):
all_modules = module
else:
all_modules = [module]
for mod in all_modules:
globals()[mod] = __import__(mod.strip())
@staticmethod
def __edit_line(line, code, code_obj): # pylint: disable=R0201
"""Edit a line with one code object built in the ctor."""
try:
# pylint: disable=eval-used
result = eval(code_obj, globals(), locals())
except TypeError as ex:
log.error("failed to execute %s: %s", code, ex)
raise
if result is None:
log.error("cannot process line '%s' with %s", line, code)
raise RuntimeError('failed to process line')
elif isinstance(result, list) or isinstance(result, tuple):
line = unicode(' '.join([unicode(res_element)
for res_element in result]))
else:
line = unicode(result)
return line
def edit_line(self, line):
"""Edit a single line using the code expression."""
for code, code_obj in self.code_objs.items():
line = self.__edit_line(line, code, code_obj)
return line
def edit_content(self, original_lines, file_name):
"""Processes a file contents.
First processes the contents line by line applying the registered
expressions, then process the resulting contents using the
registered functions.
Arguments:
original_lines (list of str): file content.
file_name (str): name of the file.
"""
lines = [self.edit_line(line) for line in original_lines]
for function in self._functions:
try:
lines = list(function(lines, file_name))
except UnicodeDecodeError as err:
log.error('failed to process %s: %s', file_name, err)
return lines
except Exception as err:
log.error("failed to process %s with code %s: %s",
file_name, function, err)
raise # Let the exception be handled at a higher level.
return lines
def edit_file(self, file_name):
"""Edit file in place, returns a list of modifications (unified diff).
Arguments:
file_name (str, unicode): The name of the file.
"""
with io.open(file_name, "r", encoding=self.encoding) as from_file:
try:
from_lines = from_file.readlines()
except UnicodeDecodeError as err:
log.error("encoding error (see --encoding): %s", err)
raise
if self._executables:
nb_execs = len(self._executables)
if nb_execs > 1:
log.warn("found %d executables. Will use first one", nb_execs)
exec_list = self._executables[0].split()
exec_list.append(file_name)
try:
log.info("running %s...", " ".join(exec_list))
output = subprocess.check_output(exec_list,
universal_newlines=True)
except Exception as err:
log.error("failed to execute %s: %s", " ".join(exec_list), err)
raise # Let the exception be handled at a higher level.
to_lines = output.split(unicode("\n"))
else:
to_lines = from_lines
# unified_diff wants structure of known length. Convert to a list.
to_lines = list(self.edit_content(to_lines, file_name))
diffs = difflib.unified_diff(from_lines, to_lines,
fromfile=file_name, tofile='<new>')
if not self.dry_run:
bak_file_name = file_name + ".bak"
if os.path.exists(bak_file_name):
msg = "{} already exists".format(bak_file_name)
if sys.version_info < (3, 3):
raise OSError(msg)
else:
# noinspection PyCompatibility
# pylint: disable=undefined-variable
raise FileExistsError(msg)
try:
os.rename(file_name, bak_file_name)
with io.open(file_name, 'w', encoding=self.encoding, newline=self.newline) as new:
new.writelines(to_lines)
# Keeps mode of original file.
shutil.copymode(bak_file_name, file_name)
except Exception as err:
log.error("failed to write output to %s: %s", file_name, err)
# Try to recover...
try:
os.rename(bak_file_name, file_name)
except OSError as err:
log.error("failed to restore %s from %s: %s",
file_name, bak_file_name, err)
raise
try:
os.unlink(bak_file_name)
except OSError as err:
log.warning("failed to remove backup %s: %s",
bak_file_name, err)
return list(diffs)
def append_code_expr(self, code):
"""Compile argument and adds it to the list of code objects."""
# expects a string.
if isinstance(code, str) and not isinstance(code, unicode):
code = unicode(code)
if not isinstance(code, unicode):
raise TypeError("string expected")
log.debug("compiling code %s...", code)
try:
code_obj = compile(code, '<string>', 'eval')
self.code_objs[code] = code_obj
except SyntaxError as syntax_err:
log.error("cannot compile %s: %s", code, syntax_err)
raise
log.debug("compiled code %s", code)
def append_function(self, function):
"""Append the function to the list of functions to be called.
If the function is already a callable, use it. If it's a type str
try to interpret it as [module]:?<callable>, load the module
if there is one and retrieve the callable.
Argument:
function (str or callable): function to call on input.
"""
if not hasattr(function, '__call__'):
function = get_function(function)
if not hasattr(function, '__call__'):
raise ValueError("function is expected to be callable")
self._functions.append(function)
log.debug("registered %s", function.__name__)
def append_executable(self, executable):
"""Append san executable os command to the list to be called.
Argument:
executable (str): os callable executable.
"""
if isinstance(executable, str) and not isinstance(executable, unicode):
executable = unicode(executable)
if not isinstance(executable, unicode):
raise TypeError("expected executable name as str, not {}".
format(executable.__class__.__name__))
self._executables.append(executable)
def set_code_exprs(self, codes):
"""Convenience: sets all the code expressions at once."""
self.code_objs = dict()
self._codes = []
for code in codes:
self.append_code_expr(code)
def set_functions(self, functions):
"""Check functions passed as argument and set them to be used."""
for func in functions:
try:
self.append_function(func)
except (ValueError, AttributeError) as ex:
log.error("'%s' is not a callable function: %s", func, ex)
raise
def set_executables(self, executables):
"""Check and set the executables to be used."""
for exc in executables:
self.append_executable(exc)
def get_paths(patterns, start_dirs=None, max_depth=1):
"""Retrieve files that match any of the patterns."""
# Shortcut: if there is only one pattern, make sure we process just that.
if len(patterns) == 1 and not start_dirs:
pattern = patterns[0]
directory = os.path.dirname(pattern)
if directory:
patterns = [os.path.basename(pattern)]
start_dirs = directory
max_depth = 1
if not start_dirs or start_dirs == '.':
start_dirs = os.getcwd()
for start_dir in start_dirs.split(','):
for root, dirs, files in os.walk(start_dir): # pylint: disable=W0612
if max_depth is not None:
relpath = os.path.relpath(root, start=start_dir)
depth = len(relpath.split(os.sep))
if depth > max_depth:
continue
names = []
for pattern in patterns:
names += fnmatch.filter(files, pattern)
for name in names:
path = os.path.join(root, name)
yield path
fixer_template = """\
#!/usr/bin/env python
def fixit(lines, file_name):
'''Edit files passed to massedit
:param list(str) lines: list of lines contained in the input file
:param str file_name: name of the file the lines were read from
:return: modified lines
:rtype: list(str)
Please modify the logic below (it does not change anything right now)
and apply your logic to the in your directory like this:
massedit -f <file name>:fixit files_to_modify\*
See massedit -h for help and other options.
'''
changed_lines = []
for lineno, line in enumerate(lines):
changed_lines.append(line)
return changed_lines
"""
def generate_fixer_file(output):
"""Generate a template fixer file to be used with --function option."""
with open(output, "w+") as fh:
fh.write(fixer_template)
return
# pylint: disable=too-many-arguments, too-many-locals
def edit_files(patterns, expressions=None,
functions=None, executables=None,
start_dirs=None, max_depth=1, dry_run=True,
output=sys.stdout, encoding=None, newline=None):
"""Process patterns with MassEdit.
Arguments:
patterns: file pattern to identify the files to be processed.
expressions: single python expression to be applied line by line.
functions: functions to process files contents.
executables: os executables to execute on the argument files.
Keyword arguments:
max_depth: maximum recursion level when looking for file matches.
start_dirs: workspace(ies) where to start the file search.
dry_run: only display differences if True. Save modified file otherwise.
output: handle where the output should be redirected.
Return:
list of files processed.
"""
if not is_list(patterns):
raise TypeError("patterns should be a list")
if expressions and not is_list(expressions):
raise TypeError("expressions should be a list of exec expressions")
if functions and not is_list(functions):
raise TypeError("functions should be a list of functions")
if executables and not is_list(executables):
raise TypeError("executables should be a list of program names")
editor = MassEdit(dry_run=dry_run, encoding=encoding, newline=newline)
if expressions:
editor.set_code_exprs(expressions)
if functions:
editor.set_functions(functions)
if executables:
editor.set_executables(executables)
processed_paths = []
for path in get_paths(patterns, start_dirs=start_dirs,
max_depth=max_depth):
try:
diffs = list(editor.edit_file(path))
if dry_run:
# At this point, encoding is the input encoding.
diff = "".join(diffs)
if not diff:
continue
# The encoding of the target output may not match the input
# encoding. If it's defined, we round trip the diff text
# to bytes and back to silence any conversion errors.
encoding = output.encoding
if encoding:
bytes_diff = diff.encode(encoding=encoding, errors='ignore')
diff = bytes_diff.decode(encoding=output.encoding)
output.write(diff)
except UnicodeDecodeError as err:
log.error("failed to process %s: %s", path, err)
continue
processed_paths.append(os.path.abspath(path))
return processed_paths
def command_line(argv):
"""Instantiate an editor and process arguments.
Optional argument:
- processed_paths: paths processed are appended to the list.
"""
arguments = parse_command_line(argv)
if arguments.generate:
generate_fixer_file(arguments.generate)
paths = edit_files(arguments.patterns,
expressions=arguments.expressions,
functions=arguments.functions,
executables=arguments.executables,
start_dirs=arguments.start_dirs,
max_depth=arguments.max_depth,
dry_run=arguments.dry_run,
output=arguments.output,
encoding=arguments.encoding,
newline=arguments.newline)
# If the output is not sys.stdout, we need to close it because
# argparse.FileType does not do it for us.
is_sys = arguments.output in [sys.stdout, sys.stderr]
if not is_sys and isinstance(arguments.output, io.IOBase):
arguments.output.close()
return paths
def main():
"""Main function."""
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
try:
command_line(sys.argv)
finally:
logging.shutdown()
if __name__ == "__main__":
sys.exit(main())
|
elmotec/massedit
|
massedit.py
|
get_paths
|
python
|
def get_paths(patterns, start_dirs=None, max_depth=1):
# Shortcut: if there is only one pattern, make sure we process just that.
if len(patterns) == 1 and not start_dirs:
pattern = patterns[0]
directory = os.path.dirname(pattern)
if directory:
patterns = [os.path.basename(pattern)]
start_dirs = directory
max_depth = 1
if not start_dirs or start_dirs == '.':
start_dirs = os.getcwd()
for start_dir in start_dirs.split(','):
for root, dirs, files in os.walk(start_dir): # pylint: disable=W0612
if max_depth is not None:
relpath = os.path.relpath(root, start=start_dir)
depth = len(relpath.split(os.sep))
if depth > max_depth:
continue
names = []
for pattern in patterns:
names += fnmatch.filter(files, pattern)
for name in names:
path = os.path.join(root, name)
yield path
|
Retrieve files that match any of the patterns.
|
train
|
https://github.com/elmotec/massedit/blob/57e22787354896d63a8850312314b19aa0308906/massedit.py#L405-L430
| null |
#!/usr/bin/env python
# encoding: utf-8
"""A python bulk editor class to apply the same code to many files."""
# Copyright (c) 2012-19 Jérôme Lecomte
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import unicode_literals
import os
import shutil
import sys
import logging
import argparse
import difflib
import re # For convenience, pylint: disable=W0611
import fnmatch
import io
import subprocess
import textwrap
__version__ = '0.68.5' # UPDATE setup.py when changing version.
__author__ = 'Jérôme Lecomte'
__license__ = 'MIT'
log = logging.getLogger(__name__)
try:
unicode
except NameError:
unicode = str # pylint: disable=invalid-name, redefined-builtin
def is_list(arg):
"""Factor determination if arg is a list.
Small utility for a better diagnostic because str/unicode are also
iterable.
"""
return iter(arg) and not isinstance(arg, unicode)
def get_function(fn_name):
"""Retrieve the function defined by the function_name.
Arguments:
fn_name: specification of the type module:function_name.
"""
module_name, callable_name = fn_name.split(':')
current = globals()
if not callable_name:
callable_name = module_name
else:
import importlib
try:
module = importlib.import_module(module_name)
except ImportError:
log.error("failed to import %s", module_name)
raise
current = module
for level in callable_name.split('.'):
current = getattr(current, level)
code = current.__code__
if code.co_argcount != 2:
raise ValueError('function should take 2 arguments: lines, file_name')
return current
class MassEdit(object):
"""Mass edit lines of files."""
def __init__(self, **kwds):
"""Initialize MassEdit object.
Args:
- code (byte code object): code to execute on input file.
- function (str or callable): function to call on input file.
- module (str): module name where to find the function.
- executable (str): executable file name to execute on input file.
- dry_run (bool): skip actual modification of input file if True.
"""
self.code_objs = dict()
self._codes = []
self._functions = []
self._executables = []
self.dry_run = None
self.encoding = 'utf-8'
self.newline = None
if 'module' in kwds:
self.import_module(kwds['module'])
if 'code' in kwds:
self.append_code_expr(kwds['code'])
if 'function' in kwds:
self.append_function(kwds['function'])
if 'executable' in kwds:
self.append_executable(kwds['executable'])
if 'dry_run' in kwds:
self.dry_run = kwds['dry_run']
if 'encoding' in kwds:
self.encoding = kwds['encoding']
if 'newline' in kwds:
self.newline = kwds['newline']
@staticmethod
def import_module(module): # pylint: disable=R0201
"""Import module that are needed for the code expr to compile.
Argument:
module (str or list): module(s) to import.
"""
if isinstance(module, list):
all_modules = module
else:
all_modules = [module]
for mod in all_modules:
globals()[mod] = __import__(mod.strip())
@staticmethod
def __edit_line(line, code, code_obj): # pylint: disable=R0201
"""Edit a line with one code object built in the ctor."""
try:
# pylint: disable=eval-used
result = eval(code_obj, globals(), locals())
except TypeError as ex:
log.error("failed to execute %s: %s", code, ex)
raise
if result is None:
log.error("cannot process line '%s' with %s", line, code)
raise RuntimeError('failed to process line')
elif isinstance(result, list) or isinstance(result, tuple):
line = unicode(' '.join([unicode(res_element)
for res_element in result]))
else:
line = unicode(result)
return line
def edit_line(self, line):
"""Edit a single line using the code expression."""
for code, code_obj in self.code_objs.items():
line = self.__edit_line(line, code, code_obj)
return line
def edit_content(self, original_lines, file_name):
"""Processes a file contents.
First processes the contents line by line applying the registered
expressions, then process the resulting contents using the
registered functions.
Arguments:
original_lines (list of str): file content.
file_name (str): name of the file.
"""
lines = [self.edit_line(line) for line in original_lines]
for function in self._functions:
try:
lines = list(function(lines, file_name))
except UnicodeDecodeError as err:
log.error('failed to process %s: %s', file_name, err)
return lines
except Exception as err:
log.error("failed to process %s with code %s: %s",
file_name, function, err)
raise # Let the exception be handled at a higher level.
return lines
def edit_file(self, file_name):
"""Edit file in place, returns a list of modifications (unified diff).
Arguments:
file_name (str, unicode): The name of the file.
"""
with io.open(file_name, "r", encoding=self.encoding) as from_file:
try:
from_lines = from_file.readlines()
except UnicodeDecodeError as err:
log.error("encoding error (see --encoding): %s", err)
raise
if self._executables:
nb_execs = len(self._executables)
if nb_execs > 1:
log.warn("found %d executables. Will use first one", nb_execs)
exec_list = self._executables[0].split()
exec_list.append(file_name)
try:
log.info("running %s...", " ".join(exec_list))
output = subprocess.check_output(exec_list,
universal_newlines=True)
except Exception as err:
log.error("failed to execute %s: %s", " ".join(exec_list), err)
raise # Let the exception be handled at a higher level.
to_lines = output.split(unicode("\n"))
else:
to_lines = from_lines
# unified_diff wants structure of known length. Convert to a list.
to_lines = list(self.edit_content(to_lines, file_name))
diffs = difflib.unified_diff(from_lines, to_lines,
fromfile=file_name, tofile='<new>')
if not self.dry_run:
bak_file_name = file_name + ".bak"
if os.path.exists(bak_file_name):
msg = "{} already exists".format(bak_file_name)
if sys.version_info < (3, 3):
raise OSError(msg)
else:
# noinspection PyCompatibility
# pylint: disable=undefined-variable
raise FileExistsError(msg)
try:
os.rename(file_name, bak_file_name)
with io.open(file_name, 'w', encoding=self.encoding, newline=self.newline) as new:
new.writelines(to_lines)
# Keeps mode of original file.
shutil.copymode(bak_file_name, file_name)
except Exception as err:
log.error("failed to write output to %s: %s", file_name, err)
# Try to recover...
try:
os.rename(bak_file_name, file_name)
except OSError as err:
log.error("failed to restore %s from %s: %s",
file_name, bak_file_name, err)
raise
try:
os.unlink(bak_file_name)
except OSError as err:
log.warning("failed to remove backup %s: %s",
bak_file_name, err)
return list(diffs)
def append_code_expr(self, code):
"""Compile argument and adds it to the list of code objects."""
# expects a string.
if isinstance(code, str) and not isinstance(code, unicode):
code = unicode(code)
if not isinstance(code, unicode):
raise TypeError("string expected")
log.debug("compiling code %s...", code)
try:
code_obj = compile(code, '<string>', 'eval')
self.code_objs[code] = code_obj
except SyntaxError as syntax_err:
log.error("cannot compile %s: %s", code, syntax_err)
raise
log.debug("compiled code %s", code)
def append_function(self, function):
"""Append the function to the list of functions to be called.
If the function is already a callable, use it. If it's a type str
try to interpret it as [module]:?<callable>, load the module
if there is one and retrieve the callable.
Argument:
function (str or callable): function to call on input.
"""
if not hasattr(function, '__call__'):
function = get_function(function)
if not hasattr(function, '__call__'):
raise ValueError("function is expected to be callable")
self._functions.append(function)
log.debug("registered %s", function.__name__)
def append_executable(self, executable):
"""Append san executable os command to the list to be called.
Argument:
executable (str): os callable executable.
"""
if isinstance(executable, str) and not isinstance(executable, unicode):
executable = unicode(executable)
if not isinstance(executable, unicode):
raise TypeError("expected executable name as str, not {}".
format(executable.__class__.__name__))
self._executables.append(executable)
def set_code_exprs(self, codes):
"""Convenience: sets all the code expressions at once."""
self.code_objs = dict()
self._codes = []
for code in codes:
self.append_code_expr(code)
def set_functions(self, functions):
"""Check functions passed as argument and set them to be used."""
for func in functions:
try:
self.append_function(func)
except (ValueError, AttributeError) as ex:
log.error("'%s' is not a callable function: %s", func, ex)
raise
def set_executables(self, executables):
"""Check and set the executables to be used."""
for exc in executables:
self.append_executable(exc)
def parse_command_line(argv):
"""Parse command line argument. See -h option.
Arguments:
argv: arguments on the command line must include caller file name.
"""
import textwrap
example = textwrap.dedent("""
Examples:
# Simple string substitution (-e). Will show a diff. No changes applied.
{0} -e "re.sub('failIf', 'assertFalse', line)" *.py
# File level modifications (-f). Overwrites the files in place (-w).
{0} -w -f fixer:fixit *.py
# Will change all test*.py in subdirectories of tests.
{0} -e "re.sub('failIf', 'assertFalse', line)" -s tests test*.py
""").format(os.path.basename(argv[0]))
formatter_class = argparse.RawDescriptionHelpFormatter
parser = argparse.ArgumentParser(description="Python mass editor",
epilog=example,
formatter_class=formatter_class)
parser.add_argument("-V", "--version", action="version",
version="%(prog)s {}".format(__version__))
parser.add_argument("-w", "--write", dest="dry_run",
action="store_false", default=True,
help="modify target file(s) in place. "
"Shows diff otherwise.")
parser.add_argument("-v", "--verbose", dest="verbose_count",
action="count", default=0,
help="increases log verbosity (can be specified "
"multiple times)")
parser.add_argument("-e", "--expression", dest="expressions", nargs=1,
help="Python expressions applied to target files. "
"Use the line variable to reference the current line.")
parser.add_argument("-f", "--function", dest="functions", nargs=1,
help="Python function to apply to target file. "
"Takes file content as input and yield lines. "
"Specify function as [module]:?<function name>.")
parser.add_argument("-x", "--executable", dest="executables", nargs=1,
help="Python executable to apply to target file.")
parser.add_argument("-s", "--start", dest="start_dirs",
help="Directory(ies) from which to look for targets.")
parser.add_argument("-m", "--max-depth-level", type=int, dest="max_depth",
help="Maximum depth when walking subdirectories.")
parser.add_argument("-o", "--output", metavar="FILE",
type=argparse.FileType("w"), default=sys.stdout,
help="redirect output to a file")
parser.add_argument("-g", "--generate", metavar="FILE", type=str,
help="generate input file suitable for -f option")
parser.add_argument("--encoding", dest="encoding",
help="Encoding of input and output files")
parser.add_argument("--newline", dest="newline",
help="Newline character for output files")
parser.add_argument("patterns", metavar="pattern",
nargs="*", # argparse.REMAINDER,
help="shell-like file name patterns to process.")
arguments = parser.parse_args(argv[1:])
if not (arguments.expressions or
arguments.functions or
arguments.generate or
arguments.executables):
parser.error(
'--expression, --function, --generate or --executable missing')
# Sets log level to WARN going more verbose for each new -V.
log.setLevel(max(3 - arguments.verbose_count, 0) * 10)
return arguments
fixer_template = """\
#!/usr/bin/env python
def fixit(lines, file_name):
'''Edit files passed to massedit
:param list(str) lines: list of lines contained in the input file
:param str file_name: name of the file the lines were read from
:return: modified lines
:rtype: list(str)
Please modify the logic below (it does not change anything right now)
and apply your logic to the in your directory like this:
massedit -f <file name>:fixit files_to_modify\*
See massedit -h for help and other options.
'''
changed_lines = []
for lineno, line in enumerate(lines):
changed_lines.append(line)
return changed_lines
"""
def generate_fixer_file(output):
"""Generate a template fixer file to be used with --function option."""
with open(output, "w+") as fh:
fh.write(fixer_template)
return
# pylint: disable=too-many-arguments, too-many-locals
def edit_files(patterns, expressions=None,
functions=None, executables=None,
start_dirs=None, max_depth=1, dry_run=True,
output=sys.stdout, encoding=None, newline=None):
"""Process patterns with MassEdit.
Arguments:
patterns: file pattern to identify the files to be processed.
expressions: single python expression to be applied line by line.
functions: functions to process files contents.
executables: os executables to execute on the argument files.
Keyword arguments:
max_depth: maximum recursion level when looking for file matches.
start_dirs: workspace(ies) where to start the file search.
dry_run: only display differences if True. Save modified file otherwise.
output: handle where the output should be redirected.
Return:
list of files processed.
"""
if not is_list(patterns):
raise TypeError("patterns should be a list")
if expressions and not is_list(expressions):
raise TypeError("expressions should be a list of exec expressions")
if functions and not is_list(functions):
raise TypeError("functions should be a list of functions")
if executables and not is_list(executables):
raise TypeError("executables should be a list of program names")
editor = MassEdit(dry_run=dry_run, encoding=encoding, newline=newline)
if expressions:
editor.set_code_exprs(expressions)
if functions:
editor.set_functions(functions)
if executables:
editor.set_executables(executables)
processed_paths = []
for path in get_paths(patterns, start_dirs=start_dirs,
max_depth=max_depth):
try:
diffs = list(editor.edit_file(path))
if dry_run:
# At this point, encoding is the input encoding.
diff = "".join(diffs)
if not diff:
continue
# The encoding of the target output may not match the input
# encoding. If it's defined, we round trip the diff text
# to bytes and back to silence any conversion errors.
encoding = output.encoding
if encoding:
bytes_diff = diff.encode(encoding=encoding, errors='ignore')
diff = bytes_diff.decode(encoding=output.encoding)
output.write(diff)
except UnicodeDecodeError as err:
log.error("failed to process %s: %s", path, err)
continue
processed_paths.append(os.path.abspath(path))
return processed_paths
def command_line(argv):
"""Instantiate an editor and process arguments.
Optional argument:
- processed_paths: paths processed are appended to the list.
"""
arguments = parse_command_line(argv)
if arguments.generate:
generate_fixer_file(arguments.generate)
paths = edit_files(arguments.patterns,
expressions=arguments.expressions,
functions=arguments.functions,
executables=arguments.executables,
start_dirs=arguments.start_dirs,
max_depth=arguments.max_depth,
dry_run=arguments.dry_run,
output=arguments.output,
encoding=arguments.encoding,
newline=arguments.newline)
# If the output is not sys.stdout, we need to close it because
# argparse.FileType does not do it for us.
is_sys = arguments.output in [sys.stdout, sys.stderr]
if not is_sys and isinstance(arguments.output, io.IOBase):
arguments.output.close()
return paths
def main():
"""Main function."""
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
try:
command_line(sys.argv)
finally:
logging.shutdown()
if __name__ == "__main__":
sys.exit(main())
|
elmotec/massedit
|
massedit.py
|
edit_files
|
python
|
def edit_files(patterns, expressions=None,
functions=None, executables=None,
start_dirs=None, max_depth=1, dry_run=True,
output=sys.stdout, encoding=None, newline=None):
if not is_list(patterns):
raise TypeError("patterns should be a list")
if expressions and not is_list(expressions):
raise TypeError("expressions should be a list of exec expressions")
if functions and not is_list(functions):
raise TypeError("functions should be a list of functions")
if executables and not is_list(executables):
raise TypeError("executables should be a list of program names")
editor = MassEdit(dry_run=dry_run, encoding=encoding, newline=newline)
if expressions:
editor.set_code_exprs(expressions)
if functions:
editor.set_functions(functions)
if executables:
editor.set_executables(executables)
processed_paths = []
for path in get_paths(patterns, start_dirs=start_dirs,
max_depth=max_depth):
try:
diffs = list(editor.edit_file(path))
if dry_run:
# At this point, encoding is the input encoding.
diff = "".join(diffs)
if not diff:
continue
# The encoding of the target output may not match the input
# encoding. If it's defined, we round trip the diff text
# to bytes and back to silence any conversion errors.
encoding = output.encoding
if encoding:
bytes_diff = diff.encode(encoding=encoding, errors='ignore')
diff = bytes_diff.decode(encoding=output.encoding)
output.write(diff)
except UnicodeDecodeError as err:
log.error("failed to process %s: %s", path, err)
continue
processed_paths.append(os.path.abspath(path))
return processed_paths
|
Process patterns with MassEdit.
Arguments:
patterns: file pattern to identify the files to be processed.
expressions: single python expression to be applied line by line.
functions: functions to process files contents.
executables: os executables to execute on the argument files.
Keyword arguments:
max_depth: maximum recursion level when looking for file matches.
start_dirs: workspace(ies) where to start the file search.
dry_run: only display differences if True. Save modified file otherwise.
output: handle where the output should be redirected.
Return:
list of files processed.
|
train
|
https://github.com/elmotec/massedit/blob/57e22787354896d63a8850312314b19aa0308906/massedit.py#L469-L530
|
[
"def is_list(arg):\n \"\"\"Factor determination if arg is a list.\n\n Small utility for a better diagnostic because str/unicode are also\n iterable.\n\n \"\"\"\n return iter(arg) and not isinstance(arg, unicode)\n",
"def get_paths(patterns, start_dirs=None, max_depth=1):\n \"\"\"Retrieve files that match any of the patterns.\"\"\"\n # Shortcut: if there is only one pattern, make sure we process just that.\n if len(patterns) == 1 and not start_dirs:\n pattern = patterns[0]\n directory = os.path.dirname(pattern)\n if directory:\n patterns = [os.path.basename(pattern)]\n start_dirs = directory\n max_depth = 1\n\n if not start_dirs or start_dirs == '.':\n start_dirs = os.getcwd()\n for start_dir in start_dirs.split(','):\n for root, dirs, files in os.walk(start_dir): # pylint: disable=W0612\n if max_depth is not None:\n relpath = os.path.relpath(root, start=start_dir)\n depth = len(relpath.split(os.sep))\n if depth > max_depth:\n continue\n names = []\n for pattern in patterns:\n names += fnmatch.filter(files, pattern)\n for name in names:\n path = os.path.join(root, name)\n yield path\n",
"def edit_file(self, file_name):\n \"\"\"Edit file in place, returns a list of modifications (unified diff).\n\n Arguments:\n file_name (str, unicode): The name of the file.\n\n \"\"\"\n with io.open(file_name, \"r\", encoding=self.encoding) as from_file:\n try:\n from_lines = from_file.readlines()\n except UnicodeDecodeError as err:\n log.error(\"encoding error (see --encoding): %s\", err)\n raise\n\n if self._executables:\n nb_execs = len(self._executables)\n if nb_execs > 1:\n log.warn(\"found %d executables. Will use first one\", nb_execs)\n exec_list = self._executables[0].split()\n exec_list.append(file_name)\n try:\n log.info(\"running %s...\", \" \".join(exec_list))\n output = subprocess.check_output(exec_list,\n universal_newlines=True)\n except Exception as err:\n log.error(\"failed to execute %s: %s\", \" \".join(exec_list), err)\n raise # Let the exception be handled at a higher level.\n to_lines = output.split(unicode(\"\\n\"))\n else:\n to_lines = from_lines\n\n # unified_diff wants structure of known length. 
Convert to a list.\n to_lines = list(self.edit_content(to_lines, file_name))\n diffs = difflib.unified_diff(from_lines, to_lines,\n fromfile=file_name, tofile='<new>')\n if not self.dry_run:\n bak_file_name = file_name + \".bak\"\n if os.path.exists(bak_file_name):\n msg = \"{} already exists\".format(bak_file_name)\n if sys.version_info < (3, 3):\n raise OSError(msg)\n else:\n # noinspection PyCompatibility\n # pylint: disable=undefined-variable\n raise FileExistsError(msg)\n try:\n os.rename(file_name, bak_file_name)\n with io.open(file_name, 'w', encoding=self.encoding, newline=self.newline) as new:\n new.writelines(to_lines)\n # Keeps mode of original file.\n shutil.copymode(bak_file_name, file_name)\n except Exception as err:\n log.error(\"failed to write output to %s: %s\", file_name, err)\n # Try to recover...\n try:\n os.rename(bak_file_name, file_name)\n except OSError as err:\n log.error(\"failed to restore %s from %s: %s\",\n file_name, bak_file_name, err)\n raise\n try:\n os.unlink(bak_file_name)\n except OSError as err:\n log.warning(\"failed to remove backup %s: %s\",\n bak_file_name, err)\n return list(diffs)\n",
"def set_executables(self, executables):\n \"\"\"Check and set the executables to be used.\"\"\"\n for exc in executables:\n self.append_executable(exc)\n"
] |
#!/usr/bin/env python
# encoding: utf-8
"""A python bulk editor class to apply the same code to many files."""
# Copyright (c) 2012-19 Jérôme Lecomte
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import unicode_literals
import os
import shutil
import sys
import logging
import argparse
import difflib
import re # For convenience, pylint: disable=W0611
import fnmatch
import io
import subprocess
import textwrap
__version__ = '0.68.5' # UPDATE setup.py when changing version.
__author__ = 'Jérôme Lecomte'
__license__ = 'MIT'
log = logging.getLogger(__name__)
try:
unicode
except NameError:
unicode = str # pylint: disable=invalid-name, redefined-builtin
def is_list(arg):
"""Factor determination if arg is a list.
Small utility for a better diagnostic because str/unicode are also
iterable.
"""
return iter(arg) and not isinstance(arg, unicode)
def get_function(fn_name):
"""Retrieve the function defined by the function_name.
Arguments:
fn_name: specification of the type module:function_name.
"""
module_name, callable_name = fn_name.split(':')
current = globals()
if not callable_name:
callable_name = module_name
else:
import importlib
try:
module = importlib.import_module(module_name)
except ImportError:
log.error("failed to import %s", module_name)
raise
current = module
for level in callable_name.split('.'):
current = getattr(current, level)
code = current.__code__
if code.co_argcount != 2:
raise ValueError('function should take 2 arguments: lines, file_name')
return current
class MassEdit(object):
"""Mass edit lines of files."""
def __init__(self, **kwds):
"""Initialize MassEdit object.
Args:
- code (byte code object): code to execute on input file.
- function (str or callable): function to call on input file.
- module (str): module name where to find the function.
- executable (str): executable file name to execute on input file.
- dry_run (bool): skip actual modification of input file if True.
"""
self.code_objs = dict()
self._codes = []
self._functions = []
self._executables = []
self.dry_run = None
self.encoding = 'utf-8'
self.newline = None
if 'module' in kwds:
self.import_module(kwds['module'])
if 'code' in kwds:
self.append_code_expr(kwds['code'])
if 'function' in kwds:
self.append_function(kwds['function'])
if 'executable' in kwds:
self.append_executable(kwds['executable'])
if 'dry_run' in kwds:
self.dry_run = kwds['dry_run']
if 'encoding' in kwds:
self.encoding = kwds['encoding']
if 'newline' in kwds:
self.newline = kwds['newline']
@staticmethod
def import_module(module): # pylint: disable=R0201
"""Import module that are needed for the code expr to compile.
Argument:
module (str or list): module(s) to import.
"""
if isinstance(module, list):
all_modules = module
else:
all_modules = [module]
for mod in all_modules:
globals()[mod] = __import__(mod.strip())
@staticmethod
def __edit_line(line, code, code_obj): # pylint: disable=R0201
"""Edit a line with one code object built in the ctor."""
try:
# pylint: disable=eval-used
result = eval(code_obj, globals(), locals())
except TypeError as ex:
log.error("failed to execute %s: %s", code, ex)
raise
if result is None:
log.error("cannot process line '%s' with %s", line, code)
raise RuntimeError('failed to process line')
elif isinstance(result, list) or isinstance(result, tuple):
line = unicode(' '.join([unicode(res_element)
for res_element in result]))
else:
line = unicode(result)
return line
def edit_line(self, line):
"""Edit a single line using the code expression."""
for code, code_obj in self.code_objs.items():
line = self.__edit_line(line, code, code_obj)
return line
def edit_content(self, original_lines, file_name):
"""Processes a file contents.
First processes the contents line by line applying the registered
expressions, then process the resulting contents using the
registered functions.
Arguments:
original_lines (list of str): file content.
file_name (str): name of the file.
"""
lines = [self.edit_line(line) for line in original_lines]
for function in self._functions:
try:
lines = list(function(lines, file_name))
except UnicodeDecodeError as err:
log.error('failed to process %s: %s', file_name, err)
return lines
except Exception as err:
log.error("failed to process %s with code %s: %s",
file_name, function, err)
raise # Let the exception be handled at a higher level.
return lines
def edit_file(self, file_name):
"""Edit file in place, returns a list of modifications (unified diff).
Arguments:
file_name (str, unicode): The name of the file.
"""
with io.open(file_name, "r", encoding=self.encoding) as from_file:
try:
from_lines = from_file.readlines()
except UnicodeDecodeError as err:
log.error("encoding error (see --encoding): %s", err)
raise
if self._executables:
nb_execs = len(self._executables)
if nb_execs > 1:
log.warn("found %d executables. Will use first one", nb_execs)
exec_list = self._executables[0].split()
exec_list.append(file_name)
try:
log.info("running %s...", " ".join(exec_list))
output = subprocess.check_output(exec_list,
universal_newlines=True)
except Exception as err:
log.error("failed to execute %s: %s", " ".join(exec_list), err)
raise # Let the exception be handled at a higher level.
to_lines = output.split(unicode("\n"))
else:
to_lines = from_lines
# unified_diff wants structure of known length. Convert to a list.
to_lines = list(self.edit_content(to_lines, file_name))
diffs = difflib.unified_diff(from_lines, to_lines,
fromfile=file_name, tofile='<new>')
if not self.dry_run:
bak_file_name = file_name + ".bak"
if os.path.exists(bak_file_name):
msg = "{} already exists".format(bak_file_name)
if sys.version_info < (3, 3):
raise OSError(msg)
else:
# noinspection PyCompatibility
# pylint: disable=undefined-variable
raise FileExistsError(msg)
try:
os.rename(file_name, bak_file_name)
with io.open(file_name, 'w', encoding=self.encoding, newline=self.newline) as new:
new.writelines(to_lines)
# Keeps mode of original file.
shutil.copymode(bak_file_name, file_name)
except Exception as err:
log.error("failed to write output to %s: %s", file_name, err)
# Try to recover...
try:
os.rename(bak_file_name, file_name)
except OSError as err:
log.error("failed to restore %s from %s: %s",
file_name, bak_file_name, err)
raise
try:
os.unlink(bak_file_name)
except OSError as err:
log.warning("failed to remove backup %s: %s",
bak_file_name, err)
return list(diffs)
def append_code_expr(self, code):
"""Compile argument and adds it to the list of code objects."""
# expects a string.
if isinstance(code, str) and not isinstance(code, unicode):
code = unicode(code)
if not isinstance(code, unicode):
raise TypeError("string expected")
log.debug("compiling code %s...", code)
try:
code_obj = compile(code, '<string>', 'eval')
self.code_objs[code] = code_obj
except SyntaxError as syntax_err:
log.error("cannot compile %s: %s", code, syntax_err)
raise
log.debug("compiled code %s", code)
def append_function(self, function):
"""Append the function to the list of functions to be called.
If the function is already a callable, use it. If it's a type str
try to interpret it as [module]:?<callable>, load the module
if there is one and retrieve the callable.
Argument:
function (str or callable): function to call on input.
"""
if not hasattr(function, '__call__'):
function = get_function(function)
if not hasattr(function, '__call__'):
raise ValueError("function is expected to be callable")
self._functions.append(function)
log.debug("registered %s", function.__name__)
def append_executable(self, executable):
"""Append san executable os command to the list to be called.
Argument:
executable (str): os callable executable.
"""
if isinstance(executable, str) and not isinstance(executable, unicode):
executable = unicode(executable)
if not isinstance(executable, unicode):
raise TypeError("expected executable name as str, not {}".
format(executable.__class__.__name__))
self._executables.append(executable)
def set_code_exprs(self, codes):
"""Convenience: sets all the code expressions at once."""
self.code_objs = dict()
self._codes = []
for code in codes:
self.append_code_expr(code)
def set_functions(self, functions):
"""Check functions passed as argument and set them to be used."""
for func in functions:
try:
self.append_function(func)
except (ValueError, AttributeError) as ex:
log.error("'%s' is not a callable function: %s", func, ex)
raise
def set_executables(self, executables):
"""Check and set the executables to be used."""
for exc in executables:
self.append_executable(exc)
def parse_command_line(argv):
"""Parse command line argument. See -h option.
Arguments:
argv: arguments on the command line must include caller file name.
"""
import textwrap
example = textwrap.dedent("""
Examples:
# Simple string substitution (-e). Will show a diff. No changes applied.
{0} -e "re.sub('failIf', 'assertFalse', line)" *.py
# File level modifications (-f). Overwrites the files in place (-w).
{0} -w -f fixer:fixit *.py
# Will change all test*.py in subdirectories of tests.
{0} -e "re.sub('failIf', 'assertFalse', line)" -s tests test*.py
""").format(os.path.basename(argv[0]))
formatter_class = argparse.RawDescriptionHelpFormatter
parser = argparse.ArgumentParser(description="Python mass editor",
epilog=example,
formatter_class=formatter_class)
parser.add_argument("-V", "--version", action="version",
version="%(prog)s {}".format(__version__))
parser.add_argument("-w", "--write", dest="dry_run",
action="store_false", default=True,
help="modify target file(s) in place. "
"Shows diff otherwise.")
parser.add_argument("-v", "--verbose", dest="verbose_count",
action="count", default=0,
help="increases log verbosity (can be specified "
"multiple times)")
parser.add_argument("-e", "--expression", dest="expressions", nargs=1,
help="Python expressions applied to target files. "
"Use the line variable to reference the current line.")
parser.add_argument("-f", "--function", dest="functions", nargs=1,
help="Python function to apply to target file. "
"Takes file content as input and yield lines. "
"Specify function as [module]:?<function name>.")
parser.add_argument("-x", "--executable", dest="executables", nargs=1,
help="Python executable to apply to target file.")
parser.add_argument("-s", "--start", dest="start_dirs",
help="Directory(ies) from which to look for targets.")
parser.add_argument("-m", "--max-depth-level", type=int, dest="max_depth",
help="Maximum depth when walking subdirectories.")
parser.add_argument("-o", "--output", metavar="FILE",
type=argparse.FileType("w"), default=sys.stdout,
help="redirect output to a file")
parser.add_argument("-g", "--generate", metavar="FILE", type=str,
help="generate input file suitable for -f option")
parser.add_argument("--encoding", dest="encoding",
help="Encoding of input and output files")
parser.add_argument("--newline", dest="newline",
help="Newline character for output files")
parser.add_argument("patterns", metavar="pattern",
nargs="*", # argparse.REMAINDER,
help="shell-like file name patterns to process.")
arguments = parser.parse_args(argv[1:])
if not (arguments.expressions or
arguments.functions or
arguments.generate or
arguments.executables):
parser.error(
'--expression, --function, --generate or --executable missing')
# Sets log level to WARN going more verbose for each new -V.
log.setLevel(max(3 - arguments.verbose_count, 0) * 10)
return arguments
def get_paths(patterns, start_dirs=None, max_depth=1):
"""Retrieve files that match any of the patterns."""
# Shortcut: if there is only one pattern, make sure we process just that.
if len(patterns) == 1 and not start_dirs:
pattern = patterns[0]
directory = os.path.dirname(pattern)
if directory:
patterns = [os.path.basename(pattern)]
start_dirs = directory
max_depth = 1
if not start_dirs or start_dirs == '.':
start_dirs = os.getcwd()
for start_dir in start_dirs.split(','):
for root, dirs, files in os.walk(start_dir): # pylint: disable=W0612
if max_depth is not None:
relpath = os.path.relpath(root, start=start_dir)
depth = len(relpath.split(os.sep))
if depth > max_depth:
continue
names = []
for pattern in patterns:
names += fnmatch.filter(files, pattern)
for name in names:
path = os.path.join(root, name)
yield path
fixer_template = """\
#!/usr/bin/env python
def fixit(lines, file_name):
'''Edit files passed to massedit
:param list(str) lines: list of lines contained in the input file
:param str file_name: name of the file the lines were read from
:return: modified lines
:rtype: list(str)
Please modify the logic below (it does not change anything right now)
and apply your logic to the in your directory like this:
massedit -f <file name>:fixit files_to_modify\*
See massedit -h for help and other options.
'''
changed_lines = []
for lineno, line in enumerate(lines):
changed_lines.append(line)
return changed_lines
"""
def generate_fixer_file(output):
"""Generate a template fixer file to be used with --function option."""
with open(output, "w+") as fh:
fh.write(fixer_template)
return
# pylint: disable=too-many-arguments, too-many-locals
def command_line(argv):
"""Instantiate an editor and process arguments.
Optional argument:
- processed_paths: paths processed are appended to the list.
"""
arguments = parse_command_line(argv)
if arguments.generate:
generate_fixer_file(arguments.generate)
paths = edit_files(arguments.patterns,
expressions=arguments.expressions,
functions=arguments.functions,
executables=arguments.executables,
start_dirs=arguments.start_dirs,
max_depth=arguments.max_depth,
dry_run=arguments.dry_run,
output=arguments.output,
encoding=arguments.encoding,
newline=arguments.newline)
# If the output is not sys.stdout, we need to close it because
# argparse.FileType does not do it for us.
is_sys = arguments.output in [sys.stdout, sys.stderr]
if not is_sys and isinstance(arguments.output, io.IOBase):
arguments.output.close()
return paths
def main():
"""Main function."""
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
try:
command_line(sys.argv)
finally:
logging.shutdown()
if __name__ == "__main__":
sys.exit(main())
|
elmotec/massedit
|
massedit.py
|
command_line
|
python
|
def command_line(argv):
arguments = parse_command_line(argv)
if arguments.generate:
generate_fixer_file(arguments.generate)
paths = edit_files(arguments.patterns,
expressions=arguments.expressions,
functions=arguments.functions,
executables=arguments.executables,
start_dirs=arguments.start_dirs,
max_depth=arguments.max_depth,
dry_run=arguments.dry_run,
output=arguments.output,
encoding=arguments.encoding,
newline=arguments.newline)
# If the output is not sys.stdout, we need to close it because
# argparse.FileType does not do it for us.
is_sys = arguments.output in [sys.stdout, sys.stderr]
if not is_sys and isinstance(arguments.output, io.IOBase):
arguments.output.close()
return paths
|
Instantiate an editor and process arguments.
Optional argument:
- processed_paths: paths processed are appended to the list.
|
train
|
https://github.com/elmotec/massedit/blob/57e22787354896d63a8850312314b19aa0308906/massedit.py#L533-L558
|
[
"def parse_command_line(argv):\n \"\"\"Parse command line argument. See -h option.\n\n Arguments:\n argv: arguments on the command line must include caller file name.\n\n \"\"\"\n import textwrap\n\n example = textwrap.dedent(\"\"\"\n Examples:\n # Simple string substitution (-e). Will show a diff. No changes applied.\n {0} -e \"re.sub('failIf', 'assertFalse', line)\" *.py\n\n # File level modifications (-f). Overwrites the files in place (-w).\n {0} -w -f fixer:fixit *.py\n\n # Will change all test*.py in subdirectories of tests.\n {0} -e \"re.sub('failIf', 'assertFalse', line)\" -s tests test*.py\n \"\"\").format(os.path.basename(argv[0]))\n formatter_class = argparse.RawDescriptionHelpFormatter\n parser = argparse.ArgumentParser(description=\"Python mass editor\",\n epilog=example,\n formatter_class=formatter_class)\n parser.add_argument(\"-V\", \"--version\", action=\"version\",\n version=\"%(prog)s {}\".format(__version__))\n parser.add_argument(\"-w\", \"--write\", dest=\"dry_run\",\n action=\"store_false\", default=True,\n help=\"modify target file(s) in place. \"\n \"Shows diff otherwise.\")\n parser.add_argument(\"-v\", \"--verbose\", dest=\"verbose_count\",\n action=\"count\", default=0,\n help=\"increases log verbosity (can be specified \"\n \"multiple times)\")\n parser.add_argument(\"-e\", \"--expression\", dest=\"expressions\", nargs=1,\n help=\"Python expressions applied to target files. \"\n \"Use the line variable to reference the current line.\")\n parser.add_argument(\"-f\", \"--function\", dest=\"functions\", nargs=1,\n help=\"Python function to apply to target file. \"\n \"Takes file content as input and yield lines. 
\"\n \"Specify function as [module]:?<function name>.\")\n parser.add_argument(\"-x\", \"--executable\", dest=\"executables\", nargs=1,\n help=\"Python executable to apply to target file.\")\n parser.add_argument(\"-s\", \"--start\", dest=\"start_dirs\",\n help=\"Directory(ies) from which to look for targets.\")\n parser.add_argument(\"-m\", \"--max-depth-level\", type=int, dest=\"max_depth\",\n help=\"Maximum depth when walking subdirectories.\")\n parser.add_argument(\"-o\", \"--output\", metavar=\"FILE\",\n type=argparse.FileType(\"w\"), default=sys.stdout,\n help=\"redirect output to a file\")\n parser.add_argument(\"-g\", \"--generate\", metavar=\"FILE\", type=str,\n help=\"generate input file suitable for -f option\")\n parser.add_argument(\"--encoding\", dest=\"encoding\",\n help=\"Encoding of input and output files\")\n parser.add_argument(\"--newline\", dest=\"newline\",\n help=\"Newline character for output files\")\n parser.add_argument(\"patterns\", metavar=\"pattern\",\n nargs=\"*\", # argparse.REMAINDER,\n help=\"shell-like file name patterns to process.\")\n arguments = parser.parse_args(argv[1:])\n\n if not (arguments.expressions or\n arguments.functions or\n arguments.generate or\n arguments.executables):\n parser.error(\n '--expression, --function, --generate or --executable missing')\n\n # Sets log level to WARN going more verbose for each new -V.\n log.setLevel(max(3 - arguments.verbose_count, 0) * 10)\n return arguments\n",
"def generate_fixer_file(output):\n \"\"\"Generate a template fixer file to be used with --function option.\"\"\"\n with open(output, \"w+\") as fh:\n fh.write(fixer_template)\n return\n",
"def edit_files(patterns, expressions=None,\n functions=None, executables=None,\n start_dirs=None, max_depth=1, dry_run=True,\n output=sys.stdout, encoding=None, newline=None):\n \"\"\"Process patterns with MassEdit.\n\n Arguments:\n patterns: file pattern to identify the files to be processed.\n expressions: single python expression to be applied line by line.\n functions: functions to process files contents.\n executables: os executables to execute on the argument files.\n\n Keyword arguments:\n max_depth: maximum recursion level when looking for file matches.\n start_dirs: workspace(ies) where to start the file search.\n dry_run: only display differences if True. Save modified file otherwise.\n output: handle where the output should be redirected.\n\n Return:\n list of files processed.\n\n \"\"\"\n if not is_list(patterns):\n raise TypeError(\"patterns should be a list\")\n if expressions and not is_list(expressions):\n raise TypeError(\"expressions should be a list of exec expressions\")\n if functions and not is_list(functions):\n raise TypeError(\"functions should be a list of functions\")\n if executables and not is_list(executables):\n raise TypeError(\"executables should be a list of program names\")\n\n editor = MassEdit(dry_run=dry_run, encoding=encoding, newline=newline)\n if expressions:\n editor.set_code_exprs(expressions)\n if functions:\n editor.set_functions(functions)\n if executables:\n editor.set_executables(executables)\n\n processed_paths = []\n for path in get_paths(patterns, start_dirs=start_dirs,\n max_depth=max_depth):\n try:\n diffs = list(editor.edit_file(path))\n if dry_run:\n # At this point, encoding is the input encoding.\n diff = \"\".join(diffs)\n if not diff:\n continue\n # The encoding of the target output may not match the input\n # encoding. 
If it's defined, we round trip the diff text\n # to bytes and back to silence any conversion errors.\n encoding = output.encoding\n if encoding:\n bytes_diff = diff.encode(encoding=encoding, errors='ignore')\n diff = bytes_diff.decode(encoding=output.encoding)\n output.write(diff)\n except UnicodeDecodeError as err:\n log.error(\"failed to process %s: %s\", path, err)\n continue\n processed_paths.append(os.path.abspath(path))\n return processed_paths\n"
] |
#!/usr/bin/env python
# encoding: utf-8
"""A python bulk editor class to apply the same code to many files."""
# Copyright (c) 2012-19 Jérôme Lecomte
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import unicode_literals
import os
import shutil
import sys
import logging
import argparse
import difflib
import re # For convenience, pylint: disable=W0611
import fnmatch
import io
import subprocess
import textwrap
__version__ = '0.68.5' # UPDATE setup.py when changing version.
__author__ = 'Jérôme Lecomte'
__license__ = 'MIT'
log = logging.getLogger(__name__)
try:
unicode
except NameError:
unicode = str # pylint: disable=invalid-name, redefined-builtin
def is_list(arg):
"""Factor determination if arg is a list.
Small utility for a better diagnostic because str/unicode are also
iterable.
"""
return iter(arg) and not isinstance(arg, unicode)
def get_function(fn_name):
"""Retrieve the function defined by the function_name.
Arguments:
fn_name: specification of the type module:function_name.
"""
module_name, callable_name = fn_name.split(':')
current = globals()
if not callable_name:
callable_name = module_name
else:
import importlib
try:
module = importlib.import_module(module_name)
except ImportError:
log.error("failed to import %s", module_name)
raise
current = module
for level in callable_name.split('.'):
current = getattr(current, level)
code = current.__code__
if code.co_argcount != 2:
raise ValueError('function should take 2 arguments: lines, file_name')
return current
class MassEdit(object):
"""Mass edit lines of files."""
def __init__(self, **kwds):
"""Initialize MassEdit object.
Args:
- code (byte code object): code to execute on input file.
- function (str or callable): function to call on input file.
- module (str): module name where to find the function.
- executable (str): executable file name to execute on input file.
- dry_run (bool): skip actual modification of input file if True.
"""
self.code_objs = dict()
self._codes = []
self._functions = []
self._executables = []
self.dry_run = None
self.encoding = 'utf-8'
self.newline = None
if 'module' in kwds:
self.import_module(kwds['module'])
if 'code' in kwds:
self.append_code_expr(kwds['code'])
if 'function' in kwds:
self.append_function(kwds['function'])
if 'executable' in kwds:
self.append_executable(kwds['executable'])
if 'dry_run' in kwds:
self.dry_run = kwds['dry_run']
if 'encoding' in kwds:
self.encoding = kwds['encoding']
if 'newline' in kwds:
self.newline = kwds['newline']
@staticmethod
def import_module(module): # pylint: disable=R0201
"""Import module that are needed for the code expr to compile.
Argument:
module (str or list): module(s) to import.
"""
if isinstance(module, list):
all_modules = module
else:
all_modules = [module]
for mod in all_modules:
globals()[mod] = __import__(mod.strip())
@staticmethod
def __edit_line(line, code, code_obj): # pylint: disable=R0201
"""Edit a line with one code object built in the ctor."""
try:
# pylint: disable=eval-used
result = eval(code_obj, globals(), locals())
except TypeError as ex:
log.error("failed to execute %s: %s", code, ex)
raise
if result is None:
log.error("cannot process line '%s' with %s", line, code)
raise RuntimeError('failed to process line')
elif isinstance(result, list) or isinstance(result, tuple):
line = unicode(' '.join([unicode(res_element)
for res_element in result]))
else:
line = unicode(result)
return line
def edit_line(self, line):
"""Edit a single line using the code expression."""
for code, code_obj in self.code_objs.items():
line = self.__edit_line(line, code, code_obj)
return line
def edit_content(self, original_lines, file_name):
"""Processes a file contents.
First processes the contents line by line applying the registered
expressions, then process the resulting contents using the
registered functions.
Arguments:
original_lines (list of str): file content.
file_name (str): name of the file.
"""
lines = [self.edit_line(line) for line in original_lines]
for function in self._functions:
try:
lines = list(function(lines, file_name))
except UnicodeDecodeError as err:
log.error('failed to process %s: %s', file_name, err)
return lines
except Exception as err:
log.error("failed to process %s with code %s: %s",
file_name, function, err)
raise # Let the exception be handled at a higher level.
return lines
def edit_file(self, file_name):
"""Edit file in place, returns a list of modifications (unified diff).
Arguments:
file_name (str, unicode): The name of the file.
"""
with io.open(file_name, "r", encoding=self.encoding) as from_file:
try:
from_lines = from_file.readlines()
except UnicodeDecodeError as err:
log.error("encoding error (see --encoding): %s", err)
raise
if self._executables:
nb_execs = len(self._executables)
if nb_execs > 1:
log.warn("found %d executables. Will use first one", nb_execs)
exec_list = self._executables[0].split()
exec_list.append(file_name)
try:
log.info("running %s...", " ".join(exec_list))
output = subprocess.check_output(exec_list,
universal_newlines=True)
except Exception as err:
log.error("failed to execute %s: %s", " ".join(exec_list), err)
raise # Let the exception be handled at a higher level.
to_lines = output.split(unicode("\n"))
else:
to_lines = from_lines
# unified_diff wants structure of known length. Convert to a list.
to_lines = list(self.edit_content(to_lines, file_name))
diffs = difflib.unified_diff(from_lines, to_lines,
fromfile=file_name, tofile='<new>')
if not self.dry_run:
bak_file_name = file_name + ".bak"
if os.path.exists(bak_file_name):
msg = "{} already exists".format(bak_file_name)
if sys.version_info < (3, 3):
raise OSError(msg)
else:
# noinspection PyCompatibility
# pylint: disable=undefined-variable
raise FileExistsError(msg)
try:
os.rename(file_name, bak_file_name)
with io.open(file_name, 'w', encoding=self.encoding, newline=self.newline) as new:
new.writelines(to_lines)
# Keeps mode of original file.
shutil.copymode(bak_file_name, file_name)
except Exception as err:
log.error("failed to write output to %s: %s", file_name, err)
# Try to recover...
try:
os.rename(bak_file_name, file_name)
except OSError as err:
log.error("failed to restore %s from %s: %s",
file_name, bak_file_name, err)
raise
try:
os.unlink(bak_file_name)
except OSError as err:
log.warning("failed to remove backup %s: %s",
bak_file_name, err)
return list(diffs)
def append_code_expr(self, code):
"""Compile argument and adds it to the list of code objects."""
# expects a string.
if isinstance(code, str) and not isinstance(code, unicode):
code = unicode(code)
if not isinstance(code, unicode):
raise TypeError("string expected")
log.debug("compiling code %s...", code)
try:
code_obj = compile(code, '<string>', 'eval')
self.code_objs[code] = code_obj
except SyntaxError as syntax_err:
log.error("cannot compile %s: %s", code, syntax_err)
raise
log.debug("compiled code %s", code)
def append_function(self, function):
"""Append the function to the list of functions to be called.
If the function is already a callable, use it. If it's a type str
try to interpret it as [module]:?<callable>, load the module
if there is one and retrieve the callable.
Argument:
function (str or callable): function to call on input.
"""
if not hasattr(function, '__call__'):
function = get_function(function)
if not hasattr(function, '__call__'):
raise ValueError("function is expected to be callable")
self._functions.append(function)
log.debug("registered %s", function.__name__)
def append_executable(self, executable):
"""Append san executable os command to the list to be called.
Argument:
executable (str): os callable executable.
"""
if isinstance(executable, str) and not isinstance(executable, unicode):
executable = unicode(executable)
if not isinstance(executable, unicode):
raise TypeError("expected executable name as str, not {}".
format(executable.__class__.__name__))
self._executables.append(executable)
def set_code_exprs(self, codes):
"""Convenience: sets all the code expressions at once."""
self.code_objs = dict()
self._codes = []
for code in codes:
self.append_code_expr(code)
def set_functions(self, functions):
"""Check functions passed as argument and set them to be used."""
for func in functions:
try:
self.append_function(func)
except (ValueError, AttributeError) as ex:
log.error("'%s' is not a callable function: %s", func, ex)
raise
def set_executables(self, executables):
"""Check and set the executables to be used."""
for exc in executables:
self.append_executable(exc)
def parse_command_line(argv):
"""Parse command line argument. See -h option.
Arguments:
argv: arguments on the command line must include caller file name.
"""
import textwrap
example = textwrap.dedent("""
Examples:
# Simple string substitution (-e). Will show a diff. No changes applied.
{0} -e "re.sub('failIf', 'assertFalse', line)" *.py
# File level modifications (-f). Overwrites the files in place (-w).
{0} -w -f fixer:fixit *.py
# Will change all test*.py in subdirectories of tests.
{0} -e "re.sub('failIf', 'assertFalse', line)" -s tests test*.py
""").format(os.path.basename(argv[0]))
formatter_class = argparse.RawDescriptionHelpFormatter
parser = argparse.ArgumentParser(description="Python mass editor",
epilog=example,
formatter_class=formatter_class)
parser.add_argument("-V", "--version", action="version",
version="%(prog)s {}".format(__version__))
parser.add_argument("-w", "--write", dest="dry_run",
action="store_false", default=True,
help="modify target file(s) in place. "
"Shows diff otherwise.")
parser.add_argument("-v", "--verbose", dest="verbose_count",
action="count", default=0,
help="increases log verbosity (can be specified "
"multiple times)")
parser.add_argument("-e", "--expression", dest="expressions", nargs=1,
help="Python expressions applied to target files. "
"Use the line variable to reference the current line.")
parser.add_argument("-f", "--function", dest="functions", nargs=1,
help="Python function to apply to target file. "
"Takes file content as input and yield lines. "
"Specify function as [module]:?<function name>.")
parser.add_argument("-x", "--executable", dest="executables", nargs=1,
help="Python executable to apply to target file.")
parser.add_argument("-s", "--start", dest="start_dirs",
help="Directory(ies) from which to look for targets.")
parser.add_argument("-m", "--max-depth-level", type=int, dest="max_depth",
help="Maximum depth when walking subdirectories.")
parser.add_argument("-o", "--output", metavar="FILE",
type=argparse.FileType("w"), default=sys.stdout,
help="redirect output to a file")
parser.add_argument("-g", "--generate", metavar="FILE", type=str,
help="generate input file suitable for -f option")
parser.add_argument("--encoding", dest="encoding",
help="Encoding of input and output files")
parser.add_argument("--newline", dest="newline",
help="Newline character for output files")
parser.add_argument("patterns", metavar="pattern",
nargs="*", # argparse.REMAINDER,
help="shell-like file name patterns to process.")
arguments = parser.parse_args(argv[1:])
if not (arguments.expressions or
arguments.functions or
arguments.generate or
arguments.executables):
parser.error(
'--expression, --function, --generate or --executable missing')
# Sets log level to WARN going more verbose for each new -V.
log.setLevel(max(3 - arguments.verbose_count, 0) * 10)
return arguments
def get_paths(patterns, start_dirs=None, max_depth=1):
"""Retrieve files that match any of the patterns."""
# Shortcut: if there is only one pattern, make sure we process just that.
if len(patterns) == 1 and not start_dirs:
pattern = patterns[0]
directory = os.path.dirname(pattern)
if directory:
patterns = [os.path.basename(pattern)]
start_dirs = directory
max_depth = 1
if not start_dirs or start_dirs == '.':
start_dirs = os.getcwd()
for start_dir in start_dirs.split(','):
for root, dirs, files in os.walk(start_dir): # pylint: disable=W0612
if max_depth is not None:
relpath = os.path.relpath(root, start=start_dir)
depth = len(relpath.split(os.sep))
if depth > max_depth:
continue
names = []
for pattern in patterns:
names += fnmatch.filter(files, pattern)
for name in names:
path = os.path.join(root, name)
yield path
fixer_template = """\
#!/usr/bin/env python
def fixit(lines, file_name):
'''Edit files passed to massedit
:param list(str) lines: list of lines contained in the input file
:param str file_name: name of the file the lines were read from
:return: modified lines
:rtype: list(str)
Please modify the logic below (it does not change anything right now)
and apply your logic to the in your directory like this:
massedit -f <file name>:fixit files_to_modify\*
See massedit -h for help and other options.
'''
changed_lines = []
for lineno, line in enumerate(lines):
changed_lines.append(line)
return changed_lines
"""
def generate_fixer_file(output):
"""Generate a template fixer file to be used with --function option."""
with open(output, "w+") as fh:
fh.write(fixer_template)
return
# pylint: disable=too-many-arguments, too-many-locals
def edit_files(patterns, expressions=None,
functions=None, executables=None,
start_dirs=None, max_depth=1, dry_run=True,
output=sys.stdout, encoding=None, newline=None):
"""Process patterns with MassEdit.
Arguments:
patterns: file pattern to identify the files to be processed.
expressions: single python expression to be applied line by line.
functions: functions to process files contents.
executables: os executables to execute on the argument files.
Keyword arguments:
max_depth: maximum recursion level when looking for file matches.
start_dirs: workspace(ies) where to start the file search.
dry_run: only display differences if True. Save modified file otherwise.
output: handle where the output should be redirected.
Return:
list of files processed.
"""
if not is_list(patterns):
raise TypeError("patterns should be a list")
if expressions and not is_list(expressions):
raise TypeError("expressions should be a list of exec expressions")
if functions and not is_list(functions):
raise TypeError("functions should be a list of functions")
if executables and not is_list(executables):
raise TypeError("executables should be a list of program names")
editor = MassEdit(dry_run=dry_run, encoding=encoding, newline=newline)
if expressions:
editor.set_code_exprs(expressions)
if functions:
editor.set_functions(functions)
if executables:
editor.set_executables(executables)
processed_paths = []
for path in get_paths(patterns, start_dirs=start_dirs,
max_depth=max_depth):
try:
diffs = list(editor.edit_file(path))
if dry_run:
# At this point, encoding is the input encoding.
diff = "".join(diffs)
if not diff:
continue
# The encoding of the target output may not match the input
# encoding. If it's defined, we round trip the diff text
# to bytes and back to silence any conversion errors.
encoding = output.encoding
if encoding:
bytes_diff = diff.encode(encoding=encoding, errors='ignore')
diff = bytes_diff.decode(encoding=output.encoding)
output.write(diff)
except UnicodeDecodeError as err:
log.error("failed to process %s: %s", path, err)
continue
processed_paths.append(os.path.abspath(path))
return processed_paths
def main():
"""Main function."""
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
try:
command_line(sys.argv)
finally:
logging.shutdown()
if __name__ == "__main__":
sys.exit(main())
|
elmotec/massedit
|
massedit.py
|
main
|
python
|
def main():
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
try:
command_line(sys.argv)
finally:
logging.shutdown()
|
Main function.
|
train
|
https://github.com/elmotec/massedit/blob/57e22787354896d63a8850312314b19aa0308906/massedit.py#L561-L567
|
[
"def command_line(argv):\n \"\"\"Instantiate an editor and process arguments.\n\n Optional argument:\n - processed_paths: paths processed are appended to the list.\n\n \"\"\"\n arguments = parse_command_line(argv)\n if arguments.generate:\n generate_fixer_file(arguments.generate)\n paths = edit_files(arguments.patterns,\n expressions=arguments.expressions,\n functions=arguments.functions,\n executables=arguments.executables,\n start_dirs=arguments.start_dirs,\n max_depth=arguments.max_depth,\n dry_run=arguments.dry_run,\n output=arguments.output,\n encoding=arguments.encoding,\n newline=arguments.newline)\n # If the output is not sys.stdout, we need to close it because\n # argparse.FileType does not do it for us.\n is_sys = arguments.output in [sys.stdout, sys.stderr]\n if not is_sys and isinstance(arguments.output, io.IOBase):\n arguments.output.close()\n return paths\n"
] |
#!/usr/bin/env python
# encoding: utf-8
"""A python bulk editor class to apply the same code to many files."""
# Copyright (c) 2012-19 Jérôme Lecomte
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import unicode_literals
import os
import shutil
import sys
import logging
import argparse
import difflib
import re # For convenience, pylint: disable=W0611
import fnmatch
import io
import subprocess
import textwrap
__version__ = '0.68.5' # UPDATE setup.py when changing version.
__author__ = 'Jérôme Lecomte'
__license__ = 'MIT'
log = logging.getLogger(__name__)
try:
unicode
except NameError:
unicode = str # pylint: disable=invalid-name, redefined-builtin
def is_list(arg):
"""Factor determination if arg is a list.
Small utility for a better diagnostic because str/unicode are also
iterable.
"""
return iter(arg) and not isinstance(arg, unicode)
def get_function(fn_name):
"""Retrieve the function defined by the function_name.
Arguments:
fn_name: specification of the type module:function_name.
"""
module_name, callable_name = fn_name.split(':')
current = globals()
if not callable_name:
callable_name = module_name
else:
import importlib
try:
module = importlib.import_module(module_name)
except ImportError:
log.error("failed to import %s", module_name)
raise
current = module
for level in callable_name.split('.'):
current = getattr(current, level)
code = current.__code__
if code.co_argcount != 2:
raise ValueError('function should take 2 arguments: lines, file_name')
return current
class MassEdit(object):
"""Mass edit lines of files."""
def __init__(self, **kwds):
"""Initialize MassEdit object.
Args:
- code (byte code object): code to execute on input file.
- function (str or callable): function to call on input file.
- module (str): module name where to find the function.
- executable (str): executable file name to execute on input file.
- dry_run (bool): skip actual modification of input file if True.
"""
self.code_objs = dict()
self._codes = []
self._functions = []
self._executables = []
self.dry_run = None
self.encoding = 'utf-8'
self.newline = None
if 'module' in kwds:
self.import_module(kwds['module'])
if 'code' in kwds:
self.append_code_expr(kwds['code'])
if 'function' in kwds:
self.append_function(kwds['function'])
if 'executable' in kwds:
self.append_executable(kwds['executable'])
if 'dry_run' in kwds:
self.dry_run = kwds['dry_run']
if 'encoding' in kwds:
self.encoding = kwds['encoding']
if 'newline' in kwds:
self.newline = kwds['newline']
@staticmethod
def import_module(module): # pylint: disable=R0201
"""Import module that are needed for the code expr to compile.
Argument:
module (str or list): module(s) to import.
"""
if isinstance(module, list):
all_modules = module
else:
all_modules = [module]
for mod in all_modules:
globals()[mod] = __import__(mod.strip())
@staticmethod
def __edit_line(line, code, code_obj): # pylint: disable=R0201
"""Edit a line with one code object built in the ctor."""
try:
# pylint: disable=eval-used
result = eval(code_obj, globals(), locals())
except TypeError as ex:
log.error("failed to execute %s: %s", code, ex)
raise
if result is None:
log.error("cannot process line '%s' with %s", line, code)
raise RuntimeError('failed to process line')
elif isinstance(result, list) or isinstance(result, tuple):
line = unicode(' '.join([unicode(res_element)
for res_element in result]))
else:
line = unicode(result)
return line
def edit_line(self, line):
"""Edit a single line using the code expression."""
for code, code_obj in self.code_objs.items():
line = self.__edit_line(line, code, code_obj)
return line
def edit_content(self, original_lines, file_name):
"""Processes a file contents.
First processes the contents line by line applying the registered
expressions, then process the resulting contents using the
registered functions.
Arguments:
original_lines (list of str): file content.
file_name (str): name of the file.
"""
lines = [self.edit_line(line) for line in original_lines]
for function in self._functions:
try:
lines = list(function(lines, file_name))
except UnicodeDecodeError as err:
log.error('failed to process %s: %s', file_name, err)
return lines
except Exception as err:
log.error("failed to process %s with code %s: %s",
file_name, function, err)
raise # Let the exception be handled at a higher level.
return lines
def edit_file(self, file_name):
"""Edit file in place, returns a list of modifications (unified diff).
Arguments:
file_name (str, unicode): The name of the file.
"""
with io.open(file_name, "r", encoding=self.encoding) as from_file:
try:
from_lines = from_file.readlines()
except UnicodeDecodeError as err:
log.error("encoding error (see --encoding): %s", err)
raise
if self._executables:
nb_execs = len(self._executables)
if nb_execs > 1:
log.warn("found %d executables. Will use first one", nb_execs)
exec_list = self._executables[0].split()
exec_list.append(file_name)
try:
log.info("running %s...", " ".join(exec_list))
output = subprocess.check_output(exec_list,
universal_newlines=True)
except Exception as err:
log.error("failed to execute %s: %s", " ".join(exec_list), err)
raise # Let the exception be handled at a higher level.
to_lines = output.split(unicode("\n"))
else:
to_lines = from_lines
# unified_diff wants structure of known length. Convert to a list.
to_lines = list(self.edit_content(to_lines, file_name))
diffs = difflib.unified_diff(from_lines, to_lines,
fromfile=file_name, tofile='<new>')
if not self.dry_run:
bak_file_name = file_name + ".bak"
if os.path.exists(bak_file_name):
msg = "{} already exists".format(bak_file_name)
if sys.version_info < (3, 3):
raise OSError(msg)
else:
# noinspection PyCompatibility
# pylint: disable=undefined-variable
raise FileExistsError(msg)
try:
os.rename(file_name, bak_file_name)
with io.open(file_name, 'w', encoding=self.encoding, newline=self.newline) as new:
new.writelines(to_lines)
# Keeps mode of original file.
shutil.copymode(bak_file_name, file_name)
except Exception as err:
log.error("failed to write output to %s: %s", file_name, err)
# Try to recover...
try:
os.rename(bak_file_name, file_name)
except OSError as err:
log.error("failed to restore %s from %s: %s",
file_name, bak_file_name, err)
raise
try:
os.unlink(bak_file_name)
except OSError as err:
log.warning("failed to remove backup %s: %s",
bak_file_name, err)
return list(diffs)
def append_code_expr(self, code):
"""Compile argument and adds it to the list of code objects."""
# expects a string.
if isinstance(code, str) and not isinstance(code, unicode):
code = unicode(code)
if not isinstance(code, unicode):
raise TypeError("string expected")
log.debug("compiling code %s...", code)
try:
code_obj = compile(code, '<string>', 'eval')
self.code_objs[code] = code_obj
except SyntaxError as syntax_err:
log.error("cannot compile %s: %s", code, syntax_err)
raise
log.debug("compiled code %s", code)
def append_function(self, function):
    """Register *function* to be applied to whole-file content.

    A callable is used as-is; a str is resolved with get_function using
    the [module]:?<callable> syntax.

    Argument:
    function (str or callable): function to call on input.
    """
    candidate = function
    if not hasattr(candidate, '__call__'):
        candidate = get_function(candidate)
    if not hasattr(candidate, '__call__'):
        raise ValueError("function is expected to be callable")
    self._functions.append(candidate)
    log.debug("registered %s", candidate.__name__)
def append_executable(self, executable):
    """Register an executable OS command to run on each input file.

    Argument:
    executable (str): os callable executable.
    """
    # Normalize a plain str to unicode (Python 2 compatibility shim).
    if isinstance(executable, str) and not isinstance(executable, unicode):
        executable = unicode(executable)
    if isinstance(executable, unicode):
        self._executables.append(executable)
    else:
        raise TypeError("expected executable name as str, not {}".
                        format(executable.__class__.__name__))
def set_code_exprs(self, codes):
    """Convenience: replace all registered code expressions at once."""
    self.code_objs = dict()
    self._codes = []
    for expression in codes:
        self.append_code_expr(expression)
def set_functions(self, functions):
    """Validate each entry in *functions* and register it for use."""
    for candidate in functions:
        try:
            self.append_function(candidate)
        except (ValueError, AttributeError) as ex:
            log.error("'%s' is not a callable function: %s", candidate, ex)
            raise
def set_executables(self, executables):
    """Validate each entry in *executables* and register it for use."""
    for command in executables:
        self.append_executable(command)
def parse_command_line(argv):
    """Parse command line argument. See -h option.

    Arguments:
    argv: arguments on the command line must include caller file name
    (argv[0] is used for the program name in the examples, and option
    parsing starts at argv[1:]).

    Returns the argparse namespace; calls parser.error() (which exits)
    when none of the action options is supplied.
    """
    import textwrap
    # Usage examples shown verbatim below the option list thanks to
    # RawDescriptionHelpFormatter.
    example = textwrap.dedent("""
    Examples:
    # Simple string substitution (-e). Will show a diff. No changes applied.
    {0} -e "re.sub('failIf', 'assertFalse', line)" *.py
    # File level modifications (-f). Overwrites the files in place (-w).
    {0} -w -f fixer:fixit *.py
    # Will change all test*.py in subdirectories of tests.
    {0} -e "re.sub('failIf', 'assertFalse', line)" -s tests test*.py
    """).format(os.path.basename(argv[0]))
    formatter_class = argparse.RawDescriptionHelpFormatter
    parser = argparse.ArgumentParser(description="Python mass editor",
                                     epilog=example,
                                     formatter_class=formatter_class)
    parser.add_argument("-V", "--version", action="version",
                        version="%(prog)s {}".format(__version__))
    # Dry run is the default; -w flips dry_run to False and writes in place.
    parser.add_argument("-w", "--write", dest="dry_run",
                        action="store_false", default=True,
                        help="modify target file(s) in place. "
                             "Shows diff otherwise.")
    parser.add_argument("-v", "--verbose", dest="verbose_count",
                        action="count", default=0,
                        help="increases log verbosity (can be specified "
                             "multiple times)")
    parser.add_argument("-e", "--expression", dest="expressions", nargs=1,
                        help="Python expressions applied to target files. "
                             "Use the line variable to reference the current line.")
    parser.add_argument("-f", "--function", dest="functions", nargs=1,
                        help="Python function to apply to target file. "
                             "Takes file content as input and yield lines. "
                             "Specify function as [module]:?<function name>.")
    parser.add_argument("-x", "--executable", dest="executables", nargs=1,
                        help="Python executable to apply to target file.")
    parser.add_argument("-s", "--start", dest="start_dirs",
                        help="Directory(ies) from which to look for targets.")
    parser.add_argument("-m", "--max-depth-level", type=int, dest="max_depth",
                        help="Maximum depth when walking subdirectories.")
    parser.add_argument("-o", "--output", metavar="FILE",
                        type=argparse.FileType("w"), default=sys.stdout,
                        help="redirect output to a file")
    parser.add_argument("-g", "--generate", metavar="FILE", type=str,
                        help="generate input file suitable for -f option")
    parser.add_argument("--encoding", dest="encoding",
                        help="Encoding of input and output files")
    parser.add_argument("--newline", dest="newline",
                        help="Newline character for output files")
    parser.add_argument("patterns", metavar="pattern",
                        nargs="*",  # argparse.REMAINDER,
                        help="shell-like file name patterns to process.")
    arguments = parser.parse_args(argv[1:])
    # At least one action option is required; otherwise there is nothing
    # for the tool to do.
    if not (arguments.expressions or
            arguments.functions or
            arguments.generate or
            arguments.executables):
        parser.error(
            '--expression, --function, --generate or --executable missing')
    # Sets log level to WARN going more verbose for each new -V.
    log.setLevel(max(3 - arguments.verbose_count, 0) * 10)
    return arguments
def get_paths(patterns, start_dirs=None, max_depth=1):
    """Yield paths of files that match any of the shell-like *patterns*.

    Arguments:
    patterns: list of fnmatch-style file name patterns.
    start_dirs: comma-separated directory names where the search starts
        (defaults to the current working directory).
    max_depth: maximum directory depth explored below each start dir
        (None disables the limit).
    """
    # Shortcut: a single pattern with a directory component searches only
    # that directory, one level deep.
    if len(patterns) == 1 and not start_dirs:
        lone_pattern = patterns[0]
        dir_part = os.path.dirname(lone_pattern)
        if dir_part:
            patterns = [os.path.basename(lone_pattern)]
            start_dirs = dir_part
            max_depth = 1
    if not start_dirs or start_dirs == '.':
        start_dirs = os.getcwd()
    for start_dir in start_dirs.split(','):
        for root, _subdirs, files in os.walk(start_dir):
            if max_depth is not None:
                relative = os.path.relpath(root, start=start_dir)
                if len(relative.split(os.sep)) > max_depth:
                    continue
            matched = []
            for pattern in patterns:
                matched.extend(fnmatch.filter(files, pattern))
            for file_name in matched:
                yield os.path.join(root, file_name)
fixer_template = """\
#!/usr/bin/env python
def fixit(lines, file_name):
'''Edit files passed to massedit
:param list(str) lines: list of lines contained in the input file
:param str file_name: name of the file the lines were read from
:return: modified lines
:rtype: list(str)
Please modify the logic below (it does not change anything right now)
and apply your logic to the in your directory like this:
massedit -f <file name>:fixit files_to_modify\*
See massedit -h for help and other options.
'''
changed_lines = []
for lineno, line in enumerate(lines):
changed_lines.append(line)
return changed_lines
"""
def generate_fixer_file(output):
    """Write a template fixer module (for the --function option) to *output*."""
    with open(output, "w+") as template_file:
        template_file.write(fixer_template)
# pylint: disable=too-many-arguments, too-many-locals
def edit_files(patterns, expressions=None,
               functions=None, executables=None,
               start_dirs=None, max_depth=1, dry_run=True,
               output=sys.stdout, encoding=None, newline=None):
    """Process patterns with MassEdit.

    Arguments:
    patterns: file pattern to identify the files to be processed.
    expressions: single python expression to be applied line by line.
    functions: functions to process files contents.
    executables: os executables to execute on the argument files.

    Keyword arguments:
    max_depth: maximum recursion level when looking for file matches.
    start_dirs: workspace(ies) where to start the file search.
    dry_run: only display differences if True. Save modified file otherwise.
    output: handle where the output should be redirected.
    encoding: encoding used to read the input files.
    newline: newline style used when writing modified files.

    Return:
    list of files processed (absolute paths).

    Raises:
    TypeError: when any of the list-typed arguments is not a list.
    """
    # Fail fast on wrong argument shapes before touching any file.
    if not is_list(patterns):
        raise TypeError("patterns should be a list")
    if expressions and not is_list(expressions):
        raise TypeError("expressions should be a list of exec expressions")
    if functions and not is_list(functions):
        raise TypeError("functions should be a list of functions")
    if executables and not is_list(executables):
        raise TypeError("executables should be a list of program names")
    editor = MassEdit(dry_run=dry_run, encoding=encoding, newline=newline)
    if expressions:
        editor.set_code_exprs(expressions)
    if functions:
        editor.set_functions(functions)
    if executables:
        editor.set_executables(executables)
    processed_paths = []
    for path in get_paths(patterns, start_dirs=start_dirs,
                          max_depth=max_depth):
        try:
            diffs = list(editor.edit_file(path))
            if dry_run:
                # At this point, encoding is the input encoding.
                diff = "".join(diffs)
                if not diff:
                    continue
                # The encoding of the target output may not match the input
                # encoding. If it's defined, we round trip the diff text
                # to bytes and back to silence any conversion errors.
                # NOTE(review): this deliberately rebinds the 'encoding'
                # parameter to the output stream's encoding.
                encoding = output.encoding
                if encoding:
                    bytes_diff = diff.encode(encoding=encoding, errors='ignore')
                    diff = bytes_diff.decode(encoding=output.encoding)
                output.write(diff)
        except UnicodeDecodeError as err:
            # Best-effort: log and move on to the next file.
            log.error("failed to process %s: %s", path, err)
            continue
        processed_paths.append(os.path.abspath(path))
    return processed_paths
def command_line(argv):
    """Drive the editor from parsed command-line arguments.

    Argument:
    argv: full argument vector including the program name at argv[0].

    Returns the list of paths that were processed.
    """
    options = parse_command_line(argv)
    if options.generate:
        generate_fixer_file(options.generate)
    processed = edit_files(options.patterns,
                           expressions=options.expressions,
                           functions=options.functions,
                           executables=options.executables,
                           start_dirs=options.start_dirs,
                           max_depth=options.max_depth,
                           dry_run=options.dry_run,
                           output=options.output,
                           encoding=options.encoding,
                           newline=options.newline)
    # argparse.FileType does not close the stream for us, so close it here
    # unless the output is one of the standard streams.
    stream = options.output
    if stream not in [sys.stdout, sys.stderr] and isinstance(stream, io.IOBase):
        stream.close()
    return processed
# Script entry point; the exit status is whatever main() returns.
# NOTE(review): main() is defined elsewhere in this module (not visible in
# this chunk) — confirm it exists and wraps command_line(sys.argv).
if __name__ == "__main__":
    sys.exit(main())
|
elmotec/massedit
|
massedit.py
|
MassEdit.import_module
|
python
|
def import_module(module):  # pylint: disable=R0201
    """Import the module(s) needed by compiled code expressions.

    Each imported module is published in globals() so that evaluated
    expressions can reference it by name.

    Argument:
    module (str or list): module(s) to import.
    """
    modules = module if isinstance(module, list) else [module]
    for name in modules:
        globals()[name] = __import__(name.strip())
|
Import the module(s) that are needed for the code expressions to compile.
Argument:
module (str or list): module(s) to import.
|
train
|
https://github.com/elmotec/massedit/blob/57e22787354896d63a8850312314b19aa0308906/massedit.py#L131-L143
| null |
class MassEdit(object):
"""Mass edit lines of files."""
def __init__(self, **kwds):
    """Initialize MassEdit object.

    Args:
    - code (byte code object): code to execute on input file.
    - function (str or callable): function to call on input file.
    - module (str): module name where to find the function.
    - executable (str): executable file name to execute on input file.
    - dry_run (bool): skip actual modification of input file if True.
    - encoding (str): encoding of input and output files.
    - newline (str): newline convention for written files.
    """
    self.code_objs = dict()  # maps expression text -> compiled code object
    self._codes = []  # raw expression strings (reset by set_code_exprs)
    self._functions = []  # callables applied to whole-file content
    self._executables = []  # external commands run on each file
    self.dry_run = None
    self.encoding = 'utf-8'  # default input/output encoding
    self.newline = None  # None means the platform default
    # 'module' is handled before 'code' so imported modules are available
    # in globals() when the expressions are later evaluated.
    if 'module' in kwds:
        self.import_module(kwds['module'])
    if 'code' in kwds:
        self.append_code_expr(kwds['code'])
    if 'function' in kwds:
        self.append_function(kwds['function'])
    if 'executable' in kwds:
        self.append_executable(kwds['executable'])
    if 'dry_run' in kwds:
        self.dry_run = kwds['dry_run']
    if 'encoding' in kwds:
        self.encoding = kwds['encoding']
    if 'newline' in kwds:
        self.newline = kwds['newline']
@staticmethod
def __edit_line(line, code, code_obj):  # pylint: disable=R0201
    """Apply one compiled expression to *line* and return the new line.

    Arguments:
    line (str): current line; the expression references it as `line`.
    code (str): source text of the expression (used in error messages).
    code_obj: compiled code object, evaluated with eval().

    Raises:
    TypeError: propagated when the expression fails to execute.
    RuntimeError: when the expression evaluates to None.
    """
    try:
        # pylint: disable=eval-used
        result = eval(code_obj, globals(), locals())
    except TypeError as ex:
        log.error("failed to execute %s: %s", code, ex)
        raise
    if result is None:
        log.error("cannot process line '%s' with %s", line, code)
        raise RuntimeError('failed to process line')
    elif isinstance(result, (list, tuple)):
        # A sequence result is joined into one space-separated line.
        line = unicode(' '.join([unicode(res_element)
                                 for res_element in result]))
    else:
        line = unicode(result)
    return line
def edit_line(self, line):
    """Apply every registered code expression to *line* in turn."""
    for expression, compiled in self.code_objs.items():
        line = self.__edit_line(line, expression, compiled)
    return line
def edit_content(self, original_lines, file_name):
    """Apply registered expressions, then registered functions, to content.

    The lines are first transformed one by one with the code expressions,
    then the whole list is handed to each registered function.

    Arguments:
    original_lines (list of str): file content.
    file_name (str): name of the file.
    """
    result = [self.edit_line(entry) for entry in original_lines]
    for processor in self._functions:
        try:
            result = list(processor(result, file_name))
        except UnicodeDecodeError as err:
            # Decoding problems stop processing but keep what we have.
            log.error('failed to process %s: %s', file_name, err)
            return result
        except Exception as err:
            log.error("failed to process %s with code %s: %s",
                      file_name, processor, err)
            raise  # Let the exception be handled at a higher level.
    return result
def edit_file(self, file_name):
    """Edit file in place, returns a list of modifications (unified diff).

    Reads the file, produces the new content (either via the registered
    executable or via expressions/functions), and — unless dry_run is set —
    writes the result back, keeping a .bak copy during the write.

    Arguments:
    file_name (str, unicode): The name of the file.

    Raises:
    UnicodeDecodeError: when the input cannot be decoded (see --encoding).
    OSError / FileExistsError: when a stale .bak file is in the way.
    """
    with io.open(file_name, "r", encoding=self.encoding) as from_file:
        try:
            from_lines = from_file.readlines()
        except UnicodeDecodeError as err:
            log.error("encoding error (see --encoding): %s", err)
            raise
    if self._executables:
        nb_execs = len(self._executables)
        if nb_execs > 1:
            # log.warn is deprecated (removed in Python 3.13); use warning.
            log.warning("found %d executables. Will use first one", nb_execs)
        exec_list = self._executables[0].split()
        exec_list.append(file_name)
        try:
            log.info("running %s...", " ".join(exec_list))
            output = subprocess.check_output(exec_list,
                                             universal_newlines=True)
        except Exception as err:
            log.error("failed to execute %s: %s", " ".join(exec_list), err)
            raise  # Let the exception be handled at a higher level.
        to_lines = output.split(unicode("\n"))
    else:
        to_lines = from_lines
    # unified_diff wants structure of known length. Convert to a list.
    to_lines = list(self.edit_content(to_lines, file_name))
    diffs = difflib.unified_diff(from_lines, to_lines,
                                 fromfile=file_name, tofile='<new>')
    if not self.dry_run:
        bak_file_name = file_name + ".bak"
        # Refuse to clobber a backup left over from a previous run.
        if os.path.exists(bak_file_name):
            msg = "{} already exists".format(bak_file_name)
            if sys.version_info < (3, 3):
                raise OSError(msg)
            else:
                # noinspection PyCompatibility
                # pylint: disable=undefined-variable
                raise FileExistsError(msg)
        try:
            # Rename-then-rewrite so the original survives as .bak until
            # the new content is safely on disk.
            os.rename(file_name, bak_file_name)
            with io.open(file_name, 'w', encoding=self.encoding, newline=self.newline) as new:
                new.writelines(to_lines)
            # Keeps mode of original file.
            shutil.copymode(bak_file_name, file_name)
        except Exception as err:
            log.error("failed to write output to %s: %s", file_name, err)
            # Try to recover by restoring the original from the backup.
            try:
                os.rename(bak_file_name, file_name)
            except OSError as err:
                log.error("failed to restore %s from %s: %s",
                          file_name, bak_file_name, err)
            raise
        try:
            os.unlink(bak_file_name)
        except OSError as err:
            # Leftover backup is only worth a warning, not a failure.
            log.warning("failed to remove backup %s: %s",
                        bak_file_name, err)
    return list(diffs)
def append_code_expr(self, code):
"""Compile argument and adds it to the list of code objects."""
# expects a string.
if isinstance(code, str) and not isinstance(code, unicode):
code = unicode(code)
if not isinstance(code, unicode):
raise TypeError("string expected")
log.debug("compiling code %s...", code)
try:
code_obj = compile(code, '<string>', 'eval')
self.code_objs[code] = code_obj
except SyntaxError as syntax_err:
log.error("cannot compile %s: %s", code, syntax_err)
raise
log.debug("compiled code %s", code)
def append_function(self, function):
"""Append the function to the list of functions to be called.
If the function is already a callable, use it. If it's a type str
try to interpret it as [module]:?<callable>, load the module
if there is one and retrieve the callable.
Argument:
function (str or callable): function to call on input.
"""
if not hasattr(function, '__call__'):
function = get_function(function)
if not hasattr(function, '__call__'):
raise ValueError("function is expected to be callable")
self._functions.append(function)
log.debug("registered %s", function.__name__)
def append_executable(self, executable):
"""Append san executable os command to the list to be called.
Argument:
executable (str): os callable executable.
"""
if isinstance(executable, str) and not isinstance(executable, unicode):
executable = unicode(executable)
if not isinstance(executable, unicode):
raise TypeError("expected executable name as str, not {}".
format(executable.__class__.__name__))
self._executables.append(executable)
def set_code_exprs(self, codes):
"""Convenience: sets all the code expressions at once."""
self.code_objs = dict()
self._codes = []
for code in codes:
self.append_code_expr(code)
def set_functions(self, functions):
"""Check functions passed as argument and set them to be used."""
for func in functions:
try:
self.append_function(func)
except (ValueError, AttributeError) as ex:
log.error("'%s' is not a callable function: %s", func, ex)
raise
def set_executables(self, executables):
"""Check and set the executables to be used."""
for exc in executables:
self.append_executable(exc)
|
elmotec/massedit
|
massedit.py
|
MassEdit.__edit_line
|
python
|
def __edit_line(line, code, code_obj):  # pylint: disable=R0201
    """Evaluate one compiled code object against *line*; return the result."""
    try:
        # pylint: disable=eval-used
        result = eval(code_obj, globals(), locals())
    except TypeError as ex:
        log.error("failed to execute %s: %s", code, ex)
        raise
    if result is None:
        log.error("cannot process line '%s' with %s", line, code)
        raise RuntimeError('failed to process line')
    if isinstance(result, list) or isinstance(result, tuple):
        return unicode(' '.join(unicode(item) for item in result))
    return unicode(result)
|
Edit a line with one code object built in the ctor.
|
train
|
https://github.com/elmotec/massedit/blob/57e22787354896d63a8850312314b19aa0308906/massedit.py#L146-L162
| null |
class MassEdit(object):
"""Mass edit lines of files."""
def __init__(self, **kwds):
"""Initialize MassEdit object.
Args:
- code (byte code object): code to execute on input file.
- function (str or callable): function to call on input file.
- module (str): module name where to find the function.
- executable (str): executable file name to execute on input file.
- dry_run (bool): skip actual modification of input file if True.
"""
self.code_objs = dict()
self._codes = []
self._functions = []
self._executables = []
self.dry_run = None
self.encoding = 'utf-8'
self.newline = None
if 'module' in kwds:
self.import_module(kwds['module'])
if 'code' in kwds:
self.append_code_expr(kwds['code'])
if 'function' in kwds:
self.append_function(kwds['function'])
if 'executable' in kwds:
self.append_executable(kwds['executable'])
if 'dry_run' in kwds:
self.dry_run = kwds['dry_run']
if 'encoding' in kwds:
self.encoding = kwds['encoding']
if 'newline' in kwds:
self.newline = kwds['newline']
@staticmethod
def import_module(module): # pylint: disable=R0201
"""Import module that are needed for the code expr to compile.
Argument:
module (str or list): module(s) to import.
"""
if isinstance(module, list):
all_modules = module
else:
all_modules = [module]
for mod in all_modules:
globals()[mod] = __import__(mod.strip())
# Regular instance method: it reads self.code_objs and dispatches through
# self, so it must NOT carry the stray @staticmethod decorator that was
# left behind here.
def edit_line(self, line):
    """Edit a single line using the code expression."""
    for code, code_obj in self.code_objs.items():
        line = self.__edit_line(line, code, code_obj)
    return line
def edit_content(self, original_lines, file_name):
"""Processes a file contents.
First processes the contents line by line applying the registered
expressions, then process the resulting contents using the
registered functions.
Arguments:
original_lines (list of str): file content.
file_name (str): name of the file.
"""
lines = [self.edit_line(line) for line in original_lines]
for function in self._functions:
try:
lines = list(function(lines, file_name))
except UnicodeDecodeError as err:
log.error('failed to process %s: %s', file_name, err)
return lines
except Exception as err:
log.error("failed to process %s with code %s: %s",
file_name, function, err)
raise # Let the exception be handled at a higher level.
return lines
def edit_file(self, file_name):
"""Edit file in place, returns a list of modifications (unified diff).
Arguments:
file_name (str, unicode): The name of the file.
"""
with io.open(file_name, "r", encoding=self.encoding) as from_file:
try:
from_lines = from_file.readlines()
except UnicodeDecodeError as err:
log.error("encoding error (see --encoding): %s", err)
raise
if self._executables:
nb_execs = len(self._executables)
if nb_execs > 1:
log.warn("found %d executables. Will use first one", nb_execs)
exec_list = self._executables[0].split()
exec_list.append(file_name)
try:
log.info("running %s...", " ".join(exec_list))
output = subprocess.check_output(exec_list,
universal_newlines=True)
except Exception as err:
log.error("failed to execute %s: %s", " ".join(exec_list), err)
raise # Let the exception be handled at a higher level.
to_lines = output.split(unicode("\n"))
else:
to_lines = from_lines
# unified_diff wants structure of known length. Convert to a list.
to_lines = list(self.edit_content(to_lines, file_name))
diffs = difflib.unified_diff(from_lines, to_lines,
fromfile=file_name, tofile='<new>')
if not self.dry_run:
bak_file_name = file_name + ".bak"
if os.path.exists(bak_file_name):
msg = "{} already exists".format(bak_file_name)
if sys.version_info < (3, 3):
raise OSError(msg)
else:
# noinspection PyCompatibility
# pylint: disable=undefined-variable
raise FileExistsError(msg)
try:
os.rename(file_name, bak_file_name)
with io.open(file_name, 'w', encoding=self.encoding, newline=self.newline) as new:
new.writelines(to_lines)
# Keeps mode of original file.
shutil.copymode(bak_file_name, file_name)
except Exception as err:
log.error("failed to write output to %s: %s", file_name, err)
# Try to recover...
try:
os.rename(bak_file_name, file_name)
except OSError as err:
log.error("failed to restore %s from %s: %s",
file_name, bak_file_name, err)
raise
try:
os.unlink(bak_file_name)
except OSError as err:
log.warning("failed to remove backup %s: %s",
bak_file_name, err)
return list(diffs)
def append_code_expr(self, code):
"""Compile argument and adds it to the list of code objects."""
# expects a string.
if isinstance(code, str) and not isinstance(code, unicode):
code = unicode(code)
if not isinstance(code, unicode):
raise TypeError("string expected")
log.debug("compiling code %s...", code)
try:
code_obj = compile(code, '<string>', 'eval')
self.code_objs[code] = code_obj
except SyntaxError as syntax_err:
log.error("cannot compile %s: %s", code, syntax_err)
raise
log.debug("compiled code %s", code)
def append_function(self, function):
"""Append the function to the list of functions to be called.
If the function is already a callable, use it. If it's a type str
try to interpret it as [module]:?<callable>, load the module
if there is one and retrieve the callable.
Argument:
function (str or callable): function to call on input.
"""
if not hasattr(function, '__call__'):
function = get_function(function)
if not hasattr(function, '__call__'):
raise ValueError("function is expected to be callable")
self._functions.append(function)
log.debug("registered %s", function.__name__)
def append_executable(self, executable):
"""Append san executable os command to the list to be called.
Argument:
executable (str): os callable executable.
"""
if isinstance(executable, str) and not isinstance(executable, unicode):
executable = unicode(executable)
if not isinstance(executable, unicode):
raise TypeError("expected executable name as str, not {}".
format(executable.__class__.__name__))
self._executables.append(executable)
def set_code_exprs(self, codes):
"""Convenience: sets all the code expressions at once."""
self.code_objs = dict()
self._codes = []
for code in codes:
self.append_code_expr(code)
def set_functions(self, functions):
"""Check functions passed as argument and set them to be used."""
for func in functions:
try:
self.append_function(func)
except (ValueError, AttributeError) as ex:
log.error("'%s' is not a callable function: %s", func, ex)
raise
def set_executables(self, executables):
"""Check and set the executables to be used."""
for exc in executables:
self.append_executable(exc)
|
elmotec/massedit
|
massedit.py
|
MassEdit.edit_line
|
python
|
def edit_line(self, line):
    """Run every registered code expression over *line* and return it."""
    for expression, compiled in self.code_objs.items():
        line = self.__edit_line(line, expression, compiled)
    return line
|
Edit a single line using the code expression.
|
train
|
https://github.com/elmotec/massedit/blob/57e22787354896d63a8850312314b19aa0308906/massedit.py#L164-L168
|
[
"def __edit_line(line, code, code_obj): # pylint: disable=R0201\n \"\"\"Edit a line with one code object built in the ctor.\"\"\"\n try:\n # pylint: disable=eval-used\n result = eval(code_obj, globals(), locals())\n except TypeError as ex:\n log.error(\"failed to execute %s: %s\", code, ex)\n raise\n if result is None:\n log.error(\"cannot process line '%s' with %s\", line, code)\n raise RuntimeError('failed to process line')\n elif isinstance(result, list) or isinstance(result, tuple):\n line = unicode(' '.join([unicode(res_element)\n for res_element in result]))\n else:\n line = unicode(result)\n return line\n"
] |
class MassEdit(object):
"""Mass edit lines of files."""
def __init__(self, **kwds):
"""Initialize MassEdit object.
Args:
- code (byte code object): code to execute on input file.
- function (str or callable): function to call on input file.
- module (str): module name where to find the function.
- executable (str): executable file name to execute on input file.
- dry_run (bool): skip actual modification of input file if True.
"""
self.code_objs = dict()
self._codes = []
self._functions = []
self._executables = []
self.dry_run = None
self.encoding = 'utf-8'
self.newline = None
if 'module' in kwds:
self.import_module(kwds['module'])
if 'code' in kwds:
self.append_code_expr(kwds['code'])
if 'function' in kwds:
self.append_function(kwds['function'])
if 'executable' in kwds:
self.append_executable(kwds['executable'])
if 'dry_run' in kwds:
self.dry_run = kwds['dry_run']
if 'encoding' in kwds:
self.encoding = kwds['encoding']
if 'newline' in kwds:
self.newline = kwds['newline']
@staticmethod
def import_module(module): # pylint: disable=R0201
"""Import module that are needed for the code expr to compile.
Argument:
module (str or list): module(s) to import.
"""
if isinstance(module, list):
all_modules = module
else:
all_modules = [module]
for mod in all_modules:
globals()[mod] = __import__(mod.strip())
@staticmethod
def __edit_line(line, code, code_obj): # pylint: disable=R0201
"""Edit a line with one code object built in the ctor."""
try:
# pylint: disable=eval-used
result = eval(code_obj, globals(), locals())
except TypeError as ex:
log.error("failed to execute %s: %s", code, ex)
raise
if result is None:
log.error("cannot process line '%s' with %s", line, code)
raise RuntimeError('failed to process line')
elif isinstance(result, list) or isinstance(result, tuple):
line = unicode(' '.join([unicode(res_element)
for res_element in result]))
else:
line = unicode(result)
return line
def edit_content(self, original_lines, file_name):
"""Processes a file contents.
First processes the contents line by line applying the registered
expressions, then process the resulting contents using the
registered functions.
Arguments:
original_lines (list of str): file content.
file_name (str): name of the file.
"""
lines = [self.edit_line(line) for line in original_lines]
for function in self._functions:
try:
lines = list(function(lines, file_name))
except UnicodeDecodeError as err:
log.error('failed to process %s: %s', file_name, err)
return lines
except Exception as err:
log.error("failed to process %s with code %s: %s",
file_name, function, err)
raise # Let the exception be handled at a higher level.
return lines
def edit_file(self, file_name):
"""Edit file in place, returns a list of modifications (unified diff).
Arguments:
file_name (str, unicode): The name of the file.
"""
with io.open(file_name, "r", encoding=self.encoding) as from_file:
try:
from_lines = from_file.readlines()
except UnicodeDecodeError as err:
log.error("encoding error (see --encoding): %s", err)
raise
if self._executables:
nb_execs = len(self._executables)
if nb_execs > 1:
log.warn("found %d executables. Will use first one", nb_execs)
exec_list = self._executables[0].split()
exec_list.append(file_name)
try:
log.info("running %s...", " ".join(exec_list))
output = subprocess.check_output(exec_list,
universal_newlines=True)
except Exception as err:
log.error("failed to execute %s: %s", " ".join(exec_list), err)
raise # Let the exception be handled at a higher level.
to_lines = output.split(unicode("\n"))
else:
to_lines = from_lines
# unified_diff wants structure of known length. Convert to a list.
to_lines = list(self.edit_content(to_lines, file_name))
diffs = difflib.unified_diff(from_lines, to_lines,
fromfile=file_name, tofile='<new>')
if not self.dry_run:
bak_file_name = file_name + ".bak"
if os.path.exists(bak_file_name):
msg = "{} already exists".format(bak_file_name)
if sys.version_info < (3, 3):
raise OSError(msg)
else:
# noinspection PyCompatibility
# pylint: disable=undefined-variable
raise FileExistsError(msg)
try:
os.rename(file_name, bak_file_name)
with io.open(file_name, 'w', encoding=self.encoding, newline=self.newline) as new:
new.writelines(to_lines)
# Keeps mode of original file.
shutil.copymode(bak_file_name, file_name)
except Exception as err:
log.error("failed to write output to %s: %s", file_name, err)
# Try to recover...
try:
os.rename(bak_file_name, file_name)
except OSError as err:
log.error("failed to restore %s from %s: %s",
file_name, bak_file_name, err)
raise
try:
os.unlink(bak_file_name)
except OSError as err:
log.warning("failed to remove backup %s: %s",
bak_file_name, err)
return list(diffs)
def append_code_expr(self, code):
"""Compile argument and adds it to the list of code objects."""
# expects a string.
if isinstance(code, str) and not isinstance(code, unicode):
code = unicode(code)
if not isinstance(code, unicode):
raise TypeError("string expected")
log.debug("compiling code %s...", code)
try:
code_obj = compile(code, '<string>', 'eval')
self.code_objs[code] = code_obj
except SyntaxError as syntax_err:
log.error("cannot compile %s: %s", code, syntax_err)
raise
log.debug("compiled code %s", code)
def append_function(self, function):
"""Append the function to the list of functions to be called.
If the function is already a callable, use it. If it's a type str
try to interpret it as [module]:?<callable>, load the module
if there is one and retrieve the callable.
Argument:
function (str or callable): function to call on input.
"""
if not hasattr(function, '__call__'):
function = get_function(function)
if not hasattr(function, '__call__'):
raise ValueError("function is expected to be callable")
self._functions.append(function)
log.debug("registered %s", function.__name__)
def append_executable(self, executable):
"""Append san executable os command to the list to be called.
Argument:
executable (str): os callable executable.
"""
if isinstance(executable, str) and not isinstance(executable, unicode):
executable = unicode(executable)
if not isinstance(executable, unicode):
raise TypeError("expected executable name as str, not {}".
format(executable.__class__.__name__))
self._executables.append(executable)
def set_code_exprs(self, codes):
"""Convenience: sets all the code expressions at once."""
self.code_objs = dict()
self._codes = []
for code in codes:
self.append_code_expr(code)
def set_functions(self, functions):
"""Check functions passed as argument and set them to be used."""
for func in functions:
try:
self.append_function(func)
except (ValueError, AttributeError) as ex:
log.error("'%s' is not a callable function: %s", func, ex)
raise
def set_executables(self, executables):
"""Check and set the executables to be used."""
for exc in executables:
self.append_executable(exc)
|
elmotec/massedit
|
massedit.py
|
MassEdit.edit_content
|
python
|
def edit_content(self, original_lines, file_name):
    """Transform file content: expressions line by line, then functions."""
    result = [self.edit_line(entry) for entry in original_lines]
    for processor in self._functions:
        try:
            result = list(processor(result, file_name))
        except UnicodeDecodeError as err:
            log.error('failed to process %s: %s', file_name, err)
            return result
        except Exception as err:
            log.error("failed to process %s with code %s: %s",
                      file_name, processor, err)
            raise  # Let the exception be handled at a higher level.
    return result
|
Processes a file contents.
First processes the contents line by line applying the registered
expressions, then process the resulting contents using the
registered functions.
Arguments:
original_lines (list of str): file content.
file_name (str): name of the file.
|
train
|
https://github.com/elmotec/massedit/blob/57e22787354896d63a8850312314b19aa0308906/massedit.py#L170-L193
| null |
class MassEdit(object):
    """Mass edit lines of files.

    Applies registered code expressions, callables and/or an external
    executable to file contents, optionally rewriting files in place
    (keeping a ``.bak`` backup) and returning unified diffs.
    """

    def __init__(self, **kwds):
        """Initialize MassEdit object.

        Args:
        - code (byte code object): code to execute on input file.
        - function (str or callable): function to call on input file.
        - module (str): module name where to find the function.
        - executable (str): executable file name to execute on input file.
        - dry_run (bool): skip actual modification of input file if True.
        - encoding (str): text encoding used to read/write files.
        - newline (str): newline convention used when writing files.
        """
        self.code_objs = dict()
        self._codes = []
        self._functions = []
        self._executables = []
        self.dry_run = None
        self.encoding = 'utf-8'
        self.newline = None
        if 'module' in kwds:
            self.import_module(kwds['module'])
        if 'code' in kwds:
            self.append_code_expr(kwds['code'])
        if 'function' in kwds:
            self.append_function(kwds['function'])
        if 'executable' in kwds:
            self.append_executable(kwds['executable'])
        if 'dry_run' in kwds:
            self.dry_run = kwds['dry_run']
        if 'encoding' in kwds:
            self.encoding = kwds['encoding']
        if 'newline' in kwds:
            self.newline = kwds['newline']

    @staticmethod
    def import_module(module):  # pylint: disable=R0201
        """Import module(s) needed for the code expressions to compile.

        Argument:
        module (str or list): module(s) to import.
        """
        if isinstance(module, list):
            all_modules = module
        else:
            all_modules = [module]
        for mod in all_modules:
            # Injected into globals() so eval'd code expressions can see it.
            globals()[mod] = __import__(mod.strip())

    @staticmethod
    def __edit_line(line, code, code_obj):  # pylint: disable=R0201
        """Edit a line with one code object built in the ctor."""
        # NOTE: the eval'd user expression reads the local variable `line`
        # through locals(); do not rename it.
        try:
            # pylint: disable=eval-used
            result = eval(code_obj, globals(), locals())
        except TypeError as ex:
            log.error("failed to execute %s: %s", code, ex)
            raise
        if result is None:
            log.error("cannot process line '%s' with %s", line, code)
            raise RuntimeError('failed to process line')
        elif isinstance(result, list) or isinstance(result, tuple):
            line = unicode(' '.join([unicode(res_element)
                                     for res_element in result]))
        else:
            line = unicode(result)
        return line

    def edit_line(self, line):
        """Edit a single line using the code expression."""
        for code, code_obj in self.code_objs.items():
            line = self.__edit_line(line, code, code_obj)
        return line

    def edit_file(self, file_name):
        """Edit file in place, returns a list of modifications (unified diff).

        Arguments:
        file_name (str, unicode): The name of the file.
        """
        with io.open(file_name, "r", encoding=self.encoding) as from_file:
            try:
                from_lines = from_file.readlines()
            except UnicodeDecodeError as err:
                log.error("encoding error (see --encoding): %s", err)
                raise
        if self._executables:
            # Only the first registered executable is ever run.
            nb_execs = len(self._executables)
            if nb_execs > 1:
                log.warn("found %d executables. Will use first one", nb_execs)
            exec_list = self._executables[0].split()
            exec_list.append(file_name)
            try:
                log.info("running %s...", " ".join(exec_list))
                output = subprocess.check_output(exec_list,
                                                 universal_newlines=True)
            except Exception as err:
                log.error("failed to execute %s: %s", " ".join(exec_list), err)
                raise  # Let the exception be handled at a higher level.
            to_lines = output.split(unicode("\n"))
        else:
            to_lines = from_lines
        # unified_diff wants structure of known length. Convert to a list.
        to_lines = list(self.edit_content(to_lines, file_name))
        diffs = difflib.unified_diff(from_lines, to_lines,
                                     fromfile=file_name, tofile='<new>')
        if not self.dry_run:
            # Refuse to clobber an existing backup from a previous run.
            bak_file_name = file_name + ".bak"
            if os.path.exists(bak_file_name):
                msg = "{} already exists".format(bak_file_name)
                if sys.version_info < (3, 3):
                    raise OSError(msg)
                else:
                    # noinspection PyCompatibility
                    # pylint: disable=undefined-variable
                    raise FileExistsError(msg)
            try:
                os.rename(file_name, bak_file_name)
                with io.open(file_name, 'w', encoding=self.encoding, newline=self.newline) as new:
                    new.writelines(to_lines)
                # Keeps mode of original file.
                shutil.copymode(bak_file_name, file_name)
            except Exception as err:
                log.error("failed to write output to %s: %s", file_name, err)
                # Try to recover: restore the backup over the original name.
                try:
                    os.rename(bak_file_name, file_name)
                except OSError as err:
                    log.error("failed to restore %s from %s: %s",
                              file_name, bak_file_name, err)
                raise
            try:
                os.unlink(bak_file_name)
            except OSError as err:
                # Non-fatal: a leftover .bak only wastes disk space.
                log.warning("failed to remove backup %s: %s",
                            bak_file_name, err)
        return list(diffs)

    def append_code_expr(self, code):
        """Compile argument and adds it to the list of code objects."""
        # expects a string (py2: coerce byte str to unicode first).
        if isinstance(code, str) and not isinstance(code, unicode):
            code = unicode(code)
        if not isinstance(code, unicode):
            raise TypeError("string expected")
        log.debug("compiling code %s...", code)
        try:
            code_obj = compile(code, '<string>', 'eval')
            self.code_objs[code] = code_obj
        except SyntaxError as syntax_err:
            log.error("cannot compile %s: %s", code, syntax_err)
            raise
        log.debug("compiled code %s", code)

    def append_function(self, function):
        """Append the function to the list of functions to be called.

        If the function is already a callable, use it. If it's a type str
        try to interpret it as [module]:?<callable>, load the module
        if there is one and retrieve the callable.

        Argument:
        function (str or callable): function to call on input.
        """
        if not hasattr(function, '__call__'):
            function = get_function(function)
        if not hasattr(function, '__call__'):
            raise ValueError("function is expected to be callable")
        self._functions.append(function)
        log.debug("registered %s", function.__name__)

    def append_executable(self, executable):
        """Append an executable os command to the list to be called.

        Argument:
        executable (str): os callable executable.
        """
        if isinstance(executable, str) and not isinstance(executable, unicode):
            executable = unicode(executable)
        if not isinstance(executable, unicode):
            raise TypeError("expected executable name as str, not {}".
                            format(executable.__class__.__name__))
        self._executables.append(executable)

    def set_code_exprs(self, codes):
        """Convenience: sets all the code expressions at once."""
        self.code_objs = dict()
        self._codes = []
        for code in codes:
            self.append_code_expr(code)

    def set_functions(self, functions):
        """Check functions passed as argument and set them to be used."""
        for func in functions:
            try:
                self.append_function(func)
            except (ValueError, AttributeError) as ex:
                log.error("'%s' is not a callable function: %s", func, ex)
                raise

    def set_executables(self, executables):
        """Check and set the executables to be used."""
        for exc in executables:
            self.append_executable(exc)
|
elmotec/massedit
|
massedit.py
|
MassEdit.edit_file
|
python
|
def edit_file(self, file_name):
with io.open(file_name, "r", encoding=self.encoding) as from_file:
try:
from_lines = from_file.readlines()
except UnicodeDecodeError as err:
log.error("encoding error (see --encoding): %s", err)
raise
if self._executables:
nb_execs = len(self._executables)
if nb_execs > 1:
log.warn("found %d executables. Will use first one", nb_execs)
exec_list = self._executables[0].split()
exec_list.append(file_name)
try:
log.info("running %s...", " ".join(exec_list))
output = subprocess.check_output(exec_list,
universal_newlines=True)
except Exception as err:
log.error("failed to execute %s: %s", " ".join(exec_list), err)
raise # Let the exception be handled at a higher level.
to_lines = output.split(unicode("\n"))
else:
to_lines = from_lines
# unified_diff wants structure of known length. Convert to a list.
to_lines = list(self.edit_content(to_lines, file_name))
diffs = difflib.unified_diff(from_lines, to_lines,
fromfile=file_name, tofile='<new>')
if not self.dry_run:
bak_file_name = file_name + ".bak"
if os.path.exists(bak_file_name):
msg = "{} already exists".format(bak_file_name)
if sys.version_info < (3, 3):
raise OSError(msg)
else:
# noinspection PyCompatibility
# pylint: disable=undefined-variable
raise FileExistsError(msg)
try:
os.rename(file_name, bak_file_name)
with io.open(file_name, 'w', encoding=self.encoding, newline=self.newline) as new:
new.writelines(to_lines)
# Keeps mode of original file.
shutil.copymode(bak_file_name, file_name)
except Exception as err:
log.error("failed to write output to %s: %s", file_name, err)
# Try to recover...
try:
os.rename(bak_file_name, file_name)
except OSError as err:
log.error("failed to restore %s from %s: %s",
file_name, bak_file_name, err)
raise
try:
os.unlink(bak_file_name)
except OSError as err:
log.warning("failed to remove backup %s: %s",
bak_file_name, err)
return list(diffs)
|
Edit file in place, returns a list of modifications (unified diff).
Arguments:
file_name (str, unicode): The name of the file.
|
train
|
https://github.com/elmotec/massedit/blob/57e22787354896d63a8850312314b19aa0308906/massedit.py#L195-L260
|
[
"def edit_content(self, original_lines, file_name):\n \"\"\"Processes a file contents.\n\n First processes the contents line by line applying the registered\n expressions, then process the resulting contents using the\n registered functions.\n\n Arguments:\n original_lines (list of str): file content.\n file_name (str): name of the file.\n\n \"\"\"\n lines = [self.edit_line(line) for line in original_lines]\n for function in self._functions:\n try:\n lines = list(function(lines, file_name))\n except UnicodeDecodeError as err:\n log.error('failed to process %s: %s', file_name, err)\n return lines\n except Exception as err:\n log.error(\"failed to process %s with code %s: %s\",\n file_name, function, err)\n raise # Let the exception be handled at a higher level.\n return lines\n"
] |
class MassEdit(object):
    """Mass edit lines of files.

    Applies registered code expressions and callables to file contents,
    line by line and then content-wide.
    """

    def __init__(self, **kwds):
        """Initialize MassEdit object.

        Args:
        - code (byte code object): code to execute on input file.
        - function (str or callable): function to call on input file.
        - module (str): module name where to find the function.
        - executable (str): executable file name to execute on input file.
        - dry_run (bool): skip actual modification of input file if True.
        - encoding (str): text encoding used to read/write files.
        - newline (str): newline convention used when writing files.
        """
        self.code_objs = dict()
        self._codes = []
        self._functions = []
        self._executables = []
        self.dry_run = None
        self.encoding = 'utf-8'
        self.newline = None
        if 'module' in kwds:
            self.import_module(kwds['module'])
        if 'code' in kwds:
            self.append_code_expr(kwds['code'])
        if 'function' in kwds:
            self.append_function(kwds['function'])
        if 'executable' in kwds:
            self.append_executable(kwds['executable'])
        if 'dry_run' in kwds:
            self.dry_run = kwds['dry_run']
        if 'encoding' in kwds:
            self.encoding = kwds['encoding']
        if 'newline' in kwds:
            self.newline = kwds['newline']

    @staticmethod
    def import_module(module):  # pylint: disable=R0201
        """Import module(s) needed for the code expressions to compile.

        Argument:
        module (str or list): module(s) to import.
        """
        if isinstance(module, list):
            all_modules = module
        else:
            all_modules = [module]
        for mod in all_modules:
            # Injected into globals() so eval'd code expressions can see it.
            globals()[mod] = __import__(mod.strip())

    @staticmethod
    def __edit_line(line, code, code_obj):  # pylint: disable=R0201
        """Edit a line with one code object built in the ctor."""
        # NOTE: the eval'd user expression reads the local variable `line`
        # through locals(); do not rename it.
        try:
            # pylint: disable=eval-used
            result = eval(code_obj, globals(), locals())
        except TypeError as ex:
            log.error("failed to execute %s: %s", code, ex)
            raise
        if result is None:
            log.error("cannot process line '%s' with %s", line, code)
            raise RuntimeError('failed to process line')
        elif isinstance(result, list) or isinstance(result, tuple):
            line = unicode(' '.join([unicode(res_element)
                                     for res_element in result]))
        else:
            line = unicode(result)
        return line

    def edit_line(self, line):
        """Edit a single line using the code expression."""
        for code, code_obj in self.code_objs.items():
            line = self.__edit_line(line, code, code_obj)
        return line

    def edit_content(self, original_lines, file_name):
        """Process a file's contents.

        First processes the contents line by line applying the registered
        expressions, then process the resulting contents using the
        registered functions.

        Arguments:
        original_lines (list of str): file content.
        file_name (str): name of the file.
        """
        lines = [self.edit_line(line) for line in original_lines]
        for function in self._functions:
            try:
                lines = list(function(lines, file_name))
            except UnicodeDecodeError as err:
                # Decoding failure: return what we have so far.
                log.error('failed to process %s: %s', file_name, err)
                return lines
            except Exception as err:
                log.error("failed to process %s with code %s: %s",
                          file_name, function, err)
                raise  # Let the exception be handled at a higher level.
        return lines

    def append_code_expr(self, code):
        """Compile argument and adds it to the list of code objects."""
        # expects a string (py2: coerce byte str to unicode first).
        if isinstance(code, str) and not isinstance(code, unicode):
            code = unicode(code)
        if not isinstance(code, unicode):
            raise TypeError("string expected")
        log.debug("compiling code %s...", code)
        try:
            code_obj = compile(code, '<string>', 'eval')
            self.code_objs[code] = code_obj
        except SyntaxError as syntax_err:
            log.error("cannot compile %s: %s", code, syntax_err)
            raise
        log.debug("compiled code %s", code)

    def append_function(self, function):
        """Append the function to the list of functions to be called.

        If the function is already a callable, use it. If it's a type str
        try to interpret it as [module]:?<callable>, load the module
        if there is one and retrieve the callable.

        Argument:
        function (str or callable): function to call on input.
        """
        if not hasattr(function, '__call__'):
            function = get_function(function)
        if not hasattr(function, '__call__'):
            raise ValueError("function is expected to be callable")
        self._functions.append(function)
        log.debug("registered %s", function.__name__)

    def append_executable(self, executable):
        """Append an executable os command to the list to be called.

        Argument:
        executable (str): os callable executable.
        """
        if isinstance(executable, str) and not isinstance(executable, unicode):
            executable = unicode(executable)
        if not isinstance(executable, unicode):
            raise TypeError("expected executable name as str, not {}".
                            format(executable.__class__.__name__))
        self._executables.append(executable)

    def set_code_exprs(self, codes):
        """Convenience: sets all the code expressions at once."""
        self.code_objs = dict()
        self._codes = []
        for code in codes:
            self.append_code_expr(code)

    def set_functions(self, functions):
        """Check functions passed as argument and set them to be used."""
        for func in functions:
            try:
                self.append_function(func)
            except (ValueError, AttributeError) as ex:
                log.error("'%s' is not a callable function: %s", func, ex)
                raise

    def set_executables(self, executables):
        """Check and set the executables to be used."""
        for exc in executables:
            self.append_executable(exc)
|
elmotec/massedit
|
massedit.py
|
MassEdit.append_code_expr
|
python
|
def append_code_expr(self, code):
# expects a string.
if isinstance(code, str) and not isinstance(code, unicode):
code = unicode(code)
if not isinstance(code, unicode):
raise TypeError("string expected")
log.debug("compiling code %s...", code)
try:
code_obj = compile(code, '<string>', 'eval')
self.code_objs[code] = code_obj
except SyntaxError as syntax_err:
log.error("cannot compile %s: %s", code, syntax_err)
raise
log.debug("compiled code %s", code)
|
Compile argument and adds it to the list of code objects.
|
train
|
https://github.com/elmotec/massedit/blob/57e22787354896d63a8850312314b19aa0308906/massedit.py#L262-L276
| null |
class MassEdit(object):
    """Mass edit lines of files.

    Applies registered code expressions, callables and/or an external
    executable to file contents, optionally rewriting files in place
    (keeping a ``.bak`` backup) and returning unified diffs.
    """

    def __init__(self, **kwds):
        """Initialize MassEdit object.

        Args:
        - code (byte code object): code to execute on input file.
        - function (str or callable): function to call on input file.
        - module (str): module name where to find the function.
        - executable (str): executable file name to execute on input file.
        - dry_run (bool): skip actual modification of input file if True.
        - encoding (str): text encoding used to read/write files.
        - newline (str): newline convention used when writing files.
        """
        self.code_objs = dict()
        self._codes = []
        self._functions = []
        self._executables = []
        self.dry_run = None
        self.encoding = 'utf-8'
        self.newline = None
        if 'module' in kwds:
            self.import_module(kwds['module'])
        if 'code' in kwds:
            self.append_code_expr(kwds['code'])
        if 'function' in kwds:
            self.append_function(kwds['function'])
        if 'executable' in kwds:
            self.append_executable(kwds['executable'])
        if 'dry_run' in kwds:
            self.dry_run = kwds['dry_run']
        if 'encoding' in kwds:
            self.encoding = kwds['encoding']
        if 'newline' in kwds:
            self.newline = kwds['newline']

    @staticmethod
    def import_module(module):  # pylint: disable=R0201
        """Import module(s) needed for the code expressions to compile.

        Argument:
        module (str or list): module(s) to import.
        """
        if isinstance(module, list):
            all_modules = module
        else:
            all_modules = [module]
        for mod in all_modules:
            # Injected into globals() so eval'd code expressions can see it.
            globals()[mod] = __import__(mod.strip())

    @staticmethod
    def __edit_line(line, code, code_obj):  # pylint: disable=R0201
        """Edit a line with one code object built in the ctor."""
        # NOTE: the eval'd user expression reads the local variable `line`
        # through locals(); do not rename it.
        try:
            # pylint: disable=eval-used
            result = eval(code_obj, globals(), locals())
        except TypeError as ex:
            log.error("failed to execute %s: %s", code, ex)
            raise
        if result is None:
            log.error("cannot process line '%s' with %s", line, code)
            raise RuntimeError('failed to process line')
        elif isinstance(result, list) or isinstance(result, tuple):
            line = unicode(' '.join([unicode(res_element)
                                     for res_element in result]))
        else:
            line = unicode(result)
        return line

    def edit_line(self, line):
        """Edit a single line using the code expression."""
        for code, code_obj in self.code_objs.items():
            line = self.__edit_line(line, code, code_obj)
        return line

    def edit_content(self, original_lines, file_name):
        """Process a file's contents.

        First processes the contents line by line applying the registered
        expressions, then process the resulting contents using the
        registered functions.

        Arguments:
        original_lines (list of str): file content.
        file_name (str): name of the file.
        """
        lines = [self.edit_line(line) for line in original_lines]
        for function in self._functions:
            try:
                lines = list(function(lines, file_name))
            except UnicodeDecodeError as err:
                # Decoding failure: return what we have so far.
                log.error('failed to process %s: %s', file_name, err)
                return lines
            except Exception as err:
                log.error("failed to process %s with code %s: %s",
                          file_name, function, err)
                raise  # Let the exception be handled at a higher level.
        return lines

    def edit_file(self, file_name):
        """Edit file in place, returns a list of modifications (unified diff).

        Arguments:
        file_name (str, unicode): The name of the file.
        """
        with io.open(file_name, "r", encoding=self.encoding) as from_file:
            try:
                from_lines = from_file.readlines()
            except UnicodeDecodeError as err:
                log.error("encoding error (see --encoding): %s", err)
                raise
        if self._executables:
            # Only the first registered executable is ever run.
            nb_execs = len(self._executables)
            if nb_execs > 1:
                log.warn("found %d executables. Will use first one", nb_execs)
            exec_list = self._executables[0].split()
            exec_list.append(file_name)
            try:
                log.info("running %s...", " ".join(exec_list))
                output = subprocess.check_output(exec_list,
                                                 universal_newlines=True)
            except Exception as err:
                log.error("failed to execute %s: %s", " ".join(exec_list), err)
                raise  # Let the exception be handled at a higher level.
            to_lines = output.split(unicode("\n"))
        else:
            to_lines = from_lines
        # unified_diff wants structure of known length. Convert to a list.
        to_lines = list(self.edit_content(to_lines, file_name))
        diffs = difflib.unified_diff(from_lines, to_lines,
                                     fromfile=file_name, tofile='<new>')
        if not self.dry_run:
            # Refuse to clobber an existing backup from a previous run.
            bak_file_name = file_name + ".bak"
            if os.path.exists(bak_file_name):
                msg = "{} already exists".format(bak_file_name)
                if sys.version_info < (3, 3):
                    raise OSError(msg)
                else:
                    # noinspection PyCompatibility
                    # pylint: disable=undefined-variable
                    raise FileExistsError(msg)
            try:
                os.rename(file_name, bak_file_name)
                with io.open(file_name, 'w', encoding=self.encoding, newline=self.newline) as new:
                    new.writelines(to_lines)
                # Keeps mode of original file.
                shutil.copymode(bak_file_name, file_name)
            except Exception as err:
                log.error("failed to write output to %s: %s", file_name, err)
                # Try to recover: restore the backup over the original name.
                try:
                    os.rename(bak_file_name, file_name)
                except OSError as err:
                    log.error("failed to restore %s from %s: %s",
                              file_name, bak_file_name, err)
                raise
            try:
                os.unlink(bak_file_name)
            except OSError as err:
                # Non-fatal: a leftover .bak only wastes disk space.
                log.warning("failed to remove backup %s: %s",
                            bak_file_name, err)
        return list(diffs)

    def append_function(self, function):
        """Append the function to the list of functions to be called.

        If the function is already a callable, use it. If it's a type str
        try to interpret it as [module]:?<callable>, load the module
        if there is one and retrieve the callable.

        Argument:
        function (str or callable): function to call on input.
        """
        if not hasattr(function, '__call__'):
            function = get_function(function)
        if not hasattr(function, '__call__'):
            raise ValueError("function is expected to be callable")
        self._functions.append(function)
        log.debug("registered %s", function.__name__)

    def append_executable(self, executable):
        """Append an executable os command to the list to be called.

        Argument:
        executable (str): os callable executable.
        """
        if isinstance(executable, str) and not isinstance(executable, unicode):
            executable = unicode(executable)
        if not isinstance(executable, unicode):
            raise TypeError("expected executable name as str, not {}".
                            format(executable.__class__.__name__))
        self._executables.append(executable)

    def set_code_exprs(self, codes):
        """Convenience: sets all the code expressions at once."""
        self.code_objs = dict()
        self._codes = []
        for code in codes:
            self.append_code_expr(code)

    def set_functions(self, functions):
        """Check functions passed as argument and set them to be used."""
        for func in functions:
            try:
                self.append_function(func)
            except (ValueError, AttributeError) as ex:
                log.error("'%s' is not a callable function: %s", func, ex)
                raise

    def set_executables(self, executables):
        """Check and set the executables to be used."""
        for exc in executables:
            self.append_executable(exc)
|
elmotec/massedit
|
massedit.py
|
MassEdit.append_function
|
python
|
def append_function(self, function):
if not hasattr(function, '__call__'):
function = get_function(function)
if not hasattr(function, '__call__'):
raise ValueError("function is expected to be callable")
self._functions.append(function)
log.debug("registered %s", function.__name__)
|
Append the function to the list of functions to be called.
If the function is already a callable, use it. If it's a type str
try to interpret it as [module]:?<callable>, load the module
if there is one and retrieve the callable.
Argument:
function (str or callable): function to call on input.
|
train
|
https://github.com/elmotec/massedit/blob/57e22787354896d63a8850312314b19aa0308906/massedit.py#L278-L294
|
[
"def get_function(fn_name):\n \"\"\"Retrieve the function defined by the function_name.\n\n Arguments:\n fn_name: specification of the type module:function_name.\n\n \"\"\"\n module_name, callable_name = fn_name.split(':')\n current = globals()\n if not callable_name:\n callable_name = module_name\n else:\n import importlib\n try:\n module = importlib.import_module(module_name)\n except ImportError:\n log.error(\"failed to import %s\", module_name)\n raise\n current = module\n for level in callable_name.split('.'):\n current = getattr(current, level)\n code = current.__code__\n if code.co_argcount != 2:\n raise ValueError('function should take 2 arguments: lines, file_name')\n return current\n"
] |
class MassEdit(object):
    """Mass edit lines of files.

    Applies registered code expressions, callables and/or an external
    executable to file contents, optionally rewriting files in place
    (keeping a ``.bak`` backup) and returning unified diffs.
    """

    def __init__(self, **kwds):
        """Initialize MassEdit object.

        Args:
        - code (byte code object): code to execute on input file.
        - function (str or callable): function to call on input file.
        - module (str): module name where to find the function.
        - executable (str): executable file name to execute on input file.
        - dry_run (bool): skip actual modification of input file if True.
        - encoding (str): text encoding used to read/write files.
        - newline (str): newline convention used when writing files.
        """
        self.code_objs = dict()
        self._codes = []
        self._functions = []
        self._executables = []
        self.dry_run = None
        self.encoding = 'utf-8'
        self.newline = None
        if 'module' in kwds:
            self.import_module(kwds['module'])
        if 'code' in kwds:
            self.append_code_expr(kwds['code'])
        if 'function' in kwds:
            self.append_function(kwds['function'])
        if 'executable' in kwds:
            self.append_executable(kwds['executable'])
        if 'dry_run' in kwds:
            self.dry_run = kwds['dry_run']
        if 'encoding' in kwds:
            self.encoding = kwds['encoding']
        if 'newline' in kwds:
            self.newline = kwds['newline']

    @staticmethod
    def import_module(module):  # pylint: disable=R0201
        """Import module(s) needed for the code expressions to compile.

        Argument:
        module (str or list): module(s) to import.
        """
        if isinstance(module, list):
            all_modules = module
        else:
            all_modules = [module]
        for mod in all_modules:
            # Injected into globals() so eval'd code expressions can see it.
            globals()[mod] = __import__(mod.strip())

    @staticmethod
    def __edit_line(line, code, code_obj):  # pylint: disable=R0201
        """Edit a line with one code object built in the ctor."""
        # NOTE: the eval'd user expression reads the local variable `line`
        # through locals(); do not rename it.
        try:
            # pylint: disable=eval-used
            result = eval(code_obj, globals(), locals())
        except TypeError as ex:
            log.error("failed to execute %s: %s", code, ex)
            raise
        if result is None:
            log.error("cannot process line '%s' with %s", line, code)
            raise RuntimeError('failed to process line')
        elif isinstance(result, list) or isinstance(result, tuple):
            line = unicode(' '.join([unicode(res_element)
                                     for res_element in result]))
        else:
            line = unicode(result)
        return line

    def edit_line(self, line):
        """Edit a single line using the code expression."""
        for code, code_obj in self.code_objs.items():
            line = self.__edit_line(line, code, code_obj)
        return line

    def edit_content(self, original_lines, file_name):
        """Process a file's contents.

        First processes the contents line by line applying the registered
        expressions, then process the resulting contents using the
        registered functions.

        Arguments:
        original_lines (list of str): file content.
        file_name (str): name of the file.
        """
        lines = [self.edit_line(line) for line in original_lines]
        for function in self._functions:
            try:
                lines = list(function(lines, file_name))
            except UnicodeDecodeError as err:
                # Decoding failure: return what we have so far.
                log.error('failed to process %s: %s', file_name, err)
                return lines
            except Exception as err:
                log.error("failed to process %s with code %s: %s",
                          file_name, function, err)
                raise  # Let the exception be handled at a higher level.
        return lines

    def edit_file(self, file_name):
        """Edit file in place, returns a list of modifications (unified diff).

        Arguments:
        file_name (str, unicode): The name of the file.
        """
        with io.open(file_name, "r", encoding=self.encoding) as from_file:
            try:
                from_lines = from_file.readlines()
            except UnicodeDecodeError as err:
                log.error("encoding error (see --encoding): %s", err)
                raise
        if self._executables:
            # Only the first registered executable is ever run.
            nb_execs = len(self._executables)
            if nb_execs > 1:
                log.warn("found %d executables. Will use first one", nb_execs)
            exec_list = self._executables[0].split()
            exec_list.append(file_name)
            try:
                log.info("running %s...", " ".join(exec_list))
                output = subprocess.check_output(exec_list,
                                                 universal_newlines=True)
            except Exception as err:
                log.error("failed to execute %s: %s", " ".join(exec_list), err)
                raise  # Let the exception be handled at a higher level.
            to_lines = output.split(unicode("\n"))
        else:
            to_lines = from_lines
        # unified_diff wants structure of known length. Convert to a list.
        to_lines = list(self.edit_content(to_lines, file_name))
        diffs = difflib.unified_diff(from_lines, to_lines,
                                     fromfile=file_name, tofile='<new>')
        if not self.dry_run:
            # Refuse to clobber an existing backup from a previous run.
            bak_file_name = file_name + ".bak"
            if os.path.exists(bak_file_name):
                msg = "{} already exists".format(bak_file_name)
                if sys.version_info < (3, 3):
                    raise OSError(msg)
                else:
                    # noinspection PyCompatibility
                    # pylint: disable=undefined-variable
                    raise FileExistsError(msg)
            try:
                os.rename(file_name, bak_file_name)
                with io.open(file_name, 'w', encoding=self.encoding, newline=self.newline) as new:
                    new.writelines(to_lines)
                # Keeps mode of original file.
                shutil.copymode(bak_file_name, file_name)
            except Exception as err:
                log.error("failed to write output to %s: %s", file_name, err)
                # Try to recover: restore the backup over the original name.
                try:
                    os.rename(bak_file_name, file_name)
                except OSError as err:
                    log.error("failed to restore %s from %s: %s",
                              file_name, bak_file_name, err)
                raise
            try:
                os.unlink(bak_file_name)
            except OSError as err:
                # Non-fatal: a leftover .bak only wastes disk space.
                log.warning("failed to remove backup %s: %s",
                            bak_file_name, err)
        return list(diffs)

    def append_code_expr(self, code):
        """Compile argument and adds it to the list of code objects."""
        # expects a string (py2: coerce byte str to unicode first).
        if isinstance(code, str) and not isinstance(code, unicode):
            code = unicode(code)
        if not isinstance(code, unicode):
            raise TypeError("string expected")
        log.debug("compiling code %s...", code)
        try:
            code_obj = compile(code, '<string>', 'eval')
            self.code_objs[code] = code_obj
        except SyntaxError as syntax_err:
            log.error("cannot compile %s: %s", code, syntax_err)
            raise
        log.debug("compiled code %s", code)

    def append_executable(self, executable):
        """Append an executable os command to the list to be called.

        Argument:
        executable (str): os callable executable.
        """
        if isinstance(executable, str) and not isinstance(executable, unicode):
            executable = unicode(executable)
        if not isinstance(executable, unicode):
            raise TypeError("expected executable name as str, not {}".
                            format(executable.__class__.__name__))
        self._executables.append(executable)

    def set_code_exprs(self, codes):
        """Convenience: sets all the code expressions at once."""
        self.code_objs = dict()
        self._codes = []
        for code in codes:
            self.append_code_expr(code)

    def set_functions(self, functions):
        """Check functions passed as argument and set them to be used."""
        for func in functions:
            try:
                self.append_function(func)
            except (ValueError, AttributeError) as ex:
                log.error("'%s' is not a callable function: %s", func, ex)
                raise

    def set_executables(self, executables):
        """Check and set the executables to be used."""
        for exc in executables:
            self.append_executable(exc)
|
elmotec/massedit
|
massedit.py
|
MassEdit.append_executable
|
python
|
def append_executable(self, executable):
if isinstance(executable, str) and not isinstance(executable, unicode):
executable = unicode(executable)
if not isinstance(executable, unicode):
raise TypeError("expected executable name as str, not {}".
format(executable.__class__.__name__))
self._executables.append(executable)
|
Append san executable os command to the list to be called.
Argument:
executable (str): os callable executable.
|
train
|
https://github.com/elmotec/massedit/blob/57e22787354896d63a8850312314b19aa0308906/massedit.py#L296-L308
| null |
class MassEdit(object):
"""Mass edit lines of files."""
def __init__(self, **kwds):
"""Initialize MassEdit object.
Args:
- code (byte code object): code to execute on input file.
- function (str or callable): function to call on input file.
- module (str): module name where to find the function.
- executable (str): executable file name to execute on input file.
- dry_run (bool): skip actual modification of input file if True.
"""
self.code_objs = dict()
self._codes = []
self._functions = []
self._executables = []
self.dry_run = None
self.encoding = 'utf-8'
self.newline = None
if 'module' in kwds:
self.import_module(kwds['module'])
if 'code' in kwds:
self.append_code_expr(kwds['code'])
if 'function' in kwds:
self.append_function(kwds['function'])
if 'executable' in kwds:
self.append_executable(kwds['executable'])
if 'dry_run' in kwds:
self.dry_run = kwds['dry_run']
if 'encoding' in kwds:
self.encoding = kwds['encoding']
if 'newline' in kwds:
self.newline = kwds['newline']
@staticmethod
def import_module(module): # pylint: disable=R0201
"""Import module that are needed for the code expr to compile.
Argument:
module (str or list): module(s) to import.
"""
if isinstance(module, list):
all_modules = module
else:
all_modules = [module]
for mod in all_modules:
globals()[mod] = __import__(mod.strip())
@staticmethod
def __edit_line(line, code, code_obj): # pylint: disable=R0201
"""Edit a line with one code object built in the ctor."""
try:
# pylint: disable=eval-used
result = eval(code_obj, globals(), locals())
except TypeError as ex:
log.error("failed to execute %s: %s", code, ex)
raise
if result is None:
log.error("cannot process line '%s' with %s", line, code)
raise RuntimeError('failed to process line')
elif isinstance(result, list) or isinstance(result, tuple):
line = unicode(' '.join([unicode(res_element)
for res_element in result]))
else:
line = unicode(result)
return line
def edit_line(self, line):
"""Edit a single line using the code expression."""
for code, code_obj in self.code_objs.items():
line = self.__edit_line(line, code, code_obj)
return line
def edit_content(self, original_lines, file_name):
    """Process a file's contents.

    The registered expressions are applied line by line first, then each
    registered function is applied to the whole resulting content.

    Arguments:
        original_lines (list of str): file content.
        file_name (str): name of the file.
    """
    processed = [self.edit_line(entry) for entry in original_lines]
    for func in self._functions:
        try:
            processed = list(func(processed, file_name))
        except UnicodeDecodeError as err:
            # Best effort: log and return what was processed so far.
            log.error('failed to process %s: %s', file_name, err)
            return processed
        except Exception as err:
            log.error("failed to process %s with code %s: %s",
                      file_name, func, err)
            raise  # Let the exception be handled at a higher level.
    return processed
def edit_file(self, file_name):
    """Edit file in place, returns a list of modifications (unified diff).

    The original content is preserved in a ``<file_name>.bak`` backup
    while the new content is written; the backup is restored on failure
    and removed on success. When dry_run is set, nothing is written and
    only the diff is returned.

    Arguments:
        file_name (str, unicode): The name of the file.

    Returns:
        list: unified diff lines between the old and the new content.

    Raises:
        UnicodeDecodeError: when the file cannot be decoded (see --encoding).
        FileExistsError: when the .bak backup file already exists
            (OSError on Python < 3.3).
    """
    with io.open(file_name, "r", encoding=self.encoding) as from_file:
        try:
            from_lines = from_file.readlines()
        except UnicodeDecodeError as err:
            log.error("encoding error (see --encoding): %s", err)
            raise
    if self._executables:
        nb_execs = len(self._executables)
        if nb_execs > 1:
            # Only one external command is supported per run.
            # FIX: log.warn is deprecated (removed in Python 3.13).
            log.warning("found %d executables. Will use first one", nb_execs)
        exec_list = self._executables[0].split()
        exec_list.append(file_name)
        try:
            log.info("running %s...", " ".join(exec_list))
            output = subprocess.check_output(exec_list,
                                             universal_newlines=True)
        except Exception as err:
            log.error("failed to execute %s: %s", " ".join(exec_list), err)
            raise  # Let the exception be handled at a higher level.
        to_lines = output.split(unicode("\n"))
    else:
        to_lines = from_lines
    # unified_diff wants structure of known length. Convert to a list.
    to_lines = list(self.edit_content(to_lines, file_name))
    diffs = difflib.unified_diff(from_lines, to_lines,
                                 fromfile=file_name, tofile='<new>')
    if not self.dry_run:
        bak_file_name = file_name + ".bak"
        if os.path.exists(bak_file_name):
            # Refuse to clobber an existing backup.
            msg = "{} already exists".format(bak_file_name)
            if sys.version_info < (3, 3):
                # FileExistsError does not exist before Python 3.3.
                raise OSError(msg)
            else:
                # noinspection PyCompatibility
                # pylint: disable=undefined-variable
                raise FileExistsError(msg)
        try:
            os.rename(file_name, bak_file_name)
            with io.open(file_name, 'w', encoding=self.encoding,
                         newline=self.newline) as new:
                new.writelines(to_lines)
            # Keeps mode of original file.
            shutil.copymode(bak_file_name, file_name)
        except Exception as err:
            log.error("failed to write output to %s: %s", file_name, err)
            # Try to recover by putting the backup back in place.
            try:
                os.rename(bak_file_name, file_name)
            except OSError as err:
                log.error("failed to restore %s from %s: %s",
                          file_name, bak_file_name, err)
            raise
        try:
            os.unlink(bak_file_name)
        except OSError as err:
            # Non-fatal: the edit succeeded, only the cleanup failed.
            log.warning("failed to remove backup %s: %s",
                        bak_file_name, err)
    return list(diffs)
def append_code_expr(self, code):
    """Compile the expression *code* and register its code object."""
    # Promote byte strings to unicode (a no-op on Python 3).
    if isinstance(code, str) and not isinstance(code, unicode):
        code = unicode(code)
    if not isinstance(code, unicode):
        raise TypeError("string expected")
    log.debug("compiling code %s...", code)
    try:
        self.code_objs[code] = compile(code, '<string>', 'eval')
    except SyntaxError as syntax_err:
        log.error("cannot compile %s: %s", code, syntax_err)
        raise
    log.debug("compiled code %s", code)
def append_function(self, function):
    """Append the function to the list of functions to be called.

    A callable is used as-is. A string is interpreted as
    [module]:?<callable>: the module (if any) is loaded and the callable
    retrieved from it.

    Argument:
        function (str or callable): function to call on input.
    """
    if not hasattr(function, '__call__'):
        function = get_function(function)
        if not hasattr(function, '__call__'):
            raise ValueError("function is expected to be callable")
    self._functions.append(function)
    log.debug("registered %s", function.__name__)
def set_code_exprs(self, codes):
    """Convenience: replace all registered code expressions at once."""
    self.code_objs = {}
    self._codes = []
    for expression in codes:
        self.append_code_expr(expression)
def set_functions(self, functions):
    """Validate the given functions and register them for use."""
    for candidate in functions:
        try:
            self.append_function(candidate)
        except (ValueError, AttributeError) as ex:
            log.error("'%s' is not a callable function: %s", candidate, ex)
            raise
def set_executables(self, executables):
    """Register each executable to be run on the input files."""
    for command in executables:
        self.append_executable(command)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.